/*
 * Testsuite for eBPF verifier
 *
 * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */

#include <endian.h>
#include <asm/types.h>
#include <linux/types.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <stddef.h>
#include <stdbool.h>
#include <sched.h>

#include <sys/capability.h>
#include <sys/resource.h>

#include <linux/unistd.h>
#include <linux/filter.h>
#include <linux/bpf_perf_event.h>
#include <linux/bpf.h>

#include <bpf/bpf.h>

#ifdef HAVE_GENHDR
# include "autoconf.h"
#else
# if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
#  define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
# endif
#endif

#include "../../../include/linux/filter.h"

#ifndef ARRAY_SIZE
# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#endif

#define MAX_INSNS	512
#define MAX_FIXUPS	8
#define MAX_NR_MAPS	4

#define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS	(1 << 0)
#define F_LOAD_WITH_STRICT_ALIGNMENT		(1 << 1)

struct bpf_test {
	const char *descr;
	struct bpf_insn	insns[MAX_INSNS];
	int fixup_map1[MAX_FIXUPS];
	int fixup_map2[MAX_FIXUPS];
	int fixup_prog[MAX_FIXUPS];
	int fixup_map_in_map[MAX_FIXUPS];
	const char *errstr;
	const char *errstr_unpriv;
	enum {
		UNDEF,
		ACCEPT,
		REJECT
	} result, result_unpriv;
	enum bpf_prog_type prog_type;
	uint8_t flags;
};
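/* Each bpf_test entry is one self-contained program for the in-kernel
 * verifier plus the outcome the runner expects.  A minimal entry, sketched
 * here only for illustration (the real ones start in tests[] below), looks
 * like:
 *
 *	{
 *		"empty prog must still init R0",
 *		.insns = {
 *			BPF_MOV64_IMM(BPF_REG_0, 0),
 *			BPF_EXIT_INSN(),
 *		},
 *		.result = ACCEPT,
 *	},
 *
 * The fixup_* arrays list instruction indices; before loading, the runner is
 * expected to create the corresponding map (or prog array) and patch the imm
 * field of each listed BPF_LD_MAP_FD instruction with the real fd, so tests
 * never hard-code descriptors.  errstr and errstr_unpriv are meant to be
 * matched against the verifier log when the privileged respectively
 * unprivileged load is expected to be rejected.
 */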
/* Note we want this to be 64 bit aligned so that the end of our array is
 * actually the end of the structure.
 */
#define MAX_ENTRIES 11

struct test_val {
	unsigned int index;
	int foo[MAX_ENTRIES];
};

static struct bpf_test tests[] = {
	{
		"add+sub+mul",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_1, 1),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
			BPF_MOV64_IMM(BPF_REG_2, 3),
			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"unreachable",
		.insns = {
			BPF_EXIT_INSN(),
			BPF_EXIT_INSN(),
		},
		.errstr = "unreachable",
		.result = REJECT,
	},
	{
		"unreachable2",
		.insns = {
			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "unreachable",
		.result = REJECT,
	},
	{
		"out of range jump",
		.insns = {
			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "jump out of range",
		.result = REJECT,
	},
	{
		"out of range jump2",
		.insns = {
			BPF_JMP_IMM(BPF_JA, 0, 0, -2),
			BPF_EXIT_INSN(),
		},
		.errstr = "jump out of range",
		.result = REJECT,
	},
	{
		"test1 ld_imm64",
		.insns = {
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_MOV64_IMM(BPF_REG_0, 2),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid BPF_LD_IMM insn",
		.errstr_unpriv = "R1 pointer comparison",
		.result = REJECT,
	},
	{
		"test2 ld_imm64",
		.insns = {
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid BPF_LD_IMM insn",
		.errstr_unpriv = "R1 pointer comparison",
		.result = REJECT,
	},
	{
		"test3 ld_imm64",
		.insns = {
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"test4 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"test5 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"test6 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
			BPF_RAW_INSN(0, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"test7 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
			BPF_RAW_INSN(0, 0, 0, 0, 1),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"test8 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 1, 1),
			BPF_RAW_INSN(0, 0, 0, 0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "uses reserved fields",
		.result = REJECT,
	},
	{
		"test9 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
			BPF_RAW_INSN(0, 0, 0, 1, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
"test10 ld_imm64", 235 .insns = { 236 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1), 237 BPF_RAW_INSN(0, BPF_REG_1, 0, 0, 1), 238 BPF_EXIT_INSN(), 239 }, 240 .errstr = "invalid bpf_ld_imm64 insn", 241 .result = REJECT, 242 }, 243 { 244 "test11 ld_imm64", 245 .insns = { 246 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1), 247 BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1), 248 BPF_EXIT_INSN(), 249 }, 250 .errstr = "invalid bpf_ld_imm64 insn", 251 .result = REJECT, 252 }, 253 { 254 "test12 ld_imm64", 255 .insns = { 256 BPF_MOV64_IMM(BPF_REG_1, 0), 257 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1), 258 BPF_RAW_INSN(0, 0, 0, 0, 1), 259 BPF_EXIT_INSN(), 260 }, 261 .errstr = "not pointing to valid bpf_map", 262 .result = REJECT, 263 }, 264 { 265 "test13 ld_imm64", 266 .insns = { 267 BPF_MOV64_IMM(BPF_REG_1, 0), 268 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1), 269 BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1), 270 BPF_EXIT_INSN(), 271 }, 272 .errstr = "invalid bpf_ld_imm64 insn", 273 .result = REJECT, 274 }, 275 { 276 "no bpf_exit", 277 .insns = { 278 BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2), 279 }, 280 .errstr = "jump out of range", 281 .result = REJECT, 282 }, 283 { 284 "loop (back-edge)", 285 .insns = { 286 BPF_JMP_IMM(BPF_JA, 0, 0, -1), 287 BPF_EXIT_INSN(), 288 }, 289 .errstr = "back-edge", 290 .result = REJECT, 291 }, 292 { 293 "loop2 (back-edge)", 294 .insns = { 295 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 296 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), 297 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0), 298 BPF_JMP_IMM(BPF_JA, 0, 0, -4), 299 BPF_EXIT_INSN(), 300 }, 301 .errstr = "back-edge", 302 .result = REJECT, 303 }, 304 { 305 "conditional loop", 306 .insns = { 307 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 308 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), 309 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0), 310 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3), 311 BPF_EXIT_INSN(), 312 }, 313 .errstr = "back-edge", 314 .result = REJECT, 315 }, 316 { 317 "read uninitialized register", 318 .insns = { 319 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 320 BPF_EXIT_INSN(), 321 }, 322 .errstr = "R2 !read_ok", 323 .result = REJECT, 324 }, 325 { 326 "read invalid register", 327 .insns = { 328 BPF_MOV64_REG(BPF_REG_0, -1), 329 BPF_EXIT_INSN(), 330 }, 331 .errstr = "R15 is invalid", 332 .result = REJECT, 333 }, 334 { 335 "program doesn't init R0 before exit", 336 .insns = { 337 BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1), 338 BPF_EXIT_INSN(), 339 }, 340 .errstr = "R0 !read_ok", 341 .result = REJECT, 342 }, 343 { 344 "program doesn't init R0 before exit in all branches", 345 .insns = { 346 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2), 347 BPF_MOV64_IMM(BPF_REG_0, 1), 348 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2), 349 BPF_EXIT_INSN(), 350 }, 351 .errstr = "R0 !read_ok", 352 .errstr_unpriv = "R1 pointer comparison", 353 .result = REJECT, 354 }, 355 { 356 "stack out of bounds", 357 .insns = { 358 BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0), 359 BPF_EXIT_INSN(), 360 }, 361 .errstr = "invalid stack", 362 .result = REJECT, 363 }, 364 { 365 "invalid call insn1", 366 .insns = { 367 BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0), 368 BPF_EXIT_INSN(), 369 }, 370 .errstr = "BPF_CALL uses reserved", 371 .result = REJECT, 372 }, 373 { 374 "invalid call insn2", 375 .insns = { 376 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0), 377 BPF_EXIT_INSN(), 378 }, 379 .errstr = "BPF_CALL uses reserved", 380 .result = REJECT, 381 }, 382 { 383 "invalid function call", 384 .insns = { 385 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567), 386 BPF_EXIT_INSN(), 387 }, 388 .errstr = 
"invalid func unknown#1234567", 389 .result = REJECT, 390 }, 391 { 392 "uninitialized stack1", 393 .insns = { 394 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 395 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 396 BPF_LD_MAP_FD(BPF_REG_1, 0), 397 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 398 BPF_FUNC_map_lookup_elem), 399 BPF_EXIT_INSN(), 400 }, 401 .fixup_map1 = { 2 }, 402 .errstr = "invalid indirect read from stack", 403 .result = REJECT, 404 }, 405 { 406 "uninitialized stack2", 407 .insns = { 408 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 409 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8), 410 BPF_EXIT_INSN(), 411 }, 412 .errstr = "invalid read from stack", 413 .result = REJECT, 414 }, 415 { 416 "invalid fp arithmetic", 417 /* If this gets ever changed, make sure JITs can deal with it. */ 418 .insns = { 419 BPF_MOV64_IMM(BPF_REG_0, 0), 420 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 421 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 8), 422 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0), 423 BPF_EXIT_INSN(), 424 }, 425 .errstr_unpriv = "R1 pointer arithmetic", 426 .result_unpriv = REJECT, 427 .errstr = "R1 invalid mem access", 428 .result = REJECT, 429 }, 430 { 431 "non-invalid fp arithmetic", 432 .insns = { 433 BPF_MOV64_IMM(BPF_REG_0, 0), 434 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), 435 BPF_EXIT_INSN(), 436 }, 437 .result = ACCEPT, 438 }, 439 { 440 "invalid argument register", 441 .insns = { 442 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 443 BPF_FUNC_get_cgroup_classid), 444 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 445 BPF_FUNC_get_cgroup_classid), 446 BPF_EXIT_INSN(), 447 }, 448 .errstr = "R1 !read_ok", 449 .result = REJECT, 450 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 451 }, 452 { 453 "non-invalid argument register", 454 .insns = { 455 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1), 456 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 457 BPF_FUNC_get_cgroup_classid), 458 BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_6), 459 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 460 BPF_FUNC_get_cgroup_classid), 461 BPF_EXIT_INSN(), 462 }, 463 .result = ACCEPT, 464 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 465 }, 466 { 467 "check valid spill/fill", 468 .insns = { 469 /* spill R1(ctx) into stack */ 470 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), 471 /* fill it back into R2 */ 472 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8), 473 /* should be able to access R0 = *(R2 + 8) */ 474 /* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */ 475 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 476 BPF_EXIT_INSN(), 477 }, 478 .errstr_unpriv = "R0 leaks addr", 479 .result = ACCEPT, 480 .result_unpriv = REJECT, 481 }, 482 { 483 "check valid spill/fill, skb mark", 484 .insns = { 485 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1), 486 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8), 487 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8), 488 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 489 offsetof(struct __sk_buff, mark)), 490 BPF_EXIT_INSN(), 491 }, 492 .result = ACCEPT, 493 .result_unpriv = ACCEPT, 494 }, 495 { 496 "check corrupted spill/fill", 497 .insns = { 498 /* spill R1(ctx) into stack */ 499 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), 500 /* mess up with R1 pointer on stack */ 501 BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23), 502 /* fill back into R0 should fail */ 503 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8), 504 BPF_EXIT_INSN(), 505 }, 506 .errstr_unpriv = "attempt to corrupt spilled", 507 .errstr = "corrupted spill", 508 .result = REJECT, 509 }, 510 { 511 "invalid src register in STX", 512 .insns = { 513 BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1), 514 BPF_EXIT_INSN(), 515 
	{
		"invalid src register in STX",
		.insns = {
			BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "R15 is invalid",
		.result = REJECT,
	},
	{
		"invalid dst register in STX",
		.insns = {
			BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "R14 is invalid",
		.result = REJECT,
	},
	{
		"invalid dst register in ST",
		.insns = {
			BPF_ST_MEM(BPF_B, 14, -1, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "R14 is invalid",
		.result = REJECT,
	},
	{
		"invalid src register in LDX",
		.insns = {
			BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "R12 is invalid",
		.result = REJECT,
	},
	{
		"invalid dst register in LDX",
		.insns = {
			BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "R11 is invalid",
		.result = REJECT,
	},
	{
		"junk insn",
		.insns = {
			BPF_RAW_INSN(0, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid BPF_LD_IMM",
		.result = REJECT,
	},
	{
		"junk insn2",
		.insns = {
			BPF_RAW_INSN(1, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "BPF_LDX uses reserved fields",
		.result = REJECT,
	},
	{
		"junk insn3",
		.insns = {
			BPF_RAW_INSN(-1, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid BPF_ALU opcode f0",
		.result = REJECT,
	},
	{
		"junk insn4",
		.insns = {
			BPF_RAW_INSN(-1, -1, -1, -1, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid BPF_ALU opcode f0",
		.result = REJECT,
	},
	{
		"junk insn5",
		.insns = {
			BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "BPF_ALU uses reserved fields",
		.result = REJECT,
	},
	{
		"misaligned read from stack",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4),
			BPF_EXIT_INSN(),
		},
		.errstr = "misaligned access",
		.result = REJECT,
	},
	{
		"invalid map_fd for function call",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_delete_elem),
			BPF_EXIT_INSN(),
		},
		.errstr = "fd 0 is not pointing to valid bpf_map",
		.result = REJECT,
	},
	{
		"don't check return value before access",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 3 },
		.errstr = "R0 invalid mem access 'map_value_or_null'",
		.result = REJECT,
	},
	{
		"access memory with incorrect alignment",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 3 },
		.errstr = "misaligned access",
		.result = REJECT,
	},
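	/* Many of the map tests share the same preamble: a zero key is built
	 * at fp-8, BPF_REG_2 is pointed at it, BPF_LD_MAP_FD(BPF_REG_1, 0)
	 * gets its fd patched in via .fixup_map1, and bpf_map_lookup_elem()
	 * then leaves R0 as "map_value_or_null", which must be null-checked
	 * before the value may be dereferenced, as the tests above and below
	 * demonstrate.
	 */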
	{
		"sometimes access memory with incorrect alignment",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
			BPF_EXIT_INSN(),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 3 },
		.errstr = "R0 invalid mem access",
		.errstr_unpriv = "R0 leaks addr",
		.result = REJECT,
	},
	{
		"jump test 1",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "R1 pointer comparison",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		"jump test 2",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 14),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 11),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 8),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 5),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "R1 pointer comparison",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		"jump test 3",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 19),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
			BPF_JMP_IMM(BPF_JA, 0, 0, 15),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
			BPF_JMP_IMM(BPF_JA, 0, 0, 11),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40),
			BPF_JMP_IMM(BPF_JA, 0, 0, 7),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),
			BPF_JMP_IMM(BPF_JA, 0, 0, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_delete_elem),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 24 },
		.errstr_unpriv = "R1 pointer comparison",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		"jump test 4",
		.insns = {
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2), 771 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3), 772 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4), 773 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1), 774 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2), 775 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3), 776 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4), 777 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1), 778 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2), 779 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3), 780 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4), 781 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1), 782 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2), 783 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3), 784 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4), 785 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1), 786 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2), 787 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3), 788 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4), 789 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1), 790 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2), 791 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3), 792 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4), 793 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1), 794 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2), 795 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3), 796 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4), 797 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1), 798 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2), 799 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3), 800 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4), 801 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1), 802 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2), 803 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3), 804 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4), 805 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0), 806 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0), 807 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0), 808 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0), 809 BPF_MOV64_IMM(BPF_REG_0, 0), 810 BPF_EXIT_INSN(), 811 }, 812 .errstr_unpriv = "R1 pointer comparison", 813 .result_unpriv = REJECT, 814 .result = ACCEPT, 815 }, 816 { 817 "jump test 5", 818 .insns = { 819 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 820 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2), 821 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2), 822 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8), 823 BPF_JMP_IMM(BPF_JA, 0, 0, 2), 824 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8), 825 BPF_JMP_IMM(BPF_JA, 0, 0, 0), 826 BPF_MOV64_IMM(BPF_REG_0, 0), 827 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2), 828 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8), 829 BPF_JMP_IMM(BPF_JA, 0, 0, 2), 830 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8), 831 BPF_JMP_IMM(BPF_JA, 0, 0, 0), 832 BPF_MOV64_IMM(BPF_REG_0, 0), 833 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2), 834 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8), 835 BPF_JMP_IMM(BPF_JA, 0, 0, 2), 836 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8), 837 BPF_JMP_IMM(BPF_JA, 0, 0, 0), 838 BPF_MOV64_IMM(BPF_REG_0, 0), 839 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2), 840 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8), 841 BPF_JMP_IMM(BPF_JA, 0, 0, 2), 842 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8), 843 BPF_JMP_IMM(BPF_JA, 0, 0, 0), 844 BPF_MOV64_IMM(BPF_REG_0, 0), 845 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2), 846 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8), 847 BPF_JMP_IMM(BPF_JA, 0, 0, 2), 848 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8), 849 BPF_JMP_IMM(BPF_JA, 0, 0, 0), 850 BPF_MOV64_IMM(BPF_REG_0, 0), 851 
BPF_EXIT_INSN(), 852 }, 853 .errstr_unpriv = "R1 pointer comparison", 854 .result_unpriv = REJECT, 855 .result = ACCEPT, 856 }, 857 { 858 "access skb fields ok", 859 .insns = { 860 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 861 offsetof(struct __sk_buff, len)), 862 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1), 863 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 864 offsetof(struct __sk_buff, mark)), 865 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1), 866 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 867 offsetof(struct __sk_buff, pkt_type)), 868 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1), 869 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 870 offsetof(struct __sk_buff, queue_mapping)), 871 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0), 872 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 873 offsetof(struct __sk_buff, protocol)), 874 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0), 875 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 876 offsetof(struct __sk_buff, vlan_present)), 877 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0), 878 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 879 offsetof(struct __sk_buff, vlan_tci)), 880 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0), 881 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 882 offsetof(struct __sk_buff, napi_id)), 883 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0), 884 BPF_EXIT_INSN(), 885 }, 886 .result = ACCEPT, 887 }, 888 { 889 "access skb fields bad1", 890 .insns = { 891 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4), 892 BPF_EXIT_INSN(), 893 }, 894 .errstr = "invalid bpf_context access", 895 .result = REJECT, 896 }, 897 { 898 "access skb fields bad2", 899 .insns = { 900 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9), 901 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 902 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 903 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 904 BPF_LD_MAP_FD(BPF_REG_1, 0), 905 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 906 BPF_FUNC_map_lookup_elem), 907 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), 908 BPF_EXIT_INSN(), 909 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 910 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 911 offsetof(struct __sk_buff, pkt_type)), 912 BPF_EXIT_INSN(), 913 }, 914 .fixup_map1 = { 4 }, 915 .errstr = "different pointers", 916 .errstr_unpriv = "R1 pointer comparison", 917 .result = REJECT, 918 }, 919 { 920 "access skb fields bad3", 921 .insns = { 922 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2), 923 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 924 offsetof(struct __sk_buff, pkt_type)), 925 BPF_EXIT_INSN(), 926 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 927 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 928 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 929 BPF_LD_MAP_FD(BPF_REG_1, 0), 930 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 931 BPF_FUNC_map_lookup_elem), 932 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), 933 BPF_EXIT_INSN(), 934 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 935 BPF_JMP_IMM(BPF_JA, 0, 0, -12), 936 }, 937 .fixup_map1 = { 6 }, 938 .errstr = "different pointers", 939 .errstr_unpriv = "R1 pointer comparison", 940 .result = REJECT, 941 }, 942 { 943 "access skb fields bad4", 944 .insns = { 945 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3), 946 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, 947 offsetof(struct __sk_buff, len)), 948 BPF_MOV64_IMM(BPF_REG_0, 0), 949 BPF_EXIT_INSN(), 950 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 951 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 952 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 953 BPF_LD_MAP_FD(BPF_REG_1, 0), 954 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 955 BPF_FUNC_map_lookup_elem), 956 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), 957 BPF_EXIT_INSN(), 958 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 959 BPF_JMP_IMM(BPF_JA, 0, 0, -13), 960 }, 961 .fixup_map1 = { 7 }, 962 .errstr = 
"different pointers", 963 .errstr_unpriv = "R1 pointer comparison", 964 .result = REJECT, 965 }, 966 { 967 "check skb->mark is not writeable by sockets", 968 .insns = { 969 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, 970 offsetof(struct __sk_buff, mark)), 971 BPF_EXIT_INSN(), 972 }, 973 .errstr = "invalid bpf_context access", 974 .errstr_unpriv = "R1 leaks addr", 975 .result = REJECT, 976 }, 977 { 978 "check skb->tc_index is not writeable by sockets", 979 .insns = { 980 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, 981 offsetof(struct __sk_buff, tc_index)), 982 BPF_EXIT_INSN(), 983 }, 984 .errstr = "invalid bpf_context access", 985 .errstr_unpriv = "R1 leaks addr", 986 .result = REJECT, 987 }, 988 { 989 "check cb access: byte", 990 .insns = { 991 BPF_MOV64_IMM(BPF_REG_0, 0), 992 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 993 offsetof(struct __sk_buff, cb[0])), 994 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 995 offsetof(struct __sk_buff, cb[0]) + 1), 996 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 997 offsetof(struct __sk_buff, cb[0]) + 2), 998 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 999 offsetof(struct __sk_buff, cb[0]) + 3), 1000 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 1001 offsetof(struct __sk_buff, cb[1])), 1002 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 1003 offsetof(struct __sk_buff, cb[1]) + 1), 1004 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 1005 offsetof(struct __sk_buff, cb[1]) + 2), 1006 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 1007 offsetof(struct __sk_buff, cb[1]) + 3), 1008 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 1009 offsetof(struct __sk_buff, cb[2])), 1010 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 1011 offsetof(struct __sk_buff, cb[2]) + 1), 1012 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 1013 offsetof(struct __sk_buff, cb[2]) + 2), 1014 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 1015 offsetof(struct __sk_buff, cb[2]) + 3), 1016 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 1017 offsetof(struct __sk_buff, cb[3])), 1018 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 1019 offsetof(struct __sk_buff, cb[3]) + 1), 1020 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 1021 offsetof(struct __sk_buff, cb[3]) + 2), 1022 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 1023 offsetof(struct __sk_buff, cb[3]) + 3), 1024 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 1025 offsetof(struct __sk_buff, cb[4])), 1026 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 1027 offsetof(struct __sk_buff, cb[4]) + 1), 1028 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 1029 offsetof(struct __sk_buff, cb[4]) + 2), 1030 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 1031 offsetof(struct __sk_buff, cb[4]) + 3), 1032 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 1033 offsetof(struct __sk_buff, cb[0])), 1034 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 1035 offsetof(struct __sk_buff, cb[0]) + 1), 1036 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 1037 offsetof(struct __sk_buff, cb[0]) + 2), 1038 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 1039 offsetof(struct __sk_buff, cb[0]) + 3), 1040 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 1041 offsetof(struct __sk_buff, cb[1])), 1042 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 1043 offsetof(struct __sk_buff, cb[1]) + 1), 1044 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 1045 offsetof(struct __sk_buff, cb[1]) + 2), 1046 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 1047 offsetof(struct __sk_buff, cb[1]) + 3), 1048 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 1049 offsetof(struct __sk_buff, cb[2])), 1050 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 1051 offsetof(struct __sk_buff, cb[2]) + 1), 1052 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 1053 offsetof(struct __sk_buff, cb[2]) + 
2), 1054 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 1055 offsetof(struct __sk_buff, cb[2]) + 3), 1056 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 1057 offsetof(struct __sk_buff, cb[3])), 1058 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 1059 offsetof(struct __sk_buff, cb[3]) + 1), 1060 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 1061 offsetof(struct __sk_buff, cb[3]) + 2), 1062 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 1063 offsetof(struct __sk_buff, cb[3]) + 3), 1064 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 1065 offsetof(struct __sk_buff, cb[4])), 1066 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 1067 offsetof(struct __sk_buff, cb[4]) + 1), 1068 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 1069 offsetof(struct __sk_buff, cb[4]) + 2), 1070 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 1071 offsetof(struct __sk_buff, cb[4]) + 3), 1072 BPF_EXIT_INSN(), 1073 }, 1074 .result = ACCEPT, 1075 }, 1076 { 1077 "__sk_buff->hash, offset 0, byte store not permitted", 1078 .insns = { 1079 BPF_MOV64_IMM(BPF_REG_0, 0), 1080 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 1081 offsetof(struct __sk_buff, hash)), 1082 BPF_EXIT_INSN(), 1083 }, 1084 .errstr = "invalid bpf_context access", 1085 .result = REJECT, 1086 }, 1087 { 1088 "__sk_buff->tc_index, offset 3, byte store not permitted", 1089 .insns = { 1090 BPF_MOV64_IMM(BPF_REG_0, 0), 1091 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 1092 offsetof(struct __sk_buff, tc_index) + 3), 1093 BPF_EXIT_INSN(), 1094 }, 1095 .errstr = "invalid bpf_context access", 1096 .result = REJECT, 1097 }, 1098 { 1099 "check skb->hash byte load permitted", 1100 .insns = { 1101 BPF_MOV64_IMM(BPF_REG_0, 0), 1102 #if __BYTE_ORDER == __LITTLE_ENDIAN 1103 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 1104 offsetof(struct __sk_buff, hash)), 1105 #else 1106 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 1107 offsetof(struct __sk_buff, hash) + 3), 1108 #endif 1109 BPF_EXIT_INSN(), 1110 }, 1111 .result = ACCEPT, 1112 }, 1113 { 1114 "check skb->hash byte load not permitted 1", 1115 .insns = { 1116 BPF_MOV64_IMM(BPF_REG_0, 0), 1117 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 1118 offsetof(struct __sk_buff, hash) + 1), 1119 BPF_EXIT_INSN(), 1120 }, 1121 .errstr = "invalid bpf_context access", 1122 .result = REJECT, 1123 }, 1124 { 1125 "check skb->hash byte load not permitted 2", 1126 .insns = { 1127 BPF_MOV64_IMM(BPF_REG_0, 0), 1128 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 1129 offsetof(struct __sk_buff, hash) + 2), 1130 BPF_EXIT_INSN(), 1131 }, 1132 .errstr = "invalid bpf_context access", 1133 .result = REJECT, 1134 }, 1135 { 1136 "check skb->hash byte load not permitted 3", 1137 .insns = { 1138 BPF_MOV64_IMM(BPF_REG_0, 0), 1139 #if __BYTE_ORDER == __LITTLE_ENDIAN 1140 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 1141 offsetof(struct __sk_buff, hash) + 3), 1142 #else 1143 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 1144 offsetof(struct __sk_buff, hash)), 1145 #endif 1146 BPF_EXIT_INSN(), 1147 }, 1148 .errstr = "invalid bpf_context access", 1149 .result = REJECT, 1150 }, 1151 { 1152 "check cb access: byte, wrong type", 1153 .insns = { 1154 BPF_MOV64_IMM(BPF_REG_0, 0), 1155 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 1156 offsetof(struct __sk_buff, cb[0])), 1157 BPF_EXIT_INSN(), 1158 }, 1159 .errstr = "invalid bpf_context access", 1160 .result = REJECT, 1161 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK, 1162 }, 1163 { 1164 "check cb access: half", 1165 .insns = { 1166 BPF_MOV64_IMM(BPF_REG_0, 0), 1167 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, 1168 offsetof(struct __sk_buff, cb[0])), 1169 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, 1170 offsetof(struct 
__sk_buff, cb[0]) + 2), 1171 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, 1172 offsetof(struct __sk_buff, cb[1])), 1173 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, 1174 offsetof(struct __sk_buff, cb[1]) + 2), 1175 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, 1176 offsetof(struct __sk_buff, cb[2])), 1177 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, 1178 offsetof(struct __sk_buff, cb[2]) + 2), 1179 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, 1180 offsetof(struct __sk_buff, cb[3])), 1181 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, 1182 offsetof(struct __sk_buff, cb[3]) + 2), 1183 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, 1184 offsetof(struct __sk_buff, cb[4])), 1185 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, 1186 offsetof(struct __sk_buff, cb[4]) + 2), 1187 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, 1188 offsetof(struct __sk_buff, cb[0])), 1189 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, 1190 offsetof(struct __sk_buff, cb[0]) + 2), 1191 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, 1192 offsetof(struct __sk_buff, cb[1])), 1193 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, 1194 offsetof(struct __sk_buff, cb[1]) + 2), 1195 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, 1196 offsetof(struct __sk_buff, cb[2])), 1197 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, 1198 offsetof(struct __sk_buff, cb[2]) + 2), 1199 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, 1200 offsetof(struct __sk_buff, cb[3])), 1201 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, 1202 offsetof(struct __sk_buff, cb[3]) + 2), 1203 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, 1204 offsetof(struct __sk_buff, cb[4])), 1205 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, 1206 offsetof(struct __sk_buff, cb[4]) + 2), 1207 BPF_EXIT_INSN(), 1208 }, 1209 .result = ACCEPT, 1210 }, 1211 { 1212 "check cb access: half, unaligned", 1213 .insns = { 1214 BPF_MOV64_IMM(BPF_REG_0, 0), 1215 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, 1216 offsetof(struct __sk_buff, cb[0]) + 1), 1217 BPF_EXIT_INSN(), 1218 }, 1219 .errstr = "misaligned access", 1220 .result = REJECT, 1221 }, 1222 { 1223 "check __sk_buff->hash, offset 0, half store not permitted", 1224 .insns = { 1225 BPF_MOV64_IMM(BPF_REG_0, 0), 1226 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, 1227 offsetof(struct __sk_buff, hash)), 1228 BPF_EXIT_INSN(), 1229 }, 1230 .errstr = "invalid bpf_context access", 1231 .result = REJECT, 1232 }, 1233 { 1234 "check __sk_buff->tc_index, offset 2, half store not permitted", 1235 .insns = { 1236 BPF_MOV64_IMM(BPF_REG_0, 0), 1237 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, 1238 offsetof(struct __sk_buff, tc_index) + 2), 1239 BPF_EXIT_INSN(), 1240 }, 1241 .errstr = "invalid bpf_context access", 1242 .result = REJECT, 1243 }, 1244 { 1245 "check skb->hash half load permitted", 1246 .insns = { 1247 BPF_MOV64_IMM(BPF_REG_0, 0), 1248 #if __BYTE_ORDER == __LITTLE_ENDIAN 1249 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, 1250 offsetof(struct __sk_buff, hash)), 1251 #else 1252 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, 1253 offsetof(struct __sk_buff, hash) + 2), 1254 #endif 1255 BPF_EXIT_INSN(), 1256 }, 1257 .result = ACCEPT, 1258 }, 1259 { 1260 "check skb->hash half load not permitted", 1261 .insns = { 1262 BPF_MOV64_IMM(BPF_REG_0, 0), 1263 #if __BYTE_ORDER == __LITTLE_ENDIAN 1264 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, 1265 offsetof(struct __sk_buff, hash) + 2), 1266 #else 1267 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, 1268 offsetof(struct __sk_buff, hash)), 1269 #endif 1270 BPF_EXIT_INSN(), 1271 }, 1272 .errstr = "invalid bpf_context access", 1273 .result = REJECT, 1274 }, 1275 { 1276 "check cb access: half, wrong type", 1277 .insns = { 1278 
BPF_MOV64_IMM(BPF_REG_0, 0), 1279 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, 1280 offsetof(struct __sk_buff, cb[0])), 1281 BPF_EXIT_INSN(), 1282 }, 1283 .errstr = "invalid bpf_context access", 1284 .result = REJECT, 1285 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK, 1286 }, 1287 { 1288 "check cb access: word", 1289 .insns = { 1290 BPF_MOV64_IMM(BPF_REG_0, 0), 1291 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 1292 offsetof(struct __sk_buff, cb[0])), 1293 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 1294 offsetof(struct __sk_buff, cb[1])), 1295 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 1296 offsetof(struct __sk_buff, cb[2])), 1297 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 1298 offsetof(struct __sk_buff, cb[3])), 1299 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 1300 offsetof(struct __sk_buff, cb[4])), 1301 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 1302 offsetof(struct __sk_buff, cb[0])), 1303 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 1304 offsetof(struct __sk_buff, cb[1])), 1305 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 1306 offsetof(struct __sk_buff, cb[2])), 1307 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 1308 offsetof(struct __sk_buff, cb[3])), 1309 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 1310 offsetof(struct __sk_buff, cb[4])), 1311 BPF_EXIT_INSN(), 1312 }, 1313 .result = ACCEPT, 1314 }, 1315 { 1316 "check cb access: word, unaligned 1", 1317 .insns = { 1318 BPF_MOV64_IMM(BPF_REG_0, 0), 1319 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 1320 offsetof(struct __sk_buff, cb[0]) + 2), 1321 BPF_EXIT_INSN(), 1322 }, 1323 .errstr = "misaligned access", 1324 .result = REJECT, 1325 }, 1326 { 1327 "check cb access: word, unaligned 2", 1328 .insns = { 1329 BPF_MOV64_IMM(BPF_REG_0, 0), 1330 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 1331 offsetof(struct __sk_buff, cb[4]) + 1), 1332 BPF_EXIT_INSN(), 1333 }, 1334 .errstr = "misaligned access", 1335 .result = REJECT, 1336 }, 1337 { 1338 "check cb access: word, unaligned 3", 1339 .insns = { 1340 BPF_MOV64_IMM(BPF_REG_0, 0), 1341 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 1342 offsetof(struct __sk_buff, cb[4]) + 2), 1343 BPF_EXIT_INSN(), 1344 }, 1345 .errstr = "misaligned access", 1346 .result = REJECT, 1347 }, 1348 { 1349 "check cb access: word, unaligned 4", 1350 .insns = { 1351 BPF_MOV64_IMM(BPF_REG_0, 0), 1352 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 1353 offsetof(struct __sk_buff, cb[4]) + 3), 1354 BPF_EXIT_INSN(), 1355 }, 1356 .errstr = "misaligned access", 1357 .result = REJECT, 1358 }, 1359 { 1360 "check cb access: double", 1361 .insns = { 1362 BPF_MOV64_IMM(BPF_REG_0, 0), 1363 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 1364 offsetof(struct __sk_buff, cb[0])), 1365 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 1366 offsetof(struct __sk_buff, cb[2])), 1367 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 1368 offsetof(struct __sk_buff, cb[0])), 1369 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 1370 offsetof(struct __sk_buff, cb[2])), 1371 BPF_EXIT_INSN(), 1372 }, 1373 .result = ACCEPT, 1374 }, 1375 { 1376 "check cb access: double, unaligned 1", 1377 .insns = { 1378 BPF_MOV64_IMM(BPF_REG_0, 0), 1379 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 1380 offsetof(struct __sk_buff, cb[1])), 1381 BPF_EXIT_INSN(), 1382 }, 1383 .errstr = "misaligned access", 1384 .result = REJECT, 1385 }, 1386 { 1387 "check cb access: double, unaligned 2", 1388 .insns = { 1389 BPF_MOV64_IMM(BPF_REG_0, 0), 1390 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 1391 offsetof(struct __sk_buff, cb[3])), 1392 BPF_EXIT_INSN(), 1393 }, 1394 .errstr = "misaligned access", 1395 .result = REJECT, 1396 }, 1397 { 1398 "check cb access: double, oob 
1", 1399 .insns = { 1400 BPF_MOV64_IMM(BPF_REG_0, 0), 1401 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 1402 offsetof(struct __sk_buff, cb[4])), 1403 BPF_EXIT_INSN(), 1404 }, 1405 .errstr = "invalid bpf_context access", 1406 .result = REJECT, 1407 }, 1408 { 1409 "check cb access: double, oob 2", 1410 .insns = { 1411 BPF_MOV64_IMM(BPF_REG_0, 0), 1412 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 1413 offsetof(struct __sk_buff, cb[4])), 1414 BPF_EXIT_INSN(), 1415 }, 1416 .errstr = "invalid bpf_context access", 1417 .result = REJECT, 1418 }, 1419 { 1420 "check __sk_buff->ifindex dw store not permitted", 1421 .insns = { 1422 BPF_MOV64_IMM(BPF_REG_0, 0), 1423 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 1424 offsetof(struct __sk_buff, ifindex)), 1425 BPF_EXIT_INSN(), 1426 }, 1427 .errstr = "invalid bpf_context access", 1428 .result = REJECT, 1429 }, 1430 { 1431 "check __sk_buff->ifindex dw load not permitted", 1432 .insns = { 1433 BPF_MOV64_IMM(BPF_REG_0, 0), 1434 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 1435 offsetof(struct __sk_buff, ifindex)), 1436 BPF_EXIT_INSN(), 1437 }, 1438 .errstr = "invalid bpf_context access", 1439 .result = REJECT, 1440 }, 1441 { 1442 "check cb access: double, wrong type", 1443 .insns = { 1444 BPF_MOV64_IMM(BPF_REG_0, 0), 1445 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 1446 offsetof(struct __sk_buff, cb[0])), 1447 BPF_EXIT_INSN(), 1448 }, 1449 .errstr = "invalid bpf_context access", 1450 .result = REJECT, 1451 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK, 1452 }, 1453 { 1454 "check out of range skb->cb access", 1455 .insns = { 1456 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 1457 offsetof(struct __sk_buff, cb[0]) + 256), 1458 BPF_EXIT_INSN(), 1459 }, 1460 .errstr = "invalid bpf_context access", 1461 .errstr_unpriv = "", 1462 .result = REJECT, 1463 .prog_type = BPF_PROG_TYPE_SCHED_ACT, 1464 }, 1465 { 1466 "write skb fields from socket prog", 1467 .insns = { 1468 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 1469 offsetof(struct __sk_buff, cb[4])), 1470 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1), 1471 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 1472 offsetof(struct __sk_buff, mark)), 1473 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 1474 offsetof(struct __sk_buff, tc_index)), 1475 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1), 1476 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, 1477 offsetof(struct __sk_buff, cb[0])), 1478 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, 1479 offsetof(struct __sk_buff, cb[2])), 1480 BPF_EXIT_INSN(), 1481 }, 1482 .result = ACCEPT, 1483 .errstr_unpriv = "R1 leaks addr", 1484 .result_unpriv = REJECT, 1485 }, 1486 { 1487 "write skb fields from tc_cls_act prog", 1488 .insns = { 1489 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 1490 offsetof(struct __sk_buff, cb[0])), 1491 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 1492 offsetof(struct __sk_buff, mark)), 1493 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 1494 offsetof(struct __sk_buff, tc_index)), 1495 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 1496 offsetof(struct __sk_buff, tc_index)), 1497 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 1498 offsetof(struct __sk_buff, cb[3])), 1499 BPF_EXIT_INSN(), 1500 }, 1501 .errstr_unpriv = "", 1502 .result_unpriv = REJECT, 1503 .result = ACCEPT, 1504 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 1505 }, 1506 { 1507 "PTR_TO_STACK store/load", 1508 .insns = { 1509 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 1510 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10), 1511 BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c), 1512 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2), 1513 BPF_EXIT_INSN(), 1514 }, 1515 .result = ACCEPT, 1516 }, 1517 { 1518 "PTR_TO_STACK 
store/load - bad alignment on off", 1519 .insns = { 1520 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 1521 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), 1522 BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c), 1523 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2), 1524 BPF_EXIT_INSN(), 1525 }, 1526 .result = REJECT, 1527 .errstr = "misaligned access off -6 size 8", 1528 }, 1529 { 1530 "PTR_TO_STACK store/load - bad alignment on reg", 1531 .insns = { 1532 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 1533 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10), 1534 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c), 1535 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8), 1536 BPF_EXIT_INSN(), 1537 }, 1538 .result = REJECT, 1539 .errstr = "misaligned access off -2 size 8", 1540 }, 1541 { 1542 "PTR_TO_STACK store/load - out of bounds low", 1543 .insns = { 1544 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 1545 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000), 1546 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c), 1547 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8), 1548 BPF_EXIT_INSN(), 1549 }, 1550 .result = REJECT, 1551 .errstr = "invalid stack off=-79992 size=8", 1552 }, 1553 { 1554 "PTR_TO_STACK store/load - out of bounds high", 1555 .insns = { 1556 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 1557 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), 1558 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c), 1559 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8), 1560 BPF_EXIT_INSN(), 1561 }, 1562 .result = REJECT, 1563 .errstr = "invalid stack off=0 size=8", 1564 }, 1565 { 1566 "unpriv: return pointer", 1567 .insns = { 1568 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10), 1569 BPF_EXIT_INSN(), 1570 }, 1571 .result = ACCEPT, 1572 .result_unpriv = REJECT, 1573 .errstr_unpriv = "R0 leaks addr", 1574 }, 1575 { 1576 "unpriv: add const to pointer", 1577 .insns = { 1578 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), 1579 BPF_MOV64_IMM(BPF_REG_0, 0), 1580 BPF_EXIT_INSN(), 1581 }, 1582 .result = ACCEPT, 1583 .result_unpriv = REJECT, 1584 .errstr_unpriv = "R1 pointer arithmetic", 1585 }, 1586 { 1587 "unpriv: add pointer to pointer", 1588 .insns = { 1589 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10), 1590 BPF_MOV64_IMM(BPF_REG_0, 0), 1591 BPF_EXIT_INSN(), 1592 }, 1593 .result = ACCEPT, 1594 .result_unpriv = REJECT, 1595 .errstr_unpriv = "R1 pointer arithmetic", 1596 }, 1597 { 1598 "unpriv: neg pointer", 1599 .insns = { 1600 BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0), 1601 BPF_MOV64_IMM(BPF_REG_0, 0), 1602 BPF_EXIT_INSN(), 1603 }, 1604 .result = ACCEPT, 1605 .result_unpriv = REJECT, 1606 .errstr_unpriv = "R1 pointer arithmetic", 1607 }, 1608 { 1609 "unpriv: cmp pointer with const", 1610 .insns = { 1611 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0), 1612 BPF_MOV64_IMM(BPF_REG_0, 0), 1613 BPF_EXIT_INSN(), 1614 }, 1615 .result = ACCEPT, 1616 .result_unpriv = REJECT, 1617 .errstr_unpriv = "R1 pointer comparison", 1618 }, 1619 { 1620 "unpriv: cmp pointer with pointer", 1621 .insns = { 1622 BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0), 1623 BPF_MOV64_IMM(BPF_REG_0, 0), 1624 BPF_EXIT_INSN(), 1625 }, 1626 .result = ACCEPT, 1627 .result_unpriv = REJECT, 1628 .errstr_unpriv = "R10 pointer comparison", 1629 }, 1630 { 1631 "unpriv: check that printk is disallowed", 1632 .insns = { 1633 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 1634 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 1635 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), 1636 BPF_MOV64_IMM(BPF_REG_2, 8), 1637 BPF_MOV64_REG(BPF_REG_3, BPF_REG_1), 1638 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1639 BPF_FUNC_trace_printk), 1640 BPF_MOV64_IMM(BPF_REG_0, 0), 1641 BPF_EXIT_INSN(), 1642 }, 1643 .errstr_unpriv = "unknown 
func bpf_trace_printk#6", 1644 .result_unpriv = REJECT, 1645 .result = ACCEPT, 1646 }, 1647 { 1648 "unpriv: pass pointer to helper function", 1649 .insns = { 1650 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 1651 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 1652 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 1653 BPF_LD_MAP_FD(BPF_REG_1, 0), 1654 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2), 1655 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), 1656 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1657 BPF_FUNC_map_update_elem), 1658 BPF_MOV64_IMM(BPF_REG_0, 0), 1659 BPF_EXIT_INSN(), 1660 }, 1661 .fixup_map1 = { 3 }, 1662 .errstr_unpriv = "R4 leaks addr", 1663 .result_unpriv = REJECT, 1664 .result = ACCEPT, 1665 }, 1666 { 1667 "unpriv: indirectly pass pointer on stack to helper function", 1668 .insns = { 1669 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8), 1670 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 1671 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 1672 BPF_LD_MAP_FD(BPF_REG_1, 0), 1673 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1674 BPF_FUNC_map_lookup_elem), 1675 BPF_MOV64_IMM(BPF_REG_0, 0), 1676 BPF_EXIT_INSN(), 1677 }, 1678 .fixup_map1 = { 3 }, 1679 .errstr = "invalid indirect read from stack off -8+0 size 8", 1680 .result = REJECT, 1681 }, 1682 { 1683 "unpriv: mangle pointer on stack 1", 1684 .insns = { 1685 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8), 1686 BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0), 1687 BPF_MOV64_IMM(BPF_REG_0, 0), 1688 BPF_EXIT_INSN(), 1689 }, 1690 .errstr_unpriv = "attempt to corrupt spilled", 1691 .result_unpriv = REJECT, 1692 .result = ACCEPT, 1693 }, 1694 { 1695 "unpriv: mangle pointer on stack 2", 1696 .insns = { 1697 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8), 1698 BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0), 1699 BPF_MOV64_IMM(BPF_REG_0, 0), 1700 BPF_EXIT_INSN(), 1701 }, 1702 .errstr_unpriv = "attempt to corrupt spilled", 1703 .result_unpriv = REJECT, 1704 .result = ACCEPT, 1705 }, 1706 { 1707 "unpriv: read pointer from stack in small chunks", 1708 .insns = { 1709 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8), 1710 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8), 1711 BPF_MOV64_IMM(BPF_REG_0, 0), 1712 BPF_EXIT_INSN(), 1713 }, 1714 .errstr = "invalid size", 1715 .result = REJECT, 1716 }, 1717 { 1718 "unpriv: write pointer into ctx", 1719 .insns = { 1720 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0), 1721 BPF_MOV64_IMM(BPF_REG_0, 0), 1722 BPF_EXIT_INSN(), 1723 }, 1724 .errstr_unpriv = "R1 leaks addr", 1725 .result_unpriv = REJECT, 1726 .errstr = "invalid bpf_context access", 1727 .result = REJECT, 1728 }, 1729 { 1730 "unpriv: spill/fill of ctx", 1731 .insns = { 1732 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), 1733 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), 1734 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), 1735 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0), 1736 BPF_MOV64_IMM(BPF_REG_0, 0), 1737 BPF_EXIT_INSN(), 1738 }, 1739 .result = ACCEPT, 1740 }, 1741 { 1742 "unpriv: spill/fill of ctx 2", 1743 .insns = { 1744 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), 1745 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), 1746 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), 1747 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0), 1748 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1749 BPF_FUNC_get_hash_recalc), 1750 BPF_EXIT_INSN(), 1751 }, 1752 .result = ACCEPT, 1753 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 1754 }, 1755 { 1756 "unpriv: spill/fill of ctx 3", 1757 .insns = { 1758 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), 1759 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), 1760 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), 1761 BPF_STX_MEM(BPF_DW, BPF_REG_6, 
BPF_REG_10, 0), 1762 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0), 1763 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1764 BPF_FUNC_get_hash_recalc), 1765 BPF_EXIT_INSN(), 1766 }, 1767 .result = REJECT, 1768 .errstr = "R1 type=fp expected=ctx", 1769 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 1770 }, 1771 { 1772 "unpriv: spill/fill of ctx 4", 1773 .insns = { 1774 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), 1775 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), 1776 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), 1777 BPF_MOV64_IMM(BPF_REG_0, 1), 1778 BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_10, 1779 BPF_REG_0, -8, 0), 1780 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0), 1781 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1782 BPF_FUNC_get_hash_recalc), 1783 BPF_EXIT_INSN(), 1784 }, 1785 .result = REJECT, 1786 .errstr = "R1 type=inv expected=ctx", 1787 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 1788 }, 1789 { 1790 "unpriv: spill/fill of different pointers stx", 1791 .insns = { 1792 BPF_MOV64_IMM(BPF_REG_3, 42), 1793 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), 1794 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), 1795 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3), 1796 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 1797 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16), 1798 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0), 1799 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), 1800 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), 1801 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0), 1802 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3, 1803 offsetof(struct __sk_buff, mark)), 1804 BPF_MOV64_IMM(BPF_REG_0, 0), 1805 BPF_EXIT_INSN(), 1806 }, 1807 .result = REJECT, 1808 .errstr = "same insn cannot be used with different pointers", 1809 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 1810 }, 1811 { 1812 "unpriv: spill/fill of different pointers ldx", 1813 .insns = { 1814 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), 1815 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), 1816 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3), 1817 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 1818 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1819 -(__s32)offsetof(struct bpf_perf_event_data, 1820 sample_period) - 8), 1821 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0), 1822 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), 1823 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), 1824 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0), 1825 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 1826 offsetof(struct bpf_perf_event_data, 1827 sample_period)), 1828 BPF_MOV64_IMM(BPF_REG_0, 0), 1829 BPF_EXIT_INSN(), 1830 }, 1831 .result = REJECT, 1832 .errstr = "same insn cannot be used with different pointers", 1833 .prog_type = BPF_PROG_TYPE_PERF_EVENT, 1834 }, 1835 { 1836 "unpriv: write pointer into map elem value", 1837 .insns = { 1838 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 1839 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 1840 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 1841 BPF_LD_MAP_FD(BPF_REG_1, 0), 1842 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1843 BPF_FUNC_map_lookup_elem), 1844 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), 1845 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), 1846 BPF_EXIT_INSN(), 1847 }, 1848 .fixup_map1 = { 3 }, 1849 .errstr_unpriv = "R0 leaks addr", 1850 .result_unpriv = REJECT, 1851 .result = ACCEPT, 1852 }, 1853 { 1854 "unpriv: partial copy of pointer", 1855 .insns = { 1856 BPF_MOV32_REG(BPF_REG_1, BPF_REG_10), 1857 BPF_MOV64_IMM(BPF_REG_0, 0), 1858 BPF_EXIT_INSN(), 1859 }, 1860 .errstr_unpriv = "R10 partial copy", 1861 .result_unpriv = REJECT, 1862 .result = ACCEPT, 1863 }, 1864 { 1865 "unpriv: pass pointer to tail_call", 1866 .insns = { 1867 BPF_MOV64_REG(BPF_REG_3, BPF_REG_1), 
1868 BPF_LD_MAP_FD(BPF_REG_2, 0), 1869 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1870 BPF_FUNC_tail_call), 1871 BPF_MOV64_IMM(BPF_REG_0, 0), 1872 BPF_EXIT_INSN(), 1873 }, 1874 .fixup_prog = { 1 }, 1875 .errstr_unpriv = "R3 leaks addr into helper", 1876 .result_unpriv = REJECT, 1877 .result = ACCEPT, 1878 }, 1879 { 1880 "unpriv: cmp map pointer with zero", 1881 .insns = { 1882 BPF_MOV64_IMM(BPF_REG_1, 0), 1883 BPF_LD_MAP_FD(BPF_REG_1, 0), 1884 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0), 1885 BPF_MOV64_IMM(BPF_REG_0, 0), 1886 BPF_EXIT_INSN(), 1887 }, 1888 .fixup_map1 = { 1 }, 1889 .errstr_unpriv = "R1 pointer comparison", 1890 .result_unpriv = REJECT, 1891 .result = ACCEPT, 1892 }, 1893 { 1894 "unpriv: write into frame pointer", 1895 .insns = { 1896 BPF_MOV64_REG(BPF_REG_10, BPF_REG_1), 1897 BPF_MOV64_IMM(BPF_REG_0, 0), 1898 BPF_EXIT_INSN(), 1899 }, 1900 .errstr = "frame pointer is read only", 1901 .result = REJECT, 1902 }, 1903 { 1904 "unpriv: spill/fill frame pointer", 1905 .insns = { 1906 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), 1907 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), 1908 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0), 1909 BPF_LDX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, 0), 1910 BPF_MOV64_IMM(BPF_REG_0, 0), 1911 BPF_EXIT_INSN(), 1912 }, 1913 .errstr = "frame pointer is read only", 1914 .result = REJECT, 1915 }, 1916 { 1917 "unpriv: cmp of frame pointer", 1918 .insns = { 1919 BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0), 1920 BPF_MOV64_IMM(BPF_REG_0, 0), 1921 BPF_EXIT_INSN(), 1922 }, 1923 .errstr_unpriv = "R10 pointer comparison", 1924 .result_unpriv = REJECT, 1925 .result = ACCEPT, 1926 }, 1927 { 1928 "unpriv: adding of fp", 1929 .insns = { 1930 BPF_MOV64_IMM(BPF_REG_0, 0), 1931 BPF_MOV64_IMM(BPF_REG_1, 0), 1932 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10), 1933 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8), 1934 BPF_EXIT_INSN(), 1935 }, 1936 .errstr_unpriv = "pointer arithmetic prohibited", 1937 .result_unpriv = REJECT, 1938 .errstr = "R1 invalid mem access", 1939 .result = REJECT, 1940 }, 1941 { 1942 "unpriv: cmp of stack pointer", 1943 .insns = { 1944 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 1945 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 1946 BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0), 1947 BPF_MOV64_IMM(BPF_REG_0, 0), 1948 BPF_EXIT_INSN(), 1949 }, 1950 .errstr_unpriv = "R2 pointer comparison", 1951 .result_unpriv = REJECT, 1952 .result = ACCEPT, 1953 }, 1954 { 1955 "stack pointer arithmetic", 1956 .insns = { 1957 BPF_MOV64_IMM(BPF_REG_1, 4), 1958 BPF_JMP_IMM(BPF_JA, 0, 0, 0), 1959 BPF_MOV64_REG(BPF_REG_7, BPF_REG_10), 1960 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10), 1961 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10), 1962 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7), 1963 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1), 1964 BPF_ST_MEM(0, BPF_REG_2, 4, 0), 1965 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7), 1966 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8), 1967 BPF_ST_MEM(0, BPF_REG_2, 4, 0), 1968 BPF_MOV64_IMM(BPF_REG_0, 0), 1969 BPF_EXIT_INSN(), 1970 }, 1971 .result = ACCEPT, 1972 }, 1973 { 1974 "raw_stack: no skb_load_bytes", 1975 .insns = { 1976 BPF_MOV64_IMM(BPF_REG_2, 4), 1977 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), 1978 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), 1979 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), 1980 BPF_MOV64_IMM(BPF_REG_4, 8), 1981 /* Call to skb_load_bytes() omitted. 
*/ 1982 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), 1983 BPF_EXIT_INSN(), 1984 }, 1985 .result = REJECT, 1986 .errstr = "invalid read from stack off -8+0 size 8", 1987 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 1988 }, 1989 { 1990 "raw_stack: skb_load_bytes, negative len", 1991 .insns = { 1992 BPF_MOV64_IMM(BPF_REG_2, 4), 1993 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), 1994 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), 1995 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), 1996 BPF_MOV64_IMM(BPF_REG_4, -8), 1997 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1998 BPF_FUNC_skb_load_bytes), 1999 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), 2000 BPF_EXIT_INSN(), 2001 }, 2002 .result = REJECT, 2003 .errstr = "invalid stack type R3", 2004 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2005 }, 2006 { 2007 "raw_stack: skb_load_bytes, negative len 2", 2008 .insns = { 2009 BPF_MOV64_IMM(BPF_REG_2, 4), 2010 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), 2011 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), 2012 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), 2013 BPF_MOV64_IMM(BPF_REG_4, ~0), 2014 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 2015 BPF_FUNC_skb_load_bytes), 2016 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), 2017 BPF_EXIT_INSN(), 2018 }, 2019 .result = REJECT, 2020 .errstr = "invalid stack type R3", 2021 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2022 }, 2023 { 2024 "raw_stack: skb_load_bytes, zero len", 2025 .insns = { 2026 BPF_MOV64_IMM(BPF_REG_2, 4), 2027 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), 2028 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), 2029 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), 2030 BPF_MOV64_IMM(BPF_REG_4, 0), 2031 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 2032 BPF_FUNC_skb_load_bytes), 2033 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), 2034 BPF_EXIT_INSN(), 2035 }, 2036 .result = REJECT, 2037 .errstr = "invalid stack type R3", 2038 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2039 }, 2040 { 2041 "raw_stack: skb_load_bytes, no init", 2042 .insns = { 2043 BPF_MOV64_IMM(BPF_REG_2, 4), 2044 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), 2045 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), 2046 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), 2047 BPF_MOV64_IMM(BPF_REG_4, 8), 2048 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 2049 BPF_FUNC_skb_load_bytes), 2050 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), 2051 BPF_EXIT_INSN(), 2052 }, 2053 .result = ACCEPT, 2054 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2055 }, 2056 { 2057 "raw_stack: skb_load_bytes, init", 2058 .insns = { 2059 BPF_MOV64_IMM(BPF_REG_2, 4), 2060 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), 2061 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), 2062 BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xcafe), 2063 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), 2064 BPF_MOV64_IMM(BPF_REG_4, 8), 2065 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 2066 BPF_FUNC_skb_load_bytes), 2067 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), 2068 BPF_EXIT_INSN(), 2069 }, 2070 .result = ACCEPT, 2071 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2072 }, 2073 { 2074 "raw_stack: skb_load_bytes, spilled regs around bounds", 2075 .insns = { 2076 BPF_MOV64_IMM(BPF_REG_2, 4), 2077 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), 2078 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16), 2079 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8), 2080 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8), 2081 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), 2082 BPF_MOV64_IMM(BPF_REG_4, 8), 2083 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 2084 BPF_FUNC_skb_load_bytes), 2085 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8), 2086 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8), 2087 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 2088 
offsetof(struct __sk_buff, mark)), 2089 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2, 2090 offsetof(struct __sk_buff, priority)), 2091 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2), 2092 BPF_EXIT_INSN(), 2093 }, 2094 .result = ACCEPT, 2095 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2096 }, 2097 { 2098 "raw_stack: skb_load_bytes, spilled regs corruption", 2099 .insns = { 2100 BPF_MOV64_IMM(BPF_REG_2, 4), 2101 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), 2102 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), 2103 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), 2104 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), 2105 BPF_MOV64_IMM(BPF_REG_4, 8), 2106 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 2107 BPF_FUNC_skb_load_bytes), 2108 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), 2109 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 2110 offsetof(struct __sk_buff, mark)), 2111 BPF_EXIT_INSN(), 2112 }, 2113 .result = REJECT, 2114 .errstr = "R0 invalid mem access 'inv'", 2115 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2116 }, 2117 { 2118 "raw_stack: skb_load_bytes, spilled regs corruption 2", 2119 .insns = { 2120 BPF_MOV64_IMM(BPF_REG_2, 4), 2121 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), 2122 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16), 2123 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8), 2124 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), 2125 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8), 2126 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), 2127 BPF_MOV64_IMM(BPF_REG_4, 8), 2128 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 2129 BPF_FUNC_skb_load_bytes), 2130 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8), 2131 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8), 2132 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0), 2133 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 2134 offsetof(struct __sk_buff, mark)), 2135 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2, 2136 offsetof(struct __sk_buff, priority)), 2137 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2), 2138 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_3, 2139 offsetof(struct __sk_buff, pkt_type)), 2140 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3), 2141 BPF_EXIT_INSN(), 2142 }, 2143 .result = REJECT, 2144 .errstr = "R3 invalid mem access 'inv'", 2145 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2146 }, 2147 { 2148 "raw_stack: skb_load_bytes, spilled regs + data", 2149 .insns = { 2150 BPF_MOV64_IMM(BPF_REG_2, 4), 2151 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), 2152 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16), 2153 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8), 2154 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), 2155 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8), 2156 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), 2157 BPF_MOV64_IMM(BPF_REG_4, 8), 2158 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 2159 BPF_FUNC_skb_load_bytes), 2160 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8), 2161 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8), 2162 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0), 2163 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 2164 offsetof(struct __sk_buff, mark)), 2165 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2, 2166 offsetof(struct __sk_buff, priority)), 2167 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2), 2168 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3), 2169 BPF_EXIT_INSN(), 2170 }, 2171 .result = ACCEPT, 2172 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2173 }, 2174 { 2175 "raw_stack: skb_load_bytes, invalid access 1", 2176 .insns = { 2177 BPF_MOV64_IMM(BPF_REG_2, 4), 2178 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), 2179 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -513), 2180 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), 2181 BPF_MOV64_IMM(BPF_REG_4, 8), 2182 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 
0, 0, 0, 2183 BPF_FUNC_skb_load_bytes), 2184 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), 2185 BPF_EXIT_INSN(), 2186 }, 2187 .result = REJECT, 2188 .errstr = "invalid stack type R3 off=-513 access_size=8", 2189 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2190 }, 2191 { 2192 "raw_stack: skb_load_bytes, invalid access 2", 2193 .insns = { 2194 BPF_MOV64_IMM(BPF_REG_2, 4), 2195 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), 2196 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1), 2197 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), 2198 BPF_MOV64_IMM(BPF_REG_4, 8), 2199 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 2200 BPF_FUNC_skb_load_bytes), 2201 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), 2202 BPF_EXIT_INSN(), 2203 }, 2204 .result = REJECT, 2205 .errstr = "invalid stack type R3 off=-1 access_size=8", 2206 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2207 }, 2208 { 2209 "raw_stack: skb_load_bytes, invalid access 3", 2210 .insns = { 2211 BPF_MOV64_IMM(BPF_REG_2, 4), 2212 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), 2213 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 0xffffffff), 2214 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), 2215 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff), 2216 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 2217 BPF_FUNC_skb_load_bytes), 2218 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), 2219 BPF_EXIT_INSN(), 2220 }, 2221 .result = REJECT, 2222 .errstr = "invalid stack type R3 off=-1 access_size=-1", 2223 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2224 }, 2225 { 2226 "raw_stack: skb_load_bytes, invalid access 4", 2227 .insns = { 2228 BPF_MOV64_IMM(BPF_REG_2, 4), 2229 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), 2230 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1), 2231 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), 2232 BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff), 2233 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 2234 BPF_FUNC_skb_load_bytes), 2235 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), 2236 BPF_EXIT_INSN(), 2237 }, 2238 .result = REJECT, 2239 .errstr = "invalid stack type R3 off=-1 access_size=2147483647", 2240 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2241 }, 2242 { 2243 "raw_stack: skb_load_bytes, invalid access 5", 2244 .insns = { 2245 BPF_MOV64_IMM(BPF_REG_2, 4), 2246 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), 2247 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512), 2248 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), 2249 BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff), 2250 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 2251 BPF_FUNC_skb_load_bytes), 2252 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), 2253 BPF_EXIT_INSN(), 2254 }, 2255 .result = REJECT, 2256 .errstr = "invalid stack type R3 off=-512 access_size=2147483647", 2257 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2258 }, 2259 { 2260 "raw_stack: skb_load_bytes, invalid access 6", 2261 .insns = { 2262 BPF_MOV64_IMM(BPF_REG_2, 4), 2263 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), 2264 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512), 2265 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), 2266 BPF_MOV64_IMM(BPF_REG_4, 0), 2267 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 2268 BPF_FUNC_skb_load_bytes), 2269 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), 2270 BPF_EXIT_INSN(), 2271 }, 2272 .result = REJECT, 2273 .errstr = "invalid stack type R3 off=-512 access_size=0", 2274 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2275 }, 2276 { 2277 "raw_stack: skb_load_bytes, large access", 2278 .insns = { 2279 BPF_MOV64_IMM(BPF_REG_2, 4), 2280 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), 2281 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512), 2282 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), 2283 BPF_MOV64_IMM(BPF_REG_4, 512), 2284 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 2285 BPF_FUNC_skb_load_bytes), 
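/* The 512-byte read above targets the buffer at fp-512, which exactly fills
 * the 512-byte BPF stack window, so this large access is still expected to
 * be accepted.
 */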
2286 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), 2287 BPF_EXIT_INSN(), 2288 }, 2289 .result = ACCEPT, 2290 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2291 }, 2292 { 2293 "direct packet access: test1", 2294 .insns = { 2295 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 2296 offsetof(struct __sk_buff, data)), 2297 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 2298 offsetof(struct __sk_buff, data_end)), 2299 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 2300 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 2301 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), 2302 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), 2303 BPF_MOV64_IMM(BPF_REG_0, 0), 2304 BPF_EXIT_INSN(), 2305 }, 2306 .result = ACCEPT, 2307 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2308 }, 2309 { 2310 "direct packet access: test2", 2311 .insns = { 2312 BPF_MOV64_IMM(BPF_REG_0, 1), 2313 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, 2314 offsetof(struct __sk_buff, data_end)), 2315 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 2316 offsetof(struct __sk_buff, data)), 2317 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3), 2318 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14), 2319 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 15), 2320 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 7), 2321 BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_3, 12), 2322 BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 14), 2323 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 2324 offsetof(struct __sk_buff, data)), 2325 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4), 2326 BPF_MOV64_REG(BPF_REG_2, BPF_REG_1), 2327 BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 48), 2328 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 48), 2329 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2), 2330 BPF_MOV64_REG(BPF_REG_2, BPF_REG_3), 2331 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8), 2332 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, 2333 offsetof(struct __sk_buff, data_end)), 2334 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1), 2335 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_3, 4), 2336 BPF_MOV64_IMM(BPF_REG_0, 0), 2337 BPF_EXIT_INSN(), 2338 }, 2339 .result = ACCEPT, 2340 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2341 }, 2342 { 2343 "direct packet access: test3", 2344 .insns = { 2345 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 2346 offsetof(struct __sk_buff, data)), 2347 BPF_MOV64_IMM(BPF_REG_0, 0), 2348 BPF_EXIT_INSN(), 2349 }, 2350 .errstr = "invalid bpf_context access off=76", 2351 .result = REJECT, 2352 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER, 2353 }, 2354 { 2355 "direct packet access: test4 (write)", 2356 .insns = { 2357 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 2358 offsetof(struct __sk_buff, data)), 2359 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 2360 offsetof(struct __sk_buff, data_end)), 2361 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 2362 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 2363 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), 2364 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0), 2365 BPF_MOV64_IMM(BPF_REG_0, 0), 2366 BPF_EXIT_INSN(), 2367 }, 2368 .result = ACCEPT, 2369 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2370 }, 2371 { 2372 "direct packet access: test5 (pkt_end >= reg, good access)", 2373 .insns = { 2374 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 2375 offsetof(struct __sk_buff, data)), 2376 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 2377 offsetof(struct __sk_buff, data_end)), 2378 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 2379 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 2380 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2), 2381 BPF_MOV64_IMM(BPF_REG_0, 1), 2382 BPF_EXIT_INSN(), 2383 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), 2384 BPF_MOV64_IMM(BPF_REG_0, 0), 2385 BPF_EXIT_INSN(), 2386 }, 2387 .result = ACCEPT, 2388 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2389 }, 
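/* test5 above only dereferences the packet on the branch where
 * pkt_end >= reg held; test6 below performs the load on the fall-through
 * (unchecked) path instead and is expected to be rejected.
 */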
2390 { 2391 "direct packet access: test6 (pkt_end >= reg, bad access)", 2392 .insns = { 2393 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 2394 offsetof(struct __sk_buff, data)), 2395 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 2396 offsetof(struct __sk_buff, data_end)), 2397 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 2398 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 2399 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3), 2400 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), 2401 BPF_MOV64_IMM(BPF_REG_0, 1), 2402 BPF_EXIT_INSN(), 2403 BPF_MOV64_IMM(BPF_REG_0, 0), 2404 BPF_EXIT_INSN(), 2405 }, 2406 .errstr = "invalid access to packet", 2407 .result = REJECT, 2408 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2409 }, 2410 { 2411 "direct packet access: test7 (pkt_end >= reg, both accesses)", 2412 .insns = { 2413 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 2414 offsetof(struct __sk_buff, data)), 2415 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 2416 offsetof(struct __sk_buff, data_end)), 2417 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 2418 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 2419 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3), 2420 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), 2421 BPF_MOV64_IMM(BPF_REG_0, 1), 2422 BPF_EXIT_INSN(), 2423 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), 2424 BPF_MOV64_IMM(BPF_REG_0, 0), 2425 BPF_EXIT_INSN(), 2426 }, 2427 .errstr = "invalid access to packet", 2428 .result = REJECT, 2429 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2430 }, 2431 { 2432 "direct packet access: test8 (double test, variant 1)", 2433 .insns = { 2434 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 2435 offsetof(struct __sk_buff, data)), 2436 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 2437 offsetof(struct __sk_buff, data_end)), 2438 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 2439 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 2440 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 4), 2441 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), 2442 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), 2443 BPF_MOV64_IMM(BPF_REG_0, 1), 2444 BPF_EXIT_INSN(), 2445 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), 2446 BPF_MOV64_IMM(BPF_REG_0, 0), 2447 BPF_EXIT_INSN(), 2448 }, 2449 .result = ACCEPT, 2450 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2451 }, 2452 { 2453 "direct packet access: test9 (double test, variant 2)", 2454 .insns = { 2455 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 2456 offsetof(struct __sk_buff, data)), 2457 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 2458 offsetof(struct __sk_buff, data_end)), 2459 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 2460 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 2461 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2), 2462 BPF_MOV64_IMM(BPF_REG_0, 1), 2463 BPF_EXIT_INSN(), 2464 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), 2465 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), 2466 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), 2467 BPF_MOV64_IMM(BPF_REG_0, 0), 2468 BPF_EXIT_INSN(), 2469 }, 2470 .result = ACCEPT, 2471 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2472 }, 2473 { 2474 "direct packet access: test10 (write invalid)", 2475 .insns = { 2476 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 2477 offsetof(struct __sk_buff, data)), 2478 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 2479 offsetof(struct __sk_buff, data_end)), 2480 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 2481 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 2482 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2), 2483 BPF_MOV64_IMM(BPF_REG_0, 0), 2484 BPF_EXIT_INSN(), 2485 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0), 2486 BPF_MOV64_IMM(BPF_REG_0, 0), 2487 BPF_EXIT_INSN(), 2488 }, 2489 .errstr = "invalid access to packet", 2490 .result = REJECT, 2491 
.prog_type = BPF_PROG_TYPE_SCHED_CLS, 2492 }, 2493 { 2494 "direct packet access: test11 (shift, good access)", 2495 .insns = { 2496 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 2497 offsetof(struct __sk_buff, data)), 2498 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 2499 offsetof(struct __sk_buff, data_end)), 2500 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 2501 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22), 2502 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8), 2503 BPF_MOV64_IMM(BPF_REG_3, 144), 2504 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3), 2505 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23), 2506 BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 3), 2507 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2), 2508 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5), 2509 BPF_MOV64_IMM(BPF_REG_0, 1), 2510 BPF_EXIT_INSN(), 2511 BPF_MOV64_IMM(BPF_REG_0, 0), 2512 BPF_EXIT_INSN(), 2513 }, 2514 .result = ACCEPT, 2515 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2516 }, 2517 { 2518 "direct packet access: test12 (and, good access)", 2519 .insns = { 2520 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 2521 offsetof(struct __sk_buff, data)), 2522 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 2523 offsetof(struct __sk_buff, data_end)), 2524 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 2525 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22), 2526 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8), 2527 BPF_MOV64_IMM(BPF_REG_3, 144), 2528 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3), 2529 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23), 2530 BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15), 2531 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2), 2532 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5), 2533 BPF_MOV64_IMM(BPF_REG_0, 1), 2534 BPF_EXIT_INSN(), 2535 BPF_MOV64_IMM(BPF_REG_0, 0), 2536 BPF_EXIT_INSN(), 2537 }, 2538 .result = ACCEPT, 2539 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2540 }, 2541 { 2542 "direct packet access: test13 (branches, good access)", 2543 .insns = { 2544 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 2545 offsetof(struct __sk_buff, data)), 2546 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 2547 offsetof(struct __sk_buff, data_end)), 2548 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 2549 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22), 2550 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 13), 2551 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 2552 offsetof(struct __sk_buff, mark)), 2553 BPF_MOV64_IMM(BPF_REG_4, 1), 2554 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_4, 2), 2555 BPF_MOV64_IMM(BPF_REG_3, 14), 2556 BPF_JMP_IMM(BPF_JA, 0, 0, 1), 2557 BPF_MOV64_IMM(BPF_REG_3, 24), 2558 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3), 2559 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23), 2560 BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15), 2561 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2), 2562 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5), 2563 BPF_MOV64_IMM(BPF_REG_0, 1), 2564 BPF_EXIT_INSN(), 2565 BPF_MOV64_IMM(BPF_REG_0, 0), 2566 BPF_EXIT_INSN(), 2567 }, 2568 .result = ACCEPT, 2569 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2570 }, 2571 { 2572 "direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)", 2573 .insns = { 2574 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 2575 offsetof(struct __sk_buff, data)), 2576 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 2577 offsetof(struct __sk_buff, data_end)), 2578 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 2579 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22), 2580 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7), 2581 BPF_MOV64_IMM(BPF_REG_5, 12), 2582 BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 4), 2583 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2), 2584 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5), 2585 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0), 2586 BPF_MOV64_IMM(BPF_REG_0, 1), 2587 BPF_EXIT_INSN(), 2588 BPF_MOV64_IMM(BPF_REG_0, 
0), 2589 BPF_EXIT_INSN(), 2590 }, 2591 .result = ACCEPT, 2592 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2593 }, 2594 { 2595 "direct packet access: test15 (spill with xadd)", 2596 .insns = { 2597 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 2598 offsetof(struct __sk_buff, data)), 2599 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 2600 offsetof(struct __sk_buff, data_end)), 2601 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 2602 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 2603 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8), 2604 BPF_MOV64_IMM(BPF_REG_5, 4096), 2605 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), 2606 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), 2607 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), 2608 BPF_STX_XADD(BPF_DW, BPF_REG_4, BPF_REG_5, 0), 2609 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0), 2610 BPF_STX_MEM(BPF_W, BPF_REG_2, BPF_REG_5, 0), 2611 BPF_MOV64_IMM(BPF_REG_0, 0), 2612 BPF_EXIT_INSN(), 2613 }, 2614 .errstr = "R2 invalid mem access 'inv'", 2615 .result = REJECT, 2616 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2617 }, 2618 { 2619 "direct packet access: test16 (arith on data_end)", 2620 .insns = { 2621 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 2622 offsetof(struct __sk_buff, data)), 2623 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 2624 offsetof(struct __sk_buff, data_end)), 2625 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 2626 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 2627 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 16), 2628 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), 2629 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0), 2630 BPF_MOV64_IMM(BPF_REG_0, 0), 2631 BPF_EXIT_INSN(), 2632 }, 2633 .errstr = "invalid access to packet", 2634 .result = REJECT, 2635 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2636 }, 2637 { 2638 "direct packet access: test17 (pruning, alignment)", 2639 .insns = { 2640 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 2641 offsetof(struct __sk_buff, data)), 2642 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 2643 offsetof(struct __sk_buff, data_end)), 2644 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, 2645 offsetof(struct __sk_buff, mark)), 2646 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 2647 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 14), 2648 BPF_JMP_IMM(BPF_JGT, BPF_REG_7, 1, 4), 2649 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), 2650 BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, -4), 2651 BPF_MOV64_IMM(BPF_REG_0, 0), 2652 BPF_EXIT_INSN(), 2653 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1), 2654 BPF_JMP_A(-6), 2655 }, 2656 .errstr = "misaligned packet access off 2+15+-4 size 4", 2657 .result = REJECT, 2658 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2659 .flags = F_LOAD_WITH_STRICT_ALIGNMENT, 2660 }, 2661 { 2662 "direct packet access: test18 (imm += pkt_ptr, 1)", 2663 .insns = { 2664 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 2665 offsetof(struct __sk_buff, data)), 2666 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 2667 offsetof(struct __sk_buff, data_end)), 2668 BPF_MOV64_IMM(BPF_REG_0, 8), 2669 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2), 2670 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), 2671 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0), 2672 BPF_MOV64_IMM(BPF_REG_0, 0), 2673 BPF_EXIT_INSN(), 2674 }, 2675 .result = ACCEPT, 2676 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2677 }, 2678 { 2679 "direct packet access: test19 (imm += pkt_ptr, 2)", 2680 .insns = { 2681 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 2682 offsetof(struct __sk_buff, data)), 2683 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 2684 offsetof(struct __sk_buff, data_end)), 2685 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 2686 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 2687 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3), 2688 
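/* The bounds check above verified pkt_ptr + 8 against data_end; r4 below is
 * the constant 4 plus that packet pointer, so the byte store through r4 stays
 * inside the verified range and the program is expected to be accepted.
 */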
BPF_MOV64_IMM(BPF_REG_4, 4), 2689 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2), 2690 BPF_STX_MEM(BPF_B, BPF_REG_4, BPF_REG_4, 0), 2691 BPF_MOV64_IMM(BPF_REG_0, 0), 2692 BPF_EXIT_INSN(), 2693 }, 2694 .result = ACCEPT, 2695 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2696 }, 2697 { 2698 "direct packet access: test20 (x += pkt_ptr, 1)", 2699 .insns = { 2700 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 2701 offsetof(struct __sk_buff, data)), 2702 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 2703 offsetof(struct __sk_buff, data_end)), 2704 BPF_MOV64_IMM(BPF_REG_0, 0xffffffff), 2705 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), 2706 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8), 2707 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffff), 2708 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0), 2709 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2), 2710 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4), 2711 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xffff - 1), 2712 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1), 2713 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0), 2714 BPF_MOV64_IMM(BPF_REG_0, 0), 2715 BPF_EXIT_INSN(), 2716 }, 2717 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2718 .result = ACCEPT, 2719 }, 2720 { 2721 "direct packet access: test21 (x += pkt_ptr, 2)", 2722 .insns = { 2723 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 2724 offsetof(struct __sk_buff, data)), 2725 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 2726 offsetof(struct __sk_buff, data_end)), 2727 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 2728 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 2729 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9), 2730 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff), 2731 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -8), 2732 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8), 2733 BPF_ALU64_IMM(BPF_AND, BPF_REG_4, 0xffff), 2734 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2), 2735 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4), 2736 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xffff - 1), 2737 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1), 2738 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0), 2739 BPF_MOV64_IMM(BPF_REG_0, 0), 2740 BPF_EXIT_INSN(), 2741 }, 2742 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2743 .result = ACCEPT, 2744 }, 2745 { 2746 "direct packet access: test22 (x += pkt_ptr, 3)", 2747 .insns = { 2748 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 2749 offsetof(struct __sk_buff, data)), 2750 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 2751 offsetof(struct __sk_buff, data_end)), 2752 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 2753 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 2754 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -8), 2755 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_3, -16), 2756 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_10, -16), 2757 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 11), 2758 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8), 2759 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff), 2760 BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_4, -8), 2761 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8), 2762 BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 48), 2763 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2), 2764 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4), 2765 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2), 2766 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2), 2767 BPF_MOV64_IMM(BPF_REG_2, 1), 2768 BPF_STX_MEM(BPF_H, BPF_REG_4, BPF_REG_2, 0), 2769 BPF_MOV64_IMM(BPF_REG_0, 0), 2770 BPF_EXIT_INSN(), 2771 }, 2772 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2773 .result = ACCEPT, 2774 }, 2775 { 2776 "direct packet access: test23 (x += pkt_ptr, 4)", 2777 .insns = { 2778 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 2779 offsetof(struct __sk_buff, data)), 2780 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 2781 
offsetof(struct __sk_buff, data_end)), 2782 BPF_MOV64_IMM(BPF_REG_0, 0xffffffff), 2783 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), 2784 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8), 2785 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffff), 2786 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0), 2787 BPF_MOV64_IMM(BPF_REG_0, 31), 2788 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4), 2789 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2), 2790 BPF_MOV64_REG(BPF_REG_5, BPF_REG_0), 2791 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0xffff - 1), 2792 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), 2793 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0), 2794 BPF_MOV64_IMM(BPF_REG_0, 0), 2795 BPF_EXIT_INSN(), 2796 }, 2797 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2798 .result = REJECT, 2799 .errstr = "cannot add integer value with 47 upper zero bits to ptr_to_packet", 2800 }, 2801 { 2802 "direct packet access: test24 (x += pkt_ptr, 5)", 2803 .insns = { 2804 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 2805 offsetof(struct __sk_buff, data)), 2806 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 2807 offsetof(struct __sk_buff, data_end)), 2808 BPF_MOV64_IMM(BPF_REG_0, 0xffffffff), 2809 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), 2810 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8), 2811 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xff), 2812 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0), 2813 BPF_MOV64_IMM(BPF_REG_0, 64), 2814 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4), 2815 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2), 2816 BPF_MOV64_REG(BPF_REG_5, BPF_REG_0), 2817 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0xffff - 1), 2818 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), 2819 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0), 2820 BPF_MOV64_IMM(BPF_REG_0, 0), 2821 BPF_EXIT_INSN(), 2822 }, 2823 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2824 .result = ACCEPT, 2825 }, 2826 { 2827 "helper access to packet: test1, valid packet_ptr range", 2828 .insns = { 2829 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 2830 offsetof(struct xdp_md, data)), 2831 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 2832 offsetof(struct xdp_md, data_end)), 2833 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), 2834 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), 2835 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5), 2836 BPF_LD_MAP_FD(BPF_REG_1, 0), 2837 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2), 2838 BPF_MOV64_IMM(BPF_REG_4, 0), 2839 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 2840 BPF_FUNC_map_update_elem), 2841 BPF_MOV64_IMM(BPF_REG_0, 0), 2842 BPF_EXIT_INSN(), 2843 }, 2844 .fixup_map1 = { 5 }, 2845 .result_unpriv = ACCEPT, 2846 .result = ACCEPT, 2847 .prog_type = BPF_PROG_TYPE_XDP, 2848 }, 2849 { 2850 "helper access to packet: test2, unchecked packet_ptr", 2851 .insns = { 2852 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 2853 offsetof(struct xdp_md, data)), 2854 BPF_LD_MAP_FD(BPF_REG_1, 0), 2855 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 2856 BPF_FUNC_map_lookup_elem), 2857 BPF_MOV64_IMM(BPF_REG_0, 0), 2858 BPF_EXIT_INSN(), 2859 }, 2860 .fixup_map1 = { 1 }, 2861 .result = REJECT, 2862 .errstr = "invalid access to packet", 2863 .prog_type = BPF_PROG_TYPE_XDP, 2864 }, 2865 { 2866 "helper access to packet: test3, variable add", 2867 .insns = { 2868 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 2869 offsetof(struct xdp_md, data)), 2870 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 2871 offsetof(struct xdp_md, data_end)), 2872 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), 2873 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8), 2874 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10), 2875 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0), 2876 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), 2877 BPF_ALU64_REG(BPF_ADD, 
BPF_REG_4, BPF_REG_5), 2878 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4), 2879 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8), 2880 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4), 2881 BPF_LD_MAP_FD(BPF_REG_1, 0), 2882 BPF_MOV64_REG(BPF_REG_2, BPF_REG_4), 2883 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 2884 BPF_FUNC_map_lookup_elem), 2885 BPF_MOV64_IMM(BPF_REG_0, 0), 2886 BPF_EXIT_INSN(), 2887 }, 2888 .fixup_map1 = { 11 }, 2889 .result = ACCEPT, 2890 .prog_type = BPF_PROG_TYPE_XDP, 2891 }, 2892 { 2893 "helper access to packet: test4, packet_ptr with bad range", 2894 .insns = { 2895 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 2896 offsetof(struct xdp_md, data)), 2897 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 2898 offsetof(struct xdp_md, data_end)), 2899 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), 2900 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4), 2901 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2), 2902 BPF_MOV64_IMM(BPF_REG_0, 0), 2903 BPF_EXIT_INSN(), 2904 BPF_LD_MAP_FD(BPF_REG_1, 0), 2905 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 2906 BPF_FUNC_map_lookup_elem), 2907 BPF_MOV64_IMM(BPF_REG_0, 0), 2908 BPF_EXIT_INSN(), 2909 }, 2910 .fixup_map1 = { 7 }, 2911 .result = REJECT, 2912 .errstr = "invalid access to packet", 2913 .prog_type = BPF_PROG_TYPE_XDP, 2914 }, 2915 { 2916 "helper access to packet: test5, packet_ptr with too short range", 2917 .insns = { 2918 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 2919 offsetof(struct xdp_md, data)), 2920 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 2921 offsetof(struct xdp_md, data_end)), 2922 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1), 2923 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), 2924 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7), 2925 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3), 2926 BPF_LD_MAP_FD(BPF_REG_1, 0), 2927 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 2928 BPF_FUNC_map_lookup_elem), 2929 BPF_MOV64_IMM(BPF_REG_0, 0), 2930 BPF_EXIT_INSN(), 2931 }, 2932 .fixup_map1 = { 6 }, 2933 .result = REJECT, 2934 .errstr = "invalid access to packet", 2935 .prog_type = BPF_PROG_TYPE_XDP, 2936 }, 2937 { 2938 "helper access to packet: test6, cls valid packet_ptr range", 2939 .insns = { 2940 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 2941 offsetof(struct __sk_buff, data)), 2942 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 2943 offsetof(struct __sk_buff, data_end)), 2944 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), 2945 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), 2946 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5), 2947 BPF_LD_MAP_FD(BPF_REG_1, 0), 2948 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2), 2949 BPF_MOV64_IMM(BPF_REG_4, 0), 2950 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 2951 BPF_FUNC_map_update_elem), 2952 BPF_MOV64_IMM(BPF_REG_0, 0), 2953 BPF_EXIT_INSN(), 2954 }, 2955 .fixup_map1 = { 5 }, 2956 .result = ACCEPT, 2957 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2958 }, 2959 { 2960 "helper access to packet: test7, cls unchecked packet_ptr", 2961 .insns = { 2962 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 2963 offsetof(struct __sk_buff, data)), 2964 BPF_LD_MAP_FD(BPF_REG_1, 0), 2965 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 2966 BPF_FUNC_map_lookup_elem), 2967 BPF_MOV64_IMM(BPF_REG_0, 0), 2968 BPF_EXIT_INSN(), 2969 }, 2970 .fixup_map1 = { 1 }, 2971 .result = REJECT, 2972 .errstr = "invalid access to packet", 2973 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2974 }, 2975 { 2976 "helper access to packet: test8, cls variable add", 2977 .insns = { 2978 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 2979 offsetof(struct __sk_buff, data)), 2980 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 2981 offsetof(struct __sk_buff, data_end)), 2982 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), 2983 
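/* Below, the packet pointer in r4 is advanced by a value read from the packet
 * itself (a variable add) and re-checked against data_end before being passed
 * to the map lookup helper, so this cls program is expected to be accepted.
 */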
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8), 2984 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10), 2985 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0), 2986 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), 2987 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5), 2988 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4), 2989 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8), 2990 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4), 2991 BPF_LD_MAP_FD(BPF_REG_1, 0), 2992 BPF_MOV64_REG(BPF_REG_2, BPF_REG_4), 2993 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 2994 BPF_FUNC_map_lookup_elem), 2995 BPF_MOV64_IMM(BPF_REG_0, 0), 2996 BPF_EXIT_INSN(), 2997 }, 2998 .fixup_map1 = { 11 }, 2999 .result = ACCEPT, 3000 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 3001 }, 3002 { 3003 "helper access to packet: test9, cls packet_ptr with bad range", 3004 .insns = { 3005 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 3006 offsetof(struct __sk_buff, data)), 3007 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 3008 offsetof(struct __sk_buff, data_end)), 3009 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), 3010 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4), 3011 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2), 3012 BPF_MOV64_IMM(BPF_REG_0, 0), 3013 BPF_EXIT_INSN(), 3014 BPF_LD_MAP_FD(BPF_REG_1, 0), 3015 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 3016 BPF_FUNC_map_lookup_elem), 3017 BPF_MOV64_IMM(BPF_REG_0, 0), 3018 BPF_EXIT_INSN(), 3019 }, 3020 .fixup_map1 = { 7 }, 3021 .result = REJECT, 3022 .errstr = "invalid access to packet", 3023 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 3024 }, 3025 { 3026 "helper access to packet: test10, cls packet_ptr with too short range", 3027 .insns = { 3028 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 3029 offsetof(struct __sk_buff, data)), 3030 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 3031 offsetof(struct __sk_buff, data_end)), 3032 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1), 3033 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), 3034 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7), 3035 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3), 3036 BPF_LD_MAP_FD(BPF_REG_1, 0), 3037 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 3038 BPF_FUNC_map_lookup_elem), 3039 BPF_MOV64_IMM(BPF_REG_0, 0), 3040 BPF_EXIT_INSN(), 3041 }, 3042 .fixup_map1 = { 6 }, 3043 .result = REJECT, 3044 .errstr = "invalid access to packet", 3045 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 3046 }, 3047 { 3048 "helper access to packet: test11, cls unsuitable helper 1", 3049 .insns = { 3050 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, 3051 offsetof(struct __sk_buff, data)), 3052 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, 3053 offsetof(struct __sk_buff, data_end)), 3054 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1), 3055 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), 3056 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 7), 3057 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_7, 4), 3058 BPF_MOV64_IMM(BPF_REG_2, 0), 3059 BPF_MOV64_IMM(BPF_REG_4, 42), 3060 BPF_MOV64_IMM(BPF_REG_5, 0), 3061 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 3062 BPF_FUNC_skb_store_bytes), 3063 BPF_MOV64_IMM(BPF_REG_0, 0), 3064 BPF_EXIT_INSN(), 3065 }, 3066 .result = REJECT, 3067 .errstr = "helper access to the packet", 3068 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 3069 }, 3070 { 3071 "helper access to packet: test12, cls unsuitable helper 2", 3072 .insns = { 3073 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, 3074 offsetof(struct __sk_buff, data)), 3075 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, 3076 offsetof(struct __sk_buff, data_end)), 3077 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), 3078 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8), 3079 BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 3), 3080 BPF_MOV64_IMM(BPF_REG_2, 0), 3081 BPF_MOV64_IMM(BPF_REG_4, 4), 3082 BPF_RAW_INSN(BPF_JMP 
| BPF_CALL, 0, 0, 0, 3083 BPF_FUNC_skb_load_bytes), 3084 BPF_MOV64_IMM(BPF_REG_0, 0), 3085 BPF_EXIT_INSN(), 3086 }, 3087 .result = REJECT, 3088 .errstr = "helper access to the packet", 3089 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 3090 }, 3091 { 3092 "helper access to packet: test13, cls helper ok", 3093 .insns = { 3094 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, 3095 offsetof(struct __sk_buff, data)), 3096 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, 3097 offsetof(struct __sk_buff, data_end)), 3098 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1), 3099 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 3100 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), 3101 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6), 3102 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 3103 BPF_MOV64_IMM(BPF_REG_2, 4), 3104 BPF_MOV64_IMM(BPF_REG_3, 0), 3105 BPF_MOV64_IMM(BPF_REG_4, 0), 3106 BPF_MOV64_IMM(BPF_REG_5, 0), 3107 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 3108 BPF_FUNC_csum_diff), 3109 BPF_MOV64_IMM(BPF_REG_0, 0), 3110 BPF_EXIT_INSN(), 3111 }, 3112 .result = ACCEPT, 3113 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 3114 }, 3115 { 3116 "helper access to packet: test14, cls helper fail sub", 3117 .insns = { 3118 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, 3119 offsetof(struct __sk_buff, data)), 3120 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, 3121 offsetof(struct __sk_buff, data_end)), 3122 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1), 3123 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 3124 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), 3125 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6), 3126 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 4), 3127 BPF_MOV64_IMM(BPF_REG_2, 4), 3128 BPF_MOV64_IMM(BPF_REG_3, 0), 3129 BPF_MOV64_IMM(BPF_REG_4, 0), 3130 BPF_MOV64_IMM(BPF_REG_5, 0), 3131 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 3132 BPF_FUNC_csum_diff), 3133 BPF_MOV64_IMM(BPF_REG_0, 0), 3134 BPF_EXIT_INSN(), 3135 }, 3136 .result = REJECT, 3137 .errstr = "type=inv expected=fp", 3138 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 3139 }, 3140 { 3141 "helper access to packet: test15, cls helper fail range 1", 3142 .insns = { 3143 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, 3144 offsetof(struct __sk_buff, data)), 3145 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, 3146 offsetof(struct __sk_buff, data_end)), 3147 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1), 3148 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 3149 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), 3150 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6), 3151 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 3152 BPF_MOV64_IMM(BPF_REG_2, 8), 3153 BPF_MOV64_IMM(BPF_REG_3, 0), 3154 BPF_MOV64_IMM(BPF_REG_4, 0), 3155 BPF_MOV64_IMM(BPF_REG_5, 0), 3156 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 3157 BPF_FUNC_csum_diff), 3158 BPF_MOV64_IMM(BPF_REG_0, 0), 3159 BPF_EXIT_INSN(), 3160 }, 3161 .result = REJECT, 3162 .errstr = "invalid access to packet", 3163 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 3164 }, 3165 { 3166 "helper access to packet: test16, cls helper fail range 2", 3167 .insns = { 3168 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, 3169 offsetof(struct __sk_buff, data)), 3170 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, 3171 offsetof(struct __sk_buff, data_end)), 3172 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1), 3173 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 3174 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), 3175 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6), 3176 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 3177 BPF_MOV64_IMM(BPF_REG_2, -9), 3178 BPF_MOV64_IMM(BPF_REG_3, 0), 3179 BPF_MOV64_IMM(BPF_REG_4, 0), 3180 BPF_MOV64_IMM(BPF_REG_5, 0), 3181 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 3182 BPF_FUNC_csum_diff), 3183 BPF_MOV64_IMM(BPF_REG_0, 0), 3184 
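/* The csum_diff() call above was given a negative length (-9) in r2, so the
 * range is invalid even though the packet pointer in r1 was bounds checked,
 * hence the "invalid access to packet" rejection.
 */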
BPF_EXIT_INSN(), 3185 }, 3186 .result = REJECT, 3187 .errstr = "invalid access to packet", 3188 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 3189 }, 3190 { 3191 "helper access to packet: test17, cls helper fail range 3", 3192 .insns = { 3193 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, 3194 offsetof(struct __sk_buff, data)), 3195 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, 3196 offsetof(struct __sk_buff, data_end)), 3197 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1), 3198 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 3199 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), 3200 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6), 3201 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 3202 BPF_MOV64_IMM(BPF_REG_2, ~0), 3203 BPF_MOV64_IMM(BPF_REG_3, 0), 3204 BPF_MOV64_IMM(BPF_REG_4, 0), 3205 BPF_MOV64_IMM(BPF_REG_5, 0), 3206 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 3207 BPF_FUNC_csum_diff), 3208 BPF_MOV64_IMM(BPF_REG_0, 0), 3209 BPF_EXIT_INSN(), 3210 }, 3211 .result = REJECT, 3212 .errstr = "invalid access to packet", 3213 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 3214 }, 3215 { 3216 "helper access to packet: test18, cls helper fail range zero", 3217 .insns = { 3218 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, 3219 offsetof(struct __sk_buff, data)), 3220 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, 3221 offsetof(struct __sk_buff, data_end)), 3222 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1), 3223 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 3224 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), 3225 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6), 3226 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 3227 BPF_MOV64_IMM(BPF_REG_2, 0), 3228 BPF_MOV64_IMM(BPF_REG_3, 0), 3229 BPF_MOV64_IMM(BPF_REG_4, 0), 3230 BPF_MOV64_IMM(BPF_REG_5, 0), 3231 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 3232 BPF_FUNC_csum_diff), 3233 BPF_MOV64_IMM(BPF_REG_0, 0), 3234 BPF_EXIT_INSN(), 3235 }, 3236 .result = REJECT, 3237 .errstr = "invalid access to packet", 3238 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 3239 }, 3240 { 3241 "helper access to packet: test19, pkt end as input", 3242 .insns = { 3243 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, 3244 offsetof(struct __sk_buff, data)), 3245 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, 3246 offsetof(struct __sk_buff, data_end)), 3247 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1), 3248 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 3249 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), 3250 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6), 3251 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), 3252 BPF_MOV64_IMM(BPF_REG_2, 4), 3253 BPF_MOV64_IMM(BPF_REG_3, 0), 3254 BPF_MOV64_IMM(BPF_REG_4, 0), 3255 BPF_MOV64_IMM(BPF_REG_5, 0), 3256 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 3257 BPF_FUNC_csum_diff), 3258 BPF_MOV64_IMM(BPF_REG_0, 0), 3259 BPF_EXIT_INSN(), 3260 }, 3261 .result = REJECT, 3262 .errstr = "R1 type=pkt_end expected=fp", 3263 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 3264 }, 3265 { 3266 "helper access to packet: test20, wrong reg", 3267 .insns = { 3268 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, 3269 offsetof(struct __sk_buff, data)), 3270 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, 3271 offsetof(struct __sk_buff, data_end)), 3272 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1), 3273 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 3274 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), 3275 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6), 3276 BPF_MOV64_IMM(BPF_REG_2, 4), 3277 BPF_MOV64_IMM(BPF_REG_3, 0), 3278 BPF_MOV64_IMM(BPF_REG_4, 0), 3279 BPF_MOV64_IMM(BPF_REG_5, 0), 3280 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 3281 BPF_FUNC_csum_diff), 3282 BPF_MOV64_IMM(BPF_REG_0, 0), 3283 BPF_EXIT_INSN(), 3284 }, 3285 .result = REJECT, 3286 .errstr = "invalid access to packet", 3287 .prog_type 
= BPF_PROG_TYPE_SCHED_CLS, 3288 }, 3289 { 3290 "valid map access into an array with a constant", 3291 .insns = { 3292 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 3293 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 3294 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 3295 BPF_LD_MAP_FD(BPF_REG_1, 0), 3296 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 3297 BPF_FUNC_map_lookup_elem), 3298 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), 3299 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 3300 offsetof(struct test_val, foo)), 3301 BPF_EXIT_INSN(), 3302 }, 3303 .fixup_map2 = { 3 }, 3304 .errstr_unpriv = "R0 leaks addr", 3305 .result_unpriv = REJECT, 3306 .result = ACCEPT, 3307 }, 3308 { 3309 "valid map access into an array with a register", 3310 .insns = { 3311 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 3312 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 3313 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 3314 BPF_LD_MAP_FD(BPF_REG_1, 0), 3315 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 3316 BPF_FUNC_map_lookup_elem), 3317 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), 3318 BPF_MOV64_IMM(BPF_REG_1, 4), 3319 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2), 3320 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 3321 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 3322 offsetof(struct test_val, foo)), 3323 BPF_EXIT_INSN(), 3324 }, 3325 .fixup_map2 = { 3 }, 3326 .errstr_unpriv = "R0 pointer arithmetic prohibited", 3327 .result_unpriv = REJECT, 3328 .result = ACCEPT, 3329 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 3330 }, 3331 { 3332 "valid map access into an array with a variable", 3333 .insns = { 3334 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 3335 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 3336 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 3337 BPF_LD_MAP_FD(BPF_REG_1, 0), 3338 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 3339 BPF_FUNC_map_lookup_elem), 3340 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), 3341 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), 3342 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 3), 3343 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2), 3344 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 3345 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 3346 offsetof(struct test_val, foo)), 3347 BPF_EXIT_INSN(), 3348 }, 3349 .fixup_map2 = { 3 }, 3350 .errstr_unpriv = "R0 pointer arithmetic prohibited", 3351 .result_unpriv = REJECT, 3352 .result = ACCEPT, 3353 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 3354 }, 3355 { 3356 "valid map access into an array with a signed variable", 3357 .insns = { 3358 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 3359 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 3360 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 3361 BPF_LD_MAP_FD(BPF_REG_1, 0), 3362 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 3363 BPF_FUNC_map_lookup_elem), 3364 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), 3365 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), 3366 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1), 3367 BPF_MOV32_IMM(BPF_REG_1, 0), 3368 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES), 3369 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1), 3370 BPF_MOV32_IMM(BPF_REG_1, 0), 3371 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2), 3372 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 3373 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 3374 offsetof(struct test_val, foo)), 3375 BPF_EXIT_INSN(), 3376 }, 3377 .fixup_map2 = { 3 }, 3378 .errstr_unpriv = "R0 pointer arithmetic prohibited", 3379 .result_unpriv = REJECT, 3380 .result = ACCEPT, 3381 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 3382 }, 3383 { 3384 "invalid map access into an array with a constant", 3385 .insns = { 3386 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 3387 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 3388 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 3389 
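/* The store into the looked-up value below uses the constant offset
 * (MAX_ENTRIES + 1) << 2, which starts right at the end of the 48-byte value,
 * so the verifier is expected to report
 * "invalid access to map value, value_size=48 off=48 size=8".
 */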
BPF_LD_MAP_FD(BPF_REG_1, 0), 3390 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 3391 BPF_FUNC_map_lookup_elem), 3392 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), 3393 BPF_ST_MEM(BPF_DW, BPF_REG_0, (MAX_ENTRIES + 1) << 2, 3394 offsetof(struct test_val, foo)), 3395 BPF_EXIT_INSN(), 3396 }, 3397 .fixup_map2 = { 3 }, 3398 .errstr = "invalid access to map value, value_size=48 off=48 size=8", 3399 .result = REJECT, 3400 }, 3401 { 3402 "invalid map access into an array with a register", 3403 .insns = { 3404 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 3405 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 3406 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 3407 BPF_LD_MAP_FD(BPF_REG_1, 0), 3408 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 3409 BPF_FUNC_map_lookup_elem), 3410 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), 3411 BPF_MOV64_IMM(BPF_REG_1, MAX_ENTRIES + 1), 3412 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2), 3413 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 3414 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 3415 offsetof(struct test_val, foo)), 3416 BPF_EXIT_INSN(), 3417 }, 3418 .fixup_map2 = { 3 }, 3419 .errstr_unpriv = "R0 pointer arithmetic prohibited", 3420 .errstr = "R0 min value is outside of the array range", 3421 .result_unpriv = REJECT, 3422 .result = REJECT, 3423 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 3424 }, 3425 { 3426 "invalid map access into an array with a variable", 3427 .insns = { 3428 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 3429 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 3430 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 3431 BPF_LD_MAP_FD(BPF_REG_1, 0), 3432 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 3433 BPF_FUNC_map_lookup_elem), 3434 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), 3435 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), 3436 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2), 3437 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 3438 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 3439 offsetof(struct test_val, foo)), 3440 BPF_EXIT_INSN(), 3441 }, 3442 .fixup_map2 = { 3 }, 3443 .errstr_unpriv = "R0 pointer arithmetic prohibited", 3444 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.", 3445 .result_unpriv = REJECT, 3446 .result = REJECT, 3447 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 3448 }, 3449 { 3450 "invalid map access into an array with no floor check", 3451 .insns = { 3452 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 3453 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 3454 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 3455 BPF_LD_MAP_FD(BPF_REG_1, 0), 3456 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 3457 BPF_FUNC_map_lookup_elem), 3458 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), 3459 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), 3460 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES), 3461 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1), 3462 BPF_MOV32_IMM(BPF_REG_1, 0), 3463 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2), 3464 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 3465 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 3466 offsetof(struct test_val, foo)), 3467 BPF_EXIT_INSN(), 3468 }, 3469 .fixup_map2 = { 3 }, 3470 .errstr_unpriv = "R0 pointer arithmetic prohibited", 3471 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.", 3472 .result_unpriv = REJECT, 3473 .result = REJECT, 3474 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 3475 }, 3476 { 3477 "invalid map access into an array with a invalid max check", 3478 .insns = { 3479 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 3480 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 3481 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 3482 BPF_LD_MAP_FD(BPF_REG_1, 0), 3483 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 3484 
BPF_FUNC_map_lookup_elem), 3485 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), 3486 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), 3487 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES + 1), 3488 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1), 3489 BPF_MOV32_IMM(BPF_REG_1, 0), 3490 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2), 3491 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 3492 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 3493 offsetof(struct test_val, foo)), 3494 BPF_EXIT_INSN(), 3495 }, 3496 .fixup_map2 = { 3 }, 3497 .errstr_unpriv = "R0 pointer arithmetic prohibited", 3498 .errstr = "invalid access to map value, value_size=48 off=44 size=8", 3499 .result_unpriv = REJECT, 3500 .result = REJECT, 3501 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 3502 }, 3503 { 3504 "invalid map access into an array with a invalid max check", 3505 .insns = { 3506 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 3507 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 3508 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 3509 BPF_LD_MAP_FD(BPF_REG_1, 0), 3510 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 3511 BPF_FUNC_map_lookup_elem), 3512 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10), 3513 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0), 3514 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 3515 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 3516 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 3517 BPF_LD_MAP_FD(BPF_REG_1, 0), 3518 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 3519 BPF_FUNC_map_lookup_elem), 3520 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), 3521 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8), 3522 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 3523 offsetof(struct test_val, foo)), 3524 BPF_EXIT_INSN(), 3525 }, 3526 .fixup_map2 = { 3, 11 }, 3527 .errstr_unpriv = "R0 pointer arithmetic prohibited", 3528 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.", 3529 .result_unpriv = REJECT, 3530 .result = REJECT, 3531 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 3532 }, 3533 { 3534 "multiple registers share map_lookup_elem result", 3535 .insns = { 3536 BPF_MOV64_IMM(BPF_REG_1, 10), 3537 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), 3538 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 3539 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 3540 BPF_LD_MAP_FD(BPF_REG_1, 0), 3541 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 3542 BPF_FUNC_map_lookup_elem), 3543 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0), 3544 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), 3545 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0), 3546 BPF_EXIT_INSN(), 3547 }, 3548 .fixup_map1 = { 4 }, 3549 .result = ACCEPT, 3550 .prog_type = BPF_PROG_TYPE_SCHED_CLS 3551 }, 3552 { 3553 "alu ops on ptr_to_map_value_or_null, 1", 3554 .insns = { 3555 BPF_MOV64_IMM(BPF_REG_1, 10), 3556 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), 3557 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 3558 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 3559 BPF_LD_MAP_FD(BPF_REG_1, 0), 3560 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 3561 BPF_FUNC_map_lookup_elem), 3562 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0), 3563 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -2), 3564 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2), 3565 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), 3566 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0), 3567 BPF_EXIT_INSN(), 3568 }, 3569 .fixup_map1 = { 4 }, 3570 .errstr = "R4 invalid mem access", 3571 .result = REJECT, 3572 .prog_type = BPF_PROG_TYPE_SCHED_CLS 3573 }, 3574 { 3575 "alu ops on ptr_to_map_value_or_null, 2", 3576 .insns = { 3577 BPF_MOV64_IMM(BPF_REG_1, 10), 3578 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), 3579 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 3580 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 3581 BPF_LD_MAP_FD(BPF_REG_1, 0), 3582 
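/* Masking the lookup result with an AND before the NULL check (below) is
 * expected to destroy its ptr_to_map_value_or_null type, so the later store
 * through r4 fails with "R4 invalid mem access".
 */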
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 3583 BPF_FUNC_map_lookup_elem), 3584 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0), 3585 BPF_ALU64_IMM(BPF_AND, BPF_REG_4, -1), 3586 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), 3587 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0), 3588 BPF_EXIT_INSN(), 3589 }, 3590 .fixup_map1 = { 4 }, 3591 .errstr = "R4 invalid mem access", 3592 .result = REJECT, 3593 .prog_type = BPF_PROG_TYPE_SCHED_CLS 3594 }, 3595 { 3596 "alu ops on ptr_to_map_value_or_null, 3", 3597 .insns = { 3598 BPF_MOV64_IMM(BPF_REG_1, 10), 3599 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), 3600 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 3601 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 3602 BPF_LD_MAP_FD(BPF_REG_1, 0), 3603 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 3604 BPF_FUNC_map_lookup_elem), 3605 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0), 3606 BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 1), 3607 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), 3608 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0), 3609 BPF_EXIT_INSN(), 3610 }, 3611 .fixup_map1 = { 4 }, 3612 .errstr = "R4 invalid mem access", 3613 .result = REJECT, 3614 .prog_type = BPF_PROG_TYPE_SCHED_CLS 3615 }, 3616 { 3617 "invalid memory access with multiple map_lookup_elem calls", 3618 .insns = { 3619 BPF_MOV64_IMM(BPF_REG_1, 10), 3620 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), 3621 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 3622 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 3623 BPF_LD_MAP_FD(BPF_REG_1, 0), 3624 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1), 3625 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2), 3626 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 3627 BPF_FUNC_map_lookup_elem), 3628 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0), 3629 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8), 3630 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7), 3631 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 3632 BPF_FUNC_map_lookup_elem), 3633 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), 3634 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0), 3635 BPF_EXIT_INSN(), 3636 }, 3637 .fixup_map1 = { 4 }, 3638 .result = REJECT, 3639 .errstr = "R4 !read_ok", 3640 .prog_type = BPF_PROG_TYPE_SCHED_CLS 3641 }, 3642 { 3643 "valid indirect map_lookup_elem access with 2nd lookup in branch", 3644 .insns = { 3645 BPF_MOV64_IMM(BPF_REG_1, 10), 3646 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), 3647 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 3648 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 3649 BPF_LD_MAP_FD(BPF_REG_1, 0), 3650 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1), 3651 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2), 3652 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 3653 BPF_FUNC_map_lookup_elem), 3654 BPF_MOV64_IMM(BPF_REG_2, 10), 3655 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 3), 3656 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8), 3657 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7), 3658 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 3659 BPF_FUNC_map_lookup_elem), 3660 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0), 3661 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), 3662 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0), 3663 BPF_EXIT_INSN(), 3664 }, 3665 .fixup_map1 = { 4 }, 3666 .result = ACCEPT, 3667 .prog_type = BPF_PROG_TYPE_SCHED_CLS 3668 }, 3669 { 3670 "multiple registers share map_lookup_elem bad reg type", 3671 .insns = { 3672 BPF_MOV64_IMM(BPF_REG_1, 10), 3673 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), 3674 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 3675 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 3676 BPF_LD_MAP_FD(BPF_REG_1, 0), 3677 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 3678 BPF_FUNC_map_lookup_elem), 3679 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), 3680 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0), 3681 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0), 3682 BPF_MOV64_REG(BPF_REG_5, BPF_REG_0), 3683 
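/* The lookup result has been copied into r2-r5 before any NULL check; the
 * test expects the eventual store through r3 to be flagged as
 * "R3 invalid mem access 'inv'".
 */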
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), 3684 BPF_MOV64_IMM(BPF_REG_1, 1), 3685 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), 3686 BPF_MOV64_IMM(BPF_REG_1, 2), 3687 BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0, 1), 3688 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 0), 3689 BPF_MOV64_IMM(BPF_REG_1, 3), 3690 BPF_EXIT_INSN(), 3691 }, 3692 .fixup_map1 = { 4 }, 3693 .result = REJECT, 3694 .errstr = "R3 invalid mem access 'inv'", 3695 .prog_type = BPF_PROG_TYPE_SCHED_CLS 3696 }, 3697 { 3698 "invalid map access from else condition", 3699 .insns = { 3700 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 3701 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 3702 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 3703 BPF_LD_MAP_FD(BPF_REG_1, 0), 3704 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 3705 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), 3706 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), 3707 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES-1, 1), 3708 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1), 3709 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2), 3710 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 3711 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)), 3712 BPF_EXIT_INSN(), 3713 }, 3714 .fixup_map2 = { 3 }, 3715 .errstr = "R0 unbounded memory access, make sure to bounds check any array access into a map", 3716 .result = REJECT, 3717 .errstr_unpriv = "R0 pointer arithmetic prohibited", 3718 .result_unpriv = REJECT, 3719 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 3720 }, 3721 { 3722 "constant register |= constant should keep constant type", 3723 .insns = { 3724 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 3725 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48), 3726 BPF_MOV64_IMM(BPF_REG_2, 34), 3727 BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 13), 3728 BPF_MOV64_IMM(BPF_REG_3, 0), 3729 BPF_EMIT_CALL(BPF_FUNC_probe_read), 3730 BPF_EXIT_INSN(), 3731 }, 3732 .result = ACCEPT, 3733 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 3734 }, 3735 { 3736 "constant register |= constant should not bypass stack boundary checks", 3737 .insns = { 3738 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 3739 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48), 3740 BPF_MOV64_IMM(BPF_REG_2, 34), 3741 BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 24), 3742 BPF_MOV64_IMM(BPF_REG_3, 0), 3743 BPF_EMIT_CALL(BPF_FUNC_probe_read), 3744 BPF_EXIT_INSN(), 3745 }, 3746 .errstr = "invalid stack type R1 off=-48 access_size=58", 3747 .result = REJECT, 3748 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 3749 }, 3750 { 3751 "constant register |= constant register should keep constant type", 3752 .insns = { 3753 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 3754 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48), 3755 BPF_MOV64_IMM(BPF_REG_2, 34), 3756 BPF_MOV64_IMM(BPF_REG_4, 13), 3757 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4), 3758 BPF_MOV64_IMM(BPF_REG_3, 0), 3759 BPF_EMIT_CALL(BPF_FUNC_probe_read), 3760 BPF_EXIT_INSN(), 3761 }, 3762 .result = ACCEPT, 3763 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 3764 }, 3765 { 3766 "constant register |= constant register should not bypass stack boundary checks", 3767 .insns = { 3768 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 3769 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48), 3770 BPF_MOV64_IMM(BPF_REG_2, 34), 3771 BPF_MOV64_IMM(BPF_REG_4, 24), 3772 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4), 3773 BPF_MOV64_IMM(BPF_REG_3, 0), 3774 BPF_EMIT_CALL(BPF_FUNC_probe_read), 3775 BPF_EXIT_INSN(), 3776 }, 3777 .errstr = "invalid stack type R1 off=-48 access_size=58", 3778 .result = REJECT, 3779 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 3780 }, 3781 { 3782 "invalid direct packet write for LWT_IN", 3783 .insns = { 3784 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 3785 
offsetof(struct __sk_buff, data)), 3786 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 3787 offsetof(struct __sk_buff, data_end)), 3788 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 3789 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 3790 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), 3791 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0), 3792 BPF_MOV64_IMM(BPF_REG_0, 0), 3793 BPF_EXIT_INSN(), 3794 }, 3795 .errstr = "cannot write into packet", 3796 .result = REJECT, 3797 .prog_type = BPF_PROG_TYPE_LWT_IN, 3798 }, 3799 { 3800 "invalid direct packet write for LWT_OUT", 3801 .insns = { 3802 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 3803 offsetof(struct __sk_buff, data)), 3804 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 3805 offsetof(struct __sk_buff, data_end)), 3806 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 3807 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 3808 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), 3809 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0), 3810 BPF_MOV64_IMM(BPF_REG_0, 0), 3811 BPF_EXIT_INSN(), 3812 }, 3813 .errstr = "cannot write into packet", 3814 .result = REJECT, 3815 .prog_type = BPF_PROG_TYPE_LWT_OUT, 3816 }, 3817 { 3818 "direct packet write for LWT_XMIT", 3819 .insns = { 3820 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 3821 offsetof(struct __sk_buff, data)), 3822 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 3823 offsetof(struct __sk_buff, data_end)), 3824 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 3825 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 3826 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), 3827 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0), 3828 BPF_MOV64_IMM(BPF_REG_0, 0), 3829 BPF_EXIT_INSN(), 3830 }, 3831 .result = ACCEPT, 3832 .prog_type = BPF_PROG_TYPE_LWT_XMIT, 3833 }, 3834 { 3835 "direct packet read for LWT_IN", 3836 .insns = { 3837 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 3838 offsetof(struct __sk_buff, data)), 3839 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 3840 offsetof(struct __sk_buff, data_end)), 3841 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 3842 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 3843 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), 3844 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), 3845 BPF_MOV64_IMM(BPF_REG_0, 0), 3846 BPF_EXIT_INSN(), 3847 }, 3848 .result = ACCEPT, 3849 .prog_type = BPF_PROG_TYPE_LWT_IN, 3850 }, 3851 { 3852 "direct packet read for LWT_OUT", 3853 .insns = { 3854 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 3855 offsetof(struct __sk_buff, data)), 3856 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 3857 offsetof(struct __sk_buff, data_end)), 3858 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 3859 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 3860 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), 3861 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), 3862 BPF_MOV64_IMM(BPF_REG_0, 0), 3863 BPF_EXIT_INSN(), 3864 }, 3865 .result = ACCEPT, 3866 .prog_type = BPF_PROG_TYPE_LWT_OUT, 3867 }, 3868 { 3869 "direct packet read for LWT_XMIT", 3870 .insns = { 3871 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 3872 offsetof(struct __sk_buff, data)), 3873 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 3874 offsetof(struct __sk_buff, data_end)), 3875 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 3876 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 3877 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), 3878 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), 3879 BPF_MOV64_IMM(BPF_REG_0, 0), 3880 BPF_EXIT_INSN(), 3881 }, 3882 .result = ACCEPT, 3883 .prog_type = BPF_PROG_TYPE_LWT_XMIT, 3884 }, 3885 { 3886 "overlapping checks for direct packet access", 3887 .insns = { 3888 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 3889 offsetof(struct __sk_buff, data)), 3890 BPF_LDX_MEM(BPF_W, BPF_REG_3, 
BPF_REG_1, 3891 offsetof(struct __sk_buff, data_end)), 3892 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 3893 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 3894 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4), 3895 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), 3896 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6), 3897 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1), 3898 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6), 3899 BPF_MOV64_IMM(BPF_REG_0, 0), 3900 BPF_EXIT_INSN(), 3901 }, 3902 .result = ACCEPT, 3903 .prog_type = BPF_PROG_TYPE_LWT_XMIT, 3904 }, 3905 { 3906 "invalid access of tc_classid for LWT_IN", 3907 .insns = { 3908 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 3909 offsetof(struct __sk_buff, tc_classid)), 3910 BPF_EXIT_INSN(), 3911 }, 3912 .result = REJECT, 3913 .errstr = "invalid bpf_context access", 3914 }, 3915 { 3916 "invalid access of tc_classid for LWT_OUT", 3917 .insns = { 3918 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 3919 offsetof(struct __sk_buff, tc_classid)), 3920 BPF_EXIT_INSN(), 3921 }, 3922 .result = REJECT, 3923 .errstr = "invalid bpf_context access", 3924 }, 3925 { 3926 "invalid access of tc_classid for LWT_XMIT", 3927 .insns = { 3928 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 3929 offsetof(struct __sk_buff, tc_classid)), 3930 BPF_EXIT_INSN(), 3931 }, 3932 .result = REJECT, 3933 .errstr = "invalid bpf_context access", 3934 }, 3935 { 3936 "leak pointer into ctx 1", 3937 .insns = { 3938 BPF_MOV64_IMM(BPF_REG_0, 0), 3939 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 3940 offsetof(struct __sk_buff, cb[0])), 3941 BPF_LD_MAP_FD(BPF_REG_2, 0), 3942 BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_2, 3943 offsetof(struct __sk_buff, cb[0])), 3944 BPF_EXIT_INSN(), 3945 }, 3946 .fixup_map1 = { 2 }, 3947 .errstr_unpriv = "R2 leaks addr into mem", 3948 .result_unpriv = REJECT, 3949 .result = ACCEPT, 3950 }, 3951 { 3952 "leak pointer into ctx 2", 3953 .insns = { 3954 BPF_MOV64_IMM(BPF_REG_0, 0), 3955 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 3956 offsetof(struct __sk_buff, cb[0])), 3957 BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_10, 3958 offsetof(struct __sk_buff, cb[0])), 3959 BPF_EXIT_INSN(), 3960 }, 3961 .errstr_unpriv = "R10 leaks addr into mem", 3962 .result_unpriv = REJECT, 3963 .result = ACCEPT, 3964 }, 3965 { 3966 "leak pointer into ctx 3", 3967 .insns = { 3968 BPF_MOV64_IMM(BPF_REG_0, 0), 3969 BPF_LD_MAP_FD(BPF_REG_2, 0), 3970 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 3971 offsetof(struct __sk_buff, cb[0])), 3972 BPF_EXIT_INSN(), 3973 }, 3974 .fixup_map1 = { 1 }, 3975 .errstr_unpriv = "R2 leaks addr into ctx", 3976 .result_unpriv = REJECT, 3977 .result = ACCEPT, 3978 }, 3979 { 3980 "leak pointer into map val", 3981 .insns = { 3982 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 3983 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 3984 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 3985 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 3986 BPF_LD_MAP_FD(BPF_REG_1, 0), 3987 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 3988 BPF_FUNC_map_lookup_elem), 3989 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), 3990 BPF_MOV64_IMM(BPF_REG_3, 0), 3991 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0), 3992 BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_6, 0), 3993 BPF_MOV64_IMM(BPF_REG_0, 0), 3994 BPF_EXIT_INSN(), 3995 }, 3996 .fixup_map1 = { 4 }, 3997 .errstr_unpriv = "R6 leaks addr into mem", 3998 .result_unpriv = REJECT, 3999 .result = ACCEPT, 4000 }, 4001 { 4002 "helper access to map: full range", 4003 .insns = { 4004 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 4005 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 4006 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 4007 BPF_LD_MAP_FD(BPF_REG_1, 0), 4008 
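			/*
			 * The "helper access to map" group: the looked-up value
			 * pointer is passed to bpf_probe_read() as the
			 * destination and R2 as the size, so the verifier must
			 * range-check the access against value_size (48 bytes,
			 * i.e. sizeof(struct test_val)).
			 */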
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 4009 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), 4010 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 4011 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)), 4012 BPF_MOV64_IMM(BPF_REG_3, 0), 4013 BPF_EMIT_CALL(BPF_FUNC_probe_read), 4014 BPF_EXIT_INSN(), 4015 }, 4016 .fixup_map2 = { 3 }, 4017 .result = ACCEPT, 4018 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 4019 }, 4020 { 4021 "helper access to map: partial range", 4022 .insns = { 4023 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 4024 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 4025 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 4026 BPF_LD_MAP_FD(BPF_REG_1, 0), 4027 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 4028 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), 4029 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 4030 BPF_MOV64_IMM(BPF_REG_2, 8), 4031 BPF_MOV64_IMM(BPF_REG_3, 0), 4032 BPF_EMIT_CALL(BPF_FUNC_probe_read), 4033 BPF_EXIT_INSN(), 4034 }, 4035 .fixup_map2 = { 3 }, 4036 .result = ACCEPT, 4037 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 4038 }, 4039 { 4040 "helper access to map: empty range", 4041 .insns = { 4042 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 4043 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 4044 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 4045 BPF_LD_MAP_FD(BPF_REG_1, 0), 4046 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 4047 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), 4048 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 4049 BPF_MOV64_IMM(BPF_REG_2, 0), 4050 BPF_MOV64_IMM(BPF_REG_3, 0), 4051 BPF_EMIT_CALL(BPF_FUNC_probe_read), 4052 BPF_EXIT_INSN(), 4053 }, 4054 .fixup_map2 = { 3 }, 4055 .errstr = "invalid access to map value, value_size=48 off=0 size=0", 4056 .result = REJECT, 4057 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 4058 }, 4059 { 4060 "helper access to map: out-of-bound range", 4061 .insns = { 4062 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 4063 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 4064 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 4065 BPF_LD_MAP_FD(BPF_REG_1, 0), 4066 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 4067 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), 4068 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 4069 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val) + 8), 4070 BPF_MOV64_IMM(BPF_REG_3, 0), 4071 BPF_EMIT_CALL(BPF_FUNC_probe_read), 4072 BPF_EXIT_INSN(), 4073 }, 4074 .fixup_map2 = { 3 }, 4075 .errstr = "invalid access to map value, value_size=48 off=0 size=56", 4076 .result = REJECT, 4077 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 4078 }, 4079 { 4080 "helper access to map: negative range", 4081 .insns = { 4082 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 4083 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 4084 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 4085 BPF_LD_MAP_FD(BPF_REG_1, 0), 4086 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 4087 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), 4088 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 4089 BPF_MOV64_IMM(BPF_REG_2, -8), 4090 BPF_MOV64_IMM(BPF_REG_3, 0), 4091 BPF_EMIT_CALL(BPF_FUNC_probe_read), 4092 BPF_EXIT_INSN(), 4093 }, 4094 .fixup_map2 = { 3 }, 4095 .errstr = "invalid access to map value, value_size=48 off=0 size=-8", 4096 .result = REJECT, 4097 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 4098 }, 4099 { 4100 "helper access to adjusted map (via const imm): full range", 4101 .insns = { 4102 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 4103 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 4104 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 4105 BPF_LD_MAP_FD(BPF_REG_1, 0), 4106 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 4107 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), 4108 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 4109 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4110 offsetof(struct test_val, foo)), 4111 BPF_MOV64_IMM(BPF_REG_2, 4112 
sizeof(struct test_val) - 4113 offsetof(struct test_val, foo)), 4114 BPF_MOV64_IMM(BPF_REG_3, 0), 4115 BPF_EMIT_CALL(BPF_FUNC_probe_read), 4116 BPF_EXIT_INSN(), 4117 }, 4118 .fixup_map2 = { 3 }, 4119 .result = ACCEPT, 4120 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 4121 }, 4122 { 4123 "helper access to adjusted map (via const imm): partial range", 4124 .insns = { 4125 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 4126 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 4127 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 4128 BPF_LD_MAP_FD(BPF_REG_1, 0), 4129 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 4130 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), 4131 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 4132 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4133 offsetof(struct test_val, foo)), 4134 BPF_MOV64_IMM(BPF_REG_2, 8), 4135 BPF_MOV64_IMM(BPF_REG_3, 0), 4136 BPF_EMIT_CALL(BPF_FUNC_probe_read), 4137 BPF_EXIT_INSN(), 4138 }, 4139 .fixup_map2 = { 3 }, 4140 .result = ACCEPT, 4141 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 4142 }, 4143 { 4144 "helper access to adjusted map (via const imm): empty range", 4145 .insns = { 4146 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 4147 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 4148 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 4149 BPF_LD_MAP_FD(BPF_REG_1, 0), 4150 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 4151 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), 4152 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 4153 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4154 offsetof(struct test_val, foo)), 4155 BPF_MOV64_IMM(BPF_REG_2, 0), 4156 BPF_MOV64_IMM(BPF_REG_3, 0), 4157 BPF_EMIT_CALL(BPF_FUNC_probe_read), 4158 BPF_EXIT_INSN(), 4159 }, 4160 .fixup_map2 = { 3 }, 4161 .errstr = "R1 min value is outside of the array range", 4162 .result = REJECT, 4163 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 4164 }, 4165 { 4166 "helper access to adjusted map (via const imm): out-of-bound range", 4167 .insns = { 4168 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 4169 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 4170 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 4171 BPF_LD_MAP_FD(BPF_REG_1, 0), 4172 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 4173 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), 4174 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 4175 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4176 offsetof(struct test_val, foo)), 4177 BPF_MOV64_IMM(BPF_REG_2, 4178 sizeof(struct test_val) - 4179 offsetof(struct test_val, foo) + 8), 4180 BPF_MOV64_IMM(BPF_REG_3, 0), 4181 BPF_EMIT_CALL(BPF_FUNC_probe_read), 4182 BPF_EXIT_INSN(), 4183 }, 4184 .fixup_map2 = { 3 }, 4185 .errstr = "invalid access to map value, value_size=48 off=4 size=52", 4186 .result = REJECT, 4187 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 4188 }, 4189 { 4190 "helper access to adjusted map (via const imm): negative range (> adjustment)", 4191 .insns = { 4192 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 4193 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 4194 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 4195 BPF_LD_MAP_FD(BPF_REG_1, 0), 4196 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 4197 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), 4198 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 4199 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4200 offsetof(struct test_val, foo)), 4201 BPF_MOV64_IMM(BPF_REG_2, -8), 4202 BPF_MOV64_IMM(BPF_REG_3, 0), 4203 BPF_EMIT_CALL(BPF_FUNC_probe_read), 4204 BPF_EXIT_INSN(), 4205 }, 4206 .fixup_map2 = { 3 }, 4207 .errstr = "invalid access to map value, value_size=48 off=4 size=-8", 4208 .result = REJECT, 4209 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 4210 }, 4211 { 4212 "helper access to adjusted map (via const imm): negative range (< adjustment)", 4213 .insns = { 4214 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 4215 
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 4216 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 4217 BPF_LD_MAP_FD(BPF_REG_1, 0), 4218 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 4219 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), 4220 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 4221 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4222 offsetof(struct test_val, foo)), 4223 BPF_MOV64_IMM(BPF_REG_2, -1), 4224 BPF_MOV64_IMM(BPF_REG_3, 0), 4225 BPF_EMIT_CALL(BPF_FUNC_probe_read), 4226 BPF_EXIT_INSN(), 4227 }, 4228 .fixup_map2 = { 3 }, 4229 .errstr = "R1 min value is outside of the array range", 4230 .result = REJECT, 4231 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 4232 }, 4233 { 4234 "helper access to adjusted map (via const reg): full range", 4235 .insns = { 4236 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 4237 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 4238 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 4239 BPF_LD_MAP_FD(BPF_REG_1, 0), 4240 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 4241 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), 4242 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 4243 BPF_MOV64_IMM(BPF_REG_3, 4244 offsetof(struct test_val, foo)), 4245 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), 4246 BPF_MOV64_IMM(BPF_REG_2, 4247 sizeof(struct test_val) - 4248 offsetof(struct test_val, foo)), 4249 BPF_MOV64_IMM(BPF_REG_3, 0), 4250 BPF_EMIT_CALL(BPF_FUNC_probe_read), 4251 BPF_EXIT_INSN(), 4252 }, 4253 .fixup_map2 = { 3 }, 4254 .result = ACCEPT, 4255 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 4256 }, 4257 { 4258 "helper access to adjusted map (via const reg): partial range", 4259 .insns = { 4260 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 4261 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 4262 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 4263 BPF_LD_MAP_FD(BPF_REG_1, 0), 4264 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 4265 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), 4266 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 4267 BPF_MOV64_IMM(BPF_REG_3, 4268 offsetof(struct test_val, foo)), 4269 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), 4270 BPF_MOV64_IMM(BPF_REG_2, 8), 4271 BPF_MOV64_IMM(BPF_REG_3, 0), 4272 BPF_EMIT_CALL(BPF_FUNC_probe_read), 4273 BPF_EXIT_INSN(), 4274 }, 4275 .fixup_map2 = { 3 }, 4276 .result = ACCEPT, 4277 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 4278 }, 4279 { 4280 "helper access to adjusted map (via const reg): empty range", 4281 .insns = { 4282 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 4283 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 4284 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 4285 BPF_LD_MAP_FD(BPF_REG_1, 0), 4286 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 4287 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), 4288 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 4289 BPF_MOV64_IMM(BPF_REG_3, 0), 4290 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), 4291 BPF_MOV64_IMM(BPF_REG_2, 0), 4292 BPF_MOV64_IMM(BPF_REG_3, 0), 4293 BPF_EMIT_CALL(BPF_FUNC_probe_read), 4294 BPF_EXIT_INSN(), 4295 }, 4296 .fixup_map2 = { 3 }, 4297 .errstr = "R1 min value is outside of the array range", 4298 .result = REJECT, 4299 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 4300 }, 4301 { 4302 "helper access to adjusted map (via const reg): out-of-bound range", 4303 .insns = { 4304 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 4305 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 4306 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 4307 BPF_LD_MAP_FD(BPF_REG_1, 0), 4308 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 4309 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), 4310 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 4311 BPF_MOV64_IMM(BPF_REG_3, 4312 offsetof(struct test_val, foo)), 4313 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), 4314 BPF_MOV64_IMM(BPF_REG_2, 4315 sizeof(struct test_val) - 4316 offsetof(struct test_val, 
foo) + 8), 4317 BPF_MOV64_IMM(BPF_REG_3, 0), 4318 BPF_EMIT_CALL(BPF_FUNC_probe_read), 4319 BPF_EXIT_INSN(), 4320 }, 4321 .fixup_map2 = { 3 }, 4322 .errstr = "invalid access to map value, value_size=48 off=4 size=52", 4323 .result = REJECT, 4324 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 4325 }, 4326 { 4327 "helper access to adjusted map (via const reg): negative range (> adjustment)", 4328 .insns = { 4329 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 4330 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 4331 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 4332 BPF_LD_MAP_FD(BPF_REG_1, 0), 4333 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 4334 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), 4335 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 4336 BPF_MOV64_IMM(BPF_REG_3, 4337 offsetof(struct test_val, foo)), 4338 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), 4339 BPF_MOV64_IMM(BPF_REG_2, -8), 4340 BPF_MOV64_IMM(BPF_REG_3, 0), 4341 BPF_EMIT_CALL(BPF_FUNC_probe_read), 4342 BPF_EXIT_INSN(), 4343 }, 4344 .fixup_map2 = { 3 }, 4345 .errstr = "invalid access to map value, value_size=48 off=4 size=-8", 4346 .result = REJECT, 4347 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 4348 }, 4349 { 4350 "helper access to adjusted map (via const reg): negative range (< adjustment)", 4351 .insns = { 4352 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 4353 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 4354 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 4355 BPF_LD_MAP_FD(BPF_REG_1, 0), 4356 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 4357 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), 4358 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 4359 BPF_MOV64_IMM(BPF_REG_3, 4360 offsetof(struct test_val, foo)), 4361 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), 4362 BPF_MOV64_IMM(BPF_REG_2, -1), 4363 BPF_MOV64_IMM(BPF_REG_3, 0), 4364 BPF_EMIT_CALL(BPF_FUNC_probe_read), 4365 BPF_EXIT_INSN(), 4366 }, 4367 .fixup_map2 = { 3 }, 4368 .errstr = "R1 min value is outside of the array range", 4369 .result = REJECT, 4370 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 4371 }, 4372 { 4373 "helper access to adjusted map (via variable): full range", 4374 .insns = { 4375 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 4376 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 4377 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 4378 BPF_LD_MAP_FD(BPF_REG_1, 0), 4379 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 4380 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), 4381 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 4382 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), 4383 BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 4384 offsetof(struct test_val, foo), 4), 4385 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), 4386 BPF_MOV64_IMM(BPF_REG_2, 4387 sizeof(struct test_val) - 4388 offsetof(struct test_val, foo)), 4389 BPF_MOV64_IMM(BPF_REG_3, 0), 4390 BPF_EMIT_CALL(BPF_FUNC_probe_read), 4391 BPF_EXIT_INSN(), 4392 }, 4393 .fixup_map2 = { 3 }, 4394 .result = ACCEPT, 4395 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 4396 }, 4397 { 4398 "helper access to adjusted map (via variable): partial range", 4399 .insns = { 4400 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 4401 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 4402 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 4403 BPF_LD_MAP_FD(BPF_REG_1, 0), 4404 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 4405 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), 4406 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 4407 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), 4408 BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 4409 offsetof(struct test_val, foo), 4), 4410 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), 4411 BPF_MOV64_IMM(BPF_REG_2, 8), 4412 BPF_MOV64_IMM(BPF_REG_3, 0), 4413 BPF_EMIT_CALL(BPF_FUNC_probe_read), 4414 BPF_EXIT_INSN(), 4415 }, 4416 .fixup_map2 = { 3 }, 
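		/*
		 * The fixup_map* arrays hold instruction indices where the test
		 * harness patches a real map fd into the BPF_LD_MAP_FD()
		 * placeholder before the program is loaded.
		 */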
4417 .result = ACCEPT, 4418 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 4419 }, 4420 { 4421 "helper access to adjusted map (via variable): empty range", 4422 .insns = { 4423 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 4424 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 4425 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 4426 BPF_LD_MAP_FD(BPF_REG_1, 0), 4427 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 4428 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), 4429 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 4430 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), 4431 BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 4432 offsetof(struct test_val, foo), 4), 4433 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), 4434 BPF_MOV64_IMM(BPF_REG_2, 0), 4435 BPF_MOV64_IMM(BPF_REG_3, 0), 4436 BPF_EMIT_CALL(BPF_FUNC_probe_read), 4437 BPF_EXIT_INSN(), 4438 }, 4439 .fixup_map2 = { 3 }, 4440 .errstr = "R1 min value is outside of the array range", 4441 .result = REJECT, 4442 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 4443 }, 4444 { 4445 "helper access to adjusted map (via variable): no max check", 4446 .insns = { 4447 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 4448 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 4449 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 4450 BPF_LD_MAP_FD(BPF_REG_1, 0), 4451 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 4452 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), 4453 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 4454 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), 4455 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), 4456 BPF_MOV64_IMM(BPF_REG_2, 0), 4457 BPF_MOV64_IMM(BPF_REG_3, 0), 4458 BPF_EMIT_CALL(BPF_FUNC_probe_read), 4459 BPF_EXIT_INSN(), 4460 }, 4461 .fixup_map2 = { 3 }, 4462 .errstr = "R1 min value is negative, either use unsigned index or do a if (index >=0) check", 4463 .result = REJECT, 4464 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 4465 }, 4466 { 4467 "helper access to adjusted map (via variable): wrong max check", 4468 .insns = { 4469 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 4470 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 4471 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 4472 BPF_LD_MAP_FD(BPF_REG_1, 0), 4473 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 4474 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), 4475 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 4476 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), 4477 BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 4478 offsetof(struct test_val, foo), 4), 4479 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), 4480 BPF_MOV64_IMM(BPF_REG_2, 4481 sizeof(struct test_val) - 4482 offsetof(struct test_val, foo) + 1), 4483 BPF_MOV64_IMM(BPF_REG_3, 0), 4484 BPF_EMIT_CALL(BPF_FUNC_probe_read), 4485 BPF_EXIT_INSN(), 4486 }, 4487 .fixup_map2 = { 3 }, 4488 .errstr = "invalid access to map value, value_size=48 off=4 size=45", 4489 .result = REJECT, 4490 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 4491 }, 4492 { 4493 "map element value is preserved across register spilling", 4494 .insns = { 4495 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 4496 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 4497 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 4498 BPF_LD_MAP_FD(BPF_REG_1, 0), 4499 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 4500 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), 4501 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42), 4502 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 4503 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184), 4504 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0), 4505 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0), 4506 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42), 4507 BPF_EXIT_INSN(), 4508 }, 4509 .fixup_map2 = { 3 }, 4510 .errstr_unpriv = "R0 leaks addr", 4511 .result = ACCEPT, 4512 .result_unpriv = REJECT, 4513 }, 4514 { 4515 "map element value or null is marked on register 
spilling", 4516 .insns = { 4517 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 4518 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 4519 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 4520 BPF_LD_MAP_FD(BPF_REG_1, 0), 4521 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 4522 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 4523 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -152), 4524 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0), 4525 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), 4526 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0), 4527 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42), 4528 BPF_EXIT_INSN(), 4529 }, 4530 .fixup_map2 = { 3 }, 4531 .errstr_unpriv = "R0 leaks addr", 4532 .result = ACCEPT, 4533 .result_unpriv = REJECT, 4534 }, 4535 { 4536 "map element value store of cleared call register", 4537 .insns = { 4538 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 4539 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 4540 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 4541 BPF_LD_MAP_FD(BPF_REG_1, 0), 4542 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 4543 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), 4544 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0), 4545 BPF_EXIT_INSN(), 4546 }, 4547 .fixup_map2 = { 3 }, 4548 .errstr_unpriv = "R1 !read_ok", 4549 .errstr = "R1 !read_ok", 4550 .result = REJECT, 4551 .result_unpriv = REJECT, 4552 }, 4553 { 4554 "map element value with unaligned store", 4555 .insns = { 4556 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 4557 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 4558 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 4559 BPF_LD_MAP_FD(BPF_REG_1, 0), 4560 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 4561 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 17), 4562 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3), 4563 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42), 4564 BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 43), 4565 BPF_ST_MEM(BPF_DW, BPF_REG_0, -2, 44), 4566 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0), 4567 BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 32), 4568 BPF_ST_MEM(BPF_DW, BPF_REG_8, 2, 33), 4569 BPF_ST_MEM(BPF_DW, BPF_REG_8, -2, 34), 4570 BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 5), 4571 BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 22), 4572 BPF_ST_MEM(BPF_DW, BPF_REG_8, 4, 23), 4573 BPF_ST_MEM(BPF_DW, BPF_REG_8, -7, 24), 4574 BPF_MOV64_REG(BPF_REG_7, BPF_REG_8), 4575 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 3), 4576 BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 22), 4577 BPF_ST_MEM(BPF_DW, BPF_REG_7, 4, 23), 4578 BPF_ST_MEM(BPF_DW, BPF_REG_7, -4, 24), 4579 BPF_EXIT_INSN(), 4580 }, 4581 .fixup_map2 = { 3 }, 4582 .errstr_unpriv = "R0 pointer arithmetic prohibited", 4583 .result = ACCEPT, 4584 .result_unpriv = REJECT, 4585 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 4586 }, 4587 { 4588 "map element value with unaligned load", 4589 .insns = { 4590 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 4591 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 4592 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 4593 BPF_LD_MAP_FD(BPF_REG_1, 0), 4594 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 4595 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11), 4596 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), 4597 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 9), 4598 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3), 4599 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0), 4600 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 2), 4601 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0), 4602 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 0), 4603 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 2), 4604 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 5), 4605 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0), 4606 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 4), 4607 BPF_EXIT_INSN(), 4608 }, 4609 .fixup_map2 = { 3 }, 4610 .errstr_unpriv = "R0 pointer arithmetic prohibited", 4611 .result = ACCEPT, 4612 .result_unpriv = REJECT, 4613 
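		/*
		 * F_NEEDS_EFFICIENT_UNALIGNED_ACCESS marks tests whose expected
		 * verdict assumes an architecture that handles unaligned
		 * accesses efficiently; the harness takes the flag into account
		 * when deciding whether to run them as-is.
		 */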
		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
	},
	{
		"map element value illegal alu op, 1",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 8),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
			BPF_EXIT_INSN(),
		},
		.fixup_map2 = { 3 },
		.errstr_unpriv = "R0 pointer arithmetic prohibited",
		.errstr = "invalid mem access 'inv'",
		.result = REJECT,
		.result_unpriv = REJECT,
	},
	{
		"map element value illegal alu op, 2",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
			BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 0),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
			BPF_EXIT_INSN(),
		},
		.fixup_map2 = { 3 },
		.errstr_unpriv = "R0 pointer arithmetic prohibited",
		.errstr = "invalid mem access 'inv'",
		.result = REJECT,
		.result_unpriv = REJECT,
	},
	{
		"map element value illegal alu op, 3",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
			BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, 42),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
			BPF_EXIT_INSN(),
		},
		.fixup_map2 = { 3 },
		.errstr_unpriv = "R0 pointer arithmetic prohibited",
		.errstr = "invalid mem access 'inv'",
		.result = REJECT,
		.result_unpriv = REJECT,
	},
	{
		"map element value illegal alu op, 4",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
			BPF_ENDIAN(BPF_FROM_BE, BPF_REG_0, 64),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
			BPF_EXIT_INSN(),
		},
		.fixup_map2 = { 3 },
		.errstr_unpriv = "R0 pointer arithmetic prohibited",
		.errstr = "invalid mem access 'inv'",
		.result = REJECT,
		.result_unpriv = REJECT,
	},
	{
		"map element value illegal alu op, 5",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
			BPF_MOV64_IMM(BPF_REG_3, 4096),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
			BPF_STX_XADD(BPF_DW, BPF_REG_2, BPF_REG_3, 0),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
			BPF_EXIT_INSN(),
		},
		.fixup_map2 = { 3 },
		.errstr_unpriv = "R0 invalid mem access 'inv'",
		.errstr = "R0 invalid mem access 'inv'",
		.result = REJECT,
		.result_unpriv = REJECT,
	},
	{
		"map element value is preserved across register spilling",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
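			/*
			 * Like the earlier spilling test, but the value pointer
			 * is first advanced to &foo: spilling a
			 * PTR_TO_MAP_VALUE to the stack and filling it back
			 * must preserve its type so the later store through R3
			 * remains valid.
			 */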
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 4720 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 4721 BPF_LD_MAP_FD(BPF_REG_1, 0), 4722 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 4723 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), 4724 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 4725 offsetof(struct test_val, foo)), 4726 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42), 4727 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 4728 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184), 4729 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0), 4730 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0), 4731 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42), 4732 BPF_EXIT_INSN(), 4733 }, 4734 .fixup_map2 = { 3 }, 4735 .errstr_unpriv = "R0 pointer arithmetic prohibited", 4736 .result = ACCEPT, 4737 .result_unpriv = REJECT, 4738 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 4739 }, 4740 { 4741 "helper access to variable memory: stack, bitwise AND + JMP, correct bounds", 4742 .insns = { 4743 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 4744 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), 4745 BPF_MOV64_IMM(BPF_REG_0, 0), 4746 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64), 4747 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56), 4748 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48), 4749 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40), 4750 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32), 4751 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24), 4752 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), 4753 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), 4754 BPF_MOV64_IMM(BPF_REG_2, 16), 4755 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), 4756 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), 4757 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64), 4758 BPF_MOV64_IMM(BPF_REG_4, 0), 4759 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2), 4760 BPF_MOV64_IMM(BPF_REG_3, 0), 4761 BPF_EMIT_CALL(BPF_FUNC_probe_read), 4762 BPF_MOV64_IMM(BPF_REG_0, 0), 4763 BPF_EXIT_INSN(), 4764 }, 4765 .result = ACCEPT, 4766 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 4767 }, 4768 { 4769 "helper access to variable memory: stack, bitwise AND, zero included", 4770 .insns = { 4771 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 4772 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), 4773 BPF_MOV64_IMM(BPF_REG_2, 16), 4774 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), 4775 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), 4776 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64), 4777 BPF_MOV64_IMM(BPF_REG_3, 0), 4778 BPF_EMIT_CALL(BPF_FUNC_probe_read), 4779 BPF_EXIT_INSN(), 4780 }, 4781 .errstr = "invalid stack type R1 off=-64 access_size=0", 4782 .result = REJECT, 4783 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 4784 }, 4785 { 4786 "helper access to variable memory: stack, bitwise AND + JMP, wrong max", 4787 .insns = { 4788 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 4789 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), 4790 BPF_MOV64_IMM(BPF_REG_2, 16), 4791 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), 4792 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), 4793 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 65), 4794 BPF_MOV64_IMM(BPF_REG_4, 0), 4795 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2), 4796 BPF_MOV64_IMM(BPF_REG_3, 0), 4797 BPF_EMIT_CALL(BPF_FUNC_probe_read), 4798 BPF_MOV64_IMM(BPF_REG_0, 0), 4799 BPF_EXIT_INSN(), 4800 }, 4801 .errstr = "invalid stack type R1 off=-64 access_size=65", 4802 .result = REJECT, 4803 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 4804 }, 4805 { 4806 "helper access to variable memory: stack, JMP, correct bounds", 4807 .insns = { 4808 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 4809 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), 4810 BPF_MOV64_IMM(BPF_REG_0, 0), 4811 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64), 
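			/*
			 * fp[-64..-8] is zeroed up front: bpf_probe_read() is
			 * called with a variable size below, and an indirect
			 * read from uninitialized stack would be rejected (see
			 * the later "8 bytes leak" test).
			 */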
4812 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56), 4813 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48), 4814 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40), 4815 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32), 4816 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24), 4817 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), 4818 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), 4819 BPF_MOV64_IMM(BPF_REG_2, 16), 4820 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), 4821 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), 4822 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 4), 4823 BPF_MOV64_IMM(BPF_REG_4, 0), 4824 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2), 4825 BPF_MOV64_IMM(BPF_REG_3, 0), 4826 BPF_EMIT_CALL(BPF_FUNC_probe_read), 4827 BPF_MOV64_IMM(BPF_REG_0, 0), 4828 BPF_EXIT_INSN(), 4829 }, 4830 .result = ACCEPT, 4831 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 4832 }, 4833 { 4834 "helper access to variable memory: stack, JMP (signed), correct bounds", 4835 .insns = { 4836 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 4837 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), 4838 BPF_MOV64_IMM(BPF_REG_0, 0), 4839 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64), 4840 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56), 4841 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48), 4842 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40), 4843 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32), 4844 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24), 4845 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), 4846 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), 4847 BPF_MOV64_IMM(BPF_REG_2, 16), 4848 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), 4849 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), 4850 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 4), 4851 BPF_MOV64_IMM(BPF_REG_4, 0), 4852 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2), 4853 BPF_MOV64_IMM(BPF_REG_3, 0), 4854 BPF_EMIT_CALL(BPF_FUNC_probe_read), 4855 BPF_MOV64_IMM(BPF_REG_0, 0), 4856 BPF_EXIT_INSN(), 4857 }, 4858 .result = ACCEPT, 4859 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 4860 }, 4861 { 4862 "helper access to variable memory: stack, JMP, bounds + offset", 4863 .insns = { 4864 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 4865 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), 4866 BPF_MOV64_IMM(BPF_REG_2, 16), 4867 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), 4868 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), 4869 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 5), 4870 BPF_MOV64_IMM(BPF_REG_4, 0), 4871 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 3), 4872 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1), 4873 BPF_MOV64_IMM(BPF_REG_3, 0), 4874 BPF_EMIT_CALL(BPF_FUNC_probe_read), 4875 BPF_MOV64_IMM(BPF_REG_0, 0), 4876 BPF_EXIT_INSN(), 4877 }, 4878 .errstr = "invalid stack type R1 off=-64 access_size=65", 4879 .result = REJECT, 4880 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 4881 }, 4882 { 4883 "helper access to variable memory: stack, JMP, wrong max", 4884 .insns = { 4885 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 4886 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), 4887 BPF_MOV64_IMM(BPF_REG_2, 16), 4888 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), 4889 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), 4890 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 65, 4), 4891 BPF_MOV64_IMM(BPF_REG_4, 0), 4892 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2), 4893 BPF_MOV64_IMM(BPF_REG_3, 0), 4894 BPF_EMIT_CALL(BPF_FUNC_probe_read), 4895 BPF_MOV64_IMM(BPF_REG_0, 0), 4896 BPF_EXIT_INSN(), 4897 }, 4898 .errstr = "invalid stack type R1 off=-64 access_size=65", 4899 .result = REJECT, 4900 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 4901 }, 4902 { 4903 "helper access to variable 
memory: stack, JMP, no max check", 4904 .insns = { 4905 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 4906 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), 4907 BPF_MOV64_IMM(BPF_REG_2, 16), 4908 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), 4909 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), 4910 BPF_MOV64_IMM(BPF_REG_4, 0), 4911 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2), 4912 BPF_MOV64_IMM(BPF_REG_3, 0), 4913 BPF_EMIT_CALL(BPF_FUNC_probe_read), 4914 BPF_MOV64_IMM(BPF_REG_0, 0), 4915 BPF_EXIT_INSN(), 4916 }, 4917 .errstr = "R2 unbounded memory access", 4918 .result = REJECT, 4919 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 4920 }, 4921 { 4922 "helper access to variable memory: stack, JMP, no min check", 4923 .insns = { 4924 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 4925 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), 4926 BPF_MOV64_IMM(BPF_REG_2, 16), 4927 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), 4928 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), 4929 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3), 4930 BPF_MOV64_IMM(BPF_REG_3, 0), 4931 BPF_EMIT_CALL(BPF_FUNC_probe_read), 4932 BPF_MOV64_IMM(BPF_REG_0, 0), 4933 BPF_EXIT_INSN(), 4934 }, 4935 .errstr = "invalid stack type R1 off=-64 access_size=0", 4936 .result = REJECT, 4937 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 4938 }, 4939 { 4940 "helper access to variable memory: stack, JMP (signed), no min check", 4941 .insns = { 4942 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 4943 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), 4944 BPF_MOV64_IMM(BPF_REG_2, 16), 4945 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), 4946 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), 4947 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 3), 4948 BPF_MOV64_IMM(BPF_REG_3, 0), 4949 BPF_EMIT_CALL(BPF_FUNC_probe_read), 4950 BPF_MOV64_IMM(BPF_REG_0, 0), 4951 BPF_EXIT_INSN(), 4952 }, 4953 .errstr = "R2 min value is negative", 4954 .result = REJECT, 4955 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 4956 }, 4957 { 4958 "helper access to variable memory: map, JMP, correct bounds", 4959 .insns = { 4960 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 4961 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 4962 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 4963 BPF_LD_MAP_FD(BPF_REG_1, 0), 4964 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 4965 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10), 4966 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 4967 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)), 4968 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128), 4969 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128), 4970 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 4971 sizeof(struct test_val), 4), 4972 BPF_MOV64_IMM(BPF_REG_4, 0), 4973 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2), 4974 BPF_MOV64_IMM(BPF_REG_3, 0), 4975 BPF_EMIT_CALL(BPF_FUNC_probe_read), 4976 BPF_MOV64_IMM(BPF_REG_0, 0), 4977 BPF_EXIT_INSN(), 4978 }, 4979 .fixup_map2 = { 3 }, 4980 .result = ACCEPT, 4981 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 4982 }, 4983 { 4984 "helper access to variable memory: map, JMP, wrong max", 4985 .insns = { 4986 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 4987 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 4988 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 4989 BPF_LD_MAP_FD(BPF_REG_1, 0), 4990 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 4991 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10), 4992 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 4993 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)), 4994 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128), 4995 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128), 4996 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 4997 sizeof(struct test_val) + 1, 4), 4998 BPF_MOV64_IMM(BPF_REG_4, 0), 4999 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, 
BPF_REG_2, 2), 5000 BPF_MOV64_IMM(BPF_REG_3, 0), 5001 BPF_EMIT_CALL(BPF_FUNC_probe_read), 5002 BPF_MOV64_IMM(BPF_REG_0, 0), 5003 BPF_EXIT_INSN(), 5004 }, 5005 .fixup_map2 = { 3 }, 5006 .errstr = "invalid access to map value, value_size=48 off=0 size=49", 5007 .result = REJECT, 5008 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 5009 }, 5010 { 5011 "helper access to variable memory: map adjusted, JMP, correct bounds", 5012 .insns = { 5013 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 5014 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 5015 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 5016 BPF_LD_MAP_FD(BPF_REG_1, 0), 5017 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 5018 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11), 5019 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 5020 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20), 5021 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)), 5022 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128), 5023 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128), 5024 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 5025 sizeof(struct test_val) - 20, 4), 5026 BPF_MOV64_IMM(BPF_REG_4, 0), 5027 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2), 5028 BPF_MOV64_IMM(BPF_REG_3, 0), 5029 BPF_EMIT_CALL(BPF_FUNC_probe_read), 5030 BPF_MOV64_IMM(BPF_REG_0, 0), 5031 BPF_EXIT_INSN(), 5032 }, 5033 .fixup_map2 = { 3 }, 5034 .result = ACCEPT, 5035 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 5036 }, 5037 { 5038 "helper access to variable memory: map adjusted, JMP, wrong max", 5039 .insns = { 5040 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 5041 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 5042 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 5043 BPF_LD_MAP_FD(BPF_REG_1, 0), 5044 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 5045 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11), 5046 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 5047 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20), 5048 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)), 5049 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128), 5050 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128), 5051 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 5052 sizeof(struct test_val) - 19, 4), 5053 BPF_MOV64_IMM(BPF_REG_4, 0), 5054 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2), 5055 BPF_MOV64_IMM(BPF_REG_3, 0), 5056 BPF_EMIT_CALL(BPF_FUNC_probe_read), 5057 BPF_MOV64_IMM(BPF_REG_0, 0), 5058 BPF_EXIT_INSN(), 5059 }, 5060 .fixup_map2 = { 3 }, 5061 .errstr = "R1 min value is outside of the array range", 5062 .result = REJECT, 5063 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 5064 }, 5065 { 5066 "helper access to variable memory: size > 0 not allowed on NULL", 5067 .insns = { 5068 BPF_MOV64_IMM(BPF_REG_1, 0), 5069 BPF_MOV64_IMM(BPF_REG_2, 0), 5070 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128), 5071 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128), 5072 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64), 5073 BPF_MOV64_IMM(BPF_REG_3, 0), 5074 BPF_MOV64_IMM(BPF_REG_4, 0), 5075 BPF_MOV64_IMM(BPF_REG_5, 0), 5076 BPF_EMIT_CALL(BPF_FUNC_csum_diff), 5077 BPF_EXIT_INSN(), 5078 }, 5079 .errstr = "R1 type=imm expected=fp", 5080 .result = REJECT, 5081 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 5082 }, 5083 { 5084 "helper access to variable memory: size = 0 not allowed on != NULL", 5085 .insns = { 5086 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 5087 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), 5088 BPF_MOV64_IMM(BPF_REG_2, 0), 5089 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0), 5090 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 8), 5091 BPF_MOV64_IMM(BPF_REG_3, 0), 5092 BPF_MOV64_IMM(BPF_REG_4, 0), 5093 BPF_MOV64_IMM(BPF_REG_5, 0), 5094 BPF_EMIT_CALL(BPF_FUNC_csum_diff), 5095 BPF_EXIT_INSN(), 5096 }, 5097 .errstr = "invalid stack type R1 off=-8 access_size=0", 
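		/*
		 * bpf_csum_diff() takes an optional buffer: a NULL pointer is
		 * only acceptable together with size 0, and a non-NULL stack
		 * pointer must come with a non-zero size, which is what these
		 * two csum_diff tests check.
		 */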
5098 .result = REJECT, 5099 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 5100 }, 5101 { 5102 "helper access to variable memory: 8 bytes leak", 5103 .insns = { 5104 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 5105 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), 5106 BPF_MOV64_IMM(BPF_REG_0, 0), 5107 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64), 5108 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56), 5109 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48), 5110 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40), 5111 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24), 5112 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), 5113 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), 5114 BPF_MOV64_IMM(BPF_REG_2, 0), 5115 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128), 5116 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128), 5117 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63), 5118 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1), 5119 BPF_MOV64_IMM(BPF_REG_3, 0), 5120 BPF_EMIT_CALL(BPF_FUNC_probe_read), 5121 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), 5122 BPF_EXIT_INSN(), 5123 }, 5124 .errstr = "invalid indirect read from stack off -64+32 size 64", 5125 .result = REJECT, 5126 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 5127 }, 5128 { 5129 "helper access to variable memory: 8 bytes no leak (init memory)", 5130 .insns = { 5131 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 5132 BPF_MOV64_IMM(BPF_REG_0, 0), 5133 BPF_MOV64_IMM(BPF_REG_0, 0), 5134 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64), 5135 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56), 5136 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48), 5137 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40), 5138 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32), 5139 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24), 5140 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), 5141 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), 5142 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), 5143 BPF_MOV64_IMM(BPF_REG_2, 0), 5144 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 32), 5145 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 32), 5146 BPF_MOV64_IMM(BPF_REG_3, 0), 5147 BPF_EMIT_CALL(BPF_FUNC_probe_read), 5148 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), 5149 BPF_EXIT_INSN(), 5150 }, 5151 .result = ACCEPT, 5152 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 5153 }, 5154 { 5155 "invalid and of negative number", 5156 .insns = { 5157 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 5158 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 5159 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 5160 BPF_LD_MAP_FD(BPF_REG_1, 0), 5161 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 5162 BPF_FUNC_map_lookup_elem), 5163 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), 5164 BPF_MOV64_IMM(BPF_REG_1, 6), 5165 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, -4), 5166 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2), 5167 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 5168 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 5169 offsetof(struct test_val, foo)), 5170 BPF_EXIT_INSN(), 5171 }, 5172 .fixup_map2 = { 3 }, 5173 .errstr_unpriv = "R0 pointer arithmetic prohibited", 5174 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.", 5175 .result = REJECT, 5176 .result_unpriv = REJECT, 5177 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 5178 }, 5179 { 5180 "invalid range check", 5181 .insns = { 5182 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 5183 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 5184 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 5185 BPF_LD_MAP_FD(BPF_REG_1, 0), 5186 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 5187 BPF_FUNC_map_lookup_elem), 5188 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 12), 5189 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), 5190 
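			/*
			 * The mod/and/shift arithmetic below tries to disguise
			 * the unchecked map value as a bounded offset; the
			 * verifier is still expected to reject the resulting
			 * pointer arithmetic ("R0 min value is negative").
			 */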
BPF_MOV64_IMM(BPF_REG_9, 1), 5191 BPF_ALU32_IMM(BPF_MOD, BPF_REG_1, 2), 5192 BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 1), 5193 BPF_ALU32_REG(BPF_AND, BPF_REG_9, BPF_REG_1), 5194 BPF_ALU32_IMM(BPF_ADD, BPF_REG_9, 1), 5195 BPF_ALU32_IMM(BPF_RSH, BPF_REG_9, 1), 5196 BPF_MOV32_IMM(BPF_REG_3, 1), 5197 BPF_ALU32_REG(BPF_SUB, BPF_REG_3, BPF_REG_9), 5198 BPF_ALU32_IMM(BPF_MUL, BPF_REG_3, 0x10000000), 5199 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3), 5200 BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0), 5201 BPF_MOV64_REG(BPF_REG_0, 0), 5202 BPF_EXIT_INSN(), 5203 }, 5204 .fixup_map2 = { 3 }, 5205 .errstr_unpriv = "R0 pointer arithmetic prohibited", 5206 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.", 5207 .result = REJECT, 5208 .result_unpriv = REJECT, 5209 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 5210 }, 5211 { 5212 "map in map access", 5213 .insns = { 5214 BPF_ST_MEM(0, BPF_REG_10, -4, 0), 5215 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 5216 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), 5217 BPF_LD_MAP_FD(BPF_REG_1, 0), 5218 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 5219 BPF_FUNC_map_lookup_elem), 5220 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), 5221 BPF_ST_MEM(0, BPF_REG_10, -4, 0), 5222 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 5223 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), 5224 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 5225 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 5226 BPF_FUNC_map_lookup_elem), 5227 BPF_MOV64_REG(BPF_REG_0, 0), 5228 BPF_EXIT_INSN(), 5229 }, 5230 .fixup_map_in_map = { 3 }, 5231 .result = ACCEPT, 5232 }, 5233 { 5234 "invalid inner map pointer", 5235 .insns = { 5236 BPF_ST_MEM(0, BPF_REG_10, -4, 0), 5237 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 5238 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), 5239 BPF_LD_MAP_FD(BPF_REG_1, 0), 5240 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 5241 BPF_FUNC_map_lookup_elem), 5242 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), 5243 BPF_ST_MEM(0, BPF_REG_10, -4, 0), 5244 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 5245 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), 5246 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 5247 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), 5248 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 5249 BPF_FUNC_map_lookup_elem), 5250 BPF_MOV64_REG(BPF_REG_0, 0), 5251 BPF_EXIT_INSN(), 5252 }, 5253 .fixup_map_in_map = { 3 }, 5254 .errstr = "R1 type=inv expected=map_ptr", 5255 .errstr_unpriv = "R1 pointer arithmetic prohibited", 5256 .result = REJECT, 5257 }, 5258 { 5259 "forgot null checking on the inner map pointer", 5260 .insns = { 5261 BPF_ST_MEM(0, BPF_REG_10, -4, 0), 5262 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 5263 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), 5264 BPF_LD_MAP_FD(BPF_REG_1, 0), 5265 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 5266 BPF_FUNC_map_lookup_elem), 5267 BPF_ST_MEM(0, BPF_REG_10, -4, 0), 5268 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 5269 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), 5270 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 5271 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 5272 BPF_FUNC_map_lookup_elem), 5273 BPF_MOV64_REG(BPF_REG_0, 0), 5274 BPF_EXIT_INSN(), 5275 }, 5276 .fixup_map_in_map = { 3 }, 5277 .errstr = "R1 type=map_value_or_null expected=map_ptr", 5278 .result = REJECT, 5279 }, 5280 { 5281 "ld_abs: check calling conv, r1", 5282 .insns = { 5283 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 5284 BPF_MOV64_IMM(BPF_REG_1, 0), 5285 BPF_LD_ABS(BPF_W, -0x200000), 5286 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), 5287 BPF_EXIT_INSN(), 5288 }, 5289 .errstr = "R1 !read_ok", 5290 .result = REJECT, 5291 }, 5292 { 5293 "ld_abs: check calling conv, r2", 5294 .insns = { 5295 
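			/*
			 * BPF_LD_ABS/BPF_LD_IND clobber the caller-saved
			 * registers R1-R5 and expect the skb in R6, so reading
			 * any of R1-R5 afterwards must fail with "!read_ok";
			 * the r7 variants are accepted since R7 is preserved.
			 */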
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 5296 BPF_MOV64_IMM(BPF_REG_2, 0), 5297 BPF_LD_ABS(BPF_W, -0x200000), 5298 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 5299 BPF_EXIT_INSN(), 5300 }, 5301 .errstr = "R2 !read_ok", 5302 .result = REJECT, 5303 }, 5304 { 5305 "ld_abs: check calling conv, r3", 5306 .insns = { 5307 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 5308 BPF_MOV64_IMM(BPF_REG_3, 0), 5309 BPF_LD_ABS(BPF_W, -0x200000), 5310 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3), 5311 BPF_EXIT_INSN(), 5312 }, 5313 .errstr = "R3 !read_ok", 5314 .result = REJECT, 5315 }, 5316 { 5317 "ld_abs: check calling conv, r4", 5318 .insns = { 5319 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 5320 BPF_MOV64_IMM(BPF_REG_4, 0), 5321 BPF_LD_ABS(BPF_W, -0x200000), 5322 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4), 5323 BPF_EXIT_INSN(), 5324 }, 5325 .errstr = "R4 !read_ok", 5326 .result = REJECT, 5327 }, 5328 { 5329 "ld_abs: check calling conv, r5", 5330 .insns = { 5331 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 5332 BPF_MOV64_IMM(BPF_REG_5, 0), 5333 BPF_LD_ABS(BPF_W, -0x200000), 5334 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5), 5335 BPF_EXIT_INSN(), 5336 }, 5337 .errstr = "R5 !read_ok", 5338 .result = REJECT, 5339 }, 5340 { 5341 "ld_abs: check calling conv, r7", 5342 .insns = { 5343 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 5344 BPF_MOV64_IMM(BPF_REG_7, 0), 5345 BPF_LD_ABS(BPF_W, -0x200000), 5346 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7), 5347 BPF_EXIT_INSN(), 5348 }, 5349 .result = ACCEPT, 5350 }, 5351 { 5352 "ld_ind: check calling conv, r1", 5353 .insns = { 5354 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 5355 BPF_MOV64_IMM(BPF_REG_1, 1), 5356 BPF_LD_IND(BPF_W, BPF_REG_1, -0x200000), 5357 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), 5358 BPF_EXIT_INSN(), 5359 }, 5360 .errstr = "R1 !read_ok", 5361 .result = REJECT, 5362 }, 5363 { 5364 "ld_ind: check calling conv, r2", 5365 .insns = { 5366 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 5367 BPF_MOV64_IMM(BPF_REG_2, 1), 5368 BPF_LD_IND(BPF_W, BPF_REG_2, -0x200000), 5369 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 5370 BPF_EXIT_INSN(), 5371 }, 5372 .errstr = "R2 !read_ok", 5373 .result = REJECT, 5374 }, 5375 { 5376 "ld_ind: check calling conv, r3", 5377 .insns = { 5378 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 5379 BPF_MOV64_IMM(BPF_REG_3, 1), 5380 BPF_LD_IND(BPF_W, BPF_REG_3, -0x200000), 5381 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3), 5382 BPF_EXIT_INSN(), 5383 }, 5384 .errstr = "R3 !read_ok", 5385 .result = REJECT, 5386 }, 5387 { 5388 "ld_ind: check calling conv, r4", 5389 .insns = { 5390 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 5391 BPF_MOV64_IMM(BPF_REG_4, 1), 5392 BPF_LD_IND(BPF_W, BPF_REG_4, -0x200000), 5393 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4), 5394 BPF_EXIT_INSN(), 5395 }, 5396 .errstr = "R4 !read_ok", 5397 .result = REJECT, 5398 }, 5399 { 5400 "ld_ind: check calling conv, r5", 5401 .insns = { 5402 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 5403 BPF_MOV64_IMM(BPF_REG_5, 1), 5404 BPF_LD_IND(BPF_W, BPF_REG_5, -0x200000), 5405 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5), 5406 BPF_EXIT_INSN(), 5407 }, 5408 .errstr = "R5 !read_ok", 5409 .result = REJECT, 5410 }, 5411 { 5412 "ld_ind: check calling conv, r7", 5413 .insns = { 5414 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 5415 BPF_MOV64_IMM(BPF_REG_7, 1), 5416 BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000), 5417 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7), 5418 BPF_EXIT_INSN(), 5419 }, 5420 .result = ACCEPT, 5421 }, 5422 { 5423 "check bpf_perf_event_data->sample_period byte load permitted", 5424 .insns = { 5425 BPF_MOV64_IMM(BPF_REG_0, 0), 5426 #if __BYTE_ORDER == __LITTLE_ENDIAN 5427 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 5428 offsetof(struct 
	{
		"check bpf_perf_event_data->sample_period byte load permitted",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER == __LITTLE_ENDIAN
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct bpf_perf_event_data, sample_period)),
#else
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct bpf_perf_event_data, sample_period) + 7),
#endif
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_PERF_EVENT,
	},
	{
		"check bpf_perf_event_data->sample_period half load permitted",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER == __LITTLE_ENDIAN
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
				    offsetof(struct bpf_perf_event_data, sample_period)),
#else
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
				    offsetof(struct bpf_perf_event_data, sample_period) + 6),
#endif
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_PERF_EVENT,
	},
	{
		"check bpf_perf_event_data->sample_period word load permitted",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER == __LITTLE_ENDIAN
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct bpf_perf_event_data, sample_period)),
#else
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct bpf_perf_event_data, sample_period) + 4),
#endif
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_PERF_EVENT,
	},
	{
		"check bpf_perf_event_data->sample_period dword load permitted",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
				    offsetof(struct bpf_perf_event_data, sample_period)),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_PERF_EVENT,
	},
	{
		"check skb->data half load not permitted",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER == __LITTLE_ENDIAN
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
#else
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, data) + 2),
#endif
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid bpf_context access",
	},
	{
		"check skb->tc_classid half load not permitted for lwt prog",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER == __LITTLE_ENDIAN
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, tc_classid)),
#else
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, tc_classid) + 2),
#endif
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid bpf_context access",
		.prog_type = BPF_PROG_TYPE_LWT_IN,
	},
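	/* The "bounds checks mixing signed and unsigned" series feeds an
	 * unknown 64-bit value from the stack into map-pointer arithmetic
	 * after bounding it with an inconsistent mix of signed and unsigned
	 * comparisons.  The verifier has to notice that the unsigned check
	 * does not establish a usable lower bound, so the minimum value may
	 * still be negative.  Roughly, in C terms (illustrative sketch only,
	 * not an exact transcription of any one variant):
	 *
	 *	u64 idx = *(u64 *)(fp - 16);	// attacker controlled
	 *	if (idx > (u64)-1)		// unsigned check, never true
	 *		return 0;
	 *	if ((s64)idx > 1)		// signed upper bound only
	 *		return 0;
	 *	val[idx] = 0;			// idx may still be negative
	 */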
	{
		"bounds checks mixing signed and unsigned, positive bounds",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
			BPF_MOV64_IMM(BPF_REG_2, 2),
			BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 3),
			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 4, 2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 3 },
		.errstr_unpriv = "R0 pointer arithmetic prohibited",
		.errstr = "R0 min value is negative",
		.result = REJECT,
		.result_unpriv = REJECT,
	},
	{
		"bounds checks mixing signed and unsigned",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
			BPF_MOV64_IMM(BPF_REG_2, -1),
			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 3 },
		.errstr_unpriv = "R0 pointer arithmetic prohibited",
		.errstr = "R0 min value is negative",
		.result = REJECT,
		.result_unpriv = REJECT,
	},
	{
		"bounds checks mixing signed and unsigned, variant 2",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
			BPF_MOV64_IMM(BPF_REG_2, -1),
			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
			BPF_MOV64_IMM(BPF_REG_8, 0),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_1),
			BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
			BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 3 },
		.errstr_unpriv = "R0 pointer arithmetic prohibited",
		.errstr = "R8 invalid mem access 'inv'",
		.result = REJECT,
		.result_unpriv = REJECT,
	},
	{
		"bounds checks mixing signed and unsigned, variant 3",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
			BPF_MOV64_IMM(BPF_REG_2, -1),
			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 4),
			BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
			BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
			BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 3 },
		.errstr_unpriv = "R0 pointer arithmetic prohibited",
		.errstr = "R8 invalid mem access 'inv'",
		.result = REJECT,
		.result_unpriv = REJECT,
	},
	{
		"bounds checks mixing signed and unsigned, variant 4",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
			BPF_MOV64_IMM(BPF_REG_2, 1),
			BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 3 },
		.errstr_unpriv = "R0 pointer arithmetic prohibited",
		.errstr = "R0 min value is negative",
		.result = REJECT,
		.result_unpriv = REJECT,
	},
	{
		"bounds checks mixing signed and unsigned, variant 5",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
			BPF_MOV64_IMM(BPF_REG_2, -1),
			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 4),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 4),
			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 3 },
		.errstr_unpriv = "R0 pointer arithmetic prohibited",
		.errstr = "R0 invalid mem access",
		.result = REJECT,
		.result_unpriv = REJECT,
	},
	{
		"bounds checks mixing signed and unsigned, variant 6",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 0),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -512),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -16),
			BPF_MOV64_IMM(BPF_REG_6, -1),
			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_6, 5),
			BPF_JMP_IMM(BPF_JSGT, BPF_REG_4, 1, 4),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
			BPF_MOV64_IMM(BPF_REG_5, 0),
			BPF_ST_MEM(BPF_H, BPF_REG_10, -512, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_skb_load_bytes),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "R4 min value is negative, either use unsigned",
		.errstr = "R4 min value is negative, either use unsigned",
		.result = REJECT,
		.result_unpriv = REJECT,
	},
	{
		"bounds checks mixing signed and unsigned, variant 7",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
			BPF_MOV64_IMM(BPF_REG_2, 1024 * 1024 * 1024),
			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 3 },
		.errstr_unpriv = "R0 pointer arithmetic prohibited",
		.errstr = "R0 min value is negative",
		.result = REJECT,
		.result_unpriv = REJECT,
	},
	{
		"bounds checks mixing signed and unsigned, variant 8",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
			BPF_MOV64_IMM(BPF_REG_2, 1024 * 1024 * 1024 + 1),
			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 3 },
		.errstr_unpriv = "R0 pointer arithmetic prohibited",
		.errstr = "R0 min value is negative",
		.result = REJECT,
		.result_unpriv = REJECT,
	},
	{
		"bounds checks mixing signed and unsigned, variant 9",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
			BPF_MOV64_IMM(BPF_REG_2, -1),
			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 3 },
		.errstr_unpriv = "R0 pointer arithmetic prohibited",
		.errstr = "R0 min value is negative",
		.result = REJECT,
		.result_unpriv = REJECT,
	},
	{
		"bounds checks mixing signed and unsigned, variant 10",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
			BPF_LD_IMM64(BPF_REG_2, -9223372036854775808ULL),
			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 3 },
		.errstr_unpriv = "R0 pointer arithmetic prohibited",
		.errstr = "R0 min value is negative",
		.result = REJECT,
		.result_unpriv = REJECT,
	},
	{
		"bounds checks mixing signed and unsigned, variant 11",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
			BPF_MOV64_IMM(BPF_REG_2, 0),
			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 3 },
		.errstr_unpriv = "R0 pointer arithmetic prohibited",
		.errstr = "R0 min value is negative",
		.result = REJECT,
		.result_unpriv = REJECT,
	},
	{
		"bounds checks mixing signed and unsigned, variant 12",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
			BPF_MOV64_IMM(BPF_REG_2, -1),
			BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
			/* Dead branch. */
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 3 },
		.errstr_unpriv = "R0 pointer arithmetic prohibited",
		.errstr = "R0 min value is negative",
		.result = REJECT,
		.result_unpriv = REJECT,
	},
	{
		"bounds checks mixing signed and unsigned, variant 13",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
			BPF_MOV64_IMM(BPF_REG_2, -6),
			BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 3 },
		.errstr_unpriv = "R0 pointer arithmetic prohibited",
		.errstr = "R0 min value is negative",
		.result = REJECT,
		.result_unpriv = REJECT,
	},
	{
		"bounds checks mixing signed and unsigned, variant 14",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
			BPF_MOV64_IMM(BPF_REG_2, 2),
			BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
			BPF_MOV64_IMM(BPF_REG_7, 1),
			BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 0, 2),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_1),
			BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 4, 2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_7),
			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 3 },
		.errstr_unpriv = "R0 pointer arithmetic prohibited",
		.errstr = "R0 min value is negative",
		.result = REJECT,
		.result_unpriv = REJECT,
	},
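	/* Variant 15 exercises the same mixed signed/unsigned flaw, but it
	 * loads skb->mark first and reaches the pointer arithmetic through
	 * backward jumps, so the map fd placeholder sits at instruction
	 * index 4 rather than 3 (hence .fixup_map1 = { 4 } below).
	 */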
	{
		"bounds checks mixing signed and unsigned, variant 15",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
				    offsetof(struct __sk_buff, mark)),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
			BPF_MOV64_IMM(BPF_REG_2, -1),
			BPF_MOV64_IMM(BPF_REG_8, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_9, 42, 6),
			BPF_JMP_REG(BPF_JSGT, BPF_REG_8, BPF_REG_1, 3),
			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, -3),
			BPF_JMP_IMM(BPF_JA, 0, 0, -7),
		},
		.fixup_map1 = { 4 },
		.errstr_unpriv = "R0 pointer arithmetic prohibited",
		.errstr = "R0 min value is negative",
		.result = REJECT,
		.result_unpriv = REJECT,
	},
	{
		"bounds checks mixing signed and unsigned, variant 16",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
			BPF_MOV64_IMM(BPF_REG_2, -6),
			BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
			BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 1, 2),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 3 },
		.errstr_unpriv = "R0 pointer arithmetic prohibited",
		.errstr = "R0 min value is negative",
		.result = REJECT,
		.result_unpriv = REJECT,
	},
	{
		"subtraction bounds (map value)",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
			BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 7),
			BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
			BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 5),
			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 56),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
			BPF_EXIT_INSN(),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 3 },
		.errstr_unpriv = "R0 pointer arithmetic prohibited",
		.errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
		.result = REJECT,
		.result_unpriv = REJECT,
	},
};
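/* probe_filter_length() below relies on the fact that unused trailing entries
 * of the fixed-size insns[] arrays are zero-initialized: it scans backwards
 * from MAX_INSNS for the last instruction whose code or imm field is non-zero
 * and reports the program length as that index plus one.  Illustrative use
 * (assumed example, not part of the suite):
 *
 *	struct bpf_insn prog[MAX_INSNS] = {
 *		BPF_MOV64_IMM(BPF_REG_0, 0),
 *		BPF_EXIT_INSN(),
 *	};
 *	int len = probe_filter_length(prog);	// -> 2
 */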
static int probe_filter_length(const struct bpf_insn *fp)
{
	int len;

	for (len = MAX_INSNS - 1; len > 0; --len)
		if (fp[len].code != 0 || fp[len].imm != 0)
			break;
	return len + 1;
}

static int create_map(uint32_t size_value, uint32_t max_elem)
{
	int fd;

	fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
			    size_value, max_elem, BPF_F_NO_PREALLOC);
	if (fd < 0)
		printf("Failed to create hash map '%s'!\n", strerror(errno));

	return fd;
}

static int create_prog_array(void)
{
	int fd;

	fd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
			    sizeof(int), 4, 0);
	if (fd < 0)
		printf("Failed to create prog array '%s'!\n", strerror(errno));

	return fd;
}

static int create_map_in_map(void)
{
	int inner_map_fd, outer_map_fd;

	inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
				      sizeof(int), 1, 0);
	if (inner_map_fd < 0) {
		printf("Failed to create array '%s'!\n", strerror(errno));
		return inner_map_fd;
	}

	outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS,
					     sizeof(int), inner_map_fd, 1, 0);
	if (outer_map_fd < 0)
		printf("Failed to create array of maps '%s'!\n",
		       strerror(errno));

	close(inner_map_fd);

	return outer_map_fd;
}

static char bpf_vlog[32768];

static void do_test_fixup(struct bpf_test *test, struct bpf_insn *prog,
			  int *map_fds)
{
	int *fixup_map1 = test->fixup_map1;
	int *fixup_map2 = test->fixup_map2;
	int *fixup_prog = test->fixup_prog;
	int *fixup_map_in_map = test->fixup_map_in_map;

	/* Allocating hash maps with a single element is fine here, since we
	 * only exercise the verifier and never do a runtime lookup, so the
	 * only thing that really matters is the value size.
	 */
	if (*fixup_map1) {
		map_fds[0] = create_map(sizeof(long long), 1);
		do {
			prog[*fixup_map1].imm = map_fds[0];
			fixup_map1++;
		} while (*fixup_map1);
	}

	if (*fixup_map2) {
		map_fds[1] = create_map(sizeof(struct test_val), 1);
		do {
			prog[*fixup_map2].imm = map_fds[1];
			fixup_map2++;
		} while (*fixup_map2);
	}

	if (*fixup_prog) {
		map_fds[2] = create_prog_array();
		do {
			prog[*fixup_prog].imm = map_fds[2];
			fixup_prog++;
		} while (*fixup_prog);
	}

	if (*fixup_map_in_map) {
		map_fds[3] = create_map_in_map();
		do {
			prog[*fixup_map_in_map].imm = map_fds[3];
			fixup_map_in_map++;
		} while (*fixup_map_in_map);
	}
}
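/* do_test_single() below drives one test in one privilege mode: it patches
 * the map fd placeholders, loads the program via bpf_verify_program()
 * (requesting strict alignment when F_LOAD_WITH_STRICT_ALIGNMENT is set) and
 * compares the outcome against the expected result and error substring.
 * Rough sketch of the decision logic implemented below:
 *
 *	if (expected == ACCEPT)
 *		pass iff fd_prog >= 0, or the rejection was only due to
 *		unknown alignment on a F_NEEDS_EFFICIENT_UNALIGNED_ACCESS test;
 *	else
 *		pass iff fd_prog < 0 and the verifier log contains errstr
 *		(again with the unknown-alignment escape hatch).
 */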
static void do_test_single(struct bpf_test *test, bool unpriv,
			   int *passes, int *errors)
{
	int fd_prog, expected_ret, reject_from_alignment;
	struct bpf_insn *prog = test->insns;
	int prog_len = probe_filter_length(prog);
	int prog_type = test->prog_type;
	int map_fds[MAX_NR_MAPS];
	const char *expected_err;
	int i;

	for (i = 0; i < MAX_NR_MAPS; i++)
		map_fds[i] = -1;

	do_test_fixup(test, prog, map_fds);

	fd_prog = bpf_verify_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
				     prog, prog_len,
				     test->flags & F_LOAD_WITH_STRICT_ALIGNMENT,
				     "GPL", 0, bpf_vlog, sizeof(bpf_vlog), 1);

	expected_ret = unpriv && test->result_unpriv != UNDEF ?
		       test->result_unpriv : test->result;
	expected_err = unpriv && test->errstr_unpriv ?
		       test->errstr_unpriv : test->errstr;

	reject_from_alignment = fd_prog < 0 &&
				(test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS) &&
				strstr(bpf_vlog, "Unknown alignment.");
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (reject_from_alignment) {
		printf("FAIL\nFailed due to alignment despite having efficient unaligned access: '%s'!\n",
		       strerror(errno));
		goto fail_log;
	}
#endif
	if (expected_ret == ACCEPT) {
		if (fd_prog < 0 && !reject_from_alignment) {
			printf("FAIL\nFailed to load prog '%s'!\n",
			       strerror(errno));
			goto fail_log;
		}
	} else {
		if (fd_prog >= 0) {
			printf("FAIL\nUnexpected success to load!\n");
			goto fail_log;
		}
		if (!strstr(bpf_vlog, expected_err) && !reject_from_alignment) {
			printf("FAIL\nUnexpected error message!\n");
			goto fail_log;
		}
	}

	(*passes)++;
	printf("OK%s\n", reject_from_alignment ?
	       " (NOTE: reject due to unknown alignment)" : "");
close_fds:
	close(fd_prog);
	for (i = 0; i < MAX_NR_MAPS; i++)
		close(map_fds[i]);
	sched_yield();
	return;
fail_log:
	(*errors)++;
	printf("%s", bpf_vlog);
	goto close_fds;
}

static bool is_admin(void)
{
	cap_t caps;
	cap_flag_value_t sysadmin = CAP_CLEAR;
	const cap_value_t cap_val = CAP_SYS_ADMIN;

#ifdef CAP_IS_SUPPORTED
	if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) {
		perror("cap_get_flag");
		return false;
	}
#endif
	caps = cap_get_proc();
	if (!caps) {
		perror("cap_get_proc");
		return false;
	}
	if (cap_get_flag(caps, cap_val, CAP_EFFECTIVE, &sysadmin))
		perror("cap_get_flag");
	if (cap_free(caps))
		perror("cap_free");
	return (sysadmin == CAP_SET);
}

static int set_admin(bool admin)
{
	cap_t caps;
	const cap_value_t cap_val = CAP_SYS_ADMIN;
	int ret = -1;

	caps = cap_get_proc();
	if (!caps) {
		perror("cap_get_proc");
		return -1;
	}
	if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_val,
			 admin ? CAP_SET : CAP_CLEAR)) {
		perror("cap_set_flag");
		goto out;
	}
	if (cap_set_proc(caps)) {
		perror("cap_set_proc");
		goto out;
	}
	ret = 0;
out:
	if (cap_free(caps))
		perror("cap_free");
	return ret;
}
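/* When running as root, do_test() below executes each test twice: once with
 * CAP_SYS_ADMIN temporarily dropped (the "#N/u" unprivileged pass, which uses
 * .result_unpriv/.errstr_unpriv when they are set) and once fully privileged
 * (the "#N/p" pass).  Tests that set an explicit prog_type only get the
 * privileged pass, since most non-default program types cannot be loaded
 * without CAP_SYS_ADMIN.
 */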
static int do_test(bool unpriv, unsigned int from, unsigned int to)
{
	int i, passes = 0, errors = 0;

	for (i = from; i < to; i++) {
		struct bpf_test *test = &tests[i];

		/* Program types that are not supported by non-root are
		 * skipped right away.
		 */
		if (!test->prog_type) {
			if (!unpriv)
				set_admin(false);
			printf("#%d/u %s ", i, test->descr);
			do_test_single(test, true, &passes, &errors);
			if (!unpriv)
				set_admin(true);
		}

		if (!unpriv) {
			printf("#%d/p %s ", i, test->descr);
			do_test_single(test, false, &passes, &errors);
		}
	}

	printf("Summary: %d PASSED, %d FAILED\n", passes, errors);
	return errors ? EXIT_FAILURE : EXIT_SUCCESS;
}

int main(int argc, char **argv)
{
	struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
	struct rlimit rlim = { 1 << 20, 1 << 20 };
	unsigned int from = 0, to = ARRAY_SIZE(tests);
	bool unpriv = !is_admin();

	if (argc == 3) {
		unsigned int l = atoi(argv[argc - 2]);
		unsigned int u = atoi(argv[argc - 1]);

		if (l < to && u < to) {
			from = l;
			to = u + 1;
		}
	} else if (argc == 2) {
		unsigned int t = atoi(argv[argc - 1]);

		if (t < to) {
			from = t;
			to = t + 1;
		}
	}

	setrlimit(RLIMIT_MEMLOCK, unpriv ? &rlim : &rinf);
	return do_test(unpriv, from, to);
}
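/* Usage sketch (binary name and arguments are examples, assuming the built
 * test binary is called test_verifier):
 *
 *	./test_verifier		# run the whole suite
 *	./test_verifier 23	# run only test 23
 *	./test_verifier 5 10	# run tests 5 through 10, inclusive
 *
 * Without CAP_SYS_ADMIN, RLIMIT_MEMLOCK is set to 1 MiB since BPF maps and
 * programs are charged against locked memory; as root the limit is set to
 * RLIM_INFINITY.
 */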