1 { 2 "calls: basic sanity", 3 .insns = { 4 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), 5 BPF_MOV64_IMM(BPF_REG_0, 1), 6 BPF_EXIT_INSN(), 7 BPF_MOV64_IMM(BPF_REG_0, 2), 8 BPF_EXIT_INSN(), 9 }, 10 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 11 .result = ACCEPT, 12 }, 13 { 14 "calls: not on unpriviledged", 15 .insns = { 16 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), 17 BPF_MOV64_IMM(BPF_REG_0, 1), 18 BPF_EXIT_INSN(), 19 BPF_MOV64_IMM(BPF_REG_0, 2), 20 BPF_EXIT_INSN(), 21 }, 22 .errstr_unpriv = "function calls to other bpf functions are allowed for root only", 23 .result_unpriv = REJECT, 24 .result = ACCEPT, 25 .retval = 1, 26 }, 27 { 28 "calls: div by 0 in subprog", 29 .insns = { 30 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 31 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8), 32 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 33 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, 34 offsetof(struct __sk_buff, data_end)), 35 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), 36 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8), 37 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1), 38 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), 39 BPF_MOV64_IMM(BPF_REG_0, 1), 40 BPF_EXIT_INSN(), 41 BPF_MOV32_IMM(BPF_REG_2, 0), 42 BPF_MOV32_IMM(BPF_REG_3, 1), 43 BPF_ALU32_REG(BPF_DIV, BPF_REG_3, BPF_REG_2), 44 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 45 offsetof(struct __sk_buff, data)), 46 BPF_EXIT_INSN(), 47 }, 48 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 49 .result = ACCEPT, 50 .retval = 1, 51 }, 52 { 53 "calls: multiple ret types in subprog 1", 54 .insns = { 55 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 56 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8), 57 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 58 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, 59 offsetof(struct __sk_buff, data_end)), 60 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), 61 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8), 62 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1), 63 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), 64 BPF_MOV64_IMM(BPF_REG_0, 1), 65 BPF_EXIT_INSN(), 66 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 67 
offsetof(struct __sk_buff, data)), 68 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), 69 BPF_MOV32_IMM(BPF_REG_0, 42), 70 BPF_EXIT_INSN(), 71 }, 72 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 73 .result = REJECT, 74 .errstr = "R0 invalid mem access 'inv'", 75 }, 76 { 77 "calls: multiple ret types in subprog 2", 78 .insns = { 79 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 80 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8), 81 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 82 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, 83 offsetof(struct __sk_buff, data_end)), 84 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), 85 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8), 86 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1), 87 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), 88 BPF_MOV64_IMM(BPF_REG_0, 1), 89 BPF_EXIT_INSN(), 90 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 91 offsetof(struct __sk_buff, data)), 92 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 93 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 9), 94 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 95 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 96 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 97 BPF_LD_MAP_FD(BPF_REG_1, 0), 98 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 99 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), 100 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, 101 offsetof(struct __sk_buff, data)), 102 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64), 103 BPF_EXIT_INSN(), 104 }, 105 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 106 .fixup_map_hash_8b = { 16 }, 107 .result = REJECT, 108 .errstr = "R0 min value is outside of the array range", 109 }, 110 { 111 "calls: overlapping caller/callee", 112 .insns = { 113 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0), 114 BPF_MOV64_IMM(BPF_REG_0, 1), 115 BPF_EXIT_INSN(), 116 }, 117 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 118 .errstr = "last insn is not an exit or jmp", 119 .result = REJECT, 120 }, 121 { 122 "calls: wrong recursive calls", 123 .insns = { 124 BPF_JMP_IMM(BPF_JA, 0, 0, 4), 125 BPF_JMP_IMM(BPF_JA, 0, 0, 4), 126 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2), 127 
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2), 128 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2), 129 BPF_MOV64_IMM(BPF_REG_0, 1), 130 BPF_EXIT_INSN(), 131 }, 132 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 133 .errstr = "jump out of range", 134 .result = REJECT, 135 }, 136 { 137 "calls: wrong src reg", 138 .insns = { 139 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 2, 0, 0), 140 BPF_MOV64_IMM(BPF_REG_0, 1), 141 BPF_EXIT_INSN(), 142 }, 143 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 144 .errstr = "BPF_CALL uses reserved fields", 145 .result = REJECT, 146 }, 147 { 148 "calls: wrong off value", 149 .insns = { 150 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, -1, 2), 151 BPF_MOV64_IMM(BPF_REG_0, 1), 152 BPF_EXIT_INSN(), 153 BPF_MOV64_IMM(BPF_REG_0, 2), 154 BPF_EXIT_INSN(), 155 }, 156 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 157 .errstr = "BPF_CALL uses reserved fields", 158 .result = REJECT, 159 }, 160 { 161 "calls: jump back loop", 162 .insns = { 163 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1), 164 BPF_MOV64_IMM(BPF_REG_0, 1), 165 BPF_EXIT_INSN(), 166 }, 167 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 168 .errstr = "back-edge from insn 0 to 0", 169 .result = REJECT, 170 }, 171 { 172 "calls: conditional call", 173 .insns = { 174 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 175 offsetof(struct __sk_buff, mark)), 176 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), 177 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), 178 BPF_MOV64_IMM(BPF_REG_0, 1), 179 BPF_EXIT_INSN(), 180 BPF_MOV64_IMM(BPF_REG_0, 2), 181 BPF_EXIT_INSN(), 182 }, 183 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 184 .errstr = "jump out of range", 185 .result = REJECT, 186 }, 187 { 188 "calls: conditional call 2", 189 .insns = { 190 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 191 offsetof(struct __sk_buff, mark)), 192 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), 193 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4), 194 BPF_MOV64_IMM(BPF_REG_0, 1), 195 BPF_EXIT_INSN(), 196 BPF_MOV64_IMM(BPF_REG_0, 2), 197 BPF_EXIT_INSN(), 198 BPF_MOV64_IMM(BPF_REG_0, 3), 199 
BPF_EXIT_INSN(), 200 }, 201 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 202 .result = ACCEPT, 203 }, 204 { 205 "calls: conditional call 3", 206 .insns = { 207 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 208 offsetof(struct __sk_buff, mark)), 209 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), 210 BPF_JMP_IMM(BPF_JA, 0, 0, 4), 211 BPF_MOV64_IMM(BPF_REG_0, 1), 212 BPF_EXIT_INSN(), 213 BPF_MOV64_IMM(BPF_REG_0, 1), 214 BPF_JMP_IMM(BPF_JA, 0, 0, -6), 215 BPF_MOV64_IMM(BPF_REG_0, 3), 216 BPF_JMP_IMM(BPF_JA, 0, 0, -6), 217 }, 218 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 219 .errstr = "back-edge from insn", 220 .result = REJECT, 221 }, 222 { 223 "calls: conditional call 4", 224 .insns = { 225 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 226 offsetof(struct __sk_buff, mark)), 227 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), 228 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4), 229 BPF_MOV64_IMM(BPF_REG_0, 1), 230 BPF_EXIT_INSN(), 231 BPF_MOV64_IMM(BPF_REG_0, 1), 232 BPF_JMP_IMM(BPF_JA, 0, 0, -5), 233 BPF_MOV64_IMM(BPF_REG_0, 3), 234 BPF_EXIT_INSN(), 235 }, 236 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 237 .result = ACCEPT, 238 }, 239 { 240 "calls: conditional call 5", 241 .insns = { 242 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 243 offsetof(struct __sk_buff, mark)), 244 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), 245 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4), 246 BPF_MOV64_IMM(BPF_REG_0, 1), 247 BPF_EXIT_INSN(), 248 BPF_MOV64_IMM(BPF_REG_0, 1), 249 BPF_JMP_IMM(BPF_JA, 0, 0, -6), 250 BPF_MOV64_IMM(BPF_REG_0, 3), 251 BPF_EXIT_INSN(), 252 }, 253 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 254 .errstr = "back-edge from insn", 255 .result = REJECT, 256 }, 257 { 258 "calls: conditional call 6", 259 .insns = { 260 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), 261 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -2), 262 BPF_EXIT_INSN(), 263 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 264 offsetof(struct __sk_buff, mark)), 265 BPF_EXIT_INSN(), 266 }, 267 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 268 .errstr = "back-edge from insn", 269 .result 
= REJECT, 270 }, 271 { 272 "calls: using r0 returned by callee", 273 .insns = { 274 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 275 BPF_EXIT_INSN(), 276 BPF_MOV64_IMM(BPF_REG_0, 2), 277 BPF_EXIT_INSN(), 278 }, 279 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 280 .result = ACCEPT, 281 }, 282 { 283 "calls: using uninit r0 from callee", 284 .insns = { 285 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 286 BPF_EXIT_INSN(), 287 BPF_EXIT_INSN(), 288 }, 289 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 290 .errstr = "!read_ok", 291 .result = REJECT, 292 }, 293 { 294 "calls: callee is using r1", 295 .insns = { 296 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 297 BPF_EXIT_INSN(), 298 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 299 offsetof(struct __sk_buff, len)), 300 BPF_EXIT_INSN(), 301 }, 302 .prog_type = BPF_PROG_TYPE_SCHED_ACT, 303 .result = ACCEPT, 304 .retval = TEST_DATA_LEN, 305 }, 306 { 307 "calls: callee using args1", 308 .insns = { 309 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 310 BPF_EXIT_INSN(), 311 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), 312 BPF_EXIT_INSN(), 313 }, 314 .errstr_unpriv = "allowed for root only", 315 .result_unpriv = REJECT, 316 .result = ACCEPT, 317 .retval = POINTER_VALUE, 318 }, 319 { 320 "calls: callee using wrong args2", 321 .insns = { 322 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 323 BPF_EXIT_INSN(), 324 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 325 BPF_EXIT_INSN(), 326 }, 327 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 328 .errstr = "R2 !read_ok", 329 .result = REJECT, 330 }, 331 { 332 "calls: callee using two args", 333 .insns = { 334 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 335 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6, 336 offsetof(struct __sk_buff, len)), 337 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_6, 338 offsetof(struct __sk_buff, len)), 339 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 340 BPF_EXIT_INSN(), 341 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), 342 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2), 343 BPF_EXIT_INSN(), 344 }, 345 .errstr_unpriv = "allowed for 
root only", 346 .result_unpriv = REJECT, 347 .result = ACCEPT, 348 .retval = TEST_DATA_LEN + TEST_DATA_LEN - ETH_HLEN - ETH_HLEN, 349 }, 350 { 351 "calls: callee changing pkt pointers", 352 .insns = { 353 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, offsetof(struct xdp_md, data)), 354 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, 355 offsetof(struct xdp_md, data_end)), 356 BPF_MOV64_REG(BPF_REG_8, BPF_REG_6), 357 BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 8), 358 BPF_JMP_REG(BPF_JGT, BPF_REG_8, BPF_REG_7, 2), 359 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), 360 /* clear_all_pkt_pointers() has to walk all frames 361 * to make sure that pkt pointers in the caller 362 * are cleared when callee is calling a helper that 363 * adjusts packet size 364 */ 365 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), 366 BPF_MOV32_IMM(BPF_REG_0, 0), 367 BPF_EXIT_INSN(), 368 BPF_MOV64_IMM(BPF_REG_2, 0), 369 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_xdp_adjust_head), 370 BPF_EXIT_INSN(), 371 }, 372 .result = REJECT, 373 .errstr = "R6 invalid mem access 'inv'", 374 .prog_type = BPF_PROG_TYPE_XDP, 375 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 376 }, 377 { 378 "calls: two calls with args", 379 .insns = { 380 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 381 BPF_EXIT_INSN(), 382 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 383 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6), 384 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), 385 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 386 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), 387 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0), 388 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7), 389 BPF_EXIT_INSN(), 390 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 391 offsetof(struct __sk_buff, len)), 392 BPF_EXIT_INSN(), 393 }, 394 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 395 .result = ACCEPT, 396 .retval = TEST_DATA_LEN + TEST_DATA_LEN, 397 }, 398 { 399 "calls: calls with stack arith", 400 .insns = { 401 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 402 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64), 403 BPF_RAW_INSN(BPF_JMP | 
BPF_CALL, 0, 1, 0, 1), 404 BPF_EXIT_INSN(), 405 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64), 406 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 407 BPF_EXIT_INSN(), 408 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64), 409 BPF_MOV64_IMM(BPF_REG_0, 42), 410 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0), 411 BPF_EXIT_INSN(), 412 }, 413 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 414 .result = ACCEPT, 415 .retval = 42, 416 }, 417 { 418 "calls: calls with misaligned stack access", 419 .insns = { 420 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 421 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63), 422 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 423 BPF_EXIT_INSN(), 424 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -61), 425 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 426 BPF_EXIT_INSN(), 427 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63), 428 BPF_MOV64_IMM(BPF_REG_0, 42), 429 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0), 430 BPF_EXIT_INSN(), 431 }, 432 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 433 .flags = F_LOAD_WITH_STRICT_ALIGNMENT, 434 .errstr = "misaligned stack access", 435 .result = REJECT, 436 }, 437 { 438 "calls: calls control flow, jump test", 439 .insns = { 440 BPF_MOV64_IMM(BPF_REG_0, 42), 441 BPF_JMP_IMM(BPF_JA, 0, 0, 2), 442 BPF_MOV64_IMM(BPF_REG_0, 43), 443 BPF_JMP_IMM(BPF_JA, 0, 0, 1), 444 BPF_JMP_IMM(BPF_JA, 0, 0, -3), 445 BPF_EXIT_INSN(), 446 }, 447 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 448 .result = ACCEPT, 449 .retval = 43, 450 }, 451 { 452 "calls: calls control flow, jump test 2", 453 .insns = { 454 BPF_MOV64_IMM(BPF_REG_0, 42), 455 BPF_JMP_IMM(BPF_JA, 0, 0, 2), 456 BPF_MOV64_IMM(BPF_REG_0, 43), 457 BPF_JMP_IMM(BPF_JA, 0, 0, 1), 458 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3), 459 BPF_EXIT_INSN(), 460 }, 461 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 462 .errstr = "jump out of range from insn 1 to 4", 463 .result = REJECT, 464 }, 465 { 466 "calls: two calls with bad jump", 467 .insns = { 468 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 469 BPF_EXIT_INSN(), 470 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 471 
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6), 472 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), 473 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 474 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), 475 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0), 476 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7), 477 BPF_EXIT_INSN(), 478 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 479 offsetof(struct __sk_buff, len)), 480 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3), 481 BPF_EXIT_INSN(), 482 }, 483 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 484 .errstr = "jump out of range from insn 11 to 9", 485 .result = REJECT, 486 }, 487 { 488 "calls: recursive call. test1", 489 .insns = { 490 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 491 BPF_EXIT_INSN(), 492 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1), 493 BPF_EXIT_INSN(), 494 }, 495 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 496 .errstr = "back-edge", 497 .result = REJECT, 498 }, 499 { 500 "calls: recursive call. test2", 501 .insns = { 502 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 503 BPF_EXIT_INSN(), 504 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3), 505 BPF_EXIT_INSN(), 506 }, 507 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 508 .errstr = "back-edge", 509 .result = REJECT, 510 }, 511 { 512 "calls: unreachable code", 513 .insns = { 514 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 515 BPF_EXIT_INSN(), 516 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 517 BPF_EXIT_INSN(), 518 BPF_MOV64_IMM(BPF_REG_0, 0), 519 BPF_EXIT_INSN(), 520 BPF_MOV64_IMM(BPF_REG_0, 0), 521 BPF_EXIT_INSN(), 522 }, 523 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 524 .errstr = "unreachable insn 6", 525 .result = REJECT, 526 }, 527 { 528 "calls: invalid call", 529 .insns = { 530 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 531 BPF_EXIT_INSN(), 532 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -4), 533 BPF_EXIT_INSN(), 534 }, 535 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 536 .errstr = "invalid destination", 537 .result = REJECT, 538 }, 539 { 540 "calls: invalid call 2", 541 .insns = { 542 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 
0, 1), 543 BPF_EXIT_INSN(), 544 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0x7fffffff), 545 BPF_EXIT_INSN(), 546 }, 547 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 548 .errstr = "invalid destination", 549 .result = REJECT, 550 }, 551 { 552 "calls: jumping across function bodies. test1", 553 .insns = { 554 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), 555 BPF_MOV64_IMM(BPF_REG_0, 0), 556 BPF_EXIT_INSN(), 557 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3), 558 BPF_EXIT_INSN(), 559 }, 560 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 561 .errstr = "jump out of range", 562 .result = REJECT, 563 }, 564 { 565 "calls: jumping across function bodies. test2", 566 .insns = { 567 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3), 568 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), 569 BPF_MOV64_IMM(BPF_REG_0, 0), 570 BPF_EXIT_INSN(), 571 BPF_EXIT_INSN(), 572 }, 573 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 574 .errstr = "jump out of range", 575 .result = REJECT, 576 }, 577 { 578 "calls: call without exit", 579 .insns = { 580 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 581 BPF_EXIT_INSN(), 582 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 583 BPF_EXIT_INSN(), 584 BPF_MOV64_IMM(BPF_REG_0, 0), 585 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -2), 586 }, 587 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 588 .errstr = "not an exit", 589 .result = REJECT, 590 }, 591 { 592 "calls: call into middle of ld_imm64", 593 .insns = { 594 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), 595 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), 596 BPF_MOV64_IMM(BPF_REG_0, 0), 597 BPF_EXIT_INSN(), 598 BPF_LD_IMM64(BPF_REG_0, 0), 599 BPF_EXIT_INSN(), 600 }, 601 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 602 .errstr = "last insn", 603 .result = REJECT, 604 }, 605 { 606 "calls: call into middle of other call", 607 .insns = { 608 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), 609 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), 610 BPF_MOV64_IMM(BPF_REG_0, 0), 611 BPF_EXIT_INSN(), 612 BPF_MOV64_IMM(BPF_REG_0, 0), 613 BPF_MOV64_IMM(BPF_REG_0, 0), 614 BPF_EXIT_INSN(), 
615 }, 616 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 617 .errstr = "last insn", 618 .result = REJECT, 619 }, 620 { 621 "calls: ld_abs with changing ctx data in callee", 622 .insns = { 623 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 624 BPF_LD_ABS(BPF_B, 0), 625 BPF_LD_ABS(BPF_H, 0), 626 BPF_LD_ABS(BPF_W, 0), 627 BPF_MOV64_REG(BPF_REG_7, BPF_REG_6), 628 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5), 629 BPF_MOV64_REG(BPF_REG_6, BPF_REG_7), 630 BPF_LD_ABS(BPF_B, 0), 631 BPF_LD_ABS(BPF_H, 0), 632 BPF_LD_ABS(BPF_W, 0), 633 BPF_EXIT_INSN(), 634 BPF_MOV64_IMM(BPF_REG_2, 1), 635 BPF_MOV64_IMM(BPF_REG_3, 2), 636 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_vlan_push), 637 BPF_EXIT_INSN(), 638 }, 639 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 640 .errstr = "BPF_LD_[ABS|IND] instructions cannot be mixed", 641 .result = REJECT, 642 }, 643 { 644 "calls: two calls with bad fallthrough", 645 .insns = { 646 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 647 BPF_EXIT_INSN(), 648 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 649 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6), 650 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), 651 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 652 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), 653 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0), 654 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7), 655 BPF_MOV64_REG(BPF_REG_0, BPF_REG_0), 656 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 657 offsetof(struct __sk_buff, len)), 658 BPF_EXIT_INSN(), 659 }, 660 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 661 .errstr = "not an exit", 662 .result = REJECT, 663 }, 664 { 665 "calls: two calls with stack read", 666 .insns = { 667 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 668 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 669 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), 670 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 671 BPF_EXIT_INSN(), 672 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 673 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6), 674 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), 675 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 676 BPF_RAW_INSN(BPF_JMP | 
BPF_CALL, 0, 1, 0, 3), 677 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0), 678 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7), 679 BPF_EXIT_INSN(), 680 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0), 681 BPF_EXIT_INSN(), 682 }, 683 .prog_type = BPF_PROG_TYPE_XDP, 684 .result = ACCEPT, 685 }, 686 { 687 "calls: two calls with stack write", 688 .insns = { 689 /* main prog */ 690 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 691 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 692 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), 693 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 694 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16), 695 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), 696 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16), 697 BPF_EXIT_INSN(), 698 699 /* subprog 1 */ 700 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 701 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2), 702 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 7), 703 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0), 704 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 705 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4), 706 BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0), 707 BPF_MOV64_REG(BPF_REG_0, BPF_REG_8), 708 /* write into stack frame of main prog */ 709 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0), 710 BPF_EXIT_INSN(), 711 712 /* subprog 2 */ 713 /* read from stack frame of main prog */ 714 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0), 715 BPF_EXIT_INSN(), 716 }, 717 .prog_type = BPF_PROG_TYPE_XDP, 718 .result = ACCEPT, 719 }, 720 { 721 "calls: stack overflow using two frames (pre-call access)", 722 .insns = { 723 /* prog 1 */ 724 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0), 725 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), 726 BPF_EXIT_INSN(), 727 728 /* prog 2 */ 729 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0), 730 BPF_MOV64_IMM(BPF_REG_0, 0), 731 BPF_EXIT_INSN(), 732 }, 733 .prog_type = BPF_PROG_TYPE_XDP, 734 .errstr = "combined stack size", 735 .result = REJECT, 736 }, 737 { 738 "calls: stack overflow using two frames (post-call access)", 739 .insns = { 740 /* prog 1 */ 741 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2), 
742 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0), 743 BPF_EXIT_INSN(), 744 745 /* prog 2 */ 746 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0), 747 BPF_MOV64_IMM(BPF_REG_0, 0), 748 BPF_EXIT_INSN(), 749 }, 750 .prog_type = BPF_PROG_TYPE_XDP, 751 .errstr = "combined stack size", 752 .result = REJECT, 753 }, 754 { 755 "calls: stack depth check using three frames. test1", 756 .insns = { 757 /* main */ 758 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */ 759 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */ 760 BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0), 761 BPF_MOV64_IMM(BPF_REG_0, 0), 762 BPF_EXIT_INSN(), 763 /* A */ 764 BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0), 765 BPF_EXIT_INSN(), 766 /* B */ 767 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */ 768 BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0), 769 BPF_EXIT_INSN(), 770 }, 771 .prog_type = BPF_PROG_TYPE_XDP, 772 /* stack_main=32, stack_A=256, stack_B=64 773 * and max(main+A, main+A+B) < 512 774 */ 775 .result = ACCEPT, 776 }, 777 { 778 "calls: stack depth check using three frames. test2", 779 .insns = { 780 /* main */ 781 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */ 782 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */ 783 BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0), 784 BPF_MOV64_IMM(BPF_REG_0, 0), 785 BPF_EXIT_INSN(), 786 /* A */ 787 BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0), 788 BPF_EXIT_INSN(), 789 /* B */ 790 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */ 791 BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0), 792 BPF_EXIT_INSN(), 793 }, 794 .prog_type = BPF_PROG_TYPE_XDP, 795 /* stack_main=32, stack_A=64, stack_B=256 796 * and max(main+A, main+A+B) < 512 797 */ 798 .result = ACCEPT, 799 }, 800 { 801 "calls: stack depth check using three frames. 
test3", 802 .insns = { 803 /* main */ 804 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 805 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */ 806 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 807 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 8), /* call B */ 808 BPF_JMP_IMM(BPF_JGE, BPF_REG_6, 0, 1), 809 BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0), 810 BPF_MOV64_IMM(BPF_REG_0, 0), 811 BPF_EXIT_INSN(), 812 /* A */ 813 BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 10, 1), 814 BPF_EXIT_INSN(), 815 BPF_ST_MEM(BPF_B, BPF_REG_10, -224, 0), 816 BPF_JMP_IMM(BPF_JA, 0, 0, -3), 817 /* B */ 818 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 1), 819 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -6), /* call A */ 820 BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0), 821 BPF_EXIT_INSN(), 822 }, 823 .prog_type = BPF_PROG_TYPE_XDP, 824 /* stack_main=64, stack_A=224, stack_B=256 825 * and max(main+A, main+A+B) > 512 826 */ 827 .errstr = "combined stack", 828 .result = REJECT, 829 }, 830 { 831 "calls: stack depth check using three frames. test4", 832 /* void main(void) { 833 * func1(0); 834 * func1(1); 835 * func2(1); 836 * } 837 * void func1(int alloc_or_recurse) { 838 * if (alloc_or_recurse) { 839 * frame_pointer[-300] = 1; 840 * } else { 841 * func2(alloc_or_recurse); 842 * } 843 * } 844 * void func2(int alloc_or_recurse) { 845 * if (alloc_or_recurse) { 846 * frame_pointer[-300] = 1; 847 * } 848 * } 849 */ 850 .insns = { 851 /* main */ 852 BPF_MOV64_IMM(BPF_REG_1, 0), 853 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */ 854 BPF_MOV64_IMM(BPF_REG_1, 1), 855 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */ 856 BPF_MOV64_IMM(BPF_REG_1, 1), 857 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 7), /* call B */ 858 BPF_MOV64_IMM(BPF_REG_0, 0), 859 BPF_EXIT_INSN(), 860 /* A */ 861 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2), 862 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0), 863 BPF_EXIT_INSN(), 864 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */ 865 BPF_EXIT_INSN(), 866 /* B */ 867 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), 868 BPF_ST_MEM(BPF_B, 
BPF_REG_10, -300, 0), 869 BPF_EXIT_INSN(), 870 }, 871 .prog_type = BPF_PROG_TYPE_XDP, 872 .result = REJECT, 873 .errstr = "combined stack", 874 }, 875 { 876 "calls: stack depth check using three frames. test5", 877 .insns = { 878 /* main */ 879 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */ 880 BPF_EXIT_INSN(), 881 /* A */ 882 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */ 883 BPF_EXIT_INSN(), 884 /* B */ 885 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */ 886 BPF_EXIT_INSN(), 887 /* C */ 888 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */ 889 BPF_EXIT_INSN(), 890 /* D */ 891 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */ 892 BPF_EXIT_INSN(), 893 /* E */ 894 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */ 895 BPF_EXIT_INSN(), 896 /* F */ 897 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */ 898 BPF_EXIT_INSN(), 899 /* G */ 900 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */ 901 BPF_EXIT_INSN(), 902 /* H */ 903 BPF_MOV64_IMM(BPF_REG_0, 0), 904 BPF_EXIT_INSN(), 905 }, 906 .prog_type = BPF_PROG_TYPE_XDP, 907 .errstr = "call stack", 908 .result = REJECT, 909 }, 910 { 911 "calls: spill into caller stack frame", 912 .insns = { 913 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 914 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 915 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), 916 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 917 BPF_EXIT_INSN(), 918 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0), 919 BPF_MOV64_IMM(BPF_REG_0, 0), 920 BPF_EXIT_INSN(), 921 }, 922 .prog_type = BPF_PROG_TYPE_XDP, 923 .errstr = "cannot spill", 924 .result = REJECT, 925 }, 926 { 927 "calls: write into caller stack frame", 928 .insns = { 929 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 930 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), 931 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 932 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), 933 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), 934 BPF_EXIT_INSN(), 935 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42), 936 BPF_MOV64_IMM(BPF_REG_0, 
0), 937 BPF_EXIT_INSN(), 938 }, 939 .prog_type = BPF_PROG_TYPE_XDP, 940 .result = ACCEPT, 941 .retval = 42, 942 }, 943 { 944 "calls: write into callee stack frame", 945 .insns = { 946 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), 947 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42), 948 BPF_EXIT_INSN(), 949 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10), 950 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, -8), 951 BPF_EXIT_INSN(), 952 }, 953 .prog_type = BPF_PROG_TYPE_XDP, 954 .errstr = "cannot return stack pointer", 955 .result = REJECT, 956 }, 957 { 958 "calls: two calls with stack write and void return", 959 .insns = { 960 /* main prog */ 961 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 962 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 963 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), 964 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 965 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16), 966 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), 967 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16), 968 BPF_EXIT_INSN(), 969 970 /* subprog 1 */ 971 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 972 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2), 973 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), 974 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), 975 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 976 BPF_EXIT_INSN(), 977 978 /* subprog 2 */ 979 /* write into stack frame of main prog */ 980 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0), 981 BPF_EXIT_INSN(), /* void return */ 982 }, 983 .prog_type = BPF_PROG_TYPE_XDP, 984 .result = ACCEPT, 985 }, 986 { 987 "calls: ambiguous return value", 988 .insns = { 989 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 990 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5), 991 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 992 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 993 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), 994 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 995 BPF_EXIT_INSN(), 996 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), 997 BPF_MOV64_IMM(BPF_REG_0, 0), 998 BPF_EXIT_INSN(), 999 }, 1000 .errstr_unpriv = "allowed for root only", 1001 .result_unpriv = REJECT, 1002 .errstr = "R0 !read_ok", 
1003 .result = REJECT, 1004 }, 1005 { 1006 "calls: two calls that return map_value", 1007 .insns = { 1008 /* main prog */ 1009 /* pass fp-16, fp-8 into a function */ 1010 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 1011 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), 1012 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 1013 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16), 1014 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8), 1015 1016 /* fetch map_value_ptr from the stack of this function */ 1017 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8), 1018 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), 1019 /* write into map value */ 1020 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), 1021 /* fetch secound map_value_ptr from the stack */ 1022 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16), 1023 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), 1024 /* write into map value */ 1025 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), 1026 BPF_MOV64_IMM(BPF_REG_0, 0), 1027 BPF_EXIT_INSN(), 1028 1029 /* subprog 1 */ 1030 /* call 3rd function twice */ 1031 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 1032 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2), 1033 /* first time with fp-8 */ 1034 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), 1035 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), 1036 /* second time with fp-16 */ 1037 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 1038 BPF_EXIT_INSN(), 1039 1040 /* subprog 2 */ 1041 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 1042 /* lookup from map */ 1043 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 1044 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 1045 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 1046 BPF_LD_MAP_FD(BPF_REG_1, 0), 1047 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 1048 /* write map_value_ptr into stack frame of main prog */ 1049 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0), 1050 BPF_MOV64_IMM(BPF_REG_0, 0), 1051 BPF_EXIT_INSN(), /* return 0 */ 1052 }, 1053 .prog_type = BPF_PROG_TYPE_XDP, 1054 .fixup_map_hash_8b = { 23 }, 1055 .result = ACCEPT, 1056 }, 1057 { 1058 "calls: two calls that return map_value with bool condition", 1059 
.insns = {
	/* main prog */
	/* pass fp-16, fp-8 into a function */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	/* call 3rd function twice */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	/* first time with fp-8 */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
	/* second time with fp-16 */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
	/* fetch second map_value_ptr from the stack */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	BPF_EXIT_INSN(),

	/* subprog 2 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	/* lookup from map */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(), /* return 0 */
	/* write map_value_ptr into stack frame of main prog */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(), /* return 1 */
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.fixup_map_hash_8b = { 23 },
	.result = ACCEPT,
},
{
	"calls: two calls that return map_value with incorrect bool check",
	.insns = {
	/* main prog */
	/* pass fp-16, fp-8 into a function */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	/* call 3rd function twice */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	/* first time with fp-8 */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
	/* second time with fp-16 */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	/* fetch second map_value_ptr from the stack */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	BPF_EXIT_INSN(),

	/* subprog 2 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	/* lookup from map */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(), /* return 0 */
	/* write map_value_ptr into stack frame of main prog */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(), /* return 1 */
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.fixup_map_hash_8b = { 23 },
	.result = REJECT,
	.errstr = "invalid read from stack off -16+0 size 8",
},
{
	"calls: two calls that receive map_value via arg=ptr_stack_of_caller. test1",
	.insns = {
	/* main prog */
	/* pass fp-16, fp-8 into a function */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	/* 1st lookup from map */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_8, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
	/* write map_value_ptr into stack frame of main prog at fp-8 */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_8, 1),

	/* 2nd lookup from map */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */ BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_9, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
	/* write map_value_ptr into stack frame of main prog at fp-16 */
	BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_9, 1),

	/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
	BPF_EXIT_INSN(),

	/* subprog 2 */
	/* if arg2 == 1 do *arg1 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),

	/* if arg4 == 1 do *arg3 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.fixup_map_hash_8b = { 12, 22 },
	.result = REJECT,
	.errstr = "invalid access to map value, value_size=8 off=2 size=8",
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"calls: two calls that receive map_value via arg=ptr_stack_of_caller. test2",
	.insns = {
	/* main prog */
	/* pass fp-16, fp-8 into a function */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	/* 1st lookup from map */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_8, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
	/* write map_value_ptr into stack frame of main prog at fp-8 */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_8, 1),

	/* 2nd lookup from map */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */ BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_9, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
	/* write map_value_ptr into stack frame of main prog at fp-16 */
	BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_9, 1),

	/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
	BPF_EXIT_INSN(),

	/* subprog 2 */
	/* if arg2 == 1 do *arg1 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),

	/* if arg4 == 1 do *arg3 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.fixup_map_hash_8b = { 12, 22 },
	.result = ACCEPT,
},
{
	"calls: two jumps that receive map_value via arg=ptr_stack_of_jumper. test3",
	.insns = {
	/* main prog */
	/* pass fp-16, fp-8 into a function */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	/* 1st lookup from map */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -24, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_8, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
	/* write map_value_ptr into stack frame of main prog at fp-8 */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_8, 1),

	/* 2nd lookup from map */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_9, 0), // 26
	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
	/* write map_value_ptr into stack frame of main prog at fp-16 */
	BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_9, 1),

	/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), // 30
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), // 34
	BPF_JMP_IMM(BPF_JA, 0, 0, -30),

	/* subprog 2 */
	/* if arg2 == 1 do *arg1 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),

	/* if arg4 == 1 do *arg3 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, -8),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.fixup_map_hash_8b = { 12, 22 },
	.result = REJECT,
	.errstr = "invalid access to map value, value_size=8 off=2 size=8",
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"calls: two calls that receive map_value_ptr_or_null via arg. test1",
	.insns = {
	/* main prog */
	/* pass fp-16, fp-8 into a function */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	/* 1st lookup from map */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_8, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
	BPF_MOV64_IMM(BPF_REG_8, 1),

	/* 2nd lookup from map */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	/* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
	BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_9, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
	BPF_MOV64_IMM(BPF_REG_9, 1),

	/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),

	/* subprog 2 */
	/* if arg2 == 1 do *arg1 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),

	/* if arg4 == 1 do *arg3 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.fixup_map_hash_8b = { 12, 22 },
	.result = ACCEPT,
},
{
	"calls: two calls that receive map_value_ptr_or_null via arg. test2",
	.insns = {
	/* main prog */
	/* pass fp-16, fp-8 into a function */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	/* 1st lookup from map */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_8, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
	BPF_MOV64_IMM(BPF_REG_8, 1),

	/* 2nd lookup from map */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	/* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
	BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_9, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
	BPF_MOV64_IMM(BPF_REG_9, 1),

	/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),

	/* subprog 2 */
	/* if arg2 == 1 do *arg1 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),

	/* if arg4 == 0 do *arg3 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 0, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.fixup_map_hash_8b = { 12, 22 },
	.result = REJECT,
	.errstr = "R0 invalid mem access 'inv'",
},
{
	"calls: pkt_ptr spill into caller stack",
	.insns = {
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	/* spill unchecked pkt_ptr into stack of caller */
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
	/* now the pkt range is verified, read pkt_ptr from stack */
	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
	/* write 4 bytes into packet */
	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.retval = POINTER_VALUE,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"calls: pkt_ptr spill into caller stack 2",
	.insns = {
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	/* Marking is still kept, but not in all cases safe. */
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
	BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	/* spill unchecked pkt_ptr into stack of caller */
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
	/* now the pkt range is verified, read pkt_ptr from stack */
	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
	/* write 4 bytes into packet */
	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.errstr = "invalid access to packet",
	.result = REJECT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"calls: pkt_ptr spill into caller stack 3",
	.insns = {
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
	/* Marking is still kept and safe here. */
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
	BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	/* spill unchecked pkt_ptr into stack of caller */
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_5, 0),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
	BPF_MOV64_IMM(BPF_REG_5, 1),
	/* now the pkt range is verified, read pkt_ptr from stack */
	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
	/* write 4 bytes into packet */
	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
	.retval = 1,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"calls: pkt_ptr spill into caller stack 4",
	.insns = {
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
	/* Check marking propagated. */
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
	BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	/* spill unchecked pkt_ptr into stack of caller */
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_5, 0),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
	BPF_MOV64_IMM(BPF_REG_5, 1),
	/* don't read back pkt_ptr from stack here */
	/* write 4 bytes into packet */
	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
	.retval = 1,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"calls: pkt_ptr spill into caller stack 5",
	.insns = {
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	BPF_MOV64_IMM(BPF_REG_5, 0),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
	/* spill checked pkt_ptr into stack of caller */
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_5, 1),
	/* don't read back pkt_ptr from stack here */
	/* write 4 bytes into packet */
	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.errstr = "same insn cannot be used with different",
	.result = REJECT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"calls: pkt_ptr spill into caller stack 6",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	BPF_MOV64_IMM(BPF_REG_5, 0),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
	/* spill checked pkt_ptr into stack of caller */
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_5, 1),
	/* don't read back pkt_ptr from stack here */
	/* write 4 bytes into packet */
	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.errstr = "R4 invalid mem access",
	.result = REJECT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"calls: pkt_ptr spill into caller stack 7",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_2, 0),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	BPF_MOV64_IMM(BPF_REG_5, 0),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
	/* spill checked pkt_ptr into stack of caller */
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_5, 1),
	/* don't read back pkt_ptr from stack here */
	/* write 4 bytes into packet */
	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.errstr = "R4 invalid mem access",
	.result = REJECT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"calls: pkt_ptr spill into caller stack 8",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	BPF_MOV64_IMM(BPF_REG_5, 0),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
	/* spill checked pkt_ptr into stack of caller */
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_5, 1),
	/* don't read back pkt_ptr from stack here */
	/* write 4 bytes into packet */
	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"calls: pkt_ptr spill into caller stack 9",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	BPF_MOV64_IMM(BPF_REG_5, 0),
	/* spill unchecked pkt_ptr into stack of caller */
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
	BPF_MOV64_IMM(BPF_REG_5, 1),
	/* don't read back pkt_ptr from stack here */
	/* write 4 bytes into packet */
	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.errstr = "invalid access to packet",
	.result = REJECT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"calls: caller stack init to zero or map_value_or_null",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
	/* fetch map_value_or_null or const_zero from stack */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
	/* store into map_value */
	BPF_ST_MEM(BPF_W, BPF_REG_0, 0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	/* if (ctx == 0) return; */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 8),
	/* else bpf_map_lookup() and *(fp - 8) = r0 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 13 },
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_XDP,
},
{
	"calls: stack init to zero and pruning",
	.insns = {
	/* first make allocated_stack 16 byte */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
	/* now fork the execution such that the false branch
	 * of JGT insn will be verified second and it skips zero
	 * init of fp-8 stack slot. If stack liveness marking
	 * is missing live_read marks from call map_lookup
	 * processing then pruning will incorrectly assume
	 * that fp-8 stack slot was unused in the fall-through
	 * branch and will accept the program incorrectly
	 */
	BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 2),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 6 },
	.errstr = "invalid indirect read from stack off -8+0 size 8",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_XDP,
},
{
	"calls: ctx read at start of subprog",
	.insns = {
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
	BPF_JMP_REG(BPF_JSGT, BPF_REG_0, BPF_REG_0, 0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_EXIT_INSN(),
	BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
	.errstr_unpriv = "function calls to other bpf functions are allowed for root only",
	.result_unpriv = REJECT,
	.result = ACCEPT,
},
{
	"calls: cross frame pruning",
	.insns = {
	/* r8 = !!random();
	 * call pruner()
	 * if (r8)
	 *     do something bad;
	 */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
	BPF_MOV64_IMM(BPF_REG_8, 0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_MOV64_IMM(BPF_REG_8, 1),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 1, 1),
	BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
	.errstr_unpriv = "function calls to other bpf functions are allowed for root only",
	.errstr = "!read_ok",
	.result = REJECT,
},
{
	"calls: cross frame pruning - liveness propagation",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
	BPF_MOV64_IMM(BPF_REG_8, 0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_MOV64_IMM(BPF_REG_8, 1),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
	BPF_MOV64_IMM(BPF_REG_9, 0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_MOV64_IMM(BPF_REG_9, 1),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 1, 1),
	BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
	.errstr_unpriv = "function calls to other bpf functions are allowed for root only",
	.errstr = "!read_ok",
	.result = REJECT,
},