/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#ifndef _LINUX_BPF_VERIFIER_H
#define _LINUX_BPF_VERIFIER_H 1

#include <linux/bpf.h> /* for enum bpf_reg_type */
#include <linux/btf.h> /* for struct btf and btf_id() */
#include <linux/filter.h> /* for MAX_BPF_STACK */
#include <linux/tnum.h>

/* Maximum variable offset umax_value permitted when resolving memory accesses.
 * In practice this is far bigger than any realistic pointer offset; this limit
 * ensures that umax_value + (int)off + (int)size cannot overflow a u64.
 */
#define BPF_MAX_VAR_OFF	(1 << 29)
/* Maximum variable size permitted for ARG_CONST_SIZE[_OR_ZERO]. This ensures
 * that converting umax_value to int cannot overflow.
 */
#define BPF_MAX_VAR_SIZ	(1 << 29)
/* size of tmp_str_buf in bpf_verifier.
 * we need at least 306 bytes to fit full stack mask representation
 * (in the "-8,-16,...,-512" form)
 */
#define TMP_STR_BUF_LEN 320

/* Liveness marks, used for registers and spilled-regs (in stack slots).
 * Read marks propagate upwards until they find a write mark; they record that
 * "one of this state's descendants read this reg" (and therefore the reg is
 * relevant for states_equal() checks).
 * Write marks collect downwards and do not propagate; they record that "the
 * straight-line code that reached this state (from its parent) wrote this reg"
 * (and therefore that reads propagated from this state or its descendants
 * should not propagate to its parent).
 * A state with a write mark can receive read marks; it just won't propagate
 * them to its parent, since the write mark is a property, not of the state,
 * but of the link between it and its parent. See mark_reg_read() and
 * mark_stack_slot_read() in kernel/bpf/verifier.c.
 */
enum bpf_reg_liveness {
	REG_LIVE_NONE = 0, /* reg hasn't been read or written this branch */
	REG_LIVE_READ32 = 0x1, /* reg was read, so we're sensitive to initial value */
	REG_LIVE_READ64 = 0x2, /* likewise, but full 64-bit content matters */
	REG_LIVE_READ = REG_LIVE_READ32 | REG_LIVE_READ64,
	REG_LIVE_WRITTEN = 0x4, /* reg was written first, screening off later reads */
	REG_LIVE_DONE = 0x8, /* liveness won't be updating this register anymore */
};
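
/* Illustrative sketch (not taken from verifier.c): for a branch whose
 * straight-line code is
 *
 *	r2 = 7;			// r2 gets REG_LIVE_WRITTEN in this state
 *	r0 = *(u64 *)(r1 + 0);	// the read of r1 propagates REG_LIVE_READ64 up
 *				// the parentage chain until a state that already
 *				// wrote r1 (or the root state) is reached
 *
 * a later read of r2 in a descendant state stops propagating at this state,
 * because the write mark screens it off from the parent.
 */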
/* For every reg representing a map value or allocated object pointer,
 * we consider the tuple of (ptr, id) to be unique in verifier context
 * and consider them to not alias each other for the purposes of
 * tracking lock state.
 */
struct bpf_active_lock {
	/* This can either be reg->map_ptr or reg->btf. If ptr is NULL,
	 * there's no active lock held, and other fields have no
	 * meaning. If non-NULL, it indicates that a lock is held and
	 * id member has the reg->id of the register which can be >= 0.
	 */
	void *ptr;
	/* This will be reg->id */
	u32 id;
};

#define ITER_PREFIX "bpf_iter_"

enum bpf_iter_state {
	BPF_ITER_STATE_INVALID, /* for non-first slot */
	BPF_ITER_STATE_ACTIVE,
	BPF_ITER_STATE_DRAINED,
};

struct bpf_reg_state {
	/* Ordering of fields matters. See states_equal() */
	enum bpf_reg_type type;
	/* Fixed part of pointer offset, pointer types only */
	s32 off;
	union {
		/* valid when type == PTR_TO_PACKET */
		int range;

		/* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE |
		 *   PTR_TO_MAP_VALUE_OR_NULL
		 */
		struct {
			struct bpf_map *map_ptr;
			/* To distinguish map lookups from outer map
			 * the map_uid is non-zero for registers
			 * pointing to inner maps.
			 */
			u32 map_uid;
		};

		/* for PTR_TO_BTF_ID */
		struct {
			struct btf *btf;
			u32 btf_id;
		};

		struct { /* for PTR_TO_MEM | PTR_TO_MEM_OR_NULL */
			u32 mem_size;
			u32 dynptr_id; /* for dynptr slices */
		};

		/* For dynptr stack slots */
		struct {
			enum bpf_dynptr_type type;
			/* A dynptr is 16 bytes so it takes up 2 stack slots.
			 * We need to track which slot is the first slot
			 * to protect against cases where the user may try to
			 * pass in an address starting at the second slot of the
			 * dynptr.
			 */
			bool first_slot;
		} dynptr;

		/* For bpf_iter stack slots */
		struct {
			/* BTF container and BTF type ID describing
			 * struct bpf_iter_<type> of an iterator state
			 */
			struct btf *btf;
			u32 btf_id;
			/* packing following two fields to fit iter state into 16 bytes */
			enum bpf_iter_state state:2;
			int depth:30;
		} iter;

		/* Max size from any of the above. */
		struct {
			unsigned long raw1;
			unsigned long raw2;
		} raw;

		u32 subprogno; /* for PTR_TO_FUNC */
	};
	/* For scalar types (SCALAR_VALUE), this represents our knowledge of
	 * the actual value.
	 * For pointer types, this represents the variable part of the offset
	 * from the pointed-to object, and is shared with all bpf_reg_states
	 * with the same id as us.
	 */
	struct tnum var_off;
	/* Used to determine if any memory access using this register will
	 * result in a bad access.
	 * These refer to the same value as var_off, not necessarily the actual
	 * contents of the register.
	 */
	s64 smin_value; /* minimum possible (s64)value */
	s64 smax_value; /* maximum possible (s64)value */
	u64 umin_value; /* minimum possible (u64)value */
	u64 umax_value; /* maximum possible (u64)value */
	s32 s32_min_value; /* minimum possible (s32)value */
	s32 s32_max_value; /* maximum possible (s32)value */
	u32 u32_min_value; /* minimum possible (u32)value */
	u32 u32_max_value; /* maximum possible (u32)value */
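	/* Informal example (the concrete values are illustrative assumptions,
	 * not taken from verifier.c): after a program executes "w1 &= 0xff",
	 * the verifier may track r1 as
	 *	var_off = (value 0x0, mask 0xff),
	 *	umin_value = 0, umax_value = 255,
	 *	smin_value = 0, smax_value = 255,
	 * with the 32-bit bounds narrowed the same way.
	 */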
	/* For PTR_TO_PACKET, used to find other pointers with the same variable
	 * offset, so they can share range knowledge.
	 * For PTR_TO_MAP_VALUE_OR_NULL this is used to share which map value we
	 * came from, when one is tested for != NULL.
	 * For PTR_TO_MEM_OR_NULL this is used to identify memory allocation
	 * for the purpose of tracking that it's freed.
	 * For PTR_TO_SOCKET this is used to share which pointers retain the
	 * same reference to the socket, to determine proper reference freeing.
	 * For stack slots that are dynptrs, this is used to track references to
	 * the dynptr to determine proper reference freeing.
	 * Similarly to dynptrs, we use ID to track "belonging" of a reference
	 * to a specific instance of bpf_iter.
	 */
	u32 id;
	/* PTR_TO_SOCKET and PTR_TO_TCP_SOCK could be a ptr returned
	 * from a pointer-cast helper, bpf_sk_fullsock() and
	 * bpf_tcp_sock().
	 *
	 * Consider the following where "sk" is a reference counted
	 * pointer returned from "sk = bpf_sk_lookup_tcp();":
	 *
	 * 1: sk = bpf_sk_lookup_tcp();
	 * 2: if (!sk) { return 0; }
	 * 3: fullsock = bpf_sk_fullsock(sk);
	 * 4: if (!fullsock) { bpf_sk_release(sk); return 0; }
	 * 5: tp = bpf_tcp_sock(fullsock);
	 * 6: if (!tp) { bpf_sk_release(sk); return 0; }
	 * 7: bpf_sk_release(sk);
	 * 8: snd_cwnd = tp->snd_cwnd;  // verifier will complain
	 *
	 * After bpf_sk_release(sk) at line 7, both "fullsock" ptr and
	 * "tp" ptr should be invalidated also. In order to do that,
	 * the regs holding "fullsock" and "sk" need to remember
	 * the original refcounted ptr id (i.e. sk_reg->id) in ref_obj_id
	 * such that the verifier can reset all regs which have
	 * ref_obj_id matching the sk_reg->id.
	 *
	 * sk_reg->ref_obj_id is set to sk_reg->id at line 1.
	 * sk_reg->id is kept for NULL-marking purposes only.
	 * After NULL-marking is done, sk_reg->id can be reset to 0.
	 *
	 * After "fullsock = bpf_sk_fullsock(sk);" at line 3,
	 * fullsock_reg->ref_obj_id is set to sk_reg->ref_obj_id.
	 *
	 * After "tp = bpf_tcp_sock(fullsock);" at line 5,
	 * tp_reg->ref_obj_id is set to fullsock_reg->ref_obj_id
	 * which is the same as sk_reg->ref_obj_id.
	 *
	 * From the verifier perspective, if sk, fullsock and tp
	 * are not NULL, they are the same ptr with different
	 * reg->type. In particular, bpf_sk_release(tp) is also
	 * allowed and has the same effect as bpf_sk_release(sk).
	 */
	u32 ref_obj_id;
	/* parentage chain for liveness checking */
	struct bpf_reg_state *parent;
	/* Inside the callee two registers can be both PTR_TO_STACK like
	 * R1=fp-8 and R2=fp-8, but one of them points to this function stack
	 * while another to the caller's stack. To differentiate them 'frameno'
	 * is used which is an index in bpf_verifier_state->frame[] array
	 * pointing to bpf_func_state.
	 */
	u32 frameno;
	/* Tracks subreg definition. The stored value is the insn_idx of the
	 * writing insn. This is safe because subreg_def is used before any insn
	 * patching which only happens after main verification finished.
	 */
	s32 subreg_def;
	enum bpf_reg_liveness live;
	/* if (!precise && SCALAR_VALUE) min/max/tnum don't affect safety */
	bool precise;
};

enum bpf_stack_slot_type {
	STACK_INVALID,    /* nothing was stored in this stack slot */
	STACK_SPILL,      /* register spilled into stack */
	STACK_MISC,	  /* BPF program wrote some data into this slot */
	STACK_ZERO,	  /* BPF program wrote constant zero */
	/* A dynptr is stored in this stack slot. The type of dynptr
	 * is stored in bpf_stack_state->spilled_ptr.dynptr.type
	 */
	STACK_DYNPTR,
	STACK_ITER,
};

#define BPF_REG_SIZE 8	/* size of eBPF register in bytes */

#define BPF_REGMASK_ARGS ((1 << BPF_REG_1) | (1 << BPF_REG_2) | \
			  (1 << BPF_REG_3) | (1 << BPF_REG_4) | \
			  (1 << BPF_REG_5))

#define BPF_DYNPTR_SIZE		sizeof(struct bpf_dynptr_kern)
#define BPF_DYNPTR_NR_SLOTS	(BPF_DYNPTR_SIZE / BPF_REG_SIZE)

struct bpf_stack_state {
	struct bpf_reg_state spilled_ptr;
	u8 slot_type[BPF_REG_SIZE];
};
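
/* Informal example (the instructions shown are illustrative, not taken from
 * verifier.c): a full-width spill such as
 *	*(u64 *)(r10 - 8) = r6;
 * typically marks all eight slot_type bytes of that slot STACK_SPILL and
 * copies r6's bpf_reg_state into spilled_ptr, while a narrower store such as
 *	*(u8 *)(r10 - 16) = r6;
 * only marks the written byte, usually as STACK_MISC (or STACK_ZERO when the
 * verifier knows the stored value is zero).
 */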
struct bpf_reference_state {
	/* Track each reference created with a unique id, even if the same
	 * instruction creates the reference multiple times (eg, via CALL).
	 */
	int id;
	/* Instruction where the allocation of this reference occurred. This
	 * is used purely to inform the user of a reference leak.
	 */
	int insn_idx;
	/* There can be a case like:
	 * main (frame 0)
	 *  cb (frame 1)
	 *   func (frame 3)
	 *    cb (frame 4)
	 * Hence for frame 4, if callback_ref just stored boolean, it would be
	 * impossible to distinguish nested callback refs. Hence store the
	 * frameno and compare that to callback_ref in check_reference_leak when
	 * exiting a callback function.
	 */
	int callback_ref;
};

/* state of the program:
 * type of all registers and stack info
 */
struct bpf_func_state {
	struct bpf_reg_state regs[MAX_BPF_REG];
	/* index of call instruction that called into this func */
	int callsite;
	/* stack frame number of this function state from pov of
	 * enclosing bpf_verifier_state.
	 * 0 = main function, 1 = first callee.
	 */
	u32 frameno;
	/* subprog number == index within subprog_info
	 * zero == main subprog
	 */
	u32 subprogno;
	/* Every bpf_timer_start will increment async_entry_cnt.
	 * It's used to distinguish:
	 *	void foo(void) { for(;;); }
	 *	void foo(void) { bpf_timer_set_callback(,foo); }
	 */
	u32 async_entry_cnt;
	bool in_callback_fn;
	struct tnum callback_ret_range;
	bool in_async_callback_fn;

	/* The following fields should be last. See copy_func_state() */
	int acquired_refs;
	struct bpf_reference_state *refs;
	int allocated_stack;
	struct bpf_stack_state *stack;
};

struct bpf_idx_pair {
	u32 prev_idx;
	u32 idx;
};

#define MAX_CALL_FRAMES 8
/* Maximum number of register states that can exist at once */
#define BPF_ID_MAP_SIZE ((MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE) * MAX_CALL_FRAMES)
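/* For illustration: with the current MAX_BPF_REG == 11, MAX_BPF_STACK == 512
 * and BPF_REG_SIZE == 8, this works out to (11 + 64) * 8 == 600 entries.
 */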
struct bpf_verifier_state {
	/* call stack tracking */
	struct bpf_func_state *frame[MAX_CALL_FRAMES];
	struct bpf_verifier_state *parent;
	/*
	 * 'branches' field is the number of branches left to explore:
	 * 0 - all possible paths from this state reached bpf_exit or
	 *     were safely pruned
	 * 1 - at least one path is being explored.
	 *     This state hasn't reached bpf_exit
	 * 2 - at least two paths are being explored.
	 *     This state is an immediate parent of two children.
	 *     One is fallthrough branch with branches==1 and another
	 *     state is pushed into stack (to be explored later) also with
	 *     branches==1. The parent of this state has branches==1.
	 * The verifier state tree connected via 'parent' pointer looks like:
	 * 1
	 * 1
	 * 2 -> 1 (first 'if' pushed into stack)
	 * 1
	 * 2 -> 1 (second 'if' pushed into stack)
	 * 1
	 * 1
	 * 1 bpf_exit.
	 *
	 * Once do_check() reaches bpf_exit, it calls update_branch_counts()
	 * and the verifier state tree will look:
	 * 1
	 * 1
	 * 2 -> 1 (first 'if' pushed into stack)
	 * 1
	 * 1 -> 1 (second 'if' pushed into stack)
	 * 0
	 * 0
	 * 0 bpf_exit.
	 * After pop_stack() the do_check() will resume at second 'if'.
	 *
	 * If is_state_visited() sees a state with branches > 0 it means
	 * there is a loop. If such state is exactly equal to the current state
	 * it's an infinite loop. Note states_equal() checks for states
	 * equivalency, so two states being 'states_equal' does not mean
	 * infinite loop. The exact comparison is provided by
	 * states_maybe_looping() function. It's a stronger pre-check and
	 * much faster than states_equal().
	 *
	 * This algorithm may not find all possible infinite loops or
	 * loop iteration count may be too high.
	 * In such cases BPF_COMPLEXITY_LIMIT_INSNS limit kicks in.
	 */
	u32 branches;
	u32 insn_idx;
	u32 curframe;

	struct bpf_active_lock active_lock;
	bool speculative;
	bool active_rcu_lock;

	/* first and last insn idx of this verifier state */
	u32 first_insn_idx;
	u32 last_insn_idx;
	/* jmp history recorded from first to last.
	 * backtracking is using it to go from last to first.
	 * For most states jmp_history_cnt is [0-3].
	 * For loops can go up to ~40.
	 */
	struct bpf_idx_pair *jmp_history;
	u32 jmp_history_cnt;
};

#define bpf_get_spilled_reg(slot, frame)				\
	(((slot < frame->allocated_stack / BPF_REG_SIZE) &&		\
	  (frame->stack[slot].slot_type[0] == STACK_SPILL))		\
	 ? &frame->stack[slot].spilled_ptr : NULL)

/* Iterate over 'frame', setting 'reg' to either NULL or a spilled register. */
#define bpf_for_each_spilled_reg(iter, frame, reg)			\
	for (iter = 0, reg = bpf_get_spilled_reg(iter, frame);		\
	     iter < frame->allocated_stack / BPF_REG_SIZE;		\
	     iter++, reg = bpf_get_spilled_reg(iter, frame))

/* Invoke __expr over registers in __vst, setting __state and __reg */
#define bpf_for_each_reg_in_vstate(__vst, __state, __reg, __expr)	 \
	({								 \
		struct bpf_verifier_state *___vstate = __vst;		 \
		int ___i, ___j;						 \
		for (___i = 0; ___i <= ___vstate->curframe; ___i++) {	 \
			struct bpf_reg_state *___regs;			 \
			__state = ___vstate->frame[___i];		 \
			___regs = __state->regs;			 \
			for (___j = 0; ___j < MAX_BPF_REG; ___j++) {	 \
				__reg = &___regs[___j];			 \
				(void)(__expr);				 \
			}						 \
			bpf_for_each_spilled_reg(___j, __state, __reg) { \
				if (!__reg)				 \
					continue;			 \
				(void)(__expr);				 \
			}						 \
		}							 \
	})
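
/* Usage sketch (a hypothetical caller, not a copy of verifier.c): walk every
 * register and spilled register of the current verifier state and clobber the
 * ones still referring to a released object ("released_id" is an assumed
 * local variable):
 *
 *	struct bpf_func_state *state;
 *	struct bpf_reg_state *reg;
 *
 *	bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({
 *		if (reg->ref_obj_id == released_id)
 *			__mark_reg_unknown(env, reg);
 *	}));
 */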
/* linked list of verifier states used to prune search */
struct bpf_verifier_state_list {
	struct bpf_verifier_state state;
	struct bpf_verifier_state_list *next;
	int miss_cnt, hit_cnt;
};

struct bpf_loop_inline_state {
	unsigned int initialized:1; /* set to true upon first entry */
	unsigned int fit_for_inline:1; /* true if callback function is the same
					* at each call and flags are always zero
					*/
	u32 callback_subprogno; /* valid when fit_for_inline is true */
};

/* Possible states for alu_state member. */
#define BPF_ALU_SANITIZE_SRC		(1U << 0)
#define BPF_ALU_SANITIZE_DST		(1U << 1)
#define BPF_ALU_NEG_VALUE		(1U << 2)
#define BPF_ALU_NON_POINTER		(1U << 3)
#define BPF_ALU_IMMEDIATE		(1U << 4)
#define BPF_ALU_SANITIZE		(BPF_ALU_SANITIZE_SRC | \
					 BPF_ALU_SANITIZE_DST)

struct bpf_insn_aux_data {
	union {
		enum bpf_reg_type ptr_type;	/* pointer type for load/store insns */
		unsigned long map_ptr_state;	/* pointer/poison value for maps */
		s32 call_imm;			/* saved imm field of call insn */
		u32 alu_limit;			/* limit for add/sub register with pointer */
		struct {
			u32 map_index;		/* index into used_maps[] */
			u32 map_off;		/* offset from value base address */
		};
		struct {
			enum bpf_reg_type reg_type;	/* type of pseudo_btf_id */
			union {
				struct {
					struct btf *btf;
					u32 btf_id;	/* btf_id for struct typed var */
				};
				u32 mem_size;	/* mem_size for non-struct typed var */
			};
		} btf_var;
		/* if instruction is a call to bpf_loop this field tracks
		 * the state of the relevant registers to make decision about inlining
		 */
		struct bpf_loop_inline_state loop_inline_state;
	};
	union {
		/* remember the size of type passed to bpf_obj_new to rewrite R1 */
		u64 obj_new_size;
		/* remember the offset of node field within type to rewrite */
		u64 insert_off;
	};
	struct btf_struct_meta *kptr_struct_meta;
	u64 map_key_state; /* constant (32 bit) key tracking for maps */
	int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
	u32 seen; /* this insn was processed by the verifier at env->pass_cnt */
	bool sanitize_stack_spill; /* subject to Spectre v4 sanitation */
	bool zext_dst; /* this insn zero extends dst reg */
	bool storage_get_func_atomic; /* bpf_*_storage_get() with atomic memory alloc */
	bool is_iter_next; /* bpf_iter_<type>_next() kfunc call */
	u8 alu_state; /* used in combination with alu_limit */

	/* below fields are initialized once */
	unsigned int orig_idx; /* original instruction index */
	bool jmp_point;
	bool prune_point;
	/* ensure we check state equivalence and save state checkpoint at
	 * this instruction, regardless of any heuristics
	 */
	bool force_checkpoint;
};

#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
#define MAX_USED_BTFS 64 /* max number of BTFs accessed by one BPF program */

#define BPF_VERIFIER_TMP_LOG_SIZE	1024

struct bpf_verifier_log {
	/* Logical start and end positions of a "log window" of the verifier log.
	 * start_pos == 0 means we haven't truncated anything.
	 * Once truncation starts to happen, start_pos + len_total == end_pos,
	 * except during log reset situations, in which (end_pos - start_pos)
	 * might get smaller than len_total (see bpf_vlog_reset()).
	 * Generally, (end_pos - start_pos) gives the number of useful bytes
	 * in the user log buffer.
	 */
	u64 start_pos;
	u64 end_pos;
	char __user *ubuf;
	u32 level;
	u32 len_total;
	u32 len_max;
	char kbuf[BPF_VERIFIER_TMP_LOG_SIZE];
};
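
/* Worked example (the numbers are illustrative only): with len_total == 16,
 * after 40 bytes have been logged in the default rotating mode, start_pos == 24
 * and end_pos == 40, i.e. only the last 16 bytes are preserved for user space;
 * with BPF_LOG_FIXED the first 16 bytes would be kept and the rest dropped.
 */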
#define BPF_LOG_LEVEL1	1
#define BPF_LOG_LEVEL2	2
#define BPF_LOG_STATS	4
#define BPF_LOG_FIXED	8
#define BPF_LOG_LEVEL	(BPF_LOG_LEVEL1 | BPF_LOG_LEVEL2)
#define BPF_LOG_MASK	(BPF_LOG_LEVEL | BPF_LOG_STATS | BPF_LOG_FIXED)
#define BPF_LOG_KERNEL	(BPF_LOG_MASK + 1) /* kernel internal flag */
#define BPF_LOG_MIN_ALIGNMENT 8U
#define BPF_LOG_ALIGNMENT 40U

static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log)
{
	return log && log->level;
}

#define BPF_MAX_SUBPROGS 256

struct bpf_subprog_info {
	/* 'start' has to be the first field otherwise find_subprog() won't work */
	u32 start; /* insn idx of function entry point */
	u32 linfo_idx; /* The idx to the main_prog->aux->linfo */
	u16 stack_depth; /* max. stack depth used by this function */
	bool has_tail_call;
	bool tail_call_reachable;
	bool has_ld_abs;
	bool is_async_cb;
};

struct bpf_verifier_env;

struct backtrack_state {
	struct bpf_verifier_env *env;
	u32 frame;
	u32 reg_masks[MAX_CALL_FRAMES];
	u64 stack_masks[MAX_CALL_FRAMES];
};
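
/* Informal example (the mask values are assumptions, not taken from
 * verifier.c): while backtracking to mark registers precise, if frame 0 still
 * needs r1 and r6 tracked, then
 *	reg_masks[0] == (1U << BPF_REG_1) | (1U << BPF_REG_6);
 * stack_masks works the same way, with one bit per 8-byte stack slot of the
 * corresponding frame.
 */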
struct bpf_id_pair {
	u32 old;
	u32 cur;
};

struct bpf_idmap {
	u32 tmp_id_gen;
	struct bpf_id_pair map[BPF_ID_MAP_SIZE];
};

struct bpf_idset {
	u32 count;
	u32 ids[BPF_ID_MAP_SIZE];
};

/* single container for all structs
 * one verifier_env per bpf_check() call
 */
struct bpf_verifier_env {
	u32 insn_idx;
	u32 prev_insn_idx;
	struct bpf_prog *prog;		/* eBPF program being verified */
	const struct bpf_verifier_ops *ops;
	struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */
	int stack_size;			/* number of states to be processed */
	bool strict_alignment;		/* perform strict pointer alignment checks */
	bool test_state_freq;		/* test verifier with different pruning frequency */
	struct bpf_verifier_state *cur_state; /* current verifier state */
	struct bpf_verifier_state_list **explored_states; /* search pruning optimization */
	struct bpf_verifier_state_list *free_list;
	struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of maps used by eBPF program */
	struct btf_mod_pair used_btfs[MAX_USED_BTFS]; /* array of BTFs used by BPF program */
	u32 used_map_cnt;		/* number of used maps */
	u32 used_btf_cnt;		/* number of used BTF objects */
	u32 id_gen;			/* used to generate unique reg IDs */
	bool explore_alu_limits;
	bool allow_ptr_leaks;
	bool allow_uninit_stack;
	bool bpf_capable;
	bool bypass_spec_v1;
	bool bypass_spec_v4;
	bool seen_direct_write;
	struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
	const struct bpf_line_info *prev_linfo;
	struct bpf_verifier_log log;
	struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 1];
	union {
		struct bpf_idmap idmap_scratch;
		struct bpf_idset idset_scratch;
	};
	struct {
		int *insn_state;
		int *insn_stack;
		int cur_stack;
	} cfg;
	struct backtrack_state bt;
	u32 pass_cnt; /* number of times do_check() was called */
	u32 subprog_cnt;
	/* number of instructions analyzed by the verifier */
	u32 prev_insn_processed, insn_processed;
	/* number of jmps, calls, exits analyzed so far */
	u32 prev_jmps_processed, jmps_processed;
	/* total verification time */
	u64 verification_time;
	/* maximum number of verifier states kept in 'branching' instructions */
	u32 max_states_per_insn;
	/* total number of allocated verifier states */
	u32 total_states;
	/* some states are freed during program analysis.
	 * this is peak number of states. this number dominates kernel
	 * memory consumption during verification
	 */
	u32 peak_states;
	/* longest register parentage chain walked for liveness marking */
	u32 longest_mark_read_walk;
	bpfptr_t fd_array;

	/* bit mask to keep track of whether a register has been accessed
	 * since the last time the function state was printed
	 */
	u32 scratched_regs;
	/* Same as scratched_regs but for stack slots */
	u64 scratched_stack_slots;
	u64 prev_log_pos, prev_insn_print_pos;
	/* buffer used to generate temporary string representations,
	 * e.g., in reg_type_str() to generate reg_type string
	 */
	char tmp_str_buf[TMP_STR_BUF_LEN];
};

__printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log,
				      const char *fmt, va_list args);
__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
					   const char *fmt, ...);
__printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
			    const char *fmt, ...);
int bpf_vlog_init(struct bpf_verifier_log *log, u32 log_level,
		  char __user *log_buf, u32 log_size);
void bpf_vlog_reset(struct bpf_verifier_log *log, u64 new_pos);
int bpf_vlog_finalize(struct bpf_verifier_log *log, u32 *log_size_actual);

static inline struct bpf_func_state *cur_func(struct bpf_verifier_env *env)
{
	struct bpf_verifier_state *cur = env->cur_state;

	return cur->frame[cur->curframe];
}

static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env)
{
	return cur_func(env)->regs;
}

int bpf_prog_offload_verifier_prep(struct bpf_prog *prog);
int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
				 int insn_idx, int prev_insn_idx);
int bpf_prog_offload_finalize(struct bpf_verifier_env *env);
void
bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off,
			      struct bpf_insn *insn);
void
bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt);

int check_ptr_off_reg(struct bpf_verifier_env *env,
		      const struct bpf_reg_state *reg, int regno);
int check_func_arg_reg_off(struct bpf_verifier_env *env,
			   const struct bpf_reg_state *reg, int regno,
			   enum bpf_arg_type arg_type);
int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
		  u32 regno, u32 mem_size);

/* this lives here instead of in bpf.h because it needs to dereference tgt_prog */
static inline u64 bpf_trampoline_compute_key(const struct bpf_prog *tgt_prog,
					     struct btf *btf, u32 btf_id)
{
	if (tgt_prog)
		return ((u64)tgt_prog->aux->id << 32) | btf_id;
	else
		return ((u64)btf_obj_id(btf) << 32) | 0x80000000 | btf_id;
}

/* unpack the IDs from the key as constructed above */
static inline void bpf_trampoline_unpack_key(u64 key, u32 *obj_id, u32 *btf_id)
{
	if (obj_id)
		*obj_id = key >> 32;
	if (btf_id)
		*btf_id = key & 0x7FFFFFFF;
}
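
/* Key layout, informally (the btf_id value 0x1234 below is just an example):
 * attaching to a kernel BTF function with no tgt_prog packs
 *	key = (btf_obj_id(btf) << 32) | 0x80000000 | 0x1234
 * while attaching to another BPF program packs that program's id into the
 * upper 32 bits instead. bpf_trampoline_unpack_key() recovers obj_id from the
 * upper half and btf_id from the low 31 bits; bit 31 (the 0x80000000 marker)
 * is masked off by 0x7FFFFFFF.
 */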
int bpf_check_attach_target(struct bpf_verifier_log *log,
			    const struct bpf_prog *prog,
			    const struct bpf_prog *tgt_prog,
			    u32 btf_id,
			    struct bpf_attach_target_info *tgt_info);
void
bpf_free_kfunc_btf_tab(struct bpf_kfunc_btf_tab *tab);

int mark_chain_precision(struct bpf_verifier_env *env, int regno);

#define BPF_BASE_TYPE_MASK	GENMASK(BPF_BASE_TYPE_BITS - 1, 0)

/* extract base type from bpf_{arg, return, reg}_type. */
static inline u32 base_type(u32 type)
{
	return type & BPF_BASE_TYPE_MASK;
}

/* extract flags from an extended type. See bpf_type_flag in bpf.h. */
static inline u32 type_flag(u32 type)
{
	return type & ~BPF_BASE_TYPE_MASK;
}

/* only use after check_attach_btf_id() */
static inline enum bpf_prog_type resolve_prog_type(const struct bpf_prog *prog)
{
	return prog->type == BPF_PROG_TYPE_EXT ?
		prog->aux->dst_prog->type : prog->type;
}

static inline bool bpf_prog_check_recur(const struct bpf_prog *prog)
{
	switch (resolve_prog_type(prog)) {
	case BPF_PROG_TYPE_TRACING:
		return prog->expected_attach_type != BPF_TRACE_ITER;
	case BPF_PROG_TYPE_STRUCT_OPS:
	case BPF_PROG_TYPE_LSM:
		return false;
	default:
		return true;
	}
}

#define BPF_REG_TRUSTED_MODIFIERS (MEM_ALLOC | PTR_TRUSTED)

static inline bool bpf_type_has_unsafe_modifiers(u32 type)
{
	return type_flag(type) & ~BPF_REG_TRUSTED_MODIFIERS;
}

#endif /* _LINUX_BPF_VERIFIER_H */