/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#ifndef _LINUX_BPF_VERIFIER_H
#define _LINUX_BPF_VERIFIER_H 1

#include <linux/bpf.h> /* for enum bpf_reg_type */
#include <linux/btf.h> /* for struct btf and btf_id() */
#include <linux/filter.h> /* for MAX_BPF_STACK */
#include <linux/tnum.h>

/* Maximum variable offset umax_value permitted when resolving memory accesses.
 * In practice this is far bigger than any realistic pointer offset; this limit
 * ensures that umax_value + (int)off + (int)size cannot overflow a u64.
 */
#define BPF_MAX_VAR_OFF	(1 << 29)
/* Maximum variable size permitted for ARG_CONST_SIZE[_OR_ZERO].  This ensures
 * that converting umax_value to int cannot overflow.
 */
#define BPF_MAX_VAR_SIZ	(1 << 29)
/* size of tmp_str_buf in bpf_verifier.
 * we need at least 306 bytes to fit full stack mask representation
 * (in the "-8,-16,...,-512" form)
 */
#define TMP_STR_BUF_LEN 320
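
/* As a sanity check on the 306-byte figure above: the full mask lists
 * 64 slots. "-8" takes 2 chars, the eleven two-digit offsets "-16".."-96"
 * take 3 chars each, and the fifty-two three-digit offsets "-104".."-512"
 * take 4 chars each, i.e. 2 + 11 * 3 + 52 * 4 = 243 characters, plus 63
 * separating commas, giving 306 bytes. 320 leaves headroom for the
 * terminating NUL.
 */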

/* Liveness marks, used for registers and spilled-regs (in stack slots).
 * Read marks propagate upwards until they find a write mark; they record that
 * "one of this state's descendants read this reg" (and therefore the reg is
 * relevant for states_equal() checks).
 * Write marks collect downwards and do not propagate; they record that "the
 * straight-line code that reached this state (from its parent) wrote this reg"
 * (and therefore that reads propagated from this state or its descendants
 * should not propagate to its parent).
 * A state with a write mark can receive read marks; it just won't propagate
 * them to its parent, since the write mark is a property, not of the state,
 * but of the link between it and its parent.  See mark_reg_read() and
 * mark_stack_slot_read() in kernel/bpf/verifier.c.
 */
enum bpf_reg_liveness {
	REG_LIVE_NONE = 0, /* reg hasn't been read or written this branch */
	REG_LIVE_READ32 = 0x1, /* reg was read, so we're sensitive to initial value */
	REG_LIVE_READ64 = 0x2, /* likewise, but full 64-bit content matters */
	REG_LIVE_READ = REG_LIVE_READ32 | REG_LIVE_READ64,
	REG_LIVE_WRITTEN = 0x4, /* reg was written first, screening off later reads */
	REG_LIVE_DONE = 0x8, /* liveness won't be updating this register anymore */
};
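
/* Illustrative example (not from the original source) of how the marks
 * interact, for a state covering the straight-line code:
 *
 *   1: r1 = 0;          // r1 gets REG_LIVE_WRITTEN here
 *   2: if (r2 > 7) ...  // r2 gets REG_LIVE_READ64
 *
 * A read of r1 by a descendant state stops propagating at this state's
 * write mark, since insn 1 screens off r1's initial value, while the
 * read mark on r2 keeps propagating to parent states: whether those
 * states are equivalent depends on what r2 held on entry.
 */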

/* For every reg representing a map value or allocated object pointer,
 * we consider the tuple of (ptr, id) to be unique in verifier context,
 * and we consider such regs not to alias each other for the purposes
 * of tracking lock state.
 */
struct bpf_active_lock {
	/* This can either be reg->map_ptr or reg->btf. If ptr is NULL,
	 * there's no active lock held, and other fields have no
	 * meaning. If non-NULL, it indicates that a lock is held and
	 * the id member has the reg->id of the register, which can be >= 0.
	 */
	void *ptr;
	/* This will be reg->id */
	u32 id;
};
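
/* For illustration (hypothetical program, not from the original source):
 * two lookups into the same map produce registers with distinct reg->id,
 * so their (ptr, id) tuples differ even though ptr is the same map:
 *
 *   v1 = bpf_map_lookup_elem(&m, &k1); // (ptr == &m, id == 1)
 *   v2 = bpf_map_lookup_elem(&m, &k2); // (ptr == &m, id == 2)
 *
 * Locking v1 and then unlocking v2 is therefore detected as a mismatch.
 */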

#define ITER_PREFIX "bpf_iter_"

enum bpf_iter_state {
	BPF_ITER_STATE_INVALID, /* for non-first slot */
	BPF_ITER_STATE_ACTIVE,
	BPF_ITER_STATE_DRAINED,
};

struct bpf_reg_state {
	/* Ordering of fields matters.  See states_equal() */
	enum bpf_reg_type type;
	/* Fixed part of pointer offset, pointer types only */
	s32 off;
	union {
		/* valid when type == PTR_TO_PACKET */
		int range;

		/* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE |
		 *   PTR_TO_MAP_VALUE_OR_NULL
		 */
		struct {
			struct bpf_map *map_ptr;
			/* To distinguish lookups of inner maps obtained
			 * from an outer map, the map_uid is non-zero for
			 * registers pointing to inner maps.
			 */
			u32 map_uid;
		};

		/* for PTR_TO_BTF_ID */
		struct {
			struct btf *btf;
			u32 btf_id;
		};

		struct { /* for PTR_TO_MEM | PTR_TO_MEM_OR_NULL */
			u32 mem_size;
			u32 dynptr_id; /* for dynptr slices */
		};

		/* For dynptr stack slots */
		struct {
			enum bpf_dynptr_type type;
			/* A dynptr is 16 bytes so it takes up 2 stack slots.
			 * We need to track which slot is the first slot
			 * to protect against cases where the user may try to
			 * pass in an address starting at the second slot of the
			 * dynptr.
			 */
			bool first_slot;
		} dynptr;

		/* For bpf_iter stack slots */
		struct {
			/* BTF container and BTF type ID describing
			 * struct bpf_iter_<type> of an iterator state
			 */
			struct btf *btf;
			u32 btf_id;
			/* packing the following two fields to fit iter state into 16 bytes */
			enum bpf_iter_state state:2;
			int depth:30;
		} iter;

		/* Max size from any of the above. */
		struct {
			unsigned long raw1;
			unsigned long raw2;
		} raw;

		u32 subprogno; /* for PTR_TO_FUNC */
	};
	/* For scalar types (SCALAR_VALUE), this represents our knowledge of
	 * the actual value.
	 * For pointer types, this represents the variable part of the offset
	 * from the pointed-to object, and is shared with all bpf_reg_states
	 * with the same id as us.
	 */
	struct tnum var_off;
	/* Used to determine if any memory access using this register will
	 * result in a bad access.
	 * These refer to the same value as var_off, not necessarily the actual
	 * contents of the register.
	 */
	s64 smin_value; /* minimum possible (s64)value */
	s64 smax_value; /* maximum possible (s64)value */
	u64 umin_value; /* minimum possible (u64)value */
	u64 umax_value; /* maximum possible (u64)value */
	s32 s32_min_value; /* minimum possible (s32)value */
	s32 s32_max_value; /* maximum possible (s32)value */
	u32 u32_min_value; /* minimum possible (u32)value */
	u32 u32_max_value; /* maximum possible (u32)value */
	/* For PTR_TO_PACKET, used to find other pointers with the same variable
	 * offset, so they can share range knowledge.
	 * For PTR_TO_MAP_VALUE_OR_NULL this is used to share which map value we
	 * came from, when one is tested for != NULL.
	 * For PTR_TO_MEM_OR_NULL this is used to identify memory allocation
	 * for the purpose of tracking that it's freed.
	 * For PTR_TO_SOCKET this is used to share which pointers retain the
	 * same reference to the socket, to determine proper reference freeing.
	 * For stack slots that are dynptrs, this is used to track references to
	 * the dynptr to determine proper reference freeing.
	 * Similarly to dynptrs, we use ID to track "belonging" of a reference
	 * to a specific instance of bpf_iter.
	 */
	u32 id;
	/* PTR_TO_SOCKET and PTR_TO_TCP_SOCK could be a ptr returned
	 * from a pointer-cast helper, bpf_sk_fullsock() and
	 * bpf_tcp_sock().
	 *
	 * Consider the following where "sk" is a reference counted
	 * pointer returned from "sk = bpf_sk_lookup_tcp();":
	 *
	 * 1: sk = bpf_sk_lookup_tcp();
	 * 2: if (!sk) { return 0; }
	 * 3: fullsock = bpf_sk_fullsock(sk);
	 * 4: if (!fullsock) { bpf_sk_release(sk); return 0; }
	 * 5: tp = bpf_tcp_sock(fullsock);
	 * 6: if (!tp) { bpf_sk_release(sk); return 0; }
	 * 7: bpf_sk_release(sk);
	 * 8: snd_cwnd = tp->snd_cwnd;  // verifier will complain
	 *
	 * After bpf_sk_release(sk) at line 7, both the "fullsock" ptr and
	 * the "tp" ptr should also be invalidated.  In order to do that,
	 * the regs holding "fullsock" and "sk" need to remember
	 * the original refcounted ptr id (i.e. sk_reg->id) in ref_obj_id
	 * such that the verifier can reset all regs which have
	 * ref_obj_id matching the sk_reg->id.
	 *
	 * sk_reg->ref_obj_id is set to sk_reg->id at line 1.
	 * sk_reg->id is kept for NULL-marking purposes only.
	 * After NULL-marking is done, sk_reg->id can be reset to 0.
	 *
	 * After "fullsock = bpf_sk_fullsock(sk);" at line 3,
	 * fullsock_reg->ref_obj_id is set to sk_reg->ref_obj_id.
	 *
	 * After "tp = bpf_tcp_sock(fullsock);" at line 5,
	 * tp_reg->ref_obj_id is set to fullsock_reg->ref_obj_id
	 * which is the same as sk_reg->ref_obj_id.
	 *
	 * From the verifier's perspective, if sk, fullsock and tp
	 * are not NULL, they are the same ptr with different
	 * reg->type.  In particular, bpf_sk_release(tp) is also
	 * allowed and has the same effect as bpf_sk_release(sk).
	 */
	u32 ref_obj_id;
	/* parentage chain for liveness checking */
	struct bpf_reg_state *parent;
	/* Inside the callee two registers can be both PTR_TO_STACK like
	 * R1=fp-8 and R2=fp-8, but one of them points to this function's stack
	 * while the other points to the caller's stack. To differentiate them,
	 * 'frameno' is used, which is an index into the
	 * bpf_verifier_state->frame[] array pointing to a bpf_func_state.
	 */
	u32 frameno;
	/* Tracks subreg definition. The stored value is the insn_idx of the
	 * writing insn. This is safe because subreg_def is used before any insn
	 * patching, which only happens after main verification has finished.
	 */
	s32 subreg_def;
	enum bpf_reg_liveness live;
	/* if (!precise && SCALAR_VALUE) min/max/tnum don't affect safety */
	bool precise;
};
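
/* Worked example (illustrative) of the bounds tracking above. Starting
 * from a completely unknown scalar and masking it:
 *
 *   r0 = *(u64 *)(r1 + 0);  // var_off = (0x0; ~0), all bits unknown
 *   r0 &= 0xff;             // var_off = (0x0; 0xff)
 *
 * the tnum now implies umin_value == 0 and umax_value == 255, and since
 * the sign bit is known to be clear, smin_value == 0 and
 * smax_value == 255 as well; the 32-bit bounds shrink to the same
 * [0, 255] range.
 */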

enum bpf_stack_slot_type {
	STACK_INVALID,    /* nothing was stored in this stack slot */
	STACK_SPILL,      /* register spilled into stack */
	STACK_MISC,	  /* BPF program wrote some data into this slot */
	STACK_ZERO,	  /* BPF program wrote constant zero */
	/* A dynptr is stored in this stack slot. The type of dynptr
	 * is stored in bpf_stack_state->spilled_ptr.dynptr.type
	 */
	STACK_DYNPTR,
	STACK_ITER,
};

#define BPF_REG_SIZE 8	/* size of eBPF register in bytes */

#define BPF_REGMASK_ARGS ((1 << BPF_REG_1) | (1 << BPF_REG_2) | \
			  (1 << BPF_REG_3) | (1 << BPF_REG_4) | \
			  (1 << BPF_REG_5))

#define BPF_DYNPTR_SIZE		sizeof(struct bpf_dynptr_kern)
#define BPF_DYNPTR_NR_SLOTS		(BPF_DYNPTR_SIZE / BPF_REG_SIZE)

struct bpf_stack_state {
	struct bpf_reg_state spilled_ptr;
	u8 slot_type[BPF_REG_SIZE];
};
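
/* Example slot layout (illustrative): after "*(u64 *)(r10 - 8) = r1"
 * spills a register, the bpf_stack_state covering fp-8 has all eight
 * slot_type[] bytes set to STACK_SPILL and spilled_ptr holds r1's full
 * register state; a program storing a single byte there instead would
 * mark only that byte STACK_MISC.
 */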

struct bpf_reference_state {
	/* Track each reference created with a unique id, even if the same
	 * instruction creates the reference multiple times (eg, via CALL).
	 */
	int id;
	/* Instruction where the allocation of this reference occurred. This
	 * is used purely to inform the user of a reference leak.
	 */
	int insn_idx;
	/* There can be a case like:
	 * main (frame 0)
	 *  cb (frame 1)
	 *   func (frame 2)
	 *    cb (frame 3)
	 * Hence for frame 3, if callback_ref just stored a boolean, it would be
	 * impossible to distinguish nested callback refs. Hence store the
	 * frameno and compare that to callback_ref in check_reference_leak when
	 * exiting a callback function.
	 */
	int callback_ref;
};

/* state of the program:
 * type of all registers and stack info
 */
struct bpf_func_state {
	struct bpf_reg_state regs[MAX_BPF_REG];
	/* index of call instruction that called into this func */
	int callsite;
	/* stack frame number of this function state from the point of view
	 * of the enclosing bpf_verifier_state.
	 * 0 = main function, 1 = first callee.
	 */
	u32 frameno;
	/* subprog number == index within subprog_info
	 * zero == main subprog
	 */
	u32 subprogno;
	/* Every bpf_timer_start will increment async_entry_cnt.
	 * It's used to distinguish:
	 * void foo(void) { for(;;); }
	 * void foo(void) { bpf_timer_set_callback(,foo); }
	 */
	u32 async_entry_cnt;
	bool in_callback_fn;
	struct tnum callback_ret_range;
	bool in_async_callback_fn;
	/* For callback calling functions that limit the number of possible
	 * callback executions (e.g. bpf_loop), this keeps track of the
	 * current simulated iteration number.
	 * The value in frame N refers to the number of times the callback
	 * with frame N+1 was simulated, e.g. for the following call:
	 *
	 *   bpf_loop(..., fn, ...); | suppose current frame is N
	 *                           | fn would be simulated in frame N+1
	 *                           | number of simulations is tracked in frame N
	 */
	u32 callback_depth;

	/* The following fields should be last. See copy_func_state() */
	int acquired_refs;
	struct bpf_reference_state *refs;
	int allocated_stack;
	struct bpf_stack_state *stack;
};

#define MAX_CALL_FRAMES 8

/* instruction history flags, used in bpf_jmp_history_entry.flags field */
enum {
	/* instruction references stack slot through PTR_TO_STACK register;
	 * we also store stack's frame number in lower 3 bits (MAX_CALL_FRAMES is 8)
	 * and accessed stack slot's index in next 6 bits (MAX_BPF_STACK is 512,
	 * 8 bytes per slot, so slot index (spi) is [0, 63])
	 */
	INSN_F_FRAMENO_MASK = 0x7, /* 3 bits */

	INSN_F_SPI_MASK = 0x3f, /* 6 bits */
	INSN_F_SPI_SHIFT = 3, /* shifted 3 bits to the left */

	INSN_F_STACK_ACCESS = BIT(9), /* we need 10 bits total */
};

static_assert(INSN_F_FRAMENO_MASK + 1 >= MAX_CALL_FRAMES);
static_assert(INSN_F_SPI_MASK + 1 >= MAX_BPF_STACK / 8);
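
/* Packing sketch (illustrative): a stack access to slot index 5 in
 * frame 2 would be encoded and decoded roughly as:
 *
 *   flags = INSN_F_STACK_ACCESS |
 *           ((5 & INSN_F_SPI_MASK) << INSN_F_SPI_SHIFT) |
 *           (2 & INSN_F_FRAMENO_MASK);
 *
 *   spi = (flags >> INSN_F_SPI_SHIFT) & INSN_F_SPI_MASK; // == 5
 *   frameno = flags & INSN_F_FRAMENO_MASK;               // == 2
 */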

struct bpf_jmp_history_entry {
	u32 idx;
	/* insn idx can't be bigger than 1 million */
	u32 prev_idx : 22;
	/* special flags, e.g., whether insn is doing register stack spill/load */
	u32 flags : 10;
};

/* Maximum number of register states that can exist at once */
#define BPF_ID_MAP_SIZE ((MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE) * MAX_CALL_FRAMES)
struct bpf_verifier_state {
	/* call stack tracking */
	struct bpf_func_state *frame[MAX_CALL_FRAMES];
	struct bpf_verifier_state *parent;
	/*
	 * The 'branches' field is the number of branches left to explore:
	 * 0 - all possible paths from this state reached bpf_exit or
	 * were safely pruned
	 * 1 - at least one path is being explored.
	 * This state hasn't reached bpf_exit
	 * 2 - at least two paths are being explored.
	 * This state is an immediate parent of two children.
	 * One is a fallthrough branch with branches==1 and the other
	 * state is pushed into the stack (to be explored later) also with
	 * branches==1. The parent of this state has branches==1.
	 * The verifier state tree connected via the 'parent' pointer looks like:
	 * 1
	 * 1
	 * 2 -> 1 (first 'if' pushed into stack)
	 * 1
	 * 2 -> 1 (second 'if' pushed into stack)
	 * 1
	 * 1
	 * 1 bpf_exit.
	 *
	 * Once do_check() reaches bpf_exit, it calls update_branch_counts()
	 * and the verifier state tree will look like:
	 * 1
	 * 1
	 * 2 -> 1 (first 'if' pushed into stack)
	 * 1
	 * 1 -> 1 (second 'if' pushed into stack)
	 * 0
	 * 0
	 * 0 bpf_exit.
	 * After pop_stack(), do_check() will resume at the second 'if'.
	 *
	 * If is_state_visited() sees a state with branches > 0 it means
	 * there is a loop. If such a state is exactly equal to the current
	 * state it's an infinite loop. Note that states_equal() checks for
	 * state equivalence, so two states being 'states_equal' does not mean
	 * an infinite loop. The exact comparison is provided by the
	 * states_maybe_looping() function. It's a stronger pre-check and
	 * much faster than states_equal().
	 *
	 * This algorithm may not find all possible infinite loops, or the
	 * loop iteration count may be too high.
	 * In such cases the BPF_COMPLEXITY_LIMIT_INSNS limit kicks in.
	 */
	u32 branches;
	u32 insn_idx;
	u32 curframe;

	struct bpf_active_lock active_lock;
	bool speculative;
	bool active_rcu_lock;
	/* If this state was ever pointed-to by another state's loop_entry
	 * field, this flag is set to true. Used to avoid freeing such states
	 * while they are still in use.
	 */
	bool used_as_loop_entry;

	/* first and last insn idx of this verifier state */
	u32 first_insn_idx;
	u32 last_insn_idx;
	/* If this state is part of a states loop, this field points to some
	 * parent of this state such that:
	 * - it is also a member of the same states loop;
	 * - DFS states traversal starting from the initial state visits the
	 *   loop_entry state before this state.
	 * Used to compute the topmost loop entry for state loops.
	 * State loops might appear because of open-coded iterators logic.
	 * See get_loop_entry() for more information.
	 */
	struct bpf_verifier_state *loop_entry;
	/* jmp history recorded from first to last.
	 * backtracking uses it to go from last to first.
	 * For most states jmp_history_cnt is [0-3].
	 * For loops it can go up to ~40.
	 */
	struct bpf_jmp_history_entry *jmp_history;
	u32 jmp_history_cnt;
	u32 dfs_depth;
	u32 callback_unroll_depth;
};

#define bpf_get_spilled_reg(slot, frame)				\
	(((slot < frame->allocated_stack / BPF_REG_SIZE) &&		\
	  (frame->stack[slot].slot_type[0] == STACK_SPILL))		\
	 ? &frame->stack[slot].spilled_ptr : NULL)

/* Iterate over 'frame', setting 'reg' to either NULL or a spilled register. */
#define bpf_for_each_spilled_reg(iter, frame, reg)			\
	for (iter = 0, reg = bpf_get_spilled_reg(iter, frame);		\
	     iter < frame->allocated_stack / BPF_REG_SIZE;		\
	     iter++, reg = bpf_get_spilled_reg(iter, frame))
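
/* Usage sketch (hypothetical caller, for illustration):
 *
 *   struct bpf_reg_state *reg;
 *   int i;
 *
 *   bpf_for_each_spilled_reg(i, frame, reg) {
 *           if (!reg)
 *                   continue; // slot i holds no spilled register
 *           // inspect reg->type, reg->id, ...
 *   }
 */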

/* Invoke __expr over the registers in __vst, setting __state and __reg */
#define bpf_for_each_reg_in_vstate(__vst, __state, __reg, __expr)   \
	({                                                               \
		struct bpf_verifier_state *___vstate = __vst;            \
		int ___i, ___j;                                          \
		for (___i = 0; ___i <= ___vstate->curframe; ___i++) {    \
			struct bpf_reg_state *___regs;                   \
			__state = ___vstate->frame[___i];                \
			___regs = __state->regs;                         \
			for (___j = 0; ___j < MAX_BPF_REG; ___j++) {     \
				__reg = &___regs[___j];                  \
				(void)(__expr);                          \
			}                                                \
			bpf_for_each_spilled_reg(___j, __state, __reg) { \
				if (!__reg)                              \
					continue;                        \
				(void)(__expr);                          \
			}                                                \
		}                                                        \
	})
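
/* Usage sketch (illustrative, modeled on how the verifier invalidates
 * all registers sharing a reference; __mark_reg_unknown() is a
 * verifier.c internal and is an assumption here):
 *
 *   struct bpf_func_state *state;
 *   struct bpf_reg_state *reg;
 *
 *   bpf_for_each_reg_in_vstate(vstate, state, reg, ({
 *           if (reg->ref_obj_id == ref_obj_id)
 *                   __mark_reg_unknown(env, reg);
 *   }));
 */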

/* linked list of verifier states used to prune search */
struct bpf_verifier_state_list {
	struct bpf_verifier_state state;
	struct bpf_verifier_state_list *next;
	int miss_cnt, hit_cnt;
};

struct bpf_loop_inline_state {
	unsigned int initialized:1; /* set to true upon first entry */
	unsigned int fit_for_inline:1; /* true if callback function is the same
					* at each call and flags are always zero
					*/
	u32 callback_subprogno; /* valid when fit_for_inline is true */
};

/* Possible states for alu_state member. */
#define BPF_ALU_SANITIZE_SRC		(1U << 0)
#define BPF_ALU_SANITIZE_DST		(1U << 1)
#define BPF_ALU_NEG_VALUE		(1U << 2)
#define BPF_ALU_NON_POINTER		(1U << 3)
#define BPF_ALU_IMMEDIATE		(1U << 4)
#define BPF_ALU_SANITIZE		(BPF_ALU_SANITIZE_SRC | \
					 BPF_ALU_SANITIZE_DST)

struct bpf_insn_aux_data {
	union {
		enum bpf_reg_type ptr_type;	/* pointer type for load/store insns */
		unsigned long map_ptr_state;	/* pointer/poison value for maps */
		s32 call_imm;			/* saved imm field of call insn */
		u32 alu_limit;			/* limit for add/sub register with pointer */
		struct {
			u32 map_index;		/* index into used_maps[] */
			u32 map_off;		/* offset from value base address */
		};
		struct {
			enum bpf_reg_type reg_type;	/* type of pseudo_btf_id */
			union {
				struct {
					struct btf *btf;
					u32 btf_id;	/* btf_id for struct typed var */
				};
				u32 mem_size;	/* mem_size for non-struct typed var */
			};
		} btf_var;
		/* if instruction is a call to bpf_loop, this field tracks
		 * the state of the relevant registers to make a decision about inlining
		 */
		struct bpf_loop_inline_state loop_inline_state;
	};
	union {
		/* remember the size of type passed to bpf_obj_new to rewrite R1 */
		u64 obj_new_size;
		/* remember the offset of node field within type to rewrite */
		u64 insert_off;
	};
	struct btf_struct_meta *kptr_struct_meta;
	u64 map_key_state; /* constant (32 bit) key tracking for maps */
	int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
	u32 seen; /* this insn was processed by the verifier at env->pass_cnt */
	bool sanitize_stack_spill; /* subject to Spectre v4 sanitization */
	bool zext_dst; /* this insn zero extends dst reg */
	bool storage_get_func_atomic; /* bpf_*_storage_get() with atomic memory alloc */
	bool is_iter_next; /* bpf_iter_<type>_next() kfunc call */
	u8 alu_state; /* used in combination with alu_limit */

	/* below fields are initialized once */
	unsigned int orig_idx; /* original instruction index */
	bool jmp_point;
	bool prune_point;
	/* ensure we check state equivalence and save a state checkpoint at
	 * this instruction, regardless of any heuristics
	 */
	bool force_checkpoint;
	/* true if instruction is a call to a helper function that
	 * accepts callback function as a parameter.
	 */
	bool calls_callback;
};

#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
#define MAX_USED_BTFS 64 /* max number of BTFs accessed by one BPF program */

#define BPF_VERIFIER_TMP_LOG_SIZE	1024

struct bpf_verifier_log {
	/* Logical start and end positions of a "log window" of the verifier log.
	 * start_pos == 0 means we haven't truncated anything.
	 * Once truncation starts to happen, start_pos + len_total == end_pos,
	 * except during log reset situations, in which (end_pos - start_pos)
	 * might get smaller than len_total (see bpf_vlog_reset()).
	 * Generally, (end_pos - start_pos) gives the amount of useful data in
	 * the user log buffer.
	 */
	u64 start_pos;
	u64 end_pos;
	char __user *ubuf;
	u32 level;
	u32 len_total;
	u32 len_max;
	char kbuf[BPF_VERIFIER_TMP_LOG_SIZE];
};

#define BPF_LOG_LEVEL1	1
#define BPF_LOG_LEVEL2	2
#define BPF_LOG_STATS	4
#define BPF_LOG_FIXED	8
#define BPF_LOG_LEVEL	(BPF_LOG_LEVEL1 | BPF_LOG_LEVEL2)
#define BPF_LOG_MASK	(BPF_LOG_LEVEL | BPF_LOG_STATS | BPF_LOG_FIXED)
#define BPF_LOG_KERNEL	(BPF_LOG_MASK + 1) /* kernel internal flag */
#define BPF_LOG_MIN_ALIGNMENT 8U
#define BPF_LOG_ALIGNMENT 40U

static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log)
{
	return log && log->level;
}

#define BPF_MAX_SUBPROGS 256

struct bpf_subprog_info {
	/* 'start' has to be the first field otherwise find_subprog() won't work */
	u32 start; /* insn idx of function entry point */
	u32 linfo_idx; /* The idx to the main_prog->aux->linfo */
	u16 stack_depth; /* max. stack depth used by this function */
	bool has_tail_call;
	bool tail_call_reachable;
	bool has_ld_abs;
	bool is_async_cb;
};

struct bpf_verifier_env;

struct backtrack_state {
	struct bpf_verifier_env *env;
	u32 frame;
	u32 reg_masks[MAX_CALL_FRAMES];
	u64 stack_masks[MAX_CALL_FRAMES];
};

struct bpf_id_pair {
	u32 old;
	u32 cur;
};

struct bpf_idmap {
	u32 tmp_id_gen;
	struct bpf_id_pair map[BPF_ID_MAP_SIZE];
};

struct bpf_idset {
	u32 count;
	u32 ids[BPF_ID_MAP_SIZE];
};

/* single container for all structs
 * one verifier_env per bpf_check() call
 */
struct bpf_verifier_env {
	u32 insn_idx;
	u32 prev_insn_idx;
	struct bpf_prog *prog;		/* eBPF program being verified */
	const struct bpf_verifier_ops *ops;
	struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */
	int stack_size;			/* number of states to be processed */
	bool strict_alignment;		/* perform strict pointer alignment checks */
	bool test_state_freq;		/* test verifier with different pruning frequency */
	struct bpf_verifier_state *cur_state; /* current verifier state */
	struct bpf_verifier_state_list **explored_states; /* search pruning optimization */
	struct bpf_verifier_state_list *free_list;
	struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of maps used by the eBPF program */
	struct btf_mod_pair used_btfs[MAX_USED_BTFS]; /* array of BTFs used by the BPF program */
	u32 used_map_cnt;		/* number of used maps */
	u32 used_btf_cnt;		/* number of used BTF objects */
	u32 id_gen;			/* used to generate unique reg IDs */
	bool explore_alu_limits;
	bool allow_ptr_leaks;
	bool allow_uninit_stack;
	bool bpf_capable;
	bool bypass_spec_v1;
	bool bypass_spec_v4;
	bool seen_direct_write;
	struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
	const struct bpf_line_info *prev_linfo;
	struct bpf_verifier_log log;
	struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 1];
	union {
		struct bpf_idmap idmap_scratch;
		struct bpf_idset idset_scratch;
	};
	struct {
		int *insn_state;
		int *insn_stack;
		int cur_stack;
	} cfg;
	struct backtrack_state bt;
	struct bpf_jmp_history_entry *cur_hist_ent;
	u32 pass_cnt; /* number of times do_check() was called */
	u32 subprog_cnt;
	/* number of instructions analyzed by the verifier */
	u32 prev_insn_processed, insn_processed;
	/* number of jmps, calls, exits analyzed so far */
	u32 prev_jmps_processed, jmps_processed;
	/* total verification time */
	u64 verification_time;
	/* maximum number of verifier states kept in 'branching' instructions */
	u32 max_states_per_insn;
	/* total number of allocated verifier states */
	u32 total_states;
	/* some states are freed during program analysis.
	 * this is the peak number of states. this number dominates kernel
	 * memory consumption during verification
	 */
	u32 peak_states;
	/* longest register parentage chain walked for liveness marking */
	u32 longest_mark_read_walk;
	bpfptr_t fd_array;

	/* bit mask to keep track of whether a register has been accessed
	 * since the last time the function state was printed
	 */
	u32 scratched_regs;
	/* Same as scratched_regs but for stack slots */
	u64 scratched_stack_slots;
	u64 prev_log_pos, prev_insn_print_pos;
	/* buffer used to generate temporary string representations,
	 * e.g., in reg_type_str() to generate the reg_type string
	 */
	char tmp_str_buf[TMP_STR_BUF_LEN];
};

__printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log,
				      const char *fmt, va_list args);
__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
					   const char *fmt, ...);
__printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
			    const char *fmt, ...);
int bpf_vlog_init(struct bpf_verifier_log *log, u32 log_level,
		  char __user *log_buf, u32 log_size);
void bpf_vlog_reset(struct bpf_verifier_log *log, u64 new_pos);
int bpf_vlog_finalize(struct bpf_verifier_log *log, u32 *log_size_actual);

static inline struct bpf_func_state *cur_func(struct bpf_verifier_env *env)
{
	struct bpf_verifier_state *cur = env->cur_state;

	return cur->frame[cur->curframe];
}

static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env)
{
	return cur_func(env)->regs;
}
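
/* Usage sketch (illustrative): inspecting R0 in the currently analyzed
 * frame:
 *
 *   struct bpf_reg_state *r0 = &cur_regs(env)[BPF_REG_0];
 *
 *   if (r0->type == SCALAR_VALUE && r0->smin_value >= 0)
 *           ... // R0 is provably non-negative on this path
 */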

int bpf_prog_offload_verifier_prep(struct bpf_prog *prog);
int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
				 int insn_idx, int prev_insn_idx);
int bpf_prog_offload_finalize(struct bpf_verifier_env *env);
void
bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off,
			      struct bpf_insn *insn);
void
bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt);

int check_ptr_off_reg(struct bpf_verifier_env *env,
		      const struct bpf_reg_state *reg, int regno);
int check_func_arg_reg_off(struct bpf_verifier_env *env,
			   const struct bpf_reg_state *reg, int regno,
			   enum bpf_arg_type arg_type);
int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
		   u32 regno, u32 mem_size);

/* this lives here instead of in bpf.h because it needs to dereference tgt_prog */
static inline u64 bpf_trampoline_compute_key(const struct bpf_prog *tgt_prog,
					     struct btf *btf, u32 btf_id)
{
	if (tgt_prog)
		return ((u64)tgt_prog->aux->id << 32) | btf_id;
	else
		return ((u64)btf_obj_id(btf) << 32) | 0x80000000 | btf_id;
}

/* unpack the IDs from the key as constructed above */
static inline void bpf_trampoline_unpack_key(u64 key, u32 *obj_id, u32 *btf_id)
{
	if (obj_id)
		*obj_id = key >> 32;
	if (btf_id)
		*btf_id = key & 0x7FFFFFFF;
}
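
/* Round-trip sketch (illustrative): for a raw BTF target the two
 * helpers above compose as follows:
 *
 *   u64 key = bpf_trampoline_compute_key(NULL, btf, btf_id);
 *   u32 obj_id, type_id;
 *
 *   bpf_trampoline_unpack_key(key, &obj_id, &type_id);
 *   // obj_id == btf_obj_id(btf), type_id == btf_id; bit 31 of the low
 *   // word marks "no target prog" and is masked off by the unpack.
 */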

int bpf_check_attach_target(struct bpf_verifier_log *log,
			    const struct bpf_prog *prog,
			    const struct bpf_prog *tgt_prog,
			    u32 btf_id,
			    struct bpf_attach_target_info *tgt_info);
void bpf_free_kfunc_btf_tab(struct bpf_kfunc_btf_tab *tab);

int mark_chain_precision(struct bpf_verifier_env *env, int regno);

#define BPF_BASE_TYPE_MASK	GENMASK(BPF_BASE_TYPE_BITS - 1, 0)

/* extract base type from bpf_{arg, return, reg}_type. */
static inline u32 base_type(u32 type)
{
	return type & BPF_BASE_TYPE_MASK;
}

/* extract flags from an extended type. See bpf_type_flag in bpf.h. */
static inline u32 type_flag(u32 type)
{
	return type & ~BPF_BASE_TYPE_MASK;
}
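
/* Example (illustrative): a register typed
 * PTR_TO_MAP_VALUE | PTR_MAYBE_NULL decomposes as
 *
 *   base_type(reg->type) == PTR_TO_MAP_VALUE
 *   type_flag(reg->type) == PTR_MAYBE_NULL
 *
 * so checks that only care about the base kind compare base_type() and
 * ignore modifier flags.
 */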

/* only use after check_attach_btf_id() */
static inline enum bpf_prog_type resolve_prog_type(const struct bpf_prog *prog)
{
	return (prog->type == BPF_PROG_TYPE_EXT && prog->aux->saved_dst_prog_type) ?
		prog->aux->saved_dst_prog_type : prog->type;
}

static inline bool bpf_prog_check_recur(const struct bpf_prog *prog)
{
	switch (resolve_prog_type(prog)) {
	case BPF_PROG_TYPE_TRACING:
		return prog->expected_attach_type != BPF_TRACE_ITER;
	case BPF_PROG_TYPE_STRUCT_OPS:
	case BPF_PROG_TYPE_LSM:
		return false;
	default:
		return true;
	}
}

#define BPF_REG_TRUSTED_MODIFIERS (MEM_ALLOC | PTR_TRUSTED | NON_OWN_REF)

static inline bool bpf_type_has_unsafe_modifiers(u32 type)
{
	return type_flag(type) & ~BPF_REG_TRUSTED_MODIFIERS;
}

#endif /* _LINUX_BPF_VERIFIER_H */