xref: /openbmc/linux/kernel/bpf/verifier.c (revision 6aeadf78)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
3  * Copyright (c) 2016 Facebook
4  * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
5  */
6 #include <uapi/linux/btf.h>
7 #include <linux/bpf-cgroup.h>
8 #include <linux/kernel.h>
9 #include <linux/types.h>
10 #include <linux/slab.h>
11 #include <linux/bpf.h>
12 #include <linux/btf.h>
13 #include <linux/bpf_verifier.h>
14 #include <linux/filter.h>
15 #include <net/netlink.h>
16 #include <linux/file.h>
17 #include <linux/vmalloc.h>
18 #include <linux/stringify.h>
19 #include <linux/bsearch.h>
20 #include <linux/sort.h>
21 #include <linux/perf_event.h>
22 #include <linux/ctype.h>
23 #include <linux/error-injection.h>
24 #include <linux/bpf_lsm.h>
25 #include <linux/btf_ids.h>
26 #include <linux/poison.h>
27 #include <linux/module.h>
28 
29 #include "disasm.h"
30 
31 static const struct bpf_verifier_ops * const bpf_verifier_ops[] = {
32 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
33 	[_id] = & _name ## _verifier_ops,
34 #define BPF_MAP_TYPE(_id, _ops)
35 #define BPF_LINK_TYPE(_id, _name)
36 #include <linux/bpf_types.h>
37 #undef BPF_PROG_TYPE
38 #undef BPF_MAP_TYPE
39 #undef BPF_LINK_TYPE
40 };
41 
42 /* bpf_check() is a static code analyzer that walks eBPF program
43  * instruction by instruction and updates register/stack state.
44  * All paths of conditional branches are analyzed until 'bpf_exit' insn.
45  *
46  * The first pass is depth-first-search to check that the program is a DAG.
47  * It rejects the following programs:
48  * - larger than BPF_MAXINSNS insns
49  * - if loop is present (detected via back-edge)
50  * - unreachable insns exist (shouldn't be a forest. program = one function)
51  * - out of bounds or malformed jumps
52  * The second pass is all possible path descent from the 1st insn.
53  * Since it's analyzing all paths through the program, the length of the
54  * analysis is limited to 64k insn, which may be hit even if the total number of
55  * insns is less than 4K but there are too many branches that change stack/regs.
56  * Number of 'branches to be analyzed' is limited to 1k
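 *
 * For illustration only (a sketch, not taken from a real program), a loop
 * shows up to the first pass as a back-edge:
 *    BPF_MOV64_IMM(BPF_REG_0, 0),   // insn 0
 *    BPF_JMP_A(-2),                 // insn 1: jumps back to insn 0 => back-edge
 *    BPF_EXIT_INSN(),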
57  *
58  * On entry to each instruction, each register has a type, and the instruction
59  * changes the types of the registers depending on instruction semantics.
60  * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is
61  * copied to R1.
62  *
63  * All registers are 64-bit.
64  * R0 - return register
65  * R1-R5 argument passing registers
66  * R6-R9 callee saved registers
67  * R10 - frame pointer read-only
68  *
69  * At the start of BPF program the register R1 contains a pointer to bpf_context
70  * and has type PTR_TO_CTX.
71  *
72  * Verifier tracks arithmetic operations on pointers in case:
73  *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
74  *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
75  * 1st insn copies R10 (which has FRAME_PTR) type into R1
76  * and 2nd arithmetic instruction is pattern matched to recognize
77  * that it wants to construct a pointer to some element within stack.
78  * So after 2nd insn, the register R1 has type PTR_TO_STACK
79  * (and -20 constant is saved for further stack bounds checking).
80  * Meaning that this reg is a pointer to stack plus known immediate constant.
81  *
82  * Most of the time the registers have SCALAR_VALUE type, which
83  * means the register has some value, but it's not a valid pointer.
84  * (like pointer plus pointer becomes SCALAR_VALUE type)
85  *
86  * When verifier sees load or store instructions the type of base register
87  * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK, PTR_TO_SOCKET. These are
88  * four pointer types recognized by check_mem_access() function.
89  *
90  * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value'
91  * and the range of [ptr, ptr + map's value_size) is accessible.
92  *
93  * registers used to pass values to function calls are checked against
94  * function argument constraints.
95  *
96  * ARG_PTR_TO_MAP_KEY is one such argument constraint.
97  * It means that the register type passed to this function must be
98  * PTR_TO_STACK and it will be used inside the function as
99  * 'pointer to map element key'
100  *
101  * For example the argument constraints for bpf_map_lookup_elem():
102  *   .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
103  *   .arg1_type = ARG_CONST_MAP_PTR,
104  *   .arg2_type = ARG_PTR_TO_MAP_KEY,
105  *
106  * ret_type says that this function returns 'pointer to map elem value or null'
107  * function expects 1st argument to be a const pointer to 'struct bpf_map' and
108  * 2nd argument should be a pointer to stack, which will be used inside
109  * the helper function as a pointer to map element key.
110  *
111  * On the kernel side the helper function looks like:
112  * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
113  * {
114  *    struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
115  *    void *key = (void *) (unsigned long) r2;
116  *    void *value;
117  *
118  *    here kernel can access 'key' and 'map' pointers safely, knowing that
119  *    [key, key + map->key_size) bytes are valid and were initialized on
120  *    the stack of eBPF program.
121  * }
122  *
123  * Corresponding eBPF program may look like:
124  *    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),  // after this insn R2 type is FRAME_PTR
125  *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
126  *    BPF_LD_MAP_FD(BPF_REG_1, map_fd),      // after this insn R1 type is CONST_PTR_TO_MAP
127  *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
128  * here verifier looks at prototype of map_lookup_elem() and sees:
129  * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok,
130  * Now verifier knows that this map has key of R1->map_ptr->key_size bytes
131  *
132  * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far,
133  * Now verifier checks that [R2, R2 + map's key_size) are within stack limits
134  * and were initialized prior to this call.
135  * If it's ok, then verifier allows this BPF_CALL insn and looks at
136  * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets
137  * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function
138  * returns either pointer to map value or NULL.
139  *
140  * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
141  * insn, the register holding that pointer in the true branch changes state to
142  * PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false
143  * branch. See check_cond_jmp_op().
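 *
 * As an illustrative sketch (not from a real program, and assuming the map's
 * value_size is at least 8 bytes), the lookup example above would typically
 * be followed by:
 *    BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), // R0 is PTR_TO_MAP_VALUE_OR_NULL here
 *    BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),  // only reached when R0 is PTR_TO_MAP_VALUE
 *    BPF_EXIT_INSN(),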
144  *
145  * After the call, R0 is set to the return type of the function and registers R1-R5
146  * are set to NOT_INIT to indicate that they are no longer readable.
147  *
148  * The following reference types represent a potential reference to a kernel
149  * resource which, after first being allocated, must be checked and freed by
150  * the BPF program:
151  * - PTR_TO_SOCKET_OR_NULL, PTR_TO_SOCKET
152  *
153  * When the verifier sees a helper call return a reference type, it allocates a
154  * pointer id for the reference and stores it in the current function state.
155  * Similar to the way that PTR_TO_MAP_VALUE_OR_NULL is converted into
156  * PTR_TO_MAP_VALUE, PTR_TO_SOCKET_OR_NULL becomes PTR_TO_SOCKET when the type
157  * passes through a NULL-check conditional. For the branch wherein the state is
158  * changed to CONST_IMM, the verifier releases the reference.
159  *
160  * For each helper function that allocates a reference, such as
161  * bpf_sk_lookup_tcp(), there is a corresponding release function, such as
162  * bpf_sk_release(). When a reference type passes into the release function,
163  * the verifier also releases the reference. If any unchecked or unreleased
164  * reference remains at the end of the program, the verifier rejects it.
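 *
 * A minimal sketch of that pattern (helper arguments are illustrative;
 * 'tuple' is assumed to be a struct bpf_sock_tuple prepared by the program):
 *
 *    struct bpf_sock *sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), -1, 0);
 *    if (sk)                  // PTR_TO_SOCKET_OR_NULL becomes PTR_TO_SOCKET here
 *        bpf_sk_release(sk);  // reference released; omitting this is rejected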
165  */
166 
167 /* verifier_state + insn_idx are pushed to stack when branch is encountered */
168 struct bpf_verifier_stack_elem {
169 	/* verifier state is 'st'
170 	 * before processing instruction 'insn_idx'
171 	 * and after processing instruction 'prev_insn_idx'
172 	 */
173 	struct bpf_verifier_state st;
174 	int insn_idx;
175 	int prev_insn_idx;
176 	struct bpf_verifier_stack_elem *next;
177 	/* length of verifier log at the time this state was pushed on stack */
178 	u32 log_pos;
179 };
180 
181 #define BPF_COMPLEXITY_LIMIT_JMP_SEQ	8192
182 #define BPF_COMPLEXITY_LIMIT_STATES	64
183 
184 #define BPF_MAP_KEY_POISON	(1ULL << 63)
185 #define BPF_MAP_KEY_SEEN	(1ULL << 62)
186 
187 #define BPF_MAP_PTR_UNPRIV	1UL
188 #define BPF_MAP_PTR_POISON	((void *)((0xeB9FUL << 1) +	\
189 					  POISON_POINTER_DELTA))
190 #define BPF_MAP_PTR(X)		((struct bpf_map *)((X) & ~BPF_MAP_PTR_UNPRIV))
191 
192 static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx);
193 static int release_reference(struct bpf_verifier_env *env, int ref_obj_id);
194 static void invalidate_non_owning_refs(struct bpf_verifier_env *env);
195 static bool in_rbtree_lock_required_cb(struct bpf_verifier_env *env);
196 static int ref_set_non_owning(struct bpf_verifier_env *env,
197 			      struct bpf_reg_state *reg);
198 static void specialize_kfunc(struct bpf_verifier_env *env,
199 			     u32 func_id, u16 offset, unsigned long *addr);
200 
201 static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux)
202 {
203 	return BPF_MAP_PTR(aux->map_ptr_state) == BPF_MAP_PTR_POISON;
204 }
205 
206 static bool bpf_map_ptr_unpriv(const struct bpf_insn_aux_data *aux)
207 {
208 	return aux->map_ptr_state & BPF_MAP_PTR_UNPRIV;
209 }
210 
211 static void bpf_map_ptr_store(struct bpf_insn_aux_data *aux,
212 			      const struct bpf_map *map, bool unpriv)
213 {
214 	BUILD_BUG_ON((unsigned long)BPF_MAP_PTR_POISON & BPF_MAP_PTR_UNPRIV);
215 	unpriv |= bpf_map_ptr_unpriv(aux);
216 	aux->map_ptr_state = (unsigned long)map |
217 			     (unpriv ? BPF_MAP_PTR_UNPRIV : 0UL);
218 }
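
/* Example of the encoding above (the pointer value is made up purely for
 * illustration): storing map == (struct bpf_map *)0xffff888012345600 with
 * unpriv == true yields map_ptr_state == 0xffff888012345601. BPF_MAP_PTR()
 * masks the low bit back off to recover the map pointer and
 * bpf_map_ptr_unpriv() reads it.
 */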
219 
220 static bool bpf_map_key_poisoned(const struct bpf_insn_aux_data *aux)
221 {
222 	return aux->map_key_state & BPF_MAP_KEY_POISON;
223 }
224 
225 static bool bpf_map_key_unseen(const struct bpf_insn_aux_data *aux)
226 {
227 	return !(aux->map_key_state & BPF_MAP_KEY_SEEN);
228 }
229 
230 static u64 bpf_map_key_immediate(const struct bpf_insn_aux_data *aux)
231 {
232 	return aux->map_key_state & ~(BPF_MAP_KEY_SEEN | BPF_MAP_KEY_POISON);
233 }
234 
235 static void bpf_map_key_store(struct bpf_insn_aux_data *aux, u64 state)
236 {
237 	bool poisoned = bpf_map_key_poisoned(aux);
238 
239 	aux->map_key_state = state | BPF_MAP_KEY_SEEN |
240 			     (poisoned ? BPF_MAP_KEY_POISON : 0ULL);
241 }
242 
243 static bool bpf_pseudo_call(const struct bpf_insn *insn)
244 {
245 	return insn->code == (BPF_JMP | BPF_CALL) &&
246 	       insn->src_reg == BPF_PSEUDO_CALL;
247 }
248 
249 static bool bpf_pseudo_kfunc_call(const struct bpf_insn *insn)
250 {
251 	return insn->code == (BPF_JMP | BPF_CALL) &&
252 	       insn->src_reg == BPF_PSEUDO_KFUNC_CALL;
253 }
254 
255 struct bpf_call_arg_meta {
256 	struct bpf_map *map_ptr;
257 	bool raw_mode;
258 	bool pkt_access;
259 	u8 release_regno;
260 	int regno;
261 	int access_size;
262 	int mem_size;
263 	u64 msize_max_value;
264 	int ref_obj_id;
265 	int dynptr_id;
266 	int map_uid;
267 	int func_id;
268 	struct btf *btf;
269 	u32 btf_id;
270 	struct btf *ret_btf;
271 	u32 ret_btf_id;
272 	u32 subprogno;
273 	struct btf_field *kptr_field;
274 };
275 
276 struct btf_and_id {
277 	struct btf *btf;
278 	u32 btf_id;
279 };
280 
281 struct bpf_kfunc_call_arg_meta {
282 	/* In parameters */
283 	struct btf *btf;
284 	u32 func_id;
285 	u32 kfunc_flags;
286 	const struct btf_type *func_proto;
287 	const char *func_name;
288 	/* Out parameters */
289 	u32 ref_obj_id;
290 	u8 release_regno;
291 	bool r0_rdonly;
292 	u32 ret_btf_id;
293 	u64 r0_size;
294 	u32 subprogno;
295 	struct {
296 		u64 value;
297 		bool found;
298 	} arg_constant;
299 	union {
300 		struct btf_and_id arg_obj_drop;
301 		struct btf_and_id arg_refcount_acquire;
302 	};
303 	struct {
304 		struct btf_field *field;
305 	} arg_list_head;
306 	struct {
307 		struct btf_field *field;
308 	} arg_rbtree_root;
309 	struct {
310 		enum bpf_dynptr_type type;
311 		u32 id;
312 	} initialized_dynptr;
313 	struct {
314 		u8 spi;
315 		u8 frameno;
316 	} iter;
317 	u64 mem_size;
318 };
319 
320 struct btf *btf_vmlinux;
321 
322 static DEFINE_MUTEX(bpf_verifier_lock);
323 
324 static const struct bpf_line_info *
325 find_linfo(const struct bpf_verifier_env *env, u32 insn_off)
326 {
327 	const struct bpf_line_info *linfo;
328 	const struct bpf_prog *prog;
329 	u32 i, nr_linfo;
330 
331 	prog = env->prog;
332 	nr_linfo = prog->aux->nr_linfo;
333 
334 	if (!nr_linfo || insn_off >= prog->len)
335 		return NULL;
336 
337 	linfo = prog->aux->linfo;
338 	for (i = 1; i < nr_linfo; i++)
339 		if (insn_off < linfo[i].insn_off)
340 			break;
341 
342 	return &linfo[i - 1];
343 }
344 
345 __printf(2, 3) static void verbose(void *private_data, const char *fmt, ...)
346 {
347 	struct bpf_verifier_env *env = private_data;
348 	va_list args;
349 
350 	if (!bpf_verifier_log_needed(&env->log))
351 		return;
352 
353 	va_start(args, fmt);
354 	bpf_verifier_vlog(&env->log, fmt, args);
355 	va_end(args);
356 }
357 
358 static const char *ltrim(const char *s)
359 {
360 	while (isspace(*s))
361 		s++;
362 
363 	return s;
364 }
365 
366 __printf(3, 4) static void verbose_linfo(struct bpf_verifier_env *env,
367 					 u32 insn_off,
368 					 const char *prefix_fmt, ...)
369 {
370 	const struct bpf_line_info *linfo;
371 
372 	if (!bpf_verifier_log_needed(&env->log))
373 		return;
374 
375 	linfo = find_linfo(env, insn_off);
376 	if (!linfo || linfo == env->prev_linfo)
377 		return;
378 
379 	if (prefix_fmt) {
380 		va_list args;
381 
382 		va_start(args, prefix_fmt);
383 		bpf_verifier_vlog(&env->log, prefix_fmt, args);
384 		va_end(args);
385 	}
386 
387 	verbose(env, "%s\n",
388 		ltrim(btf_name_by_offset(env->prog->aux->btf,
389 					 linfo->line_off)));
390 
391 	env->prev_linfo = linfo;
392 }
393 
394 static void verbose_invalid_scalar(struct bpf_verifier_env *env,
395 				   struct bpf_reg_state *reg,
396 				   struct tnum *range, const char *ctx,
397 				   const char *reg_name)
398 {
399 	char tn_buf[48];
400 
401 	verbose(env, "At %s the register %s ", ctx, reg_name);
402 	if (!tnum_is_unknown(reg->var_off)) {
403 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
404 		verbose(env, "has value %s", tn_buf);
405 	} else {
406 		verbose(env, "has unknown scalar value");
407 	}
408 	tnum_strn(tn_buf, sizeof(tn_buf), *range);
409 	verbose(env, " should have been in %s\n", tn_buf);
410 }
411 
412 static bool type_is_pkt_pointer(enum bpf_reg_type type)
413 {
414 	type = base_type(type);
415 	return type == PTR_TO_PACKET ||
416 	       type == PTR_TO_PACKET_META;
417 }
418 
419 static bool type_is_sk_pointer(enum bpf_reg_type type)
420 {
421 	return type == PTR_TO_SOCKET ||
422 		type == PTR_TO_SOCK_COMMON ||
423 		type == PTR_TO_TCP_SOCK ||
424 		type == PTR_TO_XDP_SOCK;
425 }
426 
427 static bool type_may_be_null(u32 type)
428 {
429 	return type & PTR_MAYBE_NULL;
430 }
431 
432 static bool reg_type_not_null(enum bpf_reg_type type)
433 {
434 	if (type_may_be_null(type))
435 		return false;
436 
437 	type = base_type(type);
438 	return type == PTR_TO_SOCKET ||
439 		type == PTR_TO_TCP_SOCK ||
440 		type == PTR_TO_MAP_VALUE ||
441 		type == PTR_TO_MAP_KEY ||
442 		type == PTR_TO_SOCK_COMMON ||
443 		type == PTR_TO_MEM;
444 }
445 
446 static bool type_is_ptr_alloc_obj(u32 type)
447 {
448 	return base_type(type) == PTR_TO_BTF_ID && type_flag(type) & MEM_ALLOC;
449 }
450 
451 static bool type_is_non_owning_ref(u32 type)
452 {
453 	return type_is_ptr_alloc_obj(type) && type_flag(type) & NON_OWN_REF;
454 }
455 
456 static struct btf_record *reg_btf_record(const struct bpf_reg_state *reg)
457 {
458 	struct btf_record *rec = NULL;
459 	struct btf_struct_meta *meta;
460 
461 	if (reg->type == PTR_TO_MAP_VALUE) {
462 		rec = reg->map_ptr->record;
463 	} else if (type_is_ptr_alloc_obj(reg->type)) {
464 		meta = btf_find_struct_meta(reg->btf, reg->btf_id);
465 		if (meta)
466 			rec = meta->record;
467 	}
468 	return rec;
469 }
470 
471 static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg)
472 {
473 	return btf_record_has_field(reg_btf_record(reg), BPF_SPIN_LOCK);
474 }
475 
476 static bool type_is_rdonly_mem(u32 type)
477 {
478 	return type & MEM_RDONLY;
479 }
480 
481 static bool is_acquire_function(enum bpf_func_id func_id,
482 				const struct bpf_map *map)
483 {
484 	enum bpf_map_type map_type = map ? map->map_type : BPF_MAP_TYPE_UNSPEC;
485 
486 	if (func_id == BPF_FUNC_sk_lookup_tcp ||
487 	    func_id == BPF_FUNC_sk_lookup_udp ||
488 	    func_id == BPF_FUNC_skc_lookup_tcp ||
489 	    func_id == BPF_FUNC_ringbuf_reserve ||
490 	    func_id == BPF_FUNC_kptr_xchg)
491 		return true;
492 
493 	if (func_id == BPF_FUNC_map_lookup_elem &&
494 	    (map_type == BPF_MAP_TYPE_SOCKMAP ||
495 	     map_type == BPF_MAP_TYPE_SOCKHASH))
496 		return true;
497 
498 	return false;
499 }
500 
501 static bool is_ptr_cast_function(enum bpf_func_id func_id)
502 {
503 	return func_id == BPF_FUNC_tcp_sock ||
504 		func_id == BPF_FUNC_sk_fullsock ||
505 		func_id == BPF_FUNC_skc_to_tcp_sock ||
506 		func_id == BPF_FUNC_skc_to_tcp6_sock ||
507 		func_id == BPF_FUNC_skc_to_udp6_sock ||
508 		func_id == BPF_FUNC_skc_to_mptcp_sock ||
509 		func_id == BPF_FUNC_skc_to_tcp_timewait_sock ||
510 		func_id == BPF_FUNC_skc_to_tcp_request_sock;
511 }
512 
513 static bool is_dynptr_ref_function(enum bpf_func_id func_id)
514 {
515 	return func_id == BPF_FUNC_dynptr_data;
516 }
517 
518 static bool is_callback_calling_function(enum bpf_func_id func_id)
519 {
520 	return func_id == BPF_FUNC_for_each_map_elem ||
521 	       func_id == BPF_FUNC_timer_set_callback ||
522 	       func_id == BPF_FUNC_find_vma ||
523 	       func_id == BPF_FUNC_loop ||
524 	       func_id == BPF_FUNC_user_ringbuf_drain;
525 }
526 
527 static bool is_storage_get_function(enum bpf_func_id func_id)
528 {
529 	return func_id == BPF_FUNC_sk_storage_get ||
530 	       func_id == BPF_FUNC_inode_storage_get ||
531 	       func_id == BPF_FUNC_task_storage_get ||
532 	       func_id == BPF_FUNC_cgrp_storage_get;
533 }
534 
535 static bool helper_multiple_ref_obj_use(enum bpf_func_id func_id,
536 					const struct bpf_map *map)
537 {
538 	int ref_obj_uses = 0;
539 
540 	if (is_ptr_cast_function(func_id))
541 		ref_obj_uses++;
542 	if (is_acquire_function(func_id, map))
543 		ref_obj_uses++;
544 	if (is_dynptr_ref_function(func_id))
545 		ref_obj_uses++;
546 
547 	return ref_obj_uses > 1;
548 }
549 
550 static bool is_cmpxchg_insn(const struct bpf_insn *insn)
551 {
552 	return BPF_CLASS(insn->code) == BPF_STX &&
553 	       BPF_MODE(insn->code) == BPF_ATOMIC &&
554 	       insn->imm == BPF_CMPXCHG;
555 }
556 
557 /* string representation of 'enum bpf_reg_type'
558  *
559  * Note that reg_type_str() cannot appear more than once in a single verbose()
560  * statement, since it formats its result into the single shared env->type_str_buf.
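 * For example (following the prefix/postfix handling below), the type
 * (PTR_TO_MAP_VALUE | MEM_RDONLY | PTR_MAYBE_NULL) is rendered as
 * "rdonly_map_value_or_null".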
561  */
562 static const char *reg_type_str(struct bpf_verifier_env *env,
563 				enum bpf_reg_type type)
564 {
565 	char postfix[16] = {0}, prefix[64] = {0};
566 	static const char * const str[] = {
567 		[NOT_INIT]		= "?",
568 		[SCALAR_VALUE]		= "scalar",
569 		[PTR_TO_CTX]		= "ctx",
570 		[CONST_PTR_TO_MAP]	= "map_ptr",
571 		[PTR_TO_MAP_VALUE]	= "map_value",
572 		[PTR_TO_STACK]		= "fp",
573 		[PTR_TO_PACKET]		= "pkt",
574 		[PTR_TO_PACKET_META]	= "pkt_meta",
575 		[PTR_TO_PACKET_END]	= "pkt_end",
576 		[PTR_TO_FLOW_KEYS]	= "flow_keys",
577 		[PTR_TO_SOCKET]		= "sock",
578 		[PTR_TO_SOCK_COMMON]	= "sock_common",
579 		[PTR_TO_TCP_SOCK]	= "tcp_sock",
580 		[PTR_TO_TP_BUFFER]	= "tp_buffer",
581 		[PTR_TO_XDP_SOCK]	= "xdp_sock",
582 		[PTR_TO_BTF_ID]		= "ptr_",
583 		[PTR_TO_MEM]		= "mem",
584 		[PTR_TO_BUF]		= "buf",
585 		[PTR_TO_FUNC]		= "func",
586 		[PTR_TO_MAP_KEY]	= "map_key",
587 		[CONST_PTR_TO_DYNPTR]	= "dynptr_ptr",
588 	};
589 
590 	if (type & PTR_MAYBE_NULL) {
591 		if (base_type(type) == PTR_TO_BTF_ID)
592 			strncpy(postfix, "or_null_", 16);
593 		else
594 			strncpy(postfix, "_or_null", 16);
595 	}
596 
597 	snprintf(prefix, sizeof(prefix), "%s%s%s%s%s%s%s",
598 		 type & MEM_RDONLY ? "rdonly_" : "",
599 		 type & MEM_RINGBUF ? "ringbuf_" : "",
600 		 type & MEM_USER ? "user_" : "",
601 		 type & MEM_PERCPU ? "percpu_" : "",
602 		 type & MEM_RCU ? "rcu_" : "",
603 		 type & PTR_UNTRUSTED ? "untrusted_" : "",
604 		 type & PTR_TRUSTED ? "trusted_" : ""
605 	);
606 
607 	snprintf(env->type_str_buf, TYPE_STR_BUF_LEN, "%s%s%s",
608 		 prefix, str[base_type(type)], postfix);
609 	return env->type_str_buf;
610 }
611 
612 static char slot_type_char[] = {
613 	[STACK_INVALID]	= '?',
614 	[STACK_SPILL]	= 'r',
615 	[STACK_MISC]	= 'm',
616 	[STACK_ZERO]	= '0',
617 	[STACK_DYNPTR]	= 'd',
618 	[STACK_ITER]	= 'i',
619 };
620 
621 static void print_liveness(struct bpf_verifier_env *env,
622 			   enum bpf_reg_liveness live)
623 {
624 	if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN | REG_LIVE_DONE))
625 		verbose(env, "_");
626 	if (live & REG_LIVE_READ)
627 		verbose(env, "r");
628 	if (live & REG_LIVE_WRITTEN)
629 		verbose(env, "w");
630 	if (live & REG_LIVE_DONE)
631 		verbose(env, "D");
632 }
633 
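/* Map a (negative) stack offset to a stack slot index, counting BPF_REG_SIZE
 * sized slots downwards from the frame pointer. As a worked example (with
 * BPF_REG_SIZE == 8): off == -8 maps to spi 0, off == -16 to spi 1, and so on.
 */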
634 static int __get_spi(s32 off)
635 {
636 	return (-off - 1) / BPF_REG_SIZE;
637 }
638 
639 static struct bpf_func_state *func(struct bpf_verifier_env *env,
640 				   const struct bpf_reg_state *reg)
641 {
642 	struct bpf_verifier_state *cur = env->cur_state;
643 
644 	return cur->frame[reg->frameno];
645 }
646 
647 static bool is_spi_bounds_valid(struct bpf_func_state *state, int spi, int nr_slots)
648 {
649        int allocated_slots = state->allocated_stack / BPF_REG_SIZE;
650 
651        /* We need to check that slots between [spi - nr_slots + 1, spi] are
652 	* within [0, allocated_stack).
653 	*
654 	* Please note that the spi grows downwards. For example, a dynptr
655 	* takes the size of two stack slots; the first slot will be at
656 	* spi and the second slot will be at spi - 1.
657 	*/
658        return spi - nr_slots + 1 >= 0 && spi < allocated_slots;
659 }
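
/* Worked example for the bounds check above (numbers are illustrative): with
 * allocated_stack == 32 there are 4 slots (spi 0..3). A dynptr with
 * nr_slots == 2 at spi == 1 covers slots 1 and 0 and is in bounds; at
 * spi == 0 it would need slot -1 and is rejected.
 */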
660 
661 static int stack_slot_obj_get_spi(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
662 			          const char *obj_kind, int nr_slots)
663 {
664 	int off, spi;
665 
666 	if (!tnum_is_const(reg->var_off)) {
667 		verbose(env, "%s has to be at a constant offset\n", obj_kind);
668 		return -EINVAL;
669 	}
670 
671 	off = reg->off + reg->var_off.value;
672 	if (off % BPF_REG_SIZE) {
673 		verbose(env, "cannot pass in %s at an offset=%d\n", obj_kind, off);
674 		return -EINVAL;
675 	}
676 
677 	spi = __get_spi(off);
678 	if (spi + 1 < nr_slots) {
679 		verbose(env, "cannot pass in %s at an offset=%d\n", obj_kind, off);
680 		return -EINVAL;
681 	}
682 
683 	if (!is_spi_bounds_valid(func(env, reg), spi, nr_slots))
684 		return -ERANGE;
685 	return spi;
686 }
687 
688 static int dynptr_get_spi(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
689 {
690 	return stack_slot_obj_get_spi(env, reg, "dynptr", BPF_DYNPTR_NR_SLOTS);
691 }
692 
693 static int iter_get_spi(struct bpf_verifier_env *env, struct bpf_reg_state *reg, int nr_slots)
694 {
695 	return stack_slot_obj_get_spi(env, reg, "iter", nr_slots);
696 }
697 
698 static const char *btf_type_name(const struct btf *btf, u32 id)
699 {
700 	return btf_name_by_offset(btf, btf_type_by_id(btf, id)->name_off);
701 }
702 
703 static const char *dynptr_type_str(enum bpf_dynptr_type type)
704 {
705 	switch (type) {
706 	case BPF_DYNPTR_TYPE_LOCAL:
707 		return "local";
708 	case BPF_DYNPTR_TYPE_RINGBUF:
709 		return "ringbuf";
710 	case BPF_DYNPTR_TYPE_SKB:
711 		return "skb";
712 	case BPF_DYNPTR_TYPE_XDP:
713 		return "xdp";
714 	case BPF_DYNPTR_TYPE_INVALID:
715 		return "<invalid>";
716 	default:
717 		WARN_ONCE(1, "unknown dynptr type %d\n", type);
718 		return "<unknown>";
719 	}
720 }
721 
722 static const char *iter_type_str(const struct btf *btf, u32 btf_id)
723 {
724 	if (!btf || btf_id == 0)
725 		return "<invalid>";
726 
727 	/* we already validated that type is valid and has conforming name */
728 	return btf_type_name(btf, btf_id) + sizeof(ITER_PREFIX) - 1;
729 }
730 
731 static const char *iter_state_str(enum bpf_iter_state state)
732 {
733 	switch (state) {
734 	case BPF_ITER_STATE_ACTIVE:
735 		return "active";
736 	case BPF_ITER_STATE_DRAINED:
737 		return "drained";
738 	case BPF_ITER_STATE_INVALID:
739 		return "<invalid>";
740 	default:
741 		WARN_ONCE(1, "unknown iter state %d\n", state);
742 		return "<unknown>";
743 	}
744 }
745 
746 static void mark_reg_scratched(struct bpf_verifier_env *env, u32 regno)
747 {
748 	env->scratched_regs |= 1U << regno;
749 }
750 
751 static void mark_stack_slot_scratched(struct bpf_verifier_env *env, u32 spi)
752 {
753 	env->scratched_stack_slots |= 1ULL << spi;
754 }
755 
756 static bool reg_scratched(const struct bpf_verifier_env *env, u32 regno)
757 {
758 	return (env->scratched_regs >> regno) & 1;
759 }
760 
761 static bool stack_slot_scratched(const struct bpf_verifier_env *env, u64 regno)
762 {
763 	return (env->scratched_stack_slots >> regno) & 1;
764 }
765 
766 static bool verifier_state_scratched(const struct bpf_verifier_env *env)
767 {
768 	return env->scratched_regs || env->scratched_stack_slots;
769 }
770 
771 static void mark_verifier_state_clean(struct bpf_verifier_env *env)
772 {
773 	env->scratched_regs = 0U;
774 	env->scratched_stack_slots = 0ULL;
775 }
776 
777 /* Used for printing the entire verifier state. */
778 static void mark_verifier_state_scratched(struct bpf_verifier_env *env)
779 {
780 	env->scratched_regs = ~0U;
781 	env->scratched_stack_slots = ~0ULL;
782 }
783 
784 static enum bpf_dynptr_type arg_to_dynptr_type(enum bpf_arg_type arg_type)
785 {
786 	switch (arg_type & DYNPTR_TYPE_FLAG_MASK) {
787 	case DYNPTR_TYPE_LOCAL:
788 		return BPF_DYNPTR_TYPE_LOCAL;
789 	case DYNPTR_TYPE_RINGBUF:
790 		return BPF_DYNPTR_TYPE_RINGBUF;
791 	case DYNPTR_TYPE_SKB:
792 		return BPF_DYNPTR_TYPE_SKB;
793 	case DYNPTR_TYPE_XDP:
794 		return BPF_DYNPTR_TYPE_XDP;
795 	default:
796 		return BPF_DYNPTR_TYPE_INVALID;
797 	}
798 }
799 
800 static enum bpf_type_flag get_dynptr_type_flag(enum bpf_dynptr_type type)
801 {
802 	switch (type) {
803 	case BPF_DYNPTR_TYPE_LOCAL:
804 		return DYNPTR_TYPE_LOCAL;
805 	case BPF_DYNPTR_TYPE_RINGBUF:
806 		return DYNPTR_TYPE_RINGBUF;
807 	case BPF_DYNPTR_TYPE_SKB:
808 		return DYNPTR_TYPE_SKB;
809 	case BPF_DYNPTR_TYPE_XDP:
810 		return DYNPTR_TYPE_XDP;
811 	default:
812 		return 0;
813 	}
814 }
815 
816 static bool dynptr_type_refcounted(enum bpf_dynptr_type type)
817 {
818 	return type == BPF_DYNPTR_TYPE_RINGBUF;
819 }
820 
821 static void __mark_dynptr_reg(struct bpf_reg_state *reg,
822 			      enum bpf_dynptr_type type,
823 			      bool first_slot, int dynptr_id);
824 
825 static void __mark_reg_not_init(const struct bpf_verifier_env *env,
826 				struct bpf_reg_state *reg);
827 
828 static void mark_dynptr_stack_regs(struct bpf_verifier_env *env,
829 				   struct bpf_reg_state *sreg1,
830 				   struct bpf_reg_state *sreg2,
831 				   enum bpf_dynptr_type type)
832 {
833 	int id = ++env->id_gen;
834 
835 	__mark_dynptr_reg(sreg1, type, true, id);
836 	__mark_dynptr_reg(sreg2, type, false, id);
837 }
838 
839 static void mark_dynptr_cb_reg(struct bpf_verifier_env *env,
840 			       struct bpf_reg_state *reg,
841 			       enum bpf_dynptr_type type)
842 {
843 	__mark_dynptr_reg(reg, type, true, ++env->id_gen);
844 }
845 
846 static int destroy_if_dynptr_stack_slot(struct bpf_verifier_env *env,
847 				        struct bpf_func_state *state, int spi);
848 
849 static int mark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
850 				   enum bpf_arg_type arg_type, int insn_idx)
851 {
852 	struct bpf_func_state *state = func(env, reg);
853 	enum bpf_dynptr_type type;
854 	int spi, i, id, err;
855 
856 	spi = dynptr_get_spi(env, reg);
857 	if (spi < 0)
858 		return spi;
859 
860 	/* We cannot assume both spi and spi - 1 belong to the same dynptr,
861 	 * hence we need to call destroy_if_dynptr_stack_slot twice for both,
862 	 * to ensure that, given the following stack layout:
863 	 *	[d1][d1][d2][d2]
864 	 * spi    3   2   1   0
865 	 * marking spi = 2 leads to destruction of both d1 and d2. In case they
866 	 * do belong to the same dynptr, the second call won't see slot_type as
867 	 * STACK_DYNPTR and will simply skip destruction.
868 	 */
869 	err = destroy_if_dynptr_stack_slot(env, state, spi);
870 	if (err)
871 		return err;
872 	err = destroy_if_dynptr_stack_slot(env, state, spi - 1);
873 	if (err)
874 		return err;
875 
876 	for (i = 0; i < BPF_REG_SIZE; i++) {
877 		state->stack[spi].slot_type[i] = STACK_DYNPTR;
878 		state->stack[spi - 1].slot_type[i] = STACK_DYNPTR;
879 	}
880 
881 	type = arg_to_dynptr_type(arg_type);
882 	if (type == BPF_DYNPTR_TYPE_INVALID)
883 		return -EINVAL;
884 
885 	mark_dynptr_stack_regs(env, &state->stack[spi].spilled_ptr,
886 			       &state->stack[spi - 1].spilled_ptr, type);
887 
888 	if (dynptr_type_refcounted(type)) {
889 		/* The id is used to track proper releasing */
890 		id = acquire_reference_state(env, insn_idx);
891 		if (id < 0)
892 			return id;
893 
894 		state->stack[spi].spilled_ptr.ref_obj_id = id;
895 		state->stack[spi - 1].spilled_ptr.ref_obj_id = id;
896 	}
897 
898 	state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
899 	state->stack[spi - 1].spilled_ptr.live |= REG_LIVE_WRITTEN;
900 
901 	return 0;
902 }
903 
904 static int unmark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
905 {
906 	struct bpf_func_state *state = func(env, reg);
907 	int spi, i;
908 
909 	spi = dynptr_get_spi(env, reg);
910 	if (spi < 0)
911 		return spi;
912 
913 	for (i = 0; i < BPF_REG_SIZE; i++) {
914 		state->stack[spi].slot_type[i] = STACK_INVALID;
915 		state->stack[spi - 1].slot_type[i] = STACK_INVALID;
916 	}
917 
918 	/* Invalidate any slices associated with this dynptr */
919 	if (dynptr_type_refcounted(state->stack[spi].spilled_ptr.dynptr.type))
920 		WARN_ON_ONCE(release_reference(env, state->stack[spi].spilled_ptr.ref_obj_id));
921 
922 	__mark_reg_not_init(env, &state->stack[spi].spilled_ptr);
923 	__mark_reg_not_init(env, &state->stack[spi - 1].spilled_ptr);
924 
925 	/* Why do we need to set REG_LIVE_WRITTEN for STACK_INVALID slot?
926 	 *
927 	 * While we don't allow reading STACK_INVALID, it is still possible to
928 	 * do <8 byte writes marking some but not all slots as STACK_MISC. Then,
929 	 * helpers or insns can do partial read of that part without failing,
930 	 * but check_stack_range_initialized, check_stack_read_var_off, and
931 	 * check_stack_read_fixed_off will do mark_reg_read for all 8-bytes of
932 	 * the slot conservatively. Hence we need to prevent those liveness
933 	 * marking walks.
934 	 *
935 	 * This was not a problem before because STACK_INVALID is only set by
936 	 * default (where the default reg state has its reg->parent as NULL), or
937 	 * in clean_live_states after REG_LIVE_DONE (at which point
938 	 * mark_reg_read won't walk reg->parent chain), but not randomly during
939 	 * verifier state exploration (like we did above). Hence, for our case
940 	 * parentage chain will still be live (i.e. reg->parent may be
941 	 * non-NULL), while earlier reg->parent was NULL, so we need
942 	 * REG_LIVE_WRITTEN to screen off read marker propagation when it is
943 	 * done later on reads or by mark_dynptr_read, so that registers in the
944 	 * verifier state are not marked unnecessarily.
945 	 */
946 	state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
947 	state->stack[spi - 1].spilled_ptr.live |= REG_LIVE_WRITTEN;
948 
949 	return 0;
950 }
951 
952 static void __mark_reg_unknown(const struct bpf_verifier_env *env,
953 			       struct bpf_reg_state *reg);
954 
955 static void mark_reg_invalid(const struct bpf_verifier_env *env, struct bpf_reg_state *reg)
956 {
957 	if (!env->allow_ptr_leaks)
958 		__mark_reg_not_init(env, reg);
959 	else
960 		__mark_reg_unknown(env, reg);
961 }
962 
963 static int destroy_if_dynptr_stack_slot(struct bpf_verifier_env *env,
964 				        struct bpf_func_state *state, int spi)
965 {
966 	struct bpf_func_state *fstate;
967 	struct bpf_reg_state *dreg;
968 	int i, dynptr_id;
969 
970 	/* We always ensure that STACK_DYNPTR is never set partially,
971 	 * hence just checking for slot_type[0] is enough. This is
972 	 * different for STACK_SPILL, where it may be only set for
973 	 * 1 byte, so code has to use is_spilled_reg.
974 	 */
975 	if (state->stack[spi].slot_type[0] != STACK_DYNPTR)
976 		return 0;
977 
978 	/* Reposition spi to first slot */
979 	if (!state->stack[spi].spilled_ptr.dynptr.first_slot)
980 		spi = spi + 1;
981 
982 	if (dynptr_type_refcounted(state->stack[spi].spilled_ptr.dynptr.type)) {
983 		verbose(env, "cannot overwrite referenced dynptr\n");
984 		return -EINVAL;
985 	}
986 
987 	mark_stack_slot_scratched(env, spi);
988 	mark_stack_slot_scratched(env, spi - 1);
989 
990 	/* Writing partially to one dynptr stack slot destroys both. */
991 	for (i = 0; i < BPF_REG_SIZE; i++) {
992 		state->stack[spi].slot_type[i] = STACK_INVALID;
993 		state->stack[spi - 1].slot_type[i] = STACK_INVALID;
994 	}
995 
996 	dynptr_id = state->stack[spi].spilled_ptr.id;
997 	/* Invalidate any slices associated with this dynptr */
998 	bpf_for_each_reg_in_vstate(env->cur_state, fstate, dreg, ({
999 		/* Dynptr slices are only PTR_TO_MEM_OR_NULL and PTR_TO_MEM */
1000 		if (dreg->type != (PTR_TO_MEM | PTR_MAYBE_NULL) && dreg->type != PTR_TO_MEM)
1001 			continue;
1002 		if (dreg->dynptr_id == dynptr_id)
1003 			mark_reg_invalid(env, dreg);
1004 	}));
1005 
1006 	/* Do not release reference state, we are destroying dynptr on stack,
1007 	 * not using some helper to release it. Just reset register.
1008 	 */
1009 	__mark_reg_not_init(env, &state->stack[spi].spilled_ptr);
1010 	__mark_reg_not_init(env, &state->stack[spi - 1].spilled_ptr);
1011 
1012 	/* Same reason as unmark_stack_slots_dynptr above */
1013 	state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
1014 	state->stack[spi - 1].spilled_ptr.live |= REG_LIVE_WRITTEN;
1015 
1016 	return 0;
1017 }
1018 
1019 static bool is_dynptr_reg_valid_uninit(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
1020 {
1021 	int spi;
1022 
1023 	if (reg->type == CONST_PTR_TO_DYNPTR)
1024 		return false;
1025 
1026 	spi = dynptr_get_spi(env, reg);
1027 
1028 	/* -ERANGE (i.e. spi not falling into allocated stack slots) isn't an
1029 	 * error because this just means the stack state hasn't been updated yet.
1030 	 * We will do check_mem_access to check and update stack bounds later.
1031 	 */
1032 	if (spi < 0 && spi != -ERANGE)
1033 		return false;
1034 
1035 	/* We don't need to check if the stack slots are marked by previous
1036 	 * dynptr initializations because we allow overwriting existing unreferenced
1037 	 * STACK_DYNPTR slots, see mark_stack_slots_dynptr which calls
1038 	 * destroy_if_dynptr_stack_slot to ensure dynptr objects at the slots we are
1039 	 * touching are completely destructed before we reinitialize them for a new
1040 	 * one. For referenced ones, destroy_if_dynptr_stack_slot returns an error early
1041 	 * instead of delaying it until the end where the user will get "Unreleased
1042 	 * reference" error.
1043 	 */
1044 	return true;
1045 }
1046 
1047 static bool is_dynptr_reg_valid_init(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
1048 {
1049 	struct bpf_func_state *state = func(env, reg);
1050 	int i, spi;
1051 
1052 	/* This already represents first slot of initialized bpf_dynptr.
1053 	 *
1054 	 * CONST_PTR_TO_DYNPTR already has fixed and var_off as 0 due to
1055 	 * check_func_arg_reg_off's logic, so we don't need to check its
1056 	 * offset and alignment.
1057 	 */
1058 	if (reg->type == CONST_PTR_TO_DYNPTR)
1059 		return true;
1060 
1061 	spi = dynptr_get_spi(env, reg);
1062 	if (spi < 0)
1063 		return false;
1064 	if (!state->stack[spi].spilled_ptr.dynptr.first_slot)
1065 		return false;
1066 
1067 	for (i = 0; i < BPF_REG_SIZE; i++) {
1068 		if (state->stack[spi].slot_type[i] != STACK_DYNPTR ||
1069 		    state->stack[spi - 1].slot_type[i] != STACK_DYNPTR)
1070 			return false;
1071 	}
1072 
1073 	return true;
1074 }
1075 
1076 static bool is_dynptr_type_expected(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
1077 				    enum bpf_arg_type arg_type)
1078 {
1079 	struct bpf_func_state *state = func(env, reg);
1080 	enum bpf_dynptr_type dynptr_type;
1081 	int spi;
1082 
1083 	/* ARG_PTR_TO_DYNPTR takes any type of dynptr */
1084 	if (arg_type == ARG_PTR_TO_DYNPTR)
1085 		return true;
1086 
1087 	dynptr_type = arg_to_dynptr_type(arg_type);
1088 	if (reg->type == CONST_PTR_TO_DYNPTR) {
1089 		return reg->dynptr.type == dynptr_type;
1090 	} else {
1091 		spi = dynptr_get_spi(env, reg);
1092 		if (spi < 0)
1093 			return false;
1094 		return state->stack[spi].spilled_ptr.dynptr.type == dynptr_type;
1095 	}
1096 }
1097 
1098 static void __mark_reg_known_zero(struct bpf_reg_state *reg);
1099 
1100 static int mark_stack_slots_iter(struct bpf_verifier_env *env,
1101 				 struct bpf_reg_state *reg, int insn_idx,
1102 				 struct btf *btf, u32 btf_id, int nr_slots)
1103 {
1104 	struct bpf_func_state *state = func(env, reg);
1105 	int spi, i, j, id;
1106 
1107 	spi = iter_get_spi(env, reg, nr_slots);
1108 	if (spi < 0)
1109 		return spi;
1110 
1111 	id = acquire_reference_state(env, insn_idx);
1112 	if (id < 0)
1113 		return id;
1114 
1115 	for (i = 0; i < nr_slots; i++) {
1116 		struct bpf_stack_state *slot = &state->stack[spi - i];
1117 		struct bpf_reg_state *st = &slot->spilled_ptr;
1118 
1119 		__mark_reg_known_zero(st);
1120 		st->type = PTR_TO_STACK; /* we don't have dedicated reg type */
1121 		st->live |= REG_LIVE_WRITTEN;
1122 		st->ref_obj_id = i == 0 ? id : 0;
1123 		st->iter.btf = btf;
1124 		st->iter.btf_id = btf_id;
1125 		st->iter.state = BPF_ITER_STATE_ACTIVE;
1126 		st->iter.depth = 0;
1127 
1128 		for (j = 0; j < BPF_REG_SIZE; j++)
1129 			slot->slot_type[j] = STACK_ITER;
1130 
1131 		mark_stack_slot_scratched(env, spi - i);
1132 	}
1133 
1134 	return 0;
1135 }
1136 
1137 static int unmark_stack_slots_iter(struct bpf_verifier_env *env,
1138 				   struct bpf_reg_state *reg, int nr_slots)
1139 {
1140 	struct bpf_func_state *state = func(env, reg);
1141 	int spi, i, j;
1142 
1143 	spi = iter_get_spi(env, reg, nr_slots);
1144 	if (spi < 0)
1145 		return spi;
1146 
1147 	for (i = 0; i < nr_slots; i++) {
1148 		struct bpf_stack_state *slot = &state->stack[spi - i];
1149 		struct bpf_reg_state *st = &slot->spilled_ptr;
1150 
1151 		if (i == 0)
1152 			WARN_ON_ONCE(release_reference(env, st->ref_obj_id));
1153 
1154 		__mark_reg_not_init(env, st);
1155 
1156 		/* see unmark_stack_slots_dynptr() for why we need to set REG_LIVE_WRITTEN */
1157 		st->live |= REG_LIVE_WRITTEN;
1158 
1159 		for (j = 0; j < BPF_REG_SIZE; j++)
1160 			slot->slot_type[j] = STACK_INVALID;
1161 
1162 		mark_stack_slot_scratched(env, spi - i);
1163 	}
1164 
1165 	return 0;
1166 }
1167 
1168 static bool is_iter_reg_valid_uninit(struct bpf_verifier_env *env,
1169 				     struct bpf_reg_state *reg, int nr_slots)
1170 {
1171 	struct bpf_func_state *state = func(env, reg);
1172 	int spi, i, j;
1173 
1174 	/* For -ERANGE (i.e. spi not falling into allocated stack slots), we
1175 	 * will do check_mem_access to check and update stack bounds later, so
1176 	 * return true for that case.
1177 	 */
1178 	spi = iter_get_spi(env, reg, nr_slots);
1179 	if (spi == -ERANGE)
1180 		return true;
1181 	if (spi < 0)
1182 		return false;
1183 
1184 	for (i = 0; i < nr_slots; i++) {
1185 		struct bpf_stack_state *slot = &state->stack[spi - i];
1186 
1187 		for (j = 0; j < BPF_REG_SIZE; j++)
1188 			if (slot->slot_type[j] == STACK_ITER)
1189 				return false;
1190 	}
1191 
1192 	return true;
1193 }
1194 
1195 static bool is_iter_reg_valid_init(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
1196 				   struct btf *btf, u32 btf_id, int nr_slots)
1197 {
1198 	struct bpf_func_state *state = func(env, reg);
1199 	int spi, i, j;
1200 
1201 	spi = iter_get_spi(env, reg, nr_slots);
1202 	if (spi < 0)
1203 		return false;
1204 
1205 	for (i = 0; i < nr_slots; i++) {
1206 		struct bpf_stack_state *slot = &state->stack[spi - i];
1207 		struct bpf_reg_state *st = &slot->spilled_ptr;
1208 
1209 		/* only main (first) slot has ref_obj_id set */
1210 		if (i == 0 && !st->ref_obj_id)
1211 			return false;
1212 		if (i != 0 && st->ref_obj_id)
1213 			return false;
1214 		if (st->iter.btf != btf || st->iter.btf_id != btf_id)
1215 			return false;
1216 
1217 		for (j = 0; j < BPF_REG_SIZE; j++)
1218 			if (slot->slot_type[j] != STACK_ITER)
1219 				return false;
1220 	}
1221 
1222 	return true;
1223 }
1224 
1225 /* Check if given stack slot is "special":
1226  *   - spilled register state (STACK_SPILL);
1227  *   - dynptr state (STACK_DYNPTR);
1228  *   - iter state (STACK_ITER).
1229  */
1230 static bool is_stack_slot_special(const struct bpf_stack_state *stack)
1231 {
1232 	enum bpf_stack_slot_type type = stack->slot_type[BPF_REG_SIZE - 1];
1233 
1234 	switch (type) {
1235 	case STACK_SPILL:
1236 	case STACK_DYNPTR:
1237 	case STACK_ITER:
1238 		return true;
1239 	case STACK_INVALID:
1240 	case STACK_MISC:
1241 	case STACK_ZERO:
1242 		return false;
1243 	default:
1244 		WARN_ONCE(1, "unknown stack slot type %d\n", type);
1245 		return true;
1246 	}
1247 }
1248 
1249 /* The reg state of a pointer or a bounded scalar was saved when
1250  * it was spilled to the stack.
1251  */
1252 static bool is_spilled_reg(const struct bpf_stack_state *stack)
1253 {
1254 	return stack->slot_type[BPF_REG_SIZE - 1] == STACK_SPILL;
1255 }
1256 
1257 static void scrub_spilled_slot(u8 *stype)
1258 {
1259 	if (*stype != STACK_INVALID)
1260 		*stype = STACK_MISC;
1261 }
1262 
1263 static void print_verifier_state(struct bpf_verifier_env *env,
1264 				 const struct bpf_func_state *state,
1265 				 bool print_all)
1266 {
1267 	const struct bpf_reg_state *reg;
1268 	enum bpf_reg_type t;
1269 	int i;
1270 
1271 	if (state->frameno)
1272 		verbose(env, " frame%d:", state->frameno);
1273 	for (i = 0; i < MAX_BPF_REG; i++) {
1274 		reg = &state->regs[i];
1275 		t = reg->type;
1276 		if (t == NOT_INIT)
1277 			continue;
1278 		if (!print_all && !reg_scratched(env, i))
1279 			continue;
1280 		verbose(env, " R%d", i);
1281 		print_liveness(env, reg->live);
1282 		verbose(env, "=");
1283 		if (t == SCALAR_VALUE && reg->precise)
1284 			verbose(env, "P");
1285 		if ((t == SCALAR_VALUE || t == PTR_TO_STACK) &&
1286 		    tnum_is_const(reg->var_off)) {
1287 			/* reg->off should be 0 for SCALAR_VALUE */
1288 			verbose(env, "%s", t == SCALAR_VALUE ? "" : reg_type_str(env, t));
1289 			verbose(env, "%lld", reg->var_off.value + reg->off);
1290 		} else {
1291 			const char *sep = "";
1292 
1293 			verbose(env, "%s", reg_type_str(env, t));
1294 			if (base_type(t) == PTR_TO_BTF_ID)
1295 				verbose(env, "%s", btf_type_name(reg->btf, reg->btf_id));
1296 			verbose(env, "(");
1297 /*
1298  * _a stands for append, was shortened to avoid multiline statements below.
1299  * This macro is used to output a comma separated list of attributes.
1300  */
1301 #define verbose_a(fmt, ...) ({ verbose(env, "%s" fmt, sep, __VA_ARGS__); sep = ","; })
1302 
1303 			if (reg->id)
1304 				verbose_a("id=%d", reg->id);
1305 			if (reg->ref_obj_id)
1306 				verbose_a("ref_obj_id=%d", reg->ref_obj_id);
1307 			if (type_is_non_owning_ref(reg->type))
1308 				verbose_a("%s", "non_own_ref");
1309 			if (t != SCALAR_VALUE)
1310 				verbose_a("off=%d", reg->off);
1311 			if (type_is_pkt_pointer(t))
1312 				verbose_a("r=%d", reg->range);
1313 			else if (base_type(t) == CONST_PTR_TO_MAP ||
1314 				 base_type(t) == PTR_TO_MAP_KEY ||
1315 				 base_type(t) == PTR_TO_MAP_VALUE)
1316 				verbose_a("ks=%d,vs=%d",
1317 					  reg->map_ptr->key_size,
1318 					  reg->map_ptr->value_size);
1319 			if (tnum_is_const(reg->var_off)) {
1320 				/* Typically an immediate SCALAR_VALUE, but
1321 				 * could be a pointer whose offset is too big
1322 				 * for reg->off
1323 				 */
1324 				verbose_a("imm=%llx", reg->var_off.value);
1325 			} else {
1326 				if (reg->smin_value != reg->umin_value &&
1327 				    reg->smin_value != S64_MIN)
1328 					verbose_a("smin=%lld", (long long)reg->smin_value);
1329 				if (reg->smax_value != reg->umax_value &&
1330 				    reg->smax_value != S64_MAX)
1331 					verbose_a("smax=%lld", (long long)reg->smax_value);
1332 				if (reg->umin_value != 0)
1333 					verbose_a("umin=%llu", (unsigned long long)reg->umin_value);
1334 				if (reg->umax_value != U64_MAX)
1335 					verbose_a("umax=%llu", (unsigned long long)reg->umax_value);
1336 				if (!tnum_is_unknown(reg->var_off)) {
1337 					char tn_buf[48];
1338 
1339 					tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
1340 					verbose_a("var_off=%s", tn_buf);
1341 				}
1342 				if (reg->s32_min_value != reg->smin_value &&
1343 				    reg->s32_min_value != S32_MIN)
1344 					verbose_a("s32_min=%d", (int)(reg->s32_min_value));
1345 				if (reg->s32_max_value != reg->smax_value &&
1346 				    reg->s32_max_value != S32_MAX)
1347 					verbose_a("s32_max=%d", (int)(reg->s32_max_value));
1348 				if (reg->u32_min_value != reg->umin_value &&
1349 				    reg->u32_min_value != U32_MIN)
1350 					verbose_a("u32_min=%d", (int)(reg->u32_min_value));
1351 				if (reg->u32_max_value != reg->umax_value &&
1352 				    reg->u32_max_value != U32_MAX)
1353 					verbose_a("u32_max=%d", (int)(reg->u32_max_value));
1354 			}
1355 #undef verbose_a
1356 
1357 			verbose(env, ")");
1358 		}
1359 	}
1360 	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
1361 		char types_buf[BPF_REG_SIZE + 1];
1362 		bool valid = false;
1363 		int j;
1364 
1365 		for (j = 0; j < BPF_REG_SIZE; j++) {
1366 			if (state->stack[i].slot_type[j] != STACK_INVALID)
1367 				valid = true;
1368 			types_buf[j] = slot_type_char[state->stack[i].slot_type[j]];
1369 		}
1370 		types_buf[BPF_REG_SIZE] = 0;
1371 		if (!valid)
1372 			continue;
1373 		if (!print_all && !stack_slot_scratched(env, i))
1374 			continue;
1375 		switch (state->stack[i].slot_type[BPF_REG_SIZE - 1]) {
1376 		case STACK_SPILL:
1377 			reg = &state->stack[i].spilled_ptr;
1378 			t = reg->type;
1379 
1380 			verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
1381 			print_liveness(env, reg->live);
1382 			verbose(env, "=%s", t == SCALAR_VALUE ? "" : reg_type_str(env, t));
1383 			if (t == SCALAR_VALUE && reg->precise)
1384 				verbose(env, "P");
1385 			if (t == SCALAR_VALUE && tnum_is_const(reg->var_off))
1386 				verbose(env, "%lld", reg->var_off.value + reg->off);
1387 			break;
1388 		case STACK_DYNPTR:
1389 			i += BPF_DYNPTR_NR_SLOTS - 1;
1390 			reg = &state->stack[i].spilled_ptr;
1391 
1392 			verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
1393 			print_liveness(env, reg->live);
1394 			verbose(env, "=dynptr_%s", dynptr_type_str(reg->dynptr.type));
1395 			if (reg->ref_obj_id)
1396 				verbose(env, "(ref_id=%d)", reg->ref_obj_id);
1397 			break;
1398 		case STACK_ITER:
1399 			/* only main slot has ref_obj_id set; skip others */
1400 			reg = &state->stack[i].spilled_ptr;
1401 			if (!reg->ref_obj_id)
1402 				continue;
1403 
1404 			verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
1405 			print_liveness(env, reg->live);
1406 			verbose(env, "=iter_%s(ref_id=%d,state=%s,depth=%u)",
1407 				iter_type_str(reg->iter.btf, reg->iter.btf_id),
1408 				reg->ref_obj_id, iter_state_str(reg->iter.state),
1409 				reg->iter.depth);
1410 			break;
1411 		case STACK_MISC:
1412 		case STACK_ZERO:
1413 		default:
1414 			reg = &state->stack[i].spilled_ptr;
1415 
1416 			for (j = 0; j < BPF_REG_SIZE; j++)
1417 				types_buf[j] = slot_type_char[state->stack[i].slot_type[j]];
1418 			types_buf[BPF_REG_SIZE] = 0;
1419 
1420 			verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
1421 			print_liveness(env, reg->live);
1422 			verbose(env, "=%s", types_buf);
1423 			break;
1424 		}
1425 	}
1426 	if (state->acquired_refs && state->refs[0].id) {
1427 		verbose(env, " refs=%d", state->refs[0].id);
1428 		for (i = 1; i < state->acquired_refs; i++)
1429 			if (state->refs[i].id)
1430 				verbose(env, ",%d", state->refs[i].id);
1431 	}
1432 	if (state->in_callback_fn)
1433 		verbose(env, " cb");
1434 	if (state->in_async_callback_fn)
1435 		verbose(env, " async_cb");
1436 	verbose(env, "\n");
1437 	mark_verifier_state_clean(env);
1438 }
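
/* For reference, a register dump produced by the function above looks roughly
 * like the following (registers and attributes depend on the verified program):
 *   R1=ctx(off=0,imm=0) R10=fp0
 * print_insn_state() below prefixes it with the instruction index.
 */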
1439 
1440 static inline u32 vlog_alignment(u32 pos)
1441 {
1442 	return round_up(max(pos + BPF_LOG_MIN_ALIGNMENT / 2, BPF_LOG_ALIGNMENT),
1443 			BPF_LOG_MIN_ALIGNMENT) - pos - 1;
1444 }
1445 
1446 static void print_insn_state(struct bpf_verifier_env *env,
1447 			     const struct bpf_func_state *state)
1448 {
1449 	if (env->prev_log_pos && env->prev_log_pos == env->log.end_pos) {
1450 		/* remove new line character */
1451 		bpf_vlog_reset(&env->log, env->prev_log_pos - 1);
1452 		verbose(env, "%*c;", vlog_alignment(env->prev_insn_print_pos), ' ');
1453 	} else {
1454 		verbose(env, "%d:", env->insn_idx);
1455 	}
1456 	print_verifier_state(env, state, false);
1457 }
1458 
1459 /* copy array src of length n * size bytes to dst. dst is reallocated if it's too
1460  * small to hold src. This is different from krealloc since we don't want to preserve
1461  * the contents of dst.
1462  *
1463  * Leaves dst untouched if src is NULL or length is zero. Returns NULL if memory could
1464  * not be allocated.
1465  */
1466 static void *copy_array(void *dst, const void *src, size_t n, size_t size, gfp_t flags)
1467 {
1468 	size_t alloc_bytes;
1469 	void *orig = dst;
1470 	size_t bytes;
1471 
1472 	if (ZERO_OR_NULL_PTR(src))
1473 		goto out;
1474 
1475 	if (unlikely(check_mul_overflow(n, size, &bytes)))
1476 		return NULL;
1477 
1478 	alloc_bytes = max(ksize(orig), kmalloc_size_roundup(bytes));
1479 	dst = krealloc(orig, alloc_bytes, flags);
1480 	if (!dst) {
1481 		kfree(orig);
1482 		return NULL;
1483 	}
1484 
1485 	memcpy(dst, src, bytes);
1486 out:
1487 	return dst ? dst : ZERO_SIZE_PTR;
1488 }
1489 
1490 /* resize an array from old_n items to new_n items. the array is reallocated if it's too
1491  * small to hold new_n items. new items are zeroed out if the array grows.
1492  *
1493  * Contrary to krealloc_array, does not free arr if new_n is zero.
1494  */
1495 static void *realloc_array(void *arr, size_t old_n, size_t new_n, size_t size)
1496 {
1497 	size_t alloc_size;
1498 	void *new_arr;
1499 
1500 	if (!new_n || old_n == new_n)
1501 		goto out;
1502 
1503 	alloc_size = kmalloc_size_roundup(size_mul(new_n, size));
1504 	new_arr = krealloc(arr, alloc_size, GFP_KERNEL);
1505 	if (!new_arr) {
1506 		kfree(arr);
1507 		return NULL;
1508 	}
1509 	arr = new_arr;
1510 
1511 	if (new_n > old_n)
1512 		memset(arr + old_n * size, 0, (new_n - old_n) * size);
1513 
1514 out:
1515 	return arr ? arr : ZERO_SIZE_PTR;
1516 }
1517 
1518 static int copy_reference_state(struct bpf_func_state *dst, const struct bpf_func_state *src)
1519 {
1520 	dst->refs = copy_array(dst->refs, src->refs, src->acquired_refs,
1521 			       sizeof(struct bpf_reference_state), GFP_KERNEL);
1522 	if (!dst->refs)
1523 		return -ENOMEM;
1524 
1525 	dst->acquired_refs = src->acquired_refs;
1526 	return 0;
1527 }
1528 
1529 static int copy_stack_state(struct bpf_func_state *dst, const struct bpf_func_state *src)
1530 {
1531 	size_t n = src->allocated_stack / BPF_REG_SIZE;
1532 
1533 	dst->stack = copy_array(dst->stack, src->stack, n, sizeof(struct bpf_stack_state),
1534 				GFP_KERNEL);
1535 	if (!dst->stack)
1536 		return -ENOMEM;
1537 
1538 	dst->allocated_stack = src->allocated_stack;
1539 	return 0;
1540 }
1541 
1542 static int resize_reference_state(struct bpf_func_state *state, size_t n)
1543 {
1544 	state->refs = realloc_array(state->refs, state->acquired_refs, n,
1545 				    sizeof(struct bpf_reference_state));
1546 	if (!state->refs)
1547 		return -ENOMEM;
1548 
1549 	state->acquired_refs = n;
1550 	return 0;
1551 }
1552 
1553 static int grow_stack_state(struct bpf_func_state *state, int size)
1554 {
1555 	size_t old_n = state->allocated_stack / BPF_REG_SIZE, n = size / BPF_REG_SIZE;
1556 
1557 	if (old_n >= n)
1558 		return 0;
1559 
1560 	state->stack = realloc_array(state->stack, old_n, n, sizeof(struct bpf_stack_state));
1561 	if (!state->stack)
1562 		return -ENOMEM;
1563 
1564 	state->allocated_stack = size;
1565 	return 0;
1566 }
1567 
1568 /* Acquire a pointer id from the env and update the state->refs to include
1569  * this new pointer reference.
1570  * On success, returns a valid pointer id to associate with the register
1571  * On failure, returns a negative errno.
1572  */
1573 static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx)
1574 {
1575 	struct bpf_func_state *state = cur_func(env);
1576 	int new_ofs = state->acquired_refs;
1577 	int id, err;
1578 
1579 	err = resize_reference_state(state, state->acquired_refs + 1);
1580 	if (err)
1581 		return err;
1582 	id = ++env->id_gen;
1583 	state->refs[new_ofs].id = id;
1584 	state->refs[new_ofs].insn_idx = insn_idx;
1585 	state->refs[new_ofs].callback_ref = state->in_callback_fn ? state->frameno : 0;
1586 
1587 	return id;
1588 }
1589 
1590 /* release function corresponding to acquire_reference_state(). Idempotent. */
1591 static int release_reference_state(struct bpf_func_state *state, int ptr_id)
1592 {
1593 	int i, last_idx;
1594 
1595 	last_idx = state->acquired_refs - 1;
1596 	for (i = 0; i < state->acquired_refs; i++) {
1597 		if (state->refs[i].id == ptr_id) {
1598 			/* Cannot release caller references in callbacks */
1599 			if (state->in_callback_fn && state->refs[i].callback_ref != state->frameno)
1600 				return -EINVAL;
1601 			if (last_idx && i != last_idx)
1602 				memcpy(&state->refs[i], &state->refs[last_idx],
1603 				       sizeof(*state->refs));
1604 			memset(&state->refs[last_idx], 0, sizeof(*state->refs));
1605 			state->acquired_refs--;
1606 			return 0;
1607 		}
1608 	}
1609 	return -EINVAL;
1610 }
1611 
1612 static void free_func_state(struct bpf_func_state *state)
1613 {
1614 	if (!state)
1615 		return;
1616 	kfree(state->refs);
1617 	kfree(state->stack);
1618 	kfree(state);
1619 }
1620 
1621 static void clear_jmp_history(struct bpf_verifier_state *state)
1622 {
1623 	kfree(state->jmp_history);
1624 	state->jmp_history = NULL;
1625 	state->jmp_history_cnt = 0;
1626 }
1627 
1628 static void free_verifier_state(struct bpf_verifier_state *state,
1629 				bool free_self)
1630 {
1631 	int i;
1632 
1633 	for (i = 0; i <= state->curframe; i++) {
1634 		free_func_state(state->frame[i]);
1635 		state->frame[i] = NULL;
1636 	}
1637 	clear_jmp_history(state);
1638 	if (free_self)
1639 		kfree(state);
1640 }
1641 
1642 /* copy verifier state from src to dst growing dst stack space
1643  * when necessary to accommodate larger src stack
1644  */
1645 static int copy_func_state(struct bpf_func_state *dst,
1646 			   const struct bpf_func_state *src)
1647 {
1648 	int err;
1649 
1650 	memcpy(dst, src, offsetof(struct bpf_func_state, acquired_refs));
1651 	err = copy_reference_state(dst, src);
1652 	if (err)
1653 		return err;
1654 	return copy_stack_state(dst, src);
1655 }
1656 
1657 static int copy_verifier_state(struct bpf_verifier_state *dst_state,
1658 			       const struct bpf_verifier_state *src)
1659 {
1660 	struct bpf_func_state *dst;
1661 	int i, err;
1662 
1663 	dst_state->jmp_history = copy_array(dst_state->jmp_history, src->jmp_history,
1664 					    src->jmp_history_cnt, sizeof(struct bpf_idx_pair),
1665 					    GFP_USER);
1666 	if (!dst_state->jmp_history)
1667 		return -ENOMEM;
1668 	dst_state->jmp_history_cnt = src->jmp_history_cnt;
1669 
1670 	/* if dst has more stack frames than src, free them */
1671 	for (i = src->curframe + 1; i <= dst_state->curframe; i++) {
1672 		free_func_state(dst_state->frame[i]);
1673 		dst_state->frame[i] = NULL;
1674 	}
1675 	dst_state->speculative = src->speculative;
1676 	dst_state->active_rcu_lock = src->active_rcu_lock;
1677 	dst_state->curframe = src->curframe;
1678 	dst_state->active_lock.ptr = src->active_lock.ptr;
1679 	dst_state->active_lock.id = src->active_lock.id;
1680 	dst_state->branches = src->branches;
1681 	dst_state->parent = src->parent;
1682 	dst_state->first_insn_idx = src->first_insn_idx;
1683 	dst_state->last_insn_idx = src->last_insn_idx;
1684 	for (i = 0; i <= src->curframe; i++) {
1685 		dst = dst_state->frame[i];
1686 		if (!dst) {
1687 			dst = kzalloc(sizeof(*dst), GFP_KERNEL);
1688 			if (!dst)
1689 				return -ENOMEM;
1690 			dst_state->frame[i] = dst;
1691 		}
1692 		err = copy_func_state(dst, src->frame[i]);
1693 		if (err)
1694 			return err;
1695 	}
1696 	return 0;
1697 }
1698 
1699 static void update_branch_counts(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
1700 {
1701 	while (st) {
1702 		u32 br = --st->branches;
1703 
1704 		/* WARN_ON(br > 1) technically makes sense here,
1705 		 * but see comment in push_stack(), hence:
1706 		 */
1707 		WARN_ONCE((int)br < 0,
1708 			  "BUG update_branch_counts:branches_to_explore=%d\n",
1709 			  br);
1710 		if (br)
1711 			break;
1712 		st = st->parent;
1713 	}
1714 }
1715 
1716 static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx,
1717 		     int *insn_idx, bool pop_log)
1718 {
1719 	struct bpf_verifier_state *cur = env->cur_state;
1720 	struct bpf_verifier_stack_elem *elem, *head = env->head;
1721 	int err;
1722 
1723 	if (env->head == NULL)
1724 		return -ENOENT;
1725 
1726 	if (cur) {
1727 		err = copy_verifier_state(cur, &head->st);
1728 		if (err)
1729 			return err;
1730 	}
1731 	if (pop_log)
1732 		bpf_vlog_reset(&env->log, head->log_pos);
1733 	if (insn_idx)
1734 		*insn_idx = head->insn_idx;
1735 	if (prev_insn_idx)
1736 		*prev_insn_idx = head->prev_insn_idx;
1737 	elem = head->next;
1738 	free_verifier_state(&head->st, false);
1739 	kfree(head);
1740 	env->head = elem;
1741 	env->stack_size--;
1742 	return 0;
1743 }
1744 
1745 static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
1746 					     int insn_idx, int prev_insn_idx,
1747 					     bool speculative)
1748 {
1749 	struct bpf_verifier_state *cur = env->cur_state;
1750 	struct bpf_verifier_stack_elem *elem;
1751 	int err;
1752 
1753 	elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL);
1754 	if (!elem)
1755 		goto err;
1756 
1757 	elem->insn_idx = insn_idx;
1758 	elem->prev_insn_idx = prev_insn_idx;
1759 	elem->next = env->head;
1760 	elem->log_pos = env->log.end_pos;
1761 	env->head = elem;
1762 	env->stack_size++;
1763 	err = copy_verifier_state(&elem->st, cur);
1764 	if (err)
1765 		goto err;
1766 	elem->st.speculative |= speculative;
1767 	if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) {
1768 		verbose(env, "The sequence of %d jumps is too complex.\n",
1769 			env->stack_size);
1770 		goto err;
1771 	}
1772 	if (elem->st.parent) {
1773 		++elem->st.parent->branches;
1774 		/* WARN_ON(branches > 2) technically makes sense here,
1775 		 * but
1776 		 * 1. speculative states will bump 'branches' for non-branch
1777 		 * instructions
1778 		 * 2. is_state_visited() heuristics may decide not to create
1779 		 * a new state for a sequence of branches and all such current
1780 		 * and cloned states will be pointing to a single parent state
1781 		 * which might have large 'branches' count.
1782 		 */
1783 	}
1784 	return &elem->st;
1785 err:
1786 	free_verifier_state(env->cur_state, true);
1787 	env->cur_state = NULL;
1788 	/* pop all elements and return */
1789 	while (!pop_stack(env, NULL, NULL, false));
1790 	return NULL;
1791 }
1792 
1793 #define CALLER_SAVED_REGS 6
1794 static const int caller_saved[CALLER_SAVED_REGS] = {
1795 	BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
1796 };
1797 
1798 /* This helper doesn't clear reg->id */
1799 static void ___mark_reg_known(struct bpf_reg_state *reg, u64 imm)
1800 {
1801 	reg->var_off = tnum_const(imm);
1802 	reg->smin_value = (s64)imm;
1803 	reg->smax_value = (s64)imm;
1804 	reg->umin_value = imm;
1805 	reg->umax_value = imm;
1806 
1807 	reg->s32_min_value = (s32)imm;
1808 	reg->s32_max_value = (s32)imm;
1809 	reg->u32_min_value = (u32)imm;
1810 	reg->u32_max_value = (u32)imm;
1811 }
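/* For instance (illustrative), ___mark_reg_known(reg, 5) leaves reg->id
 * untouched but sets var_off to the constant tnum 5 and collapses all
 * four bound pairs (s64/u64 and s32/u32 min/max) to exactly 5.
 */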
1812 
1813 /* Mark the unknown part of a register (variable offset or scalar value) as
1814  * known to have the value @imm.
1815  */
1816 static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm)
1817 {
1818 	/* Clear off and union(map_ptr, range) */
1819 	memset(((u8 *)reg) + sizeof(reg->type), 0,
1820 	       offsetof(struct bpf_reg_state, var_off) - sizeof(reg->type));
1821 	reg->id = 0;
1822 	reg->ref_obj_id = 0;
1823 	___mark_reg_known(reg, imm);
1824 }
1825 
1826 static void __mark_reg32_known(struct bpf_reg_state *reg, u64 imm)
1827 {
1828 	reg->var_off = tnum_const_subreg(reg->var_off, imm);
1829 	reg->s32_min_value = (s32)imm;
1830 	reg->s32_max_value = (s32)imm;
1831 	reg->u32_min_value = (u32)imm;
1832 	reg->u32_max_value = (u32)imm;
1833 }
1834 
1835 /* Mark the 'variable offset' part of a register as zero.  This should be
1836  * used only on registers holding a pointer type.
1837  */
1838 static void __mark_reg_known_zero(struct bpf_reg_state *reg)
1839 {
1840 	__mark_reg_known(reg, 0);
1841 }
1842 
1843 static void __mark_reg_const_zero(struct bpf_reg_state *reg)
1844 {
1845 	__mark_reg_known(reg, 0);
1846 	reg->type = SCALAR_VALUE;
1847 }
1848 
1849 static void mark_reg_known_zero(struct bpf_verifier_env *env,
1850 				struct bpf_reg_state *regs, u32 regno)
1851 {
1852 	if (WARN_ON(regno >= MAX_BPF_REG)) {
1853 		verbose(env, "mark_reg_known_zero(regs, %u)\n", regno);
1854 		/* Something bad happened, let's kill all regs */
1855 		for (regno = 0; regno < MAX_BPF_REG; regno++)
1856 			__mark_reg_not_init(env, regs + regno);
1857 		return;
1858 	}
1859 	__mark_reg_known_zero(regs + regno);
1860 }
1861 
1862 static void __mark_dynptr_reg(struct bpf_reg_state *reg, enum bpf_dynptr_type type,
1863 			      bool first_slot, int dynptr_id)
1864 {
1865 	/* reg->type has no meaning for STACK_DYNPTR, but when we set reg for
1866 	 * callback arguments, it does need to be CONST_PTR_TO_DYNPTR, so simply
1867 	 * set it unconditionally as it is ignored for STACK_DYNPTR anyway.
1868 	 */
1869 	__mark_reg_known_zero(reg);
1870 	reg->type = CONST_PTR_TO_DYNPTR;
1871 	/* Give each dynptr a unique id to uniquely associate slices to it. */
1872 	reg->id = dynptr_id;
1873 	reg->dynptr.type = type;
1874 	reg->dynptr.first_slot = first_slot;
1875 }
1876 
1877 static void mark_ptr_not_null_reg(struct bpf_reg_state *reg)
1878 {
1879 	if (base_type(reg->type) == PTR_TO_MAP_VALUE) {
1880 		const struct bpf_map *map = reg->map_ptr;
1881 
1882 		if (map->inner_map_meta) {
1883 			reg->type = CONST_PTR_TO_MAP;
1884 			reg->map_ptr = map->inner_map_meta;
1885 			/* transfer reg's id, which is unique for every map_lookup_elem,
1886 			 * as the UID of the inner map.
1887 			 */
1888 			if (btf_record_has_field(map->inner_map_meta->record, BPF_TIMER))
1889 				reg->map_uid = reg->id;
1890 		} else if (map->map_type == BPF_MAP_TYPE_XSKMAP) {
1891 			reg->type = PTR_TO_XDP_SOCK;
1892 		} else if (map->map_type == BPF_MAP_TYPE_SOCKMAP ||
1893 			   map->map_type == BPF_MAP_TYPE_SOCKHASH) {
1894 			reg->type = PTR_TO_SOCKET;
1895 		} else {
1896 			reg->type = PTR_TO_MAP_VALUE;
1897 		}
1898 		return;
1899 	}
1900 
1901 	reg->type &= ~PTR_MAYBE_NULL;
1902 }
1903 
1904 static void mark_reg_graph_node(struct bpf_reg_state *regs, u32 regno,
1905 				struct btf_field_graph_root *ds_head)
1906 {
1907 	__mark_reg_known_zero(&regs[regno]);
1908 	regs[regno].type = PTR_TO_BTF_ID | MEM_ALLOC;
1909 	regs[regno].btf = ds_head->btf;
1910 	regs[regno].btf_id = ds_head->value_btf_id;
1911 	regs[regno].off = ds_head->node_offset;
1912 }
1913 
1914 static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg)
1915 {
1916 	return type_is_pkt_pointer(reg->type);
1917 }
1918 
1919 static bool reg_is_pkt_pointer_any(const struct bpf_reg_state *reg)
1920 {
1921 	return reg_is_pkt_pointer(reg) ||
1922 	       reg->type == PTR_TO_PACKET_END;
1923 }
1924 
1925 static bool reg_is_dynptr_slice_pkt(const struct bpf_reg_state *reg)
1926 {
1927 	return base_type(reg->type) == PTR_TO_MEM &&
1928 		(reg->type & DYNPTR_TYPE_SKB || reg->type & DYNPTR_TYPE_XDP);
1929 }
1930 
1931 /* Unmodified PTR_TO_PACKET[_META,_END] register from ctx access. */
1932 static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg,
1933 				    enum bpf_reg_type which)
1934 {
1935 	/* The register can already have a range from prior markings.
1936 	 * This is fine as long as it hasn't been advanced from its
1937 	 * origin.
1938 	 */
1939 	return reg->type == which &&
1940 	       reg->id == 0 &&
1941 	       reg->off == 0 &&
1942 	       tnum_equals_const(reg->var_off, 0);
1943 }
1944 
1945 /* Reset the min/max bounds of a register */
1946 static void __mark_reg_unbounded(struct bpf_reg_state *reg)
1947 {
1948 	reg->smin_value = S64_MIN;
1949 	reg->smax_value = S64_MAX;
1950 	reg->umin_value = 0;
1951 	reg->umax_value = U64_MAX;
1952 
1953 	reg->s32_min_value = S32_MIN;
1954 	reg->s32_max_value = S32_MAX;
1955 	reg->u32_min_value = 0;
1956 	reg->u32_max_value = U32_MAX;
1957 }
1958 
1959 static void __mark_reg64_unbounded(struct bpf_reg_state *reg)
1960 {
1961 	reg->smin_value = S64_MIN;
1962 	reg->smax_value = S64_MAX;
1963 	reg->umin_value = 0;
1964 	reg->umax_value = U64_MAX;
1965 }
1966 
1967 static void __mark_reg32_unbounded(struct bpf_reg_state *reg)
1968 {
1969 	reg->s32_min_value = S32_MIN;
1970 	reg->s32_max_value = S32_MAX;
1971 	reg->u32_min_value = 0;
1972 	reg->u32_max_value = U32_MAX;
1973 }
1974 
1975 static void __update_reg32_bounds(struct bpf_reg_state *reg)
1976 {
1977 	struct tnum var32_off = tnum_subreg(reg->var_off);
1978 
1979 	/* min signed is max(sign bit) | min(other bits) */
1980 	reg->s32_min_value = max_t(s32, reg->s32_min_value,
1981 			var32_off.value | (var32_off.mask & S32_MIN));
1982 	/* max signed is min(sign bit) | max(other bits) */
1983 	reg->s32_max_value = min_t(s32, reg->s32_max_value,
1984 			var32_off.value | (var32_off.mask & S32_MAX));
1985 	reg->u32_min_value = max_t(u32, reg->u32_min_value, (u32)var32_off.value);
1986 	reg->u32_max_value = min(reg->u32_max_value,
1987 				 (u32)(var32_off.value | var32_off.mask));
1988 }
1989 
1990 static void __update_reg64_bounds(struct bpf_reg_state *reg)
1991 {
1992 	/* min signed is max(sign bit) | min(other bits) */
1993 	reg->smin_value = max_t(s64, reg->smin_value,
1994 				reg->var_off.value | (reg->var_off.mask & S64_MIN));
1995 	/* max signed is min(sign bit) | max(other bits) */
1996 	reg->smax_value = min_t(s64, reg->smax_value,
1997 				reg->var_off.value | (reg->var_off.mask & S64_MAX));
1998 	reg->umin_value = max(reg->umin_value, reg->var_off.value);
1999 	reg->umax_value = min(reg->umax_value,
2000 			      reg->var_off.value | reg->var_off.mask);
2001 }
2002 
2003 static void __update_reg_bounds(struct bpf_reg_state *reg)
2004 {
2005 	__update_reg32_bounds(reg);
2006 	__update_reg64_bounds(reg);
2007 }
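/* Worked example for the helpers above (illustrative values): if
 * reg->var_off is (value=0x0, mask=0xff), i.e. the low 8 bits are unknown
 * and all higher bits are known zero, __update_reg64_bounds() caps
 * umax_value at value | mask = 0xff and, since the sign bit is known
 * zero, also caps smax_value at 0xff and raises smin_value to at least 0.
 */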
2008 
2009 /* Uses signed min/max values to inform unsigned, and vice-versa */
2010 static void __reg32_deduce_bounds(struct bpf_reg_state *reg)
2011 {
2012 	/* Learn sign from signed bounds.
2013 	 * If we cannot cross the sign boundary, then signed and unsigned bounds
2014 	 * are the same, so combine.  This works even in the negative case, e.g.
2015 	 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
2016 	 */
2017 	if (reg->s32_min_value >= 0 || reg->s32_max_value < 0) {
2018 		reg->s32_min_value = reg->u32_min_value =
2019 			max_t(u32, reg->s32_min_value, reg->u32_min_value);
2020 		reg->s32_max_value = reg->u32_max_value =
2021 			min_t(u32, reg->s32_max_value, reg->u32_max_value);
2022 		return;
2023 	}
2024 	/* Learn sign from unsigned bounds.  Signed bounds cross the sign
2025 	 * boundary, so we must be careful.
2026 	 */
2027 	if ((s32)reg->u32_max_value >= 0) {
2028 		/* Positive.  We can't learn anything from the smin, but smax
2029 		 * is positive, hence safe.
2030 		 */
2031 		reg->s32_min_value = reg->u32_min_value;
2032 		reg->s32_max_value = reg->u32_max_value =
2033 			min_t(u32, reg->s32_max_value, reg->u32_max_value);
2034 	} else if ((s32)reg->u32_min_value < 0) {
2035 		/* Negative.  We can't learn anything from the smax, but smin
2036 		 * is negative, hence safe.
2037 		 */
2038 		reg->s32_min_value = reg->u32_min_value =
2039 			max_t(u32, reg->s32_min_value, reg->u32_min_value);
2040 		reg->s32_max_value = reg->u32_max_value;
2041 	}
2042 }
2043 
2044 static void __reg64_deduce_bounds(struct bpf_reg_state *reg)
2045 {
2046 	/* Learn sign from signed bounds.
2047 	 * If we cannot cross the sign boundary, then signed and unsigned bounds
2048 	 * are the same, so combine.  This works even in the negative case, e.g.
2049 	 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
2050 	 */
2051 	if (reg->smin_value >= 0 || reg->smax_value < 0) {
2052 		reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
2053 							  reg->umin_value);
2054 		reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
2055 							  reg->umax_value);
2056 		return;
2057 	}
2058 	/* Learn sign from unsigned bounds.  Signed bounds cross the sign
2059 	 * boundary, so we must be careful.
2060 	 */
2061 	if ((s64)reg->umax_value >= 0) {
2062 		/* Positive.  We can't learn anything from the smin, but smax
2063 		 * is positive, hence safe.
2064 		 */
2065 		reg->smin_value = reg->umin_value;
2066 		reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
2067 							  reg->umax_value);
2068 	} else if ((s64)reg->umin_value < 0) {
2069 		/* Negative.  We can't learn anything from the smax, but smin
2070 		 * is negative, hence safe.
2071 		 */
2072 		reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
2073 							  reg->umin_value);
2074 		reg->smax_value = reg->umax_value;
2075 	}
2076 }
2077 
2078 static void __reg_deduce_bounds(struct bpf_reg_state *reg)
2079 {
2080 	__reg32_deduce_bounds(reg);
2081 	__reg64_deduce_bounds(reg);
2082 }
2083 
2084 /* Attempts to improve var_off based on unsigned min/max information */
2085 static void __reg_bound_offset(struct bpf_reg_state *reg)
2086 {
2087 	struct tnum var64_off = tnum_intersect(reg->var_off,
2088 					       tnum_range(reg->umin_value,
2089 							  reg->umax_value));
2090 	struct tnum var32_off = tnum_intersect(tnum_subreg(var64_off),
2091 					       tnum_range(reg->u32_min_value,
2092 							  reg->u32_max_value));
2093 
2094 	reg->var_off = tnum_or(tnum_clear_subreg(var64_off), var32_off);
2095 }
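/* __reg_bound_offset() works in the opposite direction of
 * __update_reg_bounds(): range knowledge refines the tnum. E.g.
 * (illustrative) if umin_value == umax_value == 16, tnum_range() yields
 * the constant tnum 16 and, assuming the old var_off and the 32-bit
 * bounds were consistent with that value, the intersection collapses
 * var_off to the constant 16 as well.
 */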
2096 
2097 static void reg_bounds_sync(struct bpf_reg_state *reg)
2098 {
2099 	/* We might have learned new bounds from the var_off. */
2100 	__update_reg_bounds(reg);
2101 	/* We might have learned something about the sign bit. */
2102 	__reg_deduce_bounds(reg);
2103 	/* We might have learned some bits from the bounds. */
2104 	__reg_bound_offset(reg);
2105 	/* Intersecting with the old var_off might have improved our bounds
2106 	 * slightly, e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
2107 	 * then new var_off is (0; 0x7f...fc) which improves our umax.
2108 	 */
2109 	__update_reg_bounds(reg);
2110 }
2111 
2112 static bool __reg32_bound_s64(s32 a)
2113 {
2114 	return a >= 0 && a <= S32_MAX;
2115 }
2116 
2117 static void __reg_assign_32_into_64(struct bpf_reg_state *reg)
2118 {
2119 	reg->umin_value = reg->u32_min_value;
2120 	reg->umax_value = reg->u32_max_value;
2121 
2122 	/* Attempt to pull the 32-bit signed bounds into the 64-bit bounds; they
2123 	 * must be non-negative, otherwise fall back to worst-case bounds and
2124 	 * refine later from the tnum.
2125 	 */
2126 	if (__reg32_bound_s64(reg->s32_min_value) &&
2127 	    __reg32_bound_s64(reg->s32_max_value)) {
2128 		reg->smin_value = reg->s32_min_value;
2129 		reg->smax_value = reg->s32_max_value;
2130 	} else {
2131 		reg->smin_value = 0;
2132 		reg->smax_value = U32_MAX;
2133 	}
2134 }
2135 
2136 static void __reg_combine_32_into_64(struct bpf_reg_state *reg)
2137 {
2138 	/* special case when the 64-bit register has its upper 32 bits
2139 	 * zeroed. Typically happens after a zext or <<32, >>32 sequence,
2140 	 * allowing us to use the 32-bit bounds directly.
2141 	 */
2142 	if (tnum_equals_const(tnum_clear_subreg(reg->var_off), 0)) {
2143 		__reg_assign_32_into_64(reg);
2144 	} else {
2145 		/* Otherwise the best we can do is push the lower 32-bit known and
2146 		 * unknown bits into the register (var_off set from jmp logic),
2147 		 * then learn as much as possible from the 64-bit tnum's
2148 		 * known and unknown bits. The previous smin/smax bounds are
2149 		 * invalid here because of the jmp32 compare, so mark them unknown
2150 		 * to keep them from impacting the tnum bounds calculation.
2151 		 */
2152 		__mark_reg64_unbounded(reg);
2153 	}
2154 	reg_bounds_sync(reg);
2155 }
2156 
2157 static bool __reg64_bound_s32(s64 a)
2158 {
2159 	return a >= S32_MIN && a <= S32_MAX;
2160 }
2161 
2162 static bool __reg64_bound_u32(u64 a)
2163 {
2164 	return a >= U32_MIN && a <= U32_MAX;
2165 }
2166 
2167 static void __reg_combine_64_into_32(struct bpf_reg_state *reg)
2168 {
2169 	__mark_reg32_unbounded(reg);
2170 	if (__reg64_bound_s32(reg->smin_value) && __reg64_bound_s32(reg->smax_value)) {
2171 		reg->s32_min_value = (s32)reg->smin_value;
2172 		reg->s32_max_value = (s32)reg->smax_value;
2173 	}
2174 	if (__reg64_bound_u32(reg->umin_value) && __reg64_bound_u32(reg->umax_value)) {
2175 		reg->u32_min_value = (u32)reg->umin_value;
2176 		reg->u32_max_value = (u32)reg->umax_value;
2177 	}
2178 	reg_bounds_sync(reg);
2179 }
2180 
2181 /* Mark a register as having a completely unknown (scalar) value. */
2182 static void __mark_reg_unknown(const struct bpf_verifier_env *env,
2183 			       struct bpf_reg_state *reg)
2184 {
2185 	/*
2186 	 * Clear type, off, and union(map_ptr, range) and
2187 	 * padding between 'type' and union
2188 	 */
2189 	memset(reg, 0, offsetof(struct bpf_reg_state, var_off));
2190 	reg->type = SCALAR_VALUE;
2191 	reg->id = 0;
2192 	reg->ref_obj_id = 0;
2193 	reg->var_off = tnum_unknown;
2194 	reg->frameno = 0;
2195 	reg->precise = !env->bpf_capable;
2196 	__mark_reg_unbounded(reg);
2197 }
2198 
2199 static void mark_reg_unknown(struct bpf_verifier_env *env,
2200 			     struct bpf_reg_state *regs, u32 regno)
2201 {
2202 	if (WARN_ON(regno >= MAX_BPF_REG)) {
2203 		verbose(env, "mark_reg_unknown(regs, %u)\n", regno);
2204 		/* Something bad happened, let's kill all regs except FP */
2205 		for (regno = 0; regno < BPF_REG_FP; regno++)
2206 			__mark_reg_not_init(env, regs + regno);
2207 		return;
2208 	}
2209 	__mark_reg_unknown(env, regs + regno);
2210 }
2211 
2212 static void __mark_reg_not_init(const struct bpf_verifier_env *env,
2213 				struct bpf_reg_state *reg)
2214 {
2215 	__mark_reg_unknown(env, reg);
2216 	reg->type = NOT_INIT;
2217 }
2218 
2219 static void mark_reg_not_init(struct bpf_verifier_env *env,
2220 			      struct bpf_reg_state *regs, u32 regno)
2221 {
2222 	if (WARN_ON(regno >= MAX_BPF_REG)) {
2223 		verbose(env, "mark_reg_not_init(regs, %u)\n", regno);
2224 		/* Something bad happened, let's kill all regs except FP */
2225 		for (regno = 0; regno < BPF_REG_FP; regno++)
2226 			__mark_reg_not_init(env, regs + regno);
2227 		return;
2228 	}
2229 	__mark_reg_not_init(env, regs + regno);
2230 }
2231 
2232 static void mark_btf_ld_reg(struct bpf_verifier_env *env,
2233 			    struct bpf_reg_state *regs, u32 regno,
2234 			    enum bpf_reg_type reg_type,
2235 			    struct btf *btf, u32 btf_id,
2236 			    enum bpf_type_flag flag)
2237 {
2238 	if (reg_type == SCALAR_VALUE) {
2239 		mark_reg_unknown(env, regs, regno);
2240 		return;
2241 	}
2242 	mark_reg_known_zero(env, regs, regno);
2243 	regs[regno].type = PTR_TO_BTF_ID | flag;
2244 	regs[regno].btf = btf;
2245 	regs[regno].btf_id = btf_id;
2246 }
2247 
2248 #define DEF_NOT_SUBREG	(0)
2249 static void init_reg_state(struct bpf_verifier_env *env,
2250 			   struct bpf_func_state *state)
2251 {
2252 	struct bpf_reg_state *regs = state->regs;
2253 	int i;
2254 
2255 	for (i = 0; i < MAX_BPF_REG; i++) {
2256 		mark_reg_not_init(env, regs, i);
2257 		regs[i].live = REG_LIVE_NONE;
2258 		regs[i].parent = NULL;
2259 		regs[i].subreg_def = DEF_NOT_SUBREG;
2260 	}
2261 
2262 	/* frame pointer */
2263 	regs[BPF_REG_FP].type = PTR_TO_STACK;
2264 	mark_reg_known_zero(env, regs, BPF_REG_FP);
2265 	regs[BPF_REG_FP].frameno = state->frameno;
2266 }
2267 
2268 #define BPF_MAIN_FUNC (-1)
2269 static void init_func_state(struct bpf_verifier_env *env,
2270 			    struct bpf_func_state *state,
2271 			    int callsite, int frameno, int subprogno)
2272 {
2273 	state->callsite = callsite;
2274 	state->frameno = frameno;
2275 	state->subprogno = subprogno;
2276 	state->callback_ret_range = tnum_range(0, 0);
2277 	init_reg_state(env, state);
2278 	mark_verifier_state_scratched(env);
2279 }
2280 
2281 /* Similar to push_stack(), but for async callbacks */
2282 static struct bpf_verifier_state *push_async_cb(struct bpf_verifier_env *env,
2283 						int insn_idx, int prev_insn_idx,
2284 						int subprog)
2285 {
2286 	struct bpf_verifier_stack_elem *elem;
2287 	struct bpf_func_state *frame;
2288 
2289 	elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL);
2290 	if (!elem)
2291 		goto err;
2292 
2293 	elem->insn_idx = insn_idx;
2294 	elem->prev_insn_idx = prev_insn_idx;
2295 	elem->next = env->head;
2296 	elem->log_pos = env->log.end_pos;
2297 	env->head = elem;
2298 	env->stack_size++;
2299 	if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) {
2300 		verbose(env,
2301 			"The sequence of %d jumps is too complex for async cb.\n",
2302 			env->stack_size);
2303 		goto err;
2304 	}
2305 	/* Unlike push_stack(), do not copy_verifier_state().
2306 	 * The caller state doesn't matter.
2307 	 * This is an async callback; it starts with a fresh stack.
2308 	 * Initialize it similarly to do_check_common().
2309 	 */
2310 	elem->st.branches = 1;
2311 	frame = kzalloc(sizeof(*frame), GFP_KERNEL);
2312 	if (!frame)
2313 		goto err;
2314 	init_func_state(env, frame,
2315 			BPF_MAIN_FUNC /* callsite */,
2316 			0 /* frameno within this callchain */,
2317 			subprog /* subprog number within this prog */);
2318 	elem->st.frame[0] = frame;
2319 	return &elem->st;
2320 err:
2321 	free_verifier_state(env->cur_state, true);
2322 	env->cur_state = NULL;
2323 	/* pop all elements and return */
2324 	while (!pop_stack(env, NULL, NULL, false));
2325 	return NULL;
2326 }
2327 
2328 
2329 enum reg_arg_type {
2330 	SRC_OP,		/* register is used as source operand */
2331 	DST_OP,		/* register is used as destination operand */
2332 	DST_OP_NO_MARK	/* same as above, check only, don't mark */
2333 };
2334 
2335 static int cmp_subprogs(const void *a, const void *b)
2336 {
2337 	return ((struct bpf_subprog_info *)a)->start -
2338 	       ((struct bpf_subprog_info *)b)->start;
2339 }
2340 
2341 static int find_subprog(struct bpf_verifier_env *env, int off)
2342 {
2343 	struct bpf_subprog_info *p;
2344 
2345 	p = bsearch(&off, env->subprog_info, env->subprog_cnt,
2346 		    sizeof(env->subprog_info[0]), cmp_subprogs);
2347 	if (!p)
2348 		return -ENOENT;
2349 	return p - env->subprog_info;
2350 
2351 }
2352 
2353 static int add_subprog(struct bpf_verifier_env *env, int off)
2354 {
2355 	int insn_cnt = env->prog->len;
2356 	int ret;
2357 
2358 	if (off >= insn_cnt || off < 0) {
2359 		verbose(env, "call to invalid destination\n");
2360 		return -EINVAL;
2361 	}
2362 	ret = find_subprog(env, off);
2363 	if (ret >= 0)
2364 		return ret;
2365 	if (env->subprog_cnt >= BPF_MAX_SUBPROGS) {
2366 		verbose(env, "too many subprograms\n");
2367 		return -E2BIG;
2368 	}
2369 	/* determine subprog starts. The end is one before the next starts */
2370 	env->subprog_info[env->subprog_cnt++].start = off;
2371 	sort(env->subprog_info, env->subprog_cnt,
2372 	     sizeof(env->subprog_info[0]), cmp_subprogs, NULL);
2373 	return env->subprog_cnt - 1;
2374 }
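/* Usage sketch for the two helpers above (illustrative offsets): the
 * entry function is registered with add_subprog(env, 0); each discovered
 * bpf-to-bpf call target, say insn offset 42, is then added with
 * add_subprog(env, 42). The array is re-sorted by 'start' after every
 * insertion, so find_subprog() can locate a subprog with a plain bsearch().
 */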
2375 
2376 #define MAX_KFUNC_DESCS 256
2377 #define MAX_KFUNC_BTFS	256
2378 
2379 struct bpf_kfunc_desc {
2380 	struct btf_func_model func_model;
2381 	u32 func_id;
2382 	s32 imm;
2383 	u16 offset;
2384 	unsigned long addr;
2385 };
2386 
2387 struct bpf_kfunc_btf {
2388 	struct btf *btf;
2389 	struct module *module;
2390 	u16 offset;
2391 };
2392 
2393 struct bpf_kfunc_desc_tab {
2394 	/* Sorted by func_id (BTF ID) and offset (fd_array offset) during
2395 	 * verification. JITs do lookups by bpf_insn, where func_id may not be
2396 	 * available, therefore at the end of verification do_misc_fixups()
2397 	 * sorts this by imm and offset.
2398 	 */
2399 	struct bpf_kfunc_desc descs[MAX_KFUNC_DESCS];
2400 	u32 nr_descs;
2401 };
2402 
2403 struct bpf_kfunc_btf_tab {
2404 	struct bpf_kfunc_btf descs[MAX_KFUNC_BTFS];
2405 	u32 nr_descs;
2406 };
2407 
2408 static int kfunc_desc_cmp_by_id_off(const void *a, const void *b)
2409 {
2410 	const struct bpf_kfunc_desc *d0 = a;
2411 	const struct bpf_kfunc_desc *d1 = b;
2412 
2413 	/* func_id is not greater than BTF_MAX_TYPE */
2414 	return d0->func_id - d1->func_id ?: d0->offset - d1->offset;
2415 }
2416 
2417 static int kfunc_btf_cmp_by_off(const void *a, const void *b)
2418 {
2419 	const struct bpf_kfunc_btf *d0 = a;
2420 	const struct bpf_kfunc_btf *d1 = b;
2421 
2422 	return d0->offset - d1->offset;
2423 }
2424 
2425 static const struct bpf_kfunc_desc *
2426 find_kfunc_desc(const struct bpf_prog *prog, u32 func_id, u16 offset)
2427 {
2428 	struct bpf_kfunc_desc desc = {
2429 		.func_id = func_id,
2430 		.offset = offset,
2431 	};
2432 	struct bpf_kfunc_desc_tab *tab;
2433 
2434 	tab = prog->aux->kfunc_tab;
2435 	return bsearch(&desc, tab->descs, tab->nr_descs,
2436 		       sizeof(tab->descs[0]), kfunc_desc_cmp_by_id_off);
2437 }
2438 
2439 int bpf_get_kfunc_addr(const struct bpf_prog *prog, u32 func_id,
2440 		       u16 btf_fd_idx, u8 **func_addr)
2441 {
2442 	const struct bpf_kfunc_desc *desc;
2443 
2444 	desc = find_kfunc_desc(prog, func_id, btf_fd_idx);
2445 	if (!desc)
2446 		return -EFAULT;
2447 
2448 	*func_addr = (u8 *)desc->addr;
2449 	return 0;
2450 }
2451 
2452 static struct btf *__find_kfunc_desc_btf(struct bpf_verifier_env *env,
2453 					 s16 offset)
2454 {
2455 	struct bpf_kfunc_btf kf_btf = { .offset = offset };
2456 	struct bpf_kfunc_btf_tab *tab;
2457 	struct bpf_kfunc_btf *b;
2458 	struct module *mod;
2459 	struct btf *btf;
2460 	int btf_fd;
2461 
2462 	tab = env->prog->aux->kfunc_btf_tab;
2463 	b = bsearch(&kf_btf, tab->descs, tab->nr_descs,
2464 		    sizeof(tab->descs[0]), kfunc_btf_cmp_by_off);
2465 	if (!b) {
2466 		if (tab->nr_descs == MAX_KFUNC_BTFS) {
2467 			verbose(env, "too many different module BTFs\n");
2468 			return ERR_PTR(-E2BIG);
2469 		}
2470 
2471 		if (bpfptr_is_null(env->fd_array)) {
2472 			verbose(env, "kfunc offset > 0 without fd_array is invalid\n");
2473 			return ERR_PTR(-EPROTO);
2474 		}
2475 
2476 		if (copy_from_bpfptr_offset(&btf_fd, env->fd_array,
2477 					    offset * sizeof(btf_fd),
2478 					    sizeof(btf_fd)))
2479 			return ERR_PTR(-EFAULT);
2480 
2481 		btf = btf_get_by_fd(btf_fd);
2482 		if (IS_ERR(btf)) {
2483 			verbose(env, "invalid module BTF fd specified\n");
2484 			return btf;
2485 		}
2486 
2487 		if (!btf_is_module(btf)) {
2488 			verbose(env, "BTF fd for kfunc is not a module BTF\n");
2489 			btf_put(btf);
2490 			return ERR_PTR(-EINVAL);
2491 		}
2492 
2493 		mod = btf_try_get_module(btf);
2494 		if (!mod) {
2495 			btf_put(btf);
2496 			return ERR_PTR(-ENXIO);
2497 		}
2498 
2499 		b = &tab->descs[tab->nr_descs++];
2500 		b->btf = btf;
2501 		b->module = mod;
2502 		b->offset = offset;
2503 
2504 		sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]),
2505 		     kfunc_btf_cmp_by_off, NULL);
2506 	}
2507 	return b->btf;
2508 }
2509 
2510 void bpf_free_kfunc_btf_tab(struct bpf_kfunc_btf_tab *tab)
2511 {
2512 	if (!tab)
2513 		return;
2514 
2515 	while (tab->nr_descs--) {
2516 		module_put(tab->descs[tab->nr_descs].module);
2517 		btf_put(tab->descs[tab->nr_descs].btf);
2518 	}
2519 	kfree(tab);
2520 }
2521 
2522 static struct btf *find_kfunc_desc_btf(struct bpf_verifier_env *env, s16 offset)
2523 {
2524 	if (offset) {
2525 		if (offset < 0) {
2526 			/* In the future, this could be allowed in order to increase
2527 			 * the limit of the fd index into fd_array, interpreted as a u16.
2528 			 */
2529 			verbose(env, "negative offset disallowed for kernel module function call\n");
2530 			return ERR_PTR(-EINVAL);
2531 		}
2532 
2533 		return __find_kfunc_desc_btf(env, offset);
2534 	}
2535 	return btf_vmlinux ?: ERR_PTR(-ENOENT);
2536 }
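/* Summary of the offset convention handled by find_kfunc_desc_btf() above:
 * offset == 0 means the kfunc lives in vmlinux BTF, a positive offset is an
 * index into the fd_array supplied at load time and resolves to a module
 * BTF (cached in kfunc_btf_tab), and negative offsets are rejected.
 */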
2537 
2538 static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id, s16 offset)
2539 {
2540 	const struct btf_type *func, *func_proto;
2541 	struct bpf_kfunc_btf_tab *btf_tab;
2542 	struct bpf_kfunc_desc_tab *tab;
2543 	struct bpf_prog_aux *prog_aux;
2544 	struct bpf_kfunc_desc *desc;
2545 	const char *func_name;
2546 	struct btf *desc_btf;
2547 	unsigned long call_imm;
2548 	unsigned long addr;
2549 	int err;
2550 
2551 	prog_aux = env->prog->aux;
2552 	tab = prog_aux->kfunc_tab;
2553 	btf_tab = prog_aux->kfunc_btf_tab;
2554 	if (!tab) {
2555 		if (!btf_vmlinux) {
2556 			verbose(env, "calling kernel function is not supported without CONFIG_DEBUG_INFO_BTF\n");
2557 			return -ENOTSUPP;
2558 		}
2559 
2560 		if (!env->prog->jit_requested) {
2561 			verbose(env, "JIT is required for calling kernel function\n");
2562 			return -ENOTSUPP;
2563 		}
2564 
2565 		if (!bpf_jit_supports_kfunc_call()) {
2566 			verbose(env, "JIT does not support calling kernel function\n");
2567 			return -ENOTSUPP;
2568 		}
2569 
2570 		if (!env->prog->gpl_compatible) {
2571 			verbose(env, "cannot call kernel function from non-GPL compatible program\n");
2572 			return -EINVAL;
2573 		}
2574 
2575 		tab = kzalloc(sizeof(*tab), GFP_KERNEL);
2576 		if (!tab)
2577 			return -ENOMEM;
2578 		prog_aux->kfunc_tab = tab;
2579 	}
2580 
2581 	/* func_id == 0 is always invalid, but instead of returning an error, be
2582 	 * conservative and wait until the code elimination pass before returning
2583 	 * an error, so that invalid calls that get pruned out can still appear in BPF
2584 	 * programs loaded from userspace.  It is also required that offset be untouched
2585 	 * for such calls.
2586 	 */
2587 	if (!func_id && !offset)
2588 		return 0;
2589 
2590 	if (!btf_tab && offset) {
2591 		btf_tab = kzalloc(sizeof(*btf_tab), GFP_KERNEL);
2592 		if (!btf_tab)
2593 			return -ENOMEM;
2594 		prog_aux->kfunc_btf_tab = btf_tab;
2595 	}
2596 
2597 	desc_btf = find_kfunc_desc_btf(env, offset);
2598 	if (IS_ERR(desc_btf)) {
2599 		verbose(env, "failed to find BTF for kernel function\n");
2600 		return PTR_ERR(desc_btf);
2601 	}
2602 
2603 	if (find_kfunc_desc(env->prog, func_id, offset))
2604 		return 0;
2605 
2606 	if (tab->nr_descs == MAX_KFUNC_DESCS) {
2607 		verbose(env, "too many different kernel function calls\n");
2608 		return -E2BIG;
2609 	}
2610 
2611 	func = btf_type_by_id(desc_btf, func_id);
2612 	if (!func || !btf_type_is_func(func)) {
2613 		verbose(env, "kernel btf_id %u is not a function\n",
2614 			func_id);
2615 		return -EINVAL;
2616 	}
2617 	func_proto = btf_type_by_id(desc_btf, func->type);
2618 	if (!func_proto || !btf_type_is_func_proto(func_proto)) {
2619 		verbose(env, "kernel function btf_id %u does not have a valid func_proto\n",
2620 			func_id);
2621 		return -EINVAL;
2622 	}
2623 
2624 	func_name = btf_name_by_offset(desc_btf, func->name_off);
2625 	addr = kallsyms_lookup_name(func_name);
2626 	if (!addr) {
2627 		verbose(env, "cannot find address for kernel function %s\n",
2628 			func_name);
2629 		return -EINVAL;
2630 	}
2631 	specialize_kfunc(env, func_id, offset, &addr);
2632 
2633 	if (bpf_jit_supports_far_kfunc_call()) {
2634 		call_imm = func_id;
2635 	} else {
2636 		call_imm = BPF_CALL_IMM(addr);
2637 		/* Check whether the relative offset overflows desc->imm */
2638 		if ((unsigned long)(s32)call_imm != call_imm) {
2639 			verbose(env, "address of kernel function %s is out of range\n",
2640 				func_name);
2641 			return -EINVAL;
2642 		}
2643 	}
2644 
2645 	if (bpf_dev_bound_kfunc_id(func_id)) {
2646 		err = bpf_dev_bound_kfunc_check(&env->log, prog_aux);
2647 		if (err)
2648 			return err;
2649 	}
2650 
2651 	desc = &tab->descs[tab->nr_descs++];
2652 	desc->func_id = func_id;
2653 	desc->imm = call_imm;
2654 	desc->offset = offset;
2655 	desc->addr = addr;
2656 	err = btf_distill_func_proto(&env->log, desc_btf,
2657 				     func_proto, func_name,
2658 				     &desc->func_model);
2659 	if (!err)
2660 		sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]),
2661 		     kfunc_desc_cmp_by_id_off, NULL);
2662 	return err;
2663 }
2664 
2665 static int kfunc_desc_cmp_by_imm_off(const void *a, const void *b)
2666 {
2667 	const struct bpf_kfunc_desc *d0 = a;
2668 	const struct bpf_kfunc_desc *d1 = b;
2669 
2670 	if (d0->imm != d1->imm)
2671 		return d0->imm < d1->imm ? -1 : 1;
2672 	if (d0->offset != d1->offset)
2673 		return d0->offset < d1->offset ? -1 : 1;
2674 	return 0;
2675 }
2676 
2677 static void sort_kfunc_descs_by_imm_off(struct bpf_prog *prog)
2678 {
2679 	struct bpf_kfunc_desc_tab *tab;
2680 
2681 	tab = prog->aux->kfunc_tab;
2682 	if (!tab)
2683 		return;
2684 
2685 	sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]),
2686 	     kfunc_desc_cmp_by_imm_off, NULL);
2687 }
2688 
2689 bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog)
2690 {
2691 	return !!prog->aux->kfunc_tab;
2692 }
2693 
2694 const struct btf_func_model *
2695 bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
2696 			 const struct bpf_insn *insn)
2697 {
2698 	const struct bpf_kfunc_desc desc = {
2699 		.imm = insn->imm,
2700 		.offset = insn->off,
2701 	};
2702 	const struct bpf_kfunc_desc *res;
2703 	struct bpf_kfunc_desc_tab *tab;
2704 
2705 	tab = prog->aux->kfunc_tab;
2706 	res = bsearch(&desc, tab->descs, tab->nr_descs,
2707 		      sizeof(tab->descs[0]), kfunc_desc_cmp_by_imm_off);
2708 
2709 	return res ? &res->func_model : NULL;
2710 }
2711 
2712 static int add_subprog_and_kfunc(struct bpf_verifier_env *env)
2713 {
2714 	struct bpf_subprog_info *subprog = env->subprog_info;
2715 	struct bpf_insn *insn = env->prog->insnsi;
2716 	int i, ret, insn_cnt = env->prog->len;
2717 
2718 	/* Add entry function. */
2719 	ret = add_subprog(env, 0);
2720 	if (ret)
2721 		return ret;
2722 
2723 	for (i = 0; i < insn_cnt; i++, insn++) {
2724 		if (!bpf_pseudo_func(insn) && !bpf_pseudo_call(insn) &&
2725 		    !bpf_pseudo_kfunc_call(insn))
2726 			continue;
2727 
2728 		if (!env->bpf_capable) {
2729 			verbose(env, "loading/calling other bpf or kernel functions are allowed for CAP_BPF and CAP_SYS_ADMIN\n");
2730 			return -EPERM;
2731 		}
2732 
2733 		if (bpf_pseudo_func(insn) || bpf_pseudo_call(insn))
2734 			ret = add_subprog(env, i + insn->imm + 1);
2735 		else
2736 			ret = add_kfunc_call(env, insn->imm, insn->off);
2737 
2738 		if (ret < 0)
2739 			return ret;
2740 	}
2741 
2742 	/* Add a fake 'exit' subprog which could simplify subprog iteration
2743 	 * logic. 'subprog_cnt' should not be increased.
2744 	 */
2745 	subprog[env->subprog_cnt].start = insn_cnt;
2746 
2747 	if (env->log.level & BPF_LOG_LEVEL2)
2748 		for (i = 0; i < env->subprog_cnt; i++)
2749 			verbose(env, "func#%d @%d\n", i, subprog[i].start);
2750 
2751 	return 0;
2752 }
2753 
2754 static int check_subprogs(struct bpf_verifier_env *env)
2755 {
2756 	int i, subprog_start, subprog_end, off, cur_subprog = 0;
2757 	struct bpf_subprog_info *subprog = env->subprog_info;
2758 	struct bpf_insn *insn = env->prog->insnsi;
2759 	int insn_cnt = env->prog->len;
2760 
2761 	/* now check that all jumps are within the same subprog */
2762 	subprog_start = subprog[cur_subprog].start;
2763 	subprog_end = subprog[cur_subprog + 1].start;
2764 	for (i = 0; i < insn_cnt; i++) {
2765 		u8 code = insn[i].code;
2766 
2767 		if (code == (BPF_JMP | BPF_CALL) &&
2768 		    insn[i].src_reg == 0 &&
2769 		    insn[i].imm == BPF_FUNC_tail_call)
2770 			subprog[cur_subprog].has_tail_call = true;
2771 		if (BPF_CLASS(code) == BPF_LD &&
2772 		    (BPF_MODE(code) == BPF_ABS || BPF_MODE(code) == BPF_IND))
2773 			subprog[cur_subprog].has_ld_abs = true;
2774 		if (BPF_CLASS(code) != BPF_JMP && BPF_CLASS(code) != BPF_JMP32)
2775 			goto next;
2776 		if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL)
2777 			goto next;
2778 		off = i + insn[i].off + 1;
2779 		if (off < subprog_start || off >= subprog_end) {
2780 			verbose(env, "jump out of range from insn %d to %d\n", i, off);
2781 			return -EINVAL;
2782 		}
2783 next:
2784 		if (i == subprog_end - 1) {
2785 			/* to avoid fall-through from one subprog into another
2786 			 * the last insn of the subprog should be either exit
2787 			 * or unconditional jump back
2788 			 */
2789 			if (code != (BPF_JMP | BPF_EXIT) &&
2790 			    code != (BPF_JMP | BPF_JA)) {
2791 				verbose(env, "last insn is not an exit or jmp\n");
2792 				return -EINVAL;
2793 			}
2794 			subprog_start = subprog_end;
2795 			cur_subprog++;
2796 			if (cur_subprog < env->subprog_cnt)
2797 				subprog_end = subprog[cur_subprog + 1].start;
2798 		}
2799 	}
2800 	return 0;
2801 }
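/* Layout enforced by check_subprogs() above, on a small example
 * (illustrative): with subprog starts {0, 10}, every conditional or
 * unconditional jump issued from insns 0..9 must land within 0..9 (calls
 * and exits are exempt), and insn 9, the last insn of subprog 0, must be
 * BPF_EXIT or an unconditional BPF_JA so execution cannot fall through
 * into subprog 1.
 */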
2802 
2803 /* Parentage chain of this register (or stack slot) should take care of all
2804  * issues like callee-saved registers, stack slot allocation time, etc.
2805  */
2806 static int mark_reg_read(struct bpf_verifier_env *env,
2807 			 const struct bpf_reg_state *state,
2808 			 struct bpf_reg_state *parent, u8 flag)
2809 {
2810 	bool writes = parent == state->parent; /* Observe write marks */
2811 	int cnt = 0;
2812 
2813 	while (parent) {
2814 		/* if read wasn't screened by an earlier write ... */
2815 		if (writes && state->live & REG_LIVE_WRITTEN)
2816 			break;
2817 		if (parent->live & REG_LIVE_DONE) {
2818 			verbose(env, "verifier BUG type %s var_off %lld off %d\n",
2819 				reg_type_str(env, parent->type),
2820 				parent->var_off.value, parent->off);
2821 			return -EFAULT;
2822 		}
2823 		/* The first condition is more likely to be true than the
2824 		 * second, so check it first.
2825 		 */
2826 		if ((parent->live & REG_LIVE_READ) == flag ||
2827 		    parent->live & REG_LIVE_READ64)
2828 			/* The parentage chain never changes and
2829 			 * this parent was already marked as LIVE_READ.
2830 			 * There is no need to keep walking the chain again and
2831 			 * keep re-marking all parents as LIVE_READ.
2832 			 * This case happens when the same register is read
2833 			 * multiple times without writes into it in-between.
2834 			 * Also, if parent has the stronger REG_LIVE_READ64 set,
2835 			 * then no need to set the weak REG_LIVE_READ32.
2836 			 */
2837 			break;
2838 		/* ... then we depend on parent's value */
2839 		parent->live |= flag;
2840 		/* REG_LIVE_READ64 overrides REG_LIVE_READ32. */
2841 		if (flag == REG_LIVE_READ64)
2842 			parent->live &= ~REG_LIVE_READ32;
2843 		state = parent;
2844 		parent = state->parent;
2845 		writes = true;
2846 		cnt++;
2847 	}
2848 
2849 	if (env->longest_mark_read_walk < cnt)
2850 		env->longest_mark_read_walk = cnt;
2851 	return 0;
2852 }
2853 
2854 static int mark_dynptr_read(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
2855 {
2856 	struct bpf_func_state *state = func(env, reg);
2857 	int spi, ret;
2858 
2859 	/* For CONST_PTR_TO_DYNPTR, it must have already been done by
2860 	 * check_reg_arg in check_helper_call and mark_btf_func_reg_size in
2861 	 * check_kfunc_call.
2862 	 */
2863 	if (reg->type == CONST_PTR_TO_DYNPTR)
2864 		return 0;
2865 	spi = dynptr_get_spi(env, reg);
2866 	if (spi < 0)
2867 		return spi;
2868 	/* Caller ensures dynptr is valid and initialized, which means spi is in
2869 	 * bounds and spi is the first dynptr slot. Simply mark stack slot as
2870 	 * read.
2871 	 */
2872 	ret = mark_reg_read(env, &state->stack[spi].spilled_ptr,
2873 			    state->stack[spi].spilled_ptr.parent, REG_LIVE_READ64);
2874 	if (ret)
2875 		return ret;
2876 	return mark_reg_read(env, &state->stack[spi - 1].spilled_ptr,
2877 			     state->stack[spi - 1].spilled_ptr.parent, REG_LIVE_READ64);
2878 }
2879 
2880 static int mark_iter_read(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
2881 			  int spi, int nr_slots)
2882 {
2883 	struct bpf_func_state *state = func(env, reg);
2884 	int err, i;
2885 
2886 	for (i = 0; i < nr_slots; i++) {
2887 		struct bpf_reg_state *st = &state->stack[spi - i].spilled_ptr;
2888 
2889 		err = mark_reg_read(env, st, st->parent, REG_LIVE_READ64);
2890 		if (err)
2891 			return err;
2892 
2893 		mark_stack_slot_scratched(env, spi - i);
2894 	}
2895 
2896 	return 0;
2897 }
2898 
2899 /* This function is supposed to be used by the following 32-bit optimization
2900  * code only. It returns TRUE if the source or destination register operates
2901  * on 64 bits, and FALSE otherwise.
2902  */
2903 static bool is_reg64(struct bpf_verifier_env *env, struct bpf_insn *insn,
2904 		     u32 regno, struct bpf_reg_state *reg, enum reg_arg_type t)
2905 {
2906 	u8 code, class, op;
2907 
2908 	code = insn->code;
2909 	class = BPF_CLASS(code);
2910 	op = BPF_OP(code);
2911 	if (class == BPF_JMP) {
2912 		/* BPF_EXIT for "main" will reach here. Return TRUE
2913 		 * conservatively.
2914 		 */
2915 		if (op == BPF_EXIT)
2916 			return true;
2917 		if (op == BPF_CALL) {
2918 			/* A BPF-to-BPF call will reach here because caller-saved
2919 			 * clobbers are marked with DST_OP_NO_MARK; we don't care
2920 			 * about the register def in that case since those registers
2921 			 * are marked as NOT_INIT already anyway.
2922 			 */
2923 			if (insn->src_reg == BPF_PSEUDO_CALL)
2924 				return false;
2925 			/* Helper call will reach here because of arg type
2926 			 * check, conservatively return TRUE.
2927 			 */
2928 			if (t == SRC_OP)
2929 				return true;
2930 
2931 			return false;
2932 		}
2933 	}
2934 
2935 	if (class == BPF_ALU64 || class == BPF_JMP ||
2936 	    /* BPF_END always uses BPF_ALU class. */
2937 	    (class == BPF_ALU && op == BPF_END && insn->imm == 64))
2938 		return true;
2939 
2940 	if (class == BPF_ALU || class == BPF_JMP32)
2941 		return false;
2942 
2943 	if (class == BPF_LDX) {
2944 		if (t != SRC_OP)
2945 			return BPF_SIZE(code) == BPF_DW;
2946 		/* LDX source must be ptr. */
2947 		return true;
2948 	}
2949 
2950 	if (class == BPF_STX) {
2951 		/* BPF_STX (including atomic variants) has multiple source
2952 		 * operands, one of which is a ptr. Check whether the caller is
2953 		 * asking about it.
2954 		 */
2955 		if (t == SRC_OP && reg->type != SCALAR_VALUE)
2956 			return true;
2957 		return BPF_SIZE(code) == BPF_DW;
2958 	}
2959 
2960 	if (class == BPF_LD) {
2961 		u8 mode = BPF_MODE(code);
2962 
2963 		/* LD_IMM64 */
2964 		if (mode == BPF_IMM)
2965 			return true;
2966 
2967 		/* Both LD_IND and LD_ABS return 32-bit data. */
2968 		if (t != SRC_OP)
2969 			return  false;
2970 
2971 		/* Implicit ctx ptr. */
2972 		if (regno == BPF_REG_6)
2973 			return true;
2974 
2975 		/* Explicit source could be any width. */
2976 		return true;
2977 	}
2978 
2979 	if (class == BPF_ST)
2980 		/* The only source register for BPF_ST is a ptr. */
2981 		return true;
2982 
2983 	/* Conservatively return true at default. */
2984 	return true;
2985 }
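/* A couple of concrete classifications by is_reg64() above (illustrative):
 * BPF_ALU64 | BPF_ADD | BPF_X defines the full 64-bit dst and reports true,
 * while BPF_ALU | BPF_ADD | BPF_K only defines the low 32 bits and reports
 * false; insn_has_def32() below relies on exactly this distinction to find
 * instructions whose result may later need an explicit zero-extension.
 */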
2986 
2987 /* Return the regno defined by the insn, or -1. */
2988 static int insn_def_regno(const struct bpf_insn *insn)
2989 {
2990 	switch (BPF_CLASS(insn->code)) {
2991 	case BPF_JMP:
2992 	case BPF_JMP32:
2993 	case BPF_ST:
2994 		return -1;
2995 	case BPF_STX:
2996 		if (BPF_MODE(insn->code) == BPF_ATOMIC &&
2997 		    (insn->imm & BPF_FETCH)) {
2998 			if (insn->imm == BPF_CMPXCHG)
2999 				return BPF_REG_0;
3000 			else
3001 				return insn->src_reg;
3002 		} else {
3003 			return -1;
3004 		}
3005 	default:
3006 		return insn->dst_reg;
3007 	}
3008 }
3009 
3010 /* Return TRUE if INSN has defined any 32-bit value explicitly. */
3011 static bool insn_has_def32(struct bpf_verifier_env *env, struct bpf_insn *insn)
3012 {
3013 	int dst_reg = insn_def_regno(insn);
3014 
3015 	if (dst_reg == -1)
3016 		return false;
3017 
3018 	return !is_reg64(env, insn, dst_reg, NULL, DST_OP);
3019 }
3020 
3021 static void mark_insn_zext(struct bpf_verifier_env *env,
3022 			   struct bpf_reg_state *reg)
3023 {
3024 	s32 def_idx = reg->subreg_def;
3025 
3026 	if (def_idx == DEF_NOT_SUBREG)
3027 		return;
3028 
3029 	env->insn_aux_data[def_idx - 1].zext_dst = true;
3030 	/* The dst will be zero extended, so won't be sub-register anymore. */
3031 	reg->subreg_def = DEF_NOT_SUBREG;
3032 }
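/* How the pieces above fit together (summary of the visible logic): when an
 * instruction defines only a 32-bit subregister, check_reg_arg() below
 * records subreg_def = insn_idx + 1 for the destination. If that register
 * is later read as a full 64-bit value, mark_insn_zext() flags the defining
 * instruction's zext_dst so a zero-extension can be patched in when needed,
 * and resets subreg_def to DEF_NOT_SUBREG.
 */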
3033 
3034 static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
3035 			 enum reg_arg_type t)
3036 {
3037 	struct bpf_verifier_state *vstate = env->cur_state;
3038 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
3039 	struct bpf_insn *insn = env->prog->insnsi + env->insn_idx;
3040 	struct bpf_reg_state *reg, *regs = state->regs;
3041 	bool rw64;
3042 
3043 	if (regno >= MAX_BPF_REG) {
3044 		verbose(env, "R%d is invalid\n", regno);
3045 		return -EINVAL;
3046 	}
3047 
3048 	mark_reg_scratched(env, regno);
3049 
3050 	reg = &regs[regno];
3051 	rw64 = is_reg64(env, insn, regno, reg, t);
3052 	if (t == SRC_OP) {
3053 		/* check whether register used as source operand can be read */
3054 		if (reg->type == NOT_INIT) {
3055 			verbose(env, "R%d !read_ok\n", regno);
3056 			return -EACCES;
3057 		}
3058 		/* We don't need to worry about FP liveness because it's read-only */
3059 		if (regno == BPF_REG_FP)
3060 			return 0;
3061 
3062 		if (rw64)
3063 			mark_insn_zext(env, reg);
3064 
3065 		return mark_reg_read(env, reg, reg->parent,
3066 				     rw64 ? REG_LIVE_READ64 : REG_LIVE_READ32);
3067 	} else {
3068 		/* check whether register used as dest operand can be written to */
3069 		if (regno == BPF_REG_FP) {
3070 			verbose(env, "frame pointer is read only\n");
3071 			return -EACCES;
3072 		}
3073 		reg->live |= REG_LIVE_WRITTEN;
3074 		reg->subreg_def = rw64 ? DEF_NOT_SUBREG : env->insn_idx + 1;
3075 		if (t == DST_OP)
3076 			mark_reg_unknown(env, regs, regno);
3077 	}
3078 	return 0;
3079 }
3080 
3081 static void mark_jmp_point(struct bpf_verifier_env *env, int idx)
3082 {
3083 	env->insn_aux_data[idx].jmp_point = true;
3084 }
3085 
3086 static bool is_jmp_point(struct bpf_verifier_env *env, int insn_idx)
3087 {
3088 	return env->insn_aux_data[insn_idx].jmp_point;
3089 }
3090 
3091 /* for any branch, call, exit record the history of jmps in the given state */
3092 static int push_jmp_history(struct bpf_verifier_env *env,
3093 			    struct bpf_verifier_state *cur)
3094 {
3095 	u32 cnt = cur->jmp_history_cnt;
3096 	struct bpf_idx_pair *p;
3097 	size_t alloc_size;
3098 
3099 	if (!is_jmp_point(env, env->insn_idx))
3100 		return 0;
3101 
3102 	cnt++;
3103 	alloc_size = kmalloc_size_roundup(size_mul(cnt, sizeof(*p)));
3104 	p = krealloc(cur->jmp_history, alloc_size, GFP_USER);
3105 	if (!p)
3106 		return -ENOMEM;
3107 	p[cnt - 1].idx = env->insn_idx;
3108 	p[cnt - 1].prev_idx = env->prev_insn_idx;
3109 	cur->jmp_history = p;
3110 	cur->jmp_history_cnt = cnt;
3111 	return 0;
3112 }
3113 
3114 /* Backtrack one insn at a time. If idx is not at the top of the recorded
3115  * history, then the previous instruction came from straight-line execution.
3116  */
3117 static int get_prev_insn_idx(struct bpf_verifier_state *st, int i,
3118 			     u32 *history)
3119 {
3120 	u32 cnt = *history;
3121 
3122 	if (cnt && st->jmp_history[cnt - 1].idx == i) {
3123 		i = st->jmp_history[cnt - 1].prev_idx;
3124 		(*history)--;
3125 	} else {
3126 		i--;
3127 	}
3128 	return i;
3129 }
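/* Backtracking walk example using the two helpers above (illustrative
 * indices): suppose jmp_history holds {idx=12, prev_idx=5} as its newest
 * entry. While scanning backwards from insn 12, get_prev_insn_idx() matches
 * that entry, returns 5 and consumes it by decrementing *history; when the
 * current insn does not match the newest entry, it simply steps to i - 1
 * (straight-line execution).
 */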
3130 
3131 static const char *disasm_kfunc_name(void *data, const struct bpf_insn *insn)
3132 {
3133 	const struct btf_type *func;
3134 	struct btf *desc_btf;
3135 
3136 	if (insn->src_reg != BPF_PSEUDO_KFUNC_CALL)
3137 		return NULL;
3138 
3139 	desc_btf = find_kfunc_desc_btf(data, insn->off);
3140 	if (IS_ERR(desc_btf))
3141 		return "<error>";
3142 
3143 	func = btf_type_by_id(desc_btf, insn->imm);
3144 	return btf_name_by_offset(desc_btf, func->name_off);
3145 }
3146 
3147 /* For a given verifier state, backtrack_insn() is called from the last insn to
3148  * the first insn. Its purpose is to compute a bitmask of registers and
3149  * stack slots that need precision in the parent verifier state.
3150  */
3151 static int backtrack_insn(struct bpf_verifier_env *env, int idx,
3152 			  u32 *reg_mask, u64 *stack_mask)
3153 {
3154 	const struct bpf_insn_cbs cbs = {
3155 		.cb_call	= disasm_kfunc_name,
3156 		.cb_print	= verbose,
3157 		.private_data	= env,
3158 	};
3159 	struct bpf_insn *insn = env->prog->insnsi + idx;
3160 	u8 class = BPF_CLASS(insn->code);
3161 	u8 opcode = BPF_OP(insn->code);
3162 	u8 mode = BPF_MODE(insn->code);
3163 	u32 dreg = 1u << insn->dst_reg;
3164 	u32 sreg = 1u << insn->src_reg;
3165 	u32 spi;
3166 
3167 	if (insn->code == 0)
3168 		return 0;
3169 	if (env->log.level & BPF_LOG_LEVEL2) {
3170 		verbose(env, "regs=%x stack=%llx before ", *reg_mask, *stack_mask);
3171 		verbose(env, "%d: ", idx);
3172 		print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
3173 	}
3174 
3175 	if (class == BPF_ALU || class == BPF_ALU64) {
3176 		if (!(*reg_mask & dreg))
3177 			return 0;
3178 		if (opcode == BPF_MOV) {
3179 			if (BPF_SRC(insn->code) == BPF_X) {
3180 				/* dreg = sreg
3181 				 * dreg needs precision after this insn
3182 				 * sreg needs precision before this insn
3183 				 */
3184 				*reg_mask &= ~dreg;
3185 				*reg_mask |= sreg;
3186 			} else {
3187 				/* dreg = K
3188 				 * dreg needs precision after this insn.
3189 				 * Corresponding register is already marked
3190 				 * as precise=true in this verifier state.
3191 				 * No further markings in parent are necessary
3192 				 */
3193 				*reg_mask &= ~dreg;
3194 			}
3195 		} else {
3196 			if (BPF_SRC(insn->code) == BPF_X) {
3197 				/* dreg += sreg
3198 				 * both dreg and sreg need precision
3199 				 * before this insn
3200 				 */
3201 				*reg_mask |= sreg;
3202 			} /* else dreg += K
3203 			   * dreg still needs precision before this insn
3204 			   */
3205 		}
3206 	} else if (class == BPF_LDX) {
3207 		if (!(*reg_mask & dreg))
3208 			return 0;
3209 		*reg_mask &= ~dreg;
3210 
3211 		/* scalars can only be spilled into stack w/o losing precision.
3212 		 * Load from any other memory can be zero extended.
3213 		 * The desire to keep that precision is already indicated
3214 		 * by 'precise' mark in corresponding register of this state.
3215 		 * No further tracking necessary.
3216 		 */
3217 		if (insn->src_reg != BPF_REG_FP)
3218 			return 0;
3219 
3220 		/* dreg = *(u64 *)[fp - off] was a fill from the stack.
3221 		 * that [fp - off] slot contains a scalar that needs to be
3222 		 * tracked with precision
3223 		 */
3224 		spi = (-insn->off - 1) / BPF_REG_SIZE;
3225 		if (spi >= 64) {
3226 			verbose(env, "BUG spi %d\n", spi);
3227 			WARN_ONCE(1, "verifier backtracking bug");
3228 			return -EFAULT;
3229 		}
3230 		*stack_mask |= 1ull << spi;
3231 	} else if (class == BPF_STX || class == BPF_ST) {
3232 		if (*reg_mask & dreg)
3233 			/* stx & st shouldn't be using _scalar_ dst_reg
3234 			 * to access memory. It means backtracking
3235 			 * encountered a case of pointer subtraction.
3236 			 */
3237 			return -ENOTSUPP;
3238 		/* scalars can only be spilled into stack */
3239 		if (insn->dst_reg != BPF_REG_FP)
3240 			return 0;
3241 		spi = (-insn->off - 1) / BPF_REG_SIZE;
3242 		if (spi >= 64) {
3243 			verbose(env, "BUG spi %d\n", spi);
3244 			WARN_ONCE(1, "verifier backtracking bug");
3245 			return -EFAULT;
3246 		}
3247 		if (!(*stack_mask & (1ull << spi)))
3248 			return 0;
3249 		*stack_mask &= ~(1ull << spi);
3250 		if (class == BPF_STX)
3251 			*reg_mask |= sreg;
3252 	} else if (class == BPF_JMP || class == BPF_JMP32) {
3253 		if (opcode == BPF_CALL) {
3254 			if (insn->src_reg == BPF_PSEUDO_CALL)
3255 				return -ENOTSUPP;
3256 			/* BPF helpers that invoke callback subprogs are
3257 			 * equivalent to BPF_PSEUDO_CALL above
3258 			 */
3259 			if (insn->src_reg == 0 && is_callback_calling_function(insn->imm))
3260 				return -ENOTSUPP;
3261 			/* kfunc with imm==0 is invalid and fixup_kfunc_call will
3262 			 * catch this error later. Make backtracking conservative
3263 			 * with ENOTSUPP.
3264 			 */
3265 			if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL && insn->imm == 0)
3266 				return -ENOTSUPP;
3267 			/* regular helper call sets R0 */
3268 			*reg_mask &= ~1;
3269 			if (*reg_mask & 0x3f) {
3270 				/* if backtracking was looking for registers R1-R5
3271 				 * they should have been found already.
3272 				 */
3273 				verbose(env, "BUG regs %x\n", *reg_mask);
3274 				WARN_ONCE(1, "verifier backtracking bug");
3275 				return -EFAULT;
3276 			}
3277 		} else if (opcode == BPF_EXIT) {
3278 			return -ENOTSUPP;
3279 		} else if (BPF_SRC(insn->code) == BPF_X) {
3280 			if (!(*reg_mask & (dreg | sreg)))
3281 				return 0;
3282 			/* dreg <cond> sreg
3283 			 * Both dreg and sreg need precision before
3284 			 * this insn. If only sreg was marked precise
3285 			 * before it would be equally necessary to
3286 			 * propagate it to dreg.
3287 			 */
3288 			*reg_mask |= (sreg | dreg);
3289 			 /* else dreg <cond> K
3290 			  * Only dreg still needs precision before
3291 			  * this insn, so for the K-based conditional
3292 			  * there is nothing new to be marked.
3293 			  */
3294 		}
3295 	} else if (class == BPF_LD) {
3296 		if (!(*reg_mask & dreg))
3297 			return 0;
3298 		*reg_mask &= ~dreg;
3299 		/* It's ld_imm64 or ld_abs or ld_ind.
3300 		 * For ld_imm64 no further tracking of precision
3301 		 * into parent is necessary
3302 		 */
3303 		if (mode == BPF_IND || mode == BPF_ABS)
3304 			/* to be analyzed */
3305 			return -ENOTSUPP;
3306 	}
3307 	return 0;
3308 }
3309 
3310 /* the scalar precision tracking algorithm:
3311  * . at the start all registers have precise=false.
3312  * . scalar ranges are tracked as normal through alu and jmp insns.
3313  * . once precise value of the scalar register is used in:
3314  *   .  ptr + scalar alu
3315  *   . if (scalar cond K|scalar)
3316  *   .  helper_call(.., scalar, ...) where ARG_CONST is expected
3317  *   backtrack through the verifier states and mark all registers and
3318  *   stack slots with spilled constants that these scalar registers
3319  *   were derived from as precise.
3320  * . during state pruning two registers (or spilled stack slots)
3321  *   are equivalent if both are not precise.
3322  *
3323  * Note the verifier cannot simply walk the register parentage chain,
3324  * since many different registers and stack slots could have been
3325  * used to compute a single precise scalar.
3326  *
3327  * The approach of starting with precise=true for all registers and then
3328  * backtrack to mark a register as not precise when the verifier detects
3329  * that program doesn't care about specific value (e.g., when helper
3330  * that the program doesn't care about a specific value (e.g., when a helper
3331  * takes the register as an ARG_ANYTHING parameter) is not safe.
3332  * It's ok to walk single parentage chain of the verifier states.
3333  * It's possible that this backtracking will go all the way till 1st insn.
3334  * All other branches will be explored for needing precision later.
3335  *
3336  * The backtracking needs to deal with cases like:
3337  *   R8=map_value(id=0,off=0,ks=4,vs=1952,imm=0) R9_w=map_value(id=0,off=40,ks=4,vs=1952,imm=0)
3338  * r9 -= r8
3339  * r5 = r9
3340  * if r5 > 0x79f goto pc+7
3341  *    R5_w=inv(id=0,umax_value=1951,var_off=(0x0; 0x7ff))
3342  * r5 += 1
3343  * ...
3344  * call bpf_perf_event_output#25
3345  *   where .arg5_type = ARG_CONST_SIZE_OR_ZERO
3346  *
3347  * and this case:
3348  * r6 = 1
3349  * call foo // uses callee's r6 inside to compute r0
3350  * r0 += r6
3351  * if r0 == 0 goto
3352  *
3353  * to track above reg_mask/stack_mask needs to be independent for each frame.
3354  *
3355  * Also if parent's curframe > frame where backtracking started,
3356  * the verifier needs to mark registers in both frames, otherwise callees
3357  * may incorrectly prune callers. This is similar to
3358  * commit 7640ead93924 ("bpf: verifier: make sure callees don't prune with caller differences")
3359  *
3360  * For now backtracking falls back into conservative marking.
3361  */
3362 static void mark_all_scalars_precise(struct bpf_verifier_env *env,
3363 				     struct bpf_verifier_state *st)
3364 {
3365 	struct bpf_func_state *func;
3366 	struct bpf_reg_state *reg;
3367 	int i, j;
3368 
3369 	/* big hammer: mark all scalars precise in this path.
3370 	 * pop_stack may still get !precise scalars.
3371 	 * We also skip current state and go straight to first parent state,
3372 	 * because precision markings in current non-checkpointed state are
3373 	 * not needed. See why in the comment in __mark_chain_precision below.
3374 	 */
3375 	for (st = st->parent; st; st = st->parent) {
3376 		for (i = 0; i <= st->curframe; i++) {
3377 			func = st->frame[i];
3378 			for (j = 0; j < BPF_REG_FP; j++) {
3379 				reg = &func->regs[j];
3380 				if (reg->type != SCALAR_VALUE)
3381 					continue;
3382 				reg->precise = true;
3383 			}
3384 			for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) {
3385 				if (!is_spilled_reg(&func->stack[j]))
3386 					continue;
3387 				reg = &func->stack[j].spilled_ptr;
3388 				if (reg->type != SCALAR_VALUE)
3389 					continue;
3390 				reg->precise = true;
3391 			}
3392 		}
3393 	}
3394 }
3395 
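/* Editorial note: forget all precise markings inherited from the parent
 * state. As discussed in the comment above __mark_chain_precision() further
 * down, this is done right before a state is checkpointed and branches off,
 * so that the finalized state is as permissive (and thus as reusable for
 * pruning) as possible.
 */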
3396 static void mark_all_scalars_imprecise(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
3397 {
3398 	struct bpf_func_state *func;
3399 	struct bpf_reg_state *reg;
3400 	int i, j;
3401 
3402 	for (i = 0; i <= st->curframe; i++) {
3403 		func = st->frame[i];
3404 		for (j = 0; j < BPF_REG_FP; j++) {
3405 			reg = &func->regs[j];
3406 			if (reg->type != SCALAR_VALUE)
3407 				continue;
3408 			reg->precise = false;
3409 		}
3410 		for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) {
3411 			if (!is_spilled_reg(&func->stack[j]))
3412 				continue;
3413 			reg = &func->stack[j].spilled_ptr;
3414 			if (reg->type != SCALAR_VALUE)
3415 				continue;
3416 			reg->precise = false;
3417 		}
3418 	}
3419 }
3420 
3421 /*
3422  * __mark_chain_precision() backtracks BPF program instruction sequence and
3423  * chain of verifier states making sure that register *regno* (if regno >= 0)
3424  * and/or stack slot *spi* (if spi >= 0) are marked as precisely tracked
3425  * SCALARS, as well as any other registers and slots that contribute to
3426  * a tracked state of given registers/stack slots, depending on specific BPF
3427  * assembly instructions (see backtrack_insns() for exact instruction handling
3428  * logic). This backtracking relies on recorded jmp_history and is able to
3429  * traverse entire chain of parent states. This process ends only when all the
3430  * necessary registers/slots and their transitive dependencies are marked as
3431  * precise.
3432  *
3433  * One important and subtle aspect is that precise marks *do not matter* in
3434  * the currently verified state (current state). It is important to understand
3435  * why this is the case.
3436  *
3437  * First, note that current state is the state that is not yet "checkpointed",
3438  * i.e., it is not yet put into env->explored_states, and it has no children
3439  * states as well. It's ephemeral, and can end up either a) being discarded if
3440  * compatible explored state is found at some point or BPF_EXIT instruction is
3441  * reached or b) checkpointed and put into env->explored_states, branching out
3442  * into one or more children states.
3443  *
3444  * In the former case, precise markings in current state are completely
3445  * ignored by state comparison code (see regsafe() for details). Only
3446  * checkpointed ("old") state precise markings are important, and if old
3447  * state's register/slot is precise, regsafe() assumes current state's
3448  * register/slot as precise and checks value ranges exactly and precisely. If
3449  * states turn out to be compatible, current state's necessary precise
3450  * markings and any required parent states' precise markings are enforced
3451  * after the fact with propagate_precision() logic. But it's
3452  * important to realize that in this case, even after marking current state
3453  * registers/slots as precise, we immediately discard current state. So what
3454  * actually matters is any of the precise markings propagated into current
3455  * state's parent states, which are always checkpointed (due to b) case above).
3456  * As such, for scenario a) it doesn't matter if current state has precise
3457  * markings set or not.
3458  *
3459  * Now, for the scenario b), checkpointing and forking into child(ren)
3460  * state(s). Note that before current state gets to checkpointing step, any
3461  * processed instruction always assumes precise SCALAR register/slot
3462  * knowledge: if precise value or range is useful to prune jump branch, BPF
3463  * verifier takes this opportunity enthusiastically. Similarly, when
3464  * register's value is used to calculate offset or memory address, exact
3465  * knowledge of SCALAR range is assumed, checked, and enforced. So, similar to
3466  * what we mentioned above about state comparison ignoring precise markings
3467  * during state comparison, BPF verifier ignores and also assumes precise
3468  * markings *at will* during instruction verification process. But as verifier
3469  * assumes precision, it also propagates any precision dependencies across
3470  * parent states, which are not yet finalized, so can be further restricted
3471  * based on new knowledge gained from restrictions enforced by their children
3472  * states. This is so that once those parent states are finalized, i.e., when
3473  * they have no more active children state, state comparison logic in
3474  * is_state_visited() would enforce strict and precise SCALAR ranges, if
3475  * required for correctness.
3476  *
3477  * To build a bit more intuition, note also that once a state is checkpointed,
3478  * the path we took to get to that state is not important. This is a crucial
3479  * property for state pruning. When state is checkpointed and finalized at
3480  * some instruction index, it can be correctly and safely used to "short
3481  * circuit" any *compatible* state that reaches exactly the same instruction
3482  * index. I.e., if we jumped to that instruction from a completely different
3483  * code path than original finalized state was derived from, it doesn't
3484  * matter, current state can be discarded because from that instruction
3485  * forward having a compatible state will ensure we will safely reach the
3486  * exit. States describe preconditions for further exploration, but completely
3487  * forget the history of how we got here.
3488  *
3489  * This also means that even if we needed precise SCALAR range to get to
3490  * finalized state, but from that point forward *that same* SCALAR register is
3491  * never used in a precise context (i.e., its precise value is not needed for
3492  * correctness), it's correct and safe to mark such register as "imprecise"
3493  * (i.e., precise marking set to false). This is what we rely on when we do
3494  * not set precise marking in current state. If no child state requires
3495  * precision for any given SCALAR register, it's safe to dictate that it can
3496  * be imprecise. If any child state does require this register to be precise,
3497  * we'll mark it precise later retroactively during precise markings
3498  * propagation from child state to parent states.
3499  *
3500  * Skipping the setting of precise marks in the current state is a mild version of
3501  * relying on the above observation. But we can utilize this property even
3502  * more aggressively by proactively forgetting any precise marking in the
3503  * current state (which we inherited from the parent state), right before we
3504  * checkpoint it and branch off into new child state. This is done by
3505  * mark_all_scalars_imprecise() to hopefully get more permissive and generic
3506  * finalized states which help in short circuiting more future states.
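 *
 * A compressed, editor-added sketch of a typical backtracking pass
 * (instruction indices and registers are purely illustrative):
 *   10: r6 = *(u64 *)(r10 -8)   ; fill from stack slot fp-8
 *   11: r7 = r6
 *   12: r7 += 2
 *   13: r1 += r7                ; ptr += scalar, r7 must be precise
 * Backtracking starts at insn 13 with reg_mask containing r7; insn 12
 * keeps r7 in the mask, insn 11 replaces r7 with r6, and insn 10 replaces
 * r6 with stack slot fp-8 in stack_mask. The walk then continues into
 * parent states (via jmp_history and st->parent) until the constant
 * source of that slot is found and everything on the way is marked
 * precise.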
3507  */
3508 static int __mark_chain_precision(struct bpf_verifier_env *env, int frame, int regno,
3509 				  int spi)
3510 {
3511 	struct bpf_verifier_state *st = env->cur_state;
3512 	int first_idx = st->first_insn_idx;
3513 	int last_idx = env->insn_idx;
3514 	struct bpf_func_state *func;
3515 	struct bpf_reg_state *reg;
3516 	u32 reg_mask = regno >= 0 ? 1u << regno : 0;
3517 	u64 stack_mask = spi >= 0 ? 1ull << spi : 0;
3518 	bool skip_first = true;
3519 	bool new_marks = false;
3520 	int i, err;
3521 
3522 	if (!env->bpf_capable)
3523 		return 0;
3524 
3525 	/* Do sanity checks against current state of register and/or stack
3526 	 * slot, but don't set precise flag in current state, as precision
3527 	 * tracking in the current state is unnecessary.
3528 	 */
3529 	func = st->frame[frame];
3530 	if (regno >= 0) {
3531 		reg = &func->regs[regno];
3532 		if (reg->type != SCALAR_VALUE) {
3533 			WARN_ONCE(1, "backtracking misuse");
3534 			return -EFAULT;
3535 		}
3536 		new_marks = true;
3537 	}
3538 
3539 	while (spi >= 0) {
3540 		if (!is_spilled_reg(&func->stack[spi])) {
3541 			stack_mask = 0;
3542 			break;
3543 		}
3544 		reg = &func->stack[spi].spilled_ptr;
3545 		if (reg->type != SCALAR_VALUE) {
3546 			stack_mask = 0;
3547 			break;
3548 		}
3549 		new_marks = true;
3550 		break;
3551 	}
3552 
3553 	if (!new_marks)
3554 		return 0;
3555 	if (!reg_mask && !stack_mask)
3556 		return 0;
3557 
3558 	for (;;) {
3559 		DECLARE_BITMAP(mask, 64);
3560 		u32 history = st->jmp_history_cnt;
3561 
3562 		if (env->log.level & BPF_LOG_LEVEL2)
3563 			verbose(env, "last_idx %d first_idx %d\n", last_idx, first_idx);
3564 
3565 		if (last_idx < 0) {
3566 			/* we are at the entry into subprog, which
3567 			 * is expected for global funcs, but only if
3568 			 * requested precise registers are R1-R5
3569 			 * (which are global func's input arguments)
3570 			 */
3571 			if (st->curframe == 0 &&
3572 			    st->frame[0]->subprogno > 0 &&
3573 			    st->frame[0]->callsite == BPF_MAIN_FUNC &&
3574 			    stack_mask == 0 && (reg_mask & ~0x3e) == 0) {
3575 				bitmap_from_u64(mask, reg_mask);
3576 				for_each_set_bit(i, mask, 32) {
3577 					reg = &st->frame[0]->regs[i];
3578 					if (reg->type != SCALAR_VALUE) {
3579 						reg_mask &= ~(1u << i);
3580 						continue;
3581 					}
3582 					reg->precise = true;
3583 				}
3584 				return 0;
3585 			}
3586 
3587 			verbose(env, "BUG backtracing func entry subprog %d reg_mask %x stack_mask %llx\n",
3588 				st->frame[0]->subprogno, reg_mask, stack_mask);
3589 			WARN_ONCE(1, "verifier backtracking bug");
3590 			return -EFAULT;
3591 		}
3592 
3593 		for (i = last_idx;;) {
3594 			if (skip_first) {
3595 				err = 0;
3596 				skip_first = false;
3597 			} else {
3598 				err = backtrack_insn(env, i, &reg_mask, &stack_mask);
3599 			}
3600 			if (err == -ENOTSUPP) {
3601 				mark_all_scalars_precise(env, st);
3602 				return 0;
3603 			} else if (err) {
3604 				return err;
3605 			}
3606 			if (!reg_mask && !stack_mask)
3607 				/* Found assignment(s) into tracked register in this state.
3608 				 * Since this state is already marked, just return.
3609 				 * Nothing to be tracked further in the parent state.
3610 				 */
3611 				return 0;
3612 			if (i == first_idx)
3613 				break;
3614 			i = get_prev_insn_idx(st, i, &history);
3615 			if (i >= env->prog->len) {
3616 				/* This can happen if backtracking reached insn 0
3617 				 * and there are still reg_mask or stack_mask
3618 				 * to backtrack.
3619 				 * It means the backtracking missed the spot where
3620 				 * particular register was initialized with a constant.
3621 				 */
3622 				verbose(env, "BUG backtracking idx %d\n", i);
3623 				WARN_ONCE(1, "verifier backtracking bug");
3624 				return -EFAULT;
3625 			}
3626 		}
3627 		st = st->parent;
3628 		if (!st)
3629 			break;
3630 
3631 		new_marks = false;
3632 		func = st->frame[frame];
3633 		bitmap_from_u64(mask, reg_mask);
3634 		for_each_set_bit(i, mask, 32) {
3635 			reg = &func->regs[i];
3636 			if (reg->type != SCALAR_VALUE) {
3637 				reg_mask &= ~(1u << i);
3638 				continue;
3639 			}
3640 			if (!reg->precise)
3641 				new_marks = true;
3642 			reg->precise = true;
3643 		}
3644 
3645 		bitmap_from_u64(mask, stack_mask);
3646 		for_each_set_bit(i, mask, 64) {
3647 			if (i >= func->allocated_stack / BPF_REG_SIZE) {
3648 				/* the sequence of instructions:
3649 				 * 2: (bf) r3 = r10
3650 				 * 3: (7b) *(u64 *)(r3 -8) = r0
3651 				 * 4: (79) r4 = *(u64 *)(r10 -8)
3652 				 * doesn't contain jmps. It's backtracked
3653 				 * as a single block.
3654 				 * During backtracking insn 3 is not recognized as
3655 				 * stack access, so at the end of backtracking
3656 				 * stack slot fp-8 is still marked in stack_mask.
3657 				 * However the parent state may not have accessed
3658 				 * fp-8 and it's "unallocated" stack space.
3659 				 * In such a case, fall back to conservative marking.
3660 				 */
3661 				mark_all_scalars_precise(env, st);
3662 				return 0;
3663 			}
3664 
3665 			if (!is_spilled_reg(&func->stack[i])) {
3666 				stack_mask &= ~(1ull << i);
3667 				continue;
3668 			}
3669 			reg = &func->stack[i].spilled_ptr;
3670 			if (reg->type != SCALAR_VALUE) {
3671 				stack_mask &= ~(1ull << i);
3672 				continue;
3673 			}
3674 			if (!reg->precise)
3675 				new_marks = true;
3676 			reg->precise = true;
3677 		}
3678 		if (env->log.level & BPF_LOG_LEVEL2) {
3679 			verbose(env, "parent %s regs=%x stack=%llx marks:",
3680 				new_marks ? "didn't have" : "already had",
3681 				reg_mask, stack_mask);
3682 			print_verifier_state(env, func, true);
3683 		}
3684 
3685 		if (!reg_mask && !stack_mask)
3686 			break;
3687 		if (!new_marks)
3688 			break;
3689 
3690 		last_idx = st->last_insn_idx;
3691 		first_idx = st->first_insn_idx;
3692 	}
3693 	return 0;
3694 }
3695 
3696 int mark_chain_precision(struct bpf_verifier_env *env, int regno)
3697 {
3698 	return __mark_chain_precision(env, env->cur_state->curframe, regno, -1);
3699 }
3700 
3701 static int mark_chain_precision_frame(struct bpf_verifier_env *env, int frame, int regno)
3702 {
3703 	return __mark_chain_precision(env, frame, regno, -1);
3704 }
3705 
3706 static int mark_chain_precision_stack_frame(struct bpf_verifier_env *env, int frame, int spi)
3707 {
3708 	return __mark_chain_precision(env, frame, -1, spi);
3709 }
3710 
3711 static bool is_spillable_regtype(enum bpf_reg_type type)
3712 {
3713 	switch (base_type(type)) {
3714 	case PTR_TO_MAP_VALUE:
3715 	case PTR_TO_STACK:
3716 	case PTR_TO_CTX:
3717 	case PTR_TO_PACKET:
3718 	case PTR_TO_PACKET_META:
3719 	case PTR_TO_PACKET_END:
3720 	case PTR_TO_FLOW_KEYS:
3721 	case CONST_PTR_TO_MAP:
3722 	case PTR_TO_SOCKET:
3723 	case PTR_TO_SOCK_COMMON:
3724 	case PTR_TO_TCP_SOCK:
3725 	case PTR_TO_XDP_SOCK:
3726 	case PTR_TO_BTF_ID:
3727 	case PTR_TO_BUF:
3728 	case PTR_TO_MEM:
3729 	case PTR_TO_FUNC:
3730 	case PTR_TO_MAP_KEY:
3731 		return true;
3732 	default:
3733 		return false;
3734 	}
3735 }
3736 
3737 /* Does this register contain a constant zero? */
3738 static bool register_is_null(struct bpf_reg_state *reg)
3739 {
3740 	return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0);
3741 }
3742 
3743 static bool register_is_const(struct bpf_reg_state *reg)
3744 {
3745 	return reg->type == SCALAR_VALUE && tnum_is_const(reg->var_off);
3746 }
3747 
3748 static bool __is_scalar_unbounded(struct bpf_reg_state *reg)
3749 {
3750 	return tnum_is_unknown(reg->var_off) &&
3751 	       reg->smin_value == S64_MIN && reg->smax_value == S64_MAX &&
3752 	       reg->umin_value == 0 && reg->umax_value == U64_MAX &&
3753 	       reg->s32_min_value == S32_MIN && reg->s32_max_value == S32_MAX &&
3754 	       reg->u32_min_value == 0 && reg->u32_max_value == U32_MAX;
3755 }
3756 
3757 static bool register_is_bounded(struct bpf_reg_state *reg)
3758 {
3759 	return reg->type == SCALAR_VALUE && !__is_scalar_unbounded(reg);
3760 }
3761 
3762 static bool __is_pointer_value(bool allow_ptr_leaks,
3763 			       const struct bpf_reg_state *reg)
3764 {
3765 	if (allow_ptr_leaks)
3766 		return false;
3767 
3768 	return reg->type != SCALAR_VALUE;
3769 }
3770 
3771 /* Copy src state preserving dst->parent and dst->live fields */
3772 static void copy_register_state(struct bpf_reg_state *dst, const struct bpf_reg_state *src)
3773 {
3774 	struct bpf_reg_state *parent = dst->parent;
3775 	enum bpf_reg_liveness live = dst->live;
3776 
3777 	*dst = *src;
3778 	dst->parent = parent;
3779 	dst->live = live;
3780 }
3781 
3782 static void save_register_state(struct bpf_func_state *state,
3783 				int spi, struct bpf_reg_state *reg,
3784 				int size)
3785 {
3786 	int i;
3787 
3788 	copy_register_state(&state->stack[spi].spilled_ptr, reg);
3789 	if (size == BPF_REG_SIZE)
3790 		state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
3791 
3792 	for (i = BPF_REG_SIZE; i > BPF_REG_SIZE - size; i--)
3793 		state->stack[spi].slot_type[i - 1] = STACK_SPILL;
3794 
3795 	/* size < 8 bytes spill */
3796 	for (; i; i--)
3797 		scrub_spilled_slot(&state->stack[spi].slot_type[i - 1]);
3798 }
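/* Editorial illustration: for a 4-byte spill handled by save_register_state()
 * above, the eight slot_type[] entries of the affected stack slot end up
 * roughly as
 *	{ MISC, MISC, MISC, MISC, SPILL, SPILL, SPILL, SPILL }
 * i.e. the top 'size' bytes become STACK_SPILL and the remaining bytes are
 * scrubbed via scrub_spilled_slot() (STACK_MISC, or left as STACK_INVALID).
 * REG_LIVE_WRITTEN is only set for a full 8-byte spill.
 */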
3799 
3800 static bool is_bpf_st_mem(struct bpf_insn *insn)
3801 {
3802 	return BPF_CLASS(insn->code) == BPF_ST && BPF_MODE(insn->code) == BPF_MEM;
3803 }
3804 
3805 /* check_stack_{read,write}_fixed_off functions track spill/fill of registers,
3806  * stack boundary and alignment are checked in check_mem_access()
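 *
 * Editorial example of the spill/fill pattern tracked here:
 *	*(u64 *)(r10 -8) = r6	; spill: slot fp-8 records r6's full state
 *	...
 *	r6 = *(u64 *)(r10 -8)	; fill: r6 gets that register state back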
3807  */
3808 static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
3809 				       /* stack frame we're writing to */
3810 				       struct bpf_func_state *state,
3811 				       int off, int size, int value_regno,
3812 				       int insn_idx)
3813 {
3814 	struct bpf_func_state *cur; /* state of the current function */
3815 	int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err;
3816 	struct bpf_insn *insn = &env->prog->insnsi[insn_idx];
3817 	struct bpf_reg_state *reg = NULL;
3818 	u32 dst_reg = insn->dst_reg;
3819 
3820 	err = grow_stack_state(state, round_up(slot + 1, BPF_REG_SIZE));
3821 	if (err)
3822 		return err;
3823 	/* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
3824 	 * so it's aligned access and [off, off + size) are within stack limits
3825 	 */
3826 	if (!env->allow_ptr_leaks &&
3827 	    state->stack[spi].slot_type[0] == STACK_SPILL &&
3828 	    size != BPF_REG_SIZE) {
3829 		verbose(env, "attempt to corrupt spilled pointer on stack\n");
3830 		return -EACCES;
3831 	}
3832 
3833 	cur = env->cur_state->frame[env->cur_state->curframe];
3834 	if (value_regno >= 0)
3835 		reg = &cur->regs[value_regno];
3836 	if (!env->bypass_spec_v4) {
3837 		bool sanitize = reg && is_spillable_regtype(reg->type);
3838 
3839 		for (i = 0; i < size; i++) {
3840 			u8 type = state->stack[spi].slot_type[i];
3841 
3842 			if (type != STACK_MISC && type != STACK_ZERO) {
3843 				sanitize = true;
3844 				break;
3845 			}
3846 		}
3847 
3848 		if (sanitize)
3849 			env->insn_aux_data[insn_idx].sanitize_stack_spill = true;
3850 	}
3851 
3852 	err = destroy_if_dynptr_stack_slot(env, state, spi);
3853 	if (err)
3854 		return err;
3855 
3856 	mark_stack_slot_scratched(env, spi);
3857 	if (reg && !(off % BPF_REG_SIZE) && register_is_bounded(reg) &&
3858 	    !register_is_null(reg) && env->bpf_capable) {
3859 		if (dst_reg != BPF_REG_FP) {
3860 			/* The backtracking logic can only recognize explicit
3861 			 * stack slot address like [fp - 8]. Other spill of
3862 			 * scalar via different register has to be conservative.
3863 			 * Backtrack from here and mark all registers as precise
3864 			 * that contributed into 'reg' being a constant.
3865 			 */
3866 			err = mark_chain_precision(env, value_regno);
3867 			if (err)
3868 				return err;
3869 		}
3870 		save_register_state(state, spi, reg, size);
3871 		/* Break the relation on a narrowing spill. */
3872 		if (fls64(reg->umax_value) > BITS_PER_BYTE * size)
3873 			state->stack[spi].spilled_ptr.id = 0;
3874 	} else if (!reg && !(off % BPF_REG_SIZE) && is_bpf_st_mem(insn) &&
3875 		   insn->imm != 0 && env->bpf_capable) {
3876 		struct bpf_reg_state fake_reg = {};
3877 
3878 		__mark_reg_known(&fake_reg, (u32)insn->imm);
3879 		fake_reg.type = SCALAR_VALUE;
3880 		save_register_state(state, spi, &fake_reg, size);
3881 	} else if (reg && is_spillable_regtype(reg->type)) {
3882 		/* register containing pointer is being spilled into stack */
3883 		if (size != BPF_REG_SIZE) {
3884 			verbose_linfo(env, insn_idx, "; ");
3885 			verbose(env, "invalid size of register spill\n");
3886 			return -EACCES;
3887 		}
3888 		if (state != cur && reg->type == PTR_TO_STACK) {
3889 			verbose(env, "cannot spill pointers to stack into stack frame of the caller\n");
3890 			return -EINVAL;
3891 		}
3892 		save_register_state(state, spi, reg, size);
3893 	} else {
3894 		u8 type = STACK_MISC;
3895 
3896 		/* regular write of data into stack destroys any spilled ptr */
3897 		state->stack[spi].spilled_ptr.type = NOT_INIT;
3898 		/* Mark slots as STACK_MISC if they belonged to spilled ptr/dynptr/iter. */
3899 		if (is_stack_slot_special(&state->stack[spi]))
3900 			for (i = 0; i < BPF_REG_SIZE; i++)
3901 				scrub_spilled_slot(&state->stack[spi].slot_type[i]);
3902 
3903 		/* only mark the slot as written if all 8 bytes were written
3904 		 * otherwise read propagation may incorrectly stop too soon
3905 		 * when stack slots are partially written.
3906 		 * This heuristic means that read propagation will be
3907 		 * conservative, since it will add reg_live_read marks
3908 		 * to stack slots all the way to first state when programs
3909 		 * writes+reads less than 8 bytes
3910 		 */
3911 		if (size == BPF_REG_SIZE)
3912 			state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
3913 
3914 		/* when we zero initialize stack slots mark them as such */
3915 		if ((reg && register_is_null(reg)) ||
3916 		    (!reg && is_bpf_st_mem(insn) && insn->imm == 0)) {
3917 			/* backtracking doesn't work for STACK_ZERO yet. */
3918 			err = mark_chain_precision(env, value_regno);
3919 			if (err)
3920 				return err;
3921 			type = STACK_ZERO;
3922 		}
3923 
3924 		/* Mark slots affected by this stack write. */
3925 		for (i = 0; i < size; i++)
3926 			state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] =
3927 				type;
3928 	}
3929 	return 0;
3930 }
3931 
3932 /* Write the stack: 'stack[ptr_regno + off] = value_regno'. 'ptr_regno' is
3933  * known to contain a variable offset.
3934  * This function checks whether the write is permitted and conservatively
3935  * tracks the effects of the write, considering that each stack slot in the
3936  * dynamic range is potentially written to.
3937  *
3938  * 'off' includes 'regno->off'.
3939  * 'value_regno' can be -1, meaning that an unknown value is being written to
3940  * the stack.
3941  *
3942  * Spilled pointers in range are not marked as written because we don't know
3943  * what's going to be actually written. This means that read propagation for
3944  * future reads cannot be terminated by this write.
3945  *
3946  * For privileged programs, uninitialized stack slots are considered
3947  * initialized by this write (even though we don't know exactly what offsets
3948  * are going to be written to). The idea is that we don't want the verifier to
3949  * reject future reads that access slots written to through variable offsets.
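 *
 * Editor-added illustration of the pattern handled here (offsets are made
 * up for the example):
 *	r2 = r10
 *	r2 += -16
 *	r3 &= 7			; some scalar now known to be in [0, 7]
 *	r2 += r3		; r2 points somewhere in [fp-16, fp-9]
 *	*(u8 *)(r2 + 0) = 0	; variable offset stack write
 * Every byte in [fp-16, fp-9] is conservatively treated as possibly
 * written (STACK_MISC, or STACK_ZERO when a zero is being written).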
3950  */
3951 static int check_stack_write_var_off(struct bpf_verifier_env *env,
3952 				     /* func where register points to */
3953 				     struct bpf_func_state *state,
3954 				     int ptr_regno, int off, int size,
3955 				     int value_regno, int insn_idx)
3956 {
3957 	struct bpf_func_state *cur; /* state of the current function */
3958 	int min_off, max_off;
3959 	int i, err;
3960 	struct bpf_reg_state *ptr_reg = NULL, *value_reg = NULL;
3961 	struct bpf_insn *insn = &env->prog->insnsi[insn_idx];
3962 	bool writing_zero = false;
3963 	/* set if the fact that we're writing a zero is used to let any
3964 	 * stack slots remain STACK_ZERO
3965 	 */
3966 	bool zero_used = false;
3967 
3968 	cur = env->cur_state->frame[env->cur_state->curframe];
3969 	ptr_reg = &cur->regs[ptr_regno];
3970 	min_off = ptr_reg->smin_value + off;
3971 	max_off = ptr_reg->smax_value + off + size;
3972 	if (value_regno >= 0)
3973 		value_reg = &cur->regs[value_regno];
3974 	if ((value_reg && register_is_null(value_reg)) ||
3975 	    (!value_reg && is_bpf_st_mem(insn) && insn->imm == 0))
3976 		writing_zero = true;
3977 
3978 	err = grow_stack_state(state, round_up(-min_off, BPF_REG_SIZE));
3979 	if (err)
3980 		return err;
3981 
3982 	for (i = min_off; i < max_off; i++) {
3983 		int spi;
3984 
3985 		spi = __get_spi(i);
3986 		err = destroy_if_dynptr_stack_slot(env, state, spi);
3987 		if (err)
3988 			return err;
3989 	}
3990 
3991 	/* Variable offset writes destroy any spilled pointers in range. */
3992 	for (i = min_off; i < max_off; i++) {
3993 		u8 new_type, *stype;
3994 		int slot, spi;
3995 
3996 		slot = -i - 1;
3997 		spi = slot / BPF_REG_SIZE;
3998 		stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
3999 		mark_stack_slot_scratched(env, spi);
4000 
4001 		if (!env->allow_ptr_leaks && *stype != STACK_MISC && *stype != STACK_ZERO) {
4002 			/* Reject the write if the range we may write to has not
4003 			 * been initialized beforehand. If we didn't reject
4004 			 * here, the ptr status would be erased below (even
4005 			 * though not all slots are actually overwritten),
4006 			 * possibly opening the door to leaks.
4007 			 *
4008 			 * We do however catch STACK_INVALID case below, and
4009 			 * only allow reading possibly uninitialized memory
4010 			 * later for CAP_PERFMON, as the write may not happen to
4011 			 * that slot.
4012 			 */
4013 			verbose(env, "spilled ptr in range of var-offset stack write; insn %d, ptr off: %d",
4014 				insn_idx, i);
4015 			return -EINVAL;
4016 		}
4017 
4018 		/* Erase all spilled pointers. */
4019 		state->stack[spi].spilled_ptr.type = NOT_INIT;
4020 
4021 		/* Update the slot type. */
4022 		new_type = STACK_MISC;
4023 		if (writing_zero && *stype == STACK_ZERO) {
4024 			new_type = STACK_ZERO;
4025 			zero_used = true;
4026 		}
4027 		/* If the slot is STACK_INVALID, we check whether it's OK to
4028 		 * pretend that it will be initialized by this write. The slot
4029 		 * might not actually be written to, and so if we mark it as
4030 		 * initialized, future reads might leak uninitialized memory.
4031 		 * For privileged programs, we will accept such reads to slots
4032 		 * that may or may not be written because, if we rejected
4033 		 * them, the error would be too confusing.
4034 		 */
4035 		if (*stype == STACK_INVALID && !env->allow_uninit_stack) {
4036 			verbose(env, "uninit stack in range of var-offset write prohibited for !root; insn %d, off: %d",
4037 					insn_idx, i);
4038 			return -EINVAL;
4039 		}
4040 		*stype = new_type;
4041 	}
4042 	if (zero_used) {
4043 		/* backtracking doesn't work for STACK_ZERO yet. */
4044 		err = mark_chain_precision(env, value_regno);
4045 		if (err)
4046 			return err;
4047 	}
4048 	return 0;
4049 }
4050 
4051 /* When register 'dst_regno' is assigned some values from stack[min_off,
4052  * max_off), we set the register's type according to the types of the
4053  * respective stack slots. If all the stack values are known to be zeros, then
4054  * so is the destination reg. Otherwise, the register is considered to be
4055  * SCALAR. This function does not deal with register filling; the caller must
4056  * ensure that all spilled registers in the stack range have been marked as
4057  * read.
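 *
 * Editorial example: if a 4-byte read covers fp-8..fp-5 and all four slot
 * bytes are STACK_ZERO, dst_regno becomes a known constant 0 (any load
 * size is zero extended); if any byte is STACK_MISC instead, the register
 * becomes an unknown SCALAR_VALUE.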
4058  */
4059 static void mark_reg_stack_read(struct bpf_verifier_env *env,
4060 				/* func where src register points to */
4061 				struct bpf_func_state *ptr_state,
4062 				int min_off, int max_off, int dst_regno)
4063 {
4064 	struct bpf_verifier_state *vstate = env->cur_state;
4065 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
4066 	int i, slot, spi;
4067 	u8 *stype;
4068 	int zeros = 0;
4069 
4070 	for (i = min_off; i < max_off; i++) {
4071 		slot = -i - 1;
4072 		spi = slot / BPF_REG_SIZE;
4073 		stype = ptr_state->stack[spi].slot_type;
4074 		if (stype[slot % BPF_REG_SIZE] != STACK_ZERO)
4075 			break;
4076 		zeros++;
4077 	}
4078 	if (zeros == max_off - min_off) {
4079 		/* any access_size read into register is zero extended,
4080 		 * so the whole register == const_zero
4081 		 */
4082 		__mark_reg_const_zero(&state->regs[dst_regno]);
4083 		/* backtracking doesn't support STACK_ZERO yet,
4084 		 * so mark it precise here, so that later
4085 		 * backtracking can stop here.
4086 		 * Backtracking may not need this if this register
4087 		 * doesn't participate in pointer adjustment.
4088 		 * Forward propagation of precise flag is not
4089 		 * necessary either. This mark is only to stop
4090 		 * backtracking. Any register that contributed
4091 		 * to const 0 was marked precise before spill.
4092 		 */
4093 		state->regs[dst_regno].precise = true;
4094 	} else {
4095 		/* have read misc data from the stack */
4096 		mark_reg_unknown(env, state->regs, dst_regno);
4097 	}
4098 	state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
4099 }
4100 
4101 /* Read the stack at 'off' and put the results into the register indicated by
4102  * 'dst_regno'. It handles reg filling if the addressed stack slot is a
4103  * spilled reg.
4104  *
4105  * 'dst_regno' can be -1, meaning that the read value is not going to a
4106  * register.
4107  *
4108  * The access is assumed to be within the current stack bounds.
4109  */
4110 static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
4111 				      /* func where src register points to */
4112 				      struct bpf_func_state *reg_state,
4113 				      int off, int size, int dst_regno)
4114 {
4115 	struct bpf_verifier_state *vstate = env->cur_state;
4116 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
4117 	int i, slot = -off - 1, spi = slot / BPF_REG_SIZE;
4118 	struct bpf_reg_state *reg;
4119 	u8 *stype, type;
4120 
4121 	stype = reg_state->stack[spi].slot_type;
4122 	reg = &reg_state->stack[spi].spilled_ptr;
4123 
4124 	if (is_spilled_reg(&reg_state->stack[spi])) {
4125 		u8 spill_size = 1;
4126 
4127 		for (i = BPF_REG_SIZE - 1; i > 0 && stype[i - 1] == STACK_SPILL; i--)
4128 			spill_size++;
4129 
4130 		if (size != BPF_REG_SIZE || spill_size != BPF_REG_SIZE) {
4131 			if (reg->type != SCALAR_VALUE) {
4132 				verbose_linfo(env, env->insn_idx, "; ");
4133 				verbose(env, "invalid size of register fill\n");
4134 				return -EACCES;
4135 			}
4136 
4137 			mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
4138 			if (dst_regno < 0)
4139 				return 0;
4140 
4141 			if (!(off % BPF_REG_SIZE) && size == spill_size) {
4142 				/* The earlier check_reg_arg() has decided the
4143 				 * subreg_def for this insn.  Save it first.
4144 				 */
4145 				s32 subreg_def = state->regs[dst_regno].subreg_def;
4146 
4147 				copy_register_state(&state->regs[dst_regno], reg);
4148 				state->regs[dst_regno].subreg_def = subreg_def;
4149 			} else {
4150 				for (i = 0; i < size; i++) {
4151 					type = stype[(slot - i) % BPF_REG_SIZE];
4152 					if (type == STACK_SPILL)
4153 						continue;
4154 					if (type == STACK_MISC)
4155 						continue;
4156 					if (type == STACK_INVALID && env->allow_uninit_stack)
4157 						continue;
4158 					verbose(env, "invalid read from stack off %d+%d size %d\n",
4159 						off, i, size);
4160 					return -EACCES;
4161 				}
4162 				mark_reg_unknown(env, state->regs, dst_regno);
4163 			}
4164 			state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
4165 			return 0;
4166 		}
4167 
4168 		if (dst_regno >= 0) {
4169 			/* restore register state from stack */
4170 			copy_register_state(&state->regs[dst_regno], reg);
4171 			/* mark reg as written since spilled pointer state likely
4172 			 * has its liveness marks cleared by is_state_visited()
4173 			 * which resets stack/reg liveness for state transitions
4174 			 */
4175 			state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
4176 		} else if (__is_pointer_value(env->allow_ptr_leaks, reg)) {
4177 			/* If dst_regno==-1, the caller is asking us whether
4178 			 * it is acceptable to use this value as a SCALAR_VALUE
4179 			 * (e.g. for XADD).
4180 			 * We must not allow unprivileged callers to do that
4181 			 * with spilled pointers.
4182 			 */
4183 			verbose(env, "leaking pointer from stack off %d\n",
4184 				off);
4185 			return -EACCES;
4186 		}
4187 		mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
4188 	} else {
4189 		for (i = 0; i < size; i++) {
4190 			type = stype[(slot - i) % BPF_REG_SIZE];
4191 			if (type == STACK_MISC)
4192 				continue;
4193 			if (type == STACK_ZERO)
4194 				continue;
4195 			if (type == STACK_INVALID && env->allow_uninit_stack)
4196 				continue;
4197 			verbose(env, "invalid read from stack off %d+%d size %d\n",
4198 				off, i, size);
4199 			return -EACCES;
4200 		}
4201 		mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
4202 		if (dst_regno >= 0)
4203 			mark_reg_stack_read(env, reg_state, off, off + size, dst_regno);
4204 	}
4205 	return 0;
4206 }
4207 
4208 enum bpf_access_src {
4209 	ACCESS_DIRECT = 1,  /* the access is performed by an instruction */
4210 	ACCESS_HELPER = 2,  /* the access is performed by a helper */
4211 };
4212 
4213 static int check_stack_range_initialized(struct bpf_verifier_env *env,
4214 					 int regno, int off, int access_size,
4215 					 bool zero_size_allowed,
4216 					 enum bpf_access_src type,
4217 					 struct bpf_call_arg_meta *meta);
4218 
4219 static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno)
4220 {
4221 	return cur_regs(env) + regno;
4222 }
4223 
4224 /* Read the stack at 'ptr_regno + off' and put the result into the register
4225  * 'dst_regno'.
4226  * 'off' includes the pointer register's fixed offset (i.e. 'ptr_regno.off'),
4227  * but not its variable offset.
4228  * 'size' is assumed to be <= reg size and the access is assumed to be aligned.
4229  *
4230  * As opposed to check_stack_read_fixed_off, this function doesn't deal with
4231  * filling registers (i.e. reads of spilled register cannot be detected when
4232  * the offset is not fixed). We conservatively mark 'dst_regno' as containing
4233  * SCALAR_VALUE. That's why we assert that the 'ptr_regno' has a variable
4234  * offset; for a fixed offset check_stack_read_fixed_off should be used
4235  * instead.
4236  */
4237 static int check_stack_read_var_off(struct bpf_verifier_env *env,
4238 				    int ptr_regno, int off, int size, int dst_regno)
4239 {
4240 	/* The state of the source register. */
4241 	struct bpf_reg_state *reg = reg_state(env, ptr_regno);
4242 	struct bpf_func_state *ptr_state = func(env, reg);
4243 	int err;
4244 	int min_off, max_off;
4245 
4246 	/* Note that we pass a NULL meta, so raw access will not be permitted.
4247 	 */
4248 	err = check_stack_range_initialized(env, ptr_regno, off, size,
4249 					    false, ACCESS_DIRECT, NULL);
4250 	if (err)
4251 		return err;
4252 
4253 	min_off = reg->smin_value + off;
4254 	max_off = reg->smax_value + off;
4255 	mark_reg_stack_read(env, ptr_state, min_off, max_off + size, dst_regno);
4256 	return 0;
4257 }
4258 
4259 /* check_stack_read dispatches to check_stack_read_fixed_off or
4260  * check_stack_read_var_off.
4261  *
4262  * The caller must ensure that the offset falls within the allocated stack
4263  * bounds.
4264  *
4265  * 'dst_regno' is a register which will receive the value from the stack. It
4266  * can be -1, meaning that the read value is not going to a register.
4267  */
4268 static int check_stack_read(struct bpf_verifier_env *env,
4269 			    int ptr_regno, int off, int size,
4270 			    int dst_regno)
4271 {
4272 	struct bpf_reg_state *reg = reg_state(env, ptr_regno);
4273 	struct bpf_func_state *state = func(env, reg);
4274 	int err;
4275 	/* Some accesses are only permitted with a static offset. */
4276 	bool var_off = !tnum_is_const(reg->var_off);
4277 
4278 	/* The offset is required to be static when reads don't go to a
4279 	 * register, in order to not leak pointers (see
4280 	 * check_stack_read_fixed_off).
4281 	 */
4282 	if (dst_regno < 0 && var_off) {
4283 		char tn_buf[48];
4284 
4285 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
4286 		verbose(env, "variable offset stack pointer cannot be passed into helper function; var_off=%s off=%d size=%d\n",
4287 			tn_buf, off, size);
4288 		return -EACCES;
4289 	}
4290 	/* Variable offset is prohibited for unprivileged mode for simplicity
4291 	 * since it requires corresponding support in Spectre masking for stack
4292 	 * ALU. See also retrieve_ptr_limit(). The check in
4293 	 * check_stack_access_for_ptr_arithmetic() called by
4294 	 * adjust_ptr_min_max_vals() prevents users from creating stack pointers
4295 	 * with variable offsets, therefore no check is required here. Further,
4296 	 * just checking it here would be insufficient as speculative stack
4297 	 * writes could still lead to unsafe speculative behaviour.
4298 	 */
4299 	if (!var_off) {
4300 		off += reg->var_off.value;
4301 		err = check_stack_read_fixed_off(env, state, off, size,
4302 						 dst_regno);
4303 	} else {
4304 		/* Variable offset stack reads need more conservative handling
4305 		 * than fixed offset ones. Note that dst_regno >= 0 on this
4306 		 * branch.
4307 		 */
4308 		err = check_stack_read_var_off(env, ptr_regno, off, size,
4309 					       dst_regno);
4310 	}
4311 	return err;
4312 }
4313 
4314 
4315 /* check_stack_write dispatches to check_stack_write_fixed_off or
4316  * check_stack_write_var_off.
4317  *
4318  * 'ptr_regno' is the register used as a pointer into the stack.
4319  * 'off' includes 'ptr_regno->off', but not its variable offset (if any).
4320  * 'value_regno' is the register whose value we're writing to the stack. It can
4321  * be -1, meaning that we're not writing from a register.
4322  *
4323  * The caller must ensure that the offset falls within the maximum stack size.
4324  */
4325 static int check_stack_write(struct bpf_verifier_env *env,
4326 			     int ptr_regno, int off, int size,
4327 			     int value_regno, int insn_idx)
4328 {
4329 	struct bpf_reg_state *reg = reg_state(env, ptr_regno);
4330 	struct bpf_func_state *state = func(env, reg);
4331 	int err;
4332 
4333 	if (tnum_is_const(reg->var_off)) {
4334 		off += reg->var_off.value;
4335 		err = check_stack_write_fixed_off(env, state, off, size,
4336 						  value_regno, insn_idx);
4337 	} else {
4338 		/* Variable offset stack writes need more conservative handling
4339 		 * than fixed offset ones.
4340 		 */
4341 		err = check_stack_write_var_off(env, state,
4342 						ptr_regno, off, size,
4343 						value_regno, insn_idx);
4344 	}
4345 	return err;
4346 }
4347 
4348 static int check_map_access_type(struct bpf_verifier_env *env, u32 regno,
4349 				 int off, int size, enum bpf_access_type type)
4350 {
4351 	struct bpf_reg_state *regs = cur_regs(env);
4352 	struct bpf_map *map = regs[regno].map_ptr;
4353 	u32 cap = bpf_map_flags_to_cap(map);
4354 
4355 	if (type == BPF_WRITE && !(cap & BPF_MAP_CAN_WRITE)) {
4356 		verbose(env, "write into map forbidden, value_size=%d off=%d size=%d\n",
4357 			map->value_size, off, size);
4358 		return -EACCES;
4359 	}
4360 
4361 	if (type == BPF_READ && !(cap & BPF_MAP_CAN_READ)) {
4362 		verbose(env, "read from map forbidden, value_size=%d off=%d size=%d\n",
4363 			map->value_size, off, size);
4364 		return -EACCES;
4365 	}
4366 
4367 	return 0;
4368 }
4369 
4370 /* check read/write into memory region (e.g., map value, ringbuf sample, etc) */
4371 static int __check_mem_access(struct bpf_verifier_env *env, int regno,
4372 			      int off, int size, u32 mem_size,
4373 			      bool zero_size_allowed)
4374 {
4375 	bool size_ok = size > 0 || (size == 0 && zero_size_allowed);
4376 	struct bpf_reg_state *reg;
4377 
4378 	if (off >= 0 && size_ok && (u64)off + size <= mem_size)
4379 		return 0;
4380 
4381 	reg = &cur_regs(env)[regno];
4382 	switch (reg->type) {
4383 	case PTR_TO_MAP_KEY:
4384 		verbose(env, "invalid access to map key, key_size=%d off=%d size=%d\n",
4385 			mem_size, off, size);
4386 		break;
4387 	case PTR_TO_MAP_VALUE:
4388 		verbose(env, "invalid access to map value, value_size=%d off=%d size=%d\n",
4389 			mem_size, off, size);
4390 		break;
4391 	case PTR_TO_PACKET:
4392 	case PTR_TO_PACKET_META:
4393 	case PTR_TO_PACKET_END:
4394 		verbose(env, "invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n",
4395 			off, size, regno, reg->id, off, mem_size);
4396 		break;
4397 	case PTR_TO_MEM:
4398 	default:
4399 		verbose(env, "invalid access to memory, mem_size=%u off=%d size=%d\n",
4400 			mem_size, off, size);
4401 	}
4402 
4403 	return -EACCES;
4404 }
4405 
4406 /* check read/write into a memory region with possible variable offset */
4407 static int check_mem_region_access(struct bpf_verifier_env *env, u32 regno,
4408 				   int off, int size, u32 mem_size,
4409 				   bool zero_size_allowed)
4410 {
4411 	struct bpf_verifier_state *vstate = env->cur_state;
4412 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
4413 	struct bpf_reg_state *reg = &state->regs[regno];
4414 	int err;
4415 
4416 	/* We may have adjusted the register pointing to memory region, so we
4417 	 * need to try adding each of min_value and max_value to off
4418 	 * to make sure our theoretical access will be safe.
4419 	 *
4420 	 * The minimum value is only important with signed
4421 	 * comparisons where we can't assume the floor of a
4422 	 * value is 0.  If we are using signed variables for our
4423 	 * index'es we need to make sure that whatever we use
4424 	 * will have a set floor within our range.
4425 	 */
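	/* Editorial example of why both bounds are checked below: with
	 * mem_size == 64, off == 60 and a register known to be in [0, 8],
	 * the smin-based check (60 + 0) passes, but the umax-based check
	 * (60 + 8) must still reject a 4-byte access that would reach
	 * offset 72.
	 */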
4426 	if (reg->smin_value < 0 &&
4427 	    (reg->smin_value == S64_MIN ||
4428 	     (off + reg->smin_value != (s64)(s32)(off + reg->smin_value)) ||
4429 	      reg->smin_value + off < 0)) {
4430 		verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
4431 			regno);
4432 		return -EACCES;
4433 	}
4434 	err = __check_mem_access(env, regno, reg->smin_value + off, size,
4435 				 mem_size, zero_size_allowed);
4436 	if (err) {
4437 		verbose(env, "R%d min value is outside of the allowed memory range\n",
4438 			regno);
4439 		return err;
4440 	}
4441 
4442 	/* If we haven't set a max value then we need to bail since we can't be
4443 	 * sure we won't do bad things.
4444 	 * If reg->umax_value + off could overflow, treat that as unbounded too.
4445 	 */
4446 	if (reg->umax_value >= BPF_MAX_VAR_OFF) {
4447 		verbose(env, "R%d unbounded memory access, make sure to bounds check any such access\n",
4448 			regno);
4449 		return -EACCES;
4450 	}
4451 	err = __check_mem_access(env, regno, reg->umax_value + off, size,
4452 				 mem_size, zero_size_allowed);
4453 	if (err) {
4454 		verbose(env, "R%d max value is outside of the allowed memory range\n",
4455 			regno);
4456 		return err;
4457 	}
4458 
4459 	return 0;
4460 }
4461 
4462 static int __check_ptr_off_reg(struct bpf_verifier_env *env,
4463 			       const struct bpf_reg_state *reg, int regno,
4464 			       bool fixed_off_ok)
4465 {
4466 	/* Access to this pointer-typed register or passing it to a helper
4467 	 * is only allowed in its original, unmodified form.
4468 	 */
4469 
4470 	if (reg->off < 0) {
4471 		verbose(env, "negative offset %s ptr R%d off=%d disallowed\n",
4472 			reg_type_str(env, reg->type), regno, reg->off);
4473 		return -EACCES;
4474 	}
4475 
4476 	if (!fixed_off_ok && reg->off) {
4477 		verbose(env, "dereference of modified %s ptr R%d off=%d disallowed\n",
4478 			reg_type_str(env, reg->type), regno, reg->off);
4479 		return -EACCES;
4480 	}
4481 
4482 	if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
4483 		char tn_buf[48];
4484 
4485 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
4486 		verbose(env, "variable %s access var_off=%s disallowed\n",
4487 			reg_type_str(env, reg->type), tn_buf);
4488 		return -EACCES;
4489 	}
4490 
4491 	return 0;
4492 }
4493 
4494 int check_ptr_off_reg(struct bpf_verifier_env *env,
4495 		      const struct bpf_reg_state *reg, int regno)
4496 {
4497 	return __check_ptr_off_reg(env, reg, regno, false);
4498 }
4499 
4500 static int map_kptr_match_type(struct bpf_verifier_env *env,
4501 			       struct btf_field *kptr_field,
4502 			       struct bpf_reg_state *reg, u32 regno)
4503 {
4504 	const char *targ_name = btf_type_name(kptr_field->kptr.btf, kptr_field->kptr.btf_id);
4505 	int perm_flags = PTR_MAYBE_NULL | PTR_TRUSTED | MEM_RCU;
4506 	const char *reg_name = "";
4507 
4508 	/* Only unreferenced case accepts untrusted pointers */
4509 	if (kptr_field->type == BPF_KPTR_UNREF)
4510 		perm_flags |= PTR_UNTRUSTED;
4511 
4512 	if (base_type(reg->type) != PTR_TO_BTF_ID || (type_flag(reg->type) & ~perm_flags))
4513 		goto bad_type;
4514 
4515 	if (!btf_is_kernel(reg->btf)) {
4516 		verbose(env, "R%d must point to kernel BTF\n", regno);
4517 		return -EINVAL;
4518 	}
4519 	/* We need to verify reg->type and reg->btf, before accessing reg->btf */
4520 	reg_name = btf_type_name(reg->btf, reg->btf_id);
4521 
4522 	/* For ref_ptr case, release function check should ensure we get one
4523 	 * referenced PTR_TO_BTF_ID, and that its fixed offset is 0. For the
4524 	 * normal store of unreferenced kptr, we must ensure var_off is zero.
4525 	 * Since ref_ptr cannot be accessed directly by BPF insns, checks for
4526 	 * reg->off and reg->ref_obj_id are not needed here.
4527 	 */
4528 	if (__check_ptr_off_reg(env, reg, regno, true))
4529 		return -EACCES;
4530 
4531 	/* A full type match is needed, as BTF can be vmlinux or module BTF, and
4532 	 * we also need to take into account the reg->off.
4533 	 *
4534 	 * We want to support cases like:
4535 	 *
4536 	 * struct foo {
4537 	 *         struct bar br;
4538 	 *         struct baz bz;
4539 	 * };
4540 	 *
4541 	 * struct foo *v;
4542 	 * v = func();	      // PTR_TO_BTF_ID
4543 	 * val->foo = v;      // reg->off is zero, btf and btf_id match type
4544 	 * val->bar = &v->br; // reg->off is still zero, but we need to retry with
4545 	 *                    // first member type of struct after comparison fails
4546 	 * val->baz = &v->bz; // reg->off is non-zero, so struct needs to be walked
4547 	 *                    // to match type
4548 	 *
4549 	 * In the kptr_ref case, check_func_arg_reg_off already ensures reg->off
4550 	 * is zero. We must also ensure that btf_struct_ids_match does not walk
4551 	 * the struct to match type against first member of struct, i.e. reject
4552 	 * second case from above. Hence, when type is BPF_KPTR_REF, we set
4553 	 * strict mode to true for type match.
4554 	 */
4555 	if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, reg->off,
4556 				  kptr_field->kptr.btf, kptr_field->kptr.btf_id,
4557 				  kptr_field->type == BPF_KPTR_REF))
4558 		goto bad_type;
4559 	return 0;
4560 bad_type:
4561 	verbose(env, "invalid kptr access, R%d type=%s%s ", regno,
4562 		reg_type_str(env, reg->type), reg_name);
4563 	verbose(env, "expected=%s%s", reg_type_str(env, PTR_TO_BTF_ID), targ_name);
4564 	if (kptr_field->type == BPF_KPTR_UNREF)
4565 		verbose(env, " or %s%s\n", reg_type_str(env, PTR_TO_BTF_ID | PTR_UNTRUSTED),
4566 			targ_name);
4567 	else
4568 		verbose(env, "\n");
4569 	return -EINVAL;
4570 }
4571 
4572 /* The non-sleepable programs and sleepable programs with explicit bpf_rcu_read_lock()
4573  * can dereference RCU protected pointers and result is PTR_TRUSTED.
4574  */
4575 static bool in_rcu_cs(struct bpf_verifier_env *env)
4576 {
4577 	return env->cur_state->active_rcu_lock || !env->prog->aux->sleepable;
4578 }
4579 
4580 /* Once GCC supports btf_type_tag the following mechanism will be replaced with tag check */
4581 BTF_SET_START(rcu_protected_types)
4582 BTF_ID(struct, prog_test_ref_kfunc)
4583 BTF_ID(struct, cgroup)
4584 BTF_ID(struct, bpf_cpumask)
4585 BTF_ID(struct, task_struct)
4586 BTF_SET_END(rcu_protected_types)
4587 
4588 static bool rcu_protected_object(const struct btf *btf, u32 btf_id)
4589 {
4590 	if (!btf_is_kernel(btf))
4591 		return false;
4592 	return btf_id_set_contains(&rcu_protected_types, btf_id);
4593 }
4594 
4595 static bool rcu_safe_kptr(const struct btf_field *field)
4596 {
4597 	const struct btf_field_kptr *kptr = &field->kptr;
4598 
4599 	return field->type == BPF_KPTR_REF && rcu_protected_object(kptr->btf, kptr->btf_id);
4600 }
4601 
4602 static int check_map_kptr_access(struct bpf_verifier_env *env, u32 regno,
4603 				 int value_regno, int insn_idx,
4604 				 struct btf_field *kptr_field)
4605 {
4606 	struct bpf_insn *insn = &env->prog->insnsi[insn_idx];
4607 	int class = BPF_CLASS(insn->code);
4608 	struct bpf_reg_state *val_reg;
4609 
4610 	/* Things we already checked for in check_map_access and caller:
4611 	 *  - Reject cases where variable offset may touch kptr
4612 	 *  - size of access (must be BPF_DW)
4613 	 *  - tnum_is_const(reg->var_off)
4614 	 *  - kptr_field->offset == off + reg->var_off.value
4615 	 */
4616 	/* Only BPF_[LDX,STX,ST] | BPF_MEM | BPF_DW is supported */
4617 	if (BPF_MODE(insn->code) != BPF_MEM) {
4618 		verbose(env, "kptr in map can only be accessed using BPF_MEM instruction mode\n");
4619 		return -EACCES;
4620 	}
4621 
4622 	/* We only allow loading referenced kptr, since it will be marked as
4623 	 * untrusted, similar to unreferenced kptr.
4624 	 */
4625 	if (class != BPF_LDX && kptr_field->type == BPF_KPTR_REF) {
4626 		verbose(env, "store to referenced kptr disallowed\n");
4627 		return -EACCES;
4628 	}
4629 
4630 	if (class == BPF_LDX) {
4631 		val_reg = reg_state(env, value_regno);
4632 		/* We can simply mark the value_regno receiving the pointer
4633 		 * value from map as PTR_TO_BTF_ID, with the correct type.
4634 		 */
4635 		mark_btf_ld_reg(env, cur_regs(env), value_regno, PTR_TO_BTF_ID, kptr_field->kptr.btf,
4636 				kptr_field->kptr.btf_id,
4637 				rcu_safe_kptr(kptr_field) && in_rcu_cs(env) ?
4638 				PTR_MAYBE_NULL | MEM_RCU :
4639 				PTR_MAYBE_NULL | PTR_UNTRUSTED);
4640 		/* For mark_ptr_or_null_reg */
4641 		val_reg->id = ++env->id_gen;
4642 	} else if (class == BPF_STX) {
4643 		val_reg = reg_state(env, value_regno);
4644 		if (!register_is_null(val_reg) &&
4645 		    map_kptr_match_type(env, kptr_field, val_reg, value_regno))
4646 			return -EACCES;
4647 	} else if (class == BPF_ST) {
4648 		if (insn->imm) {
4649 			verbose(env, "BPF_ST imm must be 0 when storing to kptr at off=%u\n",
4650 				kptr_field->offset);
4651 			return -EACCES;
4652 		}
4653 	} else {
4654 		verbose(env, "kptr in map can only be accessed using BPF_LDX/BPF_STX/BPF_ST\n");
4655 		return -EACCES;
4656 	}
4657 	return 0;
4658 }
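/* Editorial illustration (hypothetical BPF-side map value layout) of the
 * direct kptr load that reaches check_map_kptr_access() as a
 * BPF_LDX | BPF_MEM | BPF_DW access:
 *
 *	struct map_val { struct task_struct __kptr *t; };
 *	...
 *	struct map_val *v = bpf_map_lookup_elem(&m, &key);
 *	if (v) {
 *		struct task_struct *t = v->t;	// PTR_TO_BTF_ID with
 *						// PTR_MAYBE_NULL | MEM_RCU or
 *						// PTR_MAYBE_NULL | PTR_UNTRUSTED
 *	}
 *
 * Direct stores are only accepted for unreferenced kptrs, and even then
 * only NULL/0 or a pointer whose type matches the field.
 */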
4659 
4660 /* check read/write into a map element with possible variable offset */
4661 static int check_map_access(struct bpf_verifier_env *env, u32 regno,
4662 			    int off, int size, bool zero_size_allowed,
4663 			    enum bpf_access_src src)
4664 {
4665 	struct bpf_verifier_state *vstate = env->cur_state;
4666 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
4667 	struct bpf_reg_state *reg = &state->regs[regno];
4668 	struct bpf_map *map = reg->map_ptr;
4669 	struct btf_record *rec;
4670 	int err, i;
4671 
4672 	err = check_mem_region_access(env, regno, off, size, map->value_size,
4673 				      zero_size_allowed);
4674 	if (err)
4675 		return err;
4676 
4677 	if (IS_ERR_OR_NULL(map->record))
4678 		return 0;
4679 	rec = map->record;
4680 	for (i = 0; i < rec->cnt; i++) {
4681 		struct btf_field *field = &rec->fields[i];
4682 		u32 p = field->offset;
4683 
4684 		/* If any part of a field can be touched by load/store, reject
4685 		 * this program. To check that [x1, x2) overlaps with [y1, y2),
4686 		 * it is sufficient to check x1 < y2 && y1 < x2.
4687 		 */
4688 		if (reg->smin_value + off < p + btf_field_type_size(field->type) &&
4689 		    p < reg->umax_value + off + size) {
4690 			switch (field->type) {
4691 			case BPF_KPTR_UNREF:
4692 			case BPF_KPTR_REF:
4693 				if (src != ACCESS_DIRECT) {
4694 					verbose(env, "kptr cannot be accessed indirectly by helper\n");
4695 					return -EACCES;
4696 				}
4697 				if (!tnum_is_const(reg->var_off)) {
4698 					verbose(env, "kptr access cannot have variable offset\n");
4699 					return -EACCES;
4700 				}
4701 				if (p != off + reg->var_off.value) {
4702 					verbose(env, "kptr access misaligned expected=%u off=%llu\n",
4703 						p, off + reg->var_off.value);
4704 					return -EACCES;
4705 				}
4706 				if (size != bpf_size_to_bytes(BPF_DW)) {
4707 					verbose(env, "kptr access size must be BPF_DW\n");
4708 					return -EACCES;
4709 				}
4710 				break;
4711 			default:
4712 				verbose(env, "%s cannot be accessed directly by load/store\n",
4713 					btf_field_type_name(field->type));
4714 				return -EACCES;
4715 			}
4716 		}
4717 	}
4718 	return 0;
4719 }
4720 
4721 #define MAX_PACKET_OFF 0xffff
4722 
4723 static bool may_access_direct_pkt_data(struct bpf_verifier_env *env,
4724 				       const struct bpf_call_arg_meta *meta,
4725 				       enum bpf_access_type t)
4726 {
4727 	enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
4728 
4729 	switch (prog_type) {
4730 	/* Program types only with direct read access go here! */
4731 	case BPF_PROG_TYPE_LWT_IN:
4732 	case BPF_PROG_TYPE_LWT_OUT:
4733 	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
4734 	case BPF_PROG_TYPE_SK_REUSEPORT:
4735 	case BPF_PROG_TYPE_FLOW_DISSECTOR:
4736 	case BPF_PROG_TYPE_CGROUP_SKB:
4737 		if (t == BPF_WRITE)
4738 			return false;
4739 		fallthrough;
4740 
4741 	/* Program types with direct read + write access go here! */
4742 	case BPF_PROG_TYPE_SCHED_CLS:
4743 	case BPF_PROG_TYPE_SCHED_ACT:
4744 	case BPF_PROG_TYPE_XDP:
4745 	case BPF_PROG_TYPE_LWT_XMIT:
4746 	case BPF_PROG_TYPE_SK_SKB:
4747 	case BPF_PROG_TYPE_SK_MSG:
4748 		if (meta)
4749 			return meta->pkt_access;
4750 
4751 		env->seen_direct_write = true;
4752 		return true;
4753 
4754 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
4755 		if (t == BPF_WRITE)
4756 			env->seen_direct_write = true;
4757 
4758 		return true;
4759 
4760 	default:
4761 		return false;
4762 	}
4763 }
4764 
4765 static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
4766 			       int size, bool zero_size_allowed)
4767 {
4768 	struct bpf_reg_state *regs = cur_regs(env);
4769 	struct bpf_reg_state *reg = &regs[regno];
4770 	int err;
4771 
4772 	/* We may have added a variable offset to the packet pointer; but any
4773 	 * reg->range we have comes after that.  We are only checking the fixed
4774 	 * offset.
4775 	 */
4776 
4777 	/* We don't allow negative numbers, because we aren't tracking enough
4778 	 * detail to prove they're safe.
4779 	 */
4780 	if (reg->smin_value < 0) {
4781 		verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
4782 			regno);
4783 		return -EACCES;
4784 	}
4785 
4786 	err = reg->range < 0 ? -EINVAL :
4787 	      __check_mem_access(env, regno, off, size, reg->range,
4788 				 zero_size_allowed);
4789 	if (err) {
4790 		verbose(env, "R%d offset is outside of the packet\n", regno);
4791 		return err;
4792 	}
4793 
4794 	/* __check_mem_access has made sure "off + size - 1" is within u16.
4795 	 * reg->umax_value can't be bigger than MAX_PACKET_OFF which is 0xffff,
4796 	 * otherwise find_good_pkt_pointers would have refused to set range info,
4797 	 * in which case __check_mem_access would have rejected this pkt access.
4798 	 * Therefore, "off + reg->umax_value + size - 1" won't overflow u32.
4799 	 */
4800 	env->prog->aux->max_pkt_offset =
4801 		max_t(u32, env->prog->aux->max_pkt_offset,
4802 		      off + reg->umax_value + size - 1);
4803 
4804 	return err;
4805 }
4806 
4807 /* check access to 'struct bpf_context' fields.  Supports fixed offsets only */
4808 static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size,
4809 			    enum bpf_access_type t, enum bpf_reg_type *reg_type,
4810 			    struct btf **btf, u32 *btf_id)
4811 {
4812 	struct bpf_insn_access_aux info = {
4813 		.reg_type = *reg_type,
4814 		.log = &env->log,
4815 	};
4816 
4817 	if (env->ops->is_valid_access &&
4818 	    env->ops->is_valid_access(off, size, t, env->prog, &info)) {
4819 		/* A non-zero info.ctx_field_size indicates that this field is a
4820 		 * candidate for later verifier transformation to load the whole
4821 		 * field and then apply a mask when accessed with a narrower
4822 		 * access than actual ctx access size. A zero info.ctx_field_size
4823 		 * will only allow for whole field access and rejects any other
4824 		 * type of narrower access.
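		 *
		 * Editorial example: a 1-byte narrow read of a ctx field whose
		 * info.ctx_field_size is 4 is later rewritten to load the full
		 * 4 bytes and then shift/mask out the requested byte.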
4825 		 */
4826 		*reg_type = info.reg_type;
4827 
4828 		if (base_type(*reg_type) == PTR_TO_BTF_ID) {
4829 			*btf = info.btf;
4830 			*btf_id = info.btf_id;
4831 		} else {
4832 			env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
4833 		}
4834 		/* remember the offset of last byte accessed in ctx */
4835 		if (env->prog->aux->max_ctx_offset < off + size)
4836 			env->prog->aux->max_ctx_offset = off + size;
4837 		return 0;
4838 	}
4839 
4840 	verbose(env, "invalid bpf_context access off=%d size=%d\n", off, size);
4841 	return -EACCES;
4842 }
4843 
4844 static int check_flow_keys_access(struct bpf_verifier_env *env, int off,
4845 				  int size)
4846 {
4847 	if (size < 0 || off < 0 ||
4848 	    (u64)off + size > sizeof(struct bpf_flow_keys)) {
4849 		verbose(env, "invalid access to flow keys off=%d size=%d\n",
4850 			off, size);
4851 		return -EACCES;
4852 	}
4853 	return 0;
4854 }
4855 
4856 static int check_sock_access(struct bpf_verifier_env *env, int insn_idx,
4857 			     u32 regno, int off, int size,
4858 			     enum bpf_access_type t)
4859 {
4860 	struct bpf_reg_state *regs = cur_regs(env);
4861 	struct bpf_reg_state *reg = &regs[regno];
4862 	struct bpf_insn_access_aux info = {};
4863 	bool valid;
4864 
4865 	if (reg->smin_value < 0) {
4866 		verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
4867 			regno);
4868 		return -EACCES;
4869 	}
4870 
4871 	switch (reg->type) {
4872 	case PTR_TO_SOCK_COMMON:
4873 		valid = bpf_sock_common_is_valid_access(off, size, t, &info);
4874 		break;
4875 	case PTR_TO_SOCKET:
4876 		valid = bpf_sock_is_valid_access(off, size, t, &info);
4877 		break;
4878 	case PTR_TO_TCP_SOCK:
4879 		valid = bpf_tcp_sock_is_valid_access(off, size, t, &info);
4880 		break;
4881 	case PTR_TO_XDP_SOCK:
4882 		valid = bpf_xdp_sock_is_valid_access(off, size, t, &info);
4883 		break;
4884 	default:
4885 		valid = false;
4886 	}
4887 
4888 
4889 	if (valid) {
4890 		env->insn_aux_data[insn_idx].ctx_field_size =
4891 			info.ctx_field_size;
4892 		return 0;
4893 	}
4894 
4895 	verbose(env, "R%d invalid %s access off=%d size=%d\n",
4896 		regno, reg_type_str(env, reg->type), off, size);
4897 
4898 	return -EACCES;
4899 }
4900 
4901 static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
4902 {
4903 	return __is_pointer_value(env->allow_ptr_leaks, reg_state(env, regno));
4904 }
4905 
4906 static bool is_ctx_reg(struct bpf_verifier_env *env, int regno)
4907 {
4908 	const struct bpf_reg_state *reg = reg_state(env, regno);
4909 
4910 	return reg->type == PTR_TO_CTX;
4911 }
4912 
4913 static bool is_sk_reg(struct bpf_verifier_env *env, int regno)
4914 {
4915 	const struct bpf_reg_state *reg = reg_state(env, regno);
4916 
4917 	return type_is_sk_pointer(reg->type);
4918 }
4919 
4920 static bool is_pkt_reg(struct bpf_verifier_env *env, int regno)
4921 {
4922 	const struct bpf_reg_state *reg = reg_state(env, regno);
4923 
4924 	return type_is_pkt_pointer(reg->type);
4925 }
4926 
4927 static bool is_flow_key_reg(struct bpf_verifier_env *env, int regno)
4928 {
4929 	const struct bpf_reg_state *reg = reg_state(env, regno);
4930 
4931 	/* Kept separate from is_ctx_reg() since we still want to allow BPF_ST here. */
4932 	return reg->type == PTR_TO_FLOW_KEYS;
4933 }
4934 
4935 static bool is_trusted_reg(const struct bpf_reg_state *reg)
4936 {
4937 	/* A referenced register is always trusted. */
4938 	if (reg->ref_obj_id)
4939 		return true;
4940 
4941 	/* If a register is not referenced, it is trusted if it has the
4942 	 * MEM_ALLOC or PTR_TRUSTED type modifiers, and no others. Some of the
4943 	 * other type modifiers may be safe, but we elect to take an opt-in
4944 	 * approach here as some (e.g. PTR_UNTRUSTED and PTR_MAYBE_NULL) are
4945 	 * not.
4946 	 *
4947 	 * Eventually, we should make PTR_TRUSTED the single source of truth
4948 	 * for whether a register is trusted.
4949 	 */
4950 	return type_flag(reg->type) & BPF_REG_TRUSTED_MODIFIERS &&
4951 	       !bpf_type_has_unsafe_modifiers(reg->type);
4952 }
4953 
4954 static bool is_rcu_reg(const struct bpf_reg_state *reg)
4955 {
4956 	return reg->type & MEM_RCU;
4957 }
4958 
4959 static void clear_trusted_flags(enum bpf_type_flag *flag)
4960 {
4961 	*flag &= ~(BPF_REG_TRUSTED_MODIFIERS | MEM_RCU);
4962 }
4963 
4964 static int check_pkt_ptr_alignment(struct bpf_verifier_env *env,
4965 				   const struct bpf_reg_state *reg,
4966 				   int off, int size, bool strict)
4967 {
4968 	struct tnum reg_off;
4969 	int ip_align;
4970 
4971 	/* Byte size accesses are always allowed. */
4972 	if (!strict || size == 1)
4973 		return 0;
4974 
4975 	/* For platforms that do not have a Kconfig enabling
4976 	 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the value of
4977 	 * NET_IP_ALIGN is universally set to '2'.  And on platforms
4978 	 * that do set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, we get
4979 	 * to this code only in strict mode where we want to emulate
4980 	 * the NET_IP_ALIGN==2 checking.  Therefore use an
4981 	 * unconditional IP align value of '2'.
4982 	 */
4983 	ip_align = 2;
4984 
4985 	reg_off = tnum_add(reg->var_off, tnum_const(ip_align + reg->off + off));
4986 	if (!tnum_is_aligned(reg_off, size)) {
4987 		char tn_buf[48];
4988 
4989 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
4990 		verbose(env,
4991 			"misaligned packet access off %d+%s+%d+%d size %d\n",
4992 			ip_align, tn_buf, reg->off, off, size);
4993 		return -EACCES;
4994 	}
4995 
4996 	return 0;
4997 }
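
/* Worked example (illustrative): with the unconditional ip_align of 2, a
 * 4-byte load at packet offset 14 (right after the Ethernet header) checks
 * 2 + 14 = 16, which is 4-byte aligned and passes. A 4-byte load at offset
 * 12 would check 2 + 12 = 14 and be rejected in strict mode as misaligned.
 */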
4998 
4999 static int check_generic_ptr_alignment(struct bpf_verifier_env *env,
5000 				       const struct bpf_reg_state *reg,
5001 				       const char *pointer_desc,
5002 				       int off, int size, bool strict)
5003 {
5004 	struct tnum reg_off;
5005 
5006 	/* Byte size accesses are always allowed. */
5007 	if (!strict || size == 1)
5008 		return 0;
5009 
5010 	reg_off = tnum_add(reg->var_off, tnum_const(reg->off + off));
5011 	if (!tnum_is_aligned(reg_off, size)) {
5012 		char tn_buf[48];
5013 
5014 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
5015 		verbose(env, "misaligned %saccess off %s+%d+%d size %d\n",
5016 			pointer_desc, tn_buf, reg->off, off, size);
5017 		return -EACCES;
5018 	}
5019 
5020 	return 0;
5021 }
5022 
5023 static int check_ptr_alignment(struct bpf_verifier_env *env,
5024 			       const struct bpf_reg_state *reg, int off,
5025 			       int size, bool strict_alignment_once)
5026 {
5027 	bool strict = env->strict_alignment || strict_alignment_once;
5028 	const char *pointer_desc = "";
5029 
5030 	switch (reg->type) {
5031 	case PTR_TO_PACKET:
5032 	case PTR_TO_PACKET_META:
5033 		/* Special case, because of NET_IP_ALIGN. Given metadata sits
5034 		 * right in front, treat it the very same way.
5035 		 */
5036 		return check_pkt_ptr_alignment(env, reg, off, size, strict);
5037 	case PTR_TO_FLOW_KEYS:
5038 		pointer_desc = "flow keys ";
5039 		break;
5040 	case PTR_TO_MAP_KEY:
5041 		pointer_desc = "key ";
5042 		break;
5043 	case PTR_TO_MAP_VALUE:
5044 		pointer_desc = "value ";
5045 		break;
5046 	case PTR_TO_CTX:
5047 		pointer_desc = "context ";
5048 		break;
5049 	case PTR_TO_STACK:
5050 		pointer_desc = "stack ";
5051 		/* The stack spill tracking logic in check_stack_write_fixed_off()
5052 		 * and check_stack_read_fixed_off() relies on stack accesses being
5053 		 * aligned.
5054 		 */
5055 		strict = true;
5056 		break;
5057 	case PTR_TO_SOCKET:
5058 		pointer_desc = "sock ";
5059 		break;
5060 	case PTR_TO_SOCK_COMMON:
5061 		pointer_desc = "sock_common ";
5062 		break;
5063 	case PTR_TO_TCP_SOCK:
5064 		pointer_desc = "tcp_sock ";
5065 		break;
5066 	case PTR_TO_XDP_SOCK:
5067 		pointer_desc = "xdp_sock ";
5068 		break;
5069 	default:
5070 		break;
5071 	}
5072 	return check_generic_ptr_alignment(env, reg, pointer_desc, off, size,
5073 					   strict);
5074 }
5075 
5076 static int update_stack_depth(struct bpf_verifier_env *env,
5077 			      const struct bpf_func_state *func,
5078 			      int off)
5079 {
5080 	u16 stack = env->subprog_info[func->subprogno].stack_depth;
5081 
5082 	if (stack >= -off)
5083 		return 0;
5084 
5085 	/* update known max for given subprogram */
5086 	env->subprog_info[func->subprogno].stack_depth = -off;
5087 	return 0;
5088 }
5089 
5090 /* starting from main bpf function walk all instructions of the function
5091  * and recursively walk all callees that the given function can call.
5092  * Ignore jump and exit insns.
5093  * Since recursion is prevented by check_cfg() this algorithm
5094  * only needs a local stack of MAX_CALL_FRAMES to remember callsites
5095  */
5096 static int check_max_stack_depth(struct bpf_verifier_env *env)
5097 {
5098 	int depth = 0, frame = 0, idx = 0, i = 0, subprog_end;
5099 	struct bpf_subprog_info *subprog = env->subprog_info;
5100 	struct bpf_insn *insn = env->prog->insnsi;
5101 	bool tail_call_reachable = false;
5102 	int ret_insn[MAX_CALL_FRAMES];
5103 	int ret_prog[MAX_CALL_FRAMES];
5104 	int j;
5105 
5106 process_func:
5107 	/* protect against potential stack overflow that might happen when
5108 	 * bpf2bpf calls get combined with tailcalls. Limit the caller's stack
5109 	 * depth for such a case down to 256 so that the worst case scenario
5110 	 * would result in an 8k stack size (32, which is the tail call limit,
5111 	 * times 256 = 8k).
5112 	 *
5113 	 * To get the idea what might happen, see an example:
5114 	 * func1 -> sub rsp, 128
5115 	 *  subfunc1 -> sub rsp, 256
5116 	 *  tailcall1 -> add rsp, 256
5117 	 *   func2 -> sub rsp, 192 (total stack size = 128 + 192 = 320)
5118 	 *   subfunc2 -> sub rsp, 64
5119 	 *   subfunc22 -> sub rsp, 128
5120 	 *   tailcall2 -> add rsp, 128
5121 	 *    func3 -> sub rsp, 32 (total stack size 128 + 192 + 64 + 32 = 416)
5122 	 *
5123 	 * a tail call will unwind the current stack frame but it will not get rid
5124 	 * of the caller's stack as shown in the example above.
5125 	 */
5126 	if (idx && subprog[idx].has_tail_call && depth >= 256) {
5127 		verbose(env,
5128 			"tail_calls are not allowed when call stack of previous frames is %d bytes. Too large\n",
5129 			depth);
5130 		return -EACCES;
5131 	}
5132 	/* round up to 32-bytes, since this is granularity
5133 	 * of interpreter stack size
5134 	 */
5135 	depth += round_up(max_t(u32, subprog[idx].stack_depth, 1), 32);
5136 	if (depth > MAX_BPF_STACK) {
5137 		verbose(env, "combined stack size of %d calls is %d. Too large\n",
5138 			frame + 1, depth);
5139 		return -EACCES;
5140 	}
5141 continue_func:
5142 	subprog_end = subprog[idx + 1].start;
5143 	for (; i < subprog_end; i++) {
5144 		int next_insn;
5145 
5146 		if (!bpf_pseudo_call(insn + i) && !bpf_pseudo_func(insn + i))
5147 			continue;
5148 		/* remember insn and function to return to */
5149 		ret_insn[frame] = i + 1;
5150 		ret_prog[frame] = idx;
5151 
5152 		/* find the callee */
5153 		next_insn = i + insn[i].imm + 1;
5154 		idx = find_subprog(env, next_insn);
5155 		if (idx < 0) {
5156 			WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
5157 				  next_insn);
5158 			return -EFAULT;
5159 		}
5160 		if (subprog[idx].is_async_cb) {
5161 			if (subprog[idx].has_tail_call) {
5162 				verbose(env, "verifier bug. subprog has tail_call and async cb\n");
5163 				return -EFAULT;
5164 			}
5165 			 /* async callbacks don't increase bpf prog stack size */
5166 			continue;
5167 		}
5168 		i = next_insn;
5169 
5170 		if (subprog[idx].has_tail_call)
5171 			tail_call_reachable = true;
5172 
5173 		frame++;
5174 		if (frame >= MAX_CALL_FRAMES) {
5175 			verbose(env, "the call stack of %d frames is too deep !\n",
5176 				frame);
5177 			return -E2BIG;
5178 		}
5179 		goto process_func;
5180 	}
5181 	/* if tail call got detected across bpf2bpf calls then mark each of the
5182 	 * currently present subprog frames as tail call reachable subprogs;
5183 	 * this info will be utilized by JIT so that we will be preserving the
5184 	 * tail call counter throughout bpf2bpf calls combined with tailcalls
5185 	 */
5186 	if (tail_call_reachable)
5187 		for (j = 0; j < frame; j++)
5188 			subprog[ret_prog[j]].tail_call_reachable = true;
5189 	if (subprog[0].tail_call_reachable)
5190 		env->prog->aux->tail_call_reachable = true;
5191 
5192 	/* end of for() loop means the last insn of the 'subprog'
5193 	 * was reached. Doesn't matter whether it was JA or EXIT
5194 	 */
5195 	if (frame == 0)
5196 		return 0;
5197 	depth -= round_up(max_t(u32, subprog[idx].stack_depth, 1), 32);
5198 	frame--;
5199 	i = ret_insn[frame];
5200 	idx = ret_prog[frame];
5201 	goto continue_func;
5202 }
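
/* Worked example (illustrative): stack depths are rounded up to the 32-byte
 * interpreter granularity before being summed. If main uses 100 bytes and
 * calls a subprog that uses 50 bytes, the accumulated depth is
 * round_up(100, 32) + round_up(50, 32) = 128 + 64 = 192, which is below
 * MAX_BPF_STACK (512) and therefore accepted.
 */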
5203 
5204 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
5205 static int get_callee_stack_depth(struct bpf_verifier_env *env,
5206 				  const struct bpf_insn *insn, int idx)
5207 {
5208 	int start = idx + insn->imm + 1, subprog;
5209 
5210 	subprog = find_subprog(env, start);
5211 	if (subprog < 0) {
5212 		WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
5213 			  start);
5214 		return -EFAULT;
5215 	}
5216 	return env->subprog_info[subprog].stack_depth;
5217 }
5218 #endif
5219 
5220 static int __check_buffer_access(struct bpf_verifier_env *env,
5221 				 const char *buf_info,
5222 				 const struct bpf_reg_state *reg,
5223 				 int regno, int off, int size)
5224 {
5225 	if (off < 0) {
5226 		verbose(env,
5227 			"R%d invalid %s buffer access: off=%d, size=%d\n",
5228 			regno, buf_info, off, size);
5229 		return -EACCES;
5230 	}
5231 	if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
5232 		char tn_buf[48];
5233 
5234 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
5235 		verbose(env,
5236 			"R%d invalid variable buffer offset: off=%d, var_off=%s\n",
5237 			regno, off, tn_buf);
5238 		return -EACCES;
5239 	}
5240 
5241 	return 0;
5242 }
5243 
5244 static int check_tp_buffer_access(struct bpf_verifier_env *env,
5245 				  const struct bpf_reg_state *reg,
5246 				  int regno, int off, int size)
5247 {
5248 	int err;
5249 
5250 	err = __check_buffer_access(env, "tracepoint", reg, regno, off, size);
5251 	if (err)
5252 		return err;
5253 
5254 	if (off + size > env->prog->aux->max_tp_access)
5255 		env->prog->aux->max_tp_access = off + size;
5256 
5257 	return 0;
5258 }
5259 
5260 static int check_buffer_access(struct bpf_verifier_env *env,
5261 			       const struct bpf_reg_state *reg,
5262 			       int regno, int off, int size,
5263 			       bool zero_size_allowed,
5264 			       u32 *max_access)
5265 {
5266 	const char *buf_info = type_is_rdonly_mem(reg->type) ? "rdonly" : "rdwr";
5267 	int err;
5268 
5269 	err = __check_buffer_access(env, buf_info, reg, regno, off, size);
5270 	if (err)
5271 		return err;
5272 
5273 	if (off + size > *max_access)
5274 		*max_access = off + size;
5275 
5276 	return 0;
5277 }
5278 
5279 /* BPF architecture zero extends alu32 ops into 64-bit registers */
5280 static void zext_32_to_64(struct bpf_reg_state *reg)
5281 {
5282 	reg->var_off = tnum_subreg(reg->var_off);
5283 	__reg_assign_32_into_64(reg);
5284 }
5285 
5286 /* truncate register to smaller size (in bytes)
5287  * must be called with size < BPF_REG_SIZE
5288  */
5289 static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
5290 {
5291 	u64 mask;
5292 
5293 	/* clear high bits in bit representation */
5294 	reg->var_off = tnum_cast(reg->var_off, size);
5295 
5296 	/* fix arithmetic bounds */
5297 	mask = ((u64)1 << (size * 8)) - 1;
5298 	if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) {
5299 		reg->umin_value &= mask;
5300 		reg->umax_value &= mask;
5301 	} else {
5302 		reg->umin_value = 0;
5303 		reg->umax_value = mask;
5304 	}
5305 	reg->smin_value = reg->umin_value;
5306 	reg->smax_value = reg->umax_value;
5307 
5308 	/* If size is smaller than 32bit register the 32bit register
5309 	 * values are also truncated so we push 64-bit bounds into
5310 	 * 32-bit bounds. Above were truncated < 32-bits already.
5311 	 */
5312 	if (size >= 4)
5313 		return;
5314 	__reg_combine_64_into_32(reg);
5315 }
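
/* Worked example (illustrative): truncating to size == 2 uses mask = 0xffff.
 * A register with umin_value = 0x10004 and umax_value = 0x10050 has identical
 * high bits, so the bounds become [0x4, 0x50]. With umin_value = 0xfffe and
 * umax_value = 0x10002 the high bits differ, so the bounds collapse to the
 * conservative [0, 0xffff].
 */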
5316 
5317 static bool bpf_map_is_rdonly(const struct bpf_map *map)
5318 {
5319 	/* A map is considered read-only if the following conditions are true:
5320 	 *
5321 	 * 1) BPF program side cannot change any of the map content. The
5322 	 *    BPF_F_RDONLY_PROG flag was set at map creation time and stays
5323 	 *    in effect throughout the lifetime of the map.
5324 	 * 2) The map value(s) have been initialized from user space by a
5325 	 *    loader and then "frozen", such that no new map update/delete
5326 	 *    operations from syscall side are possible for the rest of
5327 	 *    the map's lifetime from that point onwards.
5328 	 * 3) Any parallel/pending map update/delete operations from syscall
5329 	 *    side have been completed. Only after that point, it's safe to
5330 	 *    assume that map value(s) are immutable.
5331 	 */
5332 	return (map->map_flags & BPF_F_RDONLY_PROG) &&
5333 	       READ_ONCE(map->frozen) &&
5334 	       !bpf_map_write_active(map);
5335 }
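
/* Example (illustrative user-space sketch, assuming libbpf): a loader can
 * satisfy the conditions above by creating the map with BPF_F_RDONLY_PROG,
 * populating it, and then freezing it before the program runs:
 *
 *	LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_RDONLY_PROG);
 *	int fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "consts", 4, 8, 1, &opts);
 *	__u32 key = 0;
 *	__u64 val = 42;
 *
 *	bpf_map_update_elem(fd, &key, &val, BPF_ANY);
 *	bpf_map_freeze(fd);	// no further syscall-side writes allowed
 */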
5336 
5337 static int bpf_map_direct_read(struct bpf_map *map, int off, int size, u64 *val)
5338 {
5339 	void *ptr;
5340 	u64 addr;
5341 	int err;
5342 
5343 	err = map->ops->map_direct_value_addr(map, &addr, off);
5344 	if (err)
5345 		return err;
5346 	ptr = (void *)(long)addr + off;
5347 
5348 	switch (size) {
5349 	case sizeof(u8):
5350 		*val = (u64)*(u8 *)ptr;
5351 		break;
5352 	case sizeof(u16):
5353 		*val = (u64)*(u16 *)ptr;
5354 		break;
5355 	case sizeof(u32):
5356 		*val = (u64)*(u32 *)ptr;
5357 		break;
5358 	case sizeof(u64):
5359 		*val = *(u64 *)ptr;
5360 		break;
5361 	default:
5362 		return -EINVAL;
5363 	}
5364 	return 0;
5365 }
5366 
5367 #define BTF_TYPE_SAFE_RCU(__type)  __PASTE(__type, __safe_rcu)
5368 #define BTF_TYPE_SAFE_RCU_OR_NULL(__type)  __PASTE(__type, __safe_rcu_or_null)
5369 #define BTF_TYPE_SAFE_TRUSTED(__type)  __PASTE(__type, __safe_trusted)
5370 
5371 /*
5372  * Allow-list a few fields as RCU trusted or fully trusted.
5373  * This logic doesn't allow mixed tagging and will be removed once GCC supports
5374  * btf_type_tag.
5375  */
5376 
5377 /* RCU trusted: these fields are trusted in RCU CS and never NULL */
5378 BTF_TYPE_SAFE_RCU(struct task_struct) {
5379 	const cpumask_t *cpus_ptr;
5380 	struct css_set __rcu *cgroups;
5381 	struct task_struct __rcu *real_parent;
5382 	struct task_struct *group_leader;
5383 };
5384 
5385 BTF_TYPE_SAFE_RCU(struct cgroup) {
5386 	/* cgrp->kn is always accessible as documented in kernel/cgroup/cgroup.c */
5387 	struct kernfs_node *kn;
5388 };
5389 
5390 BTF_TYPE_SAFE_RCU(struct css_set) {
5391 	struct cgroup *dfl_cgrp;
5392 };
5393 
5394 /* RCU trusted: these fields are trusted in RCU CS and can be NULL */
5395 BTF_TYPE_SAFE_RCU_OR_NULL(struct mm_struct) {
5396 	struct file __rcu *exe_file;
5397 };
5398 
5399 /* skb->sk, req->sk are not RCU protected, but we mark them as such
5400  * because bpf prog accessible sockets are SOCK_RCU_FREE.
5401  */
5402 BTF_TYPE_SAFE_RCU_OR_NULL(struct sk_buff) {
5403 	struct sock *sk;
5404 };
5405 
5406 BTF_TYPE_SAFE_RCU_OR_NULL(struct request_sock) {
5407 	struct sock *sk;
5408 };
5409 
5410 /* full trusted: these fields are trusted even outside of RCU CS and never NULL */
5411 BTF_TYPE_SAFE_TRUSTED(struct bpf_iter_meta) {
5412 	struct seq_file *seq;
5413 };
5414 
5415 BTF_TYPE_SAFE_TRUSTED(struct bpf_iter__task) {
5416 	struct bpf_iter_meta *meta;
5417 	struct task_struct *task;
5418 };
5419 
5420 BTF_TYPE_SAFE_TRUSTED(struct linux_binprm) {
5421 	struct file *file;
5422 };
5423 
5424 BTF_TYPE_SAFE_TRUSTED(struct file) {
5425 	struct inode *f_inode;
5426 };
5427 
5428 BTF_TYPE_SAFE_TRUSTED(struct dentry) {
5429 	/* no negative dentry-s in places where bpf can see it */
5430 	struct inode *d_inode;
5431 };
5432 
5433 BTF_TYPE_SAFE_TRUSTED(struct socket) {
5434 	struct sock *sk;
5435 };
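
/* Example (illustrative BPF program sketch): fields in the __safe_rcu lists
 * above stay trusted while the program holds the RCU read lock, assuming the
 * bpf_rcu_read_lock()/bpf_rcu_read_unlock() kfuncs are usable by the program
 * type:
 *
 *	struct task_struct *task = bpf_get_current_task_btf();
 *
 *	bpf_rcu_read_lock();
 *	struct task_struct *parent = task->real_parent;	// MEM_RCU, trusted in CS
 *	... use parent while still inside the critical section ...
 *	bpf_rcu_read_unlock();
 *
 * The same dereference outside of the critical section in a sleepable
 * program yields an untrusted pointer (see check_ptr_to_btf_access()).
 */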
5436 
5437 static bool type_is_rcu(struct bpf_verifier_env *env,
5438 			struct bpf_reg_state *reg,
5439 			const char *field_name, u32 btf_id)
5440 {
5441 	BTF_TYPE_EMIT(BTF_TYPE_SAFE_RCU(struct task_struct));
5442 	BTF_TYPE_EMIT(BTF_TYPE_SAFE_RCU(struct cgroup));
5443 	BTF_TYPE_EMIT(BTF_TYPE_SAFE_RCU(struct css_set));
5444 
5445 	return btf_nested_type_is_trusted(&env->log, reg, field_name, btf_id, "__safe_rcu");
5446 }
5447 
5448 static bool type_is_rcu_or_null(struct bpf_verifier_env *env,
5449 				struct bpf_reg_state *reg,
5450 				const char *field_name, u32 btf_id)
5451 {
5452 	BTF_TYPE_EMIT(BTF_TYPE_SAFE_RCU_OR_NULL(struct mm_struct));
5453 	BTF_TYPE_EMIT(BTF_TYPE_SAFE_RCU_OR_NULL(struct sk_buff));
5454 	BTF_TYPE_EMIT(BTF_TYPE_SAFE_RCU_OR_NULL(struct request_sock));
5455 
5456 	return btf_nested_type_is_trusted(&env->log, reg, field_name, btf_id, "__safe_rcu_or_null");
5457 }
5458 
5459 static bool type_is_trusted(struct bpf_verifier_env *env,
5460 			    struct bpf_reg_state *reg,
5461 			    const char *field_name, u32 btf_id)
5462 {
5463 	BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct bpf_iter_meta));
5464 	BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct bpf_iter__task));
5465 	BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct linux_binprm));
5466 	BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct file));
5467 	BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct dentry));
5468 	BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct socket));
5469 
5470 	return btf_nested_type_is_trusted(&env->log, reg, field_name, btf_id, "__safe_trusted");
5471 }
5472 
5473 static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
5474 				   struct bpf_reg_state *regs,
5475 				   int regno, int off, int size,
5476 				   enum bpf_access_type atype,
5477 				   int value_regno)
5478 {
5479 	struct bpf_reg_state *reg = regs + regno;
5480 	const struct btf_type *t = btf_type_by_id(reg->btf, reg->btf_id);
5481 	const char *tname = btf_name_by_offset(reg->btf, t->name_off);
5482 	const char *field_name = NULL;
5483 	enum bpf_type_flag flag = 0;
5484 	u32 btf_id = 0;
5485 	int ret;
5486 
5487 	if (!env->allow_ptr_leaks) {
5488 		verbose(env,
5489 			"'struct %s' access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN\n",
5490 			tname);
5491 		return -EPERM;
5492 	}
5493 	if (!env->prog->gpl_compatible && btf_is_kernel(reg->btf)) {
5494 		verbose(env,
5495 			"Cannot access kernel 'struct %s' from non-GPL compatible program\n",
5496 			tname);
5497 		return -EINVAL;
5498 	}
5499 	if (off < 0) {
5500 		verbose(env,
5501 			"R%d is ptr_%s invalid negative access: off=%d\n",
5502 			regno, tname, off);
5503 		return -EACCES;
5504 	}
5505 	if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
5506 		char tn_buf[48];
5507 
5508 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
5509 		verbose(env,
5510 			"R%d is ptr_%s invalid variable offset: off=%d, var_off=%s\n",
5511 			regno, tname, off, tn_buf);
5512 		return -EACCES;
5513 	}
5514 
5515 	if (reg->type & MEM_USER) {
5516 		verbose(env,
5517 			"R%d is ptr_%s access user memory: off=%d\n",
5518 			regno, tname, off);
5519 		return -EACCES;
5520 	}
5521 
5522 	if (reg->type & MEM_PERCPU) {
5523 		verbose(env,
5524 			"R%d is ptr_%s access percpu memory: off=%d\n",
5525 			regno, tname, off);
5526 		return -EACCES;
5527 	}
5528 
5529 	if (env->ops->btf_struct_access && !type_is_alloc(reg->type) && atype == BPF_WRITE) {
5530 		if (!btf_is_kernel(reg->btf)) {
5531 			verbose(env, "verifier internal error: reg->btf must be kernel btf\n");
5532 			return -EFAULT;
5533 		}
5534 		ret = env->ops->btf_struct_access(&env->log, reg, off, size);
5535 	} else {
5536 		/* Writes are permitted with default btf_struct_access for
5537 		 * program allocated objects (which always have ref_obj_id > 0),
5538 		 * but not for untrusted PTR_TO_BTF_ID | MEM_ALLOC.
5539 		 */
5540 		if (atype != BPF_READ && reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) {
5541 			verbose(env, "only read is supported\n");
5542 			return -EACCES;
5543 		}
5544 
5545 		if (type_is_alloc(reg->type) && !type_is_non_owning_ref(reg->type) &&
5546 		    !reg->ref_obj_id) {
5547 			verbose(env, "verifier internal error: ref_obj_id for allocated object must be non-zero\n");
5548 			return -EFAULT;
5549 		}
5550 
5551 		ret = btf_struct_access(&env->log, reg, off, size, atype, &btf_id, &flag, &field_name);
5552 	}
5553 
5554 	if (ret < 0)
5555 		return ret;
5556 
5557 	if (ret != PTR_TO_BTF_ID) {
5558 		/* just mark; */
5559 
5560 	} else if (type_flag(reg->type) & PTR_UNTRUSTED) {
5561 		/* If this is an untrusted pointer, all pointers formed by walking it
5562 		 * also inherit the untrusted flag.
5563 		 */
5564 		flag = PTR_UNTRUSTED;
5565 
5566 	} else if (is_trusted_reg(reg) || is_rcu_reg(reg)) {
5567 		/* By default any pointer obtained from walking a trusted pointer is no
5568 		 * longer trusted, unless the field being accessed has explicitly been
5569 		 * marked as inheriting its parent's state of trust (either full or RCU).
5570 		 * For example:
5571 		 * 'cgroups' pointer is untrusted if task->cgroups dereference
5572 		 * happened in a sleepable program outside of bpf_rcu_read_lock()
5573 		 * section. In a non-sleepable program it's trusted while in RCU CS (aka MEM_RCU).
5574 		 * Note bpf_rcu_read_unlock() converts MEM_RCU pointers to PTR_UNTRUSTED.
5575 		 *
5576 		 * A regular RCU-protected pointer with __rcu tag can also be deemed
5577 		 * trusted if we are in an RCU CS. Such pointer can be NULL.
5578 		 */
5579 		if (type_is_trusted(env, reg, field_name, btf_id)) {
5580 			flag |= PTR_TRUSTED;
5581 		} else if (in_rcu_cs(env) && !type_may_be_null(reg->type)) {
5582 			if (type_is_rcu(env, reg, field_name, btf_id)) {
5583 				/* ignore __rcu tag and mark it MEM_RCU */
5584 				flag |= MEM_RCU;
5585 			} else if (flag & MEM_RCU ||
5586 				   type_is_rcu_or_null(env, reg, field_name, btf_id)) {
5587 				/* __rcu tagged pointers can be NULL */
5588 				flag |= MEM_RCU | PTR_MAYBE_NULL;
5589 			} else if (flag & (MEM_PERCPU | MEM_USER)) {
5590 				/* keep as-is */
5591 			} else {
5592 				/* walking unknown pointers yields old deprecated PTR_TO_BTF_ID */
5593 				clear_trusted_flags(&flag);
5594 			}
5595 		} else {
5596 			/*
5597 			 * If we are not in an RCU CS, or the MEM_RCU pointer can be
5598 			 * NULL, then aggressively mark it as untrusted; otherwise such
5599 			 * pointers would be plain PTR_TO_BTF_ID without flags
5600 			 * and would be allowed to be passed into helpers for
5601 			 * compat reasons.
5602 			 */
5603 			flag = PTR_UNTRUSTED;
5604 		}
5605 	} else {
5606 		/* Old compat. Deprecated */
5607 		clear_trusted_flags(&flag);
5608 	}
5609 
5610 	if (atype == BPF_READ && value_regno >= 0)
5611 		mark_btf_ld_reg(env, regs, value_regno, ret, reg->btf, btf_id, flag);
5612 
5613 	return 0;
5614 }
5615 
5616 static int check_ptr_to_map_access(struct bpf_verifier_env *env,
5617 				   struct bpf_reg_state *regs,
5618 				   int regno, int off, int size,
5619 				   enum bpf_access_type atype,
5620 				   int value_regno)
5621 {
5622 	struct bpf_reg_state *reg = regs + regno;
5623 	struct bpf_map *map = reg->map_ptr;
5624 	struct bpf_reg_state map_reg;
5625 	enum bpf_type_flag flag = 0;
5626 	const struct btf_type *t;
5627 	const char *tname;
5628 	u32 btf_id;
5629 	int ret;
5630 
5631 	if (!btf_vmlinux) {
5632 		verbose(env, "map_ptr access not supported without CONFIG_DEBUG_INFO_BTF\n");
5633 		return -ENOTSUPP;
5634 	}
5635 
5636 	if (!map->ops->map_btf_id || !*map->ops->map_btf_id) {
5637 		verbose(env, "map_ptr access not supported for map type %d\n",
5638 			map->map_type);
5639 		return -ENOTSUPP;
5640 	}
5641 
5642 	t = btf_type_by_id(btf_vmlinux, *map->ops->map_btf_id);
5643 	tname = btf_name_by_offset(btf_vmlinux, t->name_off);
5644 
5645 	if (!env->allow_ptr_leaks) {
5646 		verbose(env,
5647 			"'struct %s' access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN\n",
5648 			tname);
5649 		return -EPERM;
5650 	}
5651 
5652 	if (off < 0) {
5653 		verbose(env, "R%d is %s invalid negative access: off=%d\n",
5654 			regno, tname, off);
5655 		return -EACCES;
5656 	}
5657 
5658 	if (atype != BPF_READ) {
5659 		verbose(env, "only read from %s is supported\n", tname);
5660 		return -EACCES;
5661 	}
5662 
5663 	/* Simulate access to a PTR_TO_BTF_ID */
5664 	memset(&map_reg, 0, sizeof(map_reg));
5665 	mark_btf_ld_reg(env, &map_reg, 0, PTR_TO_BTF_ID, btf_vmlinux, *map->ops->map_btf_id, 0);
5666 	ret = btf_struct_access(&env->log, &map_reg, off, size, atype, &btf_id, &flag, NULL);
5667 	if (ret < 0)
5668 		return ret;
5669 
5670 	if (value_regno >= 0)
5671 		mark_btf_ld_reg(env, regs, value_regno, ret, btf_vmlinux, btf_id, flag);
5672 
5673 	return 0;
5674 }
5675 
5676 /* Check that the stack access at the given offset is within bounds. The
5677  * maximum valid offset is -1.
5678  *
5679  * The minimum valid offset is -MAX_BPF_STACK for writes, and
5680  * -state->allocated_stack for reads.
5681  */
5682 static int check_stack_slot_within_bounds(int off,
5683 					  struct bpf_func_state *state,
5684 					  enum bpf_access_type t)
5685 {
5686 	int min_valid_off;
5687 
5688 	if (t == BPF_WRITE)
5689 		min_valid_off = -MAX_BPF_STACK;
5690 	else
5691 		min_valid_off = -state->allocated_stack;
5692 
5693 	if (off < min_valid_off || off > -1)
5694 		return -EACCES;
5695 	return 0;
5696 }
5697 
5698 /* Check that the stack access at 'regno + off' falls within the maximum stack
5699  * bounds.
5700  *
5701  * 'off' includes `regno->offset`, but not its dynamic part (if any).
5702  */
5703 static int check_stack_access_within_bounds(
5704 		struct bpf_verifier_env *env,
5705 		int regno, int off, int access_size,
5706 		enum bpf_access_src src, enum bpf_access_type type)
5707 {
5708 	struct bpf_reg_state *regs = cur_regs(env);
5709 	struct bpf_reg_state *reg = regs + regno;
5710 	struct bpf_func_state *state = func(env, reg);
5711 	int min_off, max_off;
5712 	int err;
5713 	char *err_extra;
5714 
5715 	if (src == ACCESS_HELPER)
5716 		/* We don't know if helpers are reading or writing (or both). */
5717 		err_extra = " indirect access to";
5718 	else if (type == BPF_READ)
5719 		err_extra = " read from";
5720 	else
5721 		err_extra = " write to";
5722 
5723 	if (tnum_is_const(reg->var_off)) {
5724 		min_off = reg->var_off.value + off;
5725 		if (access_size > 0)
5726 			max_off = min_off + access_size - 1;
5727 		else
5728 			max_off = min_off;
5729 	} else {
5730 		if (reg->smax_value >= BPF_MAX_VAR_OFF ||
5731 		    reg->smin_value <= -BPF_MAX_VAR_OFF) {
5732 			verbose(env, "invalid unbounded variable-offset%s stack R%d\n",
5733 				err_extra, regno);
5734 			return -EACCES;
5735 		}
5736 		min_off = reg->smin_value + off;
5737 		if (access_size > 0)
5738 			max_off = reg->smax_value + off + access_size - 1;
5739 		else
5740 			max_off = min_off;
5741 	}
5742 
5743 	err = check_stack_slot_within_bounds(min_off, state, type);
5744 	if (!err)
5745 		err = check_stack_slot_within_bounds(max_off, state, type);
5746 
5747 	if (err) {
5748 		if (tnum_is_const(reg->var_off)) {
5749 			verbose(env, "invalid%s stack R%d off=%d size=%d\n",
5750 				err_extra, regno, off, access_size);
5751 		} else {
5752 			char tn_buf[48];
5753 
5754 			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
5755 			verbose(env, "invalid variable-offset%s stack R%d var_off=%s size=%d\n",
5756 				err_extra, regno, tn_buf, access_size);
5757 		}
5758 	}
5759 	return err;
5760 }
5761 
5762 /* check whether memory at (regno + off) is accessible for t = (read | write)
5763  * if t==write, value_regno is a register which value is stored into memory
5764  * if t==read, value_regno is a register which will receive the value from memory
5765  * if t==write && value_regno==-1, some unknown value is stored into memory
5766  * if t==read && value_regno==-1, don't care what we read from memory
5767  */
5768 static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno,
5769 			    int off, int bpf_size, enum bpf_access_type t,
5770 			    int value_regno, bool strict_alignment_once)
5771 {
5772 	struct bpf_reg_state *regs = cur_regs(env);
5773 	struct bpf_reg_state *reg = regs + regno;
5774 	struct bpf_func_state *state;
5775 	int size, err = 0;
5776 
5777 	size = bpf_size_to_bytes(bpf_size);
5778 	if (size < 0)
5779 		return size;
5780 
5781 	/* alignment checks will add in reg->off themselves */
5782 	err = check_ptr_alignment(env, reg, off, size, strict_alignment_once);
5783 	if (err)
5784 		return err;
5785 
5786 	/* for access checks, reg->off is just part of off */
5787 	off += reg->off;
5788 
5789 	if (reg->type == PTR_TO_MAP_KEY) {
5790 		if (t == BPF_WRITE) {
5791 			verbose(env, "write to change key R%d not allowed\n", regno);
5792 			return -EACCES;
5793 		}
5794 
5795 		err = check_mem_region_access(env, regno, off, size,
5796 					      reg->map_ptr->key_size, false);
5797 		if (err)
5798 			return err;
5799 		if (value_regno >= 0)
5800 			mark_reg_unknown(env, regs, value_regno);
5801 	} else if (reg->type == PTR_TO_MAP_VALUE) {
5802 		struct btf_field *kptr_field = NULL;
5803 
5804 		if (t == BPF_WRITE && value_regno >= 0 &&
5805 		    is_pointer_value(env, value_regno)) {
5806 			verbose(env, "R%d leaks addr into map\n", value_regno);
5807 			return -EACCES;
5808 		}
5809 		err = check_map_access_type(env, regno, off, size, t);
5810 		if (err)
5811 			return err;
5812 		err = check_map_access(env, regno, off, size, false, ACCESS_DIRECT);
5813 		if (err)
5814 			return err;
5815 		if (tnum_is_const(reg->var_off))
5816 			kptr_field = btf_record_find(reg->map_ptr->record,
5817 						     off + reg->var_off.value, BPF_KPTR);
5818 		if (kptr_field) {
5819 			err = check_map_kptr_access(env, regno, value_regno, insn_idx, kptr_field);
5820 		} else if (t == BPF_READ && value_regno >= 0) {
5821 			struct bpf_map *map = reg->map_ptr;
5822 
5823 			/* if map is read-only, track its contents as scalars */
5824 			if (tnum_is_const(reg->var_off) &&
5825 			    bpf_map_is_rdonly(map) &&
5826 			    map->ops->map_direct_value_addr) {
5827 				int map_off = off + reg->var_off.value;
5828 				u64 val = 0;
5829 
5830 				err = bpf_map_direct_read(map, map_off, size,
5831 							  &val);
5832 				if (err)
5833 					return err;
5834 
5835 				regs[value_regno].type = SCALAR_VALUE;
5836 				__mark_reg_known(&regs[value_regno], val);
5837 			} else {
5838 				mark_reg_unknown(env, regs, value_regno);
5839 			}
5840 		}
5841 	} else if (base_type(reg->type) == PTR_TO_MEM) {
5842 		bool rdonly_mem = type_is_rdonly_mem(reg->type);
5843 
5844 		if (type_may_be_null(reg->type)) {
5845 			verbose(env, "R%d invalid mem access '%s'\n", regno,
5846 				reg_type_str(env, reg->type));
5847 			return -EACCES;
5848 		}
5849 
5850 		if (t == BPF_WRITE && rdonly_mem) {
5851 			verbose(env, "R%d cannot write into %s\n",
5852 				regno, reg_type_str(env, reg->type));
5853 			return -EACCES;
5854 		}
5855 
5856 		if (t == BPF_WRITE && value_regno >= 0 &&
5857 		    is_pointer_value(env, value_regno)) {
5858 			verbose(env, "R%d leaks addr into mem\n", value_regno);
5859 			return -EACCES;
5860 		}
5861 
5862 		err = check_mem_region_access(env, regno, off, size,
5863 					      reg->mem_size, false);
5864 		if (!err && value_regno >= 0 && (t == BPF_READ || rdonly_mem))
5865 			mark_reg_unknown(env, regs, value_regno);
5866 	} else if (reg->type == PTR_TO_CTX) {
5867 		enum bpf_reg_type reg_type = SCALAR_VALUE;
5868 		struct btf *btf = NULL;
5869 		u32 btf_id = 0;
5870 
5871 		if (t == BPF_WRITE && value_regno >= 0 &&
5872 		    is_pointer_value(env, value_regno)) {
5873 			verbose(env, "R%d leaks addr into ctx\n", value_regno);
5874 			return -EACCES;
5875 		}
5876 
5877 		err = check_ptr_off_reg(env, reg, regno);
5878 		if (err < 0)
5879 			return err;
5880 
5881 		err = check_ctx_access(env, insn_idx, off, size, t, &reg_type, &btf,
5882 				       &btf_id);
5883 		if (err)
5884 			verbose_linfo(env, insn_idx, "; ");
5885 		if (!err && t == BPF_READ && value_regno >= 0) {
5886 			/* ctx access returns either a scalar, or a
5887 			 * PTR_TO_PACKET[_META,_END]. In the latter
5888 			 * case, we know the offset is zero.
5889 			 */
5890 			if (reg_type == SCALAR_VALUE) {
5891 				mark_reg_unknown(env, regs, value_regno);
5892 			} else {
5893 				mark_reg_known_zero(env, regs,
5894 						    value_regno);
5895 				if (type_may_be_null(reg_type))
5896 					regs[value_regno].id = ++env->id_gen;
5897 				/* A load of ctx field could have different
5898 				 * actual load size with the one encoded in the
5899 				 * insn. When the dst is PTR, it is for sure not
5900 				 * a sub-register.
5901 				 */
5902 				regs[value_regno].subreg_def = DEF_NOT_SUBREG;
5903 				if (base_type(reg_type) == PTR_TO_BTF_ID) {
5904 					regs[value_regno].btf = btf;
5905 					regs[value_regno].btf_id = btf_id;
5906 				}
5907 			}
5908 			regs[value_regno].type = reg_type;
5909 		}
5910 
5911 	} else if (reg->type == PTR_TO_STACK) {
5912 		/* Basic bounds checks. */
5913 		err = check_stack_access_within_bounds(env, regno, off, size, ACCESS_DIRECT, t);
5914 		if (err)
5915 			return err;
5916 
5917 		state = func(env, reg);
5918 		err = update_stack_depth(env, state, off);
5919 		if (err)
5920 			return err;
5921 
5922 		if (t == BPF_READ)
5923 			err = check_stack_read(env, regno, off, size,
5924 					       value_regno);
5925 		else
5926 			err = check_stack_write(env, regno, off, size,
5927 						value_regno, insn_idx);
5928 	} else if (reg_is_pkt_pointer(reg)) {
5929 		if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) {
5930 			verbose(env, "cannot write into packet\n");
5931 			return -EACCES;
5932 		}
5933 		if (t == BPF_WRITE && value_regno >= 0 &&
5934 		    is_pointer_value(env, value_regno)) {
5935 			verbose(env, "R%d leaks addr into packet\n",
5936 				value_regno);
5937 			return -EACCES;
5938 		}
5939 		err = check_packet_access(env, regno, off, size, false);
5940 		if (!err && t == BPF_READ && value_regno >= 0)
5941 			mark_reg_unknown(env, regs, value_regno);
5942 	} else if (reg->type == PTR_TO_FLOW_KEYS) {
5943 		if (t == BPF_WRITE && value_regno >= 0 &&
5944 		    is_pointer_value(env, value_regno)) {
5945 			verbose(env, "R%d leaks addr into flow keys\n",
5946 				value_regno);
5947 			return -EACCES;
5948 		}
5949 
5950 		err = check_flow_keys_access(env, off, size);
5951 		if (!err && t == BPF_READ && value_regno >= 0)
5952 			mark_reg_unknown(env, regs, value_regno);
5953 	} else if (type_is_sk_pointer(reg->type)) {
5954 		if (t == BPF_WRITE) {
5955 			verbose(env, "R%d cannot write into %s\n",
5956 				regno, reg_type_str(env, reg->type));
5957 			return -EACCES;
5958 		}
5959 		err = check_sock_access(env, insn_idx, regno, off, size, t);
5960 		if (!err && value_regno >= 0)
5961 			mark_reg_unknown(env, regs, value_regno);
5962 	} else if (reg->type == PTR_TO_TP_BUFFER) {
5963 		err = check_tp_buffer_access(env, reg, regno, off, size);
5964 		if (!err && t == BPF_READ && value_regno >= 0)
5965 			mark_reg_unknown(env, regs, value_regno);
5966 	} else if (base_type(reg->type) == PTR_TO_BTF_ID &&
5967 		   !type_may_be_null(reg->type)) {
5968 		err = check_ptr_to_btf_access(env, regs, regno, off, size, t,
5969 					      value_regno);
5970 	} else if (reg->type == CONST_PTR_TO_MAP) {
5971 		err = check_ptr_to_map_access(env, regs, regno, off, size, t,
5972 					      value_regno);
5973 	} else if (base_type(reg->type) == PTR_TO_BUF) {
5974 		bool rdonly_mem = type_is_rdonly_mem(reg->type);
5975 		u32 *max_access;
5976 
5977 		if (rdonly_mem) {
5978 			if (t == BPF_WRITE) {
5979 				verbose(env, "R%d cannot write into %s\n",
5980 					regno, reg_type_str(env, reg->type));
5981 				return -EACCES;
5982 			}
5983 			max_access = &env->prog->aux->max_rdonly_access;
5984 		} else {
5985 			max_access = &env->prog->aux->max_rdwr_access;
5986 		}
5987 
5988 		err = check_buffer_access(env, reg, regno, off, size, false,
5989 					  max_access);
5990 
5991 		if (!err && value_regno >= 0 && (rdonly_mem || t == BPF_READ))
5992 			mark_reg_unknown(env, regs, value_regno);
5993 	} else {
5994 		verbose(env, "R%d invalid mem access '%s'\n", regno,
5995 			reg_type_str(env, reg->type));
5996 		return -EACCES;
5997 	}
5998 
5999 	if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ &&
6000 	    regs[value_regno].type == SCALAR_VALUE) {
6001 		/* b/h/w load zero-extends, mark upper bits as known 0 */
6002 		coerce_reg_to_size(&regs[value_regno], size);
6003 	}
6004 	return err;
6005 }
6006 
6007 static int check_atomic(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn)
6008 {
6009 	int load_reg;
6010 	int err;
6011 
6012 	switch (insn->imm) {
6013 	case BPF_ADD:
6014 	case BPF_ADD | BPF_FETCH:
6015 	case BPF_AND:
6016 	case BPF_AND | BPF_FETCH:
6017 	case BPF_OR:
6018 	case BPF_OR | BPF_FETCH:
6019 	case BPF_XOR:
6020 	case BPF_XOR | BPF_FETCH:
6021 	case BPF_XCHG:
6022 	case BPF_CMPXCHG:
6023 		break;
6024 	default:
6025 		verbose(env, "BPF_ATOMIC uses invalid atomic opcode %02x\n", insn->imm);
6026 		return -EINVAL;
6027 	}
6028 
6029 	if (BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) {
6030 		verbose(env, "invalid atomic operand size\n");
6031 		return -EINVAL;
6032 	}
6033 
6034 	/* check src1 operand */
6035 	err = check_reg_arg(env, insn->src_reg, SRC_OP);
6036 	if (err)
6037 		return err;
6038 
6039 	/* check src2 operand */
6040 	err = check_reg_arg(env, insn->dst_reg, SRC_OP);
6041 	if (err)
6042 		return err;
6043 
6044 	if (insn->imm == BPF_CMPXCHG) {
6045 		/* Check comparison of R0 with memory location */
6046 		const u32 aux_reg = BPF_REG_0;
6047 
6048 		err = check_reg_arg(env, aux_reg, SRC_OP);
6049 		if (err)
6050 			return err;
6051 
6052 		if (is_pointer_value(env, aux_reg)) {
6053 			verbose(env, "R%d leaks addr into mem\n", aux_reg);
6054 			return -EACCES;
6055 		}
6056 	}
6057 
6058 	if (is_pointer_value(env, insn->src_reg)) {
6059 		verbose(env, "R%d leaks addr into mem\n", insn->src_reg);
6060 		return -EACCES;
6061 	}
6062 
6063 	if (is_ctx_reg(env, insn->dst_reg) ||
6064 	    is_pkt_reg(env, insn->dst_reg) ||
6065 	    is_flow_key_reg(env, insn->dst_reg) ||
6066 	    is_sk_reg(env, insn->dst_reg)) {
6067 		verbose(env, "BPF_ATOMIC stores into R%d %s is not allowed\n",
6068 			insn->dst_reg,
6069 			reg_type_str(env, reg_state(env, insn->dst_reg)->type));
6070 		return -EACCES;
6071 	}
6072 
6073 	if (insn->imm & BPF_FETCH) {
6074 		if (insn->imm == BPF_CMPXCHG)
6075 			load_reg = BPF_REG_0;
6076 		else
6077 			load_reg = insn->src_reg;
6078 
6079 		/* check and record load of old value */
6080 		err = check_reg_arg(env, load_reg, DST_OP);
6081 		if (err)
6082 			return err;
6083 	} else {
6084 		/* This instruction accesses a memory location but doesn't
6085 		 * actually load it into a register.
6086 		 */
6087 		load_reg = -1;
6088 	}
6089 
6090 	/* Check whether we can read the memory, with second call for fetch
6091 	 * case to simulate the register fill.
6092 	 */
6093 	err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
6094 			       BPF_SIZE(insn->code), BPF_READ, -1, true);
6095 	if (!err && load_reg >= 0)
6096 		err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
6097 				       BPF_SIZE(insn->code), BPF_READ, load_reg,
6098 				       true);
6099 	if (err)
6100 		return err;
6101 
6102 	/* Check whether we can write into the same memory. */
6103 	err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
6104 			       BPF_SIZE(insn->code), BPF_WRITE, -1, true);
6105 	if (err)
6106 		return err;
6107 
6108 	return 0;
6109 }
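
/* Example (illustrative): instruction forms that reach check_atomic(), using
 * the BPF_ATOMIC_OP() macro from the instruction-building helpers:
 *
 *	// *(u64 *)(r10 - 8) += r1, no old value loaded (load_reg == -1)
 *	BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_10, BPF_REG_1, -8)
 *
 *	// r1 = atomic_fetch_add(r10 - 8, r1); the old value is loaded into
 *	// r1, so the second check_mem_access() call above simulates that fill
 *	BPF_ATOMIC_OP(BPF_DW, BPF_ADD | BPF_FETCH, BPF_REG_10, BPF_REG_1, -8)
 */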
6110 
6111 /* When register 'regno' is used to read the stack (either directly or through
6112  * a helper function) make sure that it's within stack boundary and, depending
6113  * on the access type, that all elements of the stack are initialized.
6114  *
6115  * 'off' includes 'regno->off', but not its dynamic part (if any).
6116  *
6117  * All registers that have been spilled on the stack in the slots within the
6118  * read offsets are marked as read.
6119  */
6120 static int check_stack_range_initialized(
6121 		struct bpf_verifier_env *env, int regno, int off,
6122 		int access_size, bool zero_size_allowed,
6123 		enum bpf_access_src type, struct bpf_call_arg_meta *meta)
6124 {
6125 	struct bpf_reg_state *reg = reg_state(env, regno);
6126 	struct bpf_func_state *state = func(env, reg);
6127 	int err, min_off, max_off, i, j, slot, spi;
6128 	char *err_extra = type == ACCESS_HELPER ? " indirect" : "";
6129 	enum bpf_access_type bounds_check_type;
6130 	/* Some accesses can write anything into the stack, others are
6131 	 * read-only.
6132 	 */
6133 	bool clobber = false;
6134 
6135 	if (access_size == 0 && !zero_size_allowed) {
6136 		verbose(env, "invalid zero-sized read\n");
6137 		return -EACCES;
6138 	}
6139 
6140 	if (type == ACCESS_HELPER) {
6141 		/* The bounds checks for writes are more permissive than for
6142 		 * reads. However, if raw_mode is not set, we'll do extra
6143 		 * checks below.
6144 		 */
6145 		bounds_check_type = BPF_WRITE;
6146 		clobber = true;
6147 	} else {
6148 		bounds_check_type = BPF_READ;
6149 	}
6150 	err = check_stack_access_within_bounds(env, regno, off, access_size,
6151 					       type, bounds_check_type);
6152 	if (err)
6153 		return err;
6154 
6155 
6156 	if (tnum_is_const(reg->var_off)) {
6157 		min_off = max_off = reg->var_off.value + off;
6158 	} else {
6159 		/* Variable offset is prohibited for unprivileged mode for
6160 		 * simplicity since it requires corresponding support in
6161 		 * Spectre masking for stack ALU.
6162 		 * See also retrieve_ptr_limit().
6163 		 */
6164 		if (!env->bypass_spec_v1) {
6165 			char tn_buf[48];
6166 
6167 			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
6168 			verbose(env, "R%d%s variable offset stack access prohibited for !root, var_off=%s\n",
6169 				regno, err_extra, tn_buf);
6170 			return -EACCES;
6171 		}
6172 		/* Only an initialized buffer on the stack is allowed to be accessed
6173 		 * with a variable offset. With an uninitialized buffer it's hard to
6174 		 * guarantee that the whole memory is marked as initialized on
6175 		 * helper return, since the specific bounds are unknown, which may
6176 		 * cause uninitialized stack contents to leak.
6177 		 */
6178 		if (meta && meta->raw_mode)
6179 			meta = NULL;
6180 
6181 		min_off = reg->smin_value + off;
6182 		max_off = reg->smax_value + off;
6183 	}
6184 
6185 	if (meta && meta->raw_mode) {
6186 		/* Ensure we won't be overwriting dynptrs when simulating byte
6187 		 * by byte access in check_helper_call using meta.access_size.
6188 		 * This would be a problem if we have a helper in the future
6189 		 * which takes:
6190 		 *
6191 		 *	helper(uninit_mem, len, dynptr)
6192 		 *
6193 		 * Now, uninit_mem may overlap with the dynptr pointer. Hence, it
6194 		 * may end up writing to dynptr itself when touching memory from
6195 		 * arg 1. This can be relaxed on a case by case basis for known
6196 		 * safe cases, but reject due to the possibility of aliasing by
6197 		 * default.
6198 		 */
6199 		for (i = min_off; i < max_off + access_size; i++) {
6200 			int stack_off = -i - 1;
6201 
6202 			spi = __get_spi(i);
6203 			/* raw_mode may write past allocated_stack */
6204 			if (state->allocated_stack <= stack_off)
6205 				continue;
6206 			if (state->stack[spi].slot_type[stack_off % BPF_REG_SIZE] == STACK_DYNPTR) {
6207 				verbose(env, "potential write to dynptr at off=%d disallowed\n", i);
6208 				return -EACCES;
6209 			}
6210 		}
6211 		meta->access_size = access_size;
6212 		meta->regno = regno;
6213 		return 0;
6214 	}
6215 
6216 	for (i = min_off; i < max_off + access_size; i++) {
6217 		u8 *stype;
6218 
6219 		slot = -i - 1;
6220 		spi = slot / BPF_REG_SIZE;
6221 		if (state->allocated_stack <= slot)
6222 			goto err;
6223 		stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
6224 		if (*stype == STACK_MISC)
6225 			goto mark;
6226 		if ((*stype == STACK_ZERO) ||
6227 		    (*stype == STACK_INVALID && env->allow_uninit_stack)) {
6228 			if (clobber) {
6229 				/* helper can write anything into the stack */
6230 				*stype = STACK_MISC;
6231 			}
6232 			goto mark;
6233 		}
6234 
6235 		if (is_spilled_reg(&state->stack[spi]) &&
6236 		    (state->stack[spi].spilled_ptr.type == SCALAR_VALUE ||
6237 		     env->allow_ptr_leaks)) {
6238 			if (clobber) {
6239 				__mark_reg_unknown(env, &state->stack[spi].spilled_ptr);
6240 				for (j = 0; j < BPF_REG_SIZE; j++)
6241 					scrub_spilled_slot(&state->stack[spi].slot_type[j]);
6242 			}
6243 			goto mark;
6244 		}
6245 
6246 err:
6247 		if (tnum_is_const(reg->var_off)) {
6248 			verbose(env, "invalid%s read from stack R%d off %d+%d size %d\n",
6249 				err_extra, regno, min_off, i - min_off, access_size);
6250 		} else {
6251 			char tn_buf[48];
6252 
6253 			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
6254 			verbose(env, "invalid%s read from stack R%d var_off %s+%d size %d\n",
6255 				err_extra, regno, tn_buf, i - min_off, access_size);
6256 		}
6257 		return -EACCES;
6258 mark:
6259 		/* reading any byte out of 8-byte 'spill_slot' will cause
6260 		 * the whole slot to be marked as 'read'
6261 		 */
6262 		mark_reg_read(env, &state->stack[spi].spilled_ptr,
6263 			      state->stack[spi].spilled_ptr.parent,
6264 			      REG_LIVE_READ64);
6265 		/* We do not set REG_LIVE_WRITTEN for the stack slot, as we cannot
6266 		 * be sure whether the stack slot is written to or not. Hence,
6267 		 * we must still conservatively propagate reads upwards even if
6268 		 * helper may write to the entire memory range.
6269 		 */
6270 	}
6271 	return update_stack_depth(env, state, min_off);
6272 }
6273 
6274 static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
6275 				   int access_size, bool zero_size_allowed,
6276 				   struct bpf_call_arg_meta *meta)
6277 {
6278 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
6279 	u32 *max_access;
6280 
6281 	switch (base_type(reg->type)) {
6282 	case PTR_TO_PACKET:
6283 	case PTR_TO_PACKET_META:
6284 		return check_packet_access(env, regno, reg->off, access_size,
6285 					   zero_size_allowed);
6286 	case PTR_TO_MAP_KEY:
6287 		if (meta && meta->raw_mode) {
6288 			verbose(env, "R%d cannot write into %s\n", regno,
6289 				reg_type_str(env, reg->type));
6290 			return -EACCES;
6291 		}
6292 		return check_mem_region_access(env, regno, reg->off, access_size,
6293 					       reg->map_ptr->key_size, false);
6294 	case PTR_TO_MAP_VALUE:
6295 		if (check_map_access_type(env, regno, reg->off, access_size,
6296 					  meta && meta->raw_mode ? BPF_WRITE :
6297 					  BPF_READ))
6298 			return -EACCES;
6299 		return check_map_access(env, regno, reg->off, access_size,
6300 					zero_size_allowed, ACCESS_HELPER);
6301 	case PTR_TO_MEM:
6302 		if (type_is_rdonly_mem(reg->type)) {
6303 			if (meta && meta->raw_mode) {
6304 				verbose(env, "R%d cannot write into %s\n", regno,
6305 					reg_type_str(env, reg->type));
6306 				return -EACCES;
6307 			}
6308 		}
6309 		return check_mem_region_access(env, regno, reg->off,
6310 					       access_size, reg->mem_size,
6311 					       zero_size_allowed);
6312 	case PTR_TO_BUF:
6313 		if (type_is_rdonly_mem(reg->type)) {
6314 			if (meta && meta->raw_mode) {
6315 				verbose(env, "R%d cannot write into %s\n", regno,
6316 					reg_type_str(env, reg->type));
6317 				return -EACCES;
6318 			}
6319 
6320 			max_access = &env->prog->aux->max_rdonly_access;
6321 		} else {
6322 			max_access = &env->prog->aux->max_rdwr_access;
6323 		}
6324 		return check_buffer_access(env, reg, regno, reg->off,
6325 					   access_size, zero_size_allowed,
6326 					   max_access);
6327 	case PTR_TO_STACK:
6328 		return check_stack_range_initialized(
6329 				env,
6330 				regno, reg->off, access_size,
6331 				zero_size_allowed, ACCESS_HELPER, meta);
6332 	case PTR_TO_BTF_ID:
6333 		return check_ptr_to_btf_access(env, regs, regno, reg->off,
6334 					       access_size, BPF_READ, -1);
6335 	case PTR_TO_CTX:
6336 		/* in case the function doesn't know how to access the context
6337 		 * (because we are in a program of type SYSCALL for example), we
6338 		 * cannot statically check its size.
6339 		 * Dynamically check it now.
6340 		 */
6341 		if (!env->ops->convert_ctx_access) {
6342 			enum bpf_access_type atype = meta && meta->raw_mode ? BPF_WRITE : BPF_READ;
6343 			int offset = access_size - 1;
6344 
6345 			/* Allow zero-byte read from PTR_TO_CTX */
6346 			if (access_size == 0)
6347 				return zero_size_allowed ? 0 : -EACCES;
6348 
6349 			return check_mem_access(env, env->insn_idx, regno, offset, BPF_B,
6350 						atype, -1, false);
6351 		}
6352 
6353 		fallthrough;
6354 	default: /* scalar_value or invalid ptr */
6355 		/* Allow zero-byte read from NULL, regardless of pointer type */
6356 		if (zero_size_allowed && access_size == 0 &&
6357 		    register_is_null(reg))
6358 			return 0;
6359 
6360 		verbose(env, "R%d type=%s ", regno,
6361 			reg_type_str(env, reg->type));
6362 		verbose(env, "expected=%s\n", reg_type_str(env, PTR_TO_STACK));
6363 		return -EACCES;
6364 	}
6365 }
6366 
6367 static int check_mem_size_reg(struct bpf_verifier_env *env,
6368 			      struct bpf_reg_state *reg, u32 regno,
6369 			      bool zero_size_allowed,
6370 			      struct bpf_call_arg_meta *meta)
6371 {
6372 	int err;
6373 
6374 	/* This is used to refine r0 return value bounds for helpers
6375 	 * that enforce this value as an upper bound on return values.
6376 	 * See do_refine_retval_range() for helpers that can refine
6377 	 * the return value. The C type of the helper is u32 so we pull the register
6378 	 * bound from umax_value; however, if it is negative the verifier errors
6379 	 * out. Only upper bounds can be learned because retval is an
6380 	 * int type and negative retvals are allowed.
6381 	 */
6382 	meta->msize_max_value = reg->umax_value;
6383 
6384 	/* The register is SCALAR_VALUE; the access check
6385 	 * happens using its boundaries.
6386 	 */
6387 	if (!tnum_is_const(reg->var_off))
6388 		/* For unprivileged variable accesses, disable raw
6389 		 * mode so that the program is required to
6390 		 * initialize all the memory that the helper could
6391 		 * just partially fill up.
6392 		 */
6393 		meta = NULL;
6394 
6395 	if (reg->smin_value < 0) {
6396 		verbose(env, "R%d min value is negative, either use unsigned or 'var &= const'\n",
6397 			regno);
6398 		return -EACCES;
6399 	}
6400 
6401 	if (reg->umin_value == 0) {
6402 		err = check_helper_mem_access(env, regno - 1, 0,
6403 					      zero_size_allowed,
6404 					      meta);
6405 		if (err)
6406 			return err;
6407 	}
6408 
6409 	if (reg->umax_value >= BPF_MAX_VAR_SIZ) {
6410 		verbose(env, "R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n",
6411 			regno);
6412 		return -EACCES;
6413 	}
6414 	err = check_helper_mem_access(env, regno - 1,
6415 				      reg->umax_value,
6416 				      zero_size_allowed, meta);
6417 	if (!err)
6418 		err = mark_chain_precision(env, regno);
6419 	return err;
6420 }
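
/* Example (illustrative BPF program sketch): a typical (pointer, size) helper
 * argument pair that satisfies the checks above; the size register must be
 * provably non-negative and bounded before the call. 'src' and the size
 * source are placeholders:
 *
 *	char buf[64] = {};
 *	__u32 len = get_len();		// hypothetical scalar source
 *
 *	if (len > sizeof(buf))
 *		return 0;
 *	bpf_probe_read_kernel(buf, len, src);	// umax_value <= 64 here
 */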
6421 
6422 int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
6423 		   u32 regno, u32 mem_size)
6424 {
6425 	bool may_be_null = type_may_be_null(reg->type);
6426 	struct bpf_reg_state saved_reg;
6427 	struct bpf_call_arg_meta meta;
6428 	int err;
6429 
6430 	if (register_is_null(reg))
6431 		return 0;
6432 
6433 	memset(&meta, 0, sizeof(meta));
6434 	/* Assuming that the register contains a value, check if the memory
6435 	 * access is safe. Temporarily save and restore the register's state as
6436 	 * the conversion shouldn't be visible to a caller.
6437 	 */
6438 	if (may_be_null) {
6439 		saved_reg = *reg;
6440 		mark_ptr_not_null_reg(reg);
6441 	}
6442 
6443 	err = check_helper_mem_access(env, regno, mem_size, true, &meta);
6444 	/* Check access for BPF_WRITE */
6445 	meta.raw_mode = true;
6446 	err = err ?: check_helper_mem_access(env, regno, mem_size, true, &meta);
6447 
6448 	if (may_be_null)
6449 		*reg = saved_reg;
6450 
6451 	return err;
6452 }
6453 
6454 static int check_kfunc_mem_size_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
6455 				    u32 regno)
6456 {
6457 	struct bpf_reg_state *mem_reg = &cur_regs(env)[regno - 1];
6458 	bool may_be_null = type_may_be_null(mem_reg->type);
6459 	struct bpf_reg_state saved_reg;
6460 	struct bpf_call_arg_meta meta;
6461 	int err;
6462 
6463 	WARN_ON_ONCE(regno < BPF_REG_2 || regno > BPF_REG_5);
6464 
6465 	memset(&meta, 0, sizeof(meta));
6466 
6467 	if (may_be_null) {
6468 		saved_reg = *mem_reg;
6469 		mark_ptr_not_null_reg(mem_reg);
6470 	}
6471 
6472 	err = check_mem_size_reg(env, reg, regno, true, &meta);
6473 	/* Check access for BPF_WRITE */
6474 	meta.raw_mode = true;
6475 	err = err ?: check_mem_size_reg(env, reg, regno, true, &meta);
6476 
6477 	if (may_be_null)
6478 		*mem_reg = saved_reg;
6479 	return err;
6480 }
6481 
6482 /* Implementation details:
6483  * bpf_map_lookup returns PTR_TO_MAP_VALUE_OR_NULL.
6484  * bpf_obj_new returns PTR_TO_BTF_ID | MEM_ALLOC | PTR_MAYBE_NULL.
6485  * Two bpf_map_lookups (even with the same key) will have different reg->id.
6486  * Two separate bpf_obj_new will also have different reg->id.
6487  * For traditional PTR_TO_MAP_VALUE or PTR_TO_BTF_ID | MEM_ALLOC, the verifier
6488  * clears reg->id after value_or_null->value transition, since the verifier only
6489  * cares about the range of access to valid map value pointer and doesn't care
6490  * about actual address of the map element.
6491  * For maps with 'struct bpf_spin_lock' inside map value the verifier keeps
6492  * reg->id > 0 after value_or_null->value transition. By doing so
6493  * two bpf_map_lookups will be considered two different pointers that
6494  * point to different bpf_spin_locks. Likewise for pointers to allocated objects
6495  * returned from bpf_obj_new.
6496  * The verifier allows taking only one bpf_spin_lock at a time to avoid
6497  * dead-locks.
6498  * Since only one bpf_spin_lock is allowed the checks are simpler than
6499  * reg_is_refcounted() logic. The verifier needs to remember only
6500  * one spin_lock instead of array of acquired_refs.
6501  * cur_state->active_lock remembers which map value element or allocated
6502  * object got locked and clears it after bpf_spin_unlock.
6503  */
6504 static int process_spin_lock(struct bpf_verifier_env *env, int regno,
6505 			     bool is_lock)
6506 {
6507 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
6508 	struct bpf_verifier_state *cur = env->cur_state;
6509 	bool is_const = tnum_is_const(reg->var_off);
6510 	u64 val = reg->var_off.value;
6511 	struct bpf_map *map = NULL;
6512 	struct btf *btf = NULL;
6513 	struct btf_record *rec;
6514 
6515 	if (!is_const) {
6516 		verbose(env,
6517 			"R%d doesn't have constant offset. bpf_spin_lock has to be at the constant offset\n",
6518 			regno);
6519 		return -EINVAL;
6520 	}
6521 	if (reg->type == PTR_TO_MAP_VALUE) {
6522 		map = reg->map_ptr;
6523 		if (!map->btf) {
6524 			verbose(env,
6525 				"map '%s' has to have BTF in order to use bpf_spin_lock\n",
6526 				map->name);
6527 			return -EINVAL;
6528 		}
6529 	} else {
6530 		btf = reg->btf;
6531 	}
6532 
6533 	rec = reg_btf_record(reg);
6534 	if (!btf_record_has_field(rec, BPF_SPIN_LOCK)) {
6535 		verbose(env, "%s '%s' has no valid bpf_spin_lock\n", map ? "map" : "local",
6536 			map ? map->name : "kptr");
6537 		return -EINVAL;
6538 	}
6539 	if (rec->spin_lock_off != val + reg->off) {
6540 		verbose(env, "off %lld doesn't point to 'struct bpf_spin_lock' that is at %d\n",
6541 			val + reg->off, rec->spin_lock_off);
6542 		return -EINVAL;
6543 	}
6544 	if (is_lock) {
6545 		if (cur->active_lock.ptr) {
6546 			verbose(env,
6547 				"Locking two bpf_spin_locks are not allowed\n");
6548 			return -EINVAL;
6549 		}
6550 		if (map)
6551 			cur->active_lock.ptr = map;
6552 		else
6553 			cur->active_lock.ptr = btf;
6554 		cur->active_lock.id = reg->id;
6555 	} else {
6556 		void *ptr;
6557 
6558 		if (map)
6559 			ptr = map;
6560 		else
6561 			ptr = btf;
6562 
6563 		if (!cur->active_lock.ptr) {
6564 			verbose(env, "bpf_spin_unlock without taking a lock\n");
6565 			return -EINVAL;
6566 		}
6567 		if (cur->active_lock.ptr != ptr ||
6568 		    cur->active_lock.id != reg->id) {
6569 			verbose(env, "bpf_spin_unlock of different lock\n");
6570 			return -EINVAL;
6571 		}
6572 
6573 		invalidate_non_owning_refs(env);
6574 
6575 		cur->active_lock.ptr = NULL;
6576 		cur->active_lock.id = 0;
6577 	}
6578 	return 0;
6579 }
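
/* Illustrative sketch, not part of the verifier: the BPF-program-side pattern
 * that process_spin_lock() validates. Struct, map and section names below are
 * made up; the usual libbpf conventions (SEC(), __uint(), __type()) and
 * <bpf/bpf_helpers.h> are assumed.
 *
 *	struct elem {
 *		int counter;
 *		struct bpf_spin_lock lock;	// must sit at a constant offset
 *	};
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_HASH);
 *		__uint(max_entries, 1);
 *		__type(key, int);
 *		__type(value, struct elem);	// map needs BTF for bpf_spin_lock
 *	} lock_map SEC(".maps");
 *
 *	SEC("tc")
 *	int lock_prog(struct __sk_buff *skb)
 *	{
 *		int key = 0;
 *		struct elem *val = bpf_map_lookup_elem(&lock_map, &key);
 *
 *		if (!val)
 *			return 0;
 *		bpf_spin_lock(&val->lock);	// only one lock may be held at a time
 *		val->counter++;
 *		bpf_spin_unlock(&val->lock);	// must unlock the very same lock
 *		return 0;
 *	}
 */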
6580 
6581 static int process_timer_func(struct bpf_verifier_env *env, int regno,
6582 			      struct bpf_call_arg_meta *meta)
6583 {
6584 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
6585 	bool is_const = tnum_is_const(reg->var_off);
6586 	struct bpf_map *map = reg->map_ptr;
6587 	u64 val = reg->var_off.value;
6588 
6589 	if (!is_const) {
6590 		verbose(env,
6591 			"R%d doesn't have constant offset. bpf_timer has to be at the constant offset\n",
6592 			regno);
6593 		return -EINVAL;
6594 	}
6595 	if (!map->btf) {
6596 		verbose(env, "map '%s' has to have BTF in order to use bpf_timer\n",
6597 			map->name);
6598 		return -EINVAL;
6599 	}
6600 	if (!btf_record_has_field(map->record, BPF_TIMER)) {
6601 		verbose(env, "map '%s' has no valid bpf_timer\n", map->name);
6602 		return -EINVAL;
6603 	}
6604 	if (map->record->timer_off != val + reg->off) {
6605 		verbose(env, "off %lld doesn't point to 'struct bpf_timer' that is at %d\n",
6606 			val + reg->off, map->record->timer_off);
6607 		return -EINVAL;
6608 	}
6609 	if (meta->map_ptr) {
6610 		verbose(env, "verifier bug. Two map pointers in a timer helper\n");
6611 		return -EFAULT;
6612 	}
6613 	meta->map_uid = reg->map_uid;
6614 	meta->map_ptr = map;
6615 	return 0;
6616 }
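
/* Illustrative sketch, not part of the verifier: a map layout and helper
 * sequence that satisfies process_timer_func(). Names are hypothetical;
 * libbpf map definition conventions and <bpf/bpf_helpers.h> are assumed.
 *
 *	struct elem {
 *		struct bpf_timer t;		// must sit at a constant offset
 *	};
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_ARRAY);
 *		__uint(max_entries, 1);
 *		__type(key, int);
 *		__type(value, struct elem);	// map needs BTF for bpf_timer
 *	} timer_map SEC(".maps");
 *
 *	static int timer_cb(void *map, int *key, struct elem *val)
 *	{
 *		return 0;			// invoked when the timer fires
 *	}
 *
 *	SEC("fentry/bpf_fentry_test1")
 *	int start_timer(void *ctx)
 *	{
 *		int key = 0;
 *		struct elem *val = bpf_map_lookup_elem(&timer_map, &key);
 *
 *		if (!val)
 *			return 0;
 *		bpf_timer_init(&val->t, &timer_map, CLOCK_MONOTONIC);
 *		bpf_timer_set_callback(&val->t, timer_cb);
 *		bpf_timer_start(&val->t, 0, 0);	// nsecs = 0, flags = 0
 *		return 0;
 *	}
 */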
6617 
6618 static int process_kptr_func(struct bpf_verifier_env *env, int regno,
6619 			     struct bpf_call_arg_meta *meta)
6620 {
6621 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
6622 	struct bpf_map *map_ptr = reg->map_ptr;
6623 	struct btf_field *kptr_field;
6624 	u32 kptr_off;
6625 
6626 	if (!tnum_is_const(reg->var_off)) {
6627 		verbose(env,
6628 			"R%d doesn't have constant offset. kptr has to be at the constant offset\n",
6629 			regno);
6630 		return -EINVAL;
6631 	}
6632 	if (!map_ptr->btf) {
6633 		verbose(env, "map '%s' has to have BTF in order to use bpf_kptr_xchg\n",
6634 			map_ptr->name);
6635 		return -EINVAL;
6636 	}
6637 	if (!btf_record_has_field(map_ptr->record, BPF_KPTR)) {
6638 		verbose(env, "map '%s' has no valid kptr\n", map_ptr->name);
6639 		return -EINVAL;
6640 	}
6641 
6642 	meta->map_ptr = map_ptr;
6643 	kptr_off = reg->off + reg->var_off.value;
6644 	kptr_field = btf_record_find(map_ptr->record, kptr_off, BPF_KPTR);
6645 	if (!kptr_field) {
6646 		verbose(env, "off=%d doesn't point to kptr\n", kptr_off);
6647 		return -EACCES;
6648 	}
6649 	if (kptr_field->type != BPF_KPTR_REF) {
6650 		verbose(env, "off=%d kptr isn't referenced kptr\n", kptr_off);
6651 		return -EACCES;
6652 	}
6653 	meta->kptr_field = kptr_field;
6654 	return 0;
6655 }
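
/* Illustrative sketch, not part of the verifier: a referenced-kptr map layout
 * and bpf_kptr_xchg() call of the kind process_kptr_func() checks. The struct,
 * map and type names are hypothetical; the __kptr annotation and libbpf map
 * definition conventions are assumed.
 *
 *	struct node_data;			// some BTF type usable as a kptr
 *
 *	struct map_value {
 *		struct node_data __kptr *ptr;	// referenced kptr field
 *	};
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_ARRAY);
 *		__uint(max_entries, 1);
 *		__type(key, int);
 *		__type(value, struct map_value);	// map needs BTF for kptrs
 *	} kptr_map SEC(".maps");
 *
 *	// ... inside a program, with 'p' a referenced pointer acquired earlier:
 *	struct map_value *v = bpf_map_lookup_elem(&kptr_map, &key);
 *	struct node_data *old;
 *
 *	if (v) {
 *		old = bpf_kptr_xchg(&v->ptr, p);	// swap in 'p', take out old value
 *		if (old)
 *			release(old);			// hypothetical release kfunc
 *	}
 */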
6656 
6657 /* There are two register types representing a bpf_dynptr, one is PTR_TO_STACK
6658  * which points to a stack slot, and the other is CONST_PTR_TO_DYNPTR.
6659  *
6660  * In both cases we deal with the first 8 bytes, but need to mark the next 8
6661  * bytes as STACK_DYNPTR in case of PTR_TO_STACK. In case of
6662  * CONST_PTR_TO_DYNPTR, we are guaranteed to get the beginning of the object.
6663  *
6664  * Mutability of bpf_dynptr is at two levels, one is at the level of struct
6665  * bpf_dynptr itself, i.e. whether the helper is receiving a pointer to struct
6666  * bpf_dynptr or pointer to const struct bpf_dynptr. In the former case, it can
6667  * mutate the view of the dynptr and also possibly destroy it. In the latter
6668  * case, it cannot mutate the bpf_dynptr itself but it can still mutate the
6669  * memory that dynptr points to.
6670  *
6671  * The verifier will keep track of both levels of mutation (bpf_dynptr's in
6672  * reg->type and the memory's in reg->dynptr.type), but there is no support for
6673  * readonly dynptr view yet, hence only the first case is tracked and checked.
6674  *
6675  * This is consistent with how C applies the const modifier to a struct object,
6676  * where the pointer itself inside bpf_dynptr becomes const but not what it
6677  * points to.
6678  *
6679  * Helpers which do not mutate the bpf_dynptr set MEM_RDONLY in their argument
6680  * type, and declare it as 'const struct bpf_dynptr *' in their prototype.
6681  */
6682 static int process_dynptr_func(struct bpf_verifier_env *env, int regno, int insn_idx,
6683 			       enum bpf_arg_type arg_type)
6684 {
6685 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
6686 	int err;
6687 
6688 	/* MEM_UNINIT and MEM_RDONLY are exclusive when applied to an
6689 	 * ARG_PTR_TO_DYNPTR (or ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_*):
6690 	 */
6691 	if ((arg_type & (MEM_UNINIT | MEM_RDONLY)) == (MEM_UNINIT | MEM_RDONLY)) {
6692 		verbose(env, "verifier internal error: misconfigured dynptr helper type flags\n");
6693 		return -EFAULT;
6694 	}
6695 
6696 	/*  MEM_UNINIT - Points to memory that is an appropriate candidate for
6697 	 *		 constructing a mutable bpf_dynptr object.
6698 	 *
6699 	 *		 Currently, this is only possible with PTR_TO_STACK
6700 	 *		 pointing to a region of at least 16 bytes which doesn't
6701 	 *		 contain an existing bpf_dynptr.
6702 	 *
6703 	 *  MEM_RDONLY - Points to an initialized bpf_dynptr that will not be
6704 	 *		 mutated or destroyed. However, the memory it points to
6705 	 *		 may be mutated.
6706 	 *
6707 	 *  None       - Points to an initialized dynptr that can be mutated and
6708 	 *		 destroyed, including mutation of the memory it points
6709 	 *		 to.
6710 	 */
6711 	if (arg_type & MEM_UNINIT) {
6712 		int i;
6713 
6714 		if (!is_dynptr_reg_valid_uninit(env, reg)) {
6715 			verbose(env, "Dynptr has to be an uninitialized dynptr\n");
6716 			return -EINVAL;
6717 		}
6718 
6719 		/* we write BPF_DW bits (8 bytes) at a time */
6720 		for (i = 0; i < BPF_DYNPTR_SIZE; i += 8) {
6721 			err = check_mem_access(env, insn_idx, regno,
6722 					       i, BPF_DW, BPF_WRITE, -1, false);
6723 			if (err)
6724 				return err;
6725 		}
6726 
6727 		err = mark_stack_slots_dynptr(env, reg, arg_type, insn_idx);
6728 	} else /* MEM_RDONLY and None case from above */ {
6729 		/* For the reg->type == PTR_TO_STACK case, bpf_dynptr is never const */
6730 		if (reg->type == CONST_PTR_TO_DYNPTR && !(arg_type & MEM_RDONLY)) {
6731 			verbose(env, "cannot pass pointer to const bpf_dynptr, the helper mutates it\n");
6732 			return -EINVAL;
6733 		}
6734 
6735 		if (!is_dynptr_reg_valid_init(env, reg)) {
6736 			verbose(env,
6737 				"Expected an initialized dynptr as arg #%d\n",
6738 				regno);
6739 			return -EINVAL;
6740 		}
6741 
6742 		/* Fold modifiers (in this case, MEM_RDONLY) when checking expected type */
6743 		if (!is_dynptr_type_expected(env, reg, arg_type & ~MEM_RDONLY)) {
6744 			verbose(env,
6745 				"Expected a dynptr of type %s as arg #%d\n",
6746 				dynptr_type_str(arg_to_dynptr_type(arg_type)), regno);
6747 			return -EINVAL;
6748 		}
6749 
6750 		err = mark_dynptr_read(env, reg);
6751 	}
6752 	return err;
6753 }
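
/* Illustrative sketch, not part of the verifier: a typical dynptr usage
 * pattern covered by process_dynptr_func(). The ringbuf dynptr helpers are
 * real; the map name, section name and program shape are assumptions.
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_RINGBUF);
 *		__uint(max_entries, 4096);
 *	} rb SEC(".maps");
 *
 *	SEC("tp/syscalls/sys_enter_getpid")
 *	int dynptr_prog(void *ctx)
 *	{
 *		struct bpf_dynptr ptr;		// 16 bytes of stack, the MEM_UNINIT arg
 *		__u32 *data;
 *
 *		// construction: the stack slots become STACK_DYNPTR
 *		if (bpf_ringbuf_reserve_dynptr(&rb, sizeof(*data), 0, &ptr)) {
 *			// must still be released even when the reservation fails
 *			bpf_ringbuf_discard_dynptr(&ptr, 0);
 *			return 0;
 *		}
 *		data = bpf_dynptr_data(&ptr, 0, sizeof(*data));
 *		if (data)
 *			*data = 42;
 *		// release: takes the initialized dynptr (OBJ_RELEASE argument)
 *		bpf_ringbuf_submit_dynptr(&ptr, 0);
 *		return 0;
 *	}
 */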
6754 
6755 static u32 iter_ref_obj_id(struct bpf_verifier_env *env, struct bpf_reg_state *reg, int spi)
6756 {
6757 	struct bpf_func_state *state = func(env, reg);
6758 
6759 	return state->stack[spi].spilled_ptr.ref_obj_id;
6760 }
6761 
6762 static bool is_iter_kfunc(struct bpf_kfunc_call_arg_meta *meta)
6763 {
6764 	return meta->kfunc_flags & (KF_ITER_NEW | KF_ITER_NEXT | KF_ITER_DESTROY);
6765 }
6766 
6767 static bool is_iter_new_kfunc(struct bpf_kfunc_call_arg_meta *meta)
6768 {
6769 	return meta->kfunc_flags & KF_ITER_NEW;
6770 }
6771 
6772 static bool is_iter_next_kfunc(struct bpf_kfunc_call_arg_meta *meta)
6773 {
6774 	return meta->kfunc_flags & KF_ITER_NEXT;
6775 }
6776 
6777 static bool is_iter_destroy_kfunc(struct bpf_kfunc_call_arg_meta *meta)
6778 {
6779 	return meta->kfunc_flags & KF_ITER_DESTROY;
6780 }
6781 
6782 static bool is_kfunc_arg_iter(struct bpf_kfunc_call_arg_meta *meta, int arg)
6783 {
6784 	/* btf_check_iter_kfuncs() guarantees that the first argument of any iter
6785 	 * kfunc is a pointer to the iter state
6786 	 */
6787 	return arg == 0 && is_iter_kfunc(meta);
6788 }
6789 
6790 static int process_iter_arg(struct bpf_verifier_env *env, int regno, int insn_idx,
6791 			    struct bpf_kfunc_call_arg_meta *meta)
6792 {
6793 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
6794 	const struct btf_type *t;
6795 	const struct btf_param *arg;
6796 	int spi, err, i, nr_slots;
6797 	u32 btf_id;
6798 
6799 	/* btf_check_iter_kfuncs() ensures we don't need to validate anything here */
6800 	arg = &btf_params(meta->func_proto)[0];
6801 	t = btf_type_skip_modifiers(meta->btf, arg->type, NULL);	/* PTR */
6802 	t = btf_type_skip_modifiers(meta->btf, t->type, &btf_id);	/* STRUCT */
6803 	nr_slots = t->size / BPF_REG_SIZE;
6804 
6805 	if (is_iter_new_kfunc(meta)) {
6806 		/* bpf_iter_<type>_new() expects pointer to uninit iter state */
6807 		if (!is_iter_reg_valid_uninit(env, reg, nr_slots)) {
6808 			verbose(env, "expected uninitialized iter_%s as arg #%d\n",
6809 				iter_type_str(meta->btf, btf_id), regno);
6810 			return -EINVAL;
6811 		}
6812 
6813 		for (i = 0; i < nr_slots * 8; i += BPF_REG_SIZE) {
6814 			err = check_mem_access(env, insn_idx, regno,
6815 					       i, BPF_DW, BPF_WRITE, -1, false);
6816 			if (err)
6817 				return err;
6818 		}
6819 
6820 		err = mark_stack_slots_iter(env, reg, insn_idx, meta->btf, btf_id, nr_slots);
6821 		if (err)
6822 			return err;
6823 	} else {
6824 		/* iter_next() or iter_destroy() expect an initialized iter state */
6825 		if (!is_iter_reg_valid_init(env, reg, meta->btf, btf_id, nr_slots)) {
6826 			verbose(env, "expected an initialized iter_%s as arg #%d\n",
6827 				iter_type_str(meta->btf, btf_id), regno);
6828 			return -EINVAL;
6829 		}
6830 
6831 		spi = iter_get_spi(env, reg, nr_slots);
6832 		if (spi < 0)
6833 			return spi;
6834 
6835 		err = mark_iter_read(env, reg, spi, nr_slots);
6836 		if (err)
6837 			return err;
6838 
6839 		/* remember meta->iter info for process_iter_next_call() */
6840 		meta->iter.spi = spi;
6841 		meta->iter.frameno = reg->frameno;
6842 		meta->ref_obj_id = iter_ref_obj_id(env, reg, spi);
6843 
6844 		if (is_iter_destroy_kfunc(meta)) {
6845 			err = unmark_stack_slots_iter(env, reg, nr_slots);
6846 			if (err)
6847 				return err;
6848 		}
6849 	}
6850 
6851 	return 0;
6852 }
6853 
6854 /* process_iter_next_call() is called when verifier gets to iterator's next
6855  * "method" (e.g., bpf_iter_num_next() for numbers iterator) call. We'll refer
6856  * to it as just "iter_next()" in comments below.
6857  *
6858  * BPF verifier relies on a crucial contract for any iter_next()
6859  * implementation: it should *eventually* return NULL, and once that happens
6860  * it should keep returning NULL. That is, once iterator exhausts elements to
6861  * iterate, it should never reset or spuriously return new elements.
6862  *
6863  * With the assumption of such contract, process_iter_next_call() simulates
6864  * a fork in the verifier state to validate loop logic correctness and safety
6865  * without having to simulate infinite amount of iterations.
6866  *
6867  * In current state, we first assume that iter_next() returned NULL and
6868  * iterator state is set to DRAINED (BPF_ITER_STATE_DRAINED). In such
6869  * conditions we should not form an infinite loop and should eventually reach
6870  * exit.
6871  *
6872  * Besides that, we also fork current state and enqueue it for later
6873  * verification. In a forked state we keep iterator state as ACTIVE
6874  * (BPF_ITER_STATE_ACTIVE) and assume non-NULL return from iter_next(). We
6875  * also bump iteration depth to prevent erroneous infinite loop detection
6876  * later on (see iter_active_depths_differ() comment for details). In this
6877  * state we assume that we'll eventually loop back to another iter_next()
6878  * call (it could be in exactly the same location or in some other instruction,
6879  * it doesn't matter, we don't make any unnecessary assumptions about this,
6880  * everything revolves around iterator state in a stack slot, not which
6881  * instruction is calling iter_next()). When that happens, we either will come
6882  * to iter_next() with equivalent state and can conclude that next iteration
6883  * will proceed in exactly the same way as we just verified, so it's safe to
6884  * assume that loop converges. If not, we'll go on another iteration
6885  * simulation with a different input state, until all possible starting states
6886  * are validated or we reach maximum number of instructions limit.
6887  *
6888  * This way, we will either exhaustively discover all possible input states
6889  * that iterator loop can start with and eventually will converge, or we'll
6890  * effectively regress into bounded loop simulation logic and either reach
6891  * maximum number of instructions if loop is not provably convergent, or there
6892  * is some statically known limit on number of iterations (e.g., if there is
6893  * an explicit `if n > 100 then break;` statement somewhere in the loop).
6894  *
6895  * One very subtle but very important aspect is that we *always* simulate NULL
6896  * condition first (as the current state) before we simulate non-NULL case.
6897  * This has to do with intricacies of scalar precision tracking. By simulating
6898  * "exit condition" of iter_next() returning NULL first, we make sure all the
6899  * relevant precision marks *that will be set **after** we exit iterator loop*
6900  * are propagated backwards to common parent state of NULL and non-NULL
6901  * branches. Thanks to that, state equivalence checks done later in forked
6902  * state, when reaching iter_next() for ACTIVE iterator, can assume that
6903  * precision marks are finalized and won't change. Because simulating another
6904  * ACTIVE iterator iteration won't change them (because given same input
6905  * states we'll end up with exactly same output states which we are currently
6906  * comparing; and verification after the loop already propagated back what
6907  * needs to be **additionally** tracked as precise). It's subtle, grok
6908  * precision tracking for more intuitive understanding.
6909  */
6910 static int process_iter_next_call(struct bpf_verifier_env *env, int insn_idx,
6911 				  struct bpf_kfunc_call_arg_meta *meta)
6912 {
6913 	struct bpf_verifier_state *cur_st = env->cur_state, *queued_st;
6914 	struct bpf_func_state *cur_fr = cur_st->frame[cur_st->curframe], *queued_fr;
6915 	struct bpf_reg_state *cur_iter, *queued_iter;
6916 	int iter_frameno = meta->iter.frameno;
6917 	int iter_spi = meta->iter.spi;
6918 
6919 	BTF_TYPE_EMIT(struct bpf_iter);
6920 
6921 	cur_iter = &env->cur_state->frame[iter_frameno]->stack[iter_spi].spilled_ptr;
6922 
6923 	if (cur_iter->iter.state != BPF_ITER_STATE_ACTIVE &&
6924 	    cur_iter->iter.state != BPF_ITER_STATE_DRAINED) {
6925 		verbose(env, "verifier internal error: unexpected iterator state %d (%s)\n",
6926 			cur_iter->iter.state, iter_state_str(cur_iter->iter.state));
6927 		return -EFAULT;
6928 	}
6929 
6930 	if (cur_iter->iter.state == BPF_ITER_STATE_ACTIVE) {
6931 		/* branch out active iter state */
6932 		queued_st = push_stack(env, insn_idx + 1, insn_idx, false);
6933 		if (!queued_st)
6934 			return -ENOMEM;
6935 
6936 		queued_iter = &queued_st->frame[iter_frameno]->stack[iter_spi].spilled_ptr;
6937 		queued_iter->iter.state = BPF_ITER_STATE_ACTIVE;
6938 		queued_iter->iter.depth++;
6939 
6940 		queued_fr = queued_st->frame[queued_st->curframe];
6941 		mark_ptr_not_null_reg(&queued_fr->regs[BPF_REG_0]);
6942 	}
6943 
6944 	/* switch to DRAINED state, but keep the depth unchanged: mark current
6945 	 * iter state as drained and assume iter_next() returned NULL */
6946 	cur_iter->iter.state = BPF_ITER_STATE_DRAINED;
6947 	__mark_reg_const_zero(&cur_fr->regs[BPF_REG_0]);
6948 
6949 	return 0;
6950 }
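
/* Illustrative sketch, not part of the verifier: the open-coded numbers
 * iterator loop that the logic above verifies. The bpf_iter_num_* kfuncs are
 * real; section and program names are assumptions.
 *
 *	SEC("raw_tp/sys_enter")
 *	int iter_prog(void *ctx)
 *	{
 *		struct bpf_iter_num it;		// iter state lives in stack slots
 *		int *v, sum = 0;
 *
 *		bpf_iter_num_new(&it, 0, 10);		// KF_ITER_NEW
 *		while ((v = bpf_iter_num_next(&it)))	// KF_ITER_NEXT, NULL ends the loop
 *			sum += *v;
 *		bpf_iter_num_destroy(&it);		// KF_ITER_DESTROY
 *		return sum;
 *	}
 */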
6951 
6952 static bool arg_type_is_mem_size(enum bpf_arg_type type)
6953 {
6954 	return type == ARG_CONST_SIZE ||
6955 	       type == ARG_CONST_SIZE_OR_ZERO;
6956 }
6957 
6958 static bool arg_type_is_release(enum bpf_arg_type type)
6959 {
6960 	return type & OBJ_RELEASE;
6961 }
6962 
6963 static bool arg_type_is_dynptr(enum bpf_arg_type type)
6964 {
6965 	return base_type(type) == ARG_PTR_TO_DYNPTR;
6966 }
6967 
6968 static int int_ptr_type_to_size(enum bpf_arg_type type)
6969 {
6970 	if (type == ARG_PTR_TO_INT)
6971 		return sizeof(u32);
6972 	else if (type == ARG_PTR_TO_LONG)
6973 		return sizeof(u64);
6974 
6975 	return -EINVAL;
6976 }
6977 
6978 static int resolve_map_arg_type(struct bpf_verifier_env *env,
6979 				 const struct bpf_call_arg_meta *meta,
6980 				 enum bpf_arg_type *arg_type)
6981 {
6982 	if (!meta->map_ptr) {
6983 		/* kernel subsystem misconfigured verifier */
6984 		verbose(env, "invalid map_ptr to access map->type\n");
6985 		return -EACCES;
6986 	}
6987 
6988 	switch (meta->map_ptr->map_type) {
6989 	case BPF_MAP_TYPE_SOCKMAP:
6990 	case BPF_MAP_TYPE_SOCKHASH:
6991 		if (*arg_type == ARG_PTR_TO_MAP_VALUE) {
6992 			*arg_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON;
6993 		} else {
6994 			verbose(env, "invalid arg_type for sockmap/sockhash\n");
6995 			return -EINVAL;
6996 		}
6997 		break;
6998 	case BPF_MAP_TYPE_BLOOM_FILTER:
6999 		if (meta->func_id == BPF_FUNC_map_peek_elem)
7000 			*arg_type = ARG_PTR_TO_MAP_VALUE;
7001 		break;
7002 	default:
7003 		break;
7004 	}
7005 	return 0;
7006 }
7007 
7008 struct bpf_reg_types {
7009 	const enum bpf_reg_type types[10];
7010 	u32 *btf_id;
7011 };
7012 
7013 static const struct bpf_reg_types sock_types = {
7014 	.types = {
7015 		PTR_TO_SOCK_COMMON,
7016 		PTR_TO_SOCKET,
7017 		PTR_TO_TCP_SOCK,
7018 		PTR_TO_XDP_SOCK,
7019 	},
7020 };
7021 
7022 #ifdef CONFIG_NET
7023 static const struct bpf_reg_types btf_id_sock_common_types = {
7024 	.types = {
7025 		PTR_TO_SOCK_COMMON,
7026 		PTR_TO_SOCKET,
7027 		PTR_TO_TCP_SOCK,
7028 		PTR_TO_XDP_SOCK,
7029 		PTR_TO_BTF_ID,
7030 		PTR_TO_BTF_ID | PTR_TRUSTED,
7031 	},
7032 	.btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
7033 };
7034 #endif
7035 
7036 static const struct bpf_reg_types mem_types = {
7037 	.types = {
7038 		PTR_TO_STACK,
7039 		PTR_TO_PACKET,
7040 		PTR_TO_PACKET_META,
7041 		PTR_TO_MAP_KEY,
7042 		PTR_TO_MAP_VALUE,
7043 		PTR_TO_MEM,
7044 		PTR_TO_MEM | MEM_RINGBUF,
7045 		PTR_TO_BUF,
7046 		PTR_TO_BTF_ID | PTR_TRUSTED,
7047 	},
7048 };
7049 
7050 static const struct bpf_reg_types int_ptr_types = {
7051 	.types = {
7052 		PTR_TO_STACK,
7053 		PTR_TO_PACKET,
7054 		PTR_TO_PACKET_META,
7055 		PTR_TO_MAP_KEY,
7056 		PTR_TO_MAP_VALUE,
7057 	},
7058 };
7059 
7060 static const struct bpf_reg_types spin_lock_types = {
7061 	.types = {
7062 		PTR_TO_MAP_VALUE,
7063 		PTR_TO_BTF_ID | MEM_ALLOC,
7064 	}
7065 };
7066 
7067 static const struct bpf_reg_types fullsock_types = { .types = { PTR_TO_SOCKET } };
7068 static const struct bpf_reg_types scalar_types = { .types = { SCALAR_VALUE } };
7069 static const struct bpf_reg_types context_types = { .types = { PTR_TO_CTX } };
7070 static const struct bpf_reg_types ringbuf_mem_types = { .types = { PTR_TO_MEM | MEM_RINGBUF } };
7071 static const struct bpf_reg_types const_map_ptr_types = { .types = { CONST_PTR_TO_MAP } };
7072 static const struct bpf_reg_types btf_ptr_types = {
7073 	.types = {
7074 		PTR_TO_BTF_ID,
7075 		PTR_TO_BTF_ID | PTR_TRUSTED,
7076 		PTR_TO_BTF_ID | MEM_RCU,
7077 	},
7078 };
7079 static const struct bpf_reg_types percpu_btf_ptr_types = {
7080 	.types = {
7081 		PTR_TO_BTF_ID | MEM_PERCPU,
7082 		PTR_TO_BTF_ID | MEM_PERCPU | PTR_TRUSTED,
7083 	}
7084 };
7085 static const struct bpf_reg_types func_ptr_types = { .types = { PTR_TO_FUNC } };
7086 static const struct bpf_reg_types stack_ptr_types = { .types = { PTR_TO_STACK } };
7087 static const struct bpf_reg_types const_str_ptr_types = { .types = { PTR_TO_MAP_VALUE } };
7088 static const struct bpf_reg_types timer_types = { .types = { PTR_TO_MAP_VALUE } };
7089 static const struct bpf_reg_types kptr_types = { .types = { PTR_TO_MAP_VALUE } };
7090 static const struct bpf_reg_types dynptr_types = {
7091 	.types = {
7092 		PTR_TO_STACK,
7093 		CONST_PTR_TO_DYNPTR,
7094 	}
7095 };
7096 
7097 static const struct bpf_reg_types *compatible_reg_types[__BPF_ARG_TYPE_MAX] = {
7098 	[ARG_PTR_TO_MAP_KEY]		= &mem_types,
7099 	[ARG_PTR_TO_MAP_VALUE]		= &mem_types,
7100 	[ARG_CONST_SIZE]		= &scalar_types,
7101 	[ARG_CONST_SIZE_OR_ZERO]	= &scalar_types,
7102 	[ARG_CONST_ALLOC_SIZE_OR_ZERO]	= &scalar_types,
7103 	[ARG_CONST_MAP_PTR]		= &const_map_ptr_types,
7104 	[ARG_PTR_TO_CTX]		= &context_types,
7105 	[ARG_PTR_TO_SOCK_COMMON]	= &sock_types,
7106 #ifdef CONFIG_NET
7107 	[ARG_PTR_TO_BTF_ID_SOCK_COMMON]	= &btf_id_sock_common_types,
7108 #endif
7109 	[ARG_PTR_TO_SOCKET]		= &fullsock_types,
7110 	[ARG_PTR_TO_BTF_ID]		= &btf_ptr_types,
7111 	[ARG_PTR_TO_SPIN_LOCK]		= &spin_lock_types,
7112 	[ARG_PTR_TO_MEM]		= &mem_types,
7113 	[ARG_PTR_TO_RINGBUF_MEM]	= &ringbuf_mem_types,
7114 	[ARG_PTR_TO_INT]		= &int_ptr_types,
7115 	[ARG_PTR_TO_LONG]		= &int_ptr_types,
7116 	[ARG_PTR_TO_PERCPU_BTF_ID]	= &percpu_btf_ptr_types,
7117 	[ARG_PTR_TO_FUNC]		= &func_ptr_types,
7118 	[ARG_PTR_TO_STACK]		= &stack_ptr_types,
7119 	[ARG_PTR_TO_CONST_STR]		= &const_str_ptr_types,
7120 	[ARG_PTR_TO_TIMER]		= &timer_types,
7121 	[ARG_PTR_TO_KPTR]		= &kptr_types,
7122 	[ARG_PTR_TO_DYNPTR]		= &dynptr_types,
7123 };
7124 
7125 static int check_reg_type(struct bpf_verifier_env *env, u32 regno,
7126 			  enum bpf_arg_type arg_type,
7127 			  const u32 *arg_btf_id,
7128 			  struct bpf_call_arg_meta *meta)
7129 {
7130 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
7131 	enum bpf_reg_type expected, type = reg->type;
7132 	const struct bpf_reg_types *compatible;
7133 	int i, j;
7134 
7135 	compatible = compatible_reg_types[base_type(arg_type)];
7136 	if (!compatible) {
7137 		verbose(env, "verifier internal error: unsupported arg type %d\n", arg_type);
7138 		return -EFAULT;
7139 	}
7140 
7141 	/* ARG_PTR_TO_MEM + RDONLY is compatible with PTR_TO_MEM and PTR_TO_MEM + RDONLY,
7142 	 * but ARG_PTR_TO_MEM is compatible only with PTR_TO_MEM and NOT with PTR_TO_MEM + RDONLY
7143 	 *
7144 	 * Same for MAYBE_NULL:
7145 	 *
7146 	 * ARG_PTR_TO_MEM + MAYBE_NULL is compatible with PTR_TO_MEM and PTR_TO_MEM + MAYBE_NULL,
7147 	 * but ARG_PTR_TO_MEM is compatible only with PTR_TO_MEM and NOT with PTR_TO_MEM + MAYBE_NULL
7148 	 *
7149 	 * Therefore we fold these flags depending on the arg_type before comparison.
7150 	 */
7151 	if (arg_type & MEM_RDONLY)
7152 		type &= ~MEM_RDONLY;
7153 	if (arg_type & PTR_MAYBE_NULL)
7154 		type &= ~PTR_MAYBE_NULL;
7155 
7156 	if (meta->func_id == BPF_FUNC_kptr_xchg && type & MEM_ALLOC)
7157 		type &= ~MEM_ALLOC;
7158 
7159 	for (i = 0; i < ARRAY_SIZE(compatible->types); i++) {
7160 		expected = compatible->types[i];
7161 		if (expected == NOT_INIT)
7162 			break;
7163 
7164 		if (type == expected)
7165 			goto found;
7166 	}
7167 
7168 	verbose(env, "R%d type=%s expected=", regno, reg_type_str(env, reg->type));
7169 	for (j = 0; j + 1 < i; j++)
7170 		verbose(env, "%s, ", reg_type_str(env, compatible->types[j]));
7171 	verbose(env, "%s\n", reg_type_str(env, compatible->types[j]));
7172 	return -EACCES;
7173 
7174 found:
7175 	if (base_type(reg->type) != PTR_TO_BTF_ID)
7176 		return 0;
7177 
7178 	if (compatible == &mem_types) {
7179 		if (!(arg_type & MEM_RDONLY)) {
7180 			verbose(env,
7181 				"%s() may write into memory pointed by R%d type=%s\n",
7182 				func_id_name(meta->func_id),
7183 				regno, reg_type_str(env, reg->type));
7184 			return -EACCES;
7185 		}
7186 		return 0;
7187 	}
7188 
7189 	switch ((int)reg->type) {
7190 	case PTR_TO_BTF_ID:
7191 	case PTR_TO_BTF_ID | PTR_TRUSTED:
7192 	case PTR_TO_BTF_ID | MEM_RCU:
7193 	case PTR_TO_BTF_ID | PTR_MAYBE_NULL:
7194 	case PTR_TO_BTF_ID | PTR_MAYBE_NULL | MEM_RCU:
7195 	{
7196 		/* For bpf_sk_release, it needs to match against the first member
7197 		 * 'struct sock_common', hence make an exception for it. This
7198 		 * allows bpf_sk_release to work for multiple socket types.
7199 		 */
7200 		bool strict_type_match = arg_type_is_release(arg_type) &&
7201 					 meta->func_id != BPF_FUNC_sk_release;
7202 
7203 		if (type_may_be_null(reg->type) &&
7204 		    (!type_may_be_null(arg_type) || arg_type_is_release(arg_type))) {
7205 			verbose(env, "Possibly NULL pointer passed to helper arg%d\n", regno);
7206 			return -EACCES;
7207 		}
7208 
7209 		if (!arg_btf_id) {
7210 			if (!compatible->btf_id) {
7211 				verbose(env, "verifier internal error: missing arg compatible BTF ID\n");
7212 				return -EFAULT;
7213 			}
7214 			arg_btf_id = compatible->btf_id;
7215 		}
7216 
7217 		if (meta->func_id == BPF_FUNC_kptr_xchg) {
7218 			if (map_kptr_match_type(env, meta->kptr_field, reg, regno))
7219 				return -EACCES;
7220 		} else {
7221 			if (arg_btf_id == BPF_PTR_POISON) {
7222 				verbose(env, "verifier internal error:");
7223 				verbose(env, "R%d has non-overwritten BPF_PTR_POISON type\n",
7224 					regno);
7225 				return -EACCES;
7226 			}
7227 
7228 			if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, reg->off,
7229 						  btf_vmlinux, *arg_btf_id,
7230 						  strict_type_match)) {
7231 				verbose(env, "R%d is of type %s but %s is expected\n",
7232 					regno, btf_type_name(reg->btf, reg->btf_id),
7233 					btf_type_name(btf_vmlinux, *arg_btf_id));
7234 				return -EACCES;
7235 			}
7236 		}
7237 		break;
7238 	}
7239 	case PTR_TO_BTF_ID | MEM_ALLOC:
7240 		if (meta->func_id != BPF_FUNC_spin_lock && meta->func_id != BPF_FUNC_spin_unlock &&
7241 		    meta->func_id != BPF_FUNC_kptr_xchg) {
7242 			verbose(env, "verifier internal error: unimplemented handling of MEM_ALLOC\n");
7243 			return -EFAULT;
7244 		}
7245 		/* Handled by helper specific checks */
7246 		break;
7247 	case PTR_TO_BTF_ID | MEM_PERCPU:
7248 	case PTR_TO_BTF_ID | MEM_PERCPU | PTR_TRUSTED:
7249 		/* Handled by helper specific checks */
7250 		break;
7251 	default:
7252 		verbose(env, "verifier internal error: invalid PTR_TO_BTF_ID register for type match\n");
7253 		return -EFAULT;
7254 	}
7255 	return 0;
7256 }
7257 
7258 static struct btf_field *
7259 reg_find_field_offset(const struct bpf_reg_state *reg, s32 off, u32 fields)
7260 {
7261 	struct btf_field *field;
7262 	struct btf_record *rec;
7263 
7264 	rec = reg_btf_record(reg);
7265 	if (!rec)
7266 		return NULL;
7267 
7268 	field = btf_record_find(rec, off, fields);
7269 	if (!field)
7270 		return NULL;
7271 
7272 	return field;
7273 }
7274 
7275 int check_func_arg_reg_off(struct bpf_verifier_env *env,
7276 			   const struct bpf_reg_state *reg, int regno,
7277 			   enum bpf_arg_type arg_type)
7278 {
7279 	u32 type = reg->type;
7280 
7281 	/* When a referenced register is passed to a release function, its fixed
7282 	 * offset must be 0.
7283 	 *
7284 	 * We check that an arg_type_is_release reg has a ref_obj_id when
7285 	 * storing meta->release_regno.
7286 	 */
7287 	if (arg_type_is_release(arg_type)) {
7288 		/* ARG_PTR_TO_DYNPTR with OBJ_RELEASE is a bit special, as it
7289 		 * may not directly point to the object being released, but to
7290 		 * a dynptr pointing to such an object, which might be at some offset
7291 		 * on the stack. In that case, we simply fall back to the
7292 		 * default handling.
7293 		 */
7294 		if (arg_type_is_dynptr(arg_type) && type == PTR_TO_STACK)
7295 			return 0;
7296 
7297 		if ((type_is_ptr_alloc_obj(type) || type_is_non_owning_ref(type)) && reg->off) {
7298 			if (reg_find_field_offset(reg, reg->off, BPF_GRAPH_NODE_OR_ROOT))
7299 				return __check_ptr_off_reg(env, reg, regno, true);
7300 
7301 			verbose(env, "R%d must have zero offset when passed to release func\n",
7302 				regno);
7303 			verbose(env, "No graph node or root found at R%d type:%s off:%d\n", regno,
7304 				btf_type_name(reg->btf, reg->btf_id), reg->off);
7305 			return -EINVAL;
7306 		}
7307 
7308 		/* Doing check_ptr_off_reg check for the offset will catch this
7309 		 * because fixed_off_ok is false, but checking here allows us
7310 		 * to give the user a better error message.
7311 		 */
7312 		if (reg->off) {
7313 			verbose(env, "R%d must have zero offset when passed to release func or trusted arg to kfunc\n",
7314 				regno);
7315 			return -EINVAL;
7316 		}
7317 		return __check_ptr_off_reg(env, reg, regno, false);
7318 	}
7319 
7320 	switch (type) {
7321 	/* Pointer types where both fixed and variable offset is explicitly allowed: */
7322 	case PTR_TO_STACK:
7323 	case PTR_TO_PACKET:
7324 	case PTR_TO_PACKET_META:
7325 	case PTR_TO_MAP_KEY:
7326 	case PTR_TO_MAP_VALUE:
7327 	case PTR_TO_MEM:
7328 	case PTR_TO_MEM | MEM_RDONLY:
7329 	case PTR_TO_MEM | MEM_RINGBUF:
7330 	case PTR_TO_BUF:
7331 	case PTR_TO_BUF | MEM_RDONLY:
7332 	case SCALAR_VALUE:
7333 		return 0;
7334 	/* All the rest must be rejected, except PTR_TO_BTF_ID which allows
7335 	 * fixed offset.
7336 	 */
7337 	case PTR_TO_BTF_ID:
7338 	case PTR_TO_BTF_ID | MEM_ALLOC:
7339 	case PTR_TO_BTF_ID | PTR_TRUSTED:
7340 	case PTR_TO_BTF_ID | MEM_RCU:
7341 	case PTR_TO_BTF_ID | MEM_ALLOC | NON_OWN_REF:
7342 		/* When a referenced PTR_TO_BTF_ID is passed to a release function,
7343 		 * its fixed offset must be 0. In the other cases, fixed offset
7344 		 * can be non-zero. This was already checked above. So pass
7345 		 * fixed_off_ok as true to allow fixed offset for all other
7346 		 * cases. var_off always must be 0 for PTR_TO_BTF_ID, hence we
7347 		 * still need to do checks instead of returning.
7348 		 */
7349 		return __check_ptr_off_reg(env, reg, regno, true);
7350 	default:
7351 		return __check_ptr_off_reg(env, reg, regno, false);
7352 	}
7353 }
7354 
7355 static struct bpf_reg_state *get_dynptr_arg_reg(struct bpf_verifier_env *env,
7356 						const struct bpf_func_proto *fn,
7357 						struct bpf_reg_state *regs)
7358 {
7359 	struct bpf_reg_state *state = NULL;
7360 	int i;
7361 
7362 	for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++)
7363 		if (arg_type_is_dynptr(fn->arg_type[i])) {
7364 			if (state) {
7365 				verbose(env, "verifier internal error: multiple dynptr args\n");
7366 				return NULL;
7367 			}
7368 			state = &regs[BPF_REG_1 + i];
7369 		}
7370 
7371 	if (!state)
7372 		verbose(env, "verifier internal error: no dynptr arg found\n");
7373 
7374 	return state;
7375 }
7376 
7377 static int dynptr_id(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
7378 {
7379 	struct bpf_func_state *state = func(env, reg);
7380 	int spi;
7381 
7382 	if (reg->type == CONST_PTR_TO_DYNPTR)
7383 		return reg->id;
7384 	spi = dynptr_get_spi(env, reg);
7385 	if (spi < 0)
7386 		return spi;
7387 	return state->stack[spi].spilled_ptr.id;
7388 }
7389 
7390 static int dynptr_ref_obj_id(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
7391 {
7392 	struct bpf_func_state *state = func(env, reg);
7393 	int spi;
7394 
7395 	if (reg->type == CONST_PTR_TO_DYNPTR)
7396 		return reg->ref_obj_id;
7397 	spi = dynptr_get_spi(env, reg);
7398 	if (spi < 0)
7399 		return spi;
7400 	return state->stack[spi].spilled_ptr.ref_obj_id;
7401 }
7402 
7403 static enum bpf_dynptr_type dynptr_get_type(struct bpf_verifier_env *env,
7404 					    struct bpf_reg_state *reg)
7405 {
7406 	struct bpf_func_state *state = func(env, reg);
7407 	int spi;
7408 
7409 	if (reg->type == CONST_PTR_TO_DYNPTR)
7410 		return reg->dynptr.type;
7411 
7412 	spi = __get_spi(reg->off);
7413 	if (spi < 0) {
7414 		verbose(env, "verifier internal error: invalid spi when querying dynptr type\n");
7415 		return BPF_DYNPTR_TYPE_INVALID;
7416 	}
7417 
7418 	return state->stack[spi].spilled_ptr.dynptr.type;
7419 }
7420 
7421 static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
7422 			  struct bpf_call_arg_meta *meta,
7423 			  const struct bpf_func_proto *fn,
7424 			  int insn_idx)
7425 {
7426 	u32 regno = BPF_REG_1 + arg;
7427 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
7428 	enum bpf_arg_type arg_type = fn->arg_type[arg];
7429 	enum bpf_reg_type type = reg->type;
7430 	u32 *arg_btf_id = NULL;
7431 	int err = 0;
7432 
7433 	if (arg_type == ARG_DONTCARE)
7434 		return 0;
7435 
7436 	err = check_reg_arg(env, regno, SRC_OP);
7437 	if (err)
7438 		return err;
7439 
7440 	if (arg_type == ARG_ANYTHING) {
7441 		if (is_pointer_value(env, regno)) {
7442 			verbose(env, "R%d leaks addr into helper function\n",
7443 				regno);
7444 			return -EACCES;
7445 		}
7446 		return 0;
7447 	}
7448 
7449 	if (type_is_pkt_pointer(type) &&
7450 	    !may_access_direct_pkt_data(env, meta, BPF_READ)) {
7451 		verbose(env, "helper access to the packet is not allowed\n");
7452 		return -EACCES;
7453 	}
7454 
7455 	if (base_type(arg_type) == ARG_PTR_TO_MAP_VALUE) {
7456 		err = resolve_map_arg_type(env, meta, &arg_type);
7457 		if (err)
7458 			return err;
7459 	}
7460 
7461 	if (register_is_null(reg) && type_may_be_null(arg_type))
7462 		/* A NULL register has a SCALAR_VALUE type, so skip
7463 		 * type checking.
7464 		 */
7465 		goto skip_type_check;
7466 
7467 	/* arg_btf_id and arg_size are in a union. */
7468 	if (base_type(arg_type) == ARG_PTR_TO_BTF_ID ||
7469 	    base_type(arg_type) == ARG_PTR_TO_SPIN_LOCK)
7470 		arg_btf_id = fn->arg_btf_id[arg];
7471 
7472 	err = check_reg_type(env, regno, arg_type, arg_btf_id, meta);
7473 	if (err)
7474 		return err;
7475 
7476 	err = check_func_arg_reg_off(env, reg, regno, arg_type);
7477 	if (err)
7478 		return err;
7479 
7480 skip_type_check:
7481 	if (arg_type_is_release(arg_type)) {
7482 		if (arg_type_is_dynptr(arg_type)) {
7483 			struct bpf_func_state *state = func(env, reg);
7484 			int spi;
7485 
7486 			/* Only a dynptr created on the stack can be released, thus
7487 			 * the get_spi and stack state checks for spilled_ptr
7488 			 * should only be done before process_dynptr_func for
7489 			 * PTR_TO_STACK.
7490 			 */
7491 			if (reg->type == PTR_TO_STACK) {
7492 				spi = dynptr_get_spi(env, reg);
7493 				if (spi < 0 || !state->stack[spi].spilled_ptr.ref_obj_id) {
7494 					verbose(env, "arg %d is an unacquired reference\n", regno);
7495 					return -EINVAL;
7496 				}
7497 			} else {
7498 				verbose(env, "cannot release unowned const bpf_dynptr\n");
7499 				return -EINVAL;
7500 			}
7501 		} else if (!reg->ref_obj_id && !register_is_null(reg)) {
7502 			verbose(env, "R%d must be referenced when passed to release function\n",
7503 				regno);
7504 			return -EINVAL;
7505 		}
7506 		if (meta->release_regno) {
7507 			verbose(env, "verifier internal error: more than one release argument\n");
7508 			return -EFAULT;
7509 		}
7510 		meta->release_regno = regno;
7511 	}
7512 
7513 	if (reg->ref_obj_id) {
7514 		if (meta->ref_obj_id) {
7515 			verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n",
7516 				regno, reg->ref_obj_id,
7517 				meta->ref_obj_id);
7518 			return -EFAULT;
7519 		}
7520 		meta->ref_obj_id = reg->ref_obj_id;
7521 	}
7522 
7523 	switch (base_type(arg_type)) {
7524 	case ARG_CONST_MAP_PTR:
7525 		/* bpf_map_xxx(map_ptr) call: remember that map_ptr */
7526 		if (meta->map_ptr) {
7527 			/* Use map_uid (which is unique id of inner map) to reject:
7528 			 * inner_map1 = bpf_map_lookup_elem(outer_map, key1)
7529 			 * inner_map2 = bpf_map_lookup_elem(outer_map, key2)
7530 			 * if (inner_map1 && inner_map2) {
7531 			 *     timer = bpf_map_lookup_elem(inner_map1);
7532 			 *     if (timer)
7533 			 *         // mismatch would have been allowed
7534 			 *         bpf_timer_init(timer, inner_map2);
7535 			 * }
7536 			 *
7537 			 * Comparing map_ptr is enough to distinguish normal and outer maps.
7538 			 */
7539 			if (meta->map_ptr != reg->map_ptr ||
7540 			    meta->map_uid != reg->map_uid) {
7541 				verbose(env,
7542 					"timer pointer in R1 map_uid=%d doesn't match map pointer in R2 map_uid=%d\n",
7543 					meta->map_uid, reg->map_uid);
7544 				return -EINVAL;
7545 			}
7546 		}
7547 		meta->map_ptr = reg->map_ptr;
7548 		meta->map_uid = reg->map_uid;
7549 		break;
7550 	case ARG_PTR_TO_MAP_KEY:
7551 		/* bpf_map_xxx(..., map_ptr, ..., key) call:
7552 		 * check that [key, key + map->key_size) are within
7553 		 * stack limits and initialized
7554 		 */
7555 		if (!meta->map_ptr) {
7556 			/* in the function declaration map_ptr must come before
7557 			 * map_key, so that it's verified and known before
7558 			 * we have to check map_key here. Otherwise it means
7559 			 * that the kernel subsystem misconfigured the verifier
7560 			 */
7561 			verbose(env, "invalid map_ptr to access map->key\n");
7562 			return -EACCES;
7563 		}
7564 		err = check_helper_mem_access(env, regno,
7565 					      meta->map_ptr->key_size, false,
7566 					      NULL);
7567 		break;
7568 	case ARG_PTR_TO_MAP_VALUE:
7569 		if (type_may_be_null(arg_type) && register_is_null(reg))
7570 			return 0;
7571 
7572 		/* bpf_map_xxx(..., map_ptr, ..., value) call:
7573 		 * check [value, value + map->value_size) validity
7574 		 */
7575 		if (!meta->map_ptr) {
7576 			/* kernel subsystem misconfigured verifier */
7577 			verbose(env, "invalid map_ptr to access map->value\n");
7578 			return -EACCES;
7579 		}
7580 		meta->raw_mode = arg_type & MEM_UNINIT;
7581 		err = check_helper_mem_access(env, regno,
7582 					      meta->map_ptr->value_size, false,
7583 					      meta);
7584 		break;
7585 	case ARG_PTR_TO_PERCPU_BTF_ID:
7586 		if (!reg->btf_id) {
7587 			verbose(env, "Helper has invalid btf_id in R%d\n", regno);
7588 			return -EACCES;
7589 		}
7590 		meta->ret_btf = reg->btf;
7591 		meta->ret_btf_id = reg->btf_id;
7592 		break;
7593 	case ARG_PTR_TO_SPIN_LOCK:
7594 		if (in_rbtree_lock_required_cb(env)) {
7595 			verbose(env, "can't spin_{lock,unlock} in rbtree cb\n");
7596 			return -EACCES;
7597 		}
7598 		if (meta->func_id == BPF_FUNC_spin_lock) {
7599 			err = process_spin_lock(env, regno, true);
7600 			if (err)
7601 				return err;
7602 		} else if (meta->func_id == BPF_FUNC_spin_unlock) {
7603 			err = process_spin_lock(env, regno, false);
7604 			if (err)
7605 				return err;
7606 		} else {
7607 			verbose(env, "verifier internal error\n");
7608 			return -EFAULT;
7609 		}
7610 		break;
7611 	case ARG_PTR_TO_TIMER:
7612 		err = process_timer_func(env, regno, meta);
7613 		if (err)
7614 			return err;
7615 		break;
7616 	case ARG_PTR_TO_FUNC:
7617 		meta->subprogno = reg->subprogno;
7618 		break;
7619 	case ARG_PTR_TO_MEM:
7620 		/* The access to this pointer is only checked when we hit the
7621 		 * next is_mem_size argument below.
7622 		 */
7623 		meta->raw_mode = arg_type & MEM_UNINIT;
7624 		if (arg_type & MEM_FIXED_SIZE) {
7625 			err = check_helper_mem_access(env, regno,
7626 						      fn->arg_size[arg], false,
7627 						      meta);
7628 		}
7629 		break;
7630 	case ARG_CONST_SIZE:
7631 		err = check_mem_size_reg(env, reg, regno, false, meta);
7632 		break;
7633 	case ARG_CONST_SIZE_OR_ZERO:
7634 		err = check_mem_size_reg(env, reg, regno, true, meta);
7635 		break;
7636 	case ARG_PTR_TO_DYNPTR:
7637 		err = process_dynptr_func(env, regno, insn_idx, arg_type);
7638 		if (err)
7639 			return err;
7640 		break;
7641 	case ARG_CONST_ALLOC_SIZE_OR_ZERO:
7642 		if (!tnum_is_const(reg->var_off)) {
7643 			verbose(env, "R%d is not a known constant'\n",
7644 				regno);
7645 			return -EACCES;
7646 		}
7647 		meta->mem_size = reg->var_off.value;
7648 		err = mark_chain_precision(env, regno);
7649 		if (err)
7650 			return err;
7651 		break;
7652 	case ARG_PTR_TO_INT:
7653 	case ARG_PTR_TO_LONG:
7654 	{
7655 		int size = int_ptr_type_to_size(arg_type);
7656 
7657 		err = check_helper_mem_access(env, regno, size, false, meta);
7658 		if (err)
7659 			return err;
7660 		err = check_ptr_alignment(env, reg, 0, size, true);
7661 		break;
7662 	}
7663 	case ARG_PTR_TO_CONST_STR:
7664 	{
7665 		struct bpf_map *map = reg->map_ptr;
7666 		int map_off;
7667 		u64 map_addr;
7668 		char *str_ptr;
7669 
7670 		if (!bpf_map_is_rdonly(map)) {
7671 			verbose(env, "R%d does not point to a readonly map'\n", regno);
7672 			return -EACCES;
7673 		}
7674 
7675 		if (!tnum_is_const(reg->var_off)) {
7676 			verbose(env, "R%d is not a constant address'\n", regno);
7677 			return -EACCES;
7678 		}
7679 
7680 		if (!map->ops->map_direct_value_addr) {
7681 			verbose(env, "no direct value access support for this map type\n");
7682 			return -EACCES;
7683 		}
7684 
7685 		err = check_map_access(env, regno, reg->off,
7686 				       map->value_size - reg->off, false,
7687 				       ACCESS_HELPER);
7688 		if (err)
7689 			return err;
7690 
7691 		map_off = reg->off + reg->var_off.value;
7692 		err = map->ops->map_direct_value_addr(map, &map_addr, map_off);
7693 		if (err) {
7694 			verbose(env, "direct value access on string failed\n");
7695 			return err;
7696 		}
7697 
7698 		str_ptr = (char *)(long)(map_addr);
7699 		if (!strnchr(str_ptr + map_off, map->value_size - map_off, 0)) {
7700 			verbose(env, "string is not zero-terminated\n");
7701 			return -EINVAL;
7702 		}
7703 		break;
7704 	}
7705 	case ARG_PTR_TO_KPTR:
7706 		err = process_kptr_func(env, regno, meta);
7707 		if (err)
7708 			return err;
7709 		break;
7710 	}
7711 
7712 	return err;
7713 }
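
/* Illustrative sketch, not part of the verifier: how the ARG_PTR_TO_CONST_STR
 * requirements handled above are met in practice. A format string literal
 * passed to bpf_snprintf() is placed by the compiler into the read-only
 * .rodata map, so the verifier sees a PTR_TO_MAP_VALUE with a constant offset
 * into a frozen map holding a NUL-terminated string. Program and buffer names
 * are made up.
 *
 *	SEC("tp/syscalls/sys_enter_getpid")
 *	int fmt_prog(void *ctx)
 *	{
 *		char out[16];
 *		__u64 args[] = { 42 };
 *
 *		// "value=%d" is the ARG_PTR_TO_CONST_STR argument
 *		bpf_snprintf(out, sizeof(out), "value=%d", args, sizeof(args));
 *		return 0;
 *	}
 */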
7714 
7715 static bool may_update_sockmap(struct bpf_verifier_env *env, int func_id)
7716 {
7717 	enum bpf_attach_type eatype = env->prog->expected_attach_type;
7718 	enum bpf_prog_type type = resolve_prog_type(env->prog);
7719 
7720 	if (func_id != BPF_FUNC_map_update_elem)
7721 		return false;
7722 
7723 	/* It's not possible to get access to a locked struct sock in these
7724 	 * contexts, so updating is safe.
7725 	 */
7726 	switch (type) {
7727 	case BPF_PROG_TYPE_TRACING:
7728 		if (eatype == BPF_TRACE_ITER)
7729 			return true;
7730 		break;
7731 	case BPF_PROG_TYPE_SOCKET_FILTER:
7732 	case BPF_PROG_TYPE_SCHED_CLS:
7733 	case BPF_PROG_TYPE_SCHED_ACT:
7734 	case BPF_PROG_TYPE_XDP:
7735 	case BPF_PROG_TYPE_SK_REUSEPORT:
7736 	case BPF_PROG_TYPE_FLOW_DISSECTOR:
7737 	case BPF_PROG_TYPE_SK_LOOKUP:
7738 		return true;
7739 	default:
7740 		break;
7741 	}
7742 
7743 	verbose(env, "cannot update sockmap in this context\n");
7744 	return false;
7745 }
7746 
7747 static bool allow_tail_call_in_subprogs(struct bpf_verifier_env *env)
7748 {
7749 	return env->prog->jit_requested &&
7750 	       bpf_jit_supports_subprog_tailcalls();
7751 }
7752 
7753 static int check_map_func_compatibility(struct bpf_verifier_env *env,
7754 					struct bpf_map *map, int func_id)
7755 {
7756 	if (!map)
7757 		return 0;
7758 
7759 	/* We need a two way check, first is from map perspective ... */
7760 	switch (map->map_type) {
7761 	case BPF_MAP_TYPE_PROG_ARRAY:
7762 		if (func_id != BPF_FUNC_tail_call)
7763 			goto error;
7764 		break;
7765 	case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
7766 		if (func_id != BPF_FUNC_perf_event_read &&
7767 		    func_id != BPF_FUNC_perf_event_output &&
7768 		    func_id != BPF_FUNC_skb_output &&
7769 		    func_id != BPF_FUNC_perf_event_read_value &&
7770 		    func_id != BPF_FUNC_xdp_output)
7771 			goto error;
7772 		break;
7773 	case BPF_MAP_TYPE_RINGBUF:
7774 		if (func_id != BPF_FUNC_ringbuf_output &&
7775 		    func_id != BPF_FUNC_ringbuf_reserve &&
7776 		    func_id != BPF_FUNC_ringbuf_query &&
7777 		    func_id != BPF_FUNC_ringbuf_reserve_dynptr &&
7778 		    func_id != BPF_FUNC_ringbuf_submit_dynptr &&
7779 		    func_id != BPF_FUNC_ringbuf_discard_dynptr)
7780 			goto error;
7781 		break;
7782 	case BPF_MAP_TYPE_USER_RINGBUF:
7783 		if (func_id != BPF_FUNC_user_ringbuf_drain)
7784 			goto error;
7785 		break;
7786 	case BPF_MAP_TYPE_STACK_TRACE:
7787 		if (func_id != BPF_FUNC_get_stackid)
7788 			goto error;
7789 		break;
7790 	case BPF_MAP_TYPE_CGROUP_ARRAY:
7791 		if (func_id != BPF_FUNC_skb_under_cgroup &&
7792 		    func_id != BPF_FUNC_current_task_under_cgroup)
7793 			goto error;
7794 		break;
7795 	case BPF_MAP_TYPE_CGROUP_STORAGE:
7796 	case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
7797 		if (func_id != BPF_FUNC_get_local_storage)
7798 			goto error;
7799 		break;
7800 	case BPF_MAP_TYPE_DEVMAP:
7801 	case BPF_MAP_TYPE_DEVMAP_HASH:
7802 		if (func_id != BPF_FUNC_redirect_map &&
7803 		    func_id != BPF_FUNC_map_lookup_elem)
7804 			goto error;
7805 		break;
7806 	/* Restrict bpf side of cpumap and xskmap, open when use-cases
7807 	 * appear.
7808 	 */
7809 	case BPF_MAP_TYPE_CPUMAP:
7810 		if (func_id != BPF_FUNC_redirect_map)
7811 			goto error;
7812 		break;
7813 	case BPF_MAP_TYPE_XSKMAP:
7814 		if (func_id != BPF_FUNC_redirect_map &&
7815 		    func_id != BPF_FUNC_map_lookup_elem)
7816 			goto error;
7817 		break;
7818 	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
7819 	case BPF_MAP_TYPE_HASH_OF_MAPS:
7820 		if (func_id != BPF_FUNC_map_lookup_elem)
7821 			goto error;
7822 		break;
7823 	case BPF_MAP_TYPE_SOCKMAP:
7824 		if (func_id != BPF_FUNC_sk_redirect_map &&
7825 		    func_id != BPF_FUNC_sock_map_update &&
7826 		    func_id != BPF_FUNC_map_delete_elem &&
7827 		    func_id != BPF_FUNC_msg_redirect_map &&
7828 		    func_id != BPF_FUNC_sk_select_reuseport &&
7829 		    func_id != BPF_FUNC_map_lookup_elem &&
7830 		    !may_update_sockmap(env, func_id))
7831 			goto error;
7832 		break;
7833 	case BPF_MAP_TYPE_SOCKHASH:
7834 		if (func_id != BPF_FUNC_sk_redirect_hash &&
7835 		    func_id != BPF_FUNC_sock_hash_update &&
7836 		    func_id != BPF_FUNC_map_delete_elem &&
7837 		    func_id != BPF_FUNC_msg_redirect_hash &&
7838 		    func_id != BPF_FUNC_sk_select_reuseport &&
7839 		    func_id != BPF_FUNC_map_lookup_elem &&
7840 		    !may_update_sockmap(env, func_id))
7841 			goto error;
7842 		break;
7843 	case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
7844 		if (func_id != BPF_FUNC_sk_select_reuseport)
7845 			goto error;
7846 		break;
7847 	case BPF_MAP_TYPE_QUEUE:
7848 	case BPF_MAP_TYPE_STACK:
7849 		if (func_id != BPF_FUNC_map_peek_elem &&
7850 		    func_id != BPF_FUNC_map_pop_elem &&
7851 		    func_id != BPF_FUNC_map_push_elem)
7852 			goto error;
7853 		break;
7854 	case BPF_MAP_TYPE_SK_STORAGE:
7855 		if (func_id != BPF_FUNC_sk_storage_get &&
7856 		    func_id != BPF_FUNC_sk_storage_delete &&
7857 		    func_id != BPF_FUNC_kptr_xchg)
7858 			goto error;
7859 		break;
7860 	case BPF_MAP_TYPE_INODE_STORAGE:
7861 		if (func_id != BPF_FUNC_inode_storage_get &&
7862 		    func_id != BPF_FUNC_inode_storage_delete &&
7863 		    func_id != BPF_FUNC_kptr_xchg)
7864 			goto error;
7865 		break;
7866 	case BPF_MAP_TYPE_TASK_STORAGE:
7867 		if (func_id != BPF_FUNC_task_storage_get &&
7868 		    func_id != BPF_FUNC_task_storage_delete &&
7869 		    func_id != BPF_FUNC_kptr_xchg)
7870 			goto error;
7871 		break;
7872 	case BPF_MAP_TYPE_CGRP_STORAGE:
7873 		if (func_id != BPF_FUNC_cgrp_storage_get &&
7874 		    func_id != BPF_FUNC_cgrp_storage_delete &&
7875 		    func_id != BPF_FUNC_kptr_xchg)
7876 			goto error;
7877 		break;
7878 	case BPF_MAP_TYPE_BLOOM_FILTER:
7879 		if (func_id != BPF_FUNC_map_peek_elem &&
7880 		    func_id != BPF_FUNC_map_push_elem)
7881 			goto error;
7882 		break;
7883 	default:
7884 		break;
7885 	}
7886 
7887 	/* ... and second from the function itself. */
7888 	switch (func_id) {
7889 	case BPF_FUNC_tail_call:
7890 		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
7891 			goto error;
7892 		if (env->subprog_cnt > 1 && !allow_tail_call_in_subprogs(env)) {
7893 			verbose(env, "tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls\n");
7894 			return -EINVAL;
7895 		}
7896 		break;
7897 	case BPF_FUNC_perf_event_read:
7898 	case BPF_FUNC_perf_event_output:
7899 	case BPF_FUNC_perf_event_read_value:
7900 	case BPF_FUNC_skb_output:
7901 	case BPF_FUNC_xdp_output:
7902 		if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
7903 			goto error;
7904 		break;
7905 	case BPF_FUNC_ringbuf_output:
7906 	case BPF_FUNC_ringbuf_reserve:
7907 	case BPF_FUNC_ringbuf_query:
7908 	case BPF_FUNC_ringbuf_reserve_dynptr:
7909 	case BPF_FUNC_ringbuf_submit_dynptr:
7910 	case BPF_FUNC_ringbuf_discard_dynptr:
7911 		if (map->map_type != BPF_MAP_TYPE_RINGBUF)
7912 			goto error;
7913 		break;
7914 	case BPF_FUNC_user_ringbuf_drain:
7915 		if (map->map_type != BPF_MAP_TYPE_USER_RINGBUF)
7916 			goto error;
7917 		break;
7918 	case BPF_FUNC_get_stackid:
7919 		if (map->map_type != BPF_MAP_TYPE_STACK_TRACE)
7920 			goto error;
7921 		break;
7922 	case BPF_FUNC_current_task_under_cgroup:
7923 	case BPF_FUNC_skb_under_cgroup:
7924 		if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY)
7925 			goto error;
7926 		break;
7927 	case BPF_FUNC_redirect_map:
7928 		if (map->map_type != BPF_MAP_TYPE_DEVMAP &&
7929 		    map->map_type != BPF_MAP_TYPE_DEVMAP_HASH &&
7930 		    map->map_type != BPF_MAP_TYPE_CPUMAP &&
7931 		    map->map_type != BPF_MAP_TYPE_XSKMAP)
7932 			goto error;
7933 		break;
7934 	case BPF_FUNC_sk_redirect_map:
7935 	case BPF_FUNC_msg_redirect_map:
7936 	case BPF_FUNC_sock_map_update:
7937 		if (map->map_type != BPF_MAP_TYPE_SOCKMAP)
7938 			goto error;
7939 		break;
7940 	case BPF_FUNC_sk_redirect_hash:
7941 	case BPF_FUNC_msg_redirect_hash:
7942 	case BPF_FUNC_sock_hash_update:
7943 		if (map->map_type != BPF_MAP_TYPE_SOCKHASH)
7944 			goto error;
7945 		break;
7946 	case BPF_FUNC_get_local_storage:
7947 		if (map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
7948 		    map->map_type != BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
7949 			goto error;
7950 		break;
7951 	case BPF_FUNC_sk_select_reuseport:
7952 		if (map->map_type != BPF_MAP_TYPE_REUSEPORT_SOCKARRAY &&
7953 		    map->map_type != BPF_MAP_TYPE_SOCKMAP &&
7954 		    map->map_type != BPF_MAP_TYPE_SOCKHASH)
7955 			goto error;
7956 		break;
7957 	case BPF_FUNC_map_pop_elem:
7958 		if (map->map_type != BPF_MAP_TYPE_QUEUE &&
7959 		    map->map_type != BPF_MAP_TYPE_STACK)
7960 			goto error;
7961 		break;
7962 	case BPF_FUNC_map_peek_elem:
7963 	case BPF_FUNC_map_push_elem:
7964 		if (map->map_type != BPF_MAP_TYPE_QUEUE &&
7965 		    map->map_type != BPF_MAP_TYPE_STACK &&
7966 		    map->map_type != BPF_MAP_TYPE_BLOOM_FILTER)
7967 			goto error;
7968 		break;
7969 	case BPF_FUNC_map_lookup_percpu_elem:
7970 		if (map->map_type != BPF_MAP_TYPE_PERCPU_ARRAY &&
7971 		    map->map_type != BPF_MAP_TYPE_PERCPU_HASH &&
7972 		    map->map_type != BPF_MAP_TYPE_LRU_PERCPU_HASH)
7973 			goto error;
7974 		break;
7975 	case BPF_FUNC_sk_storage_get:
7976 	case BPF_FUNC_sk_storage_delete:
7977 		if (map->map_type != BPF_MAP_TYPE_SK_STORAGE)
7978 			goto error;
7979 		break;
7980 	case BPF_FUNC_inode_storage_get:
7981 	case BPF_FUNC_inode_storage_delete:
7982 		if (map->map_type != BPF_MAP_TYPE_INODE_STORAGE)
7983 			goto error;
7984 		break;
7985 	case BPF_FUNC_task_storage_get:
7986 	case BPF_FUNC_task_storage_delete:
7987 		if (map->map_type != BPF_MAP_TYPE_TASK_STORAGE)
7988 			goto error;
7989 		break;
7990 	case BPF_FUNC_cgrp_storage_get:
7991 	case BPF_FUNC_cgrp_storage_delete:
7992 		if (map->map_type != BPF_MAP_TYPE_CGRP_STORAGE)
7993 			goto error;
7994 		break;
7995 	default:
7996 		break;
7997 	}
7998 
7999 	return 0;
8000 error:
8001 	verbose(env, "cannot pass map_type %d into func %s#%d\n",
8002 		map->map_type, func_id_name(func_id), func_id);
8003 	return -EINVAL;
8004 }
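
/* Illustrative sketch, not part of the verifier: a map/helper pairing that
 * check_map_func_compatibility() accepts, BPF_MAP_TYPE_PROG_ARRAY used with
 * bpf_tail_call(). Map and program names and the libbpf conventions are
 * assumptions.
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
 *		__uint(max_entries, 2);
 *		__type(key, __u32);
 *		__type(value, __u32);
 *	} jmp_table SEC(".maps");
 *
 *	SEC("tc")
 *	int classifier(struct __sk_buff *skb)
 *	{
 *		// passing jmp_table to, say, bpf_map_push_elem() would instead be
 *		// rejected with "cannot pass map_type ... into func ..."
 *		bpf_tail_call(skb, &jmp_table, 0);
 *		return 0;			// reached only if the tail call fails
 *	}
 */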
8005 
8006 static bool check_raw_mode_ok(const struct bpf_func_proto *fn)
8007 {
8008 	int count = 0;
8009 
8010 	if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM)
8011 		count++;
8012 	if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM)
8013 		count++;
8014 	if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM)
8015 		count++;
8016 	if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM)
8017 		count++;
8018 	if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM)
8019 		count++;
8020 
8021 	/* We only support one arg being in raw mode at the moment,
8022 	 * which is sufficient for the helper functions we have
8023 	 * right now.
8024 	 */
8025 	return count <= 1;
8026 }
8027 
8028 static bool check_args_pair_invalid(const struct bpf_func_proto *fn, int arg)
8029 {
8030 	bool is_fixed = fn->arg_type[arg] & MEM_FIXED_SIZE;
8031 	bool has_size = fn->arg_size[arg] != 0;
8032 	bool is_next_size = false;
8033 
8034 	if (arg + 1 < ARRAY_SIZE(fn->arg_type))
8035 		is_next_size = arg_type_is_mem_size(fn->arg_type[arg + 1]);
8036 
8037 	if (base_type(fn->arg_type[arg]) != ARG_PTR_TO_MEM)
8038 		return is_next_size;
8039 
8040 	return has_size == is_next_size || is_next_size == is_fixed;
8041 }
8042 
8043 static bool check_arg_pair_ok(const struct bpf_func_proto *fn)
8044 {
8045 	/* bpf_xxx(..., buf, len) call will access 'len'
8046 	 * bytes from memory 'buf'. Both arg types need
8047 	 * to be paired, so make sure there's no buggy
8048 	 * helper function specification.
8049 	 */
8050 	if (arg_type_is_mem_size(fn->arg1_type) ||
8051 	    check_args_pair_invalid(fn, 0) ||
8052 	    check_args_pair_invalid(fn, 1) ||
8053 	    check_args_pair_invalid(fn, 2) ||
8054 	    check_args_pair_invalid(fn, 3) ||
8055 	    check_args_pair_invalid(fn, 4))
8056 		return false;
8057 
8058 	return true;
8059 }
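
/* Illustrative sketch, not part of the verifier: the shape of helper proto
 * that check_arg_pair_ok() expects, a memory argument immediately followed by
 * its size argument. This mirrors real protos such as bpf_probe_read_kernel's;
 * the struct below is a hypothetical example, not an existing helper.
 *
 *	static const struct bpf_func_proto example_read_proto = {
 *		.func		= NULL,			// helper body elided
 *		.gpl_only	= true,
 *		.ret_type	= RET_INTEGER,
 *		.arg1_type	= ARG_PTR_TO_UNINIT_MEM,	// 'buf' ...
 *		.arg2_type	= ARG_CONST_SIZE_OR_ZERO,	// ... paired with 'len'
 *		.arg3_type	= ARG_ANYTHING,
 *	};
 */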
8060 
8061 static bool check_btf_id_ok(const struct bpf_func_proto *fn)
8062 {
8063 	int i;
8064 
8065 	for (i = 0; i < ARRAY_SIZE(fn->arg_type); i++) {
8066 		if (base_type(fn->arg_type[i]) == ARG_PTR_TO_BTF_ID)
8067 			return !!fn->arg_btf_id[i];
8068 		if (base_type(fn->arg_type[i]) == ARG_PTR_TO_SPIN_LOCK)
8069 			return fn->arg_btf_id[i] == BPF_PTR_POISON;
8070 		if (base_type(fn->arg_type[i]) != ARG_PTR_TO_BTF_ID && fn->arg_btf_id[i] &&
8071 		    /* arg_btf_id and arg_size are in a union. */
8072 		    (base_type(fn->arg_type[i]) != ARG_PTR_TO_MEM ||
8073 		     !(fn->arg_type[i] & MEM_FIXED_SIZE)))
8074 			return false;
8075 	}
8076 
8077 	return true;
8078 }
8079 
8080 static int check_func_proto(const struct bpf_func_proto *fn, int func_id)
8081 {
8082 	return check_raw_mode_ok(fn) &&
8083 	       check_arg_pair_ok(fn) &&
8084 	       check_btf_id_ok(fn) ? 0 : -EINVAL;
8085 }
8086 
8087 /* Packet data might have moved; any old PTR_TO_PACKET[_META,_END]
8088  * are now invalid, so turn them into unknown SCALAR_VALUE.
8089  *
8090  * This also applies to dynptr slices belonging to skb and xdp dynptrs,
8091  * since these slices point to packet data.
8092  */
8093 static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
8094 {
8095 	struct bpf_func_state *state;
8096 	struct bpf_reg_state *reg;
8097 
8098 	bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({
8099 		if (reg_is_pkt_pointer_any(reg) || reg_is_dynptr_slice_pkt(reg))
8100 			mark_reg_invalid(env, reg);
8101 	}));
8102 }
8103 
8104 enum {
8105 	AT_PKT_END = -1,
8106 	BEYOND_PKT_END = -2,
8107 };
8108 
8109 static void mark_pkt_end(struct bpf_verifier_state *vstate, int regn, bool range_open)
8110 {
8111 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
8112 	struct bpf_reg_state *reg = &state->regs[regn];
8113 
8114 	if (reg->type != PTR_TO_PACKET)
8115 		/* PTR_TO_PACKET_META is not supported yet */
8116 		return;
8117 
8118 	/* The 'reg' is pkt > pkt_end or pkt >= pkt_end.
8119 	 * How far beyond pkt_end it goes is unknown.
8120 	 * if (!range_open) it's the case of pkt >= pkt_end
8121 	 * if (range_open) it's the case of pkt > pkt_end
8122 	 * hence this pointer is at least 1 byte bigger than pkt_end
8123 	 */
8124 	if (range_open)
8125 		reg->range = BEYOND_PKT_END;
8126 	else
8127 		reg->range = AT_PKT_END;
8128 }
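
/* Editor's note: a sketch of the source pattern that reaches mark_pkt_end(),
 * not part of this file. With direct packet access the compiler emits a
 * 'pkt > pkt_end' (or >=) comparison:
 *
 *	void *data = (void *)(long)ctx->data;
 *	void *data_end = (void *)(long)ctx->data_end;
 *
 *	if (data + 14 > data_end)
 *		return XDP_DROP;
 *
 * In the branch where the test is true, the derived pointer 'data + 14' sits
 * at or beyond pkt_end and its range is recorded as AT_PKT_END or
 * BEYOND_PKT_END; in the fall-through branch the access is known in bounds.
 */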
8129 
8130 /* The pointer with the specified id has released its reference to kernel
8131  * resources. Identify all copies of the same pointer and clear the reference.
8132  */
8133 static int release_reference(struct bpf_verifier_env *env,
8134 			     int ref_obj_id)
8135 {
8136 	struct bpf_func_state *state;
8137 	struct bpf_reg_state *reg;
8138 	int err;
8139 
8140 	err = release_reference_state(cur_func(env), ref_obj_id);
8141 	if (err)
8142 		return err;
8143 
8144 	bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({
8145 		if (reg->ref_obj_id == ref_obj_id)
8146 			mark_reg_invalid(env, reg);
8147 	}));
8148 
8149 	return 0;
8150 }
8151 
8152 static void invalidate_non_owning_refs(struct bpf_verifier_env *env)
8153 {
8154 	struct bpf_func_state *unused;
8155 	struct bpf_reg_state *reg;
8156 
8157 	bpf_for_each_reg_in_vstate(env->cur_state, unused, reg, ({
8158 		if (type_is_non_owning_ref(reg->type))
8159 			mark_reg_invalid(env, reg);
8160 	}));
8161 }
8162 
8163 static void clear_caller_saved_regs(struct bpf_verifier_env *env,
8164 				    struct bpf_reg_state *regs)
8165 {
8166 	int i;
8167 
8168 	/* after the call, registers r0 - r5 are scratched */

8169 	for (i = 0; i < CALLER_SAVED_REGS; i++) {
8170 		mark_reg_not_init(env, regs, caller_saved[i]);
8171 		check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
8172 	}
8173 }
8174 
8175 typedef int (*set_callee_state_fn)(struct bpf_verifier_env *env,
8176 				   struct bpf_func_state *caller,
8177 				   struct bpf_func_state *callee,
8178 				   int insn_idx);
8179 
8180 static int set_callee_state(struct bpf_verifier_env *env,
8181 			    struct bpf_func_state *caller,
8182 			    struct bpf_func_state *callee, int insn_idx);
8183 
8184 static bool is_callback_calling_kfunc(u32 btf_id);
8185 
8186 static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
8187 			     int *insn_idx, int subprog,
8188 			     set_callee_state_fn set_callee_state_cb)
8189 {
8190 	struct bpf_verifier_state *state = env->cur_state;
8191 	struct bpf_func_info_aux *func_info_aux;
8192 	struct bpf_func_state *caller, *callee;
8193 	int err;
8194 	bool is_global = false;
8195 
8196 	if (state->curframe + 1 >= MAX_CALL_FRAMES) {
8197 		verbose(env, "the call stack of %d frames is too deep\n",
8198 			state->curframe + 2);
8199 		return -E2BIG;
8200 	}
8201 
8202 	caller = state->frame[state->curframe];
8203 	if (state->frame[state->curframe + 1]) {
8204 		verbose(env, "verifier bug. Frame %d already allocated\n",
8205 			state->curframe + 1);
8206 		return -EFAULT;
8207 	}
8208 
8209 	func_info_aux = env->prog->aux->func_info_aux;
8210 	if (func_info_aux)
8211 		is_global = func_info_aux[subprog].linkage == BTF_FUNC_GLOBAL;
8212 	err = btf_check_subprog_call(env, subprog, caller->regs);
8213 	if (err == -EFAULT)
8214 		return err;
8215 	if (is_global) {
8216 		if (err) {
8217 			verbose(env, "Caller passes invalid args into func#%d\n",
8218 				subprog);
8219 			return err;
8220 		} else {
8221 			if (env->log.level & BPF_LOG_LEVEL)
8222 				verbose(env,
8223 					"Func#%d is global and valid. Skipping.\n",
8224 					subprog);
8225 			clear_caller_saved_regs(env, caller->regs);
8226 
8227 			/* All global functions return a 64-bit SCALAR_VALUE */
8228 			mark_reg_unknown(env, caller->regs, BPF_REG_0);
8229 			caller->regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG;
8230 
8231 			/* continue with next insn after call */
8232 			return 0;
8233 		}
8234 	}
8235 
8236 	/* set_callee_state is used for direct subprog calls, but we are
8237 	 * interested in validating only BPF helpers that can call subprogs as
8238 	 * callbacks
8239 	 */
8240 	if (set_callee_state_cb != set_callee_state) {
8241 		if (bpf_pseudo_kfunc_call(insn) &&
8242 		    !is_callback_calling_kfunc(insn->imm)) {
8243 			verbose(env, "verifier bug: kfunc %s#%d not marked as callback-calling\n",
8244 				func_id_name(insn->imm), insn->imm);
8245 			return -EFAULT;
8246 		} else if (!bpf_pseudo_kfunc_call(insn) &&
8247 			   !is_callback_calling_function(insn->imm)) { /* helper */
8248 			verbose(env, "verifier bug: helper %s#%d not marked as callback-calling\n",
8249 				func_id_name(insn->imm), insn->imm);
8250 			return -EFAULT;
8251 		}
8252 	}
8253 
8254 	if (insn->code == (BPF_JMP | BPF_CALL) &&
8255 	    insn->src_reg == 0 &&
8256 	    insn->imm == BPF_FUNC_timer_set_callback) {
8257 		struct bpf_verifier_state *async_cb;
8258 
8259 		/* there is no real recursion here. timer callbacks are async */
8260 		env->subprog_info[subprog].is_async_cb = true;
8261 		async_cb = push_async_cb(env, env->subprog_info[subprog].start,
8262 					 *insn_idx, subprog);
8263 		if (!async_cb)
8264 			return -EFAULT;
8265 		callee = async_cb->frame[0];
8266 		callee->async_entry_cnt = caller->async_entry_cnt + 1;
8267 
8268 		/* Convert bpf_timer_set_callback() args into timer callback args */
8269 		err = set_callee_state_cb(env, caller, callee, *insn_idx);
8270 		if (err)
8271 			return err;
8272 
8273 		clear_caller_saved_regs(env, caller->regs);
8274 		mark_reg_unknown(env, caller->regs, BPF_REG_0);
8275 		caller->regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG;
8276 		/* continue with next insn after call */
8277 		return 0;
8278 	}
8279 
8280 	callee = kzalloc(sizeof(*callee), GFP_KERNEL);
8281 	if (!callee)
8282 		return -ENOMEM;
8283 	state->frame[state->curframe + 1] = callee;
8284 
8285 	/* callee cannot access r0, r6 - r9 for reading and has to write
8286 	 * into its own stack before reading from it.
8287 	 * callee can read/write into caller's stack
8288 	 */
8289 	init_func_state(env, callee,
8290 			/* remember the callsite, it will be used by bpf_exit */
8291 			*insn_idx /* callsite */,
8292 			state->curframe + 1 /* frameno within this callchain */,
8293 			subprog /* subprog number within this prog */);
8294 
8295 	/* Transfer references to the callee */
8296 	err = copy_reference_state(callee, caller);
8297 	if (err)
8298 		goto err_out;
8299 
8300 	err = set_callee_state_cb(env, caller, callee, *insn_idx);
8301 	if (err)
8302 		goto err_out;
8303 
8304 	clear_caller_saved_regs(env, caller->regs);
8305 
8306 	/* only increment it after check_reg_arg() finished */
8307 	state->curframe++;
8308 
8309 	/* and go analyze first insn of the callee */
8310 	*insn_idx = env->subprog_info[subprog].start - 1;
8311 
8312 	if (env->log.level & BPF_LOG_LEVEL) {
8313 		verbose(env, "caller:\n");
8314 		print_verifier_state(env, caller, true);
8315 		verbose(env, "callee:\n");
8316 		print_verifier_state(env, callee, true);
8317 	}
8318 	return 0;
8319 
8320 err_out:
8321 	free_func_state(callee);
8322 	state->frame[state->curframe + 1] = NULL;
8323 	return err;
8324 }
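
/* Editor's note: a sketch of the two subprog kinds handled above, not part of
 * this file; the function names are hypothetical:
 *
 *	__noinline int check_pkt(struct __sk_buff *skb)            global
 *	{ ... }
 *
 *	static __noinline int sum(int a, int b)                    static
 *	{ ... }
 *
 * A global (non-static) subprog is verified once against its BTF signature,
 * so the call site above only validates argument types and scratches r0-r5.
 * A static subprog is verified in the caller's context: a new frame is
 * allocated and the walk continues at the callee's first instruction.
 */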
8325 
8326 int map_set_for_each_callback_args(struct bpf_verifier_env *env,
8327 				   struct bpf_func_state *caller,
8328 				   struct bpf_func_state *callee)
8329 {
8330 	/* bpf_for_each_map_elem(struct bpf_map *map, void *callback_fn,
8331 	 *      void *callback_ctx, u64 flags);
8332 	 * callback_fn(struct bpf_map *map, void *key, void *value,
8333 	 *      void *callback_ctx);
8334 	 */
8335 	callee->regs[BPF_REG_1] = caller->regs[BPF_REG_1];
8336 
8337 	callee->regs[BPF_REG_2].type = PTR_TO_MAP_KEY;
8338 	__mark_reg_known_zero(&callee->regs[BPF_REG_2]);
8339 	callee->regs[BPF_REG_2].map_ptr = caller->regs[BPF_REG_1].map_ptr;
8340 
8341 	callee->regs[BPF_REG_3].type = PTR_TO_MAP_VALUE;
8342 	__mark_reg_known_zero(&callee->regs[BPF_REG_3]);
8343 	callee->regs[BPF_REG_3].map_ptr = caller->regs[BPF_REG_1].map_ptr;
8344 
8345 	/* pointer to stack or null */
8346 	callee->regs[BPF_REG_4] = caller->regs[BPF_REG_3];
8347 
8348 	/* unused */
8349 	__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
8350 	return 0;
8351 }
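
/* Editor's note: a sketch of the program-side shape of the callback set up
 * above, not part of this file; 'my_map' and 'count_elem' are hypothetical:
 *
 *	static long count_elem(struct bpf_map *map, __u32 *key, __u64 *val,
 *			       void *ctx)
 *	{
 *		(*(__u64 *)ctx)++;
 *		return 0;            0 = continue, 1 = stop iterating
 *	}
 *
 *	__u64 n = 0;
 *	bpf_for_each_map_elem(&my_map, count_elem, &n, 0);
 *
 * R1 = map, R2 = key, R3 = value, R4 = callback_ctx (the caller's R3, a stack
 * pointer or NULL), matching the register setup done in this function.
 */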
8352 
8353 static int set_callee_state(struct bpf_verifier_env *env,
8354 			    struct bpf_func_state *caller,
8355 			    struct bpf_func_state *callee, int insn_idx)
8356 {
8357 	int i;
8358 
8359 	/* copy r1 - r5 args that callee can access.  The copy includes parent
8360 	 * pointers, which connect us up to the liveness chain
8361 	 */
8362 	for (i = BPF_REG_1; i <= BPF_REG_5; i++)
8363 		callee->regs[i] = caller->regs[i];
8364 	return 0;
8365 }
8366 
8367 static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
8368 			   int *insn_idx)
8369 {
8370 	int subprog, target_insn;
8371 
8372 	target_insn = *insn_idx + insn->imm + 1;
8373 	subprog = find_subprog(env, target_insn);
8374 	if (subprog < 0) {
8375 		verbose(env, "verifier bug. No program starts at insn %d\n",
8376 			target_insn);
8377 		return -EFAULT;
8378 	}
8379 
8380 	return __check_func_call(env, insn, insn_idx, subprog, set_callee_state);
8381 }
8382 
8383 static int set_map_elem_callback_state(struct bpf_verifier_env *env,
8384 				       struct bpf_func_state *caller,
8385 				       struct bpf_func_state *callee,
8386 				       int insn_idx)
8387 {
8388 	struct bpf_insn_aux_data *insn_aux = &env->insn_aux_data[insn_idx];
8389 	struct bpf_map *map;
8390 	int err;
8391 
8392 	if (bpf_map_ptr_poisoned(insn_aux)) {
8393 		verbose(env, "tail_call abusing map_ptr\n");
8394 		return -EINVAL;
8395 	}
8396 
8397 	map = BPF_MAP_PTR(insn_aux->map_ptr_state);
8398 	if (!map->ops->map_set_for_each_callback_args ||
8399 	    !map->ops->map_for_each_callback) {
8400 		verbose(env, "callback function not allowed for map\n");
8401 		return -ENOTSUPP;
8402 	}
8403 
8404 	err = map->ops->map_set_for_each_callback_args(env, caller, callee);
8405 	if (err)
8406 		return err;
8407 
8408 	callee->in_callback_fn = true;
8409 	callee->callback_ret_range = tnum_range(0, 1);
8410 	return 0;
8411 }
8412 
8413 static int set_loop_callback_state(struct bpf_verifier_env *env,
8414 				   struct bpf_func_state *caller,
8415 				   struct bpf_func_state *callee,
8416 				   int insn_idx)
8417 {
8418 	/* bpf_loop(u32 nr_loops, void *callback_fn, void *callback_ctx,
8419 	 *	    u64 flags);
8420 	 * callback_fn(u32 index, void *callback_ctx);
8421 	 */
8422 	callee->regs[BPF_REG_1].type = SCALAR_VALUE;
8423 	callee->regs[BPF_REG_2] = caller->regs[BPF_REG_3];
8424 
8425 	/* unused */
8426 	__mark_reg_not_init(env, &callee->regs[BPF_REG_3]);
8427 	__mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
8428 	__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
8429 
8430 	callee->in_callback_fn = true;
8431 	callee->callback_ret_range = tnum_range(0, 1);
8432 	return 0;
8433 }
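
/* Editor's note: a sketch of bpf_loop() usage matching the register setup
 * above, not part of this file; 'sum_cb' and 'acc' are hypothetical:
 *
 *	static long sum_cb(__u32 index, void *ctx)
 *	{
 *		*(__u64 *)ctx += index;
 *		return 0;            0 = keep looping, 1 = break out
 *	}
 *
 *	__u64 acc = 0;
 *	bpf_loop(100, sum_cb, &acc, 0);
 *
 * The callback's R1 is the scalar loop index and R2 is callback_ctx (the
 * caller's R3); R3-R5 are not initialized for the callback.
 */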
8434 
8435 static int set_timer_callback_state(struct bpf_verifier_env *env,
8436 				    struct bpf_func_state *caller,
8437 				    struct bpf_func_state *callee,
8438 				    int insn_idx)
8439 {
8440 	struct bpf_map *map_ptr = caller->regs[BPF_REG_1].map_ptr;
8441 
8442 	/* bpf_timer_set_callback(struct bpf_timer *timer, void *callback_fn);
8443 	 * callback_fn(struct bpf_map *map, void *key, void *value);
8444 	 */
8445 	callee->regs[BPF_REG_1].type = CONST_PTR_TO_MAP;
8446 	__mark_reg_known_zero(&callee->regs[BPF_REG_1]);
8447 	callee->regs[BPF_REG_1].map_ptr = map_ptr;
8448 
8449 	callee->regs[BPF_REG_2].type = PTR_TO_MAP_KEY;
8450 	__mark_reg_known_zero(&callee->regs[BPF_REG_2]);
8451 	callee->regs[BPF_REG_2].map_ptr = map_ptr;
8452 
8453 	callee->regs[BPF_REG_3].type = PTR_TO_MAP_VALUE;
8454 	__mark_reg_known_zero(&callee->regs[BPF_REG_3]);
8455 	callee->regs[BPF_REG_3].map_ptr = map_ptr;
8456 
8457 	/* unused */
8458 	__mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
8459 	__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
8460 	callee->in_async_callback_fn = true;
8461 	callee->callback_ret_range = tnum_range(0, 1);
8462 	return 0;
8463 }
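
/* Editor's note: a sketch of the usual bpf_timer flow, not part of this file;
 * 'struct elem', 'timers' and 'timer_cb' are hypothetical:
 *
 *	static int timer_cb(void *map, int *key, struct elem *val)
 *	{
 *		... runs asynchronously with R1 = map, R2 = key, R3 = value,
 *		    as set up above ...
 *		return 0;
 *	}
 *
 *	struct elem *val = bpf_map_lookup_elem(&timers, &key);
 *	if (val) {
 *		bpf_timer_init(&val->t, &timers, CLOCK_MONOTONIC);
 *		bpf_timer_set_callback(&val->t, timer_cb);
 *		bpf_timer_start(&val->t, 0, 0);
 *	}
 */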
8464 
8465 static int set_find_vma_callback_state(struct bpf_verifier_env *env,
8466 				       struct bpf_func_state *caller,
8467 				       struct bpf_func_state *callee,
8468 				       int insn_idx)
8469 {
8470 	/* bpf_find_vma(struct task_struct *task, u64 addr,
8471 	 *               void *callback_fn, void *callback_ctx, u64 flags)
8472 	 * (callback_fn)(struct task_struct *task,
8473 	 *               struct vm_area_struct *vma, void *callback_ctx);
8474 	 */
8475 	callee->regs[BPF_REG_1] = caller->regs[BPF_REG_1];
8476 
8477 	callee->regs[BPF_REG_2].type = PTR_TO_BTF_ID;
8478 	__mark_reg_known_zero(&callee->regs[BPF_REG_2]);
8479 	callee->regs[BPF_REG_2].btf =  btf_vmlinux;
8480 	callee->regs[BPF_REG_2].btf_id = btf_tracing_ids[BTF_TRACING_TYPE_VMA];
8481 
8482 	/* pointer to stack or null */
8483 	callee->regs[BPF_REG_3] = caller->regs[BPF_REG_4];
8484 
8485 	/* unused */
8486 	__mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
8487 	__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
8488 	callee->in_callback_fn = true;
8489 	callee->callback_ret_range = tnum_range(0, 1);
8490 	return 0;
8491 }
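
/* Editor's note: a sketch of bpf_find_vma() usage matching the setup above,
 * not part of this file; 'check_vma' and 'cb_data' are hypothetical:
 *
 *	static long check_vma(struct task_struct *task,
 *			      struct vm_area_struct *vma, void *ctx)
 *	{
 *		... R2 is a PTR_TO_BTF_ID for vm_area_struct ...
 *		return 0;
 *	}
 *
 *	struct task_struct *task = bpf_get_current_task_btf();
 *	bpf_find_vma(task, addr, check_vma, &cb_data, 0);
 *
 * The callback's R3 is callback_ctx (the caller's R4, a stack pointer or
 * NULL).
 */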
8492 
8493 static int set_user_ringbuf_callback_state(struct bpf_verifier_env *env,
8494 					   struct bpf_func_state *caller,
8495 					   struct bpf_func_state *callee,
8496 					   int insn_idx)
8497 {
8498 	/* bpf_user_ringbuf_drain(struct bpf_map *map, void *callback_fn,
8499 	 *			  void *callback_ctx, u64 flags);
8500 	 * callback_fn(const struct bpf_dynptr *dynptr, void *callback_ctx);
8501 	 */
8502 	__mark_reg_not_init(env, &callee->regs[BPF_REG_0]);
8503 	mark_dynptr_cb_reg(env, &callee->regs[BPF_REG_1], BPF_DYNPTR_TYPE_LOCAL);
8504 	callee->regs[BPF_REG_2] = caller->regs[BPF_REG_3];
8505 
8506 	/* unused */
8507 	__mark_reg_not_init(env, &callee->regs[BPF_REG_3]);
8508 	__mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
8509 	__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
8510 
8511 	callee->in_callback_fn = true;
8512 	callee->callback_ret_range = tnum_range(0, 1);
8513 	return 0;
8514 }
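
/* Editor's note: a sketch of bpf_user_ringbuf_drain() usage matching the
 * setup above, not part of this file; 'user_rb' and 'handle_sample' are
 * hypothetical:
 *
 *	static long handle_sample(struct bpf_dynptr *dynptr, void *ctx)
 *	{
 *		... R1 is a local dynptr covering one user-submitted sample,
 *		    R2 is callback_ctx (the caller's R3) ...
 *		return 0;
 *	}
 *
 *	bpf_user_ringbuf_drain(&user_rb, handle_sample, NULL, 0);
 */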
8515 
8516 static int set_rbtree_add_callback_state(struct bpf_verifier_env *env,
8517 					 struct bpf_func_state *caller,
8518 					 struct bpf_func_state *callee,
8519 					 int insn_idx)
8520 {
8521 	/* void bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node *node,
8522 	 *                     bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b));
8523 	 *
8524 	 * 'struct bpf_rb_node *node' arg to bpf_rbtree_add_impl is the same PTR_TO_BTF_ID w/ offset
8525 	 * that 'less' callback args will be receiving. However, 'node' arg was release_reference'd
8526 	 * by this point, so look at 'root'
8527 	 */
8528 	struct btf_field *field;
8529 
8530 	field = reg_find_field_offset(&caller->regs[BPF_REG_1], caller->regs[BPF_REG_1].off,
8531 				      BPF_RB_ROOT);
8532 	if (!field || !field->graph_root.value_btf_id)
8533 		return -EFAULT;
8534 
8535 	mark_reg_graph_node(callee->regs, BPF_REG_1, &field->graph_root);
8536 	ref_set_non_owning(env, &callee->regs[BPF_REG_1]);
8537 	mark_reg_graph_node(callee->regs, BPF_REG_2, &field->graph_root);
8538 	ref_set_non_owning(env, &callee->regs[BPF_REG_2]);
8539 
8540 	__mark_reg_not_init(env, &callee->regs[BPF_REG_3]);
8541 	__mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
8542 	__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
8543 	callee->in_callback_fn = true;
8544 	callee->callback_ret_range = tnum_range(0, 1);
8545 	return 0;
8546 }
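
/* Editor's note: a sketch of the 'less' callback used with bpf_rbtree_add(),
 * not part of this file; 'struct node_data' is hypothetical:
 *
 *	static bool node_less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
 *	{
 *		struct node_data *na = container_of(a, struct node_data, node);
 *		struct node_data *nb = container_of(b, struct node_data, node);
 *
 *		return na->key < nb->key;
 *	}
 *
 * Both callback arguments are non-owning references into the tree, which is
 * why R1 and R2 are marked with ref_set_non_owning() above.
 */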
8547 
8548 static bool is_rbtree_lock_required_kfunc(u32 btf_id);
8549 
8550 /* Are we currently verifying the callback for an rbtree helper that must
8551  * be called with the lock held? If so, there is no need to complain about
8552  * an unreleased lock.
8553  */
8554 static bool in_rbtree_lock_required_cb(struct bpf_verifier_env *env)
8555 {
8556 	struct bpf_verifier_state *state = env->cur_state;
8557 	struct bpf_insn *insn = env->prog->insnsi;
8558 	struct bpf_func_state *callee;
8559 	int kfunc_btf_id;
8560 
8561 	if (!state->curframe)
8562 		return false;
8563 
8564 	callee = state->frame[state->curframe];
8565 
8566 	if (!callee->in_callback_fn)
8567 		return false;
8568 
8569 	kfunc_btf_id = insn[callee->callsite].imm;
8570 	return is_rbtree_lock_required_kfunc(kfunc_btf_id);
8571 }
8572 
8573 static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
8574 {
8575 	struct bpf_verifier_state *state = env->cur_state;
8576 	struct bpf_func_state *caller, *callee;
8577 	struct bpf_reg_state *r0;
8578 	int err;
8579 
8580 	callee = state->frame[state->curframe];
8581 	r0 = &callee->regs[BPF_REG_0];
8582 	if (r0->type == PTR_TO_STACK) {
8583 		/* Technically it's ok to return the caller's stack pointer
8584 		 * (or the caller's caller's pointer) back to the caller,
8585 		 * since those pointers are still valid. Only the current
8586 		 * frame's stack pointer becomes invalid once the function
8587 		 * exits, but let's be conservative.
8588 		 */
8589 		verbose(env, "cannot return stack pointer to the caller\n");
8590 		return -EINVAL;
8591 	}
8592 
8593 	caller = state->frame[state->curframe - 1];
8594 	if (callee->in_callback_fn) {
8595 		/* enforce R0 return value range [0, 1]. */
8596 		struct tnum range = callee->callback_ret_range;
8597 
8598 		if (r0->type != SCALAR_VALUE) {
8599 			verbose(env, "R0 not a scalar value\n");
8600 			return -EACCES;
8601 		}
8602 		if (!tnum_in(range, r0->var_off)) {
8603 			verbose_invalid_scalar(env, r0, &range, "callback return", "R0");
8604 			return -EINVAL;
8605 		}
8606 	} else {
8607 		/* return to the caller whatever r0 had in the callee */
8608 		caller->regs[BPF_REG_0] = *r0;
8609 	}
8610 
8611 	/* callback_fn frame should have released its own additions to parent's
8612 	 * reference state at this point, or check_reference_leak would
8613 	 * complain, hence it must be the same as the caller. There is no need
8614 	 * to copy it back.
8615 	 */
8616 	if (!callee->in_callback_fn) {
8617 		/* Transfer references to the caller */
8618 		err = copy_reference_state(caller, callee);
8619 		if (err)
8620 			return err;
8621 	}
8622 
8623 	*insn_idx = callee->callsite + 1;
8624 	if (env->log.level & BPF_LOG_LEVEL) {
8625 		verbose(env, "returning from callee:\n");
8626 		print_verifier_state(env, callee, true);
8627 		verbose(env, "to caller at %d:\n", *insn_idx);
8628 		print_verifier_state(env, caller, true);
8629 	}
8630 	/* clear everything in the callee */
8631 	free_func_state(callee);
8632 	state->frame[state->curframe--] = NULL;
8633 	return 0;
8634 }
8635 
8636 static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type,
8637 				   int func_id,
8638 				   struct bpf_call_arg_meta *meta)
8639 {
8640 	struct bpf_reg_state *ret_reg = &regs[BPF_REG_0];
8641 
8642 	if (ret_type != RET_INTEGER ||
8643 	    (func_id != BPF_FUNC_get_stack &&
8644 	     func_id != BPF_FUNC_get_task_stack &&
8645 	     func_id != BPF_FUNC_probe_read_str &&
8646 	     func_id != BPF_FUNC_probe_read_kernel_str &&
8647 	     func_id != BPF_FUNC_probe_read_user_str))
8648 		return;
8649 
8650 	ret_reg->smax_value = meta->msize_max_value;
8651 	ret_reg->s32_max_value = meta->msize_max_value;
8652 	ret_reg->smin_value = -MAX_ERRNO;
8653 	ret_reg->s32_min_value = -MAX_ERRNO;
8654 	reg_bounds_sync(ret_reg);
8655 }
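
/* Editor's note: a sketch of why the refined range matters, not part of this
 * file; 'buf' and 'events' are hypothetical:
 *
 *	long n = bpf_get_stack(ctx, buf, sizeof(buf), 0);
 *	if (n > 0)
 *		bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, buf, n);
 *
 * Because R0 is bounded to [-MAX_ERRNO, sizeof(buf)] after the call, 'n' can
 * be used directly as a variable length in the second helper; without the
 * refinement it would be an unbounded scalar and the access would be
 * rejected.
 */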
8656 
8657 static int
8658 record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
8659 		int func_id, int insn_idx)
8660 {
8661 	struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
8662 	struct bpf_map *map = meta->map_ptr;
8663 
8664 	if (func_id != BPF_FUNC_tail_call &&
8665 	    func_id != BPF_FUNC_map_lookup_elem &&
8666 	    func_id != BPF_FUNC_map_update_elem &&
8667 	    func_id != BPF_FUNC_map_delete_elem &&
8668 	    func_id != BPF_FUNC_map_push_elem &&
8669 	    func_id != BPF_FUNC_map_pop_elem &&
8670 	    func_id != BPF_FUNC_map_peek_elem &&
8671 	    func_id != BPF_FUNC_for_each_map_elem &&
8672 	    func_id != BPF_FUNC_redirect_map &&
8673 	    func_id != BPF_FUNC_map_lookup_percpu_elem)
8674 		return 0;
8675 
8676 	if (map == NULL) {
8677 		verbose(env, "kernel subsystem misconfigured verifier\n");
8678 		return -EINVAL;
8679 	}
8680 
8681 	/* In case of read-only, some additional restrictions
8682 	 * need to be applied in order to prevent altering the
8683 	 * state of the map from program side.
8684 	 */
8685 	if ((map->map_flags & BPF_F_RDONLY_PROG) &&
8686 	    (func_id == BPF_FUNC_map_delete_elem ||
8687 	     func_id == BPF_FUNC_map_update_elem ||
8688 	     func_id == BPF_FUNC_map_push_elem ||
8689 	     func_id == BPF_FUNC_map_pop_elem)) {
8690 		verbose(env, "write into map forbidden\n");
8691 		return -EACCES;
8692 	}
8693 
8694 	if (!BPF_MAP_PTR(aux->map_ptr_state))
8695 		bpf_map_ptr_store(aux, meta->map_ptr,
8696 				  !meta->map_ptr->bypass_spec_v1);
8697 	else if (BPF_MAP_PTR(aux->map_ptr_state) != meta->map_ptr)
8698 		bpf_map_ptr_store(aux, BPF_MAP_PTR_POISON,
8699 				  !meta->map_ptr->bypass_spec_v1);
8700 	return 0;
8701 }
8702 
8703 static int
8704 record_func_key(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
8705 		int func_id, int insn_idx)
8706 {
8707 	struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
8708 	struct bpf_reg_state *regs = cur_regs(env), *reg;
8709 	struct bpf_map *map = meta->map_ptr;
8710 	u64 val, max;
8711 	int err;
8712 
8713 	if (func_id != BPF_FUNC_tail_call)
8714 		return 0;
8715 	if (!map || map->map_type != BPF_MAP_TYPE_PROG_ARRAY) {
8716 		verbose(env, "kernel subsystem misconfigured verifier\n");
8717 		return -EINVAL;
8718 	}
8719 
8720 	reg = &regs[BPF_REG_3];
8721 	val = reg->var_off.value;
8722 	max = map->max_entries;
8723 
8724 	if (!(register_is_const(reg) && val < max)) {
8725 		bpf_map_key_store(aux, BPF_MAP_KEY_POISON);
8726 		return 0;
8727 	}
8728 
8729 	err = mark_chain_precision(env, BPF_REG_3);
8730 	if (err)
8731 		return err;
8732 	if (bpf_map_key_unseen(aux))
8733 		bpf_map_key_store(aux, val);
8734 	else if (!bpf_map_key_poisoned(aux) &&
8735 		  bpf_map_key_immediate(aux) != val)
8736 		bpf_map_key_store(aux, BPF_MAP_KEY_POISON);
8737 	return 0;
8738 }
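
/* Editor's note: a sketch of the two tail_call shapes handled above, not part
 * of this file; 'jmp_table' is hypothetical:
 *
 *	bpf_tail_call(ctx, &jmp_table, 3);       constant index below
 *	                                         max_entries: key is recorded
 *	bpf_tail_call(ctx, &jmp_table, idx);     variable index: key poisoned
 *
 * A recorded constant key lets a later fixup pass turn the tail call into a
 * direct jump to the target program.
 */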
8739 
8740 static int check_reference_leak(struct bpf_verifier_env *env)
8741 {
8742 	struct bpf_func_state *state = cur_func(env);
8743 	bool refs_lingering = false;
8744 	int i;
8745 
8746 	if (state->frameno && !state->in_callback_fn)
8747 		return 0;
8748 
8749 	for (i = 0; i < state->acquired_refs; i++) {
8750 		if (state->in_callback_fn && state->refs[i].callback_ref != state->frameno)
8751 			continue;
8752 		verbose(env, "Unreleased reference id=%d alloc_insn=%d\n",
8753 			state->refs[i].id, state->refs[i].insn_idx);
8754 		refs_lingering = true;
8755 	}
8756 	return refs_lingering ? -EINVAL : 0;
8757 }
8758 
8759 static int check_bpf_snprintf_call(struct bpf_verifier_env *env,
8760 				   struct bpf_reg_state *regs)
8761 {
8762 	struct bpf_reg_state *fmt_reg = &regs[BPF_REG_3];
8763 	struct bpf_reg_state *data_len_reg = &regs[BPF_REG_5];
8764 	struct bpf_map *fmt_map = fmt_reg->map_ptr;
8765 	struct bpf_bprintf_data data = {};
8766 	int err, fmt_map_off, num_args;
8767 	u64 fmt_addr;
8768 	char *fmt;
8769 
8770 	/* data must be an array of u64 */
8771 	if (data_len_reg->var_off.value % 8)
8772 		return -EINVAL;
8773 	num_args = data_len_reg->var_off.value / 8;
8774 
8775 	/* fmt being ARG_PTR_TO_CONST_STR guarantees that var_off is const
8776 	 * and map_direct_value_addr is set.
8777 	 */
8778 	fmt_map_off = fmt_reg->off + fmt_reg->var_off.value;
8779 	err = fmt_map->ops->map_direct_value_addr(fmt_map, &fmt_addr,
8780 						  fmt_map_off);
8781 	if (err) {
8782 		verbose(env, "verifier bug\n");
8783 		return -EFAULT;
8784 	}
8785 	fmt = (char *)(long)fmt_addr + fmt_map_off;
8786 
8787 	/* We are also guaranteed that fmt+fmt_map_off is NULL terminated, so we
8788 	 * can focus on validating the format specifiers.
8789 	 */
8790 	err = bpf_bprintf_prepare(fmt, UINT_MAX, NULL, num_args, &data);
8791 	if (err < 0)
8792 		verbose(env, "Invalid format string\n");
8793 
8794 	return err;
8795 }
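
/* Editor's note: a sketch of the bpf_snprintf() call shape this check
 * validates, not part of this file; 'out' is hypothetical:
 *
 *	static const char fmt[] = "pid=%d prio=%d";
 *	__u64 args[] = { pid, prio };
 *	char out[64];
 *
 *	bpf_snprintf(out, sizeof(out), fmt, args, sizeof(args));
 *
 * R3 (fmt) must be a constant string in a read-only map so its address can be
 * resolved here, and R5 (data_len) must be a known multiple of 8 since the
 * data array is an array of u64.
 */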
8796 
8797 static int check_get_func_ip(struct bpf_verifier_env *env)
8798 {
8799 	enum bpf_prog_type type = resolve_prog_type(env->prog);
8800 	int func_id = BPF_FUNC_get_func_ip;
8801 
8802 	if (type == BPF_PROG_TYPE_TRACING) {
8803 		if (!bpf_prog_has_trampoline(env->prog)) {
8804 			verbose(env, "func %s#%d supported only for fentry/fexit/fmod_ret programs\n",
8805 				func_id_name(func_id), func_id);
8806 			return -ENOTSUPP;
8807 		}
8808 		return 0;
8809 	} else if (type == BPF_PROG_TYPE_KPROBE) {
8810 		return 0;
8811 	}
8812 
8813 	verbose(env, "func %s#%d not supported for program type %d\n",
8814 		func_id_name(func_id), func_id, type);
8815 	return -ENOTSUPP;
8816 }
8817 
8818 static struct bpf_insn_aux_data *cur_aux(struct bpf_verifier_env *env)
8819 {
8820 	return &env->insn_aux_data[env->insn_idx];
8821 }
8822 
8823 static bool loop_flag_is_zero(struct bpf_verifier_env *env)
8824 {
8825 	struct bpf_reg_state *regs = cur_regs(env);
8826 	struct bpf_reg_state *reg = &regs[BPF_REG_4];
8827 	bool reg_is_null = register_is_null(reg);
8828 
8829 	if (reg_is_null)
8830 		mark_chain_precision(env, BPF_REG_4);
8831 
8832 	return reg_is_null;
8833 }
8834 
8835 static void update_loop_inline_state(struct bpf_verifier_env *env, u32 subprogno)
8836 {
8837 	struct bpf_loop_inline_state *state = &cur_aux(env)->loop_inline_state;
8838 
8839 	if (!state->initialized) {
8840 		state->initialized = 1;
8841 		state->fit_for_inline = loop_flag_is_zero(env);
8842 		state->callback_subprogno = subprogno;
8843 		return;
8844 	}
8845 
8846 	if (!state->fit_for_inline)
8847 		return;
8848 
8849 	state->fit_for_inline = (loop_flag_is_zero(env) &&
8850 				 state->callback_subprogno == subprogno);
8851 }
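
/* Editor's note: a sketch of what "fit for inline" means at the call site,
 * not part of this file; 'cb' and 'data' are hypothetical:
 *
 *	bpf_loop(16, cb, &data, 0);        flags known zero: may be inlined
 *	bpf_loop(16, cb, &data, flags);    flags not known zero: stays a call
 *
 * Inlining also requires that every verified path through this instruction
 * uses the same callback subprog.
 */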
8852 
8853 static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
8854 			     int *insn_idx_p)
8855 {
8856 	enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
8857 	const struct bpf_func_proto *fn = NULL;
8858 	enum bpf_return_type ret_type;
8859 	enum bpf_type_flag ret_flag;
8860 	struct bpf_reg_state *regs;
8861 	struct bpf_call_arg_meta meta;
8862 	int insn_idx = *insn_idx_p;
8863 	bool changes_data;
8864 	int i, err, func_id;
8865 
8866 	/* find function prototype */
8867 	func_id = insn->imm;
8868 	if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) {
8869 		verbose(env, "invalid func %s#%d\n", func_id_name(func_id),
8870 			func_id);
8871 		return -EINVAL;
8872 	}
8873 
8874 	if (env->ops->get_func_proto)
8875 		fn = env->ops->get_func_proto(func_id, env->prog);
8876 	if (!fn) {
8877 		verbose(env, "unknown func %s#%d\n", func_id_name(func_id),
8878 			func_id);
8879 		return -EINVAL;
8880 	}
8881 
8882 	/* eBPF programs must be GPL compatible to use GPL-ed functions */
8883 	if (!env->prog->gpl_compatible && fn->gpl_only) {
8884 		verbose(env, "cannot call GPL-restricted function from non-GPL compatible program\n");
8885 		return -EINVAL;
8886 	}
8887 
8888 	if (fn->allowed && !fn->allowed(env->prog)) {
8889 		verbose(env, "helper call is not allowed in probe\n");
8890 		return -EINVAL;
8891 	}
8892 
8893 	if (!env->prog->aux->sleepable && fn->might_sleep) {
8894 		verbose(env, "helper call might sleep in a non-sleepable prog\n");
8895 		return -EINVAL;
8896 	}
8897 
8898 	/* With LD_ABS/IND some JITs save/restore skb from r1. */
8899 	changes_data = bpf_helper_changes_pkt_data(fn->func);
8900 	if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) {
8901 		verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n",
8902 			func_id_name(func_id), func_id);
8903 		return -EINVAL;
8904 	}
8905 
8906 	memset(&meta, 0, sizeof(meta));
8907 	meta.pkt_access = fn->pkt_access;
8908 
8909 	err = check_func_proto(fn, func_id);
8910 	if (err) {
8911 		verbose(env, "kernel subsystem misconfigured func %s#%d\n",
8912 			func_id_name(func_id), func_id);
8913 		return err;
8914 	}
8915 
8916 	if (env->cur_state->active_rcu_lock) {
8917 		if (fn->might_sleep) {
8918 			verbose(env, "sleepable helper %s#%d in rcu_read_lock region\n",
8919 				func_id_name(func_id), func_id);
8920 			return -EINVAL;
8921 		}
8922 
8923 		if (env->prog->aux->sleepable && is_storage_get_function(func_id))
8924 			env->insn_aux_data[insn_idx].storage_get_func_atomic = true;
8925 	}
8926 
8927 	meta.func_id = func_id;
8928 	/* check args */
8929 	for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++) {
8930 		err = check_func_arg(env, i, &meta, fn, insn_idx);
8931 		if (err)
8932 			return err;
8933 	}
8934 
8935 	err = record_func_map(env, &meta, func_id, insn_idx);
8936 	if (err)
8937 		return err;
8938 
8939 	err = record_func_key(env, &meta, func_id, insn_idx);
8940 	if (err)
8941 		return err;
8942 
8943 	/* Mark slots with STACK_MISC in case of raw mode; the stack offset
8944 	 * is inferred from register state.
8945 	 */
8946 	for (i = 0; i < meta.access_size; i++) {
8947 		err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B,
8948 				       BPF_WRITE, -1, false);
8949 		if (err)
8950 			return err;
8951 	}
8952 
8953 	regs = cur_regs(env);
8954 
8955 	if (meta.release_regno) {
8956 		err = -EINVAL;
8957 		/* This can only be set for PTR_TO_STACK, as CONST_PTR_TO_DYNPTR cannot
8958 		 * be released by any dynptr helper. Hence, unmark_stack_slots_dynptr
8959 		 * is safe to do directly.
8960 		 */
8961 		if (arg_type_is_dynptr(fn->arg_type[meta.release_regno - BPF_REG_1])) {
8962 			if (regs[meta.release_regno].type == CONST_PTR_TO_DYNPTR) {
8963 				verbose(env, "verifier internal error: CONST_PTR_TO_DYNPTR cannot be released\n");
8964 				return -EFAULT;
8965 			}
8966 			err = unmark_stack_slots_dynptr(env, &regs[meta.release_regno]);
8967 		} else if (meta.ref_obj_id) {
8968 			err = release_reference(env, meta.ref_obj_id);
8969 		} else if (register_is_null(&regs[meta.release_regno])) {
8970 			/* meta.ref_obj_id can only be 0 if the register that is meant to be
8971 			 * released is NULL (a register number greater than R0).
8972 			 */
8973 			err = 0;
8974 		}
8975 		if (err) {
8976 			verbose(env, "func %s#%d reference has not been acquired before\n",
8977 				func_id_name(func_id), func_id);
8978 			return err;
8979 		}
8980 	}
8981 
8982 	switch (func_id) {
8983 	case BPF_FUNC_tail_call:
8984 		err = check_reference_leak(env);
8985 		if (err) {
8986 			verbose(env, "tail_call would lead to reference leak\n");
8987 			return err;
8988 		}
8989 		break;
8990 	case BPF_FUNC_get_local_storage:
8991 		/* check that the flags argument in get_local_storage(map, flags) is 0;
8992 		 * this is required because get_local_storage() can't return an error.
8993 		 */
8994 		if (!register_is_null(&regs[BPF_REG_2])) {
8995 			verbose(env, "get_local_storage() doesn't support non-zero flags\n");
8996 			return -EINVAL;
8997 		}
8998 		break;
8999 	case BPF_FUNC_for_each_map_elem:
9000 		err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
9001 					set_map_elem_callback_state);
9002 		break;
9003 	case BPF_FUNC_timer_set_callback:
9004 		err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
9005 					set_timer_callback_state);
9006 		break;
9007 	case BPF_FUNC_find_vma:
9008 		err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
9009 					set_find_vma_callback_state);
9010 		break;
9011 	case BPF_FUNC_snprintf:
9012 		err = check_bpf_snprintf_call(env, regs);
9013 		break;
9014 	case BPF_FUNC_loop:
9015 		update_loop_inline_state(env, meta.subprogno);
9016 		err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
9017 					set_loop_callback_state);
9018 		break;
9019 	case BPF_FUNC_dynptr_from_mem:
9020 		if (regs[BPF_REG_1].type != PTR_TO_MAP_VALUE) {
9021 			verbose(env, "Unsupported reg type %s for bpf_dynptr_from_mem data\n",
9022 				reg_type_str(env, regs[BPF_REG_1].type));
9023 			return -EACCES;
9024 		}
9025 		break;
9026 	case BPF_FUNC_set_retval:
9027 		if (prog_type == BPF_PROG_TYPE_LSM &&
9028 		    env->prog->expected_attach_type == BPF_LSM_CGROUP) {
9029 			if (!env->prog->aux->attach_func_proto->type) {
9030 				/* Make sure programs that attach to void
9031 				 * hooks don't try to modify return value.
9032 				 */
9033 				verbose(env, "BPF_LSM_CGROUP that attach to void LSM hooks can't modify return value!\n");
9034 				return -EINVAL;
9035 			}
9036 		}
9037 		break;
9038 	case BPF_FUNC_dynptr_data:
9039 	{
9040 		struct bpf_reg_state *reg;
9041 		int id, ref_obj_id;
9042 
9043 		reg = get_dynptr_arg_reg(env, fn, regs);
9044 		if (!reg)
9045 			return -EFAULT;
9046 
9047 
9048 		if (meta.dynptr_id) {
9049 			verbose(env, "verifier internal error: meta.dynptr_id already set\n");
9050 			return -EFAULT;
9051 		}
9052 		if (meta.ref_obj_id) {
9053 			verbose(env, "verifier internal error: meta.ref_obj_id already set\n");
9054 			return -EFAULT;
9055 		}
9056 
9057 		id = dynptr_id(env, reg);
9058 		if (id < 0) {
9059 			verbose(env, "verifier internal error: failed to obtain dynptr id\n");
9060 			return id;
9061 		}
9062 
9063 		ref_obj_id = dynptr_ref_obj_id(env, reg);
9064 		if (ref_obj_id < 0) {
9065 			verbose(env, "verifier internal error: failed to obtain dynptr ref_obj_id\n");
9066 			return ref_obj_id;
9067 		}
9068 
9069 		meta.dynptr_id = id;
9070 		meta.ref_obj_id = ref_obj_id;
9071 
9072 		break;
9073 	}
9074 	case BPF_FUNC_dynptr_write:
9075 	{
9076 		enum bpf_dynptr_type dynptr_type;
9077 		struct bpf_reg_state *reg;
9078 
9079 		reg = get_dynptr_arg_reg(env, fn, regs);
9080 		if (!reg)
9081 			return -EFAULT;
9082 
9083 		dynptr_type = dynptr_get_type(env, reg);
9084 		if (dynptr_type == BPF_DYNPTR_TYPE_INVALID)
9085 			return -EFAULT;
9086 
9087 		if (dynptr_type == BPF_DYNPTR_TYPE_SKB)
9088 			/* this will trigger clear_all_pkt_pointers(), which will
9089 			 * invalidate all dynptr slices associated with the skb
9090 			 */
9091 			changes_data = true;
9092 
9093 		break;
9094 	}
9095 	case BPF_FUNC_user_ringbuf_drain:
9096 		err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
9097 					set_user_ringbuf_callback_state);
9098 		break;
9099 	}
9100 
9101 	if (err)
9102 		return err;
9103 
9104 	/* reset caller saved regs */
9105 	for (i = 0; i < CALLER_SAVED_REGS; i++) {
9106 		mark_reg_not_init(env, regs, caller_saved[i]);
9107 		check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
9108 	}
9109 
9110 	/* helper call returns 64-bit value. */
9111 	regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG;
9112 
9113 	/* update return register (already marked as written above) */
9114 	ret_type = fn->ret_type;
9115 	ret_flag = type_flag(ret_type);
9116 
9117 	switch (base_type(ret_type)) {
9118 	case RET_INTEGER:
9119 		/* sets type to SCALAR_VALUE */
9120 		mark_reg_unknown(env, regs, BPF_REG_0);
9121 		break;
9122 	case RET_VOID:
9123 		regs[BPF_REG_0].type = NOT_INIT;
9124 		break;
9125 	case RET_PTR_TO_MAP_VALUE:
9126 		/* No offset has been applied yet, neither variable nor fixed */
9127 		mark_reg_known_zero(env, regs, BPF_REG_0);
9128 		/* remember map_ptr, so that check_map_access()
9129 		 * can check 'value_size' boundary of memory access
9130 		 * to map element returned from bpf_map_lookup_elem()
9131 		 */
9132 		if (meta.map_ptr == NULL) {
9133 			verbose(env,
9134 				"kernel subsystem misconfigured verifier\n");
9135 			return -EINVAL;
9136 		}
9137 		regs[BPF_REG_0].map_ptr = meta.map_ptr;
9138 		regs[BPF_REG_0].map_uid = meta.map_uid;
9139 		regs[BPF_REG_0].type = PTR_TO_MAP_VALUE | ret_flag;
9140 		if (!type_may_be_null(ret_type) &&
9141 		    btf_record_has_field(meta.map_ptr->record, BPF_SPIN_LOCK)) {
9142 			regs[BPF_REG_0].id = ++env->id_gen;
9143 		}
9144 		break;
9145 	case RET_PTR_TO_SOCKET:
9146 		mark_reg_known_zero(env, regs, BPF_REG_0);
9147 		regs[BPF_REG_0].type = PTR_TO_SOCKET | ret_flag;
9148 		break;
9149 	case RET_PTR_TO_SOCK_COMMON:
9150 		mark_reg_known_zero(env, regs, BPF_REG_0);
9151 		regs[BPF_REG_0].type = PTR_TO_SOCK_COMMON | ret_flag;
9152 		break;
9153 	case RET_PTR_TO_TCP_SOCK:
9154 		mark_reg_known_zero(env, regs, BPF_REG_0);
9155 		regs[BPF_REG_0].type = PTR_TO_TCP_SOCK | ret_flag;
9156 		break;
9157 	case RET_PTR_TO_MEM:
9158 		mark_reg_known_zero(env, regs, BPF_REG_0);
9159 		regs[BPF_REG_0].type = PTR_TO_MEM | ret_flag;
9160 		regs[BPF_REG_0].mem_size = meta.mem_size;
9161 		break;
9162 	case RET_PTR_TO_MEM_OR_BTF_ID:
9163 	{
9164 		const struct btf_type *t;
9165 
9166 		mark_reg_known_zero(env, regs, BPF_REG_0);
9167 		t = btf_type_skip_modifiers(meta.ret_btf, meta.ret_btf_id, NULL);
9168 		if (!btf_type_is_struct(t)) {
9169 			u32 tsize;
9170 			const struct btf_type *ret;
9171 			const char *tname;
9172 
9173 			/* resolve the type size of ksym. */
9174 			ret = btf_resolve_size(meta.ret_btf, t, &tsize);
9175 			if (IS_ERR(ret)) {
9176 				tname = btf_name_by_offset(meta.ret_btf, t->name_off);
9177 				verbose(env, "unable to resolve the size of type '%s': %ld\n",
9178 					tname, PTR_ERR(ret));
9179 				return -EINVAL;
9180 			}
9181 			regs[BPF_REG_0].type = PTR_TO_MEM | ret_flag;
9182 			regs[BPF_REG_0].mem_size = tsize;
9183 		} else {
9184 			/* MEM_RDONLY may be carried from ret_flag, but it
9185 			 * doesn't apply on PTR_TO_BTF_ID. Fold it, otherwise
9186 			 * it will confuse the check of PTR_TO_BTF_ID in
9187 			 * check_mem_access().
9188 			 */
9189 			ret_flag &= ~MEM_RDONLY;
9190 
9191 			regs[BPF_REG_0].type = PTR_TO_BTF_ID | ret_flag;
9192 			regs[BPF_REG_0].btf = meta.ret_btf;
9193 			regs[BPF_REG_0].btf_id = meta.ret_btf_id;
9194 		}
9195 		break;
9196 	}
9197 	case RET_PTR_TO_BTF_ID:
9198 	{
9199 		struct btf *ret_btf;
9200 		int ret_btf_id;
9201 
9202 		mark_reg_known_zero(env, regs, BPF_REG_0);
9203 		regs[BPF_REG_0].type = PTR_TO_BTF_ID | ret_flag;
9204 		if (func_id == BPF_FUNC_kptr_xchg) {
9205 			ret_btf = meta.kptr_field->kptr.btf;
9206 			ret_btf_id = meta.kptr_field->kptr.btf_id;
9207 			if (!btf_is_kernel(ret_btf))
9208 				regs[BPF_REG_0].type |= MEM_ALLOC;
9209 		} else {
9210 			if (fn->ret_btf_id == BPF_PTR_POISON) {
9211 				verbose(env, "verifier internal error:");
9212 				verbose(env, "func %s has non-overwritten BPF_PTR_POISON return type\n",
9213 					func_id_name(func_id));
9214 				return -EINVAL;
9215 			}
9216 			ret_btf = btf_vmlinux;
9217 			ret_btf_id = *fn->ret_btf_id;
9218 		}
9219 		if (ret_btf_id == 0) {
9220 			verbose(env, "invalid return type %u of func %s#%d\n",
9221 				base_type(ret_type), func_id_name(func_id),
9222 				func_id);
9223 			return -EINVAL;
9224 		}
9225 		regs[BPF_REG_0].btf = ret_btf;
9226 		regs[BPF_REG_0].btf_id = ret_btf_id;
9227 		break;
9228 	}
9229 	default:
9230 		verbose(env, "unknown return type %u of func %s#%d\n",
9231 			base_type(ret_type), func_id_name(func_id), func_id);
9232 		return -EINVAL;
9233 	}
9234 
9235 	if (type_may_be_null(regs[BPF_REG_0].type))
9236 		regs[BPF_REG_0].id = ++env->id_gen;
9237 
9238 	if (helper_multiple_ref_obj_use(func_id, meta.map_ptr)) {
9239 		verbose(env, "verifier internal error: func %s#%d sets ref_obj_id more than once\n",
9240 			func_id_name(func_id), func_id);
9241 		return -EFAULT;
9242 	}
9243 
9244 	if (is_dynptr_ref_function(func_id))
9245 		regs[BPF_REG_0].dynptr_id = meta.dynptr_id;
9246 
9247 	if (is_ptr_cast_function(func_id) || is_dynptr_ref_function(func_id)) {
9248 		/* For release_reference() */
9249 		regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id;
9250 	} else if (is_acquire_function(func_id, meta.map_ptr)) {
9251 		int id = acquire_reference_state(env, insn_idx);
9252 
9253 		if (id < 0)
9254 			return id;
9255 		/* For mark_ptr_or_null_reg() */
9256 		regs[BPF_REG_0].id = id;
9257 		/* For release_reference() */
9258 		regs[BPF_REG_0].ref_obj_id = id;
9259 	}
9260 
9261 	do_refine_retval_range(regs, fn->ret_type, func_id, &meta);
9262 
9263 	err = check_map_func_compatibility(env, meta.map_ptr, func_id);
9264 	if (err)
9265 		return err;
9266 
9267 	if ((func_id == BPF_FUNC_get_stack ||
9268 	     func_id == BPF_FUNC_get_task_stack) &&
9269 	    !env->prog->has_callchain_buf) {
9270 		const char *err_str;
9271 
9272 #ifdef CONFIG_PERF_EVENTS
9273 		err = get_callchain_buffers(sysctl_perf_event_max_stack);
9274 		err_str = "cannot get callchain buffer for func %s#%d\n";
9275 #else
9276 		err = -ENOTSUPP;
9277 		err_str = "func %s#%d not supported without CONFIG_PERF_EVENTS\n";
9278 #endif
9279 		if (err) {
9280 			verbose(env, err_str, func_id_name(func_id), func_id);
9281 			return err;
9282 		}
9283 
9284 		env->prog->has_callchain_buf = true;
9285 	}
9286 
9287 	if (func_id == BPF_FUNC_get_stackid || func_id == BPF_FUNC_get_stack)
9288 		env->prog->call_get_stack = true;
9289 
9290 	if (func_id == BPF_FUNC_get_func_ip) {
9291 		if (check_get_func_ip(env))
9292 			return -ENOTSUPP;
9293 		env->prog->call_get_func_ip = true;
9294 	}
9295 
9296 	if (changes_data)
9297 		clear_all_pkt_pointers(env);
9298 	return 0;
9299 }
9300 
9301 /* mark_btf_func_reg_size() is used when the reg size is determined by
9302  * the BTF func_proto's return value and argument sizes.
9303  */
9304 static void mark_btf_func_reg_size(struct bpf_verifier_env *env, u32 regno,
9305 				   size_t reg_size)
9306 {
9307 	struct bpf_reg_state *reg = &cur_regs(env)[regno];
9308 
9309 	if (regno == BPF_REG_0) {
9310 		/* Function return value */
9311 		reg->live |= REG_LIVE_WRITTEN;
9312 		reg->subreg_def = reg_size == sizeof(u64) ?
9313 			DEF_NOT_SUBREG : env->insn_idx + 1;
9314 	} else {
9315 		/* Function argument */
9316 		if (reg_size == sizeof(u64)) {
9317 			mark_insn_zext(env, reg);
9318 			mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
9319 		} else {
9320 			mark_reg_read(env, reg, reg->parent, REG_LIVE_READ32);
9321 		}
9322 	}
9323 }
9324 
9325 static bool is_kfunc_acquire(struct bpf_kfunc_call_arg_meta *meta)
9326 {
9327 	return meta->kfunc_flags & KF_ACQUIRE;
9328 }
9329 
9330 static bool is_kfunc_ret_null(struct bpf_kfunc_call_arg_meta *meta)
9331 {
9332 	return meta->kfunc_flags & KF_RET_NULL;
9333 }
9334 
9335 static bool is_kfunc_release(struct bpf_kfunc_call_arg_meta *meta)
9336 {
9337 	return meta->kfunc_flags & KF_RELEASE;
9338 }
9339 
9340 static bool is_kfunc_trusted_args(struct bpf_kfunc_call_arg_meta *meta)
9341 {
9342 	return (meta->kfunc_flags & KF_TRUSTED_ARGS) || is_kfunc_release(meta);
9343 }
9344 
9345 static bool is_kfunc_sleepable(struct bpf_kfunc_call_arg_meta *meta)
9346 {
9347 	return meta->kfunc_flags & KF_SLEEPABLE;
9348 }
9349 
9350 static bool is_kfunc_destructive(struct bpf_kfunc_call_arg_meta *meta)
9351 {
9352 	return meta->kfunc_flags & KF_DESTRUCTIVE;
9353 }
9354 
9355 static bool is_kfunc_rcu(struct bpf_kfunc_call_arg_meta *meta)
9356 {
9357 	return meta->kfunc_flags & KF_RCU;
9358 }
9359 
9360 static bool __kfunc_param_match_suffix(const struct btf *btf,
9361 				       const struct btf_param *arg,
9362 				       const char *suffix)
9363 {
9364 	int suffix_len = strlen(suffix), len;
9365 	const char *param_name;
9366 
9367 	/* In the future, this can be ported to use BTF tagging */
9368 	param_name = btf_name_by_offset(btf, arg->name_off);
9369 	if (str_is_empty(param_name))
9370 		return false;
9371 	len = strlen(param_name);
9372 	if (len < suffix_len)
9373 		return false;
9374 	param_name += len - suffix_len;
9375 	return !strncmp(param_name, suffix, suffix_len);
9376 }
9377 
9378 static bool is_kfunc_arg_mem_size(const struct btf *btf,
9379 				  const struct btf_param *arg,
9380 				  const struct bpf_reg_state *reg)
9381 {
9382 	const struct btf_type *t;
9383 
9384 	t = btf_type_skip_modifiers(btf, arg->type, NULL);
9385 	if (!btf_type_is_scalar(t) || reg->type != SCALAR_VALUE)
9386 		return false;
9387 
9388 	return __kfunc_param_match_suffix(btf, arg, "__sz");
9389 }
9390 
9391 static bool is_kfunc_arg_const_mem_size(const struct btf *btf,
9392 					const struct btf_param *arg,
9393 					const struct bpf_reg_state *reg)
9394 {
9395 	const struct btf_type *t;
9396 
9397 	t = btf_type_skip_modifiers(btf, arg->type, NULL);
9398 	if (!btf_type_is_scalar(t) || reg->type != SCALAR_VALUE)
9399 		return false;
9400 
9401 	return __kfunc_param_match_suffix(btf, arg, "__szk");
9402 }
9403 
9404 static bool is_kfunc_arg_constant(const struct btf *btf, const struct btf_param *arg)
9405 {
9406 	return __kfunc_param_match_suffix(btf, arg, "__k");
9407 }
9408 
9409 static bool is_kfunc_arg_ignore(const struct btf *btf, const struct btf_param *arg)
9410 {
9411 	return __kfunc_param_match_suffix(btf, arg, "__ign");
9412 }
9413 
9414 static bool is_kfunc_arg_alloc_obj(const struct btf *btf, const struct btf_param *arg)
9415 {
9416 	return __kfunc_param_match_suffix(btf, arg, "__alloc");
9417 }
9418 
9419 static bool is_kfunc_arg_uninit(const struct btf *btf, const struct btf_param *arg)
9420 {
9421 	return __kfunc_param_match_suffix(btf, arg, "__uninit");
9422 }
9423 
9424 static bool is_kfunc_arg_refcounted_kptr(const struct btf *btf, const struct btf_param *arg)
9425 {
9426 	return __kfunc_param_match_suffix(btf, arg, "__refcounted_kptr");
9427 }
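
/* Editor's note: a sketch of the argument-name suffix convention matched by
 * the helpers above, not part of this file; the kfunc and its argument names
 * are hypothetical:
 *
 *	__bpf_kfunc int bpf_demo_fill(void *dst, u32 dst__szk, u32 flags__k)
 *	{ ... }
 *
 * Here 'dst__szk' is a constant size for the preceding memory argument and
 * 'flags__k' must be a known constant scalar; '__sz', '__ign', '__uninit',
 * '__alloc' and '__refcounted_kptr' mark the other special roles in the same
 * way.
 */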
9428 
9429 static bool is_kfunc_arg_scalar_with_name(const struct btf *btf,
9430 					  const struct btf_param *arg,
9431 					  const char *name)
9432 {
9433 	int len, target_len = strlen(name);
9434 	const char *param_name;
9435 
9436 	param_name = btf_name_by_offset(btf, arg->name_off);
9437 	if (str_is_empty(param_name))
9438 		return false;
9439 	len = strlen(param_name);
9440 	if (len != target_len)
9441 		return false;
9442 	if (strcmp(param_name, name))
9443 		return false;
9444 
9445 	return true;
9446 }
9447 
9448 enum {
9449 	KF_ARG_DYNPTR_ID,
9450 	KF_ARG_LIST_HEAD_ID,
9451 	KF_ARG_LIST_NODE_ID,
9452 	KF_ARG_RB_ROOT_ID,
9453 	KF_ARG_RB_NODE_ID,
9454 };
9455 
9456 BTF_ID_LIST(kf_arg_btf_ids)
9457 BTF_ID(struct, bpf_dynptr_kern)
9458 BTF_ID(struct, bpf_list_head)
9459 BTF_ID(struct, bpf_list_node)
9460 BTF_ID(struct, bpf_rb_root)
9461 BTF_ID(struct, bpf_rb_node)
9462 
9463 static bool __is_kfunc_ptr_arg_type(const struct btf *btf,
9464 				    const struct btf_param *arg, int type)
9465 {
9466 	const struct btf_type *t;
9467 	u32 res_id;
9468 
9469 	t = btf_type_skip_modifiers(btf, arg->type, NULL);
9470 	if (!t)
9471 		return false;
9472 	if (!btf_type_is_ptr(t))
9473 		return false;
9474 	t = btf_type_skip_modifiers(btf, t->type, &res_id);
9475 	if (!t)
9476 		return false;
9477 	return btf_types_are_same(btf, res_id, btf_vmlinux, kf_arg_btf_ids[type]);
9478 }
9479 
9480 static bool is_kfunc_arg_dynptr(const struct btf *btf, const struct btf_param *arg)
9481 {
9482 	return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_DYNPTR_ID);
9483 }
9484 
9485 static bool is_kfunc_arg_list_head(const struct btf *btf, const struct btf_param *arg)
9486 {
9487 	return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_LIST_HEAD_ID);
9488 }
9489 
9490 static bool is_kfunc_arg_list_node(const struct btf *btf, const struct btf_param *arg)
9491 {
9492 	return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_LIST_NODE_ID);
9493 }
9494 
9495 static bool is_kfunc_arg_rbtree_root(const struct btf *btf, const struct btf_param *arg)
9496 {
9497 	return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_RB_ROOT_ID);
9498 }
9499 
9500 static bool is_kfunc_arg_rbtree_node(const struct btf *btf, const struct btf_param *arg)
9501 {
9502 	return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_RB_NODE_ID);
9503 }
9504 
9505 static bool is_kfunc_arg_callback(struct bpf_verifier_env *env, const struct btf *btf,
9506 				  const struct btf_param *arg)
9507 {
9508 	const struct btf_type *t;
9509 
9510 	t = btf_type_resolve_func_ptr(btf, arg->type, NULL);
9511 	if (!t)
9512 		return false;
9513 
9514 	return true;
9515 }
9516 
9517 /* Returns true if struct is composed of scalars, 4 levels of nesting allowed */
9518 static bool __btf_type_is_scalar_struct(struct bpf_verifier_env *env,
9519 					const struct btf *btf,
9520 					const struct btf_type *t, int rec)
9521 {
9522 	const struct btf_type *member_type;
9523 	const struct btf_member *member;
9524 	u32 i;
9525 
9526 	if (!btf_type_is_struct(t))
9527 		return false;
9528 
9529 	for_each_member(i, t, member) {
9530 		const struct btf_array *array;
9531 
9532 		member_type = btf_type_skip_modifiers(btf, member->type, NULL);
9533 		if (btf_type_is_struct(member_type)) {
9534 			if (rec >= 3) {
9535 				verbose(env, "max struct nesting depth exceeded\n");
9536 				return false;
9537 			}
9538 			if (!__btf_type_is_scalar_struct(env, btf, member_type, rec + 1))
9539 				return false;
9540 			continue;
9541 		}
9542 		if (btf_type_is_array(member_type)) {
9543 			array = btf_array(member_type);
9544 			if (!array->nelems)
9545 				return false;
9546 			member_type = btf_type_skip_modifiers(btf, array->type, NULL);
9547 			if (!btf_type_is_scalar(member_type))
9548 				return false;
9549 			continue;
9550 		}
9551 		if (!btf_type_is_scalar(member_type))
9552 			return false;
9553 	}
9554 	return true;
9555 }
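
/* Editor's note: a sketch of an argument type that passes the check above,
 * not part of this file; the struct names are hypothetical:
 *
 *	struct demo_inner { __u32 a; __u8 pad[4]; };
 *	struct demo_arg   { __u64 id; struct demo_inner in; };
 *
 * Scalars, arrays of scalars and nested structs of scalars are accepted up to
 * the nesting limit; a struct containing a pointer member would be rejected.
 */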
9556 
9557 
9558 static u32 *reg2btf_ids[__BPF_REG_TYPE_MAX] = {
9559 #ifdef CONFIG_NET
9560 	[PTR_TO_SOCKET] = &btf_sock_ids[BTF_SOCK_TYPE_SOCK],
9561 	[PTR_TO_SOCK_COMMON] = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
9562 	[PTR_TO_TCP_SOCK] = &btf_sock_ids[BTF_SOCK_TYPE_TCP],
9563 #endif
9564 };
9565 
9566 enum kfunc_ptr_arg_type {
9567 	KF_ARG_PTR_TO_CTX,
9568 	KF_ARG_PTR_TO_ALLOC_BTF_ID,    /* Allocated object */
9569 	KF_ARG_PTR_TO_REFCOUNTED_KPTR, /* Refcounted local kptr */
9570 	KF_ARG_PTR_TO_DYNPTR,
9571 	KF_ARG_PTR_TO_ITER,
9572 	KF_ARG_PTR_TO_LIST_HEAD,
9573 	KF_ARG_PTR_TO_LIST_NODE,
9574 	KF_ARG_PTR_TO_BTF_ID,	       /* Also covers reg2btf_ids conversions */
9575 	KF_ARG_PTR_TO_MEM,
9576 	KF_ARG_PTR_TO_MEM_SIZE,	       /* Size derived from next argument, skip it */
9577 	KF_ARG_PTR_TO_CALLBACK,
9578 	KF_ARG_PTR_TO_RB_ROOT,
9579 	KF_ARG_PTR_TO_RB_NODE,
9580 };
9581 
9582 enum special_kfunc_type {
9583 	KF_bpf_obj_new_impl,
9584 	KF_bpf_obj_drop_impl,
9585 	KF_bpf_refcount_acquire_impl,
9586 	KF_bpf_list_push_front_impl,
9587 	KF_bpf_list_push_back_impl,
9588 	KF_bpf_list_pop_front,
9589 	KF_bpf_list_pop_back,
9590 	KF_bpf_cast_to_kern_ctx,
9591 	KF_bpf_rdonly_cast,
9592 	KF_bpf_rcu_read_lock,
9593 	KF_bpf_rcu_read_unlock,
9594 	KF_bpf_rbtree_remove,
9595 	KF_bpf_rbtree_add_impl,
9596 	KF_bpf_rbtree_first,
9597 	KF_bpf_dynptr_from_skb,
9598 	KF_bpf_dynptr_from_xdp,
9599 	KF_bpf_dynptr_slice,
9600 	KF_bpf_dynptr_slice_rdwr,
9601 };
9602 
9603 BTF_SET_START(special_kfunc_set)
9604 BTF_ID(func, bpf_obj_new_impl)
9605 BTF_ID(func, bpf_obj_drop_impl)
9606 BTF_ID(func, bpf_refcount_acquire_impl)
9607 BTF_ID(func, bpf_list_push_front_impl)
9608 BTF_ID(func, bpf_list_push_back_impl)
9609 BTF_ID(func, bpf_list_pop_front)
9610 BTF_ID(func, bpf_list_pop_back)
9611 BTF_ID(func, bpf_cast_to_kern_ctx)
9612 BTF_ID(func, bpf_rdonly_cast)
9613 BTF_ID(func, bpf_rbtree_remove)
9614 BTF_ID(func, bpf_rbtree_add_impl)
9615 BTF_ID(func, bpf_rbtree_first)
9616 BTF_ID(func, bpf_dynptr_from_skb)
9617 BTF_ID(func, bpf_dynptr_from_xdp)
9618 BTF_ID(func, bpf_dynptr_slice)
9619 BTF_ID(func, bpf_dynptr_slice_rdwr)
9620 BTF_SET_END(special_kfunc_set)
9621 
9622 BTF_ID_LIST(special_kfunc_list)
9623 BTF_ID(func, bpf_obj_new_impl)
9624 BTF_ID(func, bpf_obj_drop_impl)
9625 BTF_ID(func, bpf_refcount_acquire_impl)
9626 BTF_ID(func, bpf_list_push_front_impl)
9627 BTF_ID(func, bpf_list_push_back_impl)
9628 BTF_ID(func, bpf_list_pop_front)
9629 BTF_ID(func, bpf_list_pop_back)
9630 BTF_ID(func, bpf_cast_to_kern_ctx)
9631 BTF_ID(func, bpf_rdonly_cast)
9632 BTF_ID(func, bpf_rcu_read_lock)
9633 BTF_ID(func, bpf_rcu_read_unlock)
9634 BTF_ID(func, bpf_rbtree_remove)
9635 BTF_ID(func, bpf_rbtree_add_impl)
9636 BTF_ID(func, bpf_rbtree_first)
9637 BTF_ID(func, bpf_dynptr_from_skb)
9638 BTF_ID(func, bpf_dynptr_from_xdp)
9639 BTF_ID(func, bpf_dynptr_slice)
9640 BTF_ID(func, bpf_dynptr_slice_rdwr)
9641 
9642 static bool is_kfunc_bpf_rcu_read_lock(struct bpf_kfunc_call_arg_meta *meta)
9643 {
9644 	return meta->func_id == special_kfunc_list[KF_bpf_rcu_read_lock];
9645 }
9646 
9647 static bool is_kfunc_bpf_rcu_read_unlock(struct bpf_kfunc_call_arg_meta *meta)
9648 {
9649 	return meta->func_id == special_kfunc_list[KF_bpf_rcu_read_unlock];
9650 }
9651 
9652 static enum kfunc_ptr_arg_type
9653 get_kfunc_ptr_arg_type(struct bpf_verifier_env *env,
9654 		       struct bpf_kfunc_call_arg_meta *meta,
9655 		       const struct btf_type *t, const struct btf_type *ref_t,
9656 		       const char *ref_tname, const struct btf_param *args,
9657 		       int argno, int nargs)
9658 {
9659 	u32 regno = argno + 1;
9660 	struct bpf_reg_state *regs = cur_regs(env);
9661 	struct bpf_reg_state *reg = &regs[regno];
9662 	bool arg_mem_size = false;
9663 
9664 	if (meta->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx])
9665 		return KF_ARG_PTR_TO_CTX;
9666 
9667 	/* In this function, we verify the kfunc's BTF as per the argument type,
9668 	 * leaving the rest of the verification with respect to the register
9669 	 * type to our caller. When a set of conditions hold in the BTF type of
9670 	 * arguments, we resolve it to a known kfunc_ptr_arg_type.
9671 	 */
9672 	if (btf_get_prog_ctx_type(&env->log, meta->btf, t, resolve_prog_type(env->prog), argno))
9673 		return KF_ARG_PTR_TO_CTX;
9674 
9675 	if (is_kfunc_arg_alloc_obj(meta->btf, &args[argno]))
9676 		return KF_ARG_PTR_TO_ALLOC_BTF_ID;
9677 
9678 	if (is_kfunc_arg_refcounted_kptr(meta->btf, &args[argno]))
9679 		return KF_ARG_PTR_TO_REFCOUNTED_KPTR;
9680 
9681 	if (is_kfunc_arg_dynptr(meta->btf, &args[argno]))
9682 		return KF_ARG_PTR_TO_DYNPTR;
9683 
9684 	if (is_kfunc_arg_iter(meta, argno))
9685 		return KF_ARG_PTR_TO_ITER;
9686 
9687 	if (is_kfunc_arg_list_head(meta->btf, &args[argno]))
9688 		return KF_ARG_PTR_TO_LIST_HEAD;
9689 
9690 	if (is_kfunc_arg_list_node(meta->btf, &args[argno]))
9691 		return KF_ARG_PTR_TO_LIST_NODE;
9692 
9693 	if (is_kfunc_arg_rbtree_root(meta->btf, &args[argno]))
9694 		return KF_ARG_PTR_TO_RB_ROOT;
9695 
9696 	if (is_kfunc_arg_rbtree_node(meta->btf, &args[argno]))
9697 		return KF_ARG_PTR_TO_RB_NODE;
9698 
9699 	if ((base_type(reg->type) == PTR_TO_BTF_ID || reg2btf_ids[base_type(reg->type)])) {
9700 		if (!btf_type_is_struct(ref_t)) {
9701 			verbose(env, "kernel function %s args#%d pointer type %s %s is not supported\n",
9702 				meta->func_name, argno, btf_type_str(ref_t), ref_tname);
9703 			return -EINVAL;
9704 		}
9705 		return KF_ARG_PTR_TO_BTF_ID;
9706 	}
9707 
9708 	if (is_kfunc_arg_callback(env, meta->btf, &args[argno]))
9709 		return KF_ARG_PTR_TO_CALLBACK;
9710 
9711 
9712 	if (argno + 1 < nargs &&
9713 	    (is_kfunc_arg_mem_size(meta->btf, &args[argno + 1], &regs[regno + 1]) ||
9714 	     is_kfunc_arg_const_mem_size(meta->btf, &args[argno + 1], &regs[regno + 1])))
9715 		arg_mem_size = true;
9716 
9717 	/* This is the catch-all argument type for register types supported by
9718 	 * check_helper_mem_access. However, we only allow it when the argument
9719 	 * type is a pointer to scalar, or a struct composed (recursively) of
9720 	 * scalars. When arg_mem_size is true, the pointer can be void *.
9721 	 */
9722 	if (!btf_type_is_scalar(ref_t) && !__btf_type_is_scalar_struct(env, meta->btf, ref_t, 0) &&
9723 	    (arg_mem_size ? !btf_type_is_void(ref_t) : 1)) {
9724 		verbose(env, "arg#%d pointer type %s %s must point to %sscalar, or struct with scalar\n",
9725 			argno, btf_type_str(ref_t), ref_tname, arg_mem_size ? "void, " : "");
9726 		return -EINVAL;
9727 	}
9728 	return arg_mem_size ? KF_ARG_PTR_TO_MEM_SIZE : KF_ARG_PTR_TO_MEM;
9729 }
9730 
9731 static int process_kf_arg_ptr_to_btf_id(struct bpf_verifier_env *env,
9732 					struct bpf_reg_state *reg,
9733 					const struct btf_type *ref_t,
9734 					const char *ref_tname, u32 ref_id,
9735 					struct bpf_kfunc_call_arg_meta *meta,
9736 					int argno)
9737 {
9738 	const struct btf_type *reg_ref_t;
9739 	bool strict_type_match = false;
9740 	const struct btf *reg_btf;
9741 	const char *reg_ref_tname;
9742 	u32 reg_ref_id;
9743 
9744 	if (base_type(reg->type) == PTR_TO_BTF_ID) {
9745 		reg_btf = reg->btf;
9746 		reg_ref_id = reg->btf_id;
9747 	} else {
9748 		reg_btf = btf_vmlinux;
9749 		reg_ref_id = *reg2btf_ids[base_type(reg->type)];
9750 	}
9751 
9752 	/* Enforce strict type matching for calls to kfuncs that are acquiring
9753 	 * or releasing a reference, or are no-cast aliases. We do _not_
9754 	 * enforce strict matching for plain KF_TRUSTED_ARGS kfuncs by default,
9755 	 * as we want to enable BPF programs to pass types that are bitwise
9756 	 * equivalent without forcing them to explicitly cast with something
9757 	 * like bpf_cast_to_kern_ctx().
9758 	 *
9759 	 * For example, say we had a type like the following:
9760 	 *
9761 	 * struct bpf_cpumask {
9762 	 *	cpumask_t cpumask;
9763 	 *	refcount_t usage;
9764 	 * };
9765 	 *
9766 	 * Note that as specified in <linux/cpumask.h>, cpumask_t is typedef'ed
9767 	 * to a struct cpumask, so it would be safe to pass a struct
9768 	 * bpf_cpumask * to a kfunc expecting a struct cpumask *.
9769 	 *
9770 	 * The philosophy here is similar to how we allow scalars of different
9771 	 * types to be passed to kfuncs as long as the size is the same. The
9772 	 * only difference here is that we're simply allowing
9773 	 * btf_struct_ids_match() to walk the struct at the 0th offset, and
9774 	 * resolve types.
9775 	 */
9776 	if (is_kfunc_acquire(meta) ||
9777 	    (is_kfunc_release(meta) && reg->ref_obj_id) ||
9778 	    btf_type_ids_nocast_alias(&env->log, reg_btf, reg_ref_id, meta->btf, ref_id))
9779 		strict_type_match = true;
9780 
9781 	WARN_ON_ONCE(is_kfunc_trusted_args(meta) && reg->off);
9782 
9783 	reg_ref_t = btf_type_skip_modifiers(reg_btf, reg_ref_id, &reg_ref_id);
9784 	reg_ref_tname = btf_name_by_offset(reg_btf, reg_ref_t->name_off);
9785 	if (!btf_struct_ids_match(&env->log, reg_btf, reg_ref_id, reg->off, meta->btf, ref_id, strict_type_match)) {
9786 		verbose(env, "kernel function %s args#%d expected pointer to %s %s but R%d has a pointer to %s %s\n",
9787 			meta->func_name, argno, btf_type_str(ref_t), ref_tname, argno + 1,
9788 			btf_type_str(reg_ref_t), reg_ref_tname);
9789 		return -EINVAL;
9790 	}
9791 	return 0;
9792 }
9793 
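/* Mark 'reg' as a non-owning reference: the register may still be used to
 * access the object while the collection's bpf_spin_lock is held, but it no
 * longer conveys ownership. Requires an active lock in the current state and
 * that NON_OWN_REF is not already set.
 */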
9794 static int ref_set_non_owning(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
9795 {
9796 	struct bpf_verifier_state *state = env->cur_state;
9797 
9798 	if (!state->active_lock.ptr) {
9799 		verbose(env, "verifier internal error: ref_set_non_owning w/o active lock\n");
9800 		return -EFAULT;
9801 	}
9802 
9803 	if (type_flag(reg->type) & NON_OWN_REF) {
9804 		verbose(env, "verifier internal error: NON_OWN_REF already set\n");
9805 		return -EFAULT;
9806 	}
9807 
9808 	reg->type |= NON_OWN_REF;
9809 	return 0;
9810 }
9811 
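/* Convert every register currently sharing 'ref_obj_id' from an owning to a
 * non-owning reference: clear reg->ref_obj_id (so that a later
 * release_reference() does not clobber the register) and tag it with
 * NON_OWN_REF. The acquired reference itself is released separately by the
 * caller.
 */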
9812 static int ref_convert_owning_non_owning(struct bpf_verifier_env *env, u32 ref_obj_id)
9813 {
9814 	struct bpf_func_state *state, *unused;
9815 	struct bpf_reg_state *reg;
9816 	int i;
9817 
9818 	state = cur_func(env);
9819 
9820 	if (!ref_obj_id) {
9821 		verbose(env, "verifier internal error: ref_obj_id is zero for "
9822 			     "owning -> non-owning conversion\n");
9823 		return -EFAULT;
9824 	}
9825 
9826 	for (i = 0; i < state->acquired_refs; i++) {
9827 		if (state->refs[i].id != ref_obj_id)
9828 			continue;
9829 
9830 		/* Clear ref_obj_id here so release_reference doesn't clobber
9831 		 * the whole reg
9832 		 */
9833 		bpf_for_each_reg_in_vstate(env->cur_state, unused, reg, ({
9834 			if (reg->ref_obj_id == ref_obj_id) {
9835 				reg->ref_obj_id = 0;
9836 				ref_set_non_owning(env, reg);
9837 			}
9838 		}));
9839 		return 0;
9840 	}
9841 
9842 	verbose(env, "verifier internal error: ref state missing for ref_obj_id\n");
9843 	return -EFAULT;
9844 }
9845 
9846 /* Implementation details:
9847  *
9848  * Each register points to some region of memory, which we define as an
9849  * allocation. Each allocation may embed a bpf_spin_lock which protects any
9850  * special BPF objects (bpf_list_head, bpf_rb_root, etc.) part of the same
9851  * allocation. The lock and the data it protects are colocated in the same
9852  * memory region.
9853  *
9854  * Hence, every time a register holds a pointer value pointing to such
9855  * allocation, the verifier preserves a unique reg->id for it.
9856  *
9857  * The verifier remembers the lock 'ptr' and the lock 'id' whenever
9858  * bpf_spin_lock is called.
9859  *
9860  * To enable this, lock state in the verifier captures two values:
9861  *	active_lock.ptr = Register's type specific pointer
9862  *	active_lock.id  = A unique ID for each register pointer value
9863  *
9864  * Currently, PTR_TO_MAP_VALUE and PTR_TO_BTF_ID | MEM_ALLOC are the two
9865  * supported register types.
9866  *
9867  * The active_lock.ptr in case of map values is the reg->map_ptr, and in case of
9868  * allocated objects is the reg->btf pointer.
9869  *
9870  * The active_lock.id is non-unique for maps supporting direct_value_addr, as we
9871  * can establish the provenance of the map value statically for each distinct
9872  * lookup into such maps. They always contain a single map value, so assigning a
9873  * unique ID to each pseudo load would pessimize the algorithm and reject valid programs.
9874  *
9875  * So, in case of global variables, they use array maps with max_entries = 1,
9876  * hence their active_lock.ptr becomes map_ptr and id = 0 (since they all point
9877  * into the same map value as max_entries is 1, as described above).
9878  *
9879  * In case of inner map lookups, the inner map pointer has same map_ptr as the
9880  * outer map pointer (in verifier context), but each lookup into an inner map
9881  * assigns a fresh reg->id to the lookup, so while lookups into distinct inner
9882  * maps from the same outer map share the same map_ptr as active_lock.ptr, they
9883  * will get different reg->id assigned to each lookup, hence different
9884  * active_lock.id.
9885  *
9886  * In case of allocated objects, active_lock.ptr is the reg->btf, and the
9887  * reg->id is a unique ID preserved after the NULL pointer check on the pointer
9888  * returned from bpf_obj_new. Each allocation receives a new reg->id.
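 *
 * Illustrative (simplified) sketch of how these two values get used, assuming
 * a map 'm' whose value contains the spin lock and a list head:
 *
 *	v = bpf_map_lookup_elem(&m, &key);	 // v: PTR_TO_MAP_VALUE, id = N
 *	bpf_spin_lock(&v->lock);		 // active_lock = { m, N }
 *	bpf_list_push_front(&v->head, &e->node); // head arg must match { m, N }
 *
 * check_reg_allocation_locked() below performs exactly this (ptr, id)
 * comparison against the currently held lock.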
9889  */
9890 static int check_reg_allocation_locked(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
9891 {
9892 	void *ptr;
9893 	u32 id;
9894 
9895 	switch ((int)reg->type) {
9896 	case PTR_TO_MAP_VALUE:
9897 		ptr = reg->map_ptr;
9898 		break;
9899 	case PTR_TO_BTF_ID | MEM_ALLOC:
9900 		ptr = reg->btf;
9901 		break;
9902 	default:
9903 		verbose(env, "verifier internal error: unknown reg type for lock check\n");
9904 		return -EFAULT;
9905 	}
9906 	id = reg->id;
9907 
9908 	if (!env->cur_state->active_lock.ptr)
9909 		return -EINVAL;
9910 	if (env->cur_state->active_lock.ptr != ptr ||
9911 	    env->cur_state->active_lock.id != id) {
9912 		verbose(env, "held lock and object are not in the same allocation\n");
9913 		return -EINVAL;
9914 	}
9915 	return 0;
9916 }
9917 
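/* Predicates identifying the graph-datastructure (linked list and rbtree)
 * kfunc families. These kfuncs get special argument and ownership handling,
 * and the rbtree ones additionally require the matching lock to be held.
 */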
9918 static bool is_bpf_list_api_kfunc(u32 btf_id)
9919 {
9920 	return btf_id == special_kfunc_list[KF_bpf_list_push_front_impl] ||
9921 	       btf_id == special_kfunc_list[KF_bpf_list_push_back_impl] ||
9922 	       btf_id == special_kfunc_list[KF_bpf_list_pop_front] ||
9923 	       btf_id == special_kfunc_list[KF_bpf_list_pop_back];
9924 }
9925 
9926 static bool is_bpf_rbtree_api_kfunc(u32 btf_id)
9927 {
9928 	return btf_id == special_kfunc_list[KF_bpf_rbtree_add_impl] ||
9929 	       btf_id == special_kfunc_list[KF_bpf_rbtree_remove] ||
9930 	       btf_id == special_kfunc_list[KF_bpf_rbtree_first];
9931 }
9932 
9933 static bool is_bpf_graph_api_kfunc(u32 btf_id)
9934 {
9935 	return is_bpf_list_api_kfunc(btf_id) || is_bpf_rbtree_api_kfunc(btf_id) ||
9936 	       btf_id == special_kfunc_list[KF_bpf_refcount_acquire_impl];
9937 }
9938 
9939 static bool is_callback_calling_kfunc(u32 btf_id)
9940 {
9941 	return btf_id == special_kfunc_list[KF_bpf_rbtree_add_impl];
9942 }
9943 
9944 static bool is_rbtree_lock_required_kfunc(u32 btf_id)
9945 {
9946 	return is_bpf_rbtree_api_kfunc(btf_id);
9947 }
9948 
9949 static bool check_kfunc_is_graph_root_api(struct bpf_verifier_env *env,
9950 					  enum btf_field_type head_field_type,
9951 					  u32 kfunc_btf_id)
9952 {
9953 	bool ret;
9954 
9955 	switch (head_field_type) {
9956 	case BPF_LIST_HEAD:
9957 		ret = is_bpf_list_api_kfunc(kfunc_btf_id);
9958 		break;
9959 	case BPF_RB_ROOT:
9960 		ret = is_bpf_rbtree_api_kfunc(kfunc_btf_id);
9961 		break;
9962 	default:
9963 		verbose(env, "verifier internal error: unexpected graph root argument type %s\n",
9964 			btf_field_type_name(head_field_type));
9965 		return false;
9966 	}
9967 
9968 	if (!ret)
9969 		verbose(env, "verifier internal error: %s head arg for unknown kfunc\n",
9970 			btf_field_type_name(head_field_type));
9971 	return ret;
9972 }
9973 
9974 static bool check_kfunc_is_graph_node_api(struct bpf_verifier_env *env,
9975 					  enum btf_field_type node_field_type,
9976 					  u32 kfunc_btf_id)
9977 {
9978 	bool ret;
9979 
9980 	switch (node_field_type) {
9981 	case BPF_LIST_NODE:
9982 		ret = (kfunc_btf_id == special_kfunc_list[KF_bpf_list_push_front_impl] ||
9983 		       kfunc_btf_id == special_kfunc_list[KF_bpf_list_push_back_impl]);
9984 		break;
9985 	case BPF_RB_NODE:
9986 		ret = (kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_remove] ||
9987 		       kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_add_impl]);
9988 		break;
9989 	default:
9990 		verbose(env, "verifier internal error: unexpected graph node argument type %s\n",
9991 			btf_field_type_name(node_field_type));
9992 		return false;
9993 	}
9994 
9995 	if (!ret)
9996 		verbose(env, "verifier internal error: %s node arg for unknown kfunc\n",
9997 			btf_field_type_name(node_field_type));
9998 	return ret;
9999 }
10000 
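/* Common checks for a bpf_list_head / bpf_rb_root kfunc argument: the kfunc
 * must belong to the matching API family, the register must have a constant
 * offset at which a graph root field of the expected type exists, and the
 * bpf_spin_lock colocated with that root must currently be held. On success
 * the found field is recorded in the kfunc call meta for later use.
 */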
10001 static int
10002 __process_kf_arg_ptr_to_graph_root(struct bpf_verifier_env *env,
10003 				   struct bpf_reg_state *reg, u32 regno,
10004 				   struct bpf_kfunc_call_arg_meta *meta,
10005 				   enum btf_field_type head_field_type,
10006 				   struct btf_field **head_field)
10007 {
10008 	const char *head_type_name;
10009 	struct btf_field *field;
10010 	struct btf_record *rec;
10011 	u32 head_off;
10012 
10013 	if (meta->btf != btf_vmlinux) {
10014 		verbose(env, "verifier internal error: unexpected btf mismatch in kfunc call\n");
10015 		return -EFAULT;
10016 	}
10017 
10018 	if (!check_kfunc_is_graph_root_api(env, head_field_type, meta->func_id))
10019 		return -EFAULT;
10020 
10021 	head_type_name = btf_field_type_name(head_field_type);
10022 	if (!tnum_is_const(reg->var_off)) {
10023 		verbose(env,
10024 			"R%d doesn't have constant offset. %s has to be at the constant offset\n",
10025 			regno, head_type_name);
10026 		return -EINVAL;
10027 	}
10028 
10029 	rec = reg_btf_record(reg);
10030 	head_off = reg->off + reg->var_off.value;
10031 	field = btf_record_find(rec, head_off, head_field_type);
10032 	if (!field) {
10033 		verbose(env, "%s not found at offset=%u\n", head_type_name, head_off);
10034 		return -EINVAL;
10035 	}
10036 
10037 	/* All functions require the bpf_list_head/bpf_rb_root to be protected by a bpf_spin_lock */
10038 	if (check_reg_allocation_locked(env, reg)) {
10039 		verbose(env, "bpf_spin_lock at off=%d must be held for %s\n",
10040 			rec->spin_lock_off, head_type_name);
10041 		return -EINVAL;
10042 	}
10043 
10044 	if (*head_field) {
10045 		verbose(env, "verifier internal error: repeating %s arg\n", head_type_name);
10046 		return -EFAULT;
10047 	}
10048 	*head_field = field;
10049 	return 0;
10050 }
10051 
10052 static int process_kf_arg_ptr_to_list_head(struct bpf_verifier_env *env,
10053 					   struct bpf_reg_state *reg, u32 regno,
10054 					   struct bpf_kfunc_call_arg_meta *meta)
10055 {
10056 	return __process_kf_arg_ptr_to_graph_root(env, reg, regno, meta, BPF_LIST_HEAD,
10057 							  &meta->arg_list_head.field);
10058 }
10059 
10060 static int process_kf_arg_ptr_to_rbtree_root(struct bpf_verifier_env *env,
10061 					     struct bpf_reg_state *reg, u32 regno,
10062 					     struct bpf_kfunc_call_arg_meta *meta)
10063 {
10064 	return __process_kf_arg_ptr_to_graph_root(env, reg, regno, meta, BPF_RB_ROOT,
10065 							  &meta->arg_rbtree_root.field);
10066 }
10067 
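/* Common checks for a bpf_list_node / bpf_rb_node kfunc argument: the register
 * must point, at a constant offset, to a node field of the expected type, and
 * the node's containing struct and offset must match what the graph root
 * (recorded earlier from the head/root argument) expects.
 */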
10068 static int
10069 __process_kf_arg_ptr_to_graph_node(struct bpf_verifier_env *env,
10070 				   struct bpf_reg_state *reg, u32 regno,
10071 				   struct bpf_kfunc_call_arg_meta *meta,
10072 				   enum btf_field_type head_field_type,
10073 				   enum btf_field_type node_field_type,
10074 				   struct btf_field **node_field)
10075 {
10076 	const char *node_type_name;
10077 	const struct btf_type *et, *t;
10078 	struct btf_field *field;
10079 	u32 node_off;
10080 
10081 	if (meta->btf != btf_vmlinux) {
10082 		verbose(env, "verifier internal error: unexpected btf mismatch in kfunc call\n");
10083 		return -EFAULT;
10084 	}
10085 
10086 	if (!check_kfunc_is_graph_node_api(env, node_field_type, meta->func_id))
10087 		return -EFAULT;
10088 
10089 	node_type_name = btf_field_type_name(node_field_type);
10090 	if (!tnum_is_const(reg->var_off)) {
10091 		verbose(env,
10092 			"R%d doesn't have constant offset. %s has to be at the constant offset\n",
10093 			regno, node_type_name);
10094 		return -EINVAL;
10095 	}
10096 
10097 	node_off = reg->off + reg->var_off.value;
10098 	field = reg_find_field_offset(reg, node_off, node_field_type);
10099 	if (!field || field->offset != node_off) {
10100 		verbose(env, "%s not found at offset=%u\n", node_type_name, node_off);
10101 		return -EINVAL;
10102 	}
10103 
10104 	field = *node_field;
10105 
10106 	et = btf_type_by_id(field->graph_root.btf, field->graph_root.value_btf_id);
10107 	t = btf_type_by_id(reg->btf, reg->btf_id);
10108 	if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, 0, field->graph_root.btf,
10109 				  field->graph_root.value_btf_id, true)) {
10110 		verbose(env, "operation on %s expects arg#1 %s at offset=%d "
10111 			"in struct %s, but arg is at offset=%d in struct %s\n",
10112 			btf_field_type_name(head_field_type),
10113 			btf_field_type_name(node_field_type),
10114 			field->graph_root.node_offset,
10115 			btf_name_by_offset(field->graph_root.btf, et->name_off),
10116 			node_off, btf_name_by_offset(reg->btf, t->name_off));
10117 		return -EINVAL;
10118 	}
10119 
10120 	if (node_off != field->graph_root.node_offset) {
10121 		verbose(env, "arg#1 offset=%d, but expected %s at offset=%d in struct %s\n",
10122 			node_off, btf_field_type_name(node_field_type),
10123 			field->graph_root.node_offset,
10124 			btf_name_by_offset(field->graph_root.btf, et->name_off));
10125 		return -EINVAL;
10126 	}
10127 
10128 	return 0;
10129 }
10130 
10131 static int process_kf_arg_ptr_to_list_node(struct bpf_verifier_env *env,
10132 					   struct bpf_reg_state *reg, u32 regno,
10133 					   struct bpf_kfunc_call_arg_meta *meta)
10134 {
10135 	return __process_kf_arg_ptr_to_graph_node(env, reg, regno, meta,
10136 						  BPF_LIST_HEAD, BPF_LIST_NODE,
10137 						  &meta->arg_list_head.field);
10138 }
10139 
10140 static int process_kf_arg_ptr_to_rbtree_node(struct bpf_verifier_env *env,
10141 					     struct bpf_reg_state *reg, u32 regno,
10142 					     struct bpf_kfunc_call_arg_meta *meta)
10143 {
10144 	return __process_kf_arg_ptr_to_graph_node(env, reg, regno, meta,
10145 						  BPF_RB_ROOT, BPF_RB_NODE,
10146 						  &meta->arg_rbtree_root.field);
10147 }
10148 
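/* Walk all declared arguments of the kfunc, classify each pointer argument via
 * get_kfunc_ptr_arg_type() and run the type-specific checks. Side effects
 * (constant values, ref_obj_id, release register, dynptr/list/rbtree metadata)
 * are accumulated in 'meta' for use by check_kfunc_call().
 */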
10149 static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_arg_meta *meta,
10150 			    int insn_idx)
10151 {
10152 	const char *func_name = meta->func_name, *ref_tname;
10153 	const struct btf *btf = meta->btf;
10154 	const struct btf_param *args;
10155 	struct btf_record *rec;
10156 	u32 i, nargs;
10157 	int ret;
10158 
10159 	args = (const struct btf_param *)(meta->func_proto + 1);
10160 	nargs = btf_type_vlen(meta->func_proto);
10161 	if (nargs > MAX_BPF_FUNC_REG_ARGS) {
10162 		verbose(env, "Function %s has %d > %d args\n", func_name, nargs,
10163 			MAX_BPF_FUNC_REG_ARGS);
10164 		return -EINVAL;
10165 	}
10166 
10167 	/* Check that BTF function arguments match actual types that the
10168 	 * verifier sees.
10169 	 */
10170 	for (i = 0; i < nargs; i++) {
10171 		struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[i + 1];
10172 		const struct btf_type *t, *ref_t, *resolve_ret;
10173 		enum bpf_arg_type arg_type = ARG_DONTCARE;
10174 		u32 regno = i + 1, ref_id, type_size;
10175 		bool is_ret_buf_sz = false;
10176 		int kf_arg_type;
10177 
10178 		t = btf_type_skip_modifiers(btf, args[i].type, NULL);
10179 
10180 		if (is_kfunc_arg_ignore(btf, &args[i]))
10181 			continue;
10182 
10183 		if (btf_type_is_scalar(t)) {
10184 			if (reg->type != SCALAR_VALUE) {
10185 				verbose(env, "R%d is not a scalar\n", regno);
10186 				return -EINVAL;
10187 			}
10188 
10189 			if (is_kfunc_arg_constant(meta->btf, &args[i])) {
10190 				if (meta->arg_constant.found) {
10191 					verbose(env, "verifier internal error: only one constant argument permitted\n");
10192 					return -EFAULT;
10193 				}
10194 				if (!tnum_is_const(reg->var_off)) {
10195 					verbose(env, "R%d must be a known constant\n", regno);
10196 					return -EINVAL;
10197 				}
10198 				ret = mark_chain_precision(env, regno);
10199 				if (ret < 0)
10200 					return ret;
10201 				meta->arg_constant.found = true;
10202 				meta->arg_constant.value = reg->var_off.value;
10203 			} else if (is_kfunc_arg_scalar_with_name(btf, &args[i], "rdonly_buf_size")) {
10204 				meta->r0_rdonly = true;
10205 				is_ret_buf_sz = true;
10206 			} else if (is_kfunc_arg_scalar_with_name(btf, &args[i], "rdwr_buf_size")) {
10207 				is_ret_buf_sz = true;
10208 			}
10209 
10210 			if (is_ret_buf_sz) {
10211 				if (meta->r0_size) {
10212 					verbose(env, "2 or more rdonly/rdwr_buf_size parameters for kfunc\n");
10213 					return -EINVAL;
10214 				}
10215 
10216 				if (!tnum_is_const(reg->var_off)) {
10217 					verbose(env, "R%d is not a const\n", regno);
10218 					return -EINVAL;
10219 				}
10220 
10221 				meta->r0_size = reg->var_off.value;
10222 				ret = mark_chain_precision(env, regno);
10223 				if (ret)
10224 					return ret;
10225 			}
10226 			continue;
10227 		}
10228 
10229 		if (!btf_type_is_ptr(t)) {
10230 			verbose(env, "Unrecognized arg#%d type %s\n", i, btf_type_str(t));
10231 			return -EINVAL;
10232 		}
10233 
10234 		if ((is_kfunc_trusted_args(meta) || is_kfunc_rcu(meta)) &&
10235 		    (register_is_null(reg) || type_may_be_null(reg->type))) {
10236 			verbose(env, "Possibly NULL pointer passed to trusted arg%d\n", i);
10237 			return -EACCES;
10238 		}
10239 
10240 		if (reg->ref_obj_id) {
10241 			if (is_kfunc_release(meta) && meta->ref_obj_id) {
10242 				verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n",
10243 					regno, reg->ref_obj_id,
10244 					meta->ref_obj_id);
10245 				return -EFAULT;
10246 			}
10247 			meta->ref_obj_id = reg->ref_obj_id;
10248 			if (is_kfunc_release(meta))
10249 				meta->release_regno = regno;
10250 		}
10251 
10252 		ref_t = btf_type_skip_modifiers(btf, t->type, &ref_id);
10253 		ref_tname = btf_name_by_offset(btf, ref_t->name_off);
10254 
10255 		kf_arg_type = get_kfunc_ptr_arg_type(env, meta, t, ref_t, ref_tname, args, i, nargs);
10256 		if (kf_arg_type < 0)
10257 			return kf_arg_type;
10258 
10259 		switch (kf_arg_type) {
10260 		case KF_ARG_PTR_TO_ALLOC_BTF_ID:
10261 		case KF_ARG_PTR_TO_BTF_ID:
10262 			if (!is_kfunc_trusted_args(meta) && !is_kfunc_rcu(meta))
10263 				break;
10264 
10265 			if (!is_trusted_reg(reg)) {
10266 				if (!is_kfunc_rcu(meta)) {
10267 					verbose(env, "R%d must be referenced or trusted\n", regno);
10268 					return -EINVAL;
10269 				}
10270 				if (!is_rcu_reg(reg)) {
10271 					verbose(env, "R%d must be a rcu pointer\n", regno);
10272 					return -EINVAL;
10273 				}
10274 			}
10275 
10276 			fallthrough;
10277 		case KF_ARG_PTR_TO_CTX:
10278 			/* Trusted arguments have the same offset checks as release arguments */
10279 			arg_type |= OBJ_RELEASE;
10280 			break;
10281 		case KF_ARG_PTR_TO_DYNPTR:
10282 		case KF_ARG_PTR_TO_ITER:
10283 		case KF_ARG_PTR_TO_LIST_HEAD:
10284 		case KF_ARG_PTR_TO_LIST_NODE:
10285 		case KF_ARG_PTR_TO_RB_ROOT:
10286 		case KF_ARG_PTR_TO_RB_NODE:
10287 		case KF_ARG_PTR_TO_MEM:
10288 		case KF_ARG_PTR_TO_MEM_SIZE:
10289 		case KF_ARG_PTR_TO_CALLBACK:
10290 		case KF_ARG_PTR_TO_REFCOUNTED_KPTR:
10291 			/* Trusted by default */
10292 			break;
10293 		default:
10294 			WARN_ON_ONCE(1);
10295 			return -EFAULT;
10296 		}
10297 
10298 		if (is_kfunc_release(meta) && reg->ref_obj_id)
10299 			arg_type |= OBJ_RELEASE;
10300 		ret = check_func_arg_reg_off(env, reg, regno, arg_type);
10301 		if (ret < 0)
10302 			return ret;
10303 
10304 		switch (kf_arg_type) {
10305 		case KF_ARG_PTR_TO_CTX:
10306 			if (reg->type != PTR_TO_CTX) {
10307 				verbose(env, "arg#%d expected pointer to ctx, but got %s\n", i, btf_type_str(t));
10308 				return -EINVAL;
10309 			}
10310 
10311 			if (meta->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx]) {
10312 				ret = get_kern_ctx_btf_id(&env->log, resolve_prog_type(env->prog));
10313 				if (ret < 0)
10314 					return -EINVAL;
10315 				meta->ret_btf_id = ret;
10316 			}
10317 			break;
10318 		case KF_ARG_PTR_TO_ALLOC_BTF_ID:
10319 			if (reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) {
10320 				verbose(env, "arg#%d expected pointer to allocated object\n", i);
10321 				return -EINVAL;
10322 			}
10323 			if (!reg->ref_obj_id) {
10324 				verbose(env, "allocated object must be referenced\n");
10325 				return -EINVAL;
10326 			}
10327 			if (meta->btf == btf_vmlinux &&
10328 			    meta->func_id == special_kfunc_list[KF_bpf_obj_drop_impl]) {
10329 				meta->arg_obj_drop.btf = reg->btf;
10330 				meta->arg_obj_drop.btf_id = reg->btf_id;
10331 			}
10332 			break;
10333 		case KF_ARG_PTR_TO_DYNPTR:
10334 		{
10335 			enum bpf_arg_type dynptr_arg_type = ARG_PTR_TO_DYNPTR;
10336 
10337 			if (reg->type != PTR_TO_STACK &&
10338 			    reg->type != CONST_PTR_TO_DYNPTR) {
10339 				verbose(env, "arg#%d expected pointer to stack or dynptr_ptr\n", i);
10340 				return -EINVAL;
10341 			}
10342 
10343 			if (reg->type == CONST_PTR_TO_DYNPTR)
10344 				dynptr_arg_type |= MEM_RDONLY;
10345 
10346 			if (is_kfunc_arg_uninit(btf, &args[i]))
10347 				dynptr_arg_type |= MEM_UNINIT;
10348 
10349 			if (meta->func_id == special_kfunc_list[KF_bpf_dynptr_from_skb])
10350 				dynptr_arg_type |= DYNPTR_TYPE_SKB;
10351 			else if (meta->func_id == special_kfunc_list[KF_bpf_dynptr_from_xdp])
10352 				dynptr_arg_type |= DYNPTR_TYPE_XDP;
10353 
10354 			ret = process_dynptr_func(env, regno, insn_idx, dynptr_arg_type);
10355 			if (ret < 0)
10356 				return ret;
10357 
10358 			if (!(dynptr_arg_type & MEM_UNINIT)) {
10359 				int id = dynptr_id(env, reg);
10360 
10361 				if (id < 0) {
10362 					verbose(env, "verifier internal error: failed to obtain dynptr id\n");
10363 					return id;
10364 				}
10365 				meta->initialized_dynptr.id = id;
10366 				meta->initialized_dynptr.type = dynptr_get_type(env, reg);
10367 			}
10368 
10369 			break;
10370 		}
10371 		case KF_ARG_PTR_TO_ITER:
10372 			ret = process_iter_arg(env, regno, insn_idx, meta);
10373 			if (ret < 0)
10374 				return ret;
10375 			break;
10376 		case KF_ARG_PTR_TO_LIST_HEAD:
10377 			if (reg->type != PTR_TO_MAP_VALUE &&
10378 			    reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) {
10379 				verbose(env, "arg#%d expected pointer to map value or allocated object\n", i);
10380 				return -EINVAL;
10381 			}
10382 			if (reg->type == (PTR_TO_BTF_ID | MEM_ALLOC) && !reg->ref_obj_id) {
10383 				verbose(env, "allocated object must be referenced\n");
10384 				return -EINVAL;
10385 			}
10386 			ret = process_kf_arg_ptr_to_list_head(env, reg, regno, meta);
10387 			if (ret < 0)
10388 				return ret;
10389 			break;
10390 		case KF_ARG_PTR_TO_RB_ROOT:
10391 			if (reg->type != PTR_TO_MAP_VALUE &&
10392 			    reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) {
10393 				verbose(env, "arg#%d expected pointer to map value or allocated object\n", i);
10394 				return -EINVAL;
10395 			}
10396 			if (reg->type == (PTR_TO_BTF_ID | MEM_ALLOC) && !reg->ref_obj_id) {
10397 				verbose(env, "allocated object must be referenced\n");
10398 				return -EINVAL;
10399 			}
10400 			ret = process_kf_arg_ptr_to_rbtree_root(env, reg, regno, meta);
10401 			if (ret < 0)
10402 				return ret;
10403 			break;
10404 		case KF_ARG_PTR_TO_LIST_NODE:
10405 			if (reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) {
10406 				verbose(env, "arg#%d expected pointer to allocated object\n", i);
10407 				return -EINVAL;
10408 			}
10409 			if (!reg->ref_obj_id) {
10410 				verbose(env, "allocated object must be referenced\n");
10411 				return -EINVAL;
10412 			}
10413 			ret = process_kf_arg_ptr_to_list_node(env, reg, regno, meta);
10414 			if (ret < 0)
10415 				return ret;
10416 			break;
10417 		case KF_ARG_PTR_TO_RB_NODE:
10418 			if (meta->func_id == special_kfunc_list[KF_bpf_rbtree_remove]) {
10419 				if (!type_is_non_owning_ref(reg->type) || reg->ref_obj_id) {
10420 					verbose(env, "rbtree_remove node input must be non-owning ref\n");
10421 					return -EINVAL;
10422 				}
10423 				if (in_rbtree_lock_required_cb(env)) {
10424 					verbose(env, "rbtree_remove not allowed in rbtree cb\n");
10425 					return -EINVAL;
10426 				}
10427 			} else {
10428 				if (reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) {
10429 					verbose(env, "arg#%d expected pointer to allocated object\n", i);
10430 					return -EINVAL;
10431 				}
10432 				if (!reg->ref_obj_id) {
10433 					verbose(env, "allocated object must be referenced\n");
10434 					return -EINVAL;
10435 				}
10436 			}
10437 
10438 			ret = process_kf_arg_ptr_to_rbtree_node(env, reg, regno, meta);
10439 			if (ret < 0)
10440 				return ret;
10441 			break;
10442 		case KF_ARG_PTR_TO_BTF_ID:
10443 			/* Only base_type is checked, further checks are done here */
10444 			if ((base_type(reg->type) != PTR_TO_BTF_ID ||
10445 			     (bpf_type_has_unsafe_modifiers(reg->type) && !is_rcu_reg(reg))) &&
10446 			    !reg2btf_ids[base_type(reg->type)]) {
10447 				verbose(env, "arg#%d is %s ", i, reg_type_str(env, reg->type));
10448 				verbose(env, "expected %s or socket\n",
10449 					reg_type_str(env, base_type(reg->type) |
10450 							  (type_flag(reg->type) & BPF_REG_TRUSTED_MODIFIERS)));
10451 				return -EINVAL;
10452 			}
10453 			ret = process_kf_arg_ptr_to_btf_id(env, reg, ref_t, ref_tname, ref_id, meta, i);
10454 			if (ret < 0)
10455 				return ret;
10456 			break;
10457 		case KF_ARG_PTR_TO_MEM:
10458 			resolve_ret = btf_resolve_size(btf, ref_t, &type_size);
10459 			if (IS_ERR(resolve_ret)) {
10460 				verbose(env, "arg#%d reference type('%s %s') size cannot be determined: %ld\n",
10461 					i, btf_type_str(ref_t), ref_tname, PTR_ERR(resolve_ret));
10462 				return -EINVAL;
10463 			}
10464 			ret = check_mem_reg(env, reg, regno, type_size);
10465 			if (ret < 0)
10466 				return ret;
10467 			break;
10468 		case KF_ARG_PTR_TO_MEM_SIZE:
10469 		{
10470 			struct bpf_reg_state *size_reg = &regs[regno + 1];
10471 			const struct btf_param *size_arg = &args[i + 1];
10472 
10473 			ret = check_kfunc_mem_size_reg(env, size_reg, regno + 1);
10474 			if (ret < 0) {
10475 				verbose(env, "arg#%d arg#%d memory, len pair leads to invalid memory access\n", i, i + 1);
10476 				return ret;
10477 			}
10478 
10479 			if (is_kfunc_arg_const_mem_size(meta->btf, size_arg, size_reg)) {
10480 				if (meta->arg_constant.found) {
10481 					verbose(env, "verifier internal error: only one constant argument permitted\n");
10482 					return -EFAULT;
10483 				}
10484 				if (!tnum_is_const(size_reg->var_off)) {
10485 					verbose(env, "R%d must be a known constant\n", regno + 1);
10486 					return -EINVAL;
10487 				}
10488 				meta->arg_constant.found = true;
10489 				meta->arg_constant.value = size_reg->var_off.value;
10490 			}
10491 
10492 			/* Skip next '__sz' or '__szk' argument */
10493 			i++;
10494 			break;
10495 		}
10496 		case KF_ARG_PTR_TO_CALLBACK:
10497 			meta->subprogno = reg->subprogno;
10498 			break;
10499 		case KF_ARG_PTR_TO_REFCOUNTED_KPTR:
10500 			if (!type_is_ptr_alloc_obj(reg->type) && !type_is_non_owning_ref(reg->type)) {
10501 				verbose(env, "arg#%d is neither owning nor non-owning ref\n", i);
10502 				return -EINVAL;
10503 			}
10504 
10505 			rec = reg_btf_record(reg);
10506 			if (!rec) {
10507 				verbose(env, "verifier internal error: Couldn't find btf_record\n");
10508 				return -EFAULT;
10509 			}
10510 
10511 			if (rec->refcount_off < 0) {
10512 				verbose(env, "arg#%d doesn't point to a type with bpf_refcount field\n", i);
10513 				return -EINVAL;
10514 			}
10515 			if (rec->refcount_off >= 0) {
10516 				verbose(env, "bpf_refcount_acquire calls are disabled for now\n");
10517 				return -EINVAL;
10518 			}
10519 			meta->arg_refcount_acquire.btf = reg->btf;
10520 			meta->arg_refcount_acquire.btf_id = reg->btf_id;
10521 			break;
10522 		}
10523 	}
10524 
10525 	if (is_kfunc_release(meta) && !meta->release_regno) {
10526 		verbose(env, "release kernel function %s expects refcounted PTR_TO_BTF_ID\n",
10527 			func_name);
10528 		return -EINVAL;
10529 	}
10530 
10531 	return 0;
10532 }
10533 
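/* Resolve the kfunc targeted by a BPF_PSEUDO_KFUNC_CALL instruction: look up
 * its BTF, name, prototype and kfunc flags, and fill 'meta' with them.
 * Returns -EACCES if the kfunc is not registered for this program type.
 */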
10534 static int fetch_kfunc_meta(struct bpf_verifier_env *env,
10535 			    struct bpf_insn *insn,
10536 			    struct bpf_kfunc_call_arg_meta *meta,
10537 			    const char **kfunc_name)
10538 {
10539 	const struct btf_type *func, *func_proto;
10540 	u32 func_id, *kfunc_flags;
10541 	const char *func_name;
10542 	struct btf *desc_btf;
10543 
10544 	if (kfunc_name)
10545 		*kfunc_name = NULL;
10546 
10547 	if (!insn->imm)
10548 		return -EINVAL;
10549 
10550 	desc_btf = find_kfunc_desc_btf(env, insn->off);
10551 	if (IS_ERR(desc_btf))
10552 		return PTR_ERR(desc_btf);
10553 
10554 	func_id = insn->imm;
10555 	func = btf_type_by_id(desc_btf, func_id);
10556 	func_name = btf_name_by_offset(desc_btf, func->name_off);
10557 	if (kfunc_name)
10558 		*kfunc_name = func_name;
10559 	func_proto = btf_type_by_id(desc_btf, func->type);
10560 
10561 	kfunc_flags = btf_kfunc_id_set_contains(desc_btf, resolve_prog_type(env->prog), func_id);
10562 	if (!kfunc_flags) {
10563 		return -EACCES;
10564 	}
10565 
10566 	memset(meta, 0, sizeof(*meta));
10567 	meta->btf = desc_btf;
10568 	meta->func_id = func_id;
10569 	meta->kfunc_flags = *kfunc_flags;
10570 	meta->func_proto = func_proto;
10571 	meta->func_name = func_name;
10572 
10573 	return 0;
10574 }
10575 
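/* Verify a single kfunc call: resolve the kfunc, enforce capability,
 * sleepability and RCU constraints, check all arguments, perform any implied
 * reference release or owning -> non-owning conversion, and finally set up the
 * return value register R0 according to the kfunc's return type.
 */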
10576 static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
10577 			    int *insn_idx_p)
10578 {
10579 	const struct btf_type *t, *ptr_type;
10580 	u32 i, nargs, ptr_type_id, release_ref_obj_id;
10581 	struct bpf_reg_state *regs = cur_regs(env);
10582 	const char *func_name, *ptr_type_name;
10583 	bool sleepable, rcu_lock, rcu_unlock;
10584 	struct bpf_kfunc_call_arg_meta meta;
10585 	struct bpf_insn_aux_data *insn_aux;
10586 	int err, insn_idx = *insn_idx_p;
10587 	const struct btf_param *args;
10588 	const struct btf_type *ret_t;
10589 	struct btf *desc_btf;
10590 
10591 	/* skip for now, but return error when we find this in fixup_kfunc_call */
10592 	if (!insn->imm)
10593 		return 0;
10594 
10595 	err = fetch_kfunc_meta(env, insn, &meta, &func_name);
10596 	if (err == -EACCES && func_name)
10597 		verbose(env, "calling kernel function %s is not allowed\n", func_name);
10598 	if (err)
10599 		return err;
10600 	desc_btf = meta.btf;
10601 	insn_aux = &env->insn_aux_data[insn_idx];
10602 
10603 	insn_aux->is_iter_next = is_iter_next_kfunc(&meta);
10604 
10605 	if (is_kfunc_destructive(&meta) && !capable(CAP_SYS_BOOT)) {
10606 		verbose(env, "destructive kfunc calls require CAP_SYS_BOOT capability\n");
10607 		return -EACCES;
10608 	}
10609 
10610 	sleepable = is_kfunc_sleepable(&meta);
10611 	if (sleepable && !env->prog->aux->sleepable) {
10612 		verbose(env, "program must be sleepable to call sleepable kfunc %s\n", func_name);
10613 		return -EACCES;
10614 	}
10615 
10616 	rcu_lock = is_kfunc_bpf_rcu_read_lock(&meta);
10617 	rcu_unlock = is_kfunc_bpf_rcu_read_unlock(&meta);
10618 
10619 	if (env->cur_state->active_rcu_lock) {
10620 		struct bpf_func_state *state;
10621 		struct bpf_reg_state *reg;
10622 
10623 		if (rcu_lock) {
10624 			verbose(env, "nested rcu read lock (kernel function %s)\n", func_name);
10625 			return -EINVAL;
10626 		} else if (rcu_unlock) {
10627 			bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({
10628 				if (reg->type & MEM_RCU) {
10629 					reg->type &= ~(MEM_RCU | PTR_MAYBE_NULL);
10630 					reg->type |= PTR_UNTRUSTED;
10631 				}
10632 			}));
10633 			env->cur_state->active_rcu_lock = false;
10634 		} else if (sleepable) {
10635 			verbose(env, "kernel func %s is sleepable within rcu_read_lock region\n", func_name);
10636 			return -EACCES;
10637 		}
10638 	} else if (rcu_lock) {
10639 		env->cur_state->active_rcu_lock = true;
10640 	} else if (rcu_unlock) {
10641 		verbose(env, "unmatched rcu read unlock (kernel function %s)\n", func_name);
10642 		return -EINVAL;
10643 	}
10644 
10645 	/* Check the arguments */
10646 	err = check_kfunc_args(env, &meta, insn_idx);
10647 	if (err < 0)
10648 		return err;
10649 	/* In case of a release function, we get the register number of the
10650 	 * refcounted PTR_TO_BTF_ID in bpf_kfunc_call_arg_meta, so do the release now.
10651 	 */
10652 	if (meta.release_regno) {
10653 		err = release_reference(env, regs[meta.release_regno].ref_obj_id);
10654 		if (err) {
10655 			verbose(env, "kfunc %s#%d reference has not been acquired before\n",
10656 				func_name, meta.func_id);
10657 			return err;
10658 		}
10659 	}
10660 
10661 	if (meta.func_id == special_kfunc_list[KF_bpf_list_push_front_impl] ||
10662 	    meta.func_id == special_kfunc_list[KF_bpf_list_push_back_impl] ||
10663 	    meta.func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) {
10664 		release_ref_obj_id = regs[BPF_REG_2].ref_obj_id;
10665 		insn_aux->insert_off = regs[BPF_REG_2].off;
10666 		err = ref_convert_owning_non_owning(env, release_ref_obj_id);
10667 		if (err) {
10668 			verbose(env, "kfunc %s#%d conversion of owning ref to non-owning failed\n",
10669 				func_name, meta.func_id);
10670 			return err;
10671 		}
10672 
10673 		err = release_reference(env, release_ref_obj_id);
10674 		if (err) {
10675 			verbose(env, "kfunc %s#%d reference has not been acquired before\n",
10676 				func_name, meta.func_id);
10677 			return err;
10678 		}
10679 	}
10680 
10681 	if (meta.func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) {
10682 		err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
10683 					set_rbtree_add_callback_state);
10684 		if (err) {
10685 			verbose(env, "kfunc %s#%d failed callback verification\n",
10686 				func_name, meta.func_id);
10687 			return err;
10688 		}
10689 	}
10690 
10691 	for (i = 0; i < CALLER_SAVED_REGS; i++)
10692 		mark_reg_not_init(env, regs, caller_saved[i]);
10693 
10694 	/* Check return type */
10695 	t = btf_type_skip_modifiers(desc_btf, meta.func_proto->type, NULL);
10696 
10697 	if (is_kfunc_acquire(&meta) && !btf_type_is_struct_ptr(meta.btf, t)) {
10698 		/* Only exceptions are bpf_obj_new_impl and bpf_refcount_acquire_impl */
10699 		if (meta.btf != btf_vmlinux ||
10700 		    (meta.func_id != special_kfunc_list[KF_bpf_obj_new_impl] &&
10701 		     meta.func_id != special_kfunc_list[KF_bpf_refcount_acquire_impl])) {
10702 			verbose(env, "acquire kernel function does not return PTR_TO_BTF_ID\n");
10703 			return -EINVAL;
10704 		}
10705 	}
10706 
10707 	if (btf_type_is_scalar(t)) {
10708 		mark_reg_unknown(env, regs, BPF_REG_0);
10709 		mark_btf_func_reg_size(env, BPF_REG_0, t->size);
10710 	} else if (btf_type_is_ptr(t)) {
10711 		ptr_type = btf_type_skip_modifiers(desc_btf, t->type, &ptr_type_id);
10712 
10713 		if (meta.btf == btf_vmlinux && btf_id_set_contains(&special_kfunc_set, meta.func_id)) {
10714 			if (meta.func_id == special_kfunc_list[KF_bpf_obj_new_impl]) {
10715 				struct btf *ret_btf;
10716 				u32 ret_btf_id;
10717 
10718 				if (unlikely(!bpf_global_ma_set))
10719 					return -ENOMEM;
10720 
10721 				if (((u64)(u32)meta.arg_constant.value) != meta.arg_constant.value) {
10722 					verbose(env, "local type ID argument must be in range [0, U32_MAX]\n");
10723 					return -EINVAL;
10724 				}
10725 
10726 				ret_btf = env->prog->aux->btf;
10727 				ret_btf_id = meta.arg_constant.value;
10728 
10729 				/* This may be NULL due to user not supplying a BTF */
10730 				if (!ret_btf) {
10731 					verbose(env, "bpf_obj_new requires prog BTF\n");
10732 					return -EINVAL;
10733 				}
10734 
10735 				ret_t = btf_type_by_id(ret_btf, ret_btf_id);
10736 				if (!ret_t || !__btf_type_is_struct(ret_t)) {
10737 					verbose(env, "bpf_obj_new type ID argument must be of a struct\n");
10738 					return -EINVAL;
10739 				}
10740 
10741 				mark_reg_known_zero(env, regs, BPF_REG_0);
10742 				regs[BPF_REG_0].type = PTR_TO_BTF_ID | MEM_ALLOC;
10743 				regs[BPF_REG_0].btf = ret_btf;
10744 				regs[BPF_REG_0].btf_id = ret_btf_id;
10745 
10746 				insn_aux->obj_new_size = ret_t->size;
10747 				insn_aux->kptr_struct_meta =
10748 					btf_find_struct_meta(ret_btf, ret_btf_id);
10749 			} else if (meta.func_id == special_kfunc_list[KF_bpf_refcount_acquire_impl]) {
10750 				mark_reg_known_zero(env, regs, BPF_REG_0);
10751 				regs[BPF_REG_0].type = PTR_TO_BTF_ID | MEM_ALLOC;
10752 				regs[BPF_REG_0].btf = meta.arg_refcount_acquire.btf;
10753 				regs[BPF_REG_0].btf_id = meta.arg_refcount_acquire.btf_id;
10754 
10755 				insn_aux->kptr_struct_meta =
10756 					btf_find_struct_meta(meta.arg_refcount_acquire.btf,
10757 							     meta.arg_refcount_acquire.btf_id);
10758 			} else if (meta.func_id == special_kfunc_list[KF_bpf_list_pop_front] ||
10759 				   meta.func_id == special_kfunc_list[KF_bpf_list_pop_back]) {
10760 				struct btf_field *field = meta.arg_list_head.field;
10761 
10762 				mark_reg_graph_node(regs, BPF_REG_0, &field->graph_root);
10763 			} else if (meta.func_id == special_kfunc_list[KF_bpf_rbtree_remove] ||
10764 				   meta.func_id == special_kfunc_list[KF_bpf_rbtree_first]) {
10765 				struct btf_field *field = meta.arg_rbtree_root.field;
10766 
10767 				mark_reg_graph_node(regs, BPF_REG_0, &field->graph_root);
10768 			} else if (meta.func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx]) {
10769 				mark_reg_known_zero(env, regs, BPF_REG_0);
10770 				regs[BPF_REG_0].type = PTR_TO_BTF_ID | PTR_TRUSTED;
10771 				regs[BPF_REG_0].btf = desc_btf;
10772 				regs[BPF_REG_0].btf_id = meta.ret_btf_id;
10773 			} else if (meta.func_id == special_kfunc_list[KF_bpf_rdonly_cast]) {
10774 				ret_t = btf_type_by_id(desc_btf, meta.arg_constant.value);
10775 				if (!ret_t || !btf_type_is_struct(ret_t)) {
10776 					verbose(env,
10777 						"kfunc bpf_rdonly_cast type ID argument must be of a struct\n");
10778 					return -EINVAL;
10779 				}
10780 
10781 				mark_reg_known_zero(env, regs, BPF_REG_0);
10782 				regs[BPF_REG_0].type = PTR_TO_BTF_ID | PTR_UNTRUSTED;
10783 				regs[BPF_REG_0].btf = desc_btf;
10784 				regs[BPF_REG_0].btf_id = meta.arg_constant.value;
10785 			} else if (meta.func_id == special_kfunc_list[KF_bpf_dynptr_slice] ||
10786 				   meta.func_id == special_kfunc_list[KF_bpf_dynptr_slice_rdwr]) {
10787 				enum bpf_type_flag type_flag = get_dynptr_type_flag(meta.initialized_dynptr.type);
10788 
10789 				mark_reg_known_zero(env, regs, BPF_REG_0);
10790 
10791 				if (!meta.arg_constant.found) {
10792 					verbose(env, "verifier internal error: bpf_dynptr_slice(_rdwr) no constant size\n");
10793 					return -EFAULT;
10794 				}
10795 
10796 				regs[BPF_REG_0].mem_size = meta.arg_constant.value;
10797 
10798 				/* PTR_MAYBE_NULL will be added when is_kfunc_ret_null is checked */
10799 				regs[BPF_REG_0].type = PTR_TO_MEM | type_flag;
10800 
10801 				if (meta.func_id == special_kfunc_list[KF_bpf_dynptr_slice]) {
10802 					regs[BPF_REG_0].type |= MEM_RDONLY;
10803 				} else {
10804 					/* this will set env->seen_direct_write to true */
10805 					if (!may_access_direct_pkt_data(env, NULL, BPF_WRITE)) {
10806 						verbose(env, "the prog does not allow writes to packet data\n");
10807 						return -EINVAL;
10808 					}
10809 				}
10810 
10811 				if (!meta.initialized_dynptr.id) {
10812 					verbose(env, "verifier internal error: no dynptr id\n");
10813 					return -EFAULT;
10814 				}
10815 				regs[BPF_REG_0].dynptr_id = meta.initialized_dynptr.id;
10816 
10817 				/* we don't need to set BPF_REG_0's ref obj id
10818 				 * because packet slices are not refcounted (see
10819 				 * dynptr_type_refcounted)
10820 				 */
10821 			} else {
10822 				verbose(env, "kernel function %s unhandled dynamic return type\n",
10823 					meta.func_name);
10824 				return -EFAULT;
10825 			}
10826 		} else if (!__btf_type_is_struct(ptr_type)) {
10827 			if (!meta.r0_size) {
10828 				__u32 sz;
10829 
10830 				if (!IS_ERR(btf_resolve_size(desc_btf, ptr_type, &sz))) {
10831 					meta.r0_size = sz;
10832 					meta.r0_rdonly = true;
10833 				}
10834 			}
10835 			if (!meta.r0_size) {
10836 				ptr_type_name = btf_name_by_offset(desc_btf,
10837 								   ptr_type->name_off);
10838 				verbose(env,
10839 					"kernel function %s returns pointer type %s %s is not supported\n",
10840 					func_name,
10841 					btf_type_str(ptr_type),
10842 					ptr_type_name);
10843 				return -EINVAL;
10844 			}
10845 
10846 			mark_reg_known_zero(env, regs, BPF_REG_0);
10847 			regs[BPF_REG_0].type = PTR_TO_MEM;
10848 			regs[BPF_REG_0].mem_size = meta.r0_size;
10849 
10850 			if (meta.r0_rdonly)
10851 				regs[BPF_REG_0].type |= MEM_RDONLY;
10852 
10853 			/* Ensures we don't access the memory after a release_reference() */
10854 			if (meta.ref_obj_id)
10855 				regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id;
10856 		} else {
10857 			mark_reg_known_zero(env, regs, BPF_REG_0);
10858 			regs[BPF_REG_0].btf = desc_btf;
10859 			regs[BPF_REG_0].type = PTR_TO_BTF_ID;
10860 			regs[BPF_REG_0].btf_id = ptr_type_id;
10861 		}
10862 
10863 		if (is_kfunc_ret_null(&meta)) {
10864 			regs[BPF_REG_0].type |= PTR_MAYBE_NULL;
10865 			/* For mark_ptr_or_null_reg, see 93c230e3f5bd6 */
10866 			regs[BPF_REG_0].id = ++env->id_gen;
10867 		}
10868 		mark_btf_func_reg_size(env, BPF_REG_0, sizeof(void *));
10869 		if (is_kfunc_acquire(&meta)) {
10870 			int id = acquire_reference_state(env, insn_idx);
10871 
10872 			if (id < 0)
10873 				return id;
10874 			if (is_kfunc_ret_null(&meta))
10875 				regs[BPF_REG_0].id = id;
10876 			regs[BPF_REG_0].ref_obj_id = id;
10877 		} else if (meta.func_id == special_kfunc_list[KF_bpf_rbtree_first]) {
10878 			ref_set_non_owning(env, &regs[BPF_REG_0]);
10879 		}
10880 
10881 		if (reg_may_point_to_spin_lock(&regs[BPF_REG_0]) && !regs[BPF_REG_0].id)
10882 			regs[BPF_REG_0].id = ++env->id_gen;
10883 	} else if (btf_type_is_void(t)) {
10884 		if (meta.btf == btf_vmlinux && btf_id_set_contains(&special_kfunc_set, meta.func_id)) {
10885 			if (meta.func_id == special_kfunc_list[KF_bpf_obj_drop_impl]) {
10886 				insn_aux->kptr_struct_meta =
10887 					btf_find_struct_meta(meta.arg_obj_drop.btf,
10888 							     meta.arg_obj_drop.btf_id);
10889 			}
10890 		}
10891 	}
10892 
10893 	nargs = btf_type_vlen(meta.func_proto);
10894 	args = (const struct btf_param *)(meta.func_proto + 1);
10895 	for (i = 0; i < nargs; i++) {
10896 		u32 regno = i + 1;
10897 
10898 		t = btf_type_skip_modifiers(desc_btf, args[i].type, NULL);
10899 		if (btf_type_is_ptr(t))
10900 			mark_btf_func_reg_size(env, regno, sizeof(void *));
10901 		else
10902 			/* scalar. ensured by check_kfunc_args() above */
10903 			mark_btf_func_reg_size(env, regno, t->size);
10904 	}
10905 
10906 	if (is_iter_next_kfunc(&meta)) {
10907 		err = process_iter_next_call(env, insn_idx, &meta);
10908 		if (err)
10909 			return err;
10910 	}
10911 
10912 	return 0;
10913 }
10914 
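/* Signed overflow helpers used when propagating min/max bounds through
 * pointer/scalar arithmetic below; on overflow the caller saturates the bounds
 * to S64_MIN/S64_MAX (or the 32-bit equivalents). For example,
 * signed_add_overflows(S64_MAX, 1) returns true, so smin/smax become unbounded.
 */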
10915 static bool signed_add_overflows(s64 a, s64 b)
10916 {
10917 	/* Do the add in u64, where overflow is well-defined */
10918 	s64 res = (s64)((u64)a + (u64)b);
10919 
10920 	if (b < 0)
10921 		return res > a;
10922 	return res < a;
10923 }
10924 
10925 static bool signed_add32_overflows(s32 a, s32 b)
10926 {
10927 	/* Do the add in u32, where overflow is well-defined */
10928 	s32 res = (s32)((u32)a + (u32)b);
10929 
10930 	if (b < 0)
10931 		return res > a;
10932 	return res < a;
10933 }
10934 
10935 static bool signed_sub_overflows(s64 a, s64 b)
10936 {
10937 	/* Do the sub in u64, where overflow is well-defined */
10938 	s64 res = (s64)((u64)a - (u64)b);
10939 
10940 	if (b < 0)
10941 		return res < a;
10942 	return res > a;
10943 }
10944 
10945 static bool signed_sub32_overflows(s32 a, s32 b)
10946 {
10947 	/* Do the sub in u32, where overflow is well-defined */
10948 	s32 res = (s32)((u32)a - (u32)b);
10949 
10950 	if (b < 0)
10951 		return res < a;
10952 	return res > a;
10953 }
10954 
10955 static bool check_reg_sane_offset(struct bpf_verifier_env *env,
10956 				  const struct bpf_reg_state *reg,
10957 				  enum bpf_reg_type type)
10958 {
10959 	bool known = tnum_is_const(reg->var_off);
10960 	s64 val = reg->var_off.value;
10961 	s64 smin = reg->smin_value;
10962 
10963 	if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) {
10964 		verbose(env, "math between %s pointer and %lld is not allowed\n",
10965 			reg_type_str(env, type), val);
10966 		return false;
10967 	}
10968 
10969 	if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) {
10970 		verbose(env, "%s pointer offset %d is not allowed\n",
10971 			reg_type_str(env, type), reg->off);
10972 		return false;
10973 	}
10974 
10975 	if (smin == S64_MIN) {
10976 		verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n",
10977 			reg_type_str(env, type));
10978 		return false;
10979 	}
10980 
10981 	if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) {
10982 		verbose(env, "value %lld makes %s pointer be out of bounds\n",
10983 			smin, reg_type_str(env, type));
10984 		return false;
10985 	}
10986 
10987 	return true;
10988 }
10989 
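/* Reasons why ALU sanitation (Spectre v1 pointer masking) cannot be applied;
 * mapped to the corresponding user-visible messages in sanitize_err().
 */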
10990 enum {
10991 	REASON_BOUNDS	= -1,
10992 	REASON_TYPE	= -2,
10993 	REASON_PATHS	= -3,
10994 	REASON_LIMIT	= -4,
10995 	REASON_STACK	= -5,
10996 };
10997 
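/* Compute the maximum number of bytes (*alu_limit) the pointer may legally be
 * moved in the masking direction, based on the pointer type and its current
 * offset/bounds. Only PTR_TO_STACK and PTR_TO_MAP_VALUE are supported; other
 * pointer types fail with REASON_TYPE.
 */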
10998 static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
10999 			      u32 *alu_limit, bool mask_to_left)
11000 {
11001 	u32 max = 0, ptr_limit = 0;
11002 
11003 	switch (ptr_reg->type) {
11004 	case PTR_TO_STACK:
11005 		/* Offset 0 is out-of-bounds, but acceptable start for the
11006 		 * left direction, see BPF_REG_FP. Also, unknown scalar
11007 		 * offset where we would need to deal with min/max bounds is
11008 		 * currently prohibited for unprivileged.
11009 		 */
11010 		max = MAX_BPF_STACK + mask_to_left;
11011 		ptr_limit = -(ptr_reg->var_off.value + ptr_reg->off);
11012 		break;
11013 	case PTR_TO_MAP_VALUE:
11014 		max = ptr_reg->map_ptr->value_size;
11015 		ptr_limit = (mask_to_left ?
11016 			     ptr_reg->smin_value :
11017 			     ptr_reg->umax_value) + ptr_reg->off;
11018 		break;
11019 	default:
11020 		return REASON_TYPE;
11021 	}
11022 
11023 	if (ptr_limit >= max)
11024 		return REASON_LIMIT;
11025 	*alu_limit = ptr_limit;
11026 	return 0;
11027 }
11028 
11029 static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env,
11030 				    const struct bpf_insn *insn)
11031 {
11032 	return env->bypass_spec_v1 || BPF_SRC(insn->code) == BPF_K;
11033 }
11034 
11035 static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux,
11036 				       u32 alu_state, u32 alu_limit)
11037 {
11038 	/* If we arrived here from different branches with different
11039 	 * state or limits to sanitize, then this won't work.
11040 	 */
11041 	if (aux->alu_state &&
11042 	    (aux->alu_state != alu_state ||
11043 	     aux->alu_limit != alu_limit))
11044 		return REASON_PATHS;
11045 
11046 	/* Corresponding fixup done in do_misc_fixups(). */
11047 	aux->alu_state = alu_state;
11048 	aux->alu_limit = alu_limit;
11049 	return 0;
11050 }
11051 
11052 static int sanitize_val_alu(struct bpf_verifier_env *env,
11053 			    struct bpf_insn *insn)
11054 {
11055 	struct bpf_insn_aux_data *aux = cur_aux(env);
11056 
11057 	if (can_skip_alu_sanitation(env, insn))
11058 		return 0;
11059 
11060 	return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0);
11061 }
11062 
11063 static bool sanitize_needed(u8 opcode)
11064 {
11065 	return opcode == BPF_ADD || opcode == BPF_SUB;
11066 }
11067 
11068 struct bpf_sanitize_info {
11069 	struct bpf_insn_aux_data aux;
11070 	bool mask_to_left;
11071 };
11072 
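/* Push a speculative verification path starting at 'next_idx' and mark the
 * destination (and, for BPF_X, also the source) register as unknown there, so
 * that the speculative branch is explored with conservative register state.
 */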
11073 static struct bpf_verifier_state *
11074 sanitize_speculative_path(struct bpf_verifier_env *env,
11075 			  const struct bpf_insn *insn,
11076 			  u32 next_idx, u32 curr_idx)
11077 {
11078 	struct bpf_verifier_state *branch;
11079 	struct bpf_reg_state *regs;
11080 
11081 	branch = push_stack(env, next_idx, curr_idx, true);
11082 	if (branch && insn) {
11083 		regs = branch->frame[branch->curframe]->regs;
11084 		if (BPF_SRC(insn->code) == BPF_K) {
11085 			mark_reg_unknown(env, regs, insn->dst_reg);
11086 		} else if (BPF_SRC(insn->code) == BPF_X) {
11087 			mark_reg_unknown(env, regs, insn->dst_reg);
11088 			mark_reg_unknown(env, regs, insn->src_reg);
11089 		}
11090 	}
11091 	return branch;
11092 }
11093 
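/* Prepare Spectre v1 masking for a pointer ADD/SUB with a scalar offset.
 * Called twice per instruction: first with commit_window=false to collect the
 * ALU limit/state and push the speculative, truncated path for simulation, and
 * again with commit_window=true after the operation to narrow the limit based
 * on the observed pointer movement and store it in insn aux data for the later
 * rewrite in do_misc_fixups().
 */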
11094 static int sanitize_ptr_alu(struct bpf_verifier_env *env,
11095 			    struct bpf_insn *insn,
11096 			    const struct bpf_reg_state *ptr_reg,
11097 			    const struct bpf_reg_state *off_reg,
11098 			    struct bpf_reg_state *dst_reg,
11099 			    struct bpf_sanitize_info *info,
11100 			    const bool commit_window)
11101 {
11102 	struct bpf_insn_aux_data *aux = commit_window ? cur_aux(env) : &info->aux;
11103 	struct bpf_verifier_state *vstate = env->cur_state;
11104 	bool off_is_imm = tnum_is_const(off_reg->var_off);
11105 	bool off_is_neg = off_reg->smin_value < 0;
11106 	bool ptr_is_dst_reg = ptr_reg == dst_reg;
11107 	u8 opcode = BPF_OP(insn->code);
11108 	u32 alu_state, alu_limit;
11109 	struct bpf_reg_state tmp;
11110 	bool ret;
11111 	int err;
11112 
11113 	if (can_skip_alu_sanitation(env, insn))
11114 		return 0;
11115 
11116 	/* We already marked aux for masking from non-speculative
11117 	 * paths, thus we got here in the first place. We only care
11118 	 * to explore bad access from here.
11119 	 */
11120 	if (vstate->speculative)
11121 		goto do_sim;
11122 
11123 	if (!commit_window) {
11124 		if (!tnum_is_const(off_reg->var_off) &&
11125 		    (off_reg->smin_value < 0) != (off_reg->smax_value < 0))
11126 			return REASON_BOUNDS;
11127 
11128 		info->mask_to_left = (opcode == BPF_ADD &&  off_is_neg) ||
11129 				     (opcode == BPF_SUB && !off_is_neg);
11130 	}
11131 
11132 	err = retrieve_ptr_limit(ptr_reg, &alu_limit, info->mask_to_left);
11133 	if (err < 0)
11134 		return err;
11135 
11136 	if (commit_window) {
11137 		/* In commit phase we narrow the masking window based on
11138 		 * the observed pointer move after the simulated operation.
11139 		 */
11140 		alu_state = info->aux.alu_state;
11141 		alu_limit = abs(info->aux.alu_limit - alu_limit);
11142 	} else {
11143 		alu_state  = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
11144 		alu_state |= off_is_imm ? BPF_ALU_IMMEDIATE : 0;
11145 		alu_state |= ptr_is_dst_reg ?
11146 			     BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
11147 
11148 		/* Limit pruning on unknown scalars to enable deep search for
11149 		 * potential masking differences from other program paths.
11150 		 */
11151 		if (!off_is_imm)
11152 			env->explore_alu_limits = true;
11153 	}
11154 
11155 	err = update_alu_sanitation_state(aux, alu_state, alu_limit);
11156 	if (err < 0)
11157 		return err;
11158 do_sim:
11159 	/* If we're in commit phase, we're done here given we already
11160 	 * pushed the truncated dst_reg into the speculative verification
11161 	 * stack.
11162 	 *
11163 	 * Also, when the register is a known constant, we rewrite the register-based
11164 	 * operation to an immediate-based one, and thus do not need masking (and as
11165 	 * a consequence, do not need to simulate the zero-truncation either).
11166 	 */
11167 	if (commit_window || off_is_imm)
11168 		return 0;
11169 
11170 	/* Simulate and find potential out-of-bounds access under
11171 	 * speculative execution from truncation as a result of
11172 	 * masking when off was not within expected range. If off
11173 	 * sits in dst, then we temporarily need to move ptr there
11174 	 * to simulate dst (== 0) +/-= ptr. Needed, for example,
11175 	 * for cases where we use K-based arithmetic in one direction
11176 	 * and truncated reg-based in the other in order to explore
11177 	 * bad access.
11178 	 */
11179 	if (!ptr_is_dst_reg) {
11180 		tmp = *dst_reg;
11181 		copy_register_state(dst_reg, ptr_reg);
11182 	}
11183 	ret = sanitize_speculative_path(env, NULL, env->insn_idx + 1,
11184 					env->insn_idx);
11185 	if (!ptr_is_dst_reg && ret)
11186 		*dst_reg = tmp;
11187 	return !ret ? REASON_STACK : 0;
11188 }
11189 
11190 static void sanitize_mark_insn_seen(struct bpf_verifier_env *env)
11191 {
11192 	struct bpf_verifier_state *vstate = env->cur_state;
11193 
11194 	/* If we simulate paths under speculation, we don't update the
11195 	 * insn as 'seen' such that when we verify unreachable paths in
11196 	 * the non-speculative domain, sanitize_dead_code() can still
11197 	 * rewrite/sanitize them.
11198 	 */
11199 	if (!vstate->speculative)
11200 		env->insn_aux_data[env->insn_idx].seen = env->pass_cnt;
11201 }
11202 
11203 static int sanitize_err(struct bpf_verifier_env *env,
11204 			const struct bpf_insn *insn, int reason,
11205 			const struct bpf_reg_state *off_reg,
11206 			const struct bpf_reg_state *dst_reg)
11207 {
11208 	static const char *err = "pointer arithmetic with it prohibited for !root";
11209 	const char *op = BPF_OP(insn->code) == BPF_ADD ? "add" : "sub";
11210 	u32 dst = insn->dst_reg, src = insn->src_reg;
11211 
11212 	switch (reason) {
11213 	case REASON_BOUNDS:
11214 		verbose(env, "R%d has unknown scalar with mixed signed bounds, %s\n",
11215 			off_reg == dst_reg ? dst : src, err);
11216 		break;
11217 	case REASON_TYPE:
11218 		verbose(env, "R%d has pointer with unsupported alu operation, %s\n",
11219 			off_reg == dst_reg ? src : dst, err);
11220 		break;
11221 	case REASON_PATHS:
11222 		verbose(env, "R%d tried to %s from different maps, paths or scalars, %s\n",
11223 			dst, op, err);
11224 		break;
11225 	case REASON_LIMIT:
11226 		verbose(env, "R%d tried to %s beyond pointer bounds, %s\n",
11227 			dst, op, err);
11228 		break;
11229 	case REASON_STACK:
11230 		verbose(env, "R%d could not be pushed for speculative verification, %s\n",
11231 			dst, err);
11232 		break;
11233 	default:
11234 		verbose(env, "verifier internal error: unknown reason (%d)\n",
11235 			reason);
11236 		break;
11237 	}
11238 
11239 	return -EACCES;
11240 }
11241 
11242 /* check that stack access falls within stack limits and that 'reg' doesn't
11243  * have a variable offset.
11244  *
11245  * Variable offset is prohibited for unprivileged mode for simplicity since it
11246  * requires corresponding support in Spectre masking for stack ALU.  See also
11247  * retrieve_ptr_limit().
11248  *
11249  *
11250  * 'off' includes 'reg->off'.
11251  */
11252 static int check_stack_access_for_ptr_arithmetic(
11253 				struct bpf_verifier_env *env,
11254 				int regno,
11255 				const struct bpf_reg_state *reg,
11256 				int off)
11257 {
11258 	if (!tnum_is_const(reg->var_off)) {
11259 		char tn_buf[48];
11260 
11261 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
11262 		verbose(env, "R%d variable stack access prohibited for !root, var_off=%s off=%d\n",
11263 			regno, tn_buf, off);
11264 		return -EACCES;
11265 	}
11266 
11267 	if (off >= 0 || off < -MAX_BPF_STACK) {
11268 		verbose(env, "R%d stack pointer arithmetic goes out of range, "
11269 			"prohibited for !root; off=%d\n", regno, off);
11270 		return -EACCES;
11271 	}
11272 
11273 	return 0;
11274 }
11275 
11276 static int sanitize_check_bounds(struct bpf_verifier_env *env,
11277 				 const struct bpf_insn *insn,
11278 				 const struct bpf_reg_state *dst_reg)
11279 {
11280 	u32 dst = insn->dst_reg;
11281 
11282 	/* For unprivileged we require that the resulting offset be in bounds
11283 	 * so that the access can be sanitized later on.
11284 	 */
11285 	if (env->bypass_spec_v1)
11286 		return 0;
11287 
11288 	switch (dst_reg->type) {
11289 	case PTR_TO_STACK:
11290 		if (check_stack_access_for_ptr_arithmetic(env, dst, dst_reg,
11291 					dst_reg->off + dst_reg->var_off.value))
11292 			return -EACCES;
11293 		break;
11294 	case PTR_TO_MAP_VALUE:
11295 		if (check_map_access(env, dst, dst_reg->off, 1, false, ACCESS_HELPER)) {
11296 			verbose(env, "R%d pointer arithmetic of map value goes out of range, "
11297 				"prohibited for !root\n", dst);
11298 			return -EACCES;
11299 		}
11300 		break;
11301 	default:
11302 		break;
11303 	}
11304 
11305 	return 0;
11306 }
11307 
11308 /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
11309  * Caller should also handle BPF_MOV case separately.
11310  * If we return -EACCES, caller may want to try again treating pointer as a
11311  * scalar.  So we only emit a diagnostic if !env->allow_ptr_leaks.
11312  */
11313 static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
11314 				   struct bpf_insn *insn,
11315 				   const struct bpf_reg_state *ptr_reg,
11316 				   const struct bpf_reg_state *off_reg)
11317 {
11318 	struct bpf_verifier_state *vstate = env->cur_state;
11319 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
11320 	struct bpf_reg_state *regs = state->regs, *dst_reg;
11321 	bool known = tnum_is_const(off_reg->var_off);
11322 	s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value,
11323 	    smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
11324 	u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
11325 	    umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
11326 	struct bpf_sanitize_info info = {};
11327 	u8 opcode = BPF_OP(insn->code);
11328 	u32 dst = insn->dst_reg;
11329 	int ret;
11330 
11331 	dst_reg = &regs[dst];
11332 
11333 	if ((known && (smin_val != smax_val || umin_val != umax_val)) ||
11334 	    smin_val > smax_val || umin_val > umax_val) {
11335 		/* Taint dst register if offset had invalid bounds derived from
11336 		 * e.g. dead branches.
11337 		 */
11338 		__mark_reg_unknown(env, dst_reg);
11339 		return 0;
11340 	}
11341 
11342 	if (BPF_CLASS(insn->code) != BPF_ALU64) {
11343 		/* 32-bit ALU ops on pointers produce (meaningless) scalars */
11344 		if (opcode == BPF_SUB && env->allow_ptr_leaks) {
11345 			__mark_reg_unknown(env, dst_reg);
11346 			return 0;
11347 		}
11348 
11349 		verbose(env,
11350 			"R%d 32-bit pointer arithmetic prohibited\n",
11351 			dst);
11352 		return -EACCES;
11353 	}
11354 
11355 	if (ptr_reg->type & PTR_MAYBE_NULL) {
11356 		verbose(env, "R%d pointer arithmetic on %s prohibited, null-check it first\n",
11357 			dst, reg_type_str(env, ptr_reg->type));
11358 		return -EACCES;
11359 	}
11360 
11361 	switch (base_type(ptr_reg->type)) {
11362 	case CONST_PTR_TO_MAP:
11363 		/* smin_val represents the known value */
11364 		if (known && smin_val == 0 && opcode == BPF_ADD)
11365 			break;
11366 		fallthrough;
11367 	case PTR_TO_PACKET_END:
11368 	case PTR_TO_SOCKET:
11369 	case PTR_TO_SOCK_COMMON:
11370 	case PTR_TO_TCP_SOCK:
11371 	case PTR_TO_XDP_SOCK:
11372 		verbose(env, "R%d pointer arithmetic on %s prohibited\n",
11373 			dst, reg_type_str(env, ptr_reg->type));
11374 		return -EACCES;
11375 	default:
11376 		break;
11377 	}
11378 
11379 	/* In case of 'scalar += pointer', dst_reg inherits pointer type and id.
11380 	 * The id may be overwritten later if we create a new variable offset.
11381 	 */
11382 	dst_reg->type = ptr_reg->type;
11383 	dst_reg->id = ptr_reg->id;
11384 
11385 	if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) ||
11386 	    !check_reg_sane_offset(env, ptr_reg, ptr_reg->type))
11387 		return -EINVAL;
11388 
11389 	/* pointer types do not carry 32-bit bounds at the moment. */
11390 	__mark_reg32_unbounded(dst_reg);
11391 
11392 	if (sanitize_needed(opcode)) {
11393 		ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg,
11394 				       &info, false);
11395 		if (ret < 0)
11396 			return sanitize_err(env, insn, ret, off_reg, dst_reg);
11397 	}
11398 
11399 	switch (opcode) {
11400 	case BPF_ADD:
11401 		/* We can take a fixed offset as long as it doesn't overflow
11402 		 * the s32 'off' field
11403 		 */
11404 		if (known && (ptr_reg->off + smin_val ==
11405 			      (s64)(s32)(ptr_reg->off + smin_val))) {
11406 			/* pointer += K.  Accumulate it into fixed offset */
11407 			dst_reg->smin_value = smin_ptr;
11408 			dst_reg->smax_value = smax_ptr;
11409 			dst_reg->umin_value = umin_ptr;
11410 			dst_reg->umax_value = umax_ptr;
11411 			dst_reg->var_off = ptr_reg->var_off;
11412 			dst_reg->off = ptr_reg->off + smin_val;
11413 			dst_reg->raw = ptr_reg->raw;
11414 			break;
11415 		}
11416 		/* A new variable offset is created.  Note that off_reg->off
11417 		 * == 0, since it's a scalar.
11418 		 * dst_reg gets the pointer type and since a scalar value was
11419 		 * added to the pointer, give it a new 'id' if it's a
11420 		 * PTR_TO_PACKET.
11421 		 * This creates a new 'base' pointer; off_reg (the variable
11422 		 * part) gets added into the variable offset, and we copy the
11423 		 * fixed offset from ptr_reg.
11424 		 */
11425 		if (signed_add_overflows(smin_ptr, smin_val) ||
11426 		    signed_add_overflows(smax_ptr, smax_val)) {
11427 			dst_reg->smin_value = S64_MIN;
11428 			dst_reg->smax_value = S64_MAX;
11429 		} else {
11430 			dst_reg->smin_value = smin_ptr + smin_val;
11431 			dst_reg->smax_value = smax_ptr + smax_val;
11432 		}
11433 		if (umin_ptr + umin_val < umin_ptr ||
11434 		    umax_ptr + umax_val < umax_ptr) {
11435 			dst_reg->umin_value = 0;
11436 			dst_reg->umax_value = U64_MAX;
11437 		} else {
11438 			dst_reg->umin_value = umin_ptr + umin_val;
11439 			dst_reg->umax_value = umax_ptr + umax_val;
11440 		}
11441 		dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off);
11442 		dst_reg->off = ptr_reg->off;
11443 		dst_reg->raw = ptr_reg->raw;
11444 		if (reg_is_pkt_pointer(ptr_reg)) {
11445 			dst_reg->id = ++env->id_gen;
11446 			/* something was added to pkt_ptr, set range to zero */
11447 			memset(&dst_reg->raw, 0, sizeof(dst_reg->raw));
11448 		}
11449 		break;
11450 	case BPF_SUB:
11451 		if (dst_reg == off_reg) {
11452 			/* scalar -= pointer.  Creates an unknown scalar */
11453 			verbose(env, "R%d tried to subtract pointer from scalar\n",
11454 				dst);
11455 			return -EACCES;
11456 		}
11457 		/* We don't allow subtraction from FP, because (according to
11458 		 * test_verifier.c test "invalid fp arithmetic", JITs might not
11459 		 * be able to deal with it.
11460 		 */
11461 		if (ptr_reg->type == PTR_TO_STACK) {
11462 			verbose(env, "R%d subtraction from stack pointer prohibited\n",
11463 				dst);
11464 			return -EACCES;
11465 		}
11466 		if (known && (ptr_reg->off - smin_val ==
11467 			      (s64)(s32)(ptr_reg->off - smin_val))) {
11468 			/* pointer -= K.  Subtract it from fixed offset */
11469 			dst_reg->smin_value = smin_ptr;
11470 			dst_reg->smax_value = smax_ptr;
11471 			dst_reg->umin_value = umin_ptr;
11472 			dst_reg->umax_value = umax_ptr;
11473 			dst_reg->var_off = ptr_reg->var_off;
11474 			dst_reg->id = ptr_reg->id;
11475 			dst_reg->off = ptr_reg->off - smin_val;
11476 			dst_reg->raw = ptr_reg->raw;
11477 			break;
11478 		}
11479 		/* A new variable offset is created.  If the subtrahend is known
11480 		 * nonnegative, then any reg->range we had before is still good.
11481 		 */
11482 		if (signed_sub_overflows(smin_ptr, smax_val) ||
11483 		    signed_sub_overflows(smax_ptr, smin_val)) {
11484 			/* Overflow possible, we know nothing */
11485 			dst_reg->smin_value = S64_MIN;
11486 			dst_reg->smax_value = S64_MAX;
11487 		} else {
11488 			dst_reg->smin_value = smin_ptr - smax_val;
11489 			dst_reg->smax_value = smax_ptr - smin_val;
11490 		}
11491 		if (umin_ptr < umax_val) {
11492 			/* Overflow possible, we know nothing */
11493 			dst_reg->umin_value = 0;
11494 			dst_reg->umax_value = U64_MAX;
11495 		} else {
11496 			/* Cannot overflow (as long as bounds are consistent) */
11497 			dst_reg->umin_value = umin_ptr - umax_val;
11498 			dst_reg->umax_value = umax_ptr - umin_val;
11499 		}
11500 		dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off);
11501 		dst_reg->off = ptr_reg->off;
11502 		dst_reg->raw = ptr_reg->raw;
11503 		if (reg_is_pkt_pointer(ptr_reg)) {
11504 			dst_reg->id = ++env->id_gen;
11505 			/* a negative subtrahend can move pkt_ptr forward, so clear the range */
11506 			if (smin_val < 0)
11507 				memset(&dst_reg->raw, 0, sizeof(dst_reg->raw));
11508 		}
11509 		break;
11510 	case BPF_AND:
11511 	case BPF_OR:
11512 	case BPF_XOR:
11513 		/* bitwise ops on pointers are troublesome, prohibit. */
11514 		verbose(env, "R%d bitwise operator %s on pointer prohibited\n",
11515 			dst, bpf_alu_string[opcode >> 4]);
11516 		return -EACCES;
11517 	default:
11518 		/* other operators (e.g. MUL,LSH) produce non-pointer results */
11519 		verbose(env, "R%d pointer arithmetic with %s operator prohibited\n",
11520 			dst, bpf_alu_string[opcode >> 4]);
11521 		return -EACCES;
11522 	}
11523 
11524 	if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type))
11525 		return -EINVAL;
11526 	reg_bounds_sync(dst_reg);
11527 	if (sanitize_check_bounds(env, insn, dst_reg) < 0)
11528 		return -EACCES;
11529 	if (sanitize_needed(opcode)) {
11530 		ret = sanitize_ptr_alu(env, insn, dst_reg, off_reg, dst_reg,
11531 				       &info, true);
11532 		if (ret < 0)
11533 			return sanitize_err(env, insn, ret, off_reg, dst_reg);
11534 	}
11535 
11536 	return 0;
11537 }
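/* A sketch of how the BPF_ADD cases above play out; the register values are
 * made up for illustration:
 *
 *   r1 = r10;                // r1: PTR_TO_STACK, off = 0
 *   r1 += -16;               // known constant: fixed-offset path,
 *                            // r1 keeps its var_off and gets off = -16
 *   r2 = <scalar in [0, 64]>;
 *   r1 += r2;                // unknown scalar: variable-offset path,
 *                            // off stays -16, var_off = tnum_add(...),
 *                            // signed/unsigned bounds grow by [0, 64]
 *
 * The result is then run through check_reg_sane_offset(), reg_bounds_sync()
 * and, for unprivileged programs, sanitize_check_bounds().
 */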
11538 
11539 static void scalar32_min_max_add(struct bpf_reg_state *dst_reg,
11540 				 struct bpf_reg_state *src_reg)
11541 {
11542 	s32 smin_val = src_reg->s32_min_value;
11543 	s32 smax_val = src_reg->s32_max_value;
11544 	u32 umin_val = src_reg->u32_min_value;
11545 	u32 umax_val = src_reg->u32_max_value;
11546 
11547 	if (signed_add32_overflows(dst_reg->s32_min_value, smin_val) ||
11548 	    signed_add32_overflows(dst_reg->s32_max_value, smax_val)) {
11549 		dst_reg->s32_min_value = S32_MIN;
11550 		dst_reg->s32_max_value = S32_MAX;
11551 	} else {
11552 		dst_reg->s32_min_value += smin_val;
11553 		dst_reg->s32_max_value += smax_val;
11554 	}
11555 	if (dst_reg->u32_min_value + umin_val < umin_val ||
11556 	    dst_reg->u32_max_value + umax_val < umax_val) {
11557 		dst_reg->u32_min_value = 0;
11558 		dst_reg->u32_max_value = U32_MAX;
11559 	} else {
11560 		dst_reg->u32_min_value += umin_val;
11561 		dst_reg->u32_max_value += umax_val;
11562 	}
11563 }
11564 
11565 static void scalar_min_max_add(struct bpf_reg_state *dst_reg,
11566 			       struct bpf_reg_state *src_reg)
11567 {
11568 	s64 smin_val = src_reg->smin_value;
11569 	s64 smax_val = src_reg->smax_value;
11570 	u64 umin_val = src_reg->umin_value;
11571 	u64 umax_val = src_reg->umax_value;
11572 
11573 	if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
11574 	    signed_add_overflows(dst_reg->smax_value, smax_val)) {
11575 		dst_reg->smin_value = S64_MIN;
11576 		dst_reg->smax_value = S64_MAX;
11577 	} else {
11578 		dst_reg->smin_value += smin_val;
11579 		dst_reg->smax_value += smax_val;
11580 	}
11581 	if (dst_reg->umin_value + umin_val < umin_val ||
11582 	    dst_reg->umax_value + umax_val < umax_val) {
11583 		dst_reg->umin_value = 0;
11584 		dst_reg->umax_value = U64_MAX;
11585 	} else {
11586 		dst_reg->umin_value += umin_val;
11587 		dst_reg->umax_value += umax_val;
11588 	}
11589 }
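/* Worked example for the 64-bit add above (illustrative values): dst in
 * [10, 20] plus src in [1, 2] gives [11, 22] for both the signed and
 * unsigned bounds.  If instead dst->umax_value were U64_MAX - 7 and
 * umax_val were 16, the unsigned addition would wrap, so the unsigned
 * bounds are reset to [0, U64_MAX]; signed overflow likewise resets the
 * signed bounds to [S64_MIN, S64_MAX].
 */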
11590 
11591 static void scalar32_min_max_sub(struct bpf_reg_state *dst_reg,
11592 				 struct bpf_reg_state *src_reg)
11593 {
11594 	s32 smin_val = src_reg->s32_min_value;
11595 	s32 smax_val = src_reg->s32_max_value;
11596 	u32 umin_val = src_reg->u32_min_value;
11597 	u32 umax_val = src_reg->u32_max_value;
11598 
11599 	if (signed_sub32_overflows(dst_reg->s32_min_value, smax_val) ||
11600 	    signed_sub32_overflows(dst_reg->s32_max_value, smin_val)) {
11601 		/* Overflow possible, we know nothing */
11602 		dst_reg->s32_min_value = S32_MIN;
11603 		dst_reg->s32_max_value = S32_MAX;
11604 	} else {
11605 		dst_reg->s32_min_value -= smax_val;
11606 		dst_reg->s32_max_value -= smin_val;
11607 	}
11608 	if (dst_reg->u32_min_value < umax_val) {
11609 		/* Overflow possible, we know nothing */
11610 		dst_reg->u32_min_value = 0;
11611 		dst_reg->u32_max_value = U32_MAX;
11612 	} else {
11613 		/* Cannot overflow (as long as bounds are consistent) */
11614 		dst_reg->u32_min_value -= umax_val;
11615 		dst_reg->u32_max_value -= umin_val;
11616 	}
11617 }
11618 
11619 static void scalar_min_max_sub(struct bpf_reg_state *dst_reg,
11620 			       struct bpf_reg_state *src_reg)
11621 {
11622 	s64 smin_val = src_reg->smin_value;
11623 	s64 smax_val = src_reg->smax_value;
11624 	u64 umin_val = src_reg->umin_value;
11625 	u64 umax_val = src_reg->umax_value;
11626 
11627 	if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
11628 	    signed_sub_overflows(dst_reg->smax_value, smin_val)) {
11629 		/* Overflow possible, we know nothing */
11630 		dst_reg->smin_value = S64_MIN;
11631 		dst_reg->smax_value = S64_MAX;
11632 	} else {
11633 		dst_reg->smin_value -= smax_val;
11634 		dst_reg->smax_value -= smin_val;
11635 	}
11636 	if (dst_reg->umin_value < umax_val) {
11637 		/* Overflow possible, we know nothing */
11638 		dst_reg->umin_value = 0;
11639 		dst_reg->umax_value = U64_MAX;
11640 	} else {
11641 		/* Cannot overflow (as long as bounds are consistent) */
11642 		dst_reg->umin_value -= umax_val;
11643 		dst_reg->umax_value -= umin_val;
11644 	}
11645 }
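/* Worked example for the 64-bit subtract above (illustrative values):
 * dst in [10, 20] minus src in [1, 4] gives smin = 10 - 4 = 6 and
 * smax = 20 - 1 = 19.  For the unsigned bounds, if dst->umin_value were
 * smaller than umax_val the result could wrap below zero and the bounds
 * would be reset to [0, U64_MAX]; here 10 >= 4, so umin = 6 and umax = 19.
 */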
11646 
11647 static void scalar32_min_max_mul(struct bpf_reg_state *dst_reg,
11648 				 struct bpf_reg_state *src_reg)
11649 {
11650 	s32 smin_val = src_reg->s32_min_value;
11651 	u32 umin_val = src_reg->u32_min_value;
11652 	u32 umax_val = src_reg->u32_max_value;
11653 
11654 	if (smin_val < 0 || dst_reg->s32_min_value < 0) {
11655 		/* Ain't nobody got time to multiply that sign */
11656 		__mark_reg32_unbounded(dst_reg);
11657 		return;
11658 	}
11659 	/* Both values are positive, so we can work with unsigned and
11660 	 * copy the result to signed (unless it exceeds S32_MAX).
11661 	 */
11662 	if (umax_val > U16_MAX || dst_reg->u32_max_value > U16_MAX) {
11663 		/* Potential overflow, we know nothing */
11664 		__mark_reg32_unbounded(dst_reg);
11665 		return;
11666 	}
11667 	dst_reg->u32_min_value *= umin_val;
11668 	dst_reg->u32_max_value *= umax_val;
11669 	if (dst_reg->u32_max_value > S32_MAX) {
11670 		/* Overflow possible, we know nothing */
11671 		dst_reg->s32_min_value = S32_MIN;
11672 		dst_reg->s32_max_value = S32_MAX;
11673 	} else {
11674 		dst_reg->s32_min_value = dst_reg->u32_min_value;
11675 		dst_reg->s32_max_value = dst_reg->u32_max_value;
11676 	}
11677 }
11678 
11679 static void scalar_min_max_mul(struct bpf_reg_state *dst_reg,
11680 			       struct bpf_reg_state *src_reg)
11681 {
11682 	s64 smin_val = src_reg->smin_value;
11683 	u64 umin_val = src_reg->umin_value;
11684 	u64 umax_val = src_reg->umax_value;
11685 
11686 	if (smin_val < 0 || dst_reg->smin_value < 0) {
11687 		/* Ain't nobody got time to multiply that sign */
11688 		__mark_reg64_unbounded(dst_reg);
11689 		return;
11690 	}
11691 	/* Both values are positive, so we can work with unsigned and
11692 	 * copy the result to signed (unless it exceeds S64_MAX).
11693 	 */
11694 	if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) {
11695 		/* Potential overflow, we know nothing */
11696 		__mark_reg64_unbounded(dst_reg);
11697 		return;
11698 	}
11699 	dst_reg->umin_value *= umin_val;
11700 	dst_reg->umax_value *= umax_val;
11701 	if (dst_reg->umax_value > S64_MAX) {
11702 		/* Overflow possible, we know nothing */
11703 		dst_reg->smin_value = S64_MIN;
11704 		dst_reg->smax_value = S64_MAX;
11705 	} else {
11706 		dst_reg->smin_value = dst_reg->umin_value;
11707 		dst_reg->smax_value = dst_reg->umax_value;
11708 	}
11709 }
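/* Worked example for the 64-bit multiply above (illustrative values):
 * dst in [2, 100] times src in [3, 10] gives unsigned bounds [6, 1000],
 * which also fit in s64, so the signed bounds become [6, 1000] as well.
 * The U32_MAX guard exists because two factors each <= U32_MAX cannot
 * wrap a u64 product ((2^32 - 1)^2 < 2^64); anything larger is simply
 * marked unbounded.
 */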
11710 
11711 static void scalar32_min_max_and(struct bpf_reg_state *dst_reg,
11712 				 struct bpf_reg_state *src_reg)
11713 {
11714 	bool src_known = tnum_subreg_is_const(src_reg->var_off);
11715 	bool dst_known = tnum_subreg_is_const(dst_reg->var_off);
11716 	struct tnum var32_off = tnum_subreg(dst_reg->var_off);
11717 	s32 smin_val = src_reg->s32_min_value;
11718 	u32 umax_val = src_reg->u32_max_value;
11719 
11720 	if (src_known && dst_known) {
11721 		__mark_reg32_known(dst_reg, var32_off.value);
11722 		return;
11723 	}
11724 
11725 	/* We get our minimum from the var_off, since that's inherently
11726 	 * bitwise.  Our maximum is the minimum of the operands' maxima.
11727 	 */
11728 	dst_reg->u32_min_value = var32_off.value;
11729 	dst_reg->u32_max_value = min(dst_reg->u32_max_value, umax_val);
11730 	if (dst_reg->s32_min_value < 0 || smin_val < 0) {
11731 		/* Lose signed bounds when ANDing negative numbers,
11732 		 * ain't nobody got time for that.
11733 		 */
11734 		dst_reg->s32_min_value = S32_MIN;
11735 		dst_reg->s32_max_value = S32_MAX;
11736 	} else {
11737 		/* ANDing two positives gives a positive, so safe to
11738 		 * cast result into s32.
11739 		 */
11740 		dst_reg->s32_min_value = dst_reg->u32_min_value;
11741 		dst_reg->s32_max_value = dst_reg->u32_max_value;
11742 	}
11743 }
11744 
11745 static void scalar_min_max_and(struct bpf_reg_state *dst_reg,
11746 			       struct bpf_reg_state *src_reg)
11747 {
11748 	bool src_known = tnum_is_const(src_reg->var_off);
11749 	bool dst_known = tnum_is_const(dst_reg->var_off);
11750 	s64 smin_val = src_reg->smin_value;
11751 	u64 umax_val = src_reg->umax_value;
11752 
11753 	if (src_known && dst_known) {
11754 		__mark_reg_known(dst_reg, dst_reg->var_off.value);
11755 		return;
11756 	}
11757 
11758 	/* We get our minimum from the var_off, since that's inherently
11759 	 * bitwise.  Our maximum is the minimum of the operands' maxima.
11760 	 */
11761 	dst_reg->umin_value = dst_reg->var_off.value;
11762 	dst_reg->umax_value = min(dst_reg->umax_value, umax_val);
11763 	if (dst_reg->smin_value < 0 || smin_val < 0) {
11764 		/* Lose signed bounds when ANDing negative numbers,
11765 		 * ain't nobody got time for that.
11766 		 */
11767 		dst_reg->smin_value = S64_MIN;
11768 		dst_reg->smax_value = S64_MAX;
11769 	} else {
11770 		/* ANDing two positives gives a positive, so safe to
11771 		 * cast result into s64.
11772 		 */
11773 		dst_reg->smin_value = dst_reg->umin_value;
11774 		dst_reg->smax_value = dst_reg->umax_value;
11775 	}
11776 	/* We may learn something more from the var_off */
11777 	__update_reg_bounds(dst_reg);
11778 }
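/* Worked example for the 64-bit AND above (illustrative values): a dst
 * known to be nonnegative with unsigned bounds [0, 1000], ANDed with the
 * constant 0xff (the caller has already run tnum_and() on dst->var_off).
 * umin becomes var_off.value (the bits known to be one), umax becomes
 * min(1000, 0xff) = 0xff, and since neither operand can be negative the
 * signed bounds are copied from the unsigned ones.
 */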
11779 
11780 static void scalar32_min_max_or(struct bpf_reg_state *dst_reg,
11781 				struct bpf_reg_state *src_reg)
11782 {
11783 	bool src_known = tnum_subreg_is_const(src_reg->var_off);
11784 	bool dst_known = tnum_subreg_is_const(dst_reg->var_off);
11785 	struct tnum var32_off = tnum_subreg(dst_reg->var_off);
11786 	s32 smin_val = src_reg->s32_min_value;
11787 	u32 umin_val = src_reg->u32_min_value;
11788 
11789 	if (src_known && dst_known) {
11790 		__mark_reg32_known(dst_reg, var32_off.value);
11791 		return;
11792 	}
11793 
11794 	/* We get our maximum from the var_off, and our minimum is the
11795 	 * maximum of the operands' minima
11796 	 */
11797 	dst_reg->u32_min_value = max(dst_reg->u32_min_value, umin_val);
11798 	dst_reg->u32_max_value = var32_off.value | var32_off.mask;
11799 	if (dst_reg->s32_min_value < 0 || smin_val < 0) {
11800 		/* Lose signed bounds when ORing negative numbers,
11801 		 * ain't nobody got time for that.
11802 		 */
11803 		dst_reg->s32_min_value = S32_MIN;
11804 		dst_reg->s32_max_value = S32_MAX;
11805 	} else {
11806 		/* ORing two positives gives a positive, so safe to
11807 		 * cast result into s32.
11808 		 */
11809 		dst_reg->s32_min_value = dst_reg->u32_min_value;
11810 		dst_reg->s32_max_value = dst_reg->u32_max_value;
11811 	}
11812 }
11813 
11814 static void scalar_min_max_or(struct bpf_reg_state *dst_reg,
11815 			      struct bpf_reg_state *src_reg)
11816 {
11817 	bool src_known = tnum_is_const(src_reg->var_off);
11818 	bool dst_known = tnum_is_const(dst_reg->var_off);
11819 	s64 smin_val = src_reg->smin_value;
11820 	u64 umin_val = src_reg->umin_value;
11821 
11822 	if (src_known && dst_known) {
11823 		__mark_reg_known(dst_reg, dst_reg->var_off.value);
11824 		return;
11825 	}
11826 
11827 	/* We get our maximum from the var_off, and our minimum is the
11828 	 * maximum of the operands' minima
11829 	 */
11830 	dst_reg->umin_value = max(dst_reg->umin_value, umin_val);
11831 	dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask;
11832 	if (dst_reg->smin_value < 0 || smin_val < 0) {
11833 		/* Lose signed bounds when ORing negative numbers,
11834 		 * ain't nobody got time for that.
11835 		 */
11836 		dst_reg->smin_value = S64_MIN;
11837 		dst_reg->smax_value = S64_MAX;
11838 	} else {
11839 		/* ORing two positives gives a positive, so safe to
11840 		 * cast result into s64.
11841 		 */
11842 		dst_reg->smin_value = dst_reg->umin_value;
11843 		dst_reg->smax_value = dst_reg->umax_value;
11844 	}
11845 	/* We may learn something more from the var_off */
11846 	__update_reg_bounds(dst_reg);
11847 }
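/* Worked example for the 64-bit OR above (illustrative values): a dst
 * known to be nonnegative in [4, 32], ORed with the constant 3: umin
 * becomes max(4, 3) = 4 and umax becomes var_off.value | var_off.mask,
 * i.e. every bit that is either known-one or unknown may be set.  Since
 * neither operand can be negative the signed bounds are copied from the
 * unsigned ones, and __update_reg_bounds() may tighten them further.
 */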
11848 
11849 static void scalar32_min_max_xor(struct bpf_reg_state *dst_reg,
11850 				 struct bpf_reg_state *src_reg)
11851 {
11852 	bool src_known = tnum_subreg_is_const(src_reg->var_off);
11853 	bool dst_known = tnum_subreg_is_const(dst_reg->var_off);
11854 	struct tnum var32_off = tnum_subreg(dst_reg->var_off);
11855 	s32 smin_val = src_reg->s32_min_value;
11856 
11857 	if (src_known && dst_known) {
11858 		__mark_reg32_known(dst_reg, var32_off.value);
11859 		return;
11860 	}
11861 
11862 	/* We get both minimum and maximum from the var32_off. */
11863 	dst_reg->u32_min_value = var32_off.value;
11864 	dst_reg->u32_max_value = var32_off.value | var32_off.mask;
11865 
11866 	if (dst_reg->s32_min_value >= 0 && smin_val >= 0) {
11867 		/* XORing two positive sign numbers gives a positive,
11868 		 * so safe to cast u32 result into s32.
11869 		 */
11870 		dst_reg->s32_min_value = dst_reg->u32_min_value;
11871 		dst_reg->s32_max_value = dst_reg->u32_max_value;
11872 	} else {
11873 		dst_reg->s32_min_value = S32_MIN;
11874 		dst_reg->s32_max_value = S32_MAX;
11875 	}
11876 }
11877 
11878 static void scalar_min_max_xor(struct bpf_reg_state *dst_reg,
11879 			       struct bpf_reg_state *src_reg)
11880 {
11881 	bool src_known = tnum_is_const(src_reg->var_off);
11882 	bool dst_known = tnum_is_const(dst_reg->var_off);
11883 	s64 smin_val = src_reg->smin_value;
11884 
11885 	if (src_known && dst_known) {
11886 		/* dst_reg->var_off.value has been updated earlier */
11887 		__mark_reg_known(dst_reg, dst_reg->var_off.value);
11888 		return;
11889 	}
11890 
11891 	/* We get both minimum and maximum from the var_off. */
11892 	dst_reg->umin_value = dst_reg->var_off.value;
11893 	dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask;
11894 
11895 	if (dst_reg->smin_value >= 0 && smin_val >= 0) {
11896 		/* XORing two positive sign numbers gives a positive,
11897 		 * so safe to cast u64 result into s64.
11898 		 */
11899 		dst_reg->smin_value = dst_reg->umin_value;
11900 		dst_reg->smax_value = dst_reg->umax_value;
11901 	} else {
11902 		dst_reg->smin_value = S64_MIN;
11903 		dst_reg->smax_value = S64_MAX;
11904 	}
11905 
11906 	__update_reg_bounds(dst_reg);
11907 }
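/* Worked example for the 64-bit XOR above (illustrative values): if both
 * operands are known constants, say 0xf0 and 0x0f, dst->var_off already
 * holds the XOR result (0xff) from the caller's tnum_xor() and the
 * register is simply re-marked as known.  Otherwise both unsigned bounds
 * are taken from the var_off, exactly as in the OR case.
 */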
11908 
11909 static void __scalar32_min_max_lsh(struct bpf_reg_state *dst_reg,
11910 				   u64 umin_val, u64 umax_val)
11911 {
11912 	/* We lose all sign bit information (except what we can pick
11913 	 * up from var_off)
11914 	 */
11915 	dst_reg->s32_min_value = S32_MIN;
11916 	dst_reg->s32_max_value = S32_MAX;
11917 	/* If we might shift our top bit out, then we know nothing */
11918 	if (umax_val > 31 || dst_reg->u32_max_value > 1ULL << (31 - umax_val)) {
11919 		dst_reg->u32_min_value = 0;
11920 		dst_reg->u32_max_value = U32_MAX;
11921 	} else {
11922 		dst_reg->u32_min_value <<= umin_val;
11923 		dst_reg->u32_max_value <<= umax_val;
11924 	}
11925 }
11926 
11927 static void scalar32_min_max_lsh(struct bpf_reg_state *dst_reg,
11928 				 struct bpf_reg_state *src_reg)
11929 {
11930 	u32 umax_val = src_reg->u32_max_value;
11931 	u32 umin_val = src_reg->u32_min_value;
11932 	/* u32 alu operation will zext upper bits */
11933 	struct tnum subreg = tnum_subreg(dst_reg->var_off);
11934 
11935 	__scalar32_min_max_lsh(dst_reg, umin_val, umax_val);
11936 	dst_reg->var_off = tnum_subreg(tnum_lshift(subreg, umin_val));
11937 	/* Not strictly required, but be careful and mark the reg64 bounds as
11938 	 * unknown so that we are forced to pick them up from the tnum and zext
11939 	 * later; if some path skips this step we are still safe.
11940 	 */
11941 	__mark_reg64_unbounded(dst_reg);
11942 	__update_reg32_bounds(dst_reg);
11943 }
11944 
11945 static void __scalar64_min_max_lsh(struct bpf_reg_state *dst_reg,
11946 				   u64 umin_val, u64 umax_val)
11947 {
11948 	/* Special case <<32 because it is a common compiler pattern to sign
11949 	 * extend subreg by doing <<32 s>>32. In this case if 32bit bounds are
11950 	 * positive we know this shift will also be positive so we can track
11951 	 * bounds correctly. Otherwise we lose all sign bit information except
11952 	 * what we can pick up from var_off. Perhaps we can generalize this
11953 	 * later to shifts of any length.
11954 	 */
11955 	if (umin_val == 32 && umax_val == 32 && dst_reg->s32_max_value >= 0)
11956 		dst_reg->smax_value = (s64)dst_reg->s32_max_value << 32;
11957 	else
11958 		dst_reg->smax_value = S64_MAX;
11959 
11960 	if (umin_val == 32 && umax_val == 32 && dst_reg->s32_min_value >= 0)
11961 		dst_reg->smin_value = (s64)dst_reg->s32_min_value << 32;
11962 	else
11963 		dst_reg->smin_value = S64_MIN;
11964 
11965 	/* If we might shift our top bit out, then we know nothing */
11966 	if (dst_reg->umax_value > 1ULL << (63 - umax_val)) {
11967 		dst_reg->umin_value = 0;
11968 		dst_reg->umax_value = U64_MAX;
11969 	} else {
11970 		dst_reg->umin_value <<= umin_val;
11971 		dst_reg->umax_value <<= umax_val;
11972 	}
11973 }
11974 
11975 static void scalar_min_max_lsh(struct bpf_reg_state *dst_reg,
11976 			       struct bpf_reg_state *src_reg)
11977 {
11978 	u64 umax_val = src_reg->umax_value;
11979 	u64 umin_val = src_reg->umin_value;
11980 
11981 	/* scalar64 calc uses 32bit unshifted bounds so must be called first */
11982 	__scalar64_min_max_lsh(dst_reg, umin_val, umax_val);
11983 	__scalar32_min_max_lsh(dst_reg, umin_val, umax_val);
11984 
11985 	dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val);
11986 	/* We may learn something more from the var_off */
11987 	__update_reg_bounds(dst_reg);
11988 }
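/* Worked example for the <<32 special case above (illustrative values):
 * the pattern "r1 <<= 32; r1 s>>= 32" is how compilers sign-extend a
 * 32-bit subregister.  If the 32-bit bounds before the shift are, say,
 * [0, 100], then after "r1 <<= 32" the 64-bit signed bounds become
 * [0, 100 << 32], which lets the following arithmetic right shift recover
 * the original tight 32-bit bounds.
 */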
11989 
11990 static void scalar32_min_max_rsh(struct bpf_reg_state *dst_reg,
11991 				 struct bpf_reg_state *src_reg)
11992 {
11993 	struct tnum subreg = tnum_subreg(dst_reg->var_off);
11994 	u32 umax_val = src_reg->u32_max_value;
11995 	u32 umin_val = src_reg->u32_min_value;
11996 
11997 	/* BPF_RSH is an unsigned shift.  If the value in dst_reg might
11998 	 * be negative, then either:
11999 	 * 1) src_reg might be zero, so the sign bit of the result is
12000 	 *    unknown, so we lose our signed bounds
12001 	 * 2) it's known negative, thus the unsigned bounds capture the
12002 	 *    signed bounds
12003 	 * 3) the signed bounds cross zero, so they tell us nothing
12004 	 *    about the result
12005 	 * If the value in dst_reg is known nonnegative, then again the
12006 	 * unsigned bounds capture the signed bounds.
12007 	 * Thus, in all cases it suffices to blow away our signed bounds
12008 	 * and rely on inferring new ones from the unsigned bounds and
12009 	 * var_off of the result.
12010 	 */
12011 	dst_reg->s32_min_value = S32_MIN;
12012 	dst_reg->s32_max_value = S32_MAX;
12013 
12014 	dst_reg->var_off = tnum_rshift(subreg, umin_val);
12015 	dst_reg->u32_min_value >>= umax_val;
12016 	dst_reg->u32_max_value >>= umin_val;
12017 
12018 	__mark_reg64_unbounded(dst_reg);
12019 	__update_reg32_bounds(dst_reg);
12020 }
12021 
12022 static void scalar_min_max_rsh(struct bpf_reg_state *dst_reg,
12023 			       struct bpf_reg_state *src_reg)
12024 {
12025 	u64 umax_val = src_reg->umax_value;
12026 	u64 umin_val = src_reg->umin_value;
12027 
12028 	/* BPF_RSH is an unsigned shift.  If the value in dst_reg might
12029 	 * be negative, then either:
12030 	 * 1) src_reg might be zero, so the sign bit of the result is
12031 	 *    unknown, so we lose our signed bounds
12032 	 * 2) it's known negative, thus the unsigned bounds capture the
12033 	 *    signed bounds
12034 	 * 3) the signed bounds cross zero, so they tell us nothing
12035 	 *    about the result
12036 	 * If the value in dst_reg is known nonnegative, then again the
12037 	 * unsigned bounds capture the signed bounds.
12038 	 * Thus, in all cases it suffices to blow away our signed bounds
12039 	 * and rely on inferring new ones from the unsigned bounds and
12040 	 * var_off of the result.
12041 	 */
12042 	dst_reg->smin_value = S64_MIN;
12043 	dst_reg->smax_value = S64_MAX;
12044 	dst_reg->var_off = tnum_rshift(dst_reg->var_off, umin_val);
12045 	dst_reg->umin_value >>= umax_val;
12046 	dst_reg->umax_value >>= umin_val;
12047 
12048 	/* It's not easy to operate on alu32 bounds here because it depends
12049 	 * on bits being shifted in. Take easy way out and mark unbounded
12050 	 * so we can recalculate later from tnum.
12051 	 */
12052 	__mark_reg32_unbounded(dst_reg);
12053 	__update_reg_bounds(dst_reg);
12054 }
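/* Worked example for the 64-bit unsigned right shift above (illustrative
 * values): a dst with unsigned bounds [0x100, 0x1000] shifted right by a
 * known 4 gives [0x10, 0x100].  The signed bounds are deliberately
 * discarded first and later re-derived from the unsigned bounds and
 * var_off, as the comment above explains.
 */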
12055 
12056 static void scalar32_min_max_arsh(struct bpf_reg_state *dst_reg,
12057 				  struct bpf_reg_state *src_reg)
12058 {
12059 	u64 umin_val = src_reg->u32_min_value;
12060 
12061 	/* Upon reaching here, src_known is true and
12062 	 * umax_val is equal to umin_val.
12063 	 */
12064 	dst_reg->s32_min_value = (u32)(((s32)dst_reg->s32_min_value) >> umin_val);
12065 	dst_reg->s32_max_value = (u32)(((s32)dst_reg->s32_max_value) >> umin_val);
12066 
12067 	dst_reg->var_off = tnum_arshift(tnum_subreg(dst_reg->var_off), umin_val, 32);
12068 
12069 	/* blow away the dst_reg umin_value/umax_value and rely on
12070 	 * dst_reg var_off to refine the result.
12071 	 */
12072 	dst_reg->u32_min_value = 0;
12073 	dst_reg->u32_max_value = U32_MAX;
12074 
12075 	__mark_reg64_unbounded(dst_reg);
12076 	__update_reg32_bounds(dst_reg);
12077 }
12078 
12079 static void scalar_min_max_arsh(struct bpf_reg_state *dst_reg,
12080 				struct bpf_reg_state *src_reg)
12081 {
12082 	u64 umin_val = src_reg->umin_value;
12083 
12084 	/* Upon reaching here, src_known is true and umax_val is equal
12085 	 * to umin_val.
12086 	 */
12087 	dst_reg->smin_value >>= umin_val;
12088 	dst_reg->smax_value >>= umin_val;
12089 
12090 	dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val, 64);
12091 
12092 	/* blow away the dst_reg umin_value/umax_value and rely on
12093 	 * dst_reg var_off to refine the result.
12094 	 */
12095 	dst_reg->umin_value = 0;
12096 	dst_reg->umax_value = U64_MAX;
12097 
12098 	/* It's not easy to operate on alu32 bounds here because it depends
12099 	 * on bits being shifted in from upper 32-bits. Take easy way out
12100 	 * and mark unbounded so we can recalculate later from tnum.
12101 	 */
12102 	__mark_reg32_unbounded(dst_reg);
12103 	__update_reg_bounds(dst_reg);
12104 }
12105 
12106 /* WARNING: This function does calculations on 64-bit values, but the actual
12107  * execution may occur on 32-bit values. Therefore, things like bitshifts
12108  * need extra checks in the 32-bit case.
12109  */
12110 static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
12111 				      struct bpf_insn *insn,
12112 				      struct bpf_reg_state *dst_reg,
12113 				      struct bpf_reg_state src_reg)
12114 {
12115 	struct bpf_reg_state *regs = cur_regs(env);
12116 	u8 opcode = BPF_OP(insn->code);
12117 	bool src_known;
12118 	s64 smin_val, smax_val;
12119 	u64 umin_val, umax_val;
12120 	s32 s32_min_val, s32_max_val;
12121 	u32 u32_min_val, u32_max_val;
12122 	u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
12123 	bool alu32 = (BPF_CLASS(insn->code) != BPF_ALU64);
12124 	int ret;
12125 
12126 	smin_val = src_reg.smin_value;
12127 	smax_val = src_reg.smax_value;
12128 	umin_val = src_reg.umin_value;
12129 	umax_val = src_reg.umax_value;
12130 
12131 	s32_min_val = src_reg.s32_min_value;
12132 	s32_max_val = src_reg.s32_max_value;
12133 	u32_min_val = src_reg.u32_min_value;
12134 	u32_max_val = src_reg.u32_max_value;
12135 
12136 	if (alu32) {
12137 		src_known = tnum_subreg_is_const(src_reg.var_off);
12138 		if ((src_known &&
12139 		     (s32_min_val != s32_max_val || u32_min_val != u32_max_val)) ||
12140 		    s32_min_val > s32_max_val || u32_min_val > u32_max_val) {
12141 			/* Taint dst register if offset had invalid bounds
12142 			 * derived from e.g. dead branches.
12143 			 */
12144 			__mark_reg_unknown(env, dst_reg);
12145 			return 0;
12146 		}
12147 	} else {
12148 		src_known = tnum_is_const(src_reg.var_off);
12149 		if ((src_known &&
12150 		     (smin_val != smax_val || umin_val != umax_val)) ||
12151 		    smin_val > smax_val || umin_val > umax_val) {
12152 			/* Taint dst register if offset had invalid bounds
12153 			 * derived from e.g. dead branches.
12154 			 */
12155 			__mark_reg_unknown(env, dst_reg);
12156 			return 0;
12157 		}
12158 	}
12159 
12160 	if (!src_known &&
12161 	    opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) {
12162 		__mark_reg_unknown(env, dst_reg);
12163 		return 0;
12164 	}
12165 
12166 	if (sanitize_needed(opcode)) {
12167 		ret = sanitize_val_alu(env, insn);
12168 		if (ret < 0)
12169 			return sanitize_err(env, insn, ret, NULL, NULL);
12170 	}
12171 
12172 	/* Calculate sign/unsigned bounds and tnum for alu32 and alu64 bit ops.
12173 	 * There are two classes of instructions: for the first class we track both
12174 	 * alu32 and alu64 sign/unsigned bounds independently; this provides the
12175 	 * greatest amount of precision when alu operations are mixed with jmp32
12176 	 * operations. These operations are BPF_ADD, BPF_SUB, BPF_MUL, BPF_AND,
12177 	 * and BPF_OR. This is possible because these ops have fairly easy to
12178 	 * understand and calculate behavior in both 32-bit and 64-bit alu ops.
12179 	 * See alu32 verifier tests for examples. The second class of
12180 	 * operations, BPF_LSH, BPF_RSH, and BPF_ARSH, however are not so easy
12181 	 * with regards to tracking sign/unsigned bounds because the bits may
12182 	 * cross subreg boundaries in the alu64 case. When this happens we mark
12183 	 * the reg unbounded in the subreg bound space and use the resulting
12184 	 * tnum to calculate an approximation of the sign/unsigned bounds.
12185 	 */
12186 	switch (opcode) {
12187 	case BPF_ADD:
12188 		scalar32_min_max_add(dst_reg, &src_reg);
12189 		scalar_min_max_add(dst_reg, &src_reg);
12190 		dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);
12191 		break;
12192 	case BPF_SUB:
12193 		scalar32_min_max_sub(dst_reg, &src_reg);
12194 		scalar_min_max_sub(dst_reg, &src_reg);
12195 		dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off);
12196 		break;
12197 	case BPF_MUL:
12198 		dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off);
12199 		scalar32_min_max_mul(dst_reg, &src_reg);
12200 		scalar_min_max_mul(dst_reg, &src_reg);
12201 		break;
12202 	case BPF_AND:
12203 		dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off);
12204 		scalar32_min_max_and(dst_reg, &src_reg);
12205 		scalar_min_max_and(dst_reg, &src_reg);
12206 		break;
12207 	case BPF_OR:
12208 		dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off);
12209 		scalar32_min_max_or(dst_reg, &src_reg);
12210 		scalar_min_max_or(dst_reg, &src_reg);
12211 		break;
12212 	case BPF_XOR:
12213 		dst_reg->var_off = tnum_xor(dst_reg->var_off, src_reg.var_off);
12214 		scalar32_min_max_xor(dst_reg, &src_reg);
12215 		scalar_min_max_xor(dst_reg, &src_reg);
12216 		break;
12217 	case BPF_LSH:
12218 		if (umax_val >= insn_bitness) {
12219 			/* Shifts greater than 31 or 63 are undefined.
12220 			 * This includes shifts by a negative number.
12221 			 */
12222 			mark_reg_unknown(env, regs, insn->dst_reg);
12223 			break;
12224 		}
12225 		if (alu32)
12226 			scalar32_min_max_lsh(dst_reg, &src_reg);
12227 		else
12228 			scalar_min_max_lsh(dst_reg, &src_reg);
12229 		break;
12230 	case BPF_RSH:
12231 		if (umax_val >= insn_bitness) {
12232 			/* Shifts greater than 31 or 63 are undefined.
12233 			 * This includes shifts by a negative number.
12234 			 */
12235 			mark_reg_unknown(env, regs, insn->dst_reg);
12236 			break;
12237 		}
12238 		if (alu32)
12239 			scalar32_min_max_rsh(dst_reg, &src_reg);
12240 		else
12241 			scalar_min_max_rsh(dst_reg, &src_reg);
12242 		break;
12243 	case BPF_ARSH:
12244 		if (umax_val >= insn_bitness) {
12245 			/* Shifts greater than 31 or 63 are undefined.
12246 			 * This includes shifts by a negative number.
12247 			 */
12248 			mark_reg_unknown(env, regs, insn->dst_reg);
12249 			break;
12250 		}
12251 		if (alu32)
12252 			scalar32_min_max_arsh(dst_reg, &src_reg);
12253 		else
12254 			scalar_min_max_arsh(dst_reg, &src_reg);
12255 		break;
12256 	default:
12257 		mark_reg_unknown(env, regs, insn->dst_reg);
12258 		break;
12259 	}
12260 
12261 	/* ALU32 ops are zero extended into 64bit register */
12262 	if (alu32)
12263 		zext_32_to_64(dst_reg);
12264 	reg_bounds_sync(dst_reg);
12265 	return 0;
12266 }
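/* Illustrative consequence of the shift handling above: the shift amount
 * must be provably smaller than the register width, i.e. its umax_value
 * must be below 64 for ALU64 (32 for ALU32).  A shift amount only known to
 * lie in, say, [0, 64] is undefined for some runtime values, so dst is
 * simply marked unknown rather than given bogus bounds.
 */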
12267 
12268 /* Handles ALU ops other than BPF_END, BPF_NEG and BPF_MOV: computes new min/max
12269  * and var_off.
12270  */
12271 static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
12272 				   struct bpf_insn *insn)
12273 {
12274 	struct bpf_verifier_state *vstate = env->cur_state;
12275 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
12276 	struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg;
12277 	struct bpf_reg_state *ptr_reg = NULL, off_reg = {0};
12278 	u8 opcode = BPF_OP(insn->code);
12279 	int err;
12280 
12281 	dst_reg = &regs[insn->dst_reg];
12282 	src_reg = NULL;
12283 	if (dst_reg->type != SCALAR_VALUE)
12284 		ptr_reg = dst_reg;
12285 	else
12286 		/* Make sure ID is cleared otherwise dst_reg min/max could be
12287 		 * incorrectly propagated into other registers by find_equal_scalars()
12288 		 */
12289 		dst_reg->id = 0;
12290 	if (BPF_SRC(insn->code) == BPF_X) {
12291 		src_reg = &regs[insn->src_reg];
12292 		if (src_reg->type != SCALAR_VALUE) {
12293 			if (dst_reg->type != SCALAR_VALUE) {
12294 				/* Combining two pointers by any ALU op yields
12295 				 * an arbitrary scalar. Disallow all math except
12296 				 * pointer subtraction
12297 				 */
12298 				if (opcode == BPF_SUB && env->allow_ptr_leaks) {
12299 					mark_reg_unknown(env, regs, insn->dst_reg);
12300 					return 0;
12301 				}
12302 				verbose(env, "R%d pointer %s pointer prohibited\n",
12303 					insn->dst_reg,
12304 					bpf_alu_string[opcode >> 4]);
12305 				return -EACCES;
12306 			} else {
12307 				/* scalar += pointer
12308 				 * This is legal, but we have to reverse our
12309 				 * src/dest handling in computing the range
12310 				 */
12311 				err = mark_chain_precision(env, insn->dst_reg);
12312 				if (err)
12313 					return err;
12314 				return adjust_ptr_min_max_vals(env, insn,
12315 							       src_reg, dst_reg);
12316 			}
12317 		} else if (ptr_reg) {
12318 			/* pointer += scalar */
12319 			err = mark_chain_precision(env, insn->src_reg);
12320 			if (err)
12321 				return err;
12322 			return adjust_ptr_min_max_vals(env, insn,
12323 						       dst_reg, src_reg);
12324 		} else if (dst_reg->precise) {
12325 			/* if dst_reg is precise, src_reg should be precise as well */
12326 			err = mark_chain_precision(env, insn->src_reg);
12327 			if (err)
12328 				return err;
12329 		}
12330 	} else {
12331 		/* Pretend the src is a reg with a known value, since we only
12332 		 * need to be able to read from this state.
12333 		 */
12334 		off_reg.type = SCALAR_VALUE;
12335 		__mark_reg_known(&off_reg, insn->imm);
12336 		src_reg = &off_reg;
12337 		if (ptr_reg) /* pointer += K */
12338 			return adjust_ptr_min_max_vals(env, insn,
12339 						       ptr_reg, src_reg);
12340 	}
12341 
12342 	/* Getting here implies an ALU op on two SCALAR_VALUEs */
12343 	if (WARN_ON_ONCE(ptr_reg)) {
12344 		print_verifier_state(env, state, true);
12345 		verbose(env, "verifier internal error: unexpected ptr_reg\n");
12346 		return -EINVAL;
12347 	}
12348 	if (WARN_ON(!src_reg)) {
12349 		print_verifier_state(env, state, true);
12350 		verbose(env, "verifier internal error: no src_reg\n");
12351 		return -EINVAL;
12352 	}
12353 	return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg);
12354 }
12355 
12356 /* check validity of 32-bit and 64-bit arithmetic operations */
12357 static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
12358 {
12359 	struct bpf_reg_state *regs = cur_regs(env);
12360 	u8 opcode = BPF_OP(insn->code);
12361 	int err;
12362 
12363 	if (opcode == BPF_END || opcode == BPF_NEG) {
12364 		if (opcode == BPF_NEG) {
12365 			if (BPF_SRC(insn->code) != BPF_K ||
12366 			    insn->src_reg != BPF_REG_0 ||
12367 			    insn->off != 0 || insn->imm != 0) {
12368 				verbose(env, "BPF_NEG uses reserved fields\n");
12369 				return -EINVAL;
12370 			}
12371 		} else {
12372 			if (insn->src_reg != BPF_REG_0 || insn->off != 0 ||
12373 			    (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) ||
12374 			    BPF_CLASS(insn->code) == BPF_ALU64) {
12375 				verbose(env, "BPF_END uses reserved fields\n");
12376 				return -EINVAL;
12377 			}
12378 		}
12379 
12380 		/* check src operand */
12381 		err = check_reg_arg(env, insn->dst_reg, SRC_OP);
12382 		if (err)
12383 			return err;
12384 
12385 		if (is_pointer_value(env, insn->dst_reg)) {
12386 			verbose(env, "R%d pointer arithmetic prohibited\n",
12387 				insn->dst_reg);
12388 			return -EACCES;
12389 		}
12390 
12391 		/* check dest operand */
12392 		err = check_reg_arg(env, insn->dst_reg, DST_OP);
12393 		if (err)
12394 			return err;
12395 
12396 	} else if (opcode == BPF_MOV) {
12397 
12398 		if (BPF_SRC(insn->code) == BPF_X) {
12399 			if (insn->imm != 0 || insn->off != 0) {
12400 				verbose(env, "BPF_MOV uses reserved fields\n");
12401 				return -EINVAL;
12402 			}
12403 
12404 			/* check src operand */
12405 			err = check_reg_arg(env, insn->src_reg, SRC_OP);
12406 			if (err)
12407 				return err;
12408 		} else {
12409 			if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
12410 				verbose(env, "BPF_MOV uses reserved fields\n");
12411 				return -EINVAL;
12412 			}
12413 		}
12414 
12415 		/* check dest operand, mark as required later */
12416 		err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
12417 		if (err)
12418 			return err;
12419 
12420 		if (BPF_SRC(insn->code) == BPF_X) {
12421 			struct bpf_reg_state *src_reg = regs + insn->src_reg;
12422 			struct bpf_reg_state *dst_reg = regs + insn->dst_reg;
12423 
12424 			if (BPF_CLASS(insn->code) == BPF_ALU64) {
12425 				/* case: R1 = R2
12426 				 * copy register state to dest reg
12427 				 */
12428 				if (src_reg->type == SCALAR_VALUE && !src_reg->id)
12429 					/* Assign src and dst registers the same ID
12430 					 * that will be used by find_equal_scalars()
12431 					 * to propagate min/max range.
12432 					 */
12433 					src_reg->id = ++env->id_gen;
12434 				copy_register_state(dst_reg, src_reg);
12435 				dst_reg->live |= REG_LIVE_WRITTEN;
12436 				dst_reg->subreg_def = DEF_NOT_SUBREG;
12437 			} else {
12438 				/* R1 = (u32) R2 */
12439 				if (is_pointer_value(env, insn->src_reg)) {
12440 					verbose(env,
12441 						"R%d partial copy of pointer\n",
12442 						insn->src_reg);
12443 					return -EACCES;
12444 				} else if (src_reg->type == SCALAR_VALUE) {
12445 					bool is_src_reg_u32 = src_reg->umax_value <= U32_MAX;
12446 
12447 					if (is_src_reg_u32 && !src_reg->id)
12448 						src_reg->id = ++env->id_gen;
12449 					copy_register_state(dst_reg, src_reg);
12450 					/* Make sure ID is cleared if src_reg is not in u32 range, otherwise
12451 					 * dst_reg min/max could be incorrectly
12452 					 * propagated into src_reg by find_equal_scalars()
12453 					 */
12454 					if (!is_src_reg_u32)
12455 						dst_reg->id = 0;
12456 					dst_reg->live |= REG_LIVE_WRITTEN;
12457 					dst_reg->subreg_def = env->insn_idx + 1;
12458 				} else {
12459 					mark_reg_unknown(env, regs,
12460 							 insn->dst_reg);
12461 				}
12462 				zext_32_to_64(dst_reg);
12463 				reg_bounds_sync(dst_reg);
12464 			}
12465 		} else {
12466 			/* case: R = imm
12467 			 * remember the value we stored into this reg
12468 			 */
12469 			/* clear any state __mark_reg_known doesn't set */
12470 			mark_reg_unknown(env, regs, insn->dst_reg);
12471 			regs[insn->dst_reg].type = SCALAR_VALUE;
12472 			if (BPF_CLASS(insn->code) == BPF_ALU64) {
12473 				__mark_reg_known(regs + insn->dst_reg,
12474 						 insn->imm);
12475 			} else {
12476 				__mark_reg_known(regs + insn->dst_reg,
12477 						 (u32)insn->imm);
12478 			}
12479 		}
12480 
12481 	} else if (opcode > BPF_END) {
12482 		verbose(env, "invalid BPF_ALU opcode %x\n", opcode);
12483 		return -EINVAL;
12484 
12485 	} else {	/* all other ALU ops: and, sub, xor, add, ... */
12486 
12487 		if (BPF_SRC(insn->code) == BPF_X) {
12488 			if (insn->imm != 0 || insn->off != 0) {
12489 				verbose(env, "BPF_ALU uses reserved fields\n");
12490 				return -EINVAL;
12491 			}
12492 			/* check src1 operand */
12493 			err = check_reg_arg(env, insn->src_reg, SRC_OP);
12494 			if (err)
12495 				return err;
12496 		} else {
12497 			if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
12498 				verbose(env, "BPF_ALU uses reserved fields\n");
12499 				return -EINVAL;
12500 			}
12501 		}
12502 
12503 		/* check src2 operand */
12504 		err = check_reg_arg(env, insn->dst_reg, SRC_OP);
12505 		if (err)
12506 			return err;
12507 
12508 		if ((opcode == BPF_MOD || opcode == BPF_DIV) &&
12509 		    BPF_SRC(insn->code) == BPF_K && insn->imm == 0) {
12510 			verbose(env, "div by zero\n");
12511 			return -EINVAL;
12512 		}
12513 
12514 		if ((opcode == BPF_LSH || opcode == BPF_RSH ||
12515 		     opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
12516 			int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;
12517 
12518 			if (insn->imm < 0 || insn->imm >= size) {
12519 				verbose(env, "invalid shift %d\n", insn->imm);
12520 				return -EINVAL;
12521 			}
12522 		}
12523 
12524 		/* check dest operand */
12525 		err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
12526 		if (err)
12527 			return err;
12528 
12529 		return adjust_reg_min_max_vals(env, insn);
12530 	}
12531 
12532 	return 0;
12533 }
12534 
12535 static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
12536 				   struct bpf_reg_state *dst_reg,
12537 				   enum bpf_reg_type type,
12538 				   bool range_right_open)
12539 {
12540 	struct bpf_func_state *state;
12541 	struct bpf_reg_state *reg;
12542 	int new_range;
12543 
12544 	if (dst_reg->off < 0 ||
12545 	    (dst_reg->off == 0 && range_right_open))
12546 		/* This doesn't give us any range */
12547 		return;
12548 
12549 	if (dst_reg->umax_value > MAX_PACKET_OFF ||
12550 	    dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF)
12551 		/* Risk of overflow.  For instance, ptr + (1<<63) may be less
12552 		 * than pkt_end, but that's because it's also less than pkt.
12553 		 */
12554 		return;
12555 
12556 	new_range = dst_reg->off;
12557 	if (range_right_open)
12558 		new_range++;
12559 
12560 	/* Examples for register markings:
12561 	 *
12562 	 * pkt_data in dst register:
12563 	 *
12564 	 *   r2 = r3;
12565 	 *   r2 += 8;
12566 	 *   if (r2 > pkt_end) goto <handle exception>
12567 	 *   <access okay>
12568 	 *
12569 	 *   r2 = r3;
12570 	 *   r2 += 8;
12571 	 *   if (r2 < pkt_end) goto <access okay>
12572 	 *   <handle exception>
12573 	 *
12574 	 *   Where:
12575 	 *     r2 == dst_reg, pkt_end == src_reg
12576 	 *     r2=pkt(id=n,off=8,r=0)
12577 	 *     r3=pkt(id=n,off=0,r=0)
12578 	 *
12579 	 * pkt_data in src register:
12580 	 *
12581 	 *   r2 = r3;
12582 	 *   r2 += 8;
12583 	 *   if (pkt_end >= r2) goto <access okay>
12584 	 *   <handle exception>
12585 	 *
12586 	 *   r2 = r3;
12587 	 *   r2 += 8;
12588 	 *   if (pkt_end <= r2) goto <handle exception>
12589 	 *   <access okay>
12590 	 *
12591 	 *   Where:
12592 	 *     pkt_end == dst_reg, r2 == src_reg
12593 	 *     r2=pkt(id=n,off=8,r=0)
12594 	 *     r3=pkt(id=n,off=0,r=0)
12595 	 *
12596 	 * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8)
12597 	 * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8)
12598 	 * and [r3, r3 + 8-1) respectively is safe to access depending on
12599 	 * the check.
12600 	 */
12601 
12602 	/* If our ids match, then we must have the same max_value.  And we
12603 	 * don't care about the other reg's fixed offset, since if it's too big
12604 	 * the range won't allow anything.
12605 	 * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16.
12606 	 */
12607 	bpf_for_each_reg_in_vstate(vstate, state, reg, ({
12608 		if (reg->type == type && reg->id == dst_reg->id)
12609 			/* keep the maximum range already checked */
12610 			reg->range = max(reg->range, new_range);
12611 	}));
12612 }
12613 
12614 static int is_branch32_taken(struct bpf_reg_state *reg, u32 val, u8 opcode)
12615 {
12616 	struct tnum subreg = tnum_subreg(reg->var_off);
12617 	s32 sval = (s32)val;
12618 
12619 	switch (opcode) {
12620 	case BPF_JEQ:
12621 		if (tnum_is_const(subreg))
12622 			return !!tnum_equals_const(subreg, val);
12623 		else if (val < reg->u32_min_value || val > reg->u32_max_value)
12624 			return 0;
12625 		break;
12626 	case BPF_JNE:
12627 		if (tnum_is_const(subreg))
12628 			return !tnum_equals_const(subreg, val);
12629 		else if (val < reg->u32_min_value || val > reg->u32_max_value)
12630 			return 1;
12631 		break;
12632 	case BPF_JSET:
12633 		if ((~subreg.mask & subreg.value) & val)
12634 			return 1;
12635 		if (!((subreg.mask | subreg.value) & val))
12636 			return 0;
12637 		break;
12638 	case BPF_JGT:
12639 		if (reg->u32_min_value > val)
12640 			return 1;
12641 		else if (reg->u32_max_value <= val)
12642 			return 0;
12643 		break;
12644 	case BPF_JSGT:
12645 		if (reg->s32_min_value > sval)
12646 			return 1;
12647 		else if (reg->s32_max_value <= sval)
12648 			return 0;
12649 		break;
12650 	case BPF_JLT:
12651 		if (reg->u32_max_value < val)
12652 			return 1;
12653 		else if (reg->u32_min_value >= val)
12654 			return 0;
12655 		break;
12656 	case BPF_JSLT:
12657 		if (reg->s32_max_value < sval)
12658 			return 1;
12659 		else if (reg->s32_min_value >= sval)
12660 			return 0;
12661 		break;
12662 	case BPF_JGE:
12663 		if (reg->u32_min_value >= val)
12664 			return 1;
12665 		else if (reg->u32_max_value < val)
12666 			return 0;
12667 		break;
12668 	case BPF_JSGE:
12669 		if (reg->s32_min_value >= sval)
12670 			return 1;
12671 		else if (reg->s32_max_value < sval)
12672 			return 0;
12673 		break;
12674 	case BPF_JLE:
12675 		if (reg->u32_max_value <= val)
12676 			return 1;
12677 		else if (reg->u32_min_value > val)
12678 			return 0;
12679 		break;
12680 	case BPF_JSLE:
12681 		if (reg->s32_max_value <= sval)
12682 			return 1;
12683 		else if (reg->s32_min_value > sval)
12684 			return 0;
12685 		break;
12686 	}
12687 
12688 	return -1;
12689 }
12690 
12691 
12692 static int is_branch64_taken(struct bpf_reg_state *reg, u64 val, u8 opcode)
12693 {
12694 	s64 sval = (s64)val;
12695 
12696 	switch (opcode) {
12697 	case BPF_JEQ:
12698 		if (tnum_is_const(reg->var_off))
12699 			return !!tnum_equals_const(reg->var_off, val);
12700 		else if (val < reg->umin_value || val > reg->umax_value)
12701 			return 0;
12702 		break;
12703 	case BPF_JNE:
12704 		if (tnum_is_const(reg->var_off))
12705 			return !tnum_equals_const(reg->var_off, val);
12706 		else if (val < reg->umin_value || val > reg->umax_value)
12707 			return 1;
12708 		break;
12709 	case BPF_JSET:
12710 		if ((~reg->var_off.mask & reg->var_off.value) & val)
12711 			return 1;
12712 		if (!((reg->var_off.mask | reg->var_off.value) & val))
12713 			return 0;
12714 		break;
12715 	case BPF_JGT:
12716 		if (reg->umin_value > val)
12717 			return 1;
12718 		else if (reg->umax_value <= val)
12719 			return 0;
12720 		break;
12721 	case BPF_JSGT:
12722 		if (reg->smin_value > sval)
12723 			return 1;
12724 		else if (reg->smax_value <= sval)
12725 			return 0;
12726 		break;
12727 	case BPF_JLT:
12728 		if (reg->umax_value < val)
12729 			return 1;
12730 		else if (reg->umin_value >= val)
12731 			return 0;
12732 		break;
12733 	case BPF_JSLT:
12734 		if (reg->smax_value < sval)
12735 			return 1;
12736 		else if (reg->smin_value >= sval)
12737 			return 0;
12738 		break;
12739 	case BPF_JGE:
12740 		if (reg->umin_value >= val)
12741 			return 1;
12742 		else if (reg->umax_value < val)
12743 			return 0;
12744 		break;
12745 	case BPF_JSGE:
12746 		if (reg->smin_value >= sval)
12747 			return 1;
12748 		else if (reg->smax_value < sval)
12749 			return 0;
12750 		break;
12751 	case BPF_JLE:
12752 		if (reg->umax_value <= val)
12753 			return 1;
12754 		else if (reg->umin_value > val)
12755 			return 0;
12756 		break;
12757 	case BPF_JSLE:
12758 		if (reg->smax_value <= sval)
12759 			return 1;
12760 		else if (reg->smin_value > sval)
12761 			return 0;
12762 		break;
12763 	}
12764 
12765 	return -1;
12766 }
12767 
12768 /* compute branch direction of the expression "if (reg opcode val) goto target;"
12769  * and return:
12770  *  1 - branch will be taken and "goto target" will be executed
12771  *  0 - branch will not be taken and fall-through to next insn
12772  * -1 - unknown. Example: "if (reg < 5)" is unknown when the register's
12773  *      value range is [0,10]
12774  */
12775 static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode,
12776 			   bool is_jmp32)
12777 {
12778 	if (__is_pointer_value(false, reg)) {
12779 		if (!reg_type_not_null(reg->type))
12780 			return -1;
12781 
12782 		/* If pointer is valid tests against zero will fail so we can
12783 		 * use this to direct branch taken.
12784 		 */
12785 		if (val != 0)
12786 			return -1;
12787 
12788 		switch (opcode) {
12789 		case BPF_JEQ:
12790 			return 0;
12791 		case BPF_JNE:
12792 			return 1;
12793 		default:
12794 			return -1;
12795 		}
12796 	}
12797 
12798 	if (is_jmp32)
12799 		return is_branch32_taken(reg, val, opcode);
12800 	return is_branch64_taken(reg, val, opcode);
12801 }
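/* A few illustrative evaluations of the helpers above (64-bit case):
 *   reg in [6, 10] with "if (reg > 5)": umin_value 6  > 5  -> returns 1
 *   reg in [0, 3]  with "if (reg > 5)": umax_value 3 <= 5  -> returns 0
 *   reg in [0, 10] with "if (reg < 5)": neither bound decides -> returns -1
 */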
12802 
12803 static int flip_opcode(u32 opcode)
12804 {
12805 	/* How can we transform "a <op> b" into "b <op> a"? */
12806 	static const u8 opcode_flip[16] = {
12807 		/* these stay the same */
12808 		[BPF_JEQ  >> 4] = BPF_JEQ,
12809 		[BPF_JNE  >> 4] = BPF_JNE,
12810 		[BPF_JSET >> 4] = BPF_JSET,
12811 		/* these swap "lesser" and "greater" (L and G in the opcodes) */
12812 		[BPF_JGE  >> 4] = BPF_JLE,
12813 		[BPF_JGT  >> 4] = BPF_JLT,
12814 		[BPF_JLE  >> 4] = BPF_JGE,
12815 		[BPF_JLT  >> 4] = BPF_JGT,
12816 		[BPF_JSGE >> 4] = BPF_JSLE,
12817 		[BPF_JSGT >> 4] = BPF_JSLT,
12818 		[BPF_JSLE >> 4] = BPF_JSGE,
12819 		[BPF_JSLT >> 4] = BPF_JSGT
12820 	};
12821 	return opcode_flip[opcode >> 4];
12822 }
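/* For example, flip_opcode(BPF_JGT) returns BPF_JLT, so a test written as
 * "pkt_end > r2" can be evaluated as "r2 < pkt_end" once the operands are
 * swapped, which is what is_pkt_ptr_branch_taken() below relies on.
 */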
12823 
12824 static int is_pkt_ptr_branch_taken(struct bpf_reg_state *dst_reg,
12825 				   struct bpf_reg_state *src_reg,
12826 				   u8 opcode)
12827 {
12828 	struct bpf_reg_state *pkt;
12829 
12830 	if (src_reg->type == PTR_TO_PACKET_END) {
12831 		pkt = dst_reg;
12832 	} else if (dst_reg->type == PTR_TO_PACKET_END) {
12833 		pkt = src_reg;
12834 		opcode = flip_opcode(opcode);
12835 	} else {
12836 		return -1;
12837 	}
12838 
12839 	if (pkt->range >= 0)
12840 		return -1;
12841 
12842 	switch (opcode) {
12843 	case BPF_JLE:
12844 		/* pkt <= pkt_end */
12845 		fallthrough;
12846 	case BPF_JGT:
12847 		/* pkt > pkt_end */
12848 		if (pkt->range == BEYOND_PKT_END)
12849 			/* pkt has at least one extra byte beyond pkt_end */
12850 			return opcode == BPF_JGT;
12851 		break;
12852 	case BPF_JLT:
12853 		/* pkt < pkt_end */
12854 		fallthrough;
12855 	case BPF_JGE:
12856 		/* pkt >= pkt_end */
12857 		if (pkt->range == BEYOND_PKT_END || pkt->range == AT_PKT_END)
12858 			return opcode == BPF_JGE;
12859 		break;
12860 	}
12861 	return -1;
12862 }
12863 
12864 /* Adjusts the register min/max values in the case that the dst_reg is the
12865  * variable register that we are working on, and src_reg is a constant or we're
12866  * simply doing a BPF_K check.
12867  * In JEQ/JNE cases we also adjust the var_off values.
12868  */
12869 static void reg_set_min_max(struct bpf_reg_state *true_reg,
12870 			    struct bpf_reg_state *false_reg,
12871 			    u64 val, u32 val32,
12872 			    u8 opcode, bool is_jmp32)
12873 {
12874 	struct tnum false_32off = tnum_subreg(false_reg->var_off);
12875 	struct tnum false_64off = false_reg->var_off;
12876 	struct tnum true_32off = tnum_subreg(true_reg->var_off);
12877 	struct tnum true_64off = true_reg->var_off;
12878 	s64 sval = (s64)val;
12879 	s32 sval32 = (s32)val32;
12880 
12881 	/* If the dst_reg is a pointer, we can't learn anything about its
12882 	 * variable offset from the compare (unless src_reg were a pointer into
12883 	 * the same object, but we don't bother with that).
12884 	 * Since false_reg and true_reg have the same type by construction, we
12885 	 * only need to check one of them for pointerness.
12886 	 */
12887 	if (__is_pointer_value(false, false_reg))
12888 		return;
12889 
12890 	switch (opcode) {
12891 	/* JEQ/JNE comparison doesn't change the register equivalence.
12892 	 *
12893 	 * r1 = r2;
12894 	 * if (r1 == 42) goto label;
12895 	 * ...
12896 	 * label: // here both r1 and r2 are known to be 42.
12897 	 *
12898 	 * Hence when marking a register as known, preserve its ID.
12899 	 */
12900 	case BPF_JEQ:
12901 		if (is_jmp32) {
12902 			__mark_reg32_known(true_reg, val32);
12903 			true_32off = tnum_subreg(true_reg->var_off);
12904 		} else {
12905 			___mark_reg_known(true_reg, val);
12906 			true_64off = true_reg->var_off;
12907 		}
12908 		break;
12909 	case BPF_JNE:
12910 		if (is_jmp32) {
12911 			__mark_reg32_known(false_reg, val32);
12912 			false_32off = tnum_subreg(false_reg->var_off);
12913 		} else {
12914 			___mark_reg_known(false_reg, val);
12915 			false_64off = false_reg->var_off;
12916 		}
12917 		break;
12918 	case BPF_JSET:
12919 		if (is_jmp32) {
12920 			false_32off = tnum_and(false_32off, tnum_const(~val32));
12921 			if (is_power_of_2(val32))
12922 				true_32off = tnum_or(true_32off,
12923 						     tnum_const(val32));
12924 		} else {
12925 			false_64off = tnum_and(false_64off, tnum_const(~val));
12926 			if (is_power_of_2(val))
12927 				true_64off = tnum_or(true_64off,
12928 						     tnum_const(val));
12929 		}
12930 		break;
12931 	case BPF_JGE:
12932 	case BPF_JGT:
12933 	{
12934 		if (is_jmp32) {
12935 			u32 false_umax = opcode == BPF_JGT ? val32  : val32 - 1;
12936 			u32 true_umin = opcode == BPF_JGT ? val32 + 1 : val32;
12937 
12938 			false_reg->u32_max_value = min(false_reg->u32_max_value,
12939 						       false_umax);
12940 			true_reg->u32_min_value = max(true_reg->u32_min_value,
12941 						      true_umin);
12942 		} else {
12943 			u64 false_umax = opcode == BPF_JGT ? val    : val - 1;
12944 			u64 true_umin = opcode == BPF_JGT ? val + 1 : val;
12945 
12946 			false_reg->umax_value = min(false_reg->umax_value, false_umax);
12947 			true_reg->umin_value = max(true_reg->umin_value, true_umin);
12948 		}
12949 		break;
12950 	}
12951 	case BPF_JSGE:
12952 	case BPF_JSGT:
12953 	{
12954 		if (is_jmp32) {
12955 			s32 false_smax = opcode == BPF_JSGT ? sval32    : sval32 - 1;
12956 			s32 true_smin = opcode == BPF_JSGT ? sval32 + 1 : sval32;
12957 
12958 			false_reg->s32_max_value = min(false_reg->s32_max_value, false_smax);
12959 			true_reg->s32_min_value = max(true_reg->s32_min_value, true_smin);
12960 		} else {
12961 			s64 false_smax = opcode == BPF_JSGT ? sval    : sval - 1;
12962 			s64 true_smin = opcode == BPF_JSGT ? sval + 1 : sval;
12963 
12964 			false_reg->smax_value = min(false_reg->smax_value, false_smax);
12965 			true_reg->smin_value = max(true_reg->smin_value, true_smin);
12966 		}
12967 		break;
12968 	}
12969 	case BPF_JLE:
12970 	case BPF_JLT:
12971 	{
12972 		if (is_jmp32) {
12973 			u32 false_umin = opcode == BPF_JLT ? val32  : val32 + 1;
12974 			u32 true_umax = opcode == BPF_JLT ? val32 - 1 : val32;
12975 
12976 			false_reg->u32_min_value = max(false_reg->u32_min_value,
12977 						       false_umin);
12978 			true_reg->u32_max_value = min(true_reg->u32_max_value,
12979 						      true_umax);
12980 		} else {
12981 			u64 false_umin = opcode == BPF_JLT ? val    : val + 1;
12982 			u64 true_umax = opcode == BPF_JLT ? val - 1 : val;
12983 
12984 			false_reg->umin_value = max(false_reg->umin_value, false_umin);
12985 			true_reg->umax_value = min(true_reg->umax_value, true_umax);
12986 		}
12987 		break;
12988 	}
12989 	case BPF_JSLE:
12990 	case BPF_JSLT:
12991 	{
12992 		if (is_jmp32) {
12993 			s32 false_smin = opcode == BPF_JSLT ? sval32    : sval32 + 1;
12994 			s32 true_smax = opcode == BPF_JSLT ? sval32 - 1 : sval32;
12995 
12996 			false_reg->s32_min_value = max(false_reg->s32_min_value, false_smin);
12997 			true_reg->s32_max_value = min(true_reg->s32_max_value, true_smax);
12998 		} else {
12999 			s64 false_smin = opcode == BPF_JSLT ? sval    : sval + 1;
13000 			s64 true_smax = opcode == BPF_JSLT ? sval - 1 : sval;
13001 
13002 			false_reg->smin_value = max(false_reg->smin_value, false_smin);
13003 			true_reg->smax_value = min(true_reg->smax_value, true_smax);
13004 		}
13005 		break;
13006 	}
13007 	default:
13008 		return;
13009 	}
13010 
13011 	if (is_jmp32) {
13012 		false_reg->var_off = tnum_or(tnum_clear_subreg(false_64off),
13013 					     tnum_subreg(false_32off));
13014 		true_reg->var_off = tnum_or(tnum_clear_subreg(true_64off),
13015 					    tnum_subreg(true_32off));
13016 		__reg_combine_32_into_64(false_reg);
13017 		__reg_combine_32_into_64(true_reg);
13018 	} else {
13019 		false_reg->var_off = false_64off;
13020 		true_reg->var_off = true_64off;
13021 		__reg_combine_64_into_32(false_reg);
13022 		__reg_combine_64_into_32(true_reg);
13023 	}
13024 }
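/* Worked example (illustrative): for "if r1 > 7 goto L" against the
 * constant 7 with r1 an unbounded scalar, the BPF_JGT case above leaves
 * the fall-through (false) branch with umax_value = 7 and the taken
 * (true) branch with umin_value = 8; the 32-bit sub-register bounds are
 * then kept in sync via __reg_combine_64_into_32().
 */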
13025 
13026 /* Same as above, but for the case that dst_reg holds a constant and src_reg is
13027  * the variable reg.
13028  */
13029 static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
13030 				struct bpf_reg_state *false_reg,
13031 				u64 val, u32 val32,
13032 				u8 opcode, bool is_jmp32)
13033 {
13034 	opcode = flip_opcode(opcode);
13035 	/* This uses zero as "not present in table"; luckily the zero opcode,
13036 	 * BPF_JA, can't get here.
13037 	 */
13038 	if (opcode)
13039 		reg_set_min_max(true_reg, false_reg, val, val32, opcode, is_jmp32);
13040 }
13041 
13042 /* Regs are known to be equal, so intersect their min/max/var_off */
13043 static void __reg_combine_min_max(struct bpf_reg_state *src_reg,
13044 				  struct bpf_reg_state *dst_reg)
13045 {
13046 	src_reg->umin_value = dst_reg->umin_value = max(src_reg->umin_value,
13047 							dst_reg->umin_value);
13048 	src_reg->umax_value = dst_reg->umax_value = min(src_reg->umax_value,
13049 							dst_reg->umax_value);
13050 	src_reg->smin_value = dst_reg->smin_value = max(src_reg->smin_value,
13051 							dst_reg->smin_value);
13052 	src_reg->smax_value = dst_reg->smax_value = min(src_reg->smax_value,
13053 							dst_reg->smax_value);
13054 	src_reg->var_off = dst_reg->var_off = tnum_intersect(src_reg->var_off,
13055 							     dst_reg->var_off);
13056 	reg_bounds_sync(src_reg);
13057 	reg_bounds_sync(dst_reg);
13058 }
13059 
13060 static void reg_combine_min_max(struct bpf_reg_state *true_src,
13061 				struct bpf_reg_state *true_dst,
13062 				struct bpf_reg_state *false_src,
13063 				struct bpf_reg_state *false_dst,
13064 				u8 opcode)
13065 {
13066 	switch (opcode) {
13067 	case BPF_JEQ:
13068 		__reg_combine_min_max(true_src, true_dst);
13069 		break;
13070 	case BPF_JNE:
13071 		__reg_combine_min_max(false_src, false_dst);
13072 		break;
13073 	}
13074 }
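/* Illustrative example: with r1 in [0, 100] and r2 in [50, 200],
 * "if r1 == r2 goto L" lets the taken branch intersect both ranges, so
 * r1 and r2 are each known to be in [50, 100] there; for "!=" the same
 * intersection applies to the fall-through branch instead, since that is
 * where the two registers are known to be equal.
 */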
13075 
13076 static void mark_ptr_or_null_reg(struct bpf_func_state *state,
13077 				 struct bpf_reg_state *reg, u32 id,
13078 				 bool is_null)
13079 {
13080 	if (type_may_be_null(reg->type) && reg->id == id &&
13081 	    (is_rcu_reg(reg) || !WARN_ON_ONCE(!reg->id))) {
13082 		/* Old offset (both fixed and variable parts) should have been
13083 		 * known-zero, because we don't allow pointer arithmetic on
13084 		 * pointers that might be NULL. If we see this happening, don't
13085 		 * convert the register.
13086 		 *
13087 		 * But some helpers that return local kptrs advance the offset
13088 		 * of the returned pointer. In those cases, it is fine to
13089 		 * expect a nonzero reg->off.
13090 		 */
13091 		if (WARN_ON_ONCE(reg->smin_value || reg->smax_value || !tnum_equals_const(reg->var_off, 0)))
13092 			return;
13093 		if (!(type_is_ptr_alloc_obj(reg->type) || type_is_non_owning_ref(reg->type)) &&
13094 		    WARN_ON_ONCE(reg->off))
13095 			return;
13096 
13097 		if (is_null) {
13098 			reg->type = SCALAR_VALUE;
13099 			/* We don't need id and ref_obj_id from this point
13100 			 * onwards anymore, so reset them to give state
13101 			 * pruning a chance to take effect.
13102 			 */
13103 			reg->id = 0;
13104 			reg->ref_obj_id = 0;
13105 
13106 			return;
13107 		}
13108 
13109 		mark_ptr_not_null_reg(reg);
13110 
13111 		if (!reg_may_point_to_spin_lock(reg)) {
13112 			/* For not-NULL ptr, reg->ref_obj_id will be reset
13113 			 * in release_reference().
13114 			 *
13115 			 * reg->id is still used by the spin_lock ptr. For
13116 			 * any other ptr type, reg->id can be reset.
13117 			 */
13118 			reg->id = 0;
13119 		}
13120 	}
13121 }
13122 
13123 /* The logic is similar to find_good_pkt_pointers(), both could eventually
13124  * be folded together at some point.
13125  */
13126 static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno,
13127 				  bool is_null)
13128 {
13129 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
13130 	struct bpf_reg_state *regs = state->regs, *reg;
13131 	u32 ref_obj_id = regs[regno].ref_obj_id;
13132 	u32 id = regs[regno].id;
13133 
13134 	if (ref_obj_id && ref_obj_id == id && is_null)
13135 		/* regs[regno] is in the " == NULL" branch.
13136 		 * No one could have freed the reference state before
13137 		 * doing the NULL check.
13138 		 */
13139 		WARN_ON_ONCE(release_reference_state(state, id));
13140 
13141 	bpf_for_each_reg_in_vstate(vstate, state, reg, ({
13142 		mark_ptr_or_null_reg(state, reg, id, is_null);
13143 	}));
13144 }
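/* Typical pattern handled above (illustrative C-like sketch):
 *
 *	v = bpf_map_lookup_elem(&map, &key);	// PTR_TO_MAP_VALUE_OR_NULL, id = N
 *	w = v;					// w shares id = N
 *	if (!v)					// JEQ v, 0
 *		return 0;			// v and w become SCALAR_VALUE here
 *	*w = 1;					// v and w are non-NULL PTR_TO_MAP_VALUE here
 *
 * Every register carrying the same id is switched together in each branch.
 */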
13145 
13146 static bool try_match_pkt_pointers(const struct bpf_insn *insn,
13147 				   struct bpf_reg_state *dst_reg,
13148 				   struct bpf_reg_state *src_reg,
13149 				   struct bpf_verifier_state *this_branch,
13150 				   struct bpf_verifier_state *other_branch)
13151 {
13152 	if (BPF_SRC(insn->code) != BPF_X)
13153 		return false;
13154 
13155 	/* Pointers are always 64-bit. */
13156 	if (BPF_CLASS(insn->code) == BPF_JMP32)
13157 		return false;
13158 
13159 	switch (BPF_OP(insn->code)) {
13160 	case BPF_JGT:
13161 		if ((dst_reg->type == PTR_TO_PACKET &&
13162 		     src_reg->type == PTR_TO_PACKET_END) ||
13163 		    (dst_reg->type == PTR_TO_PACKET_META &&
13164 		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
13165 			/* pkt_data' > pkt_end, pkt_meta' > pkt_data */
13166 			find_good_pkt_pointers(this_branch, dst_reg,
13167 					       dst_reg->type, false);
13168 			mark_pkt_end(other_branch, insn->dst_reg, true);
13169 		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
13170 			    src_reg->type == PTR_TO_PACKET) ||
13171 			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
13172 			    src_reg->type == PTR_TO_PACKET_META)) {
13173 			/* pkt_end > pkt_data', pkt_data > pkt_meta' */
13174 			find_good_pkt_pointers(other_branch, src_reg,
13175 					       src_reg->type, true);
13176 			mark_pkt_end(this_branch, insn->src_reg, false);
13177 		} else {
13178 			return false;
13179 		}
13180 		break;
13181 	case BPF_JLT:
13182 		if ((dst_reg->type == PTR_TO_PACKET &&
13183 		     src_reg->type == PTR_TO_PACKET_END) ||
13184 		    (dst_reg->type == PTR_TO_PACKET_META &&
13185 		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
13186 			/* pkt_data' < pkt_end, pkt_meta' < pkt_data */
13187 			find_good_pkt_pointers(other_branch, dst_reg,
13188 					       dst_reg->type, true);
13189 			mark_pkt_end(this_branch, insn->dst_reg, false);
13190 		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
13191 			    src_reg->type == PTR_TO_PACKET) ||
13192 			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
13193 			    src_reg->type == PTR_TO_PACKET_META)) {
13194 			/* pkt_end < pkt_data', pkt_data < pkt_meta' */
13195 			find_good_pkt_pointers(this_branch, src_reg,
13196 					       src_reg->type, false);
13197 			mark_pkt_end(other_branch, insn->src_reg, true);
13198 		} else {
13199 			return false;
13200 		}
13201 		break;
13202 	case BPF_JGE:
13203 		if ((dst_reg->type == PTR_TO_PACKET &&
13204 		     src_reg->type == PTR_TO_PACKET_END) ||
13205 		    (dst_reg->type == PTR_TO_PACKET_META &&
13206 		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
13207 			/* pkt_data' >= pkt_end, pkt_meta' >= pkt_data */
13208 			find_good_pkt_pointers(this_branch, dst_reg,
13209 					       dst_reg->type, true);
13210 			mark_pkt_end(other_branch, insn->dst_reg, false);
13211 		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
13212 			    src_reg->type == PTR_TO_PACKET) ||
13213 			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
13214 			    src_reg->type == PTR_TO_PACKET_META)) {
13215 			/* pkt_end >= pkt_data', pkt_data >= pkt_meta' */
13216 			find_good_pkt_pointers(other_branch, src_reg,
13217 					       src_reg->type, false);
13218 			mark_pkt_end(this_branch, insn->src_reg, true);
13219 		} else {
13220 			return false;
13221 		}
13222 		break;
13223 	case BPF_JLE:
13224 		if ((dst_reg->type == PTR_TO_PACKET &&
13225 		     src_reg->type == PTR_TO_PACKET_END) ||
13226 		    (dst_reg->type == PTR_TO_PACKET_META &&
13227 		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
13228 			/* pkt_data' <= pkt_end, pkt_meta' <= pkt_data */
13229 			find_good_pkt_pointers(other_branch, dst_reg,
13230 					       dst_reg->type, false);
13231 			mark_pkt_end(this_branch, insn->dst_reg, true);
13232 		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
13233 			    src_reg->type == PTR_TO_PACKET) ||
13234 			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
13235 			    src_reg->type == PTR_TO_PACKET_META)) {
13236 			/* pkt_end <= pkt_data', pkt_data <= pkt_meta' */
13237 			find_good_pkt_pointers(this_branch, src_reg,
13238 					       src_reg->type, true);
13239 			mark_pkt_end(other_branch, insn->src_reg, false);
13240 		} else {
13241 			return false;
13242 		}
13243 		break;
13244 	default:
13245 		return false;
13246 	}
13247 
13248 	return true;
13249 }
13250 
13251 static void find_equal_scalars(struct bpf_verifier_state *vstate,
13252 			       struct bpf_reg_state *known_reg)
13253 {
13254 	struct bpf_func_state *state;
13255 	struct bpf_reg_state *reg;
13256 
13257 	bpf_for_each_reg_in_vstate(vstate, state, reg, ({
13258 		if (reg->type == SCALAR_VALUE && reg->id == known_reg->id)
13259 			copy_register_state(reg, known_reg);
13260 	}));
13261 }
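/* Illustrative example: after "r2 = r1" both registers carry the same id
 * (assuming the move assigned one), so when a later "if r1 > 10 goto L"
 * narrows r1's bounds in a branch, copy_register_state() above propagates
 * the same known state to r2 in that branch as well.
 */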
13262 
13263 static int check_cond_jmp_op(struct bpf_verifier_env *env,
13264 			     struct bpf_insn *insn, int *insn_idx)
13265 {
13266 	struct bpf_verifier_state *this_branch = env->cur_state;
13267 	struct bpf_verifier_state *other_branch;
13268 	struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs;
13269 	struct bpf_reg_state *dst_reg, *other_branch_regs, *src_reg = NULL;
13270 	struct bpf_reg_state *eq_branch_regs;
13271 	u8 opcode = BPF_OP(insn->code);
13272 	bool is_jmp32;
13273 	int pred = -1;
13274 	int err;
13275 
13276 	/* Only conditional jumps are expected to reach here. */
13277 	if (opcode == BPF_JA || opcode > BPF_JSLE) {
13278 		verbose(env, "invalid BPF_JMP/JMP32 opcode %x\n", opcode);
13279 		return -EINVAL;
13280 	}
13281 
13282 	if (BPF_SRC(insn->code) == BPF_X) {
13283 		if (insn->imm != 0) {
13284 			verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
13285 			return -EINVAL;
13286 		}
13287 
13288 		/* check src1 operand */
13289 		err = check_reg_arg(env, insn->src_reg, SRC_OP);
13290 		if (err)
13291 			return err;
13292 
13293 		if (is_pointer_value(env, insn->src_reg)) {
13294 			verbose(env, "R%d pointer comparison prohibited\n",
13295 				insn->src_reg);
13296 			return -EACCES;
13297 		}
13298 		src_reg = &regs[insn->src_reg];
13299 	} else {
13300 		if (insn->src_reg != BPF_REG_0) {
13301 			verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
13302 			return -EINVAL;
13303 		}
13304 	}
13305 
13306 	/* check src2 operand */
13307 	err = check_reg_arg(env, insn->dst_reg, SRC_OP);
13308 	if (err)
13309 		return err;
13310 
13311 	dst_reg = &regs[insn->dst_reg];
13312 	is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
13313 
13314 	if (BPF_SRC(insn->code) == BPF_K) {
13315 		pred = is_branch_taken(dst_reg, insn->imm, opcode, is_jmp32);
13316 	} else if (src_reg->type == SCALAR_VALUE &&
13317 		   is_jmp32 && tnum_is_const(tnum_subreg(src_reg->var_off))) {
13318 		pred = is_branch_taken(dst_reg,
13319 				       tnum_subreg(src_reg->var_off).value,
13320 				       opcode,
13321 				       is_jmp32);
13322 	} else if (src_reg->type == SCALAR_VALUE &&
13323 		   !is_jmp32 && tnum_is_const(src_reg->var_off)) {
13324 		pred = is_branch_taken(dst_reg,
13325 				       src_reg->var_off.value,
13326 				       opcode,
13327 				       is_jmp32);
13328 	} else if (dst_reg->type == SCALAR_VALUE &&
13329 		   is_jmp32 && tnum_is_const(tnum_subreg(dst_reg->var_off))) {
13330 		pred = is_branch_taken(src_reg,
13331 				       tnum_subreg(dst_reg->var_off).value,
13332 				       flip_opcode(opcode),
13333 				       is_jmp32);
13334 	} else if (dst_reg->type == SCALAR_VALUE &&
13335 		   !is_jmp32 && tnum_is_const(dst_reg->var_off)) {
13336 		pred = is_branch_taken(src_reg,
13337 				       dst_reg->var_off.value,
13338 				       flip_opcode(opcode),
13339 				       is_jmp32);
13340 	} else if (reg_is_pkt_pointer_any(dst_reg) &&
13341 		   reg_is_pkt_pointer_any(src_reg) &&
13342 		   !is_jmp32) {
13343 		pred = is_pkt_ptr_branch_taken(dst_reg, src_reg, opcode);
13344 	}
13345 
13346 	if (pred >= 0) {
13347 		/* If we get here with a dst_reg pointer type it is because
13348 		 * the is_branch_taken() call above special-cased the 0 comparison.
13349 		 */
13350 		if (!__is_pointer_value(false, dst_reg))
13351 			err = mark_chain_precision(env, insn->dst_reg);
13352 		if (BPF_SRC(insn->code) == BPF_X && !err &&
13353 		    !__is_pointer_value(false, src_reg))
13354 			err = mark_chain_precision(env, insn->src_reg);
13355 		if (err)
13356 			return err;
13357 	}
13358 
13359 	if (pred == 1) {
13360 		/* Only follow the goto, ignore fall-through. If needed, push
13361 		 * the fall-through branch for simulation under speculative
13362 		 * execution.
13363 		 */
13364 		if (!env->bypass_spec_v1 &&
13365 		    !sanitize_speculative_path(env, insn, *insn_idx + 1,
13366 					       *insn_idx))
13367 			return -EFAULT;
13368 		*insn_idx += insn->off;
13369 		return 0;
13370 	} else if (pred == 0) {
13371 		/* Only follow the fall-through branch, since that's where the
13372 		 * program will go. If needed, push the goto branch for
13373 		 * simulation under speculative execution.
13374 		 */
13375 		if (!env->bypass_spec_v1 &&
13376 		    !sanitize_speculative_path(env, insn,
13377 					       *insn_idx + insn->off + 1,
13378 					       *insn_idx))
13379 			return -EFAULT;
13380 		return 0;
13381 	}
13382 
13383 	other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx,
13384 				  false);
13385 	if (!other_branch)
13386 		return -EFAULT;
13387 	other_branch_regs = other_branch->frame[other_branch->curframe]->regs;
13388 
13389 	/* detect if we are comparing against a constant value so we can adjust
13390 	 * our min/max values for our dst register.
13391 	 * this is only legit if both are scalars (or pointers to the same
13392 	 * object, I suppose, see the PTR_MAYBE_NULL related if block below),
13393 	 * because otherwise the different base pointers mean the offsets aren't
13394 	 * comparable.
13395 	 */
13396 	if (BPF_SRC(insn->code) == BPF_X) {
13397 		struct bpf_reg_state *src_reg = &regs[insn->src_reg];
13398 
13399 		if (dst_reg->type == SCALAR_VALUE &&
13400 		    src_reg->type == SCALAR_VALUE) {
13401 			if (tnum_is_const(src_reg->var_off) ||
13402 			    (is_jmp32 &&
13403 			     tnum_is_const(tnum_subreg(src_reg->var_off))))
13404 				reg_set_min_max(&other_branch_regs[insn->dst_reg],
13405 						dst_reg,
13406 						src_reg->var_off.value,
13407 						tnum_subreg(src_reg->var_off).value,
13408 						opcode, is_jmp32);
13409 			else if (tnum_is_const(dst_reg->var_off) ||
13410 				 (is_jmp32 &&
13411 				  tnum_is_const(tnum_subreg(dst_reg->var_off))))
13412 				reg_set_min_max_inv(&other_branch_regs[insn->src_reg],
13413 						    src_reg,
13414 						    dst_reg->var_off.value,
13415 						    tnum_subreg(dst_reg->var_off).value,
13416 						    opcode, is_jmp32);
13417 			else if (!is_jmp32 &&
13418 				 (opcode == BPF_JEQ || opcode == BPF_JNE))
13419 				/* Comparing for equality, we can combine knowledge */
13420 				reg_combine_min_max(&other_branch_regs[insn->src_reg],
13421 						    &other_branch_regs[insn->dst_reg],
13422 						    src_reg, dst_reg, opcode);
13423 			if (src_reg->id &&
13424 			    !WARN_ON_ONCE(src_reg->id != other_branch_regs[insn->src_reg].id)) {
13425 				find_equal_scalars(this_branch, src_reg);
13426 				find_equal_scalars(other_branch, &other_branch_regs[insn->src_reg]);
13427 			}
13428 
13429 		}
13430 	} else if (dst_reg->type == SCALAR_VALUE) {
13431 		reg_set_min_max(&other_branch_regs[insn->dst_reg],
13432 					dst_reg, insn->imm, (u32)insn->imm,
13433 					opcode, is_jmp32);
13434 	}
13435 
13436 	if (dst_reg->type == SCALAR_VALUE && dst_reg->id &&
13437 	    !WARN_ON_ONCE(dst_reg->id != other_branch_regs[insn->dst_reg].id)) {
13438 		find_equal_scalars(this_branch, dst_reg);
13439 		find_equal_scalars(other_branch, &other_branch_regs[insn->dst_reg]);
13440 	}
13441 
13442 	/* if one pointer register is compared to another pointer
13443 	 * register check if PTR_MAYBE_NULL could be lifted.
13444 	 * E.g. register A - maybe null
13445 	 *      register B - not null
13446 	 * for JNE A, B, ... - A is not null in the false branch;
13447 	 * for JEQ A, B, ... - A is not null in the true branch.
13448 	 *
13449 	 * A PTR_TO_BTF_ID points to a kernel struct that does not
13450 	 * need to be null checked by the BPF program, i.e., it could
13451 	 * be null even without the PTR_MAYBE_NULL marking, so only
13452 	 * propagate nullness when neither reg is of that type.
13453 	 */
13454 	if (!is_jmp32 && BPF_SRC(insn->code) == BPF_X &&
13455 	    __is_pointer_value(false, src_reg) && __is_pointer_value(false, dst_reg) &&
13456 	    type_may_be_null(src_reg->type) != type_may_be_null(dst_reg->type) &&
13457 	    base_type(src_reg->type) != PTR_TO_BTF_ID &&
13458 	    base_type(dst_reg->type) != PTR_TO_BTF_ID) {
13459 		eq_branch_regs = NULL;
13460 		switch (opcode) {
13461 		case BPF_JEQ:
13462 			eq_branch_regs = other_branch_regs;
13463 			break;
13464 		case BPF_JNE:
13465 			eq_branch_regs = regs;
13466 			break;
13467 		default:
13468 			/* do nothing */
13469 			break;
13470 		}
13471 		if (eq_branch_regs) {
13472 			if (type_may_be_null(src_reg->type))
13473 				mark_ptr_not_null_reg(&eq_branch_regs[insn->src_reg]);
13474 			else
13475 				mark_ptr_not_null_reg(&eq_branch_regs[insn->dst_reg]);
13476 		}
13477 	}
13478 
13479 	/* detect if R == 0 where R is returned from bpf_map_lookup_elem().
13480 	 * NOTE: the optimizations below are related to pointer comparison,
13481 	 *       which will never be JMP32.
13482 	 */
13483 	if (!is_jmp32 && BPF_SRC(insn->code) == BPF_K &&
13484 	    insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) &&
13485 	    type_may_be_null(dst_reg->type)) {
13486 		/* Mark all identical registers in each branch as either
13487 		 * safe or unknown depending on the R == 0 or R != 0 conditional.
13488 		 */
13489 		mark_ptr_or_null_regs(this_branch, insn->dst_reg,
13490 				      opcode == BPF_JNE);
13491 		mark_ptr_or_null_regs(other_branch, insn->dst_reg,
13492 				      opcode == BPF_JEQ);
13493 	} else if (!try_match_pkt_pointers(insn, dst_reg, &regs[insn->src_reg],
13494 					   this_branch, other_branch) &&
13495 		   is_pointer_value(env, insn->dst_reg)) {
13496 		verbose(env, "R%d pointer comparison prohibited\n",
13497 			insn->dst_reg);
13498 		return -EACCES;
13499 	}
13500 	if (env->log.level & BPF_LOG_LEVEL)
13501 		print_insn_state(env, this_branch->frame[this_branch->curframe]);
13502 	return 0;
13503 }
13504 
13505 /* verify BPF_LD_IMM64 instruction */
13506 static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
13507 {
13508 	struct bpf_insn_aux_data *aux = cur_aux(env);
13509 	struct bpf_reg_state *regs = cur_regs(env);
13510 	struct bpf_reg_state *dst_reg;
13511 	struct bpf_map *map;
13512 	int err;
13513 
13514 	if (BPF_SIZE(insn->code) != BPF_DW) {
13515 		verbose(env, "invalid BPF_LD_IMM insn\n");
13516 		return -EINVAL;
13517 	}
13518 	if (insn->off != 0) {
13519 		verbose(env, "BPF_LD_IMM64 uses reserved fields\n");
13520 		return -EINVAL;
13521 	}
13522 
13523 	err = check_reg_arg(env, insn->dst_reg, DST_OP);
13524 	if (err)
13525 		return err;
13526 
13527 	dst_reg = &regs[insn->dst_reg];
13528 	if (insn->src_reg == 0) {
13529 		u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
13530 
13531 		dst_reg->type = SCALAR_VALUE;
13532 		__mark_reg_known(&regs[insn->dst_reg], imm);
13533 		return 0;
13534 	}
13535 
13536 	/* All special src_reg cases are listed below. From this point onwards
13537 	 * we either succeed and assign a corresponding dst_reg->type after
13538 	 * zeroing the offset, or fail and reject the program.
13539 	 */
13540 	mark_reg_known_zero(env, regs, insn->dst_reg);
13541 
13542 	if (insn->src_reg == BPF_PSEUDO_BTF_ID) {
13543 		dst_reg->type = aux->btf_var.reg_type;
13544 		switch (base_type(dst_reg->type)) {
13545 		case PTR_TO_MEM:
13546 			dst_reg->mem_size = aux->btf_var.mem_size;
13547 			break;
13548 		case PTR_TO_BTF_ID:
13549 			dst_reg->btf = aux->btf_var.btf;
13550 			dst_reg->btf_id = aux->btf_var.btf_id;
13551 			break;
13552 		default:
13553 			verbose(env, "bpf verifier is misconfigured\n");
13554 			return -EFAULT;
13555 		}
13556 		return 0;
13557 	}
13558 
13559 	if (insn->src_reg == BPF_PSEUDO_FUNC) {
13560 		struct bpf_prog_aux *aux = env->prog->aux;
13561 		u32 subprogno = find_subprog(env,
13562 					     env->insn_idx + insn->imm + 1);
13563 
13564 		if (!aux->func_info) {
13565 			verbose(env, "missing btf func_info\n");
13566 			return -EINVAL;
13567 		}
13568 		if (aux->func_info_aux[subprogno].linkage != BTF_FUNC_STATIC) {
13569 			verbose(env, "callback function not static\n");
13570 			return -EINVAL;
13571 		}
13572 
13573 		dst_reg->type = PTR_TO_FUNC;
13574 		dst_reg->subprogno = subprogno;
13575 		return 0;
13576 	}
13577 
13578 	map = env->used_maps[aux->map_index];
13579 	dst_reg->map_ptr = map;
13580 
13581 	if (insn->src_reg == BPF_PSEUDO_MAP_VALUE ||
13582 	    insn->src_reg == BPF_PSEUDO_MAP_IDX_VALUE) {
13583 		dst_reg->type = PTR_TO_MAP_VALUE;
13584 		dst_reg->off = aux->map_off;
13585 		WARN_ON_ONCE(map->max_entries != 1);
13586 		/* We want reg->id to be same (0) as map_value is not distinct */
13587 	} else if (insn->src_reg == BPF_PSEUDO_MAP_FD ||
13588 		   insn->src_reg == BPF_PSEUDO_MAP_IDX) {
13589 		dst_reg->type = CONST_PTR_TO_MAP;
13590 	} else {
13591 		verbose(env, "bpf verifier is misconfigured\n");
13592 		return -EINVAL;
13593 	}
13594 
13595 	return 0;
13596 }
13597 
13598 static bool may_access_skb(enum bpf_prog_type type)
13599 {
13600 	switch (type) {
13601 	case BPF_PROG_TYPE_SOCKET_FILTER:
13602 	case BPF_PROG_TYPE_SCHED_CLS:
13603 	case BPF_PROG_TYPE_SCHED_ACT:
13604 		return true;
13605 	default:
13606 		return false;
13607 	}
13608 }
13609 
13610 /* verify safety of LD_ABS|LD_IND instructions:
13611  * - they can only appear in the programs where ctx == skb
13612  * - since they are wrappers of function calls, they scratch R1-R5 registers,
13613  *   preserve R6-R9, and store return value into R0
13614  *
13615  * Implicit input:
13616  *   ctx == skb == R6 == CTX
13617  *
13618  * Explicit input:
13619  *   SRC == any register
13620  *   IMM == 32-bit immediate
13621  *
13622  * Output:
13623  *   R0 - 8/16/32-bit skb data converted to cpu endianness
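 *
 * Illustrative use (a classic socket-filter sequence, assuming the standard
 * macros from include/linux/filter.h):
 *
 *   BPF_LD_ABS(BPF_H, 12)                        R0 = ntohs(*(u16 *)(skb->data + 12))
 *   BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x0800, 1)   branch if not ETH_P_IP
 *
 * The underlying helper call clobbers R1-R5, so R6 must still hold the skb
 * context when these instructions execute.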
13624  */
13625 static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
13626 {
13627 	struct bpf_reg_state *regs = cur_regs(env);
13628 	static const int ctx_reg = BPF_REG_6;
13629 	u8 mode = BPF_MODE(insn->code);
13630 	int i, err;
13631 
13632 	if (!may_access_skb(resolve_prog_type(env->prog))) {
13633 		verbose(env, "BPF_LD_[ABS|IND] instructions not allowed for this program type\n");
13634 		return -EINVAL;
13635 	}
13636 
13637 	if (!env->ops->gen_ld_abs) {
13638 		verbose(env, "bpf verifier is misconfigured\n");
13639 		return -EINVAL;
13640 	}
13641 
13642 	if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
13643 	    BPF_SIZE(insn->code) == BPF_DW ||
13644 	    (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
13645 		verbose(env, "BPF_LD_[ABS|IND] uses reserved fields\n");
13646 		return -EINVAL;
13647 	}
13648 
13649 	/* check whether implicit source operand (register R6) is readable */
13650 	err = check_reg_arg(env, ctx_reg, SRC_OP);
13651 	if (err)
13652 		return err;
13653 
13654 	/* Disallow usage of BPF_LD_[ABS|IND] with reference tracking, as
13655 	 * gen_ld_abs() may terminate the program at runtime, leading to
13656 	 * reference leak.
13657 	 */
13658 	err = check_reference_leak(env);
13659 	if (err) {
13660 		verbose(env, "BPF_LD_[ABS|IND] cannot be mixed with socket references\n");
13661 		return err;
13662 	}
13663 
13664 	if (env->cur_state->active_lock.ptr) {
13665 		verbose(env, "BPF_LD_[ABS|IND] cannot be used inside bpf_spin_lock-ed region\n");
13666 		return -EINVAL;
13667 	}
13668 
13669 	if (env->cur_state->active_rcu_lock) {
13670 		verbose(env, "BPF_LD_[ABS|IND] cannot be used inside bpf_rcu_read_lock-ed region\n");
13671 		return -EINVAL;
13672 	}
13673 
13674 	if (regs[ctx_reg].type != PTR_TO_CTX) {
13675 		verbose(env,
13676 			"at the time of BPF_LD_ABS|IND R6 != pointer to skb\n");
13677 		return -EINVAL;
13678 	}
13679 
13680 	if (mode == BPF_IND) {
13681 		/* check explicit source operand */
13682 		err = check_reg_arg(env, insn->src_reg, SRC_OP);
13683 		if (err)
13684 			return err;
13685 	}
13686 
13687 	err = check_ptr_off_reg(env, &regs[ctx_reg], ctx_reg);
13688 	if (err < 0)
13689 		return err;
13690 
13691 	/* reset caller saved regs to unreadable */
13692 	for (i = 0; i < CALLER_SAVED_REGS; i++) {
13693 		mark_reg_not_init(env, regs, caller_saved[i]);
13694 		check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
13695 	}
13696 
13697 	/* mark destination R0 register as readable, since it contains
13698 	 * the value fetched from the packet.
13699 	 * Already marked as written above.
13700 	 */
13701 	mark_reg_unknown(env, regs, BPF_REG_0);
13702 	/* ld_abs loads up to 32 bits of skb data. */
13703 	regs[BPF_REG_0].subreg_def = env->insn_idx + 1;
13704 	return 0;
13705 }
13706 
13707 static int check_return_code(struct bpf_verifier_env *env)
13708 {
13709 	struct tnum enforce_attach_type_range = tnum_unknown;
13710 	const struct bpf_prog *prog = env->prog;
13711 	struct bpf_reg_state *reg;
13712 	struct tnum range = tnum_range(0, 1);
13713 	enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
13714 	int err;
13715 	struct bpf_func_state *frame = env->cur_state->frame[0];
13716 	const bool is_subprog = frame->subprogno;
13717 
13718 	/* LSM and struct_ops func-ptr's return type could be "void" */
13719 	if (!is_subprog) {
13720 		switch (prog_type) {
13721 		case BPF_PROG_TYPE_LSM:
13722 			if (prog->expected_attach_type == BPF_LSM_CGROUP)
13723 				/* See below, can be 0 or 0-1 depending on hook. */
13724 				break;
13725 			fallthrough;
13726 		case BPF_PROG_TYPE_STRUCT_OPS:
13727 			if (!prog->aux->attach_func_proto->type)
13728 				return 0;
13729 			break;
13730 		default:
13731 			break;
13732 		}
13733 	}
13734 
13735 	/* The eBPF calling convention is such that R0 is used
13736 	 * to return the value from the eBPF program.
13737 	 * Make sure that it is readable at the time
13738 	 * of bpf_exit, which means that the program wrote
13739 	 * something into it earlier.
13740 	 */
13741 	err = check_reg_arg(env, BPF_REG_0, SRC_OP);
13742 	if (err)
13743 		return err;
13744 
13745 	if (is_pointer_value(env, BPF_REG_0)) {
13746 		verbose(env, "R0 leaks addr as return value\n");
13747 		return -EACCES;
13748 	}
13749 
13750 	reg = cur_regs(env) + BPF_REG_0;
13751 
13752 	if (frame->in_async_callback_fn) {
13753 		/* enforce return zero from async callbacks like timer */
13754 		if (reg->type != SCALAR_VALUE) {
13755 			verbose(env, "In async callback the register R0 is not a known value (%s)\n",
13756 				reg_type_str(env, reg->type));
13757 			return -EINVAL;
13758 		}
13759 
13760 		if (!tnum_in(tnum_const(0), reg->var_off)) {
13761 			verbose_invalid_scalar(env, reg, &range, "async callback", "R0");
13762 			return -EINVAL;
13763 		}
13764 		return 0;
13765 	}
13766 
13767 	if (is_subprog) {
13768 		if (reg->type != SCALAR_VALUE) {
13769 			verbose(env, "At subprogram exit the register R0 is not a scalar value (%s)\n",
13770 				reg_type_str(env, reg->type));
13771 			return -EINVAL;
13772 		}
13773 		return 0;
13774 	}
13775 
13776 	switch (prog_type) {
13777 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
13778 		if (env->prog->expected_attach_type == BPF_CGROUP_UDP4_RECVMSG ||
13779 		    env->prog->expected_attach_type == BPF_CGROUP_UDP6_RECVMSG ||
13780 		    env->prog->expected_attach_type == BPF_CGROUP_INET4_GETPEERNAME ||
13781 		    env->prog->expected_attach_type == BPF_CGROUP_INET6_GETPEERNAME ||
13782 		    env->prog->expected_attach_type == BPF_CGROUP_INET4_GETSOCKNAME ||
13783 		    env->prog->expected_attach_type == BPF_CGROUP_INET6_GETSOCKNAME)
13784 			range = tnum_range(1, 1);
13785 		if (env->prog->expected_attach_type == BPF_CGROUP_INET4_BIND ||
13786 		    env->prog->expected_attach_type == BPF_CGROUP_INET6_BIND)
13787 			range = tnum_range(0, 3);
13788 		break;
13789 	case BPF_PROG_TYPE_CGROUP_SKB:
13790 		if (env->prog->expected_attach_type == BPF_CGROUP_INET_EGRESS) {
13791 			range = tnum_range(0, 3);
13792 			enforce_attach_type_range = tnum_range(2, 3);
13793 		}
13794 		break;
13795 	case BPF_PROG_TYPE_CGROUP_SOCK:
13796 	case BPF_PROG_TYPE_SOCK_OPS:
13797 	case BPF_PROG_TYPE_CGROUP_DEVICE:
13798 	case BPF_PROG_TYPE_CGROUP_SYSCTL:
13799 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
13800 		break;
13801 	case BPF_PROG_TYPE_RAW_TRACEPOINT:
13802 		if (!env->prog->aux->attach_btf_id)
13803 			return 0;
13804 		range = tnum_const(0);
13805 		break;
13806 	case BPF_PROG_TYPE_TRACING:
13807 		switch (env->prog->expected_attach_type) {
13808 		case BPF_TRACE_FENTRY:
13809 		case BPF_TRACE_FEXIT:
13810 			range = tnum_const(0);
13811 			break;
13812 		case BPF_TRACE_RAW_TP:
13813 		case BPF_MODIFY_RETURN:
13814 			return 0;
13815 		case BPF_TRACE_ITER:
13816 			break;
13817 		default:
13818 			return -ENOTSUPP;
13819 		}
13820 		break;
13821 	case BPF_PROG_TYPE_SK_LOOKUP:
13822 		range = tnum_range(SK_DROP, SK_PASS);
13823 		break;
13824 
13825 	case BPF_PROG_TYPE_LSM:
13826 		if (env->prog->expected_attach_type != BPF_LSM_CGROUP) {
13827 			/* Regular BPF_PROG_TYPE_LSM programs can return
13828 			 * any value.
13829 			 */
13830 			return 0;
13831 		}
13832 		if (!env->prog->aux->attach_func_proto->type) {
13833 			/* Make sure programs that attach to void
13834 			 * hooks don't try to modify return value.
13835 			 */
13836 			range = tnum_range(1, 1);
13837 		}
13838 		break;
13839 
13840 	case BPF_PROG_TYPE_NETFILTER:
13841 		range = tnum_range(NF_DROP, NF_ACCEPT);
13842 		break;
13843 	case BPF_PROG_TYPE_EXT:
13844 		/* freplace program can return anything as its return value
13845 		 * depends on the to-be-replaced kernel func or bpf program.
13846 		 */
13847 	default:
13848 		return 0;
13849 	}
13850 
13851 	if (reg->type != SCALAR_VALUE) {
13852 		verbose(env, "At program exit the register R0 is not a known value (%s)\n",
13853 			reg_type_str(env, reg->type));
13854 		return -EINVAL;
13855 	}
13856 
13857 	if (!tnum_in(range, reg->var_off)) {
13858 		verbose_invalid_scalar(env, reg, &range, "program exit", "R0");
13859 		if (prog->expected_attach_type == BPF_LSM_CGROUP &&
13860 		    prog_type == BPF_PROG_TYPE_LSM &&
13861 		    !prog->aux->attach_func_proto->type)
13862 			verbose(env, "Note, BPF_LSM_CGROUP that attach to void LSM hooks can't modify return value!\n");
13863 		return -EINVAL;
13864 	}
13865 
13866 	if (!tnum_is_unknown(enforce_attach_type_range) &&
13867 	    tnum_in(enforce_attach_type_range, reg->var_off))
13868 		env->prog->enforce_expected_attach_type = 1;
13869 	return 0;
13870 }
13871 
13872 /* non-recursive DFS pseudo code
13873  * 1  procedure DFS-iterative(G,v):
13874  * 2      label v as discovered
13875  * 3      let S be a stack
13876  * 4      S.push(v)
13877  * 5      while S is not empty
13878  * 6            t <- S.peek()
13879  * 7            if t is what we're looking for:
13880  * 8                return t
13881  * 9            for all edges e in G.adjacentEdges(t) do
13882  * 10               if edge e is already labelled
13883  * 11                   continue with the next edge
13884  * 12               w <- G.adjacentVertex(t,e)
13885  * 13               if vertex w is not discovered and not explored
13886  * 14                   label e as tree-edge
13887  * 15                   label w as discovered
13888  * 16                   S.push(w)
13889  * 17                   continue at 5
13890  * 18               else if vertex w is discovered
13891  * 19                   label e as back-edge
13892  * 20               else
13893  * 21                   // vertex w is explored
13894  * 22                   label e as forward- or cross-edge
13895  * 23           label t as explored
13896  * 24           S.pop()
13897  *
13898  * convention:
13899  * 0x10 - discovered
13900  * 0x11 - discovered and fall-through edge labelled
13901  * 0x12 - discovered and fall-through and branch edges labelled
13902  * 0x20 - explored
13903  */
13904 
13905 enum {
13906 	DISCOVERED = 0x10,
13907 	EXPLORED = 0x20,
13908 	FALLTHROUGH = 1,
13909 	BRANCH = 2,
13910 };
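/* Example of the encoding above (illustrative): push_insn() sets
 * insn_state[t] = DISCOVERED | e, so a conditional jump whose fall-through
 * edge has been labelled sits at 0x11 (DISCOVERED | FALLTHROUGH); labelling
 * its branch edge moves it to 0x12 (DISCOVERED | BRANCH, which by
 * construction implies the fall-through edge was labelled first), and it
 * becomes EXPLORED (0x20) once popped from the stack in check_cfg().
 */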
13911 
13912 static u32 state_htab_size(struct bpf_verifier_env *env)
13913 {
13914 	return env->prog->len;
13915 }
13916 
13917 static struct bpf_verifier_state_list **explored_state(
13918 					struct bpf_verifier_env *env,
13919 					int idx)
13920 {
13921 	struct bpf_verifier_state *cur = env->cur_state;
13922 	struct bpf_func_state *state = cur->frame[cur->curframe];
13923 
13924 	return &env->explored_states[(idx ^ state->callsite) % state_htab_size(env)];
13925 }
13926 
13927 static void mark_prune_point(struct bpf_verifier_env *env, int idx)
13928 {
13929 	env->insn_aux_data[idx].prune_point = true;
13930 }
13931 
13932 static bool is_prune_point(struct bpf_verifier_env *env, int insn_idx)
13933 {
13934 	return env->insn_aux_data[insn_idx].prune_point;
13935 }
13936 
13937 static void mark_force_checkpoint(struct bpf_verifier_env *env, int idx)
13938 {
13939 	env->insn_aux_data[idx].force_checkpoint = true;
13940 }
13941 
13942 static bool is_force_checkpoint(struct bpf_verifier_env *env, int insn_idx)
13943 {
13944 	return env->insn_aux_data[insn_idx].force_checkpoint;
13945 }
13946 
13947 
13948 enum {
13949 	DONE_EXPLORING = 0,
13950 	KEEP_EXPLORING = 1,
13951 };
13952 
13953 /* t, w, e - match pseudo-code above:
13954  * t - index of current instruction
13955  * w - next instruction
13956  * e - edge
13957  */
13958 static int push_insn(int t, int w, int e, struct bpf_verifier_env *env,
13959 		     bool loop_ok)
13960 {
13961 	int *insn_stack = env->cfg.insn_stack;
13962 	int *insn_state = env->cfg.insn_state;
13963 
13964 	if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH))
13965 		return DONE_EXPLORING;
13966 
13967 	if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH))
13968 		return DONE_EXPLORING;
13969 
13970 	if (w < 0 || w >= env->prog->len) {
13971 		verbose_linfo(env, t, "%d: ", t);
13972 		verbose(env, "jump out of range from insn %d to %d\n", t, w);
13973 		return -EINVAL;
13974 	}
13975 
13976 	if (e == BRANCH) {
13977 		/* mark branch target for state pruning */
13978 		mark_prune_point(env, w);
13979 		mark_jmp_point(env, w);
13980 	}
13981 
13982 	if (insn_state[w] == 0) {
13983 		/* tree-edge */
13984 		insn_state[t] = DISCOVERED | e;
13985 		insn_state[w] = DISCOVERED;
13986 		if (env->cfg.cur_stack >= env->prog->len)
13987 			return -E2BIG;
13988 		insn_stack[env->cfg.cur_stack++] = w;
13989 		return KEEP_EXPLORING;
13990 	} else if ((insn_state[w] & 0xF0) == DISCOVERED) {
13991 		if (loop_ok && env->bpf_capable)
13992 			return DONE_EXPLORING;
13993 		verbose_linfo(env, t, "%d: ", t);
13994 		verbose_linfo(env, w, "%d: ", w);
13995 		verbose(env, "back-edge from insn %d to %d\n", t, w);
13996 		return -EINVAL;
13997 	} else if (insn_state[w] == EXPLORED) {
13998 		/* forward- or cross-edge */
13999 		insn_state[t] = DISCOVERED | e;
14000 	} else {
14001 		verbose(env, "insn state internal bug\n");
14002 		return -EFAULT;
14003 	}
14004 	return DONE_EXPLORING;
14005 }
14006 
14007 static int visit_func_call_insn(int t, struct bpf_insn *insns,
14008 				struct bpf_verifier_env *env,
14009 				bool visit_callee)
14010 {
14011 	int ret;
14012 
14013 	ret = push_insn(t, t + 1, FALLTHROUGH, env, false);
14014 	if (ret)
14015 		return ret;
14016 
14017 	mark_prune_point(env, t + 1);
14018 	/* when we exit from subprog, we need to record non-linear history */
14019 	mark_jmp_point(env, t + 1);
14020 
14021 	if (visit_callee) {
14022 		mark_prune_point(env, t);
14023 		ret = push_insn(t, t + insns[t].imm + 1, BRANCH, env,
14024 				/* It's ok to allow recursion from CFG point of
14025 				 * view. __check_func_call() will do the actual
14026 				 * check.
14027 				 */
14028 				bpf_pseudo_func(insns + t));
14029 	}
14030 	return ret;
14031 }
14032 
14033 /* Visits the instruction at index t and returns one of the following:
14034  *  < 0 - an error occurred
14035  *  DONE_EXPLORING - the instruction was fully explored
14036  *  KEEP_EXPLORING - there is still work to be done before it is fully explored
14037  */
14038 static int visit_insn(int t, struct bpf_verifier_env *env)
14039 {
14040 	struct bpf_insn *insns = env->prog->insnsi, *insn = &insns[t];
14041 	int ret;
14042 
14043 	if (bpf_pseudo_func(insn))
14044 		return visit_func_call_insn(t, insns, env, true);
14045 
14046 	/* All non-branch instructions have a single fall-through edge. */
14047 	if (BPF_CLASS(insn->code) != BPF_JMP &&
14048 	    BPF_CLASS(insn->code) != BPF_JMP32)
14049 		return push_insn(t, t + 1, FALLTHROUGH, env, false);
14050 
14051 	switch (BPF_OP(insn->code)) {
14052 	case BPF_EXIT:
14053 		return DONE_EXPLORING;
14054 
14055 	case BPF_CALL:
14056 		if (insn->src_reg == 0 && insn->imm == BPF_FUNC_timer_set_callback)
14057 			/* Mark this call insn as a prune point to trigger
14058 			 * is_state_visited() check before call itself is
14059 			 * processed by __check_func_call(). Otherwise new
14060 			 * async state will be pushed for further exploration.
14061 			 */
14062 			mark_prune_point(env, t);
14063 		if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) {
14064 			struct bpf_kfunc_call_arg_meta meta;
14065 
14066 			ret = fetch_kfunc_meta(env, insn, &meta, NULL);
14067 			if (ret == 0 && is_iter_next_kfunc(&meta)) {
14068 				mark_prune_point(env, t);
14069 				/* Checking and saving state checkpoints at iter_next() call
14070 				 * is crucial for fast convergence of open-coded iterator loop
14071 				 * logic, so we need to force it. If we don't do that,
14072 				 * is_state_visited() might skip saving a checkpoint, causing
14073 				 * unnecessarily long sequence of not checkpointed
14074 				 * instructions and jumps, leading to exhaustion of jump
14075 				 * history buffer, and potentially other undesired outcomes.
14076 				 * It is expected that with correct open-coded iterators
14077 				 * convergence will happen quickly, so we don't run a risk of
14078 				 * exhausting memory.
14079 				 */
14080 				mark_force_checkpoint(env, t);
14081 			}
14082 		}
14083 		return visit_func_call_insn(t, insns, env, insn->src_reg == BPF_PSEUDO_CALL);
14084 
14085 	case BPF_JA:
14086 		if (BPF_SRC(insn->code) != BPF_K)
14087 			return -EINVAL;
14088 
14089 		/* unconditional jump with single edge */
14090 		ret = push_insn(t, t + insn->off + 1, FALLTHROUGH, env,
14091 				true);
14092 		if (ret)
14093 			return ret;
14094 
14095 		mark_prune_point(env, t + insn->off + 1);
14096 		mark_jmp_point(env, t + insn->off + 1);
14097 
14098 		return ret;
14099 
14100 	default:
14101 		/* conditional jump with two edges */
14102 		mark_prune_point(env, t);
14103 
14104 		ret = push_insn(t, t + 1, FALLTHROUGH, env, true);
14105 		if (ret)
14106 			return ret;
14107 
14108 		return push_insn(t, t + insn->off + 1, BRANCH, env, true);
14109 	}
14110 }
14111 
14112 /* non-recursive depth-first-search to detect loops in BPF program
14113  * loop == back-edge in directed graph
14114  */
14115 static int check_cfg(struct bpf_verifier_env *env)
14116 {
14117 	int insn_cnt = env->prog->len;
14118 	int *insn_stack, *insn_state;
14119 	int ret = 0;
14120 	int i;
14121 
14122 	insn_state = env->cfg.insn_state = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
14123 	if (!insn_state)
14124 		return -ENOMEM;
14125 
14126 	insn_stack = env->cfg.insn_stack = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
14127 	if (!insn_stack) {
14128 		kvfree(insn_state);
14129 		return -ENOMEM;
14130 	}
14131 
14132 	insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */
14133 	insn_stack[0] = 0; /* 0 is the first instruction */
14134 	env->cfg.cur_stack = 1;
14135 
14136 	while (env->cfg.cur_stack > 0) {
14137 		int t = insn_stack[env->cfg.cur_stack - 1];
14138 
14139 		ret = visit_insn(t, env);
14140 		switch (ret) {
14141 		case DONE_EXPLORING:
14142 			insn_state[t] = EXPLORED;
14143 			env->cfg.cur_stack--;
14144 			break;
14145 		case KEEP_EXPLORING:
14146 			break;
14147 		default:
14148 			if (ret > 0) {
14149 				verbose(env, "visit_insn internal bug\n");
14150 				ret = -EFAULT;
14151 			}
14152 			goto err_free;
14153 		}
14154 	}
14155 
14156 	if (env->cfg.cur_stack < 0) {
14157 		verbose(env, "pop stack internal bug\n");
14158 		ret = -EFAULT;
14159 		goto err_free;
14160 	}
14161 
14162 	for (i = 0; i < insn_cnt; i++) {
14163 		if (insn_state[i] != EXPLORED) {
14164 			verbose(env, "unreachable insn %d\n", i);
14165 			ret = -EINVAL;
14166 			goto err_free;
14167 		}
14168 	}
14169 	ret = 0; /* cfg looks good */
14170 
14171 err_free:
14172 	kvfree(insn_state);
14173 	kvfree(insn_stack);
14174 	env->cfg.insn_state = env->cfg.insn_stack = NULL;
14175 	return ret;
14176 }
14177 
14178 static int check_abnormal_return(struct bpf_verifier_env *env)
14179 {
14180 	int i;
14181 
14182 	for (i = 1; i < env->subprog_cnt; i++) {
14183 		if (env->subprog_info[i].has_ld_abs) {
14184 			verbose(env, "LD_ABS is not allowed in subprogs without BTF\n");
14185 			return -EINVAL;
14186 		}
14187 		if (env->subprog_info[i].has_tail_call) {
14188 			verbose(env, "tail_call is not allowed in subprogs without BTF\n");
14189 			return -EINVAL;
14190 		}
14191 	}
14192 	return 0;
14193 }
14194 
14195 /* The minimum supported BTF func info size */
14196 #define MIN_BPF_FUNCINFO_SIZE	8
14197 #define MAX_FUNCINFO_REC_SIZE	252
14198 
14199 static int check_btf_func(struct bpf_verifier_env *env,
14200 			  const union bpf_attr *attr,
14201 			  bpfptr_t uattr)
14202 {
14203 	const struct btf_type *type, *func_proto, *ret_type;
14204 	u32 i, nfuncs, urec_size, min_size;
14205 	u32 krec_size = sizeof(struct bpf_func_info);
14206 	struct bpf_func_info *krecord;
14207 	struct bpf_func_info_aux *info_aux = NULL;
14208 	struct bpf_prog *prog;
14209 	const struct btf *btf;
14210 	bpfptr_t urecord;
14211 	u32 prev_offset = 0;
14212 	bool scalar_return;
14213 	int ret = -ENOMEM;
14214 
14215 	nfuncs = attr->func_info_cnt;
14216 	if (!nfuncs) {
14217 		if (check_abnormal_return(env))
14218 			return -EINVAL;
14219 		return 0;
14220 	}
14221 
14222 	if (nfuncs != env->subprog_cnt) {
14223 		verbose(env, "number of funcs in func_info doesn't match number of subprogs\n");
14224 		return -EINVAL;
14225 	}
14226 
14227 	urec_size = attr->func_info_rec_size;
14228 	if (urec_size < MIN_BPF_FUNCINFO_SIZE ||
14229 	    urec_size > MAX_FUNCINFO_REC_SIZE ||
14230 	    urec_size % sizeof(u32)) {
14231 		verbose(env, "invalid func info rec size %u\n", urec_size);
14232 		return -EINVAL;
14233 	}
14234 
14235 	prog = env->prog;
14236 	btf = prog->aux->btf;
14237 
14238 	urecord = make_bpfptr(attr->func_info, uattr.is_kernel);
14239 	min_size = min_t(u32, krec_size, urec_size);
14240 
14241 	krecord = kvcalloc(nfuncs, krec_size, GFP_KERNEL | __GFP_NOWARN);
14242 	if (!krecord)
14243 		return -ENOMEM;
14244 	info_aux = kcalloc(nfuncs, sizeof(*info_aux), GFP_KERNEL | __GFP_NOWARN);
14245 	if (!info_aux)
14246 		goto err_free;
14247 
14248 	for (i = 0; i < nfuncs; i++) {
14249 		ret = bpf_check_uarg_tail_zero(urecord, krec_size, urec_size);
14250 		if (ret) {
14251 			if (ret == -E2BIG) {
14252 				verbose(env, "nonzero trailing record in func info");
14253 				/* set the size kernel expects so loader can zero
14254 				 * out the rest of the record.
14255 				 */
14256 				if (copy_to_bpfptr_offset(uattr,
14257 							  offsetof(union bpf_attr, func_info_rec_size),
14258 							  &min_size, sizeof(min_size)))
14259 					ret = -EFAULT;
14260 			}
14261 			goto err_free;
14262 		}
14263 
14264 		if (copy_from_bpfptr(&krecord[i], urecord, min_size)) {
14265 			ret = -EFAULT;
14266 			goto err_free;
14267 		}
14268 
14269 		/* check insn_off */
14270 		ret = -EINVAL;
14271 		if (i == 0) {
14272 			if (krecord[i].insn_off) {
14273 				verbose(env,
14274 					"nonzero insn_off %u for the first func info record",
14275 					krecord[i].insn_off);
14276 				goto err_free;
14277 			}
14278 		} else if (krecord[i].insn_off <= prev_offset) {
14279 			verbose(env,
14280 				"same or smaller insn offset (%u) than previous func info record (%u)",
14281 				krecord[i].insn_off, prev_offset);
14282 			goto err_free;
14283 		}
14284 
14285 		if (env->subprog_info[i].start != krecord[i].insn_off) {
14286 			verbose(env, "func_info BTF section doesn't match subprog layout in BPF program\n");
14287 			goto err_free;
14288 		}
14289 
14290 		/* check type_id */
14291 		type = btf_type_by_id(btf, krecord[i].type_id);
14292 		if (!type || !btf_type_is_func(type)) {
14293 			verbose(env, "invalid type id %d in func info",
14294 				krecord[i].type_id);
14295 			goto err_free;
14296 		}
14297 		info_aux[i].linkage = BTF_INFO_VLEN(type->info);
14298 
14299 		func_proto = btf_type_by_id(btf, type->type);
14300 		if (unlikely(!func_proto || !btf_type_is_func_proto(func_proto)))
14301 			/* btf_func_check() already verified it during BTF load */
14302 			goto err_free;
14303 		ret_type = btf_type_skip_modifiers(btf, func_proto->type, NULL);
14304 		scalar_return =
14305 			btf_type_is_small_int(ret_type) || btf_is_any_enum(ret_type);
14306 		if (i && !scalar_return && env->subprog_info[i].has_ld_abs) {
14307 			verbose(env, "LD_ABS is only allowed in functions that return 'int'.\n");
14308 			goto err_free;
14309 		}
14310 		if (i && !scalar_return && env->subprog_info[i].has_tail_call) {
14311 			verbose(env, "tail_call is only allowed in functions that return 'int'.\n");
14312 			goto err_free;
14313 		}
14314 
14315 		prev_offset = krecord[i].insn_off;
14316 		bpfptr_add(&urecord, urec_size);
14317 	}
14318 
14319 	prog->aux->func_info = krecord;
14320 	prog->aux->func_info_cnt = nfuncs;
14321 	prog->aux->func_info_aux = info_aux;
14322 	return 0;
14323 
14324 err_free:
14325 	kvfree(krecord);
14326 	kfree(info_aux);
14327 	return ret;
14328 }
14329 
14330 static void adjust_btf_func(struct bpf_verifier_env *env)
14331 {
14332 	struct bpf_prog_aux *aux = env->prog->aux;
14333 	int i;
14334 
14335 	if (!aux->func_info)
14336 		return;
14337 
14338 	for (i = 0; i < env->subprog_cnt; i++)
14339 		aux->func_info[i].insn_off = env->subprog_info[i].start;
14340 }
14341 
14342 #define MIN_BPF_LINEINFO_SIZE	offsetofend(struct bpf_line_info, line_col)
14343 #define MAX_LINEINFO_REC_SIZE	MAX_FUNCINFO_REC_SIZE
14344 
14345 static int check_btf_line(struct bpf_verifier_env *env,
14346 			  const union bpf_attr *attr,
14347 			  bpfptr_t uattr)
14348 {
14349 	u32 i, s, nr_linfo, ncopy, expected_size, rec_size, prev_offset = 0;
14350 	struct bpf_subprog_info *sub;
14351 	struct bpf_line_info *linfo;
14352 	struct bpf_prog *prog;
14353 	const struct btf *btf;
14354 	bpfptr_t ulinfo;
14355 	int err;
14356 
14357 	nr_linfo = attr->line_info_cnt;
14358 	if (!nr_linfo)
14359 		return 0;
14360 	if (nr_linfo > INT_MAX / sizeof(struct bpf_line_info))
14361 		return -EINVAL;
14362 
14363 	rec_size = attr->line_info_rec_size;
14364 	if (rec_size < MIN_BPF_LINEINFO_SIZE ||
14365 	    rec_size > MAX_LINEINFO_REC_SIZE ||
14366 	    rec_size & (sizeof(u32) - 1))
14367 		return -EINVAL;
14368 
14369 	/* Need to zero it in case userspace passes in
14370 	 * a smaller bpf_line_info object.
14371 	 */
14372 	linfo = kvcalloc(nr_linfo, sizeof(struct bpf_line_info),
14373 			 GFP_KERNEL | __GFP_NOWARN);
14374 	if (!linfo)
14375 		return -ENOMEM;
14376 
14377 	prog = env->prog;
14378 	btf = prog->aux->btf;
14379 
14380 	s = 0;
14381 	sub = env->subprog_info;
14382 	ulinfo = make_bpfptr(attr->line_info, uattr.is_kernel);
14383 	expected_size = sizeof(struct bpf_line_info);
14384 	ncopy = min_t(u32, expected_size, rec_size);
14385 	for (i = 0; i < nr_linfo; i++) {
14386 		err = bpf_check_uarg_tail_zero(ulinfo, expected_size, rec_size);
14387 		if (err) {
14388 			if (err == -E2BIG) {
14389 				verbose(env, "nonzero trailing record in line_info");
14390 				if (copy_to_bpfptr_offset(uattr,
14391 							  offsetof(union bpf_attr, line_info_rec_size),
14392 							  &expected_size, sizeof(expected_size)))
14393 					err = -EFAULT;
14394 			}
14395 			goto err_free;
14396 		}
14397 
14398 		if (copy_from_bpfptr(&linfo[i], ulinfo, ncopy)) {
14399 			err = -EFAULT;
14400 			goto err_free;
14401 		}
14402 
14403 		/*
14404 		 * Check insn_off to ensure
14405 		 * 1) strictly increasing AND
14406 		 * 2) bounded by prog->len
14407 		 *
14408 		 * The linfo[0].insn_off == 0 check logically falls into
14409 		 * the later "missing bpf_line_info for func..." case
14410 		 * because linfo[0].insn_off must also belong to the
14411 		 * first subprog, and the first subprog must have
14412 		 * subprog_info[0].start == 0.
14413 		 */
14414 		if ((i && linfo[i].insn_off <= prev_offset) ||
14415 		    linfo[i].insn_off >= prog->len) {
14416 			verbose(env, "Invalid line_info[%u].insn_off:%u (prev_offset:%u prog->len:%u)\n",
14417 				i, linfo[i].insn_off, prev_offset,
14418 				prog->len);
14419 			err = -EINVAL;
14420 			goto err_free;
14421 		}
14422 
14423 		if (!prog->insnsi[linfo[i].insn_off].code) {
14424 			verbose(env,
14425 				"Invalid insn code at line_info[%u].insn_off\n",
14426 				i);
14427 			err = -EINVAL;
14428 			goto err_free;
14429 		}
14430 
14431 		if (!btf_name_by_offset(btf, linfo[i].line_off) ||
14432 		    !btf_name_by_offset(btf, linfo[i].file_name_off)) {
14433 			verbose(env, "Invalid line_info[%u].line_off or .file_name_off\n", i);
14434 			err = -EINVAL;
14435 			goto err_free;
14436 		}
14437 
14438 		if (s != env->subprog_cnt) {
14439 			if (linfo[i].insn_off == sub[s].start) {
14440 				sub[s].linfo_idx = i;
14441 				s++;
14442 			} else if (sub[s].start < linfo[i].insn_off) {
14443 				verbose(env, "missing bpf_line_info for func#%u\n", s);
14444 				err = -EINVAL;
14445 				goto err_free;
14446 			}
14447 		}
14448 
14449 		prev_offset = linfo[i].insn_off;
14450 		bpfptr_add(&ulinfo, rec_size);
14451 	}
14452 
14453 	if (s != env->subprog_cnt) {
14454 		verbose(env, "missing bpf_line_info for %u funcs starting from func#%u\n",
14455 			env->subprog_cnt - s, s);
14456 		err = -EINVAL;
14457 		goto err_free;
14458 	}
14459 
14460 	prog->aux->linfo = linfo;
14461 	prog->aux->nr_linfo = nr_linfo;
14462 
14463 	return 0;
14464 
14465 err_free:
14466 	kvfree(linfo);
14467 	return err;
14468 }
14469 
14470 #define MIN_CORE_RELO_SIZE	sizeof(struct bpf_core_relo)
14471 #define MAX_CORE_RELO_SIZE	MAX_FUNCINFO_REC_SIZE
14472 
14473 static int check_core_relo(struct bpf_verifier_env *env,
14474 			   const union bpf_attr *attr,
14475 			   bpfptr_t uattr)
14476 {
14477 	u32 i, nr_core_relo, ncopy, expected_size, rec_size;
14478 	struct bpf_core_relo core_relo = {};
14479 	struct bpf_prog *prog = env->prog;
14480 	const struct btf *btf = prog->aux->btf;
14481 	struct bpf_core_ctx ctx = {
14482 		.log = &env->log,
14483 		.btf = btf,
14484 	};
14485 	bpfptr_t u_core_relo;
14486 	int err;
14487 
14488 	nr_core_relo = attr->core_relo_cnt;
14489 	if (!nr_core_relo)
14490 		return 0;
14491 	if (nr_core_relo > INT_MAX / sizeof(struct bpf_core_relo))
14492 		return -EINVAL;
14493 
14494 	rec_size = attr->core_relo_rec_size;
14495 	if (rec_size < MIN_CORE_RELO_SIZE ||
14496 	    rec_size > MAX_CORE_RELO_SIZE ||
14497 	    rec_size % sizeof(u32))
14498 		return -EINVAL;
14499 
14500 	u_core_relo = make_bpfptr(attr->core_relos, uattr.is_kernel);
14501 	expected_size = sizeof(struct bpf_core_relo);
14502 	ncopy = min_t(u32, expected_size, rec_size);
14503 
14504 	/* Unlike func_info and line_info, copy and apply each CO-RE
14505 	 * relocation record one at a time.
14506 	 */
14507 	for (i = 0; i < nr_core_relo; i++) {
14508 		/* future proofing when sizeof(bpf_core_relo) changes */
14509 		err = bpf_check_uarg_tail_zero(u_core_relo, expected_size, rec_size);
14510 		if (err) {
14511 			if (err == -E2BIG) {
14512 				verbose(env, "nonzero trailing record in core_relo");
14513 				if (copy_to_bpfptr_offset(uattr,
14514 							  offsetof(union bpf_attr, core_relo_rec_size),
14515 							  &expected_size, sizeof(expected_size)))
14516 					err = -EFAULT;
14517 			}
14518 			break;
14519 		}
14520 
14521 		if (copy_from_bpfptr(&core_relo, u_core_relo, ncopy)) {
14522 			err = -EFAULT;
14523 			break;
14524 		}
14525 
14526 		if (core_relo.insn_off % 8 || core_relo.insn_off / 8 >= prog->len) {
14527 			verbose(env, "Invalid core_relo[%u].insn_off:%u prog->len:%u\n",
14528 				i, core_relo.insn_off, prog->len);
14529 			err = -EINVAL;
14530 			break;
14531 		}
14532 
14533 		err = bpf_core_apply(&ctx, &core_relo, i,
14534 				     &prog->insnsi[core_relo.insn_off / 8]);
14535 		if (err)
14536 			break;
14537 		bpfptr_add(&u_core_relo, rec_size);
14538 	}
14539 	return err;
14540 }
14541 
14542 static int check_btf_info(struct bpf_verifier_env *env,
14543 			  const union bpf_attr *attr,
14544 			  bpfptr_t uattr)
14545 {
14546 	struct btf *btf;
14547 	int err;
14548 
14549 	if (!attr->func_info_cnt && !attr->line_info_cnt) {
14550 		if (check_abnormal_return(env))
14551 			return -EINVAL;
14552 		return 0;
14553 	}
14554 
14555 	btf = btf_get_by_fd(attr->prog_btf_fd);
14556 	if (IS_ERR(btf))
14557 		return PTR_ERR(btf);
14558 	if (btf_is_kernel(btf)) {
14559 		btf_put(btf);
14560 		return -EACCES;
14561 	}
14562 	env->prog->aux->btf = btf;
14563 
14564 	err = check_btf_func(env, attr, uattr);
14565 	if (err)
14566 		return err;
14567 
14568 	err = check_btf_line(env, attr, uattr);
14569 	if (err)
14570 		return err;
14571 
14572 	err = check_core_relo(env, attr, uattr);
14573 	if (err)
14574 		return err;
14575 
14576 	return 0;
14577 }
14578 
14579 /* check %cur's range satisfies %old's */
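/* For example, an old (already verified) state with umin=2, umax=8 subsumes a
 * current state with umin=3, umax=5 but not one with umin=0, umax=16: the old
 * bounds must enclose the new ones in every signed/unsigned 64- and 32-bit view.
 */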
14580 static bool range_within(struct bpf_reg_state *old,
14581 			 struct bpf_reg_state *cur)
14582 {
14583 	return old->umin_value <= cur->umin_value &&
14584 	       old->umax_value >= cur->umax_value &&
14585 	       old->smin_value <= cur->smin_value &&
14586 	       old->smax_value >= cur->smax_value &&
14587 	       old->u32_min_value <= cur->u32_min_value &&
14588 	       old->u32_max_value >= cur->u32_max_value &&
14589 	       old->s32_min_value <= cur->s32_min_value &&
14590 	       old->s32_max_value >= cur->s32_max_value;
14591 }
14592 
14593 /* If in the old state two registers had the same id, then they need to have
14594  * the same id in the new state as well.  But that id could be different from
14595  * the old state, so we need to track the mapping from old to new ids.
14596  * Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent
14597  * regs with old id 5 must also have new id 9 for the new state to be safe.  But
14598  * regs with a different old id could still have new id 9, we don't care about
14599  * that.
14600  * So we look through our idmap to see if this old id has been seen before.  If
14601  * so, we require the new id to match; otherwise, we add the id pair to the map.
14602  */
14603 static bool check_ids(u32 old_id, u32 cur_id, struct bpf_id_pair *idmap)
14604 {
14605 	unsigned int i;
14606 
14607 	/* either both IDs should be set or both should be zero */
14608 	if (!!old_id != !!cur_id)
14609 		return false;
14610 
14611 	if (old_id == 0) /* cur_id == 0 as well */
14612 		return true;
14613 
14614 	for (i = 0; i < BPF_ID_MAP_SIZE; i++) {
14615 		if (!idmap[i].old) {
14616 			/* Reached an empty slot; haven't seen this id before */
14617 			idmap[i].old = old_id;
14618 			idmap[i].cur = cur_id;
14619 			return true;
14620 		}
14621 		if (idmap[i].old == old_id)
14622 			return idmap[i].cur == cur_id;
14623 	}
14624 	/* We ran out of idmap slots, which should be impossible */
14625 	WARN_ON_ONCE(1);
14626 	return false;
14627 }
14628 
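/* Mark every register and stack slot of this frame as REG_LIVE_DONE and reset
 * the ones that were never read, so that later state comparisons can treat
 * them as unused.
 */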
14629 static void clean_func_state(struct bpf_verifier_env *env,
14630 			     struct bpf_func_state *st)
14631 {
14632 	enum bpf_reg_liveness live;
14633 	int i, j;
14634 
14635 	for (i = 0; i < BPF_REG_FP; i++) {
14636 		live = st->regs[i].live;
14637 		/* liveness must not touch this register anymore */
14638 		st->regs[i].live |= REG_LIVE_DONE;
14639 		if (!(live & REG_LIVE_READ))
14640 			/* since the register is unused, clear its state
14641 			 * to make further comparison simpler
14642 			 */
14643 			__mark_reg_not_init(env, &st->regs[i]);
14644 	}
14645 
14646 	for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) {
14647 		live = st->stack[i].spilled_ptr.live;
14648 		/* liveness must not touch this stack slot anymore */
14649 		st->stack[i].spilled_ptr.live |= REG_LIVE_DONE;
14650 		if (!(live & REG_LIVE_READ)) {
14651 			__mark_reg_not_init(env, &st->stack[i].spilled_ptr);
14652 			for (j = 0; j < BPF_REG_SIZE; j++)
14653 				st->stack[i].slot_type[j] = STACK_INVALID;
14654 		}
14655 	}
14656 }
14657 
14658 static void clean_verifier_state(struct bpf_verifier_env *env,
14659 				 struct bpf_verifier_state *st)
14660 {
14661 	int i;
14662 
14663 	if (st->frame[0]->regs[0].live & REG_LIVE_DONE)
14664 		/* all regs in this state in all frames were already marked */
14665 		return;
14666 
14667 	for (i = 0; i <= st->curframe; i++)
14668 		clean_func_state(env, st->frame[i]);
14669 }
14670 
14671 /* the parentage chains form a tree.
14672  * the verifier states are added to state lists at given insn and
14673  * pushed into state stack for future exploration.
14674  * when the verifier reaches bpf_exit insn some of the verifier states
14675  * stored in the state lists have their final liveness state already,
14676  * but a lot of states will get revised from liveness point of view when
14677  * the verifier explores other branches.
14678  * Example:
14679  * 1: r0 = 1
14680  * 2: if r1 == 100 goto pc+1
14681  * 3: r0 = 2
14682  * 4: exit
14683  * when the verifier reaches exit insn the register r0 in the state list of
14684  * insn 2 will be seen as !REG_LIVE_READ. Then the verifier pops the other_branch
14685  * of insn 2 and goes exploring further. At the insn 4 it will walk the
14686  * parentage chain from insn 4 into insn 2 and will mark r0 as REG_LIVE_READ.
14687  *
14688  * Since the verifier pushes the branch states as it sees them while exploring
14689  * the program the condition of walking the branch instruction for the second
14690  * time means that all states below this branch were already explored and
14691  * their final liveness marks are already propagated.
14692  * Hence when the verifier completes the search of state list in is_state_visited()
14693  * we can call this clean_live_states() function to mark all liveness states
14694  * as REG_LIVE_DONE to indicate that 'parent' pointers of 'struct bpf_reg_state'
14695  * will not be used.
14696  * This function also clears the registers and stack slots that are !READ
14697  * to simplify state merging.
14698  *
14699  * An important note here: walking the same branch instruction in the callee
14700  * doesn't mean that the states are DONE. The verifier also has to compare
14701  * the callsites.
14702  */
14703 static void clean_live_states(struct bpf_verifier_env *env, int insn,
14704 			      struct bpf_verifier_state *cur)
14705 {
14706 	struct bpf_verifier_state_list *sl;
14707 	int i;
14708 
14709 	sl = *explored_state(env, insn);
14710 	while (sl) {
14711 		if (sl->state.branches)
14712 			goto next;
14713 		if (sl->state.insn_idx != insn ||
14714 		    sl->state.curframe != cur->curframe)
14715 			goto next;
14716 		for (i = 0; i <= cur->curframe; i++)
14717 			if (sl->state.frame[i]->callsite != cur->frame[i]->callsite)
14718 				goto next;
14719 		clean_verifier_state(env, &sl->state);
14720 next:
14721 		sl = sl->next;
14722 	}
14723 }
14724 
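/* Two registers match exactly when all fields preceding 'id' compare equal
 * byte for byte and both 'id' and 'ref_obj_id' are consistent under the
 * old->cur id mapping.
 */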
14725 static bool regs_exact(const struct bpf_reg_state *rold,
14726 		       const struct bpf_reg_state *rcur,
14727 		       struct bpf_id_pair *idmap)
14728 {
14729 	return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 &&
14730 	       check_ids(rold->id, rcur->id, idmap) &&
14731 	       check_ids(rold->ref_obj_id, rcur->ref_obj_id, idmap);
14732 }
14733 
14734 /* Returns true if (rold safe implies rcur safe) */
14735 static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold,
14736 		    struct bpf_reg_state *rcur, struct bpf_id_pair *idmap)
14737 {
14738 	if (!(rold->live & REG_LIVE_READ))
14739 		/* explored state didn't use this */
14740 		return true;
14741 	if (rold->type == NOT_INIT)
14742 		/* explored state can't have used this */
14743 		return true;
14744 	if (rcur->type == NOT_INIT)
14745 		return false;
14746 
14747 	/* Enforce that register types have to match exactly, including their
14748 	 * modifiers (like PTR_MAYBE_NULL, MEM_RDONLY, etc), as a general
14749 	 * rule.
14750 	 *
14751 	 * One can make a point that using a pointer register as unbounded
14752 	 * SCALAR would be technically acceptable, but this could lead to
14753 	 * pointer leaks because scalars are allowed to leak while pointers
14754 	 * are not. We could make this safe in special cases if root is
14755 	 * calling us, but it's probably not worth the hassle.
14756 	 *
14757 	 * Also, register types that are *not* MAYBE_NULL could technically be
14758 	 * safe to use as their MAYBE_NULL variants (e.g., PTR_TO_MAP_VALUE
14759 	 * is safe to be used as PTR_TO_MAP_VALUE_OR_NULL, provided both point
14760 	 * to the same map).
14761 	 * However, if the old MAYBE_NULL register then got NULL checked,
14762 	 * doing so could have affected others with the same id, and we can't
14763 	 * check for that because we lost the id when we converted to
14764 	 * a non-MAYBE_NULL variant.
14765 	 * So, as a general rule we don't allow mixing MAYBE_NULL and
14766 	 * non-MAYBE_NULL registers as well.
14767 	 */
14768 	if (rold->type != rcur->type)
14769 		return false;
14770 
14771 	switch (base_type(rold->type)) {
14772 	case SCALAR_VALUE:
14773 		if (regs_exact(rold, rcur, idmap))
14774 			return true;
14775 		if (env->explore_alu_limits)
14776 			return false;
14777 		if (!rold->precise)
14778 			return true;
14779 		/* new val must satisfy old val knowledge */
14780 		return range_within(rold, rcur) &&
14781 		       tnum_in(rold->var_off, rcur->var_off);
14782 	case PTR_TO_MAP_KEY:
14783 	case PTR_TO_MAP_VALUE:
14784 	case PTR_TO_MEM:
14785 	case PTR_TO_BUF:
14786 	case PTR_TO_TP_BUFFER:
14787 		/* If the new min/max/var_off satisfy the old ones and
14788 		 * everything else matches, we are OK.
14789 		 */
14790 		return memcmp(rold, rcur, offsetof(struct bpf_reg_state, var_off)) == 0 &&
14791 		       range_within(rold, rcur) &&
14792 		       tnum_in(rold->var_off, rcur->var_off) &&
14793 		       check_ids(rold->id, rcur->id, idmap) &&
14794 		       check_ids(rold->ref_obj_id, rcur->ref_obj_id, idmap);
14795 	case PTR_TO_PACKET_META:
14796 	case PTR_TO_PACKET:
14797 		/* We must have at least as much range as the old ptr
14798 		 * did, so that any accesses which were safe before are
14799 		 * still safe.  This is true even if old range < old off,
14800 		 * since someone could have accessed through (ptr - k), or
14801 		 * even done ptr -= k in a register, to get a safe access.
14802 		 */
14803 		if (rold->range > rcur->range)
14804 			return false;
14805 		/* If the offsets don't match, we can't trust our alignment;
14806 		 * nor can we be sure that we won't fall out of range.
14807 		 */
14808 		if (rold->off != rcur->off)
14809 			return false;
14810 		/* id relations must be preserved */
14811 		if (!check_ids(rold->id, rcur->id, idmap))
14812 			return false;
14813 		/* new val must satisfy old val knowledge */
14814 		return range_within(rold, rcur) &&
14815 		       tnum_in(rold->var_off, rcur->var_off);
14816 	case PTR_TO_STACK:
14817 		/* two stack pointers are equal only if they're pointing to
14818 		 * the same stack frame, since fp-8 in foo != fp-8 in bar
14819 		 */
14820 		return regs_exact(rold, rcur, idmap) && rold->frameno == rcur->frameno;
14821 	default:
14822 		return regs_exact(rold, rcur, idmap);
14823 	}
14824 }
14825 
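/* Returns true if every stack slot that the old (explored) state actually read
 * is still valid in the current state: slot types must be compatible, and
 * spilled registers, dynptrs and iterators must pass regsafe()/check_ids()
 * style checks. Extra slots in the current stack are ignored, since the old
 * state never used them.
 */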
14826 static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old,
14827 		      struct bpf_func_state *cur, struct bpf_id_pair *idmap)
14828 {
14829 	int i, spi;
14830 
14831 	/* walk slots of the explored stack and ignore any additional
14832 	 * slots in the current stack, since explored(safe) state
14833 	 * didn't use them
14834 	 */
14835 	for (i = 0; i < old->allocated_stack; i++) {
14836 		struct bpf_reg_state *old_reg, *cur_reg;
14837 
14838 		spi = i / BPF_REG_SIZE;
14839 
14840 		if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ)) {
14841 			i += BPF_REG_SIZE - 1;
14842 			/* explored state didn't use this */
14843 			continue;
14844 		}
14845 
14846 		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID)
14847 			continue;
14848 
14849 		if (env->allow_uninit_stack &&
14850 		    old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC)
14851 			continue;
14852 
14853 		/* explored stack has more populated slots than current stack
14854 		 * and these slots were used
14855 		 */
14856 		if (i >= cur->allocated_stack)
14857 			return false;
14858 
14859 		/* if the old state was safe with misc data in the stack,
14860 		 * it will be safe with a zero-initialized stack.
14861 		 * The opposite is not true.
14862 		 */
14863 		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC &&
14864 		    cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO)
14865 			continue;
14866 		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] !=
14867 		    cur->stack[spi].slot_type[i % BPF_REG_SIZE])
14868 			/* Ex: old explored (safe) state has STACK_SPILL in
14869 			 * this stack slot, but current has STACK_MISC ->
14870 			 * these verifier states are not equivalent,
14871 			 * return false to continue verification of this path
14872 			 */
14873 			return false;
14874 		if (i % BPF_REG_SIZE != BPF_REG_SIZE - 1)
14875 			continue;
14876 		/* Both old and cur have the same slot_type */
14877 		switch (old->stack[spi].slot_type[BPF_REG_SIZE - 1]) {
14878 		case STACK_SPILL:
14879 			/* when explored and current stack slot are both storing
14880 			 * spilled registers, check that the stored pointer types
14881 			 * are the same as well.
14882 			 * Ex: explored safe path could have stored
14883 			 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8}
14884 			 * but current path has stored:
14885 			 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16}
14886 			 * such verifier states are not equivalent.
14887 			 * return false to continue verification of this path
14888 			 */
14889 			if (!regsafe(env, &old->stack[spi].spilled_ptr,
14890 				     &cur->stack[spi].spilled_ptr, idmap))
14891 				return false;
14892 			break;
14893 		case STACK_DYNPTR:
14894 			old_reg = &old->stack[spi].spilled_ptr;
14895 			cur_reg = &cur->stack[spi].spilled_ptr;
14896 			if (old_reg->dynptr.type != cur_reg->dynptr.type ||
14897 			    old_reg->dynptr.first_slot != cur_reg->dynptr.first_slot ||
14898 			    !check_ids(old_reg->ref_obj_id, cur_reg->ref_obj_id, idmap))
14899 				return false;
14900 			break;
14901 		case STACK_ITER:
14902 			old_reg = &old->stack[spi].spilled_ptr;
14903 			cur_reg = &cur->stack[spi].spilled_ptr;
14904 			/* iter.depth is not compared between states as it
14905 			 * doesn't matter for correctness and would otherwise
14906 			 * prevent convergence; we maintain it only to prevent
14907 			 * infinite loop check triggering, see
14908 			 * iter_active_depths_differ()
14909 			 */
14910 			if (old_reg->iter.btf != cur_reg->iter.btf ||
14911 			    old_reg->iter.btf_id != cur_reg->iter.btf_id ||
14912 			    old_reg->iter.state != cur_reg->iter.state ||
14913 			    /* ignore {old_reg,cur_reg}->iter.depth, see above */
14914 			    !check_ids(old_reg->ref_obj_id, cur_reg->ref_obj_id, idmap))
14915 				return false;
14916 			break;
14917 		case STACK_MISC:
14918 		case STACK_ZERO:
14919 		case STACK_INVALID:
14920 			continue;
14921 		/* Ensure that new unhandled slot types return false by default */
14922 		default:
14923 			return false;
14924 		}
14925 	}
14926 	return true;
14927 }
14928 
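/* Acquired references must correspond one to one between the old and current
 * states, using the same old->cur id mapping as registers.
 */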
14929 static bool refsafe(struct bpf_func_state *old, struct bpf_func_state *cur,
14930 		    struct bpf_id_pair *idmap)
14931 {
14932 	int i;
14933 
14934 	if (old->acquired_refs != cur->acquired_refs)
14935 		return false;
14936 
14937 	for (i = 0; i < old->acquired_refs; i++) {
14938 		if (!check_ids(old->refs[i].id, cur->refs[i].id, idmap))
14939 			return false;
14940 	}
14941 
14942 	return true;
14943 }
14944 
14945 /* compare two verifier states
14946  *
14947  * all states stored in state_list are known to be valid, since
14948  * verifier reached 'bpf_exit' instruction through them
14949  *
14950  * this function is called when the verifier explores different branches of
14951  * execution popped from the state stack. If it sees an old state that has
14952  * more strict register state and more strict stack state, then this execution
14953  * branch doesn't need to be explored further, since verifier already
14954  * concluded that more strict state leads to valid finish.
14955  *
14956  * Therefore two states are equivalent if register state is more conservative
14957  * and explored stack state is more conservative than the current one.
14958  * Example:
14959  *       explored                   current
14960  * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC)
14961  * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC)
14962  *
14963  * In other words if current stack state (one being explored) has more
14964  * valid slots than old one that already passed validation, it means
14965  * the verifier can stop exploring and conclude that current state is valid too
14966  *
14967  * Similarly with registers. If explored state has register type as invalid
14968  * whereas register type in current state is meaningful, it means that
14969  * the current state will reach 'bpf_exit' instruction safely
14970  */
14971 static bool func_states_equal(struct bpf_verifier_env *env, struct bpf_func_state *old,
14972 			      struct bpf_func_state *cur)
14973 {
14974 	int i;
14975 
14976 	for (i = 0; i < MAX_BPF_REG; i++)
14977 		if (!regsafe(env, &old->regs[i], &cur->regs[i],
14978 			     env->idmap_scratch))
14979 			return false;
14980 
14981 	if (!stacksafe(env, old, cur, env->idmap_scratch))
14982 		return false;
14983 
14984 	if (!refsafe(old, cur, env->idmap_scratch))
14985 		return false;
14986 
14987 	return true;
14988 }
14989 
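/* Top-level state comparison: besides per-frame func_states_equal(), the two
 * states must agree on frame depth, speculativeness, active spin lock and RCU
 * read section, and the callsite of every frame.
 */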
14990 static bool states_equal(struct bpf_verifier_env *env,
14991 			 struct bpf_verifier_state *old,
14992 			 struct bpf_verifier_state *cur)
14993 {
14994 	int i;
14995 
14996 	if (old->curframe != cur->curframe)
14997 		return false;
14998 
14999 	memset(env->idmap_scratch, 0, sizeof(env->idmap_scratch));
15000 
15001 	/* Verification state from speculative execution simulation
15002 	 * must never prune a non-speculative execution one.
15003 	 */
15004 	if (old->speculative && !cur->speculative)
15005 		return false;
15006 
15007 	if (old->active_lock.ptr != cur->active_lock.ptr)
15008 		return false;
15009 
15010 	/* Old and cur active_lock's have to be either both present
15011 	 * or both absent.
15012 	 */
15013 	if (!!old->active_lock.id != !!cur->active_lock.id)
15014 		return false;
15015 
15016 	if (old->active_lock.id &&
15017 	    !check_ids(old->active_lock.id, cur->active_lock.id, env->idmap_scratch))
15018 		return false;
15019 
15020 	if (old->active_rcu_lock != cur->active_rcu_lock)
15021 		return false;
15022 
15023 	/* for states to be equal callsites have to be the same
15024 	 * and all frame states need to be equivalent
15025 	 */
15026 	for (i = 0; i <= old->curframe; i++) {
15027 		if (old->frame[i]->callsite != cur->frame[i]->callsite)
15028 			return false;
15029 		if (!func_states_equal(env, old->frame[i], cur->frame[i]))
15030 			return false;
15031 	}
15032 	return true;
15033 }
15034 
15035 /* Return 0 if no propagation happened. Return negative error code if error
15036  * happened. Otherwise, return the propagated bit.
15037  */
15038 static int propagate_liveness_reg(struct bpf_verifier_env *env,
15039 				  struct bpf_reg_state *reg,
15040 				  struct bpf_reg_state *parent_reg)
15041 {
15042 	u8 parent_flag = parent_reg->live & REG_LIVE_READ;
15043 	u8 flag = reg->live & REG_LIVE_READ;
15044 	int err;
15045 
15046 	/* When we get here, the read flags of PARENT_REG or REG could be any of
15047 	 * REG_LIVE_READ64, REG_LIVE_READ32, REG_LIVE_NONE. There is no need
15048 	 * of propagation if PARENT_REG has strongest REG_LIVE_READ64.
15049 	 */
15050 	if (parent_flag == REG_LIVE_READ64 ||
15051 	    /* Or if there is no read flag from REG. */
15052 	    !flag ||
15053 	    /* Or if the read flag from REG is the same as PARENT_REG. */
15054 	    parent_flag == flag)
15055 		return 0;
15056 
15057 	err = mark_reg_read(env, reg, parent_reg, flag);
15058 	if (err)
15059 		return err;
15060 
15061 	return flag;
15062 }
15063 
15064 /* A write screens off any subsequent reads; but write marks come from the
15065  * straight-line code between a state and its parent.  When we arrive at an
15066  * equivalent state (jump target or such) we didn't arrive by the straight-line
15067  * code, so read marks in the state must propagate to the parent regardless
15068  * of the state's write marks. That's what 'parent == state->parent' comparison
15069  * in mark_reg_read() is for.
15070  */
15071 static int propagate_liveness(struct bpf_verifier_env *env,
15072 			      const struct bpf_verifier_state *vstate,
15073 			      struct bpf_verifier_state *vparent)
15074 {
15075 	struct bpf_reg_state *state_reg, *parent_reg;
15076 	struct bpf_func_state *state, *parent;
15077 	int i, frame, err = 0;
15078 
15079 	if (vparent->curframe != vstate->curframe) {
15080 		WARN(1, "propagate_live: parent frame %d current frame %d\n",
15081 		     vparent->curframe, vstate->curframe);
15082 		return -EFAULT;
15083 	}
15084 	/* Propagate read liveness of registers... */
15085 	BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
15086 	for (frame = 0; frame <= vstate->curframe; frame++) {
15087 		parent = vparent->frame[frame];
15088 		state = vstate->frame[frame];
15089 		parent_reg = parent->regs;
15090 		state_reg = state->regs;
15091 		/* We don't need to worry about FP liveness, it's read-only */
15092 		for (i = frame < vstate->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++) {
15093 			err = propagate_liveness_reg(env, &state_reg[i],
15094 						     &parent_reg[i]);
15095 			if (err < 0)
15096 				return err;
15097 			if (err == REG_LIVE_READ64)
15098 				mark_insn_zext(env, &parent_reg[i]);
15099 		}
15100 
15101 		/* Propagate stack slots. */
15102 		for (i = 0; i < state->allocated_stack / BPF_REG_SIZE &&
15103 			    i < parent->allocated_stack / BPF_REG_SIZE; i++) {
15104 			parent_reg = &parent->stack[i].spilled_ptr;
15105 			state_reg = &state->stack[i].spilled_ptr;
15106 			err = propagate_liveness_reg(env, state_reg,
15107 						     parent_reg);
15108 			if (err < 0)
15109 				return err;
15110 		}
15111 	}
15112 	return 0;
15113 }
15114 
15115 /* find precise scalars in the previous equivalent state and
15116  * propagate them into the current state
15117  */
15118 static int propagate_precision(struct bpf_verifier_env *env,
15119 			       const struct bpf_verifier_state *old)
15120 {
15121 	struct bpf_reg_state *state_reg;
15122 	struct bpf_func_state *state;
15123 	int i, err = 0, fr;
15124 
15125 	for (fr = old->curframe; fr >= 0; fr--) {
15126 		state = old->frame[fr];
15127 		state_reg = state->regs;
15128 		for (i = 0; i < BPF_REG_FP; i++, state_reg++) {
15129 			if (state_reg->type != SCALAR_VALUE ||
15130 			    !state_reg->precise ||
15131 			    !(state_reg->live & REG_LIVE_READ))
15132 				continue;
15133 			if (env->log.level & BPF_LOG_LEVEL2)
15134 				verbose(env, "frame %d: propagating r%d\n", fr, i);
15135 			err = mark_chain_precision_frame(env, fr, i);
15136 			if (err < 0)
15137 				return err;
15138 		}
15139 
15140 		for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
15141 			if (!is_spilled_reg(&state->stack[i]))
15142 				continue;
15143 			state_reg = &state->stack[i].spilled_ptr;
15144 			if (state_reg->type != SCALAR_VALUE ||
15145 			    !state_reg->precise ||
15146 			    !(state_reg->live & REG_LIVE_READ))
15147 				continue;
15148 			if (env->log.level & BPF_LOG_LEVEL2)
15149 				verbose(env, "frame %d: propagating fp%d\n",
15150 					fr, (-i - 1) * BPF_REG_SIZE);
15151 			err = mark_chain_precision_stack_frame(env, fr, i);
15152 			if (err < 0)
15153 				return err;
15154 		}
15155 	}
15156 	return 0;
15157 }
15158 
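/* Cheap looping heuristic: the states may be looping if all registers of the
 * current frame compare byte-for-byte equal up to the 'parent' field. This is
 * only a filter; the precise comparison is done by states_equal().
 */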
15159 static bool states_maybe_looping(struct bpf_verifier_state *old,
15160 				 struct bpf_verifier_state *cur)
15161 {
15162 	struct bpf_func_state *fold, *fcur;
15163 	int i, fr = cur->curframe;
15164 
15165 	if (old->curframe != fr)
15166 		return false;
15167 
15168 	fold = old->frame[fr];
15169 	fcur = cur->frame[fr];
15170 	for (i = 0; i < MAX_BPF_REG; i++)
15171 		if (memcmp(&fold->regs[i], &fcur->regs[i],
15172 			   offsetof(struct bpf_reg_state, parent)))
15173 			return false;
15174 	return true;
15175 }
15176 
15177 static bool is_iter_next_insn(struct bpf_verifier_env *env, int insn_idx)
15178 {
15179 	return env->insn_aux_data[insn_idx].is_iter_next;
15180 }
15181 
15182 /* is_state_visited() handles iter_next() (see process_iter_next_call() for
15183  * terminology) calls specially: as opposed to bounded BPF loops, it *expects*
15184  * states to match, which otherwise would look like an infinite loop. So while
15185  * iter_next() calls are taken care of, we still need to be careful and
15186  * prevent erroneous and overly eager declaration of "infinite loop", when
15187  * iterators are involved.
15188  *
15189  * Here's a situation in pseudo-BPF assembly form:
15190  *
15191  *   0: again:                          ; set up iter_next() call args
15192  *   1:   r1 = &it                      ; <CHECKPOINT HERE>
15193  *   2:   call bpf_iter_num_next        ; this is iter_next() call
15194  *   3:   if r0 == 0 goto done
15195  *   4:   ... something useful here ...
15196  *   5:   goto again                    ; another iteration
15197  *   6: done:
15198  *   7:   r1 = &it
15199  *   8:   call bpf_iter_num_destroy     ; clean up iter state
15200  *   9:   exit
15201  *
15202  * This is a typical loop. Let's assume that we have a prune point at 1:,
15203  * before we get to `call bpf_iter_num_next` (e.g., because of that `goto
15204  * again`, assuming other heuristics don't get in the way).
15205  *
15206  * When we first come to 1:, let's say we have some state X. We proceed
15207  * to 2:, fork states, enqueue ACTIVE, validate NULL case successfully, exit.
15208  * Now we come back to validate that forked ACTIVE state. We proceed through
15209  * 3-5, come to goto, jump to 1:. Let's assume our state didn't change, so we
15210  * are converging. But the problem is that we don't know that yet, as this
15211  * convergence has to happen at iter_next() call site only. So if nothing is
15212  * done, at 1: verifier will use bounded loop logic and declare infinite
15213  * looping (and would be *technically* correct, if not for iterator's
15214  * "eventual sticky NULL" contract, see process_iter_next_call()). But we
15215  * don't want that. So what we do in process_iter_next_call() when we go on
15216  * another ACTIVE iteration, we bump slot->iter.depth, to mark that it's
15217  * a different iteration. So when we suspect an infinite loop, we additionally
15218  * check if any of the *ACTIVE* iterator states depths differ. If yes, we
15219  * pretend we are not looping and wait for next iter_next() call.
15220  *
15221  * This only applies to ACTIVE state. In DRAINED state we don't expect to
15222  * loop, because that would actually mean infinite loop, as DRAINED state is
15223  * "sticky", and so we'll keep returning into the same instruction with the
15224  * same state (at least in one of possible code paths).
15225  *
15226  * This approach allows us to keep the infinite loop heuristic even in the
15227  * face of an active iterator. E.g., the C snippet below is and will be
15228  * detected as infinitely looping:
15229  *
15230  *   struct bpf_iter_num it;
15231  *   int *p, x;
15232  *
15233  *   bpf_iter_num_new(&it, 0, 10);
15234  *   while ((p = bpf_iter_num_next(&it))) {
15235  *       x = *p;
15236  *       while (x--) {} // <<-- infinite loop here
15237  *   }
15238  *
15239  */
15240 static bool iter_active_depths_differ(struct bpf_verifier_state *old, struct bpf_verifier_state *cur)
15241 {
15242 	struct bpf_reg_state *slot, *cur_slot;
15243 	struct bpf_func_state *state;
15244 	int i, fr;
15245 
15246 	for (fr = old->curframe; fr >= 0; fr--) {
15247 		state = old->frame[fr];
15248 		for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
15249 			if (state->stack[i].slot_type[0] != STACK_ITER)
15250 				continue;
15251 
15252 			slot = &state->stack[i].spilled_ptr;
15253 			if (slot->iter.state != BPF_ITER_STATE_ACTIVE)
15254 				continue;
15255 
15256 			cur_slot = &cur->frame[fr]->stack[i].spilled_ptr;
15257 			if (cur_slot->iter.depth != slot->iter.depth)
15258 				return true;
15259 		}
15260 	}
15261 	return false;
15262 }
15263 
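/* Check whether the state at insn_idx is equivalent to an already explored
 * one. Returns 1 if an equivalent state was found and the current path can be
 * pruned, 0 if verification must continue (possibly after recording the
 * current state as a new checkpoint), or a negative error code.
 */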
15264 static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
15265 {
15266 	struct bpf_verifier_state_list *new_sl;
15267 	struct bpf_verifier_state_list *sl, **pprev;
15268 	struct bpf_verifier_state *cur = env->cur_state, *new;
15269 	int i, j, err, states_cnt = 0;
15270 	bool force_new_state = env->test_state_freq || is_force_checkpoint(env, insn_idx);
15271 	bool add_new_state = force_new_state;
15272 
15273 	/* bpf progs typically have a pruning point every 4 instructions
15274 	 * http://vger.kernel.org/bpfconf2019.html#session-1
15275 	 * Do not add new state for future pruning if the verifier hasn't seen
15276 	 * at least 2 jumps and at least 8 instructions.
15277 	 * This heuristic helps decrease the 'total_states' and 'peak_states' metrics.
15278 	 * In tests that amounts to up to a 50% reduction in total verifier
15279 	 * memory consumption and a 20% verifier time speedup.
15280 	 */
15281 	if (env->jmps_processed - env->prev_jmps_processed >= 2 &&
15282 	    env->insn_processed - env->prev_insn_processed >= 8)
15283 		add_new_state = true;
15284 
15285 	pprev = explored_state(env, insn_idx);
15286 	sl = *pprev;
15287 
15288 	clean_live_states(env, insn_idx, cur);
15289 
15290 	while (sl) {
15291 		states_cnt++;
15292 		if (sl->state.insn_idx != insn_idx)
15293 			goto next;
15294 
15295 		if (sl->state.branches) {
15296 			struct bpf_func_state *frame = sl->state.frame[sl->state.curframe];
15297 
15298 			if (frame->in_async_callback_fn &&
15299 			    frame->async_entry_cnt != cur->frame[cur->curframe]->async_entry_cnt) {
15300 				/* Different async_entry_cnt means that the verifier is
15301 				 * processing another entry into async callback.
15302 				 * Seeing the same state is not an indication of infinite
15303 				 * loop or infinite recursion.
15304 				 * But finding the same state doesn't mean that it's safe
15305 				 * to stop processing the current state. The previous state
15306 				 * hasn't yet reached bpf_exit, since state.branches > 0.
15307 				 * Checking in_async_callback_fn alone is not enough either,
15308 				 * since the verifier still needs to catch infinite loops
15309 				 * inside async callbacks.
15310 				 */
15311 				goto skip_inf_loop_check;
15312 			}
15313 			/* BPF open-coded iterators loop detection is special.
15314 			 * states_maybe_looping() logic is too simplistic in detecting
15315 			 * states that *might* be equivalent, because it doesn't know
15316 			 * about ID remapping, so don't even perform it.
15317 			 * See process_iter_next_call() and iter_active_depths_differ()
15318 			 * for overview of the logic. When current and one of parent
15319 			 * states are detected as equivalent, it's a good thing: we prove
15320 			 * convergence and can stop simulating further iterations.
15321 			 * It's safe to assume that iterator loop will finish, taking into
15322 			 * account iter_next() contract of eventually returning
15323 			 * sticky NULL result.
15324 			 */
15325 			if (is_iter_next_insn(env, insn_idx)) {
15326 				if (states_equal(env, &sl->state, cur)) {
15327 					struct bpf_func_state *cur_frame;
15328 					struct bpf_reg_state *iter_state, *iter_reg;
15329 					int spi;
15330 
15331 					cur_frame = cur->frame[cur->curframe];
15332 					/* btf_check_iter_kfuncs() enforces that
15333 					 * iter state pointer is always the first arg
15334 					 */
15335 					iter_reg = &cur_frame->regs[BPF_REG_1];
15336 					/* current state is valid due to states_equal(),
15337 					 * so we can assume valid iter and reg state,
15338 					 * no need for extra (re-)validations
15339 					 */
15340 					spi = __get_spi(iter_reg->off + iter_reg->var_off.value);
15341 					iter_state = &func(env, iter_reg)->stack[spi].spilled_ptr;
15342 					if (iter_state->iter.state == BPF_ITER_STATE_ACTIVE)
15343 						goto hit;
15344 				}
15345 				goto skip_inf_loop_check;
15346 			}
15347 			/* attempt to detect infinite loop to avoid unnecessary doomed work */
15348 			if (states_maybe_looping(&sl->state, cur) &&
15349 			    states_equal(env, &sl->state, cur) &&
15350 			    !iter_active_depths_differ(&sl->state, cur)) {
15351 				verbose_linfo(env, insn_idx, "; ");
15352 				verbose(env, "infinite loop detected at insn %d\n", insn_idx);
15353 				return -EINVAL;
15354 			}
15355 			/* if the verifier is processing a loop, avoid adding new state
15356 			 * too often, since different loop iterations have distinct
15357 			 * states and may not help future pruning.
15358 			 * This threshold shouldn't be too low to make sure that
15359 			 * a loop with large bound will be rejected quickly.
15360 			 * The most abusive loop will be:
15361 			 * r1 += 1
15362 			 * if r1 < 1000000 goto pc-2
15363 			 * 1M insn_processed limit / 100 == 10k peak states.
15364 			 * This threshold shouldn't be too high either, since states
15365 			 * at the end of the loop are likely to be useful in pruning.
15366 			 */
15367 skip_inf_loop_check:
15368 			if (!force_new_state &&
15369 			    env->jmps_processed - env->prev_jmps_processed < 20 &&
15370 			    env->insn_processed - env->prev_insn_processed < 100)
15371 				add_new_state = false;
15372 			goto miss;
15373 		}
15374 		if (states_equal(env, &sl->state, cur)) {
15375 hit:
15376 			sl->hit_cnt++;
15377 			/* reached equivalent register/stack state,
15378 			 * prune the search.
15379 			 * Registers read by the continuation are read by us.
15380 			 * If we have any write marks in env->cur_state, they
15381 			 * will prevent corresponding reads in the continuation
15382 			 * from reaching our parent (an explored_state).  Our
15383 			 * own state will get the read marks recorded, but
15384 			 * they'll be immediately forgotten as we're pruning
15385 			 * this state and will pop a new one.
15386 			 */
15387 			err = propagate_liveness(env, &sl->state, cur);
15388 
15389 			/* if previous state reached the exit with precision and
15390 			 * current state is equivalent to it (except precision marks)
15391 			 * the precision needs to be propagated back in
15392 			 * the current state.
15393 			 */
15394 			err = err ? : push_jmp_history(env, cur);
15395 			err = err ? : propagate_precision(env, &sl->state);
15396 			if (err)
15397 				return err;
15398 			return 1;
15399 		}
15400 miss:
15401 		/* When a new state is not going to be added, do not increase the miss count.
15402 		 * Otherwise several loop iterations will remove the state
15403 		 * recorded earlier. The goal of these heuristics is to have
15404 		 * states from some iterations of the loop (some in the beginning
15405 		 * and some at the end) to help pruning.
15406 		 */
15407 		if (add_new_state)
15408 			sl->miss_cnt++;
15409 		/* heuristic to determine whether this state is beneficial
15410 		 * to keep checking from state equivalence point of view.
15411 		 * Higher numbers increase max_states_per_insn and verification time,
15412 		 * but do not meaningfully decrease insn_processed.
15413 		 */
15414 		if (sl->miss_cnt > sl->hit_cnt * 3 + 3) {
15415 			/* the state is unlikely to be useful. Remove it to
15416 			 * speed up verification
15417 			 */
15418 			*pprev = sl->next;
15419 			if (sl->state.frame[0]->regs[0].live & REG_LIVE_DONE) {
15420 				u32 br = sl->state.branches;
15421 
15422 				WARN_ONCE(br,
15423 					  "BUG live_done but branches_to_explore %d\n",
15424 					  br);
15425 				free_verifier_state(&sl->state, false);
15426 				kfree(sl);
15427 				env->peak_states--;
15428 			} else {
15429 				/* cannot free this state, since parentage chain may
15430 				 * walk it later. Add it for free_list instead to
15431 				 * be freed at the end of verification
15432 				 */
15433 				sl->next = env->free_list;
15434 				env->free_list = sl;
15435 			}
15436 			sl = *pprev;
15437 			continue;
15438 		}
15439 next:
15440 		pprev = &sl->next;
15441 		sl = *pprev;
15442 	}
15443 
15444 	if (env->max_states_per_insn < states_cnt)
15445 		env->max_states_per_insn = states_cnt;
15446 
15447 	if (!env->bpf_capable && states_cnt > BPF_COMPLEXITY_LIMIT_STATES)
15448 		return 0;
15449 
15450 	if (!add_new_state)
15451 		return 0;
15452 
15453 	/* There were no equivalent states, remember the current one.
15454 	 * Technically the current state is not proven to be safe yet,
15455 	 * but it will either reach the outermost bpf_exit (which means it's safe)
15456 	 * or it will be rejected. When there are no loops the verifier won't be
15457 	 * seeing this tuple (frame[0].callsite, frame[1].callsite, .. insn_idx)
15458 	 * again on the way to bpf_exit.
15459 	 * When looping the sl->state.branches will be > 0 and this state
15460 	 * will not be considered for equivalence until branches == 0.
15461 	 */
15462 	new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL);
15463 	if (!new_sl)
15464 		return -ENOMEM;
15465 	env->total_states++;
15466 	env->peak_states++;
15467 	env->prev_jmps_processed = env->jmps_processed;
15468 	env->prev_insn_processed = env->insn_processed;
15469 
15470 	/* forget precise markings we inherited, see __mark_chain_precision */
15471 	if (env->bpf_capable)
15472 		mark_all_scalars_imprecise(env, cur);
15473 
15474 	/* add new state to the head of linked list */
15475 	new = &new_sl->state;
15476 	err = copy_verifier_state(new, cur);
15477 	if (err) {
15478 		free_verifier_state(new, false);
15479 		kfree(new_sl);
15480 		return err;
15481 	}
15482 	new->insn_idx = insn_idx;
15483 	WARN_ONCE(new->branches != 1,
15484 		  "BUG is_state_visited:branches_to_explore=%d insn %d\n", new->branches, insn_idx);
15485 
15486 	cur->parent = new;
15487 	cur->first_insn_idx = insn_idx;
15488 	clear_jmp_history(cur);
15489 	new_sl->next = *explored_state(env, insn_idx);
15490 	*explored_state(env, insn_idx) = new_sl;
15491 	/* connect new state to parentage chain. Current frame needs all
15492 	 * registers connected. Only r6 - r9 of the callers are alive (pushed
15493 	 * to the stack implicitly by JITs) so in callers' frames connect just
15494 	 * r6 - r9 as an optimization. Callers will have r1 - r5 connected to
15495 	 * the state of the call instruction (with WRITTEN set), and r0 comes
15496 	 * from callee with its full parentage chain, anyway.
15497 	 */
15498 	/* clear write marks in current state: the writes we did are not writes
15499 	 * our child did, so they don't screen off its reads from us.
15500 	 * (There are no read marks in current state, because reads always mark
15501 	 * their parent and current state never has children yet.  Only
15502 	 * explored_states can get read marks.)
15503 	 */
15504 	for (j = 0; j <= cur->curframe; j++) {
15505 		for (i = j < cur->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++)
15506 			cur->frame[j]->regs[i].parent = &new->frame[j]->regs[i];
15507 		for (i = 0; i < BPF_REG_FP; i++)
15508 			cur->frame[j]->regs[i].live = REG_LIVE_NONE;
15509 	}
15510 
15511 	/* all stack frames are accessible from callee, clear them all */
15512 	for (j = 0; j <= cur->curframe; j++) {
15513 		struct bpf_func_state *frame = cur->frame[j];
15514 		struct bpf_func_state *newframe = new->frame[j];
15515 
15516 		for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++) {
15517 			frame->stack[i].spilled_ptr.live = REG_LIVE_NONE;
15518 			frame->stack[i].spilled_ptr.parent =
15519 						&newframe->stack[i].spilled_ptr;
15520 		}
15521 	}
15522 	return 0;
15523 }
15524 
15525 /* Return true if it's OK to have the same insn return a different type. */
15526 static bool reg_type_mismatch_ok(enum bpf_reg_type type)
15527 {
15528 	switch (base_type(type)) {
15529 	case PTR_TO_CTX:
15530 	case PTR_TO_SOCKET:
15531 	case PTR_TO_SOCK_COMMON:
15532 	case PTR_TO_TCP_SOCK:
15533 	case PTR_TO_XDP_SOCK:
15534 	case PTR_TO_BTF_ID:
15535 		return false;
15536 	default:
15537 		return true;
15538 	}
15539 }
15540 
15541 /* If an instruction was previously used with particular pointer types, then we
15542  * need to be careful to avoid cases such as the below, where it may be ok
15543  * for one branch accessing the pointer, but not ok for the other branch:
15544  *
15545  * R1 = sock_ptr
15546  * goto X;
15547  * ...
15548  * R1 = some_other_valid_ptr;
15549  * goto X;
15550  * ...
15551  * R2 = *(u32 *)(R1 + 0);
15552  */
15553 static bool reg_type_mismatch(enum bpf_reg_type src, enum bpf_reg_type prev)
15554 {
15555 	return src != prev && (!reg_type_mismatch_ok(src) ||
15556 			       !reg_type_mismatch_ok(prev));
15557 }
15558 
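/* Remember the pointer type used by a memory access insn in insn_aux_data, so
 * that a conflicting pointer type can be detected when the same insn is
 * reached again through a different path (see reg_type_mismatch() above).
 */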
15559 static int save_aux_ptr_type(struct bpf_verifier_env *env, enum bpf_reg_type type,
15560 			     bool allow_trust_mismatch)
15561 {
15562 	enum bpf_reg_type *prev_type = &env->insn_aux_data[env->insn_idx].ptr_type;
15563 
15564 	if (*prev_type == NOT_INIT) {
15565 		/* Saw a valid insn
15566 		 * dst_reg = *(u32 *)(src_reg + off)
15567 		 * save type to validate intersecting paths
15568 		 */
15569 		*prev_type = type;
15570 	} else if (reg_type_mismatch(type, *prev_type)) {
15571 		/* Abuser program is trying to use the same insn
15572 		 * dst_reg = *(u32*) (src_reg + off)
15573 		 * with different pointer types:
15574 		 * src_reg == ctx in one branch and
15575 		 * src_reg == stack|map in some other branch.
15576 		 * Reject it.
15577 		 */
15578 		if (allow_trust_mismatch &&
15579 		    base_type(type) == PTR_TO_BTF_ID &&
15580 		    base_type(*prev_type) == PTR_TO_BTF_ID) {
15581 			/*
15582 			 * Have to support a use case where one path through
15583 			 * the program yields a TRUSTED pointer while another
15584 			 * is UNTRUSTED. Fall back to UNTRUSTED to generate
15585 			 * BPF_PROBE_MEM.
15586 			 */
15587 			*prev_type = PTR_TO_BTF_ID | PTR_UNTRUSTED;
15588 		} else {
15589 			verbose(env, "same insn cannot be used with different pointers\n");
15590 			return -EINVAL;
15591 		}
15592 	}
15593 
15594 	return 0;
15595 }
15596 
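/* Main verification loop: simulate instructions one by one starting from
 * env->insn_idx, updating register/stack state, pruning paths via
 * is_state_visited() at prune points, and popping the next branch state from
 * the stack whenever a path reaches bpf_exit.
 */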
15597 static int do_check(struct bpf_verifier_env *env)
15598 {
15599 	bool pop_log = !(env->log.level & BPF_LOG_LEVEL2);
15600 	struct bpf_verifier_state *state = env->cur_state;
15601 	struct bpf_insn *insns = env->prog->insnsi;
15602 	struct bpf_reg_state *regs;
15603 	int insn_cnt = env->prog->len;
15604 	bool do_print_state = false;
15605 	int prev_insn_idx = -1;
15606 
15607 	for (;;) {
15608 		struct bpf_insn *insn;
15609 		u8 class;
15610 		int err;
15611 
15612 		env->prev_insn_idx = prev_insn_idx;
15613 		if (env->insn_idx >= insn_cnt) {
15614 			verbose(env, "invalid insn idx %d insn_cnt %d\n",
15615 				env->insn_idx, insn_cnt);
15616 			return -EFAULT;
15617 		}
15618 
15619 		insn = &insns[env->insn_idx];
15620 		class = BPF_CLASS(insn->code);
15621 
15622 		if (++env->insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) {
15623 			verbose(env,
15624 				"BPF program is too large. Processed %d insn\n",
15625 				env->insn_processed);
15626 			return -E2BIG;
15627 		}
15628 
15629 		state->last_insn_idx = env->prev_insn_idx;
15630 
15631 		if (is_prune_point(env, env->insn_idx)) {
15632 			err = is_state_visited(env, env->insn_idx);
15633 			if (err < 0)
15634 				return err;
15635 			if (err == 1) {
15636 				/* found equivalent state, can prune the search */
15637 				if (env->log.level & BPF_LOG_LEVEL) {
15638 					if (do_print_state)
15639 						verbose(env, "\nfrom %d to %d%s: safe\n",
15640 							env->prev_insn_idx, env->insn_idx,
15641 							env->cur_state->speculative ?
15642 							" (speculative execution)" : "");
15643 					else
15644 						verbose(env, "%d: safe\n", env->insn_idx);
15645 				}
15646 				goto process_bpf_exit;
15647 			}
15648 		}
15649 
15650 		if (is_jmp_point(env, env->insn_idx)) {
15651 			err = push_jmp_history(env, state);
15652 			if (err)
15653 				return err;
15654 		}
15655 
15656 		if (signal_pending(current))
15657 			return -EAGAIN;
15658 
15659 		if (need_resched())
15660 			cond_resched();
15661 
15662 		if (env->log.level & BPF_LOG_LEVEL2 && do_print_state) {
15663 			verbose(env, "\nfrom %d to %d%s:",
15664 				env->prev_insn_idx, env->insn_idx,
15665 				env->cur_state->speculative ?
15666 				" (speculative execution)" : "");
15667 			print_verifier_state(env, state->frame[state->curframe], true);
15668 			do_print_state = false;
15669 		}
15670 
15671 		if (env->log.level & BPF_LOG_LEVEL) {
15672 			const struct bpf_insn_cbs cbs = {
15673 				.cb_call	= disasm_kfunc_name,
15674 				.cb_print	= verbose,
15675 				.private_data	= env,
15676 			};
15677 
15678 			if (verifier_state_scratched(env))
15679 				print_insn_state(env, state->frame[state->curframe]);
15680 
15681 			verbose_linfo(env, env->insn_idx, "; ");
15682 			env->prev_log_pos = env->log.end_pos;
15683 			verbose(env, "%d: ", env->insn_idx);
15684 			print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
15685 			env->prev_insn_print_pos = env->log.end_pos - env->prev_log_pos;
15686 			env->prev_log_pos = env->log.end_pos;
15687 		}
15688 
15689 		if (bpf_prog_is_offloaded(env->prog->aux)) {
15690 			err = bpf_prog_offload_verify_insn(env, env->insn_idx,
15691 							   env->prev_insn_idx);
15692 			if (err)
15693 				return err;
15694 		}
15695 
15696 		regs = cur_regs(env);
15697 		sanitize_mark_insn_seen(env);
15698 		prev_insn_idx = env->insn_idx;
15699 
15700 		if (class == BPF_ALU || class == BPF_ALU64) {
15701 			err = check_alu_op(env, insn);
15702 			if (err)
15703 				return err;
15704 
15705 		} else if (class == BPF_LDX) {
15706 			enum bpf_reg_type src_reg_type;
15707 
15708 			/* check for reserved fields is already done */
15709 
15710 			/* check src operand */
15711 			err = check_reg_arg(env, insn->src_reg, SRC_OP);
15712 			if (err)
15713 				return err;
15714 
15715 			err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
15716 			if (err)
15717 				return err;
15718 
15719 			src_reg_type = regs[insn->src_reg].type;
15720 
15721 			/* check that memory (src_reg + off) is readable,
15722 			 * the state of dst_reg will be updated by this func
15723 			 */
15724 			err = check_mem_access(env, env->insn_idx, insn->src_reg,
15725 					       insn->off, BPF_SIZE(insn->code),
15726 					       BPF_READ, insn->dst_reg, false);
15727 			if (err)
15728 				return err;
15729 
15730 			err = save_aux_ptr_type(env, src_reg_type, true);
15731 			if (err)
15732 				return err;
15733 		} else if (class == BPF_STX) {
15734 			enum bpf_reg_type dst_reg_type;
15735 
15736 			if (BPF_MODE(insn->code) == BPF_ATOMIC) {
15737 				err = check_atomic(env, env->insn_idx, insn);
15738 				if (err)
15739 					return err;
15740 				env->insn_idx++;
15741 				continue;
15742 			}
15743 
15744 			if (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0) {
15745 				verbose(env, "BPF_STX uses reserved fields\n");
15746 				return -EINVAL;
15747 			}
15748 
15749 			/* check src1 operand */
15750 			err = check_reg_arg(env, insn->src_reg, SRC_OP);
15751 			if (err)
15752 				return err;
15753 			/* check src2 operand */
15754 			err = check_reg_arg(env, insn->dst_reg, SRC_OP);
15755 			if (err)
15756 				return err;
15757 
15758 			dst_reg_type = regs[insn->dst_reg].type;
15759 
15760 			/* check that memory (dst_reg + off) is writeable */
15761 			err = check_mem_access(env, env->insn_idx, insn->dst_reg,
15762 					       insn->off, BPF_SIZE(insn->code),
15763 					       BPF_WRITE, insn->src_reg, false);
15764 			if (err)
15765 				return err;
15766 
15767 			err = save_aux_ptr_type(env, dst_reg_type, false);
15768 			if (err)
15769 				return err;
15770 		} else if (class == BPF_ST) {
15771 			enum bpf_reg_type dst_reg_type;
15772 
15773 			if (BPF_MODE(insn->code) != BPF_MEM ||
15774 			    insn->src_reg != BPF_REG_0) {
15775 				verbose(env, "BPF_ST uses reserved fields\n");
15776 				return -EINVAL;
15777 			}
15778 			/* check src operand */
15779 			err = check_reg_arg(env, insn->dst_reg, SRC_OP);
15780 			if (err)
15781 				return err;
15782 
15783 			dst_reg_type = regs[insn->dst_reg].type;
15784 
15785 			/* check that memory (dst_reg + off) is writeable */
15786 			err = check_mem_access(env, env->insn_idx, insn->dst_reg,
15787 					       insn->off, BPF_SIZE(insn->code),
15788 					       BPF_WRITE, -1, false);
15789 			if (err)
15790 				return err;
15791 
15792 			err = save_aux_ptr_type(env, dst_reg_type, false);
15793 			if (err)
15794 				return err;
15795 		} else if (class == BPF_JMP || class == BPF_JMP32) {
15796 			u8 opcode = BPF_OP(insn->code);
15797 
15798 			env->jmps_processed++;
15799 			if (opcode == BPF_CALL) {
15800 				if (BPF_SRC(insn->code) != BPF_K ||
15801 				    (insn->src_reg != BPF_PSEUDO_KFUNC_CALL
15802 				     && insn->off != 0) ||
15803 				    (insn->src_reg != BPF_REG_0 &&
15804 				     insn->src_reg != BPF_PSEUDO_CALL &&
15805 				     insn->src_reg != BPF_PSEUDO_KFUNC_CALL) ||
15806 				    insn->dst_reg != BPF_REG_0 ||
15807 				    class == BPF_JMP32) {
15808 					verbose(env, "BPF_CALL uses reserved fields\n");
15809 					return -EINVAL;
15810 				}
15811 
15812 				if (env->cur_state->active_lock.ptr) {
15813 					if ((insn->src_reg == BPF_REG_0 && insn->imm != BPF_FUNC_spin_unlock) ||
15814 					    (insn->src_reg == BPF_PSEUDO_CALL) ||
15815 					    (insn->src_reg == BPF_PSEUDO_KFUNC_CALL &&
15816 					     (insn->off != 0 || !is_bpf_graph_api_kfunc(insn->imm)))) {
15817 						verbose(env, "function calls are not allowed while holding a lock\n");
15818 						return -EINVAL;
15819 					}
15820 				}
15821 				if (insn->src_reg == BPF_PSEUDO_CALL)
15822 					err = check_func_call(env, insn, &env->insn_idx);
15823 				else if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL)
15824 					err = check_kfunc_call(env, insn, &env->insn_idx);
15825 				else
15826 					err = check_helper_call(env, insn, &env->insn_idx);
15827 				if (err)
15828 					return err;
15829 
15830 				mark_reg_scratched(env, BPF_REG_0);
15831 			} else if (opcode == BPF_JA) {
15832 				if (BPF_SRC(insn->code) != BPF_K ||
15833 				    insn->imm != 0 ||
15834 				    insn->src_reg != BPF_REG_0 ||
15835 				    insn->dst_reg != BPF_REG_0 ||
15836 				    class == BPF_JMP32) {
15837 					verbose(env, "BPF_JA uses reserved fields\n");
15838 					return -EINVAL;
15839 				}
15840 
15841 				env->insn_idx += insn->off + 1;
15842 				continue;
15843 
15844 			} else if (opcode == BPF_EXIT) {
15845 				if (BPF_SRC(insn->code) != BPF_K ||
15846 				    insn->imm != 0 ||
15847 				    insn->src_reg != BPF_REG_0 ||
15848 				    insn->dst_reg != BPF_REG_0 ||
15849 				    class == BPF_JMP32) {
15850 					verbose(env, "BPF_EXIT uses reserved fields\n");
15851 					return -EINVAL;
15852 				}
15853 
15854 				if (env->cur_state->active_lock.ptr &&
15855 				    !in_rbtree_lock_required_cb(env)) {
15856 					verbose(env, "bpf_spin_unlock is missing\n");
15857 					return -EINVAL;
15858 				}
15859 
15860 				if (env->cur_state->active_rcu_lock) {
15861 					verbose(env, "bpf_rcu_read_unlock is missing\n");
15862 					return -EINVAL;
15863 				}
15864 
15865 				/* We must do check_reference_leak here before
15866 				 * prepare_func_exit to handle the case when
15867 				 * state->curframe > 0: it may be a callback
15868 				 * function, for which reference_state must
15869 				 * match caller reference state when it exits.
15870 				 */
15871 				err = check_reference_leak(env);
15872 				if (err)
15873 					return err;
15874 
15875 				if (state->curframe) {
15876 					/* exit from nested function */
15877 					err = prepare_func_exit(env, &env->insn_idx);
15878 					if (err)
15879 						return err;
15880 					do_print_state = true;
15881 					continue;
15882 				}
15883 
15884 				err = check_return_code(env);
15885 				if (err)
15886 					return err;
15887 process_bpf_exit:
15888 				mark_verifier_state_scratched(env);
15889 				update_branch_counts(env, env->cur_state);
15890 				err = pop_stack(env, &prev_insn_idx,
15891 						&env->insn_idx, pop_log);
15892 				if (err < 0) {
15893 					if (err != -ENOENT)
15894 						return err;
15895 					break;
15896 				} else {
15897 					do_print_state = true;
15898 					continue;
15899 				}
15900 			} else {
15901 				err = check_cond_jmp_op(env, insn, &env->insn_idx);
15902 				if (err)
15903 					return err;
15904 			}
15905 		} else if (class == BPF_LD) {
15906 			u8 mode = BPF_MODE(insn->code);
15907 
15908 			if (mode == BPF_ABS || mode == BPF_IND) {
15909 				err = check_ld_abs(env, insn);
15910 				if (err)
15911 					return err;
15912 
15913 			} else if (mode == BPF_IMM) {
15914 				err = check_ld_imm(env, insn);
15915 				if (err)
15916 					return err;
15917 
15918 				env->insn_idx++;
15919 				sanitize_mark_insn_seen(env);
15920 			} else {
15921 				verbose(env, "invalid BPF_LD mode\n");
15922 				return -EINVAL;
15923 			}
15924 		} else {
15925 			verbose(env, "unknown insn class %d\n", class);
15926 			return -EINVAL;
15927 		}
15928 
15929 		env->insn_idx++;
15930 	}
15931 
15932 	return 0;
15933 }
15934 
15935 static int find_btf_percpu_datasec(struct btf *btf)
15936 {
15937 	const struct btf_type *t;
15938 	const char *tname;
15939 	int i, n;
15940 
15941 	/*
15942 	 * Both vmlinux and each module have their own ".data..percpu"
15943 	 * DATASEC in BTF. So in the module's case, we need to skip vmlinux BTF
15944 	 * types and look only at the module's own BTF types.
15945 	 */
15946 	n = btf_nr_types(btf);
15947 	if (btf_is_module(btf))
15948 		i = btf_nr_types(btf_vmlinux);
15949 	else
15950 		i = 1;
15951 
15952 	for (; i < n; i++) {
15953 		t = btf_type_by_id(btf, i);
15954 		if (BTF_INFO_KIND(t->info) != BTF_KIND_DATASEC)
15955 			continue;
15956 
15957 		tname = btf_name_by_offset(btf, t->name_off);
15958 		if (!strcmp(tname, ".data..percpu"))
15959 			return i;
15960 	}
15961 
15962 	return -ENOENT;
15963 }
15964 
15965 /* replace pseudo btf_id with kernel symbol address */
15966 static int check_pseudo_btf_id(struct bpf_verifier_env *env,
15967 			       struct bpf_insn *insn,
15968 			       struct bpf_insn_aux_data *aux)
15969 {
15970 	const struct btf_var_secinfo *vsi;
15971 	const struct btf_type *datasec;
15972 	struct btf_mod_pair *btf_mod;
15973 	const struct btf_type *t;
15974 	const char *sym_name;
15975 	bool percpu = false;
15976 	u32 type, id = insn->imm;
15977 	struct btf *btf;
15978 	s32 datasec_id;
15979 	u64 addr;
15980 	int i, btf_fd, err;
15981 
15982 	btf_fd = insn[1].imm;
15983 	if (btf_fd) {
15984 		btf = btf_get_by_fd(btf_fd);
15985 		if (IS_ERR(btf)) {
15986 			verbose(env, "invalid module BTF object FD specified.\n");
15987 			return -EINVAL;
15988 		}
15989 	} else {
15990 		if (!btf_vmlinux) {
15991 			verbose(env, "kernel is missing BTF, make sure CONFIG_DEBUG_INFO_BTF=y is specified in Kconfig.\n");
15992 			return -EINVAL;
15993 		}
15994 		btf = btf_vmlinux;
15995 		btf_get(btf);
15996 	}
15997 
15998 	t = btf_type_by_id(btf, id);
15999 	if (!t) {
16000 		verbose(env, "ldimm64 insn specifies invalid btf_id %d.\n", id);
16001 		err = -ENOENT;
16002 		goto err_put;
16003 	}
16004 
16005 	if (!btf_type_is_var(t) && !btf_type_is_func(t)) {
16006 		verbose(env, "pseudo btf_id %d in ldimm64 isn't KIND_VAR or KIND_FUNC\n", id);
16007 		err = -EINVAL;
16008 		goto err_put;
16009 	}
16010 
16011 	sym_name = btf_name_by_offset(btf, t->name_off);
16012 	addr = kallsyms_lookup_name(sym_name);
16013 	if (!addr) {
16014 		verbose(env, "ldimm64 failed to find the address for kernel symbol '%s'.\n",
16015 			sym_name);
16016 		err = -ENOENT;
16017 		goto err_put;
16018 	}
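	/* ldimm64 spans two instructions: the low 32 bits of the resolved
	 * symbol address go into the first imm, the high 32 bits into the
	 * second
	 */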
16019 	insn[0].imm = (u32)addr;
16020 	insn[1].imm = addr >> 32;
16021 
16022 	if (btf_type_is_func(t)) {
16023 		aux->btf_var.reg_type = PTR_TO_MEM | MEM_RDONLY;
16024 		aux->btf_var.mem_size = 0;
16025 		goto check_btf;
16026 	}
16027 
16028 	datasec_id = find_btf_percpu_datasec(btf);
16029 	if (datasec_id > 0) {
16030 		datasec = btf_type_by_id(btf, datasec_id);
16031 		for_each_vsi(i, datasec, vsi) {
16032 			if (vsi->type == id) {
16033 				percpu = true;
16034 				break;
16035 			}
16036 		}
16037 	}
16038 
16039 	type = t->type;
16040 	t = btf_type_skip_modifiers(btf, type, NULL);
16041 	if (percpu) {
16042 		aux->btf_var.reg_type = PTR_TO_BTF_ID | MEM_PERCPU;
16043 		aux->btf_var.btf = btf;
16044 		aux->btf_var.btf_id = type;
16045 	} else if (!btf_type_is_struct(t)) {
16046 		const struct btf_type *ret;
16047 		const char *tname;
16048 		u32 tsize;
16049 
16050 		/* resolve the type size of ksym. */
16051 		ret = btf_resolve_size(btf, t, &tsize);
16052 		if (IS_ERR(ret)) {
16053 			tname = btf_name_by_offset(btf, t->name_off);
16054 			verbose(env, "ldimm64 unable to resolve the size of type '%s': %ld\n",
16055 				tname, PTR_ERR(ret));
16056 			err = -EINVAL;
16057 			goto err_put;
16058 		}
16059 		aux->btf_var.reg_type = PTR_TO_MEM | MEM_RDONLY;
16060 		aux->btf_var.mem_size = tsize;
16061 	} else {
16062 		aux->btf_var.reg_type = PTR_TO_BTF_ID;
16063 		aux->btf_var.btf = btf;
16064 		aux->btf_var.btf_id = type;
16065 	}
16066 check_btf:
16067 	/* check whether we recorded this BTF (and maybe module) already */
16068 	for (i = 0; i < env->used_btf_cnt; i++) {
16069 		if (env->used_btfs[i].btf == btf) {
16070 			btf_put(btf);
16071 			return 0;
16072 		}
16073 	}
16074 
16075 	if (env->used_btf_cnt >= MAX_USED_BTFS) {
16076 		err = -E2BIG;
16077 		goto err_put;
16078 	}
16079 
16080 	btf_mod = &env->used_btfs[env->used_btf_cnt];
16081 	btf_mod->btf = btf;
16082 	btf_mod->module = NULL;
16083 
16084 	/* if we reference variables from kernel module, bump its refcount */
16085 	if (btf_is_module(btf)) {
16086 		btf_mod->module = btf_try_get_module(btf);
16087 		if (!btf_mod->module) {
16088 			err = -ENXIO;
16089 			goto err_put;
16090 		}
16091 	}
16092 
16093 	env->used_btf_cnt++;
16094 
16095 	return 0;
16096 err_put:
16097 	btf_put(btf);
16098 	return err;
16099 }
16100 
16101 static bool is_tracing_prog_type(enum bpf_prog_type type)
16102 {
16103 	switch (type) {
16104 	case BPF_PROG_TYPE_KPROBE:
16105 	case BPF_PROG_TYPE_TRACEPOINT:
16106 	case BPF_PROG_TYPE_PERF_EVENT:
16107 	case BPF_PROG_TYPE_RAW_TRACEPOINT:
16108 	case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
16109 		return true;
16110 	default:
16111 		return false;
16112 	}
16113 }
16114 
16115 static int check_map_prog_compatibility(struct bpf_verifier_env *env,
16116 					struct bpf_map *map,
16117 					struct bpf_prog *prog)
16118 
16119 {
16120 	enum bpf_prog_type prog_type = resolve_prog_type(prog);
16121 
16122 	if (btf_record_has_field(map->record, BPF_LIST_HEAD) ||
16123 	    btf_record_has_field(map->record, BPF_RB_ROOT)) {
16124 		if (is_tracing_prog_type(prog_type)) {
16125 			verbose(env, "tracing progs cannot use bpf_{list_head,rb_root} yet\n");
16126 			return -EINVAL;
16127 		}
16128 	}
16129 
16130 	if (btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
16131 		if (prog_type == BPF_PROG_TYPE_SOCKET_FILTER) {
16132 			verbose(env, "socket filter progs cannot use bpf_spin_lock yet\n");
16133 			return -EINVAL;
16134 		}
16135 
16136 		if (is_tracing_prog_type(prog_type)) {
16137 			verbose(env, "tracing progs cannot use bpf_spin_lock yet\n");
16138 			return -EINVAL;
16139 		}
16140 
16141 		if (prog->aux->sleepable) {
16142 			verbose(env, "sleepable progs cannot use bpf_spin_lock yet\n");
16143 			return -EINVAL;
16144 		}
16145 	}
16146 
16147 	if (btf_record_has_field(map->record, BPF_TIMER)) {
16148 		if (is_tracing_prog_type(prog_type)) {
16149 			verbose(env, "tracing progs cannot use bpf_timer yet\n");
16150 			return -EINVAL;
16151 		}
16152 	}
16153 
16154 	if ((bpf_prog_is_offloaded(prog->aux) || bpf_map_is_offloaded(map)) &&
16155 	    !bpf_offload_prog_map_match(prog, map)) {
16156 		verbose(env, "offload device mismatch between prog and map\n");
16157 		return -EINVAL;
16158 	}
16159 
16160 	if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
16161 		verbose(env, "bpf_struct_ops map cannot be used in prog\n");
16162 		return -EINVAL;
16163 	}
16164 
16165 	if (prog->aux->sleepable)
16166 		switch (map->map_type) {
16167 		case BPF_MAP_TYPE_HASH:
16168 		case BPF_MAP_TYPE_LRU_HASH:
16169 		case BPF_MAP_TYPE_ARRAY:
16170 		case BPF_MAP_TYPE_PERCPU_HASH:
16171 		case BPF_MAP_TYPE_PERCPU_ARRAY:
16172 		case BPF_MAP_TYPE_LRU_PERCPU_HASH:
16173 		case BPF_MAP_TYPE_ARRAY_OF_MAPS:
16174 		case BPF_MAP_TYPE_HASH_OF_MAPS:
16175 		case BPF_MAP_TYPE_RINGBUF:
16176 		case BPF_MAP_TYPE_USER_RINGBUF:
16177 		case BPF_MAP_TYPE_INODE_STORAGE:
16178 		case BPF_MAP_TYPE_SK_STORAGE:
16179 		case BPF_MAP_TYPE_TASK_STORAGE:
16180 		case BPF_MAP_TYPE_CGRP_STORAGE:
16181 			break;
16182 		default:
16183 			verbose(env,
16184 				"Sleepable programs can only use array, hash, ringbuf and local storage maps\n");
16185 			return -EINVAL;
16186 		}
16187 
16188 	return 0;
16189 }
16190 
16191 static bool bpf_map_is_cgroup_storage(struct bpf_map *map)
16192 {
16193 	return (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE ||
16194 		map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE);
16195 }
16196 
16197 /* find and rewrite pseudo imm in ld_imm64 instructions:
16198  *
16199  * 1. if it accesses map FD, replace it with actual map pointer.
16200  * 2. if it accesses btf_id of a VAR, replace it with pointer to the var.
16201  *
16202  * NOTE: btf_vmlinux is required for converting pseudo btf_id.
16203  */
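/* Illustrative sketch of case 1 (fd value is hypothetical): a program using
 * BPF_LD_MAP_FD(BPF_REG_1, 5) arrives here as a two-insn ld_imm64 with
 * src_reg = BPF_PSEUDO_MAP_FD and insn[0].imm = 5. Once the fd is resolved,
 * insn[0].imm/insn[1].imm hold the low/high 32 bits of the 'struct bpf_map *'
 * and the map is remembered in env->used_maps[].
 */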
16204 static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
16205 {
16206 	struct bpf_insn *insn = env->prog->insnsi;
16207 	int insn_cnt = env->prog->len;
16208 	int i, j, err;
16209 
16210 	err = bpf_prog_calc_tag(env->prog);
16211 	if (err)
16212 		return err;
16213 
16214 	for (i = 0; i < insn_cnt; i++, insn++) {
16215 		if (BPF_CLASS(insn->code) == BPF_LDX &&
16216 		    (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) {
16217 			verbose(env, "BPF_LDX uses reserved fields\n");
16218 			return -EINVAL;
16219 		}
16220 
16221 		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) {
16222 			struct bpf_insn_aux_data *aux;
16223 			struct bpf_map *map;
16224 			struct fd f;
16225 			u64 addr;
16226 			u32 fd;
16227 
16228 			if (i == insn_cnt - 1 || insn[1].code != 0 ||
16229 			    insn[1].dst_reg != 0 || insn[1].src_reg != 0 ||
16230 			    insn[1].off != 0) {
16231 				verbose(env, "invalid bpf_ld_imm64 insn\n");
16232 				return -EINVAL;
16233 			}
16234 
16235 			if (insn[0].src_reg == 0)
16236 				/* valid generic load 64-bit imm */
16237 				goto next_insn;
16238 
16239 			if (insn[0].src_reg == BPF_PSEUDO_BTF_ID) {
16240 				aux = &env->insn_aux_data[i];
16241 				err = check_pseudo_btf_id(env, insn, aux);
16242 				if (err)
16243 					return err;
16244 				goto next_insn;
16245 			}
16246 
16247 			if (insn[0].src_reg == BPF_PSEUDO_FUNC) {
16248 				aux = &env->insn_aux_data[i];
16249 				aux->ptr_type = PTR_TO_FUNC;
16250 				goto next_insn;
16251 			}
16252 
16253 			/* In final convert_pseudo_ld_imm64() step, this is
16254 			 * converted into regular 64-bit imm load insn.
16255 			 */
16256 			switch (insn[0].src_reg) {
16257 			case BPF_PSEUDO_MAP_VALUE:
16258 			case BPF_PSEUDO_MAP_IDX_VALUE:
16259 				break;
16260 			case BPF_PSEUDO_MAP_FD:
16261 			case BPF_PSEUDO_MAP_IDX:
16262 				if (insn[1].imm == 0)
16263 					break;
16264 				fallthrough;
16265 			default:
16266 				verbose(env, "unrecognized bpf_ld_imm64 insn\n");
16267 				return -EINVAL;
16268 			}
16269 
16270 			switch (insn[0].src_reg) {
16271 			case BPF_PSEUDO_MAP_IDX_VALUE:
16272 			case BPF_PSEUDO_MAP_IDX:
16273 				if (bpfptr_is_null(env->fd_array)) {
16274 					verbose(env, "fd_idx without fd_array is invalid\n");
16275 					return -EPROTO;
16276 				}
16277 				if (copy_from_bpfptr_offset(&fd, env->fd_array,
16278 							    insn[0].imm * sizeof(fd),
16279 							    sizeof(fd)))
16280 					return -EFAULT;
16281 				break;
16282 			default:
16283 				fd = insn[0].imm;
16284 				break;
16285 			}
16286 
16287 			f = fdget(fd);
16288 			map = __bpf_map_get(f);
16289 			if (IS_ERR(map)) {
16290 				verbose(env, "fd %d is not pointing to valid bpf_map\n",
16291 					insn[0].imm);
16292 				return PTR_ERR(map);
16293 			}
16294 
16295 			err = check_map_prog_compatibility(env, map, env->prog);
16296 			if (err) {
16297 				fdput(f);
16298 				return err;
16299 			}
16300 
16301 			aux = &env->insn_aux_data[i];
16302 			if (insn[0].src_reg == BPF_PSEUDO_MAP_FD ||
16303 			    insn[0].src_reg == BPF_PSEUDO_MAP_IDX) {
16304 				addr = (unsigned long)map;
16305 			} else {
16306 				u32 off = insn[1].imm;
16307 
16308 				if (off >= BPF_MAX_VAR_OFF) {
16309 					verbose(env, "direct value offset of %u is not allowed\n", off);
16310 					fdput(f);
16311 					return -EINVAL;
16312 				}
16313 
16314 				if (!map->ops->map_direct_value_addr) {
16315 					verbose(env, "no direct value access support for this map type\n");
16316 					fdput(f);
16317 					return -EINVAL;
16318 				}
16319 
16320 				err = map->ops->map_direct_value_addr(map, &addr, off);
16321 				if (err) {
16322 					verbose(env, "invalid access to map value pointer, value_size=%u off=%u\n",
16323 						map->value_size, off);
16324 					fdput(f);
16325 					return err;
16326 				}
16327 
16328 				aux->map_off = off;
16329 				addr += off;
16330 			}
16331 
16332 			insn[0].imm = (u32)addr;
16333 			insn[1].imm = addr >> 32;
16334 
16335 			/* check whether we recorded this map already */
16336 			for (j = 0; j < env->used_map_cnt; j++) {
16337 				if (env->used_maps[j] == map) {
16338 					aux->map_index = j;
16339 					fdput(f);
16340 					goto next_insn;
16341 				}
16342 			}
16343 
16344 			if (env->used_map_cnt >= MAX_USED_MAPS) {
16345 				fdput(f);
16346 				return -E2BIG;
16347 			}
16348 
16349 			/* hold the map. If the program is rejected by verifier,
16350 			 * the map will be released by release_maps() or it
16351 			 * will be used by the valid program until it's unloaded
16352 			 * and all maps are released in free_used_maps()
16353 			 */
16354 			bpf_map_inc(map);
16355 
16356 			aux->map_index = env->used_map_cnt;
16357 			env->used_maps[env->used_map_cnt++] = map;
16358 
16359 			if (bpf_map_is_cgroup_storage(map) &&
16360 			    bpf_cgroup_storage_assign(env->prog->aux, map)) {
16361 				verbose(env, "only one cgroup storage of each type is allowed\n");
16362 				fdput(f);
16363 				return -EBUSY;
16364 			}
16365 
16366 			fdput(f);
16367 next_insn:
16368 			insn++;
16369 			i++;
16370 			continue;
16371 		}
16372 
16373 		/* Basic sanity check before we invest more work here. */
16374 		if (!bpf_opcode_in_insntable(insn->code)) {
16375 			verbose(env, "unknown opcode %02x\n", insn->code);
16376 			return -EINVAL;
16377 		}
16378 	}
16379 
16380 	/* now all pseudo BPF_LD_IMM64 instructions load valid
16381 	 * 'struct bpf_map *' into a register instead of user map_fd.
16382 	 * These pointers will be used later by verifier to validate map access.
16383 	 */
16384 	return 0;
16385 }
16386 
16387 /* drop refcnt of maps used by the rejected program */
16388 static void release_maps(struct bpf_verifier_env *env)
16389 {
16390 	__bpf_free_used_maps(env->prog->aux, env->used_maps,
16391 			     env->used_map_cnt);
16392 }
16393 
16394 /* drop refcnt of btfs used by the rejected program */
16395 static void release_btfs(struct bpf_verifier_env *env)
16396 {
16397 	__bpf_free_used_btfs(env->prog->aux, env->used_btfs,
16398 			     env->used_btf_cnt);
16399 }
16400 
16401 /* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */
16402 static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env)
16403 {
16404 	struct bpf_insn *insn = env->prog->insnsi;
16405 	int insn_cnt = env->prog->len;
16406 	int i;
16407 
16408 	for (i = 0; i < insn_cnt; i++, insn++) {
16409 		if (insn->code != (BPF_LD | BPF_IMM | BPF_DW))
16410 			continue;
16411 		if (insn->src_reg == BPF_PSEUDO_FUNC)
16412 			continue;
16413 		insn->src_reg = 0;
16414 	}
16415 }
16416 
16417 /* single env->prog->insnsi[off] instruction was replaced with the range
16418  * insnsi[off, off + cnt).  Adjust corresponding insn_aux_data by copying
16419  * [0, off) and [off, end) to new locations, so the patched range's aux data stays zeroed
16420  */
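/* Worked example (hypothetical numbers): if the single insn at off = 10 is
 * replaced by a 3-insn patch (cnt = 3), old aux [0, 10) stays in place,
 * old aux [10, end) is copied so that it now starts at index
 * 10 + 3 - 1 = 12, and the freshly patched slots at indexes 10 and 11 keep
 * the zeroed aux from vzalloc() except for the 'seen' and 'zext_dst' fields
 * filled in below.
 */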
16421 static void adjust_insn_aux_data(struct bpf_verifier_env *env,
16422 				 struct bpf_insn_aux_data *new_data,
16423 				 struct bpf_prog *new_prog, u32 off, u32 cnt)
16424 {
16425 	struct bpf_insn_aux_data *old_data = env->insn_aux_data;
16426 	struct bpf_insn *insn = new_prog->insnsi;
16427 	u32 old_seen = old_data[off].seen;
16428 	u32 prog_len;
16429 	int i;
16430 
16431 	/* aux info at OFF always needs adjustment, whether or not the fast
16432 	 * path (cnt == 1) is taken. There is no guarantee that the insn at OFF
16433 	 * is still the original insn of the old prog.
16434 	 */
16435 	old_data[off].zext_dst = insn_has_def32(env, insn + off + cnt - 1);
16436 
16437 	if (cnt == 1)
16438 		return;
16439 	prog_len = new_prog->len;
16440 
16441 	memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
16442 	memcpy(new_data + off + cnt - 1, old_data + off,
16443 	       sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
16444 	for (i = off; i < off + cnt - 1; i++) {
16445 		/* Expand insni[off]'s seen count to the patched range. */
16446 		new_data[i].seen = old_seen;
16447 		new_data[i].zext_dst = insn_has_def32(env, insn + i);
16448 	}
16449 	env->insn_aux_data = new_data;
16450 	vfree(old_data);
16451 }
16452 
16453 static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len)
16454 {
16455 	int i;
16456 
16457 	if (len == 1)
16458 		return;
16459 	/* NOTE: fake 'exit' subprog should be updated as well. */
16460 	for (i = 0; i <= env->subprog_cnt; i++) {
16461 		if (env->subprog_info[i].start <= off)
16462 			continue;
16463 		env->subprog_info[i].start += len - 1;
16464 	}
16465 }
16466 
16467 static void adjust_poke_descs(struct bpf_prog *prog, u32 off, u32 len)
16468 {
16469 	struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab;
16470 	int i, sz = prog->aux->size_poke_tab;
16471 	struct bpf_jit_poke_descriptor *desc;
16472 
16473 	for (i = 0; i < sz; i++) {
16474 		desc = &tab[i];
16475 		if (desc->insn_idx <= off)
16476 			continue;
16477 		desc->insn_idx += len - 1;
16478 	}
16479 }
16480 
16481 static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off,
16482 					    const struct bpf_insn *patch, u32 len)
16483 {
16484 	struct bpf_prog *new_prog;
16485 	struct bpf_insn_aux_data *new_data = NULL;
16486 
16487 	if (len > 1) {
16488 		new_data = vzalloc(array_size(env->prog->len + len - 1,
16489 					      sizeof(struct bpf_insn_aux_data)));
16490 		if (!new_data)
16491 			return NULL;
16492 	}
16493 
16494 	new_prog = bpf_patch_insn_single(env->prog, off, patch, len);
16495 	if (IS_ERR(new_prog)) {
16496 		if (PTR_ERR(new_prog) == -ERANGE)
16497 			verbose(env,
16498 				"insn %d cannot be patched due to 16-bit range\n",
16499 				env->insn_aux_data[off].orig_idx);
16500 		vfree(new_data);
16501 		return NULL;
16502 	}
16503 	adjust_insn_aux_data(env, new_data, new_prog, off, len);
16504 	adjust_subprog_starts(env, off, len);
16505 	adjust_poke_descs(new_prog, off, len);
16506 	return new_prog;
16507 }
16508 
16509 static int adjust_subprog_starts_after_remove(struct bpf_verifier_env *env,
16510 					      u32 off, u32 cnt)
16511 {
16512 	int i, j;
16513 
16514 	/* find first prog starting at or after off (first to remove) */
16515 	for (i = 0; i < env->subprog_cnt; i++)
16516 		if (env->subprog_info[i].start >= off)
16517 			break;
16518 	/* find first prog starting at or after off + cnt (first to stay) */
16519 	for (j = i; j < env->subprog_cnt; j++)
16520 		if (env->subprog_info[j].start >= off + cnt)
16521 			break;
16522 	/* if j doesn't start exactly at off + cnt, we are just removing
16523 	 * the front of previous prog
16524 	 */
16525 	if (env->subprog_info[j].start != off + cnt)
16526 		j--;
16527 
16528 	if (j > i) {
16529 		struct bpf_prog_aux *aux = env->prog->aux;
16530 		int move;
16531 
16532 		/* move fake 'exit' subprog as well */
16533 		move = env->subprog_cnt + 1 - j;
16534 
16535 		memmove(env->subprog_info + i,
16536 			env->subprog_info + j,
16537 			sizeof(*env->subprog_info) * move);
16538 		env->subprog_cnt -= j - i;
16539 
16540 		/* remove func_info */
16541 		if (aux->func_info) {
16542 			move = aux->func_info_cnt - j;
16543 
16544 			memmove(aux->func_info + i,
16545 				aux->func_info + j,
16546 				sizeof(*aux->func_info) * move);
16547 			aux->func_info_cnt -= j - i;
16548 			/* func_info->insn_off is set after all code rewrites,
16549 			 * in adjust_btf_func() - no need to adjust
16550 			 */
16551 		}
16552 	} else {
16553 		/* convert i from "first prog to remove" to "first to adjust" */
16554 		if (env->subprog_info[i].start == off)
16555 			i++;
16556 	}
16557 
16558 	/* update fake 'exit' subprog as well */
16559 	for (; i <= env->subprog_cnt; i++)
16560 		env->subprog_info[i].start -= cnt;
16561 
16562 	return 0;
16563 }
16564 
16565 static int bpf_adj_linfo_after_remove(struct bpf_verifier_env *env, u32 off,
16566 				      u32 cnt)
16567 {
16568 	struct bpf_prog *prog = env->prog;
16569 	u32 i, l_off, l_cnt, nr_linfo;
16570 	struct bpf_line_info *linfo;
16571 
16572 	nr_linfo = prog->aux->nr_linfo;
16573 	if (!nr_linfo)
16574 		return 0;
16575 
16576 	linfo = prog->aux->linfo;
16577 
16578 	/* find first line info to remove, count lines to be removed */
16579 	for (i = 0; i < nr_linfo; i++)
16580 		if (linfo[i].insn_off >= off)
16581 			break;
16582 
16583 	l_off = i;
16584 	l_cnt = 0;
16585 	for (; i < nr_linfo; i++)
16586 		if (linfo[i].insn_off < off + cnt)
16587 			l_cnt++;
16588 		else
16589 			break;
16590 
16591 	/* If the first live insn doesn't match the first live linfo, it needs to
16592 	 * "inherit" the last removed linfo.  prog is already modified, so
16593 	 * prog->len == off means no live instructions after (the tail was removed).
16594 	 */
16595 	if (prog->len != off && l_cnt &&
16596 	    (i == nr_linfo || linfo[i].insn_off != off + cnt)) {
16597 		l_cnt--;
16598 		linfo[--i].insn_off = off + cnt;
16599 	}
16600 
16601 	/* remove the line info which refer to the removed instructions */
16602 	if (l_cnt) {
16603 		memmove(linfo + l_off, linfo + i,
16604 			sizeof(*linfo) * (nr_linfo - i));
16605 
16606 		prog->aux->nr_linfo -= l_cnt;
16607 		nr_linfo = prog->aux->nr_linfo;
16608 	}
16609 
16610 	/* pull all linfo[i].insn_off >= off + cnt in by cnt */
16611 	for (i = l_off; i < nr_linfo; i++)
16612 		linfo[i].insn_off -= cnt;
16613 
16614 	/* fix up all subprogs (incl. 'exit') which start >= off */
16615 	for (i = 0; i <= env->subprog_cnt; i++)
16616 		if (env->subprog_info[i].linfo_idx > l_off) {
16617 			/* program may have started in the removed region but
16618 			 * may not be fully removed
16619 			 */
16620 			if (env->subprog_info[i].linfo_idx >= l_off + l_cnt)
16621 				env->subprog_info[i].linfo_idx -= l_cnt;
16622 			else
16623 				env->subprog_info[i].linfo_idx = l_off;
16624 		}
16625 
16626 	return 0;
16627 }
16628 
16629 static int verifier_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt)
16630 {
16631 	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
16632 	unsigned int orig_prog_len = env->prog->len;
16633 	int err;
16634 
16635 	if (bpf_prog_is_offloaded(env->prog->aux))
16636 		bpf_prog_offload_remove_insns(env, off, cnt);
16637 
16638 	err = bpf_remove_insns(env->prog, off, cnt);
16639 	if (err)
16640 		return err;
16641 
16642 	err = adjust_subprog_starts_after_remove(env, off, cnt);
16643 	if (err)
16644 		return err;
16645 
16646 	err = bpf_adj_linfo_after_remove(env, off, cnt);
16647 	if (err)
16648 		return err;
16649 
16650 	memmove(aux_data + off,	aux_data + off + cnt,
16651 		sizeof(*aux_data) * (orig_prog_len - off - cnt));
16652 
16653 	return 0;
16654 }
16655 
16656 /* The verifier does more data flow analysis than llvm and will not
16657  * explore branches that are dead at run time. Malicious programs can
16658  * have dead code too. Therefore replace all dead at-run-time code
16659  * with 'ja -1'.
16660  *
16661  * Plain nops would not be optimal: e.g. if they sat at the end of the
16662  * program and, through another bug, we managed to jump there, we would
16663  * execute beyond program memory. Returning an exception code would not
16664  * work either, since we can have subprogs where the dead code could be
16665  * located.
16666  */
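/* A BPF jump lands at the insn following the jump plus 'off', so 'ja -1'
 * (BPF_JMP_IMM(BPF_JA, 0, 0, -1)) jumps back to itself; if dead code were
 * ever reached through another bug, execution would spin in place rather
 * than run past the end of the program.
 */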
16667 static void sanitize_dead_code(struct bpf_verifier_env *env)
16668 {
16669 	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
16670 	struct bpf_insn trap = BPF_JMP_IMM(BPF_JA, 0, 0, -1);
16671 	struct bpf_insn *insn = env->prog->insnsi;
16672 	const int insn_cnt = env->prog->len;
16673 	int i;
16674 
16675 	for (i = 0; i < insn_cnt; i++) {
16676 		if (aux_data[i].seen)
16677 			continue;
16678 		memcpy(insn + i, &trap, sizeof(trap));
16679 		aux_data[i].zext_dst = false;
16680 	}
16681 }
16682 
16683 static bool insn_is_cond_jump(u8 code)
16684 {
16685 	u8 op;
16686 
16687 	if (BPF_CLASS(code) == BPF_JMP32)
16688 		return true;
16689 
16690 	if (BPF_CLASS(code) != BPF_JMP)
16691 		return false;
16692 
16693 	op = BPF_OP(code);
16694 	return op != BPF_JA && op != BPF_EXIT && op != BPF_CALL;
16695 }
16696 
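/* Sketch of the rewrite below (offsets are hypothetical): for a conditional
 * jump such as 'if r1 != 0 goto +5', if the fall-through insn was never
 * seen the jump is hard-wired to an unconditional 'goto +5', and if the
 * jump target was never seen it becomes 'goto +0', i.e. a plain
 * fall-through that opt_remove_nops() can strip later.
 */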
16697 static void opt_hard_wire_dead_code_branches(struct bpf_verifier_env *env)
16698 {
16699 	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
16700 	struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
16701 	struct bpf_insn *insn = env->prog->insnsi;
16702 	const int insn_cnt = env->prog->len;
16703 	int i;
16704 
16705 	for (i = 0; i < insn_cnt; i++, insn++) {
16706 		if (!insn_is_cond_jump(insn->code))
16707 			continue;
16708 
16709 		if (!aux_data[i + 1].seen)
16710 			ja.off = insn->off;
16711 		else if (!aux_data[i + 1 + insn->off].seen)
16712 			ja.off = 0;
16713 		else
16714 			continue;
16715 
16716 		if (bpf_prog_is_offloaded(env->prog->aux))
16717 			bpf_prog_offload_replace_insn(env, i, &ja);
16718 
16719 		memcpy(insn, &ja, sizeof(ja));
16720 	}
16721 }
16722 
16723 static int opt_remove_dead_code(struct bpf_verifier_env *env)
16724 {
16725 	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
16726 	int insn_cnt = env->prog->len;
16727 	int i, err;
16728 
16729 	for (i = 0; i < insn_cnt; i++) {
16730 		int j;
16731 
16732 		j = 0;
16733 		while (i + j < insn_cnt && !aux_data[i + j].seen)
16734 			j++;
16735 		if (!j)
16736 			continue;
16737 
16738 		err = verifier_remove_insns(env, i, j);
16739 		if (err)
16740 			return err;
16741 		insn_cnt = env->prog->len;
16742 	}
16743 
16744 	return 0;
16745 }
16746 
16747 static int opt_remove_nops(struct bpf_verifier_env *env)
16748 {
16749 	const struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
16750 	struct bpf_insn *insn = env->prog->insnsi;
16751 	int insn_cnt = env->prog->len;
16752 	int i, err;
16753 
16754 	for (i = 0; i < insn_cnt; i++) {
16755 		if (memcmp(&insn[i], &ja, sizeof(ja)))
16756 			continue;
16757 
16758 		err = verifier_remove_insns(env, i, 1);
16759 		if (err)
16760 			return err;
16761 		insn_cnt--;
16762 		i--;
16763 	}
16764 
16765 	return 0;
16766 }
16767 
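/* Rough sketch of the two patches emitted by the pass below: when the JIT
 * asks for explicit zero extension of a 32-bit definition, the defining
 * insn is followed by BPF_ZEXT_REG(dst) (in effect a 32-bit mov of dst to
 * itself); with BPF_F_TEST_RND_HI32 the upper 32 bits are instead poisoned
 * with a random value, roughly:
 *
 *   <insn defining the low 32 bits of dst>
 *   BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, <random u32>)
 *   BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32)
 *   BPF_ALU64_REG(BPF_OR, dst, BPF_REG_AX)
 */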
16768 static int opt_subreg_zext_lo32_rnd_hi32(struct bpf_verifier_env *env,
16769 					 const union bpf_attr *attr)
16770 {
16771 	struct bpf_insn *patch, zext_patch[2], rnd_hi32_patch[4];
16772 	struct bpf_insn_aux_data *aux = env->insn_aux_data;
16773 	int i, patch_len, delta = 0, len = env->prog->len;
16774 	struct bpf_insn *insns = env->prog->insnsi;
16775 	struct bpf_prog *new_prog;
16776 	bool rnd_hi32;
16777 
16778 	rnd_hi32 = attr->prog_flags & BPF_F_TEST_RND_HI32;
16779 	zext_patch[1] = BPF_ZEXT_REG(0);
16780 	rnd_hi32_patch[1] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, 0);
16781 	rnd_hi32_patch[2] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
16782 	rnd_hi32_patch[3] = BPF_ALU64_REG(BPF_OR, 0, BPF_REG_AX);
16783 	for (i = 0; i < len; i++) {
16784 		int adj_idx = i + delta;
16785 		struct bpf_insn insn;
16786 		int load_reg;
16787 
16788 		insn = insns[adj_idx];
16789 		load_reg = insn_def_regno(&insn);
16790 		if (!aux[adj_idx].zext_dst) {
16791 			u8 code, class;
16792 			u32 imm_rnd;
16793 
16794 			if (!rnd_hi32)
16795 				continue;
16796 
16797 			code = insn.code;
16798 			class = BPF_CLASS(code);
16799 			if (load_reg == -1)
16800 				continue;
16801 
16802 			/* NOTE: arg "reg" (the fourth one) is only used for
16803 			 *       BPF_STX + SRC_OP, so it is safe to pass NULL
16804 			 *       here.
16805 			 */
16806 			if (is_reg64(env, &insn, load_reg, NULL, DST_OP)) {
16807 				if (class == BPF_LD &&
16808 				    BPF_MODE(code) == BPF_IMM)
16809 					i++;
16810 				continue;
16811 			}
16812 
16813 			/* ctx load could be transformed into wider load. */
16814 			if (class == BPF_LDX &&
16815 			    aux[adj_idx].ptr_type == PTR_TO_CTX)
16816 				continue;
16817 
16818 			imm_rnd = get_random_u32();
16819 			rnd_hi32_patch[0] = insn;
16820 			rnd_hi32_patch[1].imm = imm_rnd;
16821 			rnd_hi32_patch[3].dst_reg = load_reg;
16822 			patch = rnd_hi32_patch;
16823 			patch_len = 4;
16824 			goto apply_patch_buffer;
16825 		}
16826 
16827 		/* Add in a zero-extend instruction if a) the JIT has requested
16828 		 * it or b) it's a CMPXCHG.
16829 		 *
16830 		 * The latter is because: BPF_CMPXCHG always loads a value into
16831 		 * R0, therefore always zero-extends. However some archs'
16832 		 * equivalent instruction only does this load when the
16833 		 * comparison is successful. This detail of CMPXCHG is
16834 		 * orthogonal to the general zero-extension behaviour of the
16835 		 * CPU, so it's treated independently of bpf_jit_needs_zext.
16836 		 */
16837 		if (!bpf_jit_needs_zext() && !is_cmpxchg_insn(&insn))
16838 			continue;
16839 
16840 		/* Zero-extension is done by the caller. */
16841 		if (bpf_pseudo_kfunc_call(&insn))
16842 			continue;
16843 
16844 		if (WARN_ON(load_reg == -1)) {
16845 			verbose(env, "verifier bug. zext_dst is set, but no reg is defined\n");
16846 			return -EFAULT;
16847 		}
16848 
16849 		zext_patch[0] = insn;
16850 		zext_patch[1].dst_reg = load_reg;
16851 		zext_patch[1].src_reg = load_reg;
16852 		patch = zext_patch;
16853 		patch_len = 2;
16854 apply_patch_buffer:
16855 		new_prog = bpf_patch_insn_data(env, adj_idx, patch, patch_len);
16856 		if (!new_prog)
16857 			return -ENOMEM;
16858 		env->prog = new_prog;
16859 		insns = new_prog->insnsi;
16860 		aux = env->insn_aux_data;
16861 		delta += patch_len - 1;
16862 	}
16863 
16864 	return 0;
16865 }
16866 
16867 /* convert load instructions that access fields of a context type into a
16868  * sequence of instructions that access fields of the underlying structure:
16869  *     struct __sk_buff    -> struct sk_buff
16870  *     struct bpf_sock_ops -> struct sock
16871  */
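/* Hypothetical example: for a socket filter, a 4-byte read of
 * __sk_buff->mark is rewritten by the program type's convert_ctx_access()
 * callback into a load of sk_buff->mark at its real offset; narrower reads
 * of a field are first widened to the full field size and then shifted and
 * masked back down, as handled further below.
 */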
16872 static int convert_ctx_accesses(struct bpf_verifier_env *env)
16873 {
16874 	const struct bpf_verifier_ops *ops = env->ops;
16875 	int i, cnt, size, ctx_field_size, delta = 0;
16876 	const int insn_cnt = env->prog->len;
16877 	struct bpf_insn insn_buf[16], *insn;
16878 	u32 target_size, size_default, off;
16879 	struct bpf_prog *new_prog;
16880 	enum bpf_access_type type;
16881 	bool is_narrower_load;
16882 
16883 	if (ops->gen_prologue || env->seen_direct_write) {
16884 		if (!ops->gen_prologue) {
16885 			verbose(env, "bpf verifier is misconfigured\n");
16886 			return -EINVAL;
16887 		}
16888 		cnt = ops->gen_prologue(insn_buf, env->seen_direct_write,
16889 					env->prog);
16890 		if (cnt >= ARRAY_SIZE(insn_buf)) {
16891 			verbose(env, "bpf verifier is misconfigured\n");
16892 			return -EINVAL;
16893 		} else if (cnt) {
16894 			new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt);
16895 			if (!new_prog)
16896 				return -ENOMEM;
16897 
16898 			env->prog = new_prog;
16899 			delta += cnt - 1;
16900 		}
16901 	}
16902 
16903 	if (bpf_prog_is_offloaded(env->prog->aux))
16904 		return 0;
16905 
16906 	insn = env->prog->insnsi + delta;
16907 
16908 	for (i = 0; i < insn_cnt; i++, insn++) {
16909 		bpf_convert_ctx_access_t convert_ctx_access;
16910 
16911 		if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) ||
16912 		    insn->code == (BPF_LDX | BPF_MEM | BPF_H) ||
16913 		    insn->code == (BPF_LDX | BPF_MEM | BPF_W) ||
16914 		    insn->code == (BPF_LDX | BPF_MEM | BPF_DW)) {
16915 			type = BPF_READ;
16916 		} else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) ||
16917 			   insn->code == (BPF_STX | BPF_MEM | BPF_H) ||
16918 			   insn->code == (BPF_STX | BPF_MEM | BPF_W) ||
16919 			   insn->code == (BPF_STX | BPF_MEM | BPF_DW) ||
16920 			   insn->code == (BPF_ST | BPF_MEM | BPF_B) ||
16921 			   insn->code == (BPF_ST | BPF_MEM | BPF_H) ||
16922 			   insn->code == (BPF_ST | BPF_MEM | BPF_W) ||
16923 			   insn->code == (BPF_ST | BPF_MEM | BPF_DW)) {
16924 			type = BPF_WRITE;
16925 		} else {
16926 			continue;
16927 		}
16928 
16929 		if (type == BPF_WRITE &&
16930 		    env->insn_aux_data[i + delta].sanitize_stack_spill) {
16931 			struct bpf_insn patch[] = {
16932 				*insn,
16933 				BPF_ST_NOSPEC(),
16934 			};
16935 
16936 			cnt = ARRAY_SIZE(patch);
16937 			new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt);
16938 			if (!new_prog)
16939 				return -ENOMEM;
16940 
16941 			delta    += cnt - 1;
16942 			env->prog = new_prog;
16943 			insn      = new_prog->insnsi + i + delta;
16944 			continue;
16945 		}
16946 
16947 		switch ((int)env->insn_aux_data[i + delta].ptr_type) {
16948 		case PTR_TO_CTX:
16949 			if (!ops->convert_ctx_access)
16950 				continue;
16951 			convert_ctx_access = ops->convert_ctx_access;
16952 			break;
16953 		case PTR_TO_SOCKET:
16954 		case PTR_TO_SOCK_COMMON:
16955 			convert_ctx_access = bpf_sock_convert_ctx_access;
16956 			break;
16957 		case PTR_TO_TCP_SOCK:
16958 			convert_ctx_access = bpf_tcp_sock_convert_ctx_access;
16959 			break;
16960 		case PTR_TO_XDP_SOCK:
16961 			convert_ctx_access = bpf_xdp_sock_convert_ctx_access;
16962 			break;
16963 		case PTR_TO_BTF_ID:
16964 		case PTR_TO_BTF_ID | PTR_UNTRUSTED:
16965 		/* PTR_TO_BTF_ID | MEM_ALLOC always has a valid lifetime and an
16966 		 * active ref_obj_id, unlike plain PTR_TO_BTF_ID, but the same
16967 		 * cannot be said once it is marked PTR_UNTRUSTED; hence we must
16968 		 * handle any faults for loads into such types. BPF_WRITE is
16969 		 * disallowed for this case.
16970 		 */
16971 		case PTR_TO_BTF_ID | MEM_ALLOC | PTR_UNTRUSTED:
16972 			if (type == BPF_READ) {
16973 				insn->code = BPF_LDX | BPF_PROBE_MEM |
16974 					BPF_SIZE((insn)->code);
16975 				env->prog->aux->num_exentries++;
16976 			}
16977 			continue;
16978 		default:
16979 			continue;
16980 		}
16981 
16982 		ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size;
16983 		size = BPF_LDST_BYTES(insn);
16984 
16985 		/* If the read access is a narrower load of the field,
16986 		 * convert it to a 4/8-byte load, to minimize program type specific
16987 		 * convert_ctx_access changes. If the conversion is successful,
16988 		 * we will apply the proper mask to the result.
16989 		 */
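		/* E.g. (illustrative, little-endian): a 1-byte read at byte 2
		 * of a 4-byte field becomes a 4-byte read of the whole field,
		 * followed by 'dst >>= 16' and 'dst &= 0xff' emitted after
		 * convert_ctx_access() below.
		 */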
16990 		is_narrower_load = size < ctx_field_size;
16991 		size_default = bpf_ctx_off_adjust_machine(ctx_field_size);
16992 		off = insn->off;
16993 		if (is_narrower_load) {
16994 			u8 size_code;
16995 
16996 			if (type == BPF_WRITE) {
16997 				verbose(env, "bpf verifier narrow ctx access misconfigured\n");
16998 				return -EINVAL;
16999 			}
17000 
17001 			size_code = BPF_H;
17002 			if (ctx_field_size == 4)
17003 				size_code = BPF_W;
17004 			else if (ctx_field_size == 8)
17005 				size_code = BPF_DW;
17006 
17007 			insn->off = off & ~(size_default - 1);
17008 			insn->code = BPF_LDX | BPF_MEM | size_code;
17009 		}
17010 
17011 		target_size = 0;
17012 		cnt = convert_ctx_access(type, insn, insn_buf, env->prog,
17013 					 &target_size);
17014 		if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) ||
17015 		    (ctx_field_size && !target_size)) {
17016 			verbose(env, "bpf verifier is misconfigured\n");
17017 			return -EINVAL;
17018 		}
17019 
17020 		if (is_narrower_load && size < target_size) {
17021 			u8 shift = bpf_ctx_narrow_access_offset(
17022 				off, size, size_default) * 8;
17023 			if (shift && cnt + 1 >= ARRAY_SIZE(insn_buf)) {
17024 				verbose(env, "bpf verifier narrow ctx load misconfigured\n");
17025 				return -EINVAL;
17026 			}
17027 			if (ctx_field_size <= 4) {
17028 				if (shift)
17029 					insn_buf[cnt++] = BPF_ALU32_IMM(BPF_RSH,
17030 									insn->dst_reg,
17031 									shift);
17032 				insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
17033 								(1 << size * 8) - 1);
17034 			} else {
17035 				if (shift)
17036 					insn_buf[cnt++] = BPF_ALU64_IMM(BPF_RSH,
17037 									insn->dst_reg,
17038 									shift);
17039 				insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
17040 								(1ULL << size * 8) - 1);
17041 			}
17042 		}
17043 
17044 		new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
17045 		if (!new_prog)
17046 			return -ENOMEM;
17047 
17048 		delta += cnt - 1;
17049 
17050 		/* keep walking new program and skip insns we just inserted */
17051 		env->prog = new_prog;
17052 		insn      = new_prog->insnsi + i + delta;
17053 	}
17054 
17055 	return 0;
17056 }
17057 
17058 static int jit_subprogs(struct bpf_verifier_env *env)
17059 {
17060 	struct bpf_prog *prog = env->prog, **func, *tmp;
17061 	int i, j, subprog_start, subprog_end = 0, len, subprog;
17062 	struct bpf_map *map_ptr;
17063 	struct bpf_insn *insn;
17064 	void *old_bpf_func;
17065 	int err, num_exentries;
17066 
17067 	if (env->subprog_cnt <= 1)
17068 		return 0;
17069 
17070 	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
17071 		if (!bpf_pseudo_func(insn) && !bpf_pseudo_call(insn))
17072 			continue;
17073 
17074 		/* Upon error here we cannot fall back to interpreter but
17075 		 * need a hard reject of the program. Thus -EFAULT is
17076 		 * propagated in any case.
17077 		 */
17078 		subprog = find_subprog(env, i + insn->imm + 1);
17079 		if (subprog < 0) {
17080 			WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
17081 				  i + insn->imm + 1);
17082 			return -EFAULT;
17083 		}
17084 		/* temporarily remember subprog id inside insn instead of
17085 		 * aux_data, since next loop will split up all insns into funcs
17086 		 */
17087 		insn->off = subprog;
17088 		/* remember original imm in case JIT fails and fallback
17089 		 * to interpreter will be needed
17090 		 */
17091 		env->insn_aux_data[i].call_imm = insn->imm;
17092 		/* point imm to __bpf_call_base+1 from JITs point of view */
17093 		insn->imm = 1;
17094 		if (bpf_pseudo_func(insn))
17095 			/* jit (e.g. x86_64) may emit fewer instructions
17096 			 * if it learns a u32 imm is the same as a u64 imm.
17097 			 * Force a non-zero value here.
17098 			 */
17099 			insn[1].imm = 1;
17100 	}
17101 
17102 	err = bpf_prog_alloc_jited_linfo(prog);
17103 	if (err)
17104 		goto out_undo_insn;
17105 
17106 	err = -ENOMEM;
17107 	func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL);
17108 	if (!func)
17109 		goto out_undo_insn;
17110 
17111 	for (i = 0; i < env->subprog_cnt; i++) {
17112 		subprog_start = subprog_end;
17113 		subprog_end = env->subprog_info[i + 1].start;
17114 
17115 		len = subprog_end - subprog_start;
17116 		/* bpf_prog_run() doesn't call subprogs directly,
17117 		 * hence main prog stats include the runtime of subprogs.
17118 		 * subprogs don't have IDs and are not reachable via prog_get_next_id;
17119 		 * func[i]->stats will never be accessed and stays NULL
17120 		 */
17121 		func[i] = bpf_prog_alloc_no_stats(bpf_prog_size(len), GFP_USER);
17122 		if (!func[i])
17123 			goto out_free;
17124 		memcpy(func[i]->insnsi, &prog->insnsi[subprog_start],
17125 		       len * sizeof(struct bpf_insn));
17126 		func[i]->type = prog->type;
17127 		func[i]->len = len;
17128 		if (bpf_prog_calc_tag(func[i]))
17129 			goto out_free;
17130 		func[i]->is_func = 1;
17131 		func[i]->aux->func_idx = i;
17132 		/* Below members will be freed only at prog->aux */
17133 		func[i]->aux->btf = prog->aux->btf;
17134 		func[i]->aux->func_info = prog->aux->func_info;
17135 		func[i]->aux->func_info_cnt = prog->aux->func_info_cnt;
17136 		func[i]->aux->poke_tab = prog->aux->poke_tab;
17137 		func[i]->aux->size_poke_tab = prog->aux->size_poke_tab;
17138 
17139 		for (j = 0; j < prog->aux->size_poke_tab; j++) {
17140 			struct bpf_jit_poke_descriptor *poke;
17141 
17142 			poke = &prog->aux->poke_tab[j];
17143 			if (poke->insn_idx < subprog_end &&
17144 			    poke->insn_idx >= subprog_start)
17145 				poke->aux = func[i]->aux;
17146 		}
17147 
17148 		func[i]->aux->name[0] = 'F';
17149 		func[i]->aux->stack_depth = env->subprog_info[i].stack_depth;
17150 		func[i]->jit_requested = 1;
17151 		func[i]->blinding_requested = prog->blinding_requested;
17152 		func[i]->aux->kfunc_tab = prog->aux->kfunc_tab;
17153 		func[i]->aux->kfunc_btf_tab = prog->aux->kfunc_btf_tab;
17154 		func[i]->aux->linfo = prog->aux->linfo;
17155 		func[i]->aux->nr_linfo = prog->aux->nr_linfo;
17156 		func[i]->aux->jited_linfo = prog->aux->jited_linfo;
17157 		func[i]->aux->linfo_idx = env->subprog_info[i].linfo_idx;
17158 		num_exentries = 0;
17159 		insn = func[i]->insnsi;
17160 		for (j = 0; j < func[i]->len; j++, insn++) {
17161 			if (BPF_CLASS(insn->code) == BPF_LDX &&
17162 			    BPF_MODE(insn->code) == BPF_PROBE_MEM)
17163 				num_exentries++;
17164 		}
17165 		func[i]->aux->num_exentries = num_exentries;
17166 		func[i]->aux->tail_call_reachable = env->subprog_info[i].tail_call_reachable;
17167 		func[i] = bpf_int_jit_compile(func[i]);
17168 		if (!func[i]->jited) {
17169 			err = -ENOTSUPP;
17170 			goto out_free;
17171 		}
17172 		cond_resched();
17173 	}
17174 
17175 	/* at this point all bpf functions were successfully JITed.
17176 	 * Now populate all bpf_calls with correct addresses and
17177 	 * run last pass of JIT
17178 	 */
17179 	for (i = 0; i < env->subprog_cnt; i++) {
17180 		insn = func[i]->insnsi;
17181 		for (j = 0; j < func[i]->len; j++, insn++) {
17182 			if (bpf_pseudo_func(insn)) {
17183 				subprog = insn->off;
17184 				insn[0].imm = (u32)(long)func[subprog]->bpf_func;
17185 				insn[1].imm = ((u64)(long)func[subprog]->bpf_func) >> 32;
17186 				continue;
17187 			}
17188 			if (!bpf_pseudo_call(insn))
17189 				continue;
17190 			subprog = insn->off;
17191 			insn->imm = BPF_CALL_IMM(func[subprog]->bpf_func);
17192 		}
17193 
17194 		/* we use the aux data to keep a list of the start addresses
17195 		 * of the JITed images for each function in the program
17196 		 *
17197 		 * for some architectures, such as powerpc64, the imm field
17198 		 * might not be large enough to hold the offset of the start
17199 		 * address of the callee's JITed image from __bpf_call_base
17200 		 *
17201 		 * in such cases, we can lookup the start address of a callee
17202 		 * by using its subprog id, available from the off field of
17203 		 * the call instruction, as an index for this list
17204 		 */
17205 		func[i]->aux->func = func;
17206 		func[i]->aux->func_cnt = env->subprog_cnt;
17207 	}
17208 	for (i = 0; i < env->subprog_cnt; i++) {
17209 		old_bpf_func = func[i]->bpf_func;
17210 		tmp = bpf_int_jit_compile(func[i]);
17211 		if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) {
17212 			verbose(env, "JIT doesn't support bpf-to-bpf calls\n");
17213 			err = -ENOTSUPP;
17214 			goto out_free;
17215 		}
17216 		cond_resched();
17217 	}
17218 
17219 	/* finally lock prog and jit images for all functions and
17220 	 * populate kallsyms. Begin at the first subprogram, since
17221 	 * bpf_prog_load will add the kallsyms for the main program.
17222 	 */
17223 	for (i = 1; i < env->subprog_cnt; i++) {
17224 		bpf_prog_lock_ro(func[i]);
17225 		bpf_prog_kallsyms_add(func[i]);
17226 	}
17227 
17228 	/* Last step: make now unused interpreter insns from main
17229 	 * prog consistent for later dump requests, so they can
17230 	 * look the same as if they had been interpreted only.
17231 	 */
17232 	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
17233 		if (bpf_pseudo_func(insn)) {
17234 			insn[0].imm = env->insn_aux_data[i].call_imm;
17235 			insn[1].imm = insn->off;
17236 			insn->off = 0;
17237 			continue;
17238 		}
17239 		if (!bpf_pseudo_call(insn))
17240 			continue;
17241 		insn->off = env->insn_aux_data[i].call_imm;
17242 		subprog = find_subprog(env, i + insn->off + 1);
17243 		insn->imm = subprog;
17244 	}
17245 
17246 	prog->jited = 1;
17247 	prog->bpf_func = func[0]->bpf_func;
17248 	prog->jited_len = func[0]->jited_len;
17249 	prog->aux->extable = func[0]->aux->extable;
17250 	prog->aux->num_exentries = func[0]->aux->num_exentries;
17251 	prog->aux->func = func;
17252 	prog->aux->func_cnt = env->subprog_cnt;
17253 	bpf_prog_jit_attempt_done(prog);
17254 	return 0;
17255 out_free:
17256 	/* We failed JIT'ing, so at this point we need to unregister poke
17257 	 * descriptors from subprogs, so that the kernel is not attempting to
17258 	 * patch them anymore as we're freeing the subprog JIT memory.
17259 	 */
17260 	for (i = 0; i < prog->aux->size_poke_tab; i++) {
17261 		map_ptr = prog->aux->poke_tab[i].tail_call.map;
17262 		map_ptr->ops->map_poke_untrack(map_ptr, prog->aux);
17263 	}
17264 	/* At this point we're guaranteed that poke descriptors are not
17265 	 * live anymore. We can just unlink each subprog's descriptor table
17266 	 * as it's released with the main prog.
17267 	 */
17268 	for (i = 0; i < env->subprog_cnt; i++) {
17269 		if (!func[i])
17270 			continue;
17271 		func[i]->aux->poke_tab = NULL;
17272 		bpf_jit_free(func[i]);
17273 	}
17274 	kfree(func);
17275 out_undo_insn:
17276 	/* cleanup main prog to be interpreted */
17277 	prog->jit_requested = 0;
17278 	prog->blinding_requested = 0;
17279 	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
17280 		if (!bpf_pseudo_call(insn))
17281 			continue;
17282 		insn->off = 0;
17283 		insn->imm = env->insn_aux_data[i].call_imm;
17284 	}
17285 	bpf_prog_jit_attempt_done(prog);
17286 	return err;
17287 }
17288 
17289 static int fixup_call_args(struct bpf_verifier_env *env)
17290 {
17291 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
17292 	struct bpf_prog *prog = env->prog;
17293 	struct bpf_insn *insn = prog->insnsi;
17294 	bool has_kfunc_call = bpf_prog_has_kfunc_call(prog);
17295 	int i, depth;
17296 #endif
17297 	int err = 0;
17298 
17299 	if (env->prog->jit_requested &&
17300 	    !bpf_prog_is_offloaded(env->prog->aux)) {
17301 		err = jit_subprogs(env);
17302 		if (err == 0)
17303 			return 0;
17304 		if (err == -EFAULT)
17305 			return err;
17306 	}
17307 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
17308 	if (has_kfunc_call) {
17309 		verbose(env, "calling kernel functions is not allowed in non-JITed programs\n");
17310 		return -EINVAL;
17311 	}
17312 	if (env->subprog_cnt > 1 && env->prog->aux->tail_call_reachable) {
17313 		/* When JIT fails, the progs with bpf2bpf calls and tail_calls
17314 		 * have to be rejected, since interpreter doesn't support them yet.
17315 		 */
17316 		verbose(env, "tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls\n");
17317 		return -EINVAL;
17318 	}
17319 	for (i = 0; i < prog->len; i++, insn++) {
17320 		if (bpf_pseudo_func(insn)) {
17321 			/* When JIT fails, the progs with callback calls
17322 			 * have to be rejected, since interpreter doesn't support them yet.
17323 			 */
17324 			verbose(env, "callbacks are not allowed in non-JITed programs\n");
17325 			return -EINVAL;
17326 		}
17327 
17328 		if (!bpf_pseudo_call(insn))
17329 			continue;
17330 		depth = get_callee_stack_depth(env, insn, i);
17331 		if (depth < 0)
17332 			return depth;
17333 		bpf_patch_call_args(insn, depth);
17334 	}
17335 	err = 0;
17336 #endif
17337 	return err;
17338 }
17339 
17340 /* replace a generic kfunc with a specialized version if necessary */
17341 static void specialize_kfunc(struct bpf_verifier_env *env,
17342 			     u32 func_id, u16 offset, unsigned long *addr)
17343 {
17344 	struct bpf_prog *prog = env->prog;
17345 	bool seen_direct_write;
17346 	void *xdp_kfunc;
17347 	bool is_rdonly;
17348 
17349 	if (bpf_dev_bound_kfunc_id(func_id)) {
17350 		xdp_kfunc = bpf_dev_bound_resolve_kfunc(prog, func_id);
17351 		if (xdp_kfunc) {
17352 			*addr = (unsigned long)xdp_kfunc;
17353 			return;
17354 		}
17355 		/* fallback to default kfunc when not supported by netdev */
17356 	}
17357 
17358 	if (offset)
17359 		return;
17360 
17361 	if (func_id == special_kfunc_list[KF_bpf_dynptr_from_skb]) {
17362 		seen_direct_write = env->seen_direct_write;
17363 		is_rdonly = !may_access_direct_pkt_data(env, NULL, BPF_WRITE);
17364 
17365 		if (is_rdonly)
17366 			*addr = (unsigned long)bpf_dynptr_from_skb_rdonly;
17367 
17368 		/* restore env->seen_direct_write to its original value, since
17369 		 * may_access_direct_pkt_data mutates it
17370 		 */
17371 		env->seen_direct_write = seen_direct_write;
17372 	}
17373 }
17374 
17375 static void __fixup_collection_insert_kfunc(struct bpf_insn_aux_data *insn_aux,
17376 					    u16 struct_meta_reg,
17377 					    u16 node_offset_reg,
17378 					    struct bpf_insn *insn,
17379 					    struct bpf_insn *insn_buf,
17380 					    int *cnt)
17381 {
17382 	struct btf_struct_meta *kptr_struct_meta = insn_aux->kptr_struct_meta;
17383 	struct bpf_insn addr[2] = { BPF_LD_IMM64(struct_meta_reg, (long)kptr_struct_meta) };
17384 
17385 	insn_buf[0] = addr[0];
17386 	insn_buf[1] = addr[1];
17387 	insn_buf[2] = BPF_MOV64_IMM(node_offset_reg, insn_aux->insert_off);
17388 	insn_buf[3] = *insn;
17389 	*cnt = 4;
17390 }
17391 
17392 static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
17393 			    struct bpf_insn *insn_buf, int insn_idx, int *cnt)
17394 {
17395 	const struct bpf_kfunc_desc *desc;
17396 
17397 	if (!insn->imm) {
17398 		verbose(env, "invalid kernel function call not eliminated in verifier pass\n");
17399 		return -EINVAL;
17400 	}
17401 
17402 	*cnt = 0;
17403 
17404 	/* insn->imm has the btf func_id. Replace it with an offset relative to
17405 	 * __bpf_call_base, unless the JIT needs to call functions that are
17406 	 * further than 32 bits away (bpf_jit_supports_far_kfunc_call()).
17407 	 */
17408 	desc = find_kfunc_desc(env->prog, insn->imm, insn->off);
17409 	if (!desc) {
17410 		verbose(env, "verifier internal error: kernel function descriptor not found for func_id %u\n",
17411 			insn->imm);
17412 		return -EFAULT;
17413 	}
17414 
17415 	if (!bpf_jit_supports_far_kfunc_call())
17416 		insn->imm = BPF_CALL_IMM(desc->addr);
17417 	if (insn->off)
17418 		return 0;
17419 	if (desc->func_id == special_kfunc_list[KF_bpf_obj_new_impl]) {
17420 		struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta;
17421 		struct bpf_insn addr[2] = { BPF_LD_IMM64(BPF_REG_2, (long)kptr_struct_meta) };
17422 		u64 obj_new_size = env->insn_aux_data[insn_idx].obj_new_size;
17423 
17424 		insn_buf[0] = BPF_MOV64_IMM(BPF_REG_1, obj_new_size);
17425 		insn_buf[1] = addr[0];
17426 		insn_buf[2] = addr[1];
17427 		insn_buf[3] = *insn;
17428 		*cnt = 4;
17429 	} else if (desc->func_id == special_kfunc_list[KF_bpf_obj_drop_impl] ||
17430 		   desc->func_id == special_kfunc_list[KF_bpf_refcount_acquire_impl]) {
17431 		struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta;
17432 		struct bpf_insn addr[2] = { BPF_LD_IMM64(BPF_REG_2, (long)kptr_struct_meta) };
17433 
17434 		insn_buf[0] = addr[0];
17435 		insn_buf[1] = addr[1];
17436 		insn_buf[2] = *insn;
17437 		*cnt = 3;
17438 	} else if (desc->func_id == special_kfunc_list[KF_bpf_list_push_back_impl] ||
17439 		   desc->func_id == special_kfunc_list[KF_bpf_list_push_front_impl] ||
17440 		   desc->func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) {
17441 		int struct_meta_reg = BPF_REG_3;
17442 		int node_offset_reg = BPF_REG_4;
17443 
17444 		/* rbtree_add has extra 'less' arg, so args-to-fixup are in diff regs */
17445 		if (desc->func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) {
17446 			struct_meta_reg = BPF_REG_4;
17447 			node_offset_reg = BPF_REG_5;
17448 		}
17449 
17450 		__fixup_collection_insert_kfunc(&env->insn_aux_data[insn_idx], struct_meta_reg,
17451 						node_offset_reg, insn, insn_buf, cnt);
17452 	} else if (desc->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx] ||
17453 		   desc->func_id == special_kfunc_list[KF_bpf_rdonly_cast]) {
17454 		insn_buf[0] = BPF_MOV64_REG(BPF_REG_0, BPF_REG_1);
17455 		*cnt = 1;
17456 	}
17457 	return 0;
17458 }
17459 
17460 /* Do various post-verification rewrites in a single program pass.
17461  * These rewrites simplify JIT and interpreter implementations.
17462  */
17463 static int do_misc_fixups(struct bpf_verifier_env *env)
17464 {
17465 	struct bpf_prog *prog = env->prog;
17466 	enum bpf_attach_type eatype = prog->expected_attach_type;
17467 	enum bpf_prog_type prog_type = resolve_prog_type(prog);
17468 	struct bpf_insn *insn = prog->insnsi;
17469 	const struct bpf_func_proto *fn;
17470 	const int insn_cnt = prog->len;
17471 	const struct bpf_map_ops *ops;
17472 	struct bpf_insn_aux_data *aux;
17473 	struct bpf_insn insn_buf[16];
17474 	struct bpf_prog *new_prog;
17475 	struct bpf_map *map_ptr;
17476 	int i, ret, cnt, delta = 0;
17477 
17478 	for (i = 0; i < insn_cnt; i++, insn++) {
17479 		/* Make divide-by-zero exceptions impossible. */
17480 		if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) ||
17481 		    insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
17482 		    insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
17483 		    insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
17484 			bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
17485 			bool isdiv = BPF_OP(insn->code) == BPF_DIV;
17486 			struct bpf_insn *patchlet;
17487 			struct bpf_insn chk_and_div[] = {
17488 				/* [R,W]x div 0 -> 0 */
17489 				BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
17490 					     BPF_JNE | BPF_K, insn->src_reg,
17491 					     0, 2, 0),
17492 				BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg),
17493 				BPF_JMP_IMM(BPF_JA, 0, 0, 1),
17494 				*insn,
17495 			};
17496 			struct bpf_insn chk_and_mod[] = {
17497 				/* [R,W]x mod 0 -> [R,W]x */
17498 				BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
17499 					     BPF_JEQ | BPF_K, insn->src_reg,
17500 					     0, 1 + (is64 ? 0 : 1), 0),
17501 				*insn,
17502 				BPF_JMP_IMM(BPF_JA, 0, 0, 1),
17503 				BPF_MOV32_REG(insn->dst_reg, insn->dst_reg),
17504 			};
17505 
17506 			patchlet = isdiv ? chk_and_div : chk_and_mod;
17507 			cnt = isdiv ? ARRAY_SIZE(chk_and_div) :
17508 				      ARRAY_SIZE(chk_and_mod) - (is64 ? 2 : 0);
17509 
17510 			new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
17511 			if (!new_prog)
17512 				return -ENOMEM;
17513 
17514 			delta    += cnt - 1;
17515 			env->prog = prog = new_prog;
17516 			insn      = new_prog->insnsi + i + delta;
17517 			continue;
17518 		}
17519 
17520 		/* Implement LD_ABS and LD_IND with a rewrite, if supported by the program type. */
17521 		if (BPF_CLASS(insn->code) == BPF_LD &&
17522 		    (BPF_MODE(insn->code) == BPF_ABS ||
17523 		     BPF_MODE(insn->code) == BPF_IND)) {
17524 			cnt = env->ops->gen_ld_abs(insn, insn_buf);
17525 			if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
17526 				verbose(env, "bpf verifier is misconfigured\n");
17527 				return -EINVAL;
17528 			}
17529 
17530 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
17531 			if (!new_prog)
17532 				return -ENOMEM;
17533 
17534 			delta    += cnt - 1;
17535 			env->prog = prog = new_prog;
17536 			insn      = new_prog->insnsi + i + delta;
17537 			continue;
17538 		}
17539 
17540 		/* Rewrite pointer arithmetic to mitigate speculation attacks. */
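		/* Sketch of the masking sequence emitted below for the
		 * register-offset case (alu_limit comes from the verifier's
		 * bounds tracking):
		 *
		 *   AX = alu_limit
		 *   AX -= off_reg
		 *   AX |= off_reg
		 *   AX = -AX
		 *   AX s>>= 63
		 *   AX &= off_reg
		 *
		 * AX ends up equal to off_reg when the offset is within the
		 * limit and 0 otherwise; the pointer ALU insn is then rewritten
		 * to use AX, so a mispredicted bounds check cannot steer a
		 * speculative out-of-bounds access.
		 */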
17541 		if (insn->code == (BPF_ALU64 | BPF_ADD | BPF_X) ||
17542 		    insn->code == (BPF_ALU64 | BPF_SUB | BPF_X)) {
17543 			const u8 code_add = BPF_ALU64 | BPF_ADD | BPF_X;
17544 			const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X;
17545 			struct bpf_insn *patch = &insn_buf[0];
17546 			bool issrc, isneg, isimm;
17547 			u32 off_reg;
17548 
17549 			aux = &env->insn_aux_data[i + delta];
17550 			if (!aux->alu_state ||
17551 			    aux->alu_state == BPF_ALU_NON_POINTER)
17552 				continue;
17553 
17554 			isneg = aux->alu_state & BPF_ALU_NEG_VALUE;
17555 			issrc = (aux->alu_state & BPF_ALU_SANITIZE) ==
17556 				BPF_ALU_SANITIZE_SRC;
17557 			isimm = aux->alu_state & BPF_ALU_IMMEDIATE;
17558 
17559 			off_reg = issrc ? insn->src_reg : insn->dst_reg;
17560 			if (isimm) {
17561 				*patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit);
17562 			} else {
17563 				if (isneg)
17564 					*patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
17565 				*patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit);
17566 				*patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg);
17567 				*patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg);
17568 				*patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);
17569 				*patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63);
17570 				*patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX, off_reg);
17571 			}
17572 			if (!issrc)
17573 				*patch++ = BPF_MOV64_REG(insn->dst_reg, insn->src_reg);
17574 			insn->src_reg = BPF_REG_AX;
17575 			if (isneg)
17576 				insn->code = insn->code == code_add ?
17577 					     code_sub : code_add;
17578 			*patch++ = *insn;
17579 			if (issrc && isneg && !isimm)
17580 				*patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
17581 			cnt = patch - insn_buf;
17582 
17583 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
17584 			if (!new_prog)
17585 				return -ENOMEM;
17586 
17587 			delta    += cnt - 1;
17588 			env->prog = prog = new_prog;
17589 			insn      = new_prog->insnsi + i + delta;
17590 			continue;
17591 		}
17592 
17593 		if (insn->code != (BPF_JMP | BPF_CALL))
17594 			continue;
17595 		if (insn->src_reg == BPF_PSEUDO_CALL)
17596 			continue;
17597 		if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) {
17598 			ret = fixup_kfunc_call(env, insn, insn_buf, i + delta, &cnt);
17599 			if (ret)
17600 				return ret;
17601 			if (cnt == 0)
17602 				continue;
17603 
17604 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
17605 			if (!new_prog)
17606 				return -ENOMEM;
17607 
17608 			delta	 += cnt - 1;
17609 			env->prog = prog = new_prog;
17610 			insn	  = new_prog->insnsi + i + delta;
17611 			continue;
17612 		}
17613 
17614 		if (insn->imm == BPF_FUNC_get_route_realm)
17615 			prog->dst_needed = 1;
17616 		if (insn->imm == BPF_FUNC_get_prandom_u32)
17617 			bpf_user_rnd_init_once();
17618 		if (insn->imm == BPF_FUNC_override_return)
17619 			prog->kprobe_override = 1;
17620 		if (insn->imm == BPF_FUNC_tail_call) {
17621 			/* If we tail call into other programs, we
17622 			 * cannot make any assumptions since they can
17623 			 * be replaced dynamically during runtime in
17624 			 * the program array.
17625 			 */
17626 			prog->cb_access = 1;
17627 			if (!allow_tail_call_in_subprogs(env))
17628 				prog->aux->stack_depth = MAX_BPF_STACK;
17629 			prog->aux->max_pkt_offset = MAX_PACKET_OFF;
17630 
17631 			/* mark bpf_tail_call as a different opcode to avoid a
17632 			 * conditional branch in the interpreter for every normal
17633 			 * call and to prevent accidental JITing by a JIT compiler
17634 			 * that doesn't support bpf_tail_call yet
17635 			 */
17636 			insn->imm = 0;
17637 			insn->code = BPF_JMP | BPF_TAIL_CALL;
17638 
17639 			aux = &env->insn_aux_data[i + delta];
17640 			if (env->bpf_capable && !prog->blinding_requested &&
17641 			    prog->jit_requested &&
17642 			    !bpf_map_key_poisoned(aux) &&
17643 			    !bpf_map_ptr_poisoned(aux) &&
17644 			    !bpf_map_ptr_unpriv(aux)) {
17645 				struct bpf_jit_poke_descriptor desc = {
17646 					.reason = BPF_POKE_REASON_TAIL_CALL,
17647 					.tail_call.map = BPF_MAP_PTR(aux->map_ptr_state),
17648 					.tail_call.key = bpf_map_key_immediate(aux),
17649 					.insn_idx = i + delta,
17650 				};
17651 
17652 				ret = bpf_jit_add_poke_descriptor(prog, &desc);
17653 				if (ret < 0) {
17654 					verbose(env, "adding tail call poke descriptor failed\n");
17655 					return ret;
17656 				}
17657 
17658 				insn->imm = ret + 1;
17659 				continue;
17660 			}
17661 
17662 			if (!bpf_map_ptr_unpriv(aux))
17663 				continue;
17664 
17665 			/* instead of changing every JIT dealing with tail_call
17666 			 * emit two extra insns:
17667 			 * if (index >= max_entries) goto out;
17668 			 * index &= array->index_mask;
17669 			 * to avoid out-of-bounds cpu speculation
17670 			 */
17671 			if (bpf_map_ptr_poisoned(aux)) {
17672 				verbose(env, "tail_call abusing map_ptr\n");
17673 				return -EINVAL;
17674 			}
17675 
17676 			map_ptr = BPF_MAP_PTR(aux->map_ptr_state);
17677 			insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3,
17678 						  map_ptr->max_entries, 2);
17679 			insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3,
17680 						    container_of(map_ptr,
17681 								 struct bpf_array,
17682 								 map)->index_mask);
17683 			insn_buf[2] = *insn;
17684 			cnt = 3;
17685 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
17686 			if (!new_prog)
17687 				return -ENOMEM;
17688 
17689 			delta    += cnt - 1;
17690 			env->prog = prog = new_prog;
17691 			insn      = new_prog->insnsi + i + delta;
17692 			continue;
17693 		}
17694 
17695 		if (insn->imm == BPF_FUNC_timer_set_callback) {
17696 			/* The verifier will process callback_fn as many times as necessary
17697 			 * with different maps and the register states prepared by
17698 			 * set_timer_callback_state will be accurate.
17699 			 *
17700 			 * The following use case is valid:
17701 			 *   map1 is shared by prog1, prog2, prog3.
17702 			 *   prog1 calls bpf_timer_init for some map1 elements
17703 			 *   prog2 calls bpf_timer_set_callback for some map1 elements.
17704 			 *     Those that were not bpf_timer_init-ed will return -EINVAL.
17705 			 *   prog3 calls bpf_timer_start for some map1 elements.
17706 			 *     Those that were not both bpf_timer_init-ed and
17707 			 *     bpf_timer_set_callback-ed will return -EINVAL.
17708 			 */
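			/* Emit an extra ld_imm64 so that R3 carries prog->aux
			 * into the helper as a hidden third argument, letting
			 * the helper associate the callback with this program.
			 */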
17709 			struct bpf_insn ld_addrs[2] = {
17710 				BPF_LD_IMM64(BPF_REG_3, (long)prog->aux),
17711 			};
17712 
17713 			insn_buf[0] = ld_addrs[0];
17714 			insn_buf[1] = ld_addrs[1];
17715 			insn_buf[2] = *insn;
17716 			cnt = 3;
17717 
17718 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
17719 			if (!new_prog)
17720 				return -ENOMEM;
17721 
17722 			delta    += cnt - 1;
17723 			env->prog = prog = new_prog;
17724 			insn      = new_prog->insnsi + i + delta;
17725 			goto patch_call_imm;
17726 		}
17727 
17728 		if (is_storage_get_function(insn->imm)) {
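			/* The storage_get helpers take a hidden gfp_t argument
			 * in R5: GFP_KERNEL only when the program is sleepable
			 * and this call site may block, GFP_ATOMIC otherwise.
			 */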
17729 			if (!env->prog->aux->sleepable ||
17730 			    env->insn_aux_data[i + delta].storage_get_func_atomic)
17731 				insn_buf[0] = BPF_MOV64_IMM(BPF_REG_5, (__force __s32)GFP_ATOMIC);
17732 			else
17733 				insn_buf[0] = BPF_MOV64_IMM(BPF_REG_5, (__force __s32)GFP_KERNEL);
17734 			insn_buf[1] = *insn;
17735 			cnt = 2;
17736 
17737 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
17738 			if (!new_prog)
17739 				return -ENOMEM;
17740 
17741 			delta += cnt - 1;
17742 			env->prog = prog = new_prog;
17743 			insn = new_prog->insnsi + i + delta;
17744 			goto patch_call_imm;
17745 		}
17746 
17747 		/* The BPF_EMIT_CALL() assumptions made by map_gen_lookup and
17748 		 * the other inlining handlers below currently hold only on
17749 		 * 64-bit architectures.
17750 		 */
17751 		if (prog->jit_requested && BITS_PER_LONG == 64 &&
17752 		    (insn->imm == BPF_FUNC_map_lookup_elem ||
17753 		     insn->imm == BPF_FUNC_map_update_elem ||
17754 		     insn->imm == BPF_FUNC_map_delete_elem ||
17755 		     insn->imm == BPF_FUNC_map_push_elem   ||
17756 		     insn->imm == BPF_FUNC_map_pop_elem    ||
17757 		     insn->imm == BPF_FUNC_map_peek_elem   ||
17758 		     insn->imm == BPF_FUNC_redirect_map    ||
17759 		     insn->imm == BPF_FUNC_for_each_map_elem ||
17760 		     insn->imm == BPF_FUNC_map_lookup_percpu_elem)) {
17761 			aux = &env->insn_aux_data[i + delta];
17762 			if (bpf_map_ptr_poisoned(aux))
17763 				goto patch_call_imm;
17764 
17765 			map_ptr = BPF_MAP_PTR(aux->map_ptr_state);
17766 			ops = map_ptr->ops;
17767 			if (insn->imm == BPF_FUNC_map_lookup_elem &&
17768 			    ops->map_gen_lookup) {
17769 				cnt = ops->map_gen_lookup(map_ptr, insn_buf);
17770 				if (cnt == -EOPNOTSUPP)
17771 					goto patch_map_ops_generic;
17772 				if (cnt <= 0 || cnt >= ARRAY_SIZE(insn_buf)) {
17773 					verbose(env, "bpf verifier is misconfigured\n");
17774 					return -EINVAL;
17775 				}
17776 
17777 				new_prog = bpf_patch_insn_data(env, i + delta,
17778 							       insn_buf, cnt);
17779 				if (!new_prog)
17780 					return -ENOMEM;
17781 
17782 				delta    += cnt - 1;
17783 				env->prog = prog = new_prog;
17784 				insn      = new_prog->insnsi + i + delta;
17785 				continue;
17786 			}
17787 
17788 			BUILD_BUG_ON(!__same_type(ops->map_lookup_elem,
17789 				     (void *(*)(struct bpf_map *map, void *key))NULL));
17790 			BUILD_BUG_ON(!__same_type(ops->map_delete_elem,
17791 				     (long (*)(struct bpf_map *map, void *key))NULL));
17792 			BUILD_BUG_ON(!__same_type(ops->map_update_elem,
17793 				     (long (*)(struct bpf_map *map, void *key, void *value,
17794 					      u64 flags))NULL));
17795 			BUILD_BUG_ON(!__same_type(ops->map_push_elem,
17796 				     (long (*)(struct bpf_map *map, void *value,
17797 					      u64 flags))NULL));
17798 			BUILD_BUG_ON(!__same_type(ops->map_pop_elem,
17799 				     (long (*)(struct bpf_map *map, void *value))NULL));
17800 			BUILD_BUG_ON(!__same_type(ops->map_peek_elem,
17801 				     (long (*)(struct bpf_map *map, void *value))NULL));
17802 			BUILD_BUG_ON(!__same_type(ops->map_redirect,
17803 				     (long (*)(struct bpf_map *map, u64 index, u64 flags))NULL));
17804 			BUILD_BUG_ON(!__same_type(ops->map_for_each_callback,
17805 				     (long (*)(struct bpf_map *map,
17806 					      bpf_callback_t callback_fn,
17807 					      void *callback_ctx,
17808 					      u64 flags))NULL));
17809 			BUILD_BUG_ON(!__same_type(ops->map_lookup_percpu_elem,
17810 				     (void *(*)(struct bpf_map *map, void *key, u32 cpu))NULL));
17811 
17812 patch_map_ops_generic:
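			/* For the remaining cases bypass the generic helper and
			 * call the map's ops method directly. BPF_CALL_IMM()
			 * converts the kernel address into a call immediate
			 * relative to __bpf_call_base.
			 */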
17813 			switch (insn->imm) {
17814 			case BPF_FUNC_map_lookup_elem:
17815 				insn->imm = BPF_CALL_IMM(ops->map_lookup_elem);
17816 				continue;
17817 			case BPF_FUNC_map_update_elem:
17818 				insn->imm = BPF_CALL_IMM(ops->map_update_elem);
17819 				continue;
17820 			case BPF_FUNC_map_delete_elem:
17821 				insn->imm = BPF_CALL_IMM(ops->map_delete_elem);
17822 				continue;
17823 			case BPF_FUNC_map_push_elem:
17824 				insn->imm = BPF_CALL_IMM(ops->map_push_elem);
17825 				continue;
17826 			case BPF_FUNC_map_pop_elem:
17827 				insn->imm = BPF_CALL_IMM(ops->map_pop_elem);
17828 				continue;
17829 			case BPF_FUNC_map_peek_elem:
17830 				insn->imm = BPF_CALL_IMM(ops->map_peek_elem);
17831 				continue;
17832 			case BPF_FUNC_redirect_map:
17833 				insn->imm = BPF_CALL_IMM(ops->map_redirect);
17834 				continue;
17835 			case BPF_FUNC_for_each_map_elem:
17836 				insn->imm = BPF_CALL_IMM(ops->map_for_each_callback);
17837 				continue;
17838 			case BPF_FUNC_map_lookup_percpu_elem:
17839 				insn->imm = BPF_CALL_IMM(ops->map_lookup_percpu_elem);
17840 				continue;
17841 			}
17842 
17843 			goto patch_call_imm;
17844 		}
17845 
17846 		/* Implement bpf_jiffies64 inline. */
17847 		if (prog->jit_requested && BITS_PER_LONG == 64 &&
17848 		    insn->imm == BPF_FUNC_jiffies64) {
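			/* The emitted sequence is roughly:
			 *   r0 = (u64)&jiffies;
			 *   r0 = *(u64 *)(r0 + 0);
			 * i.e. read the jiffies counter without a helper call.
			 */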
17849 			struct bpf_insn ld_jiffies_addr[2] = {
17850 				BPF_LD_IMM64(BPF_REG_0,
17851 					     (unsigned long)&jiffies),
17852 			};
17853 
17854 			insn_buf[0] = ld_jiffies_addr[0];
17855 			insn_buf[1] = ld_jiffies_addr[1];
17856 			insn_buf[2] = BPF_LDX_MEM(BPF_DW, BPF_REG_0,
17857 						  BPF_REG_0, 0);
17858 			cnt = 3;
17859 
17860 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf,
17861 						       cnt);
17862 			if (!new_prog)
17863 				return -ENOMEM;
17864 
17865 			delta    += cnt - 1;
17866 			env->prog = prog = new_prog;
17867 			insn      = new_prog->insnsi + i + delta;
17868 			continue;
17869 		}
17870 
17871 		/* Implement bpf_get_func_arg inline. */
17872 		if (prog_type == BPF_PROG_TYPE_TRACING &&
17873 		    insn->imm == BPF_FUNC_get_func_arg) {
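			/* The emitted sequence is roughly:
			 *   if (n >= nr_args) return -EINVAL;
			 *   *value = ((u64 *)ctx)[n];
			 *   return 0;
			 * where n is in R2 and value is the pointer in R3.
			 */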
17874 			/* Load nr_args from ctx - 8 */
17875 			insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8);
17876 			insn_buf[1] = BPF_JMP32_REG(BPF_JGE, BPF_REG_2, BPF_REG_0, 6);
17877 			insn_buf[2] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 3);
17878 			insn_buf[3] = BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1);
17879 			insn_buf[4] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0);
17880 			insn_buf[5] = BPF_STX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0);
17881 			insn_buf[6] = BPF_MOV64_IMM(BPF_REG_0, 0);
17882 			insn_buf[7] = BPF_JMP_A(1);
17883 			insn_buf[8] = BPF_MOV64_IMM(BPF_REG_0, -EINVAL);
17884 			cnt = 9;
17885 
17886 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
17887 			if (!new_prog)
17888 				return -ENOMEM;
17889 
17890 			delta    += cnt - 1;
17891 			env->prog = prog = new_prog;
17892 			insn      = new_prog->insnsi + i + delta;
17893 			continue;
17894 		}
17895 
17896 		/* Implement bpf_get_func_ret inline. */
17897 		if (prog_type == BPF_PROG_TYPE_TRACING &&
17898 		    insn->imm == BPF_FUNC_get_func_ret) {
17899 			if (eatype == BPF_TRACE_FEXIT ||
17900 			    eatype == BPF_MODIFY_RETURN) {
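				/* For fexit/fmod_ret the return value is stored
				 * right after the arguments, i.e. at
				 * ctx[nr_args]; other attach types cannot
				 * observe it and get -EOPNOTSUPP below.
				 */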
17901 				/* Load nr_args from ctx - 8 */
17902 				insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8);
17903 				insn_buf[1] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_0, 3);
17904 				insn_buf[2] = BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1);
17905 				insn_buf[3] = BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0);
17906 				insn_buf[4] = BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, 0);
17907 				insn_buf[5] = BPF_MOV64_IMM(BPF_REG_0, 0);
17908 				cnt = 6;
17909 			} else {
17910 				insn_buf[0] = BPF_MOV64_IMM(BPF_REG_0, -EOPNOTSUPP);
17911 				cnt = 1;
17912 			}
17913 
17914 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
17915 			if (!new_prog)
17916 				return -ENOMEM;
17917 
17918 			delta    += cnt - 1;
17919 			env->prog = prog = new_prog;
17920 			insn      = new_prog->insnsi + i + delta;
17921 			continue;
17922 		}
17923 
17924 		/* Implement get_func_arg_cnt inline. */
17925 		if (prog_type == BPF_PROG_TYPE_TRACING &&
17926 		    insn->imm == BPF_FUNC_get_func_arg_cnt) {
17927 			/* Load nr_args from ctx - 8 */
17928 			insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8);
17929 
17930 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 1);
17931 			if (!new_prog)
17932 				return -ENOMEM;
17933 
17934 			env->prog = prog = new_prog;
17935 			insn      = new_prog->insnsi + i + delta;
17936 			continue;
17937 		}
17938 
17939 		/* Implement bpf_get_func_ip inline. */
17940 		if (prog_type == BPF_PROG_TYPE_TRACING &&
17941 		    insn->imm == BPF_FUNC_get_func_ip) {
17942 			/* Load IP address from ctx - 16 */
17943 			insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -16);
17944 
17945 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 1);
17946 			if (!new_prog)
17947 				return -ENOMEM;
17948 
17949 			env->prog = prog = new_prog;
17950 			insn      = new_prog->insnsi + i + delta;
17951 			continue;
17952 		}
17953 
17954 patch_call_imm:
17955 		fn = env->ops->get_func_proto(insn->imm, env->prog);
17956 		/* Any function that has a prototype and that the verifier
17957 		 * allowed the program to call must be a real in-kernel function.
17958 		 */
17959 		if (!fn->func) {
17960 			verbose(env,
17961 				"kernel subsystem misconfigured func %s#%d\n",
17962 				func_id_name(insn->imm), insn->imm);
17963 			return -EFAULT;
17964 		}
17965 		insn->imm = fn->func - __bpf_call_base;
17966 	}
17967 
17968 	/* Since poke tab is now finalized, publish aux to tracker. */
17969 	for (i = 0; i < prog->aux->size_poke_tab; i++) {
17970 		map_ptr = prog->aux->poke_tab[i].tail_call.map;
17971 		if (!map_ptr->ops->map_poke_track ||
17972 		    !map_ptr->ops->map_poke_untrack ||
17973 		    !map_ptr->ops->map_poke_run) {
17974 			verbose(env, "bpf verifier is misconfigured\n");
17975 			return -EINVAL;
17976 		}
17977 
17978 		ret = map_ptr->ops->map_poke_track(map_ptr, prog->aux);
17979 		if (ret < 0) {
17980 			verbose(env, "tracking tail call prog failed\n");
17981 			return ret;
17982 		}
17983 	}
17984 
17985 	sort_kfunc_descs_by_imm_off(env->prog);
17986 
17987 	return 0;
17988 }
17989 
17990 static struct bpf_prog *inline_bpf_loop(struct bpf_verifier_env *env,
17991 					int position,
17992 					s32 stack_base,
17993 					u32 callback_subprogno,
17994 					u32 *cnt)
17995 {
17996 	s32 r6_offset = stack_base + 0 * BPF_REG_SIZE;
17997 	s32 r7_offset = stack_base + 1 * BPF_REG_SIZE;
17998 	s32 r8_offset = stack_base + 2 * BPF_REG_SIZE;
17999 	int reg_loop_max = BPF_REG_6;
18000 	int reg_loop_cnt = BPF_REG_7;
18001 	int reg_loop_ctx = BPF_REG_8;
18002 
18003 	struct bpf_prog *new_prog;
18004 	u32 callback_start;
18005 	u32 call_insn_offset;
18006 	s32 callback_offset;
18007 
18008 	/* This is an inlined version of bpf_iter.c:bpf_loop();
18009 	 * keep the two implementations in sync when modifying either.
18010 	 */
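	/* Roughly equivalent C, where cb() is the callback subprog invoked
	 * via the BPF_CALL_REL below, R1 holds nr_loops and R3 the callback
	 * context:
	 *
	 *	if (nr_loops > BPF_MAX_LOOPS)
	 *		return -E2BIG;
	 *	for (i = 0; i < nr_loops;) {
	 *		ret = cb(i, ctx);
	 *		i++;
	 *		if (ret)
	 *			break;
	 *	}
	 *	return i;
	 */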
18011 	struct bpf_insn insn_buf[] = {
18012 		/* Return error and jump to the end of the patch if
18013 		 * expected number of iterations is too big.
18014 		 */
18015 		BPF_JMP_IMM(BPF_JLE, BPF_REG_1, BPF_MAX_LOOPS, 2),
18016 		BPF_MOV32_IMM(BPF_REG_0, -E2BIG),
18017 		BPF_JMP_IMM(BPF_JA, 0, 0, 16),
18018 		/* spill R6, R7, R8 to use these as loop vars */
18019 		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, r6_offset),
18020 		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_7, r7_offset),
18021 		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_8, r8_offset),
18022 		/* initialize loop vars */
18023 		BPF_MOV64_REG(reg_loop_max, BPF_REG_1),
18024 		BPF_MOV32_IMM(reg_loop_cnt, 0),
18025 		BPF_MOV64_REG(reg_loop_ctx, BPF_REG_3),
18026 		/* loop header,
18027 		 * if reg_loop_cnt >= reg_loop_max skip the loop body
18028 		 */
18029 		BPF_JMP_REG(BPF_JGE, reg_loop_cnt, reg_loop_max, 5),
18030 		/* callback call,
18031 		 * correct callback offset would be set after patching
18032 		 */
18033 		BPF_MOV64_REG(BPF_REG_1, reg_loop_cnt),
18034 		BPF_MOV64_REG(BPF_REG_2, reg_loop_ctx),
18035 		BPF_CALL_REL(0),
18036 		/* increment loop counter */
18037 		BPF_ALU64_IMM(BPF_ADD, reg_loop_cnt, 1),
18038 		/* jump to loop header if callback returned 0 */
18039 		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -6),
18040 		/* return value of bpf_loop,
18041 		 * set R0 to the number of iterations
18042 		 */
18043 		BPF_MOV64_REG(BPF_REG_0, reg_loop_cnt),
18044 		/* restore original values of R6, R7, R8 */
18045 		BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, r6_offset),
18046 		BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_10, r7_offset),
18047 		BPF_LDX_MEM(BPF_DW, BPF_REG_8, BPF_REG_10, r8_offset),
18048 	};
18049 
18050 	*cnt = ARRAY_SIZE(insn_buf);
18051 	new_prog = bpf_patch_insn_data(env, position, insn_buf, *cnt);
18052 	if (!new_prog)
18053 		return new_prog;
18054 
18055 	/* callback start is known only after patching */
18056 	callback_start = env->subprog_info[callback_subprogno].start;
18057 	/* Note: insn_buf[12] is the offset of the BPF_CALL_REL instruction */
18058 	call_insn_offset = position + 12;
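	/* A BPF_CALL_REL immediate is relative to the instruction following
	 * the call, hence the extra -1 below.
	 */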
18059 	callback_offset = callback_start - call_insn_offset - 1;
18060 	new_prog->insnsi[call_insn_offset].imm = callback_offset;
18061 
18062 	return new_prog;
18063 }
18064 
18065 static bool is_bpf_loop_call(struct bpf_insn *insn)
18066 {
18067 	return insn->code == (BPF_JMP | BPF_CALL) &&
18068 		insn->src_reg == 0 &&
18069 		insn->imm == BPF_FUNC_loop;
18070 }
18071 
18072 /* For every sub-program in the program (including main), check
18073  * insn_aux_data to see if there are bpf_loop calls that qualify
18074  * for inlining. Each such call is replaced with the instruction
18075  * sequence produced by the inline_bpf_loop() function, and the
18076  * subprog's stack_depth is increased by the size of three
18077  * registers. This extra stack space is used to spill R6, R7 and
18078  * R8, which hold the loop bound, the loop counter and the
18079  * callback context variables respectively.
18080  */
18081 static int optimize_bpf_loop(struct bpf_verifier_env *env)
18082 {
18083 	struct bpf_subprog_info *subprogs = env->subprog_info;
18084 	int i, cur_subprog = 0, cnt, delta = 0;
18085 	struct bpf_insn *insn = env->prog->insnsi;
18086 	int insn_cnt = env->prog->len;
18087 	u16 stack_depth = subprogs[cur_subprog].stack_depth;
18088 	u16 stack_depth_roundup = round_up(stack_depth, 8) - stack_depth;
18089 	u16 stack_depth_extra = 0;
18090 
18091 	for (i = 0; i < insn_cnt; i++, insn++) {
18092 		struct bpf_loop_inline_state *inline_state =
18093 			&env->insn_aux_data[i + delta].loop_inline_state;
18094 
18095 		if (is_bpf_loop_call(insn) && inline_state->fit_for_inline) {
18096 			struct bpf_prog *new_prog;
18097 
18098 			stack_depth_extra = BPF_REG_SIZE * 3 + stack_depth_roundup;
18099 			new_prog = inline_bpf_loop(env,
18100 						   i + delta,
18101 						   -(stack_depth + stack_depth_extra),
18102 						   inline_state->callback_subprogno,
18103 						   &cnt);
18104 			if (!new_prog)
18105 				return -ENOMEM;
18106 
18107 			delta     += cnt - 1;
18108 			env->prog  = new_prog;
18109 			insn       = new_prog->insnsi + i + delta;
18110 		}
18111 
18112 		if (subprogs[cur_subprog + 1].start == i + delta + 1) {
18113 			subprogs[cur_subprog].stack_depth += stack_depth_extra;
18114 			cur_subprog++;
18115 			stack_depth = subprogs[cur_subprog].stack_depth;
18116 			stack_depth_roundup = round_up(stack_depth, 8) - stack_depth;
18117 			stack_depth_extra = 0;
18118 		}
18119 	}
18120 
18121 	env->prog->aux->stack_depth = env->subprog_info[0].stack_depth;
18122 
18123 	return 0;
18124 }
18125 
18126 static void free_states(struct bpf_verifier_env *env)
18127 {
18128 	struct bpf_verifier_state_list *sl, *sln;
18129 	int i;
18130 
18131 	sl = env->free_list;
18132 	while (sl) {
18133 		sln = sl->next;
18134 		free_verifier_state(&sl->state, false);
18135 		kfree(sl);
18136 		sl = sln;
18137 	}
18138 	env->free_list = NULL;
18139 
18140 	if (!env->explored_states)
18141 		return;
18142 
18143 	for (i = 0; i < state_htab_size(env); i++) {
18144 		sl = env->explored_states[i];
18145 
18146 		while (sl) {
18147 			sln = sl->next;
18148 			free_verifier_state(&sl->state, false);
18149 			kfree(sl);
18150 			sl = sln;
18151 		}
18152 		env->explored_states[i] = NULL;
18153 	}
18154 }
18155 
18156 static int do_check_common(struct bpf_verifier_env *env, int subprog)
18157 {
18158 	bool pop_log = !(env->log.level & BPF_LOG_LEVEL2);
18159 	struct bpf_verifier_state *state;
18160 	struct bpf_reg_state *regs;
18161 	int ret, i;
18162 
18163 	env->prev_linfo = NULL;
18164 	env->pass_cnt++;
18165 
18166 	state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL);
18167 	if (!state)
18168 		return -ENOMEM;
18169 	state->curframe = 0;
18170 	state->speculative = false;
18171 	state->branches = 1;
18172 	state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL);
18173 	if (!state->frame[0]) {
18174 		kfree(state);
18175 		return -ENOMEM;
18176 	}
18177 	env->cur_state = state;
18178 	init_func_state(env, state->frame[0],
18179 			BPF_MAIN_FUNC /* callsite */,
18180 			0 /* frameno */,
18181 			subprog);
18182 	state->first_insn_idx = env->subprog_info[subprog].start;
18183 	state->last_insn_idx = -1;
18184 
18185 	regs = state->frame[state->curframe]->regs;
18186 	if (subprog || env->prog->type == BPF_PROG_TYPE_EXT) {
18187 		ret = btf_prepare_func_args(env, subprog, regs);
18188 		if (ret)
18189 			goto out;
18190 		for (i = BPF_REG_1; i <= BPF_REG_5; i++) {
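		/* Initialize the argument registers from the BTF-derived
		 * prototype: context and memory pointers start at a known
		 * zero offset, scalar arguments start as unknown scalars.
		 */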
18191 			if (regs[i].type == PTR_TO_CTX)
18192 				mark_reg_known_zero(env, regs, i);
18193 			else if (regs[i].type == SCALAR_VALUE)
18194 				mark_reg_unknown(env, regs, i);
18195 			else if (base_type(regs[i].type) == PTR_TO_MEM) {
18196 				const u32 mem_size = regs[i].mem_size;
18197 
18198 				mark_reg_known_zero(env, regs, i);
18199 				regs[i].mem_size = mem_size;
18200 				regs[i].id = ++env->id_gen;
18201 			}
18202 		}
18203 	} else {
18204 		/* 1st arg to a function */
18205 		regs[BPF_REG_1].type = PTR_TO_CTX;
18206 		mark_reg_known_zero(env, regs, BPF_REG_1);
18207 		ret = btf_check_subprog_arg_match(env, subprog, regs);
18208 		if (ret == -EFAULT)
18209 			/* Unlikely verifier bug, abort.
18210 			 * ret == 0 and ret < 0 are sadly acceptable for
18211 			 * the main() function due to backward compatibility.
18212 			 * E.g. a socket filter program may be written as:
18213 			 * int bpf_prog(struct pt_regs *ctx)
18214 			 * and never dereference that ctx in the program.
18215 			 * 'struct pt_regs' is a type mismatch for a socket
18216 			 * filter, which should be using 'struct __sk_buff'.
18217 			 */
18218 			goto out;
18219 	}
18220 
18221 	ret = do_check(env);
18222 out:
18223 	/* check for NULL is necessary, since cur_state can be freed inside
18224 	 * do_check() under memory pressure.
18225 	 */
18226 	if (env->cur_state) {
18227 		free_verifier_state(env->cur_state, true);
18228 		env->cur_state = NULL;
18229 	}
18230 	while (!pop_stack(env, NULL, NULL, false));
18231 	if (!ret && pop_log)
18232 		bpf_vlog_reset(&env->log, 0);
18233 	free_states(env);
18234 	return ret;
18235 }
18236 
18237 /* Verify all global functions in a BPF program one by one based on their BTF.
18238  * All global functions must pass verification. Otherwise the whole program is rejected.
18239  * Consider:
18240  * int bar(int);
18241  * int foo(int f)
18242  * {
18243  *    return bar(f);
18244  * }
18245  * int bar(int b)
18246  * {
18247  *    ...
18248  * }
18249  * foo() will be verified first for R1=any_scalar_value. During verification it
18250  * will be assumed that bar() already verified successfully and call to bar()
18251  * from foo() will be checked for type match only. Later bar() will be verified
18252  * independently to check that it's safe for R1=any_scalar_value.
18253  */
18254 static int do_check_subprogs(struct bpf_verifier_env *env)
18255 {
18256 	struct bpf_prog_aux *aux = env->prog->aux;
18257 	int i, ret;
18258 
18259 	if (!aux->func_info)
18260 		return 0;
18261 
18262 	for (i = 1; i < env->subprog_cnt; i++) {
18263 		if (aux->func_info_aux[i].linkage != BTF_FUNC_GLOBAL)
18264 			continue;
18265 		env->insn_idx = env->subprog_info[i].start;
18266 		WARN_ON_ONCE(env->insn_idx == 0);
18267 		ret = do_check_common(env, i);
18268 		if (ret) {
18269 			return ret;
18270 		} else if (env->log.level & BPF_LOG_LEVEL) {
18271 			verbose(env,
18272 				"Func#%d is safe for any args that match its prototype\n",
18273 				i);
18274 		}
18275 	}
18276 	return 0;
18277 }
18278 
18279 static int do_check_main(struct bpf_verifier_env *env)
18280 {
18281 	int ret;
18282 
18283 	env->insn_idx = 0;
18284 	ret = do_check_common(env, 0);
18285 	if (!ret)
18286 		env->prog->aux->stack_depth = env->subprog_info[0].stack_depth;
18287 	return ret;
18288 }
18289 
18290 
18291 static void print_verification_stats(struct bpf_verifier_env *env)
18292 {
18293 	int i;
18294 
18295 	if (env->log.level & BPF_LOG_STATS) {
18296 		verbose(env, "verification time %lld usec\n",
18297 			div_u64(env->verification_time, 1000));
18298 		verbose(env, "stack depth ");
18299 		for (i = 0; i < env->subprog_cnt; i++) {
18300 			u32 depth = env->subprog_info[i].stack_depth;
18301 
18302 			verbose(env, "%d", depth);
18303 			if (i + 1 < env->subprog_cnt)
18304 				verbose(env, "+");
18305 		}
18306 		verbose(env, "\n");
18307 	}
18308 	verbose(env, "processed %d insns (limit %d) max_states_per_insn %d "
18309 		"total_states %d peak_states %d mark_read %d\n",
18310 		env->insn_processed, BPF_COMPLEXITY_LIMIT_INSNS,
18311 		env->max_states_per_insn, env->total_states,
18312 		env->peak_states, env->longest_mark_read_walk);
18313 }
18314 
18315 static int check_struct_ops_btf_id(struct bpf_verifier_env *env)
18316 {
18317 	const struct btf_type *t, *func_proto;
18318 	const struct bpf_struct_ops *st_ops;
18319 	const struct btf_member *member;
18320 	struct bpf_prog *prog = env->prog;
18321 	u32 btf_id, member_idx;
18322 	const char *mname;
18323 
18324 	if (!prog->gpl_compatible) {
18325 		verbose(env, "struct ops programs must have a GPL compatible license\n");
18326 		return -EINVAL;
18327 	}
18328 
18329 	btf_id = prog->aux->attach_btf_id;
18330 	st_ops = bpf_struct_ops_find(btf_id);
18331 	if (!st_ops) {
18332 		verbose(env, "attach_btf_id %u is not a supported struct\n",
18333 			btf_id);
18334 		return -ENOTSUPP;
18335 	}
18336 
18337 	t = st_ops->type;
18338 	member_idx = prog->expected_attach_type;
18339 	if (member_idx >= btf_type_vlen(t)) {
18340 		verbose(env, "attach to invalid member idx %u of struct %s\n",
18341 			member_idx, st_ops->name);
18342 		return -EINVAL;
18343 	}
18344 
18345 	member = &btf_type_member(t)[member_idx];
18346 	mname = btf_name_by_offset(btf_vmlinux, member->name_off);
18347 	func_proto = btf_type_resolve_func_ptr(btf_vmlinux, member->type,
18348 					       NULL);
18349 	if (!func_proto) {
18350 		verbose(env, "attach to invalid member %s(@idx %u) of struct %s\n",
18351 			mname, member_idx, st_ops->name);
18352 		return -EINVAL;
18353 	}
18354 
18355 	if (st_ops->check_member) {
18356 		int err = st_ops->check_member(t, member, prog);
18357 
18358 		if (err) {
18359 			verbose(env, "attach to unsupported member %s of struct %s\n",
18360 				mname, st_ops->name);
18361 			return err;
18362 		}
18363 	}
18364 
18365 	prog->aux->attach_func_proto = func_proto;
18366 	prog->aux->attach_func_name = mname;
18367 	env->ops = st_ops->verifier_ops;
18368 
18369 	return 0;
18370 }
18371 #define SECURITY_PREFIX "security_"
18372 
18373 static int check_attach_modify_return(unsigned long addr, const char *func_name)
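/* BPF_MODIFY_RETURN is only allowed on functions that are on the error
 * injection list or on security_*() LSM hooks, where overriding the
 * return value is an expected use case.
 */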
18374 {
18375 	if (within_error_injection_list(addr) ||
18376 	    !strncmp(SECURITY_PREFIX, func_name, sizeof(SECURITY_PREFIX) - 1))
18377 		return 0;
18378 
18379 	return -EINVAL;
18380 }
18381 
18382 /* list of non-sleepable functions that are otherwise on
18383  * ALLOW_ERROR_INJECTION list
18384  */
18385 BTF_SET_START(btf_non_sleepable_error_inject)
18386 /* Three functions below can be called from sleepable and non-sleepable context.
18387  * Assume non-sleepable from bpf safety point of view.
18388  */
18389 BTF_ID(func, __filemap_add_folio)
18390 BTF_ID(func, should_fail_alloc_page)
18391 BTF_ID(func, should_failslab)
18392 BTF_SET_END(btf_non_sleepable_error_inject)
18393 
18394 static int check_non_sleepable_error_inject(u32 btf_id)
18395 {
18396 	return btf_id_set_contains(&btf_non_sleepable_error_inject, btf_id);
18397 }
18398 
18399 int bpf_check_attach_target(struct bpf_verifier_log *log,
18400 			    const struct bpf_prog *prog,
18401 			    const struct bpf_prog *tgt_prog,
18402 			    u32 btf_id,
18403 			    struct bpf_attach_target_info *tgt_info)
18404 {
18405 	bool prog_extension = prog->type == BPF_PROG_TYPE_EXT;
18406 	const char prefix[] = "btf_trace_";
18407 	int ret = 0, subprog = -1, i;
18408 	const struct btf_type *t;
18409 	bool conservative = true;
18410 	const char *tname;
18411 	struct btf *btf;
18412 	long addr = 0;
18413 	struct module *mod = NULL;
18414 
18415 	if (!btf_id) {
18416 		bpf_log(log, "Tracing programs must provide btf_id\n");
18417 		return -EINVAL;
18418 	}
18419 	btf = tgt_prog ? tgt_prog->aux->btf : prog->aux->attach_btf;
18420 	if (!btf) {
18421 		bpf_log(log,
18422 			"FENTRY/FEXIT program can only be attached to another program annotated with BTF\n");
18423 		return -EINVAL;
18424 	}
18425 	t = btf_type_by_id(btf, btf_id);
18426 	if (!t) {
18427 		bpf_log(log, "attach_btf_id %u is invalid\n", btf_id);
18428 		return -EINVAL;
18429 	}
18430 	tname = btf_name_by_offset(btf, t->name_off);
18431 	if (!tname) {
18432 		bpf_log(log, "attach_btf_id %u doesn't have a name\n", btf_id);
18433 		return -EINVAL;
18434 	}
18435 	if (tgt_prog) {
18436 		struct bpf_prog_aux *aux = tgt_prog->aux;
18437 
18438 		if (bpf_prog_is_dev_bound(prog->aux) &&
18439 		    !bpf_prog_dev_bound_match(prog, tgt_prog)) {
18440 			bpf_log(log, "Target program bound device mismatch");
18441 			return -EINVAL;
18442 		}
18443 
18444 		for (i = 0; i < aux->func_info_cnt; i++)
18445 			if (aux->func_info[i].type_id == btf_id) {
18446 				subprog = i;
18447 				break;
18448 			}
18449 		if (subprog == -1) {
18450 			bpf_log(log, "Subprog %s doesn't exist\n", tname);
18451 			return -EINVAL;
18452 		}
18453 		conservative = aux->func_info_aux[subprog].unreliable;
18454 		if (prog_extension) {
18455 			if (conservative) {
18456 				bpf_log(log,
18457 					"Cannot replace static functions\n");
18458 				return -EINVAL;
18459 			}
18460 			if (!prog->jit_requested) {
18461 				bpf_log(log,
18462 					"Extension programs should be JITed\n");
18463 				return -EINVAL;
18464 			}
18465 		}
18466 		if (!tgt_prog->jited) {
18467 			bpf_log(log, "Can attach to only JITed progs\n");
18468 			return -EINVAL;
18469 		}
18470 		if (tgt_prog->type == prog->type) {
18471 			/* Cannot fentry/fexit another fentry/fexit program.
18472 			 * Cannot attach program extension to another extension.
18473 			 * It's ok to attach fentry/fexit to extension program.
18474 			 */
18475 			bpf_log(log, "Cannot recursively attach\n");
18476 			return -EINVAL;
18477 		}
18478 		if (tgt_prog->type == BPF_PROG_TYPE_TRACING &&
18479 		    prog_extension &&
18480 		    (tgt_prog->expected_attach_type == BPF_TRACE_FENTRY ||
18481 		     tgt_prog->expected_attach_type == BPF_TRACE_FEXIT)) {
18482 			/* Program extensions can extend all program types
18483 			 * except fentry/fexit, for the following reason.
18484 			 * The fentry/fexit programs are used for performance
18485 			 * analysis and stats, and can be attached to any
18486 			 * program type except themselves. When an extension
18487 			 * program replaces an XDP function it is necessary to
18488 			 * allow performance analysis of all functions, i.e.
18489 			 * of both the original XDP program and its extension.
18490 			 * Hence attaching fentry/fexit to BPF_PROG_TYPE_EXT
18491 			 * is allowed. If extending fentry/fexit were allowed,
18492 			 * it would be possible to create a call chain like
18493 			 * fentry->extension->fentry->extension that grows
18494 			 * beyond a reasonable stack size. Hence extending
18495 			 * fentry/fexit is not allowed.
18496 			 */
18497 			bpf_log(log, "Cannot extend fentry/fexit\n");
18498 			return -EINVAL;
18499 		}
18500 	} else {
18501 		if (prog_extension) {
18502 			bpf_log(log, "Cannot replace kernel functions\n");
18503 			return -EINVAL;
18504 		}
18505 	}
18506 
18507 	switch (prog->expected_attach_type) {
18508 	case BPF_TRACE_RAW_TP:
18509 		if (tgt_prog) {
18510 			bpf_log(log,
18511 				"Only FENTRY/FEXIT progs are attachable to another BPF prog\n");
18512 			return -EINVAL;
18513 		}
18514 		if (!btf_type_is_typedef(t)) {
18515 			bpf_log(log, "attach_btf_id %u is not a typedef\n",
18516 				btf_id);
18517 			return -EINVAL;
18518 		}
18519 		if (strncmp(prefix, tname, sizeof(prefix) - 1)) {
18520 			bpf_log(log, "attach_btf_id %u points to wrong type name %s\n",
18521 				btf_id, tname);
18522 			return -EINVAL;
18523 		}
18524 		tname += sizeof(prefix) - 1;
18525 		t = btf_type_by_id(btf, t->type);
18526 		if (!btf_type_is_ptr(t))
18527 			/* should never happen in valid vmlinux build */
18528 			return -EINVAL;
18529 		t = btf_type_by_id(btf, t->type);
18530 		if (!btf_type_is_func_proto(t))
18531 			/* should never happen in valid vmlinux build */
18532 			return -EINVAL;
18533 
18534 		break;
18535 	case BPF_TRACE_ITER:
18536 		if (!btf_type_is_func(t)) {
18537 			bpf_log(log, "attach_btf_id %u is not a function\n",
18538 				btf_id);
18539 			return -EINVAL;
18540 		}
18541 		t = btf_type_by_id(btf, t->type);
18542 		if (!btf_type_is_func_proto(t))
18543 			return -EINVAL;
18544 		ret = btf_distill_func_proto(log, btf, t, tname, &tgt_info->fmodel);
18545 		if (ret)
18546 			return ret;
18547 		break;
18548 	default:
18549 		if (!prog_extension)
18550 			return -EINVAL;
18551 		fallthrough;
18552 	case BPF_MODIFY_RETURN:
18553 	case BPF_LSM_MAC:
18554 	case BPF_LSM_CGROUP:
18555 	case BPF_TRACE_FENTRY:
18556 	case BPF_TRACE_FEXIT:
18557 		if (!btf_type_is_func(t)) {
18558 			bpf_log(log, "attach_btf_id %u is not a function\n",
18559 				btf_id);
18560 			return -EINVAL;
18561 		}
18562 		if (prog_extension &&
18563 		    btf_check_type_match(log, prog, btf, t))
18564 			return -EINVAL;
18565 		t = btf_type_by_id(btf, t->type);
18566 		if (!btf_type_is_func_proto(t))
18567 			return -EINVAL;
18568 
18569 		if ((prog->aux->saved_dst_prog_type || prog->aux->saved_dst_attach_type) &&
18570 		    (!tgt_prog || prog->aux->saved_dst_prog_type != tgt_prog->type ||
18571 		     prog->aux->saved_dst_attach_type != tgt_prog->expected_attach_type))
18572 			return -EINVAL;
18573 
18574 		if (tgt_prog && conservative)
18575 			t = NULL;
18576 
18577 		ret = btf_distill_func_proto(log, btf, t, tname, &tgt_info->fmodel);
18578 		if (ret < 0)
18579 			return ret;
18580 
18581 		if (tgt_prog) {
18582 			if (subprog == 0)
18583 				addr = (long) tgt_prog->bpf_func;
18584 			else
18585 				addr = (long) tgt_prog->aux->func[subprog]->bpf_func;
18586 		} else {
18587 			if (btf_is_module(btf)) {
18588 				mod = btf_try_get_module(btf);
18589 				if (mod)
18590 					addr = find_kallsyms_symbol_value(mod, tname);
18591 				else
18592 					addr = 0;
18593 			} else {
18594 				addr = kallsyms_lookup_name(tname);
18595 			}
18596 			if (!addr) {
18597 				module_put(mod);
18598 				bpf_log(log,
18599 					"The address of function %s cannot be found\n",
18600 					tname);
18601 				return -ENOENT;
18602 			}
18603 		}
18604 
18605 		if (prog->aux->sleepable) {
18606 			ret = -EINVAL;
18607 			switch (prog->type) {
18608 			case BPF_PROG_TYPE_TRACING:
18609 
18610 				/* fentry/fexit/fmod_ret progs can be sleepable if they are
18611 				 * attached to ALLOW_ERROR_INJECTION and are not in denylist.
18612 				 */
18613 				if (!check_non_sleepable_error_inject(btf_id) &&
18614 				    within_error_injection_list(addr))
18615 					ret = 0;
18616 				/* fentry/fexit/fmod_ret progs can also be sleepable if they are
18617 				 * in the fmodret id set with the KF_SLEEPABLE flag.
18618 				 */
18619 				else {
18620 					u32 *flags = btf_kfunc_is_modify_return(btf, btf_id);
18621 
18622 					if (flags && (*flags & KF_SLEEPABLE))
18623 						ret = 0;
18624 				}
18625 				break;
18626 			case BPF_PROG_TYPE_LSM:
18627 				/* LSM progs check that they are attached to bpf_lsm_*() funcs.
18628 				 * Only some of them are sleepable.
18629 				 */
18630 				if (bpf_lsm_is_sleepable_hook(btf_id))
18631 					ret = 0;
18632 				break;
18633 			default:
18634 				break;
18635 			}
18636 			if (ret) {
18637 				module_put(mod);
18638 				bpf_log(log, "%s is not sleepable\n", tname);
18639 				return ret;
18640 			}
18641 		} else if (prog->expected_attach_type == BPF_MODIFY_RETURN) {
18642 			if (tgt_prog) {
18643 				module_put(mod);
18644 				bpf_log(log, "can't modify return codes of BPF programs\n");
18645 				return -EINVAL;
18646 			}
18647 			ret = -EINVAL;
18648 			if (btf_kfunc_is_modify_return(btf, btf_id) ||
18649 			    !check_attach_modify_return(addr, tname))
18650 				ret = 0;
18651 			if (ret) {
18652 				module_put(mod);
18653 				bpf_log(log, "%s() is not modifiable\n", tname);
18654 				return ret;
18655 			}
18656 		}
18657 
18658 		break;
18659 	}
18660 	tgt_info->tgt_addr = addr;
18661 	tgt_info->tgt_name = tname;
18662 	tgt_info->tgt_type = t;
18663 	tgt_info->tgt_mod = mod;
18664 	return 0;
18665 }
18666 
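/* Functions that tracing programs may never attach to, typically because
 * the BPF program runtime itself relies on them and attaching there could
 * recurse.
 */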
18667 BTF_SET_START(btf_id_deny)
18668 BTF_ID_UNUSED
18669 #ifdef CONFIG_SMP
18670 BTF_ID(func, migrate_disable)
18671 BTF_ID(func, migrate_enable)
18672 #endif
18673 #if !defined CONFIG_PREEMPT_RCU && !defined CONFIG_TINY_RCU
18674 BTF_ID(func, rcu_read_unlock_strict)
18675 #endif
18676 #if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_TRACE_PREEMPT_TOGGLE)
18677 BTF_ID(func, preempt_count_add)
18678 BTF_ID(func, preempt_count_sub)
18679 #endif
18680 #ifdef CONFIG_PREEMPT_RCU
18681 BTF_ID(func, __rcu_read_lock)
18682 BTF_ID(func, __rcu_read_unlock)
18683 #endif
18684 BTF_SET_END(btf_id_deny)
18685 
18686 static bool can_be_sleepable(struct bpf_prog *prog)
18687 {
18688 	if (prog->type == BPF_PROG_TYPE_TRACING) {
18689 		switch (prog->expected_attach_type) {
18690 		case BPF_TRACE_FENTRY:
18691 		case BPF_TRACE_FEXIT:
18692 		case BPF_MODIFY_RETURN:
18693 		case BPF_TRACE_ITER:
18694 			return true;
18695 		default:
18696 			return false;
18697 		}
18698 	}
18699 	return prog->type == BPF_PROG_TYPE_LSM ||
18700 	       prog->type == BPF_PROG_TYPE_KPROBE /* only for uprobes */ ||
18701 	       prog->type == BPF_PROG_TYPE_STRUCT_OPS;
18702 }
18703 
18704 static int check_attach_btf_id(struct bpf_verifier_env *env)
18705 {
18706 	struct bpf_prog *prog = env->prog;
18707 	struct bpf_prog *tgt_prog = prog->aux->dst_prog;
18708 	struct bpf_attach_target_info tgt_info = {};
18709 	u32 btf_id = prog->aux->attach_btf_id;
18710 	struct bpf_trampoline *tr;
18711 	int ret;
18712 	u64 key;
18713 
18714 	if (prog->type == BPF_PROG_TYPE_SYSCALL) {
18715 		if (prog->aux->sleepable)
18716 			/* attach_btf_id checked to be zero already */
18717 			return 0;
18718 		verbose(env, "Syscall programs can only be sleepable\n");
18719 		return -EINVAL;
18720 	}
18721 
18722 	if (prog->aux->sleepable && !can_be_sleepable(prog)) {
18723 		verbose(env, "Only fentry/fexit/fmod_ret, lsm, iter, uprobe, and struct_ops programs can be sleepable\n");
18724 		return -EINVAL;
18725 	}
18726 
18727 	if (prog->type == BPF_PROG_TYPE_STRUCT_OPS)
18728 		return check_struct_ops_btf_id(env);
18729 
18730 	if (prog->type != BPF_PROG_TYPE_TRACING &&
18731 	    prog->type != BPF_PROG_TYPE_LSM &&
18732 	    prog->type != BPF_PROG_TYPE_EXT)
18733 		return 0;
18734 
18735 	ret = bpf_check_attach_target(&env->log, prog, tgt_prog, btf_id, &tgt_info);
18736 	if (ret)
18737 		return ret;
18738 
18739 	if (tgt_prog && prog->type == BPF_PROG_TYPE_EXT) {
18740 		/* to make freplace equivalent to their targets, they need to
18741 		 * inherit env->ops and expected_attach_type for the rest of the
18742 		 * verification
18743 		 */
18744 		env->ops = bpf_verifier_ops[tgt_prog->type];
18745 		prog->expected_attach_type = tgt_prog->expected_attach_type;
18746 	}
18747 
18748 	/* store info about the attachment target that will be used later */
18749 	prog->aux->attach_func_proto = tgt_info.tgt_type;
18750 	prog->aux->attach_func_name = tgt_info.tgt_name;
18751 	prog->aux->mod = tgt_info.tgt_mod;
18752 
18753 	if (tgt_prog) {
18754 		prog->aux->saved_dst_prog_type = tgt_prog->type;
18755 		prog->aux->saved_dst_attach_type = tgt_prog->expected_attach_type;
18756 	}
18757 
18758 	if (prog->expected_attach_type == BPF_TRACE_RAW_TP) {
18759 		prog->aux->attach_btf_trace = true;
18760 		return 0;
18761 	} else if (prog->expected_attach_type == BPF_TRACE_ITER) {
18762 		if (!bpf_iter_prog_supported(prog))
18763 			return -EINVAL;
18764 		return 0;
18765 	}
18766 
18767 	if (prog->type == BPF_PROG_TYPE_LSM) {
18768 		ret = bpf_lsm_verify_prog(&env->log, prog);
18769 		if (ret < 0)
18770 			return ret;
18771 	} else if (prog->type == BPF_PROG_TYPE_TRACING &&
18772 		   btf_id_set_contains(&btf_id_deny, btf_id)) {
18773 		return -EINVAL;
18774 	}
18775 
18776 	key = bpf_trampoline_compute_key(tgt_prog, prog->aux->attach_btf, btf_id);
18777 	tr = bpf_trampoline_get(key, &tgt_info);
18778 	if (!tr)
18779 		return -ENOMEM;
18780 
18781 	prog->aux->dst_trampoline = tr;
18782 	return 0;
18783 }
18784 
18785 struct btf *bpf_get_btf_vmlinux(void)
18786 {
18787 	if (!btf_vmlinux && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) {
18788 		mutex_lock(&bpf_verifier_lock);
18789 		if (!btf_vmlinux)
18790 			btf_vmlinux = btf_parse_vmlinux();
18791 		mutex_unlock(&bpf_verifier_lock);
18792 	}
18793 	return btf_vmlinux;
18794 }
18795 
18796 int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u32 uattr_size)
18797 {
18798 	u64 start_time = ktime_get_ns();
18799 	struct bpf_verifier_env *env;
18800 	int i, len, ret = -EINVAL, err;
18801 	u32 log_true_size;
18802 	bool is_priv;
18803 
18804 	/* no program is valid */
18805 	if (ARRAY_SIZE(bpf_verifier_ops) == 0)
18806 		return -EINVAL;
18807 
18808 	/* 'struct bpf_verifier_env' can be global, but since it's not small,
18809 	 * allocate/free it every time bpf_check() is called
18810 	 */
18811 	env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
18812 	if (!env)
18813 		return -ENOMEM;
18814 
18815 	len = (*prog)->len;
18816 	env->insn_aux_data =
18817 		vzalloc(array_size(sizeof(struct bpf_insn_aux_data), len));
18818 	ret = -ENOMEM;
18819 	if (!env->insn_aux_data)
18820 		goto err_free_env;
18821 	for (i = 0; i < len; i++)
18822 		env->insn_aux_data[i].orig_idx = i;
18823 	env->prog = *prog;
18824 	env->ops = bpf_verifier_ops[env->prog->type];
18825 	env->fd_array = make_bpfptr(attr->fd_array, uattr.is_kernel);
18826 	is_priv = bpf_capable();
18827 
18828 	bpf_get_btf_vmlinux();
18829 
18830 	/* grab the mutex to protect a few globals used by the verifier */
18831 	if (!is_priv)
18832 		mutex_lock(&bpf_verifier_lock);
18833 
18834 	/* user could have requested verbose verifier output
18835 	 * and supplied buffer to store the verification trace
18836 	 */
18837 	ret = bpf_vlog_init(&env->log, attr->log_level,
18838 			    (char __user *) (unsigned long) attr->log_buf,
18839 			    attr->log_size);
18840 	if (ret)
18841 		goto err_unlock;
18842 
18843 	mark_verifier_state_clean(env);
18844 
18845 	if (IS_ERR(btf_vmlinux)) {
18846 		/* Either gcc or pahole or kernel are broken. */
18847 		verbose(env, "in-kernel BTF is malformed\n");
18848 		ret = PTR_ERR(btf_vmlinux);
18849 		goto skip_full_check;
18850 	}
18851 
18852 	env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT);
18853 	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
18854 		env->strict_alignment = true;
18855 	if (attr->prog_flags & BPF_F_ANY_ALIGNMENT)
18856 		env->strict_alignment = false;
18857 
18858 	env->allow_ptr_leaks = bpf_allow_ptr_leaks();
18859 	env->allow_uninit_stack = bpf_allow_uninit_stack();
18860 	env->bypass_spec_v1 = bpf_bypass_spec_v1();
18861 	env->bypass_spec_v4 = bpf_bypass_spec_v4();
18862 	env->bpf_capable = bpf_capable();
18863 
18864 	if (is_priv)
18865 		env->test_state_freq = attr->prog_flags & BPF_F_TEST_STATE_FREQ;
18866 
18867 	env->explored_states = kvcalloc(state_htab_size(env),
18868 				       sizeof(struct bpf_verifier_state_list *),
18869 				       GFP_USER);
18870 	ret = -ENOMEM;
18871 	if (!env->explored_states)
18872 		goto skip_full_check;
18873 
18874 	ret = add_subprog_and_kfunc(env);
18875 	if (ret < 0)
18876 		goto skip_full_check;
18877 
18878 	ret = check_subprogs(env);
18879 	if (ret < 0)
18880 		goto skip_full_check;
18881 
18882 	ret = check_btf_info(env, attr, uattr);
18883 	if (ret < 0)
18884 		goto skip_full_check;
18885 
18886 	ret = check_attach_btf_id(env);
18887 	if (ret)
18888 		goto skip_full_check;
18889 
18890 	ret = resolve_pseudo_ldimm64(env);
18891 	if (ret < 0)
18892 		goto skip_full_check;
18893 
18894 	if (bpf_prog_is_offloaded(env->prog->aux)) {
18895 		ret = bpf_prog_offload_verifier_prep(env->prog);
18896 		if (ret)
18897 			goto skip_full_check;
18898 	}
18899 
18900 	ret = check_cfg(env);
18901 	if (ret < 0)
18902 		goto skip_full_check;
18903 
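	/* Verify all global subprograms first, each independently of its
	 * callers, then verify the main program together with any static
	 * subprograms it calls.
	 */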
18904 	ret = do_check_subprogs(env);
18905 	ret = ret ?: do_check_main(env);
18906 
18907 	if (ret == 0 && bpf_prog_is_offloaded(env->prog->aux))
18908 		ret = bpf_prog_offload_finalize(env);
18909 
18910 skip_full_check:
18911 	kvfree(env->explored_states);
18912 
18913 	if (ret == 0)
18914 		ret = check_max_stack_depth(env);
18915 
18916 	/* instruction rewrites happen after this point */
18917 	if (ret == 0)
18918 		ret = optimize_bpf_loop(env);
18919 
18920 	if (is_priv) {
18921 		if (ret == 0)
18922 			opt_hard_wire_dead_code_branches(env);
18923 		if (ret == 0)
18924 			ret = opt_remove_dead_code(env);
18925 		if (ret == 0)
18926 			ret = opt_remove_nops(env);
18927 	} else {
18928 		if (ret == 0)
18929 			sanitize_dead_code(env);
18930 	}
18931 
18932 	if (ret == 0)
18933 		/* program is valid, convert *(u32*)(ctx + off) accesses */
18934 		ret = convert_ctx_accesses(env);
18935 
18936 	if (ret == 0)
18937 		ret = do_misc_fixups(env);
18938 
18939 	/* Do the 32-bit optimization after insn patching is done, so that
18940 	 * the patched insns are handled correctly.
18941 	 */
18942 	if (ret == 0 && !bpf_prog_is_offloaded(env->prog->aux)) {
18943 		ret = opt_subreg_zext_lo32_rnd_hi32(env, attr);
18944 		env->prog->aux->verifier_zext = bpf_jit_needs_zext() ? !ret
18945 								     : false;
18946 	}
18947 
18948 	if (ret == 0)
18949 		ret = fixup_call_args(env);
18950 
18951 	env->verification_time = ktime_get_ns() - start_time;
18952 	print_verification_stats(env);
18953 	env->prog->aux->verified_insns = env->insn_processed;
18954 
18955 	/* preserve original error even if log finalization is successful */
18956 	err = bpf_vlog_finalize(&env->log, &log_true_size);
18957 	if (err)
18958 		ret = err;
18959 
18960 	if (uattr_size >= offsetofend(union bpf_attr, log_true_size) &&
18961 	    copy_to_bpfptr_offset(uattr, offsetof(union bpf_attr, log_true_size),
18962 				  &log_true_size, sizeof(log_true_size))) {
18963 		ret = -EFAULT;
18964 		goto err_release_maps;
18965 	}
18966 
18967 	if (ret)
18968 		goto err_release_maps;
18969 
18970 	if (env->used_map_cnt) {
18971 		/* if program passed verifier, update used_maps in bpf_prog_info */
18972 		env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt,
18973 							  sizeof(env->used_maps[0]),
18974 							  GFP_KERNEL);
18975 
18976 		if (!env->prog->aux->used_maps) {
18977 			ret = -ENOMEM;
18978 			goto err_release_maps;
18979 		}
18980 
18981 		memcpy(env->prog->aux->used_maps, env->used_maps,
18982 		       sizeof(env->used_maps[0]) * env->used_map_cnt);
18983 		env->prog->aux->used_map_cnt = env->used_map_cnt;
18984 	}
18985 	if (env->used_btf_cnt) {
18986 		/* if program passed verifier, update used_btfs in bpf_prog_aux */
18987 		env->prog->aux->used_btfs = kmalloc_array(env->used_btf_cnt,
18988 							  sizeof(env->used_btfs[0]),
18989 							  GFP_KERNEL);
18990 		if (!env->prog->aux->used_btfs) {
18991 			ret = -ENOMEM;
18992 			goto err_release_maps;
18993 		}
18994 
18995 		memcpy(env->prog->aux->used_btfs, env->used_btfs,
18996 		       sizeof(env->used_btfs[0]) * env->used_btf_cnt);
18997 		env->prog->aux->used_btf_cnt = env->used_btf_cnt;
18998 	}
18999 	if (env->used_map_cnt || env->used_btf_cnt) {
19000 		/* program is valid. Convert pseudo bpf_ld_imm64 into generic
19001 		 * bpf_ld_imm64 instructions
19002 		 */
19003 		convert_pseudo_ld_imm64(env);
19004 	}
19005 
19006 	adjust_btf_func(env);
19007 
19008 err_release_maps:
19009 	if (!env->prog->aux->used_maps)
19010 		/* if we didn't copy map pointers into bpf_prog_info, release
19011 		 * them now. Otherwise free_used_maps() will release them.
19012 		 */
19013 		release_maps(env);
19014 	if (!env->prog->aux->used_btfs)
19015 		release_btfs(env);
19016 
19017 	/* extension progs temporarily inherit the attach_type of their targets
19018 	 * for verification purposes, so set it back to zero before returning
19019 	 */
19020 	if (env->prog->type == BPF_PROG_TYPE_EXT)
19021 		env->prog->expected_attach_type = 0;
19022 
19023 	*prog = env->prog;
19024 err_unlock:
19025 	if (!is_priv)
19026 		mutex_unlock(&bpf_verifier_lock);
19027 	vfree(env->insn_aux_data);
19028 err_free_env:
19029 	kfree(env);
19030 	return ret;
19031 }
19032