1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
3  * Copyright (c) 2016 Facebook
4  * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
5  */
6 #include <uapi/linux/btf.h>
7 #include <linux/bpf-cgroup.h>
8 #include <linux/kernel.h>
9 #include <linux/types.h>
10 #include <linux/slab.h>
11 #include <linux/bpf.h>
12 #include <linux/btf.h>
13 #include <linux/bpf_verifier.h>
14 #include <linux/filter.h>
15 #include <net/netlink.h>
16 #include <linux/file.h>
17 #include <linux/vmalloc.h>
18 #include <linux/stringify.h>
19 #include <linux/bsearch.h>
20 #include <linux/sort.h>
21 #include <linux/perf_event.h>
22 #include <linux/ctype.h>
23 #include <linux/error-injection.h>
24 #include <linux/bpf_lsm.h>
25 #include <linux/btf_ids.h>
26 #include <linux/poison.h>
27 
28 #include "disasm.h"
29 
30 static const struct bpf_verifier_ops * const bpf_verifier_ops[] = {
31 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
32 	[_id] = & _name ## _verifier_ops,
33 #define BPF_MAP_TYPE(_id, _ops)
34 #define BPF_LINK_TYPE(_id, _name)
35 #include <linux/bpf_types.h>
36 #undef BPF_PROG_TYPE
37 #undef BPF_MAP_TYPE
38 #undef BPF_LINK_TYPE
39 };
40 
41 /* bpf_check() is a static code analyzer that walks eBPF program
42  * instruction by instruction and updates register/stack state.
43  * All paths of conditional branches are analyzed until 'bpf_exit' insn.
44  *
45  * The first pass is depth-first-search to check that the program is a DAG.
46  * It rejects the following programs:
47  * - programs larger than BPF_MAXINSNS insns
48  * - programs containing a loop (detected via a back-edge)
49  * - programs with unreachable insns (shouldn't be a forest; program = one function)
50  * - programs with out of bounds or malformed jumps
51  * The second pass is all possible path descent from the 1st insn.
52  * Since it's analyzing all paths through the program, the length of the
53  * analysis is limited to 64k insn, which may be hit even if the total number of
54  * insns is less than 4K but there are too many branches that change stack/regs.
55  * Number of 'branches to be analyzed' is limited to 1k.
56  *
57  * On entry to each instruction, each register has a type, and the instruction
58  * changes the types of the registers depending on instruction semantics.
59  * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is
60  * copied to R1.
61  *
62  * All registers are 64-bit.
63  * R0 - return register
64  * R1-R5 argument passing registers
65  * R6-R9 callee saved registers
66  * R10 - frame pointer read-only
67  *
68  * At the start of BPF program the register R1 contains a pointer to bpf_context
69  * and has type PTR_TO_CTX.
70  *
71  * Verifier tracks arithmetic operations on pointers in case:
72  *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
73  *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
74  * 1st insn copies R10's type (which is FRAME_PTR) into R1
75  * and 2nd arithmetic instruction is pattern matched to recognize
76  * that it wants to construct a pointer to some element within stack.
77  * So after 2nd insn, the register R1 has type PTR_TO_STACK
78  * (and -20 constant is saved for further stack bounds checking).
79  * Meaning that this reg is a pointer to stack plus known immediate constant.
80  *
81  * Most of the time the registers have SCALAR_VALUE type, which
82  * means the register has some value, but it's not a valid pointer.
83  * (like pointer plus pointer becomes SCALAR_VALUE type)
84  *
85  * When the verifier sees load or store instructions, the type of the base
86  * register can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK, PTR_TO_SOCKET.
87  * These are four pointer types recognized by the check_mem_access() function.
88  *
89  * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value'
90  * and the range of [ptr, ptr + map's value_size) is accessible.
91  *
92  * Registers used to pass values to function calls are checked against
93  * function argument constraints.
94  *
95  * ARG_PTR_TO_MAP_KEY is one such argument constraint.
96  * It means that the register type passed to this function must be
97  * PTR_TO_STACK and it will be used inside the function as
98  * 'pointer to map element key'.
99  *
100  * For example the argument constraints for bpf_map_lookup_elem():
101  *   .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
102  *   .arg1_type = ARG_CONST_MAP_PTR,
103  *   .arg2_type = ARG_PTR_TO_MAP_KEY,
104  *
105  * ret_type says that this function returns 'pointer to map elem value or null';
106  * the function expects the 1st argument to be a const pointer to 'struct bpf_map'
107  * and the 2nd argument to be a pointer to stack, which will be used inside
108  * the helper function as a pointer to map element key.
109  *
110  * On the kernel side the helper function looks like:
111  * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
112  * {
113  *    struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
114  *    void *key = (void *) (unsigned long) r2;
115  *    void *value;
116  *
117  *    here kernel can access 'key' and 'map' pointers safely, knowing that
118  *    [key, key + map->key_size) bytes are valid and were initialized on
119  *    the stack of eBPF program.
120  * }
121  *
122  * Corresponding eBPF program may look like:
123  *    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),  // after this insn R2 type is FRAME_PTR
124  *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
125  *    BPF_LD_MAP_FD(BPF_REG_1, map_fd),      // after this insn R1 type is CONST_PTR_TO_MAP
126  *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
127  * here verifier looks at prototype of map_lookup_elem() and sees:
128  * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok.
129  * Now the verifier knows that this map has a key of R1->map_ptr->key_size bytes.
130  *
131  * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far,
132  * Now verifier checks that [R2, R2 + map's key_size) are within stack limits
133  * and were initialized prior to this call.
134  * If it's ok, then verifier allows this BPF_CALL insn and looks at
135  * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets
136  * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function
137  * returns either pointer to map value or NULL.
138  *
139  * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
140  * insn, the register holding that pointer in the true branch changes state to
141  * PTR_TO_MAP_VALUE and the same register changes state to a known-zero scalar
142  * (i.e. NULL) in the false branch. See check_cond_jmp_op().
143  *
144  * After the call R0 is set to return type of the function and registers R1-R5
145  * are set to NOT_INIT to indicate that they are no longer readable.
146  *
147  * The following reference types represent a potential reference to a kernel
148  * resource which, after first being allocated, must be checked and freed by
149  * the BPF program:
150  * - PTR_TO_SOCKET_OR_NULL, PTR_TO_SOCKET
151  *
152  * When the verifier sees a helper call return a reference type, it allocates a
153  * pointer id for the reference and stores it in the current function state.
154  * Similar to the way that PTR_TO_MAP_VALUE_OR_NULL is converted into
155  * PTR_TO_MAP_VALUE, PTR_TO_SOCKET_OR_NULL becomes PTR_TO_SOCKET when the type
156  * passes through a NULL-check conditional. For the branch wherein the state is
157  * changed to a known-zero scalar (NULL), the verifier releases the reference.
158  *
159  * For each helper function that allocates a reference, such as
160  * bpf_sk_lookup_tcp(), there is a corresponding release function, such as
161  * bpf_sk_release(). When a reference type passes into the release function,
162  * the verifier also releases the reference. If any unchecked or unreleased
163  * reference remains at the end of the program, the verifier rejects it.
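 *
 * For illustration, a sketch of such an acquire/release pair in eBPF
 * instructions (argument setup for bpf_sk_lookup_tcp() is omitted and the
 * register/branch choices here are hypothetical):
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_sk_lookup_tcp),
 *    BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), // R0 is PTR_TO_SOCKET_OR_NULL
 *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),   // R0 is PTR_TO_SOCKET here
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_sk_release),
 *    BPF_EXIT_INSN(),
 * In the branch where R0 is known to be NULL, the verifier releases the
 * reference itself, so the program above is balanced on every path.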
164  */
165 
166 /* verifier_state + insn_idx are pushed to stack when branch is encountered */
167 struct bpf_verifier_stack_elem {
168 	/* verifier state is 'st'
169 	 * before processing instruction 'insn_idx'
170 	 * and after processing instruction 'prev_insn_idx'
171 	 */
172 	struct bpf_verifier_state st;
173 	int insn_idx;
174 	int prev_insn_idx;
175 	struct bpf_verifier_stack_elem *next;
176 	/* length of verifier log at the time this state was pushed on stack */
177 	u32 log_pos;
178 };
179 
180 #define BPF_COMPLEXITY_LIMIT_JMP_SEQ	8192
181 #define BPF_COMPLEXITY_LIMIT_STATES	64
182 
183 #define BPF_MAP_KEY_POISON	(1ULL << 63)
184 #define BPF_MAP_KEY_SEEN	(1ULL << 62)
185 
186 #define BPF_MAP_PTR_UNPRIV	1UL
187 #define BPF_MAP_PTR_POISON	((void *)((0xeB9FUL << 1) +	\
188 					  POISON_POINTER_DELTA))
189 #define BPF_MAP_PTR(X)		((struct bpf_map *)((X) & ~BPF_MAP_PTR_UNPRIV))
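
/* map_ptr_state packs the struct bpf_map pointer together with the
 * BPF_MAP_PTR_UNPRIV flag in bit 0 (map pointers are at least word aligned,
 * so bit 0 is otherwise zero), and BPF_MAP_PTR() masks the flag back off.
 * Likewise, map_key_state keeps a constant key value in the low bits, with
 * BPF_MAP_KEY_SEEN and BPF_MAP_KEY_POISON as flag bits 62 and 63. E.g.
 * after bpf_map_ptr_store(aux, map, true) below, aux->map_ptr_state equals
 * (unsigned long)map | BPF_MAP_PTR_UNPRIV.
 */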
190 
191 static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx);
192 static int release_reference(struct bpf_verifier_env *env, int ref_obj_id);
193 static void invalidate_non_owning_refs(struct bpf_verifier_env *env);
194 static bool in_rbtree_lock_required_cb(struct bpf_verifier_env *env);
195 static int ref_set_non_owning(struct bpf_verifier_env *env,
196 			      struct bpf_reg_state *reg);
197 
198 static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux)
199 {
200 	return BPF_MAP_PTR(aux->map_ptr_state) == BPF_MAP_PTR_POISON;
201 }
202 
203 static bool bpf_map_ptr_unpriv(const struct bpf_insn_aux_data *aux)
204 {
205 	return aux->map_ptr_state & BPF_MAP_PTR_UNPRIV;
206 }
207 
208 static void bpf_map_ptr_store(struct bpf_insn_aux_data *aux,
209 			      const struct bpf_map *map, bool unpriv)
210 {
211 	BUILD_BUG_ON((unsigned long)BPF_MAP_PTR_POISON & BPF_MAP_PTR_UNPRIV);
212 	unpriv |= bpf_map_ptr_unpriv(aux);
213 	aux->map_ptr_state = (unsigned long)map |
214 			     (unpriv ? BPF_MAP_PTR_UNPRIV : 0UL);
215 }
216 
217 static bool bpf_map_key_poisoned(const struct bpf_insn_aux_data *aux)
218 {
219 	return aux->map_key_state & BPF_MAP_KEY_POISON;
220 }
221 
222 static bool bpf_map_key_unseen(const struct bpf_insn_aux_data *aux)
223 {
224 	return !(aux->map_key_state & BPF_MAP_KEY_SEEN);
225 }
226 
227 static u64 bpf_map_key_immediate(const struct bpf_insn_aux_data *aux)
228 {
229 	return aux->map_key_state & ~(BPF_MAP_KEY_SEEN | BPF_MAP_KEY_POISON);
230 }
231 
232 static void bpf_map_key_store(struct bpf_insn_aux_data *aux, u64 state)
233 {
234 	bool poisoned = bpf_map_key_poisoned(aux);
235 
236 	aux->map_key_state = state | BPF_MAP_KEY_SEEN |
237 			     (poisoned ? BPF_MAP_KEY_POISON : 0ULL);
238 }
239 
240 static bool bpf_pseudo_call(const struct bpf_insn *insn)
241 {
242 	return insn->code == (BPF_JMP | BPF_CALL) &&
243 	       insn->src_reg == BPF_PSEUDO_CALL;
244 }
245 
246 static bool bpf_pseudo_kfunc_call(const struct bpf_insn *insn)
247 {
248 	return insn->code == (BPF_JMP | BPF_CALL) &&
249 	       insn->src_reg == BPF_PSEUDO_KFUNC_CALL;
250 }
251 
252 struct bpf_call_arg_meta {
253 	struct bpf_map *map_ptr;
254 	bool raw_mode;
255 	bool pkt_access;
256 	u8 release_regno;
257 	int regno;
258 	int access_size;
259 	int mem_size;
260 	u64 msize_max_value;
261 	int ref_obj_id;
262 	int dynptr_id;
263 	int map_uid;
264 	int func_id;
265 	struct btf *btf;
266 	u32 btf_id;
267 	struct btf *ret_btf;
268 	u32 ret_btf_id;
269 	u32 subprogno;
270 	struct btf_field *kptr_field;
271 	u8 uninit_dynptr_regno;
272 };
273 
274 struct btf *btf_vmlinux;
275 
276 static DEFINE_MUTEX(bpf_verifier_lock);
277 
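/* Return the last line info at or before insn_off; e.g. with line infos at
 * insn_off 0, 5 and 12, find_linfo(env, 7) returns the entry at insn_off 5.
 */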
278 static const struct bpf_line_info *
279 find_linfo(const struct bpf_verifier_env *env, u32 insn_off)
280 {
281 	const struct bpf_line_info *linfo;
282 	const struct bpf_prog *prog;
283 	u32 i, nr_linfo;
284 
285 	prog = env->prog;
286 	nr_linfo = prog->aux->nr_linfo;
287 
288 	if (!nr_linfo || insn_off >= prog->len)
289 		return NULL;
290 
291 	linfo = prog->aux->linfo;
292 	for (i = 1; i < nr_linfo; i++)
293 		if (insn_off < linfo[i].insn_off)
294 			break;
295 
296 	return &linfo[i - 1];
297 }
298 
299 void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt,
300 		       va_list args)
301 {
302 	unsigned int n;
303 
304 	n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args);
305 
306 	WARN_ONCE(n >= BPF_VERIFIER_TMP_LOG_SIZE - 1,
307 		  "verifier log line truncated - local buffer too short\n");
308 
309 	if (log->level == BPF_LOG_KERNEL) {
310 		bool newline = n > 0 && log->kbuf[n - 1] == '\n';
311 
312 		pr_err("BPF: %s%s", log->kbuf, newline ? "" : "\n");
313 		return;
314 	}
315 
316 	n = min(log->len_total - log->len_used - 1, n);
317 	log->kbuf[n] = '\0';
318 	if (!copy_to_user(log->ubuf + log->len_used, log->kbuf, n + 1))
319 		log->len_used += n;
320 	else
321 		log->ubuf = NULL;
322 }
323 
324 static void bpf_vlog_reset(struct bpf_verifier_log *log, u32 new_pos)
325 {
326 	char zero = 0;
327 
328 	if (!bpf_verifier_log_needed(log))
329 		return;
330 
331 	log->len_used = new_pos;
332 	if (put_user(zero, log->ubuf + new_pos))
333 		log->ubuf = NULL;
334 }
335 
336 /* log_level controls verbosity level of eBPF verifier.
337  * bpf_verifier_log_write() is used to dump the verification trace to the log,
338  * so the user can figure out what's wrong with the program.
339  */
340 __printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
341 					   const char *fmt, ...)
342 {
343 	va_list args;
344 
345 	if (!bpf_verifier_log_needed(&env->log))
346 		return;
347 
348 	va_start(args, fmt);
349 	bpf_verifier_vlog(&env->log, fmt, args);
350 	va_end(args);
351 }
352 EXPORT_SYMBOL_GPL(bpf_verifier_log_write);
353 
354 __printf(2, 3) static void verbose(void *private_data, const char *fmt, ...)
355 {
356 	struct bpf_verifier_env *env = private_data;
357 	va_list args;
358 
359 	if (!bpf_verifier_log_needed(&env->log))
360 		return;
361 
362 	va_start(args, fmt);
363 	bpf_verifier_vlog(&env->log, fmt, args);
364 	va_end(args);
365 }
366 
367 __printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
368 			    const char *fmt, ...)
369 {
370 	va_list args;
371 
372 	if (!bpf_verifier_log_needed(log))
373 		return;
374 
375 	va_start(args, fmt);
376 	bpf_verifier_vlog(log, fmt, args);
377 	va_end(args);
378 }
379 EXPORT_SYMBOL_GPL(bpf_log);
380 
381 static const char *ltrim(const char *s)
382 {
383 	while (isspace(*s))
384 		s++;
385 
386 	return s;
387 }
388 
389 __printf(3, 4) static void verbose_linfo(struct bpf_verifier_env *env,
390 					 u32 insn_off,
391 					 const char *prefix_fmt, ...)
392 {
393 	const struct bpf_line_info *linfo;
394 
395 	if (!bpf_verifier_log_needed(&env->log))
396 		return;
397 
398 	linfo = find_linfo(env, insn_off);
399 	if (!linfo || linfo == env->prev_linfo)
400 		return;
401 
402 	if (prefix_fmt) {
403 		va_list args;
404 
405 		va_start(args, prefix_fmt);
406 		bpf_verifier_vlog(&env->log, prefix_fmt, args);
407 		va_end(args);
408 	}
409 
410 	verbose(env, "%s\n",
411 		ltrim(btf_name_by_offset(env->prog->aux->btf,
412 					 linfo->line_off)));
413 
414 	env->prev_linfo = linfo;
415 }
416 
417 static void verbose_invalid_scalar(struct bpf_verifier_env *env,
418 				   struct bpf_reg_state *reg,
419 				   struct tnum *range, const char *ctx,
420 				   const char *reg_name)
421 {
422 	char tn_buf[48];
423 
424 	verbose(env, "At %s the register %s ", ctx, reg_name);
425 	if (!tnum_is_unknown(reg->var_off)) {
426 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
427 		verbose(env, "has value %s", tn_buf);
428 	} else {
429 		verbose(env, "has unknown scalar value");
430 	}
431 	tnum_strn(tn_buf, sizeof(tn_buf), *range);
432 	verbose(env, " should have been in %s\n", tn_buf);
433 }
434 
435 static bool type_is_pkt_pointer(enum bpf_reg_type type)
436 {
437 	type = base_type(type);
438 	return type == PTR_TO_PACKET ||
439 	       type == PTR_TO_PACKET_META;
440 }
441 
442 static bool type_is_sk_pointer(enum bpf_reg_type type)
443 {
444 	return type == PTR_TO_SOCKET ||
445 		type == PTR_TO_SOCK_COMMON ||
446 		type == PTR_TO_TCP_SOCK ||
447 		type == PTR_TO_XDP_SOCK;
448 }
449 
450 static bool reg_type_not_null(enum bpf_reg_type type)
451 {
452 	return type == PTR_TO_SOCKET ||
453 		type == PTR_TO_TCP_SOCK ||
454 		type == PTR_TO_MAP_VALUE ||
455 		type == PTR_TO_MAP_KEY ||
456 		type == PTR_TO_SOCK_COMMON;
457 }
458 
459 static bool type_is_ptr_alloc_obj(u32 type)
460 {
461 	return base_type(type) == PTR_TO_BTF_ID && type_flag(type) & MEM_ALLOC;
462 }
463 
464 static bool type_is_non_owning_ref(u32 type)
465 {
466 	return type_is_ptr_alloc_obj(type) && type_flag(type) & NON_OWN_REF;
467 }
468 
469 static struct btf_record *reg_btf_record(const struct bpf_reg_state *reg)
470 {
471 	struct btf_record *rec = NULL;
472 	struct btf_struct_meta *meta;
473 
474 	if (reg->type == PTR_TO_MAP_VALUE) {
475 		rec = reg->map_ptr->record;
476 	} else if (type_is_ptr_alloc_obj(reg->type)) {
477 		meta = btf_find_struct_meta(reg->btf, reg->btf_id);
478 		if (meta)
479 			rec = meta->record;
480 	}
481 	return rec;
482 }
483 
484 static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg)
485 {
486 	return btf_record_has_field(reg_btf_record(reg), BPF_SPIN_LOCK);
487 }
488 
489 static bool type_is_rdonly_mem(u32 type)
490 {
491 	return type & MEM_RDONLY;
492 }
493 
494 static bool type_may_be_null(u32 type)
495 {
496 	return type & PTR_MAYBE_NULL;
497 }
498 
499 static bool is_acquire_function(enum bpf_func_id func_id,
500 				const struct bpf_map *map)
501 {
502 	enum bpf_map_type map_type = map ? map->map_type : BPF_MAP_TYPE_UNSPEC;
503 
504 	if (func_id == BPF_FUNC_sk_lookup_tcp ||
505 	    func_id == BPF_FUNC_sk_lookup_udp ||
506 	    func_id == BPF_FUNC_skc_lookup_tcp ||
507 	    func_id == BPF_FUNC_ringbuf_reserve ||
508 	    func_id == BPF_FUNC_kptr_xchg)
509 		return true;
510 
511 	if (func_id == BPF_FUNC_map_lookup_elem &&
512 	    (map_type == BPF_MAP_TYPE_SOCKMAP ||
513 	     map_type == BPF_MAP_TYPE_SOCKHASH))
514 		return true;
515 
516 	return false;
517 }
518 
519 static bool is_ptr_cast_function(enum bpf_func_id func_id)
520 {
521 	return func_id == BPF_FUNC_tcp_sock ||
522 		func_id == BPF_FUNC_sk_fullsock ||
523 		func_id == BPF_FUNC_skc_to_tcp_sock ||
524 		func_id == BPF_FUNC_skc_to_tcp6_sock ||
525 		func_id == BPF_FUNC_skc_to_udp6_sock ||
526 		func_id == BPF_FUNC_skc_to_mptcp_sock ||
527 		func_id == BPF_FUNC_skc_to_tcp_timewait_sock ||
528 		func_id == BPF_FUNC_skc_to_tcp_request_sock;
529 }
530 
531 static bool is_dynptr_ref_function(enum bpf_func_id func_id)
532 {
533 	return func_id == BPF_FUNC_dynptr_data;
534 }
535 
536 static bool is_callback_calling_function(enum bpf_func_id func_id)
537 {
538 	return func_id == BPF_FUNC_for_each_map_elem ||
539 	       func_id == BPF_FUNC_timer_set_callback ||
540 	       func_id == BPF_FUNC_find_vma ||
541 	       func_id == BPF_FUNC_loop ||
542 	       func_id == BPF_FUNC_user_ringbuf_drain;
543 }
544 
545 static bool is_storage_get_function(enum bpf_func_id func_id)
546 {
547 	return func_id == BPF_FUNC_sk_storage_get ||
548 	       func_id == BPF_FUNC_inode_storage_get ||
549 	       func_id == BPF_FUNC_task_storage_get ||
550 	       func_id == BPF_FUNC_cgrp_storage_get;
551 }
552 
553 static bool helper_multiple_ref_obj_use(enum bpf_func_id func_id,
554 					const struct bpf_map *map)
555 {
556 	int ref_obj_uses = 0;
557 
558 	if (is_ptr_cast_function(func_id))
559 		ref_obj_uses++;
560 	if (is_acquire_function(func_id, map))
561 		ref_obj_uses++;
562 	if (is_dynptr_ref_function(func_id))
563 		ref_obj_uses++;
564 
565 	return ref_obj_uses > 1;
566 }
567 
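/* E.g. BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_1, BPF_REG_2, -8), using
 * the macro from <linux/filter.h> with hypothetical operands, matches this
 * predicate: it compares R0 with *(u64 *)(R1 - 8), writes R2 on a match and
 * leaves the old value in R0.
 */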
568 static bool is_cmpxchg_insn(const struct bpf_insn *insn)
569 {
570 	return BPF_CLASS(insn->code) == BPF_STX &&
571 	       BPF_MODE(insn->code) == BPF_ATOMIC &&
572 	       insn->imm == BPF_CMPXCHG;
573 }
574 
575 /* string representation of 'enum bpf_reg_type'
576  *
577  * Note that reg_type_str() cannot appear more than once in a single verbose()
578  * statement.
579  */
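/* E.g. PTR_TO_MAP_VALUE | PTR_MAYBE_NULL is rendered as "map_value_or_null",
 * MEM_RDONLY | PTR_TO_BUF as "rdonly_buf" and PTR_TO_BTF_ID | PTR_MAYBE_NULL
 * as "ptr_or_null_" (callers append the BTF type name after it).
 */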
580 static const char *reg_type_str(struct bpf_verifier_env *env,
581 				enum bpf_reg_type type)
582 {
583 	char postfix[16] = {0}, prefix[64] = {0};
584 	static const char * const str[] = {
585 		[NOT_INIT]		= "?",
586 		[SCALAR_VALUE]		= "scalar",
587 		[PTR_TO_CTX]		= "ctx",
588 		[CONST_PTR_TO_MAP]	= "map_ptr",
589 		[PTR_TO_MAP_VALUE]	= "map_value",
590 		[PTR_TO_STACK]		= "fp",
591 		[PTR_TO_PACKET]		= "pkt",
592 		[PTR_TO_PACKET_META]	= "pkt_meta",
593 		[PTR_TO_PACKET_END]	= "pkt_end",
594 		[PTR_TO_FLOW_KEYS]	= "flow_keys",
595 		[PTR_TO_SOCKET]		= "sock",
596 		[PTR_TO_SOCK_COMMON]	= "sock_common",
597 		[PTR_TO_TCP_SOCK]	= "tcp_sock",
598 		[PTR_TO_TP_BUFFER]	= "tp_buffer",
599 		[PTR_TO_XDP_SOCK]	= "xdp_sock",
600 		[PTR_TO_BTF_ID]		= "ptr_",
601 		[PTR_TO_MEM]		= "mem",
602 		[PTR_TO_BUF]		= "buf",
603 		[PTR_TO_FUNC]		= "func",
604 		[PTR_TO_MAP_KEY]	= "map_key",
605 		[CONST_PTR_TO_DYNPTR]	= "dynptr_ptr",
606 	};
607 
608 	if (type & PTR_MAYBE_NULL) {
609 		if (base_type(type) == PTR_TO_BTF_ID)
610 			strncpy(postfix, "or_null_", 16);
611 		else
612 			strncpy(postfix, "_or_null", 16);
613 	}
614 
615 	snprintf(prefix, sizeof(prefix), "%s%s%s%s%s%s%s",
616 		 type & MEM_RDONLY ? "rdonly_" : "",
617 		 type & MEM_RINGBUF ? "ringbuf_" : "",
618 		 type & MEM_USER ? "user_" : "",
619 		 type & MEM_PERCPU ? "percpu_" : "",
620 		 type & MEM_RCU ? "rcu_" : "",
621 		 type & PTR_UNTRUSTED ? "untrusted_" : "",
622 		 type & PTR_TRUSTED ? "trusted_" : ""
623 	);
624 
625 	snprintf(env->type_str_buf, TYPE_STR_BUF_LEN, "%s%s%s",
626 		 prefix, str[base_type(type)], postfix);
627 	return env->type_str_buf;
628 }
629 
630 static char slot_type_char[] = {
631 	[STACK_INVALID]	= '?',
632 	[STACK_SPILL]	= 'r',
633 	[STACK_MISC]	= 'm',
634 	[STACK_ZERO]	= '0',
635 	[STACK_DYNPTR]	= 'd',
636 };
637 
638 static void print_liveness(struct bpf_verifier_env *env,
639 			   enum bpf_reg_liveness live)
640 {
641 	if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN | REG_LIVE_DONE))
642 		verbose(env, "_");
643 	if (live & REG_LIVE_READ)
644 		verbose(env, "r");
645 	if (live & REG_LIVE_WRITTEN)
646 		verbose(env, "w");
647 	if (live & REG_LIVE_DONE)
648 		verbose(env, "D");
649 }
650 
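/* Map a negative stack offset to a stack slot index; e.g. off in -1..-8
 * maps to spi 0 and off in -9..-16 maps to spi 1.
 */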
651 static int __get_spi(s32 off)
652 {
653 	return (-off - 1) / BPF_REG_SIZE;
654 }
655 
656 static struct bpf_func_state *func(struct bpf_verifier_env *env,
657 				   const struct bpf_reg_state *reg)
658 {
659 	struct bpf_verifier_state *cur = env->cur_state;
660 
661 	return cur->frame[reg->frameno];
662 }
663 
664 static bool is_spi_bounds_valid(struct bpf_func_state *state, int spi, int nr_slots)
665 {
666 	int allocated_slots = state->allocated_stack / BPF_REG_SIZE;
667 
668 	/* We need to check that slots between [spi - nr_slots + 1, spi] are
669 	 * within [0, allocated_stack).
670 	 *
671 	 * Please note that the spi grows downwards. For example, a dynptr
672 	 * takes the size of two stack slots; the first slot will be at
673 	 * spi and the second slot will be at spi - 1.
674 	 */
675 	return spi - nr_slots + 1 >= 0 && spi < allocated_slots;
676 }
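
/* For the bounds check above: with allocated_stack = 32 (4 slots, spi 0..3),
 * a dynptr at spi = 1 with nr_slots = 2 covers slots 1 and 0 and is valid,
 * while spi = 0 with nr_slots = 2 would underflow the stack.
 */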
677 
678 static int dynptr_get_spi(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
679 {
680 	int off, spi;
681 
682 	if (!tnum_is_const(reg->var_off)) {
683 		verbose(env, "dynptr has to be at a constant offset\n");
684 		return -EINVAL;
685 	}
686 
687 	off = reg->off + reg->var_off.value;
688 	if (off % BPF_REG_SIZE) {
689 		verbose(env, "cannot pass in dynptr at an offset=%d\n", off);
690 		return -EINVAL;
691 	}
692 
693 	spi = __get_spi(off);
694 	if (spi < 1) {
695 		verbose(env, "cannot pass in dynptr at an offset=%d\n", off);
696 		return -EINVAL;
697 	}
698 
699 	if (!is_spi_bounds_valid(func(env, reg), spi, BPF_DYNPTR_NR_SLOTS))
700 		return -ERANGE;
701 	return spi;
702 }
703 
704 static const char *kernel_type_name(const struct btf *btf, u32 id)
705 {
706 	return btf_name_by_offset(btf, btf_type_by_id(btf, id)->name_off);
707 }
708 
709 static void mark_reg_scratched(struct bpf_verifier_env *env, u32 regno)
710 {
711 	env->scratched_regs |= 1U << regno;
712 }
713 
714 static void mark_stack_slot_scratched(struct bpf_verifier_env *env, u32 spi)
715 {
716 	env->scratched_stack_slots |= 1ULL << spi;
717 }
718 
719 static bool reg_scratched(const struct bpf_verifier_env *env, u32 regno)
720 {
721 	return (env->scratched_regs >> regno) & 1;
722 }
723 
724 static bool stack_slot_scratched(const struct bpf_verifier_env *env, u64 regno)
725 {
726 	return (env->scratched_stack_slots >> regno) & 1;
727 }
728 
729 static bool verifier_state_scratched(const struct bpf_verifier_env *env)
730 {
731 	return env->scratched_regs || env->scratched_stack_slots;
732 }
733 
734 static void mark_verifier_state_clean(struct bpf_verifier_env *env)
735 {
736 	env->scratched_regs = 0U;
737 	env->scratched_stack_slots = 0ULL;
738 }
739 
740 /* Used for printing the entire verifier state. */
741 static void mark_verifier_state_scratched(struct bpf_verifier_env *env)
742 {
743 	env->scratched_regs = ~0U;
744 	env->scratched_stack_slots = ~0ULL;
745 }
746 
747 static enum bpf_dynptr_type arg_to_dynptr_type(enum bpf_arg_type arg_type)
748 {
749 	switch (arg_type & DYNPTR_TYPE_FLAG_MASK) {
750 	case DYNPTR_TYPE_LOCAL:
751 		return BPF_DYNPTR_TYPE_LOCAL;
752 	case DYNPTR_TYPE_RINGBUF:
753 		return BPF_DYNPTR_TYPE_RINGBUF;
754 	default:
755 		return BPF_DYNPTR_TYPE_INVALID;
756 	}
757 }
758 
759 static bool dynptr_type_refcounted(enum bpf_dynptr_type type)
760 {
761 	return type == BPF_DYNPTR_TYPE_RINGBUF;
762 }
763 
764 static void __mark_dynptr_reg(struct bpf_reg_state *reg,
765 			      enum bpf_dynptr_type type,
766 			      bool first_slot, int dynptr_id);
767 
768 static void __mark_reg_not_init(const struct bpf_verifier_env *env,
769 				struct bpf_reg_state *reg);
770 
771 static void mark_dynptr_stack_regs(struct bpf_verifier_env *env,
772 				   struct bpf_reg_state *sreg1,
773 				   struct bpf_reg_state *sreg2,
774 				   enum bpf_dynptr_type type)
775 {
776 	int id = ++env->id_gen;
777 
778 	__mark_dynptr_reg(sreg1, type, true, id);
779 	__mark_dynptr_reg(sreg2, type, false, id);
780 }
781 
782 static void mark_dynptr_cb_reg(struct bpf_verifier_env *env,
783 			       struct bpf_reg_state *reg,
784 			       enum bpf_dynptr_type type)
785 {
786 	__mark_dynptr_reg(reg, type, true, ++env->id_gen);
787 }
788 
789 static int destroy_if_dynptr_stack_slot(struct bpf_verifier_env *env,
790 				        struct bpf_func_state *state, int spi);
791 
792 static int mark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
793 				   enum bpf_arg_type arg_type, int insn_idx)
794 {
795 	struct bpf_func_state *state = func(env, reg);
796 	enum bpf_dynptr_type type;
797 	int spi, i, id, err;
798 
799 	spi = dynptr_get_spi(env, reg);
800 	if (spi < 0)
801 		return spi;
802 
803 	/* We cannot assume both spi and spi - 1 belong to the same dynptr,
804 	 * hence we need to call destroy_if_dynptr_stack_slot twice for both
805 	 * slots, so that in the following layout:
806 	 *	[d1][d1][d2][d2]
807 	 * spi    3   2   1   0
808 	 * marking spi = 2 leads to destruction of both d1 and d2. In case
809 	 * they do belong to the same dynptr, the second call won't see the
810 	 * slot_type as STACK_DYNPTR and will simply skip destruction.
811 	 */
812 	err = destroy_if_dynptr_stack_slot(env, state, spi);
813 	if (err)
814 		return err;
815 	err = destroy_if_dynptr_stack_slot(env, state, spi - 1);
816 	if (err)
817 		return err;
818 
819 	for (i = 0; i < BPF_REG_SIZE; i++) {
820 		state->stack[spi].slot_type[i] = STACK_DYNPTR;
821 		state->stack[spi - 1].slot_type[i] = STACK_DYNPTR;
822 	}
823 
824 	type = arg_to_dynptr_type(arg_type);
825 	if (type == BPF_DYNPTR_TYPE_INVALID)
826 		return -EINVAL;
827 
828 	mark_dynptr_stack_regs(env, &state->stack[spi].spilled_ptr,
829 			       &state->stack[spi - 1].spilled_ptr, type);
830 
831 	if (dynptr_type_refcounted(type)) {
832 		/* The id is used to track proper releasing */
833 		id = acquire_reference_state(env, insn_idx);
834 		if (id < 0)
835 			return id;
836 
837 		state->stack[spi].spilled_ptr.ref_obj_id = id;
838 		state->stack[spi - 1].spilled_ptr.ref_obj_id = id;
839 	}
840 
841 	state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
842 	state->stack[spi - 1].spilled_ptr.live |= REG_LIVE_WRITTEN;
843 
844 	return 0;
845 }
846 
847 static int unmark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
848 {
849 	struct bpf_func_state *state = func(env, reg);
850 	int spi, i;
851 
852 	spi = dynptr_get_spi(env, reg);
853 	if (spi < 0)
854 		return spi;
855 
856 	for (i = 0; i < BPF_REG_SIZE; i++) {
857 		state->stack[spi].slot_type[i] = STACK_INVALID;
858 		state->stack[spi - 1].slot_type[i] = STACK_INVALID;
859 	}
860 
861 	/* Invalidate any slices associated with this dynptr */
862 	if (dynptr_type_refcounted(state->stack[spi].spilled_ptr.dynptr.type))
863 		WARN_ON_ONCE(release_reference(env, state->stack[spi].spilled_ptr.ref_obj_id));
864 
865 	__mark_reg_not_init(env, &state->stack[spi].spilled_ptr);
866 	__mark_reg_not_init(env, &state->stack[spi - 1].spilled_ptr);
867 
868 	/* Why do we need to set REG_LIVE_WRITTEN for STACK_INVALID slot?
869 	 *
870 	 * While we don't allow reading STACK_INVALID, it is still possible to
871 	 * do <8 byte writes marking some but not all slots as STACK_MISC. Then,
872 	 * helpers or insns can do partial read of that part without failing,
873 	 * but check_stack_range_initialized, check_stack_read_var_off, and
874 	 * check_stack_read_fixed_off will do mark_reg_read for all 8-bytes of
875 	 * the slot conservatively. Hence we need to prevent those liveness
876 	 * marking walks.
877 	 *
878 	 * This was not a problem before because STACK_INVALID is only set by
879 	 * default (where the default reg state has its reg->parent as NULL), or
880 	 * in clean_live_states after REG_LIVE_DONE (at which point
881 	 * mark_reg_read won't walk reg->parent chain), but not randomly during
882 	 * verifier state exploration (like we did above). Hence, for our case
883 	 * parentage chain will still be live (i.e. reg->parent may be
884 	 * non-NULL), while earlier reg->parent was NULL, so we need
885 	 * REG_LIVE_WRITTEN to screen off read marker propagation that would
886 	 * otherwise happen later on reads or via mark_dynptr_read, which would
887 	 * unnecessarily mark registers in the verifier state.
888 	 */
889 	state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
890 	state->stack[spi - 1].spilled_ptr.live |= REG_LIVE_WRITTEN;
891 
892 	return 0;
893 }
894 
895 static void __mark_reg_unknown(const struct bpf_verifier_env *env,
896 			       struct bpf_reg_state *reg);
897 
898 static int destroy_if_dynptr_stack_slot(struct bpf_verifier_env *env,
899 				        struct bpf_func_state *state, int spi)
900 {
901 	struct bpf_func_state *fstate;
902 	struct bpf_reg_state *dreg;
903 	int i, dynptr_id;
904 
905 	/* We always ensure that STACK_DYNPTR is never set partially,
906 	 * hence just checking for slot_type[0] is enough. This is
907 	 * different for STACK_SPILL, where it may only be set for
908 	 * 1 byte, so code has to use is_spilled_reg.
909 	 */
910 	if (state->stack[spi].slot_type[0] != STACK_DYNPTR)
911 		return 0;
912 
913 	/* Reposition spi to first slot */
914 	if (!state->stack[spi].spilled_ptr.dynptr.first_slot)
915 		spi = spi + 1;
916 
917 	if (dynptr_type_refcounted(state->stack[spi].spilled_ptr.dynptr.type)) {
918 		verbose(env, "cannot overwrite referenced dynptr\n");
919 		return -EINVAL;
920 	}
921 
922 	mark_stack_slot_scratched(env, spi);
923 	mark_stack_slot_scratched(env, spi - 1);
924 
925 	/* Writing partially to one dynptr stack slot destroys both. */
926 	for (i = 0; i < BPF_REG_SIZE; i++) {
927 		state->stack[spi].slot_type[i] = STACK_INVALID;
928 		state->stack[spi - 1].slot_type[i] = STACK_INVALID;
929 	}
930 
931 	dynptr_id = state->stack[spi].spilled_ptr.id;
932 	/* Invalidate any slices associated with this dynptr */
933 	bpf_for_each_reg_in_vstate(env->cur_state, fstate, dreg, ({
934 		/* Dynptr slices are only PTR_TO_MEM_OR_NULL and PTR_TO_MEM */
935 		if (dreg->type != (PTR_TO_MEM | PTR_MAYBE_NULL) && dreg->type != PTR_TO_MEM)
936 			continue;
937 		if (dreg->dynptr_id == dynptr_id) {
938 			if (!env->allow_ptr_leaks)
939 				__mark_reg_not_init(env, dreg);
940 			else
941 				__mark_reg_unknown(env, dreg);
942 		}
943 	}));
944 
945 	/* Do not release reference state, we are destroying dynptr on stack,
946 	 * not using some helper to release it. Just reset register.
947 	 */
948 	__mark_reg_not_init(env, &state->stack[spi].spilled_ptr);
949 	__mark_reg_not_init(env, &state->stack[spi - 1].spilled_ptr);
950 
951 	/* Same reason as unmark_stack_slots_dynptr above */
952 	state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
953 	state->stack[spi - 1].spilled_ptr.live |= REG_LIVE_WRITTEN;
954 
955 	return 0;
956 }
957 
958 static bool is_dynptr_reg_valid_uninit(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
959 				       int spi)
960 {
961 	if (reg->type == CONST_PTR_TO_DYNPTR)
962 		return false;
963 
964 	/* For -ERANGE (i.e. spi not falling into allocated stack slots), we
965 	 * will do check_mem_access to check and update stack bounds later, so
966 	 * return true for that case.
967 	 */
968 	if (spi < 0)
969 		return spi == -ERANGE;
970 	/* We allow overwriting existing unreferenced STACK_DYNPTR slots, see
971 	 * mark_stack_slots_dynptr which calls destroy_if_dynptr_stack_slot to
972 	 * ensure dynptr objects at the slots we are touching are completely
973 	 * destructed before we reinitialize them for a new one. For referenced
974 	 * ones, destroy_if_dynptr_stack_slot returns an error early instead of
975 	 * delaying it until the end where the user will get "Unreleased
976 	 * reference" error.
977 	 */
978 	return true;
979 }
980 
981 static bool is_dynptr_reg_valid_init(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
982 				     int spi)
983 {
984 	struct bpf_func_state *state = func(env, reg);
985 	int i;
986 
987 	/* This already represents first slot of initialized bpf_dynptr */
988 	if (reg->type == CONST_PTR_TO_DYNPTR)
989 		return true;
990 
991 	if (spi < 0)
992 		return false;
993 	if (!state->stack[spi].spilled_ptr.dynptr.first_slot)
994 		return false;
995 
996 	for (i = 0; i < BPF_REG_SIZE; i++) {
997 		if (state->stack[spi].slot_type[i] != STACK_DYNPTR ||
998 		    state->stack[spi - 1].slot_type[i] != STACK_DYNPTR)
999 			return false;
1000 	}
1001 
1002 	return true;
1003 }
1004 
1005 static bool is_dynptr_type_expected(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
1006 				    enum bpf_arg_type arg_type)
1007 {
1008 	struct bpf_func_state *state = func(env, reg);
1009 	enum bpf_dynptr_type dynptr_type;
1010 	int spi;
1011 
1012 	/* ARG_PTR_TO_DYNPTR takes any type of dynptr */
1013 	if (arg_type == ARG_PTR_TO_DYNPTR)
1014 		return true;
1015 
1016 	dynptr_type = arg_to_dynptr_type(arg_type);
1017 	if (reg->type == CONST_PTR_TO_DYNPTR) {
1018 		return reg->dynptr.type == dynptr_type;
1019 	} else {
1020 		spi = dynptr_get_spi(env, reg);
1021 		if (spi < 0)
1022 			return false;
1023 		return state->stack[spi].spilled_ptr.dynptr.type == dynptr_type;
1024 	}
1025 }
1026 
1027 /* The reg state of a pointer or a bounded scalar was saved when
1028  * it was spilled to the stack.
1029  */
1030 static bool is_spilled_reg(const struct bpf_stack_state *stack)
1031 {
1032 	return stack->slot_type[BPF_REG_SIZE - 1] == STACK_SPILL;
1033 }
1034 
1035 static void scrub_spilled_slot(u8 *stype)
1036 {
1037 	if (*stype != STACK_INVALID)
1038 		*stype = STACK_MISC;
1039 }
1040 
1041 static void print_verifier_state(struct bpf_verifier_env *env,
1042 				 const struct bpf_func_state *state,
1043 				 bool print_all)
1044 {
1045 	const struct bpf_reg_state *reg;
1046 	enum bpf_reg_type t;
1047 	int i;
1048 
1049 	if (state->frameno)
1050 		verbose(env, " frame%d:", state->frameno);
1051 	for (i = 0; i < MAX_BPF_REG; i++) {
1052 		reg = &state->regs[i];
1053 		t = reg->type;
1054 		if (t == NOT_INIT)
1055 			continue;
1056 		if (!print_all && !reg_scratched(env, i))
1057 			continue;
1058 		verbose(env, " R%d", i);
1059 		print_liveness(env, reg->live);
1060 		verbose(env, "=");
1061 		if (t == SCALAR_VALUE && reg->precise)
1062 			verbose(env, "P");
1063 		if ((t == SCALAR_VALUE || t == PTR_TO_STACK) &&
1064 		    tnum_is_const(reg->var_off)) {
1065 			/* reg->off should be 0 for SCALAR_VALUE */
1066 			verbose(env, "%s", t == SCALAR_VALUE ? "" : reg_type_str(env, t));
1067 			verbose(env, "%lld", reg->var_off.value + reg->off);
1068 		} else {
1069 			const char *sep = "";
1070 
1071 			verbose(env, "%s", reg_type_str(env, t));
1072 			if (base_type(t) == PTR_TO_BTF_ID)
1073 				verbose(env, "%s", kernel_type_name(reg->btf, reg->btf_id));
1074 			verbose(env, "(");
1075 /*
1076  * _a stands for append, was shortened to avoid multiline statements below.
1077  * This macro is used to output a comma separated list of attributes.
1078  */
1079 #define verbose_a(fmt, ...) ({ verbose(env, "%s" fmt, sep, __VA_ARGS__); sep = ","; })
1080 
1081 			if (reg->id)
1082 				verbose_a("id=%d", reg->id);
1083 			if (reg->ref_obj_id)
1084 				verbose_a("ref_obj_id=%d", reg->ref_obj_id);
1085 			if (type_is_non_owning_ref(reg->type))
1086 				verbose_a("%s", "non_own_ref");
1087 			if (t != SCALAR_VALUE)
1088 				verbose_a("off=%d", reg->off);
1089 			if (type_is_pkt_pointer(t))
1090 				verbose_a("r=%d", reg->range);
1091 			else if (base_type(t) == CONST_PTR_TO_MAP ||
1092 				 base_type(t) == PTR_TO_MAP_KEY ||
1093 				 base_type(t) == PTR_TO_MAP_VALUE)
1094 				verbose_a("ks=%d,vs=%d",
1095 					  reg->map_ptr->key_size,
1096 					  reg->map_ptr->value_size);
1097 			if (tnum_is_const(reg->var_off)) {
1098 				/* Typically an immediate SCALAR_VALUE, but
1099 				 * could be a pointer whose offset is too big
1100 				 * for reg->off
1101 				 */
1102 				verbose_a("imm=%llx", reg->var_off.value);
1103 			} else {
1104 				if (reg->smin_value != reg->umin_value &&
1105 				    reg->smin_value != S64_MIN)
1106 					verbose_a("smin=%lld", (long long)reg->smin_value);
1107 				if (reg->smax_value != reg->umax_value &&
1108 				    reg->smax_value != S64_MAX)
1109 					verbose_a("smax=%lld", (long long)reg->smax_value);
1110 				if (reg->umin_value != 0)
1111 					verbose_a("umin=%llu", (unsigned long long)reg->umin_value);
1112 				if (reg->umax_value != U64_MAX)
1113 					verbose_a("umax=%llu", (unsigned long long)reg->umax_value);
1114 				if (!tnum_is_unknown(reg->var_off)) {
1115 					char tn_buf[48];
1116 
1117 					tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
1118 					verbose_a("var_off=%s", tn_buf);
1119 				}
1120 				if (reg->s32_min_value != reg->smin_value &&
1121 				    reg->s32_min_value != S32_MIN)
1122 					verbose_a("s32_min=%d", (int)(reg->s32_min_value));
1123 				if (reg->s32_max_value != reg->smax_value &&
1124 				    reg->s32_max_value != S32_MAX)
1125 					verbose_a("s32_max=%d", (int)(reg->s32_max_value));
1126 				if (reg->u32_min_value != reg->umin_value &&
1127 				    reg->u32_min_value != U32_MIN)
1128 					verbose_a("u32_min=%d", (int)(reg->u32_min_value));
1129 				if (reg->u32_max_value != reg->umax_value &&
1130 				    reg->u32_max_value != U32_MAX)
1131 					verbose_a("u32_max=%d", (int)(reg->u32_max_value));
1132 			}
1133 #undef verbose_a
1134 
1135 			verbose(env, ")");
1136 		}
1137 	}
1138 	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
1139 		char types_buf[BPF_REG_SIZE + 1];
1140 		bool valid = false;
1141 		int j;
1142 
1143 		for (j = 0; j < BPF_REG_SIZE; j++) {
1144 			if (state->stack[i].slot_type[j] != STACK_INVALID)
1145 				valid = true;
1146 			types_buf[j] = slot_type_char[
1147 					state->stack[i].slot_type[j]];
1148 		}
1149 		types_buf[BPF_REG_SIZE] = 0;
1150 		if (!valid)
1151 			continue;
1152 		if (!print_all && !stack_slot_scratched(env, i))
1153 			continue;
1154 		verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
1155 		print_liveness(env, state->stack[i].spilled_ptr.live);
1156 		if (is_spilled_reg(&state->stack[i])) {
1157 			reg = &state->stack[i].spilled_ptr;
1158 			t = reg->type;
1159 			verbose(env, "=%s", t == SCALAR_VALUE ? "" : reg_type_str(env, t));
1160 			if (t == SCALAR_VALUE && reg->precise)
1161 				verbose(env, "P");
1162 			if (t == SCALAR_VALUE && tnum_is_const(reg->var_off))
1163 				verbose(env, "%lld", reg->var_off.value + reg->off);
1164 		} else {
1165 			verbose(env, "=%s", types_buf);
1166 		}
1167 	}
1168 	if (state->acquired_refs && state->refs[0].id) {
1169 		verbose(env, " refs=%d", state->refs[0].id);
1170 		for (i = 1; i < state->acquired_refs; i++)
1171 			if (state->refs[i].id)
1172 				verbose(env, ",%d", state->refs[i].id);
1173 	}
1174 	if (state->in_callback_fn)
1175 		verbose(env, " cb");
1176 	if (state->in_async_callback_fn)
1177 		verbose(env, " async_cb");
1178 	verbose(env, "\n");
1179 	mark_verifier_state_clean(env);
1180 }
1181 
1182 static inline u32 vlog_alignment(u32 pos)
1183 {
1184 	return round_up(max(pos + BPF_LOG_MIN_ALIGNMENT / 2, BPF_LOG_ALIGNMENT),
1185 			BPF_LOG_MIN_ALIGNMENT) - pos - 1;
1186 }
1187 
1188 static void print_insn_state(struct bpf_verifier_env *env,
1189 			     const struct bpf_func_state *state)
1190 {
1191 	if (env->prev_log_len && env->prev_log_len == env->log.len_used) {
1192 		/* remove new line character */
1193 		bpf_vlog_reset(&env->log, env->prev_log_len - 1);
1194 		verbose(env, "%*c;", vlog_alignment(env->prev_insn_print_len), ' ');
1195 	} else {
1196 		verbose(env, "%d:", env->insn_idx);
1197 	}
1198 	print_verifier_state(env, state, false);
1199 }
1200 
1201 /* Copy array src (n elements of size bytes each) to dst. dst is reallocated
1202  * if it's too small to hold src. This differs from krealloc since we don't
1203  * want to preserve the contents of dst.
1204  *
1205  * Leaves dst untouched if src is NULL or length is zero. Returns NULL if memory could
1206  * not be allocated.
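 *
 * E.g. copy_array(NULL, src, 4, 8, GFP_KERNEL) allocates at least 32 bytes
 * and copies 32 bytes from src, while copy_array(dst, NULL, 4, 8, GFP_KERNEL)
 * returns dst untouched.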
1207  */
1208 static void *copy_array(void *dst, const void *src, size_t n, size_t size, gfp_t flags)
1209 {
1210 	size_t alloc_bytes;
1211 	void *orig = dst;
1212 	size_t bytes;
1213 
1214 	if (ZERO_OR_NULL_PTR(src))
1215 		goto out;
1216 
1217 	if (unlikely(check_mul_overflow(n, size, &bytes)))
1218 		return NULL;
1219 
1220 	alloc_bytes = max(ksize(orig), kmalloc_size_roundup(bytes));
1221 	dst = krealloc(orig, alloc_bytes, flags);
1222 	if (!dst) {
1223 		kfree(orig);
1224 		return NULL;
1225 	}
1226 
1227 	memcpy(dst, src, bytes);
1228 out:
1229 	return dst ? dst : ZERO_SIZE_PTR;
1230 }
1231 
1232 /* Resize an array from old_n items to new_n items. The array is reallocated if it's too
1233  * small to hold new_n items. new items are zeroed out if the array grows.
1234  *
1235  * Contrary to krealloc_array, does not free arr if new_n is zero.
1236  */
1237 static void *realloc_array(void *arr, size_t old_n, size_t new_n, size_t size)
1238 {
1239 	size_t alloc_size;
1240 	void *new_arr;
1241 
1242 	if (!new_n || old_n == new_n)
1243 		goto out;
1244 
1245 	alloc_size = kmalloc_size_roundup(size_mul(new_n, size));
1246 	new_arr = krealloc(arr, alloc_size, GFP_KERNEL);
1247 	if (!new_arr) {
1248 		kfree(arr);
1249 		return NULL;
1250 	}
1251 	arr = new_arr;
1252 
1253 	if (new_n > old_n)
1254 		memset(arr + old_n * size, 0, (new_n - old_n) * size);
1255 
1256 out:
1257 	return arr ? arr : ZERO_SIZE_PTR;
1258 }
1259 
1260 static int copy_reference_state(struct bpf_func_state *dst, const struct bpf_func_state *src)
1261 {
1262 	dst->refs = copy_array(dst->refs, src->refs, src->acquired_refs,
1263 			       sizeof(struct bpf_reference_state), GFP_KERNEL);
1264 	if (!dst->refs)
1265 		return -ENOMEM;
1266 
1267 	dst->acquired_refs = src->acquired_refs;
1268 	return 0;
1269 }
1270 
1271 static int copy_stack_state(struct bpf_func_state *dst, const struct bpf_func_state *src)
1272 {
1273 	size_t n = src->allocated_stack / BPF_REG_SIZE;
1274 
1275 	dst->stack = copy_array(dst->stack, src->stack, n, sizeof(struct bpf_stack_state),
1276 				GFP_KERNEL);
1277 	if (!dst->stack)
1278 		return -ENOMEM;
1279 
1280 	dst->allocated_stack = src->allocated_stack;
1281 	return 0;
1282 }
1283 
1284 static int resize_reference_state(struct bpf_func_state *state, size_t n)
1285 {
1286 	state->refs = realloc_array(state->refs, state->acquired_refs, n,
1287 				    sizeof(struct bpf_reference_state));
1288 	if (!state->refs)
1289 		return -ENOMEM;
1290 
1291 	state->acquired_refs = n;
1292 	return 0;
1293 }
1294 
1295 static int grow_stack_state(struct bpf_func_state *state, int size)
1296 {
1297 	size_t old_n = state->allocated_stack / BPF_REG_SIZE, n = size / BPF_REG_SIZE;
1298 
1299 	if (old_n >= n)
1300 		return 0;
1301 
1302 	state->stack = realloc_array(state->stack, old_n, n, sizeof(struct bpf_stack_state));
1303 	if (!state->stack)
1304 		return -ENOMEM;
1305 
1306 	state->allocated_stack = size;
1307 	return 0;
1308 }
1309 
1310 /* Acquire a pointer id from the env and update the state->refs to include
1311  * this new pointer reference.
1312  * On success, returns a valid pointer id to associate with the register.
1313  * On failure, returns a negative errno.
1314  */
1315 static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx)
1316 {
1317 	struct bpf_func_state *state = cur_func(env);
1318 	int new_ofs = state->acquired_refs;
1319 	int id, err;
1320 
1321 	err = resize_reference_state(state, state->acquired_refs + 1);
1322 	if (err)
1323 		return err;
1324 	id = ++env->id_gen;
1325 	state->refs[new_ofs].id = id;
1326 	state->refs[new_ofs].insn_idx = insn_idx;
1327 	state->refs[new_ofs].callback_ref = state->in_callback_fn ? state->frameno : 0;
1328 
1329 	return id;
1330 }
1331 
1332 /* release function corresponding to acquire_reference_state(). Idempotent. */
1333 static int release_reference_state(struct bpf_func_state *state, int ptr_id)
1334 {
1335 	int i, last_idx;
1336 
1337 	last_idx = state->acquired_refs - 1;
1338 	for (i = 0; i < state->acquired_refs; i++) {
1339 		if (state->refs[i].id == ptr_id) {
1340 			/* Cannot release caller references in callbacks */
1341 			if (state->in_callback_fn && state->refs[i].callback_ref != state->frameno)
1342 				return -EINVAL;
1343 			if (last_idx && i != last_idx)
1344 				memcpy(&state->refs[i], &state->refs[last_idx],
1345 				       sizeof(*state->refs));
1346 			memset(&state->refs[last_idx], 0, sizeof(*state->refs));
1347 			state->acquired_refs--;
1348 			return 0;
1349 		}
1350 	}
1351 	return -EINVAL;
1352 }
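
/* The release above swaps the last entry into the freed slot: e.g. with
 * acquired refs [id 3, id 7, id 9], releasing id 7 leaves [id 3, id 9]
 * and acquired_refs = 2.
 */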
1353 
1354 static void free_func_state(struct bpf_func_state *state)
1355 {
1356 	if (!state)
1357 		return;
1358 	kfree(state->refs);
1359 	kfree(state->stack);
1360 	kfree(state);
1361 }
1362 
1363 static void clear_jmp_history(struct bpf_verifier_state *state)
1364 {
1365 	kfree(state->jmp_history);
1366 	state->jmp_history = NULL;
1367 	state->jmp_history_cnt = 0;
1368 }
1369 
1370 static void free_verifier_state(struct bpf_verifier_state *state,
1371 				bool free_self)
1372 {
1373 	int i;
1374 
1375 	for (i = 0; i <= state->curframe; i++) {
1376 		free_func_state(state->frame[i]);
1377 		state->frame[i] = NULL;
1378 	}
1379 	clear_jmp_history(state);
1380 	if (free_self)
1381 		kfree(state);
1382 }
1383 
1384 /* copy verifier state from src to dst growing dst stack space
1385  * when necessary to accommodate larger src stack
1386  */
1387 static int copy_func_state(struct bpf_func_state *dst,
1388 			   const struct bpf_func_state *src)
1389 {
1390 	int err;
1391 
1392 	memcpy(dst, src, offsetof(struct bpf_func_state, acquired_refs));
1393 	err = copy_reference_state(dst, src);
1394 	if (err)
1395 		return err;
1396 	return copy_stack_state(dst, src);
1397 }
1398 
1399 static int copy_verifier_state(struct bpf_verifier_state *dst_state,
1400 			       const struct bpf_verifier_state *src)
1401 {
1402 	struct bpf_func_state *dst;
1403 	int i, err;
1404 
1405 	dst_state->jmp_history = copy_array(dst_state->jmp_history, src->jmp_history,
1406 					    src->jmp_history_cnt, sizeof(struct bpf_idx_pair),
1407 					    GFP_USER);
1408 	if (!dst_state->jmp_history)
1409 		return -ENOMEM;
1410 	dst_state->jmp_history_cnt = src->jmp_history_cnt;
1411 
1412 	/* if dst has more stack frames than src, free them */
1413 	for (i = src->curframe + 1; i <= dst_state->curframe; i++) {
1414 		free_func_state(dst_state->frame[i]);
1415 		dst_state->frame[i] = NULL;
1416 	}
1417 	dst_state->speculative = src->speculative;
1418 	dst_state->active_rcu_lock = src->active_rcu_lock;
1419 	dst_state->curframe = src->curframe;
1420 	dst_state->active_lock.ptr = src->active_lock.ptr;
1421 	dst_state->active_lock.id = src->active_lock.id;
1422 	dst_state->branches = src->branches;
1423 	dst_state->parent = src->parent;
1424 	dst_state->first_insn_idx = src->first_insn_idx;
1425 	dst_state->last_insn_idx = src->last_insn_idx;
1426 	for (i = 0; i <= src->curframe; i++) {
1427 		dst = dst_state->frame[i];
1428 		if (!dst) {
1429 			dst = kzalloc(sizeof(*dst), GFP_KERNEL);
1430 			if (!dst)
1431 				return -ENOMEM;
1432 			dst_state->frame[i] = dst;
1433 		}
1434 		err = copy_func_state(dst, src->frame[i]);
1435 		if (err)
1436 			return err;
1437 	}
1438 	return 0;
1439 }
1440 
1441 static void update_branch_counts(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
1442 {
1443 	while (st) {
1444 		u32 br = --st->branches;
1445 
1446 		/* WARN_ON(br > 1) technically makes sense here,
1447 		 * but see comment in push_stack(), hence:
1448 		 */
1449 		WARN_ONCE((int)br < 0,
1450 			  "BUG update_branch_counts:branches_to_explore=%d\n",
1451 			  br);
1452 		if (br)
1453 			break;
1454 		st = st->parent;
1455 	}
1456 }
1457 
1458 static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx,
1459 		     int *insn_idx, bool pop_log)
1460 {
1461 	struct bpf_verifier_state *cur = env->cur_state;
1462 	struct bpf_verifier_stack_elem *elem, *head = env->head;
1463 	int err;
1464 
1465 	if (env->head == NULL)
1466 		return -ENOENT;
1467 
1468 	if (cur) {
1469 		err = copy_verifier_state(cur, &head->st);
1470 		if (err)
1471 			return err;
1472 	}
1473 	if (pop_log)
1474 		bpf_vlog_reset(&env->log, head->log_pos);
1475 	if (insn_idx)
1476 		*insn_idx = head->insn_idx;
1477 	if (prev_insn_idx)
1478 		*prev_insn_idx = head->prev_insn_idx;
1479 	elem = head->next;
1480 	free_verifier_state(&head->st, false);
1481 	kfree(head);
1482 	env->head = elem;
1483 	env->stack_size--;
1484 	return 0;
1485 }
1486 
1487 static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
1488 					     int insn_idx, int prev_insn_idx,
1489 					     bool speculative)
1490 {
1491 	struct bpf_verifier_state *cur = env->cur_state;
1492 	struct bpf_verifier_stack_elem *elem;
1493 	int err;
1494 
1495 	elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL);
1496 	if (!elem)
1497 		goto err;
1498 
1499 	elem->insn_idx = insn_idx;
1500 	elem->prev_insn_idx = prev_insn_idx;
1501 	elem->next = env->head;
1502 	elem->log_pos = env->log.len_used;
1503 	env->head = elem;
1504 	env->stack_size++;
1505 	err = copy_verifier_state(&elem->st, cur);
1506 	if (err)
1507 		goto err;
1508 	elem->st.speculative |= speculative;
1509 	if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) {
1510 		verbose(env, "The sequence of %d jumps is too complex.\n",
1511 			env->stack_size);
1512 		goto err;
1513 	}
1514 	if (elem->st.parent) {
1515 		++elem->st.parent->branches;
1516 		/* WARN_ON(branches > 2) technically makes sense here,
1517 		 * but
1518 		 * 1. speculative states will bump 'branches' for non-branch
1519 		 * instructions
1520 		 * 2. is_state_visited() heuristics may decide not to create
1521 		 * a new state for a sequence of branches and all such current
1522 		 * and cloned states will be pointing to a single parent state
1523 		 * which might have large 'branches' count.
1524 		 */
1525 	}
1526 	return &elem->st;
1527 err:
1528 	free_verifier_state(env->cur_state, true);
1529 	env->cur_state = NULL;
1530 	/* pop all elements and return */
1531 	while (!pop_stack(env, NULL, NULL, false));
1532 	return NULL;
1533 }
1534 
1535 #define CALLER_SAVED_REGS 6
1536 static const int caller_saved[CALLER_SAVED_REGS] = {
1537 	BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
1538 };
1539 
1540 /* This helper doesn't clear reg->id */
1541 static void ___mark_reg_known(struct bpf_reg_state *reg, u64 imm)
1542 {
1543 	reg->var_off = tnum_const(imm);
1544 	reg->smin_value = (s64)imm;
1545 	reg->smax_value = (s64)imm;
1546 	reg->umin_value = imm;
1547 	reg->umax_value = imm;
1548 
1549 	reg->s32_min_value = (s32)imm;
1550 	reg->s32_max_value = (s32)imm;
1551 	reg->u32_min_value = (u32)imm;
1552 	reg->u32_max_value = (u32)imm;
1553 }
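
/* E.g. ___mark_reg_known(reg, -1) above sets smin/smax to -1 and umin/umax
 * to U64_MAX: the same 64-bit pattern interpreted as signed and unsigned.
 */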
1554 
1555 /* Mark the unknown part of a register (variable offset or scalar value) as
1556  * known to have the value @imm.
1557  */
1558 static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm)
1559 {
1560 	/* Clear off and union(map_ptr, range) */
1561 	memset(((u8 *)reg) + sizeof(reg->type), 0,
1562 	       offsetof(struct bpf_reg_state, var_off) - sizeof(reg->type));
1563 	reg->id = 0;
1564 	reg->ref_obj_id = 0;
1565 	___mark_reg_known(reg, imm);
1566 }
1567 
1568 static void __mark_reg32_known(struct bpf_reg_state *reg, u64 imm)
1569 {
1570 	reg->var_off = tnum_const_subreg(reg->var_off, imm);
1571 	reg->s32_min_value = (s32)imm;
1572 	reg->s32_max_value = (s32)imm;
1573 	reg->u32_min_value = (u32)imm;
1574 	reg->u32_max_value = (u32)imm;
1575 }
1576 
1577 /* Mark the 'variable offset' part of a register as zero.  This should be
1578  * used only on registers holding a pointer type.
1579  */
1580 static void __mark_reg_known_zero(struct bpf_reg_state *reg)
1581 {
1582 	__mark_reg_known(reg, 0);
1583 }
1584 
1585 static void __mark_reg_const_zero(struct bpf_reg_state *reg)
1586 {
1587 	__mark_reg_known(reg, 0);
1588 	reg->type = SCALAR_VALUE;
1589 }
1590 
1591 static void mark_reg_known_zero(struct bpf_verifier_env *env,
1592 				struct bpf_reg_state *regs, u32 regno)
1593 {
1594 	if (WARN_ON(regno >= MAX_BPF_REG)) {
1595 		verbose(env, "mark_reg_known_zero(regs, %u)\n", regno);
1596 		/* Something bad happened, let's kill all regs */
1597 		for (regno = 0; regno < MAX_BPF_REG; regno++)
1598 			__mark_reg_not_init(env, regs + regno);
1599 		return;
1600 	}
1601 	__mark_reg_known_zero(regs + regno);
1602 }
1603 
1604 static void __mark_dynptr_reg(struct bpf_reg_state *reg, enum bpf_dynptr_type type,
1605 			      bool first_slot, int dynptr_id)
1606 {
1607 	/* reg->type has no meaning for STACK_DYNPTR, but when we set reg for
1608 	 * callback arguments, it does need to be CONST_PTR_TO_DYNPTR, so simply
1609 	 * set it unconditionally as it is ignored for STACK_DYNPTR anyway.
1610 	 */
1611 	__mark_reg_known_zero(reg);
1612 	reg->type = CONST_PTR_TO_DYNPTR;
1613 	/* Give each dynptr a unique id to uniquely associate slices to it. */
1614 	reg->id = dynptr_id;
1615 	reg->dynptr.type = type;
1616 	reg->dynptr.first_slot = first_slot;
1617 }
1618 
1619 static void mark_ptr_not_null_reg(struct bpf_reg_state *reg)
1620 {
1621 	if (base_type(reg->type) == PTR_TO_MAP_VALUE) {
1622 		const struct bpf_map *map = reg->map_ptr;
1623 
1624 		if (map->inner_map_meta) {
1625 			reg->type = CONST_PTR_TO_MAP;
1626 			reg->map_ptr = map->inner_map_meta;
1627 			/* transfer reg's id, which is unique for every
1628 			 * map_lookup_elem, as the UID of the inner map.
1629 			 */
1630 			if (btf_record_has_field(map->inner_map_meta->record, BPF_TIMER))
1631 				reg->map_uid = reg->id;
1632 		} else if (map->map_type == BPF_MAP_TYPE_XSKMAP) {
1633 			reg->type = PTR_TO_XDP_SOCK;
1634 		} else if (map->map_type == BPF_MAP_TYPE_SOCKMAP ||
1635 			   map->map_type == BPF_MAP_TYPE_SOCKHASH) {
1636 			reg->type = PTR_TO_SOCKET;
1637 		} else {
1638 			reg->type = PTR_TO_MAP_VALUE;
1639 		}
1640 		return;
1641 	}
1642 
1643 	reg->type &= ~PTR_MAYBE_NULL;
1644 }
1645 
1646 static void mark_reg_graph_node(struct bpf_reg_state *regs, u32 regno,
1647 				struct btf_field_graph_root *ds_head)
1648 {
1649 	__mark_reg_known_zero(&regs[regno]);
1650 	regs[regno].type = PTR_TO_BTF_ID | MEM_ALLOC;
1651 	regs[regno].btf = ds_head->btf;
1652 	regs[regno].btf_id = ds_head->value_btf_id;
1653 	regs[regno].off = ds_head->node_offset;
1654 }
1655 
1656 static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg)
1657 {
1658 	return type_is_pkt_pointer(reg->type);
1659 }
1660 
1661 static bool reg_is_pkt_pointer_any(const struct bpf_reg_state *reg)
1662 {
1663 	return reg_is_pkt_pointer(reg) ||
1664 	       reg->type == PTR_TO_PACKET_END;
1665 }
1666 
1667 /* Unmodified PTR_TO_PACKET[_META,_END] register from ctx access. */
1668 static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg,
1669 				    enum bpf_reg_type which)
1670 {
1671 	/* The register can already have a range from prior markings.
1672 	 * This is fine as long as it hasn't been advanced from its
1673 	 * origin.
1674 	 */
1675 	return reg->type == which &&
1676 	       reg->id == 0 &&
1677 	       reg->off == 0 &&
1678 	       tnum_equals_const(reg->var_off, 0);
1679 }
1680 
1681 /* Reset the min/max bounds of a register */
1682 static void __mark_reg_unbounded(struct bpf_reg_state *reg)
1683 {
1684 	reg->smin_value = S64_MIN;
1685 	reg->smax_value = S64_MAX;
1686 	reg->umin_value = 0;
1687 	reg->umax_value = U64_MAX;
1688 
1689 	reg->s32_min_value = S32_MIN;
1690 	reg->s32_max_value = S32_MAX;
1691 	reg->u32_min_value = 0;
1692 	reg->u32_max_value = U32_MAX;
1693 }
1694 
1695 static void __mark_reg64_unbounded(struct bpf_reg_state *reg)
1696 {
1697 	reg->smin_value = S64_MIN;
1698 	reg->smax_value = S64_MAX;
1699 	reg->umin_value = 0;
1700 	reg->umax_value = U64_MAX;
1701 }
1702 
1703 static void __mark_reg32_unbounded(struct bpf_reg_state *reg)
1704 {
1705 	reg->s32_min_value = S32_MIN;
1706 	reg->s32_max_value = S32_MAX;
1707 	reg->u32_min_value = 0;
1708 	reg->u32_max_value = U32_MAX;
1709 }
1710 
1711 static void __update_reg32_bounds(struct bpf_reg_state *reg)
1712 {
1713 	struct tnum var32_off = tnum_subreg(reg->var_off);
1714 
1715 	/* min signed is max(sign bit) | min(other bits) */
1716 	reg->s32_min_value = max_t(s32, reg->s32_min_value,
1717 			var32_off.value | (var32_off.mask & S32_MIN));
1718 	/* max signed is min(sign bit) | max(other bits) */
1719 	reg->s32_max_value = min_t(s32, reg->s32_max_value,
1720 			var32_off.value | (var32_off.mask & S32_MAX));
1721 	reg->u32_min_value = max_t(u32, reg->u32_min_value, (u32)var32_off.value);
1722 	reg->u32_max_value = min(reg->u32_max_value,
1723 				 (u32)(var32_off.value | var32_off.mask));
1724 }
1725 
1726 static void __update_reg64_bounds(struct bpf_reg_state *reg)
1727 {
1728 	/* min signed is max(sign bit) | min(other bits) */
1729 	reg->smin_value = max_t(s64, reg->smin_value,
1730 				reg->var_off.value | (reg->var_off.mask & S64_MIN));
1731 	/* max signed is min(sign bit) | max(other bits) */
1732 	reg->smax_value = min_t(s64, reg->smax_value,
1733 				reg->var_off.value | (reg->var_off.mask & S64_MAX));
1734 	reg->umin_value = max(reg->umin_value, reg->var_off.value);
1735 	reg->umax_value = min(reg->umax_value,
1736 			      reg->var_off.value | reg->var_off.mask);
1737 }
1738 
1739 static void __update_reg_bounds(struct bpf_reg_state *reg)
1740 {
1741 	__update_reg32_bounds(reg);
1742 	__update_reg64_bounds(reg);
1743 }
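
/* A worked example of the tnum-to-bounds refinement above, under the
 * assumption that nothing tighter is known yet: if var_off says the
 * value is known except for its low two bits, i.e. value=0x0 and
 * mask=0x3, then
 *
 *   umin = max(umin, 0x0)  // all unknown bits taken as 0
 *   umax = min(umax, 0x3)  // all unknown bits taken as 1
 *
 * and since the sign bit is a known 0 here, the signed bounds tighten
 * to the same [0x0, 0x3] range.
 */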
1744 
1745 /* Uses signed min/max values to inform unsigned, and vice-versa */
1746 static void __reg32_deduce_bounds(struct bpf_reg_state *reg)
1747 {
1748 	/* Learn sign from signed bounds.
1749 	 * If we cannot cross the sign boundary, then signed and unsigned bounds
1750 	 * are the same, so combine.  This works even in the negative case, e.g.
1751 	 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
1752 	 */
1753 	if (reg->s32_min_value >= 0 || reg->s32_max_value < 0) {
1754 		reg->s32_min_value = reg->u32_min_value =
1755 			max_t(u32, reg->s32_min_value, reg->u32_min_value);
1756 		reg->s32_max_value = reg->u32_max_value =
1757 			min_t(u32, reg->s32_max_value, reg->u32_max_value);
1758 		return;
1759 	}
1760 	/* Learn sign from unsigned bounds.  Signed bounds cross the sign
1761 	 * boundary, so we must be careful.
1762 	 */
1763 	if ((s32)reg->u32_max_value >= 0) {
1764 		/* Positive.  We can't learn anything from the smin, but smax
1765 		 * is positive, hence safe.
1766 		 */
1767 		reg->s32_min_value = reg->u32_min_value;
1768 		reg->s32_max_value = reg->u32_max_value =
1769 			min_t(u32, reg->s32_max_value, reg->u32_max_value);
1770 	} else if ((s32)reg->u32_min_value < 0) {
1771 		/* Negative.  We can't learn anything from the smax, but smin
1772 		 * is negative, hence safe.
1773 		 */
1774 		reg->s32_min_value = reg->u32_min_value =
1775 			max_t(u32, reg->s32_min_value, reg->u32_min_value);
1776 		reg->s32_max_value = reg->u32_max_value;
1777 	}
1778 }
1779 
1780 static void __reg64_deduce_bounds(struct bpf_reg_state *reg)
1781 {
1782 	/* Learn sign from signed bounds.
1783 	 * If we cannot cross the sign boundary, then signed and unsigned bounds
1784 	 * are the same, so combine.  This works even in the negative case, e.g.
1785 	 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
1786 	 */
1787 	if (reg->smin_value >= 0 || reg->smax_value < 0) {
1788 		reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
1789 							  reg->umin_value);
1790 		reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
1791 							  reg->umax_value);
1792 		return;
1793 	}
1794 	/* Learn sign from unsigned bounds.  Signed bounds cross the sign
1795 	 * boundary, so we must be careful.
1796 	 */
1797 	if ((s64)reg->umax_value >= 0) {
1798 		/* Positive.  We can't learn anything from the smin, but smax
1799 		 * is positive, hence safe.
1800 		 */
1801 		reg->smin_value = reg->umin_value;
1802 		reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
1803 							  reg->umax_value);
1804 	} else if ((s64)reg->umin_value < 0) {
1805 		/* Negative.  We can't learn anything from the smax, but smin
1806 		 * is negative, hence safe.
1807 		 */
1808 		reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
1809 							  reg->umin_value);
1810 		reg->smax_value = reg->umax_value;
1811 	}
1812 }
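
/* A worked example of the deduction above, with hypothetical input
 * bounds: suppose only umax = 0x7fffffff is known (umin = 0, signed
 * bounds still at their defaults). The signed bounds cross the sign
 * boundary, but (s64)umax >= 0, so the value is provably positive:
 * smin can be raised to umin (0) and smax lowered to umax, giving
 * [0, 0x7fffffff] in both domains.
 */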
1813 
1814 static void __reg_deduce_bounds(struct bpf_reg_state *reg)
1815 {
1816 	__reg32_deduce_bounds(reg);
1817 	__reg64_deduce_bounds(reg);
1818 }
1819 
1820 /* Attempts to improve var_off based on unsigned min/max information */
1821 static void __reg_bound_offset(struct bpf_reg_state *reg)
1822 {
1823 	struct tnum var64_off = tnum_intersect(reg->var_off,
1824 					       tnum_range(reg->umin_value,
1825 							  reg->umax_value));
1826 	struct tnum var32_off = tnum_intersect(tnum_subreg(reg->var_off),
1827 						tnum_range(reg->u32_min_value,
1828 							   reg->u32_max_value));
1829 
1830 	reg->var_off = tnum_or(tnum_clear_subreg(var64_off), var32_off);
1831 }
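
/* A worked example for the intersection above, with hypothetical
 * state: if var_off is fully unknown (value=0, mask=~0) but the
 * unsigned range is [0, 0x7f], then tnum_range(0, 0x7f) has mask 0x7f
 * and the intersection proves all bits above bit 6 are zero, i.e. the
 * new var_off is (value=0x0, mask=0x7f).
 */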
1832 
1833 static void reg_bounds_sync(struct bpf_reg_state *reg)
1834 {
1835 	/* We might have learned new bounds from the var_off. */
1836 	__update_reg_bounds(reg);
1837 	/* We might have learned something about the sign bit. */
1838 	__reg_deduce_bounds(reg);
1839 	/* We might have learned some bits from the bounds. */
1840 	__reg_bound_offset(reg);
1841 	/* Intersecting with the old var_off might have improved our bounds
1842 	 * slightly, e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
1843 	 * then new var_off is (0; 0x7f...fc) which improves our umax.
1844 	 */
1845 	__update_reg_bounds(reg);
1846 }
1847 
1848 static bool __reg32_bound_s64(s32 a)
1849 {
1850 	return a >= 0 && a <= S32_MAX;
1851 }
1852 
1853 static void __reg_assign_32_into_64(struct bpf_reg_state *reg)
1854 {
1855 	reg->umin_value = reg->u32_min_value;
1856 	reg->umax_value = reg->u32_max_value;
1857 
1858 	/* Attempt to pull the 32-bit signed bounds into the 64-bit bounds,
1859 	 * but they must be non-negative; otherwise fall back to worst-case
1860 	 * bounds and refine later from the tnum.
1861 	 */
1862 	if (__reg32_bound_s64(reg->s32_min_value) &&
1863 	    __reg32_bound_s64(reg->s32_max_value)) {
1864 		reg->smin_value = reg->s32_min_value;
1865 		reg->smax_value = reg->s32_max_value;
1866 	} else {
1867 		reg->smin_value = 0;
1868 		reg->smax_value = U32_MAX;
1869 	}
1870 }
1871 
1872 static void __reg_combine_32_into_64(struct bpf_reg_state *reg)
1873 {
1874 	/* Special case when the 64-bit register has its upper 32 bits
1875 	 * zeroed. This typically happens after a zext or a <<32, >>32
1876 	 * sequence and allows us to use the 32-bit bounds directly.
1877 	 */
1878 	if (tnum_equals_const(tnum_clear_subreg(reg->var_off), 0)) {
1879 		__reg_assign_32_into_64(reg);
1880 	} else {
1881 		/* Otherwise the best we can do is push the lower 32-bit known
1882 		 * and unknown bits into the register (var_off was set by the
1883 		 * jmp logic) and then learn as much as possible from the
1884 		 * 64-bit tnum's known and unknown bits. The previous smin/smax
1885 		 * bounds are invalid here because of the jmp32 compare, so mark
1886 		 * them unknown so they do not affect the tnum bounds calculation.
1887 		 */
1888 		__mark_reg64_unbounded(reg);
1889 	}
1890 	reg_bounds_sync(reg);
1891 }
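
/* One instruction pattern that produces the zeroed-upper-half case
 * handled above (an illustrative sketch, not the only way to get
 * there):
 *
 *   BPF_ALU64_IMM(BPF_LSH, BPF_REG_0, 32),
 *   BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
 *
 * After this sequence tnum_clear_subreg(var_off) is the constant 0,
 * so the 32-bit bounds can be assigned into the 64-bit bounds
 * directly.
 */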
1892 
1893 static bool __reg64_bound_s32(s64 a)
1894 {
1895 	return a >= S32_MIN && a <= S32_MAX;
1896 }
1897 
1898 static bool __reg64_bound_u32(u64 a)
1899 {
1900 	return a >= U32_MIN && a <= U32_MAX;
1901 }
1902 
1903 static void __reg_combine_64_into_32(struct bpf_reg_state *reg)
1904 {
1905 	__mark_reg32_unbounded(reg);
1906 	if (__reg64_bound_s32(reg->smin_value) && __reg64_bound_s32(reg->smax_value)) {
1907 		reg->s32_min_value = (s32)reg->smin_value;
1908 		reg->s32_max_value = (s32)reg->smax_value;
1909 	}
1910 	if (__reg64_bound_u32(reg->umin_value) && __reg64_bound_u32(reg->umax_value)) {
1911 		reg->u32_min_value = (u32)reg->umin_value;
1912 		reg->u32_max_value = (u32)reg->umax_value;
1913 	}
1914 	reg_bounds_sync(reg);
1915 }
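
/* A worked example of the 64-to-32 narrowing above, with hypothetical
 * bounds: if the 64-bit signed range is [-5, 10], both ends fit in
 * s32, so s32_min/s32_max become -5 and 10. If the unsigned 64-bit
 * range is still the full [0, U64_MAX], it does not fit in u32, so
 * the u32 bounds stay at their worst case until reg_bounds_sync()
 * refines them from the other bounds and the tnum.
 */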
1916 
1917 /* Mark a register as having a completely unknown (scalar) value. */
1918 static void __mark_reg_unknown(const struct bpf_verifier_env *env,
1919 			       struct bpf_reg_state *reg)
1920 {
1921 	/*
1922 	 * Clear type, off, and union(map_ptr, range) and
1923 	 * padding between 'type' and union
1924 	 */
1925 	memset(reg, 0, offsetof(struct bpf_reg_state, var_off));
1926 	reg->type = SCALAR_VALUE;
1927 	reg->id = 0;
1928 	reg->ref_obj_id = 0;
1929 	reg->var_off = tnum_unknown;
1930 	reg->frameno = 0;
1931 	reg->precise = !env->bpf_capable;
1932 	__mark_reg_unbounded(reg);
1933 }
1934 
1935 static void mark_reg_unknown(struct bpf_verifier_env *env,
1936 			     struct bpf_reg_state *regs, u32 regno)
1937 {
1938 	if (WARN_ON(regno >= MAX_BPF_REG)) {
1939 		verbose(env, "mark_reg_unknown(regs, %u)\n", regno);
1940 		/* Something bad happened, let's kill all regs except FP */
1941 		for (regno = 0; regno < BPF_REG_FP; regno++)
1942 			__mark_reg_not_init(env, regs + regno);
1943 		return;
1944 	}
1945 	__mark_reg_unknown(env, regs + regno);
1946 }
1947 
1948 static void __mark_reg_not_init(const struct bpf_verifier_env *env,
1949 				struct bpf_reg_state *reg)
1950 {
1951 	__mark_reg_unknown(env, reg);
1952 	reg->type = NOT_INIT;
1953 }
1954 
1955 static void mark_reg_not_init(struct bpf_verifier_env *env,
1956 			      struct bpf_reg_state *regs, u32 regno)
1957 {
1958 	if (WARN_ON(regno >= MAX_BPF_REG)) {
1959 		verbose(env, "mark_reg_not_init(regs, %u)\n", regno);
1960 		/* Something bad happened, let's kill all regs except FP */
1961 		for (regno = 0; regno < BPF_REG_FP; regno++)
1962 			__mark_reg_not_init(env, regs + regno);
1963 		return;
1964 	}
1965 	__mark_reg_not_init(env, regs + regno);
1966 }
1967 
1968 static void mark_btf_ld_reg(struct bpf_verifier_env *env,
1969 			    struct bpf_reg_state *regs, u32 regno,
1970 			    enum bpf_reg_type reg_type,
1971 			    struct btf *btf, u32 btf_id,
1972 			    enum bpf_type_flag flag)
1973 {
1974 	if (reg_type == SCALAR_VALUE) {
1975 		mark_reg_unknown(env, regs, regno);
1976 		return;
1977 	}
1978 	mark_reg_known_zero(env, regs, regno);
1979 	regs[regno].type = PTR_TO_BTF_ID | flag;
1980 	regs[regno].btf = btf;
1981 	regs[regno].btf_id = btf_id;
1982 }
1983 
1984 #define DEF_NOT_SUBREG	(0)
1985 static void init_reg_state(struct bpf_verifier_env *env,
1986 			   struct bpf_func_state *state)
1987 {
1988 	struct bpf_reg_state *regs = state->regs;
1989 	int i;
1990 
1991 	for (i = 0; i < MAX_BPF_REG; i++) {
1992 		mark_reg_not_init(env, regs, i);
1993 		regs[i].live = REG_LIVE_NONE;
1994 		regs[i].parent = NULL;
1995 		regs[i].subreg_def = DEF_NOT_SUBREG;
1996 	}
1997 
1998 	/* frame pointer */
1999 	regs[BPF_REG_FP].type = PTR_TO_STACK;
2000 	mark_reg_known_zero(env, regs, BPF_REG_FP);
2001 	regs[BPF_REG_FP].frameno = state->frameno;
2002 }
2003 
2004 #define BPF_MAIN_FUNC (-1)
2005 static void init_func_state(struct bpf_verifier_env *env,
2006 			    struct bpf_func_state *state,
2007 			    int callsite, int frameno, int subprogno)
2008 {
2009 	state->callsite = callsite;
2010 	state->frameno = frameno;
2011 	state->subprogno = subprogno;
2012 	state->callback_ret_range = tnum_range(0, 0);
2013 	init_reg_state(env, state);
2014 	mark_verifier_state_scratched(env);
2015 }
2016 
2017 /* Similar to push_stack(), but for async callbacks */
2018 static struct bpf_verifier_state *push_async_cb(struct bpf_verifier_env *env,
2019 						int insn_idx, int prev_insn_idx,
2020 						int subprog)
2021 {
2022 	struct bpf_verifier_stack_elem *elem;
2023 	struct bpf_func_state *frame;
2024 
2025 	elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL);
2026 	if (!elem)
2027 		goto err;
2028 
2029 	elem->insn_idx = insn_idx;
2030 	elem->prev_insn_idx = prev_insn_idx;
2031 	elem->next = env->head;
2032 	elem->log_pos = env->log.len_used;
2033 	env->head = elem;
2034 	env->stack_size++;
2035 	if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) {
2036 		verbose(env,
2037 			"The sequence of %d jumps is too complex for async cb.\n",
2038 			env->stack_size);
2039 		goto err;
2040 	}
2041 	/* Unlike push_stack(), do not copy_verifier_state().
2042 	 * The caller's state doesn't matter: this is an async callback,
2043 	 * and it starts on a fresh stack.
2044 	 * Initialize it similarly to do_check_common().
2045 	 */
2046 	elem->st.branches = 1;
2047 	frame = kzalloc(sizeof(*frame), GFP_KERNEL);
2048 	if (!frame)
2049 		goto err;
2050 	init_func_state(env, frame,
2051 			BPF_MAIN_FUNC /* callsite */,
2052 			0 /* frameno within this callchain */,
2053 			subprog /* subprog number within this prog */);
2054 	elem->st.frame[0] = frame;
2055 	return &elem->st;
2056 err:
2057 	free_verifier_state(env->cur_state, true);
2058 	env->cur_state = NULL;
2059 	/* pop all elements and return */
2060 	while (!pop_stack(env, NULL, NULL, false));
2061 	return NULL;
2062 }
2063 
2064 
2065 enum reg_arg_type {
2066 	SRC_OP,		/* register is used as source operand */
2067 	DST_OP,		/* register is used as destination operand */
2068 	DST_OP_NO_MARK	/* same as above, check only, don't mark */
2069 };
2070 
2071 static int cmp_subprogs(const void *a, const void *b)
2072 {
2073 	return ((struct bpf_subprog_info *)a)->start -
2074 	       ((struct bpf_subprog_info *)b)->start;
2075 }
2076 
2077 static int find_subprog(struct bpf_verifier_env *env, int off)
2078 {
2079 	struct bpf_subprog_info *p;
2080 
2081 	p = bsearch(&off, env->subprog_info, env->subprog_cnt,
2082 		    sizeof(env->subprog_info[0]), cmp_subprogs);
2083 	if (!p)
2084 		return -ENOENT;
2085 	return p - env->subprog_info;
2086 
2087 }
2088 
2089 static int add_subprog(struct bpf_verifier_env *env, int off)
2090 {
2091 	int insn_cnt = env->prog->len;
2092 	int ret;
2093 
2094 	if (off >= insn_cnt || off < 0) {
2095 		verbose(env, "call to invalid destination\n");
2096 		return -EINVAL;
2097 	}
2098 	ret = find_subprog(env, off);
2099 	if (ret >= 0)
2100 		return ret;
2101 	if (env->subprog_cnt >= BPF_MAX_SUBPROGS) {
2102 		verbose(env, "too many subprograms\n");
2103 		return -E2BIG;
2104 	}
2105 	/* determine subprog starts. The end is one before the next starts */
2106 	env->subprog_info[env->subprog_cnt++].start = off;
2107 	sort(env->subprog_info, env->subprog_cnt,
2108 	     sizeof(env->subprog_info[0]), cmp_subprogs, NULL);
2109 	return env->subprog_cnt - 1;
2110 }
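
/* An illustrative sketch of how subprog starts accumulate, for a
 * hypothetical two-function program:
 *
 *   0: BPF_MOV64_IMM(BPF_REG_1, 1),
 *   1: BPF_CALL_REL(1),		// pseudo call to insn 3
 *   2: BPF_EXIT_INSN(),
 *   3: BPF_MOV64_IMM(BPF_REG_0, 0),
 *   4: BPF_EXIT_INSN(),
 *
 * add_subprog(env, 3) records subprog_info[1].start = 3; the entry
 * subprog then implicitly spans [0, 3) since each subprog ends one
 * insn before the next one starts.
 */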
2111 
2112 #define MAX_KFUNC_DESCS 256
2113 #define MAX_KFUNC_BTFS	256
2114 
2115 struct bpf_kfunc_desc {
2116 	struct btf_func_model func_model;
2117 	u32 func_id;
2118 	s32 imm;
2119 	u16 offset;
2120 };
2121 
2122 struct bpf_kfunc_btf {
2123 	struct btf *btf;
2124 	struct module *module;
2125 	u16 offset;
2126 };
2127 
2128 struct bpf_kfunc_desc_tab {
2129 	struct bpf_kfunc_desc descs[MAX_KFUNC_DESCS];
2130 	u32 nr_descs;
2131 };
2132 
2133 struct bpf_kfunc_btf_tab {
2134 	struct bpf_kfunc_btf descs[MAX_KFUNC_BTFS];
2135 	u32 nr_descs;
2136 };
2137 
2138 static int kfunc_desc_cmp_by_id_off(const void *a, const void *b)
2139 {
2140 	const struct bpf_kfunc_desc *d0 = a;
2141 	const struct bpf_kfunc_desc *d1 = b;
2142 
2143 	/* func_id is not greater than BTF_MAX_TYPE */
2144 	return d0->func_id - d1->func_id ?: d0->offset - d1->offset;
2145 }
2146 
2147 static int kfunc_btf_cmp_by_off(const void *a, const void *b)
2148 {
2149 	const struct bpf_kfunc_btf *d0 = a;
2150 	const struct bpf_kfunc_btf *d1 = b;
2151 
2152 	return d0->offset - d1->offset;
2153 }
2154 
2155 static const struct bpf_kfunc_desc *
2156 find_kfunc_desc(const struct bpf_prog *prog, u32 func_id, u16 offset)
2157 {
2158 	struct bpf_kfunc_desc desc = {
2159 		.func_id = func_id,
2160 		.offset = offset,
2161 	};
2162 	struct bpf_kfunc_desc_tab *tab;
2163 
2164 	tab = prog->aux->kfunc_tab;
2165 	return bsearch(&desc, tab->descs, tab->nr_descs,
2166 		       sizeof(tab->descs[0]), kfunc_desc_cmp_by_id_off);
2167 }
2168 
2169 static struct btf *__find_kfunc_desc_btf(struct bpf_verifier_env *env,
2170 					 s16 offset)
2171 {
2172 	struct bpf_kfunc_btf kf_btf = { .offset = offset };
2173 	struct bpf_kfunc_btf_tab *tab;
2174 	struct bpf_kfunc_btf *b;
2175 	struct module *mod;
2176 	struct btf *btf;
2177 	int btf_fd;
2178 
2179 	tab = env->prog->aux->kfunc_btf_tab;
2180 	b = bsearch(&kf_btf, tab->descs, tab->nr_descs,
2181 		    sizeof(tab->descs[0]), kfunc_btf_cmp_by_off);
2182 	if (!b) {
2183 		if (tab->nr_descs == MAX_KFUNC_BTFS) {
2184 			verbose(env, "too many different module BTFs\n");
2185 			return ERR_PTR(-E2BIG);
2186 		}
2187 
2188 		if (bpfptr_is_null(env->fd_array)) {
2189 			verbose(env, "kfunc offset > 0 without fd_array is invalid\n");
2190 			return ERR_PTR(-EPROTO);
2191 		}
2192 
2193 		if (copy_from_bpfptr_offset(&btf_fd, env->fd_array,
2194 					    offset * sizeof(btf_fd),
2195 					    sizeof(btf_fd)))
2196 			return ERR_PTR(-EFAULT);
2197 
2198 		btf = btf_get_by_fd(btf_fd);
2199 		if (IS_ERR(btf)) {
2200 			verbose(env, "invalid module BTF fd specified\n");
2201 			return btf;
2202 		}
2203 
2204 		if (!btf_is_module(btf)) {
2205 			verbose(env, "BTF fd for kfunc is not a module BTF\n");
2206 			btf_put(btf);
2207 			return ERR_PTR(-EINVAL);
2208 		}
2209 
2210 		mod = btf_try_get_module(btf);
2211 		if (!mod) {
2212 			btf_put(btf);
2213 			return ERR_PTR(-ENXIO);
2214 		}
2215 
2216 		b = &tab->descs[tab->nr_descs++];
2217 		b->btf = btf;
2218 		b->module = mod;
2219 		b->offset = offset;
2220 
2221 		sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]),
2222 		     kfunc_btf_cmp_by_off, NULL);
2223 	}
2224 	return b->btf;
2225 }
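
/* A sketch of the fd_array contract assumed by the lookup above: for
 * a kfunc call instruction with off == 2, the verifier reads
 * fd_array[2] from the user-supplied bpfptr and treats it as a module
 * BTF fd, i.e. the offset is simply an index into the fd_array that
 * accompanied the program load command.
 */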
2226 
2227 void bpf_free_kfunc_btf_tab(struct bpf_kfunc_btf_tab *tab)
2228 {
2229 	if (!tab)
2230 		return;
2231 
2232 	while (tab->nr_descs--) {
2233 		module_put(tab->descs[tab->nr_descs].module);
2234 		btf_put(tab->descs[tab->nr_descs].btf);
2235 	}
2236 	kfree(tab);
2237 }
2238 
2239 static struct btf *find_kfunc_desc_btf(struct bpf_verifier_env *env, s16 offset)
2240 {
2241 	if (offset) {
2242 		if (offset < 0) {
2243 			/* In the future, this could be allowed in order to increase
2244 			 * the limit of the fd index into fd_array, interpreted as a u16.
2245 			 */
2246 			verbose(env, "negative offset disallowed for kernel module function call\n");
2247 			return ERR_PTR(-EINVAL);
2248 		}
2249 
2250 		return __find_kfunc_desc_btf(env, offset);
2251 	}
2252 	return btf_vmlinux ?: ERR_PTR(-ENOENT);
2253 }
2254 
2255 static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id, s16 offset)
2256 {
2257 	const struct btf_type *func, *func_proto;
2258 	struct bpf_kfunc_btf_tab *btf_tab;
2259 	struct bpf_kfunc_desc_tab *tab;
2260 	struct bpf_prog_aux *prog_aux;
2261 	struct bpf_kfunc_desc *desc;
2262 	const char *func_name;
2263 	struct btf *desc_btf;
2264 	unsigned long call_imm;
2265 	unsigned long addr;
2266 	int err;
2267 
2268 	prog_aux = env->prog->aux;
2269 	tab = prog_aux->kfunc_tab;
2270 	btf_tab = prog_aux->kfunc_btf_tab;
2271 	if (!tab) {
2272 		if (!btf_vmlinux) {
2273 			verbose(env, "calling kernel function is not supported without CONFIG_DEBUG_INFO_BTF\n");
2274 			return -ENOTSUPP;
2275 		}
2276 
2277 		if (!env->prog->jit_requested) {
2278 			verbose(env, "JIT is required for calling kernel function\n");
2279 			return -ENOTSUPP;
2280 		}
2281 
2282 		if (!bpf_jit_supports_kfunc_call()) {
2283 			verbose(env, "JIT does not support calling kernel function\n");
2284 			return -ENOTSUPP;
2285 		}
2286 
2287 		if (!env->prog->gpl_compatible) {
2288 			verbose(env, "cannot call kernel function from non-GPL compatible program\n");
2289 			return -EINVAL;
2290 		}
2291 
2292 		tab = kzalloc(sizeof(*tab), GFP_KERNEL);
2293 		if (!tab)
2294 			return -ENOMEM;
2295 		prog_aux->kfunc_tab = tab;
2296 	}
2297 
2298 	/* func_id == 0 is always invalid, but instead of returning an error, be
2299 	 * conservative and wait until the dead code elimination pass before
2300 	 * erroring out, so that invalid calls that get pruned away may still
2301 	 * appear in BPF programs loaded from userspace.  It is also required
2302 	 * that offset be untouched for such calls.
2303 	 */
2304 	if (!func_id && !offset)
2305 		return 0;
2306 
2307 	if (!btf_tab && offset) {
2308 		btf_tab = kzalloc(sizeof(*btf_tab), GFP_KERNEL);
2309 		if (!btf_tab)
2310 			return -ENOMEM;
2311 		prog_aux->kfunc_btf_tab = btf_tab;
2312 	}
2313 
2314 	desc_btf = find_kfunc_desc_btf(env, offset);
2315 	if (IS_ERR(desc_btf)) {
2316 		verbose(env, "failed to find BTF for kernel function\n");
2317 		return PTR_ERR(desc_btf);
2318 	}
2319 
2320 	if (find_kfunc_desc(env->prog, func_id, offset))
2321 		return 0;
2322 
2323 	if (tab->nr_descs == MAX_KFUNC_DESCS) {
2324 		verbose(env, "too many different kernel function calls\n");
2325 		return -E2BIG;
2326 	}
2327 
2328 	func = btf_type_by_id(desc_btf, func_id);
2329 	if (!func || !btf_type_is_func(func)) {
2330 		verbose(env, "kernel btf_id %u is not a function\n",
2331 			func_id);
2332 		return -EINVAL;
2333 	}
2334 	func_proto = btf_type_by_id(desc_btf, func->type);
2335 	if (!func_proto || !btf_type_is_func_proto(func_proto)) {
2336 		verbose(env, "kernel function btf_id %u does not have a valid func_proto\n",
2337 			func_id);
2338 		return -EINVAL;
2339 	}
2340 
2341 	func_name = btf_name_by_offset(desc_btf, func->name_off);
2342 	addr = kallsyms_lookup_name(func_name);
2343 	if (!addr) {
2344 		verbose(env, "cannot find address for kernel function %s\n",
2345 			func_name);
2346 		return -EINVAL;
2347 	}
2348 
2349 	call_imm = BPF_CALL_IMM(addr);
2350 	/* Check whether or not the relative offset overflows desc->imm */
2351 	if ((unsigned long)(s32)call_imm != call_imm) {
2352 		verbose(env, "address of kernel function %s is out of range\n",
2353 			func_name);
2354 		return -EINVAL;
2355 	}
2356 
2357 	if (bpf_dev_bound_kfunc_id(func_id)) {
2358 		err = bpf_dev_bound_kfunc_check(&env->log, prog_aux);
2359 		if (err)
2360 			return err;
2361 	}
2362 
2363 	desc = &tab->descs[tab->nr_descs++];
2364 	desc->func_id = func_id;
2365 	desc->imm = call_imm;
2366 	desc->offset = offset;
2367 	err = btf_distill_func_proto(&env->log, desc_btf,
2368 				     func_proto, func_name,
2369 				     &desc->func_model);
2370 	if (!err)
2371 		sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]),
2372 		     kfunc_desc_cmp_by_id_off, NULL);
2373 	return err;
2374 }
2375 
2376 static int kfunc_desc_cmp_by_imm(const void *a, const void *b)
2377 {
2378 	const struct bpf_kfunc_desc *d0 = a;
2379 	const struct bpf_kfunc_desc *d1 = b;
2380 
2381 	if (d0->imm > d1->imm)
2382 		return 1;
2383 	else if (d0->imm < d1->imm)
2384 		return -1;
2385 	return 0;
2386 }
2387 
2388 static void sort_kfunc_descs_by_imm(struct bpf_prog *prog)
2389 {
2390 	struct bpf_kfunc_desc_tab *tab;
2391 
2392 	tab = prog->aux->kfunc_tab;
2393 	if (!tab)
2394 		return;
2395 
2396 	sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]),
2397 	     kfunc_desc_cmp_by_imm, NULL);
2398 }
2399 
2400 bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog)
2401 {
2402 	return !!prog->aux->kfunc_tab;
2403 }
2404 
2405 const struct btf_func_model *
2406 bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
2407 			 const struct bpf_insn *insn)
2408 {
2409 	const struct bpf_kfunc_desc desc = {
2410 		.imm = insn->imm,
2411 	};
2412 	const struct bpf_kfunc_desc *res;
2413 	struct bpf_kfunc_desc_tab *tab;
2414 
2415 	tab = prog->aux->kfunc_tab;
2416 	res = bsearch(&desc, tab->descs, tab->nr_descs,
2417 		      sizeof(tab->descs[0]), kfunc_desc_cmp_by_imm);
2418 
2419 	return res ? &res->func_model : NULL;
2420 }
2421 
2422 static int add_subprog_and_kfunc(struct bpf_verifier_env *env)
2423 {
2424 	struct bpf_subprog_info *subprog = env->subprog_info;
2425 	struct bpf_insn *insn = env->prog->insnsi;
2426 	int i, ret, insn_cnt = env->prog->len;
2427 
2428 	/* Add entry function. */
2429 	ret = add_subprog(env, 0);
2430 	if (ret)
2431 		return ret;
2432 
2433 	for (i = 0; i < insn_cnt; i++, insn++) {
2434 		if (!bpf_pseudo_func(insn) && !bpf_pseudo_call(insn) &&
2435 		    !bpf_pseudo_kfunc_call(insn))
2436 			continue;
2437 
2438 		if (!env->bpf_capable) {
2439 			verbose(env, "loading/calling other bpf or kernel functions is allowed only for CAP_BPF and CAP_SYS_ADMIN\n");
2440 			return -EPERM;
2441 		}
2442 
2443 		if (bpf_pseudo_func(insn) || bpf_pseudo_call(insn))
2444 			ret = add_subprog(env, i + insn->imm + 1);
2445 		else
2446 			ret = add_kfunc_call(env, insn->imm, insn->off);
2447 
2448 		if (ret < 0)
2449 			return ret;
2450 	}
2451 
2452 	/* Add a fake 'exit' subprog to simplify the subprog iteration
2453 	 * logic. 'subprog_cnt' should not be increased.
2454 	 */
2455 	subprog[env->subprog_cnt].start = insn_cnt;
2456 
2457 	if (env->log.level & BPF_LOG_LEVEL2)
2458 		for (i = 0; i < env->subprog_cnt; i++)
2459 			verbose(env, "func#%d @%d\n", i, subprog[i].start);
2460 
2461 	return 0;
2462 }
2463 
2464 static int check_subprogs(struct bpf_verifier_env *env)
2465 {
2466 	int i, subprog_start, subprog_end, off, cur_subprog = 0;
2467 	struct bpf_subprog_info *subprog = env->subprog_info;
2468 	struct bpf_insn *insn = env->prog->insnsi;
2469 	int insn_cnt = env->prog->len;
2470 
2471 	/* now check that all jumps are within the same subprog */
2472 	subprog_start = subprog[cur_subprog].start;
2473 	subprog_end = subprog[cur_subprog + 1].start;
2474 	for (i = 0; i < insn_cnt; i++) {
2475 		u8 code = insn[i].code;
2476 
2477 		if (code == (BPF_JMP | BPF_CALL) &&
2478 		    insn[i].imm == BPF_FUNC_tail_call &&
2479 		    insn[i].src_reg != BPF_PSEUDO_CALL)
2480 			subprog[cur_subprog].has_tail_call = true;
2481 		if (BPF_CLASS(code) == BPF_LD &&
2482 		    (BPF_MODE(code) == BPF_ABS || BPF_MODE(code) == BPF_IND))
2483 			subprog[cur_subprog].has_ld_abs = true;
2484 		if (BPF_CLASS(code) != BPF_JMP && BPF_CLASS(code) != BPF_JMP32)
2485 			goto next;
2486 		if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL)
2487 			goto next;
2488 		off = i + insn[i].off + 1;
2489 		if (off < subprog_start || off >= subprog_end) {
2490 			verbose(env, "jump out of range from insn %d to %d\n", i, off);
2491 			return -EINVAL;
2492 		}
2493 next:
2494 		if (i == subprog_end - 1) {
2495 			/* to avoid fall-through from one subprog into another
2496 			 * the last insn of the subprog should be either exit
2497 			 * or unconditional jump back
2498 			 */
2499 			if (code != (BPF_JMP | BPF_EXIT) &&
2500 			    code != (BPF_JMP | BPF_JA)) {
2501 				verbose(env, "last insn is not an exit or jmp\n");
2502 				return -EINVAL;
2503 			}
2504 			subprog_start = subprog_end;
2505 			cur_subprog++;
2506 			if (cur_subprog < env->subprog_cnt)
2507 				subprog_end = subprog[cur_subprog + 1].start;
2508 		}
2509 	}
2510 	return 0;
2511 }
2512 
2513 /* Parentage chain of this register (or stack slot) should take care of all
2514  * issues like callee-saved registers, stack slot allocation time, etc.
2515  */
2516 static int mark_reg_read(struct bpf_verifier_env *env,
2517 			 const struct bpf_reg_state *state,
2518 			 struct bpf_reg_state *parent, u8 flag)
2519 {
2520 	bool writes = parent == state->parent; /* Observe write marks */
2521 	int cnt = 0;
2522 
2523 	while (parent) {
2524 		/* if read wasn't screened by an earlier write ... */
2525 		if (writes && state->live & REG_LIVE_WRITTEN)
2526 			break;
2527 		if (parent->live & REG_LIVE_DONE) {
2528 			verbose(env, "verifier BUG type %s var_off %lld off %d\n",
2529 				reg_type_str(env, parent->type),
2530 				parent->var_off.value, parent->off);
2531 			return -EFAULT;
2532 		}
2533 		/* The first condition is more likely to be true than the
2534 		 * second, so check it first.
2535 		 */
2536 		if ((parent->live & REG_LIVE_READ) == flag ||
2537 		    parent->live & REG_LIVE_READ64)
2538 			/* The parentage chain never changes and
2539 			 * this parent was already marked as LIVE_READ.
2540 			 * There is no need to keep walking the chain again and
2541 			 * keep re-marking all parents as LIVE_READ.
2542 			 * This case happens when the same register is read
2543 			 * multiple times without writes into it in-between.
2544 			 * Also, if parent has the stronger REG_LIVE_READ64 set,
2545 			 * then no need to set the weak REG_LIVE_READ32.
2546 			 */
2547 			break;
2548 		/* ... then we depend on parent's value */
2549 		parent->live |= flag;
2550 		/* REG_LIVE_READ64 overrides REG_LIVE_READ32. */
2551 		if (flag == REG_LIVE_READ64)
2552 			parent->live &= ~REG_LIVE_READ32;
2553 		state = parent;
2554 		parent = state->parent;
2555 		writes = true;
2556 		cnt++;
2557 	}
2558 
2559 	if (env->longest_mark_read_walk < cnt)
2560 		env->longest_mark_read_walk = cnt;
2561 	return 0;
2562 }
2563 
2564 static int mark_dynptr_read(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
2565 {
2566 	struct bpf_func_state *state = func(env, reg);
2567 	int spi, ret;
2568 
2569 	/* For CONST_PTR_TO_DYNPTR, it must have already been done by
2570 	 * check_reg_arg in check_helper_call and mark_btf_func_reg_size in
2571 	 * check_kfunc_call.
2572 	 */
2573 	if (reg->type == CONST_PTR_TO_DYNPTR)
2574 		return 0;
2575 	spi = dynptr_get_spi(env, reg);
2576 	if (spi < 0)
2577 		return spi;
2578 	/* Caller ensures dynptr is valid and initialized, which means spi is in
2579 	 * bounds and spi is the first dynptr slot. Simply mark stack slot as
2580 	 * read.
2581 	 */
2582 	ret = mark_reg_read(env, &state->stack[spi].spilled_ptr,
2583 			    state->stack[spi].spilled_ptr.parent, REG_LIVE_READ64);
2584 	if (ret)
2585 		return ret;
2586 	return mark_reg_read(env, &state->stack[spi - 1].spilled_ptr,
2587 			     state->stack[spi - 1].spilled_ptr.parent, REG_LIVE_READ64);
2588 }
2589 
2590 /* This function is supposed to be used by the following 32-bit optimization
2591  * code only. It returns TRUE if the source or destination register operates
2592  * on 64-bit, otherwise it returns FALSE.
2593  */
2594 static bool is_reg64(struct bpf_verifier_env *env, struct bpf_insn *insn,
2595 		     u32 regno, struct bpf_reg_state *reg, enum reg_arg_type t)
2596 {
2597 	u8 code, class, op;
2598 
2599 	code = insn->code;
2600 	class = BPF_CLASS(code);
2601 	op = BPF_OP(code);
2602 	if (class == BPF_JMP) {
2603 		/* BPF_EXIT for "main" will reach here. Return TRUE
2604 		 * conservatively.
2605 		 */
2606 		if (op == BPF_EXIT)
2607 			return true;
2608 		if (op == BPF_CALL) {
2609 			/* A BPF-to-BPF call will reach here because caller-saved
2610 			 * clobbers are marked with DST_OP_NO_MARK; we don't care
2611 			 * about their register defs because they are anyway
2612 			 * marked as NOT_INIT already.
2613 			 */
2614 			if (insn->src_reg == BPF_PSEUDO_CALL)
2615 				return false;
2616 			/* Helper call will reach here because of arg type
2617 			 * check, conservatively return TRUE.
2618 			 */
2619 			if (t == SRC_OP)
2620 				return true;
2621 
2622 			return false;
2623 		}
2624 	}
2625 
2626 	if (class == BPF_ALU64 || class == BPF_JMP ||
2627 	    /* BPF_END always uses the BPF_ALU class. */
2628 	    (class == BPF_ALU && op == BPF_END && insn->imm == 64))
2629 		return true;
2630 
2631 	if (class == BPF_ALU || class == BPF_JMP32)
2632 		return false;
2633 
2634 	if (class == BPF_LDX) {
2635 		if (t != SRC_OP)
2636 			return BPF_SIZE(code) == BPF_DW;
2637 		/* LDX source must be ptr. */
2638 		return true;
2639 	}
2640 
2641 	if (class == BPF_STX) {
2642 		/* BPF_STX (including atomic variants) has multiple source
2643 		 * operands, one of which is a ptr. Check whether the caller is
2644 		 * asking about it.
2645 		 */
2646 		if (t == SRC_OP && reg->type != SCALAR_VALUE)
2647 			return true;
2648 		return BPF_SIZE(code) == BPF_DW;
2649 	}
2650 
2651 	if (class == BPF_LD) {
2652 		u8 mode = BPF_MODE(code);
2653 
2654 		/* LD_IMM64 */
2655 		if (mode == BPF_IMM)
2656 			return true;
2657 
2658 		/* Both LD_IND and LD_ABS return 32-bit data. */
2659 		if (t != SRC_OP)
2660 			return false;
2661 
2662 		/* Implicit ctx ptr. */
2663 		if (regno == BPF_REG_6)
2664 			return true;
2665 
2666 		/* Explicit source could be any width. */
2667 		return true;
2668 	}
2669 
2670 	if (class == BPF_ST)
2671 		/* The only source register for BPF_ST is a ptr. */
2672 		return true;
2673 
2674 	/* Conservatively return true at default. */
2675 	return true;
2676 }
2677 
2678 /* Return the regno defined by the insn, or -1. */
2679 static int insn_def_regno(const struct bpf_insn *insn)
2680 {
2681 	switch (BPF_CLASS(insn->code)) {
2682 	case BPF_JMP:
2683 	case BPF_JMP32:
2684 	case BPF_ST:
2685 		return -1;
2686 	case BPF_STX:
2687 		if (BPF_MODE(insn->code) == BPF_ATOMIC &&
2688 		    (insn->imm & BPF_FETCH)) {
2689 			if (insn->imm == BPF_CMPXCHG)
2690 				return BPF_REG_0;
2691 			else
2692 				return insn->src_reg;
2693 		} else {
2694 			return -1;
2695 		}
2696 	default:
2697 		return insn->dst_reg;
2698 	}
2699 }
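
/* An example of the one non-obvious case above: for an atomic
 * compare-and-exchange such as
 *
 *   BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_10, BPF_REG_1, -8),
 *
 * the old memory value is returned in R0, not in the src or dst
 * register, so insn_def_regno() reports BPF_REG_0. Other fetching
 * atomics (e.g. BPF_ADD | BPF_FETCH) return the result in src_reg.
 */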
2700 
2701 /* Return TRUE if INSN has defined any 32-bit value explicitly. */
2702 static bool insn_has_def32(struct bpf_verifier_env *env, struct bpf_insn *insn)
2703 {
2704 	int dst_reg = insn_def_regno(insn);
2705 
2706 	if (dst_reg == -1)
2707 		return false;
2708 
2709 	return !is_reg64(env, insn, dst_reg, NULL, DST_OP);
2710 }
2711 
2712 static void mark_insn_zext(struct bpf_verifier_env *env,
2713 			   struct bpf_reg_state *reg)
2714 {
2715 	s32 def_idx = reg->subreg_def;
2716 
2717 	if (def_idx == DEF_NOT_SUBREG)
2718 		return;
2719 
2720 	env->insn_aux_data[def_idx - 1].zext_dst = true;
2721 	/* The dst will be zero extended, so won't be sub-register anymore. */
2722 	reg->subreg_def = DEF_NOT_SUBREG;
2723 }
2724 
2725 static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
2726 			 enum reg_arg_type t)
2727 {
2728 	struct bpf_verifier_state *vstate = env->cur_state;
2729 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
2730 	struct bpf_insn *insn = env->prog->insnsi + env->insn_idx;
2731 	struct bpf_reg_state *reg, *regs = state->regs;
2732 	bool rw64;
2733 
2734 	if (regno >= MAX_BPF_REG) {
2735 		verbose(env, "R%d is invalid\n", regno);
2736 		return -EINVAL;
2737 	}
2738 
2739 	mark_reg_scratched(env, regno);
2740 
2741 	reg = &regs[regno];
2742 	rw64 = is_reg64(env, insn, regno, reg, t);
2743 	if (t == SRC_OP) {
2744 		/* check whether register used as source operand can be read */
2745 		if (reg->type == NOT_INIT) {
2746 			verbose(env, "R%d !read_ok\n", regno);
2747 			return -EACCES;
2748 		}
2749 		/* We don't need to worry about FP liveness because it's read-only */
2750 		if (regno == BPF_REG_FP)
2751 			return 0;
2752 
2753 		if (rw64)
2754 			mark_insn_zext(env, reg);
2755 
2756 		return mark_reg_read(env, reg, reg->parent,
2757 				     rw64 ? REG_LIVE_READ64 : REG_LIVE_READ32);
2758 	} else {
2759 		/* check whether register used as dest operand can be written to */
2760 		if (regno == BPF_REG_FP) {
2761 			verbose(env, "frame pointer is read only\n");
2762 			return -EACCES;
2763 		}
2764 		reg->live |= REG_LIVE_WRITTEN;
2765 		reg->subreg_def = rw64 ? DEF_NOT_SUBREG : env->insn_idx + 1;
2766 		if (t == DST_OP)
2767 			mark_reg_unknown(env, regs, regno);
2768 	}
2769 	return 0;
2770 }
2771 
2772 static void mark_jmp_point(struct bpf_verifier_env *env, int idx)
2773 {
2774 	env->insn_aux_data[idx].jmp_point = true;
2775 }
2776 
2777 static bool is_jmp_point(struct bpf_verifier_env *env, int insn_idx)
2778 {
2779 	return env->insn_aux_data[insn_idx].jmp_point;
2780 }
2781 
2782 /* for any branch, call, exit record the history of jmps in the given state */
2783 static int push_jmp_history(struct bpf_verifier_env *env,
2784 			    struct bpf_verifier_state *cur)
2785 {
2786 	u32 cnt = cur->jmp_history_cnt;
2787 	struct bpf_idx_pair *p;
2788 	size_t alloc_size;
2789 
2790 	if (!is_jmp_point(env, env->insn_idx))
2791 		return 0;
2792 
2793 	cnt++;
2794 	alloc_size = kmalloc_size_roundup(size_mul(cnt, sizeof(*p)));
2795 	p = krealloc(cur->jmp_history, alloc_size, GFP_USER);
2796 	if (!p)
2797 		return -ENOMEM;
2798 	p[cnt - 1].idx = env->insn_idx;
2799 	p[cnt - 1].prev_idx = env->prev_insn_idx;
2800 	cur->jmp_history = p;
2801 	cur->jmp_history_cnt = cnt;
2802 	return 0;
2803 }
2804 
2805 /* Backtrack one insn at a time. If idx is not at the top of recorded
2806  * history then the previous instruction came from straight-line execution.
2807  */
2808 static int get_prev_insn_idx(struct bpf_verifier_state *st, int i,
2809 			     u32 *history)
2810 {
2811 	u32 cnt = *history;
2812 
2813 	if (cnt && st->jmp_history[cnt - 1].idx == i) {
2814 		i = st->jmp_history[cnt - 1].prev_idx;
2815 		(*history)--;
2816 	} else {
2817 		i--;
2818 	}
2819 	return i;
2820 }
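
/* A small worked example of the history walk above: suppose the state
 * recorded jmp_history = [{idx=7, prev_idx=3}] (a single taken jump
 * from insn 3 to insn 7). Backtracking from i == 7 consumes that entry
 * and returns 3; from any other i the predecessor is simply i - 1,
 * i.e. straight-line execution.
 */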
2821 
2822 static const char *disasm_kfunc_name(void *data, const struct bpf_insn *insn)
2823 {
2824 	const struct btf_type *func;
2825 	struct btf *desc_btf;
2826 
2827 	if (insn->src_reg != BPF_PSEUDO_KFUNC_CALL)
2828 		return NULL;
2829 
2830 	desc_btf = find_kfunc_desc_btf(data, insn->off);
2831 	if (IS_ERR(desc_btf))
2832 		return "<error>";
2833 
2834 	func = btf_type_by_id(desc_btf, insn->imm);
2835 	return btf_name_by_offset(desc_btf, func->name_off);
2836 }
2837 
2838 /* For a given verifier state, backtrack_insn() is called from the last insn
2839  * to the first insn. Its purpose is to compute a bitmask of registers and
2840  * stack slots that need precision in the parent verifier state.
2841  */
2842 static int backtrack_insn(struct bpf_verifier_env *env, int idx,
2843 			  u32 *reg_mask, u64 *stack_mask)
2844 {
2845 	const struct bpf_insn_cbs cbs = {
2846 		.cb_call	= disasm_kfunc_name,
2847 		.cb_print	= verbose,
2848 		.private_data	= env,
2849 	};
2850 	struct bpf_insn *insn = env->prog->insnsi + idx;
2851 	u8 class = BPF_CLASS(insn->code);
2852 	u8 opcode = BPF_OP(insn->code);
2853 	u8 mode = BPF_MODE(insn->code);
2854 	u32 dreg = 1u << insn->dst_reg;
2855 	u32 sreg = 1u << insn->src_reg;
2856 	u32 spi;
2857 
2858 	if (insn->code == 0)
2859 		return 0;
2860 	if (env->log.level & BPF_LOG_LEVEL2) {
2861 		verbose(env, "regs=%x stack=%llx before ", *reg_mask, *stack_mask);
2862 		verbose(env, "%d: ", idx);
2863 		print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
2864 	}
2865 
2866 	if (class == BPF_ALU || class == BPF_ALU64) {
2867 		if (!(*reg_mask & dreg))
2868 			return 0;
2869 		if (opcode == BPF_MOV) {
2870 			if (BPF_SRC(insn->code) == BPF_X) {
2871 				/* dreg = sreg
2872 				 * dreg needs precision after this insn
2873 				 * sreg needs precision before this insn
2874 				 */
2875 				*reg_mask &= ~dreg;
2876 				*reg_mask |= sreg;
2877 			} else {
2878 				/* dreg = K
2879 				 * dreg needs precision after this insn.
2880 				 * Corresponding register is already marked
2881 				 * as precise=true in this verifier state.
2882 				 * No further markings in parent are necessary
2883 				 */
2884 				*reg_mask &= ~dreg;
2885 			}
2886 		} else {
2887 			if (BPF_SRC(insn->code) == BPF_X) {
2888 				/* dreg += sreg
2889 				 * both dreg and sreg need precision
2890 				 * before this insn
2891 				 */
2892 				*reg_mask |= sreg;
2893 			} /* else dreg += K
2894 			   * dreg still needs precision before this insn
2895 			   */
2896 		}
2897 	} else if (class == BPF_LDX) {
2898 		if (!(*reg_mask & dreg))
2899 			return 0;
2900 		*reg_mask &= ~dreg;
2901 
2902 		/* scalars can only be spilled into stack w/o losing precision.
2903 		 * Load from any other memory can be zero extended.
2904 		 * The desire to keep that precision is already indicated
2905 		 * by 'precise' mark in corresponding register of this state.
2906 		 * No further tracking necessary.
2907 		 */
2908 		if (insn->src_reg != BPF_REG_FP)
2909 			return 0;
2910 
2911 		/* dreg = *(u64 *)[fp - off] was a fill from the stack.
2912 		 * that [fp - off] slot contains scalar that needs to be
2913 		 * tracked with precision
2914 		 */
2915 		spi = (-insn->off - 1) / BPF_REG_SIZE;
2916 		if (spi >= 64) {
2917 			verbose(env, "BUG spi %d\n", spi);
2918 			WARN_ONCE(1, "verifier backtracking bug");
2919 			return -EFAULT;
2920 		}
2921 		*stack_mask |= 1ull << spi;
2922 	} else if (class == BPF_STX || class == BPF_ST) {
2923 		if (*reg_mask & dreg)
2924 			/* stx & st shouldn't be using _scalar_ dst_reg
2925 			 * to access memory. It means backtracking
2926 			 * encountered a case of pointer subtraction.
2927 			 */
2928 			return -ENOTSUPP;
2929 		/* scalars can only be spilled into stack */
2930 		if (insn->dst_reg != BPF_REG_FP)
2931 			return 0;
2932 		spi = (-insn->off - 1) / BPF_REG_SIZE;
2933 		if (spi >= 64) {
2934 			verbose(env, "BUG spi %d\n", spi);
2935 			WARN_ONCE(1, "verifier backtracking bug");
2936 			return -EFAULT;
2937 		}
2938 		if (!(*stack_mask & (1ull << spi)))
2939 			return 0;
2940 		*stack_mask &= ~(1ull << spi);
2941 		if (class == BPF_STX)
2942 			*reg_mask |= sreg;
2943 	} else if (class == BPF_JMP || class == BPF_JMP32) {
2944 		if (opcode == BPF_CALL) {
2945 			if (insn->src_reg == BPF_PSEUDO_CALL)
2946 				return -ENOTSUPP;
2947 			/* BPF helpers that invoke callback subprogs are
2948 			 * equivalent to BPF_PSEUDO_CALL above
2949 			 */
2950 			if (insn->src_reg == 0 && is_callback_calling_function(insn->imm))
2951 				return -ENOTSUPP;
2952 			/* kfunc with imm==0 is invalid and fixup_kfunc_call will
2953 			 * catch this error later. Make backtracking conservative
2954 			 * with ENOTSUPP.
2955 			 */
2956 			if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL && insn->imm == 0)
2957 				return -ENOTSUPP;
2958 			/* regular helper call sets R0 */
2959 			*reg_mask &= ~1;
2960 			if (*reg_mask & 0x3f) {
2961 				/* if backtracing was looking for registers R1-R5
2962 				 * they should have been found already.
2963 				 */
2964 				verbose(env, "BUG regs %x\n", *reg_mask);
2965 				WARN_ONCE(1, "verifier backtracking bug");
2966 				return -EFAULT;
2967 			}
2968 		} else if (opcode == BPF_EXIT) {
2969 			return -ENOTSUPP;
2970 		} else if (BPF_SRC(insn->code) == BPF_X) {
2971 			if (!(*reg_mask & (dreg | sreg)))
2972 				return 0;
2973 			/* dreg <cond> sreg
2974 			 * Both dreg and sreg need precision before
2975 			 * this insn. If only sreg was marked precise
2976 			 * before it would be equally necessary to
2977 			 * propagate it to dreg.
2978 			 */
2979 			*reg_mask |= (sreg | dreg);
2980 			 /* else dreg <cond> K
2981 			  * Only dreg still needs precision before
2982 			  * this insn, so for the K-based conditional
2983 			  * there is nothing new to be marked.
2984 			  */
2985 		}
2986 	} else if (class == BPF_LD) {
2987 		if (!(*reg_mask & dreg))
2988 			return 0;
2989 		*reg_mask &= ~dreg;
2990 		/* It's ld_imm64 or ld_abs or ld_ind.
2991 		 * For ld_imm64 no further tracking of precision
2992 		 * into parent is necessary
2993 		 */
2994 		if (mode == BPF_IND || mode == BPF_ABS)
2995 			/* to be analyzed */
2996 			return -ENOTSUPP;
2997 	}
2998 	return 0;
2999 }
3000 
3001 /* the scalar precision tracking algorithm:
3002  * . at the start all registers have precise=false.
3003  * . scalar ranges are tracked as normal through alu and jmp insns.
3004  * . once precise value of the scalar register is used in:
3005  *   .  ptr + scalar alu
3006  *   . if (scalar cond K|scalar)
3007  *   .  helper_call(.., scalar, ...) where ARG_CONST is expected
3008  *   backtrack through the verifier states and mark as precise all
3009  *   registers and stack slots with spilled constants that these
3010  *   scalar registers were computed from.
3011  * . during state pruning two registers (or spilled stack slots)
3012  *   are equivalent if both are not precise.
3013  *
3014  * Note the verifier cannot simply walk register parentage chain,
3015  * since many different registers and stack slots could have been
3016  * used to compute single precise scalar.
3017  *
3018  * The approach of starting with precise=true for all registers and then
3019  * backtracking to mark a register as not precise when the verifier detects
3020  * that the program doesn't care about a specific value (e.g., when a helper
3021  * takes a register as an ARG_ANYTHING parameter) is not safe.
3022  *
3023  * It's ok to walk single parentage chain of the verifier states.
3024  * It's possible that this backtracking will go all the way till 1st insn.
3025  * All other branches will be explored for needing precision later.
3026  *
3027  * The backtracking needs to deal with cases like:
3028  *   R8=map_value(id=0,off=0,ks=4,vs=1952,imm=0) R9_w=map_value(id=0,off=40,ks=4,vs=1952,imm=0)
3029  * r9 -= r8
3030  * r5 = r9
3031  * if r5 > 0x79f goto pc+7
3032  *    R5_w=inv(id=0,umax_value=1951,var_off=(0x0; 0x7ff))
3033  * r5 += 1
3034  * ...
3035  * call bpf_perf_event_output#25
3036  *   where .arg5_type = ARG_CONST_SIZE_OR_ZERO
3037  *
3038  * and this case:
3039  * r6 = 1
3040  * call foo // uses callee's r6 inside to compute r0
3041  * r0 += r6
3042  * if r0 == 0 goto
3043  *
3044  * to track above reg_mask/stack_mask needs to be independent for each frame.
3045  *
3046  * Also if parent's curframe > frame where backtracking started,
3047  * the verifier needs to mark registers in both frames, otherwise callees
3048  * may incorrectly prune callers. This is similar to
3049  * commit 7640ead93924 ("bpf: verifier: make sure callees don't prune with caller differences")
3050  *
3051  * For now backtracking falls back into conservative marking.
3052  */
3053 static void mark_all_scalars_precise(struct bpf_verifier_env *env,
3054 				     struct bpf_verifier_state *st)
3055 {
3056 	struct bpf_func_state *func;
3057 	struct bpf_reg_state *reg;
3058 	int i, j;
3059 
3060 	/* big hammer: mark all scalars precise in this path.
3061 	 * pop_stack may still get !precise scalars.
3062 	 * We also skip current state and go straight to first parent state,
3063 	 * because precision markings in current non-checkpointed state are
3064 	 * not needed. See why in the comment in __mark_chain_precision below.
3065 	 */
3066 	for (st = st->parent; st; st = st->parent) {
3067 		for (i = 0; i <= st->curframe; i++) {
3068 			func = st->frame[i];
3069 			for (j = 0; j < BPF_REG_FP; j++) {
3070 				reg = &func->regs[j];
3071 				if (reg->type != SCALAR_VALUE)
3072 					continue;
3073 				reg->precise = true;
3074 			}
3075 			for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) {
3076 				if (!is_spilled_reg(&func->stack[j]))
3077 					continue;
3078 				reg = &func->stack[j].spilled_ptr;
3079 				if (reg->type != SCALAR_VALUE)
3080 					continue;
3081 				reg->precise = true;
3082 			}
3083 		}
3084 	}
3085 }
3086 
3087 static void mark_all_scalars_imprecise(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
3088 {
3089 	struct bpf_func_state *func;
3090 	struct bpf_reg_state *reg;
3091 	int i, j;
3092 
3093 	for (i = 0; i <= st->curframe; i++) {
3094 		func = st->frame[i];
3095 		for (j = 0; j < BPF_REG_FP; j++) {
3096 			reg = &func->regs[j];
3097 			if (reg->type != SCALAR_VALUE)
3098 				continue;
3099 			reg->precise = false;
3100 		}
3101 		for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) {
3102 			if (!is_spilled_reg(&func->stack[j]))
3103 				continue;
3104 			reg = &func->stack[j].spilled_ptr;
3105 			if (reg->type != SCALAR_VALUE)
3106 				continue;
3107 			reg->precise = false;
3108 		}
3109 	}
3110 }
3111 
3112 /*
3113  * __mark_chain_precision() backtracks BPF program instruction sequence and
3114  * chain of verifier states making sure that register *regno* (if regno >= 0)
3115  * and/or stack slot *spi* (if spi >= 0) are marked as precisely tracked
3116  * SCALARS, as well as any other registers and slots that contribute to
3117  * a tracked state of given registers/stack slots, depending on specific BPF
3118  * assembly instructions (see backtrack_insn() for exact instruction handling
3119  * logic). This backtracking relies on recorded jmp_history and is able to
3120  * traverse entire chain of parent states. This process ends only when all the
3121  * necessary registers/slots and their transitive dependencies are marked as
3122  * precise.
3123  *
3124  * One important and subtle aspect is that precise marks *do not matter* in
3125  * the currently verified state (current state). It is important to understand
3126  * why this is the case.
3127  *
3128  * First, note that current state is the state that is not yet "checkpointed",
3129  * i.e., it is not yet put into env->explored_states, and it has no children
3130  * states as well. It's ephemeral, and can end up either a) being discarded if
3131  * compatible explored state is found at some point or BPF_EXIT instruction is
3132  * reached or b) checkpointed and put into env->explored_states, branching out
3133  * into one or more children states.
3134  *
3135  * In the former case, precise markings in current state are completely
3136  * ignored by state comparison code (see regsafe() for details). Only
3137  * checkpointed ("old") state precise markings are important, and if old
3138  * state's register/slot is precise, regsafe() assumes current state's
3139  * register/slot as precise and checks value ranges exactly and precisely. If
3140  * states turn out to be compatible, current state's necessary precise
3141  * markings and any required parent states' precise markings are enforced
3142  * after the fact with propagate_precision() logic. But it's
3143  * important to realize that in this case, even after marking current state
3144  * registers/slots as precise, we immediately discard current state. So what
3145  * actually matters is any of the precise markings propagated into current
3146  * state's parent states, which are always checkpointed (due to b) case above).
3147  * As such, for scenario a) it doesn't matter if current state has precise
3148  * markings set or not.
3149  *
3150  * Now, for the scenario b), checkpointing and forking into child(ren)
3151  * state(s). Note that before current state gets to checkpointing step, any
3152  * processed instruction always assumes precise SCALAR register/slot
3153  * knowledge: if precise value or range is useful to prune jump branch, BPF
3154  * verifier takes this opportunity enthusiastically. Similarly, when
3155  * register's value is used to calculate offset or memory address, exact
3156  * knowledge of SCALAR range is assumed, checked, and enforced. So, similar to
3157  * what we mentioned above about state comparison ignoring precise markings
3158  * during state comparison, BPF verifier ignores and also assumes precise
3159  * markings *at will* during the instruction verification process. But as the
3160  * verifier assumes precision, it also propagates any precision dependencies across
3161  * parent states, which are not yet finalized, so can be further restricted
3162  * based on new knowledge gained from restrictions enforced by their children
3163  * states. This is so that once those parent states are finalized, i.e., when
3164  * they have no more active children state, state comparison logic in
3165  * is_state_visited() would enforce strict and precise SCALAR ranges, if
3166  * required for correctness.
3167  *
3168  * To build a bit more intuition, note also that once a state is checkpointed,
3169  * the path we took to get to that state is not important. This is crucial
3170  * property for state pruning. When state is checkpointed and finalized at
3171  * some instruction index, it can be correctly and safely used to "short
3172  * circuit" any *compatible* state that reaches exactly the same instruction
3173  * index. I.e., if we jumped to that instruction from a completely different
3174  * code path than original finalized state was derived from, it doesn't
3175  * matter, current state can be discarded because from that instruction
3176  * forward having a compatible state will ensure we will safely reach the
3177  * exit. States describe preconditions for further exploration, but completely
3178  * forget the history of how we got here.
3179  *
3180  * This also means that even if we needed precise SCALAR range to get to
3181  * finalized state, but from that point forward *that same* SCALAR register is
3182  * never used in a precise context (i.e., its precise value is not needed for
3183  * correctness), it's correct and safe to mark such register as "imprecise"
3184  * (i.e., precise marking set to false). This is what we rely on when we do
3185  * not set precise marking in current state. If no child state requires
3186  * precision for any given SCALAR register, it's safe to dictate that it can
3187  * be imprecise. If any child state does require this register to be precise,
3188  * we'll mark it precise later retroactively during precise markings
3189  * propagation from child state to parent states.
3190  *
3191  * Skipping precise marking setting in current state is a mild version of
3192  * relying on the above observation. But we can utilize this property even
3193  * more aggressively by proactively forgetting any precise marking in the
3194  * current state (which we inherited from the parent state), right before we
3195  * checkpoint it and branch off into new child state. This is done by
3196  * mark_all_scalars_imprecise() to hopefully get more permissive and generic
3197  * finalized states which help in short circuiting more future states.
3198  */
3199 static int __mark_chain_precision(struct bpf_verifier_env *env, int frame, int regno,
3200 				  int spi)
3201 {
3202 	struct bpf_verifier_state *st = env->cur_state;
3203 	int first_idx = st->first_insn_idx;
3204 	int last_idx = env->insn_idx;
3205 	struct bpf_func_state *func;
3206 	struct bpf_reg_state *reg;
3207 	u32 reg_mask = regno >= 0 ? 1u << regno : 0;
3208 	u64 stack_mask = spi >= 0 ? 1ull << spi : 0;
3209 	bool skip_first = true;
3210 	bool new_marks = false;
3211 	int i, err;
3212 
3213 	if (!env->bpf_capable)
3214 		return 0;
3215 
3216 	/* Do sanity checks against current state of register and/or stack
3217 	 * slot, but don't set precise flag in current state, as precision
3218 	 * tracking in the current state is unnecessary.
3219 	 */
3220 	func = st->frame[frame];
3221 	if (regno >= 0) {
3222 		reg = &func->regs[regno];
3223 		if (reg->type != SCALAR_VALUE) {
3224 			WARN_ONCE(1, "backtracking misuse");
3225 			return -EFAULT;
3226 		}
3227 		new_marks = true;
3228 	}
3229 
3230 	while (spi >= 0) {
3231 		if (!is_spilled_reg(&func->stack[spi])) {
3232 			stack_mask = 0;
3233 			break;
3234 		}
3235 		reg = &func->stack[spi].spilled_ptr;
3236 		if (reg->type != SCALAR_VALUE) {
3237 			stack_mask = 0;
3238 			break;
3239 		}
3240 		new_marks = true;
3241 		break;
3242 	}
3243 
3244 	if (!new_marks)
3245 		return 0;
3246 	if (!reg_mask && !stack_mask)
3247 		return 0;
3248 
3249 	for (;;) {
3250 		DECLARE_BITMAP(mask, 64);
3251 		u32 history = st->jmp_history_cnt;
3252 
3253 		if (env->log.level & BPF_LOG_LEVEL2)
3254 			verbose(env, "last_idx %d first_idx %d\n", last_idx, first_idx);
3255 
3256 		if (last_idx < 0) {
3257 			/* we are at the entry into subprog, which
3258 			 * is expected for global funcs, but only if
3259 			 * requested precise registers are R1-R5
3260 			 * (which are global func's input arguments)
3261 			 */
3262 			if (st->curframe == 0 &&
3263 			    st->frame[0]->subprogno > 0 &&
3264 			    st->frame[0]->callsite == BPF_MAIN_FUNC &&
3265 			    stack_mask == 0 && (reg_mask & ~0x3e) == 0) {
3266 				bitmap_from_u64(mask, reg_mask);
3267 				for_each_set_bit(i, mask, 32) {
3268 					reg = &st->frame[0]->regs[i];
3269 					if (reg->type != SCALAR_VALUE) {
3270 						reg_mask &= ~(1u << i);
3271 						continue;
3272 					}
3273 					reg->precise = true;
3274 				}
3275 				return 0;
3276 			}
3277 
3278 			verbose(env, "BUG backtracking func entry subprog %d reg_mask %x stack_mask %llx\n",
3279 				st->frame[0]->subprogno, reg_mask, stack_mask);
3280 			WARN_ONCE(1, "verifier backtracking bug");
3281 			return -EFAULT;
3282 		}
3283 
3284 		for (i = last_idx;;) {
3285 			if (skip_first) {
3286 				err = 0;
3287 				skip_first = false;
3288 			} else {
3289 				err = backtrack_insn(env, i, &reg_mask, &stack_mask);
3290 			}
3291 			if (err == -ENOTSUPP) {
3292 				mark_all_scalars_precise(env, st);
3293 				return 0;
3294 			} else if (err) {
3295 				return err;
3296 			}
3297 			if (!reg_mask && !stack_mask)
3298 				/* Found assignment(s) into tracked register in this state.
3299 				 * Since this state is already marked, just return.
3300 				 * Nothing to be tracked further in the parent state.
3301 				 */
3302 				return 0;
3303 			if (i == first_idx)
3304 				break;
3305 			i = get_prev_insn_idx(st, i, &history);
3306 			if (i >= env->prog->len) {
3307 				/* This can happen if backtracking reached insn 0
3308 				 * and there are still bits set in reg_mask or
3309 				 * stack_mask left to backtrack.
3310 				 * It means the backtracking missed the spot where
3311 				 * particular register was initialized with a constant.
3312 				 */
3313 				verbose(env, "BUG backtracking idx %d\n", i);
3314 				WARN_ONCE(1, "verifier backtracking bug");
3315 				return -EFAULT;
3316 			}
3317 		}
3318 		st = st->parent;
3319 		if (!st)
3320 			break;
3321 
3322 		new_marks = false;
3323 		func = st->frame[frame];
3324 		bitmap_from_u64(mask, reg_mask);
3325 		for_each_set_bit(i, mask, 32) {
3326 			reg = &func->regs[i];
3327 			if (reg->type != SCALAR_VALUE) {
3328 				reg_mask &= ~(1u << i);
3329 				continue;
3330 			}
3331 			if (!reg->precise)
3332 				new_marks = true;
3333 			reg->precise = true;
3334 		}
3335 
3336 		bitmap_from_u64(mask, stack_mask);
3337 		for_each_set_bit(i, mask, 64) {
3338 			if (i >= func->allocated_stack / BPF_REG_SIZE) {
3339 				/* the sequence of instructions:
3340 				 * 2: (bf) r3 = r10
3341 				 * 3: (7b) *(u64 *)(r3 -8) = r0
3342 				 * 4: (79) r4 = *(u64 *)(r10 -8)
3343 				 * doesn't contain jmps. It's backtracked
3344 				 * as a single block.
3345 				 * During backtracking insn 3 is not recognized as
3346 				 * stack access, so at the end of backtracking
3347 				 * stack slot fp-8 is still marked in stack_mask.
3348 				 * However the parent state may not have accessed
3349 				 * fp-8 and it's "unallocated" stack space.
3350 				 * In such a case, fall back to being conservative.
3351 				 */
3352 				mark_all_scalars_precise(env, st);
3353 				return 0;
3354 			}
3355 
3356 			if (!is_spilled_reg(&func->stack[i])) {
3357 				stack_mask &= ~(1ull << i);
3358 				continue;
3359 			}
3360 			reg = &func->stack[i].spilled_ptr;
3361 			if (reg->type != SCALAR_VALUE) {
3362 				stack_mask &= ~(1ull << i);
3363 				continue;
3364 			}
3365 			if (!reg->precise)
3366 				new_marks = true;
3367 			reg->precise = true;
3368 		}
3369 		if (env->log.level & BPF_LOG_LEVEL2) {
3370 			verbose(env, "parent %s regs=%x stack=%llx marks:",
3371 				new_marks ? "didn't have" : "already had",
3372 				reg_mask, stack_mask);
3373 			print_verifier_state(env, func, true);
3374 		}
3375 
3376 		if (!reg_mask && !stack_mask)
3377 			break;
3378 		if (!new_marks)
3379 			break;
3380 
3381 		last_idx = st->last_insn_idx;
3382 		first_idx = st->first_insn_idx;
3383 	}
3384 	return 0;
3385 }
3386 
3387 int mark_chain_precision(struct bpf_verifier_env *env, int regno)
3388 {
3389 	return __mark_chain_precision(env, env->cur_state->curframe, regno, -1);
3390 }
3391 
3392 static int mark_chain_precision_frame(struct bpf_verifier_env *env, int frame, int regno)
3393 {
3394 	return __mark_chain_precision(env, frame, regno, -1);
3395 }
3396 
3397 static int mark_chain_precision_stack_frame(struct bpf_verifier_env *env, int frame, int spi)
3398 {
3399 	return __mark_chain_precision(env, frame, -1, spi);
3400 }
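
/* Illustrative sketch (not taken from a real program) of one common
 * trigger for precision tracking: a conditional jump whose outcome is
 * statically known. Given
 *
 *    BPF_MOV64_IMM(BPF_REG_6, 4),            // r6 = 4
 *    BPF_JMP_IMM(BPF_JGT, BPF_REG_6, 3, 2),  // if r6 > 3 goto +2
 *
 * check_cond_jmp_op() predicts the branch and requests precision via
 * mark_chain_precision(env, BPF_REG_6); __mark_chain_precision() then
 * walks the instruction history backwards, setting reg->precise on every
 * register and stack slot that contributed to r6 being the constant 4.
 */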
3401 
3402 static bool is_spillable_regtype(enum bpf_reg_type type)
3403 {
3404 	switch (base_type(type)) {
3405 	case PTR_TO_MAP_VALUE:
3406 	case PTR_TO_STACK:
3407 	case PTR_TO_CTX:
3408 	case PTR_TO_PACKET:
3409 	case PTR_TO_PACKET_META:
3410 	case PTR_TO_PACKET_END:
3411 	case PTR_TO_FLOW_KEYS:
3412 	case CONST_PTR_TO_MAP:
3413 	case PTR_TO_SOCKET:
3414 	case PTR_TO_SOCK_COMMON:
3415 	case PTR_TO_TCP_SOCK:
3416 	case PTR_TO_XDP_SOCK:
3417 	case PTR_TO_BTF_ID:
3418 	case PTR_TO_BUF:
3419 	case PTR_TO_MEM:
3420 	case PTR_TO_FUNC:
3421 	case PTR_TO_MAP_KEY:
3422 		return true;
3423 	default:
3424 		return false;
3425 	}
3426 }
3427 
3428 /* Does this register contain a constant zero? */
3429 static bool register_is_null(struct bpf_reg_state *reg)
3430 {
3431 	return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0);
3432 }
3433 
3434 static bool register_is_const(struct bpf_reg_state *reg)
3435 {
3436 	return reg->type == SCALAR_VALUE && tnum_is_const(reg->var_off);
3437 }
3438 
3439 static bool __is_scalar_unbounded(struct bpf_reg_state *reg)
3440 {
3441 	return tnum_is_unknown(reg->var_off) &&
3442 	       reg->smin_value == S64_MIN && reg->smax_value == S64_MAX &&
3443 	       reg->umin_value == 0 && reg->umax_value == U64_MAX &&
3444 	       reg->s32_min_value == S32_MIN && reg->s32_max_value == S32_MAX &&
3445 	       reg->u32_min_value == 0 && reg->u32_max_value == U32_MAX;
3446 }
3447 
3448 static bool register_is_bounded(struct bpf_reg_state *reg)
3449 {
3450 	return reg->type == SCALAR_VALUE && !__is_scalar_unbounded(reg);
3451 }
3452 
3453 static bool __is_pointer_value(bool allow_ptr_leaks,
3454 			       const struct bpf_reg_state *reg)
3455 {
3456 	if (allow_ptr_leaks)
3457 		return false;
3458 
3459 	return reg->type != SCALAR_VALUE;
3460 }
3461 
3462 /* Copy src state preserving dst->parent and dst->live fields */
3463 static void copy_register_state(struct bpf_reg_state *dst, const struct bpf_reg_state *src)
3464 {
3465 	struct bpf_reg_state *parent = dst->parent;
3466 	enum bpf_reg_liveness live = dst->live;
3467 
3468 	*dst = *src;
3469 	dst->parent = parent;
3470 	dst->live = live;
3471 }
3472 
3473 static void save_register_state(struct bpf_func_state *state,
3474 				int spi, struct bpf_reg_state *reg,
3475 				int size)
3476 {
3477 	int i;
3478 
3479 	copy_register_state(&state->stack[spi].spilled_ptr, reg);
3480 	if (size == BPF_REG_SIZE)
3481 		state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
3482 
3483 	for (i = BPF_REG_SIZE; i > BPF_REG_SIZE - size; i--)
3484 		state->stack[spi].slot_type[i - 1] = STACK_SPILL;
3485 
3486 	/* size < 8 bytes spill */
3487 	for (; i; i--)
3488 		scrub_spilled_slot(&state->stack[spi].slot_type[i - 1]);
3489 }
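
/* For example (illustrative): an 8-byte spill of a register to fp-8 marks
 * slot_type[0..7] as STACK_SPILL and sets REG_LIVE_WRITTEN. A 4-byte
 * spill marks only slot_type[7] down to slot_type[4] as STACK_SPILL and
 * scrubs slot_type[3] down to slot_type[0] (to STACK_MISC when they were
 * previously initialized), so a later 8-byte fill cannot pretend the
 * whole slot holds the spilled value.
 */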
3490 
3491 static bool is_bpf_st_mem(struct bpf_insn *insn)
3492 {
3493 	return BPF_CLASS(insn->code) == BPF_ST && BPF_MODE(insn->code) == BPF_MEM;
3494 }
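
/* E.g. the insn built by BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 42), i.e.
 * "*(u64 *)(fp - 8) = 42", matches (class BPF_ST, mode BPF_MEM), while
 * BPF_STX insns, which take a register source, do not.
 */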
3495 
3496 /* check_stack_{read,write}_fixed_off functions track spill/fill of registers,
3497  * stack boundary and alignment are checked in check_mem_access()
3498  */
3499 static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
3500 				       /* stack frame we're writing to */
3501 				       struct bpf_func_state *state,
3502 				       int off, int size, int value_regno,
3503 				       int insn_idx)
3504 {
3505 	struct bpf_func_state *cur; /* state of the current function */
3506 	int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err;
3507 	struct bpf_insn *insn = &env->prog->insnsi[insn_idx];
3508 	struct bpf_reg_state *reg = NULL;
3509 	u32 dst_reg = insn->dst_reg;
3510 
3511 	err = grow_stack_state(state, round_up(slot + 1, BPF_REG_SIZE));
3512 	if (err)
3513 		return err;
3514 	/* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
3515 	 * so it's an aligned access and [off, off + size) is within stack limits
3516 	 */
3517 	if (!env->allow_ptr_leaks &&
3518 	    state->stack[spi].slot_type[0] == STACK_SPILL &&
3519 	    size != BPF_REG_SIZE) {
3520 		verbose(env, "attempt to corrupt spilled pointer on stack\n");
3521 		return -EACCES;
3522 	}
3523 
3524 	cur = env->cur_state->frame[env->cur_state->curframe];
3525 	if (value_regno >= 0)
3526 		reg = &cur->regs[value_regno];
3527 	if (!env->bypass_spec_v4) {
3528 		bool sanitize = reg && is_spillable_regtype(reg->type);
3529 
3530 		for (i = 0; i < size; i++) {
3531 			u8 type = state->stack[spi].slot_type[i];
3532 
3533 			if (type != STACK_MISC && type != STACK_ZERO) {
3534 				sanitize = true;
3535 				break;
3536 			}
3537 		}
3538 
3539 		if (sanitize)
3540 			env->insn_aux_data[insn_idx].sanitize_stack_spill = true;
3541 	}
3542 
3543 	err = destroy_if_dynptr_stack_slot(env, state, spi);
3544 	if (err)
3545 		return err;
3546 
3547 	mark_stack_slot_scratched(env, spi);
3548 	if (reg && !(off % BPF_REG_SIZE) && register_is_bounded(reg) &&
3549 	    !register_is_null(reg) && env->bpf_capable) {
3550 		if (dst_reg != BPF_REG_FP) {
3551 			/* The backtracking logic can only recognize explicit
3552 			 * stack slot addresses like [fp - 8]. Any other spill of a
3553 			 * scalar via a different register has to be conservative.
3554 			 * Backtrack from here and mark all registers as precise
3555 			 * that contributed into 'reg' being a constant.
3556 			 */
3557 			err = mark_chain_precision(env, value_regno);
3558 			if (err)
3559 				return err;
3560 		}
3561 		save_register_state(state, spi, reg, size);
3562 	} else if (!reg && !(off % BPF_REG_SIZE) && is_bpf_st_mem(insn) &&
3563 		   insn->imm != 0 && env->bpf_capable) {
3564 		struct bpf_reg_state fake_reg = {};
3565 
3566 		__mark_reg_known(&fake_reg, (u32)insn->imm);
3567 		fake_reg.type = SCALAR_VALUE;
3568 		save_register_state(state, spi, &fake_reg, size);
3569 	} else if (reg && is_spillable_regtype(reg->type)) {
3570 		/* register containing pointer is being spilled into stack */
3571 		if (size != BPF_REG_SIZE) {
3572 			verbose_linfo(env, insn_idx, "; ");
3573 			verbose(env, "invalid size of register spill\n");
3574 			return -EACCES;
3575 		}
3576 		if (state != cur && reg->type == PTR_TO_STACK) {
3577 			verbose(env, "cannot spill pointers to stack into stack frame of the caller\n");
3578 			return -EINVAL;
3579 		}
3580 		save_register_state(state, spi, reg, size);
3581 	} else {
3582 		u8 type = STACK_MISC;
3583 
3584 		/* regular write of data into stack destroys any spilled ptr */
3585 		state->stack[spi].spilled_ptr.type = NOT_INIT;
3586 		/* Mark slots as STACK_MISC if they belonged to spilled ptr. */
3587 		if (is_spilled_reg(&state->stack[spi]))
3588 			for (i = 0; i < BPF_REG_SIZE; i++)
3589 				scrub_spilled_slot(&state->stack[spi].slot_type[i]);
3590 
3591 		/* only mark the slot as written if all 8 bytes were written,
3592 		 * otherwise read propagation may incorrectly stop too soon
3593 		 * when stack slots are partially written.
3594 		 * This heuristic means that read propagation will be
3595 		 * conservative, since it will add reg_live_read marks
3596 		 * to stack slots all the way back to the first state when a
3597 		 * program writes+reads less than 8 bytes.
3598 		 */
3599 		if (size == BPF_REG_SIZE)
3600 			state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
3601 
3602 		/* when we zero initialize stack slots mark them as such */
3603 		if ((reg && register_is_null(reg)) ||
3604 		    (!reg && is_bpf_st_mem(insn) && insn->imm == 0)) {
3605 			/* backtracking doesn't work for STACK_ZERO yet. */
3606 			err = mark_chain_precision(env, value_regno);
3607 			if (err)
3608 				return err;
3609 			type = STACK_ZERO;
3610 		}
3611 
3612 		/* Mark slots affected by this stack write. */
3613 		for (i = 0; i < size; i++)
3614 			state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] =
3615 				type;
3616 	}
3617 	return 0;
3618 }
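
/* A sketch (illustrative, not from a real program) of the
 * "spill through a non-fp register" case handled above:
 *
 *    BPF_MOV64_IMM(BPF_REG_0, 4),                   // r0 = 4
 *    BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),          // r3 = fp
 *    BPF_STX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, -8), // *(u64 *)(r3 - 8) = r0
 *
 * Since dst_reg is r3 rather than fp, backtracking would not recognize
 * the store as a stack access, so mark_chain_precision() is called
 * eagerly for r0 before the constant is saved into the slot.
 */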
3619 
3620 /* Write the stack: 'stack[ptr_regno + off] = value_regno'. 'ptr_regno' is
3621  * known to contain a variable offset.
3622  * This function checks whether the write is permitted and conservatively
3623  * tracks the effects of the write, considering that each stack slot in the
3624  * dynamic range is potentially written to.
3625  *
3626  * 'off' includes 'regno->off'.
3627  * 'value_regno' can be -1, meaning that an unknown value is being written to
3628  * the stack.
3629  *
3630  * Spilled pointers in range are not marked as written because we don't know
3631  * what's going to be actually written. This means that read propagation for
3632  * future reads cannot be terminated by this write.
3633  *
3634  * For privileged programs, uninitialized stack slots are considered
3635  * initialized by this write (even though we don't know exactly what offsets
3636  * are going to be written to). The idea is that we don't want the verifier to
3637  * reject future reads that access slots written to through variable offsets.
3638  */
3639 static int check_stack_write_var_off(struct bpf_verifier_env *env,
3640 				     /* func where register points to */
3641 				     struct bpf_func_state *state,
3642 				     int ptr_regno, int off, int size,
3643 				     int value_regno, int insn_idx)
3644 {
3645 	struct bpf_func_state *cur; /* state of the current function */
3646 	int min_off, max_off;
3647 	int i, err;
3648 	struct bpf_reg_state *ptr_reg = NULL, *value_reg = NULL;
3649 	struct bpf_insn *insn = &env->prog->insnsi[insn_idx];
3650 	bool writing_zero = false;
3651 	/* set if the fact that we're writing a zero is used to let any
3652 	 * stack slots remain STACK_ZERO
3653 	 */
3654 	bool zero_used = false;
3655 
3656 	cur = env->cur_state->frame[env->cur_state->curframe];
3657 	ptr_reg = &cur->regs[ptr_regno];
3658 	min_off = ptr_reg->smin_value + off;
3659 	max_off = ptr_reg->smax_value + off + size;
3660 	if (value_regno >= 0)
3661 		value_reg = &cur->regs[value_regno];
3662 	if ((value_reg && register_is_null(value_reg)) ||
3663 	    (!value_reg && is_bpf_st_mem(insn) && insn->imm == 0))
3664 		writing_zero = true;
3665 
3666 	err = grow_stack_state(state, round_up(-min_off, BPF_REG_SIZE));
3667 	if (err)
3668 		return err;
3669 
3670 	for (i = min_off; i < max_off; i++) {
3671 		int spi;
3672 
3673 		spi = __get_spi(i);
3674 		err = destroy_if_dynptr_stack_slot(env, state, spi);
3675 		if (err)
3676 			return err;
3677 	}
3678 
3679 	/* Variable offset writes destroy any spilled pointers in range. */
3680 	for (i = min_off; i < max_off; i++) {
3681 		u8 new_type, *stype;
3682 		int slot, spi;
3683 
3684 		slot = -i - 1;
3685 		spi = slot / BPF_REG_SIZE;
3686 		stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
3687 		mark_stack_slot_scratched(env, spi);
3688 
3689 		if (!env->allow_ptr_leaks && *stype != STACK_MISC && *stype != STACK_ZERO) {
3690 			/* Reject the write if the range we may write to has not
3691 			 * been initialized beforehand. If we didn't reject
3692 			 * here, the ptr status would be erased below (even
3693 			 * though not all slots are actually overwritten),
3694 			 * possibly opening the door to leaks.
3695 			 *
3696 			 * We do however catch STACK_INVALID case below, and
3697 			 * only allow reading possibly uninitialized memory
3698 			 * later for CAP_PERFMON, as the write may not happen to
3699 			 * that slot.
3700 			 */
3701 			verbose(env, "spilled ptr in range of var-offset stack write; insn %d, ptr off: %d",
3702 				insn_idx, i);
3703 			return -EINVAL;
3704 		}
3705 
3706 		/* Erase all spilled pointers. */
3707 		state->stack[spi].spilled_ptr.type = NOT_INIT;
3708 
3709 		/* Update the slot type. */
3710 		new_type = STACK_MISC;
3711 		if (writing_zero && *stype == STACK_ZERO) {
3712 			new_type = STACK_ZERO;
3713 			zero_used = true;
3714 		}
3715 		/* If the slot is STACK_INVALID, we check whether it's OK to
3716 		 * pretend that it will be initialized by this write. The slot
3717 		 * might not actually be written to, and so if we mark it as
3718 		 * initialized, future reads might leak uninitialized memory.
3719 		 * For privileged programs, we will accept such reads to slots
3720 		 * that may or may not be written because, if we were to reject
3721 		 * them, the error would be too confusing.
3722 		 */
3723 		if (*stype == STACK_INVALID && !env->allow_uninit_stack) {
3724 			verbose(env, "uninit stack in range of var-offset write prohibited for !root; insn %d, off: %d",
3725 					insn_idx, i);
3726 			return -EINVAL;
3727 		}
3728 		*stype = new_type;
3729 	}
3730 	if (zero_used) {
3731 		/* backtracking doesn't work for STACK_ZERO yet. */
3732 		err = mark_chain_precision(env, value_regno);
3733 		if (err)
3734 			return err;
3735 	}
3736 	return 0;
3737 }
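
/* Illustrative variable-offset write handled above (a sketch; assume r2
 * was previously bounded to [0, 8]):
 *
 *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),          // r1 = fp
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -16),        // r1 = fp - 16
 *    BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),  // r1 in [fp-16, fp-8]
 *    BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),            // *(u8 *)(r1 + 0) = 0
 *
 * min_off is -16 and max_off is -7, so every byte in [fp-16, fp-8] is
 * conservatively treated as possibly written: spilled pointers in that
 * range are invalidated and each slot becomes STACK_MISC (or stays
 * STACK_ZERO, since the value written here is zero).
 */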
3738 
3739 /* When register 'dst_regno' is assigned some values from stack[min_off,
3740  * max_off), we set the register's type according to the types of the
3741  * respective stack slots. If all the stack values are known to be zeros, then
3742  * so is the destination reg. Otherwise, the register is considered to be
3743  * SCALAR. This function does not deal with register filling; the caller must
3744  * ensure that all spilled registers in the stack range have been marked as
3745  * read.
3746  */
3747 static void mark_reg_stack_read(struct bpf_verifier_env *env,
3748 				/* func where src register points to */
3749 				struct bpf_func_state *ptr_state,
3750 				int min_off, int max_off, int dst_regno)
3751 {
3752 	struct bpf_verifier_state *vstate = env->cur_state;
3753 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
3754 	int i, slot, spi;
3755 	u8 *stype;
3756 	int zeros = 0;
3757 
3758 	for (i = min_off; i < max_off; i++) {
3759 		slot = -i - 1;
3760 		spi = slot / BPF_REG_SIZE;
3761 		stype = ptr_state->stack[spi].slot_type;
3762 		if (stype[slot % BPF_REG_SIZE] != STACK_ZERO)
3763 			break;
3764 		zeros++;
3765 	}
3766 	if (zeros == max_off - min_off) {
3767 		/* any access_size read into register is zero extended,
3768 		 * so the whole register == const_zero
3769 		 */
3770 		__mark_reg_const_zero(&state->regs[dst_regno]);
3771 		/* backtracking doesn't support STACK_ZERO yet,
3772 		 * so mark it precise here, so that later
3773 		 * backtracking can stop here.
3774 		 * Backtracking may not need this if this register
3775 		 * doesn't participate in pointer adjustment.
3776 		 * Forward propagation of precise flag is not
3777 		 * necessary either. This mark is only to stop
3778 		 * backtracking. Any register that contributed
3779 		 * to const 0 was marked precise before spill.
3780 		 */
3781 		state->regs[dst_regno].precise = true;
3782 	} else {
3783 		/* have read misc data from the stack */
3784 		mark_reg_unknown(env, state->regs, dst_regno);
3785 	}
3786 	state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
3787 }
3788 
3789 /* Read the stack at 'off' and put the results into the register indicated by
3790  * 'dst_regno'. It handles reg filling if the addressed stack slot is a
3791  * spilled reg.
3792  *
3793  * 'dst_regno' can be -1, meaning that the read value is not going to a
3794  * register.
3795  *
3796  * The access is assumed to be within the current stack bounds.
3797  */
3798 static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
3799 				      /* func where src register points to */
3800 				      struct bpf_func_state *reg_state,
3801 				      int off, int size, int dst_regno)
3802 {
3803 	struct bpf_verifier_state *vstate = env->cur_state;
3804 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
3805 	int i, slot = -off - 1, spi = slot / BPF_REG_SIZE;
3806 	struct bpf_reg_state *reg;
3807 	u8 *stype, type;
3808 
3809 	stype = reg_state->stack[spi].slot_type;
3810 	reg = &reg_state->stack[spi].spilled_ptr;
3811 
3812 	if (is_spilled_reg(&reg_state->stack[spi])) {
3813 		u8 spill_size = 1;
3814 
3815 		for (i = BPF_REG_SIZE - 1; i > 0 && stype[i - 1] == STACK_SPILL; i--)
3816 			spill_size++;
3817 
3818 		if (size != BPF_REG_SIZE || spill_size != BPF_REG_SIZE) {
3819 			if (reg->type != SCALAR_VALUE) {
3820 				verbose_linfo(env, env->insn_idx, "; ");
3821 				verbose(env, "invalid size of register fill\n");
3822 				return -EACCES;
3823 			}
3824 
3825 			mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
3826 			if (dst_regno < 0)
3827 				return 0;
3828 
3829 			if (!(off % BPF_REG_SIZE) && size == spill_size) {
3830 				/* The earlier check_reg_arg() has decided the
3831 				 * subreg_def for this insn.  Save it first.
3832 				 */
3833 				s32 subreg_def = state->regs[dst_regno].subreg_def;
3834 
3835 				copy_register_state(&state->regs[dst_regno], reg);
3836 				state->regs[dst_regno].subreg_def = subreg_def;
3837 			} else {
3838 				for (i = 0; i < size; i++) {
3839 					type = stype[(slot - i) % BPF_REG_SIZE];
3840 					if (type == STACK_SPILL)
3841 						continue;
3842 					if (type == STACK_MISC)
3843 						continue;
3844 					if (type == STACK_INVALID && env->allow_uninit_stack)
3845 						continue;
3846 					verbose(env, "invalid read from stack off %d+%d size %d\n",
3847 						off, i, size);
3848 					return -EACCES;
3849 				}
3850 				mark_reg_unknown(env, state->regs, dst_regno);
3851 			}
3852 			state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
3853 			return 0;
3854 		}
3855 
3856 		if (dst_regno >= 0) {
3857 			/* restore register state from stack */
3858 			copy_register_state(&state->regs[dst_regno], reg);
3859 			/* mark reg as written since spilled pointer state likely
3860 			 * has its liveness marks cleared by is_state_visited()
3861 			 * which resets stack/reg liveness for state transitions
3862 			 */
3863 			state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
3864 		} else if (__is_pointer_value(env->allow_ptr_leaks, reg)) {
3865 			/* If dst_regno==-1, the caller is asking us whether
3866 			 * it is acceptable to use this value as a SCALAR_VALUE
3867 			 * (e.g. for XADD).
3868 			 * We must not allow unprivileged callers to do that
3869 			 * with spilled pointers.
3870 			 */
3871 			verbose(env, "leaking pointer from stack off %d\n",
3872 				off);
3873 			return -EACCES;
3874 		}
3875 		mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
3876 	} else {
3877 		for (i = 0; i < size; i++) {
3878 			type = stype[(slot - i) % BPF_REG_SIZE];
3879 			if (type == STACK_MISC)
3880 				continue;
3881 			if (type == STACK_ZERO)
3882 				continue;
3883 			if (type == STACK_INVALID && env->allow_uninit_stack)
3884 				continue;
3885 			verbose(env, "invalid read from stack off %d+%d size %d\n",
3886 				off, i, size);
3887 			return -EACCES;
3888 		}
3889 		mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
3890 		if (dst_regno >= 0)
3891 			mark_reg_stack_read(env, reg_state, off, off + size, dst_regno);
3892 	}
3893 	return 0;
3894 }
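
/* Fill examples (illustrative): after
 *
 *    BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8) // spill r0 to fp-8
 *
 * a full 8-byte fill such as
 *
 *    BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -8)
 *
 * restores the complete register state (including pointer types) into r1,
 * while a partial fill like
 *
 *    BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_10, -8)
 *
 * is accepted only for spilled scalars and conservatively marks r1 as an
 * unknown scalar.
 */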
3895 
3896 enum bpf_access_src {
3897 	ACCESS_DIRECT = 1,  /* the access is performed by an instruction */
3898 	ACCESS_HELPER = 2,  /* the access is performed by a helper */
3899 };
3900 
3901 static int check_stack_range_initialized(struct bpf_verifier_env *env,
3902 					 int regno, int off, int access_size,
3903 					 bool zero_size_allowed,
3904 					 enum bpf_access_src type,
3905 					 struct bpf_call_arg_meta *meta);
3906 
3907 static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno)
3908 {
3909 	return cur_regs(env) + regno;
3910 }
3911 
3912 /* Read the stack at 'ptr_regno + off' and put the result into the register
3913  * 'dst_regno'.
3914  * 'off' includes the pointer register's fixed offset (i.e. 'ptr_regno.off'),
3915  * but not its variable offset.
3916  * 'size' is assumed to be <= reg size and the access is assumed to be aligned.
3917  *
3918  * As opposed to check_stack_read_fixed_off, this function doesn't deal with
3919  * filling registers (i.e. reads of spilled register cannot be detected when
3920  * the offset is not fixed). We conservatively mark 'dst_regno' as containing
3921  * SCALAR_VALUE. That's why we assert that the 'ptr_regno' has a variable
3922  * offset; for a fixed offset check_stack_read_fixed_off should be used
3923  * instead.
3924  */
3925 static int check_stack_read_var_off(struct bpf_verifier_env *env,
3926 				    int ptr_regno, int off, int size, int dst_regno)
3927 {
3928 	/* The state of the source register. */
3929 	struct bpf_reg_state *reg = reg_state(env, ptr_regno);
3930 	struct bpf_func_state *ptr_state = func(env, reg);
3931 	int err;
3932 	int min_off, max_off;
3933 
3934 	/* Note that we pass a NULL meta, so raw access will not be permitted.
3935 	 */
3936 	err = check_stack_range_initialized(env, ptr_regno, off, size,
3937 					    false, ACCESS_DIRECT, NULL);
3938 	if (err)
3939 		return err;
3940 
3941 	min_off = reg->smin_value + off;
3942 	max_off = reg->smax_value + off;
3943 	mark_reg_stack_read(env, ptr_state, min_off, max_off + size, dst_regno);
3944 	return 0;
3945 }
3946 
3947 /* check_stack_read dispatches to check_stack_read_fixed_off or
3948  * check_stack_read_var_off.
3949  *
3950  * The caller must ensure that the offset falls within the allocated stack
3951  * bounds.
3952  *
3953  * 'dst_regno' is a register which will receive the value from the stack. It
3954  * can be -1, meaning that the read value is not going to a register.
3955  */
3956 static int check_stack_read(struct bpf_verifier_env *env,
3957 			    int ptr_regno, int off, int size,
3958 			    int dst_regno)
3959 {
3960 	struct bpf_reg_state *reg = reg_state(env, ptr_regno);
3961 	struct bpf_func_state *state = func(env, reg);
3962 	int err;
3963 	/* Some accesses are only permitted with a static offset. */
3964 	bool var_off = !tnum_is_const(reg->var_off);
3965 
3966 	/* The offset is required to be static when reads don't go to a
3967 	 * register, in order to not leak pointers (see
3968 	 * check_stack_read_fixed_off).
3969 	 */
3970 	if (dst_regno < 0 && var_off) {
3971 		char tn_buf[48];
3972 
3973 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3974 		verbose(env, "variable offset stack pointer cannot be passed into helper function; var_off=%s off=%d size=%d\n",
3975 			tn_buf, off, size);
3976 		return -EACCES;
3977 	}
3978 	/* Variable offset is prohibited for unprivileged mode for simplicity
3979 	 * since it requires corresponding support in Spectre masking for stack
3980 	 * ALU. See also retrieve_ptr_limit().
3981 	 */
3982 	if (!env->bypass_spec_v1 && var_off) {
3983 		char tn_buf[48];
3984 
3985 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3986 		verbose(env, "R%d variable offset stack access prohibited for !root, var_off=%s\n",
3987 				ptr_regno, tn_buf);
3988 		return -EACCES;
3989 	}
3990 
3991 	if (!var_off) {
3992 		off += reg->var_off.value;
3993 		err = check_stack_read_fixed_off(env, state, off, size,
3994 						 dst_regno);
3995 	} else {
3996 		/* Variable offset stack reads need more conservative handling
3997 		 * than fixed offset ones. Note that dst_regno >= 0 on this
3998 		 * branch.
3999 		 */
4000 		err = check_stack_read_var_off(env, ptr_regno, off, size,
4001 					       dst_regno);
4002 	}
4003 	return err;
4004 }
4005 
4006 
4007 /* check_stack_write dispatches to check_stack_write_fixed_off or
4008  * check_stack_write_var_off.
4009  *
4010  * 'ptr_regno' is the register used as a pointer into the stack.
4011  * 'off' includes 'ptr_regno->off', but not its variable offset (if any).
4012  * 'value_regno' is the register whose value we're writing to the stack. It can
4013  * be -1, meaning that we're not writing from a register.
4014  *
4015  * The caller must ensure that the offset falls within the maximum stack size.
4016  */
4017 static int check_stack_write(struct bpf_verifier_env *env,
4018 			     int ptr_regno, int off, int size,
4019 			     int value_regno, int insn_idx)
4020 {
4021 	struct bpf_reg_state *reg = reg_state(env, ptr_regno);
4022 	struct bpf_func_state *state = func(env, reg);
4023 	int err;
4024 
4025 	if (tnum_is_const(reg->var_off)) {
4026 		off += reg->var_off.value;
4027 		err = check_stack_write_fixed_off(env, state, off, size,
4028 						  value_regno, insn_idx);
4029 	} else {
4030 		/* Variable offset stack writes need more conservative handling
4031 		 * than fixed offset ones.
4032 		 */
4033 		err = check_stack_write_var_off(env, state,
4034 						ptr_regno, off, size,
4035 						value_regno, insn_idx);
4036 	}
4037 	return err;
4038 }
4039 
4040 static int check_map_access_type(struct bpf_verifier_env *env, u32 regno,
4041 				 int off, int size, enum bpf_access_type type)
4042 {
4043 	struct bpf_reg_state *regs = cur_regs(env);
4044 	struct bpf_map *map = regs[regno].map_ptr;
4045 	u32 cap = bpf_map_flags_to_cap(map);
4046 
4047 	if (type == BPF_WRITE && !(cap & BPF_MAP_CAN_WRITE)) {
4048 		verbose(env, "write into map forbidden, value_size=%d off=%d size=%d\n",
4049 			map->value_size, off, size);
4050 		return -EACCES;
4051 	}
4052 
4053 	if (type == BPF_READ && !(cap & BPF_MAP_CAN_READ)) {
4054 		verbose(env, "read from map forbidden, value_size=%d off=%d size=%d\n",
4055 			map->value_size, off, size);
4056 		return -EACCES;
4057 	}
4058 
4059 	return 0;
4060 }
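
/* E.g. (illustrative): for a map created with the BPF_F_RDONLY_PROG flag,
 * bpf_map_flags_to_cap() returns a mask without BPF_MAP_CAN_WRITE, so a
 * store through its value pointer (here assumed to be in r0), such as
 *
 *    BPF_ST_MEM(BPF_W, BPF_REG_0, 0, 1),
 *
 * is rejected here with "write into map forbidden", while reads through
 * the same pointer remain allowed.
 */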
4061 
4062 /* check read/write into memory region (e.g., map value, ringbuf sample, etc) */
4063 static int __check_mem_access(struct bpf_verifier_env *env, int regno,
4064 			      int off, int size, u32 mem_size,
4065 			      bool zero_size_allowed)
4066 {
4067 	bool size_ok = size > 0 || (size == 0 && zero_size_allowed);
4068 	struct bpf_reg_state *reg;
4069 
4070 	if (off >= 0 && size_ok && (u64)off + size <= mem_size)
4071 		return 0;
4072 
4073 	reg = &cur_regs(env)[regno];
4074 	switch (reg->type) {
4075 	case PTR_TO_MAP_KEY:
4076 		verbose(env, "invalid access to map key, key_size=%d off=%d size=%d\n",
4077 			mem_size, off, size);
4078 		break;
4079 	case PTR_TO_MAP_VALUE:
4080 		verbose(env, "invalid access to map value, value_size=%d off=%d size=%d\n",
4081 			mem_size, off, size);
4082 		break;
4083 	case PTR_TO_PACKET:
4084 	case PTR_TO_PACKET_META:
4085 	case PTR_TO_PACKET_END:
4086 		verbose(env, "invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n",
4087 			off, size, regno, reg->id, off, mem_size);
4088 		break;
4089 	case PTR_TO_MEM:
4090 	default:
4091 		verbose(env, "invalid access to memory, mem_size=%u off=%d size=%d\n",
4092 			mem_size, off, size);
4093 	}
4094 
4095 	return -EACCES;
4096 }
4097 
4098 /* check read/write into a memory region with possible variable offset */
4099 static int check_mem_region_access(struct bpf_verifier_env *env, u32 regno,
4100 				   int off, int size, u32 mem_size,
4101 				   bool zero_size_allowed)
4102 {
4103 	struct bpf_verifier_state *vstate = env->cur_state;
4104 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
4105 	struct bpf_reg_state *reg = &state->regs[regno];
4106 	int err;
4107 
4108 	/* We may have adjusted the register pointing to memory region, so we
4109 	 * need to try adding each of min_value and max_value to off
4110 	 * to make sure our theoretical access will be safe.
4111 	 *
4112 	 * The minimum value is only important with signed
4113 	 * comparisons where we can't assume the floor of a
4114 	 * value is 0.  If we are using signed variables for our
4115 	 * indexes, we need to make sure that whatever we use
4116 	 * will have a set floor within our range.
4117 	 */
4118 	if (reg->smin_value < 0 &&
4119 	    (reg->smin_value == S64_MIN ||
4120 	     (off + reg->smin_value != (s64)(s32)(off + reg->smin_value)) ||
4121 	      reg->smin_value + off < 0)) {
4122 		verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
4123 			regno);
4124 		return -EACCES;
4125 	}
4126 	err = __check_mem_access(env, regno, reg->smin_value + off, size,
4127 				 mem_size, zero_size_allowed);
4128 	if (err) {
4129 		verbose(env, "R%d min value is outside of the allowed memory range\n",
4130 			regno);
4131 		return err;
4132 	}
4133 
4134 	/* If we haven't set a max value then we need to bail since we can't be
4135 	 * sure we won't do bad things.
4136 	 * If reg->umax_value + off could overflow, treat that as unbounded too.
4137 	 */
4138 	if (reg->umax_value >= BPF_MAX_VAR_OFF) {
4139 		verbose(env, "R%d unbounded memory access, make sure to bounds check any such access\n",
4140 			regno);
4141 		return -EACCES;
4142 	}
4143 	err = __check_mem_access(env, regno, reg->umax_value + off, size,
4144 				 mem_size, zero_size_allowed);
4145 	if (err) {
4146 		verbose(env, "R%d max value is outside of the allowed memory range\n",
4147 			regno);
4148 		return err;
4149 	}
4150 
4151 	return 0;
4152 }
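
/* Worked example (illustrative): accessing a 24-byte region through a
 * register whose variable offset is known to be in [0, 16], with off=4
 * and size=4, checks both extremes:
 *
 *    smin_value + off = 0 + 4  -> bytes [4, 8)   within mem_size 24
 *    umax_value + off = 16 + 4 -> bytes [20, 24) within mem_size 24
 *
 * so the access is accepted. Had umax_value been 17, the second check
 * would fail with "max value is outside of the allowed memory range".
 */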
4153 
4154 static int __check_ptr_off_reg(struct bpf_verifier_env *env,
4155 			       const struct bpf_reg_state *reg, int regno,
4156 			       bool fixed_off_ok)
4157 {
4158 	/* Access to this pointer-typed register or passing it to a helper
4159 	 * is only allowed in its original, unmodified form.
4160 	 */
4161 
4162 	if (reg->off < 0) {
4163 		verbose(env, "negative offset %s ptr R%d off=%d disallowed\n",
4164 			reg_type_str(env, reg->type), regno, reg->off);
4165 		return -EACCES;
4166 	}
4167 
4168 	if (!fixed_off_ok && reg->off) {
4169 		verbose(env, "dereference of modified %s ptr R%d off=%d disallowed\n",
4170 			reg_type_str(env, reg->type), regno, reg->off);
4171 		return -EACCES;
4172 	}
4173 
4174 	if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
4175 		char tn_buf[48];
4176 
4177 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
4178 		verbose(env, "variable %s access var_off=%s disallowed\n",
4179 			reg_type_str(env, reg->type), tn_buf);
4180 		return -EACCES;
4181 	}
4182 
4183 	return 0;
4184 }
4185 
4186 int check_ptr_off_reg(struct bpf_verifier_env *env,
4187 		      const struct bpf_reg_state *reg, int regno)
4188 {
4189 	return __check_ptr_off_reg(env, reg, regno, false);
4190 }
4191 
4192 static int map_kptr_match_type(struct bpf_verifier_env *env,
4193 			       struct btf_field *kptr_field,
4194 			       struct bpf_reg_state *reg, u32 regno)
4195 {
4196 	const char *targ_name = kernel_type_name(kptr_field->kptr.btf, kptr_field->kptr.btf_id);
4197 	int perm_flags = PTR_MAYBE_NULL | PTR_TRUSTED;
4198 	const char *reg_name = "";
4199 
4200 	/* Only unreferenced case accepts untrusted pointers */
4201 	if (kptr_field->type == BPF_KPTR_UNREF)
4202 		perm_flags |= PTR_UNTRUSTED;
4203 
4204 	if (base_type(reg->type) != PTR_TO_BTF_ID || (type_flag(reg->type) & ~perm_flags))
4205 		goto bad_type;
4206 
4207 	if (!btf_is_kernel(reg->btf)) {
4208 		verbose(env, "R%d must point to kernel BTF\n", regno);
4209 		return -EINVAL;
4210 	}
4211 	/* We need to verify reg->type and reg->btf, before accessing reg->btf_id */
4212 	reg_name = kernel_type_name(reg->btf, reg->btf_id);
4213 
4214 	/* For ref_ptr case, release function check should ensure we get one
4215 	 * referenced PTR_TO_BTF_ID, and that its fixed offset is 0. For the
4216 	 * normal store of unreferenced kptr, we must ensure var_off is zero.
4217 	 * Since ref_ptr cannot be accessed directly by BPF insns, checks for
4218 	 * reg->off and reg->ref_obj_id are not needed here.
4219 	 */
4220 	if (__check_ptr_off_reg(env, reg, regno, true))
4221 		return -EACCES;
4222 
4223 	/* A full type match is needed, as BTF can be vmlinux or module BTF, and
4224 	 * we also need to take into account the reg->off.
4225 	 *
4226 	 * We want to support cases like:
4227 	 *
4228 	 * struct foo {
4229 	 *         struct bar br;
4230 	 *         struct baz bz;
4231 	 * };
4232 	 *
4233 	 * struct foo *v;
4234 	 * v = func();	      // PTR_TO_BTF_ID
4235 	 * val->foo = v;      // reg->off is zero, btf and btf_id match type
4236 	 * val->bar = &v->br; // reg->off is still zero, but we need to retry with
4237 	 *                    // first member type of struct after comparison fails
4238 	 * val->baz = &v->bz; // reg->off is non-zero, so struct needs to be walked
4239 	 *                    // to match type
4240 	 *
4241 	 * In the kptr_ref case, check_func_arg_reg_off already ensures reg->off
4242 	 * is zero. We must also ensure that btf_struct_ids_match does not walk
4243 	 * the struct to match type against first member of struct, i.e. reject
4244 	 * second case from above. Hence, when type is BPF_KPTR_REF, we set
4245 	 * strict mode to true for type match.
4246 	 */
4247 	if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, reg->off,
4248 				  kptr_field->kptr.btf, kptr_field->kptr.btf_id,
4249 				  kptr_field->type == BPF_KPTR_REF))
4250 		goto bad_type;
4251 	return 0;
4252 bad_type:
4253 	verbose(env, "invalid kptr access, R%d type=%s%s ", regno,
4254 		reg_type_str(env, reg->type), reg_name);
4255 	verbose(env, "expected=%s%s", reg_type_str(env, PTR_TO_BTF_ID), targ_name);
4256 	if (kptr_field->type == BPF_KPTR_UNREF)
4257 		verbose(env, " or %s%s\n", reg_type_str(env, PTR_TO_BTF_ID | PTR_UNTRUSTED),
4258 			targ_name);
4259 	else
4260 		verbose(env, "\n");
4261 	return -EINVAL;
4262 }
4263 
4264 static int check_map_kptr_access(struct bpf_verifier_env *env, u32 regno,
4265 				 int value_regno, int insn_idx,
4266 				 struct btf_field *kptr_field)
4267 {
4268 	struct bpf_insn *insn = &env->prog->insnsi[insn_idx];
4269 	int class = BPF_CLASS(insn->code);
4270 	struct bpf_reg_state *val_reg;
4271 
4272 	/* Things we already checked for in check_map_access and caller:
4273 	 *  - Reject cases where variable offset may touch kptr
4274 	 *  - size of access (must be BPF_DW)
4275 	 *  - tnum_is_const(reg->var_off)
4276 	 *  - kptr_field->offset == off + reg->var_off.value
4277 	 */
4278 	/* Only BPF_[LDX,STX,ST] | BPF_MEM | BPF_DW is supported */
4279 	if (BPF_MODE(insn->code) != BPF_MEM) {
4280 		verbose(env, "kptr in map can only be accessed using BPF_MEM instruction mode\n");
4281 		return -EACCES;
4282 	}
4283 
4284 	/* We only allow loading referenced kptr, since it will be marked as
4285 	 * untrusted, similar to unreferenced kptr.
4286 	 */
4287 	if (class != BPF_LDX && kptr_field->type == BPF_KPTR_REF) {
4288 		verbose(env, "store to referenced kptr disallowed\n");
4289 		return -EACCES;
4290 	}
4291 
4292 	if (class == BPF_LDX) {
4293 		val_reg = reg_state(env, value_regno);
4294 		/* We can simply mark the value_regno receiving the pointer
4295 		 * value from map as PTR_TO_BTF_ID, with the correct type.
4296 		 */
4297 		mark_btf_ld_reg(env, cur_regs(env), value_regno, PTR_TO_BTF_ID, kptr_field->kptr.btf,
4298 				kptr_field->kptr.btf_id, PTR_MAYBE_NULL | PTR_UNTRUSTED);
4299 		/* For mark_ptr_or_null_reg */
4300 		val_reg->id = ++env->id_gen;
4301 	} else if (class == BPF_STX) {
4302 		val_reg = reg_state(env, value_regno);
4303 		if (!register_is_null(val_reg) &&
4304 		    map_kptr_match_type(env, kptr_field, val_reg, value_regno))
4305 			return -EACCES;
4306 	} else if (class == BPF_ST) {
4307 		if (insn->imm) {
4308 			verbose(env, "BPF_ST imm must be 0 when storing to kptr at off=%u\n",
4309 				kptr_field->offset);
4310 			return -EACCES;
4311 		}
4312 	} else {
4313 		verbose(env, "kptr in map can only be accessed using BPF_LDX/BPF_STX/BPF_ST\n");
4314 		return -EACCES;
4315 	}
4316 	return 0;
4317 }
4318 
4319 /* check read/write into a map element with possible variable offset */
4320 static int check_map_access(struct bpf_verifier_env *env, u32 regno,
4321 			    int off, int size, bool zero_size_allowed,
4322 			    enum bpf_access_src src)
4323 {
4324 	struct bpf_verifier_state *vstate = env->cur_state;
4325 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
4326 	struct bpf_reg_state *reg = &state->regs[regno];
4327 	struct bpf_map *map = reg->map_ptr;
4328 	struct btf_record *rec;
4329 	int err, i;
4330 
4331 	err = check_mem_region_access(env, regno, off, size, map->value_size,
4332 				      zero_size_allowed);
4333 	if (err)
4334 		return err;
4335 
4336 	if (IS_ERR_OR_NULL(map->record))
4337 		return 0;
4338 	rec = map->record;
4339 	for (i = 0; i < rec->cnt; i++) {
4340 		struct btf_field *field = &rec->fields[i];
4341 		u32 p = field->offset;
4342 
4343 		/* If any part of a field can be touched by load/store, reject
4344 		 * this program. To check that [x1, x2) overlaps with [y1, y2),
4345 		 * it is sufficient to check x1 < y2 && y1 < x2.
4346 		 */
4347 		if (reg->smin_value + off < p + btf_field_type_size(field->type) &&
4348 		    p < reg->umax_value + off + size) {
4349 			switch (field->type) {
4350 			case BPF_KPTR_UNREF:
4351 			case BPF_KPTR_REF:
4352 				if (src != ACCESS_DIRECT) {
4353 					verbose(env, "kptr cannot be accessed indirectly by helper\n");
4354 					return -EACCES;
4355 				}
4356 				if (!tnum_is_const(reg->var_off)) {
4357 					verbose(env, "kptr access cannot have variable offset\n");
4358 					return -EACCES;
4359 				}
4360 				if (p != off + reg->var_off.value) {
4361 					verbose(env, "kptr access misaligned expected=%u off=%llu\n",
4362 						p, off + reg->var_off.value);
4363 					return -EACCES;
4364 				}
4365 				if (size != bpf_size_to_bytes(BPF_DW)) {
4366 					verbose(env, "kptr access size must be BPF_DW\n");
4367 					return -EACCES;
4368 				}
4369 				break;
4370 			default:
4371 				verbose(env, "%s cannot be accessed directly by load/store\n",
4372 					btf_field_type_name(field->type));
4373 				return -EACCES;
4374 			}
4375 		}
4376 	}
4377 	return 0;
4378 }
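
/* Sketch of the kptr rules above (illustrative): assuming a map value
 * with a kptr field at offset 8 and its pointer in r0, the direct,
 * constant-offset, BPF_DW-sized access
 *
 *    BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 8)
 *
 * passes these checks, whereas a 4-byte access, a variable-offset access,
 * or a helper touching offset 8 indirectly would all be rejected.
 */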
4379 
4380 #define MAX_PACKET_OFF 0xffff
4381 
4382 static bool may_access_direct_pkt_data(struct bpf_verifier_env *env,
4383 				       const struct bpf_call_arg_meta *meta,
4384 				       enum bpf_access_type t)
4385 {
4386 	enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
4387 
4388 	switch (prog_type) {
4389 	/* Program types only with direct read access go here! */
4390 	case BPF_PROG_TYPE_LWT_IN:
4391 	case BPF_PROG_TYPE_LWT_OUT:
4392 	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
4393 	case BPF_PROG_TYPE_SK_REUSEPORT:
4394 	case BPF_PROG_TYPE_FLOW_DISSECTOR:
4395 	case BPF_PROG_TYPE_CGROUP_SKB:
4396 		if (t == BPF_WRITE)
4397 			return false;
4398 		fallthrough;
4399 
4400 	/* Program types with direct read + write access go here! */
4401 	case BPF_PROG_TYPE_SCHED_CLS:
4402 	case BPF_PROG_TYPE_SCHED_ACT:
4403 	case BPF_PROG_TYPE_XDP:
4404 	case BPF_PROG_TYPE_LWT_XMIT:
4405 	case BPF_PROG_TYPE_SK_SKB:
4406 	case BPF_PROG_TYPE_SK_MSG:
4407 		if (meta)
4408 			return meta->pkt_access;
4409 
4410 		env->seen_direct_write = true;
4411 		return true;
4412 
4413 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
4414 		if (t == BPF_WRITE)
4415 			env->seen_direct_write = true;
4416 
4417 		return true;
4418 
4419 	default:
4420 		return false;
4421 	}
4422 }
4423 
4424 static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
4425 			       int size, bool zero_size_allowed)
4426 {
4427 	struct bpf_reg_state *regs = cur_regs(env);
4428 	struct bpf_reg_state *reg = &regs[regno];
4429 	int err;
4430 
4431 	/* We may have added a variable offset to the packet pointer; but any
4432 	 * reg->range we have comes after that.  We are only checking the fixed
4433 	 * offset.
4434 	 */
4435 
4436 	/* We don't allow negative numbers, because we aren't tracking enough
4437 	 * detail to prove they're safe.
4438 	 */
4439 	if (reg->smin_value < 0) {
4440 		verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
4441 			regno);
4442 		return -EACCES;
4443 	}
4444 
4445 	err = reg->range < 0 ? -EINVAL :
4446 	      __check_mem_access(env, regno, off, size, reg->range,
4447 				 zero_size_allowed);
4448 	if (err) {
4449 		verbose(env, "R%d offset is outside of the packet\n", regno);
4450 		return err;
4451 	}
4452 
4453 	/* __check_mem_access has made sure "off + size - 1" is within u16.
4454 	 * reg->umax_value can't be bigger than MAX_PACKET_OFF which is 0xffff,
4455 	 * otherwise find_good_pkt_pointers would have refused to set range info
4456 	 * and __check_mem_access would have rejected this pkt access.
4457 	 * Therefore, "off + reg->umax_value + size - 1" won't overflow u32.
4458 	 */
4459 	env->prog->aux->max_pkt_offset =
4460 		max_t(u32, env->prog->aux->max_pkt_offset,
4461 		      off + reg->umax_value + size - 1);
4462 
4463 	return err;
4464 }
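
/* The usual C-level pattern that gives a packet pointer a non-zero
 * 'range' (an illustrative sketch):
 *
 *    void *data     = (void *)(long)skb->data;
 *    void *data_end = (void *)(long)skb->data_end;
 *
 *    if (data + 14 > data_end)
 *            return 0;
 *    ... bytes [0, 14) of the packet may now be accessed ...
 *
 * The comparison lets find_good_pkt_pointers() set range = 14 on the
 * packet pointer, which is what __check_mem_access() above checks fixed
 * offsets against.
 */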
4465 
4466 /* check access to 'struct bpf_context' fields.  Supports fixed offsets only */
4467 static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size,
4468 			    enum bpf_access_type t, enum bpf_reg_type *reg_type,
4469 			    struct btf **btf, u32 *btf_id)
4470 {
4471 	struct bpf_insn_access_aux info = {
4472 		.reg_type = *reg_type,
4473 		.log = &env->log,
4474 	};
4475 
4476 	if (env->ops->is_valid_access &&
4477 	    env->ops->is_valid_access(off, size, t, env->prog, &info)) {
4478 		/* A non-zero info.ctx_field_size indicates that this field is a
4479 		 * candidate for later verifier transformation to load the whole
4480 		 * field and then apply a mask when accessed with a narrower
4481 		 * access than actual ctx access size. A zero info.ctx_field_size
4482 		 * will only allow for whole field access and rejects any other
4483 		 * type of narrower access.
4484 		 */
4485 		*reg_type = info.reg_type;
4486 
4487 		if (base_type(*reg_type) == PTR_TO_BTF_ID) {
4488 			*btf = info.btf;
4489 			*btf_id = info.btf_id;
4490 		} else {
4491 			env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
4492 		}
4493 		/* remember the offset of last byte accessed in ctx */
4494 		if (env->prog->aux->max_ctx_offset < off + size)
4495 			env->prog->aux->max_ctx_offset = off + size;
4496 		return 0;
4497 	}
4498 
4499 	verbose(env, "invalid bpf_context access off=%d size=%d\n", off, size);
4500 	return -EACCES;
4501 }
4502 
4503 static int check_flow_keys_access(struct bpf_verifier_env *env, int off,
4504 				  int size)
4505 {
4506 	if (size < 0 || off < 0 ||
4507 	    (u64)off + size > sizeof(struct bpf_flow_keys)) {
4508 		verbose(env, "invalid access to flow keys off=%d size=%d\n",
4509 			off, size);
4510 		return -EACCES;
4511 	}
4512 	return 0;
4513 }
4514 
4515 static int check_sock_access(struct bpf_verifier_env *env, int insn_idx,
4516 			     u32 regno, int off, int size,
4517 			     enum bpf_access_type t)
4518 {
4519 	struct bpf_reg_state *regs = cur_regs(env);
4520 	struct bpf_reg_state *reg = &regs[regno];
4521 	struct bpf_insn_access_aux info = {};
4522 	bool valid;
4523 
4524 	if (reg->smin_value < 0) {
4525 		verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
4526 			regno);
4527 		return -EACCES;
4528 	}
4529 
4530 	switch (reg->type) {
4531 	case PTR_TO_SOCK_COMMON:
4532 		valid = bpf_sock_common_is_valid_access(off, size, t, &info);
4533 		break;
4534 	case PTR_TO_SOCKET:
4535 		valid = bpf_sock_is_valid_access(off, size, t, &info);
4536 		break;
4537 	case PTR_TO_TCP_SOCK:
4538 		valid = bpf_tcp_sock_is_valid_access(off, size, t, &info);
4539 		break;
4540 	case PTR_TO_XDP_SOCK:
4541 		valid = bpf_xdp_sock_is_valid_access(off, size, t, &info);
4542 		break;
4543 	default:
4544 		valid = false;
4545 	}
4546 
4547 
4548 	if (valid) {
4549 		env->insn_aux_data[insn_idx].ctx_field_size =
4550 			info.ctx_field_size;
4551 		return 0;
4552 	}
4553 
4554 	verbose(env, "R%d invalid %s access off=%d size=%d\n",
4555 		regno, reg_type_str(env, reg->type), off, size);
4556 
4557 	return -EACCES;
4558 }
4559 
4560 static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
4561 {
4562 	return __is_pointer_value(env->allow_ptr_leaks, reg_state(env, regno));
4563 }
4564 
4565 static bool is_ctx_reg(struct bpf_verifier_env *env, int regno)
4566 {
4567 	const struct bpf_reg_state *reg = reg_state(env, regno);
4568 
4569 	return reg->type == PTR_TO_CTX;
4570 }
4571 
4572 static bool is_sk_reg(struct bpf_verifier_env *env, int regno)
4573 {
4574 	const struct bpf_reg_state *reg = reg_state(env, regno);
4575 
4576 	return type_is_sk_pointer(reg->type);
4577 }
4578 
4579 static bool is_pkt_reg(struct bpf_verifier_env *env, int regno)
4580 {
4581 	const struct bpf_reg_state *reg = reg_state(env, regno);
4582 
4583 	return type_is_pkt_pointer(reg->type);
4584 }
4585 
4586 static bool is_flow_key_reg(struct bpf_verifier_env *env, int regno)
4587 {
4588 	const struct bpf_reg_state *reg = reg_state(env, regno);
4589 
4590 	/* Separate from is_ctx_reg() since we still want to allow BPF_ST here. */
4591 	return reg->type == PTR_TO_FLOW_KEYS;
4592 }
4593 
4594 static bool is_trusted_reg(const struct bpf_reg_state *reg)
4595 {
4596 	/* A referenced register is always trusted. */
4597 	if (reg->ref_obj_id)
4598 		return true;
4599 
4600 	/* If a register is not referenced, it is trusted if it has the
4601 	 * MEM_ALLOC or PTR_TRUSTED type modifiers, and no others. Some of the
4602 	 * other type modifiers may be safe, but we elect to take an opt-in
4603 	 * approach here as some (e.g. PTR_UNTRUSTED and PTR_MAYBE_NULL) are
4604 	 * not.
4605 	 *
4606 	 * Eventually, we should make PTR_TRUSTED the single source of truth
4607 	 * for whether a register is trusted.
4608 	 */
4609 	return type_flag(reg->type) & BPF_REG_TRUSTED_MODIFIERS &&
4610 	       !bpf_type_has_unsafe_modifiers(reg->type);
4611 }
4612 
4613 static bool is_rcu_reg(const struct bpf_reg_state *reg)
4614 {
4615 	return reg->type & MEM_RCU;
4616 }
4617 
4618 static int check_pkt_ptr_alignment(struct bpf_verifier_env *env,
4619 				   const struct bpf_reg_state *reg,
4620 				   int off, int size, bool strict)
4621 {
4622 	struct tnum reg_off;
4623 	int ip_align;
4624 
4625 	/* Byte size accesses are always allowed. */
4626 	if (!strict || size == 1)
4627 		return 0;
4628 
4629 	/* For platforms that do not have a Kconfig enabling
4630 	 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the value of
4631 	 * NET_IP_ALIGN is universally set to '2'.  And on platforms
4632 	 * that do set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, we get
4633 	 * to this code only in strict mode where we want to emulate
4634 	 * the NET_IP_ALIGN==2 checking.  Therefore use an
4635 	 * unconditional IP align value of '2'.
4636 	 */
4637 	ip_align = 2;
4638 
4639 	reg_off = tnum_add(reg->var_off, tnum_const(ip_align + reg->off + off));
4640 	if (!tnum_is_aligned(reg_off, size)) {
4641 		char tn_buf[48];
4642 
4643 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
4644 		verbose(env,
4645 			"misaligned packet access off %d+%s+%d+%d size %d\n",
4646 			ip_align, tn_buf, reg->off, off, size);
4647 		return -EACCES;
4648 	}
4649 
4650 	return 0;
4651 }
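
/* Worked example (illustrative) in strict mode: a 4-byte load at packet
 * offset 26 (e.g. the IPv4 source address behind a 14-byte Ethernet
 * header) checks 2 + 26 = 28 for 4-byte alignment and passes. The same
 * load at offset 24 computes 2 + 24 = 26, which is not 4-byte aligned,
 * and fails with "misaligned packet access".
 */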
4652 
4653 static int check_generic_ptr_alignment(struct bpf_verifier_env *env,
4654 				       const struct bpf_reg_state *reg,
4655 				       const char *pointer_desc,
4656 				       int off, int size, bool strict)
4657 {
4658 	struct tnum reg_off;
4659 
4660 	/* Byte size accesses are always allowed. */
4661 	if (!strict || size == 1)
4662 		return 0;
4663 
4664 	reg_off = tnum_add(reg->var_off, tnum_const(reg->off + off));
4665 	if (!tnum_is_aligned(reg_off, size)) {
4666 		char tn_buf[48];
4667 
4668 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
4669 		verbose(env, "misaligned %saccess off %s+%d+%d size %d\n",
4670 			pointer_desc, tn_buf, reg->off, off, size);
4671 		return -EACCES;
4672 	}
4673 
4674 	return 0;
4675 }
4676 
4677 static int check_ptr_alignment(struct bpf_verifier_env *env,
4678 			       const struct bpf_reg_state *reg, int off,
4679 			       int size, bool strict_alignment_once)
4680 {
4681 	bool strict = env->strict_alignment || strict_alignment_once;
4682 	const char *pointer_desc = "";
4683 
4684 	switch (reg->type) {
4685 	case PTR_TO_PACKET:
4686 	case PTR_TO_PACKET_META:
4687 		/* Special case, because of NET_IP_ALIGN. Given metadata sits
4688 		 * right in front, treat it the very same way.
4689 		 */
4690 		return check_pkt_ptr_alignment(env, reg, off, size, strict);
4691 	case PTR_TO_FLOW_KEYS:
4692 		pointer_desc = "flow keys ";
4693 		break;
4694 	case PTR_TO_MAP_KEY:
4695 		pointer_desc = "key ";
4696 		break;
4697 	case PTR_TO_MAP_VALUE:
4698 		pointer_desc = "value ";
4699 		break;
4700 	case PTR_TO_CTX:
4701 		pointer_desc = "context ";
4702 		break;
4703 	case PTR_TO_STACK:
4704 		pointer_desc = "stack ";
4705 		/* The stack spill tracking logic in check_stack_write_fixed_off()
4706 		 * and check_stack_read_fixed_off() relies on stack accesses being
4707 		 * aligned.
4708 		 */
4709 		strict = true;
4710 		break;
4711 	case PTR_TO_SOCKET:
4712 		pointer_desc = "sock ";
4713 		break;
4714 	case PTR_TO_SOCK_COMMON:
4715 		pointer_desc = "sock_common ";
4716 		break;
4717 	case PTR_TO_TCP_SOCK:
4718 		pointer_desc = "tcp_sock ";
4719 		break;
4720 	case PTR_TO_XDP_SOCK:
4721 		pointer_desc = "xdp_sock ";
4722 		break;
4723 	default:
4724 		break;
4725 	}
4726 	return check_generic_ptr_alignment(env, reg, pointer_desc, off, size,
4727 					   strict);
4728 }
4729 
4730 static int update_stack_depth(struct bpf_verifier_env *env,
4731 			      const struct bpf_func_state *func,
4732 			      int off)
4733 {
4734 	u16 stack = env->subprog_info[func->subprogno].stack_depth;
4735 
4736 	if (stack >= -off)
4737 		return 0;
4738 
4739 	/* update known max for given subprogram */
4740 	env->subprog_info[func->subprogno].stack_depth = -off;
4741 	return 0;
4742 }
4743 
4744 /* starting from main bpf function walk all instructions of the function
4745  * and recursively walk all callees that the given function can call.
4746  * Ignore jump and exit insns.
4747  * Since recursion is prevented by check_cfg() this algorithm
4748  * only needs a local stack of MAX_CALL_FRAMES to remember callsites
4749  */
4750 static int check_max_stack_depth(struct bpf_verifier_env *env)
4751 {
4752 	int depth = 0, frame = 0, idx = 0, i = 0, subprog_end;
4753 	struct bpf_subprog_info *subprog = env->subprog_info;
4754 	struct bpf_insn *insn = env->prog->insnsi;
4755 	bool tail_call_reachable = false;
4756 	int ret_insn[MAX_CALL_FRAMES];
4757 	int ret_prog[MAX_CALL_FRAMES];
4758 	int j;
4759 
4760 process_func:
4761 	/* protect against potential stack overflow that might happen when
4762 	 * bpf2bpf calls get combined with tailcalls. Limit the caller's stack
4763 	 * depth for such a case down to 256 so that the worst case scenario
4764 	 * would result in 8k stack size (32, the tailcall limit, times 256 =
4765 	 * 8k).
4766 	 *
4767 	 * To get the idea what might happen, see an example:
4768 	 * func1 -> sub rsp, 128
4769 	 *  subfunc1 -> sub rsp, 256
4770 	 *  tailcall1 -> add rsp, 256
4771 	 *   func2 -> sub rsp, 192 (total stack size = 128 + 192 = 320)
4772 	 *   subfunc2 -> sub rsp, 64
4773 	 *   subfunc22 -> sub rsp, 128
4774 	 *   tailcall2 -> add rsp, 128
4775 	 *    func3 -> sub rsp, 32 (total stack size 128 + 192 + 64 + 32 = 416)
4776 	 *
4777 	 * tailcall will unwind the current stack frame but it will not get rid
4778 	 * of caller's stack as shown on the example above.
4779 	 */
4780 	if (idx && subprog[idx].has_tail_call && depth >= 256) {
4781 		verbose(env,
4782 			"tail_calls are not allowed when call stack of previous frames is %d bytes. Too large\n",
4783 			depth);
4784 		return -EACCES;
4785 	}
4786 	/* round up to 32 bytes, since this is the granularity
4787 	 * of the interpreter stack size
4788 	 */
4789 	depth += round_up(max_t(u32, subprog[idx].stack_depth, 1), 32);
4790 	if (depth > MAX_BPF_STACK) {
4791 		verbose(env, "combined stack size of %d calls is %d. Too large\n",
4792 			frame + 1, depth);
4793 		return -EACCES;
4794 	}
4795 continue_func:
4796 	subprog_end = subprog[idx + 1].start;
4797 	for (; i < subprog_end; i++) {
4798 		int next_insn;
4799 
4800 		if (!bpf_pseudo_call(insn + i) && !bpf_pseudo_func(insn + i))
4801 			continue;
4802 		/* remember insn and function to return to */
4803 		ret_insn[frame] = i + 1;
4804 		ret_prog[frame] = idx;
4805 
4806 		/* find the callee */
4807 		next_insn = i + insn[i].imm + 1;
4808 		idx = find_subprog(env, next_insn);
4809 		if (idx < 0) {
4810 			WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
4811 				  next_insn);
4812 			return -EFAULT;
4813 		}
4814 		if (subprog[idx].is_async_cb) {
4815 			if (subprog[idx].has_tail_call) {
4816 				verbose(env, "verifier bug. subprog has tail_call and async cb\n");
4817 				return -EFAULT;
4818 			}
4819 			/* async callbacks don't increase bpf prog stack size */
4820 			continue;
4821 		}
4822 		i = next_insn;
4823 
4824 		if (subprog[idx].has_tail_call)
4825 			tail_call_reachable = true;
4826 
4827 		frame++;
4828 		if (frame >= MAX_CALL_FRAMES) {
4829 			verbose(env, "the call stack of %d frames is too deep !\n",
4830 				frame);
4831 			return -E2BIG;
4832 		}
4833 		goto process_func;
4834 	}
4835 	/* if tail call got detected across bpf2bpf calls then mark each of the
4836 	 * currently present subprog frames as tail call reachable subprogs;
4837 	 * this info will be utilized by JIT so that we will be preserving the
4838 	 * tail call counter throughout bpf2bpf calls combined with tailcalls
4839 	 */
4840 	if (tail_call_reachable)
4841 		for (j = 0; j < frame; j++)
4842 			subprog[ret_prog[j]].tail_call_reachable = true;
4843 	if (subprog[0].tail_call_reachable)
4844 		env->prog->aux->tail_call_reachable = true;
4845 
4846 	/* end of for() loop means the last insn of the 'subprog'
4847 	 * was reached. Doesn't matter whether it was JA or EXIT
4848 	 */
4849 	if (frame == 0)
4850 		return 0;
4851 	depth -= round_up(max_t(u32, subprog[idx].stack_depth, 1), 32);
4852 	frame--;
4853 	i = ret_insn[frame];
4854 	idx = ret_prog[frame];
4855 	goto continue_func;
4856 }
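/* Worked example (illustrative sketch, not from the original source): with
 * MAX_BPF_STACK == 512, a chain main() -> sub1() -> sub2() whose stack
 * depths round up to 256 + 128 + 192 bytes sums to 576 and is rejected
 * with "combined stack size of 3 calls is 576. Too large", even though
 * each individual frame fits within the 512-byte limit.
 */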
4857 
4858 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
4859 static int get_callee_stack_depth(struct bpf_verifier_env *env,
4860 				  const struct bpf_insn *insn, int idx)
4861 {
4862 	int start = idx + insn->imm + 1, subprog;
4863 
4864 	subprog = find_subprog(env, start);
4865 	if (subprog < 0) {
4866 		WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
4867 			  start);
4868 		return -EFAULT;
4869 	}
4870 	return env->subprog_info[subprog].stack_depth;
4871 }
4872 #endif
4873 
4874 static int __check_buffer_access(struct bpf_verifier_env *env,
4875 				 const char *buf_info,
4876 				 const struct bpf_reg_state *reg,
4877 				 int regno, int off, int size)
4878 {
4879 	if (off < 0) {
4880 		verbose(env,
4881 			"R%d invalid %s buffer access: off=%d, size=%d\n",
4882 			regno, buf_info, off, size);
4883 		return -EACCES;
4884 	}
4885 	if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
4886 		char tn_buf[48];
4887 
4888 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
4889 		verbose(env,
4890 			"R%d invalid variable buffer offset: off=%d, var_off=%s\n",
4891 			regno, off, tn_buf);
4892 		return -EACCES;
4893 	}
4894 
4895 	return 0;
4896 }
4897 
4898 static int check_tp_buffer_access(struct bpf_verifier_env *env,
4899 				  const struct bpf_reg_state *reg,
4900 				  int regno, int off, int size)
4901 {
4902 	int err;
4903 
4904 	err = __check_buffer_access(env, "tracepoint", reg, regno, off, size);
4905 	if (err)
4906 		return err;
4907 
4908 	if (off + size > env->prog->aux->max_tp_access)
4909 		env->prog->aux->max_tp_access = off + size;
4910 
4911 	return 0;
4912 }
4913 
4914 static int check_buffer_access(struct bpf_verifier_env *env,
4915 			       const struct bpf_reg_state *reg,
4916 			       int regno, int off, int size,
4917 			       bool zero_size_allowed,
4918 			       u32 *max_access)
4919 {
4920 	const char *buf_info = type_is_rdonly_mem(reg->type) ? "rdonly" : "rdwr";
4921 	int err;
4922 
4923 	err = __check_buffer_access(env, buf_info, reg, regno, off, size);
4924 	if (err)
4925 		return err;
4926 
4927 	if (off + size > *max_access)
4928 		*max_access = off + size;
4929 
4930 	return 0;
4931 }
4932 
4933 /* BPF architecture zero extends alu32 ops into 64-bit registers */
4934 static void zext_32_to_64(struct bpf_reg_state *reg)
4935 {
4936 	reg->var_off = tnum_subreg(reg->var_off);
4937 	__reg_assign_32_into_64(reg);
4938 }
4939 
4940 /* truncate register to smaller size (in bytes)
4941  * must be called with size < BPF_REG_SIZE
4942  */
4943 static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
4944 {
4945 	u64 mask;
4946 
4947 	/* clear high bits in bit representation */
4948 	reg->var_off = tnum_cast(reg->var_off, size);
4949 
4950 	/* fix arithmetic bounds */
4951 	mask = ((u64)1 << (size * 8)) - 1;
4952 	if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) {
4953 		reg->umin_value &= mask;
4954 		reg->umax_value &= mask;
4955 	} else {
4956 		reg->umin_value = 0;
4957 		reg->umax_value = mask;
4958 	}
4959 	reg->smin_value = reg->umin_value;
4960 	reg->smax_value = reg->umax_value;
4961 
4962 	/* If size is smaller than the 32-bit register, the 32-bit register
4963 	 * values are also truncated, so we push the 64-bit bounds into the
4964 	 * 32-bit bounds. The bounds above were already truncated to < 32 bits.
4965 	 */
4966 	if (size >= 4)
4967 		return;
4968 	__reg_combine_64_into_32(reg);
4969 }
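/* Worked example (illustrative): truncating to size == 2 gives
 * mask = ((u64)1 << 16) - 1 = 0xffff. If umin/umax agree in the high
 * bits, say [0x10004, 0x10008], the bounds tighten to [4, 8] after
 * masking; if they disagree, the bounds widen to the full [0, 0xffff].
 */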
4970 
4971 static bool bpf_map_is_rdonly(const struct bpf_map *map)
4972 {
4973 	/* A map is considered read-only if the following conditions are true:
4974 	 *
4975 	 * 1) BPF program side cannot change any of the map content. The
4976 	 *    BPF_F_RDONLY_PROG flag persists throughout the lifetime of a map
4977 	 *    and was set at map creation time.
4978 	 * 2) The map value(s) have been initialized from user space by a
4979 	 *    loader and then "frozen", such that no new map update/delete
4980 	 *    operations from syscall side are possible for the rest of
4981 	 *    the map's lifetime from that point onwards.
4982 	 * 3) Any parallel/pending map update/delete operations from syscall
4983 	 *    side have been completed. Only after that point, it's safe to
4984 	 *    assume that map value(s) are immutable.
4985 	 */
4986 	return (map->map_flags & BPF_F_RDONLY_PROG) &&
4987 	       READ_ONCE(map->frozen) &&
4988 	       !bpf_map_write_active(map);
4989 }
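/* For reference, a user-space loader typically establishes all three
 * conditions like this (hedged sketch using libbpf APIs; declarations
 * and error handling omitted):
 *
 *	LIBBPF_OPTS(bpf_map_create_opts, opts,
 *		    .map_flags = BPF_F_RDONLY_PROG);
 *	int fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "ro_map",
 *				sizeof(__u32), sizeof(__u64), 1, &opts);
 *	bpf_map_update_elem(fd, &key, &val, BPF_ANY);
 *	bpf_map_freeze(fd);	// no further syscall-side writes
 */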
4990 
4991 static int bpf_map_direct_read(struct bpf_map *map, int off, int size, u64 *val)
4992 {
4993 	void *ptr;
4994 	u64 addr;
4995 	int err;
4996 
4997 	err = map->ops->map_direct_value_addr(map, &addr, off);
4998 	if (err)
4999 		return err;
5000 	ptr = (void *)(long)addr + off;
5001 
5002 	switch (size) {
5003 	case sizeof(u8):
5004 		*val = (u64)*(u8 *)ptr;
5005 		break;
5006 	case sizeof(u16):
5007 		*val = (u64)*(u16 *)ptr;
5008 		break;
5009 	case sizeof(u32):
5010 		*val = (u64)*(u32 *)ptr;
5011 		break;
5012 	case sizeof(u64):
5013 		*val = *(u64 *)ptr;
5014 		break;
5015 	default:
5016 		return -EINVAL;
5017 	}
5018 	return 0;
5019 }
5020 
5021 #define BTF_TYPE_SAFE_NESTED(__type)  __PASTE(__type, __safe_fields)
5022 
5023 BTF_TYPE_SAFE_NESTED(struct task_struct) {
5024 	const cpumask_t *cpus_ptr;
5025 };
5026 
5027 static bool nested_ptr_is_trusted(struct bpf_verifier_env *env,
5028 				  struct bpf_reg_state *reg,
5029 				  int off)
5030 {
5031 	/* If its parent is not trusted, it can't regain its trusted status. */
5032 	if (!is_trusted_reg(reg))
5033 		return false;
5034 
5035 	BTF_TYPE_EMIT(BTF_TYPE_SAFE_NESTED(struct task_struct));
5036 
5037 	return btf_nested_type_is_trusted(&env->log, reg, off);
5038 }
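/* Illustrative BPF-C fragment (a sketch, not from this file): walking a
 * trusted task_struct through a field listed in
 * BTF_TYPE_SAFE_NESTED(struct task_struct) preserves trust:
 *
 *	struct task_struct *task = bpf_get_current_task_btf();
 *	const cpumask_t *cpus = task->cpus_ptr;	// stays trusted
 *
 * A pointer walked out of any other field of 'task' is demoted and is
 * no longer considered trusted by the logic above.
 */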
5039 
5040 static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
5041 				   struct bpf_reg_state *regs,
5042 				   int regno, int off, int size,
5043 				   enum bpf_access_type atype,
5044 				   int value_regno)
5045 {
5046 	struct bpf_reg_state *reg = regs + regno;
5047 	const struct btf_type *t = btf_type_by_id(reg->btf, reg->btf_id);
5048 	const char *tname = btf_name_by_offset(reg->btf, t->name_off);
5049 	enum bpf_type_flag flag = 0;
5050 	u32 btf_id;
5051 	int ret;
5052 
5053 	if (!env->allow_ptr_leaks) {
5054 		verbose(env,
5055 			"'struct %s' access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN\n",
5056 			tname);
5057 		return -EPERM;
5058 	}
5059 	if (!env->prog->gpl_compatible && btf_is_kernel(reg->btf)) {
5060 		verbose(env,
5061 			"Cannot access kernel 'struct %s' from non-GPL compatible program\n",
5062 			tname);
5063 		return -EINVAL;
5064 	}
5065 	if (off < 0) {
5066 		verbose(env,
5067 			"R%d is ptr_%s invalid negative access: off=%d\n",
5068 			regno, tname, off);
5069 		return -EACCES;
5070 	}
5071 	if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
5072 		char tn_buf[48];
5073 
5074 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
5075 		verbose(env,
5076 			"R%d is ptr_%s invalid variable offset: off=%d, var_off=%s\n",
5077 			regno, tname, off, tn_buf);
5078 		return -EACCES;
5079 	}
5080 
5081 	if (reg->type & MEM_USER) {
5082 		verbose(env,
5083 			"R%d is ptr_%s access user memory: off=%d\n",
5084 			regno, tname, off);
5085 		return -EACCES;
5086 	}
5087 
5088 	if (reg->type & MEM_PERCPU) {
5089 		verbose(env,
5090 			"R%d is ptr_%s access percpu memory: off=%d\n",
5091 			regno, tname, off);
5092 		return -EACCES;
5093 	}
5094 
5095 	if (env->ops->btf_struct_access && !type_is_alloc(reg->type)) {
5096 		if (!btf_is_kernel(reg->btf)) {
5097 			verbose(env, "verifier internal error: reg->btf must be kernel btf\n");
5098 			return -EFAULT;
5099 		}
5100 		ret = env->ops->btf_struct_access(&env->log, reg, off, size, atype, &btf_id, &flag);
5101 	} else {
5102 		/* Writes are permitted with default btf_struct_access for
5103 		 * program allocated objects (which always have ref_obj_id > 0),
5104 		 * but not for untrusted PTR_TO_BTF_ID | MEM_ALLOC.
5105 		 */
5106 		if (atype != BPF_READ && reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) {
5107 			verbose(env, "only read is supported\n");
5108 			return -EACCES;
5109 		}
5110 
5111 		if (type_is_alloc(reg->type) && !type_is_non_owning_ref(reg->type) &&
5112 		    !reg->ref_obj_id) {
5113 			verbose(env, "verifier internal error: ref_obj_id for allocated object must be non-zero\n");
5114 			return -EFAULT;
5115 		}
5116 
5117 		ret = btf_struct_access(&env->log, reg, off, size, atype, &btf_id, &flag);
5118 	}
5119 
5120 	if (ret < 0)
5121 		return ret;
5122 
5123 	/* If this is an untrusted pointer, all pointers formed by walking it
5124 	 * also inherit the untrusted flag.
5125 	 */
5126 	if (type_flag(reg->type) & PTR_UNTRUSTED)
5127 		flag |= PTR_UNTRUSTED;
5128 
5129 	/* By default any pointer obtained from walking a trusted pointer is no
5130 	 * longer trusted, unless the field being accessed has explicitly been
5131 	 * marked as inheriting its parent's state of trust.
5132 	 *
5133 	 * An RCU-protected pointer can also be deemed trusted if we are in an
5134 	 * RCU read region. This case is handled below.
5135 	 */
5136 	if (nested_ptr_is_trusted(env, reg, off))
5137 		flag |= PTR_TRUSTED;
5138 	else
5139 		flag &= ~PTR_TRUSTED;
5140 
5141 	if (flag & MEM_RCU) {
5142 		/* Mark value register as MEM_RCU only if it is protected by
5143 		 * bpf_rcu_read_lock() and the ptr reg is rcu or trusted. MEM_RCU
5144 		 * itself can already indicate trustedness inside the rcu
5145 		 * read lock region. Also mark rcu pointer as PTR_MAYBE_NULL since
5146 		 * it could be null in some cases.
5147 		 */
5148 		if (!env->cur_state->active_rcu_lock ||
5149 		    !(is_trusted_reg(reg) || is_rcu_reg(reg)))
5150 			flag &= ~MEM_RCU;
5151 		else
5152 			flag |= PTR_MAYBE_NULL;
5153 	} else if (reg->type & MEM_RCU) {
5154 		/* ptr (reg) is marked as MEM_RCU, but the struct field is not tagged
5155 		 * with __rcu. Mark the flag as PTR_UNTRUSTED conservatively.
5156 		 */
5157 		flag |= PTR_UNTRUSTED;
5158 	}
5159 
5160 	if (atype == BPF_READ && value_regno >= 0)
5161 		mark_btf_ld_reg(env, regs, value_regno, ret, reg->btf, btf_id, flag);
5162 
5163 	return 0;
5164 }
5165 
5166 static int check_ptr_to_map_access(struct bpf_verifier_env *env,
5167 				   struct bpf_reg_state *regs,
5168 				   int regno, int off, int size,
5169 				   enum bpf_access_type atype,
5170 				   int value_regno)
5171 {
5172 	struct bpf_reg_state *reg = regs + regno;
5173 	struct bpf_map *map = reg->map_ptr;
5174 	struct bpf_reg_state map_reg;
5175 	enum bpf_type_flag flag = 0;
5176 	const struct btf_type *t;
5177 	const char *tname;
5178 	u32 btf_id;
5179 	int ret;
5180 
5181 	if (!btf_vmlinux) {
5182 		verbose(env, "map_ptr access not supported without CONFIG_DEBUG_INFO_BTF\n");
5183 		return -ENOTSUPP;
5184 	}
5185 
5186 	if (!map->ops->map_btf_id || !*map->ops->map_btf_id) {
5187 		verbose(env, "map_ptr access not supported for map type %d\n",
5188 			map->map_type);
5189 		return -ENOTSUPP;
5190 	}
5191 
5192 	t = btf_type_by_id(btf_vmlinux, *map->ops->map_btf_id);
5193 	tname = btf_name_by_offset(btf_vmlinux, t->name_off);
5194 
5195 	if (!env->allow_ptr_leaks) {
5196 		verbose(env,
5197 			"'struct %s' access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN\n",
5198 			tname);
5199 		return -EPERM;
5200 	}
5201 
5202 	if (off < 0) {
5203 		verbose(env, "R%d is %s invalid negative access: off=%d\n",
5204 			regno, tname, off);
5205 		return -EACCES;
5206 	}
5207 
5208 	if (atype != BPF_READ) {
5209 		verbose(env, "only read from %s is supported\n", tname);
5210 		return -EACCES;
5211 	}
5212 
5213 	/* Simulate access to a PTR_TO_BTF_ID */
5214 	memset(&map_reg, 0, sizeof(map_reg));
5215 	mark_btf_ld_reg(env, &map_reg, 0, PTR_TO_BTF_ID, btf_vmlinux, *map->ops->map_btf_id, 0);
5216 	ret = btf_struct_access(&env->log, &map_reg, off, size, atype, &btf_id, &flag);
5217 	if (ret < 0)
5218 		return ret;
5219 
5220 	if (value_regno >= 0)
5221 		mark_btf_ld_reg(env, regs, value_regno, ret, btf_vmlinux, btf_id, flag);
5222 
5223 	return 0;
5224 }
5225 
5226 /* Check that the stack access at the given offset is within bounds. The
5227  * maximum valid offset is -1.
5228  *
5229  * The minimum valid offset is -MAX_BPF_STACK for writes, and
5230  * -state->allocated_stack for reads.
5231  */
5232 static int check_stack_slot_within_bounds(int off,
5233 					  struct bpf_func_state *state,
5234 					  enum bpf_access_type t)
5235 {
5236 	int min_valid_off;
5237 
5238 	if (t == BPF_WRITE)
5239 		min_valid_off = -MAX_BPF_STACK;
5240 	else
5241 		min_valid_off = -state->allocated_stack;
5242 
5243 	if (off < min_valid_off || off > -1)
5244 		return -EACCES;
5245 	return 0;
5246 }
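/* Example: with state->allocated_stack == 64, a read at off == -72 is
 * rejected (beyond what this frame has written so far), while a write
 * at the same offset is accepted because -72 >= -MAX_BPF_STACK and
 * writes are allowed to grow the stack.
 */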
5247 
5248 /* Check that the stack access at 'regno + off' falls within the maximum stack
5249  * bounds.
5250  *
5251  * 'off' includes `regno->offset`, but not its dynamic part (if any).
5252  */
5253 static int check_stack_access_within_bounds(
5254 		struct bpf_verifier_env *env,
5255 		int regno, int off, int access_size,
5256 		enum bpf_access_src src, enum bpf_access_type type)
5257 {
5258 	struct bpf_reg_state *regs = cur_regs(env);
5259 	struct bpf_reg_state *reg = regs + regno;
5260 	struct bpf_func_state *state = func(env, reg);
5261 	int min_off, max_off;
5262 	int err;
5263 	char *err_extra;
5264 
5265 	if (src == ACCESS_HELPER)
5266 		/* We don't know if helpers are reading or writing (or both). */
5267 		err_extra = " indirect access to";
5268 	else if (type == BPF_READ)
5269 		err_extra = " read from";
5270 	else
5271 		err_extra = " write to";
5272 
5273 	if (tnum_is_const(reg->var_off)) {
5274 		min_off = reg->var_off.value + off;
5275 		if (access_size > 0)
5276 			max_off = min_off + access_size - 1;
5277 		else
5278 			max_off = min_off;
5279 	} else {
5280 		if (reg->smax_value >= BPF_MAX_VAR_OFF ||
5281 		    reg->smin_value <= -BPF_MAX_VAR_OFF) {
5282 			verbose(env, "invalid unbounded variable-offset%s stack R%d\n",
5283 				err_extra, regno);
5284 			return -EACCES;
5285 		}
5286 		min_off = reg->smin_value + off;
5287 		if (access_size > 0)
5288 			max_off = reg->smax_value + off + access_size - 1;
5289 		else
5290 			max_off = min_off;
5291 	}
5292 
5293 	err = check_stack_slot_within_bounds(min_off, state, type);
5294 	if (!err)
5295 		err = check_stack_slot_within_bounds(max_off, state, type);
5296 
5297 	if (err) {
5298 		if (tnum_is_const(reg->var_off)) {
5299 			verbose(env, "invalid%s stack R%d off=%d size=%d\n",
5300 				err_extra, regno, off, access_size);
5301 		} else {
5302 			char tn_buf[48];
5303 
5304 			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
5305 			verbose(env, "invalid variable-offset%s stack R%d var_off=%s size=%d\n",
5306 				err_extra, regno, tn_buf, access_size);
5307 		}
5308 	}
5309 	return err;
5310 }
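/* Example: for a variable-offset register with smin_value == -16 and
 * smax_value == 0, off == -8 and access_size == 8, the window checked
 * above is [min_off, max_off] = [-24, -1], i.e. every offset the access
 * could possibly touch must be within bounds.
 */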
5311 
5312 /* check whether memory at (regno + off) is accessible for t = (read | write)
5313  * if t==write, value_regno is a register which value is stored into memory
5314  * if t==read, value_regno is a register which will receive the value from memory
5315  * if t==write && value_regno==-1, some unknown value is stored into memory
5316  * if t==read && value_regno==-1, don't care what we read from memory
5317  */
5318 static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno,
5319 			    int off, int bpf_size, enum bpf_access_type t,
5320 			    int value_regno, bool strict_alignment_once)
5321 {
5322 	struct bpf_reg_state *regs = cur_regs(env);
5323 	struct bpf_reg_state *reg = regs + regno;
5324 	struct bpf_func_state *state;
5325 	int size, err = 0;
5326 
5327 	size = bpf_size_to_bytes(bpf_size);
5328 	if (size < 0)
5329 		return size;
5330 
5331 	/* alignment checks will add in reg->off themselves */
5332 	err = check_ptr_alignment(env, reg, off, size, strict_alignment_once);
5333 	if (err)
5334 		return err;
5335 
5336 	/* for access checks, reg->off is just part of off */
5337 	off += reg->off;
5338 
5339 	if (reg->type == PTR_TO_MAP_KEY) {
5340 		if (t == BPF_WRITE) {
5341 			verbose(env, "write to change key R%d not allowed\n", regno);
5342 			return -EACCES;
5343 		}
5344 
5345 		err = check_mem_region_access(env, regno, off, size,
5346 					      reg->map_ptr->key_size, false);
5347 		if (err)
5348 			return err;
5349 		if (value_regno >= 0)
5350 			mark_reg_unknown(env, regs, value_regno);
5351 	} else if (reg->type == PTR_TO_MAP_VALUE) {
5352 		struct btf_field *kptr_field = NULL;
5353 
5354 		if (t == BPF_WRITE && value_regno >= 0 &&
5355 		    is_pointer_value(env, value_regno)) {
5356 			verbose(env, "R%d leaks addr into map\n", value_regno);
5357 			return -EACCES;
5358 		}
5359 		err = check_map_access_type(env, regno, off, size, t);
5360 		if (err)
5361 			return err;
5362 		err = check_map_access(env, regno, off, size, false, ACCESS_DIRECT);
5363 		if (err)
5364 			return err;
5365 		if (tnum_is_const(reg->var_off))
5366 			kptr_field = btf_record_find(reg->map_ptr->record,
5367 						     off + reg->var_off.value, BPF_KPTR);
5368 		if (kptr_field) {
5369 			err = check_map_kptr_access(env, regno, value_regno, insn_idx, kptr_field);
5370 		} else if (t == BPF_READ && value_regno >= 0) {
5371 			struct bpf_map *map = reg->map_ptr;
5372 
5373 			/* if map is read-only, track its contents as scalars */
5374 			if (tnum_is_const(reg->var_off) &&
5375 			    bpf_map_is_rdonly(map) &&
5376 			    map->ops->map_direct_value_addr) {
5377 				int map_off = off + reg->var_off.value;
5378 				u64 val = 0;
5379 
5380 				err = bpf_map_direct_read(map, map_off, size,
5381 							  &val);
5382 				if (err)
5383 					return err;
5384 
5385 				regs[value_regno].type = SCALAR_VALUE;
5386 				__mark_reg_known(&regs[value_regno], val);
5387 			} else {
5388 				mark_reg_unknown(env, regs, value_regno);
5389 			}
5390 		}
5391 	} else if (base_type(reg->type) == PTR_TO_MEM) {
5392 		bool rdonly_mem = type_is_rdonly_mem(reg->type);
5393 
5394 		if (type_may_be_null(reg->type)) {
5395 			verbose(env, "R%d invalid mem access '%s'\n", regno,
5396 				reg_type_str(env, reg->type));
5397 			return -EACCES;
5398 		}
5399 
5400 		if (t == BPF_WRITE && rdonly_mem) {
5401 			verbose(env, "R%d cannot write into %s\n",
5402 				regno, reg_type_str(env, reg->type));
5403 			return -EACCES;
5404 		}
5405 
5406 		if (t == BPF_WRITE && value_regno >= 0 &&
5407 		    is_pointer_value(env, value_regno)) {
5408 			verbose(env, "R%d leaks addr into mem\n", value_regno);
5409 			return -EACCES;
5410 		}
5411 
5412 		err = check_mem_region_access(env, regno, off, size,
5413 					      reg->mem_size, false);
5414 		if (!err && value_regno >= 0 && (t == BPF_READ || rdonly_mem))
5415 			mark_reg_unknown(env, regs, value_regno);
5416 	} else if (reg->type == PTR_TO_CTX) {
5417 		enum bpf_reg_type reg_type = SCALAR_VALUE;
5418 		struct btf *btf = NULL;
5419 		u32 btf_id = 0;
5420 
5421 		if (t == BPF_WRITE && value_regno >= 0 &&
5422 		    is_pointer_value(env, value_regno)) {
5423 			verbose(env, "R%d leaks addr into ctx\n", value_regno);
5424 			return -EACCES;
5425 		}
5426 
5427 		err = check_ptr_off_reg(env, reg, regno);
5428 		if (err < 0)
5429 			return err;
5430 
5431 		err = check_ctx_access(env, insn_idx, off, size, t, &reg_type, &btf,
5432 				       &btf_id);
5433 		if (err)
5434 			verbose_linfo(env, insn_idx, "; ");
5435 		if (!err && t == BPF_READ && value_regno >= 0) {
5436 			/* ctx access returns either a scalar, or a
5437 			 * PTR_TO_PACKET[_META,_END]. In the latter
5438 			 * case, we know the offset is zero.
5439 			 */
5440 			if (reg_type == SCALAR_VALUE) {
5441 				mark_reg_unknown(env, regs, value_regno);
5442 			} else {
5443 				mark_reg_known_zero(env, regs,
5444 						    value_regno);
5445 				if (type_may_be_null(reg_type))
5446 					regs[value_regno].id = ++env->id_gen;
5447 				/* A load of a ctx field could have an actual
5448 				 * load size different from the one encoded in
5449 				 * the insn. When the dst is a PTR, it is for
5450 				 * sure not a sub-register.
5451 				 */
5452 				regs[value_regno].subreg_def = DEF_NOT_SUBREG;
5453 				if (base_type(reg_type) == PTR_TO_BTF_ID) {
5454 					regs[value_regno].btf = btf;
5455 					regs[value_regno].btf_id = btf_id;
5456 				}
5457 			}
5458 			regs[value_regno].type = reg_type;
5459 		}
5460 
5461 	} else if (reg->type == PTR_TO_STACK) {
5462 		/* Basic bounds checks. */
5463 		err = check_stack_access_within_bounds(env, regno, off, size, ACCESS_DIRECT, t);
5464 		if (err)
5465 			return err;
5466 
5467 		state = func(env, reg);
5468 		err = update_stack_depth(env, state, off);
5469 		if (err)
5470 			return err;
5471 
5472 		if (t == BPF_READ)
5473 			err = check_stack_read(env, regno, off, size,
5474 					       value_regno);
5475 		else
5476 			err = check_stack_write(env, regno, off, size,
5477 						value_regno, insn_idx);
5478 	} else if (reg_is_pkt_pointer(reg)) {
5479 		if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) {
5480 			verbose(env, "cannot write into packet\n");
5481 			return -EACCES;
5482 		}
5483 		if (t == BPF_WRITE && value_regno >= 0 &&
5484 		    is_pointer_value(env, value_regno)) {
5485 			verbose(env, "R%d leaks addr into packet\n",
5486 				value_regno);
5487 			return -EACCES;
5488 		}
5489 		err = check_packet_access(env, regno, off, size, false);
5490 		if (!err && t == BPF_READ && value_regno >= 0)
5491 			mark_reg_unknown(env, regs, value_regno);
5492 	} else if (reg->type == PTR_TO_FLOW_KEYS) {
5493 		if (t == BPF_WRITE && value_regno >= 0 &&
5494 		    is_pointer_value(env, value_regno)) {
5495 			verbose(env, "R%d leaks addr into flow keys\n",
5496 				value_regno);
5497 			return -EACCES;
5498 		}
5499 
5500 		err = check_flow_keys_access(env, off, size);
5501 		if (!err && t == BPF_READ && value_regno >= 0)
5502 			mark_reg_unknown(env, regs, value_regno);
5503 	} else if (type_is_sk_pointer(reg->type)) {
5504 		if (t == BPF_WRITE) {
5505 			verbose(env, "R%d cannot write into %s\n",
5506 				regno, reg_type_str(env, reg->type));
5507 			return -EACCES;
5508 		}
5509 		err = check_sock_access(env, insn_idx, regno, off, size, t);
5510 		if (!err && value_regno >= 0)
5511 			mark_reg_unknown(env, regs, value_regno);
5512 	} else if (reg->type == PTR_TO_TP_BUFFER) {
5513 		err = check_tp_buffer_access(env, reg, regno, off, size);
5514 		if (!err && t == BPF_READ && value_regno >= 0)
5515 			mark_reg_unknown(env, regs, value_regno);
5516 	} else if (base_type(reg->type) == PTR_TO_BTF_ID &&
5517 		   !type_may_be_null(reg->type)) {
5518 		err = check_ptr_to_btf_access(env, regs, regno, off, size, t,
5519 					      value_regno);
5520 	} else if (reg->type == CONST_PTR_TO_MAP) {
5521 		err = check_ptr_to_map_access(env, regs, regno, off, size, t,
5522 					      value_regno);
5523 	} else if (base_type(reg->type) == PTR_TO_BUF) {
5524 		bool rdonly_mem = type_is_rdonly_mem(reg->type);
5525 		u32 *max_access;
5526 
5527 		if (rdonly_mem) {
5528 			if (t == BPF_WRITE) {
5529 				verbose(env, "R%d cannot write into %s\n",
5530 					regno, reg_type_str(env, reg->type));
5531 				return -EACCES;
5532 			}
5533 			max_access = &env->prog->aux->max_rdonly_access;
5534 		} else {
5535 			max_access = &env->prog->aux->max_rdwr_access;
5536 		}
5537 
5538 		err = check_buffer_access(env, reg, regno, off, size, false,
5539 					  max_access);
5540 
5541 		if (!err && value_regno >= 0 && (rdonly_mem || t == BPF_READ))
5542 			mark_reg_unknown(env, regs, value_regno);
5543 	} else {
5544 		verbose(env, "R%d invalid mem access '%s'\n", regno,
5545 			reg_type_str(env, reg->type));
5546 		return -EACCES;
5547 	}
5548 
5549 	if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ &&
5550 	    regs[value_regno].type == SCALAR_VALUE) {
5551 		/* b/h/w load zero-extends, mark upper bits as known 0 */
5552 		coerce_reg_to_size(&regs[value_regno], size);
5553 	}
5554 	return err;
5555 }
5556 
5557 static int check_atomic(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn)
5558 {
5559 	int load_reg;
5560 	int err;
5561 
5562 	switch (insn->imm) {
5563 	case BPF_ADD:
5564 	case BPF_ADD | BPF_FETCH:
5565 	case BPF_AND:
5566 	case BPF_AND | BPF_FETCH:
5567 	case BPF_OR:
5568 	case BPF_OR | BPF_FETCH:
5569 	case BPF_XOR:
5570 	case BPF_XOR | BPF_FETCH:
5571 	case BPF_XCHG:
5572 	case BPF_CMPXCHG:
5573 		break;
5574 	default:
5575 		verbose(env, "BPF_ATOMIC uses invalid atomic opcode %02x\n", insn->imm);
5576 		return -EINVAL;
5577 	}
5578 
5579 	if (BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) {
5580 		verbose(env, "invalid atomic operand size\n");
5581 		return -EINVAL;
5582 	}
5583 
5584 	/* check src1 operand */
5585 	err = check_reg_arg(env, insn->src_reg, SRC_OP);
5586 	if (err)
5587 		return err;
5588 
5589 	/* check src2 operand */
5590 	err = check_reg_arg(env, insn->dst_reg, SRC_OP);
5591 	if (err)
5592 		return err;
5593 
5594 	if (insn->imm == BPF_CMPXCHG) {
5595 		/* Check comparison of R0 with memory location */
5596 		const u32 aux_reg = BPF_REG_0;
5597 
5598 		err = check_reg_arg(env, aux_reg, SRC_OP);
5599 		if (err)
5600 			return err;
5601 
5602 		if (is_pointer_value(env, aux_reg)) {
5603 			verbose(env, "R%d leaks addr into mem\n", aux_reg);
5604 			return -EACCES;
5605 		}
5606 	}
5607 
5608 	if (is_pointer_value(env, insn->src_reg)) {
5609 		verbose(env, "R%d leaks addr into mem\n", insn->src_reg);
5610 		return -EACCES;
5611 	}
5612 
5613 	if (is_ctx_reg(env, insn->dst_reg) ||
5614 	    is_pkt_reg(env, insn->dst_reg) ||
5615 	    is_flow_key_reg(env, insn->dst_reg) ||
5616 	    is_sk_reg(env, insn->dst_reg)) {
5617 		verbose(env, "BPF_ATOMIC stores into R%d %s is not allowed\n",
5618 			insn->dst_reg,
5619 			reg_type_str(env, reg_state(env, insn->dst_reg)->type));
5620 		return -EACCES;
5621 	}
5622 
5623 	if (insn->imm & BPF_FETCH) {
5624 		if (insn->imm == BPF_CMPXCHG)
5625 			load_reg = BPF_REG_0;
5626 		else
5627 			load_reg = insn->src_reg;
5628 
5629 		/* check and record load of old value */
5630 		err = check_reg_arg(env, load_reg, DST_OP);
5631 		if (err)
5632 			return err;
5633 	} else {
5634 		/* This instruction accesses a memory location but doesn't
5635 		 * actually load it into a register.
5636 		 */
5637 		load_reg = -1;
5638 	}
5639 
5640 	/* Check whether we can read the memory, with a second call for the
5641 	 * fetch case to simulate the register fill.
5642 	 */
5643 	err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
5644 			       BPF_SIZE(insn->code), BPF_READ, -1, true);
5645 	if (!err && load_reg >= 0)
5646 		err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
5647 				       BPF_SIZE(insn->code), BPF_READ, load_reg,
5648 				       true);
5649 	if (err)
5650 		return err;
5651 
5652 	/* Check whether we can write into the same memory. */
5653 	err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
5654 			       BPF_SIZE(insn->code), BPF_WRITE, -1, true);
5655 	if (err)
5656 		return err;
5657 
5658 	return 0;
5659 }
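/* Illustrative instruction (a sketch using the BPF_ATOMIC_OP() macro
 * from filter.h): an atomic 64-bit fetch-and-add on the stack,
 *
 *	BPF_ATOMIC_OP(BPF_DW, BPF_ADD | BPF_FETCH, BPF_REG_10, BPF_REG_1, -8)
 *
 * R1 supplies the addend and, because BPF_FETCH is set, receives the
 * old value; the memory operand is R10 + (-8). Both check_mem_access()
 * calls above are exercised: the read/fetch and the write-back.
 */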
5660 
5661 /* When register 'regno' is used to read the stack (either directly or through
5662  * a helper function), make sure that it's within the stack boundary and, depending
5663  * on the access type, that all elements of the stack are initialized.
5664  *
5665  * 'off' includes 'regno->off', but not its dynamic part (if any).
5666  *
5667  * All registers that have been spilled on the stack in the slots within the
5668  * read offsets are marked as read.
5669  */
5670 static int check_stack_range_initialized(
5671 		struct bpf_verifier_env *env, int regno, int off,
5672 		int access_size, bool zero_size_allowed,
5673 		enum bpf_access_src type, struct bpf_call_arg_meta *meta)
5674 {
5675 	struct bpf_reg_state *reg = reg_state(env, regno);
5676 	struct bpf_func_state *state = func(env, reg);
5677 	int err, min_off, max_off, i, j, slot, spi;
5678 	char *err_extra = type == ACCESS_HELPER ? " indirect" : "";
5679 	enum bpf_access_type bounds_check_type;
5680 	/* Some accesses can write anything into the stack, others are
5681 	 * read-only.
5682 	 */
5683 	bool clobber = false;
5684 
5685 	if (access_size == 0 && !zero_size_allowed) {
5686 		verbose(env, "invalid zero-sized read\n");
5687 		return -EACCES;
5688 	}
5689 
5690 	if (type == ACCESS_HELPER) {
5691 		/* The bounds checks for writes are more permissive than for
5692 		 * reads. However, if raw_mode is not set, we'll do extra
5693 		 * checks below.
5694 		 */
5695 		bounds_check_type = BPF_WRITE;
5696 		clobber = true;
5697 	} else {
5698 		bounds_check_type = BPF_READ;
5699 	}
5700 	err = check_stack_access_within_bounds(env, regno, off, access_size,
5701 					       type, bounds_check_type);
5702 	if (err)
5703 		return err;
5704 
5705 
5706 	if (tnum_is_const(reg->var_off)) {
5707 		min_off = max_off = reg->var_off.value + off;
5708 	} else {
5709 		/* Variable offset is prohibited for unprivileged mode for
5710 		 * simplicity since it requires corresponding support in
5711 		 * Spectre masking for stack ALU.
5712 		 * See also retrieve_ptr_limit().
5713 		 */
5714 		if (!env->bypass_spec_v1) {
5715 			char tn_buf[48];
5716 
5717 			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
5718 			verbose(env, "R%d%s variable offset stack access prohibited for !root, var_off=%s\n",
5719 				regno, err_extra, tn_buf);
5720 			return -EACCES;
5721 		}
5722 		/* Only an initialized buffer on the stack may be accessed with
5723 		 * a variable offset. With an uninitialized buffer it's hard to
5724 		 * guarantee that the whole memory is marked as initialized on
5725 		 * helper return, since the specific bounds are unknown, which
5726 		 * may cause uninitialized stack leaking.
5727 		 */
5728 		if (meta && meta->raw_mode)
5729 			meta = NULL;
5730 
5731 		min_off = reg->smin_value + off;
5732 		max_off = reg->smax_value + off;
5733 	}
5734 
5735 	if (meta && meta->raw_mode) {
5736 		/* Ensure we won't be overwriting dynptrs when simulating byte
5737 		 * by byte access in check_helper_call using meta.access_size.
5738 		 * This would be a problem if we have a helper in the future
5739 		 * which takes:
5740 		 *
5741 		 *	helper(uninit_mem, len, dynptr)
5742 		 *
5743 		 * Now, uninit_mem may overlap with the dynptr pointer. Hence,
5744 		 * it may end up writing to the dynptr itself when touching
5745 		 * memory from arg 1. This can be relaxed on a case-by-case
5746 		 * basis for known safe cases, but reject by default due to the
5747 		 * possibility of aliasing.
5748 		 */
5749 		for (i = min_off; i < max_off + access_size; i++) {
5750 			int stack_off = -i - 1;
5751 
5752 			spi = __get_spi(i);
5753 			/* raw_mode may write past allocated_stack */
5754 			if (state->allocated_stack <= stack_off)
5755 				continue;
5756 			if (state->stack[spi].slot_type[stack_off % BPF_REG_SIZE] == STACK_DYNPTR) {
5757 				verbose(env, "potential write to dynptr at off=%d disallowed\n", i);
5758 				return -EACCES;
5759 			}
5760 		}
5761 		meta->access_size = access_size;
5762 		meta->regno = regno;
5763 		return 0;
5764 	}
5765 
5766 	for (i = min_off; i < max_off + access_size; i++) {
5767 		u8 *stype;
5768 
5769 		slot = -i - 1;
5770 		spi = slot / BPF_REG_SIZE;
5771 		if (state->allocated_stack <= slot)
5772 			goto err;
5773 		stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
5774 		if (*stype == STACK_MISC)
5775 			goto mark;
5776 		if ((*stype == STACK_ZERO) ||
5777 		    (*stype == STACK_INVALID && env->allow_uninit_stack)) {
5778 			if (clobber) {
5779 				/* helper can write anything into the stack */
5780 				*stype = STACK_MISC;
5781 			}
5782 			goto mark;
5783 		}
5784 
5785 		if (is_spilled_reg(&state->stack[spi]) &&
5786 		    (state->stack[spi].spilled_ptr.type == SCALAR_VALUE ||
5787 		     env->allow_ptr_leaks)) {
5788 			if (clobber) {
5789 				__mark_reg_unknown(env, &state->stack[spi].spilled_ptr);
5790 				for (j = 0; j < BPF_REG_SIZE; j++)
5791 					scrub_spilled_slot(&state->stack[spi].slot_type[j]);
5792 			}
5793 			goto mark;
5794 		}
5795 
5796 err:
5797 		if (tnum_is_const(reg->var_off)) {
5798 			verbose(env, "invalid%s read from stack R%d off %d+%d size %d\n",
5799 				err_extra, regno, min_off, i - min_off, access_size);
5800 		} else {
5801 			char tn_buf[48];
5802 
5803 			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
5804 			verbose(env, "invalid%s read from stack R%d var_off %s+%d size %d\n",
5805 				err_extra, regno, tn_buf, i - min_off, access_size);
5806 		}
5807 		return -EACCES;
5808 mark:
5809 		/* reading any byte out of 8-byte 'spill_slot' will cause
5810 		 * the whole slot to be marked as 'read'
5811 		 */
5812 		mark_reg_read(env, &state->stack[spi].spilled_ptr,
5813 			      state->stack[spi].spilled_ptr.parent,
5814 			      REG_LIVE_READ64);
5815 		/* We do not set REG_LIVE_WRITTEN for the stack slot, as we
5816 		 * cannot be sure whether the stack slot is written to or not.
5817 		 * Hence, we must still conservatively propagate reads upwards
5818 		 * even if the helper may write to the entire memory range.
5819 		 */
5820 	}
5821 	return update_stack_depth(env, state, min_off);
5822 }
5823 
5824 static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
5825 				   int access_size, bool zero_size_allowed,
5826 				   struct bpf_call_arg_meta *meta)
5827 {
5828 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
5829 	u32 *max_access;
5830 
5831 	switch (base_type(reg->type)) {
5832 	case PTR_TO_PACKET:
5833 	case PTR_TO_PACKET_META:
5834 		return check_packet_access(env, regno, reg->off, access_size,
5835 					   zero_size_allowed);
5836 	case PTR_TO_MAP_KEY:
5837 		if (meta && meta->raw_mode) {
5838 			verbose(env, "R%d cannot write into %s\n", regno,
5839 				reg_type_str(env, reg->type));
5840 			return -EACCES;
5841 		}
5842 		return check_mem_region_access(env, regno, reg->off, access_size,
5843 					       reg->map_ptr->key_size, false);
5844 	case PTR_TO_MAP_VALUE:
5845 		if (check_map_access_type(env, regno, reg->off, access_size,
5846 					  meta && meta->raw_mode ? BPF_WRITE :
5847 					  BPF_READ))
5848 			return -EACCES;
5849 		return check_map_access(env, regno, reg->off, access_size,
5850 					zero_size_allowed, ACCESS_HELPER);
5851 	case PTR_TO_MEM:
5852 		if (type_is_rdonly_mem(reg->type)) {
5853 			if (meta && meta->raw_mode) {
5854 				verbose(env, "R%d cannot write into %s\n", regno,
5855 					reg_type_str(env, reg->type));
5856 				return -EACCES;
5857 			}
5858 		}
5859 		return check_mem_region_access(env, regno, reg->off,
5860 					       access_size, reg->mem_size,
5861 					       zero_size_allowed);
5862 	case PTR_TO_BUF:
5863 		if (type_is_rdonly_mem(reg->type)) {
5864 			if (meta && meta->raw_mode) {
5865 				verbose(env, "R%d cannot write into %s\n", regno,
5866 					reg_type_str(env, reg->type));
5867 				return -EACCES;
5868 			}
5869 
5870 			max_access = &env->prog->aux->max_rdonly_access;
5871 		} else {
5872 			max_access = &env->prog->aux->max_rdwr_access;
5873 		}
5874 		return check_buffer_access(env, reg, regno, reg->off,
5875 					   access_size, zero_size_allowed,
5876 					   max_access);
5877 	case PTR_TO_STACK:
5878 		return check_stack_range_initialized(
5879 				env,
5880 				regno, reg->off, access_size,
5881 				zero_size_allowed, ACCESS_HELPER, meta);
5882 	case PTR_TO_CTX:
5883 		/* in case the function doesn't know how to access the context
5884 		 * (because we are in a program of type SYSCALL, for example),
5885 		 * we cannot statically check its size.
5886 		 * Dynamically check it now.
5887 		 */
5888 		if (!env->ops->convert_ctx_access) {
5889 			enum bpf_access_type atype = meta && meta->raw_mode ? BPF_WRITE : BPF_READ;
5890 			int offset = access_size - 1;
5891 
5892 			/* Allow zero-byte read from PTR_TO_CTX */
5893 			if (access_size == 0)
5894 				return zero_size_allowed ? 0 : -EACCES;
5895 
5896 			return check_mem_access(env, env->insn_idx, regno, offset, BPF_B,
5897 						atype, -1, false);
5898 		}
5899 
5900 		fallthrough;
5901 	default: /* scalar_value or invalid ptr */
5902 		/* Allow zero-byte read from NULL, regardless of pointer type */
5903 		if (zero_size_allowed && access_size == 0 &&
5904 		    register_is_null(reg))
5905 			return 0;
5906 
5907 		verbose(env, "R%d type=%s ", regno,
5908 			reg_type_str(env, reg->type));
5909 		verbose(env, "expected=%s\n", reg_type_str(env, PTR_TO_STACK));
5910 		return -EACCES;
5911 	}
5912 }
5913 
5914 static int check_mem_size_reg(struct bpf_verifier_env *env,
5915 			      struct bpf_reg_state *reg, u32 regno,
5916 			      bool zero_size_allowed,
5917 			      struct bpf_call_arg_meta *meta)
5918 {
5919 	int err;
5920 
5921 	/* This is used to refine r0 return value bounds for helpers
5922 	 * that enforce this value as an upper bound on return values.
5923 	 * See do_refine_retval_range() for helpers that can refine
5924 	 * the return value. The C type of the helper is u32, so we pull the
5925 	 * register bound from umax_value; if it is negative, the verifier
5926 	 * errors out. Only upper bounds can be learned because the retval is
5927 	 * an int type and negative retvals are allowed.
5928 	 */
5929 	meta->msize_max_value = reg->umax_value;
5930 
5931 	/* The register is SCALAR_VALUE; the access check
5932 	 * happens using its boundaries.
5933 	 */
5934 	if (!tnum_is_const(reg->var_off))
5935 		/* For unprivileged variable accesses, disable raw
5936 		 * mode so that the program is required to
5937 		 * initialize all the memory that the helper could
5938 		 * just partially fill up.
5939 		 */
5940 		meta = NULL;
5941 
5942 	if (reg->smin_value < 0) {
5943 		verbose(env, "R%d min value is negative, either use unsigned or 'var &= const'\n",
5944 			regno);
5945 		return -EACCES;
5946 	}
5947 
5948 	if (reg->umin_value == 0) {
5949 		err = check_helper_mem_access(env, regno - 1, 0,
5950 					      zero_size_allowed,
5951 					      meta);
5952 		if (err)
5953 			return err;
5954 	}
5955 
5956 	if (reg->umax_value >= BPF_MAX_VAR_SIZ) {
5957 		verbose(env, "R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n",
5958 			regno);
5959 		return -EACCES;
5960 	}
5961 	err = check_helper_mem_access(env, regno - 1,
5962 				      reg->umax_value,
5963 				      zero_size_allowed, meta);
5964 	if (!err)
5965 		err = mark_chain_precision(env, regno);
5966 	return err;
5967 }
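/* The bounding patterns suggested by the messages above look like this
 * in BPF C (illustrative sketch; 'sz' is an otherwise-unbounded u32):
 *
 *	if (sz < sizeof(buf))		// umax becomes sizeof(buf) - 1
 *		bpf_probe_read_kernel(buf, sz, src);
 *
 *	sz &= 0xff;			// umax becomes 0xff
 */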
5968 
5969 int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
5970 		   u32 regno, u32 mem_size)
5971 {
5972 	bool may_be_null = type_may_be_null(reg->type);
5973 	struct bpf_reg_state saved_reg;
5974 	struct bpf_call_arg_meta meta;
5975 	int err;
5976 
5977 	if (register_is_null(reg))
5978 		return 0;
5979 
5980 	memset(&meta, 0, sizeof(meta));
5981 	/* Assuming that the register contains a value, check if the memory
5982 	 * access is safe. Temporarily save and restore the register's state as
5983 	 * the conversion shouldn't be visible to a caller.
5984 	 */
5985 	if (may_be_null) {
5986 		saved_reg = *reg;
5987 		mark_ptr_not_null_reg(reg);
5988 	}
5989 
5990 	err = check_helper_mem_access(env, regno, mem_size, true, &meta);
5991 	/* Check access for BPF_WRITE */
5992 	meta.raw_mode = true;
5993 	err = err ?: check_helper_mem_access(env, regno, mem_size, true, &meta);
5994 
5995 	if (may_be_null)
5996 		*reg = saved_reg;
5997 
5998 	return err;
5999 }
6000 
6001 static int check_kfunc_mem_size_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
6002 				    u32 regno)
6003 {
6004 	struct bpf_reg_state *mem_reg = &cur_regs(env)[regno - 1];
6005 	bool may_be_null = type_may_be_null(mem_reg->type);
6006 	struct bpf_reg_state saved_reg;
6007 	struct bpf_call_arg_meta meta;
6008 	int err;
6009 
6010 	WARN_ON_ONCE(regno < BPF_REG_2 || regno > BPF_REG_5);
6011 
6012 	memset(&meta, 0, sizeof(meta));
6013 
6014 	if (may_be_null) {
6015 		saved_reg = *mem_reg;
6016 		mark_ptr_not_null_reg(mem_reg);
6017 	}
6018 
6019 	err = check_mem_size_reg(env, reg, regno, true, &meta);
6020 	/* Check access for BPF_WRITE */
6021 	meta.raw_mode = true;
6022 	err = err ?: check_mem_size_reg(env, reg, regno, true, &meta);
6023 
6024 	if (may_be_null)
6025 		*mem_reg = saved_reg;
6026 	return err;
6027 }
6028 
6029 /* Implementation details:
6030  * bpf_map_lookup returns PTR_TO_MAP_VALUE_OR_NULL.
6031  * bpf_obj_new returns PTR_TO_BTF_ID | MEM_ALLOC | PTR_MAYBE_NULL.
6032  * Two bpf_map_lookups (even with the same key) will have different reg->id.
6033  * Two separate bpf_obj_new will also have different reg->id.
6034  * For traditional PTR_TO_MAP_VALUE or PTR_TO_BTF_ID | MEM_ALLOC, the verifier
6035  * clears reg->id after value_or_null->value transition, since the verifier only
6036 	 * cares about the range of access to a valid map value pointer and
6037 	 * doesn't care about the actual address of the map element.
6038  * For maps with 'struct bpf_spin_lock' inside map value the verifier keeps
6039  * reg->id > 0 after value_or_null->value transition. By doing so
6040  * two bpf_map_lookups will be considered two different pointers that
6041  * point to different bpf_spin_locks. Likewise for pointers to allocated objects
6042  * returned from bpf_obj_new.
6043  * The verifier allows taking only one bpf_spin_lock at a time to avoid
6044  * dead-locks.
6045  * Since only one bpf_spin_lock is allowed the checks are simpler than
6046  * reg_is_refcounted() logic. The verifier needs to remember only
6047  * one spin_lock instead of array of acquired_refs.
6048  * cur_state->active_lock remembers which map value element or allocated
6049  * object got locked and clears it after bpf_spin_unlock.
6050  */
6051 static int process_spin_lock(struct bpf_verifier_env *env, int regno,
6052 			     bool is_lock)
6053 {
6054 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
6055 	struct bpf_verifier_state *cur = env->cur_state;
6056 	bool is_const = tnum_is_const(reg->var_off);
6057 	u64 val = reg->var_off.value;
6058 	struct bpf_map *map = NULL;
6059 	struct btf *btf = NULL;
6060 	struct btf_record *rec;
6061 
6062 	if (!is_const) {
6063 		verbose(env,
6064 			"R%d doesn't have constant offset. bpf_spin_lock has to be at the constant offset\n",
6065 			regno);
6066 		return -EINVAL;
6067 	}
6068 	if (reg->type == PTR_TO_MAP_VALUE) {
6069 		map = reg->map_ptr;
6070 		if (!map->btf) {
6071 			verbose(env,
6072 				"map '%s' has to have BTF in order to use bpf_spin_lock\n",
6073 				map->name);
6074 			return -EINVAL;
6075 		}
6076 	} else {
6077 		btf = reg->btf;
6078 	}
6079 
6080 	rec = reg_btf_record(reg);
6081 	if (!btf_record_has_field(rec, BPF_SPIN_LOCK)) {
6082 		verbose(env, "%s '%s' has no valid bpf_spin_lock\n", map ? "map" : "local",
6083 			map ? map->name : "kptr");
6084 		return -EINVAL;
6085 	}
6086 	if (rec->spin_lock_off != val + reg->off) {
6087 		verbose(env, "off %lld doesn't point to 'struct bpf_spin_lock' that is at %d\n",
6088 			val + reg->off, rec->spin_lock_off);
6089 		return -EINVAL;
6090 	}
6091 	if (is_lock) {
6092 		if (cur->active_lock.ptr) {
6093 			verbose(env,
6094 				"Locking two bpf_spin_locks is not allowed\n");
6095 			return -EINVAL;
6096 		}
6097 		if (map)
6098 			cur->active_lock.ptr = map;
6099 		else
6100 			cur->active_lock.ptr = btf;
6101 		cur->active_lock.id = reg->id;
6102 	} else {
6103 		void *ptr;
6104 
6105 		if (map)
6106 			ptr = map;
6107 		else
6108 			ptr = btf;
6109 
6110 		if (!cur->active_lock.ptr) {
6111 			verbose(env, "bpf_spin_unlock without taking a lock\n");
6112 			return -EINVAL;
6113 		}
6114 		if (cur->active_lock.ptr != ptr ||
6115 		    cur->active_lock.id != reg->id) {
6116 			verbose(env, "bpf_spin_unlock of different lock\n");
6117 			return -EINVAL;
6118 		}
6119 
6120 		invalidate_non_owning_refs(env);
6121 
6122 		cur->active_lock.ptr = NULL;
6123 		cur->active_lock.id = 0;
6124 	}
6125 	return 0;
6126 }
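/* Typical usage from BPF C (illustrative sketch):
 *
 *	struct elem {
 *		struct bpf_spin_lock lock;
 *		int data;
 *	};
 *
 *	struct elem *v = bpf_map_lookup_elem(&map, &key);
 *	if (v) {
 *		bpf_spin_lock(&v->lock);
 *		v->data++;
 *		bpf_spin_unlock(&v->lock);
 *	}
 *
 * Taking a second lock before the unlock would trip the
 * "Locking two bpf_spin_locks is not allowed" check above.
 */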
6127 
6128 static int process_timer_func(struct bpf_verifier_env *env, int regno,
6129 			      struct bpf_call_arg_meta *meta)
6130 {
6131 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
6132 	bool is_const = tnum_is_const(reg->var_off);
6133 	struct bpf_map *map = reg->map_ptr;
6134 	u64 val = reg->var_off.value;
6135 
6136 	if (!is_const) {
6137 		verbose(env,
6138 			"R%d doesn't have constant offset. bpf_timer has to be at the constant offset\n",
6139 			regno);
6140 		return -EINVAL;
6141 	}
6142 	if (!map->btf) {
6143 		verbose(env, "map '%s' has to have BTF in order to use bpf_timer\n",
6144 			map->name);
6145 		return -EINVAL;
6146 	}
6147 	if (!btf_record_has_field(map->record, BPF_TIMER)) {
6148 		verbose(env, "map '%s' has no valid bpf_timer\n", map->name);
6149 		return -EINVAL;
6150 	}
6151 	if (map->record->timer_off != val + reg->off) {
6152 		verbose(env, "off %lld doesn't point to 'struct bpf_timer' that is at %d\n",
6153 			val + reg->off, map->record->timer_off);
6154 		return -EINVAL;
6155 	}
6156 	if (meta->map_ptr) {
6157 		verbose(env, "verifier bug. Two map pointers in a timer helper\n");
6158 		return -EFAULT;
6159 	}
6160 	meta->map_uid = reg->map_uid;
6161 	meta->map_ptr = map;
6162 	return 0;
6163 }
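/* Expected map value shape (illustrative sketch):
 *
 *	struct elem {
 *		struct bpf_timer t;
 *	};
 *
 * bpf_timer_init(&v->t, &map, CLOCK_MONOTONIC) must be called with the
 * exact offset recorded in map->record->timer_off, which is what the
 * constant-offset checks above enforce.
 */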
6164 
6165 static int process_kptr_func(struct bpf_verifier_env *env, int regno,
6166 			     struct bpf_call_arg_meta *meta)
6167 {
6168 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
6169 	struct bpf_map *map_ptr = reg->map_ptr;
6170 	struct btf_field *kptr_field;
6171 	u32 kptr_off;
6172 
6173 	if (!tnum_is_const(reg->var_off)) {
6174 		verbose(env,
6175 			"R%d doesn't have constant offset. kptr has to be at the constant offset\n",
6176 			regno);
6177 		return -EINVAL;
6178 	}
6179 	if (!map_ptr->btf) {
6180 		verbose(env, "map '%s' has to have BTF in order to use bpf_kptr_xchg\n",
6181 			map_ptr->name);
6182 		return -EINVAL;
6183 	}
6184 	if (!btf_record_has_field(map_ptr->record, BPF_KPTR)) {
6185 		verbose(env, "map '%s' has no valid kptr\n", map_ptr->name);
6186 		return -EINVAL;
6187 	}
6188 
6189 	meta->map_ptr = map_ptr;
6190 	kptr_off = reg->off + reg->var_off.value;
6191 	kptr_field = btf_record_find(map_ptr->record, kptr_off, BPF_KPTR);
6192 	if (!kptr_field) {
6193 		verbose(env, "off=%d doesn't point to kptr\n", kptr_off);
6194 		return -EACCES;
6195 	}
6196 	if (kptr_field->type != BPF_KPTR_REF) {
6197 		verbose(env, "off=%d kptr isn't referenced kptr\n", kptr_off);
6198 		return -EACCES;
6199 	}
6200 	meta->kptr_field = kptr_field;
6201 	return 0;
6202 }
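/* Illustrative map value layout (a sketch; the type name is a
 * placeholder, and the field must be recorded as BPF_KPTR_REF in the
 * map's btf_record for bpf_kptr_xchg() to accept it):
 *
 *	struct map_value {
 *		struct some_kernel_type __kptr_ref *ptr;
 *	};
 *
 * Passing any offset other than that of 'ptr' fails with
 * "off=%d doesn't point to kptr".
 */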
6203 
6204 /* There are two register types representing a bpf_dynptr, one is PTR_TO_STACK
6205  * which points to a stack slot, and the other is CONST_PTR_TO_DYNPTR.
6206  *
6207  * In both cases we deal with the first 8 bytes, but need to mark the next 8
6208  * bytes as STACK_DYNPTR in case of PTR_TO_STACK. In case of
6209  * CONST_PTR_TO_DYNPTR, we are guaranteed to get the beginning of the object.
6210  *
6211  * Mutability of bpf_dynptr is at two levels, one is at the level of struct
6212  * bpf_dynptr itself, i.e. whether the helper is receiving a pointer to struct
6213  * bpf_dynptr or pointer to const struct bpf_dynptr. In the former case, it can
6214  * mutate the view of the dynptr and also possibly destroy it. In the latter
6215  * case, it cannot mutate the bpf_dynptr itself but it can still mutate the
6216  * memory that dynptr points to.
6217  *
6218  * The verifier will keep track of both levels of mutation (bpf_dynptr's in
6219  * reg->type and the memory's in reg->dynptr.type), but there is no support for
6220  * readonly dynptr view yet, hence only the first case is tracked and checked.
6221  *
6222  * This is consistent with how C applies the const modifier to a struct object,
6223  * where the pointer itself inside bpf_dynptr becomes const but not what it
6224  * points to.
6225  *
6226  * Helpers which do not mutate the bpf_dynptr set MEM_RDONLY in their argument
6227  * type, and declare it as 'const struct bpf_dynptr *' in their prototype.
6228  */
6229 int process_dynptr_func(struct bpf_verifier_env *env, int regno,
6230 			enum bpf_arg_type arg_type, struct bpf_call_arg_meta *meta)
6231 {
6232 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
6233 	int spi = 0;
6234 
6235 	/* MEM_UNINIT and MEM_RDONLY are exclusive, when applied to an
6236 	 * ARG_PTR_TO_DYNPTR (or ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_*):
6237 	 */
6238 	if ((arg_type & (MEM_UNINIT | MEM_RDONLY)) == (MEM_UNINIT | MEM_RDONLY)) {
6239 		verbose(env, "verifier internal error: misconfigured dynptr helper type flags\n");
6240 		return -EFAULT;
6241 	}
6242 	/* CONST_PTR_TO_DYNPTR already has its fixed offset and var_off as 0
6243 	 * due to check_func_arg_reg_off's logic. We only need to check offset
6244 	 * and its alignment for PTR_TO_STACK.
6245 	 */
6246 	if (reg->type == PTR_TO_STACK) {
6247 		spi = dynptr_get_spi(env, reg);
6248 		if (spi < 0 && spi != -ERANGE)
6249 			return spi;
6250 	}
6251 
6252 	/*  MEM_UNINIT - Points to memory that is an appropriate candidate for
6253 	 *		 constructing a mutable bpf_dynptr object.
6254 	 *
6255 	 *		 Currently, this is only possible with PTR_TO_STACK
6256 	 *		 pointing to a region of at least 16 bytes which doesn't
6257 	 *		 contain an existing bpf_dynptr.
6258 	 *
6259 	 *  MEM_RDONLY - Points to an initialized bpf_dynptr that will not be
6260 	 *		 mutated or destroyed. However, the memory it points to
6261 	 *		 may be mutated.
6262 	 *
6263 	 *  None       - Points to an initialized dynptr that can be mutated and
6264 	 *		 destroyed, including mutation of the memory it points
6265 	 *		 to.
6266 	 */
6267 	if (arg_type & MEM_UNINIT) {
6268 		if (!is_dynptr_reg_valid_uninit(env, reg, spi)) {
6269 			verbose(env, "Dynptr has to be an uninitialized dynptr\n");
6270 			return -EINVAL;
6271 		}
6272 
6273 		/* We only support one dynptr being uninitialized at the moment,
6274 		 * which is sufficient for the helper functions we have right now.
6275 		 */
6276 		if (meta->uninit_dynptr_regno) {
6277 			verbose(env, "verifier internal error: multiple uninitialized dynptr args\n");
6278 			return -EFAULT;
6279 		}
6280 
6281 		meta->uninit_dynptr_regno = regno;
6282 	} else /* MEM_RDONLY and None case from above */ {
6283 		int err;
6284 
6285 		/* For the reg->type == PTR_TO_STACK case, bpf_dynptr is never const */
6286 		if (reg->type == CONST_PTR_TO_DYNPTR && !(arg_type & MEM_RDONLY)) {
6287 			verbose(env, "cannot pass pointer to const bpf_dynptr, the helper mutates it\n");
6288 			return -EINVAL;
6289 		}
6290 
6291 		if (!is_dynptr_reg_valid_init(env, reg, spi)) {
6292 			verbose(env,
6293 				"Expected an initialized dynptr as arg #%d\n",
6294 				regno);
6295 			return -EINVAL;
6296 		}
6297 
6298 		/* Fold modifiers (in this case, MEM_RDONLY) when checking expected type */
6299 		if (!is_dynptr_type_expected(env, reg, arg_type & ~MEM_RDONLY)) {
6300 			const char *err_extra = "";
6301 
6302 			switch (arg_type & DYNPTR_TYPE_FLAG_MASK) {
6303 			case DYNPTR_TYPE_LOCAL:
6304 				err_extra = "local";
6305 				break;
6306 			case DYNPTR_TYPE_RINGBUF:
6307 				err_extra = "ringbuf";
6308 				break;
6309 			default:
6310 				err_extra = "<unknown>";
6311 				break;
6312 			}
6313 			verbose(env,
6314 				"Expected a dynptr of type %s as arg #%d\n",
6315 				err_extra, regno);
6316 			return -EINVAL;
6317 		}
6318 
6319 		err = mark_dynptr_read(env, reg);
6320 		if (err)
6321 			return err;
6322 	}
6323 	return 0;
6324 }
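/* The prototype conventions described above, shown on two existing
 * helpers (illustrative sketch of the UAPI helper prototypes):
 *
 *	// may initialize/mutate the dynptr itself: non-const pointer
 *	long bpf_dynptr_from_mem(void *data, u32 size, u64 flags,
 *				 struct bpf_dynptr *ptr);
 *
 *	// only reads through the dynptr view: const pointer + MEM_RDONLY
 *	long bpf_dynptr_read(void *dst, u32 len,
 *			     const struct bpf_dynptr *src, u32 offset,
 *			     u64 flags);
 */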
6325 
6326 static bool arg_type_is_mem_size(enum bpf_arg_type type)
6327 {
6328 	return type == ARG_CONST_SIZE ||
6329 	       type == ARG_CONST_SIZE_OR_ZERO;
6330 }
6331 
6332 static bool arg_type_is_release(enum bpf_arg_type type)
6333 {
6334 	return type & OBJ_RELEASE;
6335 }
6336 
6337 static bool arg_type_is_dynptr(enum bpf_arg_type type)
6338 {
6339 	return base_type(type) == ARG_PTR_TO_DYNPTR;
6340 }
6341 
6342 static int int_ptr_type_to_size(enum bpf_arg_type type)
6343 {
6344 	if (type == ARG_PTR_TO_INT)
6345 		return sizeof(u32);
6346 	else if (type == ARG_PTR_TO_LONG)
6347 		return sizeof(u64);
6348 
6349 	return -EINVAL;
6350 }
6351 
6352 static int resolve_map_arg_type(struct bpf_verifier_env *env,
6353 				 const struct bpf_call_arg_meta *meta,
6354 				 enum bpf_arg_type *arg_type)
6355 {
6356 	if (!meta->map_ptr) {
6357 		/* kernel subsystem misconfigured verifier */
6358 		verbose(env, "invalid map_ptr to access map->type\n");
6359 		return -EACCES;
6360 	}
6361 
6362 	switch (meta->map_ptr->map_type) {
6363 	case BPF_MAP_TYPE_SOCKMAP:
6364 	case BPF_MAP_TYPE_SOCKHASH:
6365 		if (*arg_type == ARG_PTR_TO_MAP_VALUE) {
6366 			*arg_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON;
6367 		} else {
6368 			verbose(env, "invalid arg_type for sockmap/sockhash\n");
6369 			return -EINVAL;
6370 		}
6371 		break;
6372 	case BPF_MAP_TYPE_BLOOM_FILTER:
6373 		if (meta->func_id == BPF_FUNC_map_peek_elem)
6374 			*arg_type = ARG_PTR_TO_MAP_VALUE;
6375 		break;
6376 	default:
6377 		break;
6378 	}
6379 	return 0;
6380 }
6381 
6382 struct bpf_reg_types {
6383 	const enum bpf_reg_type types[10];
6384 	u32 *btf_id;
6385 };
6386 
6387 static const struct bpf_reg_types sock_types = {
6388 	.types = {
6389 		PTR_TO_SOCK_COMMON,
6390 		PTR_TO_SOCKET,
6391 		PTR_TO_TCP_SOCK,
6392 		PTR_TO_XDP_SOCK,
6393 	},
6394 };
6395 
6396 #ifdef CONFIG_NET
6397 static const struct bpf_reg_types btf_id_sock_common_types = {
6398 	.types = {
6399 		PTR_TO_SOCK_COMMON,
6400 		PTR_TO_SOCKET,
6401 		PTR_TO_TCP_SOCK,
6402 		PTR_TO_XDP_SOCK,
6403 		PTR_TO_BTF_ID,
6404 		PTR_TO_BTF_ID | PTR_TRUSTED,
6405 	},
6406 	.btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
6407 };
6408 #endif
6409 
6410 static const struct bpf_reg_types mem_types = {
6411 	.types = {
6412 		PTR_TO_STACK,
6413 		PTR_TO_PACKET,
6414 		PTR_TO_PACKET_META,
6415 		PTR_TO_MAP_KEY,
6416 		PTR_TO_MAP_VALUE,
6417 		PTR_TO_MEM,
6418 		PTR_TO_MEM | MEM_RINGBUF,
6419 		PTR_TO_BUF,
6420 	},
6421 };
6422 
6423 static const struct bpf_reg_types int_ptr_types = {
6424 	.types = {
6425 		PTR_TO_STACK,
6426 		PTR_TO_PACKET,
6427 		PTR_TO_PACKET_META,
6428 		PTR_TO_MAP_KEY,
6429 		PTR_TO_MAP_VALUE,
6430 	},
6431 };
6432 
6433 static const struct bpf_reg_types spin_lock_types = {
6434 	.types = {
6435 		PTR_TO_MAP_VALUE,
6436 		PTR_TO_BTF_ID | MEM_ALLOC,
6437 	}
6438 };
6439 
6440 static const struct bpf_reg_types fullsock_types = { .types = { PTR_TO_SOCKET } };
6441 static const struct bpf_reg_types scalar_types = { .types = { SCALAR_VALUE } };
6442 static const struct bpf_reg_types context_types = { .types = { PTR_TO_CTX } };
6443 static const struct bpf_reg_types ringbuf_mem_types = { .types = { PTR_TO_MEM | MEM_RINGBUF } };
6444 static const struct bpf_reg_types const_map_ptr_types = { .types = { CONST_PTR_TO_MAP } };
6445 static const struct bpf_reg_types btf_ptr_types = {
6446 	.types = {
6447 		PTR_TO_BTF_ID,
6448 		PTR_TO_BTF_ID | PTR_TRUSTED,
6449 		PTR_TO_BTF_ID | MEM_RCU,
6450 	},
6451 };
6452 static const struct bpf_reg_types percpu_btf_ptr_types = {
6453 	.types = {
6454 		PTR_TO_BTF_ID | MEM_PERCPU,
6455 		PTR_TO_BTF_ID | MEM_PERCPU | PTR_TRUSTED,
6456 	}
6457 };
6458 static const struct bpf_reg_types func_ptr_types = { .types = { PTR_TO_FUNC } };
6459 static const struct bpf_reg_types stack_ptr_types = { .types = { PTR_TO_STACK } };
6460 static const struct bpf_reg_types const_str_ptr_types = { .types = { PTR_TO_MAP_VALUE } };
6461 static const struct bpf_reg_types timer_types = { .types = { PTR_TO_MAP_VALUE } };
6462 static const struct bpf_reg_types kptr_types = { .types = { PTR_TO_MAP_VALUE } };
6463 static const struct bpf_reg_types dynptr_types = {
6464 	.types = {
6465 		PTR_TO_STACK,
6466 		CONST_PTR_TO_DYNPTR,
6467 	}
6468 };
6469 
6470 static const struct bpf_reg_types *compatible_reg_types[__BPF_ARG_TYPE_MAX] = {
6471 	[ARG_PTR_TO_MAP_KEY]		= &mem_types,
6472 	[ARG_PTR_TO_MAP_VALUE]		= &mem_types,
6473 	[ARG_CONST_SIZE]		= &scalar_types,
6474 	[ARG_CONST_SIZE_OR_ZERO]	= &scalar_types,
6475 	[ARG_CONST_ALLOC_SIZE_OR_ZERO]	= &scalar_types,
6476 	[ARG_CONST_MAP_PTR]		= &const_map_ptr_types,
6477 	[ARG_PTR_TO_CTX]		= &context_types,
6478 	[ARG_PTR_TO_SOCK_COMMON]	= &sock_types,
6479 #ifdef CONFIG_NET
6480 	[ARG_PTR_TO_BTF_ID_SOCK_COMMON]	= &btf_id_sock_common_types,
6481 #endif
6482 	[ARG_PTR_TO_SOCKET]		= &fullsock_types,
6483 	[ARG_PTR_TO_BTF_ID]		= &btf_ptr_types,
6484 	[ARG_PTR_TO_SPIN_LOCK]		= &spin_lock_types,
6485 	[ARG_PTR_TO_MEM]		= &mem_types,
6486 	[ARG_PTR_TO_RINGBUF_MEM]	= &ringbuf_mem_types,
6487 	[ARG_PTR_TO_INT]		= &int_ptr_types,
6488 	[ARG_PTR_TO_LONG]		= &int_ptr_types,
6489 	[ARG_PTR_TO_PERCPU_BTF_ID]	= &percpu_btf_ptr_types,
6490 	[ARG_PTR_TO_FUNC]		= &func_ptr_types,
6491 	[ARG_PTR_TO_STACK]		= &stack_ptr_types,
6492 	[ARG_PTR_TO_CONST_STR]		= &const_str_ptr_types,
6493 	[ARG_PTR_TO_TIMER]		= &timer_types,
6494 	[ARG_PTR_TO_KPTR]		= &kptr_types,
6495 	[ARG_PTR_TO_DYNPTR]		= &dynptr_types,
6496 };
6497 
6498 static int check_reg_type(struct bpf_verifier_env *env, u32 regno,
6499 			  enum bpf_arg_type arg_type,
6500 			  const u32 *arg_btf_id,
6501 			  struct bpf_call_arg_meta *meta)
6502 {
6503 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
6504 	enum bpf_reg_type expected, type = reg->type;
6505 	const struct bpf_reg_types *compatible;
6506 	int i, j;
6507 
6508 	compatible = compatible_reg_types[base_type(arg_type)];
6509 	if (!compatible) {
6510 		verbose(env, "verifier internal error: unsupported arg type %d\n", arg_type);
6511 		return -EFAULT;
6512 	}
6513 
6514 	/* ARG_PTR_TO_MEM + RDONLY is compatible with PTR_TO_MEM and PTR_TO_MEM + RDONLY,
6515 	 * but ARG_PTR_TO_MEM is compatible only with PTR_TO_MEM and NOT with PTR_TO_MEM + RDONLY
6516 	 *
6517 	 * Same for MAYBE_NULL:
6518 	 *
6519 	 * ARG_PTR_TO_MEM + MAYBE_NULL is compatible with PTR_TO_MEM and PTR_TO_MEM + MAYBE_NULL,
6520 	 * but ARG_PTR_TO_MEM is compatible only with PTR_TO_MEM and NOT with PTR_TO_MEM + MAYBE_NULL
6521 	 *
6522 	 * Therefore we fold these flags depending on the arg_type before comparison.
6523 	 */
6524 	if (arg_type & MEM_RDONLY)
6525 		type &= ~MEM_RDONLY;
6526 	if (arg_type & PTR_MAYBE_NULL)
6527 		type &= ~PTR_MAYBE_NULL;
6528 
6529 	for (i = 0; i < ARRAY_SIZE(compatible->types); i++) {
6530 		expected = compatible->types[i];
6531 		if (expected == NOT_INIT)
6532 			break;
6533 
6534 		if (type == expected)
6535 			goto found;
6536 	}
6537 
6538 	verbose(env, "R%d type=%s expected=", regno, reg_type_str(env, reg->type));
6539 	for (j = 0; j + 1 < i; j++)
6540 		verbose(env, "%s, ", reg_type_str(env, compatible->types[j]));
6541 	verbose(env, "%s\n", reg_type_str(env, compatible->types[j]));
6542 	return -EACCES;
6543 
6544 found:
6545 	if (reg->type == PTR_TO_BTF_ID || reg->type & PTR_TRUSTED) {
6546 		/* For bpf_sk_release, it needs to match against first member
6547 		 * 'struct sock_common', hence make an exception for it. This
6548 		 * allows bpf_sk_release to work for multiple socket types.
6549 		 */
6550 		bool strict_type_match = arg_type_is_release(arg_type) &&
6551 					 meta->func_id != BPF_FUNC_sk_release;
6552 
6553 		if (!arg_btf_id) {
6554 			if (!compatible->btf_id) {
6555 				verbose(env, "verifier internal error: missing arg compatible BTF ID\n");
6556 				return -EFAULT;
6557 			}
6558 			arg_btf_id = compatible->btf_id;
6559 		}
6560 
6561 		if (meta->func_id == BPF_FUNC_kptr_xchg) {
6562 			if (map_kptr_match_type(env, meta->kptr_field, reg, regno))
6563 				return -EACCES;
6564 		} else {
6565 			if (arg_btf_id == BPF_PTR_POISON) {
6566 				verbose(env, "verifier internal error:");
6567 				verbose(env, "R%d has non-overwritten BPF_PTR_POISON type\n",
6568 					regno);
6569 				return -EACCES;
6570 			}
6571 
6572 			if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, reg->off,
6573 						  btf_vmlinux, *arg_btf_id,
6574 						  strict_type_match)) {
6575 				verbose(env, "R%d is of type %s but %s is expected\n",
6576 					regno, kernel_type_name(reg->btf, reg->btf_id),
6577 					kernel_type_name(btf_vmlinux, *arg_btf_id));
6578 				return -EACCES;
6579 			}
6580 		}
6581 	} else if (type_is_alloc(reg->type)) {
6582 		if (meta->func_id != BPF_FUNC_spin_lock && meta->func_id != BPF_FUNC_spin_unlock) {
6583 			verbose(env, "verifier internal error: unimplemented handling of MEM_ALLOC\n");
6584 			return -EFAULT;
6585 		}
6586 	}
6587 
6588 	return 0;
6589 }
6590 
6591 static struct btf_field *
6592 reg_find_field_offset(const struct bpf_reg_state *reg, s32 off, u32 fields)
6593 {
6594 	struct btf_field *field;
6595 	struct btf_record *rec;
6596 
6597 	rec = reg_btf_record(reg);
6598 	if (!rec)
6599 		return NULL;
6600 
6601 	field = btf_record_find(rec, off, fields);
6602 	if (!field)
6603 		return NULL;
6604 
6605 	return field;
6606 }
6607 
6608 int check_func_arg_reg_off(struct bpf_verifier_env *env,
6609 			   const struct bpf_reg_state *reg, int regno,
6610 			   enum bpf_arg_type arg_type)
6611 {
6612 	u32 type = reg->type;
6613 
6614 	/* When referenced register is passed to release function, its fixed
6615 	 * offset must be 0.
6616 	 *
6617 	 * We will check that a register passed to a release argument
6618 	 * (arg_type_is_release) has a ref_obj_id when storing meta->release_regno.
6619 	 */
6620 	if (arg_type_is_release(arg_type)) {
6621 		/* ARG_PTR_TO_DYNPTR with OBJ_RELEASE is a bit special, as it
6622 		 * may not directly point to the object being released, but to
6623 		 * dynptr pointing to such object, which might be at some offset
6624 		 * on the stack. In that case, we simply fall back to the
6625 		 * default handling.
6626 		 */
6627 		if (arg_type_is_dynptr(arg_type) && type == PTR_TO_STACK)
6628 			return 0;
6629 
6630 		if ((type_is_ptr_alloc_obj(type) || type_is_non_owning_ref(type)) && reg->off) {
6631 			if (reg_find_field_offset(reg, reg->off, BPF_GRAPH_NODE_OR_ROOT))
6632 				return __check_ptr_off_reg(env, reg, regno, true);
6633 
6634 			verbose(env, "R%d must have zero offset when passed to release func\n",
6635 				regno);
6636 			verbose(env, "No graph node or root found at R%d type:%s off:%d\n", regno,
6637 				kernel_type_name(reg->btf, reg->btf_id), reg->off);
6638 			return -EINVAL;
6639 		}
6640 
6641 		/* Doing check_ptr_off_reg check for the offset will catch this
6642 		 * because fixed_off_ok is false, but checking here allows us
6643 		 * to give the user a better error message.
6644 		 */
6645 		if (reg->off) {
6646 			verbose(env, "R%d must have zero offset when passed to release func or trusted arg to kfunc\n",
6647 				regno);
6648 			return -EINVAL;
6649 		}
6650 		return __check_ptr_off_reg(env, reg, regno, false);
6651 	}
6652 
6653 	switch (type) {
6654 	/* Pointer types where both fixed and variable offset are explicitly allowed: */
6655 	case PTR_TO_STACK:
6656 	case PTR_TO_PACKET:
6657 	case PTR_TO_PACKET_META:
6658 	case PTR_TO_MAP_KEY:
6659 	case PTR_TO_MAP_VALUE:
6660 	case PTR_TO_MEM:
6661 	case PTR_TO_MEM | MEM_RDONLY:
6662 	case PTR_TO_MEM | MEM_RINGBUF:
6663 	case PTR_TO_BUF:
6664 	case PTR_TO_BUF | MEM_RDONLY:
6665 	case SCALAR_VALUE:
6666 		return 0;
6667 	/* All the rest must be rejected, except PTR_TO_BTF_ID which allows
6668 	 * fixed offset.
6669 	 */
6670 	case PTR_TO_BTF_ID:
6671 	case PTR_TO_BTF_ID | MEM_ALLOC:
6672 	case PTR_TO_BTF_ID | PTR_TRUSTED:
6673 	case PTR_TO_BTF_ID | MEM_RCU:
6674 	case PTR_TO_BTF_ID | MEM_ALLOC | PTR_TRUSTED:
6675 	case PTR_TO_BTF_ID | MEM_ALLOC | NON_OWN_REF:
6676 		/* When referenced PTR_TO_BTF_ID is passed to release function,
6677 		 * its fixed offset must be 0. In the other cases, fixed offset
6678 		 * can be non-zero. This was already checked above. So pass
6679 		 * fixed_off_ok as true to allow fixed offset for all other
6680 		 * cases. var_off always must be 0 for PTR_TO_BTF_ID, hence we
6681 		 * still need to do checks instead of returning.
6682 		 */
6683 		return __check_ptr_off_reg(env, reg, regno, true);
6684 	default:
6685 		return __check_ptr_off_reg(env, reg, regno, false);
6686 	}
6687 }
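
/* A hedged example of the distinction above: for PTR_TO_STACK both fixed and
 * variable offsets are fine (bounds are checked elsewhere), while PTR_TO_BTF_ID
 * tolerates only a fixed offset:
 *
 *	char buf[64];
 *	...
 *	bpf_probe_read_kernel(&buf[i], 8, src);	// &buf[i] has var_off: allowed
 *
 * A PTR_TO_BTF_ID register with non-zero var_off, e.g. 'task + i' for an
 * unknown scalar i, is rejected by __check_ptr_off_reg().
 */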
6688 
6689 static int dynptr_id(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
6690 {
6691 	struct bpf_func_state *state = func(env, reg);
6692 	int spi;
6693 
6694 	if (reg->type == CONST_PTR_TO_DYNPTR)
6695 		return reg->id;
6696 	spi = dynptr_get_spi(env, reg);
6697 	if (spi < 0)
6698 		return spi;
6699 	return state->stack[spi].spilled_ptr.id;
6700 }
6701 
6702 static int dynptr_ref_obj_id(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
6703 {
6704 	struct bpf_func_state *state = func(env, reg);
6705 	int spi;
6706 
6707 	if (reg->type == CONST_PTR_TO_DYNPTR)
6708 		return reg->ref_obj_id;
6709 	spi = dynptr_get_spi(env, reg);
6710 	if (spi < 0)
6711 		return spi;
6712 	return state->stack[spi].spilled_ptr.ref_obj_id;
6713 }
6714 
6715 static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
6716 			  struct bpf_call_arg_meta *meta,
6717 			  const struct bpf_func_proto *fn)
6718 {
6719 	u32 regno = BPF_REG_1 + arg;
6720 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
6721 	enum bpf_arg_type arg_type = fn->arg_type[arg];
6722 	enum bpf_reg_type type = reg->type;
6723 	u32 *arg_btf_id = NULL;
6724 	int err = 0;
6725 
6726 	if (arg_type == ARG_DONTCARE)
6727 		return 0;
6728 
6729 	err = check_reg_arg(env, regno, SRC_OP);
6730 	if (err)
6731 		return err;
6732 
6733 	if (arg_type == ARG_ANYTHING) {
6734 		if (is_pointer_value(env, regno)) {
6735 			verbose(env, "R%d leaks addr into helper function\n",
6736 				regno);
6737 			return -EACCES;
6738 		}
6739 		return 0;
6740 	}
6741 
6742 	if (type_is_pkt_pointer(type) &&
6743 	    !may_access_direct_pkt_data(env, meta, BPF_READ)) {
6744 		verbose(env, "helper access to the packet is not allowed\n");
6745 		return -EACCES;
6746 	}
6747 
6748 	if (base_type(arg_type) == ARG_PTR_TO_MAP_VALUE) {
6749 		err = resolve_map_arg_type(env, meta, &arg_type);
6750 		if (err)
6751 			return err;
6752 	}
6753 
6754 	if (register_is_null(reg) && type_may_be_null(arg_type))
6755 		/* A NULL register has a SCALAR_VALUE type, so skip
6756 		 * type checking.
6757 		 */
6758 		goto skip_type_check;
6759 
6760 	/* arg_btf_id and arg_size are in a union. */
6761 	if (base_type(arg_type) == ARG_PTR_TO_BTF_ID ||
6762 	    base_type(arg_type) == ARG_PTR_TO_SPIN_LOCK)
6763 		arg_btf_id = fn->arg_btf_id[arg];
6764 
6765 	err = check_reg_type(env, regno, arg_type, arg_btf_id, meta);
6766 	if (err)
6767 		return err;
6768 
6769 	err = check_func_arg_reg_off(env, reg, regno, arg_type);
6770 	if (err)
6771 		return err;
6772 
6773 skip_type_check:
6774 	if (arg_type_is_release(arg_type)) {
6775 		if (arg_type_is_dynptr(arg_type)) {
6776 			struct bpf_func_state *state = func(env, reg);
6777 			int spi;
6778 
6779 			/* Only dynptr created on stack can be released, thus
6780 			 * the get_spi and stack state checks for spilled_ptr
6781 			 * should only be done before process_dynptr_func for
6782 			 * PTR_TO_STACK.
6783 			 */
6784 			if (reg->type == PTR_TO_STACK) {
6785 				spi = dynptr_get_spi(env, reg);
6786 				if (spi < 0 || !state->stack[spi].spilled_ptr.ref_obj_id) {
6787 					verbose(env, "arg %d is an unacquired reference\n", regno);
6788 					return -EINVAL;
6789 				}
6790 			} else {
6791 				verbose(env, "cannot release unowned const bpf_dynptr\n");
6792 				return -EINVAL;
6793 			}
6794 		} else if (!reg->ref_obj_id && !register_is_null(reg)) {
6795 			verbose(env, "R%d must be referenced when passed to release function\n",
6796 				regno);
6797 			return -EINVAL;
6798 		}
6799 		if (meta->release_regno) {
6800 			verbose(env, "verifier internal error: more than one release argument\n");
6801 			return -EFAULT;
6802 		}
6803 		meta->release_regno = regno;
6804 	}
6805 
6806 	if (reg->ref_obj_id) {
6807 		if (meta->ref_obj_id) {
6808 			verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n",
6809 				regno, reg->ref_obj_id,
6810 				meta->ref_obj_id);
6811 			return -EFAULT;
6812 		}
6813 		meta->ref_obj_id = reg->ref_obj_id;
6814 	}
6815 
6816 	switch (base_type(arg_type)) {
6817 	case ARG_CONST_MAP_PTR:
6818 		/* bpf_map_xxx(map_ptr) call: remember that map_ptr */
6819 		if (meta->map_ptr) {
6820 			/* Use map_uid (which is unique id of inner map) to reject:
6821 			 * inner_map1 = bpf_map_lookup_elem(outer_map, key1)
6822 			 * inner_map2 = bpf_map_lookup_elem(outer_map, key2)
6823 			 * if (inner_map1 && inner_map2) {
6824 			 *     timer = bpf_map_lookup_elem(inner_map1);
6825 			 *     if (timer)
6826 			 *         // mismatch would have been allowed
6827 			 *         bpf_timer_init(timer, inner_map2);
6828 			 * }
6829 			 *
6830 			 * Comparing map_uid is enough to distinguish normal and outer maps.
6831 			 */
6832 			if (meta->map_ptr != reg->map_ptr ||
6833 			    meta->map_uid != reg->map_uid) {
6834 				verbose(env,
6835 					"timer pointer in R1 map_uid=%d doesn't match map pointer in R2 map_uid=%d\n",
6836 					meta->map_uid, reg->map_uid);
6837 				return -EINVAL;
6838 			}
6839 		}
6840 		meta->map_ptr = reg->map_ptr;
6841 		meta->map_uid = reg->map_uid;
6842 		break;
6843 	case ARG_PTR_TO_MAP_KEY:
6844 		/* bpf_map_xxx(..., map_ptr, ..., key) call:
6845 		 * check that [key, key + map->key_size) are within
6846 		 * stack limits and initialized
6847 		 */
6848 		if (!meta->map_ptr) {
6849 			/* in function declaration map_ptr must come before
6850 			 * map_key, so that it's verified and known before
6851 			 * we have to check map_key here. Otherwise it means
6852 			 * that the kernel subsystem misconfigured the verifier
6853 			 */
6854 			verbose(env, "invalid map_ptr to access map->key\n");
6855 			return -EACCES;
6856 		}
6857 		err = check_helper_mem_access(env, regno,
6858 					      meta->map_ptr->key_size, false,
6859 					      NULL);
6860 		break;
6861 	case ARG_PTR_TO_MAP_VALUE:
6862 		if (type_may_be_null(arg_type) && register_is_null(reg))
6863 			return 0;
6864 
6865 		/* bpf_map_xxx(..., map_ptr, ..., value) call:
6866 		 * check [value, value + map->value_size) validity
6867 		 */
6868 		if (!meta->map_ptr) {
6869 			/* kernel subsystem misconfigured verifier */
6870 			verbose(env, "invalid map_ptr to access map->value\n");
6871 			return -EACCES;
6872 		}
6873 		meta->raw_mode = arg_type & MEM_UNINIT;
6874 		err = check_helper_mem_access(env, regno,
6875 					      meta->map_ptr->value_size, false,
6876 					      meta);
6877 		break;
6878 	case ARG_PTR_TO_PERCPU_BTF_ID:
6879 		if (!reg->btf_id) {
6880 			verbose(env, "Helper has invalid btf_id in R%d\n", regno);
6881 			return -EACCES;
6882 		}
6883 		meta->ret_btf = reg->btf;
6884 		meta->ret_btf_id = reg->btf_id;
6885 		break;
6886 	case ARG_PTR_TO_SPIN_LOCK:
6887 		if (in_rbtree_lock_required_cb(env)) {
6888 			verbose(env, "can't spin_{lock,unlock} in rbtree cb\n");
6889 			return -EACCES;
6890 		}
6891 		if (meta->func_id == BPF_FUNC_spin_lock) {
6892 			err = process_spin_lock(env, regno, true);
6893 			if (err)
6894 				return err;
6895 		} else if (meta->func_id == BPF_FUNC_spin_unlock) {
6896 			err = process_spin_lock(env, regno, false);
6897 			if (err)
6898 				return err;
6899 		} else {
6900 			verbose(env, "verifier internal error\n");
6901 			return -EFAULT;
6902 		}
6903 		break;
6904 	case ARG_PTR_TO_TIMER:
6905 		err = process_timer_func(env, regno, meta);
6906 		if (err)
6907 			return err;
6908 		break;
6909 	case ARG_PTR_TO_FUNC:
6910 		meta->subprogno = reg->subprogno;
6911 		break;
6912 	case ARG_PTR_TO_MEM:
6913 		/* The access to this pointer is only checked when we hit the
6914 		 * next is_mem_size argument below.
6915 		 */
6916 		meta->raw_mode = arg_type & MEM_UNINIT;
6917 		if (arg_type & MEM_FIXED_SIZE) {
6918 			err = check_helper_mem_access(env, regno,
6919 						      fn->arg_size[arg], false,
6920 						      meta);
6921 		}
6922 		break;
6923 	case ARG_CONST_SIZE:
6924 		err = check_mem_size_reg(env, reg, regno, false, meta);
6925 		break;
6926 	case ARG_CONST_SIZE_OR_ZERO:
6927 		err = check_mem_size_reg(env, reg, regno, true, meta);
6928 		break;
6929 	case ARG_PTR_TO_DYNPTR:
6930 		err = process_dynptr_func(env, regno, arg_type, meta);
6931 		if (err)
6932 			return err;
6933 		break;
6934 	case ARG_CONST_ALLOC_SIZE_OR_ZERO:
6935 		if (!tnum_is_const(reg->var_off)) {
6936 			verbose(env, "R%d is not a known constant'\n",
6937 				regno);
6938 			return -EACCES;
6939 		}
6940 		meta->mem_size = reg->var_off.value;
6941 		err = mark_chain_precision(env, regno);
6942 		if (err)
6943 			return err;
6944 		break;
6945 	case ARG_PTR_TO_INT:
6946 	case ARG_PTR_TO_LONG:
6947 	{
6948 		int size = int_ptr_type_to_size(arg_type);
6949 
6950 		err = check_helper_mem_access(env, regno, size, false, meta);
6951 		if (err)
6952 			return err;
6953 		err = check_ptr_alignment(env, reg, 0, size, true);
6954 		break;
6955 	}
6956 	case ARG_PTR_TO_CONST_STR:
6957 	{
6958 		struct bpf_map *map = reg->map_ptr;
6959 		int map_off;
6960 		u64 map_addr;
6961 		char *str_ptr;
6962 
6963 		if (!bpf_map_is_rdonly(map)) {
6964 			verbose(env, "R%d does not point to a readonly map'\n", regno);
6965 			return -EACCES;
6966 		}
6967 
6968 		if (!tnum_is_const(reg->var_off)) {
6969 			verbose(env, "R%d is not a constant address'\n", regno);
6970 			return -EACCES;
6971 		}
6972 
6973 		if (!map->ops->map_direct_value_addr) {
6974 			verbose(env, "no direct value access support for this map type\n");
6975 			return -EACCES;
6976 		}
6977 
6978 		err = check_map_access(env, regno, reg->off,
6979 				       map->value_size - reg->off, false,
6980 				       ACCESS_HELPER);
6981 		if (err)
6982 			return err;
6983 
6984 		map_off = reg->off + reg->var_off.value;
6985 		err = map->ops->map_direct_value_addr(map, &map_addr, map_off);
6986 		if (err) {
6987 			verbose(env, "direct value access on string failed\n");
6988 			return err;
6989 		}
6990 
6991 		str_ptr = (char *)(long)(map_addr);
6992 		if (!strnchr(str_ptr + map_off, map->value_size - map_off, 0)) {
6993 			verbose(env, "string is not zero-terminated\n");
6994 			return -EINVAL;
6995 		}
6996 		break;
6997 	}
6998 	case ARG_PTR_TO_KPTR:
6999 		err = process_kptr_func(env, regno, meta);
7000 		if (err)
7001 			return err;
7002 		break;
7003 	}
7004 
7005 	return err;
7006 }
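
/* Editor's sketch of what the ARG_PTR_TO_CONST_STR case above accepts: the
 * string must sit at a constant offset inside a frozen, read-only map that
 * supports direct value access, which is how libbpf places C string literals
 * in a .rodata map:
 *
 *	static const char fmt[] = "pid=%d\n";	// ends up in .rodata (rdonly)
 *	bpf_snprintf(out, sizeof(out), fmt, args, sizeof(args));
 *
 * A string built on the stack would fail the bpf_map_is_rdonly() check.
 */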
7007 
7008 static bool may_update_sockmap(struct bpf_verifier_env *env, int func_id)
7009 {
7010 	enum bpf_attach_type eatype = env->prog->expected_attach_type;
7011 	enum bpf_prog_type type = resolve_prog_type(env->prog);
7012 
7013 	if (func_id != BPF_FUNC_map_update_elem)
7014 		return false;
7015 
7016 	/* It's not possible to get access to a locked struct sock in these
7017 	 * contexts, so updating is safe.
7018 	 */
7019 	switch (type) {
7020 	case BPF_PROG_TYPE_TRACING:
7021 		if (eatype == BPF_TRACE_ITER)
7022 			return true;
7023 		break;
7024 	case BPF_PROG_TYPE_SOCKET_FILTER:
7025 	case BPF_PROG_TYPE_SCHED_CLS:
7026 	case BPF_PROG_TYPE_SCHED_ACT:
7027 	case BPF_PROG_TYPE_XDP:
7028 	case BPF_PROG_TYPE_SK_REUSEPORT:
7029 	case BPF_PROG_TYPE_FLOW_DISSECTOR:
7030 	case BPF_PROG_TYPE_SK_LOOKUP:
7031 		return true;
7032 	default:
7033 		break;
7034 	}
7035 
7036 	verbose(env, "cannot update sockmap in this context\n");
7037 	return false;
7038 }
7039 
7040 static bool allow_tail_call_in_subprogs(struct bpf_verifier_env *env)
7041 {
7042 	return env->prog->jit_requested &&
7043 	       bpf_jit_supports_subprog_tailcalls();
7044 }
7045 
7046 static int check_map_func_compatibility(struct bpf_verifier_env *env,
7047 					struct bpf_map *map, int func_id)
7048 {
7049 	if (!map)
7050 		return 0;
7051 
7052 	/* We need a two-way check: first from the map's perspective ... */
7053 	switch (map->map_type) {
7054 	case BPF_MAP_TYPE_PROG_ARRAY:
7055 		if (func_id != BPF_FUNC_tail_call)
7056 			goto error;
7057 		break;
7058 	case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
7059 		if (func_id != BPF_FUNC_perf_event_read &&
7060 		    func_id != BPF_FUNC_perf_event_output &&
7061 		    func_id != BPF_FUNC_skb_output &&
7062 		    func_id != BPF_FUNC_perf_event_read_value &&
7063 		    func_id != BPF_FUNC_xdp_output)
7064 			goto error;
7065 		break;
7066 	case BPF_MAP_TYPE_RINGBUF:
7067 		if (func_id != BPF_FUNC_ringbuf_output &&
7068 		    func_id != BPF_FUNC_ringbuf_reserve &&
7069 		    func_id != BPF_FUNC_ringbuf_query &&
7070 		    func_id != BPF_FUNC_ringbuf_reserve_dynptr &&
7071 		    func_id != BPF_FUNC_ringbuf_submit_dynptr &&
7072 		    func_id != BPF_FUNC_ringbuf_discard_dynptr)
7073 			goto error;
7074 		break;
7075 	case BPF_MAP_TYPE_USER_RINGBUF:
7076 		if (func_id != BPF_FUNC_user_ringbuf_drain)
7077 			goto error;
7078 		break;
7079 	case BPF_MAP_TYPE_STACK_TRACE:
7080 		if (func_id != BPF_FUNC_get_stackid)
7081 			goto error;
7082 		break;
7083 	case BPF_MAP_TYPE_CGROUP_ARRAY:
7084 		if (func_id != BPF_FUNC_skb_under_cgroup &&
7085 		    func_id != BPF_FUNC_current_task_under_cgroup)
7086 			goto error;
7087 		break;
7088 	case BPF_MAP_TYPE_CGROUP_STORAGE:
7089 	case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
7090 		if (func_id != BPF_FUNC_get_local_storage)
7091 			goto error;
7092 		break;
7093 	case BPF_MAP_TYPE_DEVMAP:
7094 	case BPF_MAP_TYPE_DEVMAP_HASH:
7095 		if (func_id != BPF_FUNC_redirect_map &&
7096 		    func_id != BPF_FUNC_map_lookup_elem)
7097 			goto error;
7098 		break;
7099 	/* Restrict bpf side of cpumap and xskmap, open when use-cases
7100 	 * appear.
7101 	 */
7102 	case BPF_MAP_TYPE_CPUMAP:
7103 		if (func_id != BPF_FUNC_redirect_map)
7104 			goto error;
7105 		break;
7106 	case BPF_MAP_TYPE_XSKMAP:
7107 		if (func_id != BPF_FUNC_redirect_map &&
7108 		    func_id != BPF_FUNC_map_lookup_elem)
7109 			goto error;
7110 		break;
7111 	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
7112 	case BPF_MAP_TYPE_HASH_OF_MAPS:
7113 		if (func_id != BPF_FUNC_map_lookup_elem)
7114 			goto error;
7115 		break;
7116 	case BPF_MAP_TYPE_SOCKMAP:
7117 		if (func_id != BPF_FUNC_sk_redirect_map &&
7118 		    func_id != BPF_FUNC_sock_map_update &&
7119 		    func_id != BPF_FUNC_map_delete_elem &&
7120 		    func_id != BPF_FUNC_msg_redirect_map &&
7121 		    func_id != BPF_FUNC_sk_select_reuseport &&
7122 		    func_id != BPF_FUNC_map_lookup_elem &&
7123 		    !may_update_sockmap(env, func_id))
7124 			goto error;
7125 		break;
7126 	case BPF_MAP_TYPE_SOCKHASH:
7127 		if (func_id != BPF_FUNC_sk_redirect_hash &&
7128 		    func_id != BPF_FUNC_sock_hash_update &&
7129 		    func_id != BPF_FUNC_map_delete_elem &&
7130 		    func_id != BPF_FUNC_msg_redirect_hash &&
7131 		    func_id != BPF_FUNC_sk_select_reuseport &&
7132 		    func_id != BPF_FUNC_map_lookup_elem &&
7133 		    !may_update_sockmap(env, func_id))
7134 			goto error;
7135 		break;
7136 	case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
7137 		if (func_id != BPF_FUNC_sk_select_reuseport)
7138 			goto error;
7139 		break;
7140 	case BPF_MAP_TYPE_QUEUE:
7141 	case BPF_MAP_TYPE_STACK:
7142 		if (func_id != BPF_FUNC_map_peek_elem &&
7143 		    func_id != BPF_FUNC_map_pop_elem &&
7144 		    func_id != BPF_FUNC_map_push_elem)
7145 			goto error;
7146 		break;
7147 	case BPF_MAP_TYPE_SK_STORAGE:
7148 		if (func_id != BPF_FUNC_sk_storage_get &&
7149 		    func_id != BPF_FUNC_sk_storage_delete)
7150 			goto error;
7151 		break;
7152 	case BPF_MAP_TYPE_INODE_STORAGE:
7153 		if (func_id != BPF_FUNC_inode_storage_get &&
7154 		    func_id != BPF_FUNC_inode_storage_delete)
7155 			goto error;
7156 		break;
7157 	case BPF_MAP_TYPE_TASK_STORAGE:
7158 		if (func_id != BPF_FUNC_task_storage_get &&
7159 		    func_id != BPF_FUNC_task_storage_delete)
7160 			goto error;
7161 		break;
7162 	case BPF_MAP_TYPE_CGRP_STORAGE:
7163 		if (func_id != BPF_FUNC_cgrp_storage_get &&
7164 		    func_id != BPF_FUNC_cgrp_storage_delete)
7165 			goto error;
7166 		break;
7167 	case BPF_MAP_TYPE_BLOOM_FILTER:
7168 		if (func_id != BPF_FUNC_map_peek_elem &&
7169 		    func_id != BPF_FUNC_map_push_elem)
7170 			goto error;
7171 		break;
7172 	default:
7173 		break;
7174 	}
7175 
7176 	/* ... and second from the function itself. */
7177 	switch (func_id) {
7178 	case BPF_FUNC_tail_call:
7179 		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
7180 			goto error;
7181 		if (env->subprog_cnt > 1 && !allow_tail_call_in_subprogs(env)) {
7182 			verbose(env, "tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls\n");
7183 			return -EINVAL;
7184 		}
7185 		break;
7186 	case BPF_FUNC_perf_event_read:
7187 	case BPF_FUNC_perf_event_output:
7188 	case BPF_FUNC_perf_event_read_value:
7189 	case BPF_FUNC_skb_output:
7190 	case BPF_FUNC_xdp_output:
7191 		if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
7192 			goto error;
7193 		break;
7194 	case BPF_FUNC_ringbuf_output:
7195 	case BPF_FUNC_ringbuf_reserve:
7196 	case BPF_FUNC_ringbuf_query:
7197 	case BPF_FUNC_ringbuf_reserve_dynptr:
7198 	case BPF_FUNC_ringbuf_submit_dynptr:
7199 	case BPF_FUNC_ringbuf_discard_dynptr:
7200 		if (map->map_type != BPF_MAP_TYPE_RINGBUF)
7201 			goto error;
7202 		break;
7203 	case BPF_FUNC_user_ringbuf_drain:
7204 		if (map->map_type != BPF_MAP_TYPE_USER_RINGBUF)
7205 			goto error;
7206 		break;
7207 	case BPF_FUNC_get_stackid:
7208 		if (map->map_type != BPF_MAP_TYPE_STACK_TRACE)
7209 			goto error;
7210 		break;
7211 	case BPF_FUNC_current_task_under_cgroup:
7212 	case BPF_FUNC_skb_under_cgroup:
7213 		if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY)
7214 			goto error;
7215 		break;
7216 	case BPF_FUNC_redirect_map:
7217 		if (map->map_type != BPF_MAP_TYPE_DEVMAP &&
7218 		    map->map_type != BPF_MAP_TYPE_DEVMAP_HASH &&
7219 		    map->map_type != BPF_MAP_TYPE_CPUMAP &&
7220 		    map->map_type != BPF_MAP_TYPE_XSKMAP)
7221 			goto error;
7222 		break;
7223 	case BPF_FUNC_sk_redirect_map:
7224 	case BPF_FUNC_msg_redirect_map:
7225 	case BPF_FUNC_sock_map_update:
7226 		if (map->map_type != BPF_MAP_TYPE_SOCKMAP)
7227 			goto error;
7228 		break;
7229 	case BPF_FUNC_sk_redirect_hash:
7230 	case BPF_FUNC_msg_redirect_hash:
7231 	case BPF_FUNC_sock_hash_update:
7232 		if (map->map_type != BPF_MAP_TYPE_SOCKHASH)
7233 			goto error;
7234 		break;
7235 	case BPF_FUNC_get_local_storage:
7236 		if (map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
7237 		    map->map_type != BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
7238 			goto error;
7239 		break;
7240 	case BPF_FUNC_sk_select_reuseport:
7241 		if (map->map_type != BPF_MAP_TYPE_REUSEPORT_SOCKARRAY &&
7242 		    map->map_type != BPF_MAP_TYPE_SOCKMAP &&
7243 		    map->map_type != BPF_MAP_TYPE_SOCKHASH)
7244 			goto error;
7245 		break;
7246 	case BPF_FUNC_map_pop_elem:
7247 		if (map->map_type != BPF_MAP_TYPE_QUEUE &&
7248 		    map->map_type != BPF_MAP_TYPE_STACK)
7249 			goto error;
7250 		break;
7251 	case BPF_FUNC_map_peek_elem:
7252 	case BPF_FUNC_map_push_elem:
7253 		if (map->map_type != BPF_MAP_TYPE_QUEUE &&
7254 		    map->map_type != BPF_MAP_TYPE_STACK &&
7255 		    map->map_type != BPF_MAP_TYPE_BLOOM_FILTER)
7256 			goto error;
7257 		break;
7258 	case BPF_FUNC_map_lookup_percpu_elem:
7259 		if (map->map_type != BPF_MAP_TYPE_PERCPU_ARRAY &&
7260 		    map->map_type != BPF_MAP_TYPE_PERCPU_HASH &&
7261 		    map->map_type != BPF_MAP_TYPE_LRU_PERCPU_HASH)
7262 			goto error;
7263 		break;
7264 	case BPF_FUNC_sk_storage_get:
7265 	case BPF_FUNC_sk_storage_delete:
7266 		if (map->map_type != BPF_MAP_TYPE_SK_STORAGE)
7267 			goto error;
7268 		break;
7269 	case BPF_FUNC_inode_storage_get:
7270 	case BPF_FUNC_inode_storage_delete:
7271 		if (map->map_type != BPF_MAP_TYPE_INODE_STORAGE)
7272 			goto error;
7273 		break;
7274 	case BPF_FUNC_task_storage_get:
7275 	case BPF_FUNC_task_storage_delete:
7276 		if (map->map_type != BPF_MAP_TYPE_TASK_STORAGE)
7277 			goto error;
7278 		break;
7279 	case BPF_FUNC_cgrp_storage_get:
7280 	case BPF_FUNC_cgrp_storage_delete:
7281 		if (map->map_type != BPF_MAP_TYPE_CGRP_STORAGE)
7282 			goto error;
7283 		break;
7284 	default:
7285 		break;
7286 	}
7287 
7288 	return 0;
7289 error:
7290 	verbose(env, "cannot pass map_type %d into func %s#%d\n",
7291 		map->map_type, func_id_name(func_id), func_id);
7292 	return -EINVAL;
7293 }
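
/* Example of the rejection path above (a sketch, error text abridged): using
 * bpf_tail_call() with anything but a BPF_MAP_TYPE_PROG_ARRAY, say a hash
 * map, fails the second switch:
 *
 *	bpf_tail_call(ctx, &my_hash_map, 0);
 *	// verifier: "cannot pass map_type 1 into func bpf_tail_call#12"
 *
 * where 'my_hash_map' is a hypothetical BPF_MAP_TYPE_HASH map.
 */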
7294 
7295 static bool check_raw_mode_ok(const struct bpf_func_proto *fn)
7296 {
7297 	int count = 0;
7298 
7299 	if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM)
7300 		count++;
7301 	if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM)
7302 		count++;
7303 	if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM)
7304 		count++;
7305 	if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM)
7306 		count++;
7307 	if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM)
7308 		count++;
7309 
7310 	/* We only support one arg being in raw mode at the moment,
7311 	 * which is sufficient for the helper functions we have
7312 	 * right now.
7313 	 */
7314 	return count <= 1;
7315 }
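
/* For context (editor's note): "raw mode" means the helper fills the buffer
 * itself, so the verifier does not require it to be initialized beforehand.
 * bpf_probe_read_kernel() is a canonical single raw-mode-arg helper:
 *
 *	char buf[16];					// may be uninitialized
 *	bpf_probe_read_kernel(buf, sizeof(buf), src);	// arg1 is
 *							// ARG_PTR_TO_UNINIT_MEM
 */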
7316 
7317 static bool check_args_pair_invalid(const struct bpf_func_proto *fn, int arg)
7318 {
7319 	bool is_fixed = fn->arg_type[arg] & MEM_FIXED_SIZE;
7320 	bool has_size = fn->arg_size[arg] != 0;
7321 	bool is_next_size = false;
7322 
7323 	if (arg + 1 < ARRAY_SIZE(fn->arg_type))
7324 		is_next_size = arg_type_is_mem_size(fn->arg_type[arg + 1]);
7325 
7326 	if (base_type(fn->arg_type[arg]) != ARG_PTR_TO_MEM)
7327 		return is_next_size;
7328 
7329 	return has_size == is_next_size || is_next_size == is_fixed;
7330 }
7331 
7332 static bool check_arg_pair_ok(const struct bpf_func_proto *fn)
7333 {
7334 	/* bpf_xxx(..., buf, len) call will access 'len'
7335 	 * bytes from memory 'buf'. Both arg types need
7336 	 * to be paired, so make sure there's no buggy
7337 	 * helper function specification.
7338 	 */
7339 	if (arg_type_is_mem_size(fn->arg1_type) ||
7340 	    check_args_pair_invalid(fn, 0) ||
7341 	    check_args_pair_invalid(fn, 1) ||
7342 	    check_args_pair_invalid(fn, 2) ||
7343 	    check_args_pair_invalid(fn, 3) ||
7344 	    check_args_pair_invalid(fn, 4))
7345 		return false;
7346 
7347 	return true;
7348 }
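
/* The pairing rule in practice (sketch): a size argument must directly follow
 * the pointer it sizes, as in bpf_get_current_comm()'s proto where arg1 is
 * ARG_PTR_TO_UNINIT_MEM and arg2 is ARG_CONST_SIZE:
 *
 *	char comm[16];
 *	bpf_get_current_comm(comm, sizeof(comm));	// (buf, len) pair
 *
 * A proto whose first argument were a mem size would be rejected here.
 */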
7349 
7350 static bool check_btf_id_ok(const struct bpf_func_proto *fn)
7351 {
7352 	int i;
7353 
7354 	for (i = 0; i < ARRAY_SIZE(fn->arg_type); i++) {
7355 		if (base_type(fn->arg_type[i]) == ARG_PTR_TO_BTF_ID)
7356 			return !!fn->arg_btf_id[i];
7357 		if (base_type(fn->arg_type[i]) == ARG_PTR_TO_SPIN_LOCK)
7358 			return fn->arg_btf_id[i] == BPF_PTR_POISON;
7359 		if (base_type(fn->arg_type[i]) != ARG_PTR_TO_BTF_ID && fn->arg_btf_id[i] &&
7360 		    /* arg_btf_id and arg_size are in a union. */
7361 		    (base_type(fn->arg_type[i]) != ARG_PTR_TO_MEM ||
7362 		     !(fn->arg_type[i] & MEM_FIXED_SIZE)))
7363 			return false;
7364 	}
7365 
7366 	return true;
7367 }
7368 
7369 static int check_func_proto(const struct bpf_func_proto *fn, int func_id)
7370 {
7371 	return check_raw_mode_ok(fn) &&
7372 	       check_arg_pair_ok(fn) &&
7373 	       check_btf_id_ok(fn) ? 0 : -EINVAL;
7374 }
7375 
7376 /* Packet data might have moved, any old PTR_TO_PACKET[_META,_END]
7377  * are now invalid, so turn them into unknown SCALAR_VALUE.
7378  */
7379 static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
7380 {
7381 	struct bpf_func_state *state;
7382 	struct bpf_reg_state *reg;
7383 
7384 	bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({
7385 		if (reg_is_pkt_pointer_any(reg))
7386 			__mark_reg_unknown(env, reg);
7387 	}));
7388 }
7389 
7390 enum {
7391 	AT_PKT_END = -1,
7392 	BEYOND_PKT_END = -2,
7393 };
7394 
7395 static void mark_pkt_end(struct bpf_verifier_state *vstate, int regn, bool range_open)
7396 {
7397 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
7398 	struct bpf_reg_state *reg = &state->regs[regn];
7399 
7400 	if (reg->type != PTR_TO_PACKET)
7401 		/* PTR_TO_PACKET_META is not supported yet */
7402 		return;
7403 
7404 	/* The 'reg' is pkt > pkt_end or pkt >= pkt_end.
7405 	 * How far beyond pkt_end it goes is unknown.
7406 	 * if (!range_open) it's the case of pkt >= pkt_end
7407 	 * if (range_open) it's the case of pkt > pkt_end
7408 	 * hence this pointer is at least 1 byte bigger than pkt_end
7409 	 */
7410 	if (range_open)
7411 		reg->range = BEYOND_PKT_END;
7412 	else
7413 		reg->range = AT_PKT_END;
7414 }
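
/* The guard this models, as written in a typical XDP program (illustrative):
 *
 *	void *data = (void *)(long)ctx->data;
 *	void *data_end = (void *)(long)ctx->data_end;
 *	if (data + 4 > data_end)	// taken: data + 4 is BEYOND_PKT_END
 *		return XDP_DROP;
 *	// fall-through: 4 bytes at 'data' are provably readable
 *
 * The strict '>' above corresponds to range_open == true.
 */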
7415 
7416 /* The pointer with the specified id has released its reference to kernel
7417  * resources. Identify all copies of the same pointer and clear the reference.
7418  */
7419 static int release_reference(struct bpf_verifier_env *env,
7420 			     int ref_obj_id)
7421 {
7422 	struct bpf_func_state *state;
7423 	struct bpf_reg_state *reg;
7424 	int err;
7425 
7426 	err = release_reference_state(cur_func(env), ref_obj_id);
7427 	if (err)
7428 		return err;
7429 
7430 	bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({
7431 		if (reg->ref_obj_id == ref_obj_id) {
7432 			if (!env->allow_ptr_leaks)
7433 				__mark_reg_not_init(env, reg);
7434 			else
7435 				__mark_reg_unknown(env, reg);
7436 		}
7437 	}));
7438 
7439 	return 0;
7440 }
7441 
7442 static void invalidate_non_owning_refs(struct bpf_verifier_env *env)
7443 {
7444 	struct bpf_func_state *unused;
7445 	struct bpf_reg_state *reg;
7446 
7447 	bpf_for_each_reg_in_vstate(env->cur_state, unused, reg, ({
7448 		if (type_is_non_owning_ref(reg->type))
7449 			__mark_reg_unknown(env, reg);
7450 	}));
7451 }
7452 
7453 static void clear_caller_saved_regs(struct bpf_verifier_env *env,
7454 				    struct bpf_reg_state *regs)
7455 {
7456 	int i;
7457 
7458 	/* after the call, registers r0 - r5 are scratched */
7459 	for (i = 0; i < CALLER_SAVED_REGS; i++) {
7460 		mark_reg_not_init(env, regs, caller_saved[i]);
7461 		check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
7462 	}
7463 }
7464 
7465 typedef int (*set_callee_state_fn)(struct bpf_verifier_env *env,
7466 				   struct bpf_func_state *caller,
7467 				   struct bpf_func_state *callee,
7468 				   int insn_idx);
7469 
7470 static int set_callee_state(struct bpf_verifier_env *env,
7471 			    struct bpf_func_state *caller,
7472 			    struct bpf_func_state *callee, int insn_idx);
7473 
7474 static bool is_callback_calling_kfunc(u32 btf_id);
7475 
7476 static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
7477 			     int *insn_idx, int subprog,
7478 			     set_callee_state_fn set_callee_state_cb)
7479 {
7480 	struct bpf_verifier_state *state = env->cur_state;
7481 	struct bpf_func_info_aux *func_info_aux;
7482 	struct bpf_func_state *caller, *callee;
7483 	int err;
7484 	bool is_global = false;
7485 
7486 	if (state->curframe + 1 >= MAX_CALL_FRAMES) {
7487 		verbose(env, "the call stack of %d frames is too deep\n",
7488 			state->curframe + 2);
7489 		return -E2BIG;
7490 	}
7491 
7492 	caller = state->frame[state->curframe];
7493 	if (state->frame[state->curframe + 1]) {
7494 		verbose(env, "verifier bug. Frame %d already allocated\n",
7495 			state->curframe + 1);
7496 		return -EFAULT;
7497 	}
7498 
7499 	func_info_aux = env->prog->aux->func_info_aux;
7500 	if (func_info_aux)
7501 		is_global = func_info_aux[subprog].linkage == BTF_FUNC_GLOBAL;
7502 	err = btf_check_subprog_call(env, subprog, caller->regs);
7503 	if (err == -EFAULT)
7504 		return err;
7505 	if (is_global) {
7506 		if (err) {
7507 			verbose(env, "Caller passes invalid args into func#%d\n",
7508 				subprog);
7509 			return err;
7510 		} else {
7511 			if (env->log.level & BPF_LOG_LEVEL)
7512 				verbose(env,
7513 					"Func#%d is global and valid. Skipping.\n",
7514 					subprog);
7515 			clear_caller_saved_regs(env, caller->regs);
7516 
7517 			/* All global functions return a 64-bit SCALAR_VALUE */
7518 			mark_reg_unknown(env, caller->regs, BPF_REG_0);
7519 			caller->regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG;
7520 
7521 			/* continue with next insn after call */
7522 			return 0;
7523 		}
7524 	}
7525 
7526 	/* set_callee_state is used for direct subprog calls, but we are
7527 	 * interested in validating only BPF helpers that can call subprogs as
7528 	 * callbacks
7529 	 */
7530 	if (set_callee_state_cb != set_callee_state) {
7531 		if (bpf_pseudo_kfunc_call(insn) &&
7532 		    !is_callback_calling_kfunc(insn->imm)) {
7533 			verbose(env, "verifier bug: kfunc %s#%d not marked as callback-calling\n",
7534 				func_id_name(insn->imm), insn->imm);
7535 			return -EFAULT;
7536 		} else if (!bpf_pseudo_kfunc_call(insn) &&
7537 			   !is_callback_calling_function(insn->imm)) { /* helper */
7538 			verbose(env, "verifier bug: helper %s#%d not marked as callback-calling\n",
7539 				func_id_name(insn->imm), insn->imm);
7540 			return -EFAULT;
7541 		}
7542 	}
7543 
7544 	if (insn->code == (BPF_JMP | BPF_CALL) &&
7545 	    insn->src_reg == 0 &&
7546 	    insn->imm == BPF_FUNC_timer_set_callback) {
7547 		struct bpf_verifier_state *async_cb;
7548 
7549 		/* there is no real recursion here. timer callbacks are async */
7550 		env->subprog_info[subprog].is_async_cb = true;
7551 		async_cb = push_async_cb(env, env->subprog_info[subprog].start,
7552 					 *insn_idx, subprog);
7553 		if (!async_cb)
7554 			return -EFAULT;
7555 		callee = async_cb->frame[0];
7556 		callee->async_entry_cnt = caller->async_entry_cnt + 1;
7557 
7558 		/* Convert bpf_timer_set_callback() args into timer callback args */
7559 		err = set_callee_state_cb(env, caller, callee, *insn_idx);
7560 		if (err)
7561 			return err;
7562 
7563 		clear_caller_saved_regs(env, caller->regs);
7564 		mark_reg_unknown(env, caller->regs, BPF_REG_0);
7565 		caller->regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG;
7566 		/* continue with next insn after call */
7567 		return 0;
7568 	}
7569 
7570 	callee = kzalloc(sizeof(*callee), GFP_KERNEL);
7571 	if (!callee)
7572 		return -ENOMEM;
7573 	state->frame[state->curframe + 1] = callee;
7574 
7575 	/* callee cannot access r0, r6 - r9 for reading and has to write
7576 	 * into its own stack before reading from it.
7577 	 * callee can read/write into caller's stack
7578 	 */
7579 	init_func_state(env, callee,
7580 			/* remember the callsite, it will be used by bpf_exit */
7581 			*insn_idx /* callsite */,
7582 			state->curframe + 1 /* frameno within this callchain */,
7583 			subprog /* subprog number within this prog */);
7584 
7585 	/* Transfer references to the callee */
7586 	err = copy_reference_state(callee, caller);
7587 	if (err)
7588 		goto err_out;
7589 
7590 	err = set_callee_state_cb(env, caller, callee, *insn_idx);
7591 	if (err)
7592 		goto err_out;
7593 
7594 	clear_caller_saved_regs(env, caller->regs);
7595 
7596 	/* only increment it after check_reg_arg() finished */
7597 	state->curframe++;
7598 
7599 	/* and go analyze first insn of the callee */
7600 	*insn_idx = env->subprog_info[subprog].start - 1;
7601 
7602 	if (env->log.level & BPF_LOG_LEVEL) {
7603 		verbose(env, "caller:\n");
7604 		print_verifier_state(env, caller, true);
7605 		verbose(env, "callee:\n");
7606 		print_verifier_state(env, callee, true);
7607 	}
7608 	return 0;
7609 
7610 err_out:
7611 	free_func_state(callee);
7612 	state->frame[state->curframe + 1] = NULL;
7613 	return err;
7614 }
7615 
7616 int map_set_for_each_callback_args(struct bpf_verifier_env *env,
7617 				   struct bpf_func_state *caller,
7618 				   struct bpf_func_state *callee)
7619 {
7620 	/* bpf_for_each_map_elem(struct bpf_map *map, void *callback_fn,
7621 	 *      void *callback_ctx, u64 flags);
7622 	 * callback_fn(struct bpf_map *map, void *key, void *value,
7623 	 *      void *callback_ctx);
7624 	 */
7625 	callee->regs[BPF_REG_1] = caller->regs[BPF_REG_1];
7626 
7627 	callee->regs[BPF_REG_2].type = PTR_TO_MAP_KEY;
7628 	__mark_reg_known_zero(&callee->regs[BPF_REG_2]);
7629 	callee->regs[BPF_REG_2].map_ptr = caller->regs[BPF_REG_1].map_ptr;
7630 
7631 	callee->regs[BPF_REG_3].type = PTR_TO_MAP_VALUE;
7632 	__mark_reg_known_zero(&callee->regs[BPF_REG_3]);
7633 	callee->regs[BPF_REG_3].map_ptr = caller->regs[BPF_REG_1].map_ptr;
7634 
7635 	/* pointer to stack or null */
7636 	callee->regs[BPF_REG_4] = caller->regs[BPF_REG_3];
7637 
7638 	/* unused */
7639 	__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
7640 	return 0;
7641 }
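
/* Seen from the BPF program side, the callback contract set up above looks
 * like this (sketch; 'my_map', 'my_ctx' and element types are illustrative):
 *
 *	static long cb(struct bpf_map *map, __u32 *key, __u64 *val, void *ctx)
 *	{
 *		return 0;	// 0 = continue iterating, 1 = stop
 *	}
 *	...
 *	bpf_for_each_map_elem(&my_map, cb, &my_ctx, 0);
 */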
7642 
7643 static int set_callee_state(struct bpf_verifier_env *env,
7644 			    struct bpf_func_state *caller,
7645 			    struct bpf_func_state *callee, int insn_idx)
7646 {
7647 	int i;
7648 
7649 	/* copy r1 - r5 args that callee can access.  The copy includes parent
7650 	 * pointers, which connect us up to the liveness chain
7651 	 */
7652 	for (i = BPF_REG_1; i <= BPF_REG_5; i++)
7653 		callee->regs[i] = caller->regs[i];
7654 	return 0;
7655 }
7656 
7657 static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
7658 			   int *insn_idx)
7659 {
7660 	int subprog, target_insn;
7661 
7662 	target_insn = *insn_idx + insn->imm + 1;
7663 	subprog = find_subprog(env, target_insn);
7664 	if (subprog < 0) {
7665 		verbose(env, "verifier bug. No program starts at insn %d\n",
7666 			target_insn);
7667 		return -EFAULT;
7668 	}
7669 
7670 	return __check_func_call(env, insn, insn_idx, subprog, set_callee_state);
7671 }
7672 
7673 static int set_map_elem_callback_state(struct bpf_verifier_env *env,
7674 				       struct bpf_func_state *caller,
7675 				       struct bpf_func_state *callee,
7676 				       int insn_idx)
7677 {
7678 	struct bpf_insn_aux_data *insn_aux = &env->insn_aux_data[insn_idx];
7679 	struct bpf_map *map;
7680 	int err;
7681 
7682 	if (bpf_map_ptr_poisoned(insn_aux)) {
7683 		verbose(env, "tail_call abusing map_ptr\n");
7684 		return -EINVAL;
7685 	}
7686 
7687 	map = BPF_MAP_PTR(insn_aux->map_ptr_state);
7688 	if (!map->ops->map_set_for_each_callback_args ||
7689 	    !map->ops->map_for_each_callback) {
7690 		verbose(env, "callback function not allowed for map\n");
7691 		return -ENOTSUPP;
7692 	}
7693 
7694 	err = map->ops->map_set_for_each_callback_args(env, caller, callee);
7695 	if (err)
7696 		return err;
7697 
7698 	callee->in_callback_fn = true;
7699 	callee->callback_ret_range = tnum_range(0, 1);
7700 	return 0;
7701 }
7702 
7703 static int set_loop_callback_state(struct bpf_verifier_env *env,
7704 				   struct bpf_func_state *caller,
7705 				   struct bpf_func_state *callee,
7706 				   int insn_idx)
7707 {
7708 	/* bpf_loop(u32 nr_loops, void *callback_fn, void *callback_ctx,
7709 	 *	    u64 flags);
7710 	 * callback_fn(u32 index, void *callback_ctx);
7711 	 */
7712 	callee->regs[BPF_REG_1].type = SCALAR_VALUE;
7713 	callee->regs[BPF_REG_2] = caller->regs[BPF_REG_3];
7714 
7715 	/* unused */
7716 	__mark_reg_not_init(env, &callee->regs[BPF_REG_3]);
7717 	__mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
7718 	__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
7719 
7720 	callee->in_callback_fn = true;
7721 	callee->callback_ret_range = tnum_range(0, 1);
7722 	return 0;
7723 }
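
/* Corresponding BPF-side usage (sketch): only R1 (the index) and R2 (the
 * context pointer) are live on entry to the callback:
 *
 *	static long loop_cb(__u32 index, void *ctx)
 *	{
 *		return 0;	// 0 = continue, 1 = break out of the loop
 *	}
 *	...
 *	bpf_loop(128, loop_cb, &my_ctx, 0);
 */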
7724 
7725 static int set_timer_callback_state(struct bpf_verifier_env *env,
7726 				    struct bpf_func_state *caller,
7727 				    struct bpf_func_state *callee,
7728 				    int insn_idx)
7729 {
7730 	struct bpf_map *map_ptr = caller->regs[BPF_REG_1].map_ptr;
7731 
7732 	/* bpf_timer_set_callback(struct bpf_timer *timer, void *callback_fn);
7733 	 * callback_fn(struct bpf_map *map, void *key, void *value);
7734 	 */
7735 	callee->regs[BPF_REG_1].type = CONST_PTR_TO_MAP;
7736 	__mark_reg_known_zero(&callee->regs[BPF_REG_1]);
7737 	callee->regs[BPF_REG_1].map_ptr = map_ptr;
7738 
7739 	callee->regs[BPF_REG_2].type = PTR_TO_MAP_KEY;
7740 	__mark_reg_known_zero(&callee->regs[BPF_REG_2]);
7741 	callee->regs[BPF_REG_2].map_ptr = map_ptr;
7742 
7743 	callee->regs[BPF_REG_3].type = PTR_TO_MAP_VALUE;
7744 	__mark_reg_known_zero(&callee->regs[BPF_REG_3]);
7745 	callee->regs[BPF_REG_3].map_ptr = map_ptr;
7746 
7747 	/* unused */
7748 	__mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
7749 	__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
7750 	callee->in_async_callback_fn = true;
7751 	callee->callback_ret_range = tnum_range(0, 1);
7752 	return 0;
7753 }
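
/* BPF-side shape of the async callback wired up above (sketch; the timer is
 * assumed to be embedded in a map value of a hypothetical 'my_map'):
 *
 *	static int timer_cb(void *map, __u32 *key, struct my_val *val)
 *	{
 *		return 0;
 *	}
 *	...
 *	bpf_timer_set_callback(&val->timer, timer_cb);
 */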
7754 
7755 static int set_find_vma_callback_state(struct bpf_verifier_env *env,
7756 				       struct bpf_func_state *caller,
7757 				       struct bpf_func_state *callee,
7758 				       int insn_idx)
7759 {
7760 	/* bpf_find_vma(struct task_struct *task, u64 addr,
7761 	 *               void *callback_fn, void *callback_ctx, u64 flags)
7762 	 * (callback_fn)(struct task_struct *task,
7763 	 *               struct vm_area_struct *vma, void *callback_ctx);
7764 	 */
7765 	callee->regs[BPF_REG_1] = caller->regs[BPF_REG_1];
7766 
7767 	callee->regs[BPF_REG_2].type = PTR_TO_BTF_ID;
7768 	__mark_reg_known_zero(&callee->regs[BPF_REG_2]);
7769 	callee->regs[BPF_REG_2].btf = btf_vmlinux;
7770 	callee->regs[BPF_REG_2].btf_id = btf_tracing_ids[BTF_TRACING_TYPE_VMA];
7771 
7772 	/* pointer to stack or null */
7773 	callee->regs[BPF_REG_3] = caller->regs[BPF_REG_4];
7774 
7775 	/* unused */
7776 	__mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
7777 	__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
7778 	callee->in_callback_fn = true;
7779 	callee->callback_ret_range = tnum_range(0, 1);
7780 	return 0;
7781 }
7782 
7783 static int set_user_ringbuf_callback_state(struct bpf_verifier_env *env,
7784 					   struct bpf_func_state *caller,
7785 					   struct bpf_func_state *callee,
7786 					   int insn_idx)
7787 {
7788 	/* bpf_user_ringbuf_drain(struct bpf_map *map, void *callback_fn, void
7789 	 *			  *callback_ctx, u64 flags);
7790 	 * callback_fn(const struct bpf_dynptr_t* dynptr, void *callback_ctx);
7791 	 */
7792 	__mark_reg_not_init(env, &callee->regs[BPF_REG_0]);
7793 	mark_dynptr_cb_reg(env, &callee->regs[BPF_REG_1], BPF_DYNPTR_TYPE_LOCAL);
7794 	callee->regs[BPF_REG_2] = caller->regs[BPF_REG_3];
7795 
7796 	/* unused */
7797 	__mark_reg_not_init(env, &callee->regs[BPF_REG_3]);
7798 	__mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
7799 	__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
7800 
7801 	callee->in_callback_fn = true;
7802 	callee->callback_ret_range = tnum_range(0, 1);
7803 	return 0;
7804 }
7805 
7806 static int set_rbtree_add_callback_state(struct bpf_verifier_env *env,
7807 					 struct bpf_func_state *caller,
7808 					 struct bpf_func_state *callee,
7809 					 int insn_idx)
7810 {
7811 	/* void bpf_rbtree_add(struct bpf_rb_root *root, struct bpf_rb_node *node,
7812 	 *                     bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b));
7813 	 *
7814 	 * 'struct bpf_rb_node *node' arg to bpf_rbtree_add is the same PTR_TO_BTF_ID w/ offset
7815 	 * that 'less' callback args will be receiving. However, 'node' arg was release_reference'd
7816 	 * by this point, so look at 'root'
7817 	 */
7818 	struct btf_field *field;
7819 
7820 	field = reg_find_field_offset(&caller->regs[BPF_REG_1], caller->regs[BPF_REG_1].off,
7821 				      BPF_RB_ROOT);
7822 	if (!field || !field->graph_root.value_btf_id)
7823 		return -EFAULT;
7824 
7825 	mark_reg_graph_node(callee->regs, BPF_REG_1, &field->graph_root);
7826 	ref_set_non_owning(env, &callee->regs[BPF_REG_1]);
7827 	mark_reg_graph_node(callee->regs, BPF_REG_2, &field->graph_root);
7828 	ref_set_non_owning(env, &callee->regs[BPF_REG_2]);
7829 
7830 	__mark_reg_not_init(env, &callee->regs[BPF_REG_3]);
7831 	__mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
7832 	__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
7833 	callee->in_callback_fn = true;
7834 	callee->callback_ret_range = tnum_range(0, 1);
7835 	return 0;
7836 }
7837 
7838 static bool is_rbtree_lock_required_kfunc(u32 btf_id);
7839 
7840 /* Are we currently verifying the callback for an rbtree helper that must
7841  * be called with lock held? If so, no need to complain about unreleased
7842  * lock
7843  */
7844 static bool in_rbtree_lock_required_cb(struct bpf_verifier_env *env)
7845 {
7846 	struct bpf_verifier_state *state = env->cur_state;
7847 	struct bpf_insn *insn = env->prog->insnsi;
7848 	struct bpf_func_state *callee;
7849 	int kfunc_btf_id;
7850 
7851 	if (!state->curframe)
7852 		return false;
7853 
7854 	callee = state->frame[state->curframe];
7855 
7856 	if (!callee->in_callback_fn)
7857 		return false;
7858 
7859 	kfunc_btf_id = insn[callee->callsite].imm;
7860 	return is_rbtree_lock_required_kfunc(kfunc_btf_id);
7861 }
7862 
7863 static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
7864 {
7865 	struct bpf_verifier_state *state = env->cur_state;
7866 	struct bpf_func_state *caller, *callee;
7867 	struct bpf_reg_state *r0;
7868 	int err;
7869 
7870 	callee = state->frame[state->curframe];
7871 	r0 = &callee->regs[BPF_REG_0];
7872 	if (r0->type == PTR_TO_STACK) {
7873 		/* technically it's ok to return caller's stack pointer
7874 		 * (or caller's caller's pointer) back to the caller,
7875 		 * since these pointers are valid. Only current stack
7876 		 * pointer will be invalid as soon as function exits,
7877 		 * but let's be conservative
7878 		 */
7879 		verbose(env, "cannot return stack pointer to the caller\n");
7880 		return -EINVAL;
7881 	}
7882 
7883 	caller = state->frame[state->curframe - 1];
7884 	if (callee->in_callback_fn) {
7885 		/* enforce R0 return value range [0, 1]. */
7886 		struct tnum range = callee->callback_ret_range;
7887 
7888 		if (r0->type != SCALAR_VALUE) {
7889 			verbose(env, "R0 not a scalar value\n");
7890 			return -EACCES;
7891 		}
7892 		if (!tnum_in(range, r0->var_off)) {
7893 			verbose_invalid_scalar(env, r0, &range, "callback return", "R0");
7894 			return -EINVAL;
7895 		}
7896 	} else {
7897 		/* return to the caller whatever r0 had in the callee */
7898 		caller->regs[BPF_REG_0] = *r0;
7899 	}
7900 
7901 	/* callback_fn frame should have released its own additions to parent's
7902 	 * reference state at this point, or check_reference_leak would
7903 	 * complain, hence it must be the same as the caller. There is no need
7904 	 * to copy it back.
7905 	 */
7906 	if (!callee->in_callback_fn) {
7907 		/* Transfer references to the caller */
7908 		err = copy_reference_state(caller, callee);
7909 		if (err)
7910 			return err;
7911 	}
7912 
7913 	*insn_idx = callee->callsite + 1;
7914 	if (env->log.level & BPF_LOG_LEVEL) {
7915 		verbose(env, "returning from callee:\n");
7916 		print_verifier_state(env, callee, true);
7917 		verbose(env, "to caller at %d:\n", *insn_idx);
7918 		print_verifier_state(env, caller, true);
7919 	}
7920 	/* clear everything in the callee */
7921 	free_func_state(callee);
7922 	state->frame[state->curframe--] = NULL;
7923 	return 0;
7924 }
7925 
7926 static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type,
7927 				   int func_id,
7928 				   struct bpf_call_arg_meta *meta)
7929 {
7930 	struct bpf_reg_state *ret_reg = &regs[BPF_REG_0];
7931 
7932 	if (ret_type != RET_INTEGER ||
7933 	    (func_id != BPF_FUNC_get_stack &&
7934 	     func_id != BPF_FUNC_get_task_stack &&
7935 	     func_id != BPF_FUNC_probe_read_str &&
7936 	     func_id != BPF_FUNC_probe_read_kernel_str &&
7937 	     func_id != BPF_FUNC_probe_read_user_str))
7938 		return;
7939 
7940 	ret_reg->smax_value = meta->msize_max_value;
7941 	ret_reg->s32_max_value = meta->msize_max_value;
7942 	ret_reg->smin_value = -MAX_ERRNO;
7943 	ret_reg->s32_min_value = -MAX_ERRNO;
7944 	reg_bounds_sync(ret_reg);
7945 }
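
/* Practical effect (editor's sketch): after e.g.
 *
 *	long n = bpf_probe_read_kernel_str(buf, sizeof(buf), src);
 *
 * R0 is known to lie in [-MAX_ERRNO, sizeof(buf)], so a subsequent access
 * like 'buf[n - 1]' guarded by 'if (n > 0)' can be proven in bounds.
 */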
7946 
7947 static int
7948 record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
7949 		int func_id, int insn_idx)
7950 {
7951 	struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
7952 	struct bpf_map *map = meta->map_ptr;
7953 
7954 	if (func_id != BPF_FUNC_tail_call &&
7955 	    func_id != BPF_FUNC_map_lookup_elem &&
7956 	    func_id != BPF_FUNC_map_update_elem &&
7957 	    func_id != BPF_FUNC_map_delete_elem &&
7958 	    func_id != BPF_FUNC_map_push_elem &&
7959 	    func_id != BPF_FUNC_map_pop_elem &&
7960 	    func_id != BPF_FUNC_map_peek_elem &&
7961 	    func_id != BPF_FUNC_for_each_map_elem &&
7962 	    func_id != BPF_FUNC_redirect_map &&
7963 	    func_id != BPF_FUNC_map_lookup_percpu_elem)
7964 		return 0;
7965 
7966 	if (map == NULL) {
7967 		verbose(env, "kernel subsystem misconfigured verifier\n");
7968 		return -EINVAL;
7969 	}
7970 
7971 	/* In case of read-only, some additional restrictions
7972 	 * need to be applied in order to prevent altering the
7973 	 * state of the map from program side.
7974 	 */
7975 	if ((map->map_flags & BPF_F_RDONLY_PROG) &&
7976 	    (func_id == BPF_FUNC_map_delete_elem ||
7977 	     func_id == BPF_FUNC_map_update_elem ||
7978 	     func_id == BPF_FUNC_map_push_elem ||
7979 	     func_id == BPF_FUNC_map_pop_elem)) {
7980 		verbose(env, "write into map forbidden\n");
7981 		return -EACCES;
7982 	}
7983 
7984 	if (!BPF_MAP_PTR(aux->map_ptr_state))
7985 		bpf_map_ptr_store(aux, meta->map_ptr,
7986 				  !meta->map_ptr->bypass_spec_v1);
7987 	else if (BPF_MAP_PTR(aux->map_ptr_state) != meta->map_ptr)
7988 		bpf_map_ptr_store(aux, BPF_MAP_PTR_POISON,
7989 				  !meta->map_ptr->bypass_spec_v1);
7990 	return 0;
7991 }
7992 
7993 static int
7994 record_func_key(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
7995 		int func_id, int insn_idx)
7996 {
7997 	struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
7998 	struct bpf_reg_state *regs = cur_regs(env), *reg;
7999 	struct bpf_map *map = meta->map_ptr;
8000 	u64 val, max;
8001 	int err;
8002 
8003 	if (func_id != BPF_FUNC_tail_call)
8004 		return 0;
8005 	if (!map || map->map_type != BPF_MAP_TYPE_PROG_ARRAY) {
8006 		verbose(env, "kernel subsystem misconfigured verifier\n");
8007 		return -EINVAL;
8008 	}
8009 
8010 	reg = &regs[BPF_REG_3];
8011 	val = reg->var_off.value;
8012 	max = map->max_entries;
8013 
8014 	if (!(register_is_const(reg) && val < max)) {
8015 		bpf_map_key_store(aux, BPF_MAP_KEY_POISON);
8016 		return 0;
8017 	}
8018 
8019 	err = mark_chain_precision(env, BPF_REG_3);
8020 	if (err)
8021 		return err;
8022 	if (bpf_map_key_unseen(aux))
8023 		bpf_map_key_store(aux, val);
8024 	else if (!bpf_map_key_poisoned(aux) &&
8025 		  bpf_map_key_immediate(aux) != val)
8026 		bpf_map_key_store(aux, BPF_MAP_KEY_POISON);
8027 	return 0;
8028 }
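
/* Sketch of the two outcomes above: a constant, in-range index is recorded so
 * the tail call can later be turned into a direct jump, while anything else
 * poisons the key and keeps the generic path:
 *
 *	bpf_tail_call(ctx, &jmp_table, 2);	// key 2 recorded (if 2 < max_entries)
 *	bpf_tail_call(ctx, &jmp_table, idx);	// variable idx: BPF_MAP_KEY_POISON
 *
 * 'jmp_table' is a hypothetical BPF_MAP_TYPE_PROG_ARRAY map.
 */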
8029 
8030 static int check_reference_leak(struct bpf_verifier_env *env)
8031 {
8032 	struct bpf_func_state *state = cur_func(env);
8033 	bool refs_lingering = false;
8034 	int i;
8035 
8036 	if (state->frameno && !state->in_callback_fn)
8037 		return 0;
8038 
8039 	for (i = 0; i < state->acquired_refs; i++) {
8040 		if (state->in_callback_fn && state->refs[i].callback_ref != state->frameno)
8041 			continue;
8042 		verbose(env, "Unreleased reference id=%d alloc_insn=%d\n",
8043 			state->refs[i].id, state->refs[i].insn_idx);
8044 		refs_lingering = true;
8045 	}
8046 	return refs_lingering ? -EINVAL : 0;
8047 }
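
/* What a lingering reference looks like from the program side (sketch):
 *
 *	struct bpf_sock *sk;
 *
 *	sk = bpf_sk_lookup_tcp(ctx, &tuple, sizeof(tuple), -1, 0);
 *	if (!sk)
 *		return 0;
 *	return 0;	// leak: bpf_sk_release(sk) was never called
 *
 * which is reported as "Unreleased reference id=N alloc_insn=M".
 */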
8048 
8049 static int check_bpf_snprintf_call(struct bpf_verifier_env *env,
8050 				   struct bpf_reg_state *regs)
8051 {
8052 	struct bpf_reg_state *fmt_reg = &regs[BPF_REG_3];
8053 	struct bpf_reg_state *data_len_reg = &regs[BPF_REG_5];
8054 	struct bpf_map *fmt_map = fmt_reg->map_ptr;
8055 	struct bpf_bprintf_data data = {};
8056 	int err, fmt_map_off, num_args;
8057 	u64 fmt_addr;
8058 	char *fmt;
8059 
8060 	/* data must be an array of u64 */
8061 	if (data_len_reg->var_off.value % 8)
8062 		return -EINVAL;
8063 	num_args = data_len_reg->var_off.value / 8;
8064 
8065 	/* fmt being ARG_PTR_TO_CONST_STR guarantees that var_off is const
8066 	 * and map_direct_value_addr is set.
8067 	 */
8068 	fmt_map_off = fmt_reg->off + fmt_reg->var_off.value;
8069 	err = fmt_map->ops->map_direct_value_addr(fmt_map, &fmt_addr,
8070 						  fmt_map_off);
8071 	if (err) {
8072 		verbose(env, "verifier bug\n");
8073 		return -EFAULT;
8074 	}
8075 	fmt = (char *)(long)fmt_addr + fmt_map_off;
8076 
8077 	/* We are also guaranteed that fmt+fmt_map_off is NULL terminated, so we
8078 	 * can focus on validating the format specifiers.
8079 	 */
8080 	err = bpf_bprintf_prepare(fmt, UINT_MAX, NULL, num_args, &data);
8081 	if (err < 0)
8082 		verbose(env, "Invalid format string\n");
8083 
8084 	return err;
8085 }
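
/* The layout being validated above (sketch): the data area is an array of
 * u64 values, so its byte length must be a multiple of 8:
 *
 *	__u64 args[] = { pid, (__u64)(long)name_ptr };
 *	bpf_snprintf(out, sizeof(out), "pid=%d comm=%s", args, sizeof(args));
 *
 * sizeof(args) lands in R5 (data_len_reg) and num_args becomes 2; the format
 * string lives in a read-only map, per the ARG_PTR_TO_CONST_STR rules.
 */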
8086 
8087 static int check_get_func_ip(struct bpf_verifier_env *env)
8088 {
8089 	enum bpf_prog_type type = resolve_prog_type(env->prog);
8090 	int func_id = BPF_FUNC_get_func_ip;
8091 
8092 	if (type == BPF_PROG_TYPE_TRACING) {
8093 		if (!bpf_prog_has_trampoline(env->prog)) {
8094 			verbose(env, "func %s#%d supported only for fentry/fexit/fmod_ret programs\n",
8095 				func_id_name(func_id), func_id);
8096 			return -ENOTSUPP;
8097 		}
8098 		return 0;
8099 	} else if (type == BPF_PROG_TYPE_KPROBE) {
8100 		return 0;
8101 	}
8102 
8103 	verbose(env, "func %s#%d not supported for program type %d\n",
8104 		func_id_name(func_id), func_id, type);
8105 	return -ENOTSUPP;
8106 }
8107 
8108 static struct bpf_insn_aux_data *cur_aux(struct bpf_verifier_env *env)
8109 {
8110 	return &env->insn_aux_data[env->insn_idx];
8111 }
8112 
8113 static bool loop_flag_is_zero(struct bpf_verifier_env *env)
8114 {
8115 	struct bpf_reg_state *regs = cur_regs(env);
8116 	struct bpf_reg_state *reg = &regs[BPF_REG_4];
8117 	bool reg_is_null = register_is_null(reg);
8118 
8119 	if (reg_is_null)
8120 		mark_chain_precision(env, BPF_REG_4);
8121 
8122 	return reg_is_null;
8123 }
8124 
8125 static void update_loop_inline_state(struct bpf_verifier_env *env, u32 subprogno)
8126 {
8127 	struct bpf_loop_inline_state *state = &cur_aux(env)->loop_inline_state;
8128 
8129 	if (!state->initialized) {
8130 		state->initialized = 1;
8131 		state->fit_for_inline = loop_flag_is_zero(env);
8132 		state->callback_subprogno = subprogno;
8133 		return;
8134 	}
8135 
8136 	if (!state->fit_for_inline)
8137 		return;
8138 
8139 	state->fit_for_inline = (loop_flag_is_zero(env) &&
8140 				 state->callback_subprogno == subprogno);
8141 }
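
/* Editor's note on the state tracked above: a bpf_loop() call site stays "fit
 * for inline" only if flags (R4) is provably zero and every verified path
 * reaches it with the same callback subprog, e.g.:
 *
 *	bpf_loop(n, loop_cb, &my_ctx, 0);	// flags == 0: may be inlined
 *	bpf_loop(n, loop_cb, &my_ctx, f);	// unknown flags: stays a helper call
 */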
8142 
8143 static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
8144 			     int *insn_idx_p)
8145 {
8146 	enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
8147 	const struct bpf_func_proto *fn = NULL;
8148 	enum bpf_return_type ret_type;
8149 	enum bpf_type_flag ret_flag;
8150 	struct bpf_reg_state *regs;
8151 	struct bpf_call_arg_meta meta;
8152 	int insn_idx = *insn_idx_p;
8153 	bool changes_data;
8154 	int i, err, func_id;
8155 
8156 	/* find function prototype */
8157 	func_id = insn->imm;
8158 	if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) {
8159 		verbose(env, "invalid func %s#%d\n", func_id_name(func_id),
8160 			func_id);
8161 		return -EINVAL;
8162 	}
8163 
8164 	if (env->ops->get_func_proto)
8165 		fn = env->ops->get_func_proto(func_id, env->prog);
8166 	if (!fn) {
8167 		verbose(env, "unknown func %s#%d\n", func_id_name(func_id),
8168 			func_id);
8169 		return -EINVAL;
8170 	}
8171 
8172 	/* eBPF programs must be GPL compatible to use GPL-ed functions */
8173 	if (!env->prog->gpl_compatible && fn->gpl_only) {
8174 		verbose(env, "cannot call GPL-restricted function from non-GPL compatible program\n");
8175 		return -EINVAL;
8176 	}
8177 
8178 	if (fn->allowed && !fn->allowed(env->prog)) {
8179 		verbose(env, "helper call is not allowed in probe\n");
8180 		return -EINVAL;
8181 	}
8182 
8183 	if (!env->prog->aux->sleepable && fn->might_sleep) {
8184 		verbose(env, "helper call might sleep in a non-sleepable prog\n");
8185 		return -EINVAL;
8186 	}
8187 
8188 	/* With LD_ABS/IND some JITs save/restore skb from r1. */
8189 	changes_data = bpf_helper_changes_pkt_data(fn->func);
8190 	if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) {
8191 		verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n",
8192 			func_id_name(func_id), func_id);
8193 		return -EINVAL;
8194 	}
8195 
8196 	memset(&meta, 0, sizeof(meta));
8197 	meta.pkt_access = fn->pkt_access;
8198 
8199 	err = check_func_proto(fn, func_id);
8200 	if (err) {
8201 		verbose(env, "kernel subsystem misconfigured func %s#%d\n",
8202 			func_id_name(func_id), func_id);
8203 		return err;
8204 	}
8205 
8206 	if (env->cur_state->active_rcu_lock) {
8207 		if (fn->might_sleep) {
8208 			verbose(env, "sleepable helper %s#%d in rcu_read_lock region\n",
8209 				func_id_name(func_id), func_id);
8210 			return -EINVAL;
8211 		}
8212 
8213 		if (env->prog->aux->sleepable && is_storage_get_function(func_id))
8214 			env->insn_aux_data[insn_idx].storage_get_func_atomic = true;
8215 	}
8216 
8217 	meta.func_id = func_id;
8218 	/* check args */
8219 	for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++) {
8220 		err = check_func_arg(env, i, &meta, fn);
8221 		if (err)
8222 			return err;
8223 	}
8224 
8225 	err = record_func_map(env, &meta, func_id, insn_idx);
8226 	if (err)
8227 		return err;
8228 
8229 	err = record_func_key(env, &meta, func_id, insn_idx);
8230 	if (err)
8231 		return err;
8232 
8233 	/* Mark slots with STACK_MISC in case of raw mode; the stack offset
8234 	 * is inferred from register state.
8235 	 */
8236 	for (i = 0; i < meta.access_size; i++) {
8237 		err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B,
8238 				       BPF_WRITE, -1, false);
8239 		if (err)
8240 			return err;
8241 	}
8242 
8243 	regs = cur_regs(env);
8244 
8245 	/* This can only be set for PTR_TO_STACK, as CONST_PTR_TO_DYNPTR cannot
8246 	 * be reinitialized by any dynptr helper. Hence, mark_stack_slots_dynptr
8247 	 * is safe to do directly.
8248 	 */
8249 	if (meta.uninit_dynptr_regno) {
8250 		if (regs[meta.uninit_dynptr_regno].type == CONST_PTR_TO_DYNPTR) {
8251 			verbose(env, "verifier internal error: CONST_PTR_TO_DYNPTR cannot be initialized\n");
8252 			return -EFAULT;
8253 		}
8254 		/* we write BPF_DW bits (8 bytes) at a time */
8255 		for (i = 0; i < BPF_DYNPTR_SIZE; i += 8) {
8256 			err = check_mem_access(env, insn_idx, meta.uninit_dynptr_regno,
8257 					       i, BPF_DW, BPF_WRITE, -1, false);
8258 			if (err)
8259 				return err;
8260 		}
8261 
8262 		err = mark_stack_slots_dynptr(env, &regs[meta.uninit_dynptr_regno],
8263 					      fn->arg_type[meta.uninit_dynptr_regno - BPF_REG_1],
8264 					      insn_idx);
8265 		if (err)
8266 			return err;
8267 	}
8268 
8269 	if (meta.release_regno) {
8270 		err = -EINVAL;
8271 		/* This can only be set for PTR_TO_STACK, as CONST_PTR_TO_DYNPTR cannot
8272 		 * be released by any dynptr helper. Hence, unmark_stack_slots_dynptr
8273 		 * is safe to do directly.
8274 		 */
8275 		if (arg_type_is_dynptr(fn->arg_type[meta.release_regno - BPF_REG_1])) {
8276 			if (regs[meta.release_regno].type == CONST_PTR_TO_DYNPTR) {
8277 				verbose(env, "verifier internal error: CONST_PTR_TO_DYNPTR cannot be released\n");
8278 				return -EFAULT;
8279 			}
8280 			err = unmark_stack_slots_dynptr(env, &regs[meta.release_regno]);
8281 		} else if (meta.ref_obj_id) {
8282 			err = release_reference(env, meta.ref_obj_id);
8283 		} else if (register_is_null(&regs[meta.release_regno])) {
8284 			/* meta.ref_obj_id can only be 0 if the register that is meant
8285 			 * to be released is NULL; a valid release_regno is always > R0.
8286 			 */
8287 			err = 0;
8288 		}
8289 		if (err) {
8290 			verbose(env, "func %s#%d reference has not been acquired before\n",
8291 				func_id_name(func_id), func_id);
8292 			return err;
8293 		}
8294 	}
8295 
8296 	switch (func_id) {
8297 	case BPF_FUNC_tail_call:
8298 		err = check_reference_leak(env);
8299 		if (err) {
8300 			verbose(env, "tail_call would lead to reference leak\n");
8301 			return err;
8302 		}
8303 		break;
8304 	case BPF_FUNC_get_local_storage:
8305 		/* check that the flags argument in get_local_storage(map, flags) is 0;
8306 		 * this is required because get_local_storage() can't return an error.
8307 		 */
8308 		if (!register_is_null(&regs[BPF_REG_2])) {
8309 			verbose(env, "get_local_storage() doesn't support non-zero flags\n");
8310 			return -EINVAL;
8311 		}
8312 		break;
8313 	case BPF_FUNC_for_each_map_elem:
8314 		err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
8315 					set_map_elem_callback_state);
8316 		break;
8317 	case BPF_FUNC_timer_set_callback:
8318 		err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
8319 					set_timer_callback_state);
8320 		break;
8321 	case BPF_FUNC_find_vma:
8322 		err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
8323 					set_find_vma_callback_state);
8324 		break;
8325 	case BPF_FUNC_snprintf:
8326 		err = check_bpf_snprintf_call(env, regs);
8327 		break;
8328 	case BPF_FUNC_loop:
8329 		update_loop_inline_state(env, meta.subprogno);
8330 		err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
8331 					set_loop_callback_state);
8332 		break;
8333 	case BPF_FUNC_dynptr_from_mem:
8334 		if (regs[BPF_REG_1].type != PTR_TO_MAP_VALUE) {
8335 			verbose(env, "Unsupported reg type %s for bpf_dynptr_from_mem data\n",
8336 				reg_type_str(env, regs[BPF_REG_1].type));
8337 			return -EACCES;
8338 		}
8339 		break;
8340 	case BPF_FUNC_set_retval:
8341 		if (prog_type == BPF_PROG_TYPE_LSM &&
8342 		    env->prog->expected_attach_type == BPF_LSM_CGROUP) {
8343 			if (!env->prog->aux->attach_func_proto->type) {
8344 				/* Make sure programs that attach to void
8345 				 * hooks don't try to modify return value.
8346 				 */
8347 				verbose(env, "BPF_LSM_CGROUP that attach to void LSM hooks can't modify return value!\n");
8348 				return -EINVAL;
8349 			}
8350 		}
8351 		break;
8352 	case BPF_FUNC_dynptr_data:
8353 		for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++) {
8354 			if (arg_type_is_dynptr(fn->arg_type[i])) {
8355 				struct bpf_reg_state *reg = &regs[BPF_REG_1 + i];
8356 				int id, ref_obj_id;
8357 
8358 				if (meta.dynptr_id) {
8359 					verbose(env, "verifier internal error: meta.dynptr_id already set\n");
8360 					return -EFAULT;
8361 				}
8362 
8363 				if (meta.ref_obj_id) {
8364 					verbose(env, "verifier internal error: meta.ref_obj_id already set\n");
8365 					return -EFAULT;
8366 				}
8367 
8368 				id = dynptr_id(env, reg);
8369 				if (id < 0) {
8370 					verbose(env, "verifier internal error: failed to obtain dynptr id\n");
8371 					return id;
8372 				}
8373 
8374 				ref_obj_id = dynptr_ref_obj_id(env, reg);
8375 				if (ref_obj_id < 0) {
8376 					verbose(env, "verifier internal error: failed to obtain dynptr ref_obj_id\n");
8377 					return ref_obj_id;
8378 				}
8379 
8380 				meta.dynptr_id = id;
8381 				meta.ref_obj_id = ref_obj_id;
8382 				break;
8383 			}
8384 		}
8385 		if (i == MAX_BPF_FUNC_REG_ARGS) {
8386 			verbose(env, "verifier internal error: no dynptr in bpf_dynptr_data()\n");
8387 			return -EFAULT;
8388 		}
8389 		break;
8390 	case BPF_FUNC_user_ringbuf_drain:
8391 		err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
8392 					set_user_ringbuf_callback_state);
8393 		break;
8394 	}
8395 
8396 	if (err)
8397 		return err;
8398 
8399 	/* reset caller saved regs */
8400 	for (i = 0; i < CALLER_SAVED_REGS; i++) {
8401 		mark_reg_not_init(env, regs, caller_saved[i]);
8402 		check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
8403 	}
8404 
8405 	/* helper call returns 64-bit value. */
8406 	regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG;
8407 
8408 	/* update return register (already marked as written above) */
8409 	ret_type = fn->ret_type;
8410 	ret_flag = type_flag(ret_type);
8411 
8412 	switch (base_type(ret_type)) {
8413 	case RET_INTEGER:
8414 		/* sets type to SCALAR_VALUE */
8415 		mark_reg_unknown(env, regs, BPF_REG_0);
8416 		break;
8417 	case RET_VOID:
8418 		regs[BPF_REG_0].type = NOT_INIT;
8419 		break;
8420 	case RET_PTR_TO_MAP_VALUE:
8421 		/* There is no offset applied yet, variable or fixed */
8422 		mark_reg_known_zero(env, regs, BPF_REG_0);
8423 		/* remember map_ptr, so that check_map_access()
8424 		 * can check 'value_size' boundary of memory access
8425 		 * to map element returned from bpf_map_lookup_elem()
8426 		 */
8427 		if (meta.map_ptr == NULL) {
8428 			verbose(env,
8429 				"kernel subsystem misconfigured verifier\n");
8430 			return -EINVAL;
8431 		}
8432 		regs[BPF_REG_0].map_ptr = meta.map_ptr;
8433 		regs[BPF_REG_0].map_uid = meta.map_uid;
8434 		regs[BPF_REG_0].type = PTR_TO_MAP_VALUE | ret_flag;
8435 		if (!type_may_be_null(ret_type) &&
8436 		    btf_record_has_field(meta.map_ptr->record, BPF_SPIN_LOCK)) {
8437 			regs[BPF_REG_0].id = ++env->id_gen;
8438 		}
8439 		break;
8440 	case RET_PTR_TO_SOCKET:
8441 		mark_reg_known_zero(env, regs, BPF_REG_0);
8442 		regs[BPF_REG_0].type = PTR_TO_SOCKET | ret_flag;
8443 		break;
8444 	case RET_PTR_TO_SOCK_COMMON:
8445 		mark_reg_known_zero(env, regs, BPF_REG_0);
8446 		regs[BPF_REG_0].type = PTR_TO_SOCK_COMMON | ret_flag;
8447 		break;
8448 	case RET_PTR_TO_TCP_SOCK:
8449 		mark_reg_known_zero(env, regs, BPF_REG_0);
8450 		regs[BPF_REG_0].type = PTR_TO_TCP_SOCK | ret_flag;
8451 		break;
8452 	case RET_PTR_TO_MEM:
8453 		mark_reg_known_zero(env, regs, BPF_REG_0);
8454 		regs[BPF_REG_0].type = PTR_TO_MEM | ret_flag;
8455 		regs[BPF_REG_0].mem_size = meta.mem_size;
8456 		break;
8457 	case RET_PTR_TO_MEM_OR_BTF_ID:
8458 	{
8459 		const struct btf_type *t;
8460 
8461 		mark_reg_known_zero(env, regs, BPF_REG_0);
8462 		t = btf_type_skip_modifiers(meta.ret_btf, meta.ret_btf_id, NULL);
8463 		if (!btf_type_is_struct(t)) {
8464 			u32 tsize;
8465 			const struct btf_type *ret;
8466 			const char *tname;
8467 
8468 			/* resolve the type size of ksym. */
8469 			ret = btf_resolve_size(meta.ret_btf, t, &tsize);
8470 			if (IS_ERR(ret)) {
8471 				tname = btf_name_by_offset(meta.ret_btf, t->name_off);
8472 				verbose(env, "unable to resolve the size of type '%s': %ld\n",
8473 					tname, PTR_ERR(ret));
8474 				return -EINVAL;
8475 			}
8476 			regs[BPF_REG_0].type = PTR_TO_MEM | ret_flag;
8477 			regs[BPF_REG_0].mem_size = tsize;
8478 		} else {
8479 			/* MEM_RDONLY may be carried from ret_flag, but it
8480 		 * doesn't apply to PTR_TO_BTF_ID. Clear it, otherwise
8481 			 * it will confuse the check of PTR_TO_BTF_ID in
8482 			 * check_mem_access().
8483 			 */
8484 			ret_flag &= ~MEM_RDONLY;
8485 
8486 			regs[BPF_REG_0].type = PTR_TO_BTF_ID | ret_flag;
8487 			regs[BPF_REG_0].btf = meta.ret_btf;
8488 			regs[BPF_REG_0].btf_id = meta.ret_btf_id;
8489 		}
8490 		break;
8491 	}
8492 	case RET_PTR_TO_BTF_ID:
8493 	{
8494 		struct btf *ret_btf;
8495 		int ret_btf_id;
8496 
8497 		mark_reg_known_zero(env, regs, BPF_REG_0);
8498 		regs[BPF_REG_0].type = PTR_TO_BTF_ID | ret_flag;
8499 		if (func_id == BPF_FUNC_kptr_xchg) {
8500 			ret_btf = meta.kptr_field->kptr.btf;
8501 			ret_btf_id = meta.kptr_field->kptr.btf_id;
8502 		} else {
8503 			if (fn->ret_btf_id == BPF_PTR_POISON) {
8504 				verbose(env, "verifier internal error:");
8505 				verbose(env, "func %s has non-overwritten BPF_PTR_POISON return type\n",
8506 					func_id_name(func_id));
8507 				return -EINVAL;
8508 			}
8509 			ret_btf = btf_vmlinux;
8510 			ret_btf_id = *fn->ret_btf_id;
8511 		}
8512 		if (ret_btf_id == 0) {
8513 			verbose(env, "invalid return type %u of func %s#%d\n",
8514 				base_type(ret_type), func_id_name(func_id),
8515 				func_id);
8516 			return -EINVAL;
8517 		}
8518 		regs[BPF_REG_0].btf = ret_btf;
8519 		regs[BPF_REG_0].btf_id = ret_btf_id;
8520 		break;
8521 	}
8522 	default:
8523 		verbose(env, "unknown return type %u of func %s#%d\n",
8524 			base_type(ret_type), func_id_name(func_id), func_id);
8525 		return -EINVAL;
8526 	}
8527 
8528 	if (type_may_be_null(regs[BPF_REG_0].type))
8529 		regs[BPF_REG_0].id = ++env->id_gen;
8530 
8531 	if (helper_multiple_ref_obj_use(func_id, meta.map_ptr)) {
8532 		verbose(env, "verifier internal error: func %s#%d sets ref_obj_id more than once\n",
8533 			func_id_name(func_id), func_id);
8534 		return -EFAULT;
8535 	}
8536 
8537 	if (is_dynptr_ref_function(func_id))
8538 		regs[BPF_REG_0].dynptr_id = meta.dynptr_id;
8539 
8540 	if (is_ptr_cast_function(func_id) || is_dynptr_ref_function(func_id)) {
8541 		/* For release_reference() */
8542 		regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id;
8543 	} else if (is_acquire_function(func_id, meta.map_ptr)) {
8544 		int id = acquire_reference_state(env, insn_idx);
8545 
8546 		if (id < 0)
8547 			return id;
8548 		/* For mark_ptr_or_null_reg() */
8549 		regs[BPF_REG_0].id = id;
8550 		/* For release_reference() */
8551 		regs[BPF_REG_0].ref_obj_id = id;
8552 	}
8553 
8554 	do_refine_retval_range(regs, fn->ret_type, func_id, &meta);
8555 
8556 	err = check_map_func_compatibility(env, meta.map_ptr, func_id);
8557 	if (err)
8558 		return err;
8559 
8560 	if ((func_id == BPF_FUNC_get_stack ||
8561 	     func_id == BPF_FUNC_get_task_stack) &&
8562 	    !env->prog->has_callchain_buf) {
8563 		const char *err_str;
8564 
8565 #ifdef CONFIG_PERF_EVENTS
8566 		err = get_callchain_buffers(sysctl_perf_event_max_stack);
8567 		err_str = "cannot get callchain buffer for func %s#%d\n";
8568 #else
8569 		err = -ENOTSUPP;
8570 		err_str = "func %s#%d not supported without CONFIG_PERF_EVENTS\n";
8571 #endif
8572 		if (err) {
8573 			verbose(env, err_str, func_id_name(func_id), func_id);
8574 			return err;
8575 		}
8576 
8577 		env->prog->has_callchain_buf = true;
8578 	}
8579 
8580 	if (func_id == BPF_FUNC_get_stackid || func_id == BPF_FUNC_get_stack)
8581 		env->prog->call_get_stack = true;
8582 
8583 	if (func_id == BPF_FUNC_get_func_ip) {
8584 		if (check_get_func_ip(env))
8585 			return -ENOTSUPP;
8586 		env->prog->call_get_func_ip = true;
8587 	}
8588 
8589 	if (changes_data)
8590 		clear_all_pkt_pointers(env);
8591 	return 0;
8592 }
8593 
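/* Example (sketch) of the R0 handling in check_helper_call() above:
 *
 *	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
 *	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
 *	BPF_LD_MAP_FD(BPF_REG_1, map_fd),
 *	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
 *
 * (assuming a key was stored at fp-8) leaves R1-R5 marked NOT_INIT and R0
 * typed PTR_TO_MAP_VALUE | PTR_MAYBE_NULL with a fresh reg->id, so a later
 * 'if r0 == 0' check lets mark_ptr_or_null_reg() refine both branches.
 */
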
8594 /* mark_btf_func_reg_size() is used when the reg size is determined by
8595  * the size of the BTF func_proto's return value or arguments.
8596  */
8597 static void mark_btf_func_reg_size(struct bpf_verifier_env *env, u32 regno,
8598 				   size_t reg_size)
8599 {
8600 	struct bpf_reg_state *reg = &cur_regs(env)[regno];
8601 
8602 	if (regno == BPF_REG_0) {
8603 		/* Function return value */
8604 		reg->live |= REG_LIVE_WRITTEN;
8605 		reg->subreg_def = reg_size == sizeof(u64) ?
8606 			DEF_NOT_SUBREG : env->insn_idx + 1;
8607 	} else {
8608 		/* Function argument */
8609 		if (reg_size == sizeof(u64)) {
8610 			mark_insn_zext(env, reg);
8611 			mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
8612 		} else {
8613 			mark_reg_read(env, reg, reg->parent, REG_LIVE_READ32);
8614 		}
8615 	}
8616 }
8617 
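/* Example (sketch): for a kfunc with BTF prototype 'u64 f(u32 a)',
 * mark_btf_func_reg_size() records the argument read on R1 as
 * REG_LIVE_READ32 (only the lower 32 bits are consumed), while R0 gets
 * REG_LIVE_WRITTEN with subreg_def = DEF_NOT_SUBREG because the call
 * defines the full 64-bit return value.
 */
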
8618 struct bpf_kfunc_call_arg_meta {
8619 	/* In parameters */
8620 	struct btf *btf;
8621 	u32 func_id;
8622 	u32 kfunc_flags;
8623 	const struct btf_type *func_proto;
8624 	const char *func_name;
8625 	/* Out parameters */
8626 	u32 ref_obj_id;
8627 	u8 release_regno;
8628 	bool r0_rdonly;
8629 	u32 ret_btf_id;
8630 	u64 r0_size;
8631 	u32 subprogno;
8632 	struct {
8633 		u64 value;
8634 		bool found;
8635 	} arg_constant;
8636 	struct {
8637 		struct btf *btf;
8638 		u32 btf_id;
8639 	} arg_obj_drop;
8640 	struct {
8641 		struct btf_field *field;
8642 	} arg_list_head;
8643 	struct {
8644 		struct btf_field *field;
8645 	} arg_rbtree_root;
8646 };
8647 
8648 static bool is_kfunc_acquire(struct bpf_kfunc_call_arg_meta *meta)
8649 {
8650 	return meta->kfunc_flags & KF_ACQUIRE;
8651 }
8652 
8653 static bool is_kfunc_ret_null(struct bpf_kfunc_call_arg_meta *meta)
8654 {
8655 	return meta->kfunc_flags & KF_RET_NULL;
8656 }
8657 
8658 static bool is_kfunc_release(struct bpf_kfunc_call_arg_meta *meta)
8659 {
8660 	return meta->kfunc_flags & KF_RELEASE;
8661 }
8662 
8663 static bool is_kfunc_trusted_args(struct bpf_kfunc_call_arg_meta *meta)
8664 {
8665 	return meta->kfunc_flags & KF_TRUSTED_ARGS;
8666 }
8667 
8668 static bool is_kfunc_sleepable(struct bpf_kfunc_call_arg_meta *meta)
8669 {
8670 	return meta->kfunc_flags & KF_SLEEPABLE;
8671 }
8672 
8673 static bool is_kfunc_destructive(struct bpf_kfunc_call_arg_meta *meta)
8674 {
8675 	return meta->kfunc_flags & KF_DESTRUCTIVE;
8676 }
8677 
8678 static bool is_kfunc_rcu(struct bpf_kfunc_call_arg_meta *meta)
8679 {
8680 	return meta->kfunc_flags & KF_RCU;
8681 }
8682 
8683 static bool is_kfunc_arg_kptr_get(struct bpf_kfunc_call_arg_meta *meta, int arg)
8684 {
8685 	return arg == 0 && (meta->kfunc_flags & KF_KPTR_GET);
8686 }
8687 
8688 static bool __kfunc_param_match_suffix(const struct btf *btf,
8689 				       const struct btf_param *arg,
8690 				       const char *suffix)
8691 {
8692 	int suffix_len = strlen(suffix), len;
8693 	const char *param_name;
8694 
8695 	/* In the future, this can be ported to use BTF tagging */
8696 	param_name = btf_name_by_offset(btf, arg->name_off);
8697 	if (str_is_empty(param_name))
8698 		return false;
8699 	len = strlen(param_name);
8700 	if (len < suffix_len)
8701 		return false;
8702 	param_name += len - suffix_len;
8703 	return !strncmp(param_name, suffix, suffix_len);
8704 }
8705 
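/* Example (sketch): parameter-name suffixes recognized via
 * __kfunc_param_match_suffix(), shown on hypothetical kfunc names:
 *
 *	void *bpf_dummy_get(void *p, u32 p__sz);	- __sz: size of the preceding mem arg
 *	void *bpf_dummy_new(u64 size__k);		- __k: must be a known constant
 *	void bpf_dummy_skip(void *unused__ign);		- __ign: argument is ignored
 *	void bpf_dummy_free(void *obj__alloc);		- __alloc: allocated object
 *
 * Only the suffix of the BTF parameter name matters, not the C type.
 */
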
8706 static bool is_kfunc_arg_mem_size(const struct btf *btf,
8707 				  const struct btf_param *arg,
8708 				  const struct bpf_reg_state *reg)
8709 {
8710 	const struct btf_type *t;
8711 
8712 	t = btf_type_skip_modifiers(btf, arg->type, NULL);
8713 	if (!btf_type_is_scalar(t) || reg->type != SCALAR_VALUE)
8714 		return false;
8715 
8716 	return __kfunc_param_match_suffix(btf, arg, "__sz");
8717 }
8718 
8719 static bool is_kfunc_arg_constant(const struct btf *btf, const struct btf_param *arg)
8720 {
8721 	return __kfunc_param_match_suffix(btf, arg, "__k");
8722 }
8723 
8724 static bool is_kfunc_arg_ignore(const struct btf *btf, const struct btf_param *arg)
8725 {
8726 	return __kfunc_param_match_suffix(btf, arg, "__ign");
8727 }
8728 
8729 static bool is_kfunc_arg_alloc_obj(const struct btf *btf, const struct btf_param *arg)
8730 {
8731 	return __kfunc_param_match_suffix(btf, arg, "__alloc");
8732 }
8733 
8734 static bool is_kfunc_arg_scalar_with_name(const struct btf *btf,
8735 					  const struct btf_param *arg,
8736 					  const char *name)
8737 {
8738 	int len, target_len = strlen(name);
8739 	const char *param_name;
8740 
8741 	param_name = btf_name_by_offset(btf, arg->name_off);
8742 	if (str_is_empty(param_name))
8743 		return false;
8744 	len = strlen(param_name);
8745 	if (len != target_len)
8746 		return false;
8747 	if (strcmp(param_name, name))
8748 		return false;
8749 
8750 	return true;
8751 }
8752 
8753 enum {
8754 	KF_ARG_DYNPTR_ID,
8755 	KF_ARG_LIST_HEAD_ID,
8756 	KF_ARG_LIST_NODE_ID,
8757 	KF_ARG_RB_ROOT_ID,
8758 	KF_ARG_RB_NODE_ID,
8759 };
8760 
8761 BTF_ID_LIST(kf_arg_btf_ids)
8762 BTF_ID(struct, bpf_dynptr_kern)
8763 BTF_ID(struct, bpf_list_head)
8764 BTF_ID(struct, bpf_list_node)
8765 BTF_ID(struct, bpf_rb_root)
8766 BTF_ID(struct, bpf_rb_node)
8767 
8768 static bool __is_kfunc_ptr_arg_type(const struct btf *btf,
8769 				    const struct btf_param *arg, int type)
8770 {
8771 	const struct btf_type *t;
8772 	u32 res_id;
8773 
8774 	t = btf_type_skip_modifiers(btf, arg->type, NULL);
8775 	if (!t)
8776 		return false;
8777 	if (!btf_type_is_ptr(t))
8778 		return false;
8779 	t = btf_type_skip_modifiers(btf, t->type, &res_id);
8780 	if (!t)
8781 		return false;
8782 	return btf_types_are_same(btf, res_id, btf_vmlinux, kf_arg_btf_ids[type]);
8783 }
8784 
8785 static bool is_kfunc_arg_dynptr(const struct btf *btf, const struct btf_param *arg)
8786 {
8787 	return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_DYNPTR_ID);
8788 }
8789 
8790 static bool is_kfunc_arg_list_head(const struct btf *btf, const struct btf_param *arg)
8791 {
8792 	return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_LIST_HEAD_ID);
8793 }
8794 
8795 static bool is_kfunc_arg_list_node(const struct btf *btf, const struct btf_param *arg)
8796 {
8797 	return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_LIST_NODE_ID);
8798 }
8799 
8800 static bool is_kfunc_arg_rbtree_root(const struct btf *btf, const struct btf_param *arg)
8801 {
8802 	return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_RB_ROOT_ID);
8803 }
8804 
8805 static bool is_kfunc_arg_rbtree_node(const struct btf *btf, const struct btf_param *arg)
8806 {
8807 	return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_RB_NODE_ID);
8808 }
8809 
8810 static bool is_kfunc_arg_callback(struct bpf_verifier_env *env, const struct btf *btf,
8811 				  const struct btf_param *arg)
8812 {
8813 	const struct btf_type *t;
8814 
8815 	t = btf_type_resolve_func_ptr(btf, arg->type, NULL);
8816 	if (!t)
8817 		return false;
8818 
8819 	return true;
8820 }
8821 
8822 /* Returns true if the struct is composed of scalars; 4 levels of nesting allowed */
8823 static bool __btf_type_is_scalar_struct(struct bpf_verifier_env *env,
8824 					const struct btf *btf,
8825 					const struct btf_type *t, int rec)
8826 {
8827 	const struct btf_type *member_type;
8828 	const struct btf_member *member;
8829 	u32 i;
8830 
8831 	if (!btf_type_is_struct(t))
8832 		return false;
8833 
8834 	for_each_member(i, t, member) {
8835 		const struct btf_array *array;
8836 
8837 		member_type = btf_type_skip_modifiers(btf, member->type, NULL);
8838 		if (btf_type_is_struct(member_type)) {
8839 			if (rec >= 3) {
8840 				verbose(env, "max struct nesting depth exceeded\n");
8841 				return false;
8842 			}
8843 			if (!__btf_type_is_scalar_struct(env, btf, member_type, rec + 1))
8844 				return false;
8845 			continue;
8846 		}
8847 		if (btf_type_is_array(member_type)) {
8848 			array = btf_array(member_type);
8849 			if (!array->nelems)
8850 				return false;
8851 			member_type = btf_type_skip_modifiers(btf, array->type, NULL);
8852 			if (!btf_type_is_scalar(member_type))
8853 				return false;
8854 			continue;
8855 		}
8856 		if (!btf_type_is_scalar(member_type))
8857 			return false;
8858 	}
8859 	return true;
8860 }
8861 
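/* Example (sketch) of what __btf_type_is_scalar_struct() accepts/rejects:
 *
 *	struct ok  { u32 a; u8 buf[8]; struct { u16 b; u64 c; } inner; };
 *	struct bad { u32 a; void *p; };		- pointer member, rejected
 *
 * Arrays must be non-empty arrays of scalars, and struct nesting deeper
 * than four levels is rejected as well.
 */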
8862 
8863 static u32 *reg2btf_ids[__BPF_REG_TYPE_MAX] = {
8864 #ifdef CONFIG_NET
8865 	[PTR_TO_SOCKET] = &btf_sock_ids[BTF_SOCK_TYPE_SOCK],
8866 	[PTR_TO_SOCK_COMMON] = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
8867 	[PTR_TO_TCP_SOCK] = &btf_sock_ids[BTF_SOCK_TYPE_TCP],
8868 #endif
8869 };
8870 
8871 enum kfunc_ptr_arg_type {
8872 	KF_ARG_PTR_TO_CTX,
8873 	KF_ARG_PTR_TO_ALLOC_BTF_ID,  /* Allocated object */
8874 	KF_ARG_PTR_TO_KPTR,	     /* PTR_TO_KPTR but type specific */
8875 	KF_ARG_PTR_TO_DYNPTR,
8876 	KF_ARG_PTR_TO_LIST_HEAD,
8877 	KF_ARG_PTR_TO_LIST_NODE,
8878 	KF_ARG_PTR_TO_BTF_ID,	     /* Also covers reg2btf_ids conversions */
8879 	KF_ARG_PTR_TO_MEM,
8880 	KF_ARG_PTR_TO_MEM_SIZE,	     /* Size derived from next argument, skip it */
8881 	KF_ARG_PTR_TO_CALLBACK,
8882 	KF_ARG_PTR_TO_RB_ROOT,
8883 	KF_ARG_PTR_TO_RB_NODE,
8884 };
8885 
8886 enum special_kfunc_type {
8887 	KF_bpf_obj_new_impl,
8888 	KF_bpf_obj_drop_impl,
8889 	KF_bpf_list_push_front,
8890 	KF_bpf_list_push_back,
8891 	KF_bpf_list_pop_front,
8892 	KF_bpf_list_pop_back,
8893 	KF_bpf_cast_to_kern_ctx,
8894 	KF_bpf_rdonly_cast,
8895 	KF_bpf_rcu_read_lock,
8896 	KF_bpf_rcu_read_unlock,
8897 	KF_bpf_rbtree_remove,
8898 	KF_bpf_rbtree_add,
8899 	KF_bpf_rbtree_first,
8900 };
8901 
8902 BTF_SET_START(special_kfunc_set)
8903 BTF_ID(func, bpf_obj_new_impl)
8904 BTF_ID(func, bpf_obj_drop_impl)
8905 BTF_ID(func, bpf_list_push_front)
8906 BTF_ID(func, bpf_list_push_back)
8907 BTF_ID(func, bpf_list_pop_front)
8908 BTF_ID(func, bpf_list_pop_back)
8909 BTF_ID(func, bpf_cast_to_kern_ctx)
8910 BTF_ID(func, bpf_rdonly_cast)
8911 BTF_ID(func, bpf_rbtree_remove)
8912 BTF_ID(func, bpf_rbtree_add)
8913 BTF_ID(func, bpf_rbtree_first)
8914 BTF_SET_END(special_kfunc_set)
8915 
8916 BTF_ID_LIST(special_kfunc_list)
8917 BTF_ID(func, bpf_obj_new_impl)
8918 BTF_ID(func, bpf_obj_drop_impl)
8919 BTF_ID(func, bpf_list_push_front)
8920 BTF_ID(func, bpf_list_push_back)
8921 BTF_ID(func, bpf_list_pop_front)
8922 BTF_ID(func, bpf_list_pop_back)
8923 BTF_ID(func, bpf_cast_to_kern_ctx)
8924 BTF_ID(func, bpf_rdonly_cast)
8925 BTF_ID(func, bpf_rcu_read_lock)
8926 BTF_ID(func, bpf_rcu_read_unlock)
8927 BTF_ID(func, bpf_rbtree_remove)
8928 BTF_ID(func, bpf_rbtree_add)
8929 BTF_ID(func, bpf_rbtree_first)
8930 
8931 static bool is_kfunc_bpf_rcu_read_lock(struct bpf_kfunc_call_arg_meta *meta)
8932 {
8933 	return meta->func_id == special_kfunc_list[KF_bpf_rcu_read_lock];
8934 }
8935 
8936 static bool is_kfunc_bpf_rcu_read_unlock(struct bpf_kfunc_call_arg_meta *meta)
8937 {
8938 	return meta->func_id == special_kfunc_list[KF_bpf_rcu_read_unlock];
8939 }
8940 
8941 static enum kfunc_ptr_arg_type
8942 get_kfunc_ptr_arg_type(struct bpf_verifier_env *env,
8943 		       struct bpf_kfunc_call_arg_meta *meta,
8944 		       const struct btf_type *t, const struct btf_type *ref_t,
8945 		       const char *ref_tname, const struct btf_param *args,
8946 		       int argno, int nargs)
8947 {
8948 	u32 regno = argno + 1;
8949 	struct bpf_reg_state *regs = cur_regs(env);
8950 	struct bpf_reg_state *reg = &regs[regno];
8951 	bool arg_mem_size = false;
8952 
8953 	if (meta->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx])
8954 		return KF_ARG_PTR_TO_CTX;
8955 
8956 	/* In this function, we verify the kfunc's BTF as per the argument type,
8957 	 * leaving the rest of the verification with respect to the register
8958 	 * type to our caller. When a set of conditions hold in the BTF type of
8959 	 * arguments, we resolve it to a known kfunc_ptr_arg_type.
8960 	 */
8961 	if (btf_get_prog_ctx_type(&env->log, meta->btf, t, resolve_prog_type(env->prog), argno))
8962 		return KF_ARG_PTR_TO_CTX;
8963 
8964 	if (is_kfunc_arg_alloc_obj(meta->btf, &args[argno]))
8965 		return KF_ARG_PTR_TO_ALLOC_BTF_ID;
8966 
8967 	if (is_kfunc_arg_kptr_get(meta, argno)) {
8968 		if (!btf_type_is_ptr(ref_t)) {
8969 			verbose(env, "arg#0 BTF type must be a double pointer for kptr_get kfunc\n");
8970 			return -EINVAL;
8971 		}
8972 		ref_t = btf_type_by_id(meta->btf, ref_t->type);
8973 		ref_tname = btf_name_by_offset(meta->btf, ref_t->name_off);
8974 		if (!btf_type_is_struct(ref_t)) {
8975 			verbose(env, "kernel function %s args#0 pointer type %s %s is not supported\n",
8976 				meta->func_name, btf_type_str(ref_t), ref_tname);
8977 			return -EINVAL;
8978 		}
8979 		return KF_ARG_PTR_TO_KPTR;
8980 	}
8981 
8982 	if (is_kfunc_arg_dynptr(meta->btf, &args[argno]))
8983 		return KF_ARG_PTR_TO_DYNPTR;
8984 
8985 	if (is_kfunc_arg_list_head(meta->btf, &args[argno]))
8986 		return KF_ARG_PTR_TO_LIST_HEAD;
8987 
8988 	if (is_kfunc_arg_list_node(meta->btf, &args[argno]))
8989 		return KF_ARG_PTR_TO_LIST_NODE;
8990 
8991 	if (is_kfunc_arg_rbtree_root(meta->btf, &args[argno]))
8992 		return KF_ARG_PTR_TO_RB_ROOT;
8993 
8994 	if (is_kfunc_arg_rbtree_node(meta->btf, &args[argno]))
8995 		return KF_ARG_PTR_TO_RB_NODE;
8996 
8997 	if ((base_type(reg->type) == PTR_TO_BTF_ID || reg2btf_ids[base_type(reg->type)])) {
8998 		if (!btf_type_is_struct(ref_t)) {
8999 			verbose(env, "kernel function %s args#%d pointer type %s %s is not supported\n",
9000 				meta->func_name, argno, btf_type_str(ref_t), ref_tname);
9001 			return -EINVAL;
9002 		}
9003 		return KF_ARG_PTR_TO_BTF_ID;
9004 	}
9005 
9006 	if (is_kfunc_arg_callback(env, meta->btf, &args[argno]))
9007 		return KF_ARG_PTR_TO_CALLBACK;
9008 
9009 	if (argno + 1 < nargs && is_kfunc_arg_mem_size(meta->btf, &args[argno + 1], &regs[regno + 1]))
9010 		arg_mem_size = true;
9011 
9012 	/* This is the catch-all argument type for register types supported by
9013 	 * check_helper_mem_access. However, we only allow it when the argument
9014 	 * type is a pointer to scalar, or a struct composed (recursively) of scalars. When
9015 	 * arg_mem_size is true, the pointer can be void *.
9016 	 */
9017 	if (!btf_type_is_scalar(ref_t) && !__btf_type_is_scalar_struct(env, meta->btf, ref_t, 0) &&
9018 	    (arg_mem_size ? !btf_type_is_void(ref_t) : 1)) {
9019 		verbose(env, "arg#%d pointer type %s %s must point to %sscalar, or struct with scalar\n",
9020 			argno, btf_type_str(ref_t), ref_tname, arg_mem_size ? "void, " : "");
9021 		return -EINVAL;
9022 	}
9023 	return arg_mem_size ? KF_ARG_PTR_TO_MEM_SIZE : KF_ARG_PTR_TO_MEM;
9024 }
9025 
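/* Example (sketch): for a hypothetical kfunc
 *
 *	int bpf_dummy_read(struct __sk_buff *skb, void *buf, u32 buf__sz);
 *
 * arg #0 resolves to KF_ARG_PTR_TO_CTX via btf_get_prog_ctx_type(), and
 * arg #1 resolves to KF_ARG_PTR_TO_MEM_SIZE because the following
 * parameter carries the __sz suffix; the size argument itself is then
 * skipped by the caller's argument loop.
 */
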
9026 static int process_kf_arg_ptr_to_btf_id(struct bpf_verifier_env *env,
9027 					struct bpf_reg_state *reg,
9028 					const struct btf_type *ref_t,
9029 					const char *ref_tname, u32 ref_id,
9030 					struct bpf_kfunc_call_arg_meta *meta,
9031 					int argno)
9032 {
9033 	const struct btf_type *reg_ref_t;
9034 	bool strict_type_match = false;
9035 	const struct btf *reg_btf;
9036 	const char *reg_ref_tname;
9037 	u32 reg_ref_id;
9038 
9039 	if (base_type(reg->type) == PTR_TO_BTF_ID) {
9040 		reg_btf = reg->btf;
9041 		reg_ref_id = reg->btf_id;
9042 	} else {
9043 		reg_btf = btf_vmlinux;
9044 		reg_ref_id = *reg2btf_ids[base_type(reg->type)];
9045 	}
9046 
9047 	/* Enforce strict type matching for calls to kfuncs that are acquiring
9048 	 * or releasing a reference, or are no-cast aliases. We do _not_
9049 	 * enforce strict matching for plain KF_TRUSTED_ARGS kfuncs by default,
9050 	 * as we want to enable BPF programs to pass types that are bitwise
9051 	 * equivalent without forcing them to explicitly cast with something
9052 	 * like bpf_cast_to_kern_ctx().
9053 	 *
9054 	 * For example, say we had a type like the following:
9055 	 *
9056 	 * struct bpf_cpumask {
9057 	 *	cpumask_t cpumask;
9058 	 *	refcount_t usage;
9059 	 * };
9060 	 *
9061 	 * Note that as specified in <linux/cpumask.h>, cpumask_t is typedef'ed
9062 	 * to a struct cpumask, so it would be safe to pass a struct
9063 	 * bpf_cpumask * to a kfunc expecting a struct cpumask *.
9064 	 *
9065 	 * The philosophy here is similar to how we allow scalars of different
9066 	 * types to be passed to kfuncs as long as the size is the same. The
9067 	 * only difference here is that we're simply allowing
9068 	 * btf_struct_ids_match() to walk the struct at the 0th offset, and
9069 	 * resolve types.
9070 	 */
9071 	if (is_kfunc_acquire(meta) ||
9072 	    (is_kfunc_release(meta) && reg->ref_obj_id) ||
9073 	    btf_type_ids_nocast_alias(&env->log, reg_btf, reg_ref_id, meta->btf, ref_id))
9074 		strict_type_match = true;
9075 
9076 	WARN_ON_ONCE(is_kfunc_trusted_args(meta) && reg->off);
9077 
9078 	reg_ref_t = btf_type_skip_modifiers(reg_btf, reg_ref_id, &reg_ref_id);
9079 	reg_ref_tname = btf_name_by_offset(reg_btf, reg_ref_t->name_off);
9080 	if (!btf_struct_ids_match(&env->log, reg_btf, reg_ref_id, reg->off, meta->btf, ref_id, strict_type_match)) {
9081 		verbose(env, "kernel function %s args#%d expected pointer to %s %s but R%d has a pointer to %s %s\n",
9082 			meta->func_name, argno, btf_type_str(ref_t), ref_tname, argno + 1,
9083 			btf_type_str(reg_ref_t), reg_ref_tname);
9084 		return -EINVAL;
9085 	}
9086 	return 0;
9087 }
9088 
9089 static int process_kf_arg_ptr_to_kptr(struct bpf_verifier_env *env,
9090 				      struct bpf_reg_state *reg,
9091 				      const struct btf_type *ref_t,
9092 				      const char *ref_tname,
9093 				      struct bpf_kfunc_call_arg_meta *meta,
9094 				      int argno)
9095 {
9096 	struct btf_field *kptr_field;
9097 
9098 	/* check_func_arg_reg_off allows var_off for
9099 	 * PTR_TO_MAP_VALUE, but we need fixed offset to find
9100 	 * off_desc.
9101 	 */
9102 	if (!tnum_is_const(reg->var_off)) {
9103 		verbose(env, "arg#0 must have constant offset\n");
9104 		return -EINVAL;
9105 	}
9106 
9107 	kptr_field = btf_record_find(reg->map_ptr->record, reg->off + reg->var_off.value, BPF_KPTR);
9108 	if (!kptr_field || kptr_field->type != BPF_KPTR_REF) {
9109 		verbose(env, "arg#0 no referenced kptr at map value offset=%llu\n",
9110 			reg->off + reg->var_off.value);
9111 		return -EINVAL;
9112 	}
9113 
9114 	if (!btf_struct_ids_match(&env->log, meta->btf, ref_t->type, 0, kptr_field->kptr.btf,
9115 				  kptr_field->kptr.btf_id, true)) {
9116 		verbose(env, "kernel function %s args#%d expected pointer to %s %s\n",
9117 			meta->func_name, argno, btf_type_str(ref_t), ref_tname);
9118 		return -EINVAL;
9119 	}
9120 	return 0;
9121 }
9122 
9123 static int ref_set_non_owning(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
9124 {
9125 	struct bpf_verifier_state *state = env->cur_state;
9126 
9127 	if (!state->active_lock.ptr) {
9128 		verbose(env, "verifier internal error: ref_set_non_owning w/o active lock\n");
9129 		return -EFAULT;
9130 	}
9131 
9132 	if (type_flag(reg->type) & NON_OWN_REF) {
9133 		verbose(env, "verifier internal error: NON_OWN_REF already set\n");
9134 		return -EFAULT;
9135 	}
9136 
9137 	reg->type |= NON_OWN_REF;
9138 	return 0;
9139 }
9140 
9141 static int ref_convert_owning_non_owning(struct bpf_verifier_env *env, u32 ref_obj_id)
9142 {
9143 	struct bpf_func_state *state, *unused;
9144 	struct bpf_reg_state *reg;
9145 	int i;
9146 
9147 	state = cur_func(env);
9148 
9149 	if (!ref_obj_id) {
9150 		verbose(env, "verifier internal error: ref_obj_id is zero for "
9151 			     "owning -> non-owning conversion\n");
9152 		return -EFAULT;
9153 	}
9154 
9155 	for (i = 0; i < state->acquired_refs; i++) {
9156 		if (state->refs[i].id != ref_obj_id)
9157 			continue;
9158 
9159 		/* Clear ref_obj_id here so release_reference doesn't clobber
9160 		 * the whole reg
9161 		 */
9162 		bpf_for_each_reg_in_vstate(env->cur_state, unused, reg, ({
9163 			if (reg->ref_obj_id == ref_obj_id) {
9164 				reg->ref_obj_id = 0;
9165 				ref_set_non_owning(env, reg);
9166 			}
9167 		}));
9168 		return 0;
9169 	}
9170 
9171 	verbose(env, "verifier internal error: ref state missing for ref_obj_id\n");
9172 	return -EFAULT;
9173 }
9174 
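/* Example (sketch): after a successful bpf_list_push_front(&head, &n->node),
 * ownership of 'n' moves into the collection: ref_convert_owning_non_owning()
 * clears ref_obj_id on every copy of 'n' in the verifier state and sets
 * NON_OWN_REF, so 'n' can still be read under the lock but can no longer be
 * pushed again or passed to bpf_obj_drop().
 */
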
9175 /* Implementation details:
9176  *
9177  * Each register points to some region of memory, which we define as an
9178  * allocation. Each allocation may embed a bpf_spin_lock which protects any
9179  * special BPF objects (bpf_list_head, bpf_rb_root, etc.) part of the same
9180  * allocation. The lock and the data it protects are colocated in the same
9181  * memory region.
9182  *
9183  * Hence, every time a register holds a pointer value pointing to such
9184  * an allocation, the verifier preserves a unique reg->id for it.
9185  *
9186  * The verifier remembers the lock 'ptr' and the lock 'id' whenever
9187  * bpf_spin_lock is called.
9188  *
9189  * To enable this, lock state in the verifier captures two values:
9190  *	active_lock.ptr = Register's type specific pointer
9191  *	active_lock.id  = A unique ID for each register pointer value
9192  *
9193  * Currently, PTR_TO_MAP_VALUE and PTR_TO_BTF_ID | MEM_ALLOC are the two
9194  * supported register types.
9195  *
9196  * The active_lock.ptr in case of map values is the reg->map_ptr, and in case of
9197  * allocated objects is the reg->btf pointer.
9198  *
9199  * The active_lock.id is non-unique for maps supporting direct_value_addr, as we
9200  * can establish the provenance of the map value statically for each distinct
9201  * lookup into such maps. They always contain a single map value, hence unique
9202  * IDs for each pseudo load would pessimize the algorithm and reject valid programs.
9203  *
9204  * So, global variables use array maps with max_entries = 1, and hence their
9205  * active_lock.ptr becomes the map_ptr and id = 0 (since they all point
9206  * into the same map value as max_entries is 1, as described above).
9207  *
9208  * In case of inner map lookups, the inner map pointer has same map_ptr as the
9209  * outer map pointer (in verifier context), but each lookup into an inner map
9210  * assigns a fresh reg->id to the lookup, so while lookups into distinct inner
9211  * maps from the same outer map share the same map_ptr as active_lock.ptr, they
9212  * will get different reg->id assigned to each lookup, hence different
9213  * active_lock.id.
9214  *
9215  * In case of allocated objects, active_lock.ptr is the reg->btf, and the
9216  * reg->id is a unique ID preserved after the NULL pointer check on the pointer
9217  * returned from bpf_obj_new. Each allocation receives a new reg->id.
9218  */
9219 static int check_reg_allocation_locked(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
9220 {
9221 	void *ptr;
9222 	u32 id;
9223 
9224 	switch ((int)reg->type) {
9225 	case PTR_TO_MAP_VALUE:
9226 		ptr = reg->map_ptr;
9227 		break;
9228 	case PTR_TO_BTF_ID | MEM_ALLOC:
9229 	case PTR_TO_BTF_ID | MEM_ALLOC | PTR_TRUSTED:
9230 		ptr = reg->btf;
9231 		break;
9232 	default:
9233 		verbose(env, "verifier internal error: unknown reg type for lock check\n");
9234 		return -EFAULT;
9235 	}
9236 	id = reg->id;
9237 
9238 	if (!env->cur_state->active_lock.ptr)
9239 		return -EINVAL;
9240 	if (env->cur_state->active_lock.ptr != ptr ||
9241 	    env->cur_state->active_lock.id != id) {
9242 		verbose(env, "held lock and object are not in the same allocation\n");
9243 		return -EINVAL;
9244 	}
9245 	return 0;
9246 }
9247 
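/* Example (sketch) tying the comment above to a program: given a map value
 *
 *	struct elem { struct bpf_spin_lock lock; struct bpf_list_head head; };
 *
 * the program must call bpf_spin_lock(&val->lock) before
 * bpf_list_push_front(&val->head, ...); check_reg_allocation_locked() then
 * confirms that the recorded active_lock.ptr/id match the map_ptr and
 * reg->id of the register holding &val->head.
 */
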
9248 static bool is_bpf_list_api_kfunc(u32 btf_id)
9249 {
9250 	return btf_id == special_kfunc_list[KF_bpf_list_push_front] ||
9251 	       btf_id == special_kfunc_list[KF_bpf_list_push_back] ||
9252 	       btf_id == special_kfunc_list[KF_bpf_list_pop_front] ||
9253 	       btf_id == special_kfunc_list[KF_bpf_list_pop_back];
9254 }
9255 
9256 static bool is_bpf_rbtree_api_kfunc(u32 btf_id)
9257 {
9258 	return btf_id == special_kfunc_list[KF_bpf_rbtree_add] ||
9259 	       btf_id == special_kfunc_list[KF_bpf_rbtree_remove] ||
9260 	       btf_id == special_kfunc_list[KF_bpf_rbtree_first];
9261 }
9262 
9263 static bool is_bpf_graph_api_kfunc(u32 btf_id)
9264 {
9265 	return is_bpf_list_api_kfunc(btf_id) || is_bpf_rbtree_api_kfunc(btf_id);
9266 }
9267 
9268 static bool is_callback_calling_kfunc(u32 btf_id)
9269 {
9270 	return btf_id == special_kfunc_list[KF_bpf_rbtree_add];
9271 }
9272 
9273 static bool is_rbtree_lock_required_kfunc(u32 btf_id)
9274 {
9275 	return is_bpf_rbtree_api_kfunc(btf_id);
9276 }
9277 
9278 static bool check_kfunc_is_graph_root_api(struct bpf_verifier_env *env,
9279 					  enum btf_field_type head_field_type,
9280 					  u32 kfunc_btf_id)
9281 {
9282 	bool ret;
9283 
9284 	switch (head_field_type) {
9285 	case BPF_LIST_HEAD:
9286 		ret = is_bpf_list_api_kfunc(kfunc_btf_id);
9287 		break;
9288 	case BPF_RB_ROOT:
9289 		ret = is_bpf_rbtree_api_kfunc(kfunc_btf_id);
9290 		break;
9291 	default:
9292 		verbose(env, "verifier internal error: unexpected graph root argument type %s\n",
9293 			btf_field_type_name(head_field_type));
9294 		return false;
9295 	}
9296 
9297 	if (!ret)
9298 		verbose(env, "verifier internal error: %s head arg for unknown kfunc\n",
9299 			btf_field_type_name(head_field_type));
9300 	return ret;
9301 }
9302 
9303 static bool check_kfunc_is_graph_node_api(struct bpf_verifier_env *env,
9304 					  enum btf_field_type node_field_type,
9305 					  u32 kfunc_btf_id)
9306 {
9307 	bool ret;
9308 
9309 	switch (node_field_type) {
9310 	case BPF_LIST_NODE:
9311 		ret = (kfunc_btf_id == special_kfunc_list[KF_bpf_list_push_front] ||
9312 		       kfunc_btf_id == special_kfunc_list[KF_bpf_list_push_back]);
9313 		break;
9314 	case BPF_RB_NODE:
9315 		ret = (kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_remove] ||
9316 		       kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_add]);
9317 		break;
9318 	default:
9319 		verbose(env, "verifier internal error: unexpected graph node argument type %s\n",
9320 			btf_field_type_name(node_field_type));
9321 		return false;
9322 	}
9323 
9324 	if (!ret)
9325 		verbose(env, "verifier internal error: %s node arg for unknown kfunc\n",
9326 			btf_field_type_name(node_field_type));
9327 	return ret;
9328 }
9329 
9330 static int
9331 __process_kf_arg_ptr_to_graph_root(struct bpf_verifier_env *env,
9332 				   struct bpf_reg_state *reg, u32 regno,
9333 				   struct bpf_kfunc_call_arg_meta *meta,
9334 				   enum btf_field_type head_field_type,
9335 				   struct btf_field **head_field)
9336 {
9337 	const char *head_type_name;
9338 	struct btf_field *field;
9339 	struct btf_record *rec;
9340 	u32 head_off;
9341 
9342 	if (meta->btf != btf_vmlinux) {
9343 		verbose(env, "verifier internal error: unexpected btf mismatch in kfunc call\n");
9344 		return -EFAULT;
9345 	}
9346 
9347 	if (!check_kfunc_is_graph_root_api(env, head_field_type, meta->func_id))
9348 		return -EFAULT;
9349 
9350 	head_type_name = btf_field_type_name(head_field_type);
9351 	if (!tnum_is_const(reg->var_off)) {
9352 		verbose(env,
9353 			"R%d doesn't have constant offset. %s has to be at the constant offset\n",
9354 			regno, head_type_name);
9355 		return -EINVAL;
9356 	}
9357 
9358 	rec = reg_btf_record(reg);
9359 	head_off = reg->off + reg->var_off.value;
9360 	field = btf_record_find(rec, head_off, head_field_type);
9361 	if (!field) {
9362 		verbose(env, "%s not found at offset=%u\n", head_type_name, head_off);
9363 		return -EINVAL;
9364 	}
9365 
9366 	/* All functions require the graph root to be protected by a bpf_spin_lock */
9367 	if (check_reg_allocation_locked(env, reg)) {
9368 		verbose(env, "bpf_spin_lock at off=%d must be held for %s\n",
9369 			rec->spin_lock_off, head_type_name);
9370 		return -EINVAL;
9371 	}
9372 
9373 	if (*head_field) {
9374 		verbose(env, "verifier internal error: repeating %s arg\n", head_type_name);
9375 		return -EFAULT;
9376 	}
9377 	*head_field = field;
9378 	return 0;
9379 }
9380 
9381 static int process_kf_arg_ptr_to_list_head(struct bpf_verifier_env *env,
9382 					   struct bpf_reg_state *reg, u32 regno,
9383 					   struct bpf_kfunc_call_arg_meta *meta)
9384 {
9385 	return __process_kf_arg_ptr_to_graph_root(env, reg, regno, meta, BPF_LIST_HEAD,
9386 							  &meta->arg_list_head.field);
9387 }
9388 
9389 static int process_kf_arg_ptr_to_rbtree_root(struct bpf_verifier_env *env,
9390 					     struct bpf_reg_state *reg, u32 regno,
9391 					     struct bpf_kfunc_call_arg_meta *meta)
9392 {
9393 	return __process_kf_arg_ptr_to_graph_root(env, reg, regno, meta, BPF_RB_ROOT,
9394 							  &meta->arg_rbtree_root.field);
9395 }
9396 
9397 static int
9398 __process_kf_arg_ptr_to_graph_node(struct bpf_verifier_env *env,
9399 				   struct bpf_reg_state *reg, u32 regno,
9400 				   struct bpf_kfunc_call_arg_meta *meta,
9401 				   enum btf_field_type head_field_type,
9402 				   enum btf_field_type node_field_type,
9403 				   struct btf_field **node_field)
9404 {
9405 	const char *node_type_name;
9406 	const struct btf_type *et, *t;
9407 	struct btf_field *field;
9408 	u32 node_off;
9409 
9410 	if (meta->btf != btf_vmlinux) {
9411 		verbose(env, "verifier internal error: unexpected btf mismatch in kfunc call\n");
9412 		return -EFAULT;
9413 	}
9414 
9415 	if (!check_kfunc_is_graph_node_api(env, node_field_type, meta->func_id))
9416 		return -EFAULT;
9417 
9418 	node_type_name = btf_field_type_name(node_field_type);
9419 	if (!tnum_is_const(reg->var_off)) {
9420 		verbose(env,
9421 			"R%d doesn't have constant offset. %s has to be at the constant offset\n",
9422 			regno, node_type_name);
9423 		return -EINVAL;
9424 	}
9425 
9426 	node_off = reg->off + reg->var_off.value;
9427 	field = reg_find_field_offset(reg, node_off, node_field_type);
9428 	if (!field || field->offset != node_off) {
9429 		verbose(env, "%s not found at offset=%u\n", node_type_name, node_off);
9430 		return -EINVAL;
9431 	}
9432 
9433 	field = *node_field;
9434 
9435 	et = btf_type_by_id(field->graph_root.btf, field->graph_root.value_btf_id);
9436 	t = btf_type_by_id(reg->btf, reg->btf_id);
9437 	if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, 0, field->graph_root.btf,
9438 				  field->graph_root.value_btf_id, true)) {
9439 		verbose(env, "operation on %s expects arg#1 %s at offset=%d "
9440 			"in struct %s, but arg is at offset=%d in struct %s\n",
9441 			btf_field_type_name(head_field_type),
9442 			btf_field_type_name(node_field_type),
9443 			field->graph_root.node_offset,
9444 			btf_name_by_offset(field->graph_root.btf, et->name_off),
9445 			node_off, btf_name_by_offset(reg->btf, t->name_off));
9446 		return -EINVAL;
9447 	}
9448 
9449 	if (node_off != field->graph_root.node_offset) {
9450 		verbose(env, "arg#1 offset=%d, but expected %s at offset=%d in struct %s\n",
9451 			node_off, btf_field_type_name(node_field_type),
9452 			field->graph_root.node_offset,
9453 			btf_name_by_offset(field->graph_root.btf, et->name_off));
9454 		return -EINVAL;
9455 	}
9456 
9457 	return 0;
9458 }
9459 
9460 static int process_kf_arg_ptr_to_list_node(struct bpf_verifier_env *env,
9461 					   struct bpf_reg_state *reg, u32 regno,
9462 					   struct bpf_kfunc_call_arg_meta *meta)
9463 {
9464 	return __process_kf_arg_ptr_to_graph_node(env, reg, regno, meta,
9465 						  BPF_LIST_HEAD, BPF_LIST_NODE,
9466 						  &meta->arg_list_head.field);
9467 }
9468 
9469 static int process_kf_arg_ptr_to_rbtree_node(struct bpf_verifier_env *env,
9470 					     struct bpf_reg_state *reg, u32 regno,
9471 					     struct bpf_kfunc_call_arg_meta *meta)
9472 {
9473 	return __process_kf_arg_ptr_to_graph_node(env, reg, regno, meta,
9474 						  BPF_RB_ROOT, BPF_RB_NODE,
9475 						  &meta->arg_rbtree_root.field);
9476 }
9477 
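/* Example (sketch): with a value type such as
 *
 *	struct node_data { int key; struct bpf_rb_node rbn; };
 *
 * bpf_rbtree_add(&root, &n->rbn, less) must pass a node pointer whose
 * constant offset equals the bpf_rb_node offset recorded in the root's
 * BTF record; passing &n->key instead fails the node_offset check above.
 */
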
9478 static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_arg_meta *meta)
9479 {
9480 	const char *func_name = meta->func_name, *ref_tname;
9481 	const struct btf *btf = meta->btf;
9482 	const struct btf_param *args;
9483 	u32 i, nargs;
9484 	int ret;
9485 
9486 	args = (const struct btf_param *)(meta->func_proto + 1);
9487 	nargs = btf_type_vlen(meta->func_proto);
9488 	if (nargs > MAX_BPF_FUNC_REG_ARGS) {
9489 		verbose(env, "Function %s has %d > %d args\n", func_name, nargs,
9490 			MAX_BPF_FUNC_REG_ARGS);
9491 		return -EINVAL;
9492 	}
9493 
9494 	/* Check that BTF function arguments match actual types that the
9495 	 * verifier sees.
9496 	 */
9497 	for (i = 0; i < nargs; i++) {
9498 		struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[i + 1];
9499 		const struct btf_type *t, *ref_t, *resolve_ret;
9500 		enum bpf_arg_type arg_type = ARG_DONTCARE;
9501 		u32 regno = i + 1, ref_id, type_size;
9502 		bool is_ret_buf_sz = false;
9503 		int kf_arg_type;
9504 
9505 		t = btf_type_skip_modifiers(btf, args[i].type, NULL);
9506 
9507 		if (is_kfunc_arg_ignore(btf, &args[i]))
9508 			continue;
9509 
9510 		if (btf_type_is_scalar(t)) {
9511 			if (reg->type != SCALAR_VALUE) {
9512 				verbose(env, "R%d is not a scalar\n", regno);
9513 				return -EINVAL;
9514 			}
9515 
9516 			if (is_kfunc_arg_constant(meta->btf, &args[i])) {
9517 				if (meta->arg_constant.found) {
9518 					verbose(env, "verifier internal error: only one constant argument permitted\n");
9519 					return -EFAULT;
9520 				}
9521 				if (!tnum_is_const(reg->var_off)) {
9522 					verbose(env, "R%d must be a known constant\n", regno);
9523 					return -EINVAL;
9524 				}
9525 				ret = mark_chain_precision(env, regno);
9526 				if (ret < 0)
9527 					return ret;
9528 				meta->arg_constant.found = true;
9529 				meta->arg_constant.value = reg->var_off.value;
9530 			} else if (is_kfunc_arg_scalar_with_name(btf, &args[i], "rdonly_buf_size")) {
9531 				meta->r0_rdonly = true;
9532 				is_ret_buf_sz = true;
9533 			} else if (is_kfunc_arg_scalar_with_name(btf, &args[i], "rdwr_buf_size")) {
9534 				is_ret_buf_sz = true;
9535 			}
9536 
9537 			if (is_ret_buf_sz) {
9538 				if (meta->r0_size) {
9539 					verbose(env, "2 or more rdonly/rdwr_buf_size parameters for kfunc");
9540 					return -EINVAL;
9541 				}
9542 
9543 				if (!tnum_is_const(reg->var_off)) {
9544 					verbose(env, "R%d is not a const\n", regno);
9545 					return -EINVAL;
9546 				}
9547 
9548 				meta->r0_size = reg->var_off.value;
9549 				ret = mark_chain_precision(env, regno);
9550 				if (ret)
9551 					return ret;
9552 			}
9553 			continue;
9554 		}
9555 
9556 		if (!btf_type_is_ptr(t)) {
9557 			verbose(env, "Unrecognized arg#%d type %s\n", i, btf_type_str(t));
9558 			return -EINVAL;
9559 		}
9560 
9561 		if (is_kfunc_trusted_args(meta) &&
9562 		    (register_is_null(reg) || type_may_be_null(reg->type))) {
9563 			verbose(env, "Possibly NULL pointer passed to trusted arg%d\n", i);
9564 			return -EACCES;
9565 		}
9566 
9567 		if (reg->ref_obj_id) {
9568 			if (is_kfunc_release(meta) && meta->ref_obj_id) {
9569 				verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n",
9570 					regno, reg->ref_obj_id,
9571 					meta->ref_obj_id);
9572 				return -EFAULT;
9573 			}
9574 			meta->ref_obj_id = reg->ref_obj_id;
9575 			if (is_kfunc_release(meta))
9576 				meta->release_regno = regno;
9577 		}
9578 
9579 		ref_t = btf_type_skip_modifiers(btf, t->type, &ref_id);
9580 		ref_tname = btf_name_by_offset(btf, ref_t->name_off);
9581 
9582 		kf_arg_type = get_kfunc_ptr_arg_type(env, meta, t, ref_t, ref_tname, args, i, nargs);
9583 		if (kf_arg_type < 0)
9584 			return kf_arg_type;
9585 
9586 		switch (kf_arg_type) {
9587 		case KF_ARG_PTR_TO_ALLOC_BTF_ID:
9588 		case KF_ARG_PTR_TO_BTF_ID:
9589 			if (!is_kfunc_trusted_args(meta) && !is_kfunc_rcu(meta))
9590 				break;
9591 
9592 			if (!is_trusted_reg(reg)) {
9593 				if (!is_kfunc_rcu(meta)) {
9594 					verbose(env, "R%d must be referenced or trusted\n", regno);
9595 					return -EINVAL;
9596 				}
9597 				if (!is_rcu_reg(reg)) {
9598 					verbose(env, "R%d must be a rcu pointer\n", regno);
9599 					return -EINVAL;
9600 				}
9601 			}
9602 
9603 			fallthrough;
9604 		case KF_ARG_PTR_TO_CTX:
9605 			/* Trusted arguments have the same offset checks as release arguments */
9606 			arg_type |= OBJ_RELEASE;
9607 			break;
9608 		case KF_ARG_PTR_TO_KPTR:
9609 		case KF_ARG_PTR_TO_DYNPTR:
9610 		case KF_ARG_PTR_TO_LIST_HEAD:
9611 		case KF_ARG_PTR_TO_LIST_NODE:
9612 		case KF_ARG_PTR_TO_RB_ROOT:
9613 		case KF_ARG_PTR_TO_RB_NODE:
9614 		case KF_ARG_PTR_TO_MEM:
9615 		case KF_ARG_PTR_TO_MEM_SIZE:
9616 		case KF_ARG_PTR_TO_CALLBACK:
9617 			/* Trusted by default */
9618 			break;
9619 		default:
9620 			WARN_ON_ONCE(1);
9621 			return -EFAULT;
9622 		}
9623 
9624 		if (is_kfunc_release(meta) && reg->ref_obj_id)
9625 			arg_type |= OBJ_RELEASE;
9626 		ret = check_func_arg_reg_off(env, reg, regno, arg_type);
9627 		if (ret < 0)
9628 			return ret;
9629 
9630 		switch (kf_arg_type) {
9631 		case KF_ARG_PTR_TO_CTX:
9632 			if (reg->type != PTR_TO_CTX) {
9633 				verbose(env, "arg#%d expected pointer to ctx, but got %s\n", i, btf_type_str(t));
9634 				return -EINVAL;
9635 			}
9636 
9637 			if (meta->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx]) {
9638 				ret = get_kern_ctx_btf_id(&env->log, resolve_prog_type(env->prog));
9639 				if (ret < 0)
9640 					return -EINVAL;
9641 				meta->ret_btf_id  = ret;
9642 			}
9643 			break;
9644 		case KF_ARG_PTR_TO_ALLOC_BTF_ID:
9645 			if (reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) {
9646 				verbose(env, "arg#%d expected pointer to allocated object\n", i);
9647 				return -EINVAL;
9648 			}
9649 			if (!reg->ref_obj_id) {
9650 				verbose(env, "allocated object must be referenced\n");
9651 				return -EINVAL;
9652 			}
9653 			if (meta->btf == btf_vmlinux &&
9654 			    meta->func_id == special_kfunc_list[KF_bpf_obj_drop_impl]) {
9655 				meta->arg_obj_drop.btf = reg->btf;
9656 				meta->arg_obj_drop.btf_id = reg->btf_id;
9657 			}
9658 			break;
9659 		case KF_ARG_PTR_TO_KPTR:
9660 			if (reg->type != PTR_TO_MAP_VALUE) {
9661 				verbose(env, "arg#0 expected pointer to map value\n");
9662 				return -EINVAL;
9663 			}
9664 			ret = process_kf_arg_ptr_to_kptr(env, reg, ref_t, ref_tname, meta, i);
9665 			if (ret < 0)
9666 				return ret;
9667 			break;
9668 		case KF_ARG_PTR_TO_DYNPTR:
9669 			if (reg->type != PTR_TO_STACK &&
9670 			    reg->type != CONST_PTR_TO_DYNPTR) {
9671 				verbose(env, "arg#%d expected pointer to stack or dynptr_ptr\n", i);
9672 				return -EINVAL;
9673 			}
9674 
9675 			ret = process_dynptr_func(env, regno, ARG_PTR_TO_DYNPTR | MEM_RDONLY, NULL);
9676 			if (ret < 0)
9677 				return ret;
9678 			break;
9679 		case KF_ARG_PTR_TO_LIST_HEAD:
9680 			if (reg->type != PTR_TO_MAP_VALUE &&
9681 			    reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) {
9682 				verbose(env, "arg#%d expected pointer to map value or allocated object\n", i);
9683 				return -EINVAL;
9684 			}
9685 			if (reg->type == (PTR_TO_BTF_ID | MEM_ALLOC) && !reg->ref_obj_id) {
9686 				verbose(env, "allocated object must be referenced\n");
9687 				return -EINVAL;
9688 			}
9689 			ret = process_kf_arg_ptr_to_list_head(env, reg, regno, meta);
9690 			if (ret < 0)
9691 				return ret;
9692 			break;
9693 		case KF_ARG_PTR_TO_RB_ROOT:
9694 			if (reg->type != PTR_TO_MAP_VALUE &&
9695 			    reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) {
9696 				verbose(env, "arg#%d expected pointer to map value or allocated object\n", i);
9697 				return -EINVAL;
9698 			}
9699 			if (reg->type == (PTR_TO_BTF_ID | MEM_ALLOC) && !reg->ref_obj_id) {
9700 				verbose(env, "allocated object must be referenced\n");
9701 				return -EINVAL;
9702 			}
9703 			ret = process_kf_arg_ptr_to_rbtree_root(env, reg, regno, meta);
9704 			if (ret < 0)
9705 				return ret;
9706 			break;
9707 		case KF_ARG_PTR_TO_LIST_NODE:
9708 			if (reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) {
9709 				verbose(env, "arg#%d expected pointer to allocated object\n", i);
9710 				return -EINVAL;
9711 			}
9712 			if (!reg->ref_obj_id) {
9713 				verbose(env, "allocated object must be referenced\n");
9714 				return -EINVAL;
9715 			}
9716 			ret = process_kf_arg_ptr_to_list_node(env, reg, regno, meta);
9717 			if (ret < 0)
9718 				return ret;
9719 			break;
9720 		case KF_ARG_PTR_TO_RB_NODE:
9721 			if (meta->func_id == special_kfunc_list[KF_bpf_rbtree_remove]) {
9722 				if (!type_is_non_owning_ref(reg->type) || reg->ref_obj_id) {
9723 					verbose(env, "rbtree_remove node input must be non-owning ref\n");
9724 					return -EINVAL;
9725 				}
9726 				if (in_rbtree_lock_required_cb(env)) {
9727 					verbose(env, "rbtree_remove not allowed in rbtree cb\n");
9728 					return -EINVAL;
9729 				}
9730 			} else {
9731 				if (reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) {
9732 					verbose(env, "arg#%d expected pointer to allocated object\n", i);
9733 					return -EINVAL;
9734 				}
9735 				if (!reg->ref_obj_id) {
9736 					verbose(env, "allocated object must be referenced\n");
9737 					return -EINVAL;
9738 				}
9739 			}
9740 
9741 			ret = process_kf_arg_ptr_to_rbtree_node(env, reg, regno, meta);
9742 			if (ret < 0)
9743 				return ret;
9744 			break;
9745 		case KF_ARG_PTR_TO_BTF_ID:
9746 			/* Only base_type is checked; further checks are done here */
9747 			if ((base_type(reg->type) != PTR_TO_BTF_ID ||
9748 			     (bpf_type_has_unsafe_modifiers(reg->type) && !is_rcu_reg(reg))) &&
9749 			    !reg2btf_ids[base_type(reg->type)]) {
9750 				verbose(env, "arg#%d is %s ", i, reg_type_str(env, reg->type));
9751 				verbose(env, "expected %s or socket\n",
9752 					reg_type_str(env, base_type(reg->type) |
9753 							  (type_flag(reg->type) & BPF_REG_TRUSTED_MODIFIERS)));
9754 				return -EINVAL;
9755 			}
9756 			ret = process_kf_arg_ptr_to_btf_id(env, reg, ref_t, ref_tname, ref_id, meta, i);
9757 			if (ret < 0)
9758 				return ret;
9759 			break;
9760 		case KF_ARG_PTR_TO_MEM:
9761 			resolve_ret = btf_resolve_size(btf, ref_t, &type_size);
9762 			if (IS_ERR(resolve_ret)) {
9763 				verbose(env, "arg#%d reference type('%s %s') size cannot be determined: %ld\n",
9764 					i, btf_type_str(ref_t), ref_tname, PTR_ERR(resolve_ret));
9765 				return -EINVAL;
9766 			}
9767 			ret = check_mem_reg(env, reg, regno, type_size);
9768 			if (ret < 0)
9769 				return ret;
9770 			break;
9771 		case KF_ARG_PTR_TO_MEM_SIZE:
9772 			ret = check_kfunc_mem_size_reg(env, &regs[regno + 1], regno + 1);
9773 			if (ret < 0) {
9774 				verbose(env, "arg#%d arg#%d memory, len pair leads to invalid memory access\n", i, i + 1);
9775 				return ret;
9776 			}
9777 			/* Skip next '__sz' argument */
9778 			i++;
9779 			break;
9780 		case KF_ARG_PTR_TO_CALLBACK:
9781 			meta->subprogno = reg->subprogno;
9782 			break;
9783 		}
9784 	}
9785 
9786 	if (is_kfunc_release(meta) && !meta->release_regno) {
9787 		verbose(env, "release kernel function %s expects refcounted PTR_TO_BTF_ID\n",
9788 			func_name);
9789 		return -EINVAL;
9790 	}
9791 
9792 	return 0;
9793 }
9794 
9795 static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
9796 			    int *insn_idx_p)
9797 {
9798 	const struct btf_type *t, *func, *func_proto, *ptr_type;
9799 	u32 i, nargs, func_id, ptr_type_id, release_ref_obj_id;
9800 	struct bpf_reg_state *regs = cur_regs(env);
9801 	const char *func_name, *ptr_type_name;
9802 	bool sleepable, rcu_lock, rcu_unlock;
9803 	struct bpf_kfunc_call_arg_meta meta;
9804 	int err, insn_idx = *insn_idx_p;
9805 	const struct btf_param *args;
9806 	const struct btf_type *ret_t;
9807 	struct btf *desc_btf;
9808 	u32 *kfunc_flags;
9809 
9810 	/* skip for now, but return error when we find this in fixup_kfunc_call */
9811 	if (!insn->imm)
9812 		return 0;
9813 
9814 	desc_btf = find_kfunc_desc_btf(env, insn->off);
9815 	if (IS_ERR(desc_btf))
9816 		return PTR_ERR(desc_btf);
9817 
9818 	func_id = insn->imm;
9819 	func = btf_type_by_id(desc_btf, func_id);
9820 	func_name = btf_name_by_offset(desc_btf, func->name_off);
9821 	func_proto = btf_type_by_id(desc_btf, func->type);
9822 
9823 	kfunc_flags = btf_kfunc_id_set_contains(desc_btf, resolve_prog_type(env->prog), func_id);
9824 	if (!kfunc_flags) {
9825 		verbose(env, "calling kernel function %s is not allowed\n",
9826 			func_name);
9827 		return -EACCES;
9828 	}
9829 
9830 	/* Prepare kfunc call metadata */
9831 	memset(&meta, 0, sizeof(meta));
9832 	meta.btf = desc_btf;
9833 	meta.func_id = func_id;
9834 	meta.kfunc_flags = *kfunc_flags;
9835 	meta.func_proto = func_proto;
9836 	meta.func_name = func_name;
9837 
9838 	if (is_kfunc_destructive(&meta) && !capable(CAP_SYS_BOOT)) {
9839 		verbose(env, "destructive kfunc calls require CAP_SYS_BOOT capability\n");
9840 		return -EACCES;
9841 	}
9842 
9843 	sleepable = is_kfunc_sleepable(&meta);
9844 	if (sleepable && !env->prog->aux->sleepable) {
9845 		verbose(env, "program must be sleepable to call sleepable kfunc %s\n", func_name);
9846 		return -EACCES;
9847 	}
9848 
9849 	rcu_lock = is_kfunc_bpf_rcu_read_lock(&meta);
9850 	rcu_unlock = is_kfunc_bpf_rcu_read_unlock(&meta);
9851 	if ((rcu_lock || rcu_unlock) && !env->rcu_tag_supported) {
9852 		verbose(env, "no vmlinux btf rcu tag support for kfunc %s\n", func_name);
9853 		return -EACCES;
9854 	}
9855 
9856 	if (env->cur_state->active_rcu_lock) {
9857 		struct bpf_func_state *state;
9858 		struct bpf_reg_state *reg;
9859 
9860 		if (rcu_lock) {
9861 			verbose(env, "nested rcu read lock (kernel function %s)\n", func_name);
9862 			return -EINVAL;
9863 		} else if (rcu_unlock) {
9864 			bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({
9865 				if (reg->type & MEM_RCU) {
9866 					reg->type &= ~(MEM_RCU | PTR_MAYBE_NULL);
9867 					reg->type |= PTR_UNTRUSTED;
9868 				}
9869 			}));
9870 			env->cur_state->active_rcu_lock = false;
9871 		} else if (sleepable) {
9872 			verbose(env, "kernel func %s is sleepable within rcu_read_lock region\n", func_name);
9873 			return -EACCES;
9874 		}
9875 	} else if (rcu_lock) {
9876 		env->cur_state->active_rcu_lock = true;
9877 	} else if (rcu_unlock) {
9878 		verbose(env, "unmatched rcu read unlock (kernel function %s)\n", func_name);
9879 		return -EINVAL;
9880 	}
9881 
9882 	/* Check the arguments */
9883 	err = check_kfunc_args(env, &meta);
9884 	if (err < 0)
9885 		return err;
9886 	/* In case of a release function, we get the register number of the
9887 	 * refcounted PTR_TO_BTF_ID in bpf_kfunc_arg_meta; do the release now.
9888 	 */
9889 	if (meta.release_regno) {
9890 		err = release_reference(env, regs[meta.release_regno].ref_obj_id);
9891 		if (err) {
9892 			verbose(env, "kfunc %s#%d reference has not been acquired before\n",
9893 				func_name, func_id);
9894 			return err;
9895 		}
9896 	}
9897 
9898 	if (meta.func_id == special_kfunc_list[KF_bpf_list_push_front] ||
9899 	    meta.func_id == special_kfunc_list[KF_bpf_list_push_back] ||
9900 	    meta.func_id == special_kfunc_list[KF_bpf_rbtree_add]) {
9901 		release_ref_obj_id = regs[BPF_REG_2].ref_obj_id;
9902 		err = ref_convert_owning_non_owning(env, release_ref_obj_id);
9903 		if (err) {
9904 			verbose(env, "kfunc %s#%d conversion of owning ref to non-owning failed\n",
9905 				func_name, func_id);
9906 			return err;
9907 		}
9908 
9909 		err = release_reference(env, release_ref_obj_id);
9910 		if (err) {
9911 			verbose(env, "kfunc %s#%d reference has not been acquired before\n",
9912 				func_name, func_id);
9913 			return err;
9914 		}
9915 	}
9916 
9917 	if (meta.func_id == special_kfunc_list[KF_bpf_rbtree_add]) {
9918 		err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
9919 					set_rbtree_add_callback_state);
9920 		if (err) {
9921 			verbose(env, "kfunc %s#%d failed callback verification\n",
9922 				func_name, func_id);
9923 			return err;
9924 		}
9925 	}
9926 
9927 	for (i = 0; i < CALLER_SAVED_REGS; i++)
9928 		mark_reg_not_init(env, regs, caller_saved[i]);
9929 
9930 	/* Check return type */
9931 	t = btf_type_skip_modifiers(desc_btf, func_proto->type, NULL);
9932 
9933 	if (is_kfunc_acquire(&meta) && !btf_type_is_struct_ptr(meta.btf, t)) {
9934 		/* Only exception is bpf_obj_new_impl */
9935 		if (meta.btf != btf_vmlinux || meta.func_id != special_kfunc_list[KF_bpf_obj_new_impl]) {
9936 			verbose(env, "acquire kernel function does not return PTR_TO_BTF_ID\n");
9937 			return -EINVAL;
9938 		}
9939 	}
9940 
9941 	if (btf_type_is_scalar(t)) {
9942 		mark_reg_unknown(env, regs, BPF_REG_0);
9943 		mark_btf_func_reg_size(env, BPF_REG_0, t->size);
9944 	} else if (btf_type_is_ptr(t)) {
9945 		ptr_type = btf_type_skip_modifiers(desc_btf, t->type, &ptr_type_id);
9946 
9947 		if (meta.btf == btf_vmlinux && btf_id_set_contains(&special_kfunc_set, meta.func_id)) {
9948 			if (meta.func_id == special_kfunc_list[KF_bpf_obj_new_impl]) {
9949 				struct btf *ret_btf;
9950 				u32 ret_btf_id;
9951 
9952 				if (unlikely(!bpf_global_ma_set))
9953 					return -ENOMEM;
9954 
9955 				if (((u64)(u32)meta.arg_constant.value) != meta.arg_constant.value) {
9956 					verbose(env, "local type ID argument must be in range [0, U32_MAX]\n");
9957 					return -EINVAL;
9958 				}
9959 
9960 				ret_btf = env->prog->aux->btf;
9961 				ret_btf_id = meta.arg_constant.value;
9962 
9963 				/* This may be NULL due to user not supplying a BTF */
9964 				if (!ret_btf) {
9965 					verbose(env, "bpf_obj_new requires prog BTF\n");
9966 					return -EINVAL;
9967 				}
9968 
9969 				ret_t = btf_type_by_id(ret_btf, ret_btf_id);
9970 				if (!ret_t || !__btf_type_is_struct(ret_t)) {
9971 					verbose(env, "bpf_obj_new type ID argument must be of a struct\n");
9972 					return -EINVAL;
9973 				}
9974 
9975 				mark_reg_known_zero(env, regs, BPF_REG_0);
9976 				regs[BPF_REG_0].type = PTR_TO_BTF_ID | MEM_ALLOC;
9977 				regs[BPF_REG_0].btf = ret_btf;
9978 				regs[BPF_REG_0].btf_id = ret_btf_id;
9979 
9980 				env->insn_aux_data[insn_idx].obj_new_size = ret_t->size;
9981 				env->insn_aux_data[insn_idx].kptr_struct_meta =
9982 					btf_find_struct_meta(ret_btf, ret_btf_id);
9983 			} else if (meta.func_id == special_kfunc_list[KF_bpf_obj_drop_impl]) {
9984 				env->insn_aux_data[insn_idx].kptr_struct_meta =
9985 					btf_find_struct_meta(meta.arg_obj_drop.btf,
9986 							     meta.arg_obj_drop.btf_id);
9987 			} else if (meta.func_id == special_kfunc_list[KF_bpf_list_pop_front] ||
9988 				   meta.func_id == special_kfunc_list[KF_bpf_list_pop_back]) {
9989 				struct btf_field *field = meta.arg_list_head.field;
9990 
9991 				mark_reg_graph_node(regs, BPF_REG_0, &field->graph_root);
9992 			} else if (meta.func_id == special_kfunc_list[KF_bpf_rbtree_remove] ||
9993 				   meta.func_id == special_kfunc_list[KF_bpf_rbtree_first]) {
9994 				struct btf_field *field = meta.arg_rbtree_root.field;
9995 
9996 				mark_reg_graph_node(regs, BPF_REG_0, &field->graph_root);
9997 			} else if (meta.func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx]) {
9998 				mark_reg_known_zero(env, regs, BPF_REG_0);
9999 				regs[BPF_REG_0].type = PTR_TO_BTF_ID | PTR_TRUSTED;
10000 				regs[BPF_REG_0].btf = desc_btf;
10001 				regs[BPF_REG_0].btf_id = meta.ret_btf_id;
10002 			} else if (meta.func_id == special_kfunc_list[KF_bpf_rdonly_cast]) {
10003 				ret_t = btf_type_by_id(desc_btf, meta.arg_constant.value);
10004 				if (!ret_t || !btf_type_is_struct(ret_t)) {
10005 					verbose(env,
10006 						"kfunc bpf_rdonly_cast type ID argument must be of a struct\n");
10007 					return -EINVAL;
10008 				}
10009 
10010 				mark_reg_known_zero(env, regs, BPF_REG_0);
10011 				regs[BPF_REG_0].type = PTR_TO_BTF_ID | PTR_UNTRUSTED;
10012 				regs[BPF_REG_0].btf = desc_btf;
10013 				regs[BPF_REG_0].btf_id = meta.arg_constant.value;
10014 			} else {
10015 				verbose(env, "kernel function %s unhandled dynamic return type\n",
10016 					meta.func_name);
10017 				return -EFAULT;
10018 			}
10019 		} else if (!__btf_type_is_struct(ptr_type)) {
10020 			if (!meta.r0_size) {
10021 				ptr_type_name = btf_name_by_offset(desc_btf,
10022 								   ptr_type->name_off);
10023 				verbose(env,
10024 					"kernel function %s returns pointer type %s %s is not supported\n",
10025 					func_name,
10026 					btf_type_str(ptr_type),
10027 					ptr_type_name);
10028 				return -EINVAL;
10029 			}
10030 
10031 			mark_reg_known_zero(env, regs, BPF_REG_0);
10032 			regs[BPF_REG_0].type = PTR_TO_MEM;
10033 			regs[BPF_REG_0].mem_size = meta.r0_size;
10034 
10035 			if (meta.r0_rdonly)
10036 				regs[BPF_REG_0].type |= MEM_RDONLY;
10037 
10038 			/* Ensures we don't access the memory after a release_reference() */
10039 			if (meta.ref_obj_id)
10040 				regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id;
10041 		} else {
10042 			mark_reg_known_zero(env, regs, BPF_REG_0);
10043 			regs[BPF_REG_0].btf = desc_btf;
10044 			regs[BPF_REG_0].type = PTR_TO_BTF_ID;
10045 			regs[BPF_REG_0].btf_id = ptr_type_id;
10046 		}
10047 
10048 		if (is_kfunc_ret_null(&meta)) {
10049 			regs[BPF_REG_0].type |= PTR_MAYBE_NULL;
10050 			/* For mark_ptr_or_null_reg, see 93c230e3f5bd6 */
10051 			regs[BPF_REG_0].id = ++env->id_gen;
10052 		}
10053 		mark_btf_func_reg_size(env, BPF_REG_0, sizeof(void *));
10054 		if (is_kfunc_acquire(&meta)) {
10055 			int id = acquire_reference_state(env, insn_idx);
10056 
10057 			if (id < 0)
10058 				return id;
10059 			if (is_kfunc_ret_null(&meta))
10060 				regs[BPF_REG_0].id = id;
10061 			regs[BPF_REG_0].ref_obj_id = id;
10062 		} else if (meta.func_id == special_kfunc_list[KF_bpf_rbtree_first]) {
10063 			ref_set_non_owning(env, &regs[BPF_REG_0]);
10064 		}
10065 
10066 		if (meta.func_id == special_kfunc_list[KF_bpf_rbtree_remove])
10067 			invalidate_non_owning_refs(env);
10068 
10069 		if (reg_may_point_to_spin_lock(&regs[BPF_REG_0]) && !regs[BPF_REG_0].id)
10070 			regs[BPF_REG_0].id = ++env->id_gen;
10071 	} /* else { add_kfunc_call() ensures it is btf_type_is_void(t) } */
10072 
10073 	nargs = btf_type_vlen(func_proto);
10074 	args = (const struct btf_param *)(func_proto + 1);
10075 	for (i = 0; i < nargs; i++) {
10076 		u32 regno = i + 1;
10077 
10078 		t = btf_type_skip_modifiers(desc_btf, args[i].type, NULL);
10079 		if (btf_type_is_ptr(t))
10080 			mark_btf_func_reg_size(env, regno, sizeof(void *));
10081 		else
10082 			/* scalar. ensured by btf_check_kfunc_arg_match() */
10083 			mark_btf_func_reg_size(env, regno, t->size);
10084 	}
10085 
10086 	return 0;
10087 }
10088 
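/* Illustrative walk-through of the function above (example usage, not part
 * of the original source): for a program doing
 *
 *	struct foo *p = bpf_obj_new(typeof(*p));
 *
 * the KF_bpf_obj_new_impl branch marks R0 as PTR_TO_BTF_ID | MEM_ALLOC
 * against the program's own BTF, stashes the object size and struct meta
 * in insn_aux_data for the later fixup, and, since the kfunc is acquiring,
 * R0 also receives a ref_obj_id that must eventually be released
 * (e.g. via bpf_obj_drop()).
 */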
10089 static bool signed_add_overflows(s64 a, s64 b)
10090 {
10091 	/* Do the add in u64, where overflow is well-defined */
10092 	s64 res = (s64)((u64)a + (u64)b);
10093 
10094 	if (b < 0)
10095 		return res > a;
10096 	return res < a;
10097 }
10098 
10099 static bool signed_add32_overflows(s32 a, s32 b)
10100 {
10101 	/* Do the add in u32, where overflow is well-defined */
10102 	s32 res = (s32)((u32)a + (u32)b);
10103 
10104 	if (b < 0)
10105 		return res > a;
10106 	return res < a;
10107 }
10108 
10109 static bool signed_sub_overflows(s64 a, s64 b)
10110 {
10111 	/* Do the sub in u64, where overflow is well-defined */
10112 	s64 res = (s64)((u64)a - (u64)b);
10113 
10114 	if (b < 0)
10115 		return res < a;
10116 	return res > a;
10117 }
10118 
10119 static bool signed_sub32_overflows(s32 a, s32 b)
10120 {
10121 	/* Do the sub in u32, where overflow is well-defined */
10122 	s32 res = (s32)((u32)a - (u32)b);
10123 
10124 	if (b < 0)
10125 		return res < a;
10126 	return res > a;
10127 }
10128 
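/* Illustrative example for the helpers above: with a = S64_MAX and b = 1,
 * the u64 addition wraps to 0x8000000000000000, i.e. S64_MIN when read
 * back as s64; since b > 0 and res < a, overflow is reported. Conversely,
 * a = S64_MIN and b = -1 wraps to S64_MAX; since b < 0 and res > a, that
 * is reported as overflow too.
 */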
10129 static bool check_reg_sane_offset(struct bpf_verifier_env *env,
10130 				  const struct bpf_reg_state *reg,
10131 				  enum bpf_reg_type type)
10132 {
10133 	bool known = tnum_is_const(reg->var_off);
10134 	s64 val = reg->var_off.value;
10135 	s64 smin = reg->smin_value;
10136 
10137 	if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) {
10138 		verbose(env, "math between %s pointer and %lld is not allowed\n",
10139 			reg_type_str(env, type), val);
10140 		return false;
10141 	}
10142 
10143 	if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) {
10144 		verbose(env, "%s pointer offset %d is not allowed\n",
10145 			reg_type_str(env, type), reg->off);
10146 		return false;
10147 	}
10148 
10149 	if (smin == S64_MIN) {
10150 		verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n",
10151 			reg_type_str(env, type));
10152 		return false;
10153 	}
10154 
10155 	if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) {
10156 		verbose(env, "value %lld makes %s pointer be out of bounds\n",
10157 			smin, reg_type_str(env, type));
10158 		return false;
10159 	}
10160 
10161 	return true;
10162 }
10163 
10164 enum {
10165 	REASON_BOUNDS	= -1,
10166 	REASON_TYPE	= -2,
10167 	REASON_PATHS	= -3,
10168 	REASON_LIMIT	= -4,
10169 	REASON_STACK	= -5,
10170 };
10171 
10172 static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
10173 			      u32 *alu_limit, bool mask_to_left)
10174 {
10175 	u32 max = 0, ptr_limit = 0;
10176 
10177 	switch (ptr_reg->type) {
10178 	case PTR_TO_STACK:
10179 		/* Offset 0 is out-of-bounds, but an acceptable start for the
10180 		 * left direction, see BPF_REG_FP. Also, an unknown scalar
10181 		 * offset where we would need to deal with min/max bounds is
10182 		 * currently prohibited for unprivileged.
10183 		 */
10184 		max = MAX_BPF_STACK + mask_to_left;
10185 		ptr_limit = -(ptr_reg->var_off.value + ptr_reg->off);
10186 		break;
10187 	case PTR_TO_MAP_VALUE:
10188 		max = ptr_reg->map_ptr->value_size;
10189 		ptr_limit = (mask_to_left ?
10190 			     ptr_reg->smin_value :
10191 			     ptr_reg->umax_value) + ptr_reg->off;
10192 		break;
10193 	default:
10194 		return REASON_TYPE;
10195 	}
10196 
10197 	if (ptr_limit >= max)
10198 		return REASON_LIMIT;
10199 	*alu_limit = ptr_limit;
10200 	return 0;
10201 }
10202 
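/* Illustrative example (assumed values): for PTR_TO_STACK with
 * var_off.value == 0 and off == -16, ptr_limit = -(0 + -16) = 16, so at
 * most 16 bytes can be masked off. For PTR_TO_MAP_VALUE with
 * value_size == 64, off == 8, umax_value == 40 and mask_to_left == false,
 * ptr_limit = 40 + 8 = 48 < 64, hence *alu_limit = 48.
 */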
10203 static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env,
10204 				    const struct bpf_insn *insn)
10205 {
10206 	return env->bypass_spec_v1 || BPF_SRC(insn->code) == BPF_K;
10207 }
10208 
10209 static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux,
10210 				       u32 alu_state, u32 alu_limit)
10211 {
10212 	/* If we arrived here from different branches with different
10213 	 * state or limits to sanitize, then this won't work.
10214 	 */
10215 	if (aux->alu_state &&
10216 	    (aux->alu_state != alu_state ||
10217 	     aux->alu_limit != alu_limit))
10218 		return REASON_PATHS;
10219 
10220 	/* Corresponding fixup done in do_misc_fixups(). */
10221 	aux->alu_state = alu_state;
10222 	aux->alu_limit = alu_limit;
10223 	return 0;
10224 }
10225 
10226 static int sanitize_val_alu(struct bpf_verifier_env *env,
10227 			    struct bpf_insn *insn)
10228 {
10229 	struct bpf_insn_aux_data *aux = cur_aux(env);
10230 
10231 	if (can_skip_alu_sanitation(env, insn))
10232 		return 0;
10233 
10234 	return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0);
10235 }
10236 
10237 static bool sanitize_needed(u8 opcode)
10238 {
10239 	return opcode == BPF_ADD || opcode == BPF_SUB;
10240 }
10241 
10242 struct bpf_sanitize_info {
10243 	struct bpf_insn_aux_data aux;
10244 	bool mask_to_left;
10245 };
10246 
10247 static struct bpf_verifier_state *
10248 sanitize_speculative_path(struct bpf_verifier_env *env,
10249 			  const struct bpf_insn *insn,
10250 			  u32 next_idx, u32 curr_idx)
10251 {
10252 	struct bpf_verifier_state *branch;
10253 	struct bpf_reg_state *regs;
10254 
10255 	branch = push_stack(env, next_idx, curr_idx, true);
10256 	if (branch && insn) {
10257 		regs = branch->frame[branch->curframe]->regs;
10258 		if (BPF_SRC(insn->code) == BPF_K) {
10259 			mark_reg_unknown(env, regs, insn->dst_reg);
10260 		} else if (BPF_SRC(insn->code) == BPF_X) {
10261 			mark_reg_unknown(env, regs, insn->dst_reg);
10262 			mark_reg_unknown(env, regs, insn->src_reg);
10263 		}
10264 	}
10265 	return branch;
10266 }
10267 
10268 static int sanitize_ptr_alu(struct bpf_verifier_env *env,
10269 			    struct bpf_insn *insn,
10270 			    const struct bpf_reg_state *ptr_reg,
10271 			    const struct bpf_reg_state *off_reg,
10272 			    struct bpf_reg_state *dst_reg,
10273 			    struct bpf_sanitize_info *info,
10274 			    const bool commit_window)
10275 {
10276 	struct bpf_insn_aux_data *aux = commit_window ? cur_aux(env) : &info->aux;
10277 	struct bpf_verifier_state *vstate = env->cur_state;
10278 	bool off_is_imm = tnum_is_const(off_reg->var_off);
10279 	bool off_is_neg = off_reg->smin_value < 0;
10280 	bool ptr_is_dst_reg = ptr_reg == dst_reg;
10281 	u8 opcode = BPF_OP(insn->code);
10282 	u32 alu_state, alu_limit;
10283 	struct bpf_reg_state tmp;
10284 	bool ret;
10285 	int err;
10286 
10287 	if (can_skip_alu_sanitation(env, insn))
10288 		return 0;
10289 
10290 	/* We already marked aux for masking from non-speculative
10291 	 * paths, thus we got here in the first place. We only care
10292 	 * to explore bad access from here.
10293 	 */
10294 	if (vstate->speculative)
10295 		goto do_sim;
10296 
10297 	if (!commit_window) {
10298 		if (!tnum_is_const(off_reg->var_off) &&
10299 		    (off_reg->smin_value < 0) != (off_reg->smax_value < 0))
10300 			return REASON_BOUNDS;
10301 
10302 		info->mask_to_left = (opcode == BPF_ADD &&  off_is_neg) ||
10303 				     (opcode == BPF_SUB && !off_is_neg);
10304 	}
10305 
10306 	err = retrieve_ptr_limit(ptr_reg, &alu_limit, info->mask_to_left);
10307 	if (err < 0)
10308 		return err;
10309 
10310 	if (commit_window) {
10311 		/* In commit phase we narrow the masking window based on
10312 		 * the observed pointer move after the simulated operation.
10313 		 */
10314 		alu_state = info->aux.alu_state;
10315 		alu_limit = abs(info->aux.alu_limit - alu_limit);
10316 	} else {
10317 		alu_state  = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
10318 		alu_state |= off_is_imm ? BPF_ALU_IMMEDIATE : 0;
10319 		alu_state |= ptr_is_dst_reg ?
10320 			     BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
10321 
10322 		/* Limit pruning on unknown scalars to enable deep search for
10323 		 * potential masking differences from other program paths.
10324 		 */
10325 		if (!off_is_imm)
10326 			env->explore_alu_limits = true;
10327 	}
10328 
10329 	err = update_alu_sanitation_state(aux, alu_state, alu_limit);
10330 	if (err < 0)
10331 		return err;
10332 do_sim:
10333 	/* If we're in commit phase, we're done here given we already
10334 	 * pushed the truncated dst_reg into the speculative verification
10335 	 * stack.
10336 	 *
10337 	 * Also, when the register is a known constant, we rewrite the register-based
10338 	 * operation to an immediate-based one, and thus do not need masking (and as
10339 	 * a consequence, do not need to simulate the zero-truncation either).
10340 	 */
10341 	if (commit_window || off_is_imm)
10342 		return 0;
10343 
10344 	/* Simulate and find potential out-of-bounds access under
10345 	 * speculative execution from truncation as a result of
10346 	 * masking when off was not within expected range. If off
10347 	 * sits in dst, then we temporarily need to move ptr there
10348 	 * to simulate dst (== 0) +/-= ptr. Needed, for example,
10349 	 * for cases where we use K-based arithmetic in one direction
10350 	 * and truncated reg-based in the other in order to explore
10351 	 * bad access.
10352 	 */
10353 	if (!ptr_is_dst_reg) {
10354 		tmp = *dst_reg;
10355 		copy_register_state(dst_reg, ptr_reg);
10356 	}
10357 	ret = sanitize_speculative_path(env, NULL, env->insn_idx + 1,
10358 					env->insn_idx);
10359 	if (!ptr_is_dst_reg && ret)
10360 		*dst_reg = tmp;
10361 	return !ret ? REASON_STACK : 0;
10362 }
10363 
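/* Illustrative walk-through (assumed numbers): on the first, non-commit
 * pass the pre-ALU pointer might allow an alu_limit of 48. In the commit
 * phase the limit is recomputed from the post-ALU dst_reg, say 40, and
 * the masking window is narrowed to abs(48 - 40) == 8, matching the
 * pointer movement that do_misc_fixups() must mask against.
 */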
10364 static void sanitize_mark_insn_seen(struct bpf_verifier_env *env)
10365 {
10366 	struct bpf_verifier_state *vstate = env->cur_state;
10367 
10368 	/* If we simulate paths under speculation, we don't update the
10369 	 * insn as 'seen' such that when we verify unreachable paths in
10370 	 * the non-speculative domain, sanitize_dead_code() can still
10371 	 * rewrite/sanitize them.
10372 	 */
10373 	if (!vstate->speculative)
10374 		env->insn_aux_data[env->insn_idx].seen = env->pass_cnt;
10375 }
10376 
10377 static int sanitize_err(struct bpf_verifier_env *env,
10378 			const struct bpf_insn *insn, int reason,
10379 			const struct bpf_reg_state *off_reg,
10380 			const struct bpf_reg_state *dst_reg)
10381 {
10382 	static const char *err = "pointer arithmetic with it prohibited for !root";
10383 	const char *op = BPF_OP(insn->code) == BPF_ADD ? "add" : "sub";
10384 	u32 dst = insn->dst_reg, src = insn->src_reg;
10385 
10386 	switch (reason) {
10387 	case REASON_BOUNDS:
10388 		verbose(env, "R%d has unknown scalar with mixed signed bounds, %s\n",
10389 			off_reg == dst_reg ? dst : src, err);
10390 		break;
10391 	case REASON_TYPE:
10392 		verbose(env, "R%d has pointer with unsupported alu operation, %s\n",
10393 			off_reg == dst_reg ? src : dst, err);
10394 		break;
10395 	case REASON_PATHS:
10396 		verbose(env, "R%d tried to %s from different maps, paths or scalars, %s\n",
10397 			dst, op, err);
10398 		break;
10399 	case REASON_LIMIT:
10400 		verbose(env, "R%d tried to %s beyond pointer bounds, %s\n",
10401 			dst, op, err);
10402 		break;
10403 	case REASON_STACK:
10404 		verbose(env, "R%d could not be pushed for speculative verification, %s\n",
10405 			dst, err);
10406 		break;
10407 	default:
10408 		verbose(env, "verifier internal error: unknown reason (%d)\n",
10409 			reason);
10410 		break;
10411 	}
10412 
10413 	return -EACCES;
10414 }
10415 
10416 /* check that stack access falls within stack limits and that 'reg' doesn't
10417  * have a variable offset.
10418  *
10419  * Variable offset is prohibited for unprivileged mode for simplicity since it
10420  * requires corresponding support in Spectre masking for stack ALU.  See also
10421  * retrieve_ptr_limit().
10422  *
10424  * 'off' includes 'reg->off'.
10425  */
10426 static int check_stack_access_for_ptr_arithmetic(
10427 				struct bpf_verifier_env *env,
10428 				int regno,
10429 				const struct bpf_reg_state *reg,
10430 				int off)
10431 {
10432 	if (!tnum_is_const(reg->var_off)) {
10433 		char tn_buf[48];
10434 
10435 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
10436 		verbose(env, "R%d variable stack access prohibited for !root, var_off=%s off=%d\n",
10437 			regno, tn_buf, off);
10438 		return -EACCES;
10439 	}
10440 
10441 	if (off >= 0 || off < -MAX_BPF_STACK) {
10442 		verbose(env, "R%d stack pointer arithmetic goes out of range, "
10443 			"prohibited for !root; off=%d\n", regno, off);
10444 		return -EACCES;
10445 	}
10446 
10447 	return 0;
10448 }
10449 
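/* Illustrative example: with MAX_BPF_STACK == 512, a constant-offset
 * stack pointer must land in [-512, -1]; both off == 0 (the frame base
 * itself) and off == -520 are rejected above for unprivileged programs.
 */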
10450 static int sanitize_check_bounds(struct bpf_verifier_env *env,
10451 				 const struct bpf_insn *insn,
10452 				 const struct bpf_reg_state *dst_reg)
10453 {
10454 	u32 dst = insn->dst_reg;
10455 
10456 	/* For unprivileged programs we require that the resulting offset be in
10457 	 * bounds in order to be able to sanitize the access later on.
10458 	 */
10459 	if (env->bypass_spec_v1)
10460 		return 0;
10461 
10462 	switch (dst_reg->type) {
10463 	case PTR_TO_STACK:
10464 		if (check_stack_access_for_ptr_arithmetic(env, dst, dst_reg,
10465 					dst_reg->off + dst_reg->var_off.value))
10466 			return -EACCES;
10467 		break;
10468 	case PTR_TO_MAP_VALUE:
10469 		if (check_map_access(env, dst, dst_reg->off, 1, false, ACCESS_HELPER)) {
10470 			verbose(env, "R%d pointer arithmetic of map value goes out of range, "
10471 				"prohibited for !root\n", dst);
10472 			return -EACCES;
10473 		}
10474 		break;
10475 	default:
10476 		break;
10477 	}
10478 
10479 	return 0;
10480 }
10481 
10482 /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
10483  * Caller should also handle BPF_MOV case separately.
10484  * If we return -EACCES, caller may want to try again treating pointer as a
10485  * scalar.  So we only emit a diagnostic if !env->allow_ptr_leaks.
10486  */
10487 static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
10488 				   struct bpf_insn *insn,
10489 				   const struct bpf_reg_state *ptr_reg,
10490 				   const struct bpf_reg_state *off_reg)
10491 {
10492 	struct bpf_verifier_state *vstate = env->cur_state;
10493 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
10494 	struct bpf_reg_state *regs = state->regs, *dst_reg;
10495 	bool known = tnum_is_const(off_reg->var_off);
10496 	s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value,
10497 	    smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
10498 	u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
10499 	    umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
10500 	struct bpf_sanitize_info info = {};
10501 	u8 opcode = BPF_OP(insn->code);
10502 	u32 dst = insn->dst_reg;
10503 	int ret;
10504 
10505 	dst_reg = &regs[dst];
10506 
10507 	if ((known && (smin_val != smax_val || umin_val != umax_val)) ||
10508 	    smin_val > smax_val || umin_val > umax_val) {
10509 		/* Taint dst register if offset had invalid bounds derived from
10510 		 * e.g. dead branches.
10511 		 */
10512 		__mark_reg_unknown(env, dst_reg);
10513 		return 0;
10514 	}
10515 
10516 	if (BPF_CLASS(insn->code) != BPF_ALU64) {
10517 		/* 32-bit ALU ops on pointers produce (meaningless) scalars */
10518 		if (opcode == BPF_SUB && env->allow_ptr_leaks) {
10519 			__mark_reg_unknown(env, dst_reg);
10520 			return 0;
10521 		}
10522 
10523 		verbose(env,
10524 			"R%d 32-bit pointer arithmetic prohibited\n",
10525 			dst);
10526 		return -EACCES;
10527 	}
10528 
10529 	if (ptr_reg->type & PTR_MAYBE_NULL) {
10530 		verbose(env, "R%d pointer arithmetic on %s prohibited, null-check it first\n",
10531 			dst, reg_type_str(env, ptr_reg->type));
10532 		return -EACCES;
10533 	}
10534 
10535 	switch (base_type(ptr_reg->type)) {
10536 	case CONST_PTR_TO_MAP:
10537 		/* smin_val represents the known value */
10538 		if (known && smin_val == 0 && opcode == BPF_ADD)
10539 			break;
10540 		fallthrough;
10541 	case PTR_TO_PACKET_END:
10542 	case PTR_TO_SOCKET:
10543 	case PTR_TO_SOCK_COMMON:
10544 	case PTR_TO_TCP_SOCK:
10545 	case PTR_TO_XDP_SOCK:
10546 		verbose(env, "R%d pointer arithmetic on %s prohibited\n",
10547 			dst, reg_type_str(env, ptr_reg->type));
10548 		return -EACCES;
10549 	default:
10550 		break;
10551 	}
10552 
10553 	/* In case of 'scalar += pointer', dst_reg inherits pointer type and id.
10554 	 * The id may be overwritten later if we create a new variable offset.
10555 	 */
10556 	dst_reg->type = ptr_reg->type;
10557 	dst_reg->id = ptr_reg->id;
10558 
10559 	if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) ||
10560 	    !check_reg_sane_offset(env, ptr_reg, ptr_reg->type))
10561 		return -EINVAL;
10562 
10563 	/* pointer types do not carry 32-bit bounds at the moment. */
10564 	__mark_reg32_unbounded(dst_reg);
10565 
10566 	if (sanitize_needed(opcode)) {
10567 		ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg,
10568 				       &info, false);
10569 		if (ret < 0)
10570 			return sanitize_err(env, insn, ret, off_reg, dst_reg);
10571 	}
10572 
10573 	switch (opcode) {
10574 	case BPF_ADD:
10575 		/* We can take a fixed offset as long as it doesn't overflow
10576 		 * the s32 'off' field
10577 		 */
10578 		if (known && (ptr_reg->off + smin_val ==
10579 			      (s64)(s32)(ptr_reg->off + smin_val))) {
10580 			/* pointer += K.  Accumulate it into fixed offset */
10581 			dst_reg->smin_value = smin_ptr;
10582 			dst_reg->smax_value = smax_ptr;
10583 			dst_reg->umin_value = umin_ptr;
10584 			dst_reg->umax_value = umax_ptr;
10585 			dst_reg->var_off = ptr_reg->var_off;
10586 			dst_reg->off = ptr_reg->off + smin_val;
10587 			dst_reg->raw = ptr_reg->raw;
10588 			break;
10589 		}
10590 		/* A new variable offset is created.  Note that off_reg->off
10591 		 * == 0, since it's a scalar.
10592 		 * dst_reg gets the pointer type and since some positive
10593 		 * integer value was added to the pointer, give it a new 'id'
10594 		 * if it's a PTR_TO_PACKET.
10595 		 * This creates a new 'base' pointer, off_reg (variable) gets
10596 		 * added into the variable offset, and we copy the fixed offset
10597 		 * from ptr_reg.
10598 		 */
10599 		if (signed_add_overflows(smin_ptr, smin_val) ||
10600 		    signed_add_overflows(smax_ptr, smax_val)) {
10601 			dst_reg->smin_value = S64_MIN;
10602 			dst_reg->smax_value = S64_MAX;
10603 		} else {
10604 			dst_reg->smin_value = smin_ptr + smin_val;
10605 			dst_reg->smax_value = smax_ptr + smax_val;
10606 		}
10607 		if (umin_ptr + umin_val < umin_ptr ||
10608 		    umax_ptr + umax_val < umax_ptr) {
10609 			dst_reg->umin_value = 0;
10610 			dst_reg->umax_value = U64_MAX;
10611 		} else {
10612 			dst_reg->umin_value = umin_ptr + umin_val;
10613 			dst_reg->umax_value = umax_ptr + umax_val;
10614 		}
10615 		dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off);
10616 		dst_reg->off = ptr_reg->off;
10617 		dst_reg->raw = ptr_reg->raw;
10618 		if (reg_is_pkt_pointer(ptr_reg)) {
10619 			dst_reg->id = ++env->id_gen;
10620 			/* something was added to pkt_ptr, set range to zero */
10621 			memset(&dst_reg->raw, 0, sizeof(dst_reg->raw));
10622 		}
10623 		break;
10624 	case BPF_SUB:
10625 		if (dst_reg == off_reg) {
10626 			/* scalar -= pointer.  Creates an unknown scalar */
10627 			verbose(env, "R%d tried to subtract pointer from scalar\n",
10628 				dst);
10629 			return -EACCES;
10630 		}
10631 		/* We don't allow subtraction from FP, because (according to the
10632 		 * test_verifier.c test "invalid fp arithmetic") JITs might not
10633 		 * be able to deal with it.
10634 		 */
10635 		if (ptr_reg->type == PTR_TO_STACK) {
10636 			verbose(env, "R%d subtraction from stack pointer prohibited\n",
10637 				dst);
10638 			return -EACCES;
10639 		}
10640 		if (known && (ptr_reg->off - smin_val ==
10641 			      (s64)(s32)(ptr_reg->off - smin_val))) {
10642 			/* pointer -= K.  Subtract it from fixed offset */
10643 			dst_reg->smin_value = smin_ptr;
10644 			dst_reg->smax_value = smax_ptr;
10645 			dst_reg->umin_value = umin_ptr;
10646 			dst_reg->umax_value = umax_ptr;
10647 			dst_reg->var_off = ptr_reg->var_off;
10648 			dst_reg->id = ptr_reg->id;
10649 			dst_reg->off = ptr_reg->off - smin_val;
10650 			dst_reg->raw = ptr_reg->raw;
10651 			break;
10652 		}
10653 		/* A new variable offset is created.  If the subtrahend is known
10654 		 * nonnegative, then any reg->range we had before is still good.
10655 		 */
10656 		if (signed_sub_overflows(smin_ptr, smax_val) ||
10657 		    signed_sub_overflows(smax_ptr, smin_val)) {
10658 			/* Overflow possible, we know nothing */
10659 			dst_reg->smin_value = S64_MIN;
10660 			dst_reg->smax_value = S64_MAX;
10661 		} else {
10662 			dst_reg->smin_value = smin_ptr - smax_val;
10663 			dst_reg->smax_value = smax_ptr - smin_val;
10664 		}
10665 		if (umin_ptr < umax_val) {
10666 			/* Overflow possible, we know nothing */
10667 			dst_reg->umin_value = 0;
10668 			dst_reg->umax_value = U64_MAX;
10669 		} else {
10670 			/* Cannot overflow (as long as bounds are consistent) */
10671 			dst_reg->umin_value = umin_ptr - umax_val;
10672 			dst_reg->umax_value = umax_ptr - umin_val;
10673 		}
10674 		dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off);
10675 		dst_reg->off = ptr_reg->off;
10676 		dst_reg->raw = ptr_reg->raw;
10677 		if (reg_is_pkt_pointer(ptr_reg)) {
10678 			dst_reg->id = ++env->id_gen;
10679 			/* something was subtracted from pkt_ptr, set range to zero */
10680 			if (smin_val < 0)
10681 				memset(&dst_reg->raw, 0, sizeof(dst_reg->raw));
10682 		}
10683 		break;
10684 	case BPF_AND:
10685 	case BPF_OR:
10686 	case BPF_XOR:
10687 		/* bitwise ops on pointers are troublesome, prohibit. */
10688 		verbose(env, "R%d bitwise operator %s on pointer prohibited\n",
10689 			dst, bpf_alu_string[opcode >> 4]);
10690 		return -EACCES;
10691 	default:
10692 		/* other operators (e.g. MUL, LSH) produce non-pointer results */
10693 		verbose(env, "R%d pointer arithmetic with %s operator prohibited\n",
10694 			dst, bpf_alu_string[opcode >> 4]);
10695 		return -EACCES;
10696 	}
10697 
10698 	if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type))
10699 		return -EINVAL;
10700 	reg_bounds_sync(dst_reg);
10701 	if (sanitize_check_bounds(env, insn, dst_reg) < 0)
10702 		return -EACCES;
10703 	if (sanitize_needed(opcode)) {
10704 		ret = sanitize_ptr_alu(env, insn, dst_reg, off_reg, dst_reg,
10705 				       &info, true);
10706 		if (ret < 0)
10707 			return sanitize_err(env, insn, ret, off_reg, dst_reg);
10708 	}
10709 
10710 	return 0;
10711 }
10712 
10713 static void scalar32_min_max_add(struct bpf_reg_state *dst_reg,
10714 				 struct bpf_reg_state *src_reg)
10715 {
10716 	s32 smin_val = src_reg->s32_min_value;
10717 	s32 smax_val = src_reg->s32_max_value;
10718 	u32 umin_val = src_reg->u32_min_value;
10719 	u32 umax_val = src_reg->u32_max_value;
10720 
10721 	if (signed_add32_overflows(dst_reg->s32_min_value, smin_val) ||
10722 	    signed_add32_overflows(dst_reg->s32_max_value, smax_val)) {
10723 		dst_reg->s32_min_value = S32_MIN;
10724 		dst_reg->s32_max_value = S32_MAX;
10725 	} else {
10726 		dst_reg->s32_min_value += smin_val;
10727 		dst_reg->s32_max_value += smax_val;
10728 	}
10729 	if (dst_reg->u32_min_value + umin_val < umin_val ||
10730 	    dst_reg->u32_max_value + umax_val < umax_val) {
10731 		dst_reg->u32_min_value = 0;
10732 		dst_reg->u32_max_value = U32_MAX;
10733 	} else {
10734 		dst_reg->u32_min_value += umin_val;
10735 		dst_reg->u32_max_value += umax_val;
10736 	}
10737 }
10738 
10739 static void scalar_min_max_add(struct bpf_reg_state *dst_reg,
10740 			       struct bpf_reg_state *src_reg)
10741 {
10742 	s64 smin_val = src_reg->smin_value;
10743 	s64 smax_val = src_reg->smax_value;
10744 	u64 umin_val = src_reg->umin_value;
10745 	u64 umax_val = src_reg->umax_value;
10746 
10747 	if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
10748 	    signed_add_overflows(dst_reg->smax_value, smax_val)) {
10749 		dst_reg->smin_value = S64_MIN;
10750 		dst_reg->smax_value = S64_MAX;
10751 	} else {
10752 		dst_reg->smin_value += smin_val;
10753 		dst_reg->smax_value += smax_val;
10754 	}
10755 	if (dst_reg->umin_value + umin_val < umin_val ||
10756 	    dst_reg->umax_value + umax_val < umax_val) {
10757 		dst_reg->umin_value = 0;
10758 		dst_reg->umax_value = U64_MAX;
10759 	} else {
10760 		dst_reg->umin_value += umin_val;
10761 		dst_reg->umax_value += umax_val;
10762 	}
10763 }
10764 
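/* Illustrative example for the unsigned check above: wraparound is
 * detected when a sum falls below one of its addends. With
 * dst_reg->umax_value == U64_MAX - 1 and umax_val == 2 the sum wraps to
 * 0 < 2, so the bounds collapse to [0, U64_MAX]; with umax_val == 1 the
 * sum is exactly U64_MAX and the bounds stay precise.
 */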
10765 static void scalar32_min_max_sub(struct bpf_reg_state *dst_reg,
10766 				 struct bpf_reg_state *src_reg)
10767 {
10768 	s32 smin_val = src_reg->s32_min_value;
10769 	s32 smax_val = src_reg->s32_max_value;
10770 	u32 umin_val = src_reg->u32_min_value;
10771 	u32 umax_val = src_reg->u32_max_value;
10772 
10773 	if (signed_sub32_overflows(dst_reg->s32_min_value, smax_val) ||
10774 	    signed_sub32_overflows(dst_reg->s32_max_value, smin_val)) {
10775 		/* Overflow possible, we know nothing */
10776 		dst_reg->s32_min_value = S32_MIN;
10777 		dst_reg->s32_max_value = S32_MAX;
10778 	} else {
10779 		dst_reg->s32_min_value -= smax_val;
10780 		dst_reg->s32_max_value -= smin_val;
10781 	}
10782 	if (dst_reg->u32_min_value < umax_val) {
10783 		/* Overflow possible, we know nothing */
10784 		dst_reg->u32_min_value = 0;
10785 		dst_reg->u32_max_value = U32_MAX;
10786 	} else {
10787 		/* Cannot overflow (as long as bounds are consistent) */
10788 		dst_reg->u32_min_value -= umax_val;
10789 		dst_reg->u32_max_value -= umin_val;
10790 	}
10791 }
10792 
10793 static void scalar_min_max_sub(struct bpf_reg_state *dst_reg,
10794 			       struct bpf_reg_state *src_reg)
10795 {
10796 	s64 smin_val = src_reg->smin_value;
10797 	s64 smax_val = src_reg->smax_value;
10798 	u64 umin_val = src_reg->umin_value;
10799 	u64 umax_val = src_reg->umax_value;
10800 
10801 	if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
10802 	    signed_sub_overflows(dst_reg->smax_value, smin_val)) {
10803 		/* Overflow possible, we know nothing */
10804 		dst_reg->smin_value = S64_MIN;
10805 		dst_reg->smax_value = S64_MAX;
10806 	} else {
10807 		dst_reg->smin_value -= smax_val;
10808 		dst_reg->smax_value -= smin_val;
10809 	}
10810 	if (dst_reg->umin_value < umax_val) {
10811 		/* Overflow possible, we know nothing */
10812 		dst_reg->umin_value = 0;
10813 		dst_reg->umax_value = U64_MAX;
10814 	} else {
10815 		/* Cannot overflow (as long as bounds are consistent) */
10816 		dst_reg->umin_value -= umax_val;
10817 		dst_reg->umax_value -= umin_val;
10818 	}
10819 }
10820 
10821 static void scalar32_min_max_mul(struct bpf_reg_state *dst_reg,
10822 				 struct bpf_reg_state *src_reg)
10823 {
10824 	s32 smin_val = src_reg->s32_min_value;
10825 	u32 umin_val = src_reg->u32_min_value;
10826 	u32 umax_val = src_reg->u32_max_value;
10827 
10828 	if (smin_val < 0 || dst_reg->s32_min_value < 0) {
10829 		/* Ain't nobody got time to multiply that sign */
10830 		__mark_reg32_unbounded(dst_reg);
10831 		return;
10832 	}
10833 	/* Both values are positive, so we can work with unsigned and
10834 	 * copy the result to signed (unless it exceeds S32_MAX).
10835 	 */
10836 	if (umax_val > U16_MAX || dst_reg->u32_max_value > U16_MAX) {
10837 		/* Potential overflow, we know nothing */
10838 		__mark_reg32_unbounded(dst_reg);
10839 		return;
10840 	}
10841 	dst_reg->u32_min_value *= umin_val;
10842 	dst_reg->u32_max_value *= umax_val;
10843 	if (dst_reg->u32_max_value > S32_MAX) {
10844 		/* Overflow possible, we know nothing */
10845 		dst_reg->s32_min_value = S32_MIN;
10846 		dst_reg->s32_max_value = S32_MAX;
10847 	} else {
10848 		dst_reg->s32_min_value = dst_reg->u32_min_value;
10849 		dst_reg->s32_max_value = dst_reg->u32_max_value;
10850 	}
10851 }
10852 
10853 static void scalar_min_max_mul(struct bpf_reg_state *dst_reg,
10854 			       struct bpf_reg_state *src_reg)
10855 {
10856 	s64 smin_val = src_reg->smin_value;
10857 	u64 umin_val = src_reg->umin_value;
10858 	u64 umax_val = src_reg->umax_value;
10859 
10860 	if (smin_val < 0 || dst_reg->smin_value < 0) {
10861 		/* Ain't nobody got time to multiply that sign */
10862 		__mark_reg64_unbounded(dst_reg);
10863 		return;
10864 	}
10865 	/* Both values are positive, so we can work with unsigned and
10866 	 * copy the result to signed (unless it exceeds S64_MAX).
10867 	 */
10868 	if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) {
10869 		/* Potential overflow, we know nothing */
10870 		__mark_reg64_unbounded(dst_reg);
10871 		return;
10872 	}
10873 	dst_reg->umin_value *= umin_val;
10874 	dst_reg->umax_value *= umax_val;
10875 	if (dst_reg->umax_value > S64_MAX) {
10876 		/* Overflow possible, we know nothing */
10877 		dst_reg->smin_value = S64_MIN;
10878 		dst_reg->smax_value = S64_MAX;
10879 	} else {
10880 		dst_reg->smin_value = dst_reg->umin_value;
10881 		dst_reg->smax_value = dst_reg->umax_value;
10882 	}
10883 }
10884 
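/* Illustrative example: with both operands known nonnegative and small,
 * e.g. dst in [3, 5] and src in [2, 4], the result bounds become
 * [3 * 2, 5 * 4] == [6, 20]. The U32_MAX guard above guarantees the u64
 * products cannot wrap, which keeps this interval arithmetic sound.
 */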
10885 static void scalar32_min_max_and(struct bpf_reg_state *dst_reg,
10886 				 struct bpf_reg_state *src_reg)
10887 {
10888 	bool src_known = tnum_subreg_is_const(src_reg->var_off);
10889 	bool dst_known = tnum_subreg_is_const(dst_reg->var_off);
10890 	struct tnum var32_off = tnum_subreg(dst_reg->var_off);
10891 	s32 smin_val = src_reg->s32_min_value;
10892 	u32 umax_val = src_reg->u32_max_value;
10893 
10894 	if (src_known && dst_known) {
10895 		__mark_reg32_known(dst_reg, var32_off.value);
10896 		return;
10897 	}
10898 
10899 	/* We get our minimum from the var_off, since that's inherently
10900 	 * bitwise.  Our maximum is the minimum of the operands' maxima.
10901 	 */
10902 	dst_reg->u32_min_value = var32_off.value;
10903 	dst_reg->u32_max_value = min(dst_reg->u32_max_value, umax_val);
10904 	if (dst_reg->s32_min_value < 0 || smin_val < 0) {
10905 		/* Lose signed bounds when ANDing negative numbers,
10906 		 * ain't nobody got time for that.
10907 		 */
10908 		dst_reg->s32_min_value = S32_MIN;
10909 		dst_reg->s32_max_value = S32_MAX;
10910 	} else {
10911 		/* ANDing two positives gives a positive, so safe to
10912 		 * cast result into s32.
10913 		 */
10914 		dst_reg->s32_min_value = dst_reg->u32_min_value;
10915 		dst_reg->s32_max_value = dst_reg->u32_max_value;
10916 	}
10917 }
10918 
10919 static void scalar_min_max_and(struct bpf_reg_state *dst_reg,
10920 			       struct bpf_reg_state *src_reg)
10921 {
10922 	bool src_known = tnum_is_const(src_reg->var_off);
10923 	bool dst_known = tnum_is_const(dst_reg->var_off);
10924 	s64 smin_val = src_reg->smin_value;
10925 	u64 umax_val = src_reg->umax_value;
10926 
10927 	if (src_known && dst_known) {
10928 		__mark_reg_known(dst_reg, dst_reg->var_off.value);
10929 		return;
10930 	}
10931 
10932 	/* We get our minimum from the var_off, since that's inherently
10933 	 * bitwise.  Our maximum is the minimum of the operands' maxima.
10934 	 */
10935 	dst_reg->umin_value = dst_reg->var_off.value;
10936 	dst_reg->umax_value = min(dst_reg->umax_value, umax_val);
10937 	if (dst_reg->smin_value < 0 || smin_val < 0) {
10938 		/* Lose signed bounds when ANDing negative numbers,
10939 		 * ain't nobody got time for that.
10940 		 */
10941 		dst_reg->smin_value = S64_MIN;
10942 		dst_reg->smax_value = S64_MAX;
10943 	} else {
10944 		/* ANDing two positives gives a positive, so safe to
10945 		 * cast result into s64.
10946 		 */
10947 		dst_reg->smin_value = dst_reg->umin_value;
10948 		dst_reg->smax_value = dst_reg->umax_value;
10949 	}
10950 	/* We may learn something more from the var_off */
10951 	__update_reg_bounds(dst_reg);
10952 }
10953 
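/* Illustrative example for the AND bounds above: if the resulting var_off
 * is (value == 0x10, mask == 0x0f), every concrete value has bit 4 set,
 * so umin_value becomes var_off.value == 0x10. AND can only clear bits,
 * hence umax_value is capped by the smaller of the operands' maxima.
 */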
10954 static void scalar32_min_max_or(struct bpf_reg_state *dst_reg,
10955 				struct bpf_reg_state *src_reg)
10956 {
10957 	bool src_known = tnum_subreg_is_const(src_reg->var_off);
10958 	bool dst_known = tnum_subreg_is_const(dst_reg->var_off);
10959 	struct tnum var32_off = tnum_subreg(dst_reg->var_off);
10960 	s32 smin_val = src_reg->s32_min_value;
10961 	u32 umin_val = src_reg->u32_min_value;
10962 
10963 	if (src_known && dst_known) {
10964 		__mark_reg32_known(dst_reg, var32_off.value);
10965 		return;
10966 	}
10967 
10968 	/* We get our maximum from the var_off, and our minimum is the
10969 	 * maximum of the operands' minima
10970 	 */
10971 	dst_reg->u32_min_value = max(dst_reg->u32_min_value, umin_val);
10972 	dst_reg->u32_max_value = var32_off.value | var32_off.mask;
10973 	if (dst_reg->s32_min_value < 0 || smin_val < 0) {
10974 		/* Lose signed bounds when ORing negative numbers,
10975 		 * ain't nobody got time for that.
10976 		 */
10977 		dst_reg->s32_min_value = S32_MIN;
10978 		dst_reg->s32_max_value = S32_MAX;
10979 	} else {
10980 		/* ORing two positives gives a positive, so safe to
10981 		 * cast result into s32.
10982 		 */
10983 		dst_reg->s32_min_value = dst_reg->u32_min_value;
10984 		dst_reg->s32_max_value = dst_reg->u32_max_value;
10985 	}
10986 }
10987 
10988 static void scalar_min_max_or(struct bpf_reg_state *dst_reg,
10989 			      struct bpf_reg_state *src_reg)
10990 {
10991 	bool src_known = tnum_is_const(src_reg->var_off);
10992 	bool dst_known = tnum_is_const(dst_reg->var_off);
10993 	s64 smin_val = src_reg->smin_value;
10994 	u64 umin_val = src_reg->umin_value;
10995 
10996 	if (src_known && dst_known) {
10997 		__mark_reg_known(dst_reg, dst_reg->var_off.value);
10998 		return;
10999 	}
11000 
11001 	/* We get our maximum from the var_off, and our minimum is the
11002 	 * maximum of the operands' minima
11003 	 */
11004 	dst_reg->umin_value = max(dst_reg->umin_value, umin_val);
11005 	dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask;
11006 	if (dst_reg->smin_value < 0 || smin_val < 0) {
11007 		/* Lose signed bounds when ORing negative numbers,
11008 		 * ain't nobody got time for that.
11009 		 */
11010 		dst_reg->smin_value = S64_MIN;
11011 		dst_reg->smax_value = S64_MAX;
11012 	} else {
11013 		/* ORing two positives gives a positive, so safe to
11014 		 * cast result into s64.
11015 		 */
11016 		dst_reg->smin_value = dst_reg->umin_value;
11017 		dst_reg->smax_value = dst_reg->umax_value;
11018 	}
11019 	/* We may learn something more from the var_off */
11020 	__update_reg_bounds(dst_reg);
11021 }
11022 
11023 static void scalar32_min_max_xor(struct bpf_reg_state *dst_reg,
11024 				 struct bpf_reg_state *src_reg)
11025 {
11026 	bool src_known = tnum_subreg_is_const(src_reg->var_off);
11027 	bool dst_known = tnum_subreg_is_const(dst_reg->var_off);
11028 	struct tnum var32_off = tnum_subreg(dst_reg->var_off);
11029 	s32 smin_val = src_reg->s32_min_value;
11030 
11031 	if (src_known && dst_known) {
11032 		__mark_reg32_known(dst_reg, var32_off.value);
11033 		return;
11034 	}
11035 
11036 	/* We get both minimum and maximum from the var32_off. */
11037 	dst_reg->u32_min_value = var32_off.value;
11038 	dst_reg->u32_max_value = var32_off.value | var32_off.mask;
11039 
11040 	if (dst_reg->s32_min_value >= 0 && smin_val >= 0) {
11041 		/* XORing two positive sign numbers gives a positive,
11042 		 * so safe to cast u32 result into s32.
11043 		 */
11044 		dst_reg->s32_min_value = dst_reg->u32_min_value;
11045 		dst_reg->s32_max_value = dst_reg->u32_max_value;
11046 	} else {
11047 		dst_reg->s32_min_value = S32_MIN;
11048 		dst_reg->s32_max_value = S32_MAX;
11049 	}
11050 }
11051 
11052 static void scalar_min_max_xor(struct bpf_reg_state *dst_reg,
11053 			       struct bpf_reg_state *src_reg)
11054 {
11055 	bool src_known = tnum_is_const(src_reg->var_off);
11056 	bool dst_known = tnum_is_const(dst_reg->var_off);
11057 	s64 smin_val = src_reg->smin_value;
11058 
11059 	if (src_known && dst_known) {
11060 		/* dst_reg->var_off.value has been updated earlier */
11061 		__mark_reg_known(dst_reg, dst_reg->var_off.value);
11062 		return;
11063 	}
11064 
11065 	/* We get both minimum and maximum from the var_off. */
11066 	dst_reg->umin_value = dst_reg->var_off.value;
11067 	dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask;
11068 
11069 	if (dst_reg->smin_value >= 0 && smin_val >= 0) {
11070 		/* XORing two positive sign numbers gives a positive,
11071 		 * so safe to cast u64 result into s64.
11072 		 */
11073 		dst_reg->smin_value = dst_reg->umin_value;
11074 		dst_reg->smax_value = dst_reg->umax_value;
11075 	} else {
11076 		dst_reg->smin_value = S64_MIN;
11077 		dst_reg->smax_value = S64_MAX;
11078 	}
11079 
11080 	__update_reg_bounds(dst_reg);
11081 }
11082 
11083 static void __scalar32_min_max_lsh(struct bpf_reg_state *dst_reg,
11084 				   u64 umin_val, u64 umax_val)
11085 {
11086 	/* We lose all sign bit information (except what we can pick
11087 	 * up from var_off)
11088 	 */
11089 	dst_reg->s32_min_value = S32_MIN;
11090 	dst_reg->s32_max_value = S32_MAX;
11091 	/* If we might shift our top bit out, then we know nothing */
11092 	if (umax_val > 31 || dst_reg->u32_max_value > 1ULL << (31 - umax_val)) {
11093 		dst_reg->u32_min_value = 0;
11094 		dst_reg->u32_max_value = U32_MAX;
11095 	} else {
11096 		dst_reg->u32_min_value <<= umin_val;
11097 		dst_reg->u32_max_value <<= umax_val;
11098 	}
11099 }
11100 
11101 static void scalar32_min_max_lsh(struct bpf_reg_state *dst_reg,
11102 				 struct bpf_reg_state *src_reg)
11103 {
11104 	u32 umax_val = src_reg->u32_max_value;
11105 	u32 umin_val = src_reg->u32_min_value;
11106 	/* u32 alu operation will zext upper bits */
11107 	struct tnum subreg = tnum_subreg(dst_reg->var_off);
11108 
11109 	__scalar32_min_max_lsh(dst_reg, umin_val, umax_val);
11110 	dst_reg->var_off = tnum_subreg(tnum_lshift(subreg, umin_val));
11111 	/* Not strictly required, but to be careful mark the reg64 bounds as
11112 	 * unknown so that we are forced to pick them up from the tnum and
11113 	 * zero-extend later; if some path skips this step we are still safe.
11114 	 */
11115 	__mark_reg64_unbounded(dst_reg);
11116 	__update_reg32_bounds(dst_reg);
11117 }
11118 
11119 static void __scalar64_min_max_lsh(struct bpf_reg_state *dst_reg,
11120 				   u64 umin_val, u64 umax_val)
11121 {
11122 	/* Special case <<32 because it is a common compiler pattern to sign
11123 	 * extend subreg by doing <<32 s>>32. In this case if 32bit bounds are
11124 	 * positive we know this shift will also be positive so we can track
11125 	 * bounds correctly. Otherwise we lose all sign bit information except
11126 	 * what we can pick up from var_off. Perhaps we can generalize this
11127 	 * later to shifts of any length.
11128 	 */
11129 	if (umin_val == 32 && umax_val == 32 && dst_reg->s32_max_value >= 0)
11130 		dst_reg->smax_value = (s64)dst_reg->s32_max_value << 32;
11131 	else
11132 		dst_reg->smax_value = S64_MAX;
11133 
11134 	if (umin_val == 32 && umax_val == 32 && dst_reg->s32_min_value >= 0)
11135 		dst_reg->smin_value = (s64)dst_reg->s32_min_value << 32;
11136 	else
11137 		dst_reg->smin_value = S64_MIN;
11138 
11139 	/* If we might shift our top bit out, then we know nothing */
11140 	if (dst_reg->umax_value > 1ULL << (63 - umax_val)) {
11141 		dst_reg->umin_value = 0;
11142 		dst_reg->umax_value = U64_MAX;
11143 	} else {
11144 		dst_reg->umin_value <<= umin_val;
11145 		dst_reg->umax_value <<= umax_val;
11146 	}
11147 }
11148 
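/* Illustrative example of the <<32 special case above: with 32-bit bounds
 * [0, 100], an exact shift by 32 yields s64 bounds [0, 100LL << 32], so
 * the s>>32 half of the sign-extension idiom recovers [0, 100] instead
 * of collapsing to [S64_MIN, S64_MAX].
 */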
11149 static void scalar_min_max_lsh(struct bpf_reg_state *dst_reg,
11150 			       struct bpf_reg_state *src_reg)
11151 {
11152 	u64 umax_val = src_reg->umax_value;
11153 	u64 umin_val = src_reg->umin_value;
11154 
11155 	/* scalar64 calc uses 32bit unshifted bounds so must be called first */
11156 	__scalar64_min_max_lsh(dst_reg, umin_val, umax_val);
11157 	__scalar32_min_max_lsh(dst_reg, umin_val, umax_val);
11158 
11159 	dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val);
11160 	/* We may learn something more from the var_off */
11161 	__update_reg_bounds(dst_reg);
11162 }
11163 
11164 static void scalar32_min_max_rsh(struct bpf_reg_state *dst_reg,
11165 				 struct bpf_reg_state *src_reg)
11166 {
11167 	struct tnum subreg = tnum_subreg(dst_reg->var_off);
11168 	u32 umax_val = src_reg->u32_max_value;
11169 	u32 umin_val = src_reg->u32_min_value;
11170 
11171 	/* BPF_RSH is an unsigned shift.  If the value in dst_reg might
11172 	 * be negative, then either:
11173 	 * 1) src_reg might be zero, so the sign bit of the result is
11174 	 *    unknown, so we lose our signed bounds
11175 	 * 2) it's known negative, thus the unsigned bounds capture the
11176 	 *    signed bounds
11177 	 * 3) the signed bounds cross zero, so they tell us nothing
11178 	 *    about the result
11179 	 * If the value in dst_reg is known nonnegative, then again the
11180 	 * unsigned bounds capture the signed bounds.
11181 	 * Thus, in all cases it suffices to blow away our signed bounds
11182 	 * and rely on inferring new ones from the unsigned bounds and
11183 	 * var_off of the result.
11184 	 */
11185 	dst_reg->s32_min_value = S32_MIN;
11186 	dst_reg->s32_max_value = S32_MAX;
11187 
11188 	dst_reg->var_off = tnum_rshift(subreg, umin_val);
11189 	dst_reg->u32_min_value >>= umax_val;
11190 	dst_reg->u32_max_value >>= umin_val;
11191 
11192 	__mark_reg64_unbounded(dst_reg);
11193 	__update_reg32_bounds(dst_reg);
11194 }
11195 
11196 static void scalar_min_max_rsh(struct bpf_reg_state *dst_reg,
11197 			       struct bpf_reg_state *src_reg)
11198 {
11199 	u64 umax_val = src_reg->umax_value;
11200 	u64 umin_val = src_reg->umin_value;
11201 
11202 	/* BPF_RSH is an unsigned shift.  If the value in dst_reg might
11203 	 * be negative, then either:
11204 	 * 1) src_reg might be zero, so the sign bit of the result is
11205 	 *    unknown, so we lose our signed bounds
11206 	 * 2) it's known negative, thus the unsigned bounds capture the
11207 	 *    signed bounds
11208 	 * 3) the signed bounds cross zero, so they tell us nothing
11209 	 *    about the result
11210 	 * If the value in dst_reg is known nonnegative, then again the
11211 	 * unsigned bounds capture the signed bounds.
11212 	 * Thus, in all cases it suffices to blow away our signed bounds
11213 	 * and rely on inferring new ones from the unsigned bounds and
11214 	 * var_off of the result.
11215 	 */
11216 	dst_reg->smin_value = S64_MIN;
11217 	dst_reg->smax_value = S64_MAX;
11218 	dst_reg->var_off = tnum_rshift(dst_reg->var_off, umin_val);
11219 	dst_reg->umin_value >>= umax_val;
11220 	dst_reg->umax_value >>= umin_val;
11221 
11222 	/* It's not easy to operate on alu32 bounds here because it depends
11223 	 * on bits being shifted in. Take easy way out and mark unbounded
11224 	 * so we can recalculate later from tnum.
11225 	 */
11226 	__mark_reg32_unbounded(dst_reg);
11227 	__update_reg_bounds(dst_reg);
11228 }
11229 
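/* Illustrative example: for dst unsigned bounds [8, 16] shifted right by
 * a src in [1, 2], the new bounds are [8 >> 2, 16 >> 1] == [2, 8]: the
 * minimum shrinks by the largest possible shift, the maximum by the
 * smallest.
 */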
11230 static void scalar32_min_max_arsh(struct bpf_reg_state *dst_reg,
11231 				  struct bpf_reg_state *src_reg)
11232 {
11233 	u64 umin_val = src_reg->u32_min_value;
11234 
11235 	/* Upon reaching here, src_known is true and
11236 	 * umax_val is equal to umin_val.
11237 	 */
11238 	dst_reg->s32_min_value = (u32)(((s32)dst_reg->s32_min_value) >> umin_val);
11239 	dst_reg->s32_max_value = (u32)(((s32)dst_reg->s32_max_value) >> umin_val);
11240 
11241 	dst_reg->var_off = tnum_arshift(tnum_subreg(dst_reg->var_off), umin_val, 32);
11242 
11243 	/* blow away the dst_reg umin_value/umax_value and rely on
11244 	 * dst_reg var_off to refine the result.
11245 	 */
11246 	dst_reg->u32_min_value = 0;
11247 	dst_reg->u32_max_value = U32_MAX;
11248 
11249 	__mark_reg64_unbounded(dst_reg);
11250 	__update_reg32_bounds(dst_reg);
11251 }
11252 
11253 static void scalar_min_max_arsh(struct bpf_reg_state *dst_reg,
11254 				struct bpf_reg_state *src_reg)
11255 {
11256 	u64 umin_val = src_reg->umin_value;
11257 
11258 	/* Upon reaching here, src_known is true and umax_val is equal
11259 	 * to umin_val.
11260 	 */
11261 	dst_reg->smin_value >>= umin_val;
11262 	dst_reg->smax_value >>= umin_val;
11263 
11264 	dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val, 64);
11265 
11266 	/* blow away the dst_reg umin_value/umax_value and rely on
11267 	 * dst_reg var_off to refine the result.
11268 	 */
11269 	dst_reg->umin_value = 0;
11270 	dst_reg->umax_value = U64_MAX;
11271 
11272 	/* It's not easy to operate on alu32 bounds here because it depends
11273 	 * on bits being shifted in from upper 32-bits. Take easy way out
11274 	 * and mark unbounded so we can recalculate later from tnum.
11275 	 */
11276 	__mark_reg32_unbounded(dst_reg);
11277 	__update_reg_bounds(dst_reg);
11278 }
11279 
11280 /* WARNING: This function does calculations on 64-bit values, but the actual
11281  * execution may occur on 32-bit values. Therefore, things like bitshifts
11282  * need extra checks in the 32-bit case.
11283  */
11284 static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
11285 				      struct bpf_insn *insn,
11286 				      struct bpf_reg_state *dst_reg,
11287 				      struct bpf_reg_state src_reg)
11288 {
11289 	struct bpf_reg_state *regs = cur_regs(env);
11290 	u8 opcode = BPF_OP(insn->code);
11291 	bool src_known;
11292 	s64 smin_val, smax_val;
11293 	u64 umin_val, umax_val;
11294 	s32 s32_min_val, s32_max_val;
11295 	u32 u32_min_val, u32_max_val;
11296 	u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
11297 	bool alu32 = (BPF_CLASS(insn->code) != BPF_ALU64);
11298 	int ret;
11299 
11300 	smin_val = src_reg.smin_value;
11301 	smax_val = src_reg.smax_value;
11302 	umin_val = src_reg.umin_value;
11303 	umax_val = src_reg.umax_value;
11304 
11305 	s32_min_val = src_reg.s32_min_value;
11306 	s32_max_val = src_reg.s32_max_value;
11307 	u32_min_val = src_reg.u32_min_value;
11308 	u32_max_val = src_reg.u32_max_value;
11309 
11310 	if (alu32) {
11311 		src_known = tnum_subreg_is_const(src_reg.var_off);
11312 		if ((src_known &&
11313 		     (s32_min_val != s32_max_val || u32_min_val != u32_max_val)) ||
11314 		    s32_min_val > s32_max_val || u32_min_val > u32_max_val) {
11315 			/* Taint dst register if offset had invalid bounds
11316 			 * derived from e.g. dead branches.
11317 			 */
11318 			__mark_reg_unknown(env, dst_reg);
11319 			return 0;
11320 		}
11321 	} else {
11322 		src_known = tnum_is_const(src_reg.var_off);
11323 		if ((src_known &&
11324 		     (smin_val != smax_val || umin_val != umax_val)) ||
11325 		    smin_val > smax_val || umin_val > umax_val) {
11326 			/* Taint dst register if offset had invalid bounds
11327 			 * derived from e.g. dead branches.
11328 			 */
11329 			__mark_reg_unknown(env, dst_reg);
11330 			return 0;
11331 		}
11332 	}
11333 
11334 	if (!src_known &&
11335 	    opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) {
11336 		__mark_reg_unknown(env, dst_reg);
11337 		return 0;
11338 	}
11339 
11340 	if (sanitize_needed(opcode)) {
11341 		ret = sanitize_val_alu(env, insn);
11342 		if (ret < 0)
11343 			return sanitize_err(env, insn, ret, NULL, NULL);
11344 	}
11345 
11346 	/* Calculate sign/unsigned bounds and tnum for alu32 and alu64 bit ops.
11347 	 * There are two classes of instructions: for the first class we track
11348 	 * both alu32 and alu64 sign/unsigned bounds independently; this gives
11349 	 * the greatest amount of precision when alu operations are mixed with
11350 	 * jmp32 operations. These operations are BPF_ADD, BPF_SUB, BPF_MUL,
11351 	 * BPF_AND, BPF_OR, and BPF_XOR. This is possible because these ops
11352 	 * have fairly easy to understand and calculate behavior in both 32-bit
11353 	 * and 64-bit alu ops. See alu32 verifier tests for examples. The
11354 	 * second class of operations, BPF_LSH, BPF_RSH, and BPF_ARSH, however,
11355 	 * is not so easy with regard to tracking sign/unsigned bounds because
11356 	 * the bits may cross subreg boundaries in the alu64 case. When this
11357 	 * happens we mark the reg unbounded in the subreg bound space and use
11358 	 * the resulting tnum to calculate an approximation of the bounds.
11359 	 */
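	/* For example, an alu64 "r0 <<= 32" with r0 in [0, 1] moves the low
	 * subreg into the upper half; u32 bounds computed from the old subreg
	 * bounds alone would be meaningless, so they are recomputed from the
	 * shifted tnum instead.
	 */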
11360 	switch (opcode) {
11361 	case BPF_ADD:
11362 		scalar32_min_max_add(dst_reg, &src_reg);
11363 		scalar_min_max_add(dst_reg, &src_reg);
11364 		dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);
11365 		break;
11366 	case BPF_SUB:
11367 		scalar32_min_max_sub(dst_reg, &src_reg);
11368 		scalar_min_max_sub(dst_reg, &src_reg);
11369 		dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off);
11370 		break;
11371 	case BPF_MUL:
11372 		dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off);
11373 		scalar32_min_max_mul(dst_reg, &src_reg);
11374 		scalar_min_max_mul(dst_reg, &src_reg);
11375 		break;
11376 	case BPF_AND:
11377 		dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off);
11378 		scalar32_min_max_and(dst_reg, &src_reg);
11379 		scalar_min_max_and(dst_reg, &src_reg);
11380 		break;
11381 	case BPF_OR:
11382 		dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off);
11383 		scalar32_min_max_or(dst_reg, &src_reg);
11384 		scalar_min_max_or(dst_reg, &src_reg);
11385 		break;
11386 	case BPF_XOR:
11387 		dst_reg->var_off = tnum_xor(dst_reg->var_off, src_reg.var_off);
11388 		scalar32_min_max_xor(dst_reg, &src_reg);
11389 		scalar_min_max_xor(dst_reg, &src_reg);
11390 		break;
11391 	case BPF_LSH:
11392 		if (umax_val >= insn_bitness) {
11393 			/* Shifts greater than 31 or 63 are undefined.
11394 			 * This includes shifts by a negative number.
11395 			 */
11396 			mark_reg_unknown(env, regs, insn->dst_reg);
11397 			break;
11398 		}
11399 		if (alu32)
11400 			scalar32_min_max_lsh(dst_reg, &src_reg);
11401 		else
11402 			scalar_min_max_lsh(dst_reg, &src_reg);
11403 		break;
11404 	case BPF_RSH:
11405 		if (umax_val >= insn_bitness) {
11406 			/* Shifts greater than 31 or 63 are undefined.
11407 			 * This includes shifts by a negative number.
11408 			 */
11409 			mark_reg_unknown(env, regs, insn->dst_reg);
11410 			break;
11411 		}
11412 		if (alu32)
11413 			scalar32_min_max_rsh(dst_reg, &src_reg);
11414 		else
11415 			scalar_min_max_rsh(dst_reg, &src_reg);
11416 		break;
11417 	case BPF_ARSH:
11418 		if (umax_val >= insn_bitness) {
11419 			/* Shifts greater than 31 or 63 are undefined.
11420 			 * This includes shifts by a negative number.
11421 			 */
11422 			mark_reg_unknown(env, regs, insn->dst_reg);
11423 			break;
11424 		}
11425 		if (alu32)
11426 			scalar32_min_max_arsh(dst_reg, &src_reg);
11427 		else
11428 			scalar_min_max_arsh(dst_reg, &src_reg);
11429 		break;
11430 	default:
11431 		mark_reg_unknown(env, regs, insn->dst_reg);
11432 		break;
11433 	}
11434 
11435 	/* ALU32 ops are zero extended into 64bit register */
11436 	if (alu32)
11437 		zext_32_to_64(dst_reg);
11438 	reg_bounds_sync(dst_reg);
11439 	return 0;
11440 }
11441 
11442 /* Handles ALU ops other than BPF_END, BPF_NEG and BPF_MOV: computes new min/max
11443  * and var_off.
11444  */
11445 static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
11446 				   struct bpf_insn *insn)
11447 {
11448 	struct bpf_verifier_state *vstate = env->cur_state;
11449 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
11450 	struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg;
11451 	struct bpf_reg_state *ptr_reg = NULL, off_reg = {0};
11452 	u8 opcode = BPF_OP(insn->code);
11453 	int err;
11454 
11455 	dst_reg = &regs[insn->dst_reg];
11456 	src_reg = NULL;
11457 	if (dst_reg->type != SCALAR_VALUE)
11458 		ptr_reg = dst_reg;
11459 	else
11460 		/* Make sure ID is cleared, otherwise dst_reg min/max could be
11461 		 * incorrectly propagated into other registers by find_equal_scalars()
11462 		 */
11463 		dst_reg->id = 0;
11464 	if (BPF_SRC(insn->code) == BPF_X) {
11465 		src_reg = &regs[insn->src_reg];
11466 		if (src_reg->type != SCALAR_VALUE) {
11467 			if (dst_reg->type != SCALAR_VALUE) {
11468 				/* Combining two pointers by any ALU op yields
11469 				 * an arbitrary scalar. Disallow all math except
11470 				 * pointer subtraction
11471 				 */
11472 				if (opcode == BPF_SUB && env->allow_ptr_leaks) {
11473 					mark_reg_unknown(env, regs, insn->dst_reg);
11474 					return 0;
11475 				}
11476 				verbose(env, "R%d pointer %s pointer prohibited\n",
11477 					insn->dst_reg,
11478 					bpf_alu_string[opcode >> 4]);
11479 				return -EACCES;
11480 			} else {
11481 				/* scalar += pointer
11482 				 * This is legal, but we have to reverse our
11483 				 * src/dest handling in computing the range
11484 				 */
11485 				err = mark_chain_precision(env, insn->dst_reg);
11486 				if (err)
11487 					return err;
11488 				return adjust_ptr_min_max_vals(env, insn,
11489 							       src_reg, dst_reg);
11490 			}
11491 		} else if (ptr_reg) {
11492 			/* pointer += scalar */
11493 			err = mark_chain_precision(env, insn->src_reg);
11494 			if (err)
11495 				return err;
11496 			return adjust_ptr_min_max_vals(env, insn,
11497 						       dst_reg, src_reg);
11498 		} else if (dst_reg->precise) {
11499 			/* if dst_reg is precise, src_reg should be precise as well */
11500 			err = mark_chain_precision(env, insn->src_reg);
11501 			if (err)
11502 				return err;
11503 		}
11504 	} else {
11505 		/* Pretend the src is a reg with a known value, since we only
11506 		 * need to be able to read from this state.
11507 		 */
11508 		off_reg.type = SCALAR_VALUE;
11509 		__mark_reg_known(&off_reg, insn->imm);
11510 		src_reg = &off_reg;
11511 		if (ptr_reg) /* pointer += K */
11512 			return adjust_ptr_min_max_vals(env, insn,
11513 						       ptr_reg, src_reg);
11514 	}
11515 
11516 	/* Getting here implies an ALU op on two SCALAR_VALUEs */
11517 	if (WARN_ON_ONCE(ptr_reg)) {
11518 		print_verifier_state(env, state, true);
11519 		verbose(env, "verifier internal error: unexpected ptr_reg\n");
11520 		return -EINVAL;
11521 	}
11522 	if (WARN_ON(!src_reg)) {
11523 		print_verifier_state(env, state, true);
11524 		verbose(env, "verifier internal error: no src_reg\n");
11525 		return -EINVAL;
11526 	}
11527 	return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg);
11528 }
11529 
11530 /* check validity of 32-bit and 64-bit arithmetic operations */
11531 static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
11532 {
11533 	struct bpf_reg_state *regs = cur_regs(env);
11534 	u8 opcode = BPF_OP(insn->code);
11535 	int err;
11536 
11537 	if (opcode == BPF_END || opcode == BPF_NEG) {
11538 		if (opcode == BPF_NEG) {
11539 			if (BPF_SRC(insn->code) != BPF_K ||
11540 			    insn->src_reg != BPF_REG_0 ||
11541 			    insn->off != 0 || insn->imm != 0) {
11542 				verbose(env, "BPF_NEG uses reserved fields\n");
11543 				return -EINVAL;
11544 			}
11545 		} else {
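			/* BPF_END byte swap: the swap width (16, 32 or 64
			 * bits) is encoded in insn->imm, and only the 32-bit
			 * BPF_ALU class encoding is valid for it.
			 */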
11546 			if (insn->src_reg != BPF_REG_0 || insn->off != 0 ||
11547 			    (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) ||
11548 			    BPF_CLASS(insn->code) == BPF_ALU64) {
11549 				verbose(env, "BPF_END uses reserved fields\n");
11550 				return -EINVAL;
11551 			}
11552 		}
11553 
11554 		/* check src operand */
11555 		err = check_reg_arg(env, insn->dst_reg, SRC_OP);
11556 		if (err)
11557 			return err;
11558 
11559 		if (is_pointer_value(env, insn->dst_reg)) {
11560 			verbose(env, "R%d pointer arithmetic prohibited\n",
11561 				insn->dst_reg);
11562 			return -EACCES;
11563 		}
11564 
11565 		/* check dest operand */
11566 		err = check_reg_arg(env, insn->dst_reg, DST_OP);
11567 		if (err)
11568 			return err;
11569 
11570 	} else if (opcode == BPF_MOV) {
11571 
11572 		if (BPF_SRC(insn->code) == BPF_X) {
11573 			if (insn->imm != 0 || insn->off != 0) {
11574 				verbose(env, "BPF_MOV uses reserved fields\n");
11575 				return -EINVAL;
11576 			}
11577 
11578 			/* check src operand */
11579 			err = check_reg_arg(env, insn->src_reg, SRC_OP);
11580 			if (err)
11581 				return err;
11582 		} else {
11583 			if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
11584 				verbose(env, "BPF_MOV uses reserved fields\n");
11585 				return -EINVAL;
11586 			}
11587 		}
11588 
11589 		/* check dest operand, mark as required later */
11590 		err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
11591 		if (err)
11592 			return err;
11593 
11594 		if (BPF_SRC(insn->code) == BPF_X) {
11595 			struct bpf_reg_state *src_reg = regs + insn->src_reg;
11596 			struct bpf_reg_state *dst_reg = regs + insn->dst_reg;
11597 
11598 			if (BPF_CLASS(insn->code) == BPF_ALU64) {
11599 				/* case: R1 = R2
11600 				 * copy register state to dest reg
11601 				 */
11602 				if (src_reg->type == SCALAR_VALUE && !src_reg->id)
11603 					/* Assign src and dst registers the same ID
11604 					 * that will be used by find_equal_scalars()
11605 					 * to propagate min/max range.
11606 					 */
11607 					src_reg->id = ++env->id_gen;
11608 				copy_register_state(dst_reg, src_reg);
11609 				dst_reg->live |= REG_LIVE_WRITTEN;
11610 				dst_reg->subreg_def = DEF_NOT_SUBREG;
11611 			} else {
11612 				/* R1 = (u32) R2 */
11613 				if (is_pointer_value(env, insn->src_reg)) {
11614 					verbose(env,
11615 						"R%d partial copy of pointer\n",
11616 						insn->src_reg);
11617 					return -EACCES;
11618 				} else if (src_reg->type == SCALAR_VALUE) {
11619 					copy_register_state(dst_reg, src_reg);
11620 					/* Make sure ID is cleared, otherwise
11621 					 * dst_reg min/max could be incorrectly
11622 					 * propagated into src_reg by find_equal_scalars()
11623 					 */
11624 					dst_reg->id = 0;
11625 					dst_reg->live |= REG_LIVE_WRITTEN;
11626 					dst_reg->subreg_def = env->insn_idx + 1;
11627 				} else {
11628 					mark_reg_unknown(env, regs,
11629 							 insn->dst_reg);
11630 				}
11631 				zext_32_to_64(dst_reg);
11632 				reg_bounds_sync(dst_reg);
11633 			}
11634 		} else {
11635 			/* case: R = imm
11636 			 * remember the value we stored into this reg
11637 			 */
11638 			/* clear any state __mark_reg_known doesn't set */
11639 			mark_reg_unknown(env, regs, insn->dst_reg);
11640 			regs[insn->dst_reg].type = SCALAR_VALUE;
11641 			if (BPF_CLASS(insn->code) == BPF_ALU64) {
11642 				__mark_reg_known(regs + insn->dst_reg,
11643 						 insn->imm);
11644 			} else {
11645 				__mark_reg_known(regs + insn->dst_reg,
11646 						 (u32)insn->imm);
11647 			}
11648 		}
11649 
11650 	} else if (opcode > BPF_END) {
11651 		verbose(env, "invalid BPF_ALU opcode %x\n", opcode);
11652 		return -EINVAL;
11653 
11654 	} else {	/* all other ALU ops: and, sub, xor, add, ... */
11655 
11656 		if (BPF_SRC(insn->code) == BPF_X) {
11657 			if (insn->imm != 0 || insn->off != 0) {
11658 				verbose(env, "BPF_ALU uses reserved fields\n");
11659 				return -EINVAL;
11660 			}
11661 			/* check src1 operand */
11662 			err = check_reg_arg(env, insn->src_reg, SRC_OP);
11663 			if (err)
11664 				return err;
11665 		} else {
11666 			if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
11667 				verbose(env, "BPF_ALU uses reserved fields\n");
11668 				return -EINVAL;
11669 			}
11670 		}
11671 
11672 		/* check src2 operand */
11673 		err = check_reg_arg(env, insn->dst_reg, SRC_OP);
11674 		if (err)
11675 			return err;
11676 
11677 		if ((opcode == BPF_MOD || opcode == BPF_DIV) &&
11678 		    BPF_SRC(insn->code) == BPF_K && insn->imm == 0) {
11679 			verbose(env, "div by zero\n");
11680 			return -EINVAL;
11681 		}
11682 
11683 		if ((opcode == BPF_LSH || opcode == BPF_RSH ||
11684 		     opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
11685 			int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;
11686 
11687 			if (insn->imm < 0 || insn->imm >= size) {
11688 				verbose(env, "invalid shift %d\n", insn->imm);
11689 				return -EINVAL;
11690 			}
11691 		}
11692 
11693 		/* check dest operand */
11694 		err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
11695 		if (err)
11696 			return err;
11697 
11698 		return adjust_reg_min_max_vals(env, insn);
11699 	}
11700 
11701 	return 0;
11702 }
11703 
11704 static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
11705 				   struct bpf_reg_state *dst_reg,
11706 				   enum bpf_reg_type type,
11707 				   bool range_right_open)
11708 {
11709 	struct bpf_func_state *state;
11710 	struct bpf_reg_state *reg;
11711 	int new_range;
11712 
11713 	if (dst_reg->off < 0 ||
11714 	    (dst_reg->off == 0 && range_right_open))
11715 		/* This doesn't give us any range */
11716 		return;
11717 
11718 	if (dst_reg->umax_value > MAX_PACKET_OFF ||
11719 	    dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF)
11720 		/* Risk of overflow.  For instance, ptr + (1<<63) may be less
11721 		 * than pkt_end, but that's because it's also less than pkt.
11722 		 */
11723 		return;
11724 
11725 	new_range = dst_reg->off;
11726 	if (range_right_open)
11727 		new_range++;
11728 
11729 	/* Examples for register markings:
11730 	 *
11731 	 * pkt_data in dst register:
11732 	 *
11733 	 *   r2 = r3;
11734 	 *   r2 += 8;
11735 	 *   if (r2 > pkt_end) goto <handle exception>
11736 	 *   <access okay>
11737 	 *
11738 	 *   r2 = r3;
11739 	 *   r2 += 8;
11740 	 *   if (r2 < pkt_end) goto <access okay>
11741 	 *   <handle exception>
11742 	 *
11743 	 *   Where:
11744 	 *     r2 == dst_reg, pkt_end == src_reg
11745 	 *     r2=pkt(id=n,off=8,r=0)
11746 	 *     r3=pkt(id=n,off=0,r=0)
11747 	 *
11748 	 * pkt_data in src register:
11749 	 *
11750 	 *   r2 = r3;
11751 	 *   r2 += 8;
11752 	 *   if (pkt_end >= r2) goto <access okay>
11753 	 *   <handle exception>
11754 	 *
11755 	 *   r2 = r3;
11756 	 *   r2 += 8;
11757 	 *   if (pkt_end <= r2) goto <handle exception>
11758 	 *   <access okay>
11759 	 *
11760 	 *   Where:
11761 	 *     pkt_end == dst_reg, r2 == src_reg
11762 	 *     r2=pkt(id=n,off=8,r=0)
11763 	 *     r3=pkt(id=n,off=0,r=0)
11764 	 *
11765 	 * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8)
11766 	 * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8)
11767 	 * and [r3, r3 + 8-1) respectively is safe to access depending on
11768 	 * the check.
11769 	 */
11770 
11771 	/* If our ids match, then we must have the same max_value.  And we
11772 	 * don't care about the other reg's fixed offset, since if it's too big
11773 	 * the range won't allow anything.
11774 	 * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16.
11775 	 */
11776 	bpf_for_each_reg_in_vstate(vstate, state, reg, ({
11777 		if (reg->type == type && reg->id == dst_reg->id)
11778 			/* keep the maximum range already checked */
11779 			reg->range = max(reg->range, new_range);
11780 	}));
11781 }
11782 
11783 static int is_branch32_taken(struct bpf_reg_state *reg, u32 val, u8 opcode)
11784 {
11785 	struct tnum subreg = tnum_subreg(reg->var_off);
11786 	s32 sval = (s32)val;
11787 
11788 	switch (opcode) {
11789 	case BPF_JEQ:
11790 		if (tnum_is_const(subreg))
11791 			return !!tnum_equals_const(subreg, val);
11792 		break;
11793 	case BPF_JNE:
11794 		if (tnum_is_const(subreg))
11795 			return !tnum_equals_const(subreg, val);
11796 		break;
11797 	case BPF_JSET:
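		/* In a tnum, "value" holds the bits known to be one and "mask"
		 * the unknown bits. If any known-one bit overlaps val, the
		 * test is always true; if no possibly-one bit overlaps val,
		 * it is always false.
		 */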
11798 		if ((~subreg.mask & subreg.value) & val)
11799 			return 1;
11800 		if (!((subreg.mask | subreg.value) & val))
11801 			return 0;
11802 		break;
11803 	case BPF_JGT:
11804 		if (reg->u32_min_value > val)
11805 			return 1;
11806 		else if (reg->u32_max_value <= val)
11807 			return 0;
11808 		break;
11809 	case BPF_JSGT:
11810 		if (reg->s32_min_value > sval)
11811 			return 1;
11812 		else if (reg->s32_max_value <= sval)
11813 			return 0;
11814 		break;
11815 	case BPF_JLT:
11816 		if (reg->u32_max_value < val)
11817 			return 1;
11818 		else if (reg->u32_min_value >= val)
11819 			return 0;
11820 		break;
11821 	case BPF_JSLT:
11822 		if (reg->s32_max_value < sval)
11823 			return 1;
11824 		else if (reg->s32_min_value >= sval)
11825 			return 0;
11826 		break;
11827 	case BPF_JGE:
11828 		if (reg->u32_min_value >= val)
11829 			return 1;
11830 		else if (reg->u32_max_value < val)
11831 			return 0;
11832 		break;
11833 	case BPF_JSGE:
11834 		if (reg->s32_min_value >= sval)
11835 			return 1;
11836 		else if (reg->s32_max_value < sval)
11837 			return 0;
11838 		break;
11839 	case BPF_JLE:
11840 		if (reg->u32_max_value <= val)
11841 			return 1;
11842 		else if (reg->u32_min_value > val)
11843 			return 0;
11844 		break;
11845 	case BPF_JSLE:
11846 		if (reg->s32_max_value <= sval)
11847 			return 1;
11848 		else if (reg->s32_min_value > sval)
11849 			return 0;
11850 		break;
11851 	}
11852 
11853 	return -1;
11854 }
11855 
11857 static int is_branch64_taken(struct bpf_reg_state *reg, u64 val, u8 opcode)
11858 {
11859 	s64 sval = (s64)val;
11860 
11861 	switch (opcode) {
11862 	case BPF_JEQ:
11863 		if (tnum_is_const(reg->var_off))
11864 			return !!tnum_equals_const(reg->var_off, val);
11865 		break;
11866 	case BPF_JNE:
11867 		if (tnum_is_const(reg->var_off))
11868 			return !tnum_equals_const(reg->var_off, val);
11869 		break;
11870 	case BPF_JSET:
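		/* Same tnum reasoning as in is_branch32_taken(), applied to
		 * the full 64-bit value.
		 */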
11871 		if ((~reg->var_off.mask & reg->var_off.value) & val)
11872 			return 1;
11873 		if (!((reg->var_off.mask | reg->var_off.value) & val))
11874 			return 0;
11875 		break;
11876 	case BPF_JGT:
11877 		if (reg->umin_value > val)
11878 			return 1;
11879 		else if (reg->umax_value <= val)
11880 			return 0;
11881 		break;
11882 	case BPF_JSGT:
11883 		if (reg->smin_value > sval)
11884 			return 1;
11885 		else if (reg->smax_value <= sval)
11886 			return 0;
11887 		break;
11888 	case BPF_JLT:
11889 		if (reg->umax_value < val)
11890 			return 1;
11891 		else if (reg->umin_value >= val)
11892 			return 0;
11893 		break;
11894 	case BPF_JSLT:
11895 		if (reg->smax_value < sval)
11896 			return 1;
11897 		else if (reg->smin_value >= sval)
11898 			return 0;
11899 		break;
11900 	case BPF_JGE:
11901 		if (reg->umin_value >= val)
11902 			return 1;
11903 		else if (reg->umax_value < val)
11904 			return 0;
11905 		break;
11906 	case BPF_JSGE:
11907 		if (reg->smin_value >= sval)
11908 			return 1;
11909 		else if (reg->smax_value < sval)
11910 			return 0;
11911 		break;
11912 	case BPF_JLE:
11913 		if (reg->umax_value <= val)
11914 			return 1;
11915 		else if (reg->umin_value > val)
11916 			return 0;
11917 		break;
11918 	case BPF_JSLE:
11919 		if (reg->smax_value <= sval)
11920 			return 1;
11921 		else if (reg->smin_value > sval)
11922 			return 0;
11923 		break;
11924 	}
11925 
11926 	return -1;
11927 }
11928 
11929 /* compute branch direction of the expression "if (reg opcode val) goto target;"
11930  * and return:
11931  *  1 - branch will be taken and "goto target" will be executed
11932  *  0 - branch will not be taken and fall-through to next insn
11933  * -1 - unknown. Example: "if (reg < 5)" is unknown when the register's
11934  *      value range is [0,10]
11935  */
11936 static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode,
11937 			   bool is_jmp32)
11938 {
11939 	if (__is_pointer_value(false, reg)) {
11940 		if (!reg_type_not_null(reg->type))
11941 			return -1;
11942 
11943 		/* If the pointer is valid, tests against zero will fail, so
11944 		 * we can use this to determine the branch taken.
11945 		 */
11946 		if (val != 0)
11947 			return -1;
11948 
11949 		switch (opcode) {
11950 		case BPF_JEQ:
11951 			return 0;
11952 		case BPF_JNE:
11953 			return 1;
11954 		default:
11955 			return -1;
11956 		}
11957 	}
11958 
11959 	if (is_jmp32)
11960 		return is_branch32_taken(reg, val, opcode);
11961 	return is_branch64_taken(reg, val, opcode);
11962 }
11963 
11964 static int flip_opcode(u32 opcode)
11965 {
11966 	/* How can we transform "a <op> b" into "b <op> a"? */
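	/* e.g. "r1 > r2" holds exactly when "r2 < r1" does, so BPF_JGT flips
	 * to BPF_JLT; the signed variants flip likewise, while symmetric
	 * tests (JEQ/JNE/JSET) are unchanged.
	 */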
11967 	static const u8 opcode_flip[16] = {
11968 		/* these stay the same */
11969 		[BPF_JEQ  >> 4] = BPF_JEQ,
11970 		[BPF_JNE  >> 4] = BPF_JNE,
11971 		[BPF_JSET >> 4] = BPF_JSET,
11972 		/* these swap "lesser" and "greater" (L and G in the opcodes) */
11973 		[BPF_JGE  >> 4] = BPF_JLE,
11974 		[BPF_JGT  >> 4] = BPF_JLT,
11975 		[BPF_JLE  >> 4] = BPF_JGE,
11976 		[BPF_JLT  >> 4] = BPF_JGT,
11977 		[BPF_JSGE >> 4] = BPF_JSLE,
11978 		[BPF_JSGT >> 4] = BPF_JSLT,
11979 		[BPF_JSLE >> 4] = BPF_JSGE,
11980 		[BPF_JSLT >> 4] = BPF_JSGT
11981 	};
11982 	return opcode_flip[opcode >> 4];
11983 }
11984 
11985 static int is_pkt_ptr_branch_taken(struct bpf_reg_state *dst_reg,
11986 				   struct bpf_reg_state *src_reg,
11987 				   u8 opcode)
11988 {
11989 	struct bpf_reg_state *pkt;
11990 
11991 	if (src_reg->type == PTR_TO_PACKET_END) {
11992 		pkt = dst_reg;
11993 	} else if (dst_reg->type == PTR_TO_PACKET_END) {
11994 		pkt = src_reg;
11995 		opcode = flip_opcode(opcode);
11996 	} else {
11997 		return -1;
11998 	}
11999 
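	/* pkt->range is negative only for the AT_PKT_END / BEYOND_PKT_END
	 * markings set by mark_pkt_end(); a non-negative range tells us
	 * nothing about the branch direction here.
	 */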
12000 	if (pkt->range >= 0)
12001 		return -1;
12002 
12003 	switch (opcode) {
12004 	case BPF_JLE:
12005 		/* pkt <= pkt_end */
12006 		fallthrough;
12007 	case BPF_JGT:
12008 		/* pkt > pkt_end */
12009 		if (pkt->range == BEYOND_PKT_END)
12010 			/* pkt has at least one extra byte beyond pkt_end */
12011 			return opcode == BPF_JGT;
12012 		break;
12013 	case BPF_JLT:
12014 		/* pkt < pkt_end */
12015 		fallthrough;
12016 	case BPF_JGE:
12017 		/* pkt >= pkt_end */
12018 		if (pkt->range == BEYOND_PKT_END || pkt->range == AT_PKT_END)
12019 			return opcode == BPF_JGE;
12020 		break;
12021 	}
12022 	return -1;
12023 }
12024 
12025 /* Adjusts the register min/max values in the case that the dst_reg is the
12026  * variable register that we are working on, and src_reg is a constant or we're
12027  * simply doing a BPF_K check.
12028  * In JEQ/JNE cases we also adjust the var_off values.
12029  */
12030 static void reg_set_min_max(struct bpf_reg_state *true_reg,
12031 			    struct bpf_reg_state *false_reg,
12032 			    u64 val, u32 val32,
12033 			    u8 opcode, bool is_jmp32)
12034 {
12035 	struct tnum false_32off = tnum_subreg(false_reg->var_off);
12036 	struct tnum false_64off = false_reg->var_off;
12037 	struct tnum true_32off = tnum_subreg(true_reg->var_off);
12038 	struct tnum true_64off = true_reg->var_off;
12039 	s64 sval = (s64)val;
12040 	s32 sval32 = (s32)val32;
12041 
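	/* Worked example: for "if (r1 > 10)" with r1 in [0, 20], the true
	 * branch tightens umin to 11 and the false branch tightens umax to
	 * 10; the BPF_JGE/BPF_JGT case below computes exactly these cutoffs.
	 */
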
12042 	/* If the dst_reg is a pointer, we can't learn anything about its
12043 	 * variable offset from the compare (unless src_reg were a pointer into
12044 	 * the same object, but we don't bother with that.
12045 	 * Since false_reg and true_reg have the same type by construction, we
12046 	 * only need to check one of them for pointerness.
12047 	 */
12048 	if (__is_pointer_value(false, false_reg))
12049 		return;
12050 
12051 	switch (opcode) {
12052 	/* JEQ/JNE comparison doesn't change the register equivalence.
12053 	 *
12054 	 * r1 = r2;
12055 	 * if (r1 == 42) goto label;
12056 	 * ...
12057 	 * label: // here both r1 and r2 are known to be 42.
12058 	 *
12059 	 * Hence when marking register as known preserve it's ID.
12060 	 */
12061 	case BPF_JEQ:
12062 		if (is_jmp32) {
12063 			__mark_reg32_known(true_reg, val32);
12064 			true_32off = tnum_subreg(true_reg->var_off);
12065 		} else {
12066 			___mark_reg_known(true_reg, val);
12067 			true_64off = true_reg->var_off;
12068 		}
12069 		break;
12070 	case BPF_JNE:
12071 		if (is_jmp32) {
12072 			__mark_reg32_known(false_reg, val32);
12073 			false_32off = tnum_subreg(false_reg->var_off);
12074 		} else {
12075 			___mark_reg_known(false_reg, val);
12076 			false_64off = false_reg->var_off;
12077 		}
12078 		break;
12079 	case BPF_JSET:
12080 		if (is_jmp32) {
12081 			false_32off = tnum_and(false_32off, tnum_const(~val32));
12082 			if (is_power_of_2(val32))
12083 				true_32off = tnum_or(true_32off,
12084 						     tnum_const(val32));
12085 		} else {
12086 			false_64off = tnum_and(false_64off, tnum_const(~val));
12087 			if (is_power_of_2(val))
12088 				true_64off = tnum_or(true_64off,
12089 						     tnum_const(val));
12090 		}
12091 		break;
12092 	case BPF_JGE:
12093 	case BPF_JGT:
12094 	{
12095 		if (is_jmp32) {
12096 			u32 false_umax = opcode == BPF_JGT ? val32  : val32 - 1;
12097 			u32 true_umin = opcode == BPF_JGT ? val32 + 1 : val32;
12098 
12099 			false_reg->u32_max_value = min(false_reg->u32_max_value,
12100 						       false_umax);
12101 			true_reg->u32_min_value = max(true_reg->u32_min_value,
12102 						      true_umin);
12103 		} else {
12104 			u64 false_umax = opcode == BPF_JGT ? val    : val - 1;
12105 			u64 true_umin = opcode == BPF_JGT ? val + 1 : val;
12106 
12107 			false_reg->umax_value = min(false_reg->umax_value, false_umax);
12108 			true_reg->umin_value = max(true_reg->umin_value, true_umin);
12109 		}
12110 		break;
12111 	}
12112 	case BPF_JSGE:
12113 	case BPF_JSGT:
12114 	{
12115 		if (is_jmp32) {
12116 			s32 false_smax = opcode == BPF_JSGT ? sval32    : sval32 - 1;
12117 			s32 true_smin = opcode == BPF_JSGT ? sval32 + 1 : sval32;
12118 
12119 			false_reg->s32_max_value = min(false_reg->s32_max_value, false_smax);
12120 			true_reg->s32_min_value = max(true_reg->s32_min_value, true_smin);
12121 		} else {
12122 			s64 false_smax = opcode == BPF_JSGT ? sval    : sval - 1;
12123 			s64 true_smin = opcode == BPF_JSGT ? sval + 1 : sval;
12124 
12125 			false_reg->smax_value = min(false_reg->smax_value, false_smax);
12126 			true_reg->smin_value = max(true_reg->smin_value, true_smin);
12127 		}
12128 		break;
12129 	}
12130 	case BPF_JLE:
12131 	case BPF_JLT:
12132 	{
12133 		if (is_jmp32) {
12134 			u32 false_umin = opcode == BPF_JLT ? val32  : val32 + 1;
12135 			u32 true_umax = opcode == BPF_JLT ? val32 - 1 : val32;
12136 
12137 			false_reg->u32_min_value = max(false_reg->u32_min_value,
12138 						       false_umin);
12139 			true_reg->u32_max_value = min(true_reg->u32_max_value,
12140 						      true_umax);
12141 		} else {
12142 			u64 false_umin = opcode == BPF_JLT ? val    : val + 1;
12143 			u64 true_umax = opcode == BPF_JLT ? val - 1 : val;
12144 
12145 			false_reg->umin_value = max(false_reg->umin_value, false_umin);
12146 			true_reg->umax_value = min(true_reg->umax_value, true_umax);
12147 		}
12148 		break;
12149 	}
12150 	case BPF_JSLE:
12151 	case BPF_JSLT:
12152 	{
12153 		if (is_jmp32) {
12154 			s32 false_smin = opcode == BPF_JSLT ? sval32    : sval32 + 1;
12155 			s32 true_smax = opcode == BPF_JSLT ? sval32 - 1 : sval32;
12156 
12157 			false_reg->s32_min_value = max(false_reg->s32_min_value, false_smin);
12158 			true_reg->s32_max_value = min(true_reg->s32_max_value, true_smax);
12159 		} else {
12160 			s64 false_smin = opcode == BPF_JSLT ? sval    : sval + 1;
12161 			s64 true_smax = opcode == BPF_JSLT ? sval - 1 : sval;
12162 
12163 			false_reg->smin_value = max(false_reg->smin_value, false_smin);
12164 			true_reg->smax_value = min(true_reg->smax_value, true_smax);
12165 		}
12166 		break;
12167 	}
12168 	default:
12169 		return;
12170 	}
12171 
12172 	if (is_jmp32) {
12173 		false_reg->var_off = tnum_or(tnum_clear_subreg(false_64off),
12174 					     tnum_subreg(false_32off));
12175 		true_reg->var_off = tnum_or(tnum_clear_subreg(true_64off),
12176 					    tnum_subreg(true_32off));
12177 		__reg_combine_32_into_64(false_reg);
12178 		__reg_combine_32_into_64(true_reg);
12179 	} else {
12180 		false_reg->var_off = false_64off;
12181 		true_reg->var_off = true_64off;
12182 		__reg_combine_64_into_32(false_reg);
12183 		__reg_combine_64_into_32(true_reg);
12184 	}
12185 }
12186 
12187 /* Same as above, but for the case that dst_reg holds a constant and src_reg is
12188  * the variable reg.
12189  */
12190 static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
12191 				struct bpf_reg_state *false_reg,
12192 				u64 val, u32 val32,
12193 				u8 opcode, bool is_jmp32)
12194 {
12195 	opcode = flip_opcode(opcode);
12196 	/* This uses zero as "not present in table"; luckily the zero opcode,
12197 	 * BPF_JA, can't get here.
12198 	 */
12199 	if (opcode)
12200 		reg_set_min_max(true_reg, false_reg, val, val32, opcode, is_jmp32);
12201 }
12202 
12203 /* Regs are known to be equal, so intersect their min/max/var_off */
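/* E.g. if r1 is in [0, 10] and r2 is in [5, 15], then after a successful
 * "r1 == r2" comparison both registers are known to lie in [5, 10].
 */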
12204 static void __reg_combine_min_max(struct bpf_reg_state *src_reg,
12205 				  struct bpf_reg_state *dst_reg)
12206 {
12207 	src_reg->umin_value = dst_reg->umin_value = max(src_reg->umin_value,
12208 							dst_reg->umin_value);
12209 	src_reg->umax_value = dst_reg->umax_value = min(src_reg->umax_value,
12210 							dst_reg->umax_value);
12211 	src_reg->smin_value = dst_reg->smin_value = max(src_reg->smin_value,
12212 							dst_reg->smin_value);
12213 	src_reg->smax_value = dst_reg->smax_value = min(src_reg->smax_value,
12214 							dst_reg->smax_value);
12215 	src_reg->var_off = dst_reg->var_off = tnum_intersect(src_reg->var_off,
12216 							     dst_reg->var_off);
12217 	reg_bounds_sync(src_reg);
12218 	reg_bounds_sync(dst_reg);
12219 }
12220 
12221 static void reg_combine_min_max(struct bpf_reg_state *true_src,
12222 				struct bpf_reg_state *true_dst,
12223 				struct bpf_reg_state *false_src,
12224 				struct bpf_reg_state *false_dst,
12225 				u8 opcode)
12226 {
12227 	switch (opcode) {
12228 	case BPF_JEQ:
12229 		__reg_combine_min_max(true_src, true_dst);
12230 		break;
12231 	case BPF_JNE:
12232 		__reg_combine_min_max(false_src, false_dst);
12233 		break;
12234 	}
12235 }
12236 
12237 static void mark_ptr_or_null_reg(struct bpf_func_state *state,
12238 				 struct bpf_reg_state *reg, u32 id,
12239 				 bool is_null)
12240 {
12241 	if (type_may_be_null(reg->type) && reg->id == id &&
12242 	    (is_rcu_reg(reg) || !WARN_ON_ONCE(!reg->id))) {
12243 		/* Old offset (both fixed and variable parts) should have been
12244 		 * known-zero, because we don't allow pointer arithmetic on
12245 		 * pointers that might be NULL. If we see this happening, don't
12246 		 * convert the register.
12247 		 *
12248 		 * But in some cases, helpers that return local kptrs advance
12249 		 * the offset for the returned pointer. In those cases it is
12250 		 * fine to expect to see a nonzero reg->off.
12251 		 */
12252 		if (WARN_ON_ONCE(reg->smin_value || reg->smax_value || !tnum_equals_const(reg->var_off, 0)))
12253 			return;
12254 		if (!(type_is_ptr_alloc_obj(reg->type) || type_is_non_owning_ref(reg->type)) &&
12255 		    WARN_ON_ONCE(reg->off))
12256 			return;
12257 
12258 		if (is_null) {
12259 			reg->type = SCALAR_VALUE;
12260 			/* We don't need id and ref_obj_id from this point
12261 			 * onwards anymore, thus we should better reset it,
12262 			 * so that state pruning has chances to take effect.
12263 			 */
12264 			reg->id = 0;
12265 			reg->ref_obj_id = 0;
12266 
12267 			return;
12268 		}
12269 
12270 		mark_ptr_not_null_reg(reg);
12271 
12272 		if (!reg_may_point_to_spin_lock(reg)) {
12273 			/* For not-NULL ptr, reg->ref_obj_id will be reset
12274 			 * in release_reference().
12275 			 *
12276 			 * reg->id is still used by spin_lock ptr. Other
12277 			 * than spin_lock ptr type, reg->id can be reset.
12278 			 */
12279 			reg->id = 0;
12280 		}
12281 	}
12282 }
12283 
12284 /* The logic is similar to find_good_pkt_pointers(), both could eventually
12285  * be folded together at some point.
12286  */
12287 static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno,
12288 				  bool is_null)
12289 {
12290 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
12291 	struct bpf_reg_state *regs = state->regs, *reg;
12292 	u32 ref_obj_id = regs[regno].ref_obj_id;
12293 	u32 id = regs[regno].id;
12294 
12295 	if (ref_obj_id && ref_obj_id == id && is_null)
12296 		/* regs[regno] is in the " == NULL" branch.
12297 		 * No one could have freed the reference state before
12298 		 * doing the NULL check.
12299 		 */
12300 		WARN_ON_ONCE(release_reference_state(state, id));
12301 
12302 	bpf_for_each_reg_in_vstate(vstate, state, reg, ({
12303 		mark_ptr_or_null_reg(state, reg, id, is_null);
12304 	}));
12305 }
12306 
12307 static bool try_match_pkt_pointers(const struct bpf_insn *insn,
12308 				   struct bpf_reg_state *dst_reg,
12309 				   struct bpf_reg_state *src_reg,
12310 				   struct bpf_verifier_state *this_branch,
12311 				   struct bpf_verifier_state *other_branch)
12312 {
12313 	if (BPF_SRC(insn->code) != BPF_X)
12314 		return false;
12315 
12316 	/* Pointers are always 64-bit. */
12317 	if (BPF_CLASS(insn->code) == BPF_JMP32)
12318 		return false;
12319 
12320 	switch (BPF_OP(insn->code)) {
12321 	case BPF_JGT:
12322 		if ((dst_reg->type == PTR_TO_PACKET &&
12323 		     src_reg->type == PTR_TO_PACKET_END) ||
12324 		    (dst_reg->type == PTR_TO_PACKET_META &&
12325 		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
12326 			/* pkt_data' > pkt_end, pkt_meta' > pkt_data */
12327 			find_good_pkt_pointers(this_branch, dst_reg,
12328 					       dst_reg->type, false);
12329 			mark_pkt_end(other_branch, insn->dst_reg, true);
12330 		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
12331 			    src_reg->type == PTR_TO_PACKET) ||
12332 			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
12333 			    src_reg->type == PTR_TO_PACKET_META)) {
12334 			/* pkt_end > pkt_data', pkt_data > pkt_meta' */
12335 			find_good_pkt_pointers(other_branch, src_reg,
12336 					       src_reg->type, true);
12337 			mark_pkt_end(this_branch, insn->src_reg, false);
12338 		} else {
12339 			return false;
12340 		}
12341 		break;
12342 	case BPF_JLT:
12343 		if ((dst_reg->type == PTR_TO_PACKET &&
12344 		     src_reg->type == PTR_TO_PACKET_END) ||
12345 		    (dst_reg->type == PTR_TO_PACKET_META &&
12346 		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
12347 			/* pkt_data' < pkt_end, pkt_meta' < pkt_data */
12348 			find_good_pkt_pointers(other_branch, dst_reg,
12349 					       dst_reg->type, true);
12350 			mark_pkt_end(this_branch, insn->dst_reg, false);
12351 		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
12352 			    src_reg->type == PTR_TO_PACKET) ||
12353 			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
12354 			    src_reg->type == PTR_TO_PACKET_META)) {
12355 			/* pkt_end < pkt_data', pkt_data > pkt_meta' */
12356 			find_good_pkt_pointers(this_branch, src_reg,
12357 					       src_reg->type, false);
12358 			mark_pkt_end(other_branch, insn->src_reg, true);
12359 		} else {
12360 			return false;
12361 		}
12362 		break;
12363 	case BPF_JGE:
12364 		if ((dst_reg->type == PTR_TO_PACKET &&
12365 		     src_reg->type == PTR_TO_PACKET_END) ||
12366 		    (dst_reg->type == PTR_TO_PACKET_META &&
12367 		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
12368 			/* pkt_data' >= pkt_end, pkt_meta' >= pkt_data */
12369 			find_good_pkt_pointers(this_branch, dst_reg,
12370 					       dst_reg->type, true);
12371 			mark_pkt_end(other_branch, insn->dst_reg, false);
12372 		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
12373 			    src_reg->type == PTR_TO_PACKET) ||
12374 			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
12375 			    src_reg->type == PTR_TO_PACKET_META)) {
12376 			/* pkt_end >= pkt_data', pkt_data >= pkt_meta' */
12377 			find_good_pkt_pointers(other_branch, src_reg,
12378 					       src_reg->type, false);
12379 			mark_pkt_end(this_branch, insn->src_reg, true);
12380 		} else {
12381 			return false;
12382 		}
12383 		break;
12384 	case BPF_JLE:
12385 		if ((dst_reg->type == PTR_TO_PACKET &&
12386 		     src_reg->type == PTR_TO_PACKET_END) ||
12387 		    (dst_reg->type == PTR_TO_PACKET_META &&
12388 		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
12389 			/* pkt_data' <= pkt_end, pkt_meta' <= pkt_data */
12390 			find_good_pkt_pointers(other_branch, dst_reg,
12391 					       dst_reg->type, false);
12392 			mark_pkt_end(this_branch, insn->dst_reg, true);
12393 		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
12394 			    src_reg->type == PTR_TO_PACKET) ||
12395 			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
12396 			    src_reg->type == PTR_TO_PACKET_META)) {
12397 			/* pkt_end <= pkt_data', pkt_data <= pkt_meta' */
12398 			find_good_pkt_pointers(this_branch, src_reg,
12399 					       src_reg->type, true);
12400 			mark_pkt_end(other_branch, insn->src_reg, false);
12401 		} else {
12402 			return false;
12403 		}
12404 		break;
12405 	default:
12406 		return false;
12407 	}
12408 
12409 	return true;
12410 }
12411 
12412 static void find_equal_scalars(struct bpf_verifier_state *vstate,
12413 			       struct bpf_reg_state *known_reg)
12414 {
12415 	struct bpf_func_state *state;
12416 	struct bpf_reg_state *reg;
12417 
12418 	bpf_for_each_reg_in_vstate(vstate, state, reg, ({
12419 		if (reg->type == SCALAR_VALUE && reg->id == known_reg->id)
12420 			copy_register_state(reg, known_reg);
12421 	}));
12422 }
12423 
12424 static int check_cond_jmp_op(struct bpf_verifier_env *env,
12425 			     struct bpf_insn *insn, int *insn_idx)
12426 {
12427 	struct bpf_verifier_state *this_branch = env->cur_state;
12428 	struct bpf_verifier_state *other_branch;
12429 	struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs;
12430 	struct bpf_reg_state *dst_reg, *other_branch_regs, *src_reg = NULL;
12431 	struct bpf_reg_state *eq_branch_regs;
12432 	u8 opcode = BPF_OP(insn->code);
12433 	bool is_jmp32;
12434 	int pred = -1;
12435 	int err;
12436 
12437 	/* Only conditional jumps are expected to reach here. */
12438 	if (opcode == BPF_JA || opcode > BPF_JSLE) {
12439 		verbose(env, "invalid BPF_JMP/JMP32 opcode %x\n", opcode);
12440 		return -EINVAL;
12441 	}
12442 
12443 	if (BPF_SRC(insn->code) == BPF_X) {
12444 		if (insn->imm != 0) {
12445 			verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
12446 			return -EINVAL;
12447 		}
12448 
12449 		/* check src1 operand */
12450 		err = check_reg_arg(env, insn->src_reg, SRC_OP);
12451 		if (err)
12452 			return err;
12453 
12454 		if (is_pointer_value(env, insn->src_reg)) {
12455 			verbose(env, "R%d pointer comparison prohibited\n",
12456 				insn->src_reg);
12457 			return -EACCES;
12458 		}
12459 		src_reg = &regs[insn->src_reg];
12460 	} else {
12461 		if (insn->src_reg != BPF_REG_0) {
12462 			verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
12463 			return -EINVAL;
12464 		}
12465 	}
12466 
12467 	/* check src2 operand */
12468 	err = check_reg_arg(env, insn->dst_reg, SRC_OP);
12469 	if (err)
12470 		return err;
12471 
12472 	dst_reg = &regs[insn->dst_reg];
12473 	is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
12474 
12475 	if (BPF_SRC(insn->code) == BPF_K) {
12476 		pred = is_branch_taken(dst_reg, insn->imm, opcode, is_jmp32);
12477 	} else if (src_reg->type == SCALAR_VALUE &&
12478 		   is_jmp32 && tnum_is_const(tnum_subreg(src_reg->var_off))) {
12479 		pred = is_branch_taken(dst_reg,
12480 				       tnum_subreg(src_reg->var_off).value,
12481 				       opcode,
12482 				       is_jmp32);
12483 	} else if (src_reg->type == SCALAR_VALUE &&
12484 		   !is_jmp32 && tnum_is_const(src_reg->var_off)) {
12485 		pred = is_branch_taken(dst_reg,
12486 				       src_reg->var_off.value,
12487 				       opcode,
12488 				       is_jmp32);
12489 	} else if (reg_is_pkt_pointer_any(dst_reg) &&
12490 		   reg_is_pkt_pointer_any(src_reg) &&
12491 		   !is_jmp32) {
12492 		pred = is_pkt_ptr_branch_taken(dst_reg, src_reg, opcode);
12493 	}
12494 
12495 	if (pred >= 0) {
12496 		/* If we get here with a dst_reg pointer type it is because
12497 		 * above is_branch_taken() special cased the 0 comparison.
12498 		 */
12499 		if (!__is_pointer_value(false, dst_reg))
12500 			err = mark_chain_precision(env, insn->dst_reg);
12501 		if (BPF_SRC(insn->code) == BPF_X && !err &&
12502 		    !__is_pointer_value(false, src_reg))
12503 			err = mark_chain_precision(env, insn->src_reg);
12504 		if (err)
12505 			return err;
12506 	}
12507 
12508 	if (pred == 1) {
12509 		/* Only follow the goto, ignore fall-through. If needed, push
12510 		 * the fall-through branch for simulation under speculative
12511 		 * execution.
12512 		 */
12513 		if (!env->bypass_spec_v1 &&
12514 		    !sanitize_speculative_path(env, insn, *insn_idx + 1,
12515 					       *insn_idx))
12516 			return -EFAULT;
12517 		*insn_idx += insn->off;
12518 		return 0;
12519 	} else if (pred == 0) {
12520 		/* Only follow the fall-through branch, since that's where the
12521 		 * program will go. If needed, push the goto branch for
12522 		 * simulation under speculative execution.
12523 		 */
12524 		if (!env->bypass_spec_v1 &&
12525 		    !sanitize_speculative_path(env, insn,
12526 					       *insn_idx + insn->off + 1,
12527 					       *insn_idx))
12528 			return -EFAULT;
12529 		return 0;
12530 	}
12531 
12532 	other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx,
12533 				  false);
12534 	if (!other_branch)
12535 		return -EFAULT;
12536 	other_branch_regs = other_branch->frame[other_branch->curframe]->regs;
12537 
12538 	/* detect if we are comparing against a constant value so we can adjust
12539 	 * our min/max values for our dst register.
12540 	 * this is only legit if both are scalars (or pointers to the same
12541 	 * object, I suppose, see the PTR_MAYBE_NULL related if block below),
12542 	 * because otherwise the different base pointers mean the offsets aren't
12543 	 * comparable.
12544 	 */
12545 	if (BPF_SRC(insn->code) == BPF_X) {
12546 		struct bpf_reg_state *src_reg = &regs[insn->src_reg];
12547 
12548 		if (dst_reg->type == SCALAR_VALUE &&
12549 		    src_reg->type == SCALAR_VALUE) {
12550 			if (tnum_is_const(src_reg->var_off) ||
12551 			    (is_jmp32 &&
12552 			     tnum_is_const(tnum_subreg(src_reg->var_off))))
12553 				reg_set_min_max(&other_branch_regs[insn->dst_reg],
12554 						dst_reg,
12555 						src_reg->var_off.value,
12556 						tnum_subreg(src_reg->var_off).value,
12557 						opcode, is_jmp32);
12558 			else if (tnum_is_const(dst_reg->var_off) ||
12559 				 (is_jmp32 &&
12560 				  tnum_is_const(tnum_subreg(dst_reg->var_off))))
12561 				reg_set_min_max_inv(&other_branch_regs[insn->src_reg],
12562 						    src_reg,
12563 						    dst_reg->var_off.value,
12564 						    tnum_subreg(dst_reg->var_off).value,
12565 						    opcode, is_jmp32);
12566 			else if (!is_jmp32 &&
12567 				 (opcode == BPF_JEQ || opcode == BPF_JNE))
12568 				/* Comparing for equality, we can combine knowledge */
12569 				reg_combine_min_max(&other_branch_regs[insn->src_reg],
12570 						    &other_branch_regs[insn->dst_reg],
12571 						    src_reg, dst_reg, opcode);
12572 			if (src_reg->id &&
12573 			    !WARN_ON_ONCE(src_reg->id != other_branch_regs[insn->src_reg].id)) {
12574 				find_equal_scalars(this_branch, src_reg);
12575 				find_equal_scalars(other_branch, &other_branch_regs[insn->src_reg]);
12576 			}
12577 
12578 		}
12579 	} else if (dst_reg->type == SCALAR_VALUE) {
12580 		reg_set_min_max(&other_branch_regs[insn->dst_reg],
12581 					dst_reg, insn->imm, (u32)insn->imm,
12582 					opcode, is_jmp32);
12583 	}
12584 
12585 	if (dst_reg->type == SCALAR_VALUE && dst_reg->id &&
12586 	    !WARN_ON_ONCE(dst_reg->id != other_branch_regs[insn->dst_reg].id)) {
12587 		find_equal_scalars(this_branch, dst_reg);
12588 		find_equal_scalars(other_branch, &other_branch_regs[insn->dst_reg]);
12589 	}
12590 
12591 	/* if one pointer register is compared to another pointer
12592 	 * register check if PTR_MAYBE_NULL could be lifted.
12593 	 * E.g. register A - maybe null
12594 	 *      register B - not null
12595 	 * for JNE A, B, ... - A is not null in the false branch;
12596 	 * for JEQ A, B, ... - A is not null in the true branch.
12597 	 *
12598 	 * A PTR_TO_BTF_ID points to a kernel struct that does not
12599 	 * need to be null checked by the BPF program, i.e., it
12600 	 * could be null even without the PTR_MAYBE_NULL marking,
12601 	 * so only propagate nullness when neither reg is that type.
12602 	 */
12603 	if (!is_jmp32 && BPF_SRC(insn->code) == BPF_X &&
12604 	    __is_pointer_value(false, src_reg) && __is_pointer_value(false, dst_reg) &&
12605 	    type_may_be_null(src_reg->type) != type_may_be_null(dst_reg->type) &&
12606 	    base_type(src_reg->type) != PTR_TO_BTF_ID &&
12607 	    base_type(dst_reg->type) != PTR_TO_BTF_ID) {
12608 		eq_branch_regs = NULL;
12609 		switch (opcode) {
12610 		case BPF_JEQ:
12611 			eq_branch_regs = other_branch_regs;
12612 			break;
12613 		case BPF_JNE:
12614 			eq_branch_regs = regs;
12615 			break;
12616 		default:
12617 			/* do nothing */
12618 			break;
12619 		}
12620 		if (eq_branch_regs) {
12621 			if (type_may_be_null(src_reg->type))
12622 				mark_ptr_not_null_reg(&eq_branch_regs[insn->src_reg]);
12623 			else
12624 				mark_ptr_not_null_reg(&eq_branch_regs[insn->dst_reg]);
12625 		}
12626 	}
12627 
12628 	/* detect if R == 0 where R is returned from bpf_map_lookup_elem().
12629 	 * NOTE: these optimizations below are related with pointer comparison
12630 	 *       which will never be JMP32.
12631 	 */
12632 	if (!is_jmp32 && BPF_SRC(insn->code) == BPF_K &&
12633 	    insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) &&
12634 	    type_may_be_null(dst_reg->type)) {
12635 		/* Mark all identical registers in each branch as either
12636 		 * safe or unknown depending on the R == 0 or R != 0 conditional.
12637 		 */
12638 		mark_ptr_or_null_regs(this_branch, insn->dst_reg,
12639 				      opcode == BPF_JNE);
12640 		mark_ptr_or_null_regs(other_branch, insn->dst_reg,
12641 				      opcode == BPF_JEQ);
12642 	} else if (!try_match_pkt_pointers(insn, dst_reg, &regs[insn->src_reg],
12643 					   this_branch, other_branch) &&
12644 		   is_pointer_value(env, insn->dst_reg)) {
12645 		verbose(env, "R%d pointer comparison prohibited\n",
12646 			insn->dst_reg);
12647 		return -EACCES;
12648 	}
12649 	if (env->log.level & BPF_LOG_LEVEL)
12650 		print_insn_state(env, this_branch->frame[this_branch->curframe]);
12651 	return 0;
12652 }
12653 
12654 /* verify BPF_LD_IMM64 instruction */
12655 static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
12656 {
12657 	struct bpf_insn_aux_data *aux = cur_aux(env);
12658 	struct bpf_reg_state *regs = cur_regs(env);
12659 	struct bpf_reg_state *dst_reg;
12660 	struct bpf_map *map;
12661 	int err;
12662 
12663 	if (BPF_SIZE(insn->code) != BPF_DW) {
12664 		verbose(env, "invalid BPF_LD_IMM insn\n");
12665 		return -EINVAL;
12666 	}
12667 	if (insn->off != 0) {
12668 		verbose(env, "BPF_LD_IMM64 uses reserved fields\n");
12669 		return -EINVAL;
12670 	}
12671 
12672 	err = check_reg_arg(env, insn->dst_reg, DST_OP);
12673 	if (err)
12674 		return err;
12675 
12676 	dst_reg = &regs[insn->dst_reg];
12677 	if (insn->src_reg == 0) {
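		/* A BPF_LD_IMM64 spans two instructions: insn->imm carries the
		 * low 32 bits of the immediate and (insn + 1)->imm the high
		 * 32 bits.
		 */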
12678 		u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
12679 
12680 		dst_reg->type = SCALAR_VALUE;
12681 		__mark_reg_known(&regs[insn->dst_reg], imm);
12682 		return 0;
12683 	}
12684 
12685 	/* All special src_reg cases are listed below. From this point onwards
12686 	 * we either succeed and assign a corresponding dst_reg->type after
12687 	 * zeroing the offset, or fail and reject the program.
12688 	 */
12689 	mark_reg_known_zero(env, regs, insn->dst_reg);
12690 
12691 	if (insn->src_reg == BPF_PSEUDO_BTF_ID) {
12692 		dst_reg->type = aux->btf_var.reg_type;
12693 		switch (base_type(dst_reg->type)) {
12694 		case PTR_TO_MEM:
12695 			dst_reg->mem_size = aux->btf_var.mem_size;
12696 			break;
12697 		case PTR_TO_BTF_ID:
12698 			dst_reg->btf = aux->btf_var.btf;
12699 			dst_reg->btf_id = aux->btf_var.btf_id;
12700 			break;
12701 		default:
12702 			verbose(env, "bpf verifier is misconfigured\n");
12703 			return -EFAULT;
12704 		}
12705 		return 0;
12706 	}
12707 
12708 	if (insn->src_reg == BPF_PSEUDO_FUNC) {
12709 		struct bpf_prog_aux *aux = env->prog->aux;
12710 		u32 subprogno = find_subprog(env,
12711 					     env->insn_idx + insn->imm + 1);
12712 
12713 		if (!aux->func_info) {
12714 			verbose(env, "missing btf func_info\n");
12715 			return -EINVAL;
12716 		}
12717 		if (aux->func_info_aux[subprogno].linkage != BTF_FUNC_STATIC) {
12718 			verbose(env, "callback function not static\n");
12719 			return -EINVAL;
12720 		}
12721 
12722 		dst_reg->type = PTR_TO_FUNC;
12723 		dst_reg->subprogno = subprogno;
12724 		return 0;
12725 	}
12726 
12727 	map = env->used_maps[aux->map_index];
12728 	dst_reg->map_ptr = map;
12729 
12730 	if (insn->src_reg == BPF_PSEUDO_MAP_VALUE ||
12731 	    insn->src_reg == BPF_PSEUDO_MAP_IDX_VALUE) {
12732 		dst_reg->type = PTR_TO_MAP_VALUE;
12733 		dst_reg->off = aux->map_off;
12734 		WARN_ON_ONCE(map->max_entries != 1);
12735 		/* We want reg->id to be the same (0) as the map_value is not distinct */
12736 	} else if (insn->src_reg == BPF_PSEUDO_MAP_FD ||
12737 		   insn->src_reg == BPF_PSEUDO_MAP_IDX) {
12738 		dst_reg->type = CONST_PTR_TO_MAP;
12739 	} else {
12740 		verbose(env, "bpf verifier is misconfigured\n");
12741 		return -EINVAL;
12742 	}
12743 
12744 	return 0;
12745 }
12746 
12747 static bool may_access_skb(enum bpf_prog_type type)
12748 {
12749 	switch (type) {
12750 	case BPF_PROG_TYPE_SOCKET_FILTER:
12751 	case BPF_PROG_TYPE_SCHED_CLS:
12752 	case BPF_PROG_TYPE_SCHED_ACT:
12753 		return true;
12754 	default:
12755 		return false;
12756 	}
12757 }
12758 
12759 /* verify safety of LD_ABS|LD_IND instructions:
12760  * - they can only appear in programs where ctx == skb
12761  * - since they are wrappers of function calls, they scratch R1-R5 registers,
12762  *   preserve R6-R9, and store return value into R0
12763  *
12764  * Implicit input:
12765  *   ctx == skb == R6 == CTX
12766  *
12767  * Explicit input:
12768  *   SRC == any register
12769  *   IMM == 32-bit immediate
12770  *
12771  * Output:
12772  *   R0 - 8/16/32-bit skb data converted to cpu endianness
12773  */
12774 static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
12775 {
12776 	struct bpf_reg_state *regs = cur_regs(env);
12777 	static const int ctx_reg = BPF_REG_6;
12778 	u8 mode = BPF_MODE(insn->code);
12779 	int i, err;
12780 
12781 	if (!may_access_skb(resolve_prog_type(env->prog))) {
12782 		verbose(env, "BPF_LD_[ABS|IND] instructions not allowed for this program type\n");
12783 		return -EINVAL;
12784 	}
12785 
12786 	if (!env->ops->gen_ld_abs) {
12787 		verbose(env, "bpf verifier is misconfigured\n");
12788 		return -EINVAL;
12789 	}
12790 
12791 	if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
12792 	    BPF_SIZE(insn->code) == BPF_DW ||
12793 	    (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
12794 		verbose(env, "BPF_LD_[ABS|IND] uses reserved fields\n");
12795 		return -EINVAL;
12796 	}
12797 
12798 	/* check whether implicit source operand (register R6) is readable */
12799 	err = check_reg_arg(env, ctx_reg, SRC_OP);
12800 	if (err)
12801 		return err;
12802 
12803 	/* Disallow usage of BPF_LD_[ABS|IND] with reference tracking, as
12804 	 * gen_ld_abs() may terminate the program at runtime, leading to
12805 	 * reference leak.
12806 	 */
12807 	err = check_reference_leak(env);
12808 	if (err) {
12809 		verbose(env, "BPF_LD_[ABS|IND] cannot be mixed with socket references\n");
12810 		return err;
12811 	}
12812 
12813 	if (env->cur_state->active_lock.ptr) {
12814 		verbose(env, "BPF_LD_[ABS|IND] cannot be used inside bpf_spin_lock-ed region\n");
12815 		return -EINVAL;
12816 	}
12817 
12818 	if (env->cur_state->active_rcu_lock) {
12819 		verbose(env, "BPF_LD_[ABS|IND] cannot be used inside bpf_rcu_read_lock-ed region\n");
12820 		return -EINVAL;
12821 	}
12822 
12823 	if (regs[ctx_reg].type != PTR_TO_CTX) {
12824 		verbose(env,
12825 			"at the time of BPF_LD_ABS|IND R6 != pointer to skb\n");
12826 		return -EINVAL;
12827 	}
12828 
12829 	if (mode == BPF_IND) {
12830 		/* check explicit source operand */
12831 		err = check_reg_arg(env, insn->src_reg, SRC_OP);
12832 		if (err)
12833 			return err;
12834 	}
12835 
12836 	err = check_ptr_off_reg(env, &regs[ctx_reg], ctx_reg);
12837 	if (err < 0)
12838 		return err;
12839 
12840 	/* reset caller saved regs to unreadable */
12841 	for (i = 0; i < CALLER_SAVED_REGS; i++) {
12842 		mark_reg_not_init(env, regs, caller_saved[i]);
12843 		check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
12844 	}
12845 
12846 	/* mark destination R0 register as readable, since it contains
12847 	 * the value fetched from the packet.
12848 	 * Already marked as written above.
12849 	 */
12850 	mark_reg_unknown(env, regs, BPF_REG_0);
12851 	/* ld_abs loads up to 32 bits of skb data. */
12852 	regs[BPF_REG_0].subreg_def = env->insn_idx + 1;
12853 	return 0;
12854 }
12855 
12856 static int check_return_code(struct bpf_verifier_env *env)
12857 {
12858 	struct tnum enforce_attach_type_range = tnum_unknown;
12859 	const struct bpf_prog *prog = env->prog;
12860 	struct bpf_reg_state *reg;
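	/* default enforced R0 range; program types below may override it */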
12861 	struct tnum range = tnum_range(0, 1);
12862 	enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
12863 	int err;
12864 	struct bpf_func_state *frame = env->cur_state->frame[0];
12865 	const bool is_subprog = frame->subprogno;
12866 
12867 	/* LSM and struct_ops func-ptr's return type could be "void" */
12868 	if (!is_subprog) {
12869 		switch (prog_type) {
12870 		case BPF_PROG_TYPE_LSM:
12871 			if (prog->expected_attach_type == BPF_LSM_CGROUP)
12872 				/* See below, can be 0 or 0-1 depending on hook. */
12873 				break;
12874 			fallthrough;
12875 		case BPF_PROG_TYPE_STRUCT_OPS:
12876 			if (!prog->aux->attach_func_proto->type)
12877 				return 0;
12878 			break;
12879 		default:
12880 			break;
12881 		}
12882 	}
12883 
12884 	/* eBPF calling convention is such that R0 is used
12885 	 * to return the value from eBPF program.
12886 	 * Make sure that it's readable at this time
12887 	 * of bpf_exit, which means that program wrote
12888 	 * something into it earlier
12889 	 */
12890 	err = check_reg_arg(env, BPF_REG_0, SRC_OP);
12891 	if (err)
12892 		return err;
12893 
12894 	if (is_pointer_value(env, BPF_REG_0)) {
12895 		verbose(env, "R0 leaks addr as return value\n");
12896 		return -EACCES;
12897 	}
12898 
12899 	reg = cur_regs(env) + BPF_REG_0;
12900 
12901 	if (frame->in_async_callback_fn) {
12902 		/* enforce return zero from async callbacks like timer */
12903 		if (reg->type != SCALAR_VALUE) {
12904 			verbose(env, "In async callback the register R0 is not a known value (%s)\n",
12905 				reg_type_str(env, reg->type));
12906 			return -EINVAL;
12907 		}
12908 
12909 		if (!tnum_in(tnum_const(0), reg->var_off)) {
12910 			verbose_invalid_scalar(env, reg, &range, "async callback", "R0");
12911 			return -EINVAL;
12912 		}
12913 		return 0;
12914 	}
12915 
12916 	if (is_subprog) {
12917 		if (reg->type != SCALAR_VALUE) {
12918 			verbose(env, "At subprogram exit the register R0 is not a scalar value (%s)\n",
12919 				reg_type_str(env, reg->type));
12920 			return -EINVAL;
12921 		}
12922 		return 0;
12923 	}
12924 
12925 	switch (prog_type) {
12926 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
12927 		if (env->prog->expected_attach_type == BPF_CGROUP_UDP4_RECVMSG ||
12928 		    env->prog->expected_attach_type == BPF_CGROUP_UDP6_RECVMSG ||
12929 		    env->prog->expected_attach_type == BPF_CGROUP_INET4_GETPEERNAME ||
12930 		    env->prog->expected_attach_type == BPF_CGROUP_INET6_GETPEERNAME ||
12931 		    env->prog->expected_attach_type == BPF_CGROUP_INET4_GETSOCKNAME ||
12932 		    env->prog->expected_attach_type == BPF_CGROUP_INET6_GETSOCKNAME)
12933 			range = tnum_range(1, 1);
12934 		if (env->prog->expected_attach_type == BPF_CGROUP_INET4_BIND ||
12935 		    env->prog->expected_attach_type == BPF_CGROUP_INET6_BIND)
12936 			range = tnum_range(0, 3);
12937 		break;
12938 	case BPF_PROG_TYPE_CGROUP_SKB:
12939 		if (env->prog->expected_attach_type == BPF_CGROUP_INET_EGRESS) {
12940 			range = tnum_range(0, 3);
12941 			enforce_attach_type_range = tnum_range(2, 3);
12942 		}
12943 		break;
12944 	case BPF_PROG_TYPE_CGROUP_SOCK:
12945 	case BPF_PROG_TYPE_SOCK_OPS:
12946 	case BPF_PROG_TYPE_CGROUP_DEVICE:
12947 	case BPF_PROG_TYPE_CGROUP_SYSCTL:
12948 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
12949 		break;
12950 	case BPF_PROG_TYPE_RAW_TRACEPOINT:
12951 		if (!env->prog->aux->attach_btf_id)
12952 			return 0;
12953 		range = tnum_const(0);
12954 		break;
12955 	case BPF_PROG_TYPE_TRACING:
12956 		switch (env->prog->expected_attach_type) {
12957 		case BPF_TRACE_FENTRY:
12958 		case BPF_TRACE_FEXIT:
12959 			range = tnum_const(0);
12960 			break;
12961 		case BPF_TRACE_RAW_TP:
12962 		case BPF_MODIFY_RETURN:
12963 			return 0;
12964 		case BPF_TRACE_ITER:
12965 			break;
12966 		default:
12967 			return -ENOTSUPP;
12968 		}
12969 		break;
12970 	case BPF_PROG_TYPE_SK_LOOKUP:
12971 		range = tnum_range(SK_DROP, SK_PASS);
12972 		break;
12973 
12974 	case BPF_PROG_TYPE_LSM:
12975 		if (env->prog->expected_attach_type != BPF_LSM_CGROUP) {
12976 			/* Regular BPF_PROG_TYPE_LSM programs can return
12977 			 * any value.
12978 			 */
12979 			return 0;
12980 		}
12981 		if (!env->prog->aux->attach_func_proto->type) {
12982 			/* Make sure programs that attach to void
12983 			 * hooks don't try to modify return value.
12984 			 */
12985 			range = tnum_range(1, 1);
12986 		}
12987 		break;
12988 
12989 	case BPF_PROG_TYPE_EXT:
12990 		/* freplace program can return anything as its return value
12991 		 * depends on the to-be-replaced kernel func or bpf program.
12992 		 */
12993 	default:
12994 		return 0;
12995 	}
12996 
12997 	if (reg->type != SCALAR_VALUE) {
12998 		verbose(env, "At program exit the register R0 is not a known value (%s)\n",
12999 			reg_type_str(env, reg->type));
13000 		return -EINVAL;
13001 	}
13002 
13003 	if (!tnum_in(range, reg->var_off)) {
13004 		verbose_invalid_scalar(env, reg, &range, "program exit", "R0");
13005 		if (prog->expected_attach_type == BPF_LSM_CGROUP &&
13006 		    prog_type == BPF_PROG_TYPE_LSM &&
13007 		    !prog->aux->attach_func_proto->type)
13008 			verbose(env, "Note, BPF_LSM_CGROUP that attach to void LSM hooks can't modify return value!\n");
13009 		return -EINVAL;
13010 	}
13011 
13012 	if (!tnum_is_unknown(enforce_attach_type_range) &&
13013 	    tnum_in(enforce_attach_type_range, reg->var_off))
13014 		env->prog->enforce_expected_attach_type = 1;
13015 	return 0;
13016 }
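
/* An illustrative sketch (hypothetical programs, not from this file): for a
 * BPF_PROG_TYPE_CGROUP_SKB program attached to BPF_CGROUP_INET_EGRESS the
 * code above sets range = tnum_range(0, 3), so a program like
 *
 *   r0 = 4
 *   exit
 *
 * fails the tnum_in() check and is rejected at "program exit", while
 *
 *   r0 = 2
 *   exit
 *
 * is accepted and additionally sets prog->enforce_expected_attach_type,
 * since 2 also falls inside enforce_attach_type_range = tnum_range(2, 3).
 */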
13017 
13018 /* non-recursive DFS pseudo code
13019  * 1  procedure DFS-iterative(G,v):
13020  * 2      label v as discovered
13021  * 3      let S be a stack
13022  * 4      S.push(v)
13023  * 5      while S is not empty
13024  * 6            t <- S.peek()
13025  * 7            if t is what we're looking for:
13026  * 8                return t
13027  * 9            for all edges e in G.adjacentEdges(t) do
13028  * 10               if edge e is already labelled
13029  * 11                   continue with the next edge
13030  * 12               w <- G.adjacentVertex(t,e)
13031  * 13               if vertex w is not discovered and not explored
13032  * 14                   label e as tree-edge
13033  * 15                   label w as discovered
13034  * 16                   S.push(w)
13035  * 17                   continue at 5
13036  * 18               else if vertex w is discovered
13037  * 19                   label e as back-edge
13038  * 20               else
13039  * 21                   // vertex w is explored
13040  * 22                   label e as forward- or cross-edge
13041  * 23           label t as explored
13042  * 24           S.pop()
13043  *
13044  * convention:
13045  * 0x10 - discovered
13046  * 0x11 - discovered and fall-through edge labelled
13047  * 0x12 - discovered and fall-through and branch edges labelled
13048  * 0x20 - explored
13049  */
13050 
13051 enum {
13052 	DISCOVERED = 0x10,
13053 	EXPLORED = 0x20,
13054 	FALLTHROUGH = 1,
13055 	BRANCH = 2,
13056 };
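
/* A sketch of how insn_state evolves for a conditional jump at insn t
 * (hypothetical walk, following the convention above):
 *
 *   insn_state[t] == 0x00   not yet discovered
 *   insn_state[t] == 0x10   DISCOVERED, t pushed on insn_stack
 *   insn_state[t] == 0x11   DISCOVERED | FALLTHROUGH, edge t -> t+1 labelled
 *   insn_state[t] == 0x12   DISCOVERED | BRANCH, both edges labelled
 *                           (visit_insn() always labels fall-through first)
 *   insn_state[t] == 0x20   EXPLORED, t popped from insn_stack
 *
 * In push_insn() below, a successor w with (insn_state[w] & 0xF0) ==
 * DISCOVERED is still on the stack, i.e. the edge t -> w is a back-edge.
 */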
13057 
13058 static u32 state_htab_size(struct bpf_verifier_env *env)
13059 {
13060 	return env->prog->len;
13061 }
13062 
13063 static struct bpf_verifier_state_list **explored_state(
13064 					struct bpf_verifier_env *env,
13065 					int idx)
13066 {
13067 	struct bpf_verifier_state *cur = env->cur_state;
13068 	struct bpf_func_state *state = cur->frame[cur->curframe];
13069 
13070 	return &env->explored_states[(idx ^ state->callsite) % state_htab_size(env)];
13071 }
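
/* Hashing in the callsite keeps states of the same insn that were reached
 * via different call chains in (likely) different buckets. E.g. with a
 * hypothetical 1000-insn program, a prune point at insn 20 reached from a
 * subprog called at insn 500 lands in bucket (20 ^ 500) % 1000 == 480,
 * while the same insn reached from a call at insn 600 lands in
 * (20 ^ 600) % 1000 == 588.
 */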
13072 
13073 static void mark_prune_point(struct bpf_verifier_env *env, int idx)
13074 {
13075 	env->insn_aux_data[idx].prune_point = true;
13076 }
13077 
13078 static bool is_prune_point(struct bpf_verifier_env *env, int insn_idx)
13079 {
13080 	return env->insn_aux_data[insn_idx].prune_point;
13081 }
13082 
13083 enum {
13084 	DONE_EXPLORING = 0,
13085 	KEEP_EXPLORING = 1,
13086 };
13087 
13088 /* t, w, e - match pseudo-code above:
13089  * t - index of current instruction
13090  * w - next instruction
13091  * e - edge
13092  */
13093 static int push_insn(int t, int w, int e, struct bpf_verifier_env *env,
13094 		     bool loop_ok)
13095 {
13096 	int *insn_stack = env->cfg.insn_stack;
13097 	int *insn_state = env->cfg.insn_state;
13098 
13099 	if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH))
13100 		return DONE_EXPLORING;
13101 
13102 	if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH))
13103 		return DONE_EXPLORING;
13104 
13105 	if (w < 0 || w >= env->prog->len) {
13106 		verbose_linfo(env, t, "%d: ", t);
13107 		verbose(env, "jump out of range from insn %d to %d\n", t, w);
13108 		return -EINVAL;
13109 	}
13110 
13111 	if (e == BRANCH) {
13112 		/* mark branch target for state pruning */
13113 		mark_prune_point(env, w);
13114 		mark_jmp_point(env, w);
13115 	}
13116 
13117 	if (insn_state[w] == 0) {
13118 		/* tree-edge */
13119 		insn_state[t] = DISCOVERED | e;
13120 		insn_state[w] = DISCOVERED;
13121 		if (env->cfg.cur_stack >= env->prog->len)
13122 			return -E2BIG;
13123 		insn_stack[env->cfg.cur_stack++] = w;
13124 		return KEEP_EXPLORING;
13125 	} else if ((insn_state[w] & 0xF0) == DISCOVERED) {
13126 		if (loop_ok && env->bpf_capable)
13127 			return DONE_EXPLORING;
13128 		verbose_linfo(env, t, "%d: ", t);
13129 		verbose_linfo(env, w, "%d: ", w);
13130 		verbose(env, "back-edge from insn %d to %d\n", t, w);
13131 		return -EINVAL;
13132 	} else if (insn_state[w] == EXPLORED) {
13133 		/* forward- or cross-edge */
13134 		insn_state[t] = DISCOVERED | e;
13135 	} else {
13136 		verbose(env, "insn state internal bug\n");
13137 		return -EFAULT;
13138 	}
13139 	return DONE_EXPLORING;
13140 }
13141 
13142 static int visit_func_call_insn(int t, struct bpf_insn *insns,
13143 				struct bpf_verifier_env *env,
13144 				bool visit_callee)
13145 {
13146 	int ret;
13147 
13148 	ret = push_insn(t, t + 1, FALLTHROUGH, env, false);
13149 	if (ret)
13150 		return ret;
13151 
13152 	mark_prune_point(env, t + 1);
13153 	/* when we exit from subprog, we need to record non-linear history */
13154 	mark_jmp_point(env, t + 1);
13155 
13156 	if (visit_callee) {
13157 		mark_prune_point(env, t);
13158 		ret = push_insn(t, t + insns[t].imm + 1, BRANCH, env,
13159 				/* It's ok to allow recursion from CFG point of
13160 				 * view. __check_func_call() will do the actual
13161 				 * check.
13162 				 */
13163 				bpf_pseudo_func(insns + t));
13164 	}
13165 	return ret;
13166 }
13167 
13168 /* Visits the instruction at index t and returns one of the following:
13169  *  < 0 - an error occurred
13170  *  DONE_EXPLORING - the instruction was fully explored
13171  *  KEEP_EXPLORING - there is still work to be done before it is fully explored
13172  */
13173 static int visit_insn(int t, struct bpf_verifier_env *env)
13174 {
13175 	struct bpf_insn *insns = env->prog->insnsi;
13176 	int ret;
13177 
13178 	if (bpf_pseudo_func(insns + t))
13179 		return visit_func_call_insn(t, insns, env, true);
13180 
13181 	/* All non-branch instructions have a single fall-through edge. */
13182 	if (BPF_CLASS(insns[t].code) != BPF_JMP &&
13183 	    BPF_CLASS(insns[t].code) != BPF_JMP32)
13184 		return push_insn(t, t + 1, FALLTHROUGH, env, false);
13185 
13186 	switch (BPF_OP(insns[t].code)) {
13187 	case BPF_EXIT:
13188 		return DONE_EXPLORING;
13189 
13190 	case BPF_CALL:
13191 		if (insns[t].imm == BPF_FUNC_timer_set_callback)
13192 			/* Mark this call insn as a prune point to trigger
13193 			 * is_state_visited() check before the call itself is
13194 			 * processed by __check_func_call(). Otherwise a new
13195 			 * async state will be pushed for further exploration.
13196 			 */
13197 			mark_prune_point(env, t);
13198 		return visit_func_call_insn(t, insns, env,
13199 					    insns[t].src_reg == BPF_PSEUDO_CALL);
13200 
13201 	case BPF_JA:
13202 		if (BPF_SRC(insns[t].code) != BPF_K)
13203 			return -EINVAL;
13204 
13205 		/* unconditional jump with single edge */
13206 		ret = push_insn(t, t + insns[t].off + 1, FALLTHROUGH, env,
13207 				true);
13208 		if (ret)
13209 			return ret;
13210 
13211 		mark_prune_point(env, t + insns[t].off + 1);
13212 		mark_jmp_point(env, t + insns[t].off + 1);
13213 
13214 		return ret;
13215 
13216 	default:
13217 		/* conditional jump with two edges */
13218 		mark_prune_point(env, t);
13219 
13220 		ret = push_insn(t, t + 1, FALLTHROUGH, env, true);
13221 		if (ret)
13222 			return ret;
13223 
13224 		return push_insn(t, t + insns[t].off + 1, BRANCH, env, true);
13225 	}
13226 }
13227 
13228 /* non-recursive depth-first-search to detect loops in BPF program
13229  * loop == back-edge in directed graph
13230  */
13231 static int check_cfg(struct bpf_verifier_env *env)
13232 {
13233 	int insn_cnt = env->prog->len;
13234 	int *insn_stack, *insn_state;
13235 	int ret = 0;
13236 	int i;
13237 
13238 	insn_state = env->cfg.insn_state = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
13239 	if (!insn_state)
13240 		return -ENOMEM;
13241 
13242 	insn_stack = env->cfg.insn_stack = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
13243 	if (!insn_stack) {
13244 		kvfree(insn_state);
13245 		return -ENOMEM;
13246 	}
13247 
13248 	insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */
13249 	insn_stack[0] = 0; /* 0 is the first instruction */
13250 	env->cfg.cur_stack = 1;
13251 
13252 	while (env->cfg.cur_stack > 0) {
13253 		int t = insn_stack[env->cfg.cur_stack - 1];
13254 
13255 		ret = visit_insn(t, env);
13256 		switch (ret) {
13257 		case DONE_EXPLORING:
13258 			insn_state[t] = EXPLORED;
13259 			env->cfg.cur_stack--;
13260 			break;
13261 		case KEEP_EXPLORING:
13262 			break;
13263 		default:
13264 			if (ret > 0) {
13265 				verbose(env, "visit_insn internal bug\n");
13266 				ret = -EFAULT;
13267 			}
13268 			goto err_free;
13269 		}
13270 	}
13271 
13272 	if (env->cfg.cur_stack < 0) {
13273 		verbose(env, "pop stack internal bug\n");
13274 		ret = -EFAULT;
13275 		goto err_free;
13276 	}
13277 
13278 	for (i = 0; i < insn_cnt; i++) {
13279 		if (insn_state[i] != EXPLORED) {
13280 			verbose(env, "unreachable insn %d\n", i);
13281 			ret = -EINVAL;
13282 			goto err_free;
13283 		}
13284 	}
13285 	ret = 0; /* cfg looks good */
13286 
13287 err_free:
13288 	kvfree(insn_state);
13289 	kvfree(insn_stack);
13290 	env->cfg.insn_state = env->cfg.insn_stack = NULL;
13291 	return ret;
13292 }
13293 
13294 static int check_abnormal_return(struct bpf_verifier_env *env)
13295 {
13296 	int i;
13297 
13298 	for (i = 1; i < env->subprog_cnt; i++) {
13299 		if (env->subprog_info[i].has_ld_abs) {
13300 			verbose(env, "LD_ABS is not allowed in subprogs without BTF\n");
13301 			return -EINVAL;
13302 		}
13303 		if (env->subprog_info[i].has_tail_call) {
13304 			verbose(env, "tail_call is not allowed in subprogs without BTF\n");
13305 			return -EINVAL;
13306 		}
13307 	}
13308 	return 0;
13309 }
13310 
13311 /* The minimum supported BTF func info size */
13312 #define MIN_BPF_FUNCINFO_SIZE	8
13313 #define MAX_FUNCINFO_REC_SIZE	252
13314 
13315 static int check_btf_func(struct bpf_verifier_env *env,
13316 			  const union bpf_attr *attr,
13317 			  bpfptr_t uattr)
13318 {
13319 	const struct btf_type *type, *func_proto, *ret_type;
13320 	u32 i, nfuncs, urec_size, min_size;
13321 	u32 krec_size = sizeof(struct bpf_func_info);
13322 	struct bpf_func_info *krecord;
13323 	struct bpf_func_info_aux *info_aux = NULL;
13324 	struct bpf_prog *prog;
13325 	const struct btf *btf;
13326 	bpfptr_t urecord;
13327 	u32 prev_offset = 0;
13328 	bool scalar_return;
13329 	int ret = -ENOMEM;
13330 
13331 	nfuncs = attr->func_info_cnt;
13332 	if (!nfuncs) {
13333 		if (check_abnormal_return(env))
13334 			return -EINVAL;
13335 		return 0;
13336 	}
13337 
13338 	if (nfuncs != env->subprog_cnt) {
13339 		verbose(env, "number of funcs in func_info doesn't match number of subprogs\n");
13340 		return -EINVAL;
13341 	}
13342 
13343 	urec_size = attr->func_info_rec_size;
13344 	if (urec_size < MIN_BPF_FUNCINFO_SIZE ||
13345 	    urec_size > MAX_FUNCINFO_REC_SIZE ||
13346 	    urec_size % sizeof(u32)) {
13347 		verbose(env, "invalid func info rec size %u\n", urec_size);
13348 		return -EINVAL;
13349 	}
13350 
13351 	prog = env->prog;
13352 	btf = prog->aux->btf;
13353 
13354 	urecord = make_bpfptr(attr->func_info, uattr.is_kernel);
13355 	min_size = min_t(u32, krec_size, urec_size);
13356 
13357 	krecord = kvcalloc(nfuncs, krec_size, GFP_KERNEL | __GFP_NOWARN);
13358 	if (!krecord)
13359 		return -ENOMEM;
13360 	info_aux = kcalloc(nfuncs, sizeof(*info_aux), GFP_KERNEL | __GFP_NOWARN);
13361 	if (!info_aux)
13362 		goto err_free;
13363 
13364 	for (i = 0; i < nfuncs; i++) {
13365 		ret = bpf_check_uarg_tail_zero(urecord, krec_size, urec_size);
13366 		if (ret) {
13367 			if (ret == -E2BIG) {
13368 				verbose(env, "nonzero trailing record in func info");
13369 				/* set the size the kernel expects so the loader can
13370 				 * zero out the rest of the record.
13371 				 */
13372 				if (copy_to_bpfptr_offset(uattr,
13373 							  offsetof(union bpf_attr, func_info_rec_size),
13374 							  &min_size, sizeof(min_size)))
13375 					ret = -EFAULT;
13376 			}
13377 			goto err_free;
13378 		}
13379 
13380 		if (copy_from_bpfptr(&krecord[i], urecord, min_size)) {
13381 			ret = -EFAULT;
13382 			goto err_free;
13383 		}
13384 
13385 		/* check insn_off */
13386 		ret = -EINVAL;
13387 		if (i == 0) {
13388 			if (krecord[i].insn_off) {
13389 				verbose(env,
13390 					"nonzero insn_off %u for the first func info record",
13391 					krecord[i].insn_off);
13392 				goto err_free;
13393 			}
13394 		} else if (krecord[i].insn_off <= prev_offset) {
13395 			verbose(env,
13396 				"same or smaller insn offset (%u) than previous func info record (%u)",
13397 				krecord[i].insn_off, prev_offset);
13398 			goto err_free;
13399 		}
13400 
13401 		if (env->subprog_info[i].start != krecord[i].insn_off) {
13402 			verbose(env, "func_info BTF section doesn't match subprog layout in BPF program\n");
13403 			goto err_free;
13404 		}
13405 
13406 		/* check type_id */
13407 		type = btf_type_by_id(btf, krecord[i].type_id);
13408 		if (!type || !btf_type_is_func(type)) {
13409 			verbose(env, "invalid type id %d in func info",
13410 				krecord[i].type_id);
13411 			goto err_free;
13412 		}
13413 		info_aux[i].linkage = BTF_INFO_VLEN(type->info);
13414 
13415 		func_proto = btf_type_by_id(btf, type->type);
13416 		if (unlikely(!func_proto || !btf_type_is_func_proto(func_proto)))
13417 			/* btf_func_check() already verified it during BTF load */
13418 			goto err_free;
13419 		ret_type = btf_type_skip_modifiers(btf, func_proto->type, NULL);
13420 		scalar_return =
13421 			btf_type_is_small_int(ret_type) || btf_is_any_enum(ret_type);
13422 		if (i && !scalar_return && env->subprog_info[i].has_ld_abs) {
13423 			verbose(env, "LD_ABS is only allowed in functions that return 'int'.\n");
13424 			goto err_free;
13425 		}
13426 		if (i && !scalar_return && env->subprog_info[i].has_tail_call) {
13427 			verbose(env, "tail_call is only allowed in functions that return 'int'.\n");
13428 			goto err_free;
13429 		}
13430 
13431 		prev_offset = krecord[i].insn_off;
13432 		bpfptr_add(&urecord, urec_size);
13433 	}
13434 
13435 	prog->aux->func_info = krecord;
13436 	prog->aux->func_info_cnt = nfuncs;
13437 	prog->aux->func_info_aux = info_aux;
13438 	return 0;
13439 
13440 err_free:
13441 	kvfree(krecord);
13442 	kfree(info_aux);
13443 	return ret;
13444 }
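
/* A sketch of the record-size compatibility rule used above, with
 * hypothetical sizes. Suppose sizeof(struct bpf_func_info) in the kernel
 * is 8 bytes but userspace was built against a newer 16-byte layout:
 *
 *   krec_size = 8, urec_size = 16, min_size = 8
 *
 * Then bpf_check_uarg_tail_zero() demands that bytes [8, 16) of every
 * record are zero (fields unknown to this kernel must be unset), and only
 * min_size bytes are copied in. In the opposite direction (an old 8-byte
 * userspace record against a larger kernel struct) the kvcalloc()'ed
 * krecord leaves the uncopied tail zero-initialized.
 */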
13445 
13446 static void adjust_btf_func(struct bpf_verifier_env *env)
13447 {
13448 	struct bpf_prog_aux *aux = env->prog->aux;
13449 	int i;
13450 
13451 	if (!aux->func_info)
13452 		return;
13453 
13454 	for (i = 0; i < env->subprog_cnt; i++)
13455 		aux->func_info[i].insn_off = env->subprog_info[i].start;
13456 }
13457 
13458 #define MIN_BPF_LINEINFO_SIZE	offsetofend(struct bpf_line_info, line_col)
13459 #define MAX_LINEINFO_REC_SIZE	MAX_FUNCINFO_REC_SIZE
13460 
13461 static int check_btf_line(struct bpf_verifier_env *env,
13462 			  const union bpf_attr *attr,
13463 			  bpfptr_t uattr)
13464 {
13465 	u32 i, s, nr_linfo, ncopy, expected_size, rec_size, prev_offset = 0;
13466 	struct bpf_subprog_info *sub;
13467 	struct bpf_line_info *linfo;
13468 	struct bpf_prog *prog;
13469 	const struct btf *btf;
13470 	bpfptr_t ulinfo;
13471 	int err;
13472 
13473 	nr_linfo = attr->line_info_cnt;
13474 	if (!nr_linfo)
13475 		return 0;
13476 	if (nr_linfo > INT_MAX / sizeof(struct bpf_line_info))
13477 		return -EINVAL;
13478 
13479 	rec_size = attr->line_info_rec_size;
13480 	if (rec_size < MIN_BPF_LINEINFO_SIZE ||
13481 	    rec_size > MAX_LINEINFO_REC_SIZE ||
13482 	    rec_size & (sizeof(u32) - 1))
13483 		return -EINVAL;
13484 
13485 	/* Need to zero it in case userspace passes in a smaller
13486 	 * bpf_line_info object.
13487 	 */
13488 	linfo = kvcalloc(nr_linfo, sizeof(struct bpf_line_info),
13489 			 GFP_KERNEL | __GFP_NOWARN);
13490 	if (!linfo)
13491 		return -ENOMEM;
13492 
13493 	prog = env->prog;
13494 	btf = prog->aux->btf;
13495 
13496 	s = 0;
13497 	sub = env->subprog_info;
13498 	ulinfo = make_bpfptr(attr->line_info, uattr.is_kernel);
13499 	expected_size = sizeof(struct bpf_line_info);
13500 	ncopy = min_t(u32, expected_size, rec_size);
13501 	for (i = 0; i < nr_linfo; i++) {
13502 		err = bpf_check_uarg_tail_zero(ulinfo, expected_size, rec_size);
13503 		if (err) {
13504 			if (err == -E2BIG) {
13505 				verbose(env, "nonzero trailing record in line_info");
13506 				if (copy_to_bpfptr_offset(uattr,
13507 							  offsetof(union bpf_attr, line_info_rec_size),
13508 							  &expected_size, sizeof(expected_size)))
13509 					err = -EFAULT;
13510 			}
13511 			goto err_free;
13512 		}
13513 
13514 		if (copy_from_bpfptr(&linfo[i], ulinfo, ncopy)) {
13515 			err = -EFAULT;
13516 			goto err_free;
13517 		}
13518 
13519 		/*
13520 		 * Check insn_off to ensure
13521 		 * 1) strictly increasing AND
13522 		 * 2) bounded by prog->len
13523 		 *
13524 		 * The linfo[0].insn_off == 0 check logically falls into
13525 		 * the later "missing bpf_line_info for func..." case
13526 		 * because the first linfo[0].insn_off must belong to the
13527 		 * first subprog as well, and the first subprog must have
13528 		 * subprog_info[0].start == 0.
13529 		 */
13530 		if ((i && linfo[i].insn_off <= prev_offset) ||
13531 		    linfo[i].insn_off >= prog->len) {
13532 			verbose(env, "Invalid line_info[%u].insn_off:%u (prev_offset:%u prog->len:%u)\n",
13533 				i, linfo[i].insn_off, prev_offset,
13534 				prog->len);
13535 			err = -EINVAL;
13536 			goto err_free;
13537 		}
13538 
13539 		if (!prog->insnsi[linfo[i].insn_off].code) {
13540 			verbose(env,
13541 				"Invalid insn code at line_info[%u].insn_off\n",
13542 				i);
13543 			err = -EINVAL;
13544 			goto err_free;
13545 		}
13546 
13547 		if (!btf_name_by_offset(btf, linfo[i].line_off) ||
13548 		    !btf_name_by_offset(btf, linfo[i].file_name_off)) {
13549 			verbose(env, "Invalid line_info[%u].line_off or .file_name_off\n", i);
13550 			err = -EINVAL;
13551 			goto err_free;
13552 		}
13553 
13554 		if (s != env->subprog_cnt) {
13555 			if (linfo[i].insn_off == sub[s].start) {
13556 				sub[s].linfo_idx = i;
13557 				s++;
13558 			} else if (sub[s].start < linfo[i].insn_off) {
13559 				verbose(env, "missing bpf_line_info for func#%u\n", s);
13560 				err = -EINVAL;
13561 				goto err_free;
13562 			}
13563 		}
13564 
13565 		prev_offset = linfo[i].insn_off;
13566 		bpfptr_add(&ulinfo, rec_size);
13567 	}
13568 
13569 	if (s != env->subprog_cnt) {
13570 		verbose(env, "missing bpf_line_info for %u funcs starting from func#%u\n",
13571 			env->subprog_cnt - s, s);
13572 		err = -EINVAL;
13573 		goto err_free;
13574 	}
13575 
13576 	prog->aux->linfo = linfo;
13577 	prog->aux->nr_linfo = nr_linfo;
13578 
13579 	return 0;
13580 
13581 err_free:
13582 	kvfree(linfo);
13583 	return err;
13584 }
13585 
13586 #define MIN_CORE_RELO_SIZE	sizeof(struct bpf_core_relo)
13587 #define MAX_CORE_RELO_SIZE	MAX_FUNCINFO_REC_SIZE
13588 
13589 static int check_core_relo(struct bpf_verifier_env *env,
13590 			   const union bpf_attr *attr,
13591 			   bpfptr_t uattr)
13592 {
13593 	u32 i, nr_core_relo, ncopy, expected_size, rec_size;
13594 	struct bpf_core_relo core_relo = {};
13595 	struct bpf_prog *prog = env->prog;
13596 	const struct btf *btf = prog->aux->btf;
13597 	struct bpf_core_ctx ctx = {
13598 		.log = &env->log,
13599 		.btf = btf,
13600 	};
13601 	bpfptr_t u_core_relo;
13602 	int err;
13603 
13604 	nr_core_relo = attr->core_relo_cnt;
13605 	if (!nr_core_relo)
13606 		return 0;
13607 	if (nr_core_relo > INT_MAX / sizeof(struct bpf_core_relo))
13608 		return -EINVAL;
13609 
13610 	rec_size = attr->core_relo_rec_size;
13611 	if (rec_size < MIN_CORE_RELO_SIZE ||
13612 	    rec_size > MAX_CORE_RELO_SIZE ||
13613 	    rec_size % sizeof(u32))
13614 		return -EINVAL;
13615 
13616 	u_core_relo = make_bpfptr(attr->core_relos, uattr.is_kernel);
13617 	expected_size = sizeof(struct bpf_core_relo);
13618 	ncopy = min_t(u32, expected_size, rec_size);
13619 
13620 	/* Unlike func_info and line_info, copy and apply each CO-RE
13621 	 * relocation record one at a time.
13622 	 */
13623 	for (i = 0; i < nr_core_relo; i++) {
13624 		/* future-proofing for when sizeof(bpf_core_relo) changes */
13625 		err = bpf_check_uarg_tail_zero(u_core_relo, expected_size, rec_size);
13626 		if (err) {
13627 			if (err == -E2BIG) {
13628 				verbose(env, "nonzero trailing record in core_relo");
13629 				if (copy_to_bpfptr_offset(uattr,
13630 							  offsetof(union bpf_attr, core_relo_rec_size),
13631 							  &expected_size, sizeof(expected_size)))
13632 					err = -EFAULT;
13633 			}
13634 			break;
13635 		}
13636 
13637 		if (copy_from_bpfptr(&core_relo, u_core_relo, ncopy)) {
13638 			err = -EFAULT;
13639 			break;
13640 		}
13641 
13642 		if (core_relo.insn_off % 8 || core_relo.insn_off / 8 >= prog->len) {
13643 			verbose(env, "Invalid core_relo[%u].insn_off:%u prog->len:%u\n",
13644 				i, core_relo.insn_off, prog->len);
13645 			err = -EINVAL;
13646 			break;
13647 		}
13648 
13649 		err = bpf_core_apply(&ctx, &core_relo, i,
13650 				     &prog->insnsi[core_relo.insn_off / 8]);
13651 		if (err)
13652 			break;
13653 		bpfptr_add(&u_core_relo, rec_size);
13654 	}
13655 	return err;
13656 }
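
/* Note that, unlike func_info and line_info, core_relo.insn_off is a byte
 * offset rather than an instruction index: every BPF instruction is 8
 * bytes, so e.g. insn_off == 24 targets prog->insnsi[3], while an
 * unaligned value such as 25 fails the "insn_off % 8" check above.
 */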
13657 
13658 static int check_btf_info(struct bpf_verifier_env *env,
13659 			  const union bpf_attr *attr,
13660 			  bpfptr_t uattr)
13661 {
13662 	struct btf *btf;
13663 	int err;
13664 
13665 	if (!attr->func_info_cnt && !attr->line_info_cnt) {
13666 		if (check_abnormal_return(env))
13667 			return -EINVAL;
13668 		return 0;
13669 	}
13670 
13671 	btf = btf_get_by_fd(attr->prog_btf_fd);
13672 	if (IS_ERR(btf))
13673 		return PTR_ERR(btf);
13674 	if (btf_is_kernel(btf)) {
13675 		btf_put(btf);
13676 		return -EACCES;
13677 	}
13678 	env->prog->aux->btf = btf;
13679 
13680 	err = check_btf_func(env, attr, uattr);
13681 	if (err)
13682 		return err;
13683 
13684 	err = check_btf_line(env, attr, uattr);
13685 	if (err)
13686 		return err;
13687 
13688 	err = check_core_relo(env, attr, uattr);
13689 	if (err)
13690 		return err;
13691 
13692 	return 0;
13693 }
13694 
13695 /* check %cur's range satisfies %old's */
13696 static bool range_within(struct bpf_reg_state *old,
13697 			 struct bpf_reg_state *cur)
13698 {
13699 	return old->umin_value <= cur->umin_value &&
13700 	       old->umax_value >= cur->umax_value &&
13701 	       old->smin_value <= cur->smin_value &&
13702 	       old->smax_value >= cur->smax_value &&
13703 	       old->u32_min_value <= cur->u32_min_value &&
13704 	       old->u32_max_value >= cur->u32_max_value &&
13705 	       old->s32_min_value <= cur->s32_min_value &&
13706 	       old->s32_max_value >= cur->s32_max_value;
13707 }
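
/* A worked example with hypothetical bounds: if the old (already verified)
 * register had umin/umax == [0, 100], a current register with [10, 20]
 * satisfies the unsigned checks above, while [10, 200] does not: an access
 * proven safe for values up to 100 says nothing about 200. All four bound
 * pairs (u64, s64, u32, s32) must narrow for range_within() to hold.
 */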
13708 
13709 /* If in the old state two registers had the same id, then they need to have
13710  * the same id in the new state as well.  But that id could be different from
13711  * the old state, so we need to track the mapping from old to new ids.
13712  * Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent
13713  * regs with old id 5 must also have new id 9 for the new state to be safe.  But
13714  * regs with a different old id could still have new id 9, we don't care about
13715  * that.
13716  * So we look through our idmap to see if this old id has been seen before.  If
13717  * so, we require the new id to match; otherwise, we add the id pair to the map.
13718  */
13719 static bool check_ids(u32 old_id, u32 cur_id, struct bpf_id_pair *idmap)
13720 {
13721 	unsigned int i;
13722 
13723 	/* either both IDs should be set or both should be zero */
13724 	if (!!old_id != !!cur_id)
13725 		return false;
13726 
13727 	if (old_id == 0) /* cur_id == 0 as well */
13728 		return true;
13729 
13730 	for (i = 0; i < BPF_ID_MAP_SIZE; i++) {
13731 		if (!idmap[i].old) {
13732 			/* Reached an empty slot; haven't seen this id before */
13733 			idmap[i].old = old_id;
13734 			idmap[i].cur = cur_id;
13735 			return true;
13736 		}
13737 		if (idmap[i].old == old_id)
13738 			return idmap[i].cur == cur_id;
13739 	}
13740 	/* We ran out of idmap slots, which should be impossible */
13741 	WARN_ON_ONCE(1);
13742 	return false;
13743 }
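
/* Example sequence with hypothetical ids and an initially empty idmap:
 *
 *   check_ids(5, 9, idmap) -> true,  records the pair {old 5 -> cur 9}
 *   check_ids(5, 9, idmap) -> true,  5 already maps to 9
 *   check_ids(5, 7, idmap) -> false, 5 must keep mapping to 9
 *   check_ids(3, 9, idmap) -> true,  a different old id may also map to 9
 *   check_ids(0, 0, idmap) -> true,  unset ids trivially match
 *   check_ids(0, 9, idmap) -> false, both must be set or both zero
 */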
13744 
13745 static void clean_func_state(struct bpf_verifier_env *env,
13746 			     struct bpf_func_state *st)
13747 {
13748 	enum bpf_reg_liveness live;
13749 	int i, j;
13750 
13751 	for (i = 0; i < BPF_REG_FP; i++) {
13752 		live = st->regs[i].live;
13753 		/* liveness must not touch this register anymore */
13754 		st->regs[i].live |= REG_LIVE_DONE;
13755 		if (!(live & REG_LIVE_READ))
13756 			/* since the register is unused, clear its state
13757 			 * to make further comparison simpler
13758 			 */
13759 			__mark_reg_not_init(env, &st->regs[i]);
13760 	}
13761 
13762 	for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) {
13763 		live = st->stack[i].spilled_ptr.live;
13764 		/* liveness must not touch this stack slot anymore */
13765 		st->stack[i].spilled_ptr.live |= REG_LIVE_DONE;
13766 		if (!(live & REG_LIVE_READ)) {
13767 			__mark_reg_not_init(env, &st->stack[i].spilled_ptr);
13768 			for (j = 0; j < BPF_REG_SIZE; j++)
13769 				st->stack[i].slot_type[j] = STACK_INVALID;
13770 		}
13771 	}
13772 }
13773 
13774 static void clean_verifier_state(struct bpf_verifier_env *env,
13775 				 struct bpf_verifier_state *st)
13776 {
13777 	int i;
13778 
13779 	if (st->frame[0]->regs[0].live & REG_LIVE_DONE)
13780 		/* all regs in this state in all frames were already marked */
13781 		return;
13782 
13783 	for (i = 0; i <= st->curframe; i++)
13784 		clean_func_state(env, st->frame[i]);
13785 }
13786 
13787 /* The parentage chains form a tree.
13788  * The verifier states are added to state lists at a given insn and
13789  * pushed onto the state stack for future exploration.
13790  * When the verifier reaches a bpf_exit insn, some of the verifier states
13791  * stored in the state lists have their final liveness state already,
13792  * but a lot of states will get revised from a liveness point of view when
13793  * the verifier explores other branches.
13794  * Example:
13795  * 1: r0 = 1
13796  * 2: if r1 == 100 goto pc+1
13797  * 3: r0 = 2
13798  * 4: exit
13799  * When the verifier reaches the exit insn, the register r0 in the state list
13800  * of insn 2 will be seen as !REG_LIVE_READ. Then the verifier pops the
13801  * other_branch of insn 2 and goes exploring further. At insn 4 it will walk
13802  * the parentage chain from insn 4 into insn 2 and will mark r0 as REG_LIVE_READ.
13803  *
13804  * Since the verifier pushes the branch states as it sees them while exploring
13805  * the program, walking the branch instruction for the second
13806  * time means that all states below this branch were already explored and
13807  * their final liveness marks are already propagated.
13808  * Hence when the verifier completes the search of the state list in
13809  * is_state_visited() we can call this clean_live_states() function to mark all
13810  * liveness states as REG_LIVE_DONE to indicate that the 'parent' pointers of
13811  * 'struct bpf_reg_state' will not be used.
13812  * This function also clears the registers and stack slots that were never
13813  * read (!REG_LIVE_READ) to simplify state merging.
13814  *
13815  * An important note here is that walking the same branch instruction in the
13816  * callee doesn't mean that the states are DONE. The verifier has to compare
13817  * the callsites as well.
13818  */
13819 static void clean_live_states(struct bpf_verifier_env *env, int insn,
13820 			      struct bpf_verifier_state *cur)
13821 {
13822 	struct bpf_verifier_state_list *sl;
13823 	int i;
13824 
13825 	sl = *explored_state(env, insn);
13826 	while (sl) {
13827 		if (sl->state.branches)
13828 			goto next;
13829 		if (sl->state.insn_idx != insn ||
13830 		    sl->state.curframe != cur->curframe)
13831 			goto next;
13832 		for (i = 0; i <= cur->curframe; i++)
13833 			if (sl->state.frame[i]->callsite != cur->frame[i]->callsite)
13834 				goto next;
13835 		clean_verifier_state(env, &sl->state);
13836 next:
13837 		sl = sl->next;
13838 	}
13839 }
13840 
13841 static bool regs_exact(const struct bpf_reg_state *rold,
13842 		       const struct bpf_reg_state *rcur,
13843 		       struct bpf_id_pair *idmap)
13844 {
13845 	return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 &&
13846 	       check_ids(rold->id, rcur->id, idmap) &&
13847 	       check_ids(rold->ref_obj_id, rcur->ref_obj_id, idmap);
13848 }
13849 
13850 /* Returns true if (rold safe implies rcur safe) */
13851 static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold,
13852 		    struct bpf_reg_state *rcur, struct bpf_id_pair *idmap)
13853 {
13854 	if (!(rold->live & REG_LIVE_READ))
13855 		/* explored state didn't use this */
13856 		return true;
13857 	if (rold->type == NOT_INIT)
13858 		/* explored state can't have used this */
13859 		return true;
13860 	if (rcur->type == NOT_INIT)
13861 		return false;
13862 
13863 	/* Enforce that register types have to match exactly, including their
13864 	 * modifiers (like PTR_MAYBE_NULL, MEM_RDONLY, etc), as a general
13865 	 * rule.
13866 	 *
13867 	 * One can make a point that using a pointer register as unbounded
13868 	 * SCALAR would be technically acceptable, but this could lead to
13869 	 * pointer leaks because scalars are allowed to leak while pointers
13870 	 * are not. We could make this safe in special cases if root is
13871 	 * calling us, but it's probably not worth the hassle.
13872 	 *
13873 	 * Also, register types that are *not* MAYBE_NULL could technically be
13874 	 * safe to use as their MAYBE_NULL variants (e.g., PTR_TO_MAP_VALUE
13875 	 * is safe to be used as PTR_TO_MAP_VALUE_OR_NULL, provided both point
13876 	 * to the same map).
13877 	 * However, if the old MAYBE_NULL register then got NULL checked,
13878 	 * doing so could have affected others with the same id, and we can't
13879 	 * check for that because we lost the id when we converted to
13880 	 * a non-MAYBE_NULL variant.
13881 	 * So, as a general rule we don't allow mixing MAYBE_NULL and
13882 	 * non-MAYBE_NULL registers as well.
13883 	 */
13884 	if (rold->type != rcur->type)
13885 		return false;
13886 
13887 	switch (base_type(rold->type)) {
13888 	case SCALAR_VALUE:
13889 		if (regs_exact(rold, rcur, idmap))
13890 			return true;
13891 		if (env->explore_alu_limits)
13892 			return false;
13893 		if (!rold->precise)
13894 			return true;
13895 		/* new val must satisfy old val knowledge */
13896 		return range_within(rold, rcur) &&
13897 		       tnum_in(rold->var_off, rcur->var_off);
13898 	case PTR_TO_MAP_KEY:
13899 	case PTR_TO_MAP_VALUE:
13900 		/* If the new min/max/var_off satisfy the old ones and
13901 		 * everything else matches, we are OK.
13902 		 */
13903 		return memcmp(rold, rcur, offsetof(struct bpf_reg_state, var_off)) == 0 &&
13904 		       range_within(rold, rcur) &&
13905 		       tnum_in(rold->var_off, rcur->var_off) &&
13906 		       check_ids(rold->id, rcur->id, idmap);
13907 	case PTR_TO_PACKET_META:
13908 	case PTR_TO_PACKET:
13909 		/* We must have at least as much range as the old ptr
13910 		 * did, so that any accesses which were safe before are
13911 		 * still safe.  This is true even if old range < old off,
13912 		 * since someone could have accessed through (ptr - k), or
13913 		 * even done ptr -= k in a register, to get a safe access.
13914 		 */
13915 		if (rold->range > rcur->range)
13916 			return false;
13917 		/* If the offsets don't match, we can't trust our alignment;
13918 		 * nor can we be sure that we won't fall out of range.
13919 		 */
13920 		if (rold->off != rcur->off)
13921 			return false;
13922 		/* id relations must be preserved */
13923 		if (!check_ids(rold->id, rcur->id, idmap))
13924 			return false;
13925 		/* new val must satisfy old val knowledge */
13926 		return range_within(rold, rcur) &&
13927 		       tnum_in(rold->var_off, rcur->var_off);
13928 	case PTR_TO_STACK:
13929 		/* two stack pointers are equal only if they're pointing to
13930 		 * the same stack frame, since fp-8 in foo != fp-8 in bar
13931 		 */
13932 		return regs_exact(rold, rcur, idmap) && rold->frameno == rcur->frameno;
13933 	default:
13934 		return regs_exact(rold, rcur, idmap);
13935 	}
13936 }
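
/* Example for the PTR_TO_PACKET case above, with hypothetical values: if
 * the old state had range == 16 (accesses up to 16 bytes past the pointer
 * were proven in-bounds), a current state with range == 32 is acceptable,
 * since it permits at least the same accesses. A current range == 8 is
 * not: the already-verified continuation may read at offset 12, which the
 * current state cannot guarantee to be in-bounds.
 */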
13937 
13938 static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old,
13939 		      struct bpf_func_state *cur, struct bpf_id_pair *idmap)
13940 {
13941 	int i, spi;
13942 
13943 	/* walk slots of the explored stack and ignore any additional
13944 	 * slots in the current stack, since explored(safe) state
13945 	 * didn't use them
13946 	 */
13947 	for (i = 0; i < old->allocated_stack; i++) {
13948 		spi = i / BPF_REG_SIZE;
13949 
13950 		if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ)) {
13951 			i += BPF_REG_SIZE - 1;
13952 			/* explored state didn't use this */
13953 			continue;
13954 		}
13955 
13956 		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID)
13957 			continue;
13958 
13959 		if (env->allow_uninit_stack &&
13960 		    old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC)
13961 			continue;
13962 
13963 		/* explored stack has more populated slots than current stack
13964 		 * and these slots were used
13965 		 */
13966 		if (i >= cur->allocated_stack)
13967 			return false;
13968 
13969 		/* if old state was safe with misc data in the stack
13970 		 * it will be safe with zero-initialized stack.
13971 		 * The opposite is not true
13972 		 */
13973 		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC &&
13974 		    cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO)
13975 			continue;
13976 		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] !=
13977 		    cur->stack[spi].slot_type[i % BPF_REG_SIZE])
13978 			/* Ex: old explored (safe) state has STACK_SPILL in
13979 			 * this stack slot, but current has STACK_MISC ->
13980 			 * these verifier states are not equivalent;
13981 			 * return false to continue verification of this path
13982 			 */
13983 			return false;
13984 		if (i % BPF_REG_SIZE != BPF_REG_SIZE - 1)
13985 			continue;
13986 		/* Both old and cur have the same slot_type */
13987 		switch (old->stack[spi].slot_type[BPF_REG_SIZE - 1]) {
13988 		case STACK_SPILL:
13989 			/* when explored and current stack slot are both storing
13990 			 * spilled registers, check that the stored pointer types
13991 			 * are the same as well.
13992 			 * Ex: the explored safe path could have stored
13993 			 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8}
13994 			 * but the current path has stored:
13995 			 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16}
13996 			 * Such verifier states are not equivalent;
13997 			 * return false to continue verification of this path
13998 			 */
13999 			if (!regsafe(env, &old->stack[spi].spilled_ptr,
14000 				     &cur->stack[spi].spilled_ptr, idmap))
14001 				return false;
14002 			break;
14003 		case STACK_DYNPTR:
14004 		{
14005 			const struct bpf_reg_state *old_reg, *cur_reg;
14006 
14007 			old_reg = &old->stack[spi].spilled_ptr;
14008 			cur_reg = &cur->stack[spi].spilled_ptr;
14009 			if (old_reg->dynptr.type != cur_reg->dynptr.type ||
14010 			    old_reg->dynptr.first_slot != cur_reg->dynptr.first_slot ||
14011 			    !check_ids(old_reg->ref_obj_id, cur_reg->ref_obj_id, idmap))
14012 				return false;
14013 			break;
14014 		}
14015 		case STACK_MISC:
14016 		case STACK_ZERO:
14017 		case STACK_INVALID:
14018 			continue;
14019 		/* Ensure that new unhandled slot types return false by default */
14020 		default:
14021 			return false;
14022 		}
14023 	}
14024 	return true;
14025 }
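
/* Example of the STACK_MISC/STACK_ZERO asymmetry handled above: if the
 * already-verified (old) path was proven safe with arbitrary bytes
 * (STACK_MISC) in a slot, a current path that wrote explicit zeroes
 * (STACK_ZERO) there is a special case of that and can be pruned. The
 * reverse does not hold: a continuation verified against a known-zero
 * slot may rely on reading 0 back (a hypothetical example would be using
 * it as a proven-zero scalar), so old STACK_ZERO vs cur STACK_MISC
 * compares unequal.
 */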
14026 
14027 static bool refsafe(struct bpf_func_state *old, struct bpf_func_state *cur,
14028 		    struct bpf_id_pair *idmap)
14029 {
14030 	int i;
14031 
14032 	if (old->acquired_refs != cur->acquired_refs)
14033 		return false;
14034 
14035 	for (i = 0; i < old->acquired_refs; i++) {
14036 		if (!check_ids(old->refs[i].id, cur->refs[i].id, idmap))
14037 			return false;
14038 	}
14039 
14040 	return true;
14041 }
14042 
14043 /* compare two verifier states
14044  *
14045  * all states stored in state_list are known to be valid, since
14046  * verifier reached 'bpf_exit' instruction through them
14047  *
14048  * this function is called when the verifier explores different branches of
14049  * execution popped from the state stack. If it sees an old state that has
14050  * a more strict register state and a more strict stack state, then this
14051  * execution branch doesn't need to be explored further, since the verifier
14052  * already concluded that a more strict state leads to a valid finish.
14053  *
14054  * Therefore two states are equivalent if register state is more conservative
14055  * and explored stack state is more conservative than the current one.
14056  * Example:
14057  *       explored                   current
14058  * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC)
14059  * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC)
14060  *
14061  * In other words if current stack state (one being explored) has more
14062  * valid slots than old one that already passed validation, it means
14063  * the verifier can stop exploring and conclude that current state is valid too
14064  *
14065  * Similarly with registers. If explored state has register type as invalid
14066  * whereas register type in current state is meaningful, it means that
14067  * the current state will reach 'bpf_exit' instruction safely
14068  */
14069 static bool func_states_equal(struct bpf_verifier_env *env, struct bpf_func_state *old,
14070 			      struct bpf_func_state *cur)
14071 {
14072 	int i;
14073 
14074 	for (i = 0; i < MAX_BPF_REG; i++)
14075 		if (!regsafe(env, &old->regs[i], &cur->regs[i],
14076 			     env->idmap_scratch))
14077 			return false;
14078 
14079 	if (!stacksafe(env, old, cur, env->idmap_scratch))
14080 		return false;
14081 
14082 	if (!refsafe(old, cur, env->idmap_scratch))
14083 		return false;
14084 
14085 	return true;
14086 }
14087 
14088 static bool states_equal(struct bpf_verifier_env *env,
14089 			 struct bpf_verifier_state *old,
14090 			 struct bpf_verifier_state *cur)
14091 {
14092 	int i;
14093 
14094 	if (old->curframe != cur->curframe)
14095 		return false;
14096 
14097 	memset(env->idmap_scratch, 0, sizeof(env->idmap_scratch));
14098 
14099 	/* Verification state from speculative execution simulation
14100 	 * must never prune a non-speculative execution one.
14101 	 */
14102 	if (old->speculative && !cur->speculative)
14103 		return false;
14104 
14105 	if (old->active_lock.ptr != cur->active_lock.ptr)
14106 		return false;
14107 
14108 	/* The old and cur active_locks have to be either both present
14109 	 * or both absent.
14110 	 */
14111 	if (!!old->active_lock.id != !!cur->active_lock.id)
14112 		return false;
14113 
14114 	if (old->active_lock.id &&
14115 	    !check_ids(old->active_lock.id, cur->active_lock.id, env->idmap_scratch))
14116 		return false;
14117 
14118 	if (old->active_rcu_lock != cur->active_rcu_lock)
14119 		return false;
14120 
14121 	/* for states to be equal callsites have to be the same
14122 	 * and all frame states need to be equivalent
14123 	 */
14124 	for (i = 0; i <= old->curframe; i++) {
14125 		if (old->frame[i]->callsite != cur->frame[i]->callsite)
14126 			return false;
14127 		if (!func_states_equal(env, old->frame[i], cur->frame[i]))
14128 			return false;
14129 	}
14130 	return true;
14131 }
14132 
14133 /* Return 0 if no propagation happened. Return negative error code if error
14134  * happened. Otherwise, return the propagated bit.
14135  */
14136 static int propagate_liveness_reg(struct bpf_verifier_env *env,
14137 				  struct bpf_reg_state *reg,
14138 				  struct bpf_reg_state *parent_reg)
14139 {
14140 	u8 parent_flag = parent_reg->live & REG_LIVE_READ;
14141 	u8 flag = reg->live & REG_LIVE_READ;
14142 	int err;
14143 
14144 	/* When we get here, the read flags of PARENT_REG or REG could be any of
14145 	 * REG_LIVE_READ64, REG_LIVE_READ32, REG_LIVE_NONE. There is no need
14146 	 * for propagation if PARENT_REG has the strongest flag, REG_LIVE_READ64.
14147 	 */
14148 	if (parent_flag == REG_LIVE_READ64 ||
14149 	    /* Or if there is no read flag from REG. */
14150 	    !flag ||
14151 	    /* Or if the read flag from REG is the same as PARENT_REG. */
14152 	    parent_flag == flag)
14153 		return 0;
14154 
14155 	err = mark_reg_read(env, reg, parent_reg, flag);
14156 	if (err)
14157 		return err;
14158 
14159 	return flag;
14160 }
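
/* Sketch of the resulting propagation table, where the read flags are
 * ordered REG_LIVE_NONE < REG_LIVE_READ32 < REG_LIVE_READ64:
 *
 *   parent READ64, reg READ32  -> 0       (parent already strongest)
 *   parent NONE,   reg NONE    -> 0       (nothing was read)
 *   parent READ32, reg READ32  -> 0       (flags equal)
 *   parent NONE,   reg READ32  -> READ32  (propagated to parent)
 *   parent READ32, reg READ64  -> READ64  (parent upgraded)
 *
 * A REG_LIVE_READ64 return value lets the caller request a zero extension
 * via mark_insn_zext() for the parent's 32-bit defining insn.
 */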
14161 
14162 /* A write screens off any subsequent reads; but write marks come from the
14163  * straight-line code between a state and its parent.  When we arrive at an
14164  * equivalent state (jump target or such) we didn't arrive by the straight-line
14165  * code, so read marks in the state must propagate to the parent regardless
14166  * of the state's write marks. That's what 'parent == state->parent' comparison
14167  * in mark_reg_read() is for.
14168  */
14169 static int propagate_liveness(struct bpf_verifier_env *env,
14170 			      const struct bpf_verifier_state *vstate,
14171 			      struct bpf_verifier_state *vparent)
14172 {
14173 	struct bpf_reg_state *state_reg, *parent_reg;
14174 	struct bpf_func_state *state, *parent;
14175 	int i, frame, err = 0;
14176 
14177 	if (vparent->curframe != vstate->curframe) {
14178 		WARN(1, "propagate_live: parent frame %d current frame %d\n",
14179 		     vparent->curframe, vstate->curframe);
14180 		return -EFAULT;
14181 	}
14182 	/* Propagate read liveness of registers... */
14183 	BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
14184 	for (frame = 0; frame <= vstate->curframe; frame++) {
14185 		parent = vparent->frame[frame];
14186 		state = vstate->frame[frame];
14187 		parent_reg = parent->regs;
14188 		state_reg = state->regs;
14189 		/* We don't need to worry about FP liveness, it's read-only */
14190 		for (i = frame < vstate->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++) {
14191 			err = propagate_liveness_reg(env, &state_reg[i],
14192 						     &parent_reg[i]);
14193 			if (err < 0)
14194 				return err;
14195 			if (err == REG_LIVE_READ64)
14196 				mark_insn_zext(env, &parent_reg[i]);
14197 		}
14198 
14199 		/* Propagate stack slots. */
14200 		for (i = 0; i < state->allocated_stack / BPF_REG_SIZE &&
14201 			    i < parent->allocated_stack / BPF_REG_SIZE; i++) {
14202 			parent_reg = &parent->stack[i].spilled_ptr;
14203 			state_reg = &state->stack[i].spilled_ptr;
14204 			err = propagate_liveness_reg(env, state_reg,
14205 						     parent_reg);
14206 			if (err < 0)
14207 				return err;
14208 		}
14209 	}
14210 	return 0;
14211 }
14212 
14213 /* find precise scalars in the previous equivalent state and
14214  * propagate them into the current state
14215  */
14216 static int propagate_precision(struct bpf_verifier_env *env,
14217 			       const struct bpf_verifier_state *old)
14218 {
14219 	struct bpf_reg_state *state_reg;
14220 	struct bpf_func_state *state;
14221 	int i, err = 0, fr;
14222 
14223 	for (fr = old->curframe; fr >= 0; fr--) {
14224 		state = old->frame[fr];
14225 		state_reg = state->regs;
14226 		for (i = 0; i < BPF_REG_FP; i++, state_reg++) {
14227 			if (state_reg->type != SCALAR_VALUE ||
14228 			    !state_reg->precise)
14229 				continue;
14230 			if (env->log.level & BPF_LOG_LEVEL2)
14231 				verbose(env, "frame %d: propagating r%d\n", fr, i);
14232 			err = mark_chain_precision_frame(env, fr, i);
14233 			if (err < 0)
14234 				return err;
14235 		}
14236 
14237 		for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
14238 			if (!is_spilled_reg(&state->stack[i]))
14239 				continue;
14240 			state_reg = &state->stack[i].spilled_ptr;
14241 			if (state_reg->type != SCALAR_VALUE ||
14242 			    !state_reg->precise)
14243 				continue;
14244 			if (env->log.level & BPF_LOG_LEVEL2)
14245 				verbose(env, "frame %d: propagating fp%d\n",
14246 					fr, (-i - 1) * BPF_REG_SIZE);
14247 			err = mark_chain_precision_stack_frame(env, fr, i);
14248 			if (err < 0)
14249 				return err;
14250 		}
14251 	}
14252 	return 0;
14253 }
14254 
14255 static bool states_maybe_looping(struct bpf_verifier_state *old,
14256 				 struct bpf_verifier_state *cur)
14257 {
14258 	struct bpf_func_state *fold, *fcur;
14259 	int i, fr = cur->curframe;
14260 
14261 	if (old->curframe != fr)
14262 		return false;
14263 
14264 	fold = old->frame[fr];
14265 	fcur = cur->frame[fr];
14266 	for (i = 0; i < MAX_BPF_REG; i++)
14267 		if (memcmp(&fold->regs[i], &fcur->regs[i],
14268 			   offsetof(struct bpf_reg_state, parent)))
14269 			return false;
14270 	return true;
14271 }
14272 
14273 
14274 static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
14275 {
14276 	struct bpf_verifier_state_list *new_sl;
14277 	struct bpf_verifier_state_list *sl, **pprev;
14278 	struct bpf_verifier_state *cur = env->cur_state, *new;
14279 	int i, j, err, states_cnt = 0;
14280 	bool add_new_state = env->test_state_freq ? true : false;
14281 
14282 	/* bpf progs typically have a pruning point every 4 instructions
14283 	 * http://vger.kernel.org/bpfconf2019.html#session-1
14284 	 * Do not add a new state for future pruning if the verifier hasn't seen
14285 	 * at least 2 jumps and at least 8 instructions.
14286 	 * This heuristic helps decrease the 'total_states' and 'peak_states' metrics.
14287 	 * In tests that amounts to up to a 50% reduction in total verifier
14288 	 * memory consumption and a 20% verifier time speedup.
14289 	 */
14290 	if (env->jmps_processed - env->prev_jmps_processed >= 2 &&
14291 	    env->insn_processed - env->prev_insn_processed >= 8)
14292 		add_new_state = true;
14293 
14294 	pprev = explored_state(env, insn_idx);
14295 	sl = *pprev;
14296 
14297 	clean_live_states(env, insn_idx, cur);
14298 
14299 	while (sl) {
14300 		states_cnt++;
14301 		if (sl->state.insn_idx != insn_idx)
14302 			goto next;
14303 
14304 		if (sl->state.branches) {
14305 			struct bpf_func_state *frame = sl->state.frame[sl->state.curframe];
14306 
14307 			if (frame->in_async_callback_fn &&
14308 			    frame->async_entry_cnt != cur->frame[cur->curframe]->async_entry_cnt) {
14309 				/* A different async_entry_cnt means that the verifier is
14310 				 * processing another entry into the async callback.
14311 				 * Seeing the same state is not an indication of an infinite
14312 				 * loop or infinite recursion.
14313 				 * But finding the same state doesn't mean that it's safe
14314 				 * to stop processing the current state. The previous state
14315 				 * hasn't yet reached bpf_exit, since state.branches > 0.
14316 				 * Checking in_async_callback_fn alone is not enough either,
14317 				 * since the verifier still needs to catch infinite loops
14318 				 * inside async callbacks.
14319 				 */
14320 			} else if (states_maybe_looping(&sl->state, cur) &&
14321 				   states_equal(env, &sl->state, cur)) {
14322 				verbose_linfo(env, insn_idx, "; ");
14323 				verbose(env, "infinite loop detected at insn %d\n", insn_idx);
14324 				return -EINVAL;
14325 			}
14326 			/* if the verifier is processing a loop, avoid adding a new state
14327 			 * too often, since different loop iterations have distinct
14328 			 * states and may not help future pruning.
14329 			 * This threshold shouldn't be too low, to make sure that
14330 			 * a loop with a large bound will be rejected quickly.
14331 			 * The most abusive loop will be:
14332 			 * r1 += 1
14333 			 * if r1 < 1000000 goto pc-2
14334 			 * 1M insn_processed limit / 100 == 10k peak states.
14335 			 * This threshold shouldn't be too high either, since states
14336 			 * at the end of the loop are likely to be useful in pruning.
14337 			 */
14338 			if (env->jmps_processed - env->prev_jmps_processed < 20 &&
14339 			    env->insn_processed - env->prev_insn_processed < 100)
14340 				add_new_state = false;
14341 			goto miss;
14342 		}
14343 		if (states_equal(env, &sl->state, cur)) {
14344 			sl->hit_cnt++;
14345 			/* reached equivalent register/stack state,
14346 			 * prune the search.
14347 			 * Registers read by the continuation are read by us.
14348 			 * If we have any write marks in env->cur_state, they
14349 			 * will prevent corresponding reads in the continuation
14350 			 * from reaching our parent (an explored_state).  Our
14351 			 * own state will get the read marks recorded, but
14352 			 * they'll be immediately forgotten as we're pruning
14353 			 * this state and will pop a new one.
14354 			 */
14355 			err = propagate_liveness(env, &sl->state, cur);
14356 
14357 			/* if the previous state reached the exit with precision and
14358 			 * the current state is equivalent to it (except for precision
14359 			 * marks), the precision needs to be propagated back into
14360 			 * the current state.
14361 			 */
14362 			err = err ? : push_jmp_history(env, cur);
14363 			err = err ? : propagate_precision(env, &sl->state);
14364 			if (err)
14365 				return err;
14366 			return 1;
14367 		}
14368 miss:
14369 		/* when a new state is not going to be added, do not increase the
14370 		 * miss count. Otherwise several loop iterations will remove the state
14371 		 * recorded earlier. The goal of these heuristics is to have
14372 		 * states from some iterations of the loop (some at the beginning
14373 		 * and some at the end) to help pruning.
14374 		 */
14375 		if (add_new_state)
14376 			sl->miss_cnt++;
14377 		/* heuristic to determine whether this state is beneficial
14378 		 * to keep checking from state equivalence point of view.
14379 		 * Higher numbers increase max_states_per_insn and verification time,
14380 		 * but do not meaningfully decrease insn_processed.
14381 		 */
14382 		if (sl->miss_cnt > sl->hit_cnt * 3 + 3) {
14383 			/* the state is unlikely to be useful. Remove it to
14384 			 * speed up verification
14385 			 */
14386 			*pprev = sl->next;
14387 			if (sl->state.frame[0]->regs[0].live & REG_LIVE_DONE) {
14388 				u32 br = sl->state.branches;
14389 
14390 				WARN_ONCE(br,
14391 					  "BUG live_done but branches_to_explore %d\n",
14392 					  br);
14393 				free_verifier_state(&sl->state, false);
14394 				kfree(sl);
14395 				env->peak_states--;
14396 			} else {
14397 				/* cannot free this state, since the parentage chain may
14398 				 * walk it later. Add it to the free_list instead, to
14399 				 * be freed at the end of verification.
14400 				 */
14401 				sl->next = env->free_list;
14402 				env->free_list = sl;
14403 			}
14404 			sl = *pprev;
14405 			continue;
14406 		}
14407 next:
14408 		pprev = &sl->next;
14409 		sl = *pprev;
14410 	}
14411 
14412 	if (env->max_states_per_insn < states_cnt)
14413 		env->max_states_per_insn = states_cnt;
14414 
14415 	if (!env->bpf_capable && states_cnt > BPF_COMPLEXITY_LIMIT_STATES)
14416 		return 0;
14417 
14418 	if (!add_new_state)
14419 		return 0;
14420 
14421 	/* There were no equivalent states, remember the current one.
14422 	 * Technically the current state is not proven to be safe yet,
14423 	 * but it will either reach the outermost bpf_exit (which means it's safe)
14424 	 * or it will be rejected. When there are no loops the verifier won't be
14425 	 * seeing this tuple (frame[0].callsite, frame[1].callsite, .. insn_idx)
14426 	 * again on the way to bpf_exit.
14427 	 * When looping, sl->state.branches will be > 0 and this state
14428 	 * will not be considered for equivalence until branches == 0.
14429 	 */
14430 	new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL);
14431 	if (!new_sl)
14432 		return -ENOMEM;
14433 	env->total_states++;
14434 	env->peak_states++;
14435 	env->prev_jmps_processed = env->jmps_processed;
14436 	env->prev_insn_processed = env->insn_processed;
14437 
14438 	/* forget precise markings we inherited, see __mark_chain_precision */
14439 	if (env->bpf_capable)
14440 		mark_all_scalars_imprecise(env, cur);
14441 
14442 	/* add new state to the head of linked list */
14443 	new = &new_sl->state;
14444 	err = copy_verifier_state(new, cur);
14445 	if (err) {
14446 		free_verifier_state(new, false);
14447 		kfree(new_sl);
14448 		return err;
14449 	}
14450 	new->insn_idx = insn_idx;
14451 	WARN_ONCE(new->branches != 1,
14452 		  "BUG is_state_visited:branches_to_explore=%d insn %d\n", new->branches, insn_idx);
14453 
14454 	cur->parent = new;
14455 	cur->first_insn_idx = insn_idx;
14456 	clear_jmp_history(cur);
14457 	new_sl->next = *explored_state(env, insn_idx);
14458 	*explored_state(env, insn_idx) = new_sl;
14459 	/* connect new state to parentage chain. Current frame needs all
14460 	 * registers connected. Only r6 - r9 of the callers are alive (pushed
14461 	 * to the stack implicitly by JITs) so in callers' frames connect just
14462 	 * r6 - r9 as an optimization. Callers will have r1 - r5 connected to
14463 	 * the state of the call instruction (with WRITTEN set), and r0 comes
14464 	 * from callee with its full parentage chain, anyway.
14465 	 */
14466 	/* clear write marks in current state: the writes we did are not writes
14467 	 * our child did, so they don't screen off its reads from us.
14468 	 * (There are no read marks in current state, because reads always mark
14469 	 * their parent and current state never has children yet.  Only
14470 	 * explored_states can get read marks.)
14471 	 */
14472 	for (j = 0; j <= cur->curframe; j++) {
14473 		for (i = j < cur->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++)
14474 			cur->frame[j]->regs[i].parent = &new->frame[j]->regs[i];
14475 		for (i = 0; i < BPF_REG_FP; i++)
14476 			cur->frame[j]->regs[i].live = REG_LIVE_NONE;
14477 	}
14478 
14479 	/* all stack frames are accessible from callee, clear them all */
14480 	for (j = 0; j <= cur->curframe; j++) {
14481 		struct bpf_func_state *frame = cur->frame[j];
14482 		struct bpf_func_state *newframe = new->frame[j];
14483 
14484 		for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++) {
14485 			frame->stack[i].spilled_ptr.live = REG_LIVE_NONE;
14486 			frame->stack[i].spilled_ptr.parent =
14487 						&newframe->stack[i].spilled_ptr;
14488 		}
14489 	}
14490 	return 0;
14491 }
14492 
14493 /* Return true if it's OK to have the same insn return a different type. */
14494 static bool reg_type_mismatch_ok(enum bpf_reg_type type)
14495 {
14496 	switch (base_type(type)) {
14497 	case PTR_TO_CTX:
14498 	case PTR_TO_SOCKET:
14499 	case PTR_TO_SOCK_COMMON:
14500 	case PTR_TO_TCP_SOCK:
14501 	case PTR_TO_XDP_SOCK:
14502 	case PTR_TO_BTF_ID:
14503 		return false;
14504 	default:
14505 		return true;
14506 	}
14507 }
14508 
14509 /* If an instruction was previously used with particular pointer types, then we
14510  * need to be careful to avoid cases such as the one below, where it may be OK
14511  * for one branch to access the pointer, but not OK for the other branch:
14512  *
14513  * R1 = sock_ptr
14514  * goto X;
14515  * ...
14516  * R1 = some_other_valid_ptr;
14517  * goto X;
14518  * ...
14519  * R2 = *(u32 *)(R1 + 0);
14520  */
14521 static bool reg_type_mismatch(enum bpf_reg_type src, enum bpf_reg_type prev)
14522 {
14523 	return src != prev && (!reg_type_mismatch_ok(src) ||
14524 			       !reg_type_mismatch_ok(prev));
14525 }
14526 
14527 static int do_check(struct bpf_verifier_env *env)
14528 {
14529 	bool pop_log = !(env->log.level & BPF_LOG_LEVEL2);
14530 	struct bpf_verifier_state *state = env->cur_state;
14531 	struct bpf_insn *insns = env->prog->insnsi;
14532 	struct bpf_reg_state *regs;
14533 	int insn_cnt = env->prog->len;
14534 	bool do_print_state = false;
14535 	int prev_insn_idx = -1;
14536 
14537 	for (;;) {
14538 		struct bpf_insn *insn;
14539 		u8 class;
14540 		int err;
14541 
14542 		env->prev_insn_idx = prev_insn_idx;
14543 		if (env->insn_idx >= insn_cnt) {
14544 			verbose(env, "invalid insn idx %d insn_cnt %d\n",
14545 				env->insn_idx, insn_cnt);
14546 			return -EFAULT;
14547 		}
14548 
14549 		insn = &insns[env->insn_idx];
14550 		class = BPF_CLASS(insn->code);
14551 
14552 		if (++env->insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) {
14553 			verbose(env,
14554 				"BPF program is too large. Processed %d insn\n",
14555 				env->insn_processed);
14556 			return -E2BIG;
14557 		}
14558 
14559 		state->last_insn_idx = env->prev_insn_idx;
14560 
14561 		if (is_prune_point(env, env->insn_idx)) {
14562 			err = is_state_visited(env, env->insn_idx);
14563 			if (err < 0)
14564 				return err;
14565 			if (err == 1) {
14566 				/* found equivalent state, can prune the search */
14567 				if (env->log.level & BPF_LOG_LEVEL) {
14568 					if (do_print_state)
14569 						verbose(env, "\nfrom %d to %d%s: safe\n",
14570 							env->prev_insn_idx, env->insn_idx,
14571 							env->cur_state->speculative ?
14572 							" (speculative execution)" : "");
14573 					else
14574 						verbose(env, "%d: safe\n", env->insn_idx);
14575 				}
14576 				goto process_bpf_exit;
14577 			}
14578 		}
14579 
14580 		if (is_jmp_point(env, env->insn_idx)) {
14581 			err = push_jmp_history(env, state);
14582 			if (err)
14583 				return err;
14584 		}
14585 
14586 		if (signal_pending(current))
14587 			return -EAGAIN;
14588 
14589 		if (need_resched())
14590 			cond_resched();
14591 
14592 		if (env->log.level & BPF_LOG_LEVEL2 && do_print_state) {
14593 			verbose(env, "\nfrom %d to %d%s:",
14594 				env->prev_insn_idx, env->insn_idx,
14595 				env->cur_state->speculative ?
14596 				" (speculative execution)" : "");
14597 			print_verifier_state(env, state->frame[state->curframe], true);
14598 			do_print_state = false;
14599 		}
14600 
14601 		if (env->log.level & BPF_LOG_LEVEL) {
14602 			const struct bpf_insn_cbs cbs = {
14603 				.cb_call	= disasm_kfunc_name,
14604 				.cb_print	= verbose,
14605 				.private_data	= env,
14606 			};
14607 
14608 			if (verifier_state_scratched(env))
14609 				print_insn_state(env, state->frame[state->curframe]);
14610 
14611 			verbose_linfo(env, env->insn_idx, "; ");
14612 			env->prev_log_len = env->log.len_used;
14613 			verbose(env, "%d: ", env->insn_idx);
14614 			print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
14615 			env->prev_insn_print_len = env->log.len_used - env->prev_log_len;
14616 			env->prev_log_len = env->log.len_used;
14617 		}
14618 
14619 		if (bpf_prog_is_offloaded(env->prog->aux)) {
14620 			err = bpf_prog_offload_verify_insn(env, env->insn_idx,
14621 							   env->prev_insn_idx);
14622 			if (err)
14623 				return err;
14624 		}
14625 
14626 		regs = cur_regs(env);
14627 		sanitize_mark_insn_seen(env);
14628 		prev_insn_idx = env->insn_idx;
14629 
14630 		if (class == BPF_ALU || class == BPF_ALU64) {
14631 			err = check_alu_op(env, insn);
14632 			if (err)
14633 				return err;
14634 
14635 		} else if (class == BPF_LDX) {
14636 			enum bpf_reg_type *prev_src_type, src_reg_type;
14637 
14638 			/* check for reserved fields is already done */
14639 
14640 			/* check src operand */
14641 			err = check_reg_arg(env, insn->src_reg, SRC_OP);
14642 			if (err)
14643 				return err;
14644 
14645 			err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
14646 			if (err)
14647 				return err;
14648 
14649 			src_reg_type = regs[insn->src_reg].type;
14650 
14651 			/* check that memory (src_reg + off) is readable,
14652 			 * the state of dst_reg will be updated by this func
14653 			 */
14654 			err = check_mem_access(env, env->insn_idx, insn->src_reg,
14655 					       insn->off, BPF_SIZE(insn->code),
14656 					       BPF_READ, insn->dst_reg, false);
14657 			if (err)
14658 				return err;
14659 
14660 			prev_src_type = &env->insn_aux_data[env->insn_idx].ptr_type;
14661 
14662 			if (*prev_src_type == NOT_INIT) {
14663 				/* saw a valid insn
14664 				 * dst_reg = *(u32 *)(src_reg + off)
14665 				 * save type to validate intersecting paths
14666 				 */
14667 				*prev_src_type = src_reg_type;
14668 
14669 			} else if (reg_type_mismatch(src_reg_type, *prev_src_type)) {
14670 				/* A buggy or malicious program is trying to use the same insn
14671 				 * dst_reg = *(u32 *)(src_reg + off)
14672 				 * with different pointer types:
14673 				 * src_reg == ctx in one branch and
14674 				 * src_reg == stack|map in some other branch.
14675 				 * Reject it.
14676 				 */
14677 				verbose(env, "same insn cannot be used with different pointers\n");
14678 				return -EINVAL;
14679 			}
14680 
14681 		} else if (class == BPF_STX) {
14682 			enum bpf_reg_type *prev_dst_type, dst_reg_type;
14683 
14684 			if (BPF_MODE(insn->code) == BPF_ATOMIC) {
14685 				err = check_atomic(env, env->insn_idx, insn);
14686 				if (err)
14687 					return err;
14688 				env->insn_idx++;
14689 				continue;
14690 			}
14691 
14692 			if (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0) {
14693 				verbose(env, "BPF_STX uses reserved fields\n");
14694 				return -EINVAL;
14695 			}
14696 
14697 			/* check src1 operand */
14698 			err = check_reg_arg(env, insn->src_reg, SRC_OP);
14699 			if (err)
14700 				return err;
14701 			/* check src2 operand */
14702 			err = check_reg_arg(env, insn->dst_reg, SRC_OP);
14703 			if (err)
14704 				return err;
14705 
14706 			dst_reg_type = regs[insn->dst_reg].type;
14707 
14708 			/* check that memory (dst_reg + off) is writeable */
14709 			err = check_mem_access(env, env->insn_idx, insn->dst_reg,
14710 					       insn->off, BPF_SIZE(insn->code),
14711 					       BPF_WRITE, insn->src_reg, false);
14712 			if (err)
14713 				return err;
14714 
14715 			prev_dst_type = &env->insn_aux_data[env->insn_idx].ptr_type;
14716 
14717 			if (*prev_dst_type == NOT_INIT) {
14718 				*prev_dst_type = dst_reg_type;
14719 			} else if (reg_type_mismatch(dst_reg_type, *prev_dst_type)) {
14720 				verbose(env, "same insn cannot be used with different pointers\n");
14721 				return -EINVAL;
14722 			}
14723 
14724 		} else if (class == BPF_ST) {
14725 			if (BPF_MODE(insn->code) != BPF_MEM ||
14726 			    insn->src_reg != BPF_REG_0) {
14727 				verbose(env, "BPF_ST uses reserved fields\n");
14728 				return -EINVAL;
14729 			}
14730 			/* check src operand */
14731 			err = check_reg_arg(env, insn->dst_reg, SRC_OP);
14732 			if (err)
14733 				return err;
14734 
14735 			if (is_ctx_reg(env, insn->dst_reg)) {
14736 				verbose(env, "BPF_ST stores into R%d %s is not allowed\n",
14737 					insn->dst_reg,
14738 					reg_type_str(env, reg_state(env, insn->dst_reg)->type));
14739 				return -EACCES;
14740 			}
14741 
14742 			/* check that memory (dst_reg + off) is writeable */
14743 			err = check_mem_access(env, env->insn_idx, insn->dst_reg,
14744 					       insn->off, BPF_SIZE(insn->code),
14745 					       BPF_WRITE, -1, false);
14746 			if (err)
14747 				return err;
14748 
14749 		} else if (class == BPF_JMP || class == BPF_JMP32) {
14750 			u8 opcode = BPF_OP(insn->code);
14751 
14752 			env->jmps_processed++;
14753 			if (opcode == BPF_CALL) {
14754 				if (BPF_SRC(insn->code) != BPF_K ||
14755 				    (insn->src_reg != BPF_PSEUDO_KFUNC_CALL
14756 				     && insn->off != 0) ||
14757 				    (insn->src_reg != BPF_REG_0 &&
14758 				     insn->src_reg != BPF_PSEUDO_CALL &&
14759 				     insn->src_reg != BPF_PSEUDO_KFUNC_CALL) ||
14760 				    insn->dst_reg != BPF_REG_0 ||
14761 				    class == BPF_JMP32) {
14762 					verbose(env, "BPF_CALL uses reserved fields\n");
14763 					return -EINVAL;
14764 				}
14765 
14766 				if (env->cur_state->active_lock.ptr) {
14767 					if ((insn->src_reg == BPF_REG_0 && insn->imm != BPF_FUNC_spin_unlock) ||
14768 					    (insn->src_reg == BPF_PSEUDO_CALL) ||
14769 					    (insn->src_reg == BPF_PSEUDO_KFUNC_CALL &&
14770 					     (insn->off != 0 || !is_bpf_graph_api_kfunc(insn->imm)))) {
14771 						verbose(env, "function calls are not allowed while holding a lock\n");
14772 						return -EINVAL;
14773 					}
14774 				}
14775 				if (insn->src_reg == BPF_PSEUDO_CALL)
14776 					err = check_func_call(env, insn, &env->insn_idx);
14777 				else if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL)
14778 					err = check_kfunc_call(env, insn, &env->insn_idx);
14779 				else
14780 					err = check_helper_call(env, insn, &env->insn_idx);
14781 				if (err)
14782 					return err;
14783 			} else if (opcode == BPF_JA) {
14784 				if (BPF_SRC(insn->code) != BPF_K ||
14785 				    insn->imm != 0 ||
14786 				    insn->src_reg != BPF_REG_0 ||
14787 				    insn->dst_reg != BPF_REG_0 ||
14788 				    class == BPF_JMP32) {
14789 					verbose(env, "BPF_JA uses reserved fields\n");
14790 					return -EINVAL;
14791 				}
14792 
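				/* BPF jump offsets are relative to the insn
				 * following the jump, hence the +1 below;
				 * e.g. 'goto +0' simply falls through to the
				 * next insn.
				 */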
14793 				env->insn_idx += insn->off + 1;
14794 				continue;
14795 
14796 			} else if (opcode == BPF_EXIT) {
14797 				if (BPF_SRC(insn->code) != BPF_K ||
14798 				    insn->imm != 0 ||
14799 				    insn->src_reg != BPF_REG_0 ||
14800 				    insn->dst_reg != BPF_REG_0 ||
14801 				    class == BPF_JMP32) {
14802 					verbose(env, "BPF_EXIT uses reserved fields\n");
14803 					return -EINVAL;
14804 				}
14805 
14806 				if (env->cur_state->active_lock.ptr &&
14807 				    !in_rbtree_lock_required_cb(env)) {
14808 					verbose(env, "bpf_spin_unlock is missing\n");
14809 					return -EINVAL;
14810 				}
14811 
14812 				if (env->cur_state->active_rcu_lock) {
14813 					verbose(env, "bpf_rcu_read_unlock is missing\n");
14814 					return -EINVAL;
14815 				}
14816 
14817 				/* We must do check_reference_leak here before
14818 				 * prepare_func_exit to handle the case when
14819 				 * state->curframe > 0: it may be a callback
14820 				 * function, whose reference_state must
14821 				 * match the caller's reference state when it exits.
14822 				 */
14823 				err = check_reference_leak(env);
14824 				if (err)
14825 					return err;
14826 
14827 				if (state->curframe) {
14828 					/* exit from nested function */
14829 					err = prepare_func_exit(env, &env->insn_idx);
14830 					if (err)
14831 						return err;
14832 					do_print_state = true;
14833 					continue;
14834 				}
14835 
14836 				err = check_return_code(env);
14837 				if (err)
14838 					return err;
14839 process_bpf_exit:
14840 				mark_verifier_state_scratched(env);
14841 				update_branch_counts(env, env->cur_state);
14842 				err = pop_stack(env, &prev_insn_idx,
14843 						&env->insn_idx, pop_log);
14844 				if (err < 0) {
14845 					if (err != -ENOENT)
14846 						return err;
14847 					break;
14848 				} else {
14849 					do_print_state = true;
14850 					continue;
14851 				}
14852 			} else {
14853 				err = check_cond_jmp_op(env, insn, &env->insn_idx);
14854 				if (err)
14855 					return err;
14856 			}
14857 		} else if (class == BPF_LD) {
14858 			u8 mode = BPF_MODE(insn->code);
14859 
14860 			if (mode == BPF_ABS || mode == BPF_IND) {
14861 				err = check_ld_abs(env, insn);
14862 				if (err)
14863 					return err;
14864 
14865 			} else if (mode == BPF_IMM) {
14866 				err = check_ld_imm(env, insn);
14867 				if (err)
14868 					return err;
14869 
14870 				env->insn_idx++;
14871 				sanitize_mark_insn_seen(env);
14872 			} else {
14873 				verbose(env, "invalid BPF_LD mode\n");
14874 				return -EINVAL;
14875 			}
14876 		} else {
14877 			verbose(env, "unknown insn class %d\n", class);
14878 			return -EINVAL;
14879 		}
14880 
14881 		env->insn_idx++;
14882 	}
14883 
14884 	return 0;
14885 }
14886 
14887 static int find_btf_percpu_datasec(struct btf *btf)
14888 {
14889 	const struct btf_type *t;
14890 	const char *tname;
14891 	int i, n;
14892 
14893 	/*
14894 	 * vmlinux and each module have their own ".data..percpu"
14895 	 * DATASECs in BTF. So for a module we need to skip the vmlinux BTF
14896 	 * types and look only at the module's own BTF types.
14897 	 */
14898 	n = btf_nr_types(btf);
14899 	if (btf_is_module(btf))
14900 		i = btf_nr_types(btf_vmlinux);
14901 	else
14902 		i = 1;
14903 
14904 	for (; i < n; i++) {
14905 		t = btf_type_by_id(btf, i);
14906 		if (BTF_INFO_KIND(t->info) != BTF_KIND_DATASEC)
14907 			continue;
14908 
14909 		tname = btf_name_by_offset(btf, t->name_off);
14910 		if (!strcmp(tname, ".data..percpu"))
14911 			return i;
14912 	}
14913 
14914 	return -ENOENT;
14915 }
14916 
14917 /* replace pseudo btf_id with kernel symbol address */
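/* Illustrative sketch of the rewrite performed below: a libbpf-style
 * ldimm64 such as
 *   insn[0]: code = BPF_LD | BPF_IMM | BPF_DW, src_reg = BPF_PSEUDO_BTF_ID,
 *            imm = <btf_id of the VAR>
 *   insn[1]: imm = <module BTF fd, or 0 for vmlinux>
 * is turned into a plain 64-bit immediate load of the ksym address:
 *   insn[0].imm = lower 32 bits of addr, insn[1].imm = upper 32 bits.
 */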
14918 static int check_pseudo_btf_id(struct bpf_verifier_env *env,
14919 			       struct bpf_insn *insn,
14920 			       struct bpf_insn_aux_data *aux)
14921 {
14922 	const struct btf_var_secinfo *vsi;
14923 	const struct btf_type *datasec;
14924 	struct btf_mod_pair *btf_mod;
14925 	const struct btf_type *t;
14926 	const char *sym_name;
14927 	bool percpu = false;
14928 	u32 type, id = insn->imm;
14929 	struct btf *btf;
14930 	s32 datasec_id;
14931 	u64 addr;
14932 	int i, btf_fd, err;
14933 
14934 	btf_fd = insn[1].imm;
14935 	if (btf_fd) {
14936 		btf = btf_get_by_fd(btf_fd);
14937 		if (IS_ERR(btf)) {
14938 			verbose(env, "invalid module BTF object FD specified.\n");
14939 			return -EINVAL;
14940 		}
14941 	} else {
14942 		if (!btf_vmlinux) {
14943 			verbose(env, "kernel is missing BTF, make sure CONFIG_DEBUG_INFO_BTF=y is specified in Kconfig.\n");
14944 			return -EINVAL;
14945 		}
14946 		btf = btf_vmlinux;
14947 		btf_get(btf);
14948 	}
14949 
14950 	t = btf_type_by_id(btf, id);
14951 	if (!t) {
14952 		verbose(env, "ldimm64 insn specifies invalid btf_id %d.\n", id);
14953 		err = -ENOENT;
14954 		goto err_put;
14955 	}
14956 
14957 	if (!btf_type_is_var(t)) {
14958 		verbose(env, "pseudo btf_id %d in ldimm64 isn't KIND_VAR.\n", id);
14959 		err = -EINVAL;
14960 		goto err_put;
14961 	}
14962 
14963 	sym_name = btf_name_by_offset(btf, t->name_off);
14964 	addr = kallsyms_lookup_name(sym_name);
14965 	if (!addr) {
14966 		verbose(env, "ldimm64 failed to find the address for kernel symbol '%s'.\n",
14967 			sym_name);
14968 		err = -ENOENT;
14969 		goto err_put;
14970 	}
14971 
14972 	datasec_id = find_btf_percpu_datasec(btf);
14973 	if (datasec_id > 0) {
14974 		datasec = btf_type_by_id(btf, datasec_id);
14975 		for_each_vsi(i, datasec, vsi) {
14976 			if (vsi->type == id) {
14977 				percpu = true;
14978 				break;
14979 			}
14980 		}
14981 	}
14982 
14983 	insn[0].imm = (u32)addr;
14984 	insn[1].imm = addr >> 32;
14985 
14986 	type = t->type;
14987 	t = btf_type_skip_modifiers(btf, type, NULL);
14988 	if (percpu) {
14989 		aux->btf_var.reg_type = PTR_TO_BTF_ID | MEM_PERCPU;
14990 		aux->btf_var.btf = btf;
14991 		aux->btf_var.btf_id = type;
14992 	} else if (!btf_type_is_struct(t)) {
14993 		const struct btf_type *ret;
14994 		const char *tname;
14995 		u32 tsize;
14996 
14997 		/* resolve the type size of ksym. */
14998 		ret = btf_resolve_size(btf, t, &tsize);
14999 		if (IS_ERR(ret)) {
15000 			tname = btf_name_by_offset(btf, t->name_off);
15001 			verbose(env, "ldimm64 unable to resolve the size of type '%s': %ld\n",
15002 				tname, PTR_ERR(ret));
15003 			err = -EINVAL;
15004 			goto err_put;
15005 		}
15006 		aux->btf_var.reg_type = PTR_TO_MEM | MEM_RDONLY;
15007 		aux->btf_var.mem_size = tsize;
15008 	} else {
15009 		aux->btf_var.reg_type = PTR_TO_BTF_ID;
15010 		aux->btf_var.btf = btf;
15011 		aux->btf_var.btf_id = type;
15012 	}
15013 
15014 	/* check whether we recorded this BTF (and maybe module) already */
15015 	for (i = 0; i < env->used_btf_cnt; i++) {
15016 		if (env->used_btfs[i].btf == btf) {
15017 			btf_put(btf);
15018 			return 0;
15019 		}
15020 	}
15021 
15022 	if (env->used_btf_cnt >= MAX_USED_BTFS) {
15023 		err = -E2BIG;
15024 		goto err_put;
15025 	}
15026 
15027 	btf_mod = &env->used_btfs[env->used_btf_cnt];
15028 	btf_mod->btf = btf;
15029 	btf_mod->module = NULL;
15030 
15031 	/* if we reference variables from kernel module, bump its refcount */
15032 	if (btf_is_module(btf)) {
15033 		btf_mod->module = btf_try_get_module(btf);
15034 		if (!btf_mod->module) {
15035 			err = -ENXIO;
15036 			goto err_put;
15037 		}
15038 	}
15039 
15040 	env->used_btf_cnt++;
15041 
15042 	return 0;
15043 err_put:
15044 	btf_put(btf);
15045 	return err;
15046 }
15047 
15048 static bool is_tracing_prog_type(enum bpf_prog_type type)
15049 {
15050 	switch (type) {
15051 	case BPF_PROG_TYPE_KPROBE:
15052 	case BPF_PROG_TYPE_TRACEPOINT:
15053 	case BPF_PROG_TYPE_PERF_EVENT:
15054 	case BPF_PROG_TYPE_RAW_TRACEPOINT:
15055 	case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
15056 		return true;
15057 	default:
15058 		return false;
15059 	}
15060 }
15061 
15062 static int check_map_prog_compatibility(struct bpf_verifier_env *env,
15063 					struct bpf_map *map,
15064 					struct bpf_prog *prog)
15065 
15066 {
15067 	enum bpf_prog_type prog_type = resolve_prog_type(prog);
15068 
15069 	if (btf_record_has_field(map->record, BPF_LIST_HEAD) ||
15070 	    btf_record_has_field(map->record, BPF_RB_ROOT)) {
15071 		if (is_tracing_prog_type(prog_type)) {
15072 			verbose(env, "tracing progs cannot use bpf_{list_head,rb_root} yet\n");
15073 			return -EINVAL;
15074 		}
15075 	}
15076 
15077 	if (btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
15078 		if (prog_type == BPF_PROG_TYPE_SOCKET_FILTER) {
15079 			verbose(env, "socket filter progs cannot use bpf_spin_lock yet\n");
15080 			return -EINVAL;
15081 		}
15082 
15083 		if (is_tracing_prog_type(prog_type)) {
15084 			verbose(env, "tracing progs cannot use bpf_spin_lock yet\n");
15085 			return -EINVAL;
15086 		}
15087 
15088 		if (prog->aux->sleepable) {
15089 			verbose(env, "sleepable progs cannot use bpf_spin_lock yet\n");
15090 			return -EINVAL;
15091 		}
15092 	}
15093 
15094 	if (btf_record_has_field(map->record, BPF_TIMER)) {
15095 		if (is_tracing_prog_type(prog_type)) {
15096 			verbose(env, "tracing progs cannot use bpf_timer yet\n");
15097 			return -EINVAL;
15098 		}
15099 	}
15100 
15101 	if ((bpf_prog_is_offloaded(prog->aux) || bpf_map_is_offloaded(map)) &&
15102 	    !bpf_offload_prog_map_match(prog, map)) {
15103 		verbose(env, "offload device mismatch between prog and map\n");
15104 		return -EINVAL;
15105 	}
15106 
15107 	if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
15108 		verbose(env, "bpf_struct_ops map cannot be used in prog\n");
15109 		return -EINVAL;
15110 	}
15111 
15112 	if (prog->aux->sleepable)
15113 		switch (map->map_type) {
15114 		case BPF_MAP_TYPE_HASH:
15115 		case BPF_MAP_TYPE_LRU_HASH:
15116 		case BPF_MAP_TYPE_ARRAY:
15117 		case BPF_MAP_TYPE_PERCPU_HASH:
15118 		case BPF_MAP_TYPE_PERCPU_ARRAY:
15119 		case BPF_MAP_TYPE_LRU_PERCPU_HASH:
15120 		case BPF_MAP_TYPE_ARRAY_OF_MAPS:
15121 		case BPF_MAP_TYPE_HASH_OF_MAPS:
15122 		case BPF_MAP_TYPE_RINGBUF:
15123 		case BPF_MAP_TYPE_USER_RINGBUF:
15124 		case BPF_MAP_TYPE_INODE_STORAGE:
15125 		case BPF_MAP_TYPE_SK_STORAGE:
15126 		case BPF_MAP_TYPE_TASK_STORAGE:
15127 		case BPF_MAP_TYPE_CGRP_STORAGE:
15128 			break;
15129 		default:
15130 			verbose(env,
15131 				"Sleepable programs can only use array, hash, ringbuf and local storage maps\n");
15132 			return -EINVAL;
15133 		}
15134 
15135 	return 0;
15136 }
15137 
15138 static bool bpf_map_is_cgroup_storage(struct bpf_map *map)
15139 {
15140 	return (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE ||
15141 		map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE);
15142 }
15143 
15144 /* find and rewrite pseudo imm in ld_imm64 instructions:
15145  *
15146  * 1. if it accesses map FD, replace it with actual map pointer.
15147  * 2. if it accesses btf_id of a VAR, replace it with pointer to the var.
15148  *
15149  * NOTE: btf_vmlinux is required for converting pseudo btf_id.
15150  */
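/* For example (a sketch of what loaders emit): a map pointer load uses
 * the two-insn ldimm64 form
 *   BPF_LD_IMM64_RAW(BPF_REG_1, BPF_PSEUDO_MAP_FD, map_fd)
 * and this pass replaces the fd in the immediate with the address of
 * the in-kernel 'struct bpf_map'.
 */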
15151 static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
15152 {
15153 	struct bpf_insn *insn = env->prog->insnsi;
15154 	int insn_cnt = env->prog->len;
15155 	int i, j, err;
15156 
15157 	err = bpf_prog_calc_tag(env->prog);
15158 	if (err)
15159 		return err;
15160 
15161 	for (i = 0; i < insn_cnt; i++, insn++) {
15162 		if (BPF_CLASS(insn->code) == BPF_LDX &&
15163 		    (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) {
15164 			verbose(env, "BPF_LDX uses reserved fields\n");
15165 			return -EINVAL;
15166 		}
15167 
15168 		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) {
15169 			struct bpf_insn_aux_data *aux;
15170 			struct bpf_map *map;
15171 			struct fd f;
15172 			u64 addr;
15173 			u32 fd;
15174 
15175 			if (i == insn_cnt - 1 || insn[1].code != 0 ||
15176 			    insn[1].dst_reg != 0 || insn[1].src_reg != 0 ||
15177 			    insn[1].off != 0) {
15178 				verbose(env, "invalid bpf_ld_imm64 insn\n");
15179 				return -EINVAL;
15180 			}
15181 
15182 			if (insn[0].src_reg == 0)
15183 				/* valid generic load 64-bit imm */
15184 				goto next_insn;
15185 
15186 			if (insn[0].src_reg == BPF_PSEUDO_BTF_ID) {
15187 				aux = &env->insn_aux_data[i];
15188 				err = check_pseudo_btf_id(env, insn, aux);
15189 				if (err)
15190 					return err;
15191 				goto next_insn;
15192 			}
15193 
15194 			if (insn[0].src_reg == BPF_PSEUDO_FUNC) {
15195 				aux = &env->insn_aux_data[i];
15196 				aux->ptr_type = PTR_TO_FUNC;
15197 				goto next_insn;
15198 			}
15199 
15200 			/* In the final convert_pseudo_ld_imm64() step, this is
15201 			 * converted into a regular 64-bit imm load insn.
15202 			 */
15203 			switch (insn[0].src_reg) {
15204 			case BPF_PSEUDO_MAP_VALUE:
15205 			case BPF_PSEUDO_MAP_IDX_VALUE:
15206 				break;
15207 			case BPF_PSEUDO_MAP_FD:
15208 			case BPF_PSEUDO_MAP_IDX:
15209 				if (insn[1].imm == 0)
15210 					break;
15211 				fallthrough;
15212 			default:
15213 				verbose(env, "unrecognized bpf_ld_imm64 insn\n");
15214 				return -EINVAL;
15215 			}
15216 
15217 			switch (insn[0].src_reg) {
15218 			case BPF_PSEUDO_MAP_IDX_VALUE:
15219 			case BPF_PSEUDO_MAP_IDX:
15220 				if (bpfptr_is_null(env->fd_array)) {
15221 					verbose(env, "fd_idx without fd_array is invalid\n");
15222 					return -EPROTO;
15223 				}
15224 				if (copy_from_bpfptr_offset(&fd, env->fd_array,
15225 							    insn[0].imm * sizeof(fd),
15226 							    sizeof(fd)))
15227 					return -EFAULT;
15228 				break;
15229 			default:
15230 				fd = insn[0].imm;
15231 				break;
15232 			}
15233 
15234 			f = fdget(fd);
15235 			map = __bpf_map_get(f);
15236 			if (IS_ERR(map)) {
15237 				verbose(env, "fd %d is not pointing to valid bpf_map\n",
15238 					insn[0].imm);
15239 				return PTR_ERR(map);
15240 			}
15241 
15242 			err = check_map_prog_compatibility(env, map, env->prog);
15243 			if (err) {
15244 				fdput(f);
15245 				return err;
15246 			}
15247 
15248 			aux = &env->insn_aux_data[i];
15249 			if (insn[0].src_reg == BPF_PSEUDO_MAP_FD ||
15250 			    insn[0].src_reg == BPF_PSEUDO_MAP_IDX) {
15251 				addr = (unsigned long)map;
15252 			} else {
15253 				u32 off = insn[1].imm;
15254 
15255 				if (off >= BPF_MAX_VAR_OFF) {
15256 					verbose(env, "direct value offset of %u is not allowed\n", off);
15257 					fdput(f);
15258 					return -EINVAL;
15259 				}
15260 
15261 				if (!map->ops->map_direct_value_addr) {
15262 					verbose(env, "no direct value access support for this map type\n");
15263 					fdput(f);
15264 					return -EINVAL;
15265 				}
15266 
15267 				err = map->ops->map_direct_value_addr(map, &addr, off);
15268 				if (err) {
15269 					verbose(env, "invalid access to map value pointer, value_size=%u off=%u\n",
15270 						map->value_size, off);
15271 					fdput(f);
15272 					return err;
15273 				}
15274 
15275 				aux->map_off = off;
15276 				addr += off;
15277 			}
15278 
15279 			insn[0].imm = (u32)addr;
15280 			insn[1].imm = addr >> 32;
15281 
15282 			/* check whether we recorded this map already */
15283 			for (j = 0; j < env->used_map_cnt; j++) {
15284 				if (env->used_maps[j] == map) {
15285 					aux->map_index = j;
15286 					fdput(f);
15287 					goto next_insn;
15288 				}
15289 			}
15290 
15291 			if (env->used_map_cnt >= MAX_USED_MAPS) {
15292 				fdput(f);
15293 				return -E2BIG;
15294 			}
15295 
15296 			/* hold the map. If the program is rejected by the verifier,
15297 			 * the map will be released by release_maps(); otherwise it
15298 			 * will be used by the valid program until it's unloaded,
15299 			 * when all maps are released in free_used_maps()
15300 			 */
15301 			bpf_map_inc(map);
15302 
15303 			aux->map_index = env->used_map_cnt;
15304 			env->used_maps[env->used_map_cnt++] = map;
15305 
15306 			if (bpf_map_is_cgroup_storage(map) &&
15307 			    bpf_cgroup_storage_assign(env->prog->aux, map)) {
15308 				verbose(env, "only one cgroup storage of each type is allowed\n");
15309 				fdput(f);
15310 				return -EBUSY;
15311 			}
15312 
15313 			fdput(f);
15314 next_insn:
15315 			insn++;
15316 			i++;
15317 			continue;
15318 		}
15319 
15320 		/* Basic sanity check before we invest more work here. */
15321 		if (!bpf_opcode_in_insntable(insn->code)) {
15322 			verbose(env, "unknown opcode %02x\n", insn->code);
15323 			return -EINVAL;
15324 		}
15325 	}
15326 
15327 	/* now all pseudo BPF_LD_IMM64 instructions load valid
15328 	 * 'struct bpf_map *' into a register instead of user map_fd.
15329 	 * These pointers will be used later by verifier to validate map access.
15330 	 */
15331 	return 0;
15332 }
15333 
15334 /* drop refcnt of maps used by the rejected program */
15335 static void release_maps(struct bpf_verifier_env *env)
15336 {
15337 	__bpf_free_used_maps(env->prog->aux, env->used_maps,
15338 			     env->used_map_cnt);
15339 }
15340 
15341 /* drop refcnt of maps used by the rejected program */
15342 static void release_btfs(struct bpf_verifier_env *env)
15343 {
15344 	__bpf_free_used_btfs(env->prog->aux, env->used_btfs,
15345 			     env->used_btf_cnt);
15346 }
15347 
15348 /* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */
15349 static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env)
15350 {
15351 	struct bpf_insn *insn = env->prog->insnsi;
15352 	int insn_cnt = env->prog->len;
15353 	int i;
15354 
15355 	for (i = 0; i < insn_cnt; i++, insn++) {
15356 		if (insn->code != (BPF_LD | BPF_IMM | BPF_DW))
15357 			continue;
15358 		if (insn->src_reg == BPF_PSEUDO_FUNC)
15359 			continue;
15360 		insn->src_reg = 0;
15361 	}
15362 }
15363 
15364 /* A single env->prog->insnsi[off] instruction was replaced with the range
15365  * insnsi[off, off + cnt).  Adjust the corresponding insn_aux_data by copying
15366  * [0, off) and [off, end) to their new locations, so the patched range stays zero.
15367  */
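/* Worked example (illustrative): patching the single insn at off = 5
 * with a 3-insn sequence grows the prog by 2. aux data for [0, 5) is
 * copied as-is, aux data for the old [5, end) lands at [7, new end),
 * and the two freshly patched slots in between stay mostly zeroed,
 * inheriting only the original insn's 'seen' count and a recomputed
 * zext_dst.
 */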
15368 static void adjust_insn_aux_data(struct bpf_verifier_env *env,
15369 				 struct bpf_insn_aux_data *new_data,
15370 				 struct bpf_prog *new_prog, u32 off, u32 cnt)
15371 {
15372 	struct bpf_insn_aux_data *old_data = env->insn_aux_data;
15373 	struct bpf_insn *insn = new_prog->insnsi;
15374 	u32 old_seen = old_data[off].seen;
15375 	u32 prog_len;
15376 	int i;
15377 
15378 	/* aux info at OFF always needs adjustment, no matter whether the fast
15379 	 * path (cnt == 1) is taken or not. There is no guarantee the insn at
15380 	 * OFF is the original insn of the old prog.
15381 	 */
15382 	old_data[off].zext_dst = insn_has_def32(env, insn + off + cnt - 1);
15383 
15384 	if (cnt == 1)
15385 		return;
15386 	prog_len = new_prog->len;
15387 
15388 	memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
15389 	memcpy(new_data + off + cnt - 1, old_data + off,
15390 	       sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
15391 	for (i = off; i < off + cnt - 1; i++) {
15392 		/* Expand insni[off]'s seen count to the patched range. */
15393 		new_data[i].seen = old_seen;
15394 		new_data[i].zext_dst = insn_has_def32(env, insn + i);
15395 	}
15396 	env->insn_aux_data = new_data;
15397 	vfree(old_data);
15398 }
15399 
15400 static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len)
15401 {
15402 	int i;
15403 
15404 	if (len == 1)
15405 		return;
15406 	/* NOTE: fake 'exit' subprog should be updated as well. */
15407 	for (i = 0; i <= env->subprog_cnt; i++) {
15408 		if (env->subprog_info[i].start <= off)
15409 			continue;
15410 		env->subprog_info[i].start += len - 1;
15411 	}
15412 }
15413 
15414 static void adjust_poke_descs(struct bpf_prog *prog, u32 off, u32 len)
15415 {
15416 	struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab;
15417 	int i, sz = prog->aux->size_poke_tab;
15418 	struct bpf_jit_poke_descriptor *desc;
15419 
15420 	for (i = 0; i < sz; i++) {
15421 		desc = &tab[i];
15422 		if (desc->insn_idx <= off)
15423 			continue;
15424 		desc->insn_idx += len - 1;
15425 	}
15426 }
15427 
15428 static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off,
15429 					    const struct bpf_insn *patch, u32 len)
15430 {
15431 	struct bpf_prog *new_prog;
15432 	struct bpf_insn_aux_data *new_data = NULL;
15433 
15434 	if (len > 1) {
15435 		new_data = vzalloc(array_size(env->prog->len + len - 1,
15436 					      sizeof(struct bpf_insn_aux_data)));
15437 		if (!new_data)
15438 			return NULL;
15439 	}
15440 
15441 	new_prog = bpf_patch_insn_single(env->prog, off, patch, len);
15442 	if (IS_ERR(new_prog)) {
15443 		if (PTR_ERR(new_prog) == -ERANGE)
15444 			verbose(env,
15445 				"insn %d cannot be patched due to 16-bit range\n",
15446 				env->insn_aux_data[off].orig_idx);
15447 		vfree(new_data);
15448 		return NULL;
15449 	}
15450 	adjust_insn_aux_data(env, new_data, new_prog, off, len);
15451 	adjust_subprog_starts(env, off, len);
15452 	adjust_poke_descs(new_prog, off, len);
15453 	return new_prog;
15454 }
15455 
15456 static int adjust_subprog_starts_after_remove(struct bpf_verifier_env *env,
15457 					      u32 off, u32 cnt)
15458 {
15459 	int i, j;
15460 
15461 	/* find first prog starting at or after off (first to remove) */
15462 	for (i = 0; i < env->subprog_cnt; i++)
15463 		if (env->subprog_info[i].start >= off)
15464 			break;
15465 	/* find first prog starting at or after off + cnt (first to stay) */
15466 	for (j = i; j < env->subprog_cnt; j++)
15467 		if (env->subprog_info[j].start >= off + cnt)
15468 			break;
15469 	/* if j doesn't start exactly at off + cnt, we are just removing
15470 	 * the front of the previous prog
15471 	 */
15472 	if (env->subprog_info[j].start != off + cnt)
15473 		j--;
15474 
15475 	if (j > i) {
15476 		struct bpf_prog_aux *aux = env->prog->aux;
15477 		int move;
15478 
15479 		/* move fake 'exit' subprog as well */
15480 		move = env->subprog_cnt + 1 - j;
15481 
15482 		memmove(env->subprog_info + i,
15483 			env->subprog_info + j,
15484 			sizeof(*env->subprog_info) * move);
15485 		env->subprog_cnt -= j - i;
15486 
15487 		/* remove func_info */
15488 		if (aux->func_info) {
15489 			move = aux->func_info_cnt - j;
15490 
15491 			memmove(aux->func_info + i,
15492 				aux->func_info + j,
15493 				sizeof(*aux->func_info) * move);
15494 			aux->func_info_cnt -= j - i;
15495 			/* func_info->insn_off is set after all code rewrites,
15496 			 * in adjust_btf_func() - no need to adjust
15497 			 */
15498 		}
15499 	} else {
15500 		/* convert i from "first prog to remove" to "first to adjust" */
15501 		if (env->subprog_info[i].start == off)
15502 			i++;
15503 	}
15504 
15505 	/* update fake 'exit' subprog as well */
15506 	for (; i <= env->subprog_cnt; i++)
15507 		env->subprog_info[i].start -= cnt;
15508 
15509 	return 0;
15510 }
15511 
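/* Worked example (illustrative): with line info at insn_off {0, 4, 7}
 * and removal of insns [3, 6), the entry at 4 has no exact live match,
 * so it "inherits" the first live insn at 6; after pulling everything
 * in by cnt the offsets end up as {0, 3, 4}.
 */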
15512 static int bpf_adj_linfo_after_remove(struct bpf_verifier_env *env, u32 off,
15513 				      u32 cnt)
15514 {
15515 	struct bpf_prog *prog = env->prog;
15516 	u32 i, l_off, l_cnt, nr_linfo;
15517 	struct bpf_line_info *linfo;
15518 
15519 	nr_linfo = prog->aux->nr_linfo;
15520 	if (!nr_linfo)
15521 		return 0;
15522 
15523 	linfo = prog->aux->linfo;
15524 
15525 	/* find first line info to remove, count lines to be removed */
15526 	for (i = 0; i < nr_linfo; i++)
15527 		if (linfo[i].insn_off >= off)
15528 			break;
15529 
15530 	l_off = i;
15531 	l_cnt = 0;
15532 	for (; i < nr_linfo; i++)
15533 		if (linfo[i].insn_off < off + cnt)
15534 			l_cnt++;
15535 		else
15536 			break;
15537 
15538 	/* If the first live insn doesn't match the first live linfo, it needs to
15539 	 * "inherit" the last removed linfo.  prog is already modified, so
15540 	 * prog->len == off means no live instructions remain after it (the tail was removed).
15541 	 */
15542 	if (prog->len != off && l_cnt &&
15543 	    (i == nr_linfo || linfo[i].insn_off != off + cnt)) {
15544 		l_cnt--;
15545 		linfo[--i].insn_off = off + cnt;
15546 	}
15547 
15548 	/* remove the line info entries that refer to the removed instructions */
15549 	if (l_cnt) {
15550 		memmove(linfo + l_off, linfo + i,
15551 			sizeof(*linfo) * (nr_linfo - i));
15552 
15553 		prog->aux->nr_linfo -= l_cnt;
15554 		nr_linfo = prog->aux->nr_linfo;
15555 	}
15556 
15557 	/* pull all linfo[i].insn_off >= off + cnt in by cnt */
15558 	for (i = l_off; i < nr_linfo; i++)
15559 		linfo[i].insn_off -= cnt;
15560 
15561 	/* fix up all subprogs (incl. 'exit') which start >= off */
15562 	for (i = 0; i <= env->subprog_cnt; i++)
15563 		if (env->subprog_info[i].linfo_idx > l_off) {
15564 			/* program may have started in the removed region but
15565 			 * may not be fully removed
15566 			 */
15567 			if (env->subprog_info[i].linfo_idx >= l_off + l_cnt)
15568 				env->subprog_info[i].linfo_idx -= l_cnt;
15569 			else
15570 				env->subprog_info[i].linfo_idx = l_off;
15571 		}
15572 
15573 	return 0;
15574 }
15575 
15576 static int verifier_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt)
15577 {
15578 	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
15579 	unsigned int orig_prog_len = env->prog->len;
15580 	int err;
15581 
15582 	if (bpf_prog_is_offloaded(env->prog->aux))
15583 		bpf_prog_offload_remove_insns(env, off, cnt);
15584 
15585 	err = bpf_remove_insns(env->prog, off, cnt);
15586 	if (err)
15587 		return err;
15588 
15589 	err = adjust_subprog_starts_after_remove(env, off, cnt);
15590 	if (err)
15591 		return err;
15592 
15593 	err = bpf_adj_linfo_after_remove(env, off, cnt);
15594 	if (err)
15595 		return err;
15596 
15597 	memmove(aux_data + off, aux_data + off + cnt,
15598 		sizeof(*aux_data) * (orig_prog_len - off - cnt));
15599 
15600 	return 0;
15601 }
15602 
15603 /* The verifier does more data flow analysis than llvm and will not
15604  * explore branches that are dead at run time. Malicious programs can
15605  * have dead code too. Therefore replace all dead at-run-time code
15606  * with 'ja -1'.
15607  *
15608  * Plain nops would not be optimal: if, e.g., they sat at the end of the
15609  * program and through another bug we managed to jump there, we would
15610  * execute beyond program memory. Returning an exception
15611  * code also wouldn't work since we can have subprogs where the dead
15612  * code could be located.
15613  */
15614 static void sanitize_dead_code(struct bpf_verifier_env *env)
15615 {
15616 	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
15617 	struct bpf_insn trap = BPF_JMP_IMM(BPF_JA, 0, 0, -1);
15618 	struct bpf_insn *insn = env->prog->insnsi;
15619 	const int insn_cnt = env->prog->len;
15620 	int i;
15621 
15622 	for (i = 0; i < insn_cnt; i++) {
15623 		if (aux_data[i].seen)
15624 			continue;
15625 		memcpy(insn + i, &trap, sizeof(trap));
15626 		aux_data[i].zext_dst = false;
15627 	}
15628 }
15629 
15630 static bool insn_is_cond_jump(u8 code)
15631 {
15632 	u8 op;
15633 
15634 	if (BPF_CLASS(code) == BPF_JMP32)
15635 		return true;
15636 
15637 	if (BPF_CLASS(code) != BPF_JMP)
15638 		return false;
15639 
15640 	op = BPF_OP(code);
15641 	return op != BPF_JA && op != BPF_EXIT && op != BPF_CALL;
15642 }
15643 
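/* Sketch of this pass: when one leg of a conditional jump was never
 * marked 'seen' during verification, the branch is hard-wired, e.g.
 *   if r1 == 0 goto +2    // fallthrough insn is dead
 * becomes 'goto +2'; when the jump target is dead instead, the insn
 * becomes 'goto +0', i.e. a plain fallthrough.
 */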
15644 static void opt_hard_wire_dead_code_branches(struct bpf_verifier_env *env)
15645 {
15646 	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
15647 	struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
15648 	struct bpf_insn *insn = env->prog->insnsi;
15649 	const int insn_cnt = env->prog->len;
15650 	int i;
15651 
15652 	for (i = 0; i < insn_cnt; i++, insn++) {
15653 		if (!insn_is_cond_jump(insn->code))
15654 			continue;
15655 
15656 		if (!aux_data[i + 1].seen)
15657 			ja.off = insn->off;
15658 		else if (!aux_data[i + 1 + insn->off].seen)
15659 			ja.off = 0;
15660 		else
15661 			continue;
15662 
15663 		if (bpf_prog_is_offloaded(env->prog->aux))
15664 			bpf_prog_offload_replace_insn(env, i, &ja);
15665 
15666 		memcpy(insn, &ja, sizeof(ja));
15667 	}
15668 }
15669 
15670 static int opt_remove_dead_code(struct bpf_verifier_env *env)
15671 {
15672 	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
15673 	int insn_cnt = env->prog->len;
15674 	int i, err;
15675 
15676 	for (i = 0; i < insn_cnt; i++) {
15677 		int j;
15678 
15679 		j = 0;
15680 		while (i + j < insn_cnt && !aux_data[i + j].seen)
15681 			j++;
15682 		if (!j)
15683 			continue;
15684 
15685 		err = verifier_remove_insns(env, i, j);
15686 		if (err)
15687 			return err;
15688 		insn_cnt = env->prog->len;
15689 	}
15690 
15691 	return 0;
15692 }
15693 
15694 static int opt_remove_nops(struct bpf_verifier_env *env)
15695 {
15696 	const struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
15697 	struct bpf_insn *insn = env->prog->insnsi;
15698 	int insn_cnt = env->prog->len;
15699 	int i, err;
15700 
15701 	for (i = 0; i < insn_cnt; i++) {
15702 		if (memcmp(&insn[i], &ja, sizeof(ja)))
15703 			continue;
15704 
15705 		err = verifier_remove_insns(env, i, 1);
15706 		if (err)
15707 			return err;
15708 		insn_cnt--;
15709 		i--;
15710 	}
15711 
15712 	return 0;
15713 }
15714 
15715 static int opt_subreg_zext_lo32_rnd_hi32(struct bpf_verifier_env *env,
15716 					 const union bpf_attr *attr)
15717 {
15718 	struct bpf_insn *patch, zext_patch[2], rnd_hi32_patch[4];
15719 	struct bpf_insn_aux_data *aux = env->insn_aux_data;
15720 	int i, patch_len, delta = 0, len = env->prog->len;
15721 	struct bpf_insn *insns = env->prog->insnsi;
15722 	struct bpf_prog *new_prog;
15723 	bool rnd_hi32;
15724 
15725 	rnd_hi32 = attr->prog_flags & BPF_F_TEST_RND_HI32;
15726 	zext_patch[1] = BPF_ZEXT_REG(0);
15727 	rnd_hi32_patch[1] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, 0);
15728 	rnd_hi32_patch[2] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
15729 	rnd_hi32_patch[3] = BPF_ALU64_REG(BPF_OR, 0, BPF_REG_AX);
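	/* The two patch templates, roughly (dst is filled in per insn):
	 *   zext:     <insn>; w(dst) = w(dst)        // mov32 zero-extends
	 *   rnd_hi32: <insn>; AX = rnd; AX <<= 32; dst |= AX
	 * The latter deliberately poisons the upper 32 bits to catch code
	 * that wrongly relies on them being zero.
	 */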
15730 	for (i = 0; i < len; i++) {
15731 		int adj_idx = i + delta;
15732 		struct bpf_insn insn;
15733 		int load_reg;
15734 
15735 		insn = insns[adj_idx];
15736 		load_reg = insn_def_regno(&insn);
15737 		if (!aux[adj_idx].zext_dst) {
15738 			u8 code, class;
15739 			u32 imm_rnd;
15740 
15741 			if (!rnd_hi32)
15742 				continue;
15743 
15744 			code = insn.code;
15745 			class = BPF_CLASS(code);
15746 			if (load_reg == -1)
15747 				continue;
15748 
15749 			/* NOTE: arg "reg" (the fourth one) is only used for
15750 			 *       BPF_STX + SRC_OP, so it is safe to pass NULL
15751 			 *       here.
15752 			 */
15753 			if (is_reg64(env, &insn, load_reg, NULL, DST_OP)) {
15754 				if (class == BPF_LD &&
15755 				    BPF_MODE(code) == BPF_IMM)
15756 					i++;
15757 				continue;
15758 			}
15759 
15760 			/* ctx load could be transformed into wider load. */
15761 			if (class == BPF_LDX &&
15762 			    aux[adj_idx].ptr_type == PTR_TO_CTX)
15763 				continue;
15764 
15765 			imm_rnd = get_random_u32();
15766 			rnd_hi32_patch[0] = insn;
15767 			rnd_hi32_patch[1].imm = imm_rnd;
15768 			rnd_hi32_patch[3].dst_reg = load_reg;
15769 			patch = rnd_hi32_patch;
15770 			patch_len = 4;
15771 			goto apply_patch_buffer;
15772 		}
15773 
15774 		/* Add in a zero-extend instruction if a) the JIT has requested
15775 		 * it or b) it's a CMPXCHG.
15776 		 *
15777 		 * The latter is because: BPF_CMPXCHG always loads a value into
15778 		 * R0, therefore always zero-extends. However some archs'
15779 		 * equivalent instruction only does this load when the
15780 		 * comparison is successful. This detail of CMPXCHG is
15781 		 * orthogonal to the general zero-extension behaviour of the
15782 		 * CPU, so it's treated independently of bpf_jit_needs_zext.
15783 		 */
15784 		if (!bpf_jit_needs_zext() && !is_cmpxchg_insn(&insn))
15785 			continue;
15786 
15787 		/* Zero-extension is done by the caller. */
15788 		if (bpf_pseudo_kfunc_call(&insn))
15789 			continue;
15790 
15791 		if (WARN_ON(load_reg == -1)) {
15792 			verbose(env, "verifier bug. zext_dst is set, but no reg is defined\n");
15793 			return -EFAULT;
15794 		}
15795 
15796 		zext_patch[0] = insn;
15797 		zext_patch[1].dst_reg = load_reg;
15798 		zext_patch[1].src_reg = load_reg;
15799 		patch = zext_patch;
15800 		patch_len = 2;
15801 apply_patch_buffer:
15802 		new_prog = bpf_patch_insn_data(env, adj_idx, patch, patch_len);
15803 		if (!new_prog)
15804 			return -ENOMEM;
15805 		env->prog = new_prog;
15806 		insns = new_prog->insnsi;
15807 		aux = env->insn_aux_data;
15808 		delta += patch_len - 1;
15809 	}
15810 
15811 	return 0;
15812 }
15813 
15814 /* convert load instructions that access fields of a context type into a
15815  * sequence of instructions that access fields of the underlying structure:
15816  *     struct __sk_buff    -> struct sk_buff
15817  *     struct bpf_sock_ops -> struct sock
15818  */
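/* For instance (a sketch): for a socket filter,
 *   r0 = *(u32 *)(r1 + offsetof(struct __sk_buff, len))
 * is rewritten by the prog type's convert_ctx_access() into a load of
 * skb->len at its real offset inside struct sk_buff.
 */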
15819 static int convert_ctx_accesses(struct bpf_verifier_env *env)
15820 {
15821 	const struct bpf_verifier_ops *ops = env->ops;
15822 	int i, cnt, size, ctx_field_size, delta = 0;
15823 	const int insn_cnt = env->prog->len;
15824 	struct bpf_insn insn_buf[16], *insn;
15825 	u32 target_size, size_default, off;
15826 	struct bpf_prog *new_prog;
15827 	enum bpf_access_type type;
15828 	bool is_narrower_load;
15829 
15830 	if (ops->gen_prologue || env->seen_direct_write) {
15831 		if (!ops->gen_prologue) {
15832 			verbose(env, "bpf verifier is misconfigured\n");
15833 			return -EINVAL;
15834 		}
15835 		cnt = ops->gen_prologue(insn_buf, env->seen_direct_write,
15836 					env->prog);
15837 		if (cnt >= ARRAY_SIZE(insn_buf)) {
15838 			verbose(env, "bpf verifier is misconfigured\n");
15839 			return -EINVAL;
15840 		} else if (cnt) {
15841 			new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt);
15842 			if (!new_prog)
15843 				return -ENOMEM;
15844 
15845 			env->prog = new_prog;
15846 			delta += cnt - 1;
15847 		}
15848 	}
15849 
15850 	if (bpf_prog_is_offloaded(env->prog->aux))
15851 		return 0;
15852 
15853 	insn = env->prog->insnsi + delta;
15854 
15855 	for (i = 0; i < insn_cnt; i++, insn++) {
15856 		bpf_convert_ctx_access_t convert_ctx_access;
15857 		bool ctx_access;
15858 
15859 		if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) ||
15860 		    insn->code == (BPF_LDX | BPF_MEM | BPF_H) ||
15861 		    insn->code == (BPF_LDX | BPF_MEM | BPF_W) ||
15862 		    insn->code == (BPF_LDX | BPF_MEM | BPF_DW)) {
15863 			type = BPF_READ;
15864 			ctx_access = true;
15865 		} else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) ||
15866 			   insn->code == (BPF_STX | BPF_MEM | BPF_H) ||
15867 			   insn->code == (BPF_STX | BPF_MEM | BPF_W) ||
15868 			   insn->code == (BPF_STX | BPF_MEM | BPF_DW) ||
15869 			   insn->code == (BPF_ST | BPF_MEM | BPF_B) ||
15870 			   insn->code == (BPF_ST | BPF_MEM | BPF_H) ||
15871 			   insn->code == (BPF_ST | BPF_MEM | BPF_W) ||
15872 			   insn->code == (BPF_ST | BPF_MEM | BPF_DW)) {
15873 			type = BPF_WRITE;
15874 			ctx_access = BPF_CLASS(insn->code) == BPF_STX;
15875 		} else {
15876 			continue;
15877 		}
15878 
15879 		if (type == BPF_WRITE &&
15880 		    env->insn_aux_data[i + delta].sanitize_stack_spill) {
15881 			struct bpf_insn patch[] = {
15882 				*insn,
15883 				BPF_ST_NOSPEC(),
15884 			};
15885 
15886 			cnt = ARRAY_SIZE(patch);
15887 			new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt);
15888 			if (!new_prog)
15889 				return -ENOMEM;
15890 
15891 			delta    += cnt - 1;
15892 			env->prog = new_prog;
15893 			insn      = new_prog->insnsi + i + delta;
15894 			continue;
15895 		}
15896 
15897 		if (!ctx_access)
15898 			continue;
15899 
15900 		switch ((int)env->insn_aux_data[i + delta].ptr_type) {
15901 		case PTR_TO_CTX:
15902 			if (!ops->convert_ctx_access)
15903 				continue;
15904 			convert_ctx_access = ops->convert_ctx_access;
15905 			break;
15906 		case PTR_TO_SOCKET:
15907 		case PTR_TO_SOCK_COMMON:
15908 			convert_ctx_access = bpf_sock_convert_ctx_access;
15909 			break;
15910 		case PTR_TO_TCP_SOCK:
15911 			convert_ctx_access = bpf_tcp_sock_convert_ctx_access;
15912 			break;
15913 		case PTR_TO_XDP_SOCK:
15914 			convert_ctx_access = bpf_xdp_sock_convert_ctx_access;
15915 			break;
15916 		case PTR_TO_BTF_ID:
15917 		case PTR_TO_BTF_ID | PTR_UNTRUSTED:
15918 		/* PTR_TO_BTF_ID | MEM_ALLOC always has a valid lifetime, unlike
15919 		 * PTR_TO_BTF_ID, and an active ref_obj_id, but the same cannot
15920 		 * be said once it is marked PTR_UNTRUSTED, hence we must handle
15921 		 * any faults for loads into such types. BPF_WRITE is disallowed
15922 		 * for this case.
15923 		 */
15924 		case PTR_TO_BTF_ID | MEM_ALLOC | PTR_UNTRUSTED:
15925 			if (type == BPF_READ) {
15926 				insn->code = BPF_LDX | BPF_PROBE_MEM |
15927 					BPF_SIZE((insn)->code);
15928 				env->prog->aux->num_exentries++;
15929 			}
15930 			continue;
15931 		default:
15932 			continue;
15933 		}
15934 
15935 		ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size;
15936 		size = BPF_LDST_BYTES(insn);
15937 
15938 		/* If the read access is a narrower load of the field,
15939 		 * convert to a 4/8-byte load, to minimize program type specific
15940 		 * convert_ctx_access changes. If the conversion is successful,
15941 		 * we will apply the proper mask to the result.
15942 		 */
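		/* Illustrative: a 1-byte read at offset 2 of a 4-byte ctx
		 * field becomes (on little-endian) a 4-byte load at offset 0,
		 * followed by 'w(dst) >>= 16' and 'w(dst) &= 0xff' emitted by
		 * the masking code further down.
		 */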
15943 		is_narrower_load = size < ctx_field_size;
15944 		size_default = bpf_ctx_off_adjust_machine(ctx_field_size);
15945 		off = insn->off;
15946 		if (is_narrower_load) {
15947 			u8 size_code;
15948 
15949 			if (type == BPF_WRITE) {
15950 				verbose(env, "bpf verifier narrow ctx access misconfigured\n");
15951 				return -EINVAL;
15952 			}
15953 
15954 			size_code = BPF_H;
15955 			if (ctx_field_size == 4)
15956 				size_code = BPF_W;
15957 			else if (ctx_field_size == 8)
15958 				size_code = BPF_DW;
15959 
15960 			insn->off = off & ~(size_default - 1);
15961 			insn->code = BPF_LDX | BPF_MEM | size_code;
15962 		}
15963 
15964 		target_size = 0;
15965 		cnt = convert_ctx_access(type, insn, insn_buf, env->prog,
15966 					 &target_size);
15967 		if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) ||
15968 		    (ctx_field_size && !target_size)) {
15969 			verbose(env, "bpf verifier is misconfigured\n");
15970 			return -EINVAL;
15971 		}
15972 
15973 		if (is_narrower_load && size < target_size) {
15974 			u8 shift = bpf_ctx_narrow_access_offset(
15975 				off, size, size_default) * 8;
15976 			if (shift && cnt + 1 >= ARRAY_SIZE(insn_buf)) {
15977 				verbose(env, "bpf verifier narrow ctx load misconfigured\n");
15978 				return -EINVAL;
15979 			}
15980 			if (ctx_field_size <= 4) {
15981 				if (shift)
15982 					insn_buf[cnt++] = BPF_ALU32_IMM(BPF_RSH,
15983 									insn->dst_reg,
15984 									shift);
15985 				insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
15986 								(1 << size * 8) - 1);
15987 			} else {
15988 				if (shift)
15989 					insn_buf[cnt++] = BPF_ALU64_IMM(BPF_RSH,
15990 									insn->dst_reg,
15991 									shift);
15992 				insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg,
15993 								(1ULL << size * 8) - 1);
15994 			}
15995 		}
15996 
15997 		new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
15998 		if (!new_prog)
15999 			return -ENOMEM;
16000 
16001 		delta += cnt - 1;
16002 
16003 		/* keep walking new program and skip insns we just inserted */
16004 		env->prog = new_prog;
16005 		insn      = new_prog->insnsi + i + delta;
16006 	}
16007 
16008 	return 0;
16009 }
16010 
16011 static int jit_subprogs(struct bpf_verifier_env *env)
16012 {
16013 	struct bpf_prog *prog = env->prog, **func, *tmp;
16014 	int i, j, subprog_start, subprog_end = 0, len, subprog;
16015 	struct bpf_map *map_ptr;
16016 	struct bpf_insn *insn;
16017 	void *old_bpf_func;
16018 	int err, num_exentries;
16019 
16020 	if (env->subprog_cnt <= 1)
16021 		return 0;
16022 
16023 	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
16024 		if (!bpf_pseudo_func(insn) && !bpf_pseudo_call(insn))
16025 			continue;
16026 
16027 		/* Upon error here we cannot fall back to interpreter but
16028 		 * need a hard reject of the program. Thus -EFAULT is
16029 		 * propagated in any case.
16030 		 */
16031 		subprog = find_subprog(env, i + insn->imm + 1);
16032 		if (subprog < 0) {
16033 			WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
16034 				  i + insn->imm + 1);
16035 			return -EFAULT;
16036 		}
16037 		/* temporarily remember subprog id inside insn instead of
16038 		 * aux_data, since next loop will split up all insns into funcs
16039 		 */
16040 		insn->off = subprog;
16041 		/* remember original imm in case JIT fails and fallback
16042 		 * to interpreter will be needed
16043 		 */
16044 		env->insn_aux_data[i].call_imm = insn->imm;
16045 		/* point imm to __bpf_call_base+1 from the JIT's point of view */
16046 		insn->imm = 1;
16047 		if (bpf_pseudo_func(insn))
16048 			/* jit (e.g. x86_64) may emit fewer instructions
16049 			 * if it learns a u32 imm is the same as a u64 imm.
16050 			 * Force a non-zero value here.
16051 			 */
16052 			insn[1].imm = 1;
16053 	}
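	/* Illustrative: at this point every bpf-to-bpf call temporarily
	 * carries { off = <subprog index>, imm = 1 }; the real address is
	 * patched in only after all subprogs have been JITed, in the fixup
	 * loop further down.
	 */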
16054 
16055 	err = bpf_prog_alloc_jited_linfo(prog);
16056 	if (err)
16057 		goto out_undo_insn;
16058 
16059 	err = -ENOMEM;
16060 	func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL);
16061 	if (!func)
16062 		goto out_undo_insn;
16063 
16064 	for (i = 0; i < env->subprog_cnt; i++) {
16065 		subprog_start = subprog_end;
16066 		subprog_end = env->subprog_info[i + 1].start;
16067 
16068 		len = subprog_end - subprog_start;
16069 		/* bpf_prog_run() doesn't call subprogs directly,
16070 		 * hence main prog stats include the runtime of subprogs.
16071 		 * subprogs don't have IDs and are not reachable via prog_get_next_id.
16072 		 * func[i]->stats will never be accessed and stays NULL
16073 		 */
16074 		func[i] = bpf_prog_alloc_no_stats(bpf_prog_size(len), GFP_USER);
16075 		if (!func[i])
16076 			goto out_free;
16077 		memcpy(func[i]->insnsi, &prog->insnsi[subprog_start],
16078 		       len * sizeof(struct bpf_insn));
16079 		func[i]->type = prog->type;
16080 		func[i]->len = len;
16081 		if (bpf_prog_calc_tag(func[i]))
16082 			goto out_free;
16083 		func[i]->is_func = 1;
16084 		func[i]->aux->func_idx = i;
16085 		/* Below members will be freed only at prog->aux */
16086 		func[i]->aux->btf = prog->aux->btf;
16087 		func[i]->aux->func_info = prog->aux->func_info;
16088 		func[i]->aux->func_info_cnt = prog->aux->func_info_cnt;
16089 		func[i]->aux->poke_tab = prog->aux->poke_tab;
16090 		func[i]->aux->size_poke_tab = prog->aux->size_poke_tab;
16091 
16092 		for (j = 0; j < prog->aux->size_poke_tab; j++) {
16093 			struct bpf_jit_poke_descriptor *poke;
16094 
16095 			poke = &prog->aux->poke_tab[j];
16096 			if (poke->insn_idx < subprog_end &&
16097 			    poke->insn_idx >= subprog_start)
16098 				poke->aux = func[i]->aux;
16099 		}
16100 
16101 		func[i]->aux->name[0] = 'F';
16102 		func[i]->aux->stack_depth = env->subprog_info[i].stack_depth;
16103 		func[i]->jit_requested = 1;
16104 		func[i]->blinding_requested = prog->blinding_requested;
16105 		func[i]->aux->kfunc_tab = prog->aux->kfunc_tab;
16106 		func[i]->aux->kfunc_btf_tab = prog->aux->kfunc_btf_tab;
16107 		func[i]->aux->linfo = prog->aux->linfo;
16108 		func[i]->aux->nr_linfo = prog->aux->nr_linfo;
16109 		func[i]->aux->jited_linfo = prog->aux->jited_linfo;
16110 		func[i]->aux->linfo_idx = env->subprog_info[i].linfo_idx;
16111 		num_exentries = 0;
16112 		insn = func[i]->insnsi;
16113 		for (j = 0; j < func[i]->len; j++, insn++) {
16114 			if (BPF_CLASS(insn->code) == BPF_LDX &&
16115 			    BPF_MODE(insn->code) == BPF_PROBE_MEM)
16116 				num_exentries++;
16117 		}
16118 		func[i]->aux->num_exentries = num_exentries;
16119 		func[i]->aux->tail_call_reachable = env->subprog_info[i].tail_call_reachable;
16120 		func[i] = bpf_int_jit_compile(func[i]);
16121 		if (!func[i]->jited) {
16122 			err = -ENOTSUPP;
16123 			goto out_free;
16124 		}
16125 		cond_resched();
16126 	}
16127 
16128 	/* at this point all bpf functions were successfully JITed;
16129 	 * now populate all bpf_calls with correct addresses and
16130 	 * run last pass of JIT
16131 	 */
16132 	for (i = 0; i < env->subprog_cnt; i++) {
16133 		insn = func[i]->insnsi;
16134 		for (j = 0; j < func[i]->len; j++, insn++) {
16135 			if (bpf_pseudo_func(insn)) {
16136 				subprog = insn->off;
16137 				insn[0].imm = (u32)(long)func[subprog]->bpf_func;
16138 				insn[1].imm = ((u64)(long)func[subprog]->bpf_func) >> 32;
16139 				continue;
16140 			}
16141 			if (!bpf_pseudo_call(insn))
16142 				continue;
16143 			subprog = insn->off;
16144 			insn->imm = BPF_CALL_IMM(func[subprog]->bpf_func);
16145 		}
16146 
16147 		/* we use the aux data to keep a list of the start addresses
16148 		 * of the JITed images for each function in the program
16149 		 *
16150 		 * for some architectures, such as powerpc64, the imm field
16151 		 * might not be large enough to hold the offset of the start
16152 		 * address of the callee's JITed image from __bpf_call_base
16153 		 *
16154 		 * in such cases, we can lookup the start address of a callee
16155 		 * by using its subprog id, available from the off field of
16156 		 * the call instruction, as an index for this list
16157 		 */
16158 		func[i]->aux->func = func;
16159 		func[i]->aux->func_cnt = env->subprog_cnt;
16160 	}
16161 	for (i = 0; i < env->subprog_cnt; i++) {
16162 		old_bpf_func = func[i]->bpf_func;
16163 		tmp = bpf_int_jit_compile(func[i]);
16164 		if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) {
16165 			verbose(env, "JIT doesn't support bpf-to-bpf calls\n");
16166 			err = -ENOTSUPP;
16167 			goto out_free;
16168 		}
16169 		cond_resched();
16170 	}
16171 
16172 	/* finally lock prog and jit images for all functions and
16173 	 * populate kallsyms
16174 	 */
16175 	for (i = 0; i < env->subprog_cnt; i++) {
16176 		bpf_prog_lock_ro(func[i]);
16177 		bpf_prog_kallsyms_add(func[i]);
16178 	}
16179 
16180 	/* Last step: make the now unused interpreter insns from the main
16181 	 * prog consistent for later dump requests, so they
16182 	 * look the same as if they had only been interpreted.
16183 	 */
16184 	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
16185 		if (bpf_pseudo_func(insn)) {
16186 			insn[0].imm = env->insn_aux_data[i].call_imm;
16187 			insn[1].imm = insn->off;
16188 			insn->off = 0;
16189 			continue;
16190 		}
16191 		if (!bpf_pseudo_call(insn))
16192 			continue;
16193 		insn->off = env->insn_aux_data[i].call_imm;
16194 		subprog = find_subprog(env, i + insn->off + 1);
16195 		insn->imm = subprog;
16196 	}
16197 
16198 	prog->jited = 1;
16199 	prog->bpf_func = func[0]->bpf_func;
16200 	prog->jited_len = func[0]->jited_len;
16201 	prog->aux->func = func;
16202 	prog->aux->func_cnt = env->subprog_cnt;
16203 	bpf_prog_jit_attempt_done(prog);
16204 	return 0;
16205 out_free:
16206 	/* We failed JIT'ing, so at this point we need to unregister poke
16207 	 * descriptors from subprogs, so that kernel is not attempting to
16208 	 * patch it anymore as we're freeing the subprog JIT memory.
16209 	 */
16210 	for (i = 0; i < prog->aux->size_poke_tab; i++) {
16211 		map_ptr = prog->aux->poke_tab[i].tail_call.map;
16212 		map_ptr->ops->map_poke_untrack(map_ptr, prog->aux);
16213 	}
16214 	/* At this point we're guaranteed that poke descriptors are not
16215 	 * live anymore. We can just unlink the descriptor table, as it's
16216 	 * released with the main prog.
16217 	 */
16218 	for (i = 0; i < env->subprog_cnt; i++) {
16219 		if (!func[i])
16220 			continue;
16221 		func[i]->aux->poke_tab = NULL;
16222 		bpf_jit_free(func[i]);
16223 	}
16224 	kfree(func);
16225 out_undo_insn:
16226 	/* cleanup main prog to be interpreted */
16227 	prog->jit_requested = 0;
16228 	prog->blinding_requested = 0;
16229 	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
16230 		if (!bpf_pseudo_call(insn))
16231 			continue;
16232 		insn->off = 0;
16233 		insn->imm = env->insn_aux_data[i].call_imm;
16234 	}
16235 	bpf_prog_jit_attempt_done(prog);
16236 	return err;
16237 }
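/* Illustrative recap of jit_subprogs() (a sketch, not normative): on
 * success, a program with bpf-to-bpf calls ends up roughly as
 *
 *   prog->bpf_func      = func[0]->bpf_func;  // JITed main image
 *   prog->aux->func     = func;               // all subprog images
 *   prog->aux->func_cnt = env->subprog_cnt;
 *
 * with every BPF_PSEUDO_CALL in the JITed images carrying
 * BPF_CALL_IMM(callee->bpf_func), while the interpreter copy of the
 * insns was rewritten above to keep subprog indices so that later
 * dump requests stay consistent.
 */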
16238 
16239 static int fixup_call_args(struct bpf_verifier_env *env)
16240 {
16241 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
16242 	struct bpf_prog *prog = env->prog;
16243 	struct bpf_insn *insn = prog->insnsi;
16244 	bool has_kfunc_call = bpf_prog_has_kfunc_call(prog);
16245 	int i, depth;
16246 #endif
16247 	int err = 0;
16248 
16249 	if (env->prog->jit_requested &&
16250 	    !bpf_prog_is_offloaded(env->prog->aux)) {
16251 		err = jit_subprogs(env);
16252 		if (err == 0)
16253 			return 0;
16254 		if (err == -EFAULT)
16255 			return err;
16256 	}
16257 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
16258 	if (has_kfunc_call) {
16259 		verbose(env, "calling kernel functions is not allowed in non-JITed programs\n");
16260 		return -EINVAL;
16261 	}
16262 	if (env->subprog_cnt > 1 && env->prog->aux->tail_call_reachable) {
16263 		/* When the JIT fails, progs with bpf2bpf calls and tail_calls
16264 		 * have to be rejected, since the interpreter doesn't support them yet.
16265 		 */
16266 		verbose(env, "tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls\n");
16267 		return -EINVAL;
16268 	}
16269 	for (i = 0; i < prog->len; i++, insn++) {
16270 		if (bpf_pseudo_func(insn)) {
16271 			/* When the JIT fails, progs with callback calls
16272 			 * have to be rejected, since the interpreter doesn't support them yet.
16273 			 */
16274 			verbose(env, "callbacks are not allowed in non-JITed programs\n");
16275 			return -EINVAL;
16276 		}
16277 
16278 		if (!bpf_pseudo_call(insn))
16279 			continue;
16280 		depth = get_callee_stack_depth(env, insn, i);
16281 		if (depth < 0)
16282 			return depth;
16283 		bpf_patch_call_args(insn, depth);
16284 	}
16285 	err = 0;
16286 #endif
16287 	return err;
16288 }
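/* Interpreter fallback sketch (hedged; bpf_patch_call_args() itself is
 * assumed to live in kernel/bpf/core.c): for each remaining pseudo
 * call it rewrites the insn so the interpreter can dispatch the
 * bpf-to-bpf call directly, recording the callee's stack depth so that
 * every call frame gets its own region of the interpreter stack.
 */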
16289 
16290 static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
16291 			    struct bpf_insn *insn_buf, int insn_idx, int *cnt)
16292 {
16293 	const struct bpf_kfunc_desc *desc;
16294 	void *xdp_kfunc;
16295 
16296 	if (!insn->imm) {
16297 		verbose(env, "invalid kernel function call not eliminated in verifier pass\n");
16298 		return -EINVAL;
16299 	}
16300 
16301 	*cnt = 0;
16302 
16303 	if (bpf_dev_bound_kfunc_id(insn->imm)) {
16304 		xdp_kfunc = bpf_dev_bound_resolve_kfunc(env->prog, insn->imm);
16305 		if (xdp_kfunc) {
16306 			insn->imm = BPF_CALL_IMM(xdp_kfunc);
16307 			return 0;
16308 		}
16309 
16310 		/* fall back to the default kfunc when not supported by the netdev */
16311 	}
16312 
16313 	/* insn->imm has the btf func_id. Replace it with
16314 	 * an address (relative to __bpf_call_base).
16315 	 */
16316 	desc = find_kfunc_desc(env->prog, insn->imm, insn->off);
16317 	if (!desc) {
16318 		verbose(env, "verifier internal error: kernel function descriptor not found for func_id %u\n",
16319 			insn->imm);
16320 		return -EFAULT;
16321 	}
16322 
16323 	insn->imm = desc->imm;
16324 	if (insn->off)
16325 		return 0;
16326 	if (desc->func_id == special_kfunc_list[KF_bpf_obj_new_impl]) {
16327 		struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta;
16328 		struct bpf_insn addr[2] = { BPF_LD_IMM64(BPF_REG_2, (long)kptr_struct_meta) };
16329 		u64 obj_new_size = env->insn_aux_data[insn_idx].obj_new_size;
16330 
16331 		insn_buf[0] = BPF_MOV64_IMM(BPF_REG_1, obj_new_size);
16332 		insn_buf[1] = addr[0];
16333 		insn_buf[2] = addr[1];
16334 		insn_buf[3] = *insn;
16335 		*cnt = 4;
16336 	} else if (desc->func_id == special_kfunc_list[KF_bpf_obj_drop_impl]) {
16337 		struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta;
16338 		struct bpf_insn addr[2] = { BPF_LD_IMM64(BPF_REG_2, (long)kptr_struct_meta) };
16339 
16340 		insn_buf[0] = addr[0];
16341 		insn_buf[1] = addr[1];
16342 		insn_buf[2] = *insn;
16343 		*cnt = 3;
16344 	} else if (desc->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx] ||
16345 		   desc->func_id == special_kfunc_list[KF_bpf_rdonly_cast]) {
16346 		insn_buf[0] = BPF_MOV64_REG(BPF_REG_0, BPF_REG_1);
16347 		*cnt = 1;
16348 	}
16349 	return 0;
16350 }
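/* Example of the bpf_obj_new_impl rewrite above (illustrative):
 *
 *   call bpf_obj_new_impl
 *
 * becomes
 *
 *   r1 = <obj_new_size>      // BPF_MOV64_IMM
 *   r2 = <kptr_struct_meta>  // BPF_LD_IMM64, hidden argument
 *   call bpf_obj_new_impl
 *
 * i.e. the verifier materializes arguments the program never wrote.
 * bpf_obj_drop_impl gets the same treatment minus the size argument.
 */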
16351 
16352 /* Do various post-verification rewrites in a single program pass.
16353  * These rewrites simplify JIT and interpreter implementations.
16354  */
16355 static int do_misc_fixups(struct bpf_verifier_env *env)
16356 {
16357 	struct bpf_prog *prog = env->prog;
16358 	enum bpf_attach_type eatype = prog->expected_attach_type;
16359 	enum bpf_prog_type prog_type = resolve_prog_type(prog);
16360 	struct bpf_insn *insn = prog->insnsi;
16361 	const struct bpf_func_proto *fn;
16362 	const int insn_cnt = prog->len;
16363 	const struct bpf_map_ops *ops;
16364 	struct bpf_insn_aux_data *aux;
16365 	struct bpf_insn insn_buf[16];
16366 	struct bpf_prog *new_prog;
16367 	struct bpf_map *map_ptr;
16368 	int i, ret, cnt, delta = 0;
16369 
16370 	for (i = 0; i < insn_cnt; i++, insn++) {
16371 		/* Make divide-by-zero exceptions impossible. */
16372 		if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) ||
16373 		    insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
16374 		    insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
16375 		    insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
16376 			bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
16377 			bool isdiv = BPF_OP(insn->code) == BPF_DIV;
16378 			struct bpf_insn *patchlet;
16379 			struct bpf_insn chk_and_div[] = {
16380 				/* [R,W]x div 0 -> 0 */
16381 				BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
16382 					     BPF_JNE | BPF_K, insn->src_reg,
16383 					     0, 2, 0),
16384 				BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg),
16385 				BPF_JMP_IMM(BPF_JA, 0, 0, 1),
16386 				*insn,
16387 			};
16388 			struct bpf_insn chk_and_mod[] = {
16389 				/* [R,W]x mod 0 -> [R,W]x */
16390 				BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
16391 					     BPF_JEQ | BPF_K, insn->src_reg,
16392 					     0, 1 + (is64 ? 0 : 1), 0),
16393 				*insn,
16394 				BPF_JMP_IMM(BPF_JA, 0, 0, 1),
16395 				BPF_MOV32_REG(insn->dst_reg, insn->dst_reg),
16396 			};
16397 
16398 			patchlet = isdiv ? chk_and_div : chk_and_mod;
16399 			cnt = isdiv ? ARRAY_SIZE(chk_and_div) :
16400 				      ARRAY_SIZE(chk_and_mod) - (is64 ? 2 : 0);
16401 
16402 			new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
16403 			if (!new_prog)
16404 				return -ENOMEM;
16405 
16406 			delta    += cnt - 1;
16407 			env->prog = prog = new_prog;
16408 			insn      = new_prog->insnsi + i + delta;
16409 			continue;
16410 		}
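		/* Illustrative expansion of the rewrite above for a 64-bit
		 * BPF_DIV (BPF_MOD is analogous but leaves dst untouched):
		 *
		 *   if src != 0 goto +2  // skip the fixup
		 *   w_dst ^= w_dst       // dst = 0, ALU32 zero-extends
		 *   goto +1              // skip the real divide
		 *   dst /= src           // original insn, src != 0 here
		 */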
16411 
16412 		/* Implement LD_ABS and LD_IND with a rewrite, if supported by the program type. */
16413 		if (BPF_CLASS(insn->code) == BPF_LD &&
16414 		    (BPF_MODE(insn->code) == BPF_ABS ||
16415 		     BPF_MODE(insn->code) == BPF_IND)) {
16416 			cnt = env->ops->gen_ld_abs(insn, insn_buf);
16417 			if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
16418 				verbose(env, "bpf verifier is misconfigured\n");
16419 				return -EINVAL;
16420 			}
16421 
16422 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
16423 			if (!new_prog)
16424 				return -ENOMEM;
16425 
16426 			delta    += cnt - 1;
16427 			env->prog = prog = new_prog;
16428 			insn      = new_prog->insnsi + i + delta;
16429 			continue;
16430 		}
16431 
16432 		/* Rewrite pointer arithmetic to mitigate speculation attacks. */
16433 		if (insn->code == (BPF_ALU64 | BPF_ADD | BPF_X) ||
16434 		    insn->code == (BPF_ALU64 | BPF_SUB | BPF_X)) {
16435 			const u8 code_add = BPF_ALU64 | BPF_ADD | BPF_X;
16436 			const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X;
16437 			struct bpf_insn *patch = &insn_buf[0];
16438 			bool issrc, isneg, isimm;
16439 			u32 off_reg;
16440 
16441 			aux = &env->insn_aux_data[i + delta];
16442 			if (!aux->alu_state ||
16443 			    aux->alu_state == BPF_ALU_NON_POINTER)
16444 				continue;
16445 
16446 			isneg = aux->alu_state & BPF_ALU_NEG_VALUE;
16447 			issrc = (aux->alu_state & BPF_ALU_SANITIZE) ==
16448 				BPF_ALU_SANITIZE_SRC;
16449 			isimm = aux->alu_state & BPF_ALU_IMMEDIATE;
16450 
16451 			off_reg = issrc ? insn->src_reg : insn->dst_reg;
16452 			if (isimm) {
16453 				*patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit);
16454 			} else {
16455 				if (isneg)
16456 					*patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
16457 				*patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit);
16458 				*patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg);
16459 				*patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg);
16460 				*patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);
16461 				*patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63);
16462 				*patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX, off_reg);
16463 			}
16464 			if (!issrc)
16465 				*patch++ = BPF_MOV64_REG(insn->dst_reg, insn->src_reg);
16466 			insn->src_reg = BPF_REG_AX;
16467 			if (isneg)
16468 				insn->code = insn->code == code_add ?
16469 					     code_sub : code_add;
16470 			*patch++ = *insn;
16471 			if (issrc && isneg && !isimm)
16472 				*patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
16473 			cnt = patch - insn_buf;
16474 
16475 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
16476 			if (!new_prog)
16477 				return -ENOMEM;
16478 
16479 			delta    += cnt - 1;
16480 			env->prog = prog = new_prog;
16481 			insn      = new_prog->insnsi + i + delta;
16482 			continue;
16483 		}
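		/* Sketch of the non-immediate masking above (illustrative):
		 * with AX as scratch,
		 *
		 *   AX = limit; AX -= off; AX |= off; AX = -AX;
		 *   AX s>>= 63; AX &= off;
		 *
		 * leaves AX == off when 0 <= off <= limit and AX == 0
		 * otherwise, so the pointer ALU below operates on a
		 * speculation-safe offset in BPF_REG_AX.
		 */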
16484 
16485 		if (insn->code != (BPF_JMP | BPF_CALL))
16486 			continue;
16487 		if (insn->src_reg == BPF_PSEUDO_CALL)
16488 			continue;
16489 		if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) {
16490 			ret = fixup_kfunc_call(env, insn, insn_buf, i + delta, &cnt);
16491 			if (ret)
16492 				return ret;
16493 			if (cnt == 0)
16494 				continue;
16495 
16496 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
16497 			if (!new_prog)
16498 				return -ENOMEM;
16499 
16500 			delta	 += cnt - 1;
16501 			env->prog = prog = new_prog;
16502 			insn	  = new_prog->insnsi + i + delta;
16503 			continue;
16504 		}
16505 
16506 		if (insn->imm == BPF_FUNC_get_route_realm)
16507 			prog->dst_needed = 1;
16508 		if (insn->imm == BPF_FUNC_get_prandom_u32)
16509 			bpf_user_rnd_init_once();
16510 		if (insn->imm == BPF_FUNC_override_return)
16511 			prog->kprobe_override = 1;
16512 		if (insn->imm == BPF_FUNC_tail_call) {
16513 			/* If we tail call into other programs, we
16514 			 * cannot make any assumptions since they can
16515 			 * be replaced dynamically during runtime in
16516 			 * the program array.
16517 			 */
16518 			prog->cb_access = 1;
16519 			if (!allow_tail_call_in_subprogs(env))
16520 				prog->aux->stack_depth = MAX_BPF_STACK;
16521 			prog->aux->max_pkt_offset = MAX_PACKET_OFF;
16522 
16523 			/* mark bpf_tail_call as a different opcode to avoid a
16524 			 * conditional branch in the interpreter for every normal
16525 			 * call, and to prevent accidental JITing by a JIT compiler
16526 			 * that doesn't support bpf_tail_call yet
16527 			 */
16528 			insn->imm = 0;
16529 			insn->code = BPF_JMP | BPF_TAIL_CALL;
16530 
16531 			aux = &env->insn_aux_data[i + delta];
16532 			if (env->bpf_capable && !prog->blinding_requested &&
16533 			    prog->jit_requested &&
16534 			    !bpf_map_key_poisoned(aux) &&
16535 			    !bpf_map_ptr_poisoned(aux) &&
16536 			    !bpf_map_ptr_unpriv(aux)) {
16537 				struct bpf_jit_poke_descriptor desc = {
16538 					.reason = BPF_POKE_REASON_TAIL_CALL,
16539 					.tail_call.map = BPF_MAP_PTR(aux->map_ptr_state),
16540 					.tail_call.key = bpf_map_key_immediate(aux),
16541 					.insn_idx = i + delta,
16542 				};
16543 
16544 				ret = bpf_jit_add_poke_descriptor(prog, &desc);
16545 				if (ret < 0) {
16546 					verbose(env, "adding tail call poke descriptor failed\n");
16547 					return ret;
16548 				}
16549 
16550 				insn->imm = ret + 1;
16551 				continue;
16552 			}
16553 
16554 			if (!bpf_map_ptr_unpriv(aux))
16555 				continue;
16556 
16557 			/* instead of changing every JIT dealing with tail_call,
16558 			 * emit two extra insns:
16559 			 * if (index >= max_entries) goto out;
16560 			 * index &= array->index_mask;
16561 			 * to avoid out-of-bounds cpu speculation
16562 			 */
16563 			if (bpf_map_ptr_poisoned(aux)) {
16564 				verbose(env, "tail_call abusing map_ptr\n");
16565 				return -EINVAL;
16566 			}
16567 
16568 			map_ptr = BPF_MAP_PTR(aux->map_ptr_state);
16569 			insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3,
16570 						  map_ptr->max_entries, 2);
16571 			insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3,
16572 						    container_of(map_ptr,
16573 								 struct bpf_array,
16574 								 map)->index_mask);
16575 			insn_buf[2] = *insn;
16576 			cnt = 3;
16577 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
16578 			if (!new_prog)
16579 				return -ENOMEM;
16580 
16581 			delta    += cnt - 1;
16582 			env->prog = prog = new_prog;
16583 			insn      = new_prog->insnsi + i + delta;
16584 			continue;
16585 		}
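		/* Note on the two tail_call strategies above (illustrative):
		 * with a constant, non-poisoned map pointer the JIT can later
		 * patch a direct jump, so the insn only records its poke
		 * descriptor slot (imm = slot + 1); the bounds-check plus
		 * index-mask form is the fallback for unpriv map pointers.
		 */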
16586 
16587 		if (insn->imm == BPF_FUNC_timer_set_callback) {
16588 			/* The verifier will process callback_fn as many times as necessary
16589 			 * with different maps, and the register states prepared by
16590 			 * set_timer_callback_state will be accurate.
16591 			 *
16592 			 * The following use case is valid:
16593 			 *   map1 is shared by prog1, prog2, prog3.
16594 			 *   prog1 calls bpf_timer_init for some map1 elements
16595 			 *   prog2 calls bpf_timer_set_callback for some map1 elements.
16596 			 *     Those that were not bpf_timer_init-ed will return -EINVAL.
16597 			 *   prog3 calls bpf_timer_start for some map1 elements.
16598 			 *     Those that were not both bpf_timer_init-ed and
16599 			 *     bpf_timer_set_callback-ed will return -EINVAL.
16600 			 */
16601 			struct bpf_insn ld_addrs[2] = {
16602 				BPF_LD_IMM64(BPF_REG_3, (long)prog->aux),
16603 			};
16604 
16605 			insn_buf[0] = ld_addrs[0];
16606 			insn_buf[1] = ld_addrs[1];
16607 			insn_buf[2] = *insn;
16608 			cnt = 3;
16609 
16610 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
16611 			if (!new_prog)
16612 				return -ENOMEM;
16613 
16614 			delta    += cnt - 1;
16615 			env->prog = prog = new_prog;
16616 			insn      = new_prog->insnsi + i + delta;
16617 			goto patch_call_imm;
16618 		}
16619 
16620 		if (is_storage_get_function(insn->imm)) {
16621 			if (!env->prog->aux->sleepable ||
16622 			    env->insn_aux_data[i + delta].storage_get_func_atomic)
16623 				insn_buf[0] = BPF_MOV64_IMM(BPF_REG_5, (__force __s32)GFP_ATOMIC);
16624 			else
16625 				insn_buf[0] = BPF_MOV64_IMM(BPF_REG_5, (__force __s32)GFP_KERNEL);
16626 			insn_buf[1] = *insn;
16627 			cnt = 2;
16628 
16629 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
16630 			if (!new_prog)
16631 				return -ENOMEM;
16632 
16633 			delta += cnt - 1;
16634 			env->prog = prog = new_prog;
16635 			insn = new_prog->insnsi + i + delta;
16636 			goto patch_call_imm;
16637 		}
16638 
16639 		/* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup
16640 		 * and other inlining handlers are currently limited to 64 bit
16641 		 * and other inlining handlers are currently limited to 64-bit
16642 		 * only.
16643 		if (prog->jit_requested && BITS_PER_LONG == 64 &&
16644 		    (insn->imm == BPF_FUNC_map_lookup_elem ||
16645 		     insn->imm == BPF_FUNC_map_update_elem ||
16646 		     insn->imm == BPF_FUNC_map_delete_elem ||
16647 		     insn->imm == BPF_FUNC_map_push_elem   ||
16648 		     insn->imm == BPF_FUNC_map_pop_elem    ||
16649 		     insn->imm == BPF_FUNC_map_peek_elem   ||
16650 		     insn->imm == BPF_FUNC_redirect_map    ||
16651 		     insn->imm == BPF_FUNC_for_each_map_elem ||
16652 		     insn->imm == BPF_FUNC_map_lookup_percpu_elem)) {
16653 			aux = &env->insn_aux_data[i + delta];
16654 			if (bpf_map_ptr_poisoned(aux))
16655 				goto patch_call_imm;
16656 
16657 			map_ptr = BPF_MAP_PTR(aux->map_ptr_state);
16658 			ops = map_ptr->ops;
16659 			if (insn->imm == BPF_FUNC_map_lookup_elem &&
16660 			    ops->map_gen_lookup) {
16661 				cnt = ops->map_gen_lookup(map_ptr, insn_buf);
16662 				if (cnt == -EOPNOTSUPP)
16663 					goto patch_map_ops_generic;
16664 				if (cnt <= 0 || cnt >= ARRAY_SIZE(insn_buf)) {
16665 					verbose(env, "bpf verifier is misconfigured\n");
16666 					return -EINVAL;
16667 				}
16668 
16669 				new_prog = bpf_patch_insn_data(env, i + delta,
16670 							       insn_buf, cnt);
16671 				if (!new_prog)
16672 					return -ENOMEM;
16673 
16674 				delta    += cnt - 1;
16675 				env->prog = prog = new_prog;
16676 				insn      = new_prog->insnsi + i + delta;
16677 				continue;
16678 			}
16679 
16680 			BUILD_BUG_ON(!__same_type(ops->map_lookup_elem,
16681 				     (void *(*)(struct bpf_map *map, void *key))NULL));
16682 			BUILD_BUG_ON(!__same_type(ops->map_delete_elem,
16683 				     (int (*)(struct bpf_map *map, void *key))NULL));
16684 			BUILD_BUG_ON(!__same_type(ops->map_update_elem,
16685 				     (int (*)(struct bpf_map *map, void *key, void *value,
16686 					      u64 flags))NULL));
16687 			BUILD_BUG_ON(!__same_type(ops->map_push_elem,
16688 				     (int (*)(struct bpf_map *map, void *value,
16689 					      u64 flags))NULL));
16690 			BUILD_BUG_ON(!__same_type(ops->map_pop_elem,
16691 				     (int (*)(struct bpf_map *map, void *value))NULL));
16692 			BUILD_BUG_ON(!__same_type(ops->map_peek_elem,
16693 				     (int (*)(struct bpf_map *map, void *value))NULL));
16694 			BUILD_BUG_ON(!__same_type(ops->map_redirect,
16695 				     (int (*)(struct bpf_map *map, u64 index, u64 flags))NULL));
16696 			BUILD_BUG_ON(!__same_type(ops->map_for_each_callback,
16697 				     (int (*)(struct bpf_map *map,
16698 					      bpf_callback_t callback_fn,
16699 					      void *callback_ctx,
16700 					      u64 flags))NULL));
16701 			BUILD_BUG_ON(!__same_type(ops->map_lookup_percpu_elem,
16702 				     (void *(*)(struct bpf_map *map, void *key, u32 cpu))NULL));
16703 
16704 patch_map_ops_generic:
16705 			switch (insn->imm) {
16706 			case BPF_FUNC_map_lookup_elem:
16707 				insn->imm = BPF_CALL_IMM(ops->map_lookup_elem);
16708 				continue;
16709 			case BPF_FUNC_map_update_elem:
16710 				insn->imm = BPF_CALL_IMM(ops->map_update_elem);
16711 				continue;
16712 			case BPF_FUNC_map_delete_elem:
16713 				insn->imm = BPF_CALL_IMM(ops->map_delete_elem);
16714 				continue;
16715 			case BPF_FUNC_map_push_elem:
16716 				insn->imm = BPF_CALL_IMM(ops->map_push_elem);
16717 				continue;
16718 			case BPF_FUNC_map_pop_elem:
16719 				insn->imm = BPF_CALL_IMM(ops->map_pop_elem);
16720 				continue;
16721 			case BPF_FUNC_map_peek_elem:
16722 				insn->imm = BPF_CALL_IMM(ops->map_peek_elem);
16723 				continue;
16724 			case BPF_FUNC_redirect_map:
16725 				insn->imm = BPF_CALL_IMM(ops->map_redirect);
16726 				continue;
16727 			case BPF_FUNC_for_each_map_elem:
16728 				insn->imm = BPF_CALL_IMM(ops->map_for_each_callback);
16729 				continue;
16730 			case BPF_FUNC_map_lookup_percpu_elem:
16731 				insn->imm = BPF_CALL_IMM(ops->map_lookup_percpu_elem);
16732 				continue;
16733 			}
16734 
16735 			goto patch_call_imm;
16736 		}
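		/* Example of the patching above (illustrative; the array map
		 * is an assumed map type):
		 *
		 *   call bpf_map_lookup_elem
		 *
		 * is either fully inlined by ops->map_gen_lookup (a bounds
		 * check plus pointer arithmetic, no call left at all) or,
		 * via patch_map_ops_generic, becomes a direct call such as
		 *
		 *   insn->imm = BPF_CALL_IMM(array_map_lookup_elem);
		 *
		 * skipping the generic helper dispatch.
		 */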
16737 
16738 		/* Implement bpf_jiffies64 inline. */
16739 		if (prog->jit_requested && BITS_PER_LONG == 64 &&
16740 		    insn->imm == BPF_FUNC_jiffies64) {
16741 			struct bpf_insn ld_jiffies_addr[2] = {
16742 				BPF_LD_IMM64(BPF_REG_0,
16743 					     (unsigned long)&jiffies),
16744 			};
16745 
16746 			insn_buf[0] = ld_jiffies_addr[0];
16747 			insn_buf[1] = ld_jiffies_addr[1];
16748 			insn_buf[2] = BPF_LDX_MEM(BPF_DW, BPF_REG_0,
16749 						  BPF_REG_0, 0);
16750 			cnt = 3;
16751 
16752 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf,
16753 						       cnt);
16754 			if (!new_prog)
16755 				return -ENOMEM;
16756 
16757 			delta    += cnt - 1;
16758 			env->prog = prog = new_prog;
16759 			insn      = new_prog->insnsi + i + delta;
16760 			continue;
16761 		}
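		/* i.e. the helper call above collapses to (illustrative)
		 *
		 *   r0 = &jiffies;          // BPF_LD_IMM64
		 *   r0 = *(u64 *)(r0 + 0);  // BPF_LDX_MEM
		 *
		 * a single 8-byte load, valid only on 64-bit kernels.
		 */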
16762 
16763 		/* Implement bpf_get_func_arg inline. */
16764 		if (prog_type == BPF_PROG_TYPE_TRACING &&
16765 		    insn->imm == BPF_FUNC_get_func_arg) {
16766 			/* Load nr_args from ctx - 8 */
16767 			insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8);
16768 			insn_buf[1] = BPF_JMP32_REG(BPF_JGE, BPF_REG_2, BPF_REG_0, 6);
16769 			insn_buf[2] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 3);
16770 			insn_buf[3] = BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1);
16771 			insn_buf[4] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0);
16772 			insn_buf[5] = BPF_STX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0);
16773 			insn_buf[6] = BPF_MOV64_IMM(BPF_REG_0, 0);
16774 			insn_buf[7] = BPF_JMP_A(1);
16775 			insn_buf[8] = BPF_MOV64_IMM(BPF_REG_0, -EINVAL);
16776 			cnt = 9;
16777 
16778 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
16779 			if (!new_prog)
16780 				return -ENOMEM;
16781 
16782 			delta    += cnt - 1;
16783 			env->prog = prog = new_prog;
16784 			insn      = new_prog->insnsi + i + delta;
16785 			continue;
16786 		}
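		/* The 9-insn patch above is roughly this C (illustrative;
		 * r1 = ctx, r2 = n, r3 = value):
		 *
		 *   nr_args = ((u64 *)ctx)[-1];
		 *   if (n >= nr_args)
		 *           return -EINVAL;
		 *   *value = ((u64 *)ctx)[n];
		 *   return 0;
		 */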
16787 
16788 		/* Implement bpf_get_func_ret inline. */
16789 		if (prog_type == BPF_PROG_TYPE_TRACING &&
16790 		    insn->imm == BPF_FUNC_get_func_ret) {
16791 			if (eatype == BPF_TRACE_FEXIT ||
16792 			    eatype == BPF_MODIFY_RETURN) {
16793 				/* Load nr_args from ctx - 8 */
16794 				insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8);
16795 				insn_buf[1] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_0, 3);
16796 				insn_buf[2] = BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1);
16797 				insn_buf[3] = BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0);
16798 				insn_buf[4] = BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, 0);
16799 				insn_buf[5] = BPF_MOV64_IMM(BPF_REG_0, 0);
16800 				cnt = 6;
16801 			} else {
16802 				insn_buf[0] = BPF_MOV64_IMM(BPF_REG_0, -EOPNOTSUPP);
16803 				cnt = 1;
16804 			}
16805 
16806 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
16807 			if (!new_prog)
16808 				return -ENOMEM;
16809 
16810 			delta    += cnt - 1;
16811 			env->prog = prog = new_prog;
16812 			insn      = new_prog->insnsi + i + delta;
16813 			continue;
16814 		}
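		/* For FEXIT/MODIFY_RETURN the patch above reads the return
		 * value stored right after the arguments (illustrative C;
		 * r1 = ctx, r2 = value):
		 *
		 *   *value = ((u64 *)ctx)[nr_args];
		 *   return 0;
		 *
		 * while other attach types just get -EOPNOTSUPP.
		 */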
16815 
16816 		/* Implement get_func_arg_cnt inline. */
16817 		if (prog_type == BPF_PROG_TYPE_TRACING &&
16818 		    insn->imm == BPF_FUNC_get_func_arg_cnt) {
16819 			/* Load nr_args from ctx - 8 */
16820 			insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8);
16821 
16822 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 1);
16823 			if (!new_prog)
16824 				return -ENOMEM;
16825 
16826 			env->prog = prog = new_prog;
16827 			insn      = new_prog->insnsi + i + delta;
16828 			continue;
16829 		}
16830 
16831 		/* Implement bpf_get_func_ip inline. */
16832 		if (prog_type == BPF_PROG_TYPE_TRACING &&
16833 		    insn->imm == BPF_FUNC_get_func_ip) {
16834 			/* Load IP address from ctx - 16 */
16835 			insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -16);
16836 
16837 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 1);
16838 			if (!new_prog)
16839 				return -ENOMEM;
16840 
16841 			env->prog = prog = new_prog;
16842 			insn      = new_prog->insnsi + i + delta;
16843 			continue;
16844 		}
16845 
16846 patch_call_imm:
16847 		fn = env->ops->get_func_proto(insn->imm, env->prog);
16848 		/* all functions that have a prototype and that the verifier
16849 		 * allowed programs to call must be real in-kernel functions
16850 		 */
16851 		if (!fn->func) {
16852 			verbose(env,
16853 				"kernel subsystem misconfigured func %s#%d\n",
16854 				func_id_name(insn->imm), insn->imm);
16855 			return -EFAULT;
16856 		}
16857 		insn->imm = fn->func - __bpf_call_base;
16858 	}
16859 
16860 	/* Since poke tab is now finalized, publish aux to tracker. */
16861 	for (i = 0; i < prog->aux->size_poke_tab; i++) {
16862 		map_ptr = prog->aux->poke_tab[i].tail_call.map;
16863 		if (!map_ptr->ops->map_poke_track ||
16864 		    !map_ptr->ops->map_poke_untrack ||
16865 		    !map_ptr->ops->map_poke_run) {
16866 			verbose(env, "bpf verifier is misconfigured\n");
16867 			return -EINVAL;
16868 		}
16869 
16870 		ret = map_ptr->ops->map_poke_track(map_ptr, prog->aux);
16871 		if (ret < 0) {
16872 			verbose(env, "tracking tail call prog failed\n");
16873 			return ret;
16874 		}
16875 	}
16876 
16877 	sort_kfunc_descs_by_imm(env->prog);
16878 
16879 	return 0;
16880 }
16881 
16882 static struct bpf_prog *inline_bpf_loop(struct bpf_verifier_env *env,
16883 					int position,
16884 					s32 stack_base,
16885 					u32 callback_subprogno,
16886 					u32 *cnt)
16887 {
16888 	s32 r6_offset = stack_base + 0 * BPF_REG_SIZE;
16889 	s32 r7_offset = stack_base + 1 * BPF_REG_SIZE;
16890 	s32 r8_offset = stack_base + 2 * BPF_REG_SIZE;
16891 	int reg_loop_max = BPF_REG_6;
16892 	int reg_loop_cnt = BPF_REG_7;
16893 	int reg_loop_ctx = BPF_REG_8;
16894 
16895 	struct bpf_prog *new_prog;
16896 	u32 callback_start;
16897 	u32 call_insn_offset;
16898 	s32 callback_offset;
16899 
16900 	/* This represents an inlined version of bpf_iter.c:bpf_loop;
16901 	 * be careful to keep this code in sync with it.
16902 	 */
16903 	struct bpf_insn insn_buf[] = {
16904 		/* Return error and jump to the end of the patch if
16905 		 * expected number of iterations is too big.
16906 		 * the expected number of iterations is too big.
16907 		BPF_JMP_IMM(BPF_JLE, BPF_REG_1, BPF_MAX_LOOPS, 2),
16908 		BPF_MOV32_IMM(BPF_REG_0, -E2BIG),
16909 		BPF_JMP_IMM(BPF_JA, 0, 0, 16),
16910 		/* spill R6, R7, R8 to use these as loop vars */
16911 		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, r6_offset),
16912 		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_7, r7_offset),
16913 		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_8, r8_offset),
16914 		/* initialize loop vars */
16915 		BPF_MOV64_REG(reg_loop_max, BPF_REG_1),
16916 		BPF_MOV32_IMM(reg_loop_cnt, 0),
16917 		BPF_MOV64_REG(reg_loop_ctx, BPF_REG_3),
16918 		/* loop header,
16919 		 * if reg_loop_cnt >= reg_loop_max skip the loop body
16920 		 */
16921 		BPF_JMP_REG(BPF_JGE, reg_loop_cnt, reg_loop_max, 5),
16922 		/* callback call,
16923 		 * correct callback offset would be set after patching
16924 		 */
16925 		BPF_MOV64_REG(BPF_REG_1, reg_loop_cnt),
16926 		BPF_MOV64_REG(BPF_REG_2, reg_loop_ctx),
16927 		BPF_CALL_REL(0),
16928 		/* increment loop counter */
16929 		BPF_ALU64_IMM(BPF_ADD, reg_loop_cnt, 1),
16930 		/* jump to loop header if callback returned 0 */
16931 		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -6),
16932 		/* return value of bpf_loop,
16933 		 * set R0 to the number of iterations
16934 		 */
16935 		BPF_MOV64_REG(BPF_REG_0, reg_loop_cnt),
16936 		/* restore original values of R6, R7, R8 */
16937 		BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, r6_offset),
16938 		BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_10, r7_offset),
16939 		BPF_LDX_MEM(BPF_DW, BPF_REG_8, BPF_REG_10, r8_offset),
16940 	};
16941 
16942 	*cnt = ARRAY_SIZE(insn_buf);
16943 	new_prog = bpf_patch_insn_data(env, position, insn_buf, *cnt);
16944 	if (!new_prog)
16945 		return new_prog;
16946 
16947 	/* callback start is known only after patching */
16948 	callback_start = env->subprog_info[callback_subprogno].start;
16949 	/* Note: insn_buf[12] is the offset of the BPF_CALL_REL instruction */
16950 	call_insn_offset = position + 12;
16951 	callback_offset = callback_start - call_insn_offset - 1;
16952 	new_prog->insnsi[call_insn_offset].imm = callback_offset;
16953 
16954 	return new_prog;
16955 }
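/* The patch built above behaves like this C (illustrative):
 *
 *   if (nr_loops > BPF_MAX_LOOPS)
 *           return -E2BIG;
 *   for (i = 0; i < nr_loops; ) {
 *           ret = callback(i, ctx);
 *           i++;
 *           if (ret)
 *                   break;
 *   }
 *   return i;  // number of iterations performed
 *
 * with nr_loops, i and ctx living in R6-R8, spilled and restored
 * around the patch.
 */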
16956 
16957 static bool is_bpf_loop_call(struct bpf_insn *insn)
16958 {
16959 	return insn->code == (BPF_JMP | BPF_CALL) &&
16960 		insn->src_reg == 0 &&
16961 		insn->imm == BPF_FUNC_loop;
16962 }
16963 
16964 /* For all sub-programs in the program (including main) check
16965  * insn_aux_data to see if there are bpf_loop calls that require
16966  * inlining. If such calls are found, they are replaced with the
16967  * sequence of instructions produced by the `inline_bpf_loop` function,
16968  * and the subprog's stack_depth is increased by the size of 3 registers.
16969  * This stack space is used to spill the values of R6, R7 and R8.  These
16970  * registers are used to store the loop bound, counter and context
16971  * variables.
16972  */
16973 static int optimize_bpf_loop(struct bpf_verifier_env *env)
16974 {
16975 	struct bpf_subprog_info *subprogs = env->subprog_info;
16976 	int i, cur_subprog = 0, cnt, delta = 0;
16977 	struct bpf_insn *insn = env->prog->insnsi;
16978 	int insn_cnt = env->prog->len;
16979 	u16 stack_depth = subprogs[cur_subprog].stack_depth;
16980 	u16 stack_depth_roundup = round_up(stack_depth, 8) - stack_depth;
16981 	u16 stack_depth_extra = 0;
16982 
16983 	for (i = 0; i < insn_cnt; i++, insn++) {
16984 		struct bpf_loop_inline_state *inline_state =
16985 			&env->insn_aux_data[i + delta].loop_inline_state;
16986 
16987 		if (is_bpf_loop_call(insn) && inline_state->fit_for_inline) {
16988 			struct bpf_prog *new_prog;
16989 
16990 			stack_depth_extra = BPF_REG_SIZE * 3 + stack_depth_roundup;
16991 			new_prog = inline_bpf_loop(env,
16992 						   i + delta,
16993 						   -(stack_depth + stack_depth_extra),
16994 						   inline_state->callback_subprogno,
16995 						   &cnt);
16996 			if (!new_prog)
16997 				return -ENOMEM;
16998 
16999 			delta     += cnt - 1;
17000 			env->prog  = new_prog;
17001 			insn       = new_prog->insnsi + i + delta;
17002 		}
17003 
17004 		if (subprogs[cur_subprog + 1].start == i + delta + 1) {
17005 			subprogs[cur_subprog].stack_depth += stack_depth_extra;
17006 			cur_subprog++;
17007 			stack_depth = subprogs[cur_subprog].stack_depth;
17008 			stack_depth_roundup = round_up(stack_depth, 8) - stack_depth;
17009 			stack_depth_extra = 0;
17010 		}
17011 	}
17012 
17013 	env->prog->aux->stack_depth = env->subprog_info[0].stack_depth;
17014 
17015 	return 0;
17016 }
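/* Stack layout note for the pass above (illustrative): a subprog that
 * had a bpf_loop call inlined reserves 3 * BPF_REG_SIZE extra bytes
 * (plus the roundup needed to keep the frame 8-byte aligned) below its
 * existing frame for the R6-R8 spill slots used by the patch.
 */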
17017 
17018 static void free_states(struct bpf_verifier_env *env)
17019 {
17020 	struct bpf_verifier_state_list *sl, *sln;
17021 	int i;
17022 
17023 	sl = env->free_list;
17024 	while (sl) {
17025 		sln = sl->next;
17026 		free_verifier_state(&sl->state, false);
17027 		kfree(sl);
17028 		sl = sln;
17029 	}
17030 	env->free_list = NULL;
17031 
17032 	if (!env->explored_states)
17033 		return;
17034 
17035 	for (i = 0; i < state_htab_size(env); i++) {
17036 		sl = env->explored_states[i];
17037 
17038 		while (sl) {
17039 			sln = sl->next;
17040 			free_verifier_state(&sl->state, false);
17041 			kfree(sl);
17042 			sl = sln;
17043 		}
17044 		env->explored_states[i] = NULL;
17045 	}
17046 }
17047 
17048 static int do_check_common(struct bpf_verifier_env *env, int subprog)
17049 {
17050 	bool pop_log = !(env->log.level & BPF_LOG_LEVEL2);
17051 	struct bpf_verifier_state *state;
17052 	struct bpf_reg_state *regs;
17053 	int ret, i;
17054 
17055 	env->prev_linfo = NULL;
17056 	env->pass_cnt++;
17057 
17058 	state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL);
17059 	if (!state)
17060 		return -ENOMEM;
17061 	state->curframe = 0;
17062 	state->speculative = false;
17063 	state->branches = 1;
17064 	state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL);
17065 	if (!state->frame[0]) {
17066 		kfree(state);
17067 		return -ENOMEM;
17068 	}
17069 	env->cur_state = state;
17070 	init_func_state(env, state->frame[0],
17071 			BPF_MAIN_FUNC /* callsite */,
17072 			0 /* frameno */,
17073 			subprog);
17074 	state->first_insn_idx = env->subprog_info[subprog].start;
17075 	state->last_insn_idx = -1;
17076 
17077 	regs = state->frame[state->curframe]->regs;
17078 	if (subprog || env->prog->type == BPF_PROG_TYPE_EXT) {
17079 		ret = btf_prepare_func_args(env, subprog, regs);
17080 		if (ret)
17081 			goto out;
17082 		for (i = BPF_REG_1; i <= BPF_REG_5; i++) {
17083 			if (regs[i].type == PTR_TO_CTX)
17084 				mark_reg_known_zero(env, regs, i);
17085 			else if (regs[i].type == SCALAR_VALUE)
17086 				mark_reg_unknown(env, regs, i);
17087 			else if (base_type(regs[i].type) == PTR_TO_MEM) {
17088 				const u32 mem_size = regs[i].mem_size;
17089 
17090 				mark_reg_known_zero(env, regs, i);
17091 				regs[i].mem_size = mem_size;
17092 				regs[i].id = ++env->id_gen;
17093 			}
17094 		}
17095 	} else {
17096 		/* 1st arg to a function */
17097 		regs[BPF_REG_1].type = PTR_TO_CTX;
17098 		mark_reg_known_zero(env, regs, BPF_REG_1);
17099 		ret = btf_check_subprog_arg_match(env, subprog, regs);
17100 		if (ret == -EFAULT)
17101 			/* unlikely verifier bug. abort.
17102 			 * ret == 0 and ret < 0 are sadly acceptable for
17103 			 * the main() function due to backward compatibility,
17104 			 * e.g. a socket filter program may be written as:
17105 			 * int bpf_prog(struct pt_regs *ctx)
17106 			 * and never dereference that ctx in the program.
17107 			 * 'struct pt_regs' is a type mismatch for socket
17108 			 * filter that should be using 'struct __sk_buff'.
17109 			 */
17110 			goto out;
17111 	}
17112 
17113 	ret = do_check(env);
17114 out:
17115 	/* check for NULL is necessary, since cur_state can be freed inside
17116 	 * do_check() under memory pressure.
17117 	 */
17118 	if (env->cur_state) {
17119 		free_verifier_state(env->cur_state, true);
17120 		env->cur_state = NULL;
17121 	}
17122 	while (!pop_stack(env, NULL, NULL, false));
17123 	if (!ret && pop_log)
17124 		bpf_vlog_reset(&env->log, 0);
17125 	free_states(env);
17126 	return ret;
17127 }
17128 
17129 /* Verify all global functions in a BPF program one by one based on their BTF.
17130  * All global functions must pass verification. Otherwise the whole program is rejected.
17131  * Consider:
17132  * int bar(int);
17133  * int foo(int f)
17134  * {
17135  *    return bar(f);
17136  * }
17137  * int bar(int b)
17138  * {
17139  *    ...
17140  * }
17141  * foo() will be verified first for R1=any_scalar_value. During verification it
17142  * will be assumed that bar() has already been verified successfully, and the
17143  * call to bar() from foo() will be checked for a type match only. Later bar()
17144  * will be verified independently to check that it's safe for R1=any_scalar_value.
17145  */
17146 static int do_check_subprogs(struct bpf_verifier_env *env)
17147 {
17148 	struct bpf_prog_aux *aux = env->prog->aux;
17149 	int i, ret;
17150 
17151 	if (!aux->func_info)
17152 		return 0;
17153 
17154 	for (i = 1; i < env->subprog_cnt; i++) {
17155 		if (aux->func_info_aux[i].linkage != BTF_FUNC_GLOBAL)
17156 			continue;
17157 		env->insn_idx = env->subprog_info[i].start;
17158 		WARN_ON_ONCE(env->insn_idx == 0);
17159 		ret = do_check_common(env, i);
17160 		if (ret) {
17161 			return ret;
17162 		} else if (env->log.level & BPF_LOG_LEVEL) {
17163 			verbose(env,
17164 				"Func#%d is safe for any args that match its prototype\n",
17165 				i);
17166 		}
17167 	}
17168 	return 0;
17169 }
17170 
17171 static int do_check_main(struct bpf_verifier_env *env)
17172 {
17173 	int ret;
17174 
17175 	env->insn_idx = 0;
17176 	ret = do_check_common(env, 0);
17177 	if (!ret)
17178 		env->prog->aux->stack_depth = env->subprog_info[0].stack_depth;
17179 	return ret;
17180 }
17181 
17182 
17183 static void print_verification_stats(struct bpf_verifier_env *env)
17184 {
17185 	int i;
17186 
17187 	if (env->log.level & BPF_LOG_STATS) {
17188 		verbose(env, "verification time %lld usec\n",
17189 			div_u64(env->verification_time, 1000));
17190 		verbose(env, "stack depth ");
17191 		for (i = 0; i < env->subprog_cnt; i++) {
17192 			u32 depth = env->subprog_info[i].stack_depth;
17193 
17194 			verbose(env, "%d", depth);
17195 			if (i + 1 < env->subprog_cnt)
17196 				verbose(env, "+");
17197 		}
17198 		verbose(env, "\n");
17199 	}
17200 	verbose(env, "processed %d insns (limit %d) max_states_per_insn %d "
17201 		"total_states %d peak_states %d mark_read %d\n",
17202 		env->insn_processed, BPF_COMPLEXITY_LIMIT_INSNS,
17203 		env->max_states_per_insn, env->total_states,
17204 		env->peak_states, env->longest_mark_read_walk);
17205 }
17206 
17207 static int check_struct_ops_btf_id(struct bpf_verifier_env *env)
17208 {
17209 	const struct btf_type *t, *func_proto;
17210 	const struct bpf_struct_ops *st_ops;
17211 	const struct btf_member *member;
17212 	struct bpf_prog *prog = env->prog;
17213 	u32 btf_id, member_idx;
17214 	const char *mname;
17215 
17216 	if (!prog->gpl_compatible) {
17217 		verbose(env, "struct ops programs must have a GPL compatible license\n");
17218 		return -EINVAL;
17219 	}
17220 
17221 	btf_id = prog->aux->attach_btf_id;
17222 	st_ops = bpf_struct_ops_find(btf_id);
17223 	if (!st_ops) {
17224 		verbose(env, "attach_btf_id %u is not a supported struct\n",
17225 			btf_id);
17226 		return -ENOTSUPP;
17227 	}
17228 
17229 	t = st_ops->type;
17230 	member_idx = prog->expected_attach_type;
17231 	if (member_idx >= btf_type_vlen(t)) {
17232 		verbose(env, "attach to invalid member idx %u of struct %s\n",
17233 			member_idx, st_ops->name);
17234 		return -EINVAL;
17235 	}
17236 
17237 	member = &btf_type_member(t)[member_idx];
17238 	mname = btf_name_by_offset(btf_vmlinux, member->name_off);
17239 	func_proto = btf_type_resolve_func_ptr(btf_vmlinux, member->type,
17240 					       NULL);
17241 	if (!func_proto) {
17242 		verbose(env, "attach to invalid member %s(@idx %u) of struct %s\n",
17243 			mname, member_idx, st_ops->name);
17244 		return -EINVAL;
17245 	}
17246 
17247 	if (st_ops->check_member) {
17248 		int err = st_ops->check_member(t, member, prog);
17249 
17250 		if (err) {
17251 			verbose(env, "attach to unsupported member %s of struct %s\n",
17252 				mname, st_ops->name);
17253 			return err;
17254 		}
17255 	}
17256 
17257 	prog->aux->attach_func_proto = func_proto;
17258 	prog->aux->attach_func_name = mname;
17259 	env->ops = st_ops->verifier_ops;
17260 
17261 	return 0;
17262 }
17263 #define SECURITY_PREFIX "security_"
17264 
17265 static int check_attach_modify_return(unsigned long addr, const char *func_name)
17266 {
17267 	if (within_error_injection_list(addr) ||
17268 	    !strncmp(SECURITY_PREFIX, func_name, sizeof(SECURITY_PREFIX) - 1))
17269 		return 0;
17270 
17271 	return -EINVAL;
17272 }
17273 
17274 /* list of non-sleepable functions that are otherwise on
17275  * ALLOW_ERROR_INJECTION list
17276  */
17277 BTF_SET_START(btf_non_sleepable_error_inject)
17278 /* The three functions below can be called from both sleepable and non-sleepable context.
17279  * Assume non-sleepable from the BPF safety point of view.
17280  */
17281 BTF_ID(func, __filemap_add_folio)
17282 BTF_ID(func, should_fail_alloc_page)
17283 BTF_ID(func, should_failslab)
17284 BTF_SET_END(btf_non_sleepable_error_inject)
17285 
17286 static int check_non_sleepable_error_inject(u32 btf_id)
17287 {
17288 	return btf_id_set_contains(&btf_non_sleepable_error_inject, btf_id);
17289 }
17290 
17291 int bpf_check_attach_target(struct bpf_verifier_log *log,
17292 			    const struct bpf_prog *prog,
17293 			    const struct bpf_prog *tgt_prog,
17294 			    u32 btf_id,
17295 			    struct bpf_attach_target_info *tgt_info)
17296 {
17297 	bool prog_extension = prog->type == BPF_PROG_TYPE_EXT;
17298 	const char prefix[] = "btf_trace_";
17299 	int ret = 0, subprog = -1, i;
17300 	const struct btf_type *t;
17301 	bool conservative = true;
17302 	const char *tname;
17303 	struct btf *btf;
17304 	long addr = 0;
17305 
17306 	if (!btf_id) {
17307 		bpf_log(log, "Tracing programs must provide btf_id\n");
17308 		return -EINVAL;
17309 	}
17310 	btf = tgt_prog ? tgt_prog->aux->btf : prog->aux->attach_btf;
17311 	if (!btf) {
17312 		bpf_log(log,
17313 			"FENTRY/FEXIT program can only be attached to another program annotated with BTF\n");
17314 		return -EINVAL;
17315 	}
17316 	t = btf_type_by_id(btf, btf_id);
17317 	if (!t) {
17318 		bpf_log(log, "attach_btf_id %u is invalid\n", btf_id);
17319 		return -EINVAL;
17320 	}
17321 	tname = btf_name_by_offset(btf, t->name_off);
17322 	if (!tname) {
17323 		bpf_log(log, "attach_btf_id %u doesn't have a name\n", btf_id);
17324 		return -EINVAL;
17325 	}
17326 	if (tgt_prog) {
17327 		struct bpf_prog_aux *aux = tgt_prog->aux;
17328 
17329 		if (bpf_prog_is_dev_bound(prog->aux) &&
17330 		    !bpf_prog_dev_bound_match(prog, tgt_prog)) {
17331 			bpf_log(log, "Target program bound device mismatch\n");
17332 			return -EINVAL;
17333 		}
17334 
17335 		for (i = 0; i < aux->func_info_cnt; i++)
17336 			if (aux->func_info[i].type_id == btf_id) {
17337 				subprog = i;
17338 				break;
17339 			}
17340 		if (subprog == -1) {
17341 			bpf_log(log, "Subprog %s doesn't exist\n", tname);
17342 			return -EINVAL;
17343 		}
17344 		conservative = aux->func_info_aux[subprog].unreliable;
17345 		if (prog_extension) {
17346 			if (conservative) {
17347 				bpf_log(log,
17348 					"Cannot replace static functions\n");
17349 				return -EINVAL;
17350 			}
17351 			if (!prog->jit_requested) {
17352 				bpf_log(log,
17353 					"Extension programs should be JITed\n");
17354 				return -EINVAL;
17355 			}
17356 		}
17357 		if (!tgt_prog->jited) {
17358 			bpf_log(log, "Can only attach to JITed progs\n");
17359 			return -EINVAL;
17360 		}
17361 		if (tgt_prog->type == prog->type) {
17362 			/* Cannot fentry/fexit another fentry/fexit program.
17363 			 * Cannot attach program extension to another extension.
17364 			 * It's ok to attach fentry/fexit to extension program.
17365 			 */
17366 			bpf_log(log, "Cannot recursively attach\n");
17367 			return -EINVAL;
17368 		}
17369 		if (tgt_prog->type == BPF_PROG_TYPE_TRACING &&
17370 		    prog_extension &&
17371 		    (tgt_prog->expected_attach_type == BPF_TRACE_FENTRY ||
17372 		     tgt_prog->expected_attach_type == BPF_TRACE_FEXIT)) {
17373 			/* Program extensions can extend all program types
17374 			 * except fentry/fexit. The reason is the following.
17375 			 * The fentry/fexit programs are used for performance
17376 			 * analysis and stats, and can be attached to any program
17377 			 * type except themselves. When an extension program is
17378 			 * replacing an XDP function, it is necessary to allow
17379 			 * performance analysis of all functions: both the original
17380 			 * XDP program and its program extension. Hence
17381 			 * attaching fentry/fexit to BPF_PROG_TYPE_EXT is
17382 			 * allowed. If extending fentry/fexit were allowed, it
17383 			 * would be possible to create a long call chain
17384 			 * fentry->extension->fentry->extension beyond a
17385 			 * reasonable stack size. Hence extending fentry is not
17386 			 * allowed.
17387 			 */
17388 			bpf_log(log, "Cannot extend fentry/fexit\n");
17389 			return -EINVAL;
17390 		}
17391 	} else {
17392 		if (prog_extension) {
17393 			bpf_log(log, "Cannot replace kernel functions\n");
17394 			return -EINVAL;
17395 		}
17396 	}
17397 
17398 	switch (prog->expected_attach_type) {
17399 	case BPF_TRACE_RAW_TP:
17400 		if (tgt_prog) {
17401 			bpf_log(log,
17402 				"Only FENTRY/FEXIT progs are attachable to another BPF prog\n");
17403 			return -EINVAL;
17404 		}
17405 		if (!btf_type_is_typedef(t)) {
17406 			bpf_log(log, "attach_btf_id %u is not a typedef\n",
17407 				btf_id);
17408 			return -EINVAL;
17409 		}
17410 		if (strncmp(prefix, tname, sizeof(prefix) - 1)) {
17411 			bpf_log(log, "attach_btf_id %u points to wrong type name %s\n",
17412 				btf_id, tname);
17413 			return -EINVAL;
17414 		}
17415 		tname += sizeof(prefix) - 1;
17416 		t = btf_type_by_id(btf, t->type);
17417 		if (!btf_type_is_ptr(t))
17418 			/* should never happen in a valid vmlinux build */
17419 			return -EINVAL;
17420 		t = btf_type_by_id(btf, t->type);
17421 		if (!btf_type_is_func_proto(t))
17422 			/* should never happen in a valid vmlinux build */
17423 			return -EINVAL;
17424 
17425 		break;
17426 	case BPF_TRACE_ITER:
17427 		if (!btf_type_is_func(t)) {
17428 			bpf_log(log, "attach_btf_id %u is not a function\n",
17429 				btf_id);
17430 			return -EINVAL;
17431 		}
17432 		t = btf_type_by_id(btf, t->type);
17433 		if (!btf_type_is_func_proto(t))
17434 			return -EINVAL;
17435 		ret = btf_distill_func_proto(log, btf, t, tname, &tgt_info->fmodel);
17436 		if (ret)
17437 			return ret;
17438 		break;
17439 	default:
17440 		if (!prog_extension)
17441 			return -EINVAL;
17442 		fallthrough;
17443 	case BPF_MODIFY_RETURN:
17444 	case BPF_LSM_MAC:
17445 	case BPF_LSM_CGROUP:
17446 	case BPF_TRACE_FENTRY:
17447 	case BPF_TRACE_FEXIT:
17448 		if (!btf_type_is_func(t)) {
17449 			bpf_log(log, "attach_btf_id %u is not a function\n",
17450 				btf_id);
17451 			return -EINVAL;
17452 		}
17453 		if (prog_extension &&
17454 		    btf_check_type_match(log, prog, btf, t))
17455 			return -EINVAL;
17456 		t = btf_type_by_id(btf, t->type);
17457 		if (!btf_type_is_func_proto(t))
17458 			return -EINVAL;
17459 
17460 		if ((prog->aux->saved_dst_prog_type || prog->aux->saved_dst_attach_type) &&
17461 		    (!tgt_prog || prog->aux->saved_dst_prog_type != tgt_prog->type ||
17462 		     prog->aux->saved_dst_attach_type != tgt_prog->expected_attach_type))
17463 			return -EINVAL;
17464 
17465 		if (tgt_prog && conservative)
17466 			t = NULL;
17467 
17468 		ret = btf_distill_func_proto(log, btf, t, tname, &tgt_info->fmodel);
17469 		if (ret < 0)
17470 			return ret;
17471 
17472 		if (tgt_prog) {
17473 			if (subprog == 0)
17474 				addr = (long) tgt_prog->bpf_func;
17475 			else
17476 				addr = (long) tgt_prog->aux->func[subprog]->bpf_func;
17477 		} else {
17478 			addr = kallsyms_lookup_name(tname);
17479 			if (!addr) {
17480 				bpf_log(log,
17481 					"The address of function %s cannot be found\n",
17482 					tname);
17483 				return -ENOENT;
17484 			}
17485 		}
17486 
17487 		if (prog->aux->sleepable) {
17488 			ret = -EINVAL;
17489 			switch (prog->type) {
17490 			case BPF_PROG_TYPE_TRACING:
17491 
17492 				/* fentry/fexit/fmod_ret progs can be sleepable if they are
17493 				 * attached to ALLOW_ERROR_INJECTION and are not in the denylist.
17494 				 */
17495 				if (!check_non_sleepable_error_inject(btf_id) &&
17496 				    within_error_injection_list(addr))
17497 					ret = 0;
17498 				/* fentry/fexit/fmod_ret progs can also be sleepable if they are
17499 				 * in the fmodret id set with the KF_SLEEPABLE flag.
17500 				 */
17501 				else {
17502 					u32 *flags = btf_kfunc_is_modify_return(btf, btf_id);
17503 
17504 					if (flags && (*flags & KF_SLEEPABLE))
17505 						ret = 0;
17506 				}
17507 				break;
17508 			case BPF_PROG_TYPE_LSM:
17509 				/* LSM progs check that they are attached to bpf_lsm_*() funcs.
17510 				 * Only some of them are sleepable.
17511 				 */
17512 				if (bpf_lsm_is_sleepable_hook(btf_id))
17513 					ret = 0;
17514 				break;
17515 			default:
17516 				break;
17517 			}
17518 			if (ret) {
17519 				bpf_log(log, "%s is not sleepable\n", tname);
17520 				return ret;
17521 			}
17522 		} else if (prog->expected_attach_type == BPF_MODIFY_RETURN) {
17523 			if (tgt_prog) {
17524 				bpf_log(log, "can't modify return codes of BPF programs\n");
17525 				return -EINVAL;
17526 			}
17527 			ret = -EINVAL;
17528 			if (btf_kfunc_is_modify_return(btf, btf_id) ||
17529 			    !check_attach_modify_return(addr, tname))
17530 				ret = 0;
17531 			if (ret) {
17532 				bpf_log(log, "%s() is not modifiable\n", tname);
17533 				return ret;
17534 			}
17535 		}
17536 
17537 		break;
17538 	}
17539 	tgt_info->tgt_addr = addr;
17540 	tgt_info->tgt_name = tname;
17541 	tgt_info->tgt_type = t;
17542 	return 0;
17543 }
17544 
17545 BTF_SET_START(btf_id_deny)
17546 BTF_ID_UNUSED
17547 #ifdef CONFIG_SMP
17548 BTF_ID(func, migrate_disable)
17549 BTF_ID(func, migrate_enable)
17550 #endif
17551 #if !defined CONFIG_PREEMPT_RCU && !defined CONFIG_TINY_RCU
17552 BTF_ID(func, rcu_read_unlock_strict)
17553 #endif
17554 BTF_SET_END(btf_id_deny)
17555 
17556 static bool can_be_sleepable(struct bpf_prog *prog)
17557 {
17558 	if (prog->type == BPF_PROG_TYPE_TRACING) {
17559 		switch (prog->expected_attach_type) {
17560 		case BPF_TRACE_FENTRY:
17561 		case BPF_TRACE_FEXIT:
17562 		case BPF_MODIFY_RETURN:
17563 		case BPF_TRACE_ITER:
17564 			return true;
17565 		default:
17566 			return false;
17567 		}
17568 	}
17569 	return prog->type == BPF_PROG_TYPE_LSM ||
17570 	       prog->type == BPF_PROG_TYPE_KPROBE /* only for uprobes */ ||
17571 	       prog->type == BPF_PROG_TYPE_STRUCT_OPS;
17572 }
17573 
17574 static int check_attach_btf_id(struct bpf_verifier_env *env)
17575 {
17576 	struct bpf_prog *prog = env->prog;
17577 	struct bpf_prog *tgt_prog = prog->aux->dst_prog;
17578 	struct bpf_attach_target_info tgt_info = {};
17579 	u32 btf_id = prog->aux->attach_btf_id;
17580 	struct bpf_trampoline *tr;
17581 	int ret;
17582 	u64 key;
17583 
17584 	if (prog->type == BPF_PROG_TYPE_SYSCALL) {
17585 		if (prog->aux->sleepable)
17586 			/* attach_btf_id checked to be zero already */
17587 			return 0;
17588 		verbose(env, "Syscall programs can only be sleepable\n");
17589 		return -EINVAL;
17590 	}
17591 
17592 	if (prog->aux->sleepable && !can_be_sleepable(prog)) {
17593 		verbose(env, "Only fentry/fexit/fmod_ret, lsm, iter, uprobe, and struct_ops programs can be sleepable\n");
17594 		return -EINVAL;
17595 	}
17596 
17597 	if (prog->type == BPF_PROG_TYPE_STRUCT_OPS)
17598 		return check_struct_ops_btf_id(env);
17599 
17600 	if (prog->type != BPF_PROG_TYPE_TRACING &&
17601 	    prog->type != BPF_PROG_TYPE_LSM &&
17602 	    prog->type != BPF_PROG_TYPE_EXT)
17603 		return 0;
17604 
17605 	ret = bpf_check_attach_target(&env->log, prog, tgt_prog, btf_id, &tgt_info);
17606 	if (ret)
17607 		return ret;
17608 
17609 	if (tgt_prog && prog->type == BPF_PROG_TYPE_EXT) {
17610 		/* to make freplace progs equivalent to their targets, they need to
17611 		 * inherit env->ops and expected_attach_type for the rest of the
17612 		 * verification
17613 		 */
17614 		env->ops = bpf_verifier_ops[tgt_prog->type];
17615 		prog->expected_attach_type = tgt_prog->expected_attach_type;
17616 	}
17617 
17618 	/* store info about the attachment target that will be used later */
17619 	prog->aux->attach_func_proto = tgt_info.tgt_type;
17620 	prog->aux->attach_func_name = tgt_info.tgt_name;
17621 
17622 	if (tgt_prog) {
17623 		prog->aux->saved_dst_prog_type = tgt_prog->type;
17624 		prog->aux->saved_dst_attach_type = tgt_prog->expected_attach_type;
17625 	}
17626 
17627 	if (prog->expected_attach_type == BPF_TRACE_RAW_TP) {
17628 		prog->aux->attach_btf_trace = true;
17629 		return 0;
17630 	} else if (prog->expected_attach_type == BPF_TRACE_ITER) {
17631 		if (!bpf_iter_prog_supported(prog))
17632 			return -EINVAL;
17633 		return 0;
17634 	}
17635 
17636 	if (prog->type == BPF_PROG_TYPE_LSM) {
17637 		ret = bpf_lsm_verify_prog(&env->log, prog);
17638 		if (ret < 0)
17639 			return ret;
17640 	} else if (prog->type == BPF_PROG_TYPE_TRACING &&
17641 		   btf_id_set_contains(&btf_id_deny, btf_id)) {
17642 		return -EINVAL;
17643 	}
17644 
17645 	key = bpf_trampoline_compute_key(tgt_prog, prog->aux->attach_btf, btf_id);
17646 	tr = bpf_trampoline_get(key, &tgt_info);
17647 	if (!tr)
17648 		return -ENOMEM;
17649 
17650 	prog->aux->dst_trampoline = tr;
17651 	return 0;
17652 }
17653 
17654 struct btf *bpf_get_btf_vmlinux(void)
17655 {
17656 	if (!btf_vmlinux && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) {
17657 		mutex_lock(&bpf_verifier_lock);
17658 		if (!btf_vmlinux)
17659 			btf_vmlinux = btf_parse_vmlinux();
17660 		mutex_unlock(&bpf_verifier_lock);
17661 	}
17662 	return btf_vmlinux;
17663 }
17664 
17665 int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr)
17666 {
17667 	u64 start_time = ktime_get_ns();
17668 	struct bpf_verifier_env *env;
17669 	struct bpf_verifier_log *log;
17670 	int i, len, ret = -EINVAL;
17671 	bool is_priv;
17672 
17673 	/* no program is valid */
17674 	if (ARRAY_SIZE(bpf_verifier_ops) == 0)
17675 		return -EINVAL;
17676 
17677 	/* 'struct bpf_verifier_env' can be global, but since it's not small,
17678 	 * allocate/free it every time bpf_check() is called
17679 	 */
17680 	env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
17681 	if (!env)
17682 		return -ENOMEM;
17683 	log = &env->log;
17684 
17685 	len = (*prog)->len;
17686 	env->insn_aux_data =
17687 		vzalloc(array_size(sizeof(struct bpf_insn_aux_data), len));
17688 	ret = -ENOMEM;
17689 	if (!env->insn_aux_data)
17690 		goto err_free_env;
17691 	for (i = 0; i < len; i++)
17692 		env->insn_aux_data[i].orig_idx = i;
17693 	env->prog = *prog;
17694 	env->ops = bpf_verifier_ops[env->prog->type];
17695 	env->fd_array = make_bpfptr(attr->fd_array, uattr.is_kernel);
17696 	is_priv = bpf_capable();
17697 
17698 	bpf_get_btf_vmlinux();
17699 
17700 	/* grab the mutex to protect a few globals used by the verifier */
17701 	if (!is_priv)
17702 		mutex_lock(&bpf_verifier_lock);
17703 
17704 	if (attr->log_level || attr->log_buf || attr->log_size) {
17705 		/* user requested verbose verifier output
17706 		 * and supplied buffer to store the verification trace
17707 		 * and supplied a buffer to store the verification trace
17708 		log->level = attr->log_level;
17709 		log->ubuf = (char __user *) (unsigned long) attr->log_buf;
17710 		log->len_total = attr->log_size;
17711 
17712 		/* log attributes have to be sane */
17713 		if (!bpf_verifier_log_attr_valid(log)) {
17714 			ret = -EINVAL;
17715 			goto err_unlock;
17716 		}
17717 	}
17718 
17719 	mark_verifier_state_clean(env);
17720 
17721 	if (IS_ERR(btf_vmlinux)) {
17722 		/* Either gcc, pahole or the kernel is broken. */
17723 		verbose(env, "in-kernel BTF is malformed\n");
17724 		ret = PTR_ERR(btf_vmlinux);
17725 		goto skip_full_check;
17726 	}
17727 
17728 	env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT);
17729 	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
17730 		env->strict_alignment = true;
17731 	if (attr->prog_flags & BPF_F_ANY_ALIGNMENT)
17732 		env->strict_alignment = false;
17733 
17734 	env->allow_ptr_leaks = bpf_allow_ptr_leaks();
17735 	env->allow_uninit_stack = bpf_allow_uninit_stack();
17736 	env->bypass_spec_v1 = bpf_bypass_spec_v1();
17737 	env->bypass_spec_v4 = bpf_bypass_spec_v4();
17738 	env->bpf_capable = bpf_capable();
17739 	env->rcu_tag_supported = btf_vmlinux &&
17740 		btf_find_by_name_kind(btf_vmlinux, "rcu", BTF_KIND_TYPE_TAG) > 0;
17741 
17742 	if (is_priv)
17743 		env->test_state_freq = attr->prog_flags & BPF_F_TEST_STATE_FREQ;
17744 
17745 	env->explored_states = kvcalloc(state_htab_size(env),
17746 				       sizeof(struct bpf_verifier_state_list *),
17747 				       GFP_USER);
17748 	ret = -ENOMEM;
17749 	if (!env->explored_states)
17750 		goto skip_full_check;
17751 
17752 	ret = add_subprog_and_kfunc(env);
17753 	if (ret < 0)
17754 		goto skip_full_check;
17755 
17756 	ret = check_subprogs(env);
17757 	if (ret < 0)
17758 		goto skip_full_check;
17759 
17760 	ret = check_btf_info(env, attr, uattr);
17761 	if (ret < 0)
17762 		goto skip_full_check;
17763 
17764 	ret = check_attach_btf_id(env);
17765 	if (ret)
17766 		goto skip_full_check;
17767 
17768 	ret = resolve_pseudo_ldimm64(env);
17769 	if (ret < 0)
17770 		goto skip_full_check;
17771 
17772 	if (bpf_prog_is_offloaded(env->prog->aux)) {
17773 		ret = bpf_prog_offload_verifier_prep(env->prog);
17774 		if (ret)
17775 			goto skip_full_check;
17776 	}
17777 
17778 	ret = check_cfg(env);
17779 	if (ret < 0)
17780 		goto skip_full_check;
17781 
17782 	ret = do_check_subprogs(env);
17783 	ret = ret ?: do_check_main(env);
17784 
17785 	if (ret == 0 && bpf_prog_is_offloaded(env->prog->aux))
17786 		ret = bpf_prog_offload_finalize(env);
17787 
17788 skip_full_check:
17789 	kvfree(env->explored_states);
17790 
17791 	if (ret == 0)
17792 		ret = check_max_stack_depth(env);
17793 
17794 	/* instruction rewrites happen after this point */
17795 	if (ret == 0)
17796 		ret = optimize_bpf_loop(env);
17797 
17798 	if (is_priv) {
17799 		if (ret == 0)
17800 			opt_hard_wire_dead_code_branches(env);
17801 		if (ret == 0)
17802 			ret = opt_remove_dead_code(env);
17803 		if (ret == 0)
17804 			ret = opt_remove_nops(env);
17805 	} else {
17806 		if (ret == 0)
17807 			sanitize_dead_code(env);
17808 	}
17809 
17810 	if (ret == 0)
17811 		/* program is valid, convert *(u32*)(ctx + off) accesses */
17812 		ret = convert_ctx_accesses(env);
17813 
17814 	if (ret == 0)
17815 		ret = do_misc_fixups(env);
17816 
17817 	/* do the 32-bit optimization after insn patching is done, so that the
17818 	 * patched insns can be handled correctly.
17819 	 */
17820 	if (ret == 0 && !bpf_prog_is_offloaded(env->prog->aux)) {
17821 		ret = opt_subreg_zext_lo32_rnd_hi32(env, attr);
17822 		env->prog->aux->verifier_zext = bpf_jit_needs_zext() ? !ret
17823 								     : false;
17824 	}
17825 
17826 	if (ret == 0)
17827 		ret = fixup_call_args(env);
17828 
17829 	env->verification_time = ktime_get_ns() - start_time;
17830 	print_verification_stats(env);
17831 	env->prog->aux->verified_insns = env->insn_processed;
17832 
17833 	if (log->level && bpf_verifier_log_full(log))
17834 		ret = -ENOSPC;
17835 	if (log->level && !log->ubuf) {
17836 		ret = -EFAULT;
17837 		goto err_release_maps;
17838 	}
17839 
17840 	if (ret)
17841 		goto err_release_maps;
17842 
17843 	if (env->used_map_cnt) {
17844 		/* if program passed verifier, update used_maps in bpf_prog_info */
17845 		env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt,
17846 							  sizeof(env->used_maps[0]),
17847 							  GFP_KERNEL);
17848 
17849 		if (!env->prog->aux->used_maps) {
17850 			ret = -ENOMEM;
17851 			goto err_release_maps;
17852 		}
17853 
17854 		memcpy(env->prog->aux->used_maps, env->used_maps,
17855 		       sizeof(env->used_maps[0]) * env->used_map_cnt);
17856 		env->prog->aux->used_map_cnt = env->used_map_cnt;
17857 	}
17858 	if (env->used_btf_cnt) {
17859 		/* if program passed verifier, update used_btfs in bpf_prog_aux */
17860 		env->prog->aux->used_btfs = kmalloc_array(env->used_btf_cnt,
17861 							  sizeof(env->used_btfs[0]),
17862 							  GFP_KERNEL);
17863 		if (!env->prog->aux->used_btfs) {
17864 			ret = -ENOMEM;
17865 			goto err_release_maps;
17866 		}
17867 
17868 		memcpy(env->prog->aux->used_btfs, env->used_btfs,
17869 		       sizeof(env->used_btfs[0]) * env->used_btf_cnt);
17870 		env->prog->aux->used_btf_cnt = env->used_btf_cnt;
17871 	}
17872 	if (env->used_map_cnt || env->used_btf_cnt) {
17873 		/* program is valid. Convert pseudo bpf_ld_imm64 into generic
17874 		 * bpf_ld_imm64 instructions
17875 		 */
17876 		convert_pseudo_ld_imm64(env);
17877 	}
17878 
17879 	adjust_btf_func(env);
17880 
17881 err_release_maps:
17882 	if (!env->prog->aux->used_maps)
17883 		/* if we didn't copy map pointers into bpf_prog_info, release
17884 		 * them now. Otherwise free_used_maps() will release them.
17885 		 */
17886 		release_maps(env);
17887 	if (!env->prog->aux->used_btfs)
17888 		release_btfs(env);
17889 
17890 	/* extension progs temporarily inherit the attach_type of their targets
17891 	 * for verification purposes, so set it back to zero before returning
17892 	 */
17893 	if (env->prog->type == BPF_PROG_TYPE_EXT)
17894 		env->prog->expected_attach_type = 0;
17895 
17896 	*prog = env->prog;
17897 err_unlock:
17898 	if (!is_priv)
17899 		mutex_unlock(&bpf_verifier_lock);
17900 	vfree(env->insn_aux_data);
17901 err_free_env:
17902 	kfree(env);
17903 	return ret;
17904 }
17905