xref: /openbmc/linux/kernel/bpf/verifier.c (revision be801411)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
3  * Copyright (c) 2016 Facebook
4  * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
5  */
6 #include <uapi/linux/btf.h>
7 #include <linux/bpf-cgroup.h>
8 #include <linux/kernel.h>
9 #include <linux/types.h>
10 #include <linux/slab.h>
11 #include <linux/bpf.h>
12 #include <linux/btf.h>
13 #include <linux/bpf_verifier.h>
14 #include <linux/filter.h>
15 #include <net/netlink.h>
16 #include <linux/file.h>
17 #include <linux/vmalloc.h>
18 #include <linux/stringify.h>
19 #include <linux/bsearch.h>
20 #include <linux/sort.h>
21 #include <linux/perf_event.h>
22 #include <linux/ctype.h>
23 #include <linux/error-injection.h>
24 #include <linux/bpf_lsm.h>
25 #include <linux/btf_ids.h>
26 
27 #include "disasm.h"
28 
29 static const struct bpf_verifier_ops * const bpf_verifier_ops[] = {
30 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
31 	[_id] = & _name ## _verifier_ops,
32 #define BPF_MAP_TYPE(_id, _ops)
33 #define BPF_LINK_TYPE(_id, _name)
34 #include <linux/bpf_types.h>
35 #undef BPF_PROG_TYPE
36 #undef BPF_MAP_TYPE
37 #undef BPF_LINK_TYPE
38 };
39 
40 /* bpf_check() is a static code analyzer that walks eBPF program
41  * instruction by instruction and updates register/stack state.
42  * All paths of conditional branches are analyzed until 'bpf_exit' insn.
43  *
44  * The first pass is depth-first-search to check that the program is a DAG.
45  * It rejects the following programs:
46  * - larger than BPF_MAXINSNS insns
47  * - if loop is present (detected via back-edge)
48  * - unreachable insns exist (program shouldn't be a forest; program = one function)
49  * - out of bounds or malformed jumps
50  * The second pass is all possible path descent from the 1st insn.
51  * Since it's analyzing all paths through the program, the length of the
52  * analysis is limited to 64k insn, which may be hit even if the total number of
53  * insn is less than 4K, when there are too many branches that change stack/regs.
54  * Number of 'branches to be analyzed' is limited to 1k
55  *
56  * On entry to each instruction, each register has a type, and the instruction
57  * changes the types of the registers depending on instruction semantics.
58  * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is
59  * copied to R1.
60  *
61  * All registers are 64-bit.
62  * R0 - return register
63  * R1-R5 argument passing registers
64  * R6-R9 callee saved registers
65  * R10 - frame pointer read-only
66  *
67  * At the start of BPF program the register R1 contains a pointer to bpf_context
68  * and has type PTR_TO_CTX.
69  *
70  * Verifier tracks arithmetic operations on pointers in case:
71  *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
72  *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
73  * 1st insn copies R10 (which has FRAME_PTR type) into R1
74  * and 2nd arithmetic instruction is pattern matched to recognize
75  * that it wants to construct a pointer to some element within stack.
76  * So after 2nd insn, the register R1 has type PTR_TO_STACK
77  * (and -20 constant is saved for further stack bounds checking).
78  * Meaning that this reg is a pointer to stack plus known immediate constant.
79  *
80  * Most of the time the registers have SCALAR_VALUE type, which
81  * means the register has some value, but it's not a valid pointer.
82  * (like pointer plus pointer becomes SCALAR_VALUE type)
83  *
84  * When verifier sees load or store instructions the type of base register
85  * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK, PTR_TO_SOCKET. These are
86  * four pointer types recognized by check_mem_access() function.
87  *
88  * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value'
89  * and the range of [ptr, ptr + map's value_size) is accessible.
90  *
91  * Registers used to pass values to function calls are checked against
92  * function argument constraints.
93  *
94  * ARG_PTR_TO_MAP_KEY is one of such argument constraints.
95  * It means that the register type passed to this function must be
96  * PTR_TO_STACK and it will be used inside the function as
97  * 'pointer to map element key'
98  *
99  * For example the argument constraints for bpf_map_lookup_elem():
100  *   .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
101  *   .arg1_type = ARG_CONST_MAP_PTR,
102  *   .arg2_type = ARG_PTR_TO_MAP_KEY,
103  *
104  * ret_type says that this function returns 'pointer to map elem value or null',
105  * the function expects the 1st argument to be a const pointer to 'struct bpf_map'
106  * and the 2nd argument to be a pointer to stack, which will be used inside
107  * the helper function as a pointer to map element key.
108  *
109  * On the kernel side the helper function looks like:
110  * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
111  * {
112  *    struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
113  *    void *key = (void *) (unsigned long) r2;
114  *    void *value;
115  *
116  *    here kernel can access 'key' and 'map' pointers safely, knowing that
117  *    [key, key + map->key_size) bytes are valid and were initialized on
118  *    the stack of eBPF program.
119  * }
120  *
121  * Corresponding eBPF program may look like:
122  *    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),  // after this insn R2 type is FRAME_PTR
123  *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
124  *    BPF_LD_MAP_FD(BPF_REG_1, map_fd),      // after this insn R1 type is CONST_PTR_TO_MAP
125  *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
126  * here verifier looks at prototype of map_lookup_elem() and sees:
127  * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok.
128  * Now verifier knows that this map has key of R1->map_ptr->key_size bytes.
129  *
130  * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far,
131  * Now verifier checks that [R2, R2 + map's key_size) are within stack limits
132  * and were initialized prior to this call.
133  * If it's ok, then verifier allows this BPF_CALL insn and looks at
134  * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets
135  * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function
136  * returns either pointer to map value or NULL.
137  *
138  * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
139  * insn, the register holding that pointer in the true branch changes state to
140  * PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false
141  * branch. See check_cond_jmp_op().
142  *
143  * After the call R0 is set to return type of the function and registers R1-R5
144  * are set to NOT_INIT to indicate that they are no longer readable.
145  *
146  * The following reference types represent a potential reference to a kernel
147  * resource which, after first being allocated, must be checked and freed by
148  * the BPF program:
149  * - PTR_TO_SOCKET_OR_NULL, PTR_TO_SOCKET
150  *
151  * When the verifier sees a helper call return a reference type, it allocates a
152  * pointer id for the reference and stores it in the current function state.
153  * Similar to the way that PTR_TO_MAP_VALUE_OR_NULL is converted into
154  * PTR_TO_MAP_VALUE, PTR_TO_SOCKET_OR_NULL becomes PTR_TO_SOCKET when the type
155  * passes through a NULL-check conditional. For the branch wherein the state is
156  * changed to CONST_IMM, the verifier releases the reference.
157  *
158  * For each helper function that allocates a reference, such as
159  * bpf_sk_lookup_tcp(), there is a corresponding release function, such as
160  * bpf_sk_release(). When a reference type passes into the release function,
161  * the verifier also releases the reference. If any unchecked or unreleased
162  * reference remains at the end of the program, the verifier rejects it.
163  */
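
/* As an illustrative sketch (not taken from a real program) of the reference
 * tracking described above, a helper that returns a referenced socket must be
 * NULL-checked and released on every path, roughly:
 *
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_sk_lookup_tcp),
 *                                           // R0 is PTR_TO_SOCKET_OR_NULL, new ref id
 *    BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), // if NULL, skip to exit; the verifier
 *                                           // drops the reference in that branch
 *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),   // non-NULL branch: R0/R1 are PTR_TO_SOCKET
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_sk_release),
 *    BPF_EXIT_INSN(),
 *
 * Omitting the bpf_sk_release() call, or exiting while the reference is still
 * held, makes the verifier reject the program with an unreleased-reference error.
 */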
164 
165 /* verifier_state + insn_idx are pushed to stack when branch is encountered */
166 struct bpf_verifier_stack_elem {
167 	/* verifier state is 'st'
168 	 * before processing instruction 'insn_idx'
169 	 * and after processing instruction 'prev_insn_idx'
170 	 */
171 	struct bpf_verifier_state st;
172 	int insn_idx;
173 	int prev_insn_idx;
174 	struct bpf_verifier_stack_elem *next;
175 	/* length of verifier log at the time this state was pushed on stack */
176 	u32 log_pos;
177 };
178 
179 #define BPF_COMPLEXITY_LIMIT_JMP_SEQ	8192
180 #define BPF_COMPLEXITY_LIMIT_STATES	64
181 
182 #define BPF_MAP_KEY_POISON	(1ULL << 63)
183 #define BPF_MAP_KEY_SEEN	(1ULL << 62)
184 
185 #define BPF_MAP_PTR_UNPRIV	1UL
186 #define BPF_MAP_PTR_POISON	((void *)((0xeB9FUL << 1) +	\
187 					  POISON_POINTER_DELTA))
188 #define BPF_MAP_PTR(X)		((struct bpf_map *)((X) & ~BPF_MAP_PTR_UNPRIV))
189 
190 static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx);
191 static int release_reference(struct bpf_verifier_env *env, int ref_obj_id);
192 
193 static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux)
194 {
195 	return BPF_MAP_PTR(aux->map_ptr_state) == BPF_MAP_PTR_POISON;
196 }
197 
198 static bool bpf_map_ptr_unpriv(const struct bpf_insn_aux_data *aux)
199 {
200 	return aux->map_ptr_state & BPF_MAP_PTR_UNPRIV;
201 }
202 
203 static void bpf_map_ptr_store(struct bpf_insn_aux_data *aux,
204 			      const struct bpf_map *map, bool unpriv)
205 {
206 	BUILD_BUG_ON((unsigned long)BPF_MAP_PTR_POISON & BPF_MAP_PTR_UNPRIV);
207 	unpriv |= bpf_map_ptr_unpriv(aux);
208 	aux->map_ptr_state = (unsigned long)map |
209 			     (unpriv ? BPF_MAP_PTR_UNPRIV : 0UL);
210 }
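
/* Illustrative example (made-up address) of the packing done by
 * bpf_map_ptr_store(): struct bpf_map is at least pointer-aligned, so bit 0 of
 * the pointer is free to carry the "seen on an unprivileged path" flag:
 *
 *    map           = (struct bpf_map *)0xffff888012345000
 *    map_ptr_state = 0xffff888012345001        // unpriv == true
 *    BPF_MAP_PTR(aux->map_ptr_state) -> 0xffff888012345000
 *    bpf_map_ptr_unpriv(aux)         -> true
 *
 * BPF_MAP_PTR_POISON is a distinct sentinel, roughly meaning "no single known
 * map reached this instruction", checked by bpf_map_ptr_poisoned().
 */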
211 
212 static bool bpf_map_key_poisoned(const struct bpf_insn_aux_data *aux)
213 {
214 	return aux->map_key_state & BPF_MAP_KEY_POISON;
215 }
216 
217 static bool bpf_map_key_unseen(const struct bpf_insn_aux_data *aux)
218 {
219 	return !(aux->map_key_state & BPF_MAP_KEY_SEEN);
220 }
221 
222 static u64 bpf_map_key_immediate(const struct bpf_insn_aux_data *aux)
223 {
224 	return aux->map_key_state & ~(BPF_MAP_KEY_SEEN | BPF_MAP_KEY_POISON);
225 }
226 
227 static void bpf_map_key_store(struct bpf_insn_aux_data *aux, u64 state)
228 {
229 	bool poisoned = bpf_map_key_poisoned(aux);
230 
231 	aux->map_key_state = state | BPF_MAP_KEY_SEEN |
232 			     (poisoned ? BPF_MAP_KEY_POISON : 0ULL);
233 }
234 
235 static bool bpf_pseudo_call(const struct bpf_insn *insn)
236 {
237 	return insn->code == (BPF_JMP | BPF_CALL) &&
238 	       insn->src_reg == BPF_PSEUDO_CALL;
239 }
240 
241 static bool bpf_pseudo_kfunc_call(const struct bpf_insn *insn)
242 {
243 	return insn->code == (BPF_JMP | BPF_CALL) &&
244 	       insn->src_reg == BPF_PSEUDO_KFUNC_CALL;
245 }
246 
247 struct bpf_call_arg_meta {
248 	struct bpf_map *map_ptr;
249 	bool raw_mode;
250 	bool pkt_access;
251 	u8 release_regno;
252 	int regno;
253 	int access_size;
254 	int mem_size;
255 	u64 msize_max_value;
256 	int ref_obj_id;
257 	int map_uid;
258 	int func_id;
259 	struct btf *btf;
260 	u32 btf_id;
261 	struct btf *ret_btf;
262 	u32 ret_btf_id;
263 	u32 subprogno;
264 	struct bpf_map_value_off_desc *kptr_off_desc;
265 	u8 uninit_dynptr_regno;
266 };
267 
268 struct btf *btf_vmlinux;
269 
270 static DEFINE_MUTEX(bpf_verifier_lock);
271 
272 static const struct bpf_line_info *
273 find_linfo(const struct bpf_verifier_env *env, u32 insn_off)
274 {
275 	const struct bpf_line_info *linfo;
276 	const struct bpf_prog *prog;
277 	u32 i, nr_linfo;
278 
279 	prog = env->prog;
280 	nr_linfo = prog->aux->nr_linfo;
281 
282 	if (!nr_linfo || insn_off >= prog->len)
283 		return NULL;
284 
285 	linfo = prog->aux->linfo;
286 	for (i = 1; i < nr_linfo; i++)
287 		if (insn_off < linfo[i].insn_off)
288 			break;
289 
290 	return &linfo[i - 1];
291 }
292 
293 void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt,
294 		       va_list args)
295 {
296 	unsigned int n;
297 
298 	n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args);
299 
300 	WARN_ONCE(n >= BPF_VERIFIER_TMP_LOG_SIZE - 1,
301 		  "verifier log line truncated - local buffer too short\n");
302 
303 	if (log->level == BPF_LOG_KERNEL) {
304 		bool newline = n > 0 && log->kbuf[n - 1] == '\n';
305 
306 		pr_err("BPF: %s%s", log->kbuf, newline ? "" : "\n");
307 		return;
308 	}
309 
310 	n = min(log->len_total - log->len_used - 1, n);
311 	log->kbuf[n] = '\0';
312 	if (!copy_to_user(log->ubuf + log->len_used, log->kbuf, n + 1))
313 		log->len_used += n;
314 	else
315 		log->ubuf = NULL;
316 }
317 
318 static void bpf_vlog_reset(struct bpf_verifier_log *log, u32 new_pos)
319 {
320 	char zero = 0;
321 
322 	if (!bpf_verifier_log_needed(log))
323 		return;
324 
325 	log->len_used = new_pos;
326 	if (put_user(zero, log->ubuf + new_pos))
327 		log->ubuf = NULL;
328 }
329 
330 /* log_level controls verbosity level of eBPF verifier.
331  * bpf_verifier_log_write() is used to dump the verification trace to the log,
332  * so the user can figure out what's wrong with the program
333  */
334 __printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
335 					   const char *fmt, ...)
336 {
337 	va_list args;
338 
339 	if (!bpf_verifier_log_needed(&env->log))
340 		return;
341 
342 	va_start(args, fmt);
343 	bpf_verifier_vlog(&env->log, fmt, args);
344 	va_end(args);
345 }
346 EXPORT_SYMBOL_GPL(bpf_verifier_log_write);
347 
348 __printf(2, 3) static void verbose(void *private_data, const char *fmt, ...)
349 {
350 	struct bpf_verifier_env *env = private_data;
351 	va_list args;
352 
353 	if (!bpf_verifier_log_needed(&env->log))
354 		return;
355 
356 	va_start(args, fmt);
357 	bpf_verifier_vlog(&env->log, fmt, args);
358 	va_end(args);
359 }
360 
361 __printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
362 			    const char *fmt, ...)
363 {
364 	va_list args;
365 
366 	if (!bpf_verifier_log_needed(log))
367 		return;
368 
369 	va_start(args, fmt);
370 	bpf_verifier_vlog(log, fmt, args);
371 	va_end(args);
372 }
373 
374 static const char *ltrim(const char *s)
375 {
376 	while (isspace(*s))
377 		s++;
378 
379 	return s;
380 }
381 
382 __printf(3, 4) static void verbose_linfo(struct bpf_verifier_env *env,
383 					 u32 insn_off,
384 					 const char *prefix_fmt, ...)
385 {
386 	const struct bpf_line_info *linfo;
387 
388 	if (!bpf_verifier_log_needed(&env->log))
389 		return;
390 
391 	linfo = find_linfo(env, insn_off);
392 	if (!linfo || linfo == env->prev_linfo)
393 		return;
394 
395 	if (prefix_fmt) {
396 		va_list args;
397 
398 		va_start(args, prefix_fmt);
399 		bpf_verifier_vlog(&env->log, prefix_fmt, args);
400 		va_end(args);
401 	}
402 
403 	verbose(env, "%s\n",
404 		ltrim(btf_name_by_offset(env->prog->aux->btf,
405 					 linfo->line_off)));
406 
407 	env->prev_linfo = linfo;
408 }
409 
410 static void verbose_invalid_scalar(struct bpf_verifier_env *env,
411 				   struct bpf_reg_state *reg,
412 				   struct tnum *range, const char *ctx,
413 				   const char *reg_name)
414 {
415 	char tn_buf[48];
416 
417 	verbose(env, "At %s the register %s ", ctx, reg_name);
418 	if (!tnum_is_unknown(reg->var_off)) {
419 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
420 		verbose(env, "has value %s", tn_buf);
421 	} else {
422 		verbose(env, "has unknown scalar value");
423 	}
424 	tnum_strn(tn_buf, sizeof(tn_buf), *range);
425 	verbose(env, " should have been in %s\n", tn_buf);
426 }
427 
428 static bool type_is_pkt_pointer(enum bpf_reg_type type)
429 {
430 	return type == PTR_TO_PACKET ||
431 	       type == PTR_TO_PACKET_META;
432 }
433 
434 static bool type_is_sk_pointer(enum bpf_reg_type type)
435 {
436 	return type == PTR_TO_SOCKET ||
437 		type == PTR_TO_SOCK_COMMON ||
438 		type == PTR_TO_TCP_SOCK ||
439 		type == PTR_TO_XDP_SOCK;
440 }
441 
442 static bool reg_type_not_null(enum bpf_reg_type type)
443 {
444 	return type == PTR_TO_SOCKET ||
445 		type == PTR_TO_TCP_SOCK ||
446 		type == PTR_TO_MAP_VALUE ||
447 		type == PTR_TO_MAP_KEY ||
448 		type == PTR_TO_SOCK_COMMON;
449 }
450 
451 static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg)
452 {
453 	return reg->type == PTR_TO_MAP_VALUE &&
454 		map_value_has_spin_lock(reg->map_ptr);
455 }
456 
457 static bool reg_type_may_be_refcounted_or_null(enum bpf_reg_type type)
458 {
459 	return base_type(type) == PTR_TO_SOCKET ||
460 		base_type(type) == PTR_TO_TCP_SOCK ||
461 		base_type(type) == PTR_TO_MEM ||
462 		base_type(type) == PTR_TO_BTF_ID;
463 }
464 
465 static bool type_is_rdonly_mem(u32 type)
466 {
467 	return type & MEM_RDONLY;
468 }
469 
470 static bool arg_type_may_be_refcounted(enum bpf_arg_type type)
471 {
472 	return type == ARG_PTR_TO_SOCK_COMMON;
473 }
474 
475 static bool type_may_be_null(u32 type)
476 {
477 	return type & PTR_MAYBE_NULL;
478 }
479 
480 static bool may_be_acquire_function(enum bpf_func_id func_id)
481 {
482 	return func_id == BPF_FUNC_sk_lookup_tcp ||
483 		func_id == BPF_FUNC_sk_lookup_udp ||
484 		func_id == BPF_FUNC_skc_lookup_tcp ||
485 		func_id == BPF_FUNC_map_lookup_elem ||
486 		func_id == BPF_FUNC_ringbuf_reserve;
487 }
488 
489 static bool is_acquire_function(enum bpf_func_id func_id,
490 				const struct bpf_map *map)
491 {
492 	enum bpf_map_type map_type = map ? map->map_type : BPF_MAP_TYPE_UNSPEC;
493 
494 	if (func_id == BPF_FUNC_sk_lookup_tcp ||
495 	    func_id == BPF_FUNC_sk_lookup_udp ||
496 	    func_id == BPF_FUNC_skc_lookup_tcp ||
497 	    func_id == BPF_FUNC_ringbuf_reserve ||
498 	    func_id == BPF_FUNC_kptr_xchg)
499 		return true;
500 
501 	if (func_id == BPF_FUNC_map_lookup_elem &&
502 	    (map_type == BPF_MAP_TYPE_SOCKMAP ||
503 	     map_type == BPF_MAP_TYPE_SOCKHASH))
504 		return true;
505 
506 	return false;
507 }
508 
509 static bool is_ptr_cast_function(enum bpf_func_id func_id)
510 {
511 	return func_id == BPF_FUNC_tcp_sock ||
512 		func_id == BPF_FUNC_sk_fullsock ||
513 		func_id == BPF_FUNC_skc_to_tcp_sock ||
514 		func_id == BPF_FUNC_skc_to_tcp6_sock ||
515 		func_id == BPF_FUNC_skc_to_udp6_sock ||
516 		func_id == BPF_FUNC_skc_to_mptcp_sock ||
517 		func_id == BPF_FUNC_skc_to_tcp_timewait_sock ||
518 		func_id == BPF_FUNC_skc_to_tcp_request_sock;
519 }
520 
521 static bool is_cmpxchg_insn(const struct bpf_insn *insn)
522 {
523 	return BPF_CLASS(insn->code) == BPF_STX &&
524 	       BPF_MODE(insn->code) == BPF_ATOMIC &&
525 	       insn->imm == BPF_CMPXCHG;
526 }
527 
528 /* string representation of 'enum bpf_reg_type'
529  *
530  * Note that reg_type_str() can not appear more than once in a single verbose()
531  * statement.
532  */
533 static const char *reg_type_str(struct bpf_verifier_env *env,
534 				enum bpf_reg_type type)
535 {
536 	char postfix[16] = {0}, prefix[32] = {0};
537 	static const char * const str[] = {
538 		[NOT_INIT]		= "?",
539 		[SCALAR_VALUE]		= "scalar",
540 		[PTR_TO_CTX]		= "ctx",
541 		[CONST_PTR_TO_MAP]	= "map_ptr",
542 		[PTR_TO_MAP_VALUE]	= "map_value",
543 		[PTR_TO_STACK]		= "fp",
544 		[PTR_TO_PACKET]		= "pkt",
545 		[PTR_TO_PACKET_META]	= "pkt_meta",
546 		[PTR_TO_PACKET_END]	= "pkt_end",
547 		[PTR_TO_FLOW_KEYS]	= "flow_keys",
548 		[PTR_TO_SOCKET]		= "sock",
549 		[PTR_TO_SOCK_COMMON]	= "sock_common",
550 		[PTR_TO_TCP_SOCK]	= "tcp_sock",
551 		[PTR_TO_TP_BUFFER]	= "tp_buffer",
552 		[PTR_TO_XDP_SOCK]	= "xdp_sock",
553 		[PTR_TO_BTF_ID]		= "ptr_",
554 		[PTR_TO_MEM]		= "mem",
555 		[PTR_TO_BUF]		= "buf",
556 		[PTR_TO_FUNC]		= "func",
557 		[PTR_TO_MAP_KEY]	= "map_key",
558 	};
559 
560 	if (type & PTR_MAYBE_NULL) {
561 		if (base_type(type) == PTR_TO_BTF_ID)
562 			strncpy(postfix, "or_null_", 16);
563 		else
564 			strncpy(postfix, "_or_null", 16);
565 	}
566 
567 	if (type & MEM_RDONLY)
568 		strncpy(prefix, "rdonly_", 32);
569 	if (type & MEM_ALLOC)
570 		strncpy(prefix, "alloc_", 32);
571 	if (type & MEM_USER)
572 		strncpy(prefix, "user_", 32);
573 	if (type & MEM_PERCPU)
574 		strncpy(prefix, "percpu_", 32);
575 	if (type & PTR_UNTRUSTED)
576 		strncpy(prefix, "untrusted_", 32);
577 
578 	snprintf(env->type_str_buf, TYPE_STR_BUF_LEN, "%s%s%s",
579 		 prefix, str[base_type(type)], postfix);
580 	return env->type_str_buf;
581 }
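
/* A few examples of the strings reg_type_str() produces (prefix + base + postfix):
 *
 *    PTR_TO_MAP_VALUE | PTR_MAYBE_NULL -> "map_value_or_null"
 *    PTR_TO_MEM | MEM_RDONLY           -> "rdonly_mem"
 *    PTR_TO_BTF_ID | PTR_MAYBE_NULL    -> "ptr_or_null_" (callers such as
 *                                          print_verifier_state() append the
 *                                          BTF type name after it)
 *
 * The result lives in env->type_str_buf, which is why a single verbose()
 * statement must not call reg_type_str() more than once.
 */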
582 
583 static char slot_type_char[] = {
584 	[STACK_INVALID]	= '?',
585 	[STACK_SPILL]	= 'r',
586 	[STACK_MISC]	= 'm',
587 	[STACK_ZERO]	= '0',
588 	[STACK_DYNPTR]	= 'd',
589 };
590 
591 static void print_liveness(struct bpf_verifier_env *env,
592 			   enum bpf_reg_liveness live)
593 {
594 	if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN | REG_LIVE_DONE))
595 		verbose(env, "_");
596 	if (live & REG_LIVE_READ)
597 		verbose(env, "r");
598 	if (live & REG_LIVE_WRITTEN)
599 		verbose(env, "w");
600 	if (live & REG_LIVE_DONE)
601 		verbose(env, "D");
602 }
603 
604 static int get_spi(s32 off)
605 {
606 	return (-off - 1) / BPF_REG_SIZE;
607 }
608 
609 static bool is_spi_bounds_valid(struct bpf_func_state *state, int spi, int nr_slots)
610 {
611 	int allocated_slots = state->allocated_stack / BPF_REG_SIZE;
612 
613 	/* We need to check that slots between [spi - nr_slots + 1, spi] are
614 	 * within [0, allocated_stack).
615 	 *
616 	 * Please note that the spi grows downwards. For example, a dynptr
617 	 * takes the size of two stack slots; the first slot will be at
618 	 * spi and the second slot will be at spi - 1.
619 	 */
620 	return spi - nr_slots + 1 >= 0 && spi < allocated_slots;
621 }
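
/* Worked example, with BPF_REG_SIZE == 8: a dynptr stored at stack offset
 * off == -16 spans the 16 bytes [fp-16, fp).  get_spi(-16) == (16 - 1) / 8 == 1,
 * so its first slot is spi == 1 ([fp-16, fp-8)) and its second slot is
 * spi - 1 == 0 ([fp-8, fp)).  is_spi_bounds_valid(state, 1, BPF_DYNPTR_NR_SLOTS)
 * then only requires allocated_stack >= 16.
 */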
622 
623 static struct bpf_func_state *func(struct bpf_verifier_env *env,
624 				   const struct bpf_reg_state *reg)
625 {
626 	struct bpf_verifier_state *cur = env->cur_state;
627 
628 	return cur->frame[reg->frameno];
629 }
630 
631 static const char *kernel_type_name(const struct btf *btf, u32 id)
632 {
633 	return btf_name_by_offset(btf, btf_type_by_id(btf, id)->name_off);
634 }
635 
636 static void mark_reg_scratched(struct bpf_verifier_env *env, u32 regno)
637 {
638 	env->scratched_regs |= 1U << regno;
639 }
640 
641 static void mark_stack_slot_scratched(struct bpf_verifier_env *env, u32 spi)
642 {
643 	env->scratched_stack_slots |= 1ULL << spi;
644 }
645 
646 static bool reg_scratched(const struct bpf_verifier_env *env, u32 regno)
647 {
648 	return (env->scratched_regs >> regno) & 1;
649 }
650 
651 static bool stack_slot_scratched(const struct bpf_verifier_env *env, u64 regno)
652 {
653 	return (env->scratched_stack_slots >> regno) & 1;
654 }
655 
656 static bool verifier_state_scratched(const struct bpf_verifier_env *env)
657 {
658 	return env->scratched_regs || env->scratched_stack_slots;
659 }
660 
661 static void mark_verifier_state_clean(struct bpf_verifier_env *env)
662 {
663 	env->scratched_regs = 0U;
664 	env->scratched_stack_slots = 0ULL;
665 }
666 
667 /* Used for printing the entire verifier state. */
668 static void mark_verifier_state_scratched(struct bpf_verifier_env *env)
669 {
670 	env->scratched_regs = ~0U;
671 	env->scratched_stack_slots = ~0ULL;
672 }
673 
674 static enum bpf_dynptr_type arg_to_dynptr_type(enum bpf_arg_type arg_type)
675 {
676 	switch (arg_type & DYNPTR_TYPE_FLAG_MASK) {
677 	case DYNPTR_TYPE_LOCAL:
678 		return BPF_DYNPTR_TYPE_LOCAL;
679 	case DYNPTR_TYPE_RINGBUF:
680 		return BPF_DYNPTR_TYPE_RINGBUF;
681 	default:
682 		return BPF_DYNPTR_TYPE_INVALID;
683 	}
684 }
685 
686 static bool dynptr_type_refcounted(enum bpf_dynptr_type type)
687 {
688 	return type == BPF_DYNPTR_TYPE_RINGBUF;
689 }
690 
691 static int mark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
692 				   enum bpf_arg_type arg_type, int insn_idx)
693 {
694 	struct bpf_func_state *state = func(env, reg);
695 	enum bpf_dynptr_type type;
696 	int spi, i, id;
697 
698 	spi = get_spi(reg->off);
699 
700 	if (!is_spi_bounds_valid(state, spi, BPF_DYNPTR_NR_SLOTS))
701 		return -EINVAL;
702 
703 	for (i = 0; i < BPF_REG_SIZE; i++) {
704 		state->stack[spi].slot_type[i] = STACK_DYNPTR;
705 		state->stack[spi - 1].slot_type[i] = STACK_DYNPTR;
706 	}
707 
708 	type = arg_to_dynptr_type(arg_type);
709 	if (type == BPF_DYNPTR_TYPE_INVALID)
710 		return -EINVAL;
711 
712 	state->stack[spi].spilled_ptr.dynptr.first_slot = true;
713 	state->stack[spi].spilled_ptr.dynptr.type = type;
714 	state->stack[spi - 1].spilled_ptr.dynptr.type = type;
715 
716 	if (dynptr_type_refcounted(type)) {
717 		/* The id is used to track proper releasing */
718 		id = acquire_reference_state(env, insn_idx);
719 		if (id < 0)
720 			return id;
721 
722 		state->stack[spi].spilled_ptr.id = id;
723 		state->stack[spi - 1].spilled_ptr.id = id;
724 	}
725 
726 	return 0;
727 }
728 
729 static int unmark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
730 {
731 	struct bpf_func_state *state = func(env, reg);
732 	int spi, i;
733 
734 	spi = get_spi(reg->off);
735 
736 	if (!is_spi_bounds_valid(state, spi, BPF_DYNPTR_NR_SLOTS))
737 		return -EINVAL;
738 
739 	for (i = 0; i < BPF_REG_SIZE; i++) {
740 		state->stack[spi].slot_type[i] = STACK_INVALID;
741 		state->stack[spi - 1].slot_type[i] = STACK_INVALID;
742 	}
743 
744 	/* Invalidate any slices associated with this dynptr */
745 	if (dynptr_type_refcounted(state->stack[spi].spilled_ptr.dynptr.type)) {
746 		release_reference(env, state->stack[spi].spilled_ptr.id);
747 		state->stack[spi].spilled_ptr.id = 0;
748 		state->stack[spi - 1].spilled_ptr.id = 0;
749 	}
750 
751 	state->stack[spi].spilled_ptr.dynptr.first_slot = false;
752 	state->stack[spi].spilled_ptr.dynptr.type = 0;
753 	state->stack[spi - 1].spilled_ptr.dynptr.type = 0;
754 
755 	return 0;
756 }
757 
758 static bool is_dynptr_reg_valid_uninit(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
759 {
760 	struct bpf_func_state *state = func(env, reg);
761 	int spi = get_spi(reg->off);
762 	int i;
763 
764 	if (!is_spi_bounds_valid(state, spi, BPF_DYNPTR_NR_SLOTS))
765 		return true;
766 
767 	for (i = 0; i < BPF_REG_SIZE; i++) {
768 		if (state->stack[spi].slot_type[i] == STACK_DYNPTR ||
769 		    state->stack[spi - 1].slot_type[i] == STACK_DYNPTR)
770 			return false;
771 	}
772 
773 	return true;
774 }
775 
776 static bool is_dynptr_reg_valid_init(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
777 				     enum bpf_arg_type arg_type)
778 {
779 	struct bpf_func_state *state = func(env, reg);
780 	int spi = get_spi(reg->off);
781 	int i;
782 
783 	if (!is_spi_bounds_valid(state, spi, BPF_DYNPTR_NR_SLOTS) ||
784 	    !state->stack[spi].spilled_ptr.dynptr.first_slot)
785 		return false;
786 
787 	for (i = 0; i < BPF_REG_SIZE; i++) {
788 		if (state->stack[spi].slot_type[i] != STACK_DYNPTR ||
789 		    state->stack[spi - 1].slot_type[i] != STACK_DYNPTR)
790 			return false;
791 	}
792 
793 	/* ARG_PTR_TO_DYNPTR takes any type of dynptr */
794 	if (arg_type == ARG_PTR_TO_DYNPTR)
795 		return true;
796 
797 	return state->stack[spi].spilled_ptr.dynptr.type == arg_to_dynptr_type(arg_type);
798 }
799 
800 /* The reg state of a pointer or a bounded scalar was saved when
801  * it was spilled to the stack.
802  */
803 static bool is_spilled_reg(const struct bpf_stack_state *stack)
804 {
805 	return stack->slot_type[BPF_REG_SIZE - 1] == STACK_SPILL;
806 }
807 
808 static void scrub_spilled_slot(u8 *stype)
809 {
810 	if (*stype != STACK_INVALID)
811 		*stype = STACK_MISC;
812 }
813 
814 static void print_verifier_state(struct bpf_verifier_env *env,
815 				 const struct bpf_func_state *state,
816 				 bool print_all)
817 {
818 	const struct bpf_reg_state *reg;
819 	enum bpf_reg_type t;
820 	int i;
821 
822 	if (state->frameno)
823 		verbose(env, " frame%d:", state->frameno);
824 	for (i = 0; i < MAX_BPF_REG; i++) {
825 		reg = &state->regs[i];
826 		t = reg->type;
827 		if (t == NOT_INIT)
828 			continue;
829 		if (!print_all && !reg_scratched(env, i))
830 			continue;
831 		verbose(env, " R%d", i);
832 		print_liveness(env, reg->live);
833 		verbose(env, "=");
834 		if (t == SCALAR_VALUE && reg->precise)
835 			verbose(env, "P");
836 		if ((t == SCALAR_VALUE || t == PTR_TO_STACK) &&
837 		    tnum_is_const(reg->var_off)) {
838 			/* reg->off should be 0 for SCALAR_VALUE */
839 			verbose(env, "%s", t == SCALAR_VALUE ? "" : reg_type_str(env, t));
840 			verbose(env, "%lld", reg->var_off.value + reg->off);
841 		} else {
842 			const char *sep = "";
843 
844 			verbose(env, "%s", reg_type_str(env, t));
845 			if (base_type(t) == PTR_TO_BTF_ID)
846 				verbose(env, "%s", kernel_type_name(reg->btf, reg->btf_id));
847 			verbose(env, "(");
848 /*
849  * _a stands for append, was shortened to avoid multiline statements below.
850  * This macro is used to output a comma separated list of attributes.
851  */
852 #define verbose_a(fmt, ...) ({ verbose(env, "%s" fmt, sep, __VA_ARGS__); sep = ","; })
853 
854 			if (reg->id)
855 				verbose_a("id=%d", reg->id);
856 			if (reg_type_may_be_refcounted_or_null(t) && reg->ref_obj_id)
857 				verbose_a("ref_obj_id=%d", reg->ref_obj_id);
858 			if (t != SCALAR_VALUE)
859 				verbose_a("off=%d", reg->off);
860 			if (type_is_pkt_pointer(t))
861 				verbose_a("r=%d", reg->range);
862 			else if (base_type(t) == CONST_PTR_TO_MAP ||
863 				 base_type(t) == PTR_TO_MAP_KEY ||
864 				 base_type(t) == PTR_TO_MAP_VALUE)
865 				verbose_a("ks=%d,vs=%d",
866 					  reg->map_ptr->key_size,
867 					  reg->map_ptr->value_size);
868 			if (tnum_is_const(reg->var_off)) {
869 				/* Typically an immediate SCALAR_VALUE, but
870 				 * could be a pointer whose offset is too big
871 				 * for reg->off
872 				 */
873 				verbose_a("imm=%llx", reg->var_off.value);
874 			} else {
875 				if (reg->smin_value != reg->umin_value &&
876 				    reg->smin_value != S64_MIN)
877 					verbose_a("smin=%lld", (long long)reg->smin_value);
878 				if (reg->smax_value != reg->umax_value &&
879 				    reg->smax_value != S64_MAX)
880 					verbose_a("smax=%lld", (long long)reg->smax_value);
881 				if (reg->umin_value != 0)
882 					verbose_a("umin=%llu", (unsigned long long)reg->umin_value);
883 				if (reg->umax_value != U64_MAX)
884 					verbose_a("umax=%llu", (unsigned long long)reg->umax_value);
885 				if (!tnum_is_unknown(reg->var_off)) {
886 					char tn_buf[48];
887 
888 					tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
889 					verbose_a("var_off=%s", tn_buf);
890 				}
891 				if (reg->s32_min_value != reg->smin_value &&
892 				    reg->s32_min_value != S32_MIN)
893 					verbose_a("s32_min=%d", (int)(reg->s32_min_value));
894 				if (reg->s32_max_value != reg->smax_value &&
895 				    reg->s32_max_value != S32_MAX)
896 					verbose_a("s32_max=%d", (int)(reg->s32_max_value));
897 				if (reg->u32_min_value != reg->umin_value &&
898 				    reg->u32_min_value != U32_MIN)
899 					verbose_a("u32_min=%d", (int)(reg->u32_min_value));
900 				if (reg->u32_max_value != reg->umax_value &&
901 				    reg->u32_max_value != U32_MAX)
902 					verbose_a("u32_max=%d", (int)(reg->u32_max_value));
903 			}
904 #undef verbose_a
905 
906 			verbose(env, ")");
907 		}
908 	}
909 	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
910 		char types_buf[BPF_REG_SIZE + 1];
911 		bool valid = false;
912 		int j;
913 
914 		for (j = 0; j < BPF_REG_SIZE; j++) {
915 			if (state->stack[i].slot_type[j] != STACK_INVALID)
916 				valid = true;
917 			types_buf[j] = slot_type_char[
918 					state->stack[i].slot_type[j]];
919 		}
920 		types_buf[BPF_REG_SIZE] = 0;
921 		if (!valid)
922 			continue;
923 		if (!print_all && !stack_slot_scratched(env, i))
924 			continue;
925 		verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
926 		print_liveness(env, state->stack[i].spilled_ptr.live);
927 		if (is_spilled_reg(&state->stack[i])) {
928 			reg = &state->stack[i].spilled_ptr;
929 			t = reg->type;
930 			verbose(env, "=%s", t == SCALAR_VALUE ? "" : reg_type_str(env, t));
931 			if (t == SCALAR_VALUE && reg->precise)
932 				verbose(env, "P");
933 			if (t == SCALAR_VALUE && tnum_is_const(reg->var_off))
934 				verbose(env, "%lld", reg->var_off.value + reg->off);
935 		} else {
936 			verbose(env, "=%s", types_buf);
937 		}
938 	}
939 	if (state->acquired_refs && state->refs[0].id) {
940 		verbose(env, " refs=%d", state->refs[0].id);
941 		for (i = 1; i < state->acquired_refs; i++)
942 			if (state->refs[i].id)
943 				verbose(env, ",%d", state->refs[i].id);
944 	}
945 	if (state->in_callback_fn)
946 		verbose(env, " cb");
947 	if (state->in_async_callback_fn)
948 		verbose(env, " async_cb");
949 	verbose(env, "\n");
950 	mark_verifier_state_clean(env);
951 }
952 
953 static inline u32 vlog_alignment(u32 pos)
954 {
955 	return round_up(max(pos + BPF_LOG_MIN_ALIGNMENT / 2, BPF_LOG_ALIGNMENT),
956 			BPF_LOG_MIN_ALIGNMENT) - pos - 1;
957 }
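
/* Rough sketch of what vlog_alignment() achieves, assuming the bpf_verifier.h
 * values BPF_LOG_MIN_ALIGNMENT == 8 and BPF_LOG_ALIGNMENT == 40: for a short
 * previous line, e.g. pos == 10, it returns 29, so print_insn_state() pads the
 * continued line with 29 spaces and the ';' lands in column 40; for a longer
 * line, e.g. pos == 50, it returns 5 and the ';' lands on the next multiple of
 * 8 past the text.  The goal is that continuation markers line up in the log.
 */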
958 
959 static void print_insn_state(struct bpf_verifier_env *env,
960 			     const struct bpf_func_state *state)
961 {
962 	if (env->prev_log_len && env->prev_log_len == env->log.len_used) {
963 		/* remove new line character */
964 		bpf_vlog_reset(&env->log, env->prev_log_len - 1);
965 		verbose(env, "%*c;", vlog_alignment(env->prev_insn_print_len), ' ');
966 	} else {
967 		verbose(env, "%d:", env->insn_idx);
968 	}
969 	print_verifier_state(env, state, false);
970 }
971 
972 /* copy array src of length n * size bytes to dst. dst is reallocated if it's too
973  * small to hold src. This is different from krealloc since we don't want to preserve
974  * the contents of dst.
975  *
976  * Leaves dst untouched if src is NULL or length is zero. Returns NULL if memory could
977  * not be allocated.
978  */
979 static void *copy_array(void *dst, const void *src, size_t n, size_t size, gfp_t flags)
980 {
981 	size_t bytes;
982 
983 	if (ZERO_OR_NULL_PTR(src))
984 		goto out;
985 
986 	if (unlikely(check_mul_overflow(n, size, &bytes)))
987 		return NULL;
988 
989 	if (ksize(dst) < bytes) {
990 		kfree(dst);
991 		dst = kmalloc_track_caller(bytes, flags);
992 		if (!dst)
993 			return NULL;
994 	}
995 
996 	memcpy(dst, src, bytes);
997 out:
998 	return dst ? dst : ZERO_SIZE_PTR;
999 }
1000 
1001 /* resize an array from old_n items to new_n items. the array is reallocated if it's too
1002  * small to hold new_n items. new items are zeroed out if the array grows.
1003  *
1004  * Contrary to krealloc_array, does not free arr if new_n is zero.
1005  */
1006 static void *realloc_array(void *arr, size_t old_n, size_t new_n, size_t size)
1007 {
1008 	if (!new_n || old_n == new_n)
1009 		goto out;
1010 
1011 	arr = krealloc_array(arr, new_n, size, GFP_KERNEL);
1012 	if (!arr)
1013 		return NULL;
1014 
1015 	if (new_n > old_n)
1016 		memset(arr + old_n * size, 0, (new_n - old_n) * size);
1017 
1018 out:
1019 	return arr ? arr : ZERO_SIZE_PTR;
1020 }
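
/* Usage sketch for the two helpers above: copy_reference_state() below uses
 * copy_array() to clone src->refs into dst->refs without caring about dst's
 * previous contents, while resize_reference_state() uses realloc_array() to
 * grow state->refs by one zeroed slot when a reference is acquired:
 *
 *    dst->refs   = copy_array(dst->refs, src->refs, n, sizeof(*dst->refs), GFP_KERNEL);
 *    state->refs = realloc_array(state->refs, old_n, old_n + 1, sizeof(*state->refs));
 *
 * Both return ZERO_SIZE_PTR rather than NULL for an empty array, so a NULL
 * return always means allocation failure.
 */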
1021 
1022 static int copy_reference_state(struct bpf_func_state *dst, const struct bpf_func_state *src)
1023 {
1024 	dst->refs = copy_array(dst->refs, src->refs, src->acquired_refs,
1025 			       sizeof(struct bpf_reference_state), GFP_KERNEL);
1026 	if (!dst->refs)
1027 		return -ENOMEM;
1028 
1029 	dst->acquired_refs = src->acquired_refs;
1030 	return 0;
1031 }
1032 
1033 static int copy_stack_state(struct bpf_func_state *dst, const struct bpf_func_state *src)
1034 {
1035 	size_t n = src->allocated_stack / BPF_REG_SIZE;
1036 
1037 	dst->stack = copy_array(dst->stack, src->stack, n, sizeof(struct bpf_stack_state),
1038 				GFP_KERNEL);
1039 	if (!dst->stack)
1040 		return -ENOMEM;
1041 
1042 	dst->allocated_stack = src->allocated_stack;
1043 	return 0;
1044 }
1045 
1046 static int resize_reference_state(struct bpf_func_state *state, size_t n)
1047 {
1048 	state->refs = realloc_array(state->refs, state->acquired_refs, n,
1049 				    sizeof(struct bpf_reference_state));
1050 	if (!state->refs)
1051 		return -ENOMEM;
1052 
1053 	state->acquired_refs = n;
1054 	return 0;
1055 }
1056 
1057 static int grow_stack_state(struct bpf_func_state *state, int size)
1058 {
1059 	size_t old_n = state->allocated_stack / BPF_REG_SIZE, n = size / BPF_REG_SIZE;
1060 
1061 	if (old_n >= n)
1062 		return 0;
1063 
1064 	state->stack = realloc_array(state->stack, old_n, n, sizeof(struct bpf_stack_state));
1065 	if (!state->stack)
1066 		return -ENOMEM;
1067 
1068 	state->allocated_stack = size;
1069 	return 0;
1070 }
1071 
1072 /* Acquire a pointer id from the env and update the state->refs to include
1073  * this new pointer reference.
1074  * On success, returns a valid pointer id to associate with the register
1075  * On failure, returns a negative errno.
1076  */
1077 static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx)
1078 {
1079 	struct bpf_func_state *state = cur_func(env);
1080 	int new_ofs = state->acquired_refs;
1081 	int id, err;
1082 
1083 	err = resize_reference_state(state, state->acquired_refs + 1);
1084 	if (err)
1085 		return err;
1086 	id = ++env->id_gen;
1087 	state->refs[new_ofs].id = id;
1088 	state->refs[new_ofs].insn_idx = insn_idx;
1089 
1090 	return id;
1091 }
1092 
1093 /* release function corresponding to acquire_reference_state(). Idempotent. */
1094 static int release_reference_state(struct bpf_func_state *state, int ptr_id)
1095 {
1096 	int i, last_idx;
1097 
1098 	last_idx = state->acquired_refs - 1;
1099 	for (i = 0; i < state->acquired_refs; i++) {
1100 		if (state->refs[i].id == ptr_id) {
1101 			if (last_idx && i != last_idx)
1102 				memcpy(&state->refs[i], &state->refs[last_idx],
1103 				       sizeof(*state->refs));
1104 			memset(&state->refs[last_idx], 0, sizeof(*state->refs));
1105 			state->acquired_refs--;
1106 			return 0;
1107 		}
1108 	}
1109 	return -EINVAL;
1110 }
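
/* Illustration of the swap-with-last removal above: with acquired_refs == 3 and
 * refs[].id == {3, 7, 9}, release_reference_state(state, 7) copies refs[2] over
 * refs[1], zeroes refs[2] and decrements acquired_refs, leaving ids {3, 9}.
 * The order of the remaining entries is not preserved.
 */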
1111 
1112 static void free_func_state(struct bpf_func_state *state)
1113 {
1114 	if (!state)
1115 		return;
1116 	kfree(state->refs);
1117 	kfree(state->stack);
1118 	kfree(state);
1119 }
1120 
1121 static void clear_jmp_history(struct bpf_verifier_state *state)
1122 {
1123 	kfree(state->jmp_history);
1124 	state->jmp_history = NULL;
1125 	state->jmp_history_cnt = 0;
1126 }
1127 
1128 static void free_verifier_state(struct bpf_verifier_state *state,
1129 				bool free_self)
1130 {
1131 	int i;
1132 
1133 	for (i = 0; i <= state->curframe; i++) {
1134 		free_func_state(state->frame[i]);
1135 		state->frame[i] = NULL;
1136 	}
1137 	clear_jmp_history(state);
1138 	if (free_self)
1139 		kfree(state);
1140 }
1141 
1142 /* copy verifier state from src to dst growing dst stack space
1143  * when necessary to accommodate larger src stack
1144  */
1145 static int copy_func_state(struct bpf_func_state *dst,
1146 			   const struct bpf_func_state *src)
1147 {
1148 	int err;
1149 
1150 	memcpy(dst, src, offsetof(struct bpf_func_state, acquired_refs));
1151 	err = copy_reference_state(dst, src);
1152 	if (err)
1153 		return err;
1154 	return copy_stack_state(dst, src);
1155 }
1156 
1157 static int copy_verifier_state(struct bpf_verifier_state *dst_state,
1158 			       const struct bpf_verifier_state *src)
1159 {
1160 	struct bpf_func_state *dst;
1161 	int i, err;
1162 
1163 	dst_state->jmp_history = copy_array(dst_state->jmp_history, src->jmp_history,
1164 					    src->jmp_history_cnt, sizeof(struct bpf_idx_pair),
1165 					    GFP_USER);
1166 	if (!dst_state->jmp_history)
1167 		return -ENOMEM;
1168 	dst_state->jmp_history_cnt = src->jmp_history_cnt;
1169 
1170 	/* if dst has more stack frames then src frame, free them */
1171 	/* if dst has more stack frames than src, free them */
1172 		free_func_state(dst_state->frame[i]);
1173 		dst_state->frame[i] = NULL;
1174 	}
1175 	dst_state->speculative = src->speculative;
1176 	dst_state->curframe = src->curframe;
1177 	dst_state->active_spin_lock = src->active_spin_lock;
1178 	dst_state->branches = src->branches;
1179 	dst_state->parent = src->parent;
1180 	dst_state->first_insn_idx = src->first_insn_idx;
1181 	dst_state->last_insn_idx = src->last_insn_idx;
1182 	for (i = 0; i <= src->curframe; i++) {
1183 		dst = dst_state->frame[i];
1184 		if (!dst) {
1185 			dst = kzalloc(sizeof(*dst), GFP_KERNEL);
1186 			if (!dst)
1187 				return -ENOMEM;
1188 			dst_state->frame[i] = dst;
1189 		}
1190 		err = copy_func_state(dst, src->frame[i]);
1191 		if (err)
1192 			return err;
1193 	}
1194 	return 0;
1195 }
1196 
1197 static void update_branch_counts(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
1198 {
1199 	while (st) {
1200 		u32 br = --st->branches;
1201 
1202 		/* WARN_ON(br > 1) technically makes sense here,
1203 		 * but see comment in push_stack(), hence:
1204 		 */
1205 		WARN_ONCE((int)br < 0,
1206 			  "BUG update_branch_counts:branches_to_explore=%d\n",
1207 			  br);
1208 		if (br)
1209 			break;
1210 		st = st->parent;
1211 	}
1212 }
1213 
1214 static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx,
1215 		     int *insn_idx, bool pop_log)
1216 {
1217 	struct bpf_verifier_state *cur = env->cur_state;
1218 	struct bpf_verifier_stack_elem *elem, *head = env->head;
1219 	int err;
1220 
1221 	if (env->head == NULL)
1222 		return -ENOENT;
1223 
1224 	if (cur) {
1225 		err = copy_verifier_state(cur, &head->st);
1226 		if (err)
1227 			return err;
1228 	}
1229 	if (pop_log)
1230 		bpf_vlog_reset(&env->log, head->log_pos);
1231 	if (insn_idx)
1232 		*insn_idx = head->insn_idx;
1233 	if (prev_insn_idx)
1234 		*prev_insn_idx = head->prev_insn_idx;
1235 	elem = head->next;
1236 	free_verifier_state(&head->st, false);
1237 	kfree(head);
1238 	env->head = elem;
1239 	env->stack_size--;
1240 	return 0;
1241 }
1242 
1243 static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
1244 					     int insn_idx, int prev_insn_idx,
1245 					     bool speculative)
1246 {
1247 	struct bpf_verifier_state *cur = env->cur_state;
1248 	struct bpf_verifier_stack_elem *elem;
1249 	int err;
1250 
1251 	elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL);
1252 	if (!elem)
1253 		goto err;
1254 
1255 	elem->insn_idx = insn_idx;
1256 	elem->prev_insn_idx = prev_insn_idx;
1257 	elem->next = env->head;
1258 	elem->log_pos = env->log.len_used;
1259 	env->head = elem;
1260 	env->stack_size++;
1261 	err = copy_verifier_state(&elem->st, cur);
1262 	if (err)
1263 		goto err;
1264 	elem->st.speculative |= speculative;
1265 	if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) {
1266 		verbose(env, "The sequence of %d jumps is too complex.\n",
1267 			env->stack_size);
1268 		goto err;
1269 	}
1270 	if (elem->st.parent) {
1271 		++elem->st.parent->branches;
1272 		/* WARN_ON(branches > 2) technically makes sense here,
1273 		 * but
1274 		 * 1. speculative states will bump 'branches' for non-branch
1275 		 * instructions
1276 		 * 2. is_state_visited() heuristics may decide not to create
1277 		 * a new state for a sequence of branches and all such current
1278 		 * and cloned states will be pointing to a single parent state
1279 		 * which might have large 'branches' count.
1280 		 */
1281 	}
1282 	return &elem->st;
1283 err:
1284 	free_verifier_state(env->cur_state, true);
1285 	env->cur_state = NULL;
1286 	/* pop all elements and return */
1287 	while (!pop_stack(env, NULL, NULL, false));
1288 	return NULL;
1289 }
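
/* Usage sketch (simplified): when the verifier reaches a conditional jump whose
 * outcome is not known, it queues one successor and keeps walking the other,
 * roughly:
 *
 *    other_branch = push_stack(env, env->insn_idx + insn->off + 1,
 *                              env->insn_idx, false);
 *    if (!other_branch)
 *            return -EFAULT;
 *    // ... keep analyzing the fall-through path ...
 *
 * and the main loop later calls pop_stack() to resume each queued state.  Every
 * pushed state bumps its parent's 'branches' count, which update_branch_counts()
 * unwinds once an explored path completes.
 */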
1290 
1291 #define CALLER_SAVED_REGS 6
1292 static const int caller_saved[CALLER_SAVED_REGS] = {
1293 	BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
1294 };
1295 
1296 static void __mark_reg_not_init(const struct bpf_verifier_env *env,
1297 				struct bpf_reg_state *reg);
1298 
1299 /* This helper doesn't clear reg->id */
1300 static void ___mark_reg_known(struct bpf_reg_state *reg, u64 imm)
1301 {
1302 	reg->var_off = tnum_const(imm);
1303 	reg->smin_value = (s64)imm;
1304 	reg->smax_value = (s64)imm;
1305 	reg->umin_value = imm;
1306 	reg->umax_value = imm;
1307 
1308 	reg->s32_min_value = (s32)imm;
1309 	reg->s32_max_value = (s32)imm;
1310 	reg->u32_min_value = (u32)imm;
1311 	reg->u32_max_value = (u32)imm;
1312 }
1313 
1314 /* Mark the unknown part of a register (variable offset or scalar value) as
1315  * known to have the value @imm.
1316  */
1317 static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm)
1318 {
1319 	/* Clear id, off, and union(map_ptr, range) */
1320 	memset(((u8 *)reg) + sizeof(reg->type), 0,
1321 	       offsetof(struct bpf_reg_state, var_off) - sizeof(reg->type));
1322 	___mark_reg_known(reg, imm);
1323 }
1324 
1325 static void __mark_reg32_known(struct bpf_reg_state *reg, u64 imm)
1326 {
1327 	reg->var_off = tnum_const_subreg(reg->var_off, imm);
1328 	reg->s32_min_value = (s32)imm;
1329 	reg->s32_max_value = (s32)imm;
1330 	reg->u32_min_value = (u32)imm;
1331 	reg->u32_max_value = (u32)imm;
1332 }
1333 
1334 /* Mark the 'variable offset' part of a register as zero.  This should be
1335  * used only on registers holding a pointer type.
1336  */
1337 static void __mark_reg_known_zero(struct bpf_reg_state *reg)
1338 {
1339 	__mark_reg_known(reg, 0);
1340 }
1341 
1342 static void __mark_reg_const_zero(struct bpf_reg_state *reg)
1343 {
1344 	__mark_reg_known(reg, 0);
1345 	reg->type = SCALAR_VALUE;
1346 }
1347 
1348 static void mark_reg_known_zero(struct bpf_verifier_env *env,
1349 				struct bpf_reg_state *regs, u32 regno)
1350 {
1351 	if (WARN_ON(regno >= MAX_BPF_REG)) {
1352 		verbose(env, "mark_reg_known_zero(regs, %u)\n", regno);
1353 		/* Something bad happened, let's kill all regs */
1354 		for (regno = 0; regno < MAX_BPF_REG; regno++)
1355 			__mark_reg_not_init(env, regs + regno);
1356 		return;
1357 	}
1358 	__mark_reg_known_zero(regs + regno);
1359 }
1360 
1361 static void mark_ptr_not_null_reg(struct bpf_reg_state *reg)
1362 {
1363 	if (base_type(reg->type) == PTR_TO_MAP_VALUE) {
1364 		const struct bpf_map *map = reg->map_ptr;
1365 
1366 		if (map->inner_map_meta) {
1367 			reg->type = CONST_PTR_TO_MAP;
1368 			reg->map_ptr = map->inner_map_meta;
1369 			/* transfer reg's id which is unique for every map_lookup_elem
1370 			 * as UID of the inner map.
1371 			 */
1372 			if (map_value_has_timer(map->inner_map_meta))
1373 				reg->map_uid = reg->id;
1374 		} else if (map->map_type == BPF_MAP_TYPE_XSKMAP) {
1375 			reg->type = PTR_TO_XDP_SOCK;
1376 		} else if (map->map_type == BPF_MAP_TYPE_SOCKMAP ||
1377 			   map->map_type == BPF_MAP_TYPE_SOCKHASH) {
1378 			reg->type = PTR_TO_SOCKET;
1379 		} else {
1380 			reg->type = PTR_TO_MAP_VALUE;
1381 		}
1382 		return;
1383 	}
1384 
1385 	reg->type &= ~PTR_MAYBE_NULL;
1386 }
1387 
1388 static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg)
1389 {
1390 	return type_is_pkt_pointer(reg->type);
1391 }
1392 
1393 static bool reg_is_pkt_pointer_any(const struct bpf_reg_state *reg)
1394 {
1395 	return reg_is_pkt_pointer(reg) ||
1396 	       reg->type == PTR_TO_PACKET_END;
1397 }
1398 
1399 /* Unmodified PTR_TO_PACKET[_META,_END] register from ctx access. */
1400 static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg,
1401 				    enum bpf_reg_type which)
1402 {
1403 	/* The register can already have a range from prior markings.
1404 	 * This is fine as long as it hasn't been advanced from its
1405 	 * origin.
1406 	 */
1407 	return reg->type == which &&
1408 	       reg->id == 0 &&
1409 	       reg->off == 0 &&
1410 	       tnum_equals_const(reg->var_off, 0);
1411 }
1412 
1413 /* Reset the min/max bounds of a register */
1414 static void __mark_reg_unbounded(struct bpf_reg_state *reg)
1415 {
1416 	reg->smin_value = S64_MIN;
1417 	reg->smax_value = S64_MAX;
1418 	reg->umin_value = 0;
1419 	reg->umax_value = U64_MAX;
1420 
1421 	reg->s32_min_value = S32_MIN;
1422 	reg->s32_max_value = S32_MAX;
1423 	reg->u32_min_value = 0;
1424 	reg->u32_max_value = U32_MAX;
1425 }
1426 
1427 static void __mark_reg64_unbounded(struct bpf_reg_state *reg)
1428 {
1429 	reg->smin_value = S64_MIN;
1430 	reg->smax_value = S64_MAX;
1431 	reg->umin_value = 0;
1432 	reg->umax_value = U64_MAX;
1433 }
1434 
1435 static void __mark_reg32_unbounded(struct bpf_reg_state *reg)
1436 {
1437 	reg->s32_min_value = S32_MIN;
1438 	reg->s32_max_value = S32_MAX;
1439 	reg->u32_min_value = 0;
1440 	reg->u32_max_value = U32_MAX;
1441 }
1442 
1443 static void __update_reg32_bounds(struct bpf_reg_state *reg)
1444 {
1445 	struct tnum var32_off = tnum_subreg(reg->var_off);
1446 
1447 	/* min signed is max(sign bit) | min(other bits) */
1448 	reg->s32_min_value = max_t(s32, reg->s32_min_value,
1449 			var32_off.value | (var32_off.mask & S32_MIN));
1450 	/* max signed is min(sign bit) | max(other bits) */
1451 	reg->s32_max_value = min_t(s32, reg->s32_max_value,
1452 			var32_off.value | (var32_off.mask & S32_MAX));
1453 	reg->u32_min_value = max_t(u32, reg->u32_min_value, (u32)var32_off.value);
1454 	reg->u32_max_value = min(reg->u32_max_value,
1455 				 (u32)(var32_off.value | var32_off.mask));
1456 }
1457 
1458 static void __update_reg64_bounds(struct bpf_reg_state *reg)
1459 {
1460 	/* min signed is max(sign bit) | min(other bits) */
1461 	reg->smin_value = max_t(s64, reg->smin_value,
1462 				reg->var_off.value | (reg->var_off.mask & S64_MIN));
1463 	/* max signed is min(sign bit) | max(other bits) */
1464 	reg->smax_value = min_t(s64, reg->smax_value,
1465 				reg->var_off.value | (reg->var_off.mask & S64_MAX));
1466 	reg->umin_value = max(reg->umin_value, reg->var_off.value);
1467 	reg->umax_value = min(reg->umax_value,
1468 			      reg->var_off.value | reg->var_off.mask);
1469 }
1470 
1471 static void __update_reg_bounds(struct bpf_reg_state *reg)
1472 {
1473 	__update_reg32_bounds(reg);
1474 	__update_reg64_bounds(reg);
1475 }
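
/* Worked example for the bound-update helpers above: if var_off is
 * (value=0x8, mask=0x3), i.e. bits 0-1 are unknown and bit 3 is known set, the
 * possible values are 8..11.  The sign bit is known clear (it is not in the
 * mask), so:
 *
 *    umin >= var_off.value                    == 8
 *    umax <= var_off.value | var_off.mask     == 11
 *    smin >= value | (mask & S64_MIN)         == 8
 *    smax <= value | (mask & S64_MAX)         == 11
 *
 * and the existing bounds are only ever tightened (max for mins, min for maxes).
 */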
1476 
1477 /* Uses signed min/max values to inform unsigned, and vice-versa */
1478 static void __reg32_deduce_bounds(struct bpf_reg_state *reg)
1479 {
1480 	/* Learn sign from signed bounds.
1481 	 * If we cannot cross the sign boundary, then signed and unsigned bounds
1482 	 * are the same, so combine.  This works even in the negative case, e.g.
1483 	 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
1484 	 */
1485 	if (reg->s32_min_value >= 0 || reg->s32_max_value < 0) {
1486 		reg->s32_min_value = reg->u32_min_value =
1487 			max_t(u32, reg->s32_min_value, reg->u32_min_value);
1488 		reg->s32_max_value = reg->u32_max_value =
1489 			min_t(u32, reg->s32_max_value, reg->u32_max_value);
1490 		return;
1491 	}
1492 	/* Learn sign from unsigned bounds.  Signed bounds cross the sign
1493 	 * boundary, so we must be careful.
1494 	 */
1495 	if ((s32)reg->u32_max_value >= 0) {
1496 		/* Positive.  We can't learn anything from the smin, but smax
1497 		 * is positive, hence safe.
1498 		 */
1499 		reg->s32_min_value = reg->u32_min_value;
1500 		reg->s32_max_value = reg->u32_max_value =
1501 			min_t(u32, reg->s32_max_value, reg->u32_max_value);
1502 	} else if ((s32)reg->u32_min_value < 0) {
1503 		/* Negative.  We can't learn anything from the smax, but smin
1504 		 * is negative, hence safe.
1505 		 */
1506 		reg->s32_min_value = reg->u32_min_value =
1507 			max_t(u32, reg->s32_min_value, reg->u32_min_value);
1508 		reg->s32_max_value = reg->u32_max_value;
1509 	}
1510 }
1511 
1512 static void __reg64_deduce_bounds(struct bpf_reg_state *reg)
1513 {
1514 	/* Learn sign from signed bounds.
1515 	 * If we cannot cross the sign boundary, then signed and unsigned bounds
1516 	 * are the same, so combine.  This works even in the negative case, e.g.
1517 	 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
1518 	 */
1519 	if (reg->smin_value >= 0 || reg->smax_value < 0) {
1520 		reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
1521 							  reg->umin_value);
1522 		reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
1523 							  reg->umax_value);
1524 		return;
1525 	}
1526 	/* Learn sign from unsigned bounds.  Signed bounds cross the sign
1527 	 * boundary, so we must be careful.
1528 	 */
1529 	if ((s64)reg->umax_value >= 0) {
1530 		/* Positive.  We can't learn anything from the smin, but smax
1531 		 * is positive, hence safe.
1532 		 */
1533 		reg->smin_value = reg->umin_value;
1534 		reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
1535 							  reg->umax_value);
1536 	} else if ((s64)reg->umin_value < 0) {
1537 		/* Negative.  We can't learn anything from the smax, but smin
1538 		 * is negative, hence safe.
1539 		 */
1540 		reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
1541 							  reg->umin_value);
1542 		reg->smax_value = reg->umax_value;
1543 	}
1544 }
1545 
1546 static void __reg_deduce_bounds(struct bpf_reg_state *reg)
1547 {
1548 	__reg32_deduce_bounds(reg);
1549 	__reg64_deduce_bounds(reg);
1550 }
1551 
1552 /* Attempts to improve var_off based on unsigned min/max information */
1553 static void __reg_bound_offset(struct bpf_reg_state *reg)
1554 {
1555 	struct tnum var64_off = tnum_intersect(reg->var_off,
1556 					       tnum_range(reg->umin_value,
1557 							  reg->umax_value));
1558 	struct tnum var32_off = tnum_intersect(tnum_subreg(reg->var_off),
1559 						tnum_range(reg->u32_min_value,
1560 							   reg->u32_max_value));
1561 
1562 	reg->var_off = tnum_or(tnum_clear_subreg(var64_off), var32_off);
1563 }
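
/* Worked example for __reg_bound_offset(): if umin_value == 4 and
 * umax_value == 7, tnum_range(4, 7) is (value=0x4, mask=0x3), i.e. bit 2 is
 * known set and only bits 0-1 stay unknown.  Intersecting that with the
 * current var_off can therefore recover known bits purely from the unsigned
 * bounds, which later feeds back into __update_reg_bounds().
 */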
1564 
1565 static bool __reg32_bound_s64(s32 a)
1566 {
1567 	return a >= 0 && a <= S32_MAX;
1568 }
1569 
1570 static void __reg_assign_32_into_64(struct bpf_reg_state *reg)
1571 {
1572 	reg->umin_value = reg->u32_min_value;
1573 	reg->umax_value = reg->u32_max_value;
1574 
1575 	/* Attempt to pull the 32-bit signed bounds into the 64-bit bounds, but they
1576 	 * must be positive; otherwise set worst-case bounds and refine them later
1577 	 * from the tnum.
1578 	 */
1579 	if (__reg32_bound_s64(reg->s32_min_value) &&
1580 	    __reg32_bound_s64(reg->s32_max_value)) {
1581 		reg->smin_value = reg->s32_min_value;
1582 		reg->smax_value = reg->s32_max_value;
1583 	} else {
1584 		reg->smin_value = 0;
1585 		reg->smax_value = U32_MAX;
1586 	}
1587 }
1588 
1589 static void __reg_combine_32_into_64(struct bpf_reg_state *reg)
1590 {
1591 	/* special case when the 64-bit register has its upper 32 bits zeroed.
1592 	 * Typically happens after a zext or <<32, >>32 sequence, allowing us
1593 	 * to use the 32-bit bounds directly.
1594 	 */
1595 	if (tnum_equals_const(tnum_clear_subreg(reg->var_off), 0)) {
1596 		__reg_assign_32_into_64(reg);
1597 	} else {
1598 		/* Otherwise the best we can do is push the lower 32-bit known and
1599 		 * unknown bits into the register (var_off set from jmp logic) and
1600 		 * then learn as much as possible from the 64-bit tnum known and
1601 		 * unknown bits. The previous smin/smax bounds are invalid here
1602 		 * because of the jmp32 comparison, so mark them unknown so that
1603 		 * they do not impact the tnum bounds calculation.
1604 		 */
1605 		__mark_reg64_unbounded(reg);
1606 		__update_reg_bounds(reg);
1607 	}
1608 
1609 	/* Intersecting with the old var_off might have improved our bounds
1610 	 * slightly.  e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
1611 	 * then new var_off is (0; 0x7f...fc) which improves our umax.
1612 	 */
1613 	__reg_deduce_bounds(reg);
1614 	__reg_bound_offset(reg);
1615 	__update_reg_bounds(reg);
1616 }
1617 
1618 static bool __reg64_bound_s32(s64 a)
1619 {
1620 	return a >= S32_MIN && a <= S32_MAX;
1621 }
1622 
1623 static bool __reg64_bound_u32(u64 a)
1624 {
1625 	return a >= U32_MIN && a <= U32_MAX;
1626 }
1627 
1628 static void __reg_combine_64_into_32(struct bpf_reg_state *reg)
1629 {
1630 	__mark_reg32_unbounded(reg);
1631 
1632 	if (__reg64_bound_s32(reg->smin_value) && __reg64_bound_s32(reg->smax_value)) {
1633 		reg->s32_min_value = (s32)reg->smin_value;
1634 		reg->s32_max_value = (s32)reg->smax_value;
1635 	}
1636 	if (__reg64_bound_u32(reg->umin_value) && __reg64_bound_u32(reg->umax_value)) {
1637 		reg->u32_min_value = (u32)reg->umin_value;
1638 		reg->u32_max_value = (u32)reg->umax_value;
1639 	}
1640 
1641 	/* Intersecting with the old var_off might have improved our bounds
1642 	 * slightly.  e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
1643 	 * then new var_off is (0; 0x7f...fc) which improves our umax.
1644 	 */
1645 	__reg_deduce_bounds(reg);
1646 	__reg_bound_offset(reg);
1647 	__update_reg_bounds(reg);
1648 }
1649 
1650 /* Mark a register as having a completely unknown (scalar) value. */
1651 static void __mark_reg_unknown(const struct bpf_verifier_env *env,
1652 			       struct bpf_reg_state *reg)
1653 {
1654 	/*
1655 	 * Clear type, id, off, and union(map_ptr, range) and
1656 	 * padding between 'type' and union
1657 	 */
1658 	memset(reg, 0, offsetof(struct bpf_reg_state, var_off));
1659 	reg->type = SCALAR_VALUE;
1660 	reg->var_off = tnum_unknown;
1661 	reg->frameno = 0;
1662 	reg->precise = env->subprog_cnt > 1 || !env->bpf_capable;
1663 	__mark_reg_unbounded(reg);
1664 }
1665 
1666 static void mark_reg_unknown(struct bpf_verifier_env *env,
1667 			     struct bpf_reg_state *regs, u32 regno)
1668 {
1669 	if (WARN_ON(regno >= MAX_BPF_REG)) {
1670 		verbose(env, "mark_reg_unknown(regs, %u)\n", regno);
1671 		/* Something bad happened, let's kill all regs except FP */
1672 		for (regno = 0; regno < BPF_REG_FP; regno++)
1673 			__mark_reg_not_init(env, regs + regno);
1674 		return;
1675 	}
1676 	__mark_reg_unknown(env, regs + regno);
1677 }
1678 
1679 static void __mark_reg_not_init(const struct bpf_verifier_env *env,
1680 				struct bpf_reg_state *reg)
1681 {
1682 	__mark_reg_unknown(env, reg);
1683 	reg->type = NOT_INIT;
1684 }
1685 
1686 static void mark_reg_not_init(struct bpf_verifier_env *env,
1687 			      struct bpf_reg_state *regs, u32 regno)
1688 {
1689 	if (WARN_ON(regno >= MAX_BPF_REG)) {
1690 		verbose(env, "mark_reg_not_init(regs, %u)\n", regno);
1691 		/* Something bad happened, let's kill all regs except FP */
1692 		for (regno = 0; regno < BPF_REG_FP; regno++)
1693 			__mark_reg_not_init(env, regs + regno);
1694 		return;
1695 	}
1696 	__mark_reg_not_init(env, regs + regno);
1697 }
1698 
1699 static void mark_btf_ld_reg(struct bpf_verifier_env *env,
1700 			    struct bpf_reg_state *regs, u32 regno,
1701 			    enum bpf_reg_type reg_type,
1702 			    struct btf *btf, u32 btf_id,
1703 			    enum bpf_type_flag flag)
1704 {
1705 	if (reg_type == SCALAR_VALUE) {
1706 		mark_reg_unknown(env, regs, regno);
1707 		return;
1708 	}
1709 	mark_reg_known_zero(env, regs, regno);
1710 	regs[regno].type = PTR_TO_BTF_ID | flag;
1711 	regs[regno].btf = btf;
1712 	regs[regno].btf_id = btf_id;
1713 }
1714 
1715 #define DEF_NOT_SUBREG	(0)
1716 static void init_reg_state(struct bpf_verifier_env *env,
1717 			   struct bpf_func_state *state)
1718 {
1719 	struct bpf_reg_state *regs = state->regs;
1720 	int i;
1721 
1722 	for (i = 0; i < MAX_BPF_REG; i++) {
1723 		mark_reg_not_init(env, regs, i);
1724 		regs[i].live = REG_LIVE_NONE;
1725 		regs[i].parent = NULL;
1726 		regs[i].subreg_def = DEF_NOT_SUBREG;
1727 	}
1728 
1729 	/* frame pointer */
1730 	regs[BPF_REG_FP].type = PTR_TO_STACK;
1731 	mark_reg_known_zero(env, regs, BPF_REG_FP);
1732 	regs[BPF_REG_FP].frameno = state->frameno;
1733 }
1734 
1735 #define BPF_MAIN_FUNC (-1)
1736 static void init_func_state(struct bpf_verifier_env *env,
1737 			    struct bpf_func_state *state,
1738 			    int callsite, int frameno, int subprogno)
1739 {
1740 	state->callsite = callsite;
1741 	state->frameno = frameno;
1742 	state->subprogno = subprogno;
1743 	init_reg_state(env, state);
1744 	mark_verifier_state_scratched(env);
1745 }
1746 
1747 /* Similar to push_stack(), but for async callbacks */
1748 static struct bpf_verifier_state *push_async_cb(struct bpf_verifier_env *env,
1749 						int insn_idx, int prev_insn_idx,
1750 						int subprog)
1751 {
1752 	struct bpf_verifier_stack_elem *elem;
1753 	struct bpf_func_state *frame;
1754 
1755 	elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL);
1756 	if (!elem)
1757 		goto err;
1758 
1759 	elem->insn_idx = insn_idx;
1760 	elem->prev_insn_idx = prev_insn_idx;
1761 	elem->next = env->head;
1762 	elem->log_pos = env->log.len_used;
1763 	env->head = elem;
1764 	env->stack_size++;
1765 	if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) {
1766 		verbose(env,
1767 			"The sequence of %d jumps is too complex for async cb.\n",
1768 			env->stack_size);
1769 		goto err;
1770 	}
1771 	/* Unlike push_stack(), do not copy_verifier_state().
1772 	 * The caller state doesn't matter.
1773 	 * This is an async callback. It starts with a fresh stack.
1774 	 * Initialize it similarly to do_check_common().
1775 	 */
1776 	elem->st.branches = 1;
1777 	frame = kzalloc(sizeof(*frame), GFP_KERNEL);
1778 	if (!frame)
1779 		goto err;
1780 	init_func_state(env, frame,
1781 			BPF_MAIN_FUNC /* callsite */,
1782 			0 /* frameno within this callchain */,
1783 			subprog /* subprog number within this prog */);
1784 	elem->st.frame[0] = frame;
1785 	return &elem->st;
1786 err:
1787 	free_verifier_state(env->cur_state, true);
1788 	env->cur_state = NULL;
1789 	/* pop all elements and return */
1790 	while (!pop_stack(env, NULL, NULL, false));
1791 	return NULL;
1792 }
1793 
1794 
1795 enum reg_arg_type {
1796 	SRC_OP,		/* register is used as source operand */
1797 	DST_OP,		/* register is used as destination operand */
1798 	DST_OP_NO_MARK	/* same as above, check only, don't mark */
1799 };
1800 
1801 static int cmp_subprogs(const void *a, const void *b)
1802 {
1803 	return ((struct bpf_subprog_info *)a)->start -
1804 	       ((struct bpf_subprog_info *)b)->start;
1805 }
1806 
1807 static int find_subprog(struct bpf_verifier_env *env, int off)
1808 {
1809 	struct bpf_subprog_info *p;
1810 
1811 	p = bsearch(&off, env->subprog_info, env->subprog_cnt,
1812 		    sizeof(env->subprog_info[0]), cmp_subprogs);
1813 	if (!p)
1814 		return -ENOENT;
1815 	return p - env->subprog_info;
1816 
1817 }
1818 
1819 static int add_subprog(struct bpf_verifier_env *env, int off)
1820 {
1821 	int insn_cnt = env->prog->len;
1822 	int ret;
1823 
1824 	if (off >= insn_cnt || off < 0) {
1825 		verbose(env, "call to invalid destination\n");
1826 		return -EINVAL;
1827 	}
1828 	ret = find_subprog(env, off);
1829 	if (ret >= 0)
1830 		return ret;
1831 	if (env->subprog_cnt >= BPF_MAX_SUBPROGS) {
1832 		verbose(env, "too many subprograms\n");
1833 		return -E2BIG;
1834 	}
1835 	/* determine subprog starts. The end is one before the next starts */
1836 	env->subprog_info[env->subprog_cnt++].start = off;
1837 	sort(env->subprog_info, env->subprog_cnt,
1838 	     sizeof(env->subprog_info[0]), cmp_subprogs, NULL);
1839 	return env->subprog_cnt - 1;
1840 }
1841 
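/* Illustrative example (insn indexes hypothetical): for a program whose
 * insn 5 is a bpf-to-bpf call with imm == 6, add_subprog_and_kfunc() below
 * calls add_subprog(env, 5 + 6 + 1), i.e. records insn 12 as a subprog
 * start. Keeping subprog_info sorted by 'start' is what allows
 * find_subprog() to use bsearch().
 */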
1842 #define MAX_KFUNC_DESCS 256
1843 #define MAX_KFUNC_BTFS	256
1844 
1845 struct bpf_kfunc_desc {
1846 	struct btf_func_model func_model;
1847 	u32 func_id;
1848 	s32 imm;
1849 	u16 offset;
1850 };
1851 
1852 struct bpf_kfunc_btf {
1853 	struct btf *btf;
1854 	struct module *module;
1855 	u16 offset;
1856 };
1857 
1858 struct bpf_kfunc_desc_tab {
1859 	struct bpf_kfunc_desc descs[MAX_KFUNC_DESCS];
1860 	u32 nr_descs;
1861 };
1862 
1863 struct bpf_kfunc_btf_tab {
1864 	struct bpf_kfunc_btf descs[MAX_KFUNC_BTFS];
1865 	u32 nr_descs;
1866 };
1867 
1868 static int kfunc_desc_cmp_by_id_off(const void *a, const void *b)
1869 {
1870 	const struct bpf_kfunc_desc *d0 = a;
1871 	const struct bpf_kfunc_desc *d1 = b;
1872 
1873 	/* func_id is not greater than BTF_MAX_TYPE */
1874 	return d0->func_id - d1->func_id ?: d0->offset - d1->offset;
1875 }
1876 
1877 static int kfunc_btf_cmp_by_off(const void *a, const void *b)
1878 {
1879 	const struct bpf_kfunc_btf *d0 = a;
1880 	const struct bpf_kfunc_btf *d1 = b;
1881 
1882 	return d0->offset - d1->offset;
1883 }
1884 
1885 static const struct bpf_kfunc_desc *
1886 find_kfunc_desc(const struct bpf_prog *prog, u32 func_id, u16 offset)
1887 {
1888 	struct bpf_kfunc_desc desc = {
1889 		.func_id = func_id,
1890 		.offset = offset,
1891 	};
1892 	struct bpf_kfunc_desc_tab *tab;
1893 
1894 	tab = prog->aux->kfunc_tab;
1895 	return bsearch(&desc, tab->descs, tab->nr_descs,
1896 		       sizeof(tab->descs[0]), kfunc_desc_cmp_by_id_off);
1897 }
1898 
1899 static struct btf *__find_kfunc_desc_btf(struct bpf_verifier_env *env,
1900 					 s16 offset)
1901 {
1902 	struct bpf_kfunc_btf kf_btf = { .offset = offset };
1903 	struct bpf_kfunc_btf_tab *tab;
1904 	struct bpf_kfunc_btf *b;
1905 	struct module *mod;
1906 	struct btf *btf;
1907 	int btf_fd;
1908 
1909 	tab = env->prog->aux->kfunc_btf_tab;
1910 	b = bsearch(&kf_btf, tab->descs, tab->nr_descs,
1911 		    sizeof(tab->descs[0]), kfunc_btf_cmp_by_off);
1912 	if (!b) {
1913 		if (tab->nr_descs == MAX_KFUNC_BTFS) {
1914 			verbose(env, "too many different module BTFs\n");
1915 			return ERR_PTR(-E2BIG);
1916 		}
1917 
1918 		if (bpfptr_is_null(env->fd_array)) {
1919 			verbose(env, "kfunc offset > 0 without fd_array is invalid\n");
1920 			return ERR_PTR(-EPROTO);
1921 		}
1922 
1923 		if (copy_from_bpfptr_offset(&btf_fd, env->fd_array,
1924 					    offset * sizeof(btf_fd),
1925 					    sizeof(btf_fd)))
1926 			return ERR_PTR(-EFAULT);
1927 
1928 		btf = btf_get_by_fd(btf_fd);
1929 		if (IS_ERR(btf)) {
1930 			verbose(env, "invalid module BTF fd specified\n");
1931 			return btf;
1932 		}
1933 
1934 		if (!btf_is_module(btf)) {
1935 			verbose(env, "BTF fd for kfunc is not a module BTF\n");
1936 			btf_put(btf);
1937 			return ERR_PTR(-EINVAL);
1938 		}
1939 
1940 		mod = btf_try_get_module(btf);
1941 		if (!mod) {
1942 			btf_put(btf);
1943 			return ERR_PTR(-ENXIO);
1944 		}
1945 
1946 		b = &tab->descs[tab->nr_descs++];
1947 		b->btf = btf;
1948 		b->module = mod;
1949 		b->offset = offset;
1950 
1951 		sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]),
1952 		     kfunc_btf_cmp_by_off, NULL);
1953 	}
1954 	return b->btf;
1955 }
1956 
1957 void bpf_free_kfunc_btf_tab(struct bpf_kfunc_btf_tab *tab)
1958 {
1959 	if (!tab)
1960 		return;
1961 
1962 	while (tab->nr_descs--) {
1963 		module_put(tab->descs[tab->nr_descs].module);
1964 		btf_put(tab->descs[tab->nr_descs].btf);
1965 	}
1966 	kfree(tab);
1967 }
1968 
1969 static struct btf *find_kfunc_desc_btf(struct bpf_verifier_env *env, s16 offset)
1970 {
1971 	if (offset) {
1972 		if (offset < 0) {
1973 			/* In the future, negative offsets could be allowed, to raise
1974 			 * the limit of the fd index into fd_array, interpreted as a u16.
1975 			 */
1976 			verbose(env, "negative offset disallowed for kernel module function call\n");
1977 			return ERR_PTR(-EINVAL);
1978 		}
1979 
1980 		return __find_kfunc_desc_btf(env, offset);
1981 	}
1982 	return btf_vmlinux ?: ERR_PTR(-ENOENT);
1983 }
1984 
1985 static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id, s16 offset)
1986 {
1987 	const struct btf_type *func, *func_proto;
1988 	struct bpf_kfunc_btf_tab *btf_tab;
1989 	struct bpf_kfunc_desc_tab *tab;
1990 	struct bpf_prog_aux *prog_aux;
1991 	struct bpf_kfunc_desc *desc;
1992 	const char *func_name;
1993 	struct btf *desc_btf;
1994 	unsigned long call_imm;
1995 	unsigned long addr;
1996 	int err;
1997 
1998 	prog_aux = env->prog->aux;
1999 	tab = prog_aux->kfunc_tab;
2000 	btf_tab = prog_aux->kfunc_btf_tab;
2001 	if (!tab) {
2002 		if (!btf_vmlinux) {
2003 			verbose(env, "calling kernel function is not supported without CONFIG_DEBUG_INFO_BTF\n");
2004 			return -ENOTSUPP;
2005 		}
2006 
2007 		if (!env->prog->jit_requested) {
2008 			verbose(env, "JIT is required for calling kernel function\n");
2009 			return -ENOTSUPP;
2010 		}
2011 
2012 		if (!bpf_jit_supports_kfunc_call()) {
2013 			verbose(env, "JIT does not support calling kernel function\n");
2014 			return -ENOTSUPP;
2015 		}
2016 
2017 		if (!env->prog->gpl_compatible) {
2018 			verbose(env, "cannot call kernel function from non-GPL compatible program\n");
2019 			return -EINVAL;
2020 		}
2021 
2022 		tab = kzalloc(sizeof(*tab), GFP_KERNEL);
2023 		if (!tab)
2024 			return -ENOMEM;
2025 		prog_aux->kfunc_tab = tab;
2026 	}
2027 
2028 	/* func_id == 0 is always invalid, but instead of returning an error, be
2029 	 * conservative and wait until the dead code elimination pass before
2030 	 * returning an error, so that invalid calls that end up pruned out may
2031 	 * appear in BPF programs loaded from userspace.  It is also required
2032 	 * that offset be untouched for such calls.
2033 	 */
2034 	if (!func_id && !offset)
2035 		return 0;
2036 
2037 	if (!btf_tab && offset) {
2038 		btf_tab = kzalloc(sizeof(*btf_tab), GFP_KERNEL);
2039 		if (!btf_tab)
2040 			return -ENOMEM;
2041 		prog_aux->kfunc_btf_tab = btf_tab;
2042 	}
2043 
2044 	desc_btf = find_kfunc_desc_btf(env, offset);
2045 	if (IS_ERR(desc_btf)) {
2046 		verbose(env, "failed to find BTF for kernel function\n");
2047 		return PTR_ERR(desc_btf);
2048 	}
2049 
2050 	if (find_kfunc_desc(env->prog, func_id, offset))
2051 		return 0;
2052 
2053 	if (tab->nr_descs == MAX_KFUNC_DESCS) {
2054 		verbose(env, "too many different kernel function calls\n");
2055 		return -E2BIG;
2056 	}
2057 
2058 	func = btf_type_by_id(desc_btf, func_id);
2059 	if (!func || !btf_type_is_func(func)) {
2060 		verbose(env, "kernel btf_id %u is not a function\n",
2061 			func_id);
2062 		return -EINVAL;
2063 	}
2064 	func_proto = btf_type_by_id(desc_btf, func->type);
2065 	if (!func_proto || !btf_type_is_func_proto(func_proto)) {
2066 		verbose(env, "kernel function btf_id %u does not have a valid func_proto\n",
2067 			func_id);
2068 		return -EINVAL;
2069 	}
2070 
2071 	func_name = btf_name_by_offset(desc_btf, func->name_off);
2072 	addr = kallsyms_lookup_name(func_name);
2073 	if (!addr) {
2074 		verbose(env, "cannot find address for kernel function %s\n",
2075 			func_name);
2076 		return -EINVAL;
2077 	}
2078 
2079 	call_imm = BPF_CALL_IMM(addr);
2080 	/* Check whether or not the relative offset overflows desc->imm */
2081 	if ((unsigned long)(s32)call_imm != call_imm) {
2082 		verbose(env, "address of kernel function %s is out of range\n",
2083 			func_name);
2084 		return -EINVAL;
2085 	}
2086 
2087 	desc = &tab->descs[tab->nr_descs++];
2088 	desc->func_id = func_id;
2089 	desc->imm = call_imm;
2090 	desc->offset = offset;
2091 	err = btf_distill_func_proto(&env->log, desc_btf,
2092 				     func_proto, func_name,
2093 				     &desc->func_model);
2094 	if (!err)
2095 		sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]),
2096 		     kfunc_desc_cmp_by_id_off, NULL);
2097 	return err;
2098 }
2099 
2100 static int kfunc_desc_cmp_by_imm(const void *a, const void *b)
2101 {
2102 	const struct bpf_kfunc_desc *d0 = a;
2103 	const struct bpf_kfunc_desc *d1 = b;
2104 
2105 	if (d0->imm > d1->imm)
2106 		return 1;
2107 	else if (d0->imm < d1->imm)
2108 		return -1;
2109 	return 0;
2110 }
2111 
2112 static void sort_kfunc_descs_by_imm(struct bpf_prog *prog)
2113 {
2114 	struct bpf_kfunc_desc_tab *tab;
2115 
2116 	tab = prog->aux->kfunc_tab;
2117 	if (!tab)
2118 		return;
2119 
2120 	sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]),
2121 	     kfunc_desc_cmp_by_imm, NULL);
2122 }
2123 
2124 bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog)
2125 {
2126 	return !!prog->aux->kfunc_tab;
2127 }
2128 
2129 const struct btf_func_model *
2130 bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
2131 			 const struct bpf_insn *insn)
2132 {
2133 	const struct bpf_kfunc_desc desc = {
2134 		.imm = insn->imm,
2135 	};
2136 	const struct bpf_kfunc_desc *res;
2137 	struct bpf_kfunc_desc_tab *tab;
2138 
2139 	tab = prog->aux->kfunc_tab;
2140 	res = bsearch(&desc, tab->descs, tab->nr_descs,
2141 		      sizeof(tab->descs[0]), kfunc_desc_cmp_by_imm);
2142 
2143 	return res ? &res->func_model : NULL;
2144 }
2145 
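/* Illustrative note (sketch of the intended usage, not from the original
 * source): add_kfunc_call() above stores BPF_CALL_IMM(addr) in desc->imm,
 * and the patched kfunc call insn is expected to carry that same value in
 * insn->imm; once sort_kfunc_descs_by_imm() has reordered the table, the JIT
 * can recover the btf_func_model for a call with the bsearch() above keyed
 * on imm alone.
 */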
2146 static int add_subprog_and_kfunc(struct bpf_verifier_env *env)
2147 {
2148 	struct bpf_subprog_info *subprog = env->subprog_info;
2149 	struct bpf_insn *insn = env->prog->insnsi;
2150 	int i, ret, insn_cnt = env->prog->len;
2151 
2152 	/* Add entry function. */
2153 	ret = add_subprog(env, 0);
2154 	if (ret)
2155 		return ret;
2156 
2157 	for (i = 0; i < insn_cnt; i++, insn++) {
2158 		if (!bpf_pseudo_func(insn) && !bpf_pseudo_call(insn) &&
2159 		    !bpf_pseudo_kfunc_call(insn))
2160 			continue;
2161 
2162 		if (!env->bpf_capable) {
2163 			verbose(env, "loading/calling other bpf or kernel functions is allowed only for CAP_BPF and CAP_SYS_ADMIN\n");
2164 			return -EPERM;
2165 		}
2166 
2167 		if (bpf_pseudo_func(insn) || bpf_pseudo_call(insn))
2168 			ret = add_subprog(env, i + insn->imm + 1);
2169 		else
2170 			ret = add_kfunc_call(env, insn->imm, insn->off);
2171 
2172 		if (ret < 0)
2173 			return ret;
2174 	}
2175 
2176 	/* Add a fake 'exit' subprog to simplify the subprog iteration
2177 	 * logic. 'subprog_cnt' should not be increased.
2178 	 */
2179 	subprog[env->subprog_cnt].start = insn_cnt;
2180 
2181 	if (env->log.level & BPF_LOG_LEVEL2)
2182 		for (i = 0; i < env->subprog_cnt; i++)
2183 			verbose(env, "func#%d @%d\n", i, subprog[i].start);
2184 
2185 	return 0;
2186 }
2187 
2188 static int check_subprogs(struct bpf_verifier_env *env)
2189 {
2190 	int i, subprog_start, subprog_end, off, cur_subprog = 0;
2191 	struct bpf_subprog_info *subprog = env->subprog_info;
2192 	struct bpf_insn *insn = env->prog->insnsi;
2193 	int insn_cnt = env->prog->len;
2194 
2195 	/* now check that all jumps are within the same subprog */
2196 	subprog_start = subprog[cur_subprog].start;
2197 	subprog_end = subprog[cur_subprog + 1].start;
2198 	for (i = 0; i < insn_cnt; i++) {
2199 		u8 code = insn[i].code;
2200 
2201 		if (code == (BPF_JMP | BPF_CALL) &&
2202 		    insn[i].imm == BPF_FUNC_tail_call &&
2203 		    insn[i].src_reg != BPF_PSEUDO_CALL)
2204 			subprog[cur_subprog].has_tail_call = true;
2205 		if (BPF_CLASS(code) == BPF_LD &&
2206 		    (BPF_MODE(code) == BPF_ABS || BPF_MODE(code) == BPF_IND))
2207 			subprog[cur_subprog].has_ld_abs = true;
2208 		if (BPF_CLASS(code) != BPF_JMP && BPF_CLASS(code) != BPF_JMP32)
2209 			goto next;
2210 		if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL)
2211 			goto next;
2212 		off = i + insn[i].off + 1;
2213 		if (off < subprog_start || off >= subprog_end) {
2214 			verbose(env, "jump out of range from insn %d to %d\n", i, off);
2215 			return -EINVAL;
2216 		}
2217 next:
2218 		if (i == subprog_end - 1) {
2219 			/* to avoid fall-through from one subprog into another
2220 			 * the last insn of the subprog should be either exit
2221 			 * or unconditional jump back
2222 			 */
2223 			if (code != (BPF_JMP | BPF_EXIT) &&
2224 			    code != (BPF_JMP | BPF_JA)) {
2225 				verbose(env, "last insn is not an exit or jmp\n");
2226 				return -EINVAL;
2227 			}
2228 			subprog_start = subprog_end;
2229 			cur_subprog++;
2230 			if (cur_subprog < env->subprog_cnt)
2231 				subprog_end = subprog[cur_subprog + 1].start;
2232 		}
2233 	}
2234 	return 0;
2235 }
2236 
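/* Illustrative example (insn indexes hypothetical): with subprog starts at
 * insns 0 and 12, a conditional jump at insn 4 whose target computes to insn
 * 13 trips the "jump out of range" check above, since insn 13 belongs to the
 * next subprog; insn 11 must also be an exit or an unconditional ja so that
 * execution cannot fall through into insn 12.
 */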
2237 /* Parentage chain of this register (or stack slot) should take care of all
2238  * issues like callee-saved registers, stack slot allocation time, etc.
2239  */
2240 static int mark_reg_read(struct bpf_verifier_env *env,
2241 			 const struct bpf_reg_state *state,
2242 			 struct bpf_reg_state *parent, u8 flag)
2243 {
2244 	bool writes = parent == state->parent; /* Observe write marks */
2245 	int cnt = 0;
2246 
2247 	while (parent) {
2248 		/* if read wasn't screened by an earlier write ... */
2249 		if (writes && state->live & REG_LIVE_WRITTEN)
2250 			break;
2251 		if (parent->live & REG_LIVE_DONE) {
2252 			verbose(env, "verifier BUG type %s var_off %lld off %d\n",
2253 				reg_type_str(env, parent->type),
2254 				parent->var_off.value, parent->off);
2255 			return -EFAULT;
2256 		}
2257 		/* The first condition is more likely to be true than the
2258 		 * second, so check it first.
2259 		 */
2260 		if ((parent->live & REG_LIVE_READ) == flag ||
2261 		    parent->live & REG_LIVE_READ64)
2262 			/* The parentage chain never changes and
2263 			 * this parent was already marked as LIVE_READ.
2264 			 * There is no need to keep walking the chain again and
2265 			 * keep re-marking all parents as LIVE_READ.
2266 			 * This case happens when the same register is read
2267 			 * multiple times without writes into it in-between.
2268 			 * Also, if parent has the stronger REG_LIVE_READ64 set,
2269 			 * then no need to set the weak REG_LIVE_READ32.
2270 			 */
2271 			break;
2272 		/* ... then we depend on parent's value */
2273 		parent->live |= flag;
2274 		/* REG_LIVE_READ64 overrides REG_LIVE_READ32. */
2275 		if (flag == REG_LIVE_READ64)
2276 			parent->live &= ~REG_LIVE_READ32;
2277 		state = parent;
2278 		parent = state->parent;
2279 		writes = true;
2280 		cnt++;
2281 	}
2282 
2283 	if (env->longest_mark_read_walk < cnt)
2284 		env->longest_mark_read_walk = cnt;
2285 	return 0;
2286 }
2287 
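/* Illustrative note: when the current state reads, say, r6 that was last
 * written in a grandparent state, the walk above sets REG_LIVE_READ64 (or
 * READ32) on r6 in each intermediate parent until it reaches a state that
 * wrote r6 or one that already carries an equal-or-stronger read mark, so
 * that later state pruning knows r6 still matters there.
 */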
2288 /* This function is supposed to be used by the following 32-bit optimization
2289  * code only. It returns TRUE if the source or destination register operates
2290  * on 64 bits, otherwise it returns FALSE.
2291  */
2292 static bool is_reg64(struct bpf_verifier_env *env, struct bpf_insn *insn,
2293 		     u32 regno, struct bpf_reg_state *reg, enum reg_arg_type t)
2294 {
2295 	u8 code, class, op;
2296 
2297 	code = insn->code;
2298 	class = BPF_CLASS(code);
2299 	op = BPF_OP(code);
2300 	if (class == BPF_JMP) {
2301 		/* BPF_EXIT for "main" will reach here. Return TRUE
2302 		 * conservatively.
2303 		 */
2304 		if (op == BPF_EXIT)
2305 			return true;
2306 		if (op == BPF_CALL) {
2307 			/* BPF to BPF call will reach here because of marking
2308 			 * the caller-saved clobber with DST_OP_NO_MARK, for which we
2309 			 * don't care about the register def because they are
2310 			 * already marked as NOT_INIT anyway.
2311 			 */
2312 			if (insn->src_reg == BPF_PSEUDO_CALL)
2313 				return false;
2314 			/* Helper call will reach here because of arg type
2315 			 * check, conservatively return TRUE.
2316 			 */
2317 			if (t == SRC_OP)
2318 				return true;
2319 
2320 			return false;
2321 		}
2322 	}
2323 
2324 	if (class == BPF_ALU64 || class == BPF_JMP ||
2325 	    /* BPF_END always uses the BPF_ALU class. */
2326 	    (class == BPF_ALU && op == BPF_END && insn->imm == 64))
2327 		return true;
2328 
2329 	if (class == BPF_ALU || class == BPF_JMP32)
2330 		return false;
2331 
2332 	if (class == BPF_LDX) {
2333 		if (t != SRC_OP)
2334 			return BPF_SIZE(code) == BPF_DW;
2335 		/* LDX source must be ptr. */
2336 		return true;
2337 	}
2338 
2339 	if (class == BPF_STX) {
2340 		/* BPF_STX (including atomic variants) has multiple source
2341 		 * operands, one of which is a ptr. Check whether the caller is
2342 		 * asking about it.
2343 		 */
2344 		if (t == SRC_OP && reg->type != SCALAR_VALUE)
2345 			return true;
2346 		return BPF_SIZE(code) == BPF_DW;
2347 	}
2348 
2349 	if (class == BPF_LD) {
2350 		u8 mode = BPF_MODE(code);
2351 
2352 		/* LD_IMM64 */
2353 		if (mode == BPF_IMM)
2354 			return true;
2355 
2356 		/* Both LD_IND and LD_ABS return 32-bit data. */
2357 		if (t != SRC_OP)
2358 			return false;
2359 
2360 		/* Implicit ctx ptr. */
2361 		if (regno == BPF_REG_6)
2362 			return true;
2363 
2364 		/* Explicit source could be any width. */
2365 		return true;
2366 	}
2367 
2368 	if (class == BPF_ST)
2369 		/* The only source register for BPF_ST is a ptr. */
2370 		return true;
2371 
2372 	/* Conservatively return true by default. */
2373 	return true;
2374 }
2375 
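/* Illustrative note: per the logic above, a BPF_ALU64 add or an LDX of size
 * BPF_DW defines a 64-bit value, while a BPF_ALU (32-bit) add or an LDX of
 * size BPF_W defines only a subregister; insn_has_def32() below relies on
 * this to spot instructions whose 32-bit result may later need an explicit
 * zero extension.
 */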
2376 /* Return the regno defined by the insn, or -1. */
2377 static int insn_def_regno(const struct bpf_insn *insn)
2378 {
2379 	switch (BPF_CLASS(insn->code)) {
2380 	case BPF_JMP:
2381 	case BPF_JMP32:
2382 	case BPF_ST:
2383 		return -1;
2384 	case BPF_STX:
2385 		if (BPF_MODE(insn->code) == BPF_ATOMIC &&
2386 		    (insn->imm & BPF_FETCH)) {
2387 			if (insn->imm == BPF_CMPXCHG)
2388 				return BPF_REG_0;
2389 			else
2390 				return insn->src_reg;
2391 		} else {
2392 			return -1;
2393 		}
2394 	default:
2395 		return insn->dst_reg;
2396 	}
2397 }
2398 
2399 /* Return TRUE if INSN has defined any 32-bit value explicitly. */
2400 static bool insn_has_def32(struct bpf_verifier_env *env, struct bpf_insn *insn)
2401 {
2402 	int dst_reg = insn_def_regno(insn);
2403 
2404 	if (dst_reg == -1)
2405 		return false;
2406 
2407 	return !is_reg64(env, insn, dst_reg, NULL, DST_OP);
2408 }
2409 
2410 static void mark_insn_zext(struct bpf_verifier_env *env,
2411 			   struct bpf_reg_state *reg)
2412 {
2413 	s32 def_idx = reg->subreg_def;
2414 
2415 	if (def_idx == DEF_NOT_SUBREG)
2416 		return;
2417 
2418 	env->insn_aux_data[def_idx - 1].zext_dst = true;
2419 	/* The dst will be zero extended, so won't be sub-register anymore. */
2420 	reg->subreg_def = DEF_NOT_SUBREG;
2421 }
2422 
2423 static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
2424 			 enum reg_arg_type t)
2425 {
2426 	struct bpf_verifier_state *vstate = env->cur_state;
2427 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
2428 	struct bpf_insn *insn = env->prog->insnsi + env->insn_idx;
2429 	struct bpf_reg_state *reg, *regs = state->regs;
2430 	bool rw64;
2431 
2432 	if (regno >= MAX_BPF_REG) {
2433 		verbose(env, "R%d is invalid\n", regno);
2434 		return -EINVAL;
2435 	}
2436 
2437 	mark_reg_scratched(env, regno);
2438 
2439 	reg = &regs[regno];
2440 	rw64 = is_reg64(env, insn, regno, reg, t);
2441 	if (t == SRC_OP) {
2442 		/* check whether register used as source operand can be read */
2443 		if (reg->type == NOT_INIT) {
2444 			verbose(env, "R%d !read_ok\n", regno);
2445 			return -EACCES;
2446 		}
2447 		/* We don't need to worry about FP liveness because it's read-only */
2448 		if (regno == BPF_REG_FP)
2449 			return 0;
2450 
2451 		if (rw64)
2452 			mark_insn_zext(env, reg);
2453 
2454 		return mark_reg_read(env, reg, reg->parent,
2455 				     rw64 ? REG_LIVE_READ64 : REG_LIVE_READ32);
2456 	} else {
2457 		/* check whether register used as dest operand can be written to */
2458 		if (regno == BPF_REG_FP) {
2459 			verbose(env, "frame pointer is read only\n");
2460 			return -EACCES;
2461 		}
2462 		reg->live |= REG_LIVE_WRITTEN;
2463 		reg->subreg_def = rw64 ? DEF_NOT_SUBREG : env->insn_idx + 1;
2464 		if (t == DST_OP)
2465 			mark_reg_unknown(env, regs, regno);
2466 	}
2467 	return 0;
2468 }
2469 
2470 /* for any branch, call, exit record the history of jmps in the given state */
2471 static int push_jmp_history(struct bpf_verifier_env *env,
2472 			    struct bpf_verifier_state *cur)
2473 {
2474 	u32 cnt = cur->jmp_history_cnt;
2475 	struct bpf_idx_pair *p;
2476 
2477 	cnt++;
2478 	p = krealloc(cur->jmp_history, cnt * sizeof(*p), GFP_USER);
2479 	if (!p)
2480 		return -ENOMEM;
2481 	p[cnt - 1].idx = env->insn_idx;
2482 	p[cnt - 1].prev_idx = env->prev_insn_idx;
2483 	cur->jmp_history = p;
2484 	cur->jmp_history_cnt = cnt;
2485 	return 0;
2486 }
2487 
2488 /* Backtrack one insn at a time. If idx is not at the top of recorded
2489  * history then previous instruction came from straight line execution.
2490  */
2491 static int get_prev_insn_idx(struct bpf_verifier_state *st, int i,
2492 			     u32 *history)
2493 {
2494 	u32 cnt = *history;
2495 
2496 	if (cnt && st->jmp_history[cnt - 1].idx == i) {
2497 		i = st->jmp_history[cnt - 1].prev_idx;
2498 		(*history)--;
2499 	} else {
2500 		i--;
2501 	}
2502 	return i;
2503 }
2504 
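/* Illustrative example (insn indexes hypothetical): if the jmp history
 * recorded that insn 7 was reached from insn 3, then during backtracking
 * get_prev_insn_idx(st, 7, &history) returns 3 and consumes that history
 * entry; at any other insn it simply steps back to i - 1, i.e. straight-line
 * execution.
 */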
2505 static const char *disasm_kfunc_name(void *data, const struct bpf_insn *insn)
2506 {
2507 	const struct btf_type *func;
2508 	struct btf *desc_btf;
2509 
2510 	if (insn->src_reg != BPF_PSEUDO_KFUNC_CALL)
2511 		return NULL;
2512 
2513 	desc_btf = find_kfunc_desc_btf(data, insn->off);
2514 	if (IS_ERR(desc_btf))
2515 		return "<error>";
2516 
2517 	func = btf_type_by_id(desc_btf, insn->imm);
2518 	return btf_name_by_offset(desc_btf, func->name_off);
2519 }
2520 
2521 /* For given verifier state backtrack_insn() is called from the last insn to
2522  * the first insn. Its purpose is to compute a bitmask of registers and
2523  * stack slots that need precision in the parent verifier state.
2524  */
2525 static int backtrack_insn(struct bpf_verifier_env *env, int idx,
2526 			  u32 *reg_mask, u64 *stack_mask)
2527 {
2528 	const struct bpf_insn_cbs cbs = {
2529 		.cb_call	= disasm_kfunc_name,
2530 		.cb_print	= verbose,
2531 		.private_data	= env,
2532 	};
2533 	struct bpf_insn *insn = env->prog->insnsi + idx;
2534 	u8 class = BPF_CLASS(insn->code);
2535 	u8 opcode = BPF_OP(insn->code);
2536 	u8 mode = BPF_MODE(insn->code);
2537 	u32 dreg = 1u << insn->dst_reg;
2538 	u32 sreg = 1u << insn->src_reg;
2539 	u32 spi;
2540 
2541 	if (insn->code == 0)
2542 		return 0;
2543 	if (env->log.level & BPF_LOG_LEVEL2) {
2544 		verbose(env, "regs=%x stack=%llx before ", *reg_mask, *stack_mask);
2545 		verbose(env, "%d: ", idx);
2546 		print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
2547 	}
2548 
2549 	if (class == BPF_ALU || class == BPF_ALU64) {
2550 		if (!(*reg_mask & dreg))
2551 			return 0;
2552 		if (opcode == BPF_MOV) {
2553 			if (BPF_SRC(insn->code) == BPF_X) {
2554 				/* dreg = sreg
2555 				 * dreg needs precision after this insn
2556 				 * sreg needs precision before this insn
2557 				 */
2558 				*reg_mask &= ~dreg;
2559 				*reg_mask |= sreg;
2560 			} else {
2561 				/* dreg = K
2562 				 * dreg needs precision after this insn.
2563 				 * Corresponding register is already marked
2564 				 * as precise=true in this verifier state.
2565 				 * No further markings in parent are necessary
2566 				 */
2567 				*reg_mask &= ~dreg;
2568 			}
2569 		} else {
2570 			if (BPF_SRC(insn->code) == BPF_X) {
2571 				/* dreg += sreg
2572 				 * both dreg and sreg need precision
2573 				 * before this insn
2574 				 */
2575 				*reg_mask |= sreg;
2576 			} /* else dreg += K
2577 			   * dreg still needs precision before this insn
2578 			   */
2579 		}
2580 	} else if (class == BPF_LDX) {
2581 		if (!(*reg_mask & dreg))
2582 			return 0;
2583 		*reg_mask &= ~dreg;
2584 
2585 		/* scalars can only be spilled into the stack w/o losing precision.
2586 		 * A load from any other memory can be zero extended.
2587 		 * The desire to keep that precision is already indicated
2588 		 * by the 'precise' mark in the corresponding register of this state.
2589 		 * No further tracking is necessary.
2590 		 */
2591 		if (insn->src_reg != BPF_REG_FP)
2592 			return 0;
2593 
2594 		/* dreg = *(u64 *)[fp - off] was a fill from the stack.
2595 		 * That [fp - off] slot contains a scalar that needs to be
2596 		 * tracked with precision.
2597 		 */
2598 		spi = (-insn->off - 1) / BPF_REG_SIZE;
2599 		if (spi >= 64) {
2600 			verbose(env, "BUG spi %d\n", spi);
2601 			WARN_ONCE(1, "verifier backtracking bug");
2602 			return -EFAULT;
2603 		}
2604 		*stack_mask |= 1ull << spi;
2605 	} else if (class == BPF_STX || class == BPF_ST) {
2606 		if (*reg_mask & dreg)
2607 			/* stx & st shouldn't be using _scalar_ dst_reg
2608 			 * to access memory. It means backtracking
2609 			 * encountered a case of pointer subtraction.
2610 			 */
2611 			return -ENOTSUPP;
2612 		/* scalars can only be spilled into stack */
2613 		if (insn->dst_reg != BPF_REG_FP)
2614 			return 0;
2615 		spi = (-insn->off - 1) / BPF_REG_SIZE;
2616 		if (spi >= 64) {
2617 			verbose(env, "BUG spi %d\n", spi);
2618 			WARN_ONCE(1, "verifier backtracking bug");
2619 			return -EFAULT;
2620 		}
2621 		if (!(*stack_mask & (1ull << spi)))
2622 			return 0;
2623 		*stack_mask &= ~(1ull << spi);
2624 		if (class == BPF_STX)
2625 			*reg_mask |= sreg;
2626 	} else if (class == BPF_JMP || class == BPF_JMP32) {
2627 		if (opcode == BPF_CALL) {
2628 			if (insn->src_reg == BPF_PSEUDO_CALL)
2629 				return -ENOTSUPP;
2630 			/* regular helper call sets R0 */
2631 			*reg_mask &= ~1;
2632 			if (*reg_mask & 0x3f) {
2633 				/* if backtracing was looking for registers R1-R5
2634 				 * they should have been found already.
2635 				 */
2636 				verbose(env, "BUG regs %x\n", *reg_mask);
2637 				WARN_ONCE(1, "verifier backtracking bug");
2638 				return -EFAULT;
2639 			}
2640 		} else if (opcode == BPF_EXIT) {
2641 			return -ENOTSUPP;
2642 		}
2643 	} else if (class == BPF_LD) {
2644 		if (!(*reg_mask & dreg))
2645 			return 0;
2646 		*reg_mask &= ~dreg;
2647 		/* It's ld_imm64 or ld_abs or ld_ind.
2648 		 * For ld_imm64 no further tracking of precision
2649 		 * into parent is necessary
2650 		 */
2651 		if (mode == BPF_IND || mode == BPF_ABS)
2652 			/* to be analyzed */
2653 			return -ENOTSUPP;
2654 	}
2655 	return 0;
2656 }
2657 
2658 /* the scalar precision tracking algorithm:
2659  * . at the start all registers have precise=false.
2660  * . scalar ranges are tracked as normal through alu and jmp insns.
2661  * . once precise value of the scalar register is used in:
2662  *   .  ptr + scalar alu
2663  *   . if (scalar cond K|scalar)
2664  *   .  helper_call(.., scalar, ...) where ARG_CONST is expected
2665  *   backtrack through the verifier states and mark all registers and
2666  *   stack slots with spilled constants that these scalar regisers
2667  *   stack slots with spilled constants that these scalar registers
2668  * . during state pruning two registers (or spilled stack slots)
2669  *   are equivalent if both are not precise.
2670  *
2671  * Note the verifier cannot simply walk register parentage chain,
2672  * since many different registers and stack slots could have been
2673  * used to compute single precise scalar.
2674  *
2675  * The approach of starting with precise=true for all registers and then
2676  * backtrack to mark a register as not precise when the verifier detects
2677  * that the program doesn't care about the specific value (e.g., when a helper
2678  * takes a register as an ARG_ANYTHING parameter) is not safe.
2679  *
2680  * It's ok to walk single parentage chain of the verifier states.
2681  * It's possible that this backtracking will go all the way till 1st insn.
2682  * All other branches will be explored for needing precision later.
2683  *
2684  * The backtracking needs to deal with cases like:
2685  *   R8=map_value(id=0,off=0,ks=4,vs=1952,imm=0) R9_w=map_value(id=0,off=40,ks=4,vs=1952,imm=0)
2686  * r9 -= r8
2687  * r5 = r9
2688  * if r5 > 0x79f goto pc+7
2689  *    R5_w=inv(id=0,umax_value=1951,var_off=(0x0; 0x7ff))
2690  * r5 += 1
2691  * ...
2692  * call bpf_perf_event_output#25
2693  *   where .arg5_type = ARG_CONST_SIZE_OR_ZERO
2694  *
2695  * and this case:
2696  * r6 = 1
2697  * call foo // uses callee's r6 inside to compute r0
2698  * r0 += r6
2699  * if r0 == 0 goto
2700  *
2701  * to track above reg_mask/stack_mask needs to be independent for each frame.
2702  *
2703  * Also if parent's curframe > frame where backtracking started,
2704  * the verifier needs to mark registers in both frames, otherwise callees
2705  * may incorrectly prune callers. This is similar to
2706  * commit 7640ead93924 ("bpf: verifier: make sure callees don't prune with caller differences")
2707  *
2708  * For now backtracking falls back into conservative marking.
2709  */
2710 static void mark_all_scalars_precise(struct bpf_verifier_env *env,
2711 				     struct bpf_verifier_state *st)
2712 {
2713 	struct bpf_func_state *func;
2714 	struct bpf_reg_state *reg;
2715 	int i, j;
2716 
2717 	/* big hammer: mark all scalars precise in this path.
2718 	 * pop_stack may still get !precise scalars.
2719 	 */
2720 	for (; st; st = st->parent)
2721 		for (i = 0; i <= st->curframe; i++) {
2722 			func = st->frame[i];
2723 			for (j = 0; j < BPF_REG_FP; j++) {
2724 				reg = &func->regs[j];
2725 				if (reg->type != SCALAR_VALUE)
2726 					continue;
2727 				reg->precise = true;
2728 			}
2729 			for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) {
2730 				if (!is_spilled_reg(&func->stack[j]))
2731 					continue;
2732 				reg = &func->stack[j].spilled_ptr;
2733 				if (reg->type != SCALAR_VALUE)
2734 					continue;
2735 				reg->precise = true;
2736 			}
2737 		}
2738 }
2739 
2740 static int __mark_chain_precision(struct bpf_verifier_env *env, int regno,
2741 				  int spi)
2742 {
2743 	struct bpf_verifier_state *st = env->cur_state;
2744 	int first_idx = st->first_insn_idx;
2745 	int last_idx = env->insn_idx;
2746 	struct bpf_func_state *func;
2747 	struct bpf_reg_state *reg;
2748 	u32 reg_mask = regno >= 0 ? 1u << regno : 0;
2749 	u64 stack_mask = spi >= 0 ? 1ull << spi : 0;
2750 	bool skip_first = true;
2751 	bool new_marks = false;
2752 	int i, err;
2753 
2754 	if (!env->bpf_capable)
2755 		return 0;
2756 
2757 	func = st->frame[st->curframe];
2758 	if (regno >= 0) {
2759 		reg = &func->regs[regno];
2760 		if (reg->type != SCALAR_VALUE) {
2761 			WARN_ONCE(1, "backtracing misuse");
2762 			WARN_ONCE(1, "backtracking misuse");
2763 		}
2764 		if (!reg->precise)
2765 			new_marks = true;
2766 		else
2767 			reg_mask = 0;
2768 		reg->precise = true;
2769 	}
2770 
2771 	while (spi >= 0) {
2772 		if (!is_spilled_reg(&func->stack[spi])) {
2773 			stack_mask = 0;
2774 			break;
2775 		}
2776 		reg = &func->stack[spi].spilled_ptr;
2777 		if (reg->type != SCALAR_VALUE) {
2778 			stack_mask = 0;
2779 			break;
2780 		}
2781 		if (!reg->precise)
2782 			new_marks = true;
2783 		else
2784 			stack_mask = 0;
2785 		reg->precise = true;
2786 		break;
2787 	}
2788 
2789 	if (!new_marks)
2790 		return 0;
2791 	if (!reg_mask && !stack_mask)
2792 		return 0;
2793 	for (;;) {
2794 		DECLARE_BITMAP(mask, 64);
2795 		u32 history = st->jmp_history_cnt;
2796 
2797 		if (env->log.level & BPF_LOG_LEVEL2)
2798 			verbose(env, "last_idx %d first_idx %d\n", last_idx, first_idx);
2799 		for (i = last_idx;;) {
2800 			if (skip_first) {
2801 				err = 0;
2802 				skip_first = false;
2803 			} else {
2804 				err = backtrack_insn(env, i, &reg_mask, &stack_mask);
2805 			}
2806 			if (err == -ENOTSUPP) {
2807 				mark_all_scalars_precise(env, st);
2808 				return 0;
2809 			} else if (err) {
2810 				return err;
2811 			}
2812 			if (!reg_mask && !stack_mask)
2813 				/* Found assignment(s) into tracked register in this state.
2814 				 * Since this state is already marked, just return.
2815 				 * Nothing to be tracked further in the parent state.
2816 				 */
2817 				return 0;
2818 			if (i == first_idx)
2819 				break;
2820 			i = get_prev_insn_idx(st, i, &history);
2821 			if (i >= env->prog->len) {
2822 				/* This can happen if backtracking reached insn 0
2823 				 * and there are still reg_mask or stack_mask
2824 				 * to backtrack.
2825 				 * It means the backtracking missed the spot where
2826 				 * particular register was initialized with a constant.
2827 				 */
2828 				verbose(env, "BUG backtracking idx %d\n", i);
2829 				WARN_ONCE(1, "verifier backtracking bug");
2830 				return -EFAULT;
2831 			}
2832 		}
2833 		st = st->parent;
2834 		if (!st)
2835 			break;
2836 
2837 		new_marks = false;
2838 		func = st->frame[st->curframe];
2839 		bitmap_from_u64(mask, reg_mask);
2840 		for_each_set_bit(i, mask, 32) {
2841 			reg = &func->regs[i];
2842 			if (reg->type != SCALAR_VALUE) {
2843 				reg_mask &= ~(1u << i);
2844 				continue;
2845 			}
2846 			if (!reg->precise)
2847 				new_marks = true;
2848 			reg->precise = true;
2849 		}
2850 
2851 		bitmap_from_u64(mask, stack_mask);
2852 		for_each_set_bit(i, mask, 64) {
2853 			if (i >= func->allocated_stack / BPF_REG_SIZE) {
2854 				/* the sequence of instructions:
2855 				 * 2: (bf) r3 = r10
2856 				 * 3: (7b) *(u64 *)(r3 -8) = r0
2857 				 * 4: (79) r4 = *(u64 *)(r10 -8)
2858 				 * doesn't contain jmps. It's backtracked
2859 				 * as a single block.
2860 				 * During backtracking insn 3 is not recognized as
2861 				 * stack access, so at the end of backtracking
2862 				 * stack slot fp-8 is still marked in stack_mask.
2863 				 * However the parent state may not have accessed
2864 				 * fp-8 and it's "unallocated" stack space.
2865 				 * In such case fallback to conservative.
2866 				 */
2867 				mark_all_scalars_precise(env, st);
2868 				return 0;
2869 			}
2870 
2871 			if (!is_spilled_reg(&func->stack[i])) {
2872 				stack_mask &= ~(1ull << i);
2873 				continue;
2874 			}
2875 			reg = &func->stack[i].spilled_ptr;
2876 			if (reg->type != SCALAR_VALUE) {
2877 				stack_mask &= ~(1ull << i);
2878 				continue;
2879 			}
2880 			if (!reg->precise)
2881 				new_marks = true;
2882 			reg->precise = true;
2883 		}
2884 		if (env->log.level & BPF_LOG_LEVEL2) {
2885 			verbose(env, "parent %s regs=%x stack=%llx marks:",
2886 				new_marks ? "didn't have" : "already had",
2887 				reg_mask, stack_mask);
2888 			print_verifier_state(env, func, true);
2889 		}
2890 
2891 		if (!reg_mask && !stack_mask)
2892 			break;
2893 		if (!new_marks)
2894 			break;
2895 
2896 		last_idx = st->last_insn_idx;
2897 		first_idx = st->first_insn_idx;
2898 	}
2899 	return 0;
2900 }
2901 
2902 static int mark_chain_precision(struct bpf_verifier_env *env, int regno)
2903 {
2904 	return __mark_chain_precision(env, regno, -1);
2905 }
2906 
2907 static int mark_chain_precision_stack(struct bpf_verifier_env *env, int spi)
2908 {
2909 	return __mark_chain_precision(env, -1, spi);
2910 }
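/* Illustrative note (sketch of the intended usage): when a scalar in, say,
 * R2 is used where a precise value matters (pointer + scalar ALU, a helper's
 * ARG_CONST_SIZE argument, ...), the caller invokes
 * mark_chain_precision(env, BPF_REG_2); the spilled-slot variant,
 * mark_chain_precision_stack(), starts the same backtracking from a stack
 * spi instead of a register.
 */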
2911 
2912 static bool is_spillable_regtype(enum bpf_reg_type type)
2913 {
2914 	switch (base_type(type)) {
2915 	case PTR_TO_MAP_VALUE:
2916 	case PTR_TO_STACK:
2917 	case PTR_TO_CTX:
2918 	case PTR_TO_PACKET:
2919 	case PTR_TO_PACKET_META:
2920 	case PTR_TO_PACKET_END:
2921 	case PTR_TO_FLOW_KEYS:
2922 	case CONST_PTR_TO_MAP:
2923 	case PTR_TO_SOCKET:
2924 	case PTR_TO_SOCK_COMMON:
2925 	case PTR_TO_TCP_SOCK:
2926 	case PTR_TO_XDP_SOCK:
2927 	case PTR_TO_BTF_ID:
2928 	case PTR_TO_BUF:
2929 	case PTR_TO_MEM:
2930 	case PTR_TO_FUNC:
2931 	case PTR_TO_MAP_KEY:
2932 		return true;
2933 	default:
2934 		return false;
2935 	}
2936 }
2937 
2938 /* Does this register contain a constant zero? */
2939 static bool register_is_null(struct bpf_reg_state *reg)
2940 {
2941 	return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0);
2942 }
2943 
2944 static bool register_is_const(struct bpf_reg_state *reg)
2945 {
2946 	return reg->type == SCALAR_VALUE && tnum_is_const(reg->var_off);
2947 }
2948 
2949 static bool __is_scalar_unbounded(struct bpf_reg_state *reg)
2950 {
2951 	return tnum_is_unknown(reg->var_off) &&
2952 	       reg->smin_value == S64_MIN && reg->smax_value == S64_MAX &&
2953 	       reg->umin_value == 0 && reg->umax_value == U64_MAX &&
2954 	       reg->s32_min_value == S32_MIN && reg->s32_max_value == S32_MAX &&
2955 	       reg->u32_min_value == 0 && reg->u32_max_value == U32_MAX;
2956 }
2957 
2958 static bool register_is_bounded(struct bpf_reg_state *reg)
2959 {
2960 	return reg->type == SCALAR_VALUE && !__is_scalar_unbounded(reg);
2961 }
2962 
2963 static bool __is_pointer_value(bool allow_ptr_leaks,
2964 			       const struct bpf_reg_state *reg)
2965 {
2966 	if (allow_ptr_leaks)
2967 		return false;
2968 
2969 	return reg->type != SCALAR_VALUE;
2970 }
2971 
2972 static void save_register_state(struct bpf_func_state *state,
2973 				int spi, struct bpf_reg_state *reg,
2974 				int size)
2975 {
2976 	int i;
2977 
2978 	state->stack[spi].spilled_ptr = *reg;
2979 	if (size == BPF_REG_SIZE)
2980 		state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
2981 
2982 	for (i = BPF_REG_SIZE; i > BPF_REG_SIZE - size; i--)
2983 		state->stack[spi].slot_type[i - 1] = STACK_SPILL;
2984 
2985 	/* size < 8 bytes spill */
2986 	for (; i; i--)
2987 		scrub_spilled_slot(&state->stack[spi].slot_type[i - 1]);
2988 }
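/* Illustrative example: spilling a 4-byte register into slot spi marks
 * slot_type[7..4] as STACK_SPILL and scrubs the remaining bytes via
 * scrub_spilled_slot(); only a full 8-byte (BPF_REG_SIZE) spill additionally
 * gets the REG_LIVE_WRITTEN mark.
 */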
2989 
2990 /* check_stack_{read,write}_fixed_off functions track spill/fill of registers,
2991  * stack boundary and alignment are checked in check_mem_access()
2992  */
2993 static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
2994 				       /* stack frame we're writing to */
2995 				       struct bpf_func_state *state,
2996 				       int off, int size, int value_regno,
2997 				       int insn_idx)
2998 {
2999 	struct bpf_func_state *cur; /* state of the current function */
3000 	int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err;
3001 	u32 dst_reg = env->prog->insnsi[insn_idx].dst_reg;
3002 	struct bpf_reg_state *reg = NULL;
3003 
3004 	err = grow_stack_state(state, round_up(slot + 1, BPF_REG_SIZE));
3005 	if (err)
3006 		return err;
3007 	/* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
3008 	 * so it's aligned access and [off, off + size) are within stack limits
3009 	 */
3010 	if (!env->allow_ptr_leaks &&
3011 	    state->stack[spi].slot_type[0] == STACK_SPILL &&
3012 	    size != BPF_REG_SIZE) {
3013 		verbose(env, "attempt to corrupt spilled pointer on stack\n");
3014 		return -EACCES;
3015 	}
3016 
3017 	cur = env->cur_state->frame[env->cur_state->curframe];
3018 	if (value_regno >= 0)
3019 		reg = &cur->regs[value_regno];
3020 	if (!env->bypass_spec_v4) {
3021 		bool sanitize = reg && is_spillable_regtype(reg->type);
3022 
3023 		for (i = 0; i < size; i++) {
3024 			if (state->stack[spi].slot_type[i] == STACK_INVALID) {
3025 				sanitize = true;
3026 				break;
3027 			}
3028 		}
3029 
3030 		if (sanitize)
3031 			env->insn_aux_data[insn_idx].sanitize_stack_spill = true;
3032 	}
3033 
3034 	mark_stack_slot_scratched(env, spi);
3035 	if (reg && !(off % BPF_REG_SIZE) && register_is_bounded(reg) &&
3036 	    !register_is_null(reg) && env->bpf_capable) {
3037 		if (dst_reg != BPF_REG_FP) {
3038 			/* The backtracking logic can only recognize explicit
3039 			 * stack slot addresses like [fp - 8]. Other spills of a
3040 			 * scalar via a different register have to be conservative.
3041 			 * Backtrack from here and mark all registers as precise
3042 			 * that contributed into 'reg' being a constant.
3043 			 */
3044 			err = mark_chain_precision(env, value_regno);
3045 			if (err)
3046 				return err;
3047 		}
3048 		save_register_state(state, spi, reg, size);
3049 	} else if (reg && is_spillable_regtype(reg->type)) {
3050 		/* register containing pointer is being spilled into stack */
3051 		if (size != BPF_REG_SIZE) {
3052 			verbose_linfo(env, insn_idx, "; ");
3053 			verbose(env, "invalid size of register spill\n");
3054 			return -EACCES;
3055 		}
3056 		if (state != cur && reg->type == PTR_TO_STACK) {
3057 			verbose(env, "cannot spill pointers to stack into stack frame of the caller\n");
3058 			return -EINVAL;
3059 		}
3060 		save_register_state(state, spi, reg, size);
3061 	} else {
3062 		u8 type = STACK_MISC;
3063 
3064 		/* regular write of data into stack destroys any spilled ptr */
3065 		state->stack[spi].spilled_ptr.type = NOT_INIT;
3066 		/* Mark slots as STACK_MISC if they belonged to spilled ptr. */
3067 		if (is_spilled_reg(&state->stack[spi]))
3068 			for (i = 0; i < BPF_REG_SIZE; i++)
3069 				scrub_spilled_slot(&state->stack[spi].slot_type[i]);
3070 
3071 		/* only mark the slot as written if all 8 bytes were written,
3072 		 * otherwise read propagation may incorrectly stop too soon
3073 		 * when stack slots are partially written.
3074 		 * This heuristic means that read propagation will be
3075 		 * conservative, since it will add reg_live_read marks
3076 		 * to stack slots all the way to the first state when a program
3077 		 * writes+reads less than 8 bytes.
3078 		 */
3079 		if (size == BPF_REG_SIZE)
3080 			state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
3081 
3082 		/* when we zero initialize stack slots mark them as such */
3083 		if (reg && register_is_null(reg)) {
3084 			/* backtracking doesn't work for STACK_ZERO yet. */
3085 			err = mark_chain_precision(env, value_regno);
3086 			if (err)
3087 				return err;
3088 			type = STACK_ZERO;
3089 		}
3090 
3091 		/* Mark slots affected by this stack write. */
3092 		for (i = 0; i < size; i++)
3093 			state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] =
3094 				type;
3095 	}
3096 	return 0;
3097 }
3098 
3099 /* Write the stack: 'stack[ptr_regno + off] = value_regno'. 'ptr_regno' is
3100  * known to contain a variable offset.
3101  * This function checks whether the write is permitted and conservatively
3102  * tracks the effects of the write, considering that each stack slot in the
3103  * dynamic range is potentially written to.
3104  *
3105  * 'off' includes 'regno->off'.
3106  * 'value_regno' can be -1, meaning that an unknown value is being written to
3107  * the stack.
3108  *
3109  * Spilled pointers in range are not marked as written because we don't know
3110  * what's going to be actually written. This means that read propagation for
3111  * future reads cannot be terminated by this write.
3112  *
3113  * For privileged programs, uninitialized stack slots are considered
3114  * initialized by this write (even though we don't know exactly what offsets
3115  * are going to be written to). The idea is that we don't want the verifier to
3116  * reject future reads that access slots written to through variable offsets.
3117  */
3118 static int check_stack_write_var_off(struct bpf_verifier_env *env,
3119 				     /* func where register points to */
3120 				     struct bpf_func_state *state,
3121 				     int ptr_regno, int off, int size,
3122 				     int value_regno, int insn_idx)
3123 {
3124 	struct bpf_func_state *cur; /* state of the current function */
3125 	int min_off, max_off;
3126 	int i, err;
3127 	struct bpf_reg_state *ptr_reg = NULL, *value_reg = NULL;
3128 	bool writing_zero = false;
3129 	/* set if the fact that we're writing a zero is used to let any
3130 	 * stack slots remain STACK_ZERO
3131 	 */
3132 	bool zero_used = false;
3133 
3134 	cur = env->cur_state->frame[env->cur_state->curframe];
3135 	ptr_reg = &cur->regs[ptr_regno];
3136 	min_off = ptr_reg->smin_value + off;
3137 	max_off = ptr_reg->smax_value + off + size;
3138 	if (value_regno >= 0)
3139 		value_reg = &cur->regs[value_regno];
3140 	if (value_reg && register_is_null(value_reg))
3141 		writing_zero = true;
3142 
3143 	err = grow_stack_state(state, round_up(-min_off, BPF_REG_SIZE));
3144 	if (err)
3145 		return err;
3146 
3147 
3148 	/* Variable offset writes destroy any spilled pointers in range. */
3149 	for (i = min_off; i < max_off; i++) {
3150 		u8 new_type, *stype;
3151 		int slot, spi;
3152 
3153 		slot = -i - 1;
3154 		spi = slot / BPF_REG_SIZE;
3155 		stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
3156 		mark_stack_slot_scratched(env, spi);
3157 
3158 		if (!env->allow_ptr_leaks
3159 				&& *stype != NOT_INIT
3160 				&& *stype != SCALAR_VALUE) {
3161 			/* Reject the write if there are spilled pointers in
3162 			 * range. If we didn't reject here, the ptr status
3163 			 * would be erased below (even though not all slots are
3164 			 * actually overwritten), possibly opening the door to
3165 			 * leaks.
3166 			 */
3167 			verbose(env, "spilled ptr in range of var-offset stack write; insn %d, ptr off: %d",
3168 				insn_idx, i);
3169 			return -EINVAL;
3170 		}
3171 
3172 		/* Erase all spilled pointers. */
3173 		state->stack[spi].spilled_ptr.type = NOT_INIT;
3174 
3175 		/* Update the slot type. */
3176 		new_type = STACK_MISC;
3177 		if (writing_zero && *stype == STACK_ZERO) {
3178 			new_type = STACK_ZERO;
3179 			zero_used = true;
3180 		}
3181 		/* If the slot is STACK_INVALID, we check whether it's OK to
3182 		 * pretend that it will be initialized by this write. The slot
3183 		 * might not actually be written to, and so if we mark it as
3184 		 * initialized, future reads might leak uninitialized memory.
3185 		 * For privileged programs, we will accept such reads to slots
3186 		 * that may or may not be written because, if we were to reject
3187 		 * them, the error would be too confusing.
3188 		 */
3189 		if (*stype == STACK_INVALID && !env->allow_uninit_stack) {
3190 			verbose(env, "uninit stack in range of var-offset write prohibited for !root; insn %d, off: %d",
3191 					insn_idx, i);
3192 			return -EINVAL;
3193 		}
3194 		*stype = new_type;
3195 	}
3196 	if (zero_used) {
3197 		/* backtracking doesn't work for STACK_ZERO yet. */
3198 		err = mark_chain_precision(env, value_regno);
3199 		if (err)
3200 			return err;
3201 	}
3202 	return 0;
3203 }
3204 
3205 /* When register 'dst_regno' is assigned some values from stack[min_off,
3206  * max_off), we set the register's type according to the types of the
3207  * respective stack slots. If all the stack values are known to be zeros, then
3208  * so is the destination reg. Otherwise, the register is considered to be
3209  * SCALAR. This function does not deal with register filling; the caller must
3210  * ensure that all spilled registers in the stack range have been marked as
3211  * read.
3212  */
3213 static void mark_reg_stack_read(struct bpf_verifier_env *env,
3214 				/* func where src register points to */
3215 				struct bpf_func_state *ptr_state,
3216 				int min_off, int max_off, int dst_regno)
3217 {
3218 	struct bpf_verifier_state *vstate = env->cur_state;
3219 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
3220 	int i, slot, spi;
3221 	u8 *stype;
3222 	int zeros = 0;
3223 
3224 	for (i = min_off; i < max_off; i++) {
3225 		slot = -i - 1;
3226 		spi = slot / BPF_REG_SIZE;
3227 		stype = ptr_state->stack[spi].slot_type;
3228 		if (stype[slot % BPF_REG_SIZE] != STACK_ZERO)
3229 			break;
3230 		zeros++;
3231 	}
3232 	if (zeros == max_off - min_off) {
3233 		/* any access_size read into register is zero extended,
3234 		 * so the whole register == const_zero
3235 		 */
3236 		__mark_reg_const_zero(&state->regs[dst_regno]);
3237 		/* backtracking doesn't support STACK_ZERO yet,
3238 		 * so mark it precise here, so that later
3239 		 * backtracking can stop here.
3240 		 * Backtracking may not need this if this register
3241 		 * doesn't participate in pointer adjustment.
3242 		 * Forward propagation of precise flag is not
3243 		 * necessary either. This mark is only to stop
3244 		 * backtracking. Any register that contributed
3245 		 * to const 0 was marked precise before spill.
3246 		 */
3247 		state->regs[dst_regno].precise = true;
3248 	} else {
3249 		/* have read misc data from the stack */
3250 		mark_reg_unknown(env, state->regs, dst_regno);
3251 	}
3252 	state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
3253 }
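/* Illustrative note: if every byte in stack[min_off, max_off) is STACK_ZERO,
 * the destination register becomes a known constant zero and is marked
 * precise (backtracking cannot see through STACK_ZERO); if any byte is
 * STACK_MISC instead, the register is conservatively marked as an unknown
 * scalar.
 */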
3254 
3255 /* Read the stack at 'off' and put the results into the register indicated by
3256  * 'dst_regno'. It handles reg filling if the addressed stack slot is a
3257  * spilled reg.
3258  *
3259  * 'dst_regno' can be -1, meaning that the read value is not going to a
3260  * register.
3261  *
3262  * The access is assumed to be within the current stack bounds.
3263  */
3264 static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
3265 				      /* func where src register points to */
3266 				      struct bpf_func_state *reg_state,
3267 				      int off, int size, int dst_regno)
3268 {
3269 	struct bpf_verifier_state *vstate = env->cur_state;
3270 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
3271 	int i, slot = -off - 1, spi = slot / BPF_REG_SIZE;
3272 	struct bpf_reg_state *reg;
3273 	u8 *stype, type;
3274 
3275 	stype = reg_state->stack[spi].slot_type;
3276 	reg = &reg_state->stack[spi].spilled_ptr;
3277 
3278 	if (is_spilled_reg(&reg_state->stack[spi])) {
3279 		u8 spill_size = 1;
3280 
3281 		for (i = BPF_REG_SIZE - 1; i > 0 && stype[i - 1] == STACK_SPILL; i--)
3282 			spill_size++;
3283 
3284 		if (size != BPF_REG_SIZE || spill_size != BPF_REG_SIZE) {
3285 			if (reg->type != SCALAR_VALUE) {
3286 				verbose_linfo(env, env->insn_idx, "; ");
3287 				verbose(env, "invalid size of register fill\n");
3288 				return -EACCES;
3289 			}
3290 
3291 			mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
3292 			if (dst_regno < 0)
3293 				return 0;
3294 
3295 			if (!(off % BPF_REG_SIZE) && size == spill_size) {
3296 				/* The earlier check_reg_arg() has decided the
3297 				 * subreg_def for this insn.  Save it first.
3298 				 */
3299 				s32 subreg_def = state->regs[dst_regno].subreg_def;
3300 
3301 				state->regs[dst_regno] = *reg;
3302 				state->regs[dst_regno].subreg_def = subreg_def;
3303 			} else {
3304 				for (i = 0; i < size; i++) {
3305 					type = stype[(slot - i) % BPF_REG_SIZE];
3306 					if (type == STACK_SPILL)
3307 						continue;
3308 					if (type == STACK_MISC)
3309 						continue;
3310 					verbose(env, "invalid read from stack off %d+%d size %d\n",
3311 						off, i, size);
3312 					return -EACCES;
3313 				}
3314 				mark_reg_unknown(env, state->regs, dst_regno);
3315 			}
3316 			state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
3317 			return 0;
3318 		}
3319 
3320 		if (dst_regno >= 0) {
3321 			/* restore register state from stack */
3322 			state->regs[dst_regno] = *reg;
3323 			/* mark reg as written since spilled pointer state likely
3324 			 * has its liveness marks cleared by is_state_visited()
3325 			 * which resets stack/reg liveness for state transitions
3326 			 */
3327 			state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
3328 		} else if (__is_pointer_value(env->allow_ptr_leaks, reg)) {
3329 			/* If dst_regno==-1, the caller is asking us whether
3330 			 * it is acceptable to use this value as a SCALAR_VALUE
3331 			 * (e.g. for XADD).
3332 			 * We must not allow unprivileged callers to do that
3333 			 * with spilled pointers.
3334 			 */
3335 			verbose(env, "leaking pointer from stack off %d\n",
3336 				off);
3337 			return -EACCES;
3338 		}
3339 		mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
3340 	} else {
3341 		for (i = 0; i < size; i++) {
3342 			type = stype[(slot - i) % BPF_REG_SIZE];
3343 			if (type == STACK_MISC)
3344 				continue;
3345 			if (type == STACK_ZERO)
3346 				continue;
3347 			verbose(env, "invalid read from stack off %d+%d size %d\n",
3348 				off, i, size);
3349 			return -EACCES;
3350 		}
3351 		mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
3352 		if (dst_regno >= 0)
3353 			mark_reg_stack_read(env, reg_state, off, off + size, dst_regno);
3354 	}
3355 	return 0;
3356 }
3357 
3358 enum bpf_access_src {
3359 	ACCESS_DIRECT = 1,  /* the access is performed by an instruction */
3360 	ACCESS_HELPER = 2,  /* the access is performed by a helper */
3361 };
3362 
3363 static int check_stack_range_initialized(struct bpf_verifier_env *env,
3364 					 int regno, int off, int access_size,
3365 					 bool zero_size_allowed,
3366 					 enum bpf_access_src type,
3367 					 struct bpf_call_arg_meta *meta);
3368 
3369 static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno)
3370 {
3371 	return cur_regs(env) + regno;
3372 }
3373 
3374 /* Read the stack at 'ptr_regno + off' and put the result into the register
3375  * 'dst_regno'.
3376  * 'off' includes the pointer register's fixed offset(i.e. 'ptr_regno.off'),
3377  * but not its variable offset.
3378  * 'size' is assumed to be <= reg size and the access is assumed to be aligned.
3379  *
3380  * As opposed to check_stack_read_fixed_off, this function doesn't deal with
3381  * filling registers (i.e. reads of spilled register cannot be detected when
3382  * the offset is not fixed). We conservatively mark 'dst_regno' as containing
3383  * SCALAR_VALUE. That's why we assert that the 'ptr_regno' has a variable
3384  * offset; for a fixed offset check_stack_read_fixed_off should be used
3385  * instead.
3386  */
3387 static int check_stack_read_var_off(struct bpf_verifier_env *env,
3388 				    int ptr_regno, int off, int size, int dst_regno)
3389 {
3390 	/* The state of the source register. */
3391 	struct bpf_reg_state *reg = reg_state(env, ptr_regno);
3392 	struct bpf_func_state *ptr_state = func(env, reg);
3393 	int err;
3394 	int min_off, max_off;
3395 
3396 	/* Note that we pass a NULL meta, so raw access will not be permitted.
3397 	 */
3398 	err = check_stack_range_initialized(env, ptr_regno, off, size,
3399 					    false, ACCESS_DIRECT, NULL);
3400 	if (err)
3401 		return err;
3402 
3403 	min_off = reg->smin_value + off;
3404 	max_off = reg->smax_value + off;
3405 	mark_reg_stack_read(env, ptr_state, min_off, max_off + size, dst_regno);
3406 	return 0;
3407 }
3408 
3409 /* check_stack_read dispatches to check_stack_read_fixed_off or
3410  * check_stack_read_var_off.
3411  *
3412  * The caller must ensure that the offset falls within the allocated stack
3413  * bounds.
3414  *
3415  * 'dst_regno' is a register which will receive the value from the stack. It
3416  * can be -1, meaning that the read value is not going to a register.
3417  */
3418 static int check_stack_read(struct bpf_verifier_env *env,
3419 			    int ptr_regno, int off, int size,
3420 			    int dst_regno)
3421 {
3422 	struct bpf_reg_state *reg = reg_state(env, ptr_regno);
3423 	struct bpf_func_state *state = func(env, reg);
3424 	int err;
3425 	/* Some accesses are only permitted with a static offset. */
3426 	bool var_off = !tnum_is_const(reg->var_off);
3427 
3428 	/* The offset is required to be static when reads don't go to a
3429 	 * register, in order to not leak pointers (see
3430 	 * check_stack_read_fixed_off).
3431 	 */
3432 	if (dst_regno < 0 && var_off) {
3433 		char tn_buf[48];
3434 
3435 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3436 		verbose(env, "variable offset stack pointer cannot be passed into helper function; var_off=%s off=%d size=%d\n",
3437 			tn_buf, off, size);
3438 		return -EACCES;
3439 	}
3440 	/* Variable offset is prohibited for unprivileged mode for simplicity
3441 	 * since it requires corresponding support in Spectre masking for stack
3442 	 * ALU. See also retrieve_ptr_limit().
3443 	 */
3444 	if (!env->bypass_spec_v1 && var_off) {
3445 		char tn_buf[48];
3446 
3447 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3448 		verbose(env, "R%d variable offset stack access prohibited for !root, var_off=%s\n",
3449 				ptr_regno, tn_buf);
3450 		return -EACCES;
3451 	}
3452 
3453 	if (!var_off) {
3454 		off += reg->var_off.value;
3455 		err = check_stack_read_fixed_off(env, state, off, size,
3456 						 dst_regno);
3457 	} else {
3458 		/* Variable offset stack reads need more conservative handling
3459 		 * than fixed offset ones. Note that dst_regno >= 0 on this
3460 		 * branch.
3461 		 */
3462 		err = check_stack_read_var_off(env, ptr_regno, off, size,
3463 					       dst_regno);
3464 	}
3465 	return err;
3466 }
3467 
3468 
3469 /* check_stack_write dispatches to check_stack_write_fixed_off or
3470  * check_stack_write_var_off.
3471  *
3472  * 'ptr_regno' is the register used as a pointer into the stack.
3473  * 'off' includes 'ptr_regno->off', but not its variable offset (if any).
3474  * 'value_regno' is the register whose value we're writing to the stack. It can
3475  * be -1, meaning that we're not writing from a register.
3476  *
3477  * The caller must ensure that the offset falls within the maximum stack size.
3478  */
3479 static int check_stack_write(struct bpf_verifier_env *env,
3480 			     int ptr_regno, int off, int size,
3481 			     int value_regno, int insn_idx)
3482 {
3483 	struct bpf_reg_state *reg = reg_state(env, ptr_regno);
3484 	struct bpf_func_state *state = func(env, reg);
3485 	int err;
3486 
3487 	if (tnum_is_const(reg->var_off)) {
3488 		off += reg->var_off.value;
3489 		err = check_stack_write_fixed_off(env, state, off, size,
3490 						  value_regno, insn_idx);
3491 	} else {
3492 		/* Variable offset stack writes need more conservative handling
3493 		 * than fixed offset ones.
3494 		 */
3495 		err = check_stack_write_var_off(env, state,
3496 						ptr_regno, off, size,
3497 						value_regno, insn_idx);
3498 	}
3499 	return err;
3500 }
3501 
3502 static int check_map_access_type(struct bpf_verifier_env *env, u32 regno,
3503 				 int off, int size, enum bpf_access_type type)
3504 {
3505 	struct bpf_reg_state *regs = cur_regs(env);
3506 	struct bpf_map *map = regs[regno].map_ptr;
3507 	u32 cap = bpf_map_flags_to_cap(map);
3508 
3509 	if (type == BPF_WRITE && !(cap & BPF_MAP_CAN_WRITE)) {
3510 		verbose(env, "write into map forbidden, value_size=%d off=%d size=%d\n",
3511 			map->value_size, off, size);
3512 		return -EACCES;
3513 	}
3514 
3515 	if (type == BPF_READ && !(cap & BPF_MAP_CAN_READ)) {
3516 		verbose(env, "read from map forbidden, value_size=%d off=%d size=%d\n",
3517 			map->value_size, off, size);
3518 		return -EACCES;
3519 	}
3520 
3521 	return 0;
3522 }
3523 
3524 /* check read/write into memory region (e.g., map value, ringbuf sample, etc) */
3525 static int __check_mem_access(struct bpf_verifier_env *env, int regno,
3526 			      int off, int size, u32 mem_size,
3527 			      bool zero_size_allowed)
3528 {
3529 	bool size_ok = size > 0 || (size == 0 && zero_size_allowed);
3530 	struct bpf_reg_state *reg;
3531 
3532 	if (off >= 0 && size_ok && (u64)off + size <= mem_size)
3533 		return 0;
3534 
3535 	reg = &cur_regs(env)[regno];
3536 	switch (reg->type) {
3537 	case PTR_TO_MAP_KEY:
3538 		verbose(env, "invalid access to map key, key_size=%d off=%d size=%d\n",
3539 			mem_size, off, size);
3540 		break;
3541 	case PTR_TO_MAP_VALUE:
3542 		verbose(env, "invalid access to map value, value_size=%d off=%d size=%d\n",
3543 			mem_size, off, size);
3544 		break;
3545 	case PTR_TO_PACKET:
3546 	case PTR_TO_PACKET_META:
3547 	case PTR_TO_PACKET_END:
3548 		verbose(env, "invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n",
3549 			off, size, regno, reg->id, off, mem_size);
3550 		break;
3551 	case PTR_TO_MEM:
3552 	default:
3553 		verbose(env, "invalid access to memory, mem_size=%u off=%d size=%d\n",
3554 			mem_size, off, size);
3555 	}
3556 
3557 	return -EACCES;
3558 }
3559 
3560 /* check read/write into a memory region with possible variable offset */
3561 static int check_mem_region_access(struct bpf_verifier_env *env, u32 regno,
3562 				   int off, int size, u32 mem_size,
3563 				   bool zero_size_allowed)
3564 {
3565 	struct bpf_verifier_state *vstate = env->cur_state;
3566 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
3567 	struct bpf_reg_state *reg = &state->regs[regno];
3568 	int err;
3569 
3570 	/* We may have adjusted the register pointing to memory region, so we
3571 	 * need to try adding each of min_value and max_value to off
3572 	 * to make sure our theoretical access will be safe.
3573 	 *
3574 	 * The minimum value is only important with signed
3575 	 * comparisons where we can't assume the floor of a
3576 	 * value is 0.  If we are using signed variables for our
3577 	 * indexes we need to make sure that whatever we use
3578 	 * will have a set floor within our range.
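	 *
	 * For example (illustrative numbers only): for a 64-byte region with no
	 * extra fixed offset, a 4-byte access whose variable offset is known to
	 * lie in [0, 60] passes both the min and max checks below, while a
	 * range of [-4, 60] is rejected because its minimum value is negative.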
3579 	 */
3580 	if (reg->smin_value < 0 &&
3581 	    (reg->smin_value == S64_MIN ||
3582 	     (off + reg->smin_value != (s64)(s32)(off + reg->smin_value)) ||
3583 	      reg->smin_value + off < 0)) {
3584 		verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
3585 			regno);
3586 		return -EACCES;
3587 	}
3588 	err = __check_mem_access(env, regno, reg->smin_value + off, size,
3589 				 mem_size, zero_size_allowed);
3590 	if (err) {
3591 		verbose(env, "R%d min value is outside of the allowed memory range\n",
3592 			regno);
3593 		return err;
3594 	}
3595 
3596 	/* If we haven't set a max value then we need to bail since we can't be
3597 	 * sure we won't do bad things.
3598 	 * If reg->umax_value + off could overflow, treat that as unbounded too.
3599 	 */
3600 	if (reg->umax_value >= BPF_MAX_VAR_OFF) {
3601 		verbose(env, "R%d unbounded memory access, make sure to bounds check any such access\n",
3602 			regno);
3603 		return -EACCES;
3604 	}
3605 	err = __check_mem_access(env, regno, reg->umax_value + off, size,
3606 				 mem_size, zero_size_allowed);
3607 	if (err) {
3608 		verbose(env, "R%d max value is outside of the allowed memory range\n",
3609 			regno);
3610 		return err;
3611 	}
3612 
3613 	return 0;
3614 }
3615 
3616 static int __check_ptr_off_reg(struct bpf_verifier_env *env,
3617 			       const struct bpf_reg_state *reg, int regno,
3618 			       bool fixed_off_ok)
3619 {
3620 	/* Access to this pointer-typed register or passing it to a helper
3621 	 * is only allowed in its original, unmodified form.
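	 *
	 * For example (illustrative): doing "r1 += 8" on a PTR_TO_CTX register
	 * and then dereferencing it or passing it to a helper is rejected by
	 * the checks below, unless the caller explicitly allows a fixed,
	 * constant offset via 'fixed_off_ok'.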
3622 	 */
3623 
3624 	if (reg->off < 0) {
3625 		verbose(env, "negative offset %s ptr R%d off=%d disallowed\n",
3626 			reg_type_str(env, reg->type), regno, reg->off);
3627 		return -EACCES;
3628 	}
3629 
3630 	if (!fixed_off_ok && reg->off) {
3631 		verbose(env, "dereference of modified %s ptr R%d off=%d disallowed\n",
3632 			reg_type_str(env, reg->type), regno, reg->off);
3633 		return -EACCES;
3634 	}
3635 
3636 	if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
3637 		char tn_buf[48];
3638 
3639 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3640 		verbose(env, "variable %s access var_off=%s disallowed\n",
3641 			reg_type_str(env, reg->type), tn_buf);
3642 		return -EACCES;
3643 	}
3644 
3645 	return 0;
3646 }
3647 
3648 int check_ptr_off_reg(struct bpf_verifier_env *env,
3649 		      const struct bpf_reg_state *reg, int regno)
3650 {
3651 	return __check_ptr_off_reg(env, reg, regno, false);
3652 }
3653 
3654 static int map_kptr_match_type(struct bpf_verifier_env *env,
3655 			       struct bpf_map_value_off_desc *off_desc,
3656 			       struct bpf_reg_state *reg, u32 regno)
3657 {
3658 	const char *targ_name = kernel_type_name(off_desc->kptr.btf, off_desc->kptr.btf_id);
3659 	int perm_flags = PTR_MAYBE_NULL;
3660 	const char *reg_name = "";
3661 
3662 	/* Only unreferenced case accepts untrusted pointers */
3663 	if (off_desc->type == BPF_KPTR_UNREF)
3664 		perm_flags |= PTR_UNTRUSTED;
3665 
3666 	if (base_type(reg->type) != PTR_TO_BTF_ID || (type_flag(reg->type) & ~perm_flags))
3667 		goto bad_type;
3668 
3669 	if (!btf_is_kernel(reg->btf)) {
3670 		verbose(env, "R%d must point to kernel BTF\n", regno);
3671 		return -EINVAL;
3672 	}
3673 	/* We need to verify reg->type and reg->btf, before accessing reg->btf */
3674 	reg_name = kernel_type_name(reg->btf, reg->btf_id);
3675 
3676 	/* For ref_ptr case, release function check should ensure we get one
3677 	 * referenced PTR_TO_BTF_ID, and that its fixed offset is 0. For the
3678 	 * normal store of unreferenced kptr, we must ensure var_off is zero.
3679 	 * Since ref_ptr cannot be accessed directly by BPF insns, checks for
3680 	 * reg->off and reg->ref_obj_id are not needed here.
3681 	 */
3682 	if (__check_ptr_off_reg(env, reg, regno, true))
3683 		return -EACCES;
3684 
3685 	/* A full type match is needed, as BTF can be vmlinux or module BTF, and
3686 	 * we also need to take into account the reg->off.
3687 	 *
3688 	 * We want to support cases like:
3689 	 *
3690 	 * struct foo {
3691 	 *         struct bar br;
3692 	 *         struct baz bz;
3693 	 * };
3694 	 *
3695 	 * struct foo *v;
3696 	 * v = func();	      // PTR_TO_BTF_ID
3697 	 * val->foo = v;      // reg->off is zero, btf and btf_id match type
3698 	 * val->bar = &v->br; // reg->off is still zero, but we need to retry with
3699 	 *                    // first member type of struct after comparison fails
3700 	 * val->baz = &v->bz; // reg->off is non-zero, so struct needs to be walked
3701 	 *                    // to match type
3702 	 *
3703 	 * In the kptr_ref case, check_func_arg_reg_off already ensures reg->off
3704 	 * is zero. We must also ensure that btf_struct_ids_match does not walk
3705 	 * the struct to match type against first member of struct, i.e. reject
3706 	 * second case from above. Hence, when type is BPF_KPTR_REF, we set
3707 	 * strict mode to true for type match.
3708 	 */
3709 	if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, reg->off,
3710 				  off_desc->kptr.btf, off_desc->kptr.btf_id,
3711 				  off_desc->type == BPF_KPTR_REF))
3712 		goto bad_type;
3713 	return 0;
3714 bad_type:
3715 	verbose(env, "invalid kptr access, R%d type=%s%s ", regno,
3716 		reg_type_str(env, reg->type), reg_name);
3717 	verbose(env, "expected=%s%s", reg_type_str(env, PTR_TO_BTF_ID), targ_name);
3718 	if (off_desc->type == BPF_KPTR_UNREF)
3719 		verbose(env, " or %s%s\n", reg_type_str(env, PTR_TO_BTF_ID | PTR_UNTRUSTED),
3720 			targ_name);
3721 	else
3722 		verbose(env, "\n");
3723 	return -EINVAL;
3724 }
3725 
3726 static int check_map_kptr_access(struct bpf_verifier_env *env, u32 regno,
3727 				 int value_regno, int insn_idx,
3728 				 struct bpf_map_value_off_desc *off_desc)
3729 {
3730 	struct bpf_insn *insn = &env->prog->insnsi[insn_idx];
3731 	int class = BPF_CLASS(insn->code);
3732 	struct bpf_reg_state *val_reg;
3733 
3734 	/* Things we already checked for in check_map_access and caller:
3735 	 *  - Reject cases where variable offset may touch kptr
3736 	 *  - size of access (must be BPF_DW)
3737 	 *  - tnum_is_const(reg->var_off)
3738 	 *  - off_desc->offset == off + reg->var_off.value
3739 	 */
3740 	/* Only BPF_[LDX,STX,ST] | BPF_MEM | BPF_DW is supported */
3741 	if (BPF_MODE(insn->code) != BPF_MEM) {
3742 		verbose(env, "kptr in map can only be accessed using BPF_MEM instruction mode\n");
3743 		return -EACCES;
3744 	}
3745 
3746 	/* We only allow loading referenced kptr, since it will be marked as
3747 	 * untrusted, similar to unreferenced kptr.
3748 	 */
3749 	if (class != BPF_LDX && off_desc->type == BPF_KPTR_REF) {
3750 		verbose(env, "store to referenced kptr disallowed\n");
3751 		return -EACCES;
3752 	}
3753 
3754 	if (class == BPF_LDX) {
3755 		val_reg = reg_state(env, value_regno);
3756 		/* We can simply mark the value_regno receiving the pointer
3757 		 * value from map as PTR_TO_BTF_ID, with the correct type.
3758 		 */
3759 		mark_btf_ld_reg(env, cur_regs(env), value_regno, PTR_TO_BTF_ID, off_desc->kptr.btf,
3760 				off_desc->kptr.btf_id, PTR_MAYBE_NULL | PTR_UNTRUSTED);
3761 		/* For mark_ptr_or_null_reg */
3762 		val_reg->id = ++env->id_gen;
3763 	} else if (class == BPF_STX) {
3764 		val_reg = reg_state(env, value_regno);
3765 		if (!register_is_null(val_reg) &&
3766 		    map_kptr_match_type(env, off_desc, val_reg, value_regno))
3767 			return -EACCES;
3768 	} else if (class == BPF_ST) {
3769 		if (insn->imm) {
3770 			verbose(env, "BPF_ST imm must be 0 when storing to kptr at off=%u\n",
3771 				off_desc->offset);
3772 			return -EACCES;
3773 		}
3774 	} else {
3775 		verbose(env, "kptr in map can only be accessed using BPF_LDX/BPF_STX/BPF_ST\n");
3776 		return -EACCES;
3777 	}
3778 	return 0;
3779 }
3780 
3781 /* check read/write into a map element with possible variable offset */
3782 static int check_map_access(struct bpf_verifier_env *env, u32 regno,
3783 			    int off, int size, bool zero_size_allowed,
3784 			    enum bpf_access_src src)
3785 {
3786 	struct bpf_verifier_state *vstate = env->cur_state;
3787 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
3788 	struct bpf_reg_state *reg = &state->regs[regno];
3789 	struct bpf_map *map = reg->map_ptr;
3790 	int err;
3791 
3792 	err = check_mem_region_access(env, regno, off, size, map->value_size,
3793 				      zero_size_allowed);
3794 	if (err)
3795 		return err;
3796 
3797 	if (map_value_has_spin_lock(map)) {
3798 		u32 lock = map->spin_lock_off;
3799 
3800 		/* if any part of struct bpf_spin_lock can be touched by
3801 		 * load/store reject this program.
3802 		 * To check that [x1, x2) overlaps with [y1, y2)
3803 		 * it is sufficient to check x1 < y2 && y1 < x2.
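		 * For example (illustrative offsets): with the lock starting at
		 * byte 16, an 8-byte access covering [0, 8) does not overlap
		 * (8 <= 16) and is allowed, while an 8-byte access covering
		 * [12, 20) overlaps the lock and is rejected.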
3804 		 */
3805 		if (reg->smin_value + off < lock + sizeof(struct bpf_spin_lock) &&
3806 		     lock < reg->umax_value + off + size) {
3807 			verbose(env, "bpf_spin_lock cannot be accessed directly by load/store\n");
3808 			return -EACCES;
3809 		}
3810 	}
3811 	if (map_value_has_timer(map)) {
3812 		u32 t = map->timer_off;
3813 
3814 		if (reg->smin_value + off < t + sizeof(struct bpf_timer) &&
3815 		     t < reg->umax_value + off + size) {
3816 			verbose(env, "bpf_timer cannot be accessed directly by load/store\n");
3817 			return -EACCES;
3818 		}
3819 	}
3820 	if (map_value_has_kptrs(map)) {
3821 		struct bpf_map_value_off *tab = map->kptr_off_tab;
3822 		int i;
3823 
3824 		for (i = 0; i < tab->nr_off; i++) {
3825 			u32 p = tab->off[i].offset;
3826 
3827 			if (reg->smin_value + off < p + sizeof(u64) &&
3828 			    p < reg->umax_value + off + size) {
3829 				if (src != ACCESS_DIRECT) {
3830 					verbose(env, "kptr cannot be accessed indirectly by helper\n");
3831 					return -EACCES;
3832 				}
3833 				if (!tnum_is_const(reg->var_off)) {
3834 					verbose(env, "kptr access cannot have variable offset\n");
3835 					return -EACCES;
3836 				}
3837 				if (p != off + reg->var_off.value) {
3838 					verbose(env, "kptr access misaligned expected=%u off=%llu\n",
3839 						p, off + reg->var_off.value);
3840 					return -EACCES;
3841 				}
3842 				if (size != bpf_size_to_bytes(BPF_DW)) {
3843 					verbose(env, "kptr access size must be BPF_DW\n");
3844 					return -EACCES;
3845 				}
3846 				break;
3847 			}
3848 		}
3849 	}
3850 	return err;
3851 }
3852 
3853 #define MAX_PACKET_OFF 0xffff
3854 
3855 static bool may_access_direct_pkt_data(struct bpf_verifier_env *env,
3856 				       const struct bpf_call_arg_meta *meta,
3857 				       enum bpf_access_type t)
3858 {
3859 	enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
3860 
3861 	switch (prog_type) {
3862 	/* Program types only with direct read access go here! */
3863 	case BPF_PROG_TYPE_LWT_IN:
3864 	case BPF_PROG_TYPE_LWT_OUT:
3865 	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
3866 	case BPF_PROG_TYPE_SK_REUSEPORT:
3867 	case BPF_PROG_TYPE_FLOW_DISSECTOR:
3868 	case BPF_PROG_TYPE_CGROUP_SKB:
3869 		if (t == BPF_WRITE)
3870 			return false;
3871 		fallthrough;
3872 
3873 	/* Program types with direct read + write access go here! */
3874 	case BPF_PROG_TYPE_SCHED_CLS:
3875 	case BPF_PROG_TYPE_SCHED_ACT:
3876 	case BPF_PROG_TYPE_XDP:
3877 	case BPF_PROG_TYPE_LWT_XMIT:
3878 	case BPF_PROG_TYPE_SK_SKB:
3879 	case BPF_PROG_TYPE_SK_MSG:
3880 		if (meta)
3881 			return meta->pkt_access;
3882 
3883 		env->seen_direct_write = true;
3884 		return true;
3885 
3886 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
3887 		if (t == BPF_WRITE)
3888 			env->seen_direct_write = true;
3889 
3890 		return true;
3891 
3892 	default:
3893 		return false;
3894 	}
3895 }
3896 
3897 static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
3898 			       int size, bool zero_size_allowed)
3899 {
3900 	struct bpf_reg_state *regs = cur_regs(env);
3901 	struct bpf_reg_state *reg = &regs[regno];
3902 	int err;
3903 
3904 	/* We may have added a variable offset to the packet pointer; but any
3905 	 * reg->range we have comes after that.  We are only checking the fixed
3906 	 * offset.
3907 	 */
3908 
3909 	/* We don't allow negative numbers, because we aren't tracking enough
3910 	 * detail to prove they're safe.
3911 	 */
3912 	if (reg->smin_value < 0) {
3913 		verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
3914 			regno);
3915 		return -EACCES;
3916 	}
3917 
3918 	err = reg->range < 0 ? -EINVAL :
3919 	      __check_mem_access(env, regno, off, size, reg->range,
3920 				 zero_size_allowed);
3921 	if (err) {
3922 		verbose(env, "R%d offset is outside of the packet\n", regno);
3923 		return err;
3924 	}
3925 
3926 	/* __check_mem_access has made sure "off + size - 1" is within u16.
3927 	 * reg->umax_value can't be bigger than MAX_PACKET_OFF which is 0xffff,
3928 	 * otherwise find_good_pkt_pointers would have refused to set range info
3929 	 * so that __check_mem_access would have rejected this pkt access.
3930 	 * Therefore, "off + reg->umax_value + size - 1" won't overflow u32.
3931 	 */
3932 	env->prog->aux->max_pkt_offset =
3933 		max_t(u32, env->prog->aux->max_pkt_offset,
3934 		      off + reg->umax_value + size - 1);
3935 
3936 	return err;
3937 }
3938 
3939 /* check access to 'struct bpf_context' fields.  Supports fixed offsets only */
3940 static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size,
3941 			    enum bpf_access_type t, enum bpf_reg_type *reg_type,
3942 			    struct btf **btf, u32 *btf_id)
3943 {
3944 	struct bpf_insn_access_aux info = {
3945 		.reg_type = *reg_type,
3946 		.log = &env->log,
3947 	};
3948 
3949 	if (env->ops->is_valid_access &&
3950 	    env->ops->is_valid_access(off, size, t, env->prog, &info)) {
3951 		/* A non-zero info.ctx_field_size indicates that this field is a
3952 		 * candidate for later verifier transformation to load the whole
3953 		 * field and then apply a mask when accessed with a narrower
3954 		 * access than actual ctx access size. A zero info.ctx_field_size
3955 		 * will only allow for whole field access and rejects any other
3956 		 * type of narrower access.
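		 * For example (illustrative): a 1-byte read of a 4-byte ctx field
		 * with a non-zero ctx_field_size may later be rewritten by the
		 * verifier into a full-width load plus shift/mask instead of
		 * being rejected outright.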
3957 		 */
3958 		*reg_type = info.reg_type;
3959 
3960 		if (base_type(*reg_type) == PTR_TO_BTF_ID) {
3961 			*btf = info.btf;
3962 			*btf_id = info.btf_id;
3963 		} else {
3964 			env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
3965 		}
3966 		/* remember the offset of last byte accessed in ctx */
3967 		if (env->prog->aux->max_ctx_offset < off + size)
3968 			env->prog->aux->max_ctx_offset = off + size;
3969 		return 0;
3970 	}
3971 
3972 	verbose(env, "invalid bpf_context access off=%d size=%d\n", off, size);
3973 	return -EACCES;
3974 }
3975 
3976 static int check_flow_keys_access(struct bpf_verifier_env *env, int off,
3977 				  int size)
3978 {
3979 	if (size < 0 || off < 0 ||
3980 	    (u64)off + size > sizeof(struct bpf_flow_keys)) {
3981 		verbose(env, "invalid access to flow keys off=%d size=%d\n",
3982 			off, size);
3983 		return -EACCES;
3984 	}
3985 	return 0;
3986 }
3987 
3988 static int check_sock_access(struct bpf_verifier_env *env, int insn_idx,
3989 			     u32 regno, int off, int size,
3990 			     enum bpf_access_type t)
3991 {
3992 	struct bpf_reg_state *regs = cur_regs(env);
3993 	struct bpf_reg_state *reg = &regs[regno];
3994 	struct bpf_insn_access_aux info = {};
3995 	bool valid;
3996 
3997 	if (reg->smin_value < 0) {
3998 		verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
3999 			regno);
4000 		return -EACCES;
4001 	}
4002 
4003 	switch (reg->type) {
4004 	case PTR_TO_SOCK_COMMON:
4005 		valid = bpf_sock_common_is_valid_access(off, size, t, &info);
4006 		break;
4007 	case PTR_TO_SOCKET:
4008 		valid = bpf_sock_is_valid_access(off, size, t, &info);
4009 		break;
4010 	case PTR_TO_TCP_SOCK:
4011 		valid = bpf_tcp_sock_is_valid_access(off, size, t, &info);
4012 		break;
4013 	case PTR_TO_XDP_SOCK:
4014 		valid = bpf_xdp_sock_is_valid_access(off, size, t, &info);
4015 		break;
4016 	default:
4017 		valid = false;
4018 	}
4019 
4020 
4021 	if (valid) {
4022 		env->insn_aux_data[insn_idx].ctx_field_size =
4023 			info.ctx_field_size;
4024 		return 0;
4025 	}
4026 
4027 	verbose(env, "R%d invalid %s access off=%d size=%d\n",
4028 		regno, reg_type_str(env, reg->type), off, size);
4029 
4030 	return -EACCES;
4031 }
4032 
4033 static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
4034 {
4035 	return __is_pointer_value(env->allow_ptr_leaks, reg_state(env, regno));
4036 }
4037 
4038 static bool is_ctx_reg(struct bpf_verifier_env *env, int regno)
4039 {
4040 	const struct bpf_reg_state *reg = reg_state(env, regno);
4041 
4042 	return reg->type == PTR_TO_CTX;
4043 }
4044 
4045 static bool is_sk_reg(struct bpf_verifier_env *env, int regno)
4046 {
4047 	const struct bpf_reg_state *reg = reg_state(env, regno);
4048 
4049 	return type_is_sk_pointer(reg->type);
4050 }
4051 
4052 static bool is_pkt_reg(struct bpf_verifier_env *env, int regno)
4053 {
4054 	const struct bpf_reg_state *reg = reg_state(env, regno);
4055 
4056 	return type_is_pkt_pointer(reg->type);
4057 }
4058 
4059 static bool is_flow_key_reg(struct bpf_verifier_env *env, int regno)
4060 {
4061 	const struct bpf_reg_state *reg = reg_state(env, regno);
4062 
4063 	/* Separate from is_ctx_reg() since we still want to allow BPF_ST here. */
4064 	return reg->type == PTR_TO_FLOW_KEYS;
4065 }
4066 
4067 static int check_pkt_ptr_alignment(struct bpf_verifier_env *env,
4068 				   const struct bpf_reg_state *reg,
4069 				   int off, int size, bool strict)
4070 {
4071 	struct tnum reg_off;
4072 	int ip_align;
4073 
4074 	/* Byte size accesses are always allowed. */
4075 	if (!strict || size == 1)
4076 		return 0;
4077 
4078 	/* For platforms that do not have a Kconfig enabling
4079 	 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the value of
4080 	 * NET_IP_ALIGN is universally set to '2'.  And on platforms
4081 	 * that do set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, we get
4082 	 * to this code only in strict mode where we want to emulate
4083 	 * the NET_IP_ALIGN==2 checking.  Therefore use an
4084 	 * unconditional IP align value of '2'.
4085 	 */
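	 *
	 * For example (illustrative, assuming a constant var_off of 0 and a
	 * reg->off of 0): a 4-byte access at packet offset 14 checks the
	 * alignment of 2 + 14 = 16 and passes, whereas offset 12 yields 14,
	 * which is not 4-byte aligned and is rejected in strict mode.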
4086 	ip_align = 2;
4087 
4088 	reg_off = tnum_add(reg->var_off, tnum_const(ip_align + reg->off + off));
4089 	if (!tnum_is_aligned(reg_off, size)) {
4090 		char tn_buf[48];
4091 
4092 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
4093 		verbose(env,
4094 			"misaligned packet access off %d+%s+%d+%d size %d\n",
4095 			ip_align, tn_buf, reg->off, off, size);
4096 		return -EACCES;
4097 	}
4098 
4099 	return 0;
4100 }
4101 
4102 static int check_generic_ptr_alignment(struct bpf_verifier_env *env,
4103 				       const struct bpf_reg_state *reg,
4104 				       const char *pointer_desc,
4105 				       int off, int size, bool strict)
4106 {
4107 	struct tnum reg_off;
4108 
4109 	/* Byte size accesses are always allowed. */
4110 	if (!strict || size == 1)
4111 		return 0;
4112 
4113 	reg_off = tnum_add(reg->var_off, tnum_const(reg->off + off));
4114 	if (!tnum_is_aligned(reg_off, size)) {
4115 		char tn_buf[48];
4116 
4117 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
4118 		verbose(env, "misaligned %saccess off %s+%d+%d size %d\n",
4119 			pointer_desc, tn_buf, reg->off, off, size);
4120 		return -EACCES;
4121 	}
4122 
4123 	return 0;
4124 }
4125 
4126 static int check_ptr_alignment(struct bpf_verifier_env *env,
4127 			       const struct bpf_reg_state *reg, int off,
4128 			       int size, bool strict_alignment_once)
4129 {
4130 	bool strict = env->strict_alignment || strict_alignment_once;
4131 	const char *pointer_desc = "";
4132 
4133 	switch (reg->type) {
4134 	case PTR_TO_PACKET:
4135 	case PTR_TO_PACKET_META:
4136 		/* Special case, because of NET_IP_ALIGN. Given metadata sits
4137 		 * right in front, treat it the very same way.
4138 		 */
4139 		return check_pkt_ptr_alignment(env, reg, off, size, strict);
4140 	case PTR_TO_FLOW_KEYS:
4141 		pointer_desc = "flow keys ";
4142 		break;
4143 	case PTR_TO_MAP_KEY:
4144 		pointer_desc = "key ";
4145 		break;
4146 	case PTR_TO_MAP_VALUE:
4147 		pointer_desc = "value ";
4148 		break;
4149 	case PTR_TO_CTX:
4150 		pointer_desc = "context ";
4151 		break;
4152 	case PTR_TO_STACK:
4153 		pointer_desc = "stack ";
4154 		/* The stack spill tracking logic in check_stack_write_fixed_off()
4155 		 * and check_stack_read_fixed_off() relies on stack accesses being
4156 		 * aligned.
4157 		 */
4158 		strict = true;
4159 		break;
4160 	case PTR_TO_SOCKET:
4161 		pointer_desc = "sock ";
4162 		break;
4163 	case PTR_TO_SOCK_COMMON:
4164 		pointer_desc = "sock_common ";
4165 		break;
4166 	case PTR_TO_TCP_SOCK:
4167 		pointer_desc = "tcp_sock ";
4168 		break;
4169 	case PTR_TO_XDP_SOCK:
4170 		pointer_desc = "xdp_sock ";
4171 		break;
4172 	default:
4173 		break;
4174 	}
4175 	return check_generic_ptr_alignment(env, reg, pointer_desc, off, size,
4176 					   strict);
4177 }
4178 
4179 static int update_stack_depth(struct bpf_verifier_env *env,
4180 			      const struct bpf_func_state *func,
4181 			      int off)
4182 {
4183 	u16 stack = env->subprog_info[func->subprogno].stack_depth;
4184 
4185 	if (stack >= -off)
4186 		return 0;
4187 
4188 	/* update known max for given subprogram */
4189 	env->subprog_info[func->subprogno].stack_depth = -off;
4190 	return 0;
4191 }
4192 
4193 /* starting from main bpf function walk all instructions of the function
4194  * and recursively walk all callees that given function can call.
4195  * Ignore jump and exit insns.
4196  * Since recursion is prevented by check_cfg() this algorithm
4197  * only needs a local stack of MAX_CALL_FRAMES to remember callsites
4198  */
4199 static int check_max_stack_depth(struct bpf_verifier_env *env)
4200 {
4201 	int depth = 0, frame = 0, idx = 0, i = 0, subprog_end;
4202 	struct bpf_subprog_info *subprog = env->subprog_info;
4203 	struct bpf_insn *insn = env->prog->insnsi;
4204 	bool tail_call_reachable = false;
4205 	int ret_insn[MAX_CALL_FRAMES];
4206 	int ret_prog[MAX_CALL_FRAMES];
4207 	int j;
4208 
4209 process_func:
4210 	/* protect against potential stack overflow that might happen when
4211 	 * bpf2bpf calls get combined with tailcalls. Limit the caller's stack
4212 	 * depth for such a case down to 256 so that the worst case scenario
4213 	 * would result in an 8k stack size (32, which is the tail call limit,
4214 	 * times 256 = 8k).
4215 	 *
4216 	 * To get the idea what might happen, see an example:
4217 	 * func1 -> sub rsp, 128
4218 	 *  subfunc1 -> sub rsp, 256
4219 	 *  tailcall1 -> add rsp, 256
4220 	 *   func2 -> sub rsp, 192 (total stack size = 128 + 192 = 320)
4221 	 *   subfunc2 -> sub rsp, 64
4222 	 *   subfunc22 -> sub rsp, 128
4223 	 *   tailcall2 -> add rsp, 128
4224 	 *    func3 -> sub rsp, 32 (total stack size 128 + 192 + 64 + 32 = 416)
4225 	 *
4226 	 * tailcall will unwind the current stack frame but it will not get rid
4227 	 * of caller's stack as shown on the example above.
4228 	 */
4229 	if (idx && subprog[idx].has_tail_call && depth >= 256) {
4230 		verbose(env,
4231 			"tail_calls are not allowed when call stack of previous frames is %d bytes. Too large\n",
4232 			depth);
4233 		return -EACCES;
4234 	}
4235 	/* round up to 32 bytes, since this is the granularity
4236 	 * of the interpreter stack size
4237 	 */
4238 	depth += round_up(max_t(u32, subprog[idx].stack_depth, 1), 32);
4239 	if (depth > MAX_BPF_STACK) {
4240 		verbose(env, "combined stack size of %d calls is %d. Too large\n",
4241 			frame + 1, depth);
4242 		return -EACCES;
4243 	}
4244 continue_func:
4245 	subprog_end = subprog[idx + 1].start;
4246 	for (; i < subprog_end; i++) {
4247 		int next_insn;
4248 
4249 		if (!bpf_pseudo_call(insn + i) && !bpf_pseudo_func(insn + i))
4250 			continue;
4251 		/* remember insn and function to return to */
4252 		ret_insn[frame] = i + 1;
4253 		ret_prog[frame] = idx;
4254 
4255 		/* find the callee */
4256 		next_insn = i + insn[i].imm + 1;
4257 		idx = find_subprog(env, next_insn);
4258 		if (idx < 0) {
4259 			WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
4260 				  next_insn);
4261 			return -EFAULT;
4262 		}
4263 		if (subprog[idx].is_async_cb) {
4264 			if (subprog[idx].has_tail_call) {
4265 				verbose(env, "verifier bug. subprog has tail_call and async cb\n");
4266 				return -EFAULT;
4267 			}
4268 			/* async callbacks don't increase bpf prog stack size */
4269 			continue;
4270 		}
4271 		i = next_insn;
4272 
4273 		if (subprog[idx].has_tail_call)
4274 			tail_call_reachable = true;
4275 
4276 		frame++;
4277 		if (frame >= MAX_CALL_FRAMES) {
4278 			verbose(env, "the call stack of %d frames is too deep !\n",
4279 				frame);
4280 			return -E2BIG;
4281 		}
4282 		goto process_func;
4283 	}
4284 	/* if a tail call was detected across bpf2bpf calls then mark each of the
4285 	 * currently present subprog frames as tail call reachable subprogs;
4286 	 * the JIT will use this info to preserve the tail call counter
4287 	 * across bpf2bpf calls combined with tail calls
4288 	 */
4289 	if (tail_call_reachable)
4290 		for (j = 0; j < frame; j++)
4291 			subprog[ret_prog[j]].tail_call_reachable = true;
4292 	if (subprog[0].tail_call_reachable)
4293 		env->prog->aux->tail_call_reachable = true;
4294 
4295 	/* end of for() loop means the last insn of the 'subprog'
4296 	 * was reached. Doesn't matter whether it was JA or EXIT
4297 	 */
4298 	if (frame == 0)
4299 		return 0;
4300 	depth -= round_up(max_t(u32, subprog[idx].stack_depth, 1), 32);
4301 	frame--;
4302 	i = ret_insn[frame];
4303 	idx = ret_prog[frame];
4304 	goto continue_func;
4305 }
4306 
4307 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
4308 static int get_callee_stack_depth(struct bpf_verifier_env *env,
4309 				  const struct bpf_insn *insn, int idx)
4310 {
4311 	int start = idx + insn->imm + 1, subprog;
4312 
4313 	subprog = find_subprog(env, start);
4314 	if (subprog < 0) {
4315 		WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
4316 			  start);
4317 		return -EFAULT;
4318 	}
4319 	return env->subprog_info[subprog].stack_depth;
4320 }
4321 #endif
4322 
4323 static int __check_buffer_access(struct bpf_verifier_env *env,
4324 				 const char *buf_info,
4325 				 const struct bpf_reg_state *reg,
4326 				 int regno, int off, int size)
4327 {
4328 	if (off < 0) {
4329 		verbose(env,
4330 			"R%d invalid %s buffer access: off=%d, size=%d\n",
4331 			regno, buf_info, off, size);
4332 		return -EACCES;
4333 	}
4334 	if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
4335 		char tn_buf[48];
4336 
4337 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
4338 		verbose(env,
4339 			"R%d invalid variable buffer offset: off=%d, var_off=%s\n",
4340 			regno, off, tn_buf);
4341 		return -EACCES;
4342 	}
4343 
4344 	return 0;
4345 }
4346 
4347 static int check_tp_buffer_access(struct bpf_verifier_env *env,
4348 				  const struct bpf_reg_state *reg,
4349 				  int regno, int off, int size)
4350 {
4351 	int err;
4352 
4353 	err = __check_buffer_access(env, "tracepoint", reg, regno, off, size);
4354 	if (err)
4355 		return err;
4356 
4357 	if (off + size > env->prog->aux->max_tp_access)
4358 		env->prog->aux->max_tp_access = off + size;
4359 
4360 	return 0;
4361 }
4362 
4363 static int check_buffer_access(struct bpf_verifier_env *env,
4364 			       const struct bpf_reg_state *reg,
4365 			       int regno, int off, int size,
4366 			       bool zero_size_allowed,
4367 			       u32 *max_access)
4368 {
4369 	const char *buf_info = type_is_rdonly_mem(reg->type) ? "rdonly" : "rdwr";
4370 	int err;
4371 
4372 	err = __check_buffer_access(env, buf_info, reg, regno, off, size);
4373 	if (err)
4374 		return err;
4375 
4376 	if (off + size > *max_access)
4377 		*max_access = off + size;
4378 
4379 	return 0;
4380 }
4381 
4382 /* BPF architecture zero extends alu32 ops into 64-bit registers */
4383 static void zext_32_to_64(struct bpf_reg_state *reg)
4384 {
4385 	reg->var_off = tnum_subreg(reg->var_off);
4386 	__reg_assign_32_into_64(reg);
4387 }
4388 
4389 /* truncate register to smaller size (in bytes)
4390  * must be called with size < BPF_REG_SIZE
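 *
 * For example (illustrative): coerce_reg_to_size(reg, 1) after a 1-byte
 * load masks the tracked bounds so that umax_value is at most 0xff and
 * var_off keeps only the low 8 bits.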
4391  */
4392 static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
4393 {
4394 	u64 mask;
4395 
4396 	/* clear high bits in bit representation */
4397 	reg->var_off = tnum_cast(reg->var_off, size);
4398 
4399 	/* fix arithmetic bounds */
4400 	mask = ((u64)1 << (size * 8)) - 1;
4401 	if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) {
4402 		reg->umin_value &= mask;
4403 		reg->umax_value &= mask;
4404 	} else {
4405 		reg->umin_value = 0;
4406 		reg->umax_value = mask;
4407 	}
4408 	reg->smin_value = reg->umin_value;
4409 	reg->smax_value = reg->umax_value;
4410 
4411 	/* If size is smaller than 32bit register the 32bit register
4412 	 * values are also truncated so we push 64-bit bounds into
4413 	 * 32-bit bounds. Above were truncated < 32-bits already.
4414 	 */
4415 	if (size >= 4)
4416 		return;
4417 	__reg_combine_64_into_32(reg);
4418 }
4419 
4420 static bool bpf_map_is_rdonly(const struct bpf_map *map)
4421 {
4422 	/* A map is considered read-only if the following conditions are true:
4423 	 *
4424 	 * 1) BPF program side cannot change any of the map content. The
4425 	 *    BPF_F_RDONLY_PROG flag was set at map creation time and stays
4426 	 *    in effect throughout the map's lifetime.
4427 	 * 2) The map value(s) have been initialized from user space by a
4428 	 *    loader and then "frozen", such that no new map update/delete
4429 	 *    operations from syscall side are possible for the rest of
4430 	 *    the map's lifetime from that point onwards.
4431 	 * 3) Any parallel/pending map update/delete operations from syscall
4432 	 *    side have been completed. Only after that point, it's safe to
4433 	 *    assume that map value(s) are immutable.
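	 *
	 * For example (typical loader flow, not enforced here): create the
	 * map with BPF_F_RDONLY_PROG, populate it from user space, then
	 * issue BPF_MAP_FREEZE so no further syscall-side writes can occur.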
4434 	 */
4435 	return (map->map_flags & BPF_F_RDONLY_PROG) &&
4436 	       READ_ONCE(map->frozen) &&
4437 	       !bpf_map_write_active(map);
4438 }
4439 
4440 static int bpf_map_direct_read(struct bpf_map *map, int off, int size, u64 *val)
4441 {
4442 	void *ptr;
4443 	u64 addr;
4444 	int err;
4445 
4446 	err = map->ops->map_direct_value_addr(map, &addr, off);
4447 	if (err)
4448 		return err;
4449 	ptr = (void *)(long)addr + off;
4450 
4451 	switch (size) {
4452 	case sizeof(u8):
4453 		*val = (u64)*(u8 *)ptr;
4454 		break;
4455 	case sizeof(u16):
4456 		*val = (u64)*(u16 *)ptr;
4457 		break;
4458 	case sizeof(u32):
4459 		*val = (u64)*(u32 *)ptr;
4460 		break;
4461 	case sizeof(u64):
4462 		*val = *(u64 *)ptr;
4463 		break;
4464 	default:
4465 		return -EINVAL;
4466 	}
4467 	return 0;
4468 }
4469 
4470 static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
4471 				   struct bpf_reg_state *regs,
4472 				   int regno, int off, int size,
4473 				   enum bpf_access_type atype,
4474 				   int value_regno)
4475 {
4476 	struct bpf_reg_state *reg = regs + regno;
4477 	const struct btf_type *t = btf_type_by_id(reg->btf, reg->btf_id);
4478 	const char *tname = btf_name_by_offset(reg->btf, t->name_off);
4479 	enum bpf_type_flag flag = 0;
4480 	u32 btf_id;
4481 	int ret;
4482 
4483 	if (off < 0) {
4484 		verbose(env,
4485 			"R%d is ptr_%s invalid negative access: off=%d\n",
4486 			regno, tname, off);
4487 		return -EACCES;
4488 	}
4489 	if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
4490 		char tn_buf[48];
4491 
4492 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
4493 		verbose(env,
4494 			"R%d is ptr_%s invalid variable offset: off=%d, var_off=%s\n",
4495 			regno, tname, off, tn_buf);
4496 		return -EACCES;
4497 	}
4498 
4499 	if (reg->type & MEM_USER) {
4500 		verbose(env,
4501 			"R%d is ptr_%s access user memory: off=%d\n",
4502 			regno, tname, off);
4503 		return -EACCES;
4504 	}
4505 
4506 	if (reg->type & MEM_PERCPU) {
4507 		verbose(env,
4508 			"R%d is ptr_%s access percpu memory: off=%d\n",
4509 			regno, tname, off);
4510 		return -EACCES;
4511 	}
4512 
4513 	if (env->ops->btf_struct_access) {
4514 		ret = env->ops->btf_struct_access(&env->log, reg->btf, t,
4515 						  off, size, atype, &btf_id, &flag);
4516 	} else {
4517 		if (atype != BPF_READ) {
4518 			verbose(env, "only read is supported\n");
4519 			return -EACCES;
4520 		}
4521 
4522 		ret = btf_struct_access(&env->log, reg->btf, t, off, size,
4523 					atype, &btf_id, &flag);
4524 	}
4525 
4526 	if (ret < 0)
4527 		return ret;
4528 
4529 	/* If this is an untrusted pointer, all pointers formed by walking it
4530 	 * also inherit the untrusted flag.
4531 	 */
4532 	if (type_flag(reg->type) & PTR_UNTRUSTED)
4533 		flag |= PTR_UNTRUSTED;
4534 
4535 	if (atype == BPF_READ && value_regno >= 0)
4536 		mark_btf_ld_reg(env, regs, value_regno, ret, reg->btf, btf_id, flag);
4537 
4538 	return 0;
4539 }
4540 
4541 static int check_ptr_to_map_access(struct bpf_verifier_env *env,
4542 				   struct bpf_reg_state *regs,
4543 				   int regno, int off, int size,
4544 				   enum bpf_access_type atype,
4545 				   int value_regno)
4546 {
4547 	struct bpf_reg_state *reg = regs + regno;
4548 	struct bpf_map *map = reg->map_ptr;
4549 	enum bpf_type_flag flag = 0;
4550 	const struct btf_type *t;
4551 	const char *tname;
4552 	u32 btf_id;
4553 	int ret;
4554 
4555 	if (!btf_vmlinux) {
4556 		verbose(env, "map_ptr access not supported without CONFIG_DEBUG_INFO_BTF\n");
4557 		return -ENOTSUPP;
4558 	}
4559 
4560 	if (!map->ops->map_btf_id || !*map->ops->map_btf_id) {
4561 		verbose(env, "map_ptr access not supported for map type %d\n",
4562 			map->map_type);
4563 		return -ENOTSUPP;
4564 	}
4565 
4566 	t = btf_type_by_id(btf_vmlinux, *map->ops->map_btf_id);
4567 	tname = btf_name_by_offset(btf_vmlinux, t->name_off);
4568 
4569 	if (!env->allow_ptr_to_map_access) {
4570 		verbose(env,
4571 			"%s access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN\n",
4572 			tname);
4573 		return -EPERM;
4574 	}
4575 
4576 	if (off < 0) {
4577 		verbose(env, "R%d is %s invalid negative access: off=%d\n",
4578 			regno, tname, off);
4579 		return -EACCES;
4580 	}
4581 
4582 	if (atype != BPF_READ) {
4583 		verbose(env, "only read from %s is supported\n", tname);
4584 		return -EACCES;
4585 	}
4586 
4587 	ret = btf_struct_access(&env->log, btf_vmlinux, t, off, size, atype, &btf_id, &flag);
4588 	if (ret < 0)
4589 		return ret;
4590 
4591 	if (value_regno >= 0)
4592 		mark_btf_ld_reg(env, regs, value_regno, ret, btf_vmlinux, btf_id, flag);
4593 
4594 	return 0;
4595 }
4596 
4597 /* Check that the stack access at the given offset is within bounds. The
4598  * maximum valid offset is -1.
4599  *
4600  * The minimum valid offset is -MAX_BPF_STACK for writes, and
4601  * -state->allocated_stack for reads.
4602  */
4603 static int check_stack_slot_within_bounds(int off,
4604 					  struct bpf_func_state *state,
4605 					  enum bpf_access_type t)
4606 {
4607 	int min_valid_off;
4608 
4609 	if (t == BPF_WRITE)
4610 		min_valid_off = -MAX_BPF_STACK;
4611 	else
4612 		min_valid_off = -state->allocated_stack;
4613 
4614 	if (off < min_valid_off || off > -1)
4615 		return -EACCES;
4616 	return 0;
4617 }
4618 
4619 /* Check that the stack access at 'regno + off' falls within the maximum stack
4620  * bounds.
4621  *
4622  * 'off' includes `regno->offset`, but not its dynamic part (if any).
4623  */
4624 static int check_stack_access_within_bounds(
4625 		struct bpf_verifier_env *env,
4626 		int regno, int off, int access_size,
4627 		enum bpf_access_src src, enum bpf_access_type type)
4628 {
4629 	struct bpf_reg_state *regs = cur_regs(env);
4630 	struct bpf_reg_state *reg = regs + regno;
4631 	struct bpf_func_state *state = func(env, reg);
4632 	int min_off, max_off;
4633 	int err;
4634 	char *err_extra;
4635 
4636 	if (src == ACCESS_HELPER)
4637 		/* We don't know if helpers are reading or writing (or both). */
4638 		err_extra = " indirect access to";
4639 	else if (type == BPF_READ)
4640 		err_extra = " read from";
4641 	else
4642 		err_extra = " write to";
4643 
4644 	if (tnum_is_const(reg->var_off)) {
4645 		min_off = reg->var_off.value + off;
4646 		if (access_size > 0)
4647 			max_off = min_off + access_size - 1;
4648 		else
4649 			max_off = min_off;
4650 	} else {
4651 		if (reg->smax_value >= BPF_MAX_VAR_OFF ||
4652 		    reg->smin_value <= -BPF_MAX_VAR_OFF) {
4653 			verbose(env, "invalid unbounded variable-offset%s stack R%d\n",
4654 				err_extra, regno);
4655 			return -EACCES;
4656 		}
4657 		min_off = reg->smin_value + off;
4658 		if (access_size > 0)
4659 			max_off = reg->smax_value + off + access_size - 1;
4660 		else
4661 			max_off = min_off;
4662 	}
4663 
4664 	err = check_stack_slot_within_bounds(min_off, state, type);
4665 	if (!err)
4666 		err = check_stack_slot_within_bounds(max_off, state, type);
4667 
4668 	if (err) {
4669 		if (tnum_is_const(reg->var_off)) {
4670 			verbose(env, "invalid%s stack R%d off=%d size=%d\n",
4671 				err_extra, regno, off, access_size);
4672 		} else {
4673 			char tn_buf[48];
4674 
4675 			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
4676 			verbose(env, "invalid variable-offset%s stack R%d var_off=%s size=%d\n",
4677 				err_extra, regno, tn_buf, access_size);
4678 		}
4679 	}
4680 	return err;
4681 }
4682 
4683 /* check whether memory at (regno + off) is accessible for t = (read | write)
4684  * if t==write, value_regno is a register whose value is stored into memory
4685  * if t==read, value_regno is a register which will receive the value from memory
4686  * if t==write && value_regno==-1, some unknown value is stored into memory
4687  * if t==read && value_regno==-1, don't care what we read from memory
4688  */
4689 static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno,
4690 			    int off, int bpf_size, enum bpf_access_type t,
4691 			    int value_regno, bool strict_alignment_once)
4692 {
4693 	struct bpf_reg_state *regs = cur_regs(env);
4694 	struct bpf_reg_state *reg = regs + regno;
4695 	struct bpf_func_state *state;
4696 	int size, err = 0;
4697 
4698 	size = bpf_size_to_bytes(bpf_size);
4699 	if (size < 0)
4700 		return size;
4701 
4702 	/* alignment checks will add in reg->off themselves */
4703 	err = check_ptr_alignment(env, reg, off, size, strict_alignment_once);
4704 	if (err)
4705 		return err;
4706 
4707 	/* for access checks, reg->off is just part of off */
4708 	off += reg->off;
4709 
4710 	if (reg->type == PTR_TO_MAP_KEY) {
4711 		if (t == BPF_WRITE) {
4712 			verbose(env, "write to change key R%d not allowed\n", regno);
4713 			return -EACCES;
4714 		}
4715 
4716 		err = check_mem_region_access(env, regno, off, size,
4717 					      reg->map_ptr->key_size, false);
4718 		if (err)
4719 			return err;
4720 		if (value_regno >= 0)
4721 			mark_reg_unknown(env, regs, value_regno);
4722 	} else if (reg->type == PTR_TO_MAP_VALUE) {
4723 		struct bpf_map_value_off_desc *kptr_off_desc = NULL;
4724 
4725 		if (t == BPF_WRITE && value_regno >= 0 &&
4726 		    is_pointer_value(env, value_regno)) {
4727 			verbose(env, "R%d leaks addr into map\n", value_regno);
4728 			return -EACCES;
4729 		}
4730 		err = check_map_access_type(env, regno, off, size, t);
4731 		if (err)
4732 			return err;
4733 		err = check_map_access(env, regno, off, size, false, ACCESS_DIRECT);
4734 		if (err)
4735 			return err;
4736 		if (tnum_is_const(reg->var_off))
4737 			kptr_off_desc = bpf_map_kptr_off_contains(reg->map_ptr,
4738 								  off + reg->var_off.value);
4739 		if (kptr_off_desc) {
4740 			err = check_map_kptr_access(env, regno, value_regno, insn_idx,
4741 						    kptr_off_desc);
4742 		} else if (t == BPF_READ && value_regno >= 0) {
4743 			struct bpf_map *map = reg->map_ptr;
4744 
4745 			/* if map is read-only, track its contents as scalars */
4746 			if (tnum_is_const(reg->var_off) &&
4747 			    bpf_map_is_rdonly(map) &&
4748 			    map->ops->map_direct_value_addr) {
4749 				int map_off = off + reg->var_off.value;
4750 				u64 val = 0;
4751 
4752 				err = bpf_map_direct_read(map, map_off, size,
4753 							  &val);
4754 				if (err)
4755 					return err;
4756 
4757 				regs[value_regno].type = SCALAR_VALUE;
4758 				__mark_reg_known(&regs[value_regno], val);
4759 			} else {
4760 				mark_reg_unknown(env, regs, value_regno);
4761 			}
4762 		}
4763 	} else if (base_type(reg->type) == PTR_TO_MEM) {
4764 		bool rdonly_mem = type_is_rdonly_mem(reg->type);
4765 
4766 		if (type_may_be_null(reg->type)) {
4767 			verbose(env, "R%d invalid mem access '%s'\n", regno,
4768 				reg_type_str(env, reg->type));
4769 			return -EACCES;
4770 		}
4771 
4772 		if (t == BPF_WRITE && rdonly_mem) {
4773 			verbose(env, "R%d cannot write into %s\n",
4774 				regno, reg_type_str(env, reg->type));
4775 			return -EACCES;
4776 		}
4777 
4778 		if (t == BPF_WRITE && value_regno >= 0 &&
4779 		    is_pointer_value(env, value_regno)) {
4780 			verbose(env, "R%d leaks addr into mem\n", value_regno);
4781 			return -EACCES;
4782 		}
4783 
4784 		err = check_mem_region_access(env, regno, off, size,
4785 					      reg->mem_size, false);
4786 		if (!err && value_regno >= 0 && (t == BPF_READ || rdonly_mem))
4787 			mark_reg_unknown(env, regs, value_regno);
4788 	} else if (reg->type == PTR_TO_CTX) {
4789 		enum bpf_reg_type reg_type = SCALAR_VALUE;
4790 		struct btf *btf = NULL;
4791 		u32 btf_id = 0;
4792 
4793 		if (t == BPF_WRITE && value_regno >= 0 &&
4794 		    is_pointer_value(env, value_regno)) {
4795 			verbose(env, "R%d leaks addr into ctx\n", value_regno);
4796 			return -EACCES;
4797 		}
4798 
4799 		err = check_ptr_off_reg(env, reg, regno);
4800 		if (err < 0)
4801 			return err;
4802 
4803 		err = check_ctx_access(env, insn_idx, off, size, t, &reg_type, &btf,
4804 				       &btf_id);
4805 		if (err)
4806 			verbose_linfo(env, insn_idx, "; ");
4807 		if (!err && t == BPF_READ && value_regno >= 0) {
4808 			/* ctx access returns either a scalar, or a
4809 			 * PTR_TO_PACKET[_META,_END]. In the latter
4810 			 * case, we know the offset is zero.
4811 			 */
4812 			if (reg_type == SCALAR_VALUE) {
4813 				mark_reg_unknown(env, regs, value_regno);
4814 			} else {
4815 				mark_reg_known_zero(env, regs,
4816 						    value_regno);
4817 				if (type_may_be_null(reg_type))
4818 					regs[value_regno].id = ++env->id_gen;
4819 				/* A load of ctx field could have different
4820 				 * actual load size with the one encoded in the
4821 				 * insn. When the dst is PTR, it is for sure not
4822 				 * a sub-register.
4823 				 */
4824 				regs[value_regno].subreg_def = DEF_NOT_SUBREG;
4825 				if (base_type(reg_type) == PTR_TO_BTF_ID) {
4826 					regs[value_regno].btf = btf;
4827 					regs[value_regno].btf_id = btf_id;
4828 				}
4829 			}
4830 			regs[value_regno].type = reg_type;
4831 		}
4832 
4833 	} else if (reg->type == PTR_TO_STACK) {
4834 		/* Basic bounds checks. */
4835 		err = check_stack_access_within_bounds(env, regno, off, size, ACCESS_DIRECT, t);
4836 		if (err)
4837 			return err;
4838 
4839 		state = func(env, reg);
4840 		err = update_stack_depth(env, state, off);
4841 		if (err)
4842 			return err;
4843 
4844 		if (t == BPF_READ)
4845 			err = check_stack_read(env, regno, off, size,
4846 					       value_regno);
4847 		else
4848 			err = check_stack_write(env, regno, off, size,
4849 						value_regno, insn_idx);
4850 	} else if (reg_is_pkt_pointer(reg)) {
4851 		if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) {
4852 			verbose(env, "cannot write into packet\n");
4853 			return -EACCES;
4854 		}
4855 		if (t == BPF_WRITE && value_regno >= 0 &&
4856 		    is_pointer_value(env, value_regno)) {
4857 			verbose(env, "R%d leaks addr into packet\n",
4858 				value_regno);
4859 			return -EACCES;
4860 		}
4861 		err = check_packet_access(env, regno, off, size, false);
4862 		if (!err && t == BPF_READ && value_regno >= 0)
4863 			mark_reg_unknown(env, regs, value_regno);
4864 	} else if (reg->type == PTR_TO_FLOW_KEYS) {
4865 		if (t == BPF_WRITE && value_regno >= 0 &&
4866 		    is_pointer_value(env, value_regno)) {
4867 			verbose(env, "R%d leaks addr into flow keys\n",
4868 				value_regno);
4869 			return -EACCES;
4870 		}
4871 
4872 		err = check_flow_keys_access(env, off, size);
4873 		if (!err && t == BPF_READ && value_regno >= 0)
4874 			mark_reg_unknown(env, regs, value_regno);
4875 	} else if (type_is_sk_pointer(reg->type)) {
4876 		if (t == BPF_WRITE) {
4877 			verbose(env, "R%d cannot write into %s\n",
4878 				regno, reg_type_str(env, reg->type));
4879 			return -EACCES;
4880 		}
4881 		err = check_sock_access(env, insn_idx, regno, off, size, t);
4882 		if (!err && value_regno >= 0)
4883 			mark_reg_unknown(env, regs, value_regno);
4884 	} else if (reg->type == PTR_TO_TP_BUFFER) {
4885 		err = check_tp_buffer_access(env, reg, regno, off, size);
4886 		if (!err && t == BPF_READ && value_regno >= 0)
4887 			mark_reg_unknown(env, regs, value_regno);
4888 	} else if (base_type(reg->type) == PTR_TO_BTF_ID &&
4889 		   !type_may_be_null(reg->type)) {
4890 		err = check_ptr_to_btf_access(env, regs, regno, off, size, t,
4891 					      value_regno);
4892 	} else if (reg->type == CONST_PTR_TO_MAP) {
4893 		err = check_ptr_to_map_access(env, regs, regno, off, size, t,
4894 					      value_regno);
4895 	} else if (base_type(reg->type) == PTR_TO_BUF) {
4896 		bool rdonly_mem = type_is_rdonly_mem(reg->type);
4897 		u32 *max_access;
4898 
4899 		if (rdonly_mem) {
4900 			if (t == BPF_WRITE) {
4901 				verbose(env, "R%d cannot write into %s\n",
4902 					regno, reg_type_str(env, reg->type));
4903 				return -EACCES;
4904 			}
4905 			max_access = &env->prog->aux->max_rdonly_access;
4906 		} else {
4907 			max_access = &env->prog->aux->max_rdwr_access;
4908 		}
4909 
4910 		err = check_buffer_access(env, reg, regno, off, size, false,
4911 					  max_access);
4912 
4913 		if (!err && value_regno >= 0 && (rdonly_mem || t == BPF_READ))
4914 			mark_reg_unknown(env, regs, value_regno);
4915 	} else {
4916 		verbose(env, "R%d invalid mem access '%s'\n", regno,
4917 			reg_type_str(env, reg->type));
4918 		return -EACCES;
4919 	}
4920 
4921 	if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ &&
4922 	    regs[value_regno].type == SCALAR_VALUE) {
4923 		/* b/h/w load zero-extends, mark upper bits as known 0 */
4924 		coerce_reg_to_size(&regs[value_regno], size);
4925 	}
4926 	return err;
4927 }
4928 
4929 static int check_atomic(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn)
4930 {
4931 	int load_reg;
4932 	int err;
4933 
4934 	switch (insn->imm) {
4935 	case BPF_ADD:
4936 	case BPF_ADD | BPF_FETCH:
4937 	case BPF_AND:
4938 	case BPF_AND | BPF_FETCH:
4939 	case BPF_OR:
4940 	case BPF_OR | BPF_FETCH:
4941 	case BPF_XOR:
4942 	case BPF_XOR | BPF_FETCH:
4943 	case BPF_XCHG:
4944 	case BPF_CMPXCHG:
4945 		break;
4946 	default:
4947 		verbose(env, "BPF_ATOMIC uses invalid atomic opcode %02x\n", insn->imm);
4948 		return -EINVAL;
4949 	}
4950 
4951 	if (BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) {
4952 		verbose(env, "invalid atomic operand size\n");
4953 		return -EINVAL;
4954 	}
4955 
4956 	/* check src1 operand */
4957 	err = check_reg_arg(env, insn->src_reg, SRC_OP);
4958 	if (err)
4959 		return err;
4960 
4961 	/* check src2 operand */
4962 	err = check_reg_arg(env, insn->dst_reg, SRC_OP);
4963 	if (err)
4964 		return err;
4965 
4966 	if (insn->imm == BPF_CMPXCHG) {
4967 		/* Check comparison of R0 with memory location */
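		/* Note (for context): BPF_CMPXCHG compares R0 with the value at
		 * dst_reg + off and, on a match, stores src_reg there; R0 always
		 * receives the old memory value afterwards.
		 */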
4968 		const u32 aux_reg = BPF_REG_0;
4969 
4970 		err = check_reg_arg(env, aux_reg, SRC_OP);
4971 		if (err)
4972 			return err;
4973 
4974 		if (is_pointer_value(env, aux_reg)) {
4975 			verbose(env, "R%d leaks addr into mem\n", aux_reg);
4976 			return -EACCES;
4977 		}
4978 	}
4979 
4980 	if (is_pointer_value(env, insn->src_reg)) {
4981 		verbose(env, "R%d leaks addr into mem\n", insn->src_reg);
4982 		return -EACCES;
4983 	}
4984 
4985 	if (is_ctx_reg(env, insn->dst_reg) ||
4986 	    is_pkt_reg(env, insn->dst_reg) ||
4987 	    is_flow_key_reg(env, insn->dst_reg) ||
4988 	    is_sk_reg(env, insn->dst_reg)) {
4989 		verbose(env, "BPF_ATOMIC stores into R%d %s is not allowed\n",
4990 			insn->dst_reg,
4991 			reg_type_str(env, reg_state(env, insn->dst_reg)->type));
4992 		return -EACCES;
4993 	}
4994 
4995 	if (insn->imm & BPF_FETCH) {
4996 		if (insn->imm == BPF_CMPXCHG)
4997 			load_reg = BPF_REG_0;
4998 		else
4999 			load_reg = insn->src_reg;
5000 
5001 		/* check and record load of old value */
5002 		err = check_reg_arg(env, load_reg, DST_OP);
5003 		if (err)
5004 			return err;
5005 	} else {
5006 		/* This instruction accesses a memory location but doesn't
5007 		 * actually load it into a register.
5008 		 */
5009 		load_reg = -1;
5010 	}
5011 
5012 	/* Check whether we can read the memory, with second call for fetch
5013 	 * case to simulate the register fill.
5014 	 */
5015 	err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
5016 			       BPF_SIZE(insn->code), BPF_READ, -1, true);
5017 	if (!err && load_reg >= 0)
5018 		err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
5019 				       BPF_SIZE(insn->code), BPF_READ, load_reg,
5020 				       true);
5021 	if (err)
5022 		return err;
5023 
5024 	/* Check whether we can write into the same memory. */
5025 	err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
5026 			       BPF_SIZE(insn->code), BPF_WRITE, -1, true);
5027 	if (err)
5028 		return err;
5029 
5030 	return 0;
5031 }
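
/* Illustrative sketch only (not verifier logic): a fetch-style atomic such as
 *
 *     BPF_ATOMIC_OP(BPF_DW, BPF_ADD | BPF_FETCH, BPF_REG_10, BPF_REG_1, -8)
 *
 * atomically adds R1 to the 8-byte stack slot at fp-8 and loads the old value
 * back into R1. That is why check_atomic() above validates both a read and a
 * write of the destination memory and, for the fetch case, simulates the
 * register fill with a second check_mem_access() call.
 */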
5032 
5033 /* When register 'regno' is used to read the stack (either directly or through
5034  * a helper function) make sure that it's within stack boundary and, depending
5035  * on the access type, that all elements of the stack are initialized.
5036  *
5037  * 'off' includes 'regno->off', but not its dynamic part (if any).
5038  *
5039  * All registers that have been spilled on the stack in the slots within the
5040  * read offsets are marked as read.
5041  */
5042 static int check_stack_range_initialized(
5043 		struct bpf_verifier_env *env, int regno, int off,
5044 		int access_size, bool zero_size_allowed,
5045 		enum bpf_access_src type, struct bpf_call_arg_meta *meta)
5046 {
5047 	struct bpf_reg_state *reg = reg_state(env, regno);
5048 	struct bpf_func_state *state = func(env, reg);
5049 	int err, min_off, max_off, i, j, slot, spi;
5050 	char *err_extra = type == ACCESS_HELPER ? " indirect" : "";
5051 	enum bpf_access_type bounds_check_type;
5052 	/* Some accesses can write anything into the stack, others are
5053 	 * read-only.
5054 	 */
5055 	bool clobber = false;
5056 
5057 	if (access_size == 0 && !zero_size_allowed) {
5058 		verbose(env, "invalid zero-sized read\n");
5059 		return -EACCES;
5060 	}
5061 
5062 	if (type == ACCESS_HELPER) {
5063 		/* The bounds checks for writes are more permissive than for
5064 		 * reads. However, if raw_mode is not set, we'll do extra
5065 		 * checks below.
5066 		 */
5067 		bounds_check_type = BPF_WRITE;
5068 		clobber = true;
5069 	} else {
5070 		bounds_check_type = BPF_READ;
5071 	}
5072 	err = check_stack_access_within_bounds(env, regno, off, access_size,
5073 					       type, bounds_check_type);
5074 	if (err)
5075 		return err;
5076 
5077 
5078 	if (tnum_is_const(reg->var_off)) {
5079 		min_off = max_off = reg->var_off.value + off;
5080 	} else {
5081 		/* Variable offset is prohibited for unprivileged mode for
5082 		 * simplicity since it requires corresponding support in
5083 		 * Spectre masking for stack ALU.
5084 		 * See also retrieve_ptr_limit().
5085 		 */
5086 		if (!env->bypass_spec_v1) {
5087 			char tn_buf[48];
5088 
5089 			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
5090 			verbose(env, "R%d%s variable offset stack access prohibited for !root, var_off=%s\n",
5091 				regno, err_extra, tn_buf);
5092 			return -EACCES;
5093 		}
5094 		/* Only an initialized buffer on the stack may be accessed with a
5095 		 * variable offset. With an uninitialized buffer it's hard to
5096 		 * guarantee that the whole memory is marked as initialized on
5097 		 * helper return, since the specific bounds are unknown, which
5098 		 * could leak uninitialized stack memory.
5099 		 */
5100 		if (meta && meta->raw_mode)
5101 			meta = NULL;
5102 
5103 		min_off = reg->smin_value + off;
5104 		max_off = reg->smax_value + off;
5105 	}
5106 
5107 	if (meta && meta->raw_mode) {
5108 		meta->access_size = access_size;
5109 		meta->regno = regno;
5110 		return 0;
5111 	}
5112 
5113 	for (i = min_off; i < max_off + access_size; i++) {
5114 		u8 *stype;
5115 
5116 		slot = -i - 1;
5117 		spi = slot / BPF_REG_SIZE;
5118 		if (state->allocated_stack <= slot)
5119 			goto err;
5120 		stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
5121 		if (*stype == STACK_MISC)
5122 			goto mark;
5123 		if (*stype == STACK_ZERO) {
5124 			if (clobber) {
5125 				/* helper can write anything into the stack */
5126 				*stype = STACK_MISC;
5127 			}
5128 			goto mark;
5129 		}
5130 
5131 		if (is_spilled_reg(&state->stack[spi]) &&
5132 		    base_type(state->stack[spi].spilled_ptr.type) == PTR_TO_BTF_ID)
5133 			goto mark;
5134 
5135 		if (is_spilled_reg(&state->stack[spi]) &&
5136 		    (state->stack[spi].spilled_ptr.type == SCALAR_VALUE ||
5137 		     env->allow_ptr_leaks)) {
5138 			if (clobber) {
5139 				__mark_reg_unknown(env, &state->stack[spi].spilled_ptr);
5140 				for (j = 0; j < BPF_REG_SIZE; j++)
5141 					scrub_spilled_slot(&state->stack[spi].slot_type[j]);
5142 			}
5143 			goto mark;
5144 		}
5145 
5146 err:
5147 		if (tnum_is_const(reg->var_off)) {
5148 			verbose(env, "invalid%s read from stack R%d off %d+%d size %d\n",
5149 				err_extra, regno, min_off, i - min_off, access_size);
5150 		} else {
5151 			char tn_buf[48];
5152 
5153 			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
5154 			verbose(env, "invalid%s read from stack R%d var_off %s+%d size %d\n",
5155 				err_extra, regno, tn_buf, i - min_off, access_size);
5156 		}
5157 		return -EACCES;
5158 mark:
5159 		/* reading any byte out of 8-byte 'spill_slot' will cause
5160 		 * the whole slot to be marked as 'read'
5161 		 */
5162 		mark_reg_read(env, &state->stack[spi].spilled_ptr,
5163 			      state->stack[spi].spilled_ptr.parent,
5164 			      REG_LIVE_READ64);
5165 	}
5166 	return update_stack_depth(env, state, min_off);
5167 }
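
/* Illustrative sketch (assumed BPF C, hypothetical map name): for a call like
 *
 *     long key = 42;
 *     bpf_map_lookup_elem(&my_map, &key);
 *
 * &key is a PTR_TO_STACK argument of map->key_size bytes. It reaches
 * check_stack_range_initialized() via check_helper_mem_access() below, which
 * walks every byte of the range and requires each stack slot to be
 * STACK_MISC, STACK_ZERO or a spilled register before the helper may read it.
 */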
5168 
5169 static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
5170 				   int access_size, bool zero_size_allowed,
5171 				   struct bpf_call_arg_meta *meta)
5172 {
5173 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
5174 	u32 *max_access;
5175 
5176 	switch (base_type(reg->type)) {
5177 	case PTR_TO_PACKET:
5178 	case PTR_TO_PACKET_META:
5179 		return check_packet_access(env, regno, reg->off, access_size,
5180 					   zero_size_allowed);
5181 	case PTR_TO_MAP_KEY:
5182 		if (meta && meta->raw_mode) {
5183 			verbose(env, "R%d cannot write into %s\n", regno,
5184 				reg_type_str(env, reg->type));
5185 			return -EACCES;
5186 		}
5187 		return check_mem_region_access(env, regno, reg->off, access_size,
5188 					       reg->map_ptr->key_size, false);
5189 	case PTR_TO_MAP_VALUE:
5190 		if (check_map_access_type(env, regno, reg->off, access_size,
5191 					  meta && meta->raw_mode ? BPF_WRITE :
5192 					  BPF_READ))
5193 			return -EACCES;
5194 		return check_map_access(env, regno, reg->off, access_size,
5195 					zero_size_allowed, ACCESS_HELPER);
5196 	case PTR_TO_MEM:
5197 		if (type_is_rdonly_mem(reg->type)) {
5198 			if (meta && meta->raw_mode) {
5199 				verbose(env, "R%d cannot write into %s\n", regno,
5200 					reg_type_str(env, reg->type));
5201 				return -EACCES;
5202 			}
5203 		}
5204 		return check_mem_region_access(env, regno, reg->off,
5205 					       access_size, reg->mem_size,
5206 					       zero_size_allowed);
5207 	case PTR_TO_BUF:
5208 		if (type_is_rdonly_mem(reg->type)) {
5209 			if (meta && meta->raw_mode) {
5210 				verbose(env, "R%d cannot write into %s\n", regno,
5211 					reg_type_str(env, reg->type));
5212 				return -EACCES;
5213 			}
5214 
5215 			max_access = &env->prog->aux->max_rdonly_access;
5216 		} else {
5217 			max_access = &env->prog->aux->max_rdwr_access;
5218 		}
5219 		return check_buffer_access(env, reg, regno, reg->off,
5220 					   access_size, zero_size_allowed,
5221 					   max_access);
5222 	case PTR_TO_STACK:
5223 		return check_stack_range_initialized(
5224 				env,
5225 				regno, reg->off, access_size,
5226 				zero_size_allowed, ACCESS_HELPER, meta);
5227 	default: /* scalar_value or invalid ptr */
5228 		/* Allow zero-byte read from NULL, regardless of pointer type */
5229 		if (zero_size_allowed && access_size == 0 &&
5230 		    register_is_null(reg))
5231 			return 0;
5232 
5233 		verbose(env, "R%d type=%s ", regno,
5234 			reg_type_str(env, reg->type));
5235 		verbose(env, "expected=%s\n", reg_type_str(env, PTR_TO_STACK));
5236 		return -EACCES;
5237 	}
5238 }
5239 
5240 static int check_mem_size_reg(struct bpf_verifier_env *env,
5241 			      struct bpf_reg_state *reg, u32 regno,
5242 			      bool zero_size_allowed,
5243 			      struct bpf_call_arg_meta *meta)
5244 {
5245 	int err;
5246 
5247 	/* This is used to refine r0 return value bounds for helpers
5248 	 * that enforce this value as an upper bound on return values.
5249 	 * See do_refine_retval_range() for helpers that can refine
5250 	 * the return value. The C type of the helper argument is u32, so we
5251 	 * pull the register bound from umax_value; if it is negative, the
5252 	 * verifier errors out. Only upper bounds can be learned because
5253 	 * retval is an int type and negative retvals are allowed.
5254 	 */
5255 	meta->msize_max_value = reg->umax_value;
5256 
5257 	/* The register is SCALAR_VALUE; the access check
5258 	 * happens using its boundaries.
5259 	 */
5260 	if (!tnum_is_const(reg->var_off))
5261 		/* For unprivileged variable accesses, disable raw
5262 		 * mode so that the program is required to
5263 		 * initialize all the memory that the helper could
5264 		 * just partially fill up.
5265 		 */
5266 		meta = NULL;
5267 
5268 	if (reg->smin_value < 0) {
5269 		verbose(env, "R%d min value is negative, either use unsigned or 'var &= const'\n",
5270 			regno);
5271 		return -EACCES;
5272 	}
5273 
5274 	if (reg->umin_value == 0) {
5275 		err = check_helper_mem_access(env, regno - 1, 0,
5276 					      zero_size_allowed,
5277 					      meta);
5278 		if (err)
5279 			return err;
5280 	}
5281 
5282 	if (reg->umax_value >= BPF_MAX_VAR_SIZ) {
5283 		verbose(env, "R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n",
5284 			regno);
5285 		return -EACCES;
5286 	}
5287 	err = check_helper_mem_access(env, regno - 1,
5288 				      reg->umax_value,
5289 				      zero_size_allowed, meta);
5290 	if (!err)
5291 		err = mark_chain_precision(env, regno);
5292 	return err;
5293 }
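
/* Illustrative sketch (assumed BPF C, hypothetical buffer and map names): a
 * variable size passed to a helper must carry a known upper bound, e.g.
 *
 *     if (len > sizeof(buf))
 *         return 0;
 *     bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, buf, len);
 *
 * After the branch, len has umax_value <= sizeof(buf), so check_mem_size_reg()
 * can validate the whole [buf, buf + len) range; an unbounded len is rejected
 * with the "unbounded memory access" error above.
 */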
5294 
5295 int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
5296 		   u32 regno, u32 mem_size)
5297 {
5298 	bool may_be_null = type_may_be_null(reg->type);
5299 	struct bpf_reg_state saved_reg;
5300 	struct bpf_call_arg_meta meta;
5301 	int err;
5302 
5303 	if (register_is_null(reg))
5304 		return 0;
5305 
5306 	memset(&meta, 0, sizeof(meta));
5307 	/* Assuming that the register contains a value, check if the memory
5308 	 * access is safe. Temporarily save and restore the register's state as
5309 	 * the conversion shouldn't be visible to a caller.
5310 	 */
5311 	if (may_be_null) {
5312 		saved_reg = *reg;
5313 		mark_ptr_not_null_reg(reg);
5314 	}
5315 
5316 	err = check_helper_mem_access(env, regno, mem_size, true, &meta);
5317 	/* Check access for BPF_WRITE */
5318 	meta.raw_mode = true;
5319 	err = err ?: check_helper_mem_access(env, regno, mem_size, true, &meta);
5320 
5321 	if (may_be_null)
5322 		*reg = saved_reg;
5323 
5324 	return err;
5325 }
5326 
5327 int check_kfunc_mem_size_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
5328 			     u32 regno)
5329 {
5330 	struct bpf_reg_state *mem_reg = &cur_regs(env)[regno - 1];
5331 	bool may_be_null = type_may_be_null(mem_reg->type);
5332 	struct bpf_reg_state saved_reg;
5333 	struct bpf_call_arg_meta meta;
5334 	int err;
5335 
5336 	WARN_ON_ONCE(regno < BPF_REG_2 || regno > BPF_REG_5);
5337 
5338 	memset(&meta, 0, sizeof(meta));
5339 
5340 	if (may_be_null) {
5341 		saved_reg = *mem_reg;
5342 		mark_ptr_not_null_reg(mem_reg);
5343 	}
5344 
5345 	err = check_mem_size_reg(env, reg, regno, true, &meta);
5346 	/* Check access for BPF_WRITE */
5347 	meta.raw_mode = true;
5348 	err = err ?: check_mem_size_reg(env, reg, regno, true, &meta);
5349 
5350 	if (may_be_null)
5351 		*mem_reg = saved_reg;
5352 	return err;
5353 }
5354 
5355 /* Implementation details:
5356  * bpf_map_lookup returns PTR_TO_MAP_VALUE_OR_NULL
5357  * Two bpf_map_lookups (even with the same key) will have different reg->id.
5358  * For traditional PTR_TO_MAP_VALUE the verifier clears reg->id after
5359  * value_or_null->value transition, since the verifier only cares about
5360  * the range of access to valid map value pointer and doesn't care about actual
5361  * address of the map element.
5362  * For maps with 'struct bpf_spin_lock' inside map value the verifier keeps
5363  * reg->id > 0 after value_or_null->value transition. By doing so
5364  * two bpf_map_lookups will be considered two different pointers that
5365  * point to different bpf_spin_locks.
5366  * The verifier allows taking only one bpf_spin_lock at a time to avoid
5367  * dead-locks.
5368  * Since only one bpf_spin_lock is allowed the checks are simpler than
5369  * reg_is_refcounted() logic. The verifier needs to remember only
5370  * one spin_lock instead of array of acquired_refs.
5371  * cur_state->active_spin_lock remembers which map value element got locked
5372  * and clears it after bpf_spin_unlock.
5373  */
5374 static int process_spin_lock(struct bpf_verifier_env *env, int regno,
5375 			     bool is_lock)
5376 {
5377 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
5378 	struct bpf_verifier_state *cur = env->cur_state;
5379 	bool is_const = tnum_is_const(reg->var_off);
5380 	struct bpf_map *map = reg->map_ptr;
5381 	u64 val = reg->var_off.value;
5382 
5383 	if (!is_const) {
5384 		verbose(env,
5385 			"R%d doesn't have constant offset. bpf_spin_lock has to be at the constant offset\n",
5386 			regno);
5387 		return -EINVAL;
5388 	}
5389 	if (!map->btf) {
5390 		verbose(env,
5391 			"map '%s' has to have BTF in order to use bpf_spin_lock\n",
5392 			map->name);
5393 		return -EINVAL;
5394 	}
5395 	if (!map_value_has_spin_lock(map)) {
5396 		if (map->spin_lock_off == -E2BIG)
5397 			verbose(env,
5398 				"map '%s' has more than one 'struct bpf_spin_lock'\n",
5399 				map->name);
5400 		else if (map->spin_lock_off == -ENOENT)
5401 			verbose(env,
5402 				"map '%s' doesn't have 'struct bpf_spin_lock'\n",
5403 				map->name);
5404 		else
5405 			verbose(env,
5406 				"map '%s' is not a struct type or bpf_spin_lock is mangled\n",
5407 				map->name);
5408 		return -EINVAL;
5409 	}
5410 	if (map->spin_lock_off != val + reg->off) {
5411 		verbose(env, "off %lld doesn't point to 'struct bpf_spin_lock'\n",
5412 			val + reg->off);
5413 		return -EINVAL;
5414 	}
5415 	if (is_lock) {
5416 		if (cur->active_spin_lock) {
5417 			verbose(env,
5418 				"Locking two bpf_spin_locks are not allowed\n");
5419 			return -EINVAL;
5420 		}
5421 		cur->active_spin_lock = reg->id;
5422 	} else {
5423 		if (!cur->active_spin_lock) {
5424 			verbose(env, "bpf_spin_unlock without taking a lock\n");
5425 			return -EINVAL;
5426 		}
5427 		if (cur->active_spin_lock != reg->id) {
5428 			verbose(env, "bpf_spin_unlock of different lock\n");
5429 			return -EINVAL;
5430 		}
5431 		cur->active_spin_lock = 0;
5432 	}
5433 	return 0;
5434 }
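
/* Illustrative sketch (assumed BPF C, hypothetical names) of a map value that
 * satisfies the checks in process_spin_lock() above:
 *
 *     struct val {
 *         int counter;
 *         struct bpf_spin_lock lock;	// exactly one lock, fixed offset
 *     };
 *
 *     struct val *v = bpf_map_lookup_elem(&my_map, &key);
 *     if (v) {
 *         bpf_spin_lock(&v->lock);
 *         v->counter++;
 *         bpf_spin_unlock(&v->lock);
 *     }
 *
 * The map must have BTF so map->spin_lock_off is known, and reg->id of 'v'
 * ties the lock and unlock calls to the same map element.
 */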
5435 
5436 static int process_timer_func(struct bpf_verifier_env *env, int regno,
5437 			      struct bpf_call_arg_meta *meta)
5438 {
5439 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
5440 	bool is_const = tnum_is_const(reg->var_off);
5441 	struct bpf_map *map = reg->map_ptr;
5442 	u64 val = reg->var_off.value;
5443 
5444 	if (!is_const) {
5445 		verbose(env,
5446 			"R%d doesn't have constant offset. bpf_timer has to be at the constant offset\n",
5447 			regno);
5448 		return -EINVAL;
5449 	}
5450 	if (!map->btf) {
5451 		verbose(env, "map '%s' has to have BTF in order to use bpf_timer\n",
5452 			map->name);
5453 		return -EINVAL;
5454 	}
5455 	if (!map_value_has_timer(map)) {
5456 		if (map->timer_off == -E2BIG)
5457 			verbose(env,
5458 				"map '%s' has more than one 'struct bpf_timer'\n",
5459 				map->name);
5460 		else if (map->timer_off == -ENOENT)
5461 			verbose(env,
5462 				"map '%s' doesn't have 'struct bpf_timer'\n",
5463 				map->name);
5464 		else
5465 			verbose(env,
5466 				"map '%s' is not a struct type or bpf_timer is mangled\n",
5467 				map->name);
5468 		return -EINVAL;
5469 	}
5470 	if (map->timer_off != val + reg->off) {
5471 		verbose(env, "off %lld doesn't point to 'struct bpf_timer' that is at %d\n",
5472 			val + reg->off, map->timer_off);
5473 		return -EINVAL;
5474 	}
5475 	if (meta->map_ptr) {
5476 		verbose(env, "verifier bug. Two map pointers in a timer helper\n");
5477 		return -EFAULT;
5478 	}
5479 	meta->map_uid = reg->map_uid;
5480 	meta->map_ptr = map;
5481 	return 0;
5482 }
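
/* Illustrative sketch (assumed BPF C, hypothetical names) of the layout
 * process_timer_func() above expects:
 *
 *     struct elem {
 *         struct bpf_timer t;	// exactly one timer, fixed offset
 *     };
 *
 *     struct elem *e = bpf_map_lookup_elem(&timer_map, &key);
 *     if (e)
 *         bpf_timer_init(&e->t, &timer_map, CLOCK_MONOTONIC);
 *
 * The map needs BTF so that map->timer_off locates 'struct bpf_timer', and the
 * map pointer passed to bpf_timer_init() must match the one the timer lives
 * in (enforced via meta->map_ptr/map_uid in check_func_arg()).
 */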
5483 
5484 static int process_kptr_func(struct bpf_verifier_env *env, int regno,
5485 			     struct bpf_call_arg_meta *meta)
5486 {
5487 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
5488 	struct bpf_map_value_off_desc *off_desc;
5489 	struct bpf_map *map_ptr = reg->map_ptr;
5490 	u32 kptr_off;
5491 	int ret;
5492 
5493 	if (!tnum_is_const(reg->var_off)) {
5494 		verbose(env,
5495 			"R%d doesn't have constant offset. kptr has to be at the constant offset\n",
5496 			regno);
5497 		return -EINVAL;
5498 	}
5499 	if (!map_ptr->btf) {
5500 		verbose(env, "map '%s' has to have BTF in order to use bpf_kptr_xchg\n",
5501 			map_ptr->name);
5502 		return -EINVAL;
5503 	}
5504 	if (!map_value_has_kptrs(map_ptr)) {
5505 		ret = PTR_ERR_OR_ZERO(map_ptr->kptr_off_tab);
5506 		if (ret == -E2BIG)
5507 			verbose(env, "map '%s' has more than %d kptr\n", map_ptr->name,
5508 				BPF_MAP_VALUE_OFF_MAX);
5509 		else if (ret == -EEXIST)
5510 			verbose(env, "map '%s' has repeating kptr BTF tags\n", map_ptr->name);
5511 		else
5512 			verbose(env, "map '%s' has no valid kptr\n", map_ptr->name);
5513 		return -EINVAL;
5514 	}
5515 
5516 	meta->map_ptr = map_ptr;
5517 	kptr_off = reg->off + reg->var_off.value;
5518 	off_desc = bpf_map_kptr_off_contains(map_ptr, kptr_off);
5519 	if (!off_desc) {
5520 		verbose(env, "off=%d doesn't point to kptr\n", kptr_off);
5521 		return -EACCES;
5522 	}
5523 	if (off_desc->type != BPF_KPTR_REF) {
5524 		verbose(env, "off=%d kptr isn't referenced kptr\n", kptr_off);
5525 		return -EACCES;
5526 	}
5527 	meta->kptr_off_desc = off_desc;
5528 	return 0;
5529 }
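
/* Illustrative sketch (assumed BPF C, hypothetical names) of a referenced kptr
 * field that process_kptr_func() above resolves by offset:
 *
 *     struct map_value {
 *         struct task_struct __kptr_ref *task;
 *     };
 *
 *     struct map_value *v = bpf_map_lookup_elem(&kptr_map, &key);
 *     if (v)
 *         old = bpf_kptr_xchg(&v->task, new_task);
 *
 * The __kptr_ref BTF type tag is what creates the BPF_KPTR_REF off_desc entry
 * that bpf_map_kptr_off_contains() looks up for the &v->task offset.
 */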
5530 
5531 static bool arg_type_is_mem_size(enum bpf_arg_type type)
5532 {
5533 	return type == ARG_CONST_SIZE ||
5534 	       type == ARG_CONST_SIZE_OR_ZERO;
5535 }
5536 
5537 static bool arg_type_is_alloc_size(enum bpf_arg_type type)
5538 {
5539 	return type == ARG_CONST_ALLOC_SIZE_OR_ZERO;
5540 }
5541 
5542 static bool arg_type_is_int_ptr(enum bpf_arg_type type)
5543 {
5544 	return type == ARG_PTR_TO_INT ||
5545 	       type == ARG_PTR_TO_LONG;
5546 }
5547 
5548 static bool arg_type_is_release(enum bpf_arg_type type)
5549 {
5550 	return type & OBJ_RELEASE;
5551 }
5552 
5553 static bool arg_type_is_dynptr(enum bpf_arg_type type)
5554 {
5555 	return base_type(type) == ARG_PTR_TO_DYNPTR;
5556 }
5557 
5558 static int int_ptr_type_to_size(enum bpf_arg_type type)
5559 {
5560 	if (type == ARG_PTR_TO_INT)
5561 		return sizeof(u32);
5562 	else if (type == ARG_PTR_TO_LONG)
5563 		return sizeof(u64);
5564 
5565 	return -EINVAL;
5566 }
5567 
5568 static int resolve_map_arg_type(struct bpf_verifier_env *env,
5569 				 const struct bpf_call_arg_meta *meta,
5570 				 enum bpf_arg_type *arg_type)
5571 {
5572 	if (!meta->map_ptr) {
5573 		/* kernel subsystem misconfigured verifier */
5574 		verbose(env, "invalid map_ptr to access map->type\n");
5575 		return -EACCES;
5576 	}
5577 
5578 	switch (meta->map_ptr->map_type) {
5579 	case BPF_MAP_TYPE_SOCKMAP:
5580 	case BPF_MAP_TYPE_SOCKHASH:
5581 		if (*arg_type == ARG_PTR_TO_MAP_VALUE) {
5582 			*arg_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON;
5583 		} else {
5584 			verbose(env, "invalid arg_type for sockmap/sockhash\n");
5585 			return -EINVAL;
5586 		}
5587 		break;
5588 	case BPF_MAP_TYPE_BLOOM_FILTER:
5589 		if (meta->func_id == BPF_FUNC_map_peek_elem)
5590 			*arg_type = ARG_PTR_TO_MAP_VALUE;
5591 		break;
5592 	default:
5593 		break;
5594 	}
5595 	return 0;
5596 }
5597 
5598 struct bpf_reg_types {
5599 	const enum bpf_reg_type types[10];
5600 	u32 *btf_id;
5601 };
5602 
5603 static const struct bpf_reg_types map_key_value_types = {
5604 	.types = {
5605 		PTR_TO_STACK,
5606 		PTR_TO_PACKET,
5607 		PTR_TO_PACKET_META,
5608 		PTR_TO_MAP_KEY,
5609 		PTR_TO_MAP_VALUE,
5610 	},
5611 };
5612 
5613 static const struct bpf_reg_types sock_types = {
5614 	.types = {
5615 		PTR_TO_SOCK_COMMON,
5616 		PTR_TO_SOCKET,
5617 		PTR_TO_TCP_SOCK,
5618 		PTR_TO_XDP_SOCK,
5619 	},
5620 };
5621 
5622 #ifdef CONFIG_NET
5623 static const struct bpf_reg_types btf_id_sock_common_types = {
5624 	.types = {
5625 		PTR_TO_SOCK_COMMON,
5626 		PTR_TO_SOCKET,
5627 		PTR_TO_TCP_SOCK,
5628 		PTR_TO_XDP_SOCK,
5629 		PTR_TO_BTF_ID,
5630 	},
5631 	.btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
5632 };
5633 #endif
5634 
5635 static const struct bpf_reg_types mem_types = {
5636 	.types = {
5637 		PTR_TO_STACK,
5638 		PTR_TO_PACKET,
5639 		PTR_TO_PACKET_META,
5640 		PTR_TO_MAP_KEY,
5641 		PTR_TO_MAP_VALUE,
5642 		PTR_TO_MEM,
5643 		PTR_TO_MEM | MEM_ALLOC,
5644 		PTR_TO_BUF,
5645 	},
5646 };
5647 
5648 static const struct bpf_reg_types int_ptr_types = {
5649 	.types = {
5650 		PTR_TO_STACK,
5651 		PTR_TO_PACKET,
5652 		PTR_TO_PACKET_META,
5653 		PTR_TO_MAP_KEY,
5654 		PTR_TO_MAP_VALUE,
5655 	},
5656 };
5657 
5658 static const struct bpf_reg_types fullsock_types = { .types = { PTR_TO_SOCKET } };
5659 static const struct bpf_reg_types scalar_types = { .types = { SCALAR_VALUE } };
5660 static const struct bpf_reg_types context_types = { .types = { PTR_TO_CTX } };
5661 static const struct bpf_reg_types alloc_mem_types = { .types = { PTR_TO_MEM | MEM_ALLOC } };
5662 static const struct bpf_reg_types const_map_ptr_types = { .types = { CONST_PTR_TO_MAP } };
5663 static const struct bpf_reg_types btf_ptr_types = { .types = { PTR_TO_BTF_ID } };
5664 static const struct bpf_reg_types spin_lock_types = { .types = { PTR_TO_MAP_VALUE } };
5665 static const struct bpf_reg_types percpu_btf_ptr_types = { .types = { PTR_TO_BTF_ID | MEM_PERCPU } };
5666 static const struct bpf_reg_types func_ptr_types = { .types = { PTR_TO_FUNC } };
5667 static const struct bpf_reg_types stack_ptr_types = { .types = { PTR_TO_STACK } };
5668 static const struct bpf_reg_types const_str_ptr_types = { .types = { PTR_TO_MAP_VALUE } };
5669 static const struct bpf_reg_types timer_types = { .types = { PTR_TO_MAP_VALUE } };
5670 static const struct bpf_reg_types kptr_types = { .types = { PTR_TO_MAP_VALUE } };
5671 
5672 static const struct bpf_reg_types *compatible_reg_types[__BPF_ARG_TYPE_MAX] = {
5673 	[ARG_PTR_TO_MAP_KEY]		= &map_key_value_types,
5674 	[ARG_PTR_TO_MAP_VALUE]		= &map_key_value_types,
5675 	[ARG_CONST_SIZE]		= &scalar_types,
5676 	[ARG_CONST_SIZE_OR_ZERO]	= &scalar_types,
5677 	[ARG_CONST_ALLOC_SIZE_OR_ZERO]	= &scalar_types,
5678 	[ARG_CONST_MAP_PTR]		= &const_map_ptr_types,
5679 	[ARG_PTR_TO_CTX]		= &context_types,
5680 	[ARG_PTR_TO_SOCK_COMMON]	= &sock_types,
5681 #ifdef CONFIG_NET
5682 	[ARG_PTR_TO_BTF_ID_SOCK_COMMON]	= &btf_id_sock_common_types,
5683 #endif
5684 	[ARG_PTR_TO_SOCKET]		= &fullsock_types,
5685 	[ARG_PTR_TO_BTF_ID]		= &btf_ptr_types,
5686 	[ARG_PTR_TO_SPIN_LOCK]		= &spin_lock_types,
5687 	[ARG_PTR_TO_MEM]		= &mem_types,
5688 	[ARG_PTR_TO_ALLOC_MEM]		= &alloc_mem_types,
5689 	[ARG_PTR_TO_INT]		= &int_ptr_types,
5690 	[ARG_PTR_TO_LONG]		= &int_ptr_types,
5691 	[ARG_PTR_TO_PERCPU_BTF_ID]	= &percpu_btf_ptr_types,
5692 	[ARG_PTR_TO_FUNC]		= &func_ptr_types,
5693 	[ARG_PTR_TO_STACK]		= &stack_ptr_types,
5694 	[ARG_PTR_TO_CONST_STR]		= &const_str_ptr_types,
5695 	[ARG_PTR_TO_TIMER]		= &timer_types,
5696 	[ARG_PTR_TO_KPTR]		= &kptr_types,
5697 	[ARG_PTR_TO_DYNPTR]		= &stack_ptr_types,
5698 };
5699 
5700 static int check_reg_type(struct bpf_verifier_env *env, u32 regno,
5701 			  enum bpf_arg_type arg_type,
5702 			  const u32 *arg_btf_id,
5703 			  struct bpf_call_arg_meta *meta)
5704 {
5705 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
5706 	enum bpf_reg_type expected, type = reg->type;
5707 	const struct bpf_reg_types *compatible;
5708 	int i, j;
5709 
5710 	compatible = compatible_reg_types[base_type(arg_type)];
5711 	if (!compatible) {
5712 		verbose(env, "verifier internal error: unsupported arg type %d\n", arg_type);
5713 		return -EFAULT;
5714 	}
5715 
5716 	/* ARG_PTR_TO_MEM + RDONLY is compatible with PTR_TO_MEM and PTR_TO_MEM + RDONLY,
5717 	 * but ARG_PTR_TO_MEM is compatible only with PTR_TO_MEM and NOT with PTR_TO_MEM + RDONLY
5718 	 *
5719 	 * Same for MAYBE_NULL:
5720 	 *
5721 	 * ARG_PTR_TO_MEM + MAYBE_NULL is compatible with PTR_TO_MEM and PTR_TO_MEM + MAYBE_NULL,
5722 	 * but ARG_PTR_TO_MEM is compatible only with PTR_TO_MEM but NOT with PTR_TO_MEM + MAYBE_NULL
5723 	 *
5724 	 * Therefore we fold these flags depending on the arg_type before comparison.
5725 	 */
5726 	if (arg_type & MEM_RDONLY)
5727 		type &= ~MEM_RDONLY;
5728 	if (arg_type & PTR_MAYBE_NULL)
5729 		type &= ~PTR_MAYBE_NULL;
5730 
5731 	for (i = 0; i < ARRAY_SIZE(compatible->types); i++) {
5732 		expected = compatible->types[i];
5733 		if (expected == NOT_INIT)
5734 			break;
5735 
5736 		if (type == expected)
5737 			goto found;
5738 	}
5739 
5740 	verbose(env, "R%d type=%s expected=", regno, reg_type_str(env, reg->type));
5741 	for (j = 0; j + 1 < i; j++)
5742 		verbose(env, "%s, ", reg_type_str(env, compatible->types[j]));
5743 	verbose(env, "%s\n", reg_type_str(env, compatible->types[j]));
5744 	return -EACCES;
5745 
5746 found:
5747 	if (reg->type == PTR_TO_BTF_ID) {
5748 		/* For bpf_sk_release, it needs to match against first member
5749 		 * 'struct sock_common', hence make an exception for it. This
5750 		 * allows bpf_sk_release to work for multiple socket types.
5751 		 */
5752 		bool strict_type_match = arg_type_is_release(arg_type) &&
5753 					 meta->func_id != BPF_FUNC_sk_release;
5754 
5755 		if (!arg_btf_id) {
5756 			if (!compatible->btf_id) {
5757 				verbose(env, "verifier internal error: missing arg compatible BTF ID\n");
5758 				return -EFAULT;
5759 			}
5760 			arg_btf_id = compatible->btf_id;
5761 		}
5762 
5763 		if (meta->func_id == BPF_FUNC_kptr_xchg) {
5764 			if (map_kptr_match_type(env, meta->kptr_off_desc, reg, regno))
5765 				return -EACCES;
5766 		} else if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, reg->off,
5767 						 btf_vmlinux, *arg_btf_id,
5768 						 strict_type_match)) {
5769 			verbose(env, "R%d is of type %s but %s is expected\n",
5770 				regno, kernel_type_name(reg->btf, reg->btf_id),
5771 				kernel_type_name(btf_vmlinux, *arg_btf_id));
5772 			return -EACCES;
5773 		}
5774 	}
5775 
5776 	return 0;
5777 }
5778 
5779 int check_func_arg_reg_off(struct bpf_verifier_env *env,
5780 			   const struct bpf_reg_state *reg, int regno,
5781 			   enum bpf_arg_type arg_type)
5782 {
5783 	enum bpf_reg_type type = reg->type;
5784 	bool fixed_off_ok = false;
5785 
5786 	switch ((u32)type) {
5787 	/* Pointer types where reg offset is explicitly allowed: */
5788 	case PTR_TO_STACK:
5789 		if (arg_type_is_dynptr(arg_type) && reg->off % BPF_REG_SIZE) {
5790 			verbose(env, "cannot pass in dynptr at an offset\n");
5791 			return -EINVAL;
5792 		}
5793 		fallthrough;
5794 	case PTR_TO_PACKET:
5795 	case PTR_TO_PACKET_META:
5796 	case PTR_TO_MAP_KEY:
5797 	case PTR_TO_MAP_VALUE:
5798 	case PTR_TO_MEM:
5799 	case PTR_TO_MEM | MEM_RDONLY:
5800 	case PTR_TO_MEM | MEM_ALLOC:
5801 	case PTR_TO_BUF:
5802 	case PTR_TO_BUF | MEM_RDONLY:
5803 	case SCALAR_VALUE:
5804 		/* Some of the argument types nevertheless require a
5805 		 * zero register offset.
5806 		 */
5807 		if (base_type(arg_type) != ARG_PTR_TO_ALLOC_MEM)
5808 			return 0;
5809 		break;
5810 	/* All the rest must be rejected, except PTR_TO_BTF_ID which allows
5811 	 * fixed offset.
5812 	 */
5813 	case PTR_TO_BTF_ID:
5814 		/* When referenced PTR_TO_BTF_ID is passed to release function,
5815 		 * its fixed offset must be 0. In the other cases, fixed offset
5816 		 * can be non-zero.
5817 		 */
5818 		if (arg_type_is_release(arg_type) && reg->off) {
5819 			verbose(env, "R%d must have zero offset when passed to release func\n",
5820 				regno);
5821 			return -EINVAL;
5822 		}
5823 		/* When the arg is a release pointer, fixed_off_ok must be false, but
5824 		 * we already checked and rejected reg->off != 0 above, so set
5825 		 * to true to allow fixed offset for all other cases.
5826 		 */
5827 		fixed_off_ok = true;
5828 		break;
5829 	default:
5830 		break;
5831 	}
5832 	return __check_ptr_off_reg(env, reg, regno, fixed_off_ok);
5833 }
5834 
5835 static u32 stack_slot_get_id(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
5836 {
5837 	struct bpf_func_state *state = func(env, reg);
5838 	int spi = get_spi(reg->off);
5839 
5840 	return state->stack[spi].spilled_ptr.id;
5841 }
5842 
5843 static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
5844 			  struct bpf_call_arg_meta *meta,
5845 			  const struct bpf_func_proto *fn)
5846 {
5847 	u32 regno = BPF_REG_1 + arg;
5848 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
5849 	enum bpf_arg_type arg_type = fn->arg_type[arg];
5850 	enum bpf_reg_type type = reg->type;
5851 	u32 *arg_btf_id = NULL;
5852 	int err = 0;
5853 
5854 	if (arg_type == ARG_DONTCARE)
5855 		return 0;
5856 
5857 	err = check_reg_arg(env, regno, SRC_OP);
5858 	if (err)
5859 		return err;
5860 
5861 	if (arg_type == ARG_ANYTHING) {
5862 		if (is_pointer_value(env, regno)) {
5863 			verbose(env, "R%d leaks addr into helper function\n",
5864 				regno);
5865 			return -EACCES;
5866 		}
5867 		return 0;
5868 	}
5869 
5870 	if (type_is_pkt_pointer(type) &&
5871 	    !may_access_direct_pkt_data(env, meta, BPF_READ)) {
5872 		verbose(env, "helper access to the packet is not allowed\n");
5873 		return -EACCES;
5874 	}
5875 
5876 	if (base_type(arg_type) == ARG_PTR_TO_MAP_VALUE) {
5877 		err = resolve_map_arg_type(env, meta, &arg_type);
5878 		if (err)
5879 			return err;
5880 	}
5881 
5882 	if (register_is_null(reg) && type_may_be_null(arg_type))
5883 		/* A NULL register has a SCALAR_VALUE type, so skip
5884 		 * type checking.
5885 		 */
5886 		goto skip_type_check;
5887 
5888 	/* arg_btf_id and arg_size are in a union. */
5889 	if (base_type(arg_type) == ARG_PTR_TO_BTF_ID)
5890 		arg_btf_id = fn->arg_btf_id[arg];
5891 
5892 	err = check_reg_type(env, regno, arg_type, arg_btf_id, meta);
5893 	if (err)
5894 		return err;
5895 
5896 	err = check_func_arg_reg_off(env, reg, regno, arg_type);
5897 	if (err)
5898 		return err;
5899 
5900 skip_type_check:
5901 	if (arg_type_is_release(arg_type)) {
5902 		if (arg_type_is_dynptr(arg_type)) {
5903 			struct bpf_func_state *state = func(env, reg);
5904 			int spi = get_spi(reg->off);
5905 
5906 			if (!is_spi_bounds_valid(state, spi, BPF_DYNPTR_NR_SLOTS) ||
5907 			    !state->stack[spi].spilled_ptr.id) {
5908 				verbose(env, "arg %d is an unacquired reference\n", regno);
5909 				return -EINVAL;
5910 			}
5911 		} else if (!reg->ref_obj_id && !register_is_null(reg)) {
5912 			verbose(env, "R%d must be referenced when passed to release function\n",
5913 				regno);
5914 			return -EINVAL;
5915 		}
5916 		if (meta->release_regno) {
5917 			verbose(env, "verifier internal error: more than one release argument\n");
5918 			return -EFAULT;
5919 		}
5920 		meta->release_regno = regno;
5921 	}
5922 
5923 	if (reg->ref_obj_id) {
5924 		if (meta->ref_obj_id) {
5925 			verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n",
5926 				regno, reg->ref_obj_id,
5927 				meta->ref_obj_id);
5928 			return -EFAULT;
5929 		}
5930 		meta->ref_obj_id = reg->ref_obj_id;
5931 	}
5932 
5933 	if (arg_type == ARG_CONST_MAP_PTR) {
5934 		/* bpf_map_xxx(map_ptr) call: remember that map_ptr */
5935 		if (meta->map_ptr) {
5936 			/* Use map_uid (which is unique id of inner map) to reject:
5937 			 * inner_map1 = bpf_map_lookup_elem(outer_map, key1)
5938 			 * inner_map2 = bpf_map_lookup_elem(outer_map, key2)
5939 			 * if (inner_map1 && inner_map2) {
5940 			 *     timer = bpf_map_lookup_elem(inner_map1);
5941 			 *     if (timer)
5942 			 *         // mismatch would have been allowed
5943 			 *         bpf_timer_init(timer, inner_map2);
5944 			 * }
5945 			 *
5946 			 * Comparing map_ptr is enough to distinguish normal and outer maps.
5947 			 */
5948 			if (meta->map_ptr != reg->map_ptr ||
5949 			    meta->map_uid != reg->map_uid) {
5950 				verbose(env,
5951 					"timer pointer in R1 map_uid=%d doesn't match map pointer in R2 map_uid=%d\n",
5952 					meta->map_uid, reg->map_uid);
5953 				return -EINVAL;
5954 			}
5955 		}
5956 		meta->map_ptr = reg->map_ptr;
5957 		meta->map_uid = reg->map_uid;
5958 	} else if (arg_type == ARG_PTR_TO_MAP_KEY) {
5959 		/* bpf_map_xxx(..., map_ptr, ..., key) call:
5960 		 * check that [key, key + map->key_size) are within
5961 		 * stack limits and initialized
5962 		 */
5963 		if (!meta->map_ptr) {
5964 			/* In the function declaration map_ptr must come before
5965 			 * map_key, so that it's verified and known before
5966 			 * we have to check map_key here. Otherwise it means that
5967 			 * the kernel subsystem misconfigured the verifier.
5968 			 */
5969 			verbose(env, "invalid map_ptr to access map->key\n");
5970 			return -EACCES;
5971 		}
5972 		err = check_helper_mem_access(env, regno,
5973 					      meta->map_ptr->key_size, false,
5974 					      NULL);
5975 	} else if (base_type(arg_type) == ARG_PTR_TO_MAP_VALUE) {
5976 		if (type_may_be_null(arg_type) && register_is_null(reg))
5977 			return 0;
5978 
5979 		/* bpf_map_xxx(..., map_ptr, ..., value) call:
5980 		 * check [value, value + map->value_size) validity
5981 		 */
5982 		if (!meta->map_ptr) {
5983 			/* kernel subsystem misconfigured verifier */
5984 			verbose(env, "invalid map_ptr to access map->value\n");
5985 			return -EACCES;
5986 		}
5987 		meta->raw_mode = arg_type & MEM_UNINIT;
5988 		err = check_helper_mem_access(env, regno,
5989 					      meta->map_ptr->value_size, false,
5990 					      meta);
5991 	} else if (arg_type == ARG_PTR_TO_PERCPU_BTF_ID) {
5992 		if (!reg->btf_id) {
5993 			verbose(env, "Helper has invalid btf_id in R%d\n", regno);
5994 			return -EACCES;
5995 		}
5996 		meta->ret_btf = reg->btf;
5997 		meta->ret_btf_id = reg->btf_id;
5998 	} else if (arg_type == ARG_PTR_TO_SPIN_LOCK) {
5999 		if (meta->func_id == BPF_FUNC_spin_lock) {
6000 			if (process_spin_lock(env, regno, true))
6001 				return -EACCES;
6002 		} else if (meta->func_id == BPF_FUNC_spin_unlock) {
6003 			if (process_spin_lock(env, regno, false))
6004 				return -EACCES;
6005 		} else {
6006 			verbose(env, "verifier internal error\n");
6007 			return -EFAULT;
6008 		}
6009 	} else if (arg_type == ARG_PTR_TO_TIMER) {
6010 		if (process_timer_func(env, regno, meta))
6011 			return -EACCES;
6012 	} else if (arg_type == ARG_PTR_TO_FUNC) {
6013 		meta->subprogno = reg->subprogno;
6014 	} else if (base_type(arg_type) == ARG_PTR_TO_MEM) {
6015 		/* The access to this pointer is only checked when we hit the
6016 		 * next is_mem_size argument below.
6017 		 */
6018 		meta->raw_mode = arg_type & MEM_UNINIT;
6019 		if (arg_type & MEM_FIXED_SIZE) {
6020 			err = check_helper_mem_access(env, regno,
6021 						      fn->arg_size[arg], false,
6022 						      meta);
6023 		}
6024 	} else if (arg_type_is_mem_size(arg_type)) {
6025 		bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO);
6026 
6027 		err = check_mem_size_reg(env, reg, regno, zero_size_allowed, meta);
6028 	} else if (arg_type_is_dynptr(arg_type)) {
6029 		if (arg_type & MEM_UNINIT) {
6030 			if (!is_dynptr_reg_valid_uninit(env, reg)) {
6031 				verbose(env, "Dynptr has to be an uninitialized dynptr\n");
6032 				return -EINVAL;
6033 			}
6034 
6035 			/* We only support one dynptr being uninitialized at the moment,
6036 			 * which is sufficient for the helper functions we have right now.
6037 			 */
6038 			if (meta->uninit_dynptr_regno) {
6039 				verbose(env, "verifier internal error: multiple uninitialized dynptr args\n");
6040 				return -EFAULT;
6041 			}
6042 
6043 			meta->uninit_dynptr_regno = regno;
6044 		} else if (!is_dynptr_reg_valid_init(env, reg, arg_type)) {
6045 			const char *err_extra = "";
6046 
6047 			switch (arg_type & DYNPTR_TYPE_FLAG_MASK) {
6048 			case DYNPTR_TYPE_LOCAL:
6049 				err_extra = "local ";
6050 				break;
6051 			case DYNPTR_TYPE_RINGBUF:
6052 				err_extra = "ringbuf ";
6053 				break;
6054 			default:
6055 				break;
6056 			}
6057 
6058 			verbose(env, "Expected an initialized %sdynptr as arg #%d\n",
6059 				err_extra, arg + 1);
6060 			return -EINVAL;
6061 		}
6062 	} else if (arg_type_is_alloc_size(arg_type)) {
6063 		if (!tnum_is_const(reg->var_off)) {
6064 			verbose(env, "R%d is not a known constant'\n",
6065 				regno);
6066 			return -EACCES;
6067 		}
6068 		meta->mem_size = reg->var_off.value;
6069 	} else if (arg_type_is_int_ptr(arg_type)) {
6070 		int size = int_ptr_type_to_size(arg_type);
6071 
6072 		err = check_helper_mem_access(env, regno, size, false, meta);
6073 		if (err)
6074 			return err;
6075 		err = check_ptr_alignment(env, reg, 0, size, true);
6076 	} else if (arg_type == ARG_PTR_TO_CONST_STR) {
6077 		struct bpf_map *map = reg->map_ptr;
6078 		int map_off;
6079 		u64 map_addr;
6080 		char *str_ptr;
6081 
6082 		if (!bpf_map_is_rdonly(map)) {
6083 			verbose(env, "R%d does not point to a readonly map'\n", regno);
6084 			return -EACCES;
6085 		}
6086 
6087 		if (!tnum_is_const(reg->var_off)) {
6088 			verbose(env, "R%d is not a constant address'\n", regno);
6089 			return -EACCES;
6090 		}
6091 
6092 		if (!map->ops->map_direct_value_addr) {
6093 			verbose(env, "no direct value access support for this map type\n");
6094 			return -EACCES;
6095 		}
6096 
6097 		err = check_map_access(env, regno, reg->off,
6098 				       map->value_size - reg->off, false,
6099 				       ACCESS_HELPER);
6100 		if (err)
6101 			return err;
6102 
6103 		map_off = reg->off + reg->var_off.value;
6104 		err = map->ops->map_direct_value_addr(map, &map_addr, map_off);
6105 		if (err) {
6106 			verbose(env, "direct value access on string failed\n");
6107 			return err;
6108 		}
6109 
6110 		str_ptr = (char *)(long)(map_addr);
6111 		if (!strnchr(str_ptr + map_off, map->value_size - map_off, 0)) {
6112 			verbose(env, "string is not zero-terminated\n");
6113 			return -EINVAL;
6114 		}
6115 	} else if (arg_type == ARG_PTR_TO_KPTR) {
6116 		if (process_kptr_func(env, regno, meta))
6117 			return -EACCES;
6118 	}
6119 
6120 	return err;
6121 }
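
/* Illustrative sketch (assumed BPF C, hypothetical names) for the
 * ARG_PTR_TO_CONST_STR branch above:
 *
 *     static const char fmt[] = "pid=%d";	// placed in a read-only map
 *     u64 args[] = { pid };
 *     bpf_snprintf(out, sizeof(out), fmt, args, sizeof(args));
 *
 * 'fmt' must sit in a read-only map at a constant offset and be
 * NUL-terminated, which is what the map_direct_value_addr() and strnchr()
 * checks verify.
 */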
6122 
6123 static bool may_update_sockmap(struct bpf_verifier_env *env, int func_id)
6124 {
6125 	enum bpf_attach_type eatype = env->prog->expected_attach_type;
6126 	enum bpf_prog_type type = resolve_prog_type(env->prog);
6127 
6128 	if (func_id != BPF_FUNC_map_update_elem)
6129 		return false;
6130 
6131 	/* It's not possible to get access to a locked struct sock in these
6132 	 * contexts, so updating is safe.
6133 	 */
6134 	switch (type) {
6135 	case BPF_PROG_TYPE_TRACING:
6136 		if (eatype == BPF_TRACE_ITER)
6137 			return true;
6138 		break;
6139 	case BPF_PROG_TYPE_SOCKET_FILTER:
6140 	case BPF_PROG_TYPE_SCHED_CLS:
6141 	case BPF_PROG_TYPE_SCHED_ACT:
6142 	case BPF_PROG_TYPE_XDP:
6143 	case BPF_PROG_TYPE_SK_REUSEPORT:
6144 	case BPF_PROG_TYPE_FLOW_DISSECTOR:
6145 	case BPF_PROG_TYPE_SK_LOOKUP:
6146 		return true;
6147 	default:
6148 		break;
6149 	}
6150 
6151 	verbose(env, "cannot update sockmap in this context\n");
6152 	return false;
6153 }
6154 
6155 static bool allow_tail_call_in_subprogs(struct bpf_verifier_env *env)
6156 {
6157 	return env->prog->jit_requested && IS_ENABLED(CONFIG_X86_64);
6158 }
6159 
6160 static int check_map_func_compatibility(struct bpf_verifier_env *env,
6161 					struct bpf_map *map, int func_id)
6162 {
6163 	if (!map)
6164 		return 0;
6165 
6166 	/* We need a two way check, first is from map perspective ... */
6167 	switch (map->map_type) {
6168 	case BPF_MAP_TYPE_PROG_ARRAY:
6169 		if (func_id != BPF_FUNC_tail_call)
6170 			goto error;
6171 		break;
6172 	case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
6173 		if (func_id != BPF_FUNC_perf_event_read &&
6174 		    func_id != BPF_FUNC_perf_event_output &&
6175 		    func_id != BPF_FUNC_skb_output &&
6176 		    func_id != BPF_FUNC_perf_event_read_value &&
6177 		    func_id != BPF_FUNC_xdp_output)
6178 			goto error;
6179 		break;
6180 	case BPF_MAP_TYPE_RINGBUF:
6181 		if (func_id != BPF_FUNC_ringbuf_output &&
6182 		    func_id != BPF_FUNC_ringbuf_reserve &&
6183 		    func_id != BPF_FUNC_ringbuf_query &&
6184 		    func_id != BPF_FUNC_ringbuf_reserve_dynptr &&
6185 		    func_id != BPF_FUNC_ringbuf_submit_dynptr &&
6186 		    func_id != BPF_FUNC_ringbuf_discard_dynptr)
6187 			goto error;
6188 		break;
6189 	case BPF_MAP_TYPE_STACK_TRACE:
6190 		if (func_id != BPF_FUNC_get_stackid)
6191 			goto error;
6192 		break;
6193 	case BPF_MAP_TYPE_CGROUP_ARRAY:
6194 		if (func_id != BPF_FUNC_skb_under_cgroup &&
6195 		    func_id != BPF_FUNC_current_task_under_cgroup)
6196 			goto error;
6197 		break;
6198 	case BPF_MAP_TYPE_CGROUP_STORAGE:
6199 	case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
6200 		if (func_id != BPF_FUNC_get_local_storage)
6201 			goto error;
6202 		break;
6203 	case BPF_MAP_TYPE_DEVMAP:
6204 	case BPF_MAP_TYPE_DEVMAP_HASH:
6205 		if (func_id != BPF_FUNC_redirect_map &&
6206 		    func_id != BPF_FUNC_map_lookup_elem)
6207 			goto error;
6208 		break;
6209 	/* Restrict bpf side of cpumap and xskmap, open when use-cases
6210 	 * appear.
6211 	 */
6212 	case BPF_MAP_TYPE_CPUMAP:
6213 		if (func_id != BPF_FUNC_redirect_map)
6214 			goto error;
6215 		break;
6216 	case BPF_MAP_TYPE_XSKMAP:
6217 		if (func_id != BPF_FUNC_redirect_map &&
6218 		    func_id != BPF_FUNC_map_lookup_elem)
6219 			goto error;
6220 		break;
6221 	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
6222 	case BPF_MAP_TYPE_HASH_OF_MAPS:
6223 		if (func_id != BPF_FUNC_map_lookup_elem)
6224 			goto error;
6225 		break;
6226 	case BPF_MAP_TYPE_SOCKMAP:
6227 		if (func_id != BPF_FUNC_sk_redirect_map &&
6228 		    func_id != BPF_FUNC_sock_map_update &&
6229 		    func_id != BPF_FUNC_map_delete_elem &&
6230 		    func_id != BPF_FUNC_msg_redirect_map &&
6231 		    func_id != BPF_FUNC_sk_select_reuseport &&
6232 		    func_id != BPF_FUNC_map_lookup_elem &&
6233 		    !may_update_sockmap(env, func_id))
6234 			goto error;
6235 		break;
6236 	case BPF_MAP_TYPE_SOCKHASH:
6237 		if (func_id != BPF_FUNC_sk_redirect_hash &&
6238 		    func_id != BPF_FUNC_sock_hash_update &&
6239 		    func_id != BPF_FUNC_map_delete_elem &&
6240 		    func_id != BPF_FUNC_msg_redirect_hash &&
6241 		    func_id != BPF_FUNC_sk_select_reuseport &&
6242 		    func_id != BPF_FUNC_map_lookup_elem &&
6243 		    !may_update_sockmap(env, func_id))
6244 			goto error;
6245 		break;
6246 	case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
6247 		if (func_id != BPF_FUNC_sk_select_reuseport)
6248 			goto error;
6249 		break;
6250 	case BPF_MAP_TYPE_QUEUE:
6251 	case BPF_MAP_TYPE_STACK:
6252 		if (func_id != BPF_FUNC_map_peek_elem &&
6253 		    func_id != BPF_FUNC_map_pop_elem &&
6254 		    func_id != BPF_FUNC_map_push_elem)
6255 			goto error;
6256 		break;
6257 	case BPF_MAP_TYPE_SK_STORAGE:
6258 		if (func_id != BPF_FUNC_sk_storage_get &&
6259 		    func_id != BPF_FUNC_sk_storage_delete)
6260 			goto error;
6261 		break;
6262 	case BPF_MAP_TYPE_INODE_STORAGE:
6263 		if (func_id != BPF_FUNC_inode_storage_get &&
6264 		    func_id != BPF_FUNC_inode_storage_delete)
6265 			goto error;
6266 		break;
6267 	case BPF_MAP_TYPE_TASK_STORAGE:
6268 		if (func_id != BPF_FUNC_task_storage_get &&
6269 		    func_id != BPF_FUNC_task_storage_delete)
6270 			goto error;
6271 		break;
6272 	case BPF_MAP_TYPE_BLOOM_FILTER:
6273 		if (func_id != BPF_FUNC_map_peek_elem &&
6274 		    func_id != BPF_FUNC_map_push_elem)
6275 			goto error;
6276 		break;
6277 	default:
6278 		break;
6279 	}
6280 
6281 	/* ... and second from the function itself. */
6282 	switch (func_id) {
6283 	case BPF_FUNC_tail_call:
6284 		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
6285 			goto error;
6286 		if (env->subprog_cnt > 1 && !allow_tail_call_in_subprogs(env)) {
6287 			verbose(env, "tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls\n");
6288 			return -EINVAL;
6289 		}
6290 		break;
6291 	case BPF_FUNC_perf_event_read:
6292 	case BPF_FUNC_perf_event_output:
6293 	case BPF_FUNC_perf_event_read_value:
6294 	case BPF_FUNC_skb_output:
6295 	case BPF_FUNC_xdp_output:
6296 		if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
6297 			goto error;
6298 		break;
6299 	case BPF_FUNC_ringbuf_output:
6300 	case BPF_FUNC_ringbuf_reserve:
6301 	case BPF_FUNC_ringbuf_query:
6302 	case BPF_FUNC_ringbuf_reserve_dynptr:
6303 	case BPF_FUNC_ringbuf_submit_dynptr:
6304 	case BPF_FUNC_ringbuf_discard_dynptr:
6305 		if (map->map_type != BPF_MAP_TYPE_RINGBUF)
6306 			goto error;
6307 		break;
6308 	case BPF_FUNC_get_stackid:
6309 		if (map->map_type != BPF_MAP_TYPE_STACK_TRACE)
6310 			goto error;
6311 		break;
6312 	case BPF_FUNC_current_task_under_cgroup:
6313 	case BPF_FUNC_skb_under_cgroup:
6314 		if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY)
6315 			goto error;
6316 		break;
6317 	case BPF_FUNC_redirect_map:
6318 		if (map->map_type != BPF_MAP_TYPE_DEVMAP &&
6319 		    map->map_type != BPF_MAP_TYPE_DEVMAP_HASH &&
6320 		    map->map_type != BPF_MAP_TYPE_CPUMAP &&
6321 		    map->map_type != BPF_MAP_TYPE_XSKMAP)
6322 			goto error;
6323 		break;
6324 	case BPF_FUNC_sk_redirect_map:
6325 	case BPF_FUNC_msg_redirect_map:
6326 	case BPF_FUNC_sock_map_update:
6327 		if (map->map_type != BPF_MAP_TYPE_SOCKMAP)
6328 			goto error;
6329 		break;
6330 	case BPF_FUNC_sk_redirect_hash:
6331 	case BPF_FUNC_msg_redirect_hash:
6332 	case BPF_FUNC_sock_hash_update:
6333 		if (map->map_type != BPF_MAP_TYPE_SOCKHASH)
6334 			goto error;
6335 		break;
6336 	case BPF_FUNC_get_local_storage:
6337 		if (map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
6338 		    map->map_type != BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
6339 			goto error;
6340 		break;
6341 	case BPF_FUNC_sk_select_reuseport:
6342 		if (map->map_type != BPF_MAP_TYPE_REUSEPORT_SOCKARRAY &&
6343 		    map->map_type != BPF_MAP_TYPE_SOCKMAP &&
6344 		    map->map_type != BPF_MAP_TYPE_SOCKHASH)
6345 			goto error;
6346 		break;
6347 	case BPF_FUNC_map_pop_elem:
6348 		if (map->map_type != BPF_MAP_TYPE_QUEUE &&
6349 		    map->map_type != BPF_MAP_TYPE_STACK)
6350 			goto error;
6351 		break;
6352 	case BPF_FUNC_map_peek_elem:
6353 	case BPF_FUNC_map_push_elem:
6354 		if (map->map_type != BPF_MAP_TYPE_QUEUE &&
6355 		    map->map_type != BPF_MAP_TYPE_STACK &&
6356 		    map->map_type != BPF_MAP_TYPE_BLOOM_FILTER)
6357 			goto error;
6358 		break;
6359 	case BPF_FUNC_map_lookup_percpu_elem:
6360 		if (map->map_type != BPF_MAP_TYPE_PERCPU_ARRAY &&
6361 		    map->map_type != BPF_MAP_TYPE_PERCPU_HASH &&
6362 		    map->map_type != BPF_MAP_TYPE_LRU_PERCPU_HASH)
6363 			goto error;
6364 		break;
6365 	case BPF_FUNC_sk_storage_get:
6366 	case BPF_FUNC_sk_storage_delete:
6367 		if (map->map_type != BPF_MAP_TYPE_SK_STORAGE)
6368 			goto error;
6369 		break;
6370 	case BPF_FUNC_inode_storage_get:
6371 	case BPF_FUNC_inode_storage_delete:
6372 		if (map->map_type != BPF_MAP_TYPE_INODE_STORAGE)
6373 			goto error;
6374 		break;
6375 	case BPF_FUNC_task_storage_get:
6376 	case BPF_FUNC_task_storage_delete:
6377 		if (map->map_type != BPF_MAP_TYPE_TASK_STORAGE)
6378 			goto error;
6379 		break;
6380 	default:
6381 		break;
6382 	}
6383 
6384 	return 0;
6385 error:
6386 	verbose(env, "cannot pass map_type %d into func %s#%d\n",
6387 		map->map_type, func_id_name(func_id), func_id);
6388 	return -EINVAL;
6389 }
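
/* Illustrative example of the two-way check above (not verifier code): calling
 * bpf_redirect_map() with an ordinary BPF_MAP_TYPE_HASH map is rejected by the
 * helper-side switch ("cannot pass map_type ... into func ..."), while passing
 * a BPF_MAP_TYPE_PROG_ARRAY to anything other than bpf_tail_call() is rejected
 * by the map-side switch.
 */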
6390 
6391 static bool check_raw_mode_ok(const struct bpf_func_proto *fn)
6392 {
6393 	int count = 0;
6394 
6395 	if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM)
6396 		count++;
6397 	if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM)
6398 		count++;
6399 	if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM)
6400 		count++;
6401 	if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM)
6402 		count++;
6403 	if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM)
6404 		count++;
6405 
6406 	/* We only support one arg being in raw mode at the moment,
6407 	 * which is sufficient for the helper functions we have
6408 	 * right now.
6409 	 */
6410 	return count <= 1;
6411 }
6412 
6413 static bool check_args_pair_invalid(const struct bpf_func_proto *fn, int arg)
6414 {
6415 	bool is_fixed = fn->arg_type[arg] & MEM_FIXED_SIZE;
6416 	bool has_size = fn->arg_size[arg] != 0;
6417 	bool is_next_size = false;
6418 
6419 	if (arg + 1 < ARRAY_SIZE(fn->arg_type))
6420 		is_next_size = arg_type_is_mem_size(fn->arg_type[arg + 1]);
6421 
6422 	if (base_type(fn->arg_type[arg]) != ARG_PTR_TO_MEM)
6423 		return is_next_size;
6424 
6425 	return has_size == is_next_size || is_next_size == is_fixed;
6426 }
6427 
6428 static bool check_arg_pair_ok(const struct bpf_func_proto *fn)
6429 {
6430 	/* bpf_xxx(..., buf, len) call will access 'len'
6431 	 * bytes from memory 'buf'. Both arg types need
6432 	 * to be paired, so make sure there's no buggy
6433 	 * helper function specification.
6434 	 */
6435 	if (arg_type_is_mem_size(fn->arg1_type) ||
6436 	    check_args_pair_invalid(fn, 0) ||
6437 	    check_args_pair_invalid(fn, 1) ||
6438 	    check_args_pair_invalid(fn, 2) ||
6439 	    check_args_pair_invalid(fn, 3) ||
6440 	    check_args_pair_invalid(fn, 4))
6441 		return false;
6442 
6443 	return true;
6444 }
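
/* Illustrative sketch of the pairing this enforces (hypothetical proto, not
 * taken from the kernel):
 *
 *     static const struct bpf_func_proto bpf_foo_proto = {
 *         ...
 *         .arg1_type = ARG_PTR_TO_MEM,
 *         .arg2_type = ARG_CONST_SIZE,	// sizes arg1's buffer
 *     };
 *
 * A size argument must directly follow the pointer it sizes (unless the
 * pointer carries MEM_FIXED_SIZE) and can never be the first argument.
 */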
6445 
6446 static bool check_refcount_ok(const struct bpf_func_proto *fn, int func_id)
6447 {
6448 	int count = 0;
6449 
6450 	if (arg_type_may_be_refcounted(fn->arg1_type))
6451 		count++;
6452 	if (arg_type_may_be_refcounted(fn->arg2_type))
6453 		count++;
6454 	if (arg_type_may_be_refcounted(fn->arg3_type))
6455 		count++;
6456 	if (arg_type_may_be_refcounted(fn->arg4_type))
6457 		count++;
6458 	if (arg_type_may_be_refcounted(fn->arg5_type))
6459 		count++;
6460 
6461 	/* A reference acquiring function cannot acquire
6462 	 * another refcounted ptr.
6463 	 */
6464 	if (may_be_acquire_function(func_id) && count)
6465 		return false;
6466 
6467 	/* We only support one arg being unreferenced at the moment,
6468 	 * which is sufficient for the helper functions we have right now.
6469 	 */
6470 	return count <= 1;
6471 }
6472 
6473 static bool check_btf_id_ok(const struct bpf_func_proto *fn)
6474 {
6475 	int i;
6476 
6477 	for (i = 0; i < ARRAY_SIZE(fn->arg_type); i++) {
6478 		if (base_type(fn->arg_type[i]) == ARG_PTR_TO_BTF_ID && !fn->arg_btf_id[i])
6479 			return false;
6480 
6481 		if (base_type(fn->arg_type[i]) != ARG_PTR_TO_BTF_ID && fn->arg_btf_id[i] &&
6482 		    /* arg_btf_id and arg_size are in a union. */
6483 		    (base_type(fn->arg_type[i]) != ARG_PTR_TO_MEM ||
6484 		     !(fn->arg_type[i] & MEM_FIXED_SIZE)))
6485 			return false;
6486 	}
6487 
6488 	return true;
6489 }
6490 
6491 static int check_func_proto(const struct bpf_func_proto *fn, int func_id,
6492 			    struct bpf_call_arg_meta *meta)
6493 {
6494 	return check_raw_mode_ok(fn) &&
6495 	       check_arg_pair_ok(fn) &&
6496 	       check_btf_id_ok(fn) &&
6497 	       check_refcount_ok(fn, func_id) ? 0 : -EINVAL;
6498 }
6499 
6500 /* Packet data might have moved, any old PTR_TO_PACKET[_META,_END]
6501  * are now invalid, so turn them into unknown SCALAR_VALUE.
6502  */
6503 static void __clear_all_pkt_pointers(struct bpf_verifier_env *env,
6504 				     struct bpf_func_state *state)
6505 {
6506 	struct bpf_reg_state *regs = state->regs, *reg;
6507 	int i;
6508 
6509 	for (i = 0; i < MAX_BPF_REG; i++)
6510 		if (reg_is_pkt_pointer_any(&regs[i]))
6511 			mark_reg_unknown(env, regs, i);
6512 
6513 	bpf_for_each_spilled_reg(i, state, reg) {
6514 		if (!reg)
6515 			continue;
6516 		if (reg_is_pkt_pointer_any(reg))
6517 			__mark_reg_unknown(env, reg);
6518 	}
6519 }
6520 
6521 static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
6522 {
6523 	struct bpf_verifier_state *vstate = env->cur_state;
6524 	int i;
6525 
6526 	for (i = 0; i <= vstate->curframe; i++)
6527 		__clear_all_pkt_pointers(env, vstate->frame[i]);
6528 }
6529 
6530 enum {
6531 	AT_PKT_END = -1,
6532 	BEYOND_PKT_END = -2,
6533 };
6534 
6535 static void mark_pkt_end(struct bpf_verifier_state *vstate, int regn, bool range_open)
6536 {
6537 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
6538 	struct bpf_reg_state *reg = &state->regs[regn];
6539 
6540 	if (reg->type != PTR_TO_PACKET)
6541 		/* PTR_TO_PACKET_META is not supported yet */
6542 		return;
6543 
6544 	/* The 'reg' is pkt > pkt_end or pkt >= pkt_end.
6545 	 * How far beyond pkt_end it goes is unknown.
6546 	 * if (!range_open) it's the case of pkt >= pkt_end
6547 	 * if (range_open) it's the case of pkt > pkt_end
6548 	 * hence this pointer is at least 1 byte bigger than pkt_end
6549 	 */
6550 	if (range_open)
6551 		reg->range = BEYOND_PKT_END;
6552 	else
6553 		reg->range = AT_PKT_END;
6554 }
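
/* Illustrative sketch (assumed BPF C) of where this marking applies:
 *
 *     if (data + 8 > data_end)
 *         return XDP_DROP;
 *     // here: 8 bytes of packet data are provably accessible
 *
 * In the branch where the comparison is true, the 'data + 8' pointer lies past
 * pkt_end and its range is set to BEYOND_PKT_END (or AT_PKT_END for '>='), so
 * later comparisons against pkt_end can still refine packet pointers.
 */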
6555 
6556 static void release_reg_references(struct bpf_verifier_env *env,
6557 				   struct bpf_func_state *state,
6558 				   int ref_obj_id)
6559 {
6560 	struct bpf_reg_state *regs = state->regs, *reg;
6561 	int i;
6562 
6563 	for (i = 0; i < MAX_BPF_REG; i++)
6564 		if (regs[i].ref_obj_id == ref_obj_id)
6565 			mark_reg_unknown(env, regs, i);
6566 
6567 	bpf_for_each_spilled_reg(i, state, reg) {
6568 		if (!reg)
6569 			continue;
6570 		if (reg->ref_obj_id == ref_obj_id)
6571 			__mark_reg_unknown(env, reg);
6572 	}
6573 }
6574 
6575 /* The pointer with the specified id has released its reference to kernel
6576  * resources. Identify all copies of the same pointer and clear the reference.
6577  */
6578 static int release_reference(struct bpf_verifier_env *env,
6579 			     int ref_obj_id)
6580 {
6581 	struct bpf_verifier_state *vstate = env->cur_state;
6582 	int err;
6583 	int i;
6584 
6585 	err = release_reference_state(cur_func(env), ref_obj_id);
6586 	if (err)
6587 		return err;
6588 
6589 	for (i = 0; i <= vstate->curframe; i++)
6590 		release_reg_references(env, vstate->frame[i], ref_obj_id);
6591 
6592 	return 0;
6593 }
6594 
6595 static void clear_caller_saved_regs(struct bpf_verifier_env *env,
6596 				    struct bpf_reg_state *regs)
6597 {
6598 	int i;
6599 
6600 	/* after the call registers r0 - r5 were scratched */
6601 	for (i = 0; i < CALLER_SAVED_REGS; i++) {
6602 		mark_reg_not_init(env, regs, caller_saved[i]);
6603 		check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
6604 	}
6605 }
6606 
6607 typedef int (*set_callee_state_fn)(struct bpf_verifier_env *env,
6608 				   struct bpf_func_state *caller,
6609 				   struct bpf_func_state *callee,
6610 				   int insn_idx);
6611 
6612 static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
6613 			     int *insn_idx, int subprog,
6614 			     set_callee_state_fn set_callee_state_cb)
6615 {
6616 	struct bpf_verifier_state *state = env->cur_state;
6617 	struct bpf_func_info_aux *func_info_aux;
6618 	struct bpf_func_state *caller, *callee;
6619 	int err;
6620 	bool is_global = false;
6621 
6622 	if (state->curframe + 1 >= MAX_CALL_FRAMES) {
6623 		verbose(env, "the call stack of %d frames is too deep\n",
6624 			state->curframe + 2);
6625 		return -E2BIG;
6626 	}
6627 
6628 	caller = state->frame[state->curframe];
6629 	if (state->frame[state->curframe + 1]) {
6630 		verbose(env, "verifier bug. Frame %d already allocated\n",
6631 			state->curframe + 1);
6632 		return -EFAULT;
6633 	}
6634 
6635 	func_info_aux = env->prog->aux->func_info_aux;
6636 	if (func_info_aux)
6637 		is_global = func_info_aux[subprog].linkage == BTF_FUNC_GLOBAL;
6638 	err = btf_check_subprog_arg_match(env, subprog, caller->regs);
6639 	if (err == -EFAULT)
6640 		return err;
6641 	if (is_global) {
6642 		if (err) {
6643 			verbose(env, "Caller passes invalid args into func#%d\n",
6644 				subprog);
6645 			return err;
6646 		} else {
6647 			if (env->log.level & BPF_LOG_LEVEL)
6648 				verbose(env,
6649 					"Func#%d is global and valid. Skipping.\n",
6650 					subprog);
6651 			clear_caller_saved_regs(env, caller->regs);
6652 
6653 			/* All global functions return a 64-bit SCALAR_VALUE */
6654 			mark_reg_unknown(env, caller->regs, BPF_REG_0);
6655 			caller->regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG;
6656 
6657 			/* continue with next insn after call */
6658 			return 0;
6659 		}
6660 	}
6661 
6662 	if (insn->code == (BPF_JMP | BPF_CALL) &&
6663 	    insn->src_reg == 0 &&
6664 	    insn->imm == BPF_FUNC_timer_set_callback) {
6665 		struct bpf_verifier_state *async_cb;
6666 
6667 		/* there is no real recursion here. timer callbacks are async */
6668 		env->subprog_info[subprog].is_async_cb = true;
6669 		async_cb = push_async_cb(env, env->subprog_info[subprog].start,
6670 					 *insn_idx, subprog);
6671 		if (!async_cb)
6672 			return -EFAULT;
6673 		callee = async_cb->frame[0];
6674 		callee->async_entry_cnt = caller->async_entry_cnt + 1;
6675 
6676 		/* Convert bpf_timer_set_callback() args into timer callback args */
6677 		err = set_callee_state_cb(env, caller, callee, *insn_idx);
6678 		if (err)
6679 			return err;
6680 
6681 		clear_caller_saved_regs(env, caller->regs);
6682 		mark_reg_unknown(env, caller->regs, BPF_REG_0);
6683 		caller->regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG;
6684 		/* continue with next insn after call */
6685 		return 0;
6686 	}
6687 
6688 	callee = kzalloc(sizeof(*callee), GFP_KERNEL);
6689 	if (!callee)
6690 		return -ENOMEM;
6691 	state->frame[state->curframe + 1] = callee;
6692 
6693 	/* callee cannot access r0, r6 - r9 for reading and has to write
6694 	 * into its own stack before reading from it.
6695 	 * callee can read/write into caller's stack
6696 	 */
6697 	init_func_state(env, callee,
6698 			/* remember the callsite, it will be used by bpf_exit */
6699 			*insn_idx /* callsite */,
6700 			state->curframe + 1 /* frameno within this callchain */,
6701 			subprog /* subprog number within this prog */);
6702 
6703 	/* Transfer references to the callee */
6704 	err = copy_reference_state(callee, caller);
6705 	if (err)
6706 		return err;
6707 
6708 	err = set_callee_state_cb(env, caller, callee, *insn_idx);
6709 	if (err)
6710 		return err;
6711 
6712 	clear_caller_saved_regs(env, caller->regs);
6713 
6714 	/* only increment it after check_reg_arg() finished */
6715 	state->curframe++;
6716 
6717 	/* and go analyze first insn of the callee */
6718 	*insn_idx = env->subprog_info[subprog].start - 1;
6719 
6720 	if (env->log.level & BPF_LOG_LEVEL) {
6721 		verbose(env, "caller:\n");
6722 		print_verifier_state(env, caller, true);
6723 		verbose(env, "callee:\n");
6724 		print_verifier_state(env, callee, true);
6725 	}
6726 	return 0;
6727 }
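
/* Illustrative sketch (not part of the verifier): a BPF-to-BPF call such as
 *
 *	static __noinline int inc(int x)
 *	{
 *		return x + 1;
 *	}
 *
 *	SEC("tc") int prog(struct __sk_buff *skb)
 *	{
 *		return inc(skb->len);
 *	}
 *
 * is verified by allocating a fresh bpf_func_state frame for 'inc', copying
 * r1-r5 into it via set_callee_state(), scratching the caller's r0-r5 and
 * continuing at the callee's first instruction. At most MAX_CALL_FRAMES
 * frames may be nested this way. SEC() and __noinline are the usual libbpf /
 * compiler helpers, shown here only for context.
 */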
6728 
6729 int map_set_for_each_callback_args(struct bpf_verifier_env *env,
6730 				   struct bpf_func_state *caller,
6731 				   struct bpf_func_state *callee)
6732 {
6733 	/* bpf_for_each_map_elem(struct bpf_map *map, void *callback_fn,
6734 	 *      void *callback_ctx, u64 flags);
6735 	 * callback_fn(struct bpf_map *map, void *key, void *value,
6736 	 *      void *callback_ctx);
6737 	 */
6738 	callee->regs[BPF_REG_1] = caller->regs[BPF_REG_1];
6739 
6740 	callee->regs[BPF_REG_2].type = PTR_TO_MAP_KEY;
6741 	__mark_reg_known_zero(&callee->regs[BPF_REG_2]);
6742 	callee->regs[BPF_REG_2].map_ptr = caller->regs[BPF_REG_1].map_ptr;
6743 
6744 	callee->regs[BPF_REG_3].type = PTR_TO_MAP_VALUE;
6745 	__mark_reg_known_zero(&callee->regs[BPF_REG_3]);
6746 	callee->regs[BPF_REG_3].map_ptr = caller->regs[BPF_REG_1].map_ptr;
6747 
6748 	/* pointer to stack or null */
6749 	callee->regs[BPF_REG_4] = caller->regs[BPF_REG_3];
6750 
6751 	/* unused */
6752 	__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
6753 	return 0;
6754 }
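
/* Illustrative sketch (not part of the verifier): from the program's point of
 * view the register layout above corresponds to a call like the following,
 * assuming a hypothetical ARRAY map 'counters' with u64 values:
 *
 *	static long sum_cb(struct bpf_map *map, __u32 *key, __u64 *val,
 *			   void *ctx)
 *	{
 *		*(__u64 *)ctx += *val;
 *		return 0;	(0 continues, 1 stops the iteration)
 *	}
 *
 *	__u64 sum = 0;
 *	bpf_for_each_map_elem(&counters, sum_cb, &sum, 0);
 *
 * Inside sum_cb, r1 is the map, r2 the key (PTR_TO_MAP_KEY), r3 the value
 * (PTR_TO_MAP_VALUE) and r4 the callback_ctx (stack pointer or NULL).
 */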
6755 
6756 static int set_callee_state(struct bpf_verifier_env *env,
6757 			    struct bpf_func_state *caller,
6758 			    struct bpf_func_state *callee, int insn_idx)
6759 {
6760 	int i;
6761 
6762 	/* copy r1 - r5 args that callee can access.  The copy includes parent
6763 	 * pointers, which connect us up to the liveness chain
6764 	 */
6765 	for (i = BPF_REG_1; i <= BPF_REG_5; i++)
6766 		callee->regs[i] = caller->regs[i];
6767 	return 0;
6768 }
6769 
6770 static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
6771 			   int *insn_idx)
6772 {
6773 	int subprog, target_insn;
6774 
6775 	target_insn = *insn_idx + insn->imm + 1;
6776 	subprog = find_subprog(env, target_insn);
6777 	if (subprog < 0) {
6778 		verbose(env, "verifier bug. No program starts at insn %d\n",
6779 			target_insn);
6780 		return -EFAULT;
6781 	}
6782 
6783 	return __check_func_call(env, insn, insn_idx, subprog, set_callee_state);
6784 }
6785 
6786 static int set_map_elem_callback_state(struct bpf_verifier_env *env,
6787 				       struct bpf_func_state *caller,
6788 				       struct bpf_func_state *callee,
6789 				       int insn_idx)
6790 {
6791 	struct bpf_insn_aux_data *insn_aux = &env->insn_aux_data[insn_idx];
6792 	struct bpf_map *map;
6793 	int err;
6794 
6795 	if (bpf_map_ptr_poisoned(insn_aux)) {
6796 		verbose(env, "tail_call abusing map_ptr\n");
6797 		return -EINVAL;
6798 	}
6799 
6800 	map = BPF_MAP_PTR(insn_aux->map_ptr_state);
6801 	if (!map->ops->map_set_for_each_callback_args ||
6802 	    !map->ops->map_for_each_callback) {
6803 		verbose(env, "callback function not allowed for map\n");
6804 		return -ENOTSUPP;
6805 	}
6806 
6807 	err = map->ops->map_set_for_each_callback_args(env, caller, callee);
6808 	if (err)
6809 		return err;
6810 
6811 	callee->in_callback_fn = true;
6812 	return 0;
6813 }
6814 
6815 static int set_loop_callback_state(struct bpf_verifier_env *env,
6816 				   struct bpf_func_state *caller,
6817 				   struct bpf_func_state *callee,
6818 				   int insn_idx)
6819 {
6820 	/* bpf_loop(u32 nr_loops, void *callback_fn, void *callback_ctx,
6821 	 *	    u64 flags);
6822 	 * callback_fn(u32 index, void *callback_ctx);
6823 	 */
6824 	callee->regs[BPF_REG_1].type = SCALAR_VALUE;
6825 	callee->regs[BPF_REG_2] = caller->regs[BPF_REG_3];
6826 
6827 	/* unused */
6828 	__mark_reg_not_init(env, &callee->regs[BPF_REG_3]);
6829 	__mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
6830 	__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
6831 
6832 	callee->in_callback_fn = true;
6833 	return 0;
6834 }
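
/* Illustrative sketch (not part of the verifier): a minimal bpf_loop() user,
 * assuming a callback that simply counts its invocations:
 *
 *	static long loop_cb(__u32 index, void *ctx)
 *	{
 *		(*(__u64 *)ctx)++;
 *		return 0;	(0 continues, 1 breaks out of the loop)
 *	}
 *
 *	__u64 n = 0;
 *	bpf_loop(100, loop_cb, &n, 0);
 *
 * r1 in the callback is the scalar iteration index and r2 is callback_ctx,
 * matching the assignments above; the [0, 1] return range is enforced in
 * prepare_func_exit().
 */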
6835 
6836 static int set_timer_callback_state(struct bpf_verifier_env *env,
6837 				    struct bpf_func_state *caller,
6838 				    struct bpf_func_state *callee,
6839 				    int insn_idx)
6840 {
6841 	struct bpf_map *map_ptr = caller->regs[BPF_REG_1].map_ptr;
6842 
6843 	/* bpf_timer_set_callback(struct bpf_timer *timer, void *callback_fn);
6844 	 * callback_fn(struct bpf_map *map, void *key, void *value);
6845 	 */
6846 	callee->regs[BPF_REG_1].type = CONST_PTR_TO_MAP;
6847 	__mark_reg_known_zero(&callee->regs[BPF_REG_1]);
6848 	callee->regs[BPF_REG_1].map_ptr = map_ptr;
6849 
6850 	callee->regs[BPF_REG_2].type = PTR_TO_MAP_KEY;
6851 	__mark_reg_known_zero(&callee->regs[BPF_REG_2]);
6852 	callee->regs[BPF_REG_2].map_ptr = map_ptr;
6853 
6854 	callee->regs[BPF_REG_3].type = PTR_TO_MAP_VALUE;
6855 	__mark_reg_known_zero(&callee->regs[BPF_REG_3]);
6856 	callee->regs[BPF_REG_3].map_ptr = map_ptr;
6857 
6858 	/* unused */
6859 	__mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
6860 	__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
6861 	callee->in_async_callback_fn = true;
6862 	return 0;
6863 }
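
/* Illustrative sketch (not part of the verifier): the timer lives inside a
 * map value and the callback receives that map, key and value. Assuming a
 * hypothetical value type 'struct elem { struct bpf_timer t; }' stored in an
 * ARRAY map 'timers':
 *
 *	static int timer_cb(void *map, int *key, struct elem *val)
 *	{
 *		return 0;
 *	}
 *
 *	bpf_timer_init(&val->t, &timers, CLOCK_MONOTONIC);
 *	bpf_timer_set_callback(&val->t, timer_cb);
 *	bpf_timer_start(&val->t, 1000000, 0);
 *
 * Because the callback runs asynchronously, __check_func_call() verifies it
 * via push_async_cb() rather than as an ordinary nested call frame.
 */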
6864 
6865 static int set_find_vma_callback_state(struct bpf_verifier_env *env,
6866 				       struct bpf_func_state *caller,
6867 				       struct bpf_func_state *callee,
6868 				       int insn_idx)
6869 {
6870 	/* bpf_find_vma(struct task_struct *task, u64 addr,
6871 	 *               void *callback_fn, void *callback_ctx, u64 flags)
6872 	 * (callback_fn)(struct task_struct *task,
6873 	 *               struct vm_area_struct *vma, void *callback_ctx);
6874 	 */
6875 	callee->regs[BPF_REG_1] = caller->regs[BPF_REG_1];
6876 
6877 	callee->regs[BPF_REG_2].type = PTR_TO_BTF_ID;
6878 	__mark_reg_known_zero(&callee->regs[BPF_REG_2]);
6879 	callee->regs[BPF_REG_2].btf = btf_vmlinux;
6880 	callee->regs[BPF_REG_2].btf_id = btf_tracing_ids[BTF_TRACING_TYPE_VMA];
6881 
6882 	/* pointer to stack or null */
6883 	callee->regs[BPF_REG_3] = caller->regs[BPF_REG_4];
6884 
6885 	/* unused */
6886 	__mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
6887 	__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
6888 	callee->in_callback_fn = true;
6889 	return 0;
6890 }
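
/* Illustrative sketch (not part of the verifier), assuming a callback that
 * records whether an address lies in an executable mapping:
 *
 *	static long vma_cb(struct task_struct *task,
 *			   struct vm_area_struct *vma, void *ctx)
 *	{
 *		*(bool *)ctx = !!(vma->vm_flags & VM_EXEC);
 *		return 0;
 *	}
 *
 *	bool is_exec = false;
 *	bpf_find_vma(task, addr, vma_cb, &is_exec, 0);
 *
 * r2 in the callback is the PTR_TO_BTF_ID vm_area_struct set up above, so
 * field reads like vma->vm_flags go through BTF-based access checks.
 */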
6891 
6892 static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
6893 {
6894 	struct bpf_verifier_state *state = env->cur_state;
6895 	struct bpf_func_state *caller, *callee;
6896 	struct bpf_reg_state *r0;
6897 	int err;
6898 
6899 	callee = state->frame[state->curframe];
6900 	r0 = &callee->regs[BPF_REG_0];
6901 	if (r0->type == PTR_TO_STACK) {
6902 		/* technically it's ok to return caller's stack pointer
6903 		 * (or caller's caller's pointer) back to the caller,
6904 		 * since those pointers remain valid. Only the current stack
6905 		 * pointer becomes invalid as soon as the function exits,
6906 		 * but let's be conservative
6907 		 */
6908 		verbose(env, "cannot return stack pointer to the caller\n");
6909 		return -EINVAL;
6910 	}
6911 
6912 	state->curframe--;
6913 	caller = state->frame[state->curframe];
6914 	if (callee->in_callback_fn) {
6915 		/* enforce R0 return value range [0, 1]. */
6916 		struct tnum range = tnum_range(0, 1);
6917 
6918 		if (r0->type != SCALAR_VALUE) {
6919 			verbose(env, "R0 not a scalar value\n");
6920 			return -EACCES;
6921 		}
6922 		if (!tnum_in(range, r0->var_off)) {
6923 			verbose_invalid_scalar(env, r0, &range, "callback return", "R0");
6924 			return -EINVAL;
6925 		}
6926 	} else {
6927 		/* return to the caller whatever r0 had in the callee */
6928 		caller->regs[BPF_REG_0] = *r0;
6929 	}
6930 
6931 	/* Transfer references to the caller */
6932 	err = copy_reference_state(caller, callee);
6933 	if (err)
6934 		return err;
6935 
6936 	*insn_idx = callee->callsite + 1;
6937 	if (env->log.level & BPF_LOG_LEVEL) {
6938 		verbose(env, "returning from callee:\n");
6939 		print_verifier_state(env, callee, true);
6940 		verbose(env, "to caller at %d:\n", *insn_idx);
6941 		print_verifier_state(env, caller, true);
6942 	}
6943 	/* clear everything in the callee */
6944 	free_func_state(callee);
6945 	state->frame[state->curframe + 1] = NULL;
6946 	return 0;
6947 }
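
/* Illustrative note: a bpf_loop()/bpf_for_each_map_elem()/bpf_find_vma()
 * callback that ends with 'return 2;' is rejected by the tnum_range(0, 1)
 * check above, since r0 = 2 is not contained in the [0, 1] range allowed for
 * callback return values.
 */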
6948 
6949 static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type,
6950 				   int func_id,
6951 				   struct bpf_call_arg_meta *meta)
6952 {
6953 	struct bpf_reg_state *ret_reg = &regs[BPF_REG_0];
6954 
6955 	if (ret_type != RET_INTEGER ||
6956 	    (func_id != BPF_FUNC_get_stack &&
6957 	     func_id != BPF_FUNC_get_task_stack &&
6958 	     func_id != BPF_FUNC_probe_read_str &&
6959 	     func_id != BPF_FUNC_probe_read_kernel_str &&
6960 	     func_id != BPF_FUNC_probe_read_user_str))
6961 		return;
6962 
6963 	ret_reg->smax_value = meta->msize_max_value;
6964 	ret_reg->s32_max_value = meta->msize_max_value;
6965 	ret_reg->smin_value = -MAX_ERRNO;
6966 	ret_reg->s32_min_value = -MAX_ERRNO;
6967 	__reg_deduce_bounds(ret_reg);
6968 	__reg_bound_offset(ret_reg);
6969 	__update_reg_bounds(ret_reg);
6970 }
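
/* Worked example (illustrative): after
 *
 *	char buf[64];
 *	long n = bpf_probe_read_kernel_str(buf, sizeof(buf), ptr);
 *
 * the helper's size argument caps msize_max_value at 64, so r0 is refined to
 * [-MAX_ERRNO, 64]. Once the program additionally checks 'n > 0', an access
 * like 'buf[n - 1]' can be proven in bounds without the program clamping n
 * against sizeof(buf) itself.
 */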
6971 
6972 static int
6973 record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
6974 		int func_id, int insn_idx)
6975 {
6976 	struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
6977 	struct bpf_map *map = meta->map_ptr;
6978 
6979 	if (func_id != BPF_FUNC_tail_call &&
6980 	    func_id != BPF_FUNC_map_lookup_elem &&
6981 	    func_id != BPF_FUNC_map_update_elem &&
6982 	    func_id != BPF_FUNC_map_delete_elem &&
6983 	    func_id != BPF_FUNC_map_push_elem &&
6984 	    func_id != BPF_FUNC_map_pop_elem &&
6985 	    func_id != BPF_FUNC_map_peek_elem &&
6986 	    func_id != BPF_FUNC_for_each_map_elem &&
6987 	    func_id != BPF_FUNC_redirect_map &&
6988 	    func_id != BPF_FUNC_map_lookup_percpu_elem)
6989 		return 0;
6990 
6991 	if (map == NULL) {
6992 		verbose(env, "kernel subsystem misconfigured verifier\n");
6993 		return -EINVAL;
6994 	}
6995 
6996 	/* In case of read-only, some additional restrictions
6997 	 * need to be applied in order to prevent altering the
6998 	 * state of the map from program side.
6999 	 */
7000 	if ((map->map_flags & BPF_F_RDONLY_PROG) &&
7001 	    (func_id == BPF_FUNC_map_delete_elem ||
7002 	     func_id == BPF_FUNC_map_update_elem ||
7003 	     func_id == BPF_FUNC_map_push_elem ||
7004 	     func_id == BPF_FUNC_map_pop_elem)) {
7005 		verbose(env, "write into map forbidden\n");
7006 		return -EACCES;
7007 	}
7008 
7009 	if (!BPF_MAP_PTR(aux->map_ptr_state))
7010 		bpf_map_ptr_store(aux, meta->map_ptr,
7011 				  !meta->map_ptr->bypass_spec_v1);
7012 	else if (BPF_MAP_PTR(aux->map_ptr_state) != meta->map_ptr)
7013 		bpf_map_ptr_store(aux, BPF_MAP_PTR_POISON,
7014 				  !meta->map_ptr->bypass_spec_v1);
7015 	return 0;
7016 }
7017 
7018 static int
7019 record_func_key(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
7020 		int func_id, int insn_idx)
7021 {
7022 	struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
7023 	struct bpf_reg_state *regs = cur_regs(env), *reg;
7024 	struct bpf_map *map = meta->map_ptr;
7025 	struct tnum range;
7026 	u64 val;
7027 	int err;
7028 
7029 	if (func_id != BPF_FUNC_tail_call)
7030 		return 0;
7031 	if (!map || map->map_type != BPF_MAP_TYPE_PROG_ARRAY) {
7032 		verbose(env, "kernel subsystem misconfigured verifier\n");
7033 		return -EINVAL;
7034 	}
7035 
7036 	range = tnum_range(0, map->max_entries - 1);
7037 	reg = &regs[BPF_REG_3];
7038 
7039 	if (!register_is_const(reg) || !tnum_in(range, reg->var_off)) {
7040 		bpf_map_key_store(aux, BPF_MAP_KEY_POISON);
7041 		return 0;
7042 	}
7043 
7044 	err = mark_chain_precision(env, BPF_REG_3);
7045 	if (err)
7046 		return err;
7047 
7048 	val = reg->var_off.value;
7049 	if (bpf_map_key_unseen(aux))
7050 		bpf_map_key_store(aux, val);
7051 	else if (!bpf_map_key_poisoned(aux) &&
7052 		  bpf_map_key_immediate(aux) != val)
7053 		bpf_map_key_store(aux, BPF_MAP_KEY_POISON);
7054 	return 0;
7055 }
7056 
7057 static int check_reference_leak(struct bpf_verifier_env *env)
7058 {
7059 	struct bpf_func_state *state = cur_func(env);
7060 	int i;
7061 
7062 	for (i = 0; i < state->acquired_refs; i++) {
7063 		verbose(env, "Unreleased reference id=%d alloc_insn=%d\n",
7064 			state->refs[i].id, state->refs[i].insn_idx);
7065 	}
7066 	return state->acquired_refs ? -EINVAL : 0;
7067 }
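
/* Illustrative sketch (not part of the verifier) of a leak this catches:
 *
 *	struct bpf_sock *sk;
 *
 *	sk = bpf_sk_lookup_tcp(ctx, &tuple, sizeof(tuple),
 *			       BPF_F_CURRENT_NETNS, 0);
 *	if (!sk)
 *		return 0;
 *	if (sk->state == BPF_TCP_LISTEN)
 *		return 0;		<-- exits with 'sk' still held
 *	bpf_sk_release(sk);
 *	return 0;
 *
 * The early return reaches bpf_exit with an acquired reference still in
 * state->refs[], so the "Unreleased reference id=%d alloc_insn=%d" message
 * above is emitted and the program is rejected.
 */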
7068 
7069 static int check_bpf_snprintf_call(struct bpf_verifier_env *env,
7070 				   struct bpf_reg_state *regs)
7071 {
7072 	struct bpf_reg_state *fmt_reg = &regs[BPF_REG_3];
7073 	struct bpf_reg_state *data_len_reg = &regs[BPF_REG_5];
7074 	struct bpf_map *fmt_map = fmt_reg->map_ptr;
7075 	int err, fmt_map_off, num_args;
7076 	u64 fmt_addr;
7077 	char *fmt;
7078 
7079 	/* data must be an array of u64 */
7080 	if (data_len_reg->var_off.value % 8)
7081 		return -EINVAL;
7082 	num_args = data_len_reg->var_off.value / 8;
7083 
7084 	/* fmt being ARG_PTR_TO_CONST_STR guarantees that var_off is const
7085 	 * and map_direct_value_addr is set.
7086 	 */
7087 	fmt_map_off = fmt_reg->off + fmt_reg->var_off.value;
7088 	err = fmt_map->ops->map_direct_value_addr(fmt_map, &fmt_addr,
7089 						  fmt_map_off);
7090 	if (err) {
7091 		verbose(env, "verifier bug\n");
7092 		return -EFAULT;
7093 	}
7094 	fmt = (char *)(long)fmt_addr + fmt_map_off;
7095 
7096 	/* We are also guaranteed that fmt+fmt_map_off is NULL terminated, so we
7097 	 * can focus on validating the format specifiers.
7098 	 */
7099 	err = bpf_bprintf_prepare(fmt, UINT_MAX, NULL, NULL, num_args);
7100 	if (err < 0)
7101 		verbose(env, "Invalid format string\n");
7102 
7103 	return err;
7104 }
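
/* Illustrative sketch (not part of the verifier) of the call shape this
 * validates:
 *
 *	static const char fmt[] = "pid=%d";	(placed in read-only map data)
 *	char out[16];
 *	__u64 args[1] = { pid };
 *
 *	bpf_snprintf(out, sizeof(out), fmt, args, sizeof(args));
 *
 * r3 must point at constant map data (ARG_PTR_TO_CONST_STR) so the format
 * string can be fetched at verification time, and r5 is the byte size of the
 * u64 argument array, hence the '% 8' and '/ 8' checks above.
 */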
7105 
7106 static int check_get_func_ip(struct bpf_verifier_env *env)
7107 {
7108 	enum bpf_prog_type type = resolve_prog_type(env->prog);
7109 	int func_id = BPF_FUNC_get_func_ip;
7110 
7111 	if (type == BPF_PROG_TYPE_TRACING) {
7112 		if (!bpf_prog_has_trampoline(env->prog)) {
7113 			verbose(env, "func %s#%d supported only for fentry/fexit/fmod_ret programs\n",
7114 				func_id_name(func_id), func_id);
7115 			return -ENOTSUPP;
7116 		}
7117 		return 0;
7118 	} else if (type == BPF_PROG_TYPE_KPROBE) {
7119 		return 0;
7120 	}
7121 
7122 	verbose(env, "func %s#%d not supported for program type %d\n",
7123 		func_id_name(func_id), func_id, type);
7124 	return -ENOTSUPP;
7125 }
7126 
7127 static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
7128 			     int *insn_idx_p)
7129 {
7130 	const struct bpf_func_proto *fn = NULL;
7131 	enum bpf_return_type ret_type;
7132 	enum bpf_type_flag ret_flag;
7133 	struct bpf_reg_state *regs;
7134 	struct bpf_call_arg_meta meta;
7135 	int insn_idx = *insn_idx_p;
7136 	bool changes_data;
7137 	int i, err, func_id;
7138 
7139 	/* find function prototype */
7140 	func_id = insn->imm;
7141 	if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) {
7142 		verbose(env, "invalid func %s#%d\n", func_id_name(func_id),
7143 			func_id);
7144 		return -EINVAL;
7145 	}
7146 
7147 	if (env->ops->get_func_proto)
7148 		fn = env->ops->get_func_proto(func_id, env->prog);
7149 	if (!fn) {
7150 		verbose(env, "unknown func %s#%d\n", func_id_name(func_id),
7151 			func_id);
7152 		return -EINVAL;
7153 	}
7154 
7155 	/* eBPF programs must be GPL compatible to use GPL-ed functions */
7156 	if (!env->prog->gpl_compatible && fn->gpl_only) {
7157 		verbose(env, "cannot call GPL-restricted function from non-GPL compatible program\n");
7158 		return -EINVAL;
7159 	}
7160 
7161 	if (fn->allowed && !fn->allowed(env->prog)) {
7162 		verbose(env, "helper call is not allowed in probe\n");
7163 		return -EINVAL;
7164 	}
7165 
7166 	/* With LD_ABS/IND some JITs save/restore skb from r1. */
7167 	changes_data = bpf_helper_changes_pkt_data(fn->func);
7168 	if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) {
7169 		verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n",
7170 			func_id_name(func_id), func_id);
7171 		return -EINVAL;
7172 	}
7173 
7174 	memset(&meta, 0, sizeof(meta));
7175 	meta.pkt_access = fn->pkt_access;
7176 
7177 	err = check_func_proto(fn, func_id, &meta);
7178 	if (err) {
7179 		verbose(env, "kernel subsystem misconfigured func %s#%d\n",
7180 			func_id_name(func_id), func_id);
7181 		return err;
7182 	}
7183 
7184 	meta.func_id = func_id;
7185 	/* check args */
7186 	for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++) {
7187 		err = check_func_arg(env, i, &meta, fn);
7188 		if (err)
7189 			return err;
7190 	}
7191 
7192 	err = record_func_map(env, &meta, func_id, insn_idx);
7193 	if (err)
7194 		return err;
7195 
7196 	err = record_func_key(env, &meta, func_id, insn_idx);
7197 	if (err)
7198 		return err;
7199 
7200 	/* Mark slots with STACK_MISC in case of raw mode; the stack offset
7201 	 * is inferred from register state.
7202 	 */
7203 	for (i = 0; i < meta.access_size; i++) {
7204 		err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B,
7205 				       BPF_WRITE, -1, false);
7206 		if (err)
7207 			return err;
7208 	}
7209 
7210 	regs = cur_regs(env);
7211 
7212 	if (meta.uninit_dynptr_regno) {
7213 		/* we write BPF_DW bits (8 bytes) at a time */
7214 		for (i = 0; i < BPF_DYNPTR_SIZE; i += 8) {
7215 			err = check_mem_access(env, insn_idx, meta.uninit_dynptr_regno,
7216 					       i, BPF_DW, BPF_WRITE, -1, false);
7217 			if (err)
7218 				return err;
7219 		}
7220 
7221 		err = mark_stack_slots_dynptr(env, &regs[meta.uninit_dynptr_regno],
7222 					      fn->arg_type[meta.uninit_dynptr_regno - BPF_REG_1],
7223 					      insn_idx);
7224 		if (err)
7225 			return err;
7226 	}
7227 
7228 	if (meta.release_regno) {
7229 		err = -EINVAL;
7230 		if (arg_type_is_dynptr(fn->arg_type[meta.release_regno - BPF_REG_1]))
7231 			err = unmark_stack_slots_dynptr(env, &regs[meta.release_regno]);
7232 		else if (meta.ref_obj_id)
7233 			err = release_reference(env, meta.ref_obj_id);
7234 		/* meta.ref_obj_id can only be 0 if register that is meant to be
7235 		 * released is NULL, which must be > R0.
7236 		 */
7237 		else if (register_is_null(&regs[meta.release_regno]))
7238 			err = 0;
7239 		if (err) {
7240 			verbose(env, "func %s#%d reference has not been acquired before\n",
7241 				func_id_name(func_id), func_id);
7242 			return err;
7243 		}
7244 	}
7245 
7246 	switch (func_id) {
7247 	case BPF_FUNC_tail_call:
7248 		err = check_reference_leak(env);
7249 		if (err) {
7250 			verbose(env, "tail_call would lead to reference leak\n");
7251 			return err;
7252 		}
7253 		break;
7254 	case BPF_FUNC_get_local_storage:
7255 		/* check that flags argument in get_local_storage(map, flags) is 0,
7256 		 * this is required because get_local_storage() can't return an error.
7257 		 */
7258 		if (!register_is_null(&regs[BPF_REG_2])) {
7259 			verbose(env, "get_local_storage() doesn't support non-zero flags\n");
7260 			return -EINVAL;
7261 		}
7262 		break;
7263 	case BPF_FUNC_for_each_map_elem:
7264 		err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
7265 					set_map_elem_callback_state);
7266 		break;
7267 	case BPF_FUNC_timer_set_callback:
7268 		err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
7269 					set_timer_callback_state);
7270 		break;
7271 	case BPF_FUNC_find_vma:
7272 		err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
7273 					set_find_vma_callback_state);
7274 		break;
7275 	case BPF_FUNC_snprintf:
7276 		err = check_bpf_snprintf_call(env, regs);
7277 		break;
7278 	case BPF_FUNC_loop:
7279 		err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
7280 					set_loop_callback_state);
7281 		break;
7282 	case BPF_FUNC_dynptr_from_mem:
7283 		if (regs[BPF_REG_1].type != PTR_TO_MAP_VALUE) {
7284 			verbose(env, "Unsupported reg type %s for bpf_dynptr_from_mem data\n",
7285 				reg_type_str(env, regs[BPF_REG_1].type));
7286 			return -EACCES;
7287 		}
7288 	}
7289 
7290 	if (err)
7291 		return err;
7292 
7293 	/* reset caller saved regs */
7294 	for (i = 0; i < CALLER_SAVED_REGS; i++) {
7295 		mark_reg_not_init(env, regs, caller_saved[i]);
7296 		check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
7297 	}
7298 
7299 	/* helper call returns 64-bit value. */
7300 	regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG;
7301 
7302 	/* update return register (already marked as written above) */
7303 	ret_type = fn->ret_type;
7304 	ret_flag = type_flag(fn->ret_type);
7305 	if (ret_type == RET_INTEGER) {
7306 		/* sets type to SCALAR_VALUE */
7307 		mark_reg_unknown(env, regs, BPF_REG_0);
7308 	} else if (ret_type == RET_VOID) {
7309 		regs[BPF_REG_0].type = NOT_INIT;
7310 	} else if (base_type(ret_type) == RET_PTR_TO_MAP_VALUE) {
7311 		/* There is no offset yet applied, variable or fixed */
7312 		mark_reg_known_zero(env, regs, BPF_REG_0);
7313 		/* remember map_ptr, so that check_map_access()
7314 		 * can check 'value_size' boundary of memory access
7315 		 * to map element returned from bpf_map_lookup_elem()
7316 		 */
7317 		if (meta.map_ptr == NULL) {
7318 			verbose(env,
7319 				"kernel subsystem misconfigured verifier\n");
7320 			return -EINVAL;
7321 		}
7322 		regs[BPF_REG_0].map_ptr = meta.map_ptr;
7323 		regs[BPF_REG_0].map_uid = meta.map_uid;
7324 		regs[BPF_REG_0].type = PTR_TO_MAP_VALUE | ret_flag;
7325 		if (!type_may_be_null(ret_type) &&
7326 		    map_value_has_spin_lock(meta.map_ptr)) {
7327 			regs[BPF_REG_0].id = ++env->id_gen;
7328 		}
7329 	} else if (base_type(ret_type) == RET_PTR_TO_SOCKET) {
7330 		mark_reg_known_zero(env, regs, BPF_REG_0);
7331 		regs[BPF_REG_0].type = PTR_TO_SOCKET | ret_flag;
7332 	} else if (base_type(ret_type) == RET_PTR_TO_SOCK_COMMON) {
7333 		mark_reg_known_zero(env, regs, BPF_REG_0);
7334 		regs[BPF_REG_0].type = PTR_TO_SOCK_COMMON | ret_flag;
7335 	} else if (base_type(ret_type) == RET_PTR_TO_TCP_SOCK) {
7336 		mark_reg_known_zero(env, regs, BPF_REG_0);
7337 		regs[BPF_REG_0].type = PTR_TO_TCP_SOCK | ret_flag;
7338 	} else if (base_type(ret_type) == RET_PTR_TO_ALLOC_MEM) {
7339 		mark_reg_known_zero(env, regs, BPF_REG_0);
7340 		regs[BPF_REG_0].type = PTR_TO_MEM | ret_flag;
7341 		regs[BPF_REG_0].mem_size = meta.mem_size;
7342 	} else if (base_type(ret_type) == RET_PTR_TO_MEM_OR_BTF_ID) {
7343 		const struct btf_type *t;
7344 
7345 		mark_reg_known_zero(env, regs, BPF_REG_0);
7346 		t = btf_type_skip_modifiers(meta.ret_btf, meta.ret_btf_id, NULL);
7347 		if (!btf_type_is_struct(t)) {
7348 			u32 tsize;
7349 			const struct btf_type *ret;
7350 			const char *tname;
7351 
7352 			/* resolve the type size of ksym. */
7353 			ret = btf_resolve_size(meta.ret_btf, t, &tsize);
7354 			if (IS_ERR(ret)) {
7355 				tname = btf_name_by_offset(meta.ret_btf, t->name_off);
7356 				verbose(env, "unable to resolve the size of type '%s': %ld\n",
7357 					tname, PTR_ERR(ret));
7358 				return -EINVAL;
7359 			}
7360 			regs[BPF_REG_0].type = PTR_TO_MEM | ret_flag;
7361 			regs[BPF_REG_0].mem_size = tsize;
7362 		} else {
7363 			/* MEM_RDONLY may be carried from ret_flag, but it
7364 			 * doesn't apply on PTR_TO_BTF_ID. Fold it, otherwise
7365 			 * it will confuse the check of PTR_TO_BTF_ID in
7366 			 * check_mem_access().
7367 			 */
7368 			ret_flag &= ~MEM_RDONLY;
7369 
7370 			regs[BPF_REG_0].type = PTR_TO_BTF_ID | ret_flag;
7371 			regs[BPF_REG_0].btf = meta.ret_btf;
7372 			regs[BPF_REG_0].btf_id = meta.ret_btf_id;
7373 		}
7374 	} else if (base_type(ret_type) == RET_PTR_TO_BTF_ID) {
7375 		struct btf *ret_btf;
7376 		int ret_btf_id;
7377 
7378 		mark_reg_known_zero(env, regs, BPF_REG_0);
7379 		regs[BPF_REG_0].type = PTR_TO_BTF_ID | ret_flag;
7380 		if (func_id == BPF_FUNC_kptr_xchg) {
7381 			ret_btf = meta.kptr_off_desc->kptr.btf;
7382 			ret_btf_id = meta.kptr_off_desc->kptr.btf_id;
7383 		} else {
7384 			ret_btf = btf_vmlinux;
7385 			ret_btf_id = *fn->ret_btf_id;
7386 		}
7387 		if (ret_btf_id == 0) {
7388 			verbose(env, "invalid return type %u of func %s#%d\n",
7389 				base_type(ret_type), func_id_name(func_id),
7390 				func_id);
7391 			return -EINVAL;
7392 		}
7393 		regs[BPF_REG_0].btf = ret_btf;
7394 		regs[BPF_REG_0].btf_id = ret_btf_id;
7395 	} else {
7396 		verbose(env, "unknown return type %u of func %s#%d\n",
7397 			base_type(ret_type), func_id_name(func_id), func_id);
7398 		return -EINVAL;
7399 	}
7400 
7401 	if (type_may_be_null(regs[BPF_REG_0].type))
7402 		regs[BPF_REG_0].id = ++env->id_gen;
7403 
7404 	if (is_ptr_cast_function(func_id)) {
7405 		/* For release_reference() */
7406 		regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id;
7407 	} else if (is_acquire_function(func_id, meta.map_ptr)) {
7408 		int id = acquire_reference_state(env, insn_idx);
7409 
7410 		if (id < 0)
7411 			return id;
7412 		/* For mark_ptr_or_null_reg() */
7413 		regs[BPF_REG_0].id = id;
7414 		/* For release_reference() */
7415 		regs[BPF_REG_0].ref_obj_id = id;
7416 	} else if (func_id == BPF_FUNC_dynptr_data) {
7417 		int dynptr_id = 0, i;
7418 
7419 		/* Find the id of the dynptr we're acquiring a reference to */
7420 		for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++) {
7421 			if (arg_type_is_dynptr(fn->arg_type[i])) {
7422 				if (dynptr_id) {
7423 					verbose(env, "verifier internal error: multiple dynptr args in func\n");
7424 					return -EFAULT;
7425 				}
7426 				dynptr_id = stack_slot_get_id(env, &regs[BPF_REG_1 + i]);
7427 			}
7428 		}
7429 		/* For release_reference() */
7430 		regs[BPF_REG_0].ref_obj_id = dynptr_id;
7431 	}
7432 
7433 	do_refine_retval_range(regs, fn->ret_type, func_id, &meta);
7434 
7435 	err = check_map_func_compatibility(env, meta.map_ptr, func_id);
7436 	if (err)
7437 		return err;
7438 
7439 	if ((func_id == BPF_FUNC_get_stack ||
7440 	     func_id == BPF_FUNC_get_task_stack) &&
7441 	    !env->prog->has_callchain_buf) {
7442 		const char *err_str;
7443 
7444 #ifdef CONFIG_PERF_EVENTS
7445 		err = get_callchain_buffers(sysctl_perf_event_max_stack);
7446 		err_str = "cannot get callchain buffer for func %s#%d\n";
7447 #else
7448 		err = -ENOTSUPP;
7449 		err_str = "func %s#%d not supported without CONFIG_PERF_EVENTS\n";
7450 #endif
7451 		if (err) {
7452 			verbose(env, err_str, func_id_name(func_id), func_id);
7453 			return err;
7454 		}
7455 
7456 		env->prog->has_callchain_buf = true;
7457 	}
7458 
7459 	if (func_id == BPF_FUNC_get_stackid || func_id == BPF_FUNC_get_stack)
7460 		env->prog->call_get_stack = true;
7461 
7462 	if (func_id == BPF_FUNC_get_func_ip) {
7463 		if (check_get_func_ip(env))
7464 			return -ENOTSUPP;
7465 		env->prog->call_get_func_ip = true;
7466 	}
7467 
7468 	if (changes_data)
7469 		clear_all_pkt_pointers(env);
7470 	return 0;
7471 }
7472 
7473 /* mark_btf_func_reg_size() is used when the reg size is determined by
7474  * the BTF func_proto's return value size and argument.
7475  */
7476 static void mark_btf_func_reg_size(struct bpf_verifier_env *env, u32 regno,
7477 				   size_t reg_size)
7478 {
7479 	struct bpf_reg_state *reg = &cur_regs(env)[regno];
7480 
7481 	if (regno == BPF_REG_0) {
7482 		/* Function return value */
7483 		reg->live |= REG_LIVE_WRITTEN;
7484 		reg->subreg_def = reg_size == sizeof(u64) ?
7485 			DEF_NOT_SUBREG : env->insn_idx + 1;
7486 	} else {
7487 		/* Function argument */
7488 		if (reg_size == sizeof(u64)) {
7489 			mark_insn_zext(env, reg);
7490 			mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
7491 		} else {
7492 			mark_reg_read(env, reg, reg->parent, REG_LIVE_READ32);
7493 		}
7494 	}
7495 }
7496 
7497 static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
7498 			    int *insn_idx_p)
7499 {
7500 	const struct btf_type *t, *func, *func_proto, *ptr_type;
7501 	struct bpf_reg_state *regs = cur_regs(env);
7502 	const char *func_name, *ptr_type_name;
7503 	u32 i, nargs, func_id, ptr_type_id;
7504 	int err, insn_idx = *insn_idx_p;
7505 	const struct btf_param *args;
7506 	struct btf *desc_btf;
7507 	bool acq;
7508 
7509 	/* skip for now, but return error when we find this in fixup_kfunc_call */
7510 	if (!insn->imm)
7511 		return 0;
7512 
7513 	desc_btf = find_kfunc_desc_btf(env, insn->off);
7514 	if (IS_ERR(desc_btf))
7515 		return PTR_ERR(desc_btf);
7516 
7517 	func_id = insn->imm;
7518 	func = btf_type_by_id(desc_btf, func_id);
7519 	func_name = btf_name_by_offset(desc_btf, func->name_off);
7520 	func_proto = btf_type_by_id(desc_btf, func->type);
7521 
7522 	if (!btf_kfunc_id_set_contains(desc_btf, resolve_prog_type(env->prog),
7523 				      BTF_KFUNC_TYPE_CHECK, func_id)) {
7524 		verbose(env, "calling kernel function %s is not allowed\n",
7525 			func_name);
7526 		return -EACCES;
7527 	}
7528 
7529 	acq = btf_kfunc_id_set_contains(desc_btf, resolve_prog_type(env->prog),
7530 					BTF_KFUNC_TYPE_ACQUIRE, func_id);
7531 
7532 	/* Check the arguments */
7533 	err = btf_check_kfunc_arg_match(env, desc_btf, func_id, regs);
7534 	if (err < 0)
7535 		return err;
7536 	/* In case of release function, we get register number of refcounted
7537 	 * PTR_TO_BTF_ID back from btf_check_kfunc_arg_match; do the release now
7538 	 */
7539 	if (err) {
7540 		err = release_reference(env, regs[err].ref_obj_id);
7541 		if (err) {
7542 			verbose(env, "kfunc %s#%d reference has not been acquired before\n",
7543 				func_name, func_id);
7544 			return err;
7545 		}
7546 	}
7547 
7548 	for (i = 0; i < CALLER_SAVED_REGS; i++)
7549 		mark_reg_not_init(env, regs, caller_saved[i]);
7550 
7551 	/* Check return type */
7552 	t = btf_type_skip_modifiers(desc_btf, func_proto->type, NULL);
7553 
7554 	if (acq && !btf_type_is_ptr(t)) {
7555 		verbose(env, "acquire kernel function does not return PTR_TO_BTF_ID\n");
7556 		return -EINVAL;
7557 	}
7558 
7559 	if (btf_type_is_scalar(t)) {
7560 		mark_reg_unknown(env, regs, BPF_REG_0);
7561 		mark_btf_func_reg_size(env, BPF_REG_0, t->size);
7562 	} else if (btf_type_is_ptr(t)) {
7563 		ptr_type = btf_type_skip_modifiers(desc_btf, t->type,
7564 						   &ptr_type_id);
7565 		if (!btf_type_is_struct(ptr_type)) {
7566 			ptr_type_name = btf_name_by_offset(desc_btf,
7567 							   ptr_type->name_off);
7568 			verbose(env, "kernel function %s returns pointer type %s %s is not supported\n",
7569 				func_name, btf_type_str(ptr_type),
7570 				ptr_type_name);
7571 			return -EINVAL;
7572 		}
7573 		mark_reg_known_zero(env, regs, BPF_REG_0);
7574 		regs[BPF_REG_0].btf = desc_btf;
7575 		regs[BPF_REG_0].type = PTR_TO_BTF_ID;
7576 		regs[BPF_REG_0].btf_id = ptr_type_id;
7577 		if (btf_kfunc_id_set_contains(desc_btf, resolve_prog_type(env->prog),
7578 					      BTF_KFUNC_TYPE_RET_NULL, func_id)) {
7579 			regs[BPF_REG_0].type |= PTR_MAYBE_NULL;
7580 			/* For mark_ptr_or_null_reg, see 93c230e3f5bd6 */
7581 			regs[BPF_REG_0].id = ++env->id_gen;
7582 		}
7583 		mark_btf_func_reg_size(env, BPF_REG_0, sizeof(void *));
7584 		if (acq) {
7585 			int id = acquire_reference_state(env, insn_idx);
7586 
7587 			if (id < 0)
7588 				return id;
7589 			regs[BPF_REG_0].id = id;
7590 			regs[BPF_REG_0].ref_obj_id = id;
7591 		}
7592 	} /* else { add_kfunc_call() ensures it is btf_type_is_void(t) } */
7593 
7594 	nargs = btf_type_vlen(func_proto);
7595 	args = (const struct btf_param *)(func_proto + 1);
7596 	for (i = 0; i < nargs; i++) {
7597 		u32 regno = i + 1;
7598 
7599 		t = btf_type_skip_modifiers(desc_btf, args[i].type, NULL);
7600 		if (btf_type_is_ptr(t))
7601 			mark_btf_func_reg_size(env, regno, sizeof(void *));
7602 		else
7603 			/* scalar. ensured by btf_check_kfunc_arg_match() */
7604 			mark_btf_func_reg_size(env, regno, t->size);
7605 	}
7606 
7607 	return 0;
7608 }
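
/* Illustrative sketch (hypothetical kfunc names): for an acquire/release pair
 * registered in the BTF_KFUNC_TYPE_ACQUIRE / _RELEASE (and RET_NULL) id sets,
 * e.g.
 *
 *	struct foo *obj = bpf_foo_acquire(&args);
 *	if (!obj)
 *		return 0;
 *	...
 *	bpf_foo_release(obj);
 *
 * the acquire call leaves r0 as PTR_TO_BTF_ID | PTR_MAYBE_NULL with a fresh
 * ref_obj_id, and the release call must be passed a register carrying that
 * same ref_obj_id, otherwise the "reference has not been acquired before"
 * error above is reported.
 */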
7609 
7610 static bool signed_add_overflows(s64 a, s64 b)
7611 {
7612 	/* Do the add in u64, where overflow is well-defined */
7613 	s64 res = (s64)((u64)a + (u64)b);
7614 
7615 	if (b < 0)
7616 		return res > a;
7617 	return res < a;
7618 }
7619 
7620 static bool signed_add32_overflows(s32 a, s32 b)
7621 {
7622 	/* Do the add in u32, where overflow is well-defined */
7623 	s32 res = (s32)((u32)a + (u32)b);
7624 
7625 	if (b < 0)
7626 		return res > a;
7627 	return res < a;
7628 }
7629 
7630 static bool signed_sub_overflows(s64 a, s64 b)
7631 {
7632 	/* Do the sub in u64, where overflow is well-defined */
7633 	s64 res = (s64)((u64)a - (u64)b);
7634 
7635 	if (b < 0)
7636 		return res < a;
7637 	return res > a;
7638 }
7639 
7640 static bool signed_sub32_overflows(s32 a, s32 b)
7641 {
7642 	/* Do the sub in u32, where overflow is well-defined */
7643 	s32 res = (s32)((u32)a - (u32)b);
7644 
7645 	if (b < 0)
7646 		return res < a;
7647 	return res > a;
7648 }
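
/* Worked example (illustrative): signed_add_overflows(S64_MAX, 1) computes
 * res = S64_MAX + 1 in u64, which wraps to S64_MIN; b > 0 and res < a, so
 * overflow is reported. Conversely signed_add_overflows(-1, S64_MIN) yields
 * res = S64_MAX; b < 0 and res > a, so the wrap in the negative direction is
 * caught as well. The sub variants follow the same pattern with the
 * comparison directions swapped.
 */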
7649 
7650 static bool check_reg_sane_offset(struct bpf_verifier_env *env,
7651 				  const struct bpf_reg_state *reg,
7652 				  enum bpf_reg_type type)
7653 {
7654 	bool known = tnum_is_const(reg->var_off);
7655 	s64 val = reg->var_off.value;
7656 	s64 smin = reg->smin_value;
7657 
7658 	if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) {
7659 		verbose(env, "math between %s pointer and %lld is not allowed\n",
7660 			reg_type_str(env, type), val);
7661 		return false;
7662 	}
7663 
7664 	if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) {
7665 		verbose(env, "%s pointer offset %d is not allowed\n",
7666 			reg_type_str(env, type), reg->off);
7667 		return false;
7668 	}
7669 
7670 	if (smin == S64_MIN) {
7671 		verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n",
7672 			reg_type_str(env, type));
7673 		return false;
7674 	}
7675 
7676 	if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) {
7677 		verbose(env, "value %lld makes %s pointer be out of bounds\n",
7678 			smin, reg_type_str(env, type));
7679 		return false;
7680 	}
7681 
7682 	return true;
7683 }
7684 
7685 static struct bpf_insn_aux_data *cur_aux(struct bpf_verifier_env *env)
7686 {
7687 	return &env->insn_aux_data[env->insn_idx];
7688 }
7689 
7690 enum {
7691 	REASON_BOUNDS	= -1,
7692 	REASON_TYPE	= -2,
7693 	REASON_PATHS	= -3,
7694 	REASON_LIMIT	= -4,
7695 	REASON_STACK	= -5,
7696 };
7697 
7698 static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
7699 			      u32 *alu_limit, bool mask_to_left)
7700 {
7701 	u32 max = 0, ptr_limit = 0;
7702 
7703 	switch (ptr_reg->type) {
7704 	case PTR_TO_STACK:
7705 		/* Offset 0 is out-of-bounds, but acceptable start for the
7706 		 * left direction, see BPF_REG_FP. Also, unknown scalar
7707 		 * offset where we would need to deal with min/max bounds is
7708 		 * currently prohibited for unprivileged.
7709 		 */
7710 		max = MAX_BPF_STACK + mask_to_left;
7711 		ptr_limit = -(ptr_reg->var_off.value + ptr_reg->off);
7712 		break;
7713 	case PTR_TO_MAP_VALUE:
7714 		max = ptr_reg->map_ptr->value_size;
7715 		ptr_limit = (mask_to_left ?
7716 			     ptr_reg->smin_value :
7717 			     ptr_reg->umax_value) + ptr_reg->off;
7718 		break;
7719 	default:
7720 		return REASON_TYPE;
7721 	}
7722 
7723 	if (ptr_limit >= max)
7724 		return REASON_LIMIT;
7725 	*alu_limit = ptr_limit;
7726 	return 0;
7727 }
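
/* Worked example (illustrative): for r1 = fp - 16 (PTR_TO_STACK, off == -16,
 * var_off == 0) and an 'r1 += r2' with an unknown non-negative r2
 * (mask_to_left == false), ptr_limit = -(0 + -16) = 16 and max = 512, so
 * alu_limit becomes 16: the Spectre masking applied in do_misc_fixups()
 * forces the added offset to zero whenever it would exceed 16, keeping the
 * pointer at or below the frame pointer even under speculation. For
 * PTR_TO_MAP_VALUE the limit is checked against the map's value_size instead.
 */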
7728 
7729 static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env,
7730 				    const struct bpf_insn *insn)
7731 {
7732 	return env->bypass_spec_v1 || BPF_SRC(insn->code) == BPF_K;
7733 }
7734 
7735 static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux,
7736 				       u32 alu_state, u32 alu_limit)
7737 {
7738 	/* If we arrived here from different branches with different
7739 	 * state or limits to sanitize, then this won't work.
7740 	 */
7741 	if (aux->alu_state &&
7742 	    (aux->alu_state != alu_state ||
7743 	     aux->alu_limit != alu_limit))
7744 		return REASON_PATHS;
7745 
7746 	/* Corresponding fixup done in do_misc_fixups(). */
7747 	aux->alu_state = alu_state;
7748 	aux->alu_limit = alu_limit;
7749 	return 0;
7750 }
7751 
7752 static int sanitize_val_alu(struct bpf_verifier_env *env,
7753 			    struct bpf_insn *insn)
7754 {
7755 	struct bpf_insn_aux_data *aux = cur_aux(env);
7756 
7757 	if (can_skip_alu_sanitation(env, insn))
7758 		return 0;
7759 
7760 	return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0);
7761 }
7762 
7763 static bool sanitize_needed(u8 opcode)
7764 {
7765 	return opcode == BPF_ADD || opcode == BPF_SUB;
7766 }
7767 
7768 struct bpf_sanitize_info {
7769 	struct bpf_insn_aux_data aux;
7770 	bool mask_to_left;
7771 };
7772 
7773 static struct bpf_verifier_state *
7774 sanitize_speculative_path(struct bpf_verifier_env *env,
7775 			  const struct bpf_insn *insn,
7776 			  u32 next_idx, u32 curr_idx)
7777 {
7778 	struct bpf_verifier_state *branch;
7779 	struct bpf_reg_state *regs;
7780 
7781 	branch = push_stack(env, next_idx, curr_idx, true);
7782 	if (branch && insn) {
7783 		regs = branch->frame[branch->curframe]->regs;
7784 		if (BPF_SRC(insn->code) == BPF_K) {
7785 			mark_reg_unknown(env, regs, insn->dst_reg);
7786 		} else if (BPF_SRC(insn->code) == BPF_X) {
7787 			mark_reg_unknown(env, regs, insn->dst_reg);
7788 			mark_reg_unknown(env, regs, insn->src_reg);
7789 		}
7790 	}
7791 	return branch;
7792 }
7793 
7794 static int sanitize_ptr_alu(struct bpf_verifier_env *env,
7795 			    struct bpf_insn *insn,
7796 			    const struct bpf_reg_state *ptr_reg,
7797 			    const struct bpf_reg_state *off_reg,
7798 			    struct bpf_reg_state *dst_reg,
7799 			    struct bpf_sanitize_info *info,
7800 			    const bool commit_window)
7801 {
7802 	struct bpf_insn_aux_data *aux = commit_window ? cur_aux(env) : &info->aux;
7803 	struct bpf_verifier_state *vstate = env->cur_state;
7804 	bool off_is_imm = tnum_is_const(off_reg->var_off);
7805 	bool off_is_neg = off_reg->smin_value < 0;
7806 	bool ptr_is_dst_reg = ptr_reg == dst_reg;
7807 	u8 opcode = BPF_OP(insn->code);
7808 	u32 alu_state, alu_limit;
7809 	struct bpf_reg_state tmp;
7810 	bool ret;
7811 	int err;
7812 
7813 	if (can_skip_alu_sanitation(env, insn))
7814 		return 0;
7815 
7816 	/* We already marked aux for masking from non-speculative
7817 	 * paths, thus we got here in the first place. We only care
7818 	 * to explore bad access from here.
7819 	 */
7820 	if (vstate->speculative)
7821 		goto do_sim;
7822 
7823 	if (!commit_window) {
7824 		if (!tnum_is_const(off_reg->var_off) &&
7825 		    (off_reg->smin_value < 0) != (off_reg->smax_value < 0))
7826 			return REASON_BOUNDS;
7827 
7828 		info->mask_to_left = (opcode == BPF_ADD &&  off_is_neg) ||
7829 				     (opcode == BPF_SUB && !off_is_neg);
7830 	}
7831 
7832 	err = retrieve_ptr_limit(ptr_reg, &alu_limit, info->mask_to_left);
7833 	if (err < 0)
7834 		return err;
7835 
7836 	if (commit_window) {
7837 		/* In commit phase we narrow the masking window based on
7838 		 * the observed pointer move after the simulated operation.
7839 		 */
7840 		alu_state = info->aux.alu_state;
7841 		alu_limit = abs(info->aux.alu_limit - alu_limit);
7842 	} else {
7843 		alu_state  = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
7844 		alu_state |= off_is_imm ? BPF_ALU_IMMEDIATE : 0;
7845 		alu_state |= ptr_is_dst_reg ?
7846 			     BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
7847 
7848 		/* Limit pruning on unknown scalars to enable deep search for
7849 		 * potential masking differences from other program paths.
7850 		 */
7851 		if (!off_is_imm)
7852 			env->explore_alu_limits = true;
7853 	}
7854 
7855 	err = update_alu_sanitation_state(aux, alu_state, alu_limit);
7856 	if (err < 0)
7857 		return err;
7858 do_sim:
7859 	/* If we're in commit phase, we're done here given we already
7860 	 * pushed the truncated dst_reg into the speculative verification
7861 	 * stack.
7862 	 *
7863 	 * Also, when register is a known constant, we rewrite register-based
7864 	 * operation to immediate-based, and thus do not need masking (and as
7865 	 * a consequence, do not need to simulate the zero-truncation either).
7866 	 */
7867 	if (commit_window || off_is_imm)
7868 		return 0;
7869 
7870 	/* Simulate and find potential out-of-bounds access under
7871 	 * speculative execution from truncation as a result of
7872 	 * masking when off was not within expected range. If off
7873 	 * sits in dst, then we temporarily need to move ptr there
7874 	 * to simulate dst (== 0) +/-= ptr. Needed, for example,
7875 	 * for cases where we use K-based arithmetic in one direction
7876 	 * and truncated reg-based in the other in order to explore
7877 	 * bad access.
7878 	 */
7879 	if (!ptr_is_dst_reg) {
7880 		tmp = *dst_reg;
7881 		*dst_reg = *ptr_reg;
7882 	}
7883 	ret = sanitize_speculative_path(env, NULL, env->insn_idx + 1,
7884 					env->insn_idx);
7885 	if (!ptr_is_dst_reg && ret)
7886 		*dst_reg = tmp;
7887 	return !ret ? REASON_STACK : 0;
7888 }
7889 
7890 static void sanitize_mark_insn_seen(struct bpf_verifier_env *env)
7891 {
7892 	struct bpf_verifier_state *vstate = env->cur_state;
7893 
7894 	/* If we simulate paths under speculation, we don't update the
7895 	 * insn as 'seen' such that when we verify unreachable paths in
7896 	 * the non-speculative domain, sanitize_dead_code() can still
7897 	 * rewrite/sanitize them.
7898 	 */
7899 	if (!vstate->speculative)
7900 		env->insn_aux_data[env->insn_idx].seen = env->pass_cnt;
7901 }
7902 
7903 static int sanitize_err(struct bpf_verifier_env *env,
7904 			const struct bpf_insn *insn, int reason,
7905 			const struct bpf_reg_state *off_reg,
7906 			const struct bpf_reg_state *dst_reg)
7907 {
7908 	static const char *err = "pointer arithmetic with it prohibited for !root";
7909 	const char *op = BPF_OP(insn->code) == BPF_ADD ? "add" : "sub";
7910 	u32 dst = insn->dst_reg, src = insn->src_reg;
7911 
7912 	switch (reason) {
7913 	case REASON_BOUNDS:
7914 		verbose(env, "R%d has unknown scalar with mixed signed bounds, %s\n",
7915 			off_reg == dst_reg ? dst : src, err);
7916 		break;
7917 	case REASON_TYPE:
7918 		verbose(env, "R%d has pointer with unsupported alu operation, %s\n",
7919 			off_reg == dst_reg ? src : dst, err);
7920 		break;
7921 	case REASON_PATHS:
7922 		verbose(env, "R%d tried to %s from different maps, paths or scalars, %s\n",
7923 			dst, op, err);
7924 		break;
7925 	case REASON_LIMIT:
7926 		verbose(env, "R%d tried to %s beyond pointer bounds, %s\n",
7927 			dst, op, err);
7928 		break;
7929 	case REASON_STACK:
7930 		verbose(env, "R%d could not be pushed for speculative verification, %s\n",
7931 			dst, err);
7932 		break;
7933 	default:
7934 		verbose(env, "verifier internal error: unknown reason (%d)\n",
7935 			reason);
7936 		break;
7937 	}
7938 
7939 	return -EACCES;
7940 }
7941 
7942 /* check that stack access falls within stack limits and that 'reg' doesn't
7943  * have a variable offset.
7944  *
7945  * Variable offset is prohibited for unprivileged mode for simplicity since it
7946  * requires corresponding support in Spectre masking for stack ALU.  See also
7947  * retrieve_ptr_limit().
7948  *
7949  *
7950  * 'off' includes 'reg->off'.
7951  */
7952 static int check_stack_access_for_ptr_arithmetic(
7953 				struct bpf_verifier_env *env,
7954 				int regno,
7955 				const struct bpf_reg_state *reg,
7956 				int off)
7957 {
7958 	if (!tnum_is_const(reg->var_off)) {
7959 		char tn_buf[48];
7960 
7961 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
7962 		verbose(env, "R%d variable stack access prohibited for !root, var_off=%s off=%d\n",
7963 			regno, tn_buf, off);
7964 		return -EACCES;
7965 	}
7966 
7967 	if (off >= 0 || off < -MAX_BPF_STACK) {
7968 		verbose(env, "R%d stack pointer arithmetic goes out of range, "
7969 			"prohibited for !root; off=%d\n", regno, off);
7970 		return -EACCES;
7971 	}
7972 
7973 	return 0;
7974 }
7975 
7976 static int sanitize_check_bounds(struct bpf_verifier_env *env,
7977 				 const struct bpf_insn *insn,
7978 				 const struct bpf_reg_state *dst_reg)
7979 {
7980 	u32 dst = insn->dst_reg;
7981 
7982 	/* For unprivileged we require that resulting offset must be in bounds
7983 	 * in order to be able to sanitize access later on.
7984 	 */
7985 	if (env->bypass_spec_v1)
7986 		return 0;
7987 
7988 	switch (dst_reg->type) {
7989 	case PTR_TO_STACK:
7990 		if (check_stack_access_for_ptr_arithmetic(env, dst, dst_reg,
7991 					dst_reg->off + dst_reg->var_off.value))
7992 			return -EACCES;
7993 		break;
7994 	case PTR_TO_MAP_VALUE:
7995 		if (check_map_access(env, dst, dst_reg->off, 1, false, ACCESS_HELPER)) {
7996 			verbose(env, "R%d pointer arithmetic of map value goes out of range, "
7997 				"prohibited for !root\n", dst);
7998 			return -EACCES;
7999 		}
8000 		break;
8001 	default:
8002 		break;
8003 	}
8004 
8005 	return 0;
8006 }
8007 
8008 /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
8009  * Caller should also handle BPF_MOV case separately.
8010  * If we return -EACCES, caller may want to try again treating pointer as a
8011  * scalar.  So we only emit a diagnostic if !env->allow_ptr_leaks.
8012  */
8013 static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
8014 				   struct bpf_insn *insn,
8015 				   const struct bpf_reg_state *ptr_reg,
8016 				   const struct bpf_reg_state *off_reg)
8017 {
8018 	struct bpf_verifier_state *vstate = env->cur_state;
8019 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
8020 	struct bpf_reg_state *regs = state->regs, *dst_reg;
8021 	bool known = tnum_is_const(off_reg->var_off);
8022 	s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value,
8023 	    smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
8024 	u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
8025 	    umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
8026 	struct bpf_sanitize_info info = {};
8027 	u8 opcode = BPF_OP(insn->code);
8028 	u32 dst = insn->dst_reg;
8029 	int ret;
8030 
8031 	dst_reg = &regs[dst];
8032 
8033 	if ((known && (smin_val != smax_val || umin_val != umax_val)) ||
8034 	    smin_val > smax_val || umin_val > umax_val) {
8035 		/* Taint dst register if offset had invalid bounds derived from
8036 		 * e.g. dead branches.
8037 		 */
8038 		__mark_reg_unknown(env, dst_reg);
8039 		return 0;
8040 	}
8041 
8042 	if (BPF_CLASS(insn->code) != BPF_ALU64) {
8043 		/* 32-bit ALU ops on pointers produce (meaningless) scalars */
8044 		if (opcode == BPF_SUB && env->allow_ptr_leaks) {
8045 			__mark_reg_unknown(env, dst_reg);
8046 			return 0;
8047 		}
8048 
8049 		verbose(env,
8050 			"R%d 32-bit pointer arithmetic prohibited\n",
8051 			dst);
8052 		return -EACCES;
8053 	}
8054 
8055 	if (ptr_reg->type & PTR_MAYBE_NULL) {
8056 		verbose(env, "R%d pointer arithmetic on %s prohibited, null-check it first\n",
8057 			dst, reg_type_str(env, ptr_reg->type));
8058 		return -EACCES;
8059 	}
8060 
8061 	switch (base_type(ptr_reg->type)) {
8062 	case CONST_PTR_TO_MAP:
8063 		/* smin_val represents the known value */
8064 		if (known && smin_val == 0 && opcode == BPF_ADD)
8065 			break;
8066 		fallthrough;
8067 	case PTR_TO_PACKET_END:
8068 	case PTR_TO_SOCKET:
8069 	case PTR_TO_SOCK_COMMON:
8070 	case PTR_TO_TCP_SOCK:
8071 	case PTR_TO_XDP_SOCK:
8072 		verbose(env, "R%d pointer arithmetic on %s prohibited\n",
8073 			dst, reg_type_str(env, ptr_reg->type));
8074 		return -EACCES;
8075 	default:
8076 		break;
8077 	}
8078 
8079 	/* In case of 'scalar += pointer', dst_reg inherits pointer type and id.
8080 	 * The id may be overwritten later if we create a new variable offset.
8081 	 */
8082 	dst_reg->type = ptr_reg->type;
8083 	dst_reg->id = ptr_reg->id;
8084 
8085 	if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) ||
8086 	    !check_reg_sane_offset(env, ptr_reg, ptr_reg->type))
8087 		return -EINVAL;
8088 
8089 	/* pointer types do not carry 32-bit bounds at the moment. */
8090 	__mark_reg32_unbounded(dst_reg);
8091 
8092 	if (sanitize_needed(opcode)) {
8093 		ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg,
8094 				       &info, false);
8095 		if (ret < 0)
8096 			return sanitize_err(env, insn, ret, off_reg, dst_reg);
8097 	}
8098 
8099 	switch (opcode) {
8100 	case BPF_ADD:
8101 		/* We can take a fixed offset as long as it doesn't overflow
8102 		 * the s32 'off' field
8103 		 */
8104 		if (known && (ptr_reg->off + smin_val ==
8105 			      (s64)(s32)(ptr_reg->off + smin_val))) {
8106 			/* pointer += K.  Accumulate it into fixed offset */
8107 			dst_reg->smin_value = smin_ptr;
8108 			dst_reg->smax_value = smax_ptr;
8109 			dst_reg->umin_value = umin_ptr;
8110 			dst_reg->umax_value = umax_ptr;
8111 			dst_reg->var_off = ptr_reg->var_off;
8112 			dst_reg->off = ptr_reg->off + smin_val;
8113 			dst_reg->raw = ptr_reg->raw;
8114 			break;
8115 		}
8116 		/* A new variable offset is created.  Note that off_reg->off
8117 		 * == 0, since it's a scalar.
8118 		 * dst_reg gets the pointer type and since some positive
8119 		 * integer value was added to the pointer, give it a new 'id'
8120 		 * if it's a PTR_TO_PACKET.
8121 		 * This creates a new 'base' pointer: off_reg (variable) gets
8122 		 * added into the variable offset, and we copy the fixed offset
8123 		 * from ptr_reg.
8124 		 */
8125 		if (signed_add_overflows(smin_ptr, smin_val) ||
8126 		    signed_add_overflows(smax_ptr, smax_val)) {
8127 			dst_reg->smin_value = S64_MIN;
8128 			dst_reg->smax_value = S64_MAX;
8129 		} else {
8130 			dst_reg->smin_value = smin_ptr + smin_val;
8131 			dst_reg->smax_value = smax_ptr + smax_val;
8132 		}
8133 		if (umin_ptr + umin_val < umin_ptr ||
8134 		    umax_ptr + umax_val < umax_ptr) {
8135 			dst_reg->umin_value = 0;
8136 			dst_reg->umax_value = U64_MAX;
8137 		} else {
8138 			dst_reg->umin_value = umin_ptr + umin_val;
8139 			dst_reg->umax_value = umax_ptr + umax_val;
8140 		}
8141 		dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off);
8142 		dst_reg->off = ptr_reg->off;
8143 		dst_reg->raw = ptr_reg->raw;
8144 		if (reg_is_pkt_pointer(ptr_reg)) {
8145 			dst_reg->id = ++env->id_gen;
8146 			/* something was added to pkt_ptr, set range to zero */
8147 			memset(&dst_reg->raw, 0, sizeof(dst_reg->raw));
8148 		}
8149 		break;
8150 	case BPF_SUB:
8151 		if (dst_reg == off_reg) {
8152 			/* scalar -= pointer.  Creates an unknown scalar */
8153 			verbose(env, "R%d tried to subtract pointer from scalar\n",
8154 				dst);
8155 			return -EACCES;
8156 		}
8157 		/* We don't allow subtraction from FP, because (according to
8158 		 * test_verifier.c test "invalid fp arithmetic", JITs might not
8159 		 * be able to deal with it.
8160 		 */
8161 		if (ptr_reg->type == PTR_TO_STACK) {
8162 			verbose(env, "R%d subtraction from stack pointer prohibited\n",
8163 				dst);
8164 			return -EACCES;
8165 		}
8166 		if (known && (ptr_reg->off - smin_val ==
8167 			      (s64)(s32)(ptr_reg->off - smin_val))) {
8168 			/* pointer -= K.  Subtract it from fixed offset */
8169 			dst_reg->smin_value = smin_ptr;
8170 			dst_reg->smax_value = smax_ptr;
8171 			dst_reg->umin_value = umin_ptr;
8172 			dst_reg->umax_value = umax_ptr;
8173 			dst_reg->var_off = ptr_reg->var_off;
8174 			dst_reg->id = ptr_reg->id;
8175 			dst_reg->off = ptr_reg->off - smin_val;
8176 			dst_reg->raw = ptr_reg->raw;
8177 			break;
8178 		}
8179 		/* A new variable offset is created.  If the subtrahend is known
8180 		 * nonnegative, then any reg->range we had before is still good.
8181 		 */
8182 		if (signed_sub_overflows(smin_ptr, smax_val) ||
8183 		    signed_sub_overflows(smax_ptr, smin_val)) {
8184 			/* Overflow possible, we know nothing */
8185 			dst_reg->smin_value = S64_MIN;
8186 			dst_reg->smax_value = S64_MAX;
8187 		} else {
8188 			dst_reg->smin_value = smin_ptr - smax_val;
8189 			dst_reg->smax_value = smax_ptr - smin_val;
8190 		}
8191 		if (umin_ptr < umax_val) {
8192 			/* Overflow possible, we know nothing */
8193 			dst_reg->umin_value = 0;
8194 			dst_reg->umax_value = U64_MAX;
8195 		} else {
8196 			/* Cannot overflow (as long as bounds are consistent) */
8197 			dst_reg->umin_value = umin_ptr - umax_val;
8198 			dst_reg->umax_value = umax_ptr - umin_val;
8199 		}
8200 		dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off);
8201 		dst_reg->off = ptr_reg->off;
8202 		dst_reg->raw = ptr_reg->raw;
8203 		if (reg_is_pkt_pointer(ptr_reg)) {
8204 			dst_reg->id = ++env->id_gen;
8205 			/* a possibly negative offset was subtracted from pkt_ptr, clear range */
8206 			if (smin_val < 0)
8207 				memset(&dst_reg->raw, 0, sizeof(dst_reg->raw));
8208 		}
8209 		break;
8210 	case BPF_AND:
8211 	case BPF_OR:
8212 	case BPF_XOR:
8213 		/* bitwise ops on pointers are troublesome, prohibit. */
8214 		verbose(env, "R%d bitwise operator %s on pointer prohibited\n",
8215 			dst, bpf_alu_string[opcode >> 4]);
8216 		return -EACCES;
8217 	default:
8218 		/* other operators (e.g. MUL,LSH) produce non-pointer results */
8219 		verbose(env, "R%d pointer arithmetic with %s operator prohibited\n",
8220 			dst, bpf_alu_string[opcode >> 4]);
8221 		return -EACCES;
8222 	}
8223 
8224 	if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type))
8225 		return -EINVAL;
8226 
8227 	__update_reg_bounds(dst_reg);
8228 	__reg_deduce_bounds(dst_reg);
8229 	__reg_bound_offset(dst_reg);
8230 
8231 	if (sanitize_check_bounds(env, insn, dst_reg) < 0)
8232 		return -EACCES;
8233 	if (sanitize_needed(opcode)) {
8234 		ret = sanitize_ptr_alu(env, insn, dst_reg, off_reg, dst_reg,
8235 				       &info, true);
8236 		if (ret < 0)
8237 			return sanitize_err(env, insn, ret, off_reg, dst_reg);
8238 	}
8239 
8240 	return 0;
8241 }
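
/* Worked example (illustrative): with r2 a PTR_TO_MAP_VALUE (off == 0) into a
 * 48-byte value, the sequence
 *
 *	r3 = 40;	known scalar, smin == smax == umin == umax == 40
 *	r2 += r3;	known offset fits in s32 -> fixed-offset BPF_ADD branch
 *
 * leaves dst_reg with off == 40 and var_off unchanged, so a later
 * '*(u64 *)(r2 + 0)' is checked as an 8-byte access at offset 40 of a 48-byte
 * value and passes check_map_access().
 */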
8242 
8243 static void scalar32_min_max_add(struct bpf_reg_state *dst_reg,
8244 				 struct bpf_reg_state *src_reg)
8245 {
8246 	s32 smin_val = src_reg->s32_min_value;
8247 	s32 smax_val = src_reg->s32_max_value;
8248 	u32 umin_val = src_reg->u32_min_value;
8249 	u32 umax_val = src_reg->u32_max_value;
8250 
8251 	if (signed_add32_overflows(dst_reg->s32_min_value, smin_val) ||
8252 	    signed_add32_overflows(dst_reg->s32_max_value, smax_val)) {
8253 		dst_reg->s32_min_value = S32_MIN;
8254 		dst_reg->s32_max_value = S32_MAX;
8255 	} else {
8256 		dst_reg->s32_min_value += smin_val;
8257 		dst_reg->s32_max_value += smax_val;
8258 	}
8259 	if (dst_reg->u32_min_value + umin_val < umin_val ||
8260 	    dst_reg->u32_max_value + umax_val < umax_val) {
8261 		dst_reg->u32_min_value = 0;
8262 		dst_reg->u32_max_value = U32_MAX;
8263 	} else {
8264 		dst_reg->u32_min_value += umin_val;
8265 		dst_reg->u32_max_value += umax_val;
8266 	}
8267 }
8268 
8269 static void scalar_min_max_add(struct bpf_reg_state *dst_reg,
8270 			       struct bpf_reg_state *src_reg)
8271 {
8272 	s64 smin_val = src_reg->smin_value;
8273 	s64 smax_val = src_reg->smax_value;
8274 	u64 umin_val = src_reg->umin_value;
8275 	u64 umax_val = src_reg->umax_value;
8276 
8277 	if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
8278 	    signed_add_overflows(dst_reg->smax_value, smax_val)) {
8279 		dst_reg->smin_value = S64_MIN;
8280 		dst_reg->smax_value = S64_MAX;
8281 	} else {
8282 		dst_reg->smin_value += smin_val;
8283 		dst_reg->smax_value += smax_val;
8284 	}
8285 	if (dst_reg->umin_value + umin_val < umin_val ||
8286 	    dst_reg->umax_value + umax_val < umax_val) {
8287 		dst_reg->umin_value = 0;
8288 		dst_reg->umax_value = U64_MAX;
8289 	} else {
8290 		dst_reg->umin_value += umin_val;
8291 		dst_reg->umax_value += umax_val;
8292 	}
8293 }
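
/* Worked example (illustrative): if dst_reg is known to be in [0, 10] (both
 * signed and unsigned) and src_reg is the constant 5, the 64-bit bounds
 * simply become [5, 15] in both domains. The bounds are only reset to
 * [S64_MIN, S64_MAX] / [0, U64_MAX] when one of the overflow checks above
 * fires, e.g. when dst_reg->smax_value + 5 would exceed S64_MAX.
 */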
8294 
8295 static void scalar32_min_max_sub(struct bpf_reg_state *dst_reg,
8296 				 struct bpf_reg_state *src_reg)
8297 {
8298 	s32 smin_val = src_reg->s32_min_value;
8299 	s32 smax_val = src_reg->s32_max_value;
8300 	u32 umin_val = src_reg->u32_min_value;
8301 	u32 umax_val = src_reg->u32_max_value;
8302 
8303 	if (signed_sub32_overflows(dst_reg->s32_min_value, smax_val) ||
8304 	    signed_sub32_overflows(dst_reg->s32_max_value, smin_val)) {
8305 		/* Overflow possible, we know nothing */
8306 		dst_reg->s32_min_value = S32_MIN;
8307 		dst_reg->s32_max_value = S32_MAX;
8308 	} else {
8309 		dst_reg->s32_min_value -= smax_val;
8310 		dst_reg->s32_max_value -= smin_val;
8311 	}
8312 	if (dst_reg->u32_min_value < umax_val) {
8313 		/* Overflow possible, we know nothing */
8314 		dst_reg->u32_min_value = 0;
8315 		dst_reg->u32_max_value = U32_MAX;
8316 	} else {
8317 		/* Cannot overflow (as long as bounds are consistent) */
8318 		dst_reg->u32_min_value -= umax_val;
8319 		dst_reg->u32_max_value -= umin_val;
8320 	}
8321 }
8322 
8323 static void scalar_min_max_sub(struct bpf_reg_state *dst_reg,
8324 			       struct bpf_reg_state *src_reg)
8325 {
8326 	s64 smin_val = src_reg->smin_value;
8327 	s64 smax_val = src_reg->smax_value;
8328 	u64 umin_val = src_reg->umin_value;
8329 	u64 umax_val = src_reg->umax_value;
8330 
8331 	if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
8332 	    signed_sub_overflows(dst_reg->smax_value, smin_val)) {
8333 		/* Overflow possible, we know nothing */
8334 		dst_reg->smin_value = S64_MIN;
8335 		dst_reg->smax_value = S64_MAX;
8336 	} else {
8337 		dst_reg->smin_value -= smax_val;
8338 		dst_reg->smax_value -= smin_val;
8339 	}
8340 	if (dst_reg->umin_value < umax_val) {
8341 		/* Overflow possible, we know nothing */
8342 		dst_reg->umin_value = 0;
8343 		dst_reg->umax_value = U64_MAX;
8344 	} else {
8345 		/* Cannot overflow (as long as bounds are consistent) */
8346 		dst_reg->umin_value -= umax_val;
8347 		dst_reg->umax_value -= umin_val;
8348 	}
8349 }
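/* Illustrative trace of scalar_min_max_sub() (example values only):
 * with dst = [umin=5, umax=10] and src = [1, 3], dst->umin >= src->umax
 * so the bounds stay precise: [5 - 3, 10 - 1] = [2, 9].  If src were
 * [1, 8] instead, 5 < 8 means the result could wrap below zero, so the
 * unsigned bounds collapse to [0, U64_MAX].
 */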
8350 
8351 static void scalar32_min_max_mul(struct bpf_reg_state *dst_reg,
8352 				 struct bpf_reg_state *src_reg)
8353 {
8354 	s32 smin_val = src_reg->s32_min_value;
8355 	u32 umin_val = src_reg->u32_min_value;
8356 	u32 umax_val = src_reg->u32_max_value;
8357 
8358 	if (smin_val < 0 || dst_reg->s32_min_value < 0) {
8359 		/* Ain't nobody got time to multiply that sign */
8360 		__mark_reg32_unbounded(dst_reg);
8361 		return;
8362 	}
8363 	/* Both values are positive, so we can work with unsigned and
8364 	 * copy the result to signed (unless it exceeds S32_MAX).
8365 	 */
8366 	if (umax_val > U16_MAX || dst_reg->u32_max_value > U16_MAX) {
8367 		/* Potential overflow, we know nothing */
8368 		__mark_reg32_unbounded(dst_reg);
8369 		return;
8370 	}
8371 	dst_reg->u32_min_value *= umin_val;
8372 	dst_reg->u32_max_value *= umax_val;
8373 	if (dst_reg->u32_max_value > S32_MAX) {
8374 		/* Overflow possible, we know nothing */
8375 		dst_reg->s32_min_value = S32_MIN;
8376 		dst_reg->s32_max_value = S32_MAX;
8377 	} else {
8378 		dst_reg->s32_min_value = dst_reg->u32_min_value;
8379 		dst_reg->s32_max_value = dst_reg->u32_max_value;
8380 	}
8381 }
8382 
8383 static void scalar_min_max_mul(struct bpf_reg_state *dst_reg,
8384 			       struct bpf_reg_state *src_reg)
8385 {
8386 	s64 smin_val = src_reg->smin_value;
8387 	u64 umin_val = src_reg->umin_value;
8388 	u64 umax_val = src_reg->umax_value;
8389 
8390 	if (smin_val < 0 || dst_reg->smin_value < 0) {
8391 		/* Ain't nobody got time to multiply that sign */
8392 		__mark_reg64_unbounded(dst_reg);
8393 		return;
8394 	}
8395 	/* Both values are positive, so we can work with unsigned and
8396 	 * copy the result to signed (unless it exceeds S64_MAX).
8397 	 */
8398 	if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) {
8399 		/* Potential overflow, we know nothing */
8400 		__mark_reg64_unbounded(dst_reg);
8401 		return;
8402 	}
8403 	dst_reg->umin_value *= umin_val;
8404 	dst_reg->umax_value *= umax_val;
8405 	if (dst_reg->umax_value > S64_MAX) {
8406 		/* Overflow possible, we know nothing */
8407 		dst_reg->smin_value = S64_MIN;
8408 		dst_reg->smax_value = S64_MAX;
8409 	} else {
8410 		dst_reg->smin_value = dst_reg->umin_value;
8411 		dst_reg->smax_value = dst_reg->umax_value;
8412 	}
8413 }
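/* Illustrative trace of scalar_min_max_mul() (example values only):
 * both operands must fit in 32 bits, so the 64-bit products below cannot
 * wrap.  With dst = [2, 10] and src = [3, 7] the result is [6, 70], and
 * since 70 <= S64_MAX the signed bounds are copied from the unsigned
 * ones.  If either umax exceeded U32_MAX the register would simply be
 * marked unbounded.
 */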
8414 
8415 static void scalar32_min_max_and(struct bpf_reg_state *dst_reg,
8416 				 struct bpf_reg_state *src_reg)
8417 {
8418 	bool src_known = tnum_subreg_is_const(src_reg->var_off);
8419 	bool dst_known = tnum_subreg_is_const(dst_reg->var_off);
8420 	struct tnum var32_off = tnum_subreg(dst_reg->var_off);
8421 	s32 smin_val = src_reg->s32_min_value;
8422 	u32 umax_val = src_reg->u32_max_value;
8423 
8424 	if (src_known && dst_known) {
8425 		__mark_reg32_known(dst_reg, var32_off.value);
8426 		return;
8427 	}
8428 
8429 	/* We get our minimum from the var_off, since that's inherently
8430 	 * bitwise.  Our maximum is the minimum of the operands' maxima.
8431 	 */
8432 	dst_reg->u32_min_value = var32_off.value;
8433 	dst_reg->u32_max_value = min(dst_reg->u32_max_value, umax_val);
8434 	if (dst_reg->s32_min_value < 0 || smin_val < 0) {
8435 		/* Lose signed bounds when ANDing negative numbers,
8436 		 * ain't nobody got time for that.
8437 		 */
8438 		dst_reg->s32_min_value = S32_MIN;
8439 		dst_reg->s32_max_value = S32_MAX;
8440 	} else {
8441 		/* ANDing two positives gives a positive, so safe to
8442 		 * cast result into s64.
8443 		 */
8444 		dst_reg->s32_min_value = dst_reg->u32_min_value;
8445 		dst_reg->s32_max_value = dst_reg->u32_max_value;
8446 	}
8447 }
8448 
8449 static void scalar_min_max_and(struct bpf_reg_state *dst_reg,
8450 			       struct bpf_reg_state *src_reg)
8451 {
8452 	bool src_known = tnum_is_const(src_reg->var_off);
8453 	bool dst_known = tnum_is_const(dst_reg->var_off);
8454 	s64 smin_val = src_reg->smin_value;
8455 	u64 umax_val = src_reg->umax_value;
8456 
8457 	if (src_known && dst_known) {
8458 		__mark_reg_known(dst_reg, dst_reg->var_off.value);
8459 		return;
8460 	}
8461 
8462 	/* We get our minimum from the var_off, since that's inherently
8463 	 * bitwise.  Our maximum is the minimum of the operands' maxima.
8464 	 */
8465 	dst_reg->umin_value = dst_reg->var_off.value;
8466 	dst_reg->umax_value = min(dst_reg->umax_value, umax_val);
8467 	if (dst_reg->smin_value < 0 || smin_val < 0) {
8468 		/* Lose signed bounds when ANDing negative numbers,
8469 		 * ain't nobody got time for that.
8470 		 */
8471 		dst_reg->smin_value = S64_MIN;
8472 		dst_reg->smax_value = S64_MAX;
8473 	} else {
8474 		/* ANDing two positives gives a positive, so safe to
8475 		 * cast result into s64.
8476 		 */
8477 		dst_reg->smin_value = dst_reg->umin_value;
8478 		dst_reg->smax_value = dst_reg->umax_value;
8479 	}
8480 	/* We may learn something more from the var_off */
8481 	__update_reg_bounds(dst_reg);
8482 }
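/* Illustrative trace of scalar_min_max_and() (example values only):
 * ANDing an unknown byte (var_off = (0x0; 0xff), bounds [0, 0xff]) with
 * the constant 0xf0 yields var_off = (0x0; 0xf0) via tnum_and() in the
 * caller, so umin becomes var_off.value = 0 and umax becomes
 * min(0xff, 0xf0) = 0xf0.
 */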
8483 
8484 static void scalar32_min_max_or(struct bpf_reg_state *dst_reg,
8485 				struct bpf_reg_state *src_reg)
8486 {
8487 	bool src_known = tnum_subreg_is_const(src_reg->var_off);
8488 	bool dst_known = tnum_subreg_is_const(dst_reg->var_off);
8489 	struct tnum var32_off = tnum_subreg(dst_reg->var_off);
8490 	s32 smin_val = src_reg->s32_min_value;
8491 	u32 umin_val = src_reg->u32_min_value;
8492 
8493 	if (src_known && dst_known) {
8494 		__mark_reg32_known(dst_reg, var32_off.value);
8495 		return;
8496 	}
8497 
8498 	/* We get our maximum from the var_off, and our minimum is the
8499 	 * maximum of the operands' minima
8500 	 */
8501 	dst_reg->u32_min_value = max(dst_reg->u32_min_value, umin_val);
8502 	dst_reg->u32_max_value = var32_off.value | var32_off.mask;
8503 	if (dst_reg->s32_min_value < 0 || smin_val < 0) {
8504 		/* Lose signed bounds when ORing negative numbers,
8505 		 * ain't nobody got time for that.
8506 		 */
8507 		dst_reg->s32_min_value = S32_MIN;
8508 		dst_reg->s32_max_value = S32_MAX;
8509 	} else {
8510 		/* ORing two positives gives a positive, so safe to
8511 		 * cast result into s64.
8512 		 */
8513 		dst_reg->s32_min_value = dst_reg->u32_min_value;
8514 		dst_reg->s32_max_value = dst_reg->u32_max_value;
8515 	}
8516 }
8517 
8518 static void scalar_min_max_or(struct bpf_reg_state *dst_reg,
8519 			      struct bpf_reg_state *src_reg)
8520 {
8521 	bool src_known = tnum_is_const(src_reg->var_off);
8522 	bool dst_known = tnum_is_const(dst_reg->var_off);
8523 	s64 smin_val = src_reg->smin_value;
8524 	u64 umin_val = src_reg->umin_value;
8525 
8526 	if (src_known && dst_known) {
8527 		__mark_reg_known(dst_reg, dst_reg->var_off.value);
8528 		return;
8529 	}
8530 
8531 	/* We get our maximum from the var_off, and our minimum is the
8532 	 * maximum of the operands' minima
8533 	 */
8534 	dst_reg->umin_value = max(dst_reg->umin_value, umin_val);
8535 	dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask;
8536 	if (dst_reg->smin_value < 0 || smin_val < 0) {
8537 		/* Lose signed bounds when ORing negative numbers,
8538 		 * ain't nobody got time for that.
8539 		 */
8540 		dst_reg->smin_value = S64_MIN;
8541 		dst_reg->smax_value = S64_MAX;
8542 	} else {
8543 		/* ORing two positives gives a positive, so safe to
8544 		 * cast result into s64.
8545 		 */
8546 		dst_reg->smin_value = dst_reg->umin_value;
8547 		dst_reg->smax_value = dst_reg->umax_value;
8548 	}
8549 	/* We may learn something more from the var_off */
8550 	__update_reg_bounds(dst_reg);
8551 }
8552 
8553 static void scalar32_min_max_xor(struct bpf_reg_state *dst_reg,
8554 				 struct bpf_reg_state *src_reg)
8555 {
8556 	bool src_known = tnum_subreg_is_const(src_reg->var_off);
8557 	bool dst_known = tnum_subreg_is_const(dst_reg->var_off);
8558 	struct tnum var32_off = tnum_subreg(dst_reg->var_off);
8559 	s32 smin_val = src_reg->s32_min_value;
8560 
8561 	if (src_known && dst_known) {
8562 		__mark_reg32_known(dst_reg, var32_off.value);
8563 		return;
8564 	}
8565 
8566 	/* We get both minimum and maximum from the var32_off. */
8567 	dst_reg->u32_min_value = var32_off.value;
8568 	dst_reg->u32_max_value = var32_off.value | var32_off.mask;
8569 
8570 	if (dst_reg->s32_min_value >= 0 && smin_val >= 0) {
8571 		/* XORing two positive sign numbers gives a positive,
8572 		 * so safe to cast u32 result into s32.
8573 		 */
8574 		dst_reg->s32_min_value = dst_reg->u32_min_value;
8575 		dst_reg->s32_max_value = dst_reg->u32_max_value;
8576 	} else {
8577 		dst_reg->s32_min_value = S32_MIN;
8578 		dst_reg->s32_max_value = S32_MAX;
8579 	}
8580 }
8581 
8582 static void scalar_min_max_xor(struct bpf_reg_state *dst_reg,
8583 			       struct bpf_reg_state *src_reg)
8584 {
8585 	bool src_known = tnum_is_const(src_reg->var_off);
8586 	bool dst_known = tnum_is_const(dst_reg->var_off);
8587 	s64 smin_val = src_reg->smin_value;
8588 
8589 	if (src_known && dst_known) {
8590 		/* dst_reg->var_off.value has been updated earlier */
8591 		__mark_reg_known(dst_reg, dst_reg->var_off.value);
8592 		return;
8593 	}
8594 
8595 	/* We get both minimum and maximum from the var_off. */
8596 	dst_reg->umin_value = dst_reg->var_off.value;
8597 	dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask;
8598 
8599 	if (dst_reg->smin_value >= 0 && smin_val >= 0) {
8600 		/* XORing two positive sign numbers gives a positive,
8601 		 * so safe to cast u64 result into s64.
8602 		 */
8603 		dst_reg->smin_value = dst_reg->umin_value;
8604 		dst_reg->smax_value = dst_reg->umax_value;
8605 	} else {
8606 		dst_reg->smin_value = S64_MIN;
8607 		dst_reg->smax_value = S64_MAX;
8608 	}
8609 
8610 	__update_reg_bounds(dst_reg);
8611 }
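/* Illustrative trace of scalar_min_max_xor() (example values only):
 * XORing an unknown nibble (var_off = (0x0; 0xf)) with the constant
 * 0x10 gives var_off = (0x10; 0xf) via tnum_xor() in the caller, so the
 * unsigned bounds become [var_off.value, value | mask] = [0x10, 0x1f].
 */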
8612 
8613 static void __scalar32_min_max_lsh(struct bpf_reg_state *dst_reg,
8614 				   u64 umin_val, u64 umax_val)
8615 {
8616 	/* We lose all sign bit information (except what we can pick
8617 	 * up from var_off)
8618 	 */
8619 	dst_reg->s32_min_value = S32_MIN;
8620 	dst_reg->s32_max_value = S32_MAX;
8621 	/* If we might shift our top bit out, then we know nothing */
8622 	if (umax_val > 31 || dst_reg->u32_max_value > 1ULL << (31 - umax_val)) {
8623 		dst_reg->u32_min_value = 0;
8624 		dst_reg->u32_max_value = U32_MAX;
8625 	} else {
8626 		dst_reg->u32_min_value <<= umin_val;
8627 		dst_reg->u32_max_value <<= umax_val;
8628 	}
8629 }
8630 
8631 static void scalar32_min_max_lsh(struct bpf_reg_state *dst_reg,
8632 				 struct bpf_reg_state *src_reg)
8633 {
8634 	u32 umax_val = src_reg->u32_max_value;
8635 	u32 umin_val = src_reg->u32_min_value;
8636 	/* u32 alu operation will zext upper bits */
8637 	struct tnum subreg = tnum_subreg(dst_reg->var_off);
8638 
8639 	__scalar32_min_max_lsh(dst_reg, umin_val, umax_val);
8640 	dst_reg->var_off = tnum_subreg(tnum_lshift(subreg, umin_val));
8641 	/* Not strictly required, but to be careful mark the reg64 bounds as
8642 	 * unknown so that we are forced to pick them up from the tnum and zext
8643 	 * later; if some path skips this step we are still safe.
8644 	 */
8645 	__mark_reg64_unbounded(dst_reg);
8646 	__update_reg32_bounds(dst_reg);
8647 }
8648 
8649 static void __scalar64_min_max_lsh(struct bpf_reg_state *dst_reg,
8650 				   u64 umin_val, u64 umax_val)
8651 {
8652 	/* Special case <<32 because it is a common compiler pattern to sign
8653 	 * extend subreg by doing <<32 s>>32. In this case if 32bit bounds are
8654 	 * positive we know this shift will also be positive so we can track
8655 	 * bounds correctly. Otherwise we lose all sign bit information except
8656 	 * what we can pick up from var_off. Perhaps we can generalize this
8657 	 * later to shifts of any length.
8658 	 */
8659 	if (umin_val == 32 && umax_val == 32 && dst_reg->s32_max_value >= 0)
8660 		dst_reg->smax_value = (s64)dst_reg->s32_max_value << 32;
8661 	else
8662 		dst_reg->smax_value = S64_MAX;
8663 
8664 	if (umin_val == 32 && umax_val == 32 && dst_reg->s32_min_value >= 0)
8665 		dst_reg->smin_value = (s64)dst_reg->s32_min_value << 32;
8666 	else
8667 		dst_reg->smin_value = S64_MIN;
8668 
8669 	/* If we might shift our top bit out, then we know nothing */
8670 	if (dst_reg->umax_value > 1ULL << (63 - umax_val)) {
8671 		dst_reg->umin_value = 0;
8672 		dst_reg->umax_value = U64_MAX;
8673 	} else {
8674 		dst_reg->umin_value <<= umin_val;
8675 		dst_reg->umax_value <<= umax_val;
8676 	}
8677 }
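/* Illustrative example of the <<32 special case above: compilers commonly
 * sign extend a subreg with
 *    BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 32),
 *    BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 32),
 * If the 32-bit bounds were e.g. [0, 100], the left shift yields smin/smax
 * of 0 and 100ULL << 32, letting the following s>>32 recover precise
 * bounds instead of [S64_MIN, S64_MAX].
 */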
8678 
8679 static void scalar_min_max_lsh(struct bpf_reg_state *dst_reg,
8680 			       struct bpf_reg_state *src_reg)
8681 {
8682 	u64 umax_val = src_reg->umax_value;
8683 	u64 umin_val = src_reg->umin_value;
8684 
8685 	/* scalar64 calc uses 32bit unshifted bounds so must be called first */
8686 	__scalar64_min_max_lsh(dst_reg, umin_val, umax_val);
8687 	__scalar32_min_max_lsh(dst_reg, umin_val, umax_val);
8688 
8689 	dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val);
8690 	/* We may learn something more from the var_off */
8691 	__update_reg_bounds(dst_reg);
8692 }
8693 
8694 static void scalar32_min_max_rsh(struct bpf_reg_state *dst_reg,
8695 				 struct bpf_reg_state *src_reg)
8696 {
8697 	struct tnum subreg = tnum_subreg(dst_reg->var_off);
8698 	u32 umax_val = src_reg->u32_max_value;
8699 	u32 umin_val = src_reg->u32_min_value;
8700 
8701 	/* BPF_RSH is an unsigned shift.  If the value in dst_reg might
8702 	 * be negative, then either:
8703 	 * 1) src_reg might be zero, so the sign bit of the result is
8704 	 *    unknown, so we lose our signed bounds
8705 	 * 2) it's known negative, thus the unsigned bounds capture the
8706 	 *    signed bounds
8707 	 * 3) the signed bounds cross zero, so they tell us nothing
8708 	 *    about the result
8709 	 * If the value in dst_reg is known nonnegative, then again the
8710 	 * unsigned bounds capture the signed bounds.
8711 	 * Thus, in all cases it suffices to blow away our signed bounds
8712 	 * and rely on inferring new ones from the unsigned bounds and
8713 	 * var_off of the result.
8714 	 */
8715 	dst_reg->s32_min_value = S32_MIN;
8716 	dst_reg->s32_max_value = S32_MAX;
8717 
8718 	dst_reg->var_off = tnum_rshift(subreg, umin_val);
8719 	dst_reg->u32_min_value >>= umax_val;
8720 	dst_reg->u32_max_value >>= umin_val;
8721 
8722 	__mark_reg64_unbounded(dst_reg);
8723 	__update_reg32_bounds(dst_reg);
8724 }
8725 
8726 static void scalar_min_max_rsh(struct bpf_reg_state *dst_reg,
8727 			       struct bpf_reg_state *src_reg)
8728 {
8729 	u64 umax_val = src_reg->umax_value;
8730 	u64 umin_val = src_reg->umin_value;
8731 
8732 	/* BPF_RSH is an unsigned shift.  If the value in dst_reg might
8733 	 * be negative, then either:
8734 	 * 1) src_reg might be zero, so the sign bit of the result is
8735 	 *    unknown, so we lose our signed bounds
8736 	 * 2) it's known negative, thus the unsigned bounds capture the
8737 	 *    signed bounds
8738 	 * 3) the signed bounds cross zero, so they tell us nothing
8739 	 *    about the result
8740 	 * If the value in dst_reg is known nonnegative, then again the
8741 	 * unsigned bounds capture the signed bounds.
8742 	 * Thus, in all cases it suffices to blow away our signed bounds
8743 	 * and rely on inferring new ones from the unsigned bounds and
8744 	 * var_off of the result.
8745 	 */
8746 	dst_reg->smin_value = S64_MIN;
8747 	dst_reg->smax_value = S64_MAX;
8748 	dst_reg->var_off = tnum_rshift(dst_reg->var_off, umin_val);
8749 	dst_reg->umin_value >>= umax_val;
8750 	dst_reg->umax_value >>= umin_val;
8751 
8752 	/* It's not easy to operate on alu32 bounds here because it depends
8753 	 * on bits being shifted in. Take the easy way out and mark unbounded
8754 	 * so we can recalculate later from tnum.
8755 	 */
8756 	__mark_reg32_unbounded(dst_reg);
8757 	__update_reg_bounds(dst_reg);
8758 }
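/* Illustrative trace of scalar_min_max_rsh() (example values only): if
 * dst could be -1 (0xffffffffffffffff as u64), a logical shift right by
 * one produces a huge positive value, which is why the signed bounds are
 * discarded above.  For dst = [0x10, 0xff] and a shift range of [4, 4]
 * the unsigned bounds become [0x10 >> 4, 0xff >> 4] = [0x1, 0xf].
 */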
8759 
8760 static void scalar32_min_max_arsh(struct bpf_reg_state *dst_reg,
8761 				  struct bpf_reg_state *src_reg)
8762 {
8763 	u64 umin_val = src_reg->u32_min_value;
8764 
8765 	/* Upon reaching here, src_known is true and
8766 	 * umax_val is equal to umin_val.
8767 	 */
8768 	dst_reg->s32_min_value = (u32)(((s32)dst_reg->s32_min_value) >> umin_val);
8769 	dst_reg->s32_max_value = (u32)(((s32)dst_reg->s32_max_value) >> umin_val);
8770 
8771 	dst_reg->var_off = tnum_arshift(tnum_subreg(dst_reg->var_off), umin_val, 32);
8772 
8773 	/* blow away the dst_reg umin_value/umax_value and rely on
8774 	 * dst_reg var_off to refine the result.
8775 	 */
8776 	dst_reg->u32_min_value = 0;
8777 	dst_reg->u32_max_value = U32_MAX;
8778 
8779 	__mark_reg64_unbounded(dst_reg);
8780 	__update_reg32_bounds(dst_reg);
8781 }
8782 
8783 static void scalar_min_max_arsh(struct bpf_reg_state *dst_reg,
8784 				struct bpf_reg_state *src_reg)
8785 {
8786 	u64 umin_val = src_reg->umin_value;
8787 
8788 	/* Upon reaching here, src_known is true and umax_val is equal
8789 	 * to umin_val.
8790 	 */
8791 	dst_reg->smin_value >>= umin_val;
8792 	dst_reg->smax_value >>= umin_val;
8793 
8794 	dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val, 64);
8795 
8796 	/* blow away the dst_reg umin_value/umax_value and rely on
8797 	 * dst_reg var_off to refine the result.
8798 	 */
8799 	dst_reg->umin_value = 0;
8800 	dst_reg->umax_value = U64_MAX;
8801 
8802 	/* It's not easy to operate on alu32 bounds here because it depends
8803 	 * on bits being shifted in from the upper 32 bits. Take the easy way out
8804 	 * and mark unbounded so we can recalculate later from tnum.
8805 	 */
8806 	__mark_reg32_unbounded(dst_reg);
8807 	__update_reg_bounds(dst_reg);
8808 }
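/* Illustrative trace of scalar_min_max_arsh() (example values only):
 * the shift amount is a known constant here, so for dst signed bounds of
 * [-64, -16] and a shift of 4 the new signed bounds are
 * [-64 >> 4, -16 >> 4] = [-4, -1]; the unsigned bounds are then refined
 * from the tnum afterwards.
 */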
8809 
8810 /* WARNING: This function does calculations on 64-bit values, but the actual
8811  * execution may occur on 32-bit values. Therefore, things like bitshifts
8812  * need extra checks in the 32-bit case.
8813  */
8814 static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
8815 				      struct bpf_insn *insn,
8816 				      struct bpf_reg_state *dst_reg,
8817 				      struct bpf_reg_state src_reg)
8818 {
8819 	struct bpf_reg_state *regs = cur_regs(env);
8820 	u8 opcode = BPF_OP(insn->code);
8821 	bool src_known;
8822 	s64 smin_val, smax_val;
8823 	u64 umin_val, umax_val;
8824 	s32 s32_min_val, s32_max_val;
8825 	u32 u32_min_val, u32_max_val;
8826 	u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
8827 	bool alu32 = (BPF_CLASS(insn->code) != BPF_ALU64);
8828 	int ret;
8829 
8830 	smin_val = src_reg.smin_value;
8831 	smax_val = src_reg.smax_value;
8832 	umin_val = src_reg.umin_value;
8833 	umax_val = src_reg.umax_value;
8834 
8835 	s32_min_val = src_reg.s32_min_value;
8836 	s32_max_val = src_reg.s32_max_value;
8837 	u32_min_val = src_reg.u32_min_value;
8838 	u32_max_val = src_reg.u32_max_value;
8839 
8840 	if (alu32) {
8841 		src_known = tnum_subreg_is_const(src_reg.var_off);
8842 		if ((src_known &&
8843 		     (s32_min_val != s32_max_val || u32_min_val != u32_max_val)) ||
8844 		    s32_min_val > s32_max_val || u32_min_val > u32_max_val) {
8845 			/* Taint dst register if offset had invalid bounds
8846 			 * derived from e.g. dead branches.
8847 			 */
8848 			__mark_reg_unknown(env, dst_reg);
8849 			return 0;
8850 		}
8851 	} else {
8852 		src_known = tnum_is_const(src_reg.var_off);
8853 		if ((src_known &&
8854 		     (smin_val != smax_val || umin_val != umax_val)) ||
8855 		    smin_val > smax_val || umin_val > umax_val) {
8856 			/* Taint dst register if offset had invalid bounds
8857 			 * derived from e.g. dead branches.
8858 			 */
8859 			__mark_reg_unknown(env, dst_reg);
8860 			return 0;
8861 		}
8862 	}
8863 
8864 	if (!src_known &&
8865 	    opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) {
8866 		__mark_reg_unknown(env, dst_reg);
8867 		return 0;
8868 	}
8869 
8870 	if (sanitize_needed(opcode)) {
8871 		ret = sanitize_val_alu(env, insn);
8872 		if (ret < 0)
8873 			return sanitize_err(env, insn, ret, NULL, NULL);
8874 	}
8875 
8876 	/* Calculate sign/unsigned bounds and tnum for alu32 and alu64 bit ops.
8877 	 * There are two classes of instructions: for the first class we track
8878 	 * both alu32 and alu64 sign/unsigned bounds independently; this provides
8879 	 * the greatest amount of precision when alu operations are mixed with
8880 	 * jmp32 operations. These operations are BPF_ADD, BPF_SUB, BPF_MUL,
8881 	 * BPF_AND, BPF_OR, and BPF_XOR. This is possible because these ops have
8882 	 * fairly easy to understand and calculate behavior in both 32-bit and
8883 	 * 64-bit alu ops. See alu32 verifier tests for examples. The second class of
8884 	 * operations, BPF_LSH, BPF_RSH, and BPF_ARSH, however are not so easy
8885 	 * with regards to tracking sign/unsigned bounds because the bits may
8886 	 * cross subreg boundaries in the alu64 case. When this happens we mark
8887 	 * the reg unbounded in the subreg bound space and use the resulting
8888 	 * tnum to calculate an approximation of the sign/unsigned bounds.
8889 	 */
8890 	switch (opcode) {
8891 	case BPF_ADD:
8892 		scalar32_min_max_add(dst_reg, &src_reg);
8893 		scalar_min_max_add(dst_reg, &src_reg);
8894 		dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);
8895 		break;
8896 	case BPF_SUB:
8897 		scalar32_min_max_sub(dst_reg, &src_reg);
8898 		scalar_min_max_sub(dst_reg, &src_reg);
8899 		dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off);
8900 		break;
8901 	case BPF_MUL:
8902 		dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off);
8903 		scalar32_min_max_mul(dst_reg, &src_reg);
8904 		scalar_min_max_mul(dst_reg, &src_reg);
8905 		break;
8906 	case BPF_AND:
8907 		dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off);
8908 		scalar32_min_max_and(dst_reg, &src_reg);
8909 		scalar_min_max_and(dst_reg, &src_reg);
8910 		break;
8911 	case BPF_OR:
8912 		dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off);
8913 		scalar32_min_max_or(dst_reg, &src_reg);
8914 		scalar_min_max_or(dst_reg, &src_reg);
8915 		break;
8916 	case BPF_XOR:
8917 		dst_reg->var_off = tnum_xor(dst_reg->var_off, src_reg.var_off);
8918 		scalar32_min_max_xor(dst_reg, &src_reg);
8919 		scalar_min_max_xor(dst_reg, &src_reg);
8920 		break;
8921 	case BPF_LSH:
8922 		if (umax_val >= insn_bitness) {
8923 			/* Shifts greater than 31 or 63 are undefined.
8924 			 * This includes shifts by a negative number.
8925 			 */
8926 			mark_reg_unknown(env, regs, insn->dst_reg);
8927 			break;
8928 		}
8929 		if (alu32)
8930 			scalar32_min_max_lsh(dst_reg, &src_reg);
8931 		else
8932 			scalar_min_max_lsh(dst_reg, &src_reg);
8933 		break;
8934 	case BPF_RSH:
8935 		if (umax_val >= insn_bitness) {
8936 			/* Shifts greater than 31 or 63 are undefined.
8937 			 * This includes shifts by a negative number.
8938 			 */
8939 			mark_reg_unknown(env, regs, insn->dst_reg);
8940 			break;
8941 		}
8942 		if (alu32)
8943 			scalar32_min_max_rsh(dst_reg, &src_reg);
8944 		else
8945 			scalar_min_max_rsh(dst_reg, &src_reg);
8946 		break;
8947 	case BPF_ARSH:
8948 		if (umax_val >= insn_bitness) {
8949 			/* Shifts greater than 31 or 63 are undefined.
8950 			 * This includes shifts by a negative number.
8951 			 */
8952 			mark_reg_unknown(env, regs, insn->dst_reg);
8953 			break;
8954 		}
8955 		if (alu32)
8956 			scalar32_min_max_arsh(dst_reg, &src_reg);
8957 		else
8958 			scalar_min_max_arsh(dst_reg, &src_reg);
8959 		break;
8960 	default:
8961 		mark_reg_unknown(env, regs, insn->dst_reg);
8962 		break;
8963 	}
8964 
8965 	/* ALU32 ops are zero extended into 64bit register */
8966 	if (alu32)
8967 		zext_32_to_64(dst_reg);
8968 
8969 	__update_reg_bounds(dst_reg);
8970 	__reg_deduce_bounds(dst_reg);
8971 	__reg_bound_offset(dst_reg);
8972 	return 0;
8973 }
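/* Illustrative example of the alu32 path through the function above: for
 *    BPF_ALU32_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
 * only the subreg (32-bit) bounds are meaningful; zext_32_to_64() then
 * zero extends the result, so its 64-bit bounds can never exceed
 * [0, U32_MAX].
 */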
8974 
8975 /* Handles ALU ops other than BPF_END, BPF_NEG and BPF_MOV: computes new min/max
8976  * and var_off.
8977  */
8978 static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
8979 				   struct bpf_insn *insn)
8980 {
8981 	struct bpf_verifier_state *vstate = env->cur_state;
8982 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
8983 	struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg;
8984 	struct bpf_reg_state *ptr_reg = NULL, off_reg = {0};
8985 	u8 opcode = BPF_OP(insn->code);
8986 	int err;
8987 
8988 	dst_reg = &regs[insn->dst_reg];
8989 	src_reg = NULL;
8990 	if (dst_reg->type != SCALAR_VALUE)
8991 		ptr_reg = dst_reg;
8992 	else
8993 		/* Make sure ID is cleared otherwise dst_reg min/max could be
8994 		 * incorrectly propagated into other registers by find_equal_scalars()
8995 		 */
8996 		dst_reg->id = 0;
8997 	if (BPF_SRC(insn->code) == BPF_X) {
8998 		src_reg = &regs[insn->src_reg];
8999 		if (src_reg->type != SCALAR_VALUE) {
9000 			if (dst_reg->type != SCALAR_VALUE) {
9001 				/* Combining two pointers by any ALU op yields
9002 				 * an arbitrary scalar. Disallow all math except
9003 				 * pointer subtraction
9004 				 */
9005 				if (opcode == BPF_SUB && env->allow_ptr_leaks) {
9006 					mark_reg_unknown(env, regs, insn->dst_reg);
9007 					return 0;
9008 				}
9009 				verbose(env, "R%d pointer %s pointer prohibited\n",
9010 					insn->dst_reg,
9011 					bpf_alu_string[opcode >> 4]);
9012 				return -EACCES;
9013 			} else {
9014 				/* scalar += pointer
9015 				 * This is legal, but we have to reverse our
9016 				 * src/dest handling in computing the range
9017 				 */
9018 				err = mark_chain_precision(env, insn->dst_reg);
9019 				if (err)
9020 					return err;
9021 				return adjust_ptr_min_max_vals(env, insn,
9022 							       src_reg, dst_reg);
9023 			}
9024 		} else if (ptr_reg) {
9025 			/* pointer += scalar */
9026 			err = mark_chain_precision(env, insn->src_reg);
9027 			if (err)
9028 				return err;
9029 			return adjust_ptr_min_max_vals(env, insn,
9030 						       dst_reg, src_reg);
9031 		}
9032 	} else {
9033 		/* Pretend the src is a reg with a known value, since we only
9034 		 * need to be able to read from this state.
9035 		 */
9036 		off_reg.type = SCALAR_VALUE;
9037 		__mark_reg_known(&off_reg, insn->imm);
9038 		src_reg = &off_reg;
9039 		if (ptr_reg) /* pointer += K */
9040 			return adjust_ptr_min_max_vals(env, insn,
9041 						       ptr_reg, src_reg);
9042 	}
9043 
9044 	/* Got here implies adding two SCALAR_VALUEs */
9045 	if (WARN_ON_ONCE(ptr_reg)) {
9046 		print_verifier_state(env, state, true);
9047 		verbose(env, "verifier internal error: unexpected ptr_reg\n");
9048 		return -EINVAL;
9049 	}
9050 	if (WARN_ON(!src_reg)) {
9051 		print_verifier_state(env, state, true);
9052 		verbose(env, "verifier internal error: no src_reg\n");
9053 		return -EINVAL;
9054 	}
9055 	return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg);
9056 }
9057 
9058 /* check validity of 32-bit and 64-bit arithmetic operations */
9059 static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
9060 {
9061 	struct bpf_reg_state *regs = cur_regs(env);
9062 	u8 opcode = BPF_OP(insn->code);
9063 	int err;
9064 
9065 	if (opcode == BPF_END || opcode == BPF_NEG) {
9066 		if (opcode == BPF_NEG) {
9067 			if (BPF_SRC(insn->code) != 0 ||
9068 			    insn->src_reg != BPF_REG_0 ||
9069 			    insn->off != 0 || insn->imm != 0) {
9070 				verbose(env, "BPF_NEG uses reserved fields\n");
9071 				return -EINVAL;
9072 			}
9073 		} else {
9074 			if (insn->src_reg != BPF_REG_0 || insn->off != 0 ||
9075 			    (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) ||
9076 			    BPF_CLASS(insn->code) == BPF_ALU64) {
9077 				verbose(env, "BPF_END uses reserved fields\n");
9078 				return -EINVAL;
9079 			}
9080 		}
9081 
9082 		/* check src operand */
9083 		err = check_reg_arg(env, insn->dst_reg, SRC_OP);
9084 		if (err)
9085 			return err;
9086 
9087 		if (is_pointer_value(env, insn->dst_reg)) {
9088 			verbose(env, "R%d pointer arithmetic prohibited\n",
9089 				insn->dst_reg);
9090 			return -EACCES;
9091 		}
9092 
9093 		/* check dest operand */
9094 		err = check_reg_arg(env, insn->dst_reg, DST_OP);
9095 		if (err)
9096 			return err;
9097 
9098 	} else if (opcode == BPF_MOV) {
9099 
9100 		if (BPF_SRC(insn->code) == BPF_X) {
9101 			if (insn->imm != 0 || insn->off != 0) {
9102 				verbose(env, "BPF_MOV uses reserved fields\n");
9103 				return -EINVAL;
9104 			}
9105 
9106 			/* check src operand */
9107 			err = check_reg_arg(env, insn->src_reg, SRC_OP);
9108 			if (err)
9109 				return err;
9110 		} else {
9111 			if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
9112 				verbose(env, "BPF_MOV uses reserved fields\n");
9113 				return -EINVAL;
9114 			}
9115 		}
9116 
9117 		/* check dest operand, mark as required later */
9118 		err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
9119 		if (err)
9120 			return err;
9121 
9122 		if (BPF_SRC(insn->code) == BPF_X) {
9123 			struct bpf_reg_state *src_reg = regs + insn->src_reg;
9124 			struct bpf_reg_state *dst_reg = regs + insn->dst_reg;
9125 
9126 			if (BPF_CLASS(insn->code) == BPF_ALU64) {
9127 				/* case: R1 = R2
9128 				 * copy register state to dest reg
9129 				 */
9130 				if (src_reg->type == SCALAR_VALUE && !src_reg->id)
9131 					/* Assign src and dst registers the same ID
9132 					 * that will be used by find_equal_scalars()
9133 					 * to propagate min/max range.
9134 					 */
9135 					src_reg->id = ++env->id_gen;
9136 				*dst_reg = *src_reg;
9137 				dst_reg->live |= REG_LIVE_WRITTEN;
9138 				dst_reg->subreg_def = DEF_NOT_SUBREG;
9139 			} else {
9140 				/* R1 = (u32) R2 */
9141 				if (is_pointer_value(env, insn->src_reg)) {
9142 					verbose(env,
9143 						"R%d partial copy of pointer\n",
9144 						insn->src_reg);
9145 					return -EACCES;
9146 				} else if (src_reg->type == SCALAR_VALUE) {
9147 					*dst_reg = *src_reg;
9148 					/* Make sure ID is cleared otherwise
9149 					 * dst_reg min/max could be incorrectly
9150 					 * propagated into src_reg by find_equal_scalars()
9151 					 */
9152 					dst_reg->id = 0;
9153 					dst_reg->live |= REG_LIVE_WRITTEN;
9154 					dst_reg->subreg_def = env->insn_idx + 1;
9155 				} else {
9156 					mark_reg_unknown(env, regs,
9157 							 insn->dst_reg);
9158 				}
9159 				zext_32_to_64(dst_reg);
9160 
9161 				__update_reg_bounds(dst_reg);
9162 				__reg_deduce_bounds(dst_reg);
9163 				__reg_bound_offset(dst_reg);
9164 			}
9165 		} else {
9166 			/* case: R = imm
9167 			 * remember the value we stored into this reg
9168 			 */
9169 			/* clear any state __mark_reg_known doesn't set */
9170 			mark_reg_unknown(env, regs, insn->dst_reg);
9171 			regs[insn->dst_reg].type = SCALAR_VALUE;
9172 			if (BPF_CLASS(insn->code) == BPF_ALU64) {
9173 				__mark_reg_known(regs + insn->dst_reg,
9174 						 insn->imm);
9175 			} else {
9176 				__mark_reg_known(regs + insn->dst_reg,
9177 						 (u32)insn->imm);
9178 			}
9179 		}
9180 
9181 	} else if (opcode > BPF_END) {
9182 		verbose(env, "invalid BPF_ALU opcode %x\n", opcode);
9183 		return -EINVAL;
9184 
9185 	} else {	/* all other ALU ops: and, sub, xor, add, ... */
9186 
9187 		if (BPF_SRC(insn->code) == BPF_X) {
9188 			if (insn->imm != 0 || insn->off != 0) {
9189 				verbose(env, "BPF_ALU uses reserved fields\n");
9190 				return -EINVAL;
9191 			}
9192 			/* check src1 operand */
9193 			err = check_reg_arg(env, insn->src_reg, SRC_OP);
9194 			if (err)
9195 				return err;
9196 		} else {
9197 			if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
9198 				verbose(env, "BPF_ALU uses reserved fields\n");
9199 				return -EINVAL;
9200 			}
9201 		}
9202 
9203 		/* check src2 operand */
9204 		err = check_reg_arg(env, insn->dst_reg, SRC_OP);
9205 		if (err)
9206 			return err;
9207 
9208 		if ((opcode == BPF_MOD || opcode == BPF_DIV) &&
9209 		    BPF_SRC(insn->code) == BPF_K && insn->imm == 0) {
9210 			verbose(env, "div by zero\n");
9211 			return -EINVAL;
9212 		}
9213 
9214 		if ((opcode == BPF_LSH || opcode == BPF_RSH ||
9215 		     opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
9216 			int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;
9217 
9218 			if (insn->imm < 0 || insn->imm >= size) {
9219 				verbose(env, "invalid shift %d\n", insn->imm);
9220 				return -EINVAL;
9221 			}
9222 		}
9223 
9224 		/* check dest operand */
9225 		err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
9226 		if (err)
9227 			return err;
9228 
9229 		return adjust_reg_min_max_vals(env, insn);
9230 	}
9231 
9232 	return 0;
9233 }
9234 
9235 static void __find_good_pkt_pointers(struct bpf_func_state *state,
9236 				     struct bpf_reg_state *dst_reg,
9237 				     enum bpf_reg_type type, int new_range)
9238 {
9239 	struct bpf_reg_state *reg;
9240 	int i;
9241 
9242 	for (i = 0; i < MAX_BPF_REG; i++) {
9243 		reg = &state->regs[i];
9244 		if (reg->type == type && reg->id == dst_reg->id)
9245 			/* keep the maximum range already checked */
9246 			reg->range = max(reg->range, new_range);
9247 	}
9248 
9249 	bpf_for_each_spilled_reg(i, state, reg) {
9250 		if (!reg)
9251 			continue;
9252 		if (reg->type == type && reg->id == dst_reg->id)
9253 			reg->range = max(reg->range, new_range);
9254 	}
9255 }
9256 
9257 static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
9258 				   struct bpf_reg_state *dst_reg,
9259 				   enum bpf_reg_type type,
9260 				   bool range_right_open)
9261 {
9262 	int new_range, i;
9263 
9264 	if (dst_reg->off < 0 ||
9265 	    (dst_reg->off == 0 && range_right_open))
9266 		/* This doesn't give us any range */
9267 		return;
9268 
9269 	if (dst_reg->umax_value > MAX_PACKET_OFF ||
9270 	    dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF)
9271 		/* Risk of overflow.  For instance, ptr + (1<<63) may be less
9272 		 * than pkt_end, but that's because it's also less than pkt.
9273 		 */
9274 		return;
9275 
9276 	new_range = dst_reg->off;
9277 	if (range_right_open)
9278 		new_range++;
9279 
9280 	/* Examples for register markings:
9281 	 *
9282 	 * pkt_data in dst register:
9283 	 *
9284 	 *   r2 = r3;
9285 	 *   r2 += 8;
9286 	 *   if (r2 > pkt_end) goto <handle exception>
9287 	 *   <access okay>
9288 	 *
9289 	 *   r2 = r3;
9290 	 *   r2 += 8;
9291 	 *   if (r2 < pkt_end) goto <access okay>
9292 	 *   <handle exception>
9293 	 *
9294 	 *   Where:
9295 	 *     r2 == dst_reg, pkt_end == src_reg
9296 	 *     r2=pkt(id=n,off=8,r=0)
9297 	 *     r3=pkt(id=n,off=0,r=0)
9298 	 *
9299 	 * pkt_data in src register:
9300 	 *
9301 	 *   r2 = r3;
9302 	 *   r2 += 8;
9303 	 *   if (pkt_end >= r2) goto <access okay>
9304 	 *   <handle exception>
9305 	 *
9306 	 *   r2 = r3;
9307 	 *   r2 += 8;
9308 	 *   if (pkt_end <= r2) goto <handle exception>
9309 	 *   <access okay>
9310 	 *
9311 	 *   Where:
9312 	 *     pkt_end == dst_reg, r2 == src_reg
9313 	 *     r2=pkt(id=n,off=8,r=0)
9314 	 *     r3=pkt(id=n,off=0,r=0)
9315 	 *
9316 	 * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8)
9317 	 * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8)
9318 	 * and [r3, r3 + 8-1) respectively is safe to access depending on
9319 	 * the check.
9320 	 */
9321 
9322 	/* If our ids match, then we must have the same max_value.  And we
9323 	 * don't care about the other reg's fixed offset, since if it's too big
9324 	 * the range won't allow anything.
9325 	 * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16.
9326 	 */
9327 	for (i = 0; i <= vstate->curframe; i++)
9328 		__find_good_pkt_pointers(vstate->frame[i], dst_reg, type,
9329 					 new_range);
9330 }
9331 
9332 static int is_branch32_taken(struct bpf_reg_state *reg, u32 val, u8 opcode)
9333 {
9334 	struct tnum subreg = tnum_subreg(reg->var_off);
9335 	s32 sval = (s32)val;
9336 
9337 	switch (opcode) {
9338 	case BPF_JEQ:
9339 		if (tnum_is_const(subreg))
9340 			return !!tnum_equals_const(subreg, val);
9341 		break;
9342 	case BPF_JNE:
9343 		if (tnum_is_const(subreg))
9344 			return !tnum_equals_const(subreg, val);
9345 		break;
9346 	case BPF_JSET:
9347 		if ((~subreg.mask & subreg.value) & val)
9348 			return 1;
9349 		if (!((subreg.mask | subreg.value) & val))
9350 			return 0;
9351 		break;
9352 	case BPF_JGT:
9353 		if (reg->u32_min_value > val)
9354 			return 1;
9355 		else if (reg->u32_max_value <= val)
9356 			return 0;
9357 		break;
9358 	case BPF_JSGT:
9359 		if (reg->s32_min_value > sval)
9360 			return 1;
9361 		else if (reg->s32_max_value <= sval)
9362 			return 0;
9363 		break;
9364 	case BPF_JLT:
9365 		if (reg->u32_max_value < val)
9366 			return 1;
9367 		else if (reg->u32_min_value >= val)
9368 			return 0;
9369 		break;
9370 	case BPF_JSLT:
9371 		if (reg->s32_max_value < sval)
9372 			return 1;
9373 		else if (reg->s32_min_value >= sval)
9374 			return 0;
9375 		break;
9376 	case BPF_JGE:
9377 		if (reg->u32_min_value >= val)
9378 			return 1;
9379 		else if (reg->u32_max_value < val)
9380 			return 0;
9381 		break;
9382 	case BPF_JSGE:
9383 		if (reg->s32_min_value >= sval)
9384 			return 1;
9385 		else if (reg->s32_max_value < sval)
9386 			return 0;
9387 		break;
9388 	case BPF_JLE:
9389 		if (reg->u32_max_value <= val)
9390 			return 1;
9391 		else if (reg->u32_min_value > val)
9392 			return 0;
9393 		break;
9394 	case BPF_JSLE:
9395 		if (reg->s32_max_value <= sval)
9396 			return 1;
9397 		else if (reg->s32_min_value > sval)
9398 			return 0;
9399 		break;
9400 	}
9401 
9402 	return -1;
9403 }
9404 
9405 
9406 static int is_branch64_taken(struct bpf_reg_state *reg, u64 val, u8 opcode)
9407 {
9408 	s64 sval = (s64)val;
9409 
9410 	switch (opcode) {
9411 	case BPF_JEQ:
9412 		if (tnum_is_const(reg->var_off))
9413 			return !!tnum_equals_const(reg->var_off, val);
9414 		break;
9415 	case BPF_JNE:
9416 		if (tnum_is_const(reg->var_off))
9417 			return !tnum_equals_const(reg->var_off, val);
9418 		break;
9419 	case BPF_JSET:
9420 		if ((~reg->var_off.mask & reg->var_off.value) & val)
9421 			return 1;
9422 		if (!((reg->var_off.mask | reg->var_off.value) & val))
9423 			return 0;
9424 		break;
9425 	case BPF_JGT:
9426 		if (reg->umin_value > val)
9427 			return 1;
9428 		else if (reg->umax_value <= val)
9429 			return 0;
9430 		break;
9431 	case BPF_JSGT:
9432 		if (reg->smin_value > sval)
9433 			return 1;
9434 		else if (reg->smax_value <= sval)
9435 			return 0;
9436 		break;
9437 	case BPF_JLT:
9438 		if (reg->umax_value < val)
9439 			return 1;
9440 		else if (reg->umin_value >= val)
9441 			return 0;
9442 		break;
9443 	case BPF_JSLT:
9444 		if (reg->smax_value < sval)
9445 			return 1;
9446 		else if (reg->smin_value >= sval)
9447 			return 0;
9448 		break;
9449 	case BPF_JGE:
9450 		if (reg->umin_value >= val)
9451 			return 1;
9452 		else if (reg->umax_value < val)
9453 			return 0;
9454 		break;
9455 	case BPF_JSGE:
9456 		if (reg->smin_value >= sval)
9457 			return 1;
9458 		else if (reg->smax_value < sval)
9459 			return 0;
9460 		break;
9461 	case BPF_JLE:
9462 		if (reg->umax_value <= val)
9463 			return 1;
9464 		else if (reg->umin_value > val)
9465 			return 0;
9466 		break;
9467 	case BPF_JSLE:
9468 		if (reg->smax_value <= sval)
9469 			return 1;
9470 		else if (reg->smin_value > sval)
9471 			return 0;
9472 		break;
9473 	}
9474 
9475 	return -1;
9476 }
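/* Illustrative examples for the helpers above (example values only):
 * with reg = [umin=5, umax=10], "if reg > 4" is always taken (returns 1),
 * "if reg > 20" is never taken (returns 0), and "if reg > 7" is unknown
 * (returns -1), so only the last case forces both branches to be explored.
 */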
9477 
9478 /* compute branch direction of the expression "if (reg opcode val) goto target;"
9479  * and return:
9480  *  1 - branch will be taken and "goto target" will be executed
9481  *  0 - branch will not be taken and fall-through to next insn
9482  * -1 - unknown. Example: "if (reg < 5)" is unknown when register value
9483  *      range [0,10]
9484  */
9485 static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode,
9486 			   bool is_jmp32)
9487 {
9488 	if (__is_pointer_value(false, reg)) {
9489 		if (!reg_type_not_null(reg->type))
9490 			return -1;
9491 
9492 		/* If the pointer is valid, tests against zero will fail, so we
9493 		 * can use this to decide the branch direction.
9494 		 */
9495 		if (val != 0)
9496 			return -1;
9497 
9498 		switch (opcode) {
9499 		case BPF_JEQ:
9500 			return 0;
9501 		case BPF_JNE:
9502 			return 1;
9503 		default:
9504 			return -1;
9505 		}
9506 	}
9507 
9508 	if (is_jmp32)
9509 		return is_branch32_taken(reg, val, opcode);
9510 	return is_branch64_taken(reg, val, opcode);
9511 }
9512 
9513 static int flip_opcode(u32 opcode)
9514 {
9515 	/* How can we transform "a <op> b" into "b <op> a"? */
9516 	static const u8 opcode_flip[16] = {
9517 		/* these stay the same */
9518 		[BPF_JEQ  >> 4] = BPF_JEQ,
9519 		[BPF_JNE  >> 4] = BPF_JNE,
9520 		[BPF_JSET >> 4] = BPF_JSET,
9521 		/* these swap "lesser" and "greater" (L and G in the opcodes) */
9522 		[BPF_JGE  >> 4] = BPF_JLE,
9523 		[BPF_JGT  >> 4] = BPF_JLT,
9524 		[BPF_JLE  >> 4] = BPF_JGE,
9525 		[BPF_JLT  >> 4] = BPF_JGT,
9526 		[BPF_JSGE >> 4] = BPF_JSLE,
9527 		[BPF_JSGT >> 4] = BPF_JSLT,
9528 		[BPF_JSLE >> 4] = BPF_JSGE,
9529 		[BPF_JSLT >> 4] = BPF_JSGT
9530 	};
9531 	return opcode_flip[opcode >> 4];
9532 }
9533 
9534 static int is_pkt_ptr_branch_taken(struct bpf_reg_state *dst_reg,
9535 				   struct bpf_reg_state *src_reg,
9536 				   u8 opcode)
9537 {
9538 	struct bpf_reg_state *pkt;
9539 
9540 	if (src_reg->type == PTR_TO_PACKET_END) {
9541 		pkt = dst_reg;
9542 	} else if (dst_reg->type == PTR_TO_PACKET_END) {
9543 		pkt = src_reg;
9544 		opcode = flip_opcode(opcode);
9545 	} else {
9546 		return -1;
9547 	}
9548 
9549 	if (pkt->range >= 0)
9550 		return -1;
9551 
9552 	switch (opcode) {
9553 	case BPF_JLE:
9554 		/* pkt <= pkt_end */
9555 		fallthrough;
9556 	case BPF_JGT:
9557 		/* pkt > pkt_end */
9558 		if (pkt->range == BEYOND_PKT_END)
9559 			/* pkt has at least one extra byte beyond pkt_end */
9560 			return opcode == BPF_JGT;
9561 		break;
9562 	case BPF_JLT:
9563 		/* pkt < pkt_end */
9564 		fallthrough;
9565 	case BPF_JGE:
9566 		/* pkt >= pkt_end */
9567 		if (pkt->range == BEYOND_PKT_END || pkt->range == AT_PKT_END)
9568 			return opcode == BPF_JGE;
9569 		break;
9570 	}
9571 	return -1;
9572 }
9573 
9574 /* Adjusts the register min/max values in the case that the dst_reg is the
9575  * variable register that we are working on, and src_reg is a constant or we're
9576  * simply doing a BPF_K check.
9577  * In JEQ/JNE cases we also adjust the var_off values.
9578  */
9579 static void reg_set_min_max(struct bpf_reg_state *true_reg,
9580 			    struct bpf_reg_state *false_reg,
9581 			    u64 val, u32 val32,
9582 			    u8 opcode, bool is_jmp32)
9583 {
9584 	struct tnum false_32off = tnum_subreg(false_reg->var_off);
9585 	struct tnum false_64off = false_reg->var_off;
9586 	struct tnum true_32off = tnum_subreg(true_reg->var_off);
9587 	struct tnum true_64off = true_reg->var_off;
9588 	s64 sval = (s64)val;
9589 	s32 sval32 = (s32)val32;
9590 
9591 	/* If the dst_reg is a pointer, we can't learn anything about its
9592 	 * variable offset from the compare (unless src_reg were a pointer into
9593 	 * the same object, but we don't bother with that).
9594 	 * Since false_reg and true_reg have the same type by construction, we
9595 	 * only need to check one of them for pointerness.
9596 	 */
9597 	if (__is_pointer_value(false, false_reg))
9598 		return;
9599 
9600 	switch (opcode) {
9601 	case BPF_JEQ:
9602 	case BPF_JNE:
9603 	{
9604 		struct bpf_reg_state *reg =
9605 			opcode == BPF_JEQ ? true_reg : false_reg;
9606 
9607 		/* JEQ/JNE comparison doesn't change the register equivalence.
9608 		 * r1 = r2;
9609 		 * if (r1 == 42) goto label;
9610 		 * ...
9611 		 * label: // here both r1 and r2 are known to be 42.
9612 		 *
9613 		 * Hence when marking register as known preserve it's ID.
9614 		 */
9615 		if (is_jmp32)
9616 			__mark_reg32_known(reg, val32);
9617 		else
9618 			___mark_reg_known(reg, val);
9619 		break;
9620 	}
9621 	case BPF_JSET:
9622 		if (is_jmp32) {
9623 			false_32off = tnum_and(false_32off, tnum_const(~val32));
9624 			if (is_power_of_2(val32))
9625 				true_32off = tnum_or(true_32off,
9626 						     tnum_const(val32));
9627 		} else {
9628 			false_64off = tnum_and(false_64off, tnum_const(~val));
9629 			if (is_power_of_2(val))
9630 				true_64off = tnum_or(true_64off,
9631 						     tnum_const(val));
9632 		}
9633 		break;
9634 	case BPF_JGE:
9635 	case BPF_JGT:
9636 	{
9637 		if (is_jmp32) {
9638 			u32 false_umax = opcode == BPF_JGT ? val32  : val32 - 1;
9639 			u32 true_umin = opcode == BPF_JGT ? val32 + 1 : val32;
9640 
9641 			false_reg->u32_max_value = min(false_reg->u32_max_value,
9642 						       false_umax);
9643 			true_reg->u32_min_value = max(true_reg->u32_min_value,
9644 						      true_umin);
9645 		} else {
9646 			u64 false_umax = opcode == BPF_JGT ? val    : val - 1;
9647 			u64 true_umin = opcode == BPF_JGT ? val + 1 : val;
9648 
9649 			false_reg->umax_value = min(false_reg->umax_value, false_umax);
9650 			true_reg->umin_value = max(true_reg->umin_value, true_umin);
9651 		}
9652 		break;
9653 	}
9654 	case BPF_JSGE:
9655 	case BPF_JSGT:
9656 	{
9657 		if (is_jmp32) {
9658 			s32 false_smax = opcode == BPF_JSGT ? sval32    : sval32 - 1;
9659 			s32 true_smin = opcode == BPF_JSGT ? sval32 + 1 : sval32;
9660 
9661 			false_reg->s32_max_value = min(false_reg->s32_max_value, false_smax);
9662 			true_reg->s32_min_value = max(true_reg->s32_min_value, true_smin);
9663 		} else {
9664 			s64 false_smax = opcode == BPF_JSGT ? sval    : sval - 1;
9665 			s64 true_smin = opcode == BPF_JSGT ? sval + 1 : sval;
9666 
9667 			false_reg->smax_value = min(false_reg->smax_value, false_smax);
9668 			true_reg->smin_value = max(true_reg->smin_value, true_smin);
9669 		}
9670 		break;
9671 	}
9672 	case BPF_JLE:
9673 	case BPF_JLT:
9674 	{
9675 		if (is_jmp32) {
9676 			u32 false_umin = opcode == BPF_JLT ? val32  : val32 + 1;
9677 			u32 true_umax = opcode == BPF_JLT ? val32 - 1 : val32;
9678 
9679 			false_reg->u32_min_value = max(false_reg->u32_min_value,
9680 						       false_umin);
9681 			true_reg->u32_max_value = min(true_reg->u32_max_value,
9682 						      true_umax);
9683 		} else {
9684 			u64 false_umin = opcode == BPF_JLT ? val    : val + 1;
9685 			u64 true_umax = opcode == BPF_JLT ? val - 1 : val;
9686 
9687 			false_reg->umin_value = max(false_reg->umin_value, false_umin);
9688 			true_reg->umax_value = min(true_reg->umax_value, true_umax);
9689 		}
9690 		break;
9691 	}
9692 	case BPF_JSLE:
9693 	case BPF_JSLT:
9694 	{
9695 		if (is_jmp32) {
9696 			s32 false_smin = opcode == BPF_JSLT ? sval32    : sval32 + 1;
9697 			s32 true_smax = opcode == BPF_JSLT ? sval32 - 1 : sval32;
9698 
9699 			false_reg->s32_min_value = max(false_reg->s32_min_value, false_smin);
9700 			true_reg->s32_max_value = min(true_reg->s32_max_value, true_smax);
9701 		} else {
9702 			s64 false_smin = opcode == BPF_JSLT ? sval    : sval + 1;
9703 			s64 true_smax = opcode == BPF_JSLT ? sval - 1 : sval;
9704 
9705 			false_reg->smin_value = max(false_reg->smin_value, false_smin);
9706 			true_reg->smax_value = min(true_reg->smax_value, true_smax);
9707 		}
9708 		break;
9709 	}
9710 	default:
9711 		return;
9712 	}
9713 
9714 	if (is_jmp32) {
9715 		false_reg->var_off = tnum_or(tnum_clear_subreg(false_64off),
9716 					     tnum_subreg(false_32off));
9717 		true_reg->var_off = tnum_or(tnum_clear_subreg(true_64off),
9718 					    tnum_subreg(true_32off));
9719 		__reg_combine_32_into_64(false_reg);
9720 		__reg_combine_32_into_64(true_reg);
9721 	} else {
9722 		false_reg->var_off = false_64off;
9723 		true_reg->var_off = true_64off;
9724 		__reg_combine_64_into_32(false_reg);
9725 		__reg_combine_64_into_32(true_reg);
9726 	}
9727 }
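/* Illustrative trace of reg_set_min_max() (example values only): for
 * "if r1 > 7" with r1 = [0, 20], the true branch gets
 * umin = max(0, 7 + 1) = 8, i.e. [8, 20], and the false branch gets
 * umax = min(20, 7) = 7, i.e. [0, 7]; var_off is then tightened from the
 * refined bounds.
 */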
9728 
9729 /* Same as above, but for the case that dst_reg holds a constant and src_reg is
9730  * the variable reg.
9731  */
9732 static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
9733 				struct bpf_reg_state *false_reg,
9734 				u64 val, u32 val32,
9735 				u8 opcode, bool is_jmp32)
9736 {
9737 	opcode = flip_opcode(opcode);
9738 	/* This uses zero as "not present in table"; luckily the zero opcode,
9739 	 * BPF_JA, can't get here.
9740 	 */
9741 	if (opcode)
9742 		reg_set_min_max(true_reg, false_reg, val, val32, opcode, is_jmp32);
9743 }
9744 
9745 /* Regs are known to be equal, so intersect their min/max/var_off */
9746 static void __reg_combine_min_max(struct bpf_reg_state *src_reg,
9747 				  struct bpf_reg_state *dst_reg)
9748 {
9749 	src_reg->umin_value = dst_reg->umin_value = max(src_reg->umin_value,
9750 							dst_reg->umin_value);
9751 	src_reg->umax_value = dst_reg->umax_value = min(src_reg->umax_value,
9752 							dst_reg->umax_value);
9753 	src_reg->smin_value = dst_reg->smin_value = max(src_reg->smin_value,
9754 							dst_reg->smin_value);
9755 	src_reg->smax_value = dst_reg->smax_value = min(src_reg->smax_value,
9756 							dst_reg->smax_value);
9757 	src_reg->var_off = dst_reg->var_off = tnum_intersect(src_reg->var_off,
9758 							     dst_reg->var_off);
9759 	/* We might have learned new bounds from the var_off. */
9760 	__update_reg_bounds(src_reg);
9761 	__update_reg_bounds(dst_reg);
9762 	/* We might have learned something about the sign bit. */
9763 	__reg_deduce_bounds(src_reg);
9764 	__reg_deduce_bounds(dst_reg);
9765 	/* We might have learned some bits from the bounds. */
9766 	__reg_bound_offset(src_reg);
9767 	__reg_bound_offset(dst_reg);
9768 	/* Intersecting with the old var_off might have improved our bounds
9769 	 * slightly.  e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
9770 	 * then new var_off is (0; 0x7f...fc) which improves our umax.
9771 	 */
9772 	__update_reg_bounds(src_reg);
9773 	__update_reg_bounds(dst_reg);
9774 }
9775 
9776 static void reg_combine_min_max(struct bpf_reg_state *true_src,
9777 				struct bpf_reg_state *true_dst,
9778 				struct bpf_reg_state *false_src,
9779 				struct bpf_reg_state *false_dst,
9780 				u8 opcode)
9781 {
9782 	switch (opcode) {
9783 	case BPF_JEQ:
9784 		__reg_combine_min_max(true_src, true_dst);
9785 		break;
9786 	case BPF_JNE:
9787 		__reg_combine_min_max(false_src, false_dst);
9788 		break;
9789 	}
9790 }
9791 
9792 static void mark_ptr_or_null_reg(struct bpf_func_state *state,
9793 				 struct bpf_reg_state *reg, u32 id,
9794 				 bool is_null)
9795 {
9796 	if (type_may_be_null(reg->type) && reg->id == id &&
9797 	    !WARN_ON_ONCE(!reg->id)) {
9798 		if (WARN_ON_ONCE(reg->smin_value || reg->smax_value ||
9799 				 !tnum_equals_const(reg->var_off, 0) ||
9800 				 reg->off)) {
9801 			/* Old offset (both fixed and variable parts) should
9802 			 * have been known-zero, because we don't allow pointer
9803 			 * arithmetic on pointers that might be NULL. If we
9804 			 * see this happening, don't convert the register.
9805 			 */
9806 			return;
9807 		}
9808 		if (is_null) {
9809 			reg->type = SCALAR_VALUE;
9810 			/* We don't need id and ref_obj_id from this point
9811 			 * onwards anymore, thus we should better reset it,
9812 			 * so that state pruning has chances to take effect.
9813 			 */
9814 			reg->id = 0;
9815 			reg->ref_obj_id = 0;
9816 
9817 			return;
9818 		}
9819 
9820 		mark_ptr_not_null_reg(reg);
9821 
9822 		if (!reg_may_point_to_spin_lock(reg)) {
9823 			/* For not-NULL ptr, reg->ref_obj_id will be reset
9824 			 * in release_reg_references().
9825 			 *
9826 			 * reg->id is still used by spin_lock ptr. Other
9827 			 * than spin_lock ptr type, reg->id can be reset.
9828 			 */
9829 			reg->id = 0;
9830 		}
9831 	}
9832 }
9833 
9834 static void __mark_ptr_or_null_regs(struct bpf_func_state *state, u32 id,
9835 				    bool is_null)
9836 {
9837 	struct bpf_reg_state *reg;
9838 	int i;
9839 
9840 	for (i = 0; i < MAX_BPF_REG; i++)
9841 		mark_ptr_or_null_reg(state, &state->regs[i], id, is_null);
9842 
9843 	bpf_for_each_spilled_reg(i, state, reg) {
9844 		if (!reg)
9845 			continue;
9846 		mark_ptr_or_null_reg(state, reg, id, is_null);
9847 	}
9848 }
9849 
9850 /* The logic is similar to find_good_pkt_pointers(), both could eventually
9851  * be folded together at some point.
9852  */
9853 static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno,
9854 				  bool is_null)
9855 {
9856 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
9857 	struct bpf_reg_state *regs = state->regs;
9858 	u32 ref_obj_id = regs[regno].ref_obj_id;
9859 	u32 id = regs[regno].id;
9860 	int i;
9861 
9862 	if (ref_obj_id && ref_obj_id == id && is_null)
9863 		/* regs[regno] is in the " == NULL" branch.
9864 		 * No one could have freed the reference state before
9865 		 * doing the NULL check.
9866 		 */
9867 		WARN_ON_ONCE(release_reference_state(state, id));
9868 
9869 	for (i = 0; i <= vstate->curframe; i++)
9870 		__mark_ptr_or_null_regs(vstate->frame[i], id, is_null);
9871 }
9872 
9873 static bool try_match_pkt_pointers(const struct bpf_insn *insn,
9874 				   struct bpf_reg_state *dst_reg,
9875 				   struct bpf_reg_state *src_reg,
9876 				   struct bpf_verifier_state *this_branch,
9877 				   struct bpf_verifier_state *other_branch)
9878 {
9879 	if (BPF_SRC(insn->code) != BPF_X)
9880 		return false;
9881 
9882 	/* Pointers are always 64-bit. */
9883 	if (BPF_CLASS(insn->code) == BPF_JMP32)
9884 		return false;
9885 
9886 	switch (BPF_OP(insn->code)) {
9887 	case BPF_JGT:
9888 		if ((dst_reg->type == PTR_TO_PACKET &&
9889 		     src_reg->type == PTR_TO_PACKET_END) ||
9890 		    (dst_reg->type == PTR_TO_PACKET_META &&
9891 		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
9892 			/* pkt_data' > pkt_end, pkt_meta' > pkt_data */
9893 			find_good_pkt_pointers(this_branch, dst_reg,
9894 					       dst_reg->type, false);
9895 			mark_pkt_end(other_branch, insn->dst_reg, true);
9896 		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
9897 			    src_reg->type == PTR_TO_PACKET) ||
9898 			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
9899 			    src_reg->type == PTR_TO_PACKET_META)) {
9900 			/* pkt_end > pkt_data', pkt_data > pkt_meta' */
9901 			find_good_pkt_pointers(other_branch, src_reg,
9902 					       src_reg->type, true);
9903 			mark_pkt_end(this_branch, insn->src_reg, false);
9904 		} else {
9905 			return false;
9906 		}
9907 		break;
9908 	case BPF_JLT:
9909 		if ((dst_reg->type == PTR_TO_PACKET &&
9910 		     src_reg->type == PTR_TO_PACKET_END) ||
9911 		    (dst_reg->type == PTR_TO_PACKET_META &&
9912 		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
9913 			/* pkt_data' < pkt_end, pkt_meta' < pkt_data */
9914 			find_good_pkt_pointers(other_branch, dst_reg,
9915 					       dst_reg->type, true);
9916 			mark_pkt_end(this_branch, insn->dst_reg, false);
9917 		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
9918 			    src_reg->type == PTR_TO_PACKET) ||
9919 			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
9920 			    src_reg->type == PTR_TO_PACKET_META)) {
9921 			/* pkt_end < pkt_data', pkt_data < pkt_meta' */
9922 			find_good_pkt_pointers(this_branch, src_reg,
9923 					       src_reg->type, false);
9924 			mark_pkt_end(other_branch, insn->src_reg, true);
9925 		} else {
9926 			return false;
9927 		}
9928 		break;
9929 	case BPF_JGE:
9930 		if ((dst_reg->type == PTR_TO_PACKET &&
9931 		     src_reg->type == PTR_TO_PACKET_END) ||
9932 		    (dst_reg->type == PTR_TO_PACKET_META &&
9933 		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
9934 			/* pkt_data' >= pkt_end, pkt_meta' >= pkt_data */
9935 			find_good_pkt_pointers(this_branch, dst_reg,
9936 					       dst_reg->type, true);
9937 			mark_pkt_end(other_branch, insn->dst_reg, false);
9938 		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
9939 			    src_reg->type == PTR_TO_PACKET) ||
9940 			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
9941 			    src_reg->type == PTR_TO_PACKET_META)) {
9942 			/* pkt_end >= pkt_data', pkt_data >= pkt_meta' */
9943 			find_good_pkt_pointers(other_branch, src_reg,
9944 					       src_reg->type, false);
9945 			mark_pkt_end(this_branch, insn->src_reg, true);
9946 		} else {
9947 			return false;
9948 		}
9949 		break;
9950 	case BPF_JLE:
9951 		if ((dst_reg->type == PTR_TO_PACKET &&
9952 		     src_reg->type == PTR_TO_PACKET_END) ||
9953 		    (dst_reg->type == PTR_TO_PACKET_META &&
9954 		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
9955 			/* pkt_data' <= pkt_end, pkt_meta' <= pkt_data */
9956 			find_good_pkt_pointers(other_branch, dst_reg,
9957 					       dst_reg->type, false);
9958 			mark_pkt_end(this_branch, insn->dst_reg, true);
9959 		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
9960 			    src_reg->type == PTR_TO_PACKET) ||
9961 			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
9962 			    src_reg->type == PTR_TO_PACKET_META)) {
9963 			/* pkt_end <= pkt_data', pkt_data <= pkt_meta' */
9964 			find_good_pkt_pointers(this_branch, src_reg,
9965 					       src_reg->type, true);
9966 			mark_pkt_end(other_branch, insn->src_reg, false);
9967 		} else {
9968 			return false;
9969 		}
9970 		break;
9971 	default:
9972 		return false;
9973 	}
9974 
9975 	return true;
9976 }
9977 
9978 static void find_equal_scalars(struct bpf_verifier_state *vstate,
9979 			       struct bpf_reg_state *known_reg)
9980 {
9981 	struct bpf_func_state *state;
9982 	struct bpf_reg_state *reg;
9983 	int i, j;
9984 
9985 	for (i = 0; i <= vstate->curframe; i++) {
9986 		state = vstate->frame[i];
9987 		for (j = 0; j < MAX_BPF_REG; j++) {
9988 			reg = &state->regs[j];
9989 			if (reg->type == SCALAR_VALUE && reg->id == known_reg->id)
9990 				*reg = *known_reg;
9991 		}
9992 
9993 		bpf_for_each_spilled_reg(j, state, reg) {
9994 			if (!reg)
9995 				continue;
9996 			if (reg->type == SCALAR_VALUE && reg->id == known_reg->id)
9997 				*reg = *known_reg;
9998 		}
9999 	}
10000 }
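
/* Illustrative sketch of the propagation done above (made-up program
 * fragment, not from a real test): after
 *	r6 = r7			// r6 and r7 end up sharing the same id
 *	if r6 > 8 goto pc+K
 * the bounds learned for r6 on either branch are copied into r7 as
 * well, because every SCALAR_VALUE register (and spilled register)
 * carrying known_reg->id is overwritten with the refined state.
 */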
10001 
10002 static int check_cond_jmp_op(struct bpf_verifier_env *env,
10003 			     struct bpf_insn *insn, int *insn_idx)
10004 {
10005 	struct bpf_verifier_state *this_branch = env->cur_state;
10006 	struct bpf_verifier_state *other_branch;
10007 	struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs;
10008 	struct bpf_reg_state *dst_reg, *other_branch_regs, *src_reg = NULL;
10009 	u8 opcode = BPF_OP(insn->code);
10010 	bool is_jmp32;
10011 	int pred = -1;
10012 	int err;
10013 
10014 	/* Only conditional jumps are expected to reach here. */
10015 	if (opcode == BPF_JA || opcode > BPF_JSLE) {
10016 		verbose(env, "invalid BPF_JMP/JMP32 opcode %x\n", opcode);
10017 		return -EINVAL;
10018 	}
10019 
10020 	if (BPF_SRC(insn->code) == BPF_X) {
10021 		if (insn->imm != 0) {
10022 			verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
10023 			return -EINVAL;
10024 		}
10025 
10026 		/* check src1 operand */
10027 		err = check_reg_arg(env, insn->src_reg, SRC_OP);
10028 		if (err)
10029 			return err;
10030 
10031 		if (is_pointer_value(env, insn->src_reg)) {
10032 			verbose(env, "R%d pointer comparison prohibited\n",
10033 				insn->src_reg);
10034 			return -EACCES;
10035 		}
10036 		src_reg = &regs[insn->src_reg];
10037 	} else {
10038 		if (insn->src_reg != BPF_REG_0) {
10039 			verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
10040 			return -EINVAL;
10041 		}
10042 	}
10043 
10044 	/* check src2 operand */
10045 	err = check_reg_arg(env, insn->dst_reg, SRC_OP);
10046 	if (err)
10047 		return err;
10048 
10049 	dst_reg = &regs[insn->dst_reg];
10050 	is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
10051 
10052 	if (BPF_SRC(insn->code) == BPF_K) {
10053 		pred = is_branch_taken(dst_reg, insn->imm, opcode, is_jmp32);
10054 	} else if (src_reg->type == SCALAR_VALUE &&
10055 		   is_jmp32 && tnum_is_const(tnum_subreg(src_reg->var_off))) {
10056 		pred = is_branch_taken(dst_reg,
10057 				       tnum_subreg(src_reg->var_off).value,
10058 				       opcode,
10059 				       is_jmp32);
10060 	} else if (src_reg->type == SCALAR_VALUE &&
10061 		   !is_jmp32 && tnum_is_const(src_reg->var_off)) {
10062 		pred = is_branch_taken(dst_reg,
10063 				       src_reg->var_off.value,
10064 				       opcode,
10065 				       is_jmp32);
10066 	} else if (reg_is_pkt_pointer_any(dst_reg) &&
10067 		   reg_is_pkt_pointer_any(src_reg) &&
10068 		   !is_jmp32) {
10069 		pred = is_pkt_ptr_branch_taken(dst_reg, src_reg, opcode);
10070 	}
10071 
10072 	if (pred >= 0) {
10073 		/* If we get here with a dst_reg pointer type it is because
10074 		 * above is_branch_taken() special cased the 0 comparison.
10075 		 */
10076 		if (!__is_pointer_value(false, dst_reg))
10077 			err = mark_chain_precision(env, insn->dst_reg);
10078 		if (BPF_SRC(insn->code) == BPF_X && !err &&
10079 		    !__is_pointer_value(false, src_reg))
10080 			err = mark_chain_precision(env, insn->src_reg);
10081 		if (err)
10082 			return err;
10083 	}
10084 
10085 	if (pred == 1) {
10086 		/* Only follow the goto, ignore fall-through. If needed, push
10087 		 * the fall-through branch for simulation under speculative
10088 		 * execution.
10089 		 */
10090 		if (!env->bypass_spec_v1 &&
10091 		    !sanitize_speculative_path(env, insn, *insn_idx + 1,
10092 					       *insn_idx))
10093 			return -EFAULT;
10094 		*insn_idx += insn->off;
10095 		return 0;
10096 	} else if (pred == 0) {
10097 		/* Only follow the fall-through branch, since that's where the
10098 		 * program will go. If needed, push the goto branch for
10099 		 * simulation under speculative execution.
10100 		 */
10101 		if (!env->bypass_spec_v1 &&
10102 		    !sanitize_speculative_path(env, insn,
10103 					       *insn_idx + insn->off + 1,
10104 					       *insn_idx))
10105 			return -EFAULT;
10106 		return 0;
10107 	}
10108 
10109 	other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx,
10110 				  false);
10111 	if (!other_branch)
10112 		return -EFAULT;
10113 	other_branch_regs = other_branch->frame[other_branch->curframe]->regs;
10114 
10115 	/* detect if we are comparing against a constant value so we can adjust
10116 	 * our min/max values for our dst register.
10117 	 * this is only legit if both are scalars (or pointers to the same
10118 	 * object, I suppose, but we don't support that right now), because
10119 	 * otherwise the different base pointers mean the offsets aren't
10120 	 * comparable.
10121 	 */
10122 	if (BPF_SRC(insn->code) == BPF_X) {
10123 		struct bpf_reg_state *src_reg = &regs[insn->src_reg];
10124 
10125 		if (dst_reg->type == SCALAR_VALUE &&
10126 		    src_reg->type == SCALAR_VALUE) {
10127 			if (tnum_is_const(src_reg->var_off) ||
10128 			    (is_jmp32 &&
10129 			     tnum_is_const(tnum_subreg(src_reg->var_off))))
10130 				reg_set_min_max(&other_branch_regs[insn->dst_reg],
10131 						dst_reg,
10132 						src_reg->var_off.value,
10133 						tnum_subreg(src_reg->var_off).value,
10134 						opcode, is_jmp32);
10135 			else if (tnum_is_const(dst_reg->var_off) ||
10136 				 (is_jmp32 &&
10137 				  tnum_is_const(tnum_subreg(dst_reg->var_off))))
10138 				reg_set_min_max_inv(&other_branch_regs[insn->src_reg],
10139 						    src_reg,
10140 						    dst_reg->var_off.value,
10141 						    tnum_subreg(dst_reg->var_off).value,
10142 						    opcode, is_jmp32);
10143 			else if (!is_jmp32 &&
10144 				 (opcode == BPF_JEQ || opcode == BPF_JNE))
10145 				/* Comparing for equality, we can combine knowledge */
10146 				reg_combine_min_max(&other_branch_regs[insn->src_reg],
10147 						    &other_branch_regs[insn->dst_reg],
10148 						    src_reg, dst_reg, opcode);
10149 			if (src_reg->id &&
10150 			    !WARN_ON_ONCE(src_reg->id != other_branch_regs[insn->src_reg].id)) {
10151 				find_equal_scalars(this_branch, src_reg);
10152 				find_equal_scalars(other_branch, &other_branch_regs[insn->src_reg]);
10153 			}
10154 
10155 		}
10156 	} else if (dst_reg->type == SCALAR_VALUE) {
10157 		reg_set_min_max(&other_branch_regs[insn->dst_reg],
10158 					dst_reg, insn->imm, (u32)insn->imm,
10159 					opcode, is_jmp32);
10160 	}
10161 
10162 	if (dst_reg->type == SCALAR_VALUE && dst_reg->id &&
10163 	    !WARN_ON_ONCE(dst_reg->id != other_branch_regs[insn->dst_reg].id)) {
10164 		find_equal_scalars(this_branch, dst_reg);
10165 		find_equal_scalars(other_branch, &other_branch_regs[insn->dst_reg]);
10166 	}
10167 
10168 	/* detect if R == 0 where R is returned from bpf_map_lookup_elem().
10169 	 * NOTE: these optimizations below are related to pointer comparisons,
10170 	 *       which will never be JMP32.
10171 	 */
10172 	if (!is_jmp32 && BPF_SRC(insn->code) == BPF_K &&
10173 	    insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) &&
10174 	    type_may_be_null(dst_reg->type)) {
10175 		/* Mark all identical registers in each branch as either
10176 		 * safe or unknown depending on whether the R == 0 or R != 0 branch is taken.
10177 		 */
10178 		mark_ptr_or_null_regs(this_branch, insn->dst_reg,
10179 				      opcode == BPF_JNE);
10180 		mark_ptr_or_null_regs(other_branch, insn->dst_reg,
10181 				      opcode == BPF_JEQ);
10182 	} else if (!try_match_pkt_pointers(insn, dst_reg, &regs[insn->src_reg],
10183 					   this_branch, other_branch) &&
10184 		   is_pointer_value(env, insn->dst_reg)) {
10185 		verbose(env, "R%d pointer comparison prohibited\n",
10186 			insn->dst_reg);
10187 		return -EACCES;
10188 	}
10189 	if (env->log.level & BPF_LOG_LEVEL)
10190 		print_insn_state(env, this_branch->frame[this_branch->curframe]);
10191 	return 0;
10192 }
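
/* A small worked example of the branch pruning above (a sketch with
 * illustrative values): for
 *	BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 100, 2)
 * with R1 known to lie in [0, 50], is_branch_taken() returns 0, so only
 * the fall-through path is walked; the dead "goto" path is still pushed
 * for Spectre v1 simulation unless env->bypass_spec_v1 is set.
 */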
10193 
10194 /* verify BPF_LD_IMM64 instruction */
10195 static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
10196 {
10197 	struct bpf_insn_aux_data *aux = cur_aux(env);
10198 	struct bpf_reg_state *regs = cur_regs(env);
10199 	struct bpf_reg_state *dst_reg;
10200 	struct bpf_map *map;
10201 	int err;
10202 
10203 	if (BPF_SIZE(insn->code) != BPF_DW) {
10204 		verbose(env, "invalid BPF_LD_IMM insn\n");
10205 		return -EINVAL;
10206 	}
10207 	if (insn->off != 0) {
10208 		verbose(env, "BPF_LD_IMM64 uses reserved fields\n");
10209 		return -EINVAL;
10210 	}
10211 
10212 	err = check_reg_arg(env, insn->dst_reg, DST_OP);
10213 	if (err)
10214 		return err;
10215 
10216 	dst_reg = &regs[insn->dst_reg];
10217 	if (insn->src_reg == 0) {
10218 		u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
10219 
10220 		dst_reg->type = SCALAR_VALUE;
10221 		__mark_reg_known(&regs[insn->dst_reg], imm);
10222 		return 0;
10223 	}
10224 
10225 	/* All special src_reg cases are listed below. From this point onwards
10226 	 * we either succeed and assign a corresponding dst_reg->type after
10227 	 * zeroing the offset, or fail and reject the program.
10228 	 */
10229 	mark_reg_known_zero(env, regs, insn->dst_reg);
10230 
10231 	if (insn->src_reg == BPF_PSEUDO_BTF_ID) {
10232 		dst_reg->type = aux->btf_var.reg_type;
10233 		switch (base_type(dst_reg->type)) {
10234 		case PTR_TO_MEM:
10235 			dst_reg->mem_size = aux->btf_var.mem_size;
10236 			break;
10237 		case PTR_TO_BTF_ID:
10238 			dst_reg->btf = aux->btf_var.btf;
10239 			dst_reg->btf_id = aux->btf_var.btf_id;
10240 			break;
10241 		default:
10242 			verbose(env, "bpf verifier is misconfigured\n");
10243 			return -EFAULT;
10244 		}
10245 		return 0;
10246 	}
10247 
10248 	if (insn->src_reg == BPF_PSEUDO_FUNC) {
10249 		struct bpf_prog_aux *aux = env->prog->aux;
10250 		u32 subprogno = find_subprog(env,
10251 					     env->insn_idx + insn->imm + 1);
10252 
10253 		if (!aux->func_info) {
10254 			verbose(env, "missing btf func_info\n");
10255 			return -EINVAL;
10256 		}
10257 		if (aux->func_info_aux[subprogno].linkage != BTF_FUNC_STATIC) {
10258 			verbose(env, "callback function not static\n");
10259 			return -EINVAL;
10260 		}
10261 
10262 		dst_reg->type = PTR_TO_FUNC;
10263 		dst_reg->subprogno = subprogno;
10264 		return 0;
10265 	}
10266 
10267 	map = env->used_maps[aux->map_index];
10268 	dst_reg->map_ptr = map;
10269 
10270 	if (insn->src_reg == BPF_PSEUDO_MAP_VALUE ||
10271 	    insn->src_reg == BPF_PSEUDO_MAP_IDX_VALUE) {
10272 		dst_reg->type = PTR_TO_MAP_VALUE;
10273 		dst_reg->off = aux->map_off;
10274 		if (map_value_has_spin_lock(map))
10275 			dst_reg->id = ++env->id_gen;
10276 	} else if (insn->src_reg == BPF_PSEUDO_MAP_FD ||
10277 		   insn->src_reg == BPF_PSEUDO_MAP_IDX) {
10278 		dst_reg->type = CONST_PTR_TO_MAP;
10279 	} else {
10280 		verbose(env, "bpf verifier is misconfigured\n");
10281 		return -EINVAL;
10282 	}
10283 
10284 	return 0;
10285 }
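
/* Sketch of the src_reg == 0 case above: BPF_LD_IMM64 occupies two
 * insns, and the 64-bit constant is assembled as
 *	imm = ((u64)insn[1].imm << 32) | (u32)insn[0].imm
 * e.g. insn[0].imm = 0x11223344 and insn[1].imm = 0x55667788 make the
 * destination a known SCALAR_VALUE of 0x5566778811223344.
 */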
10286 
10287 static bool may_access_skb(enum bpf_prog_type type)
10288 {
10289 	switch (type) {
10290 	case BPF_PROG_TYPE_SOCKET_FILTER:
10291 	case BPF_PROG_TYPE_SCHED_CLS:
10292 	case BPF_PROG_TYPE_SCHED_ACT:
10293 		return true;
10294 	default:
10295 		return false;
10296 	}
10297 }
10298 
10299 /* verify safety of LD_ABS|LD_IND instructions:
10300  * - they can only appear in the programs where ctx == skb
10301  * - since they are wrappers of function calls, they scratch R1-R5 registers,
10302  *   preserve R6-R9, and store return value into R0
10303  *
10304  * Implicit input:
10305  *   ctx == skb == R6 == CTX
10306  *
10307  * Explicit input:
10308  *   SRC == any register
10309  *   IMM == 32-bit immediate
10310  *
10311  * Output:
10312  *   R0 - 8/16/32-bit skb data converted to cpu endianness
10313  */
10314 static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
10315 {
10316 	struct bpf_reg_state *regs = cur_regs(env);
10317 	static const int ctx_reg = BPF_REG_6;
10318 	u8 mode = BPF_MODE(insn->code);
10319 	int i, err;
10320 
10321 	if (!may_access_skb(resolve_prog_type(env->prog))) {
10322 		verbose(env, "BPF_LD_[ABS|IND] instructions not allowed for this program type\n");
10323 		return -EINVAL;
10324 	}
10325 
10326 	if (!env->ops->gen_ld_abs) {
10327 		verbose(env, "bpf verifier is misconfigured\n");
10328 		return -EINVAL;
10329 	}
10330 
10331 	if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
10332 	    BPF_SIZE(insn->code) == BPF_DW ||
10333 	    (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
10334 		verbose(env, "BPF_LD_[ABS|IND] uses reserved fields\n");
10335 		return -EINVAL;
10336 	}
10337 
10338 	/* check whether implicit source operand (register R6) is readable */
10339 	err = check_reg_arg(env, ctx_reg, SRC_OP);
10340 	if (err)
10341 		return err;
10342 
10343 	/* Disallow usage of BPF_LD_[ABS|IND] with reference tracking, as
10344 	 * gen_ld_abs() may terminate the program at runtime, leading to
10345 	 * reference leak.
10346 	 */
10347 	err = check_reference_leak(env);
10348 	if (err) {
10349 		verbose(env, "BPF_LD_[ABS|IND] cannot be mixed with socket references\n");
10350 		return err;
10351 	}
10352 
10353 	if (env->cur_state->active_spin_lock) {
10354 		verbose(env, "BPF_LD_[ABS|IND] cannot be used inside bpf_spin_lock-ed region\n");
10355 		return -EINVAL;
10356 	}
10357 
10358 	if (regs[ctx_reg].type != PTR_TO_CTX) {
10359 		verbose(env,
10360 			"at the time of BPF_LD_ABS|IND R6 != pointer to skb\n");
10361 		return -EINVAL;
10362 	}
10363 
10364 	if (mode == BPF_IND) {
10365 		/* check explicit source operand */
10366 		err = check_reg_arg(env, insn->src_reg, SRC_OP);
10367 		if (err)
10368 			return err;
10369 	}
10370 
10371 	err = check_ptr_off_reg(env, &regs[ctx_reg], ctx_reg);
10372 	if (err < 0)
10373 		return err;
10374 
10375 	/* reset caller saved regs to unreadable */
10376 	for (i = 0; i < CALLER_SAVED_REGS; i++) {
10377 		mark_reg_not_init(env, regs, caller_saved[i]);
10378 		check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
10379 	}
10380 
10381 	/* mark destination R0 register as readable, since it contains
10382 	 * the value fetched from the packet.
10383 	 * Already marked as written above.
10384 	 */
10385 	mark_reg_unknown(env, regs, BPF_REG_0);
10386 	/* ld_abs loads up to 32 bits of skb data. */
10387 	regs[BPF_REG_0].subreg_def = env->insn_idx + 1;
10388 	return 0;
10389 }
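
/* Illustrative sequence that passes the checks above (sketch of a
 * socket-filter style program):
 *	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),	// R6 = ctx (skb)
 *	BPF_LD_ABS(BPF_H, 12),			// R0 = 16 bits at skb offset 12
 *	BPF_EXIT_INSN(),
 * R1-R5 are scratched by the underlying helper call that gen_ld_abs()
 * emits; R0 ends up with the loaded value in cpu endianness.
 */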
10390 
10391 static int check_return_code(struct bpf_verifier_env *env)
10392 {
10393 	struct tnum enforce_attach_type_range = tnum_unknown;
10394 	const struct bpf_prog *prog = env->prog;
10395 	struct bpf_reg_state *reg;
10396 	struct tnum range = tnum_range(0, 1);
10397 	enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
10398 	int err;
10399 	struct bpf_func_state *frame = env->cur_state->frame[0];
10400 	const bool is_subprog = frame->subprogno;
10401 
10402 	/* LSM and struct_ops func-ptr's return type could be "void" */
10403 	if (!is_subprog &&
10404 	    (prog_type == BPF_PROG_TYPE_STRUCT_OPS ||
10405 	     prog_type == BPF_PROG_TYPE_LSM) &&
10406 	    !prog->aux->attach_func_proto->type)
10407 		return 0;
10408 
10409 	/* eBPF calling convention is such that R0 is used
10410 	 * to return the value from eBPF program.
10411 	 * Make sure that it's readable at this time
10412 	 * of bpf_exit, which means that program wrote
10413 	 * something into it earlier
10414 	 */
10415 	err = check_reg_arg(env, BPF_REG_0, SRC_OP);
10416 	if (err)
10417 		return err;
10418 
10419 	if (is_pointer_value(env, BPF_REG_0)) {
10420 		verbose(env, "R0 leaks addr as return value\n");
10421 		return -EACCES;
10422 	}
10423 
10424 	reg = cur_regs(env) + BPF_REG_0;
10425 
10426 	if (frame->in_async_callback_fn) {
10427 		/* enforce return zero from async callbacks like timer */
10428 		if (reg->type != SCALAR_VALUE) {
10429 			verbose(env, "In async callback the register R0 is not a known value (%s)\n",
10430 				reg_type_str(env, reg->type));
10431 			return -EINVAL;
10432 		}
10433 
10434 		if (!tnum_in(tnum_const(0), reg->var_off)) {
10435 			verbose_invalid_scalar(env, reg, &range, "async callback", "R0");
10436 			return -EINVAL;
10437 		}
10438 		return 0;
10439 	}
10440 
10441 	if (is_subprog) {
10442 		if (reg->type != SCALAR_VALUE) {
10443 			verbose(env, "At subprogram exit the register R0 is not a scalar value (%s)\n",
10444 				reg_type_str(env, reg->type));
10445 			return -EINVAL;
10446 		}
10447 		return 0;
10448 	}
10449 
10450 	switch (prog_type) {
10451 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
10452 		if (env->prog->expected_attach_type == BPF_CGROUP_UDP4_RECVMSG ||
10453 		    env->prog->expected_attach_type == BPF_CGROUP_UDP6_RECVMSG ||
10454 		    env->prog->expected_attach_type == BPF_CGROUP_INET4_GETPEERNAME ||
10455 		    env->prog->expected_attach_type == BPF_CGROUP_INET6_GETPEERNAME ||
10456 		    env->prog->expected_attach_type == BPF_CGROUP_INET4_GETSOCKNAME ||
10457 		    env->prog->expected_attach_type == BPF_CGROUP_INET6_GETSOCKNAME)
10458 			range = tnum_range(1, 1);
10459 		if (env->prog->expected_attach_type == BPF_CGROUP_INET4_BIND ||
10460 		    env->prog->expected_attach_type == BPF_CGROUP_INET6_BIND)
10461 			range = tnum_range(0, 3);
10462 		break;
10463 	case BPF_PROG_TYPE_CGROUP_SKB:
10464 		if (env->prog->expected_attach_type == BPF_CGROUP_INET_EGRESS) {
10465 			range = tnum_range(0, 3);
10466 			enforce_attach_type_range = tnum_range(2, 3);
10467 		}
10468 		break;
10469 	case BPF_PROG_TYPE_CGROUP_SOCK:
10470 	case BPF_PROG_TYPE_SOCK_OPS:
10471 	case BPF_PROG_TYPE_CGROUP_DEVICE:
10472 	case BPF_PROG_TYPE_CGROUP_SYSCTL:
10473 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
10474 		break;
10475 	case BPF_PROG_TYPE_RAW_TRACEPOINT:
10476 		if (!env->prog->aux->attach_btf_id)
10477 			return 0;
10478 		range = tnum_const(0);
10479 		break;
10480 	case BPF_PROG_TYPE_TRACING:
10481 		switch (env->prog->expected_attach_type) {
10482 		case BPF_TRACE_FENTRY:
10483 		case BPF_TRACE_FEXIT:
10484 			range = tnum_const(0);
10485 			break;
10486 		case BPF_TRACE_RAW_TP:
10487 		case BPF_MODIFY_RETURN:
10488 			return 0;
10489 		case BPF_TRACE_ITER:
10490 			break;
10491 		default:
10492 			return -ENOTSUPP;
10493 		}
10494 		break;
10495 	case BPF_PROG_TYPE_SK_LOOKUP:
10496 		range = tnum_range(SK_DROP, SK_PASS);
10497 		break;
10498 	case BPF_PROG_TYPE_EXT:
10499 		/* freplace program can return anything as its return value
10500 		 * depends on the to-be-replaced kernel func or bpf program.
10501 		 */
10502 	default:
10503 		return 0;
10504 	}
10505 
10506 	if (reg->type != SCALAR_VALUE) {
10507 		verbose(env, "At program exit the register R0 is not a known value (%s)\n",
10508 			reg_type_str(env, reg->type));
10509 		return -EINVAL;
10510 	}
10511 
10512 	if (!tnum_in(range, reg->var_off)) {
10513 		verbose_invalid_scalar(env, reg, &range, "program exit", "R0");
10514 		return -EINVAL;
10515 	}
10516 
10517 	if (!tnum_is_unknown(enforce_attach_type_range) &&
10518 	    tnum_in(enforce_attach_type_range, reg->var_off))
10519 		env->prog->enforce_expected_attach_type = 1;
10520 	return 0;
10521 }
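
/* Example of the range enforcement above (illustrative): a
 * BPF_PROG_TYPE_CGROUP_SKB program attached at BPF_CGROUP_INET_EGRESS
 * must exit with R0 in [0, 3]; a program ending in
 *	BPF_MOV64_IMM(BPF_REG_0, 42),
 *	BPF_EXIT_INSN(),
 * fails the tnum_in(range, reg->var_off) check and is rejected.
 */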
10522 
10523 /* non-recursive DFS pseudo code
10524  * 1  procedure DFS-iterative(G,v):
10525  * 2      label v as discovered
10526  * 3      let S be a stack
10527  * 4      S.push(v)
10528  * 5      while S is not empty
10529  * 6            t <- S.pop()
10530  * 7            if t is what we're looking for:
10531  * 8                return t
10532  * 9            for all edges e in G.adjacentEdges(t) do
10533  * 10               if edge e is already labelled
10534  * 11                   continue with the next edge
10535  * 12               w <- G.adjacentVertex(t,e)
10536  * 13               if vertex w is not discovered and not explored
10537  * 14                   label e as tree-edge
10538  * 15                   label w as discovered
10539  * 16                   S.push(w)
10540  * 17                   continue at 5
10541  * 18               else if vertex w is discovered
10542  * 19                   label e as back-edge
10543  * 20               else
10544  * 21                   // vertex w is explored
10545  * 22                   label e as forward- or cross-edge
10546  * 23           label t as explored
10547  * 24           S.pop()
10548  *
10549  * convention:
10550  * 0x10 - discovered
10551  * 0x11 - discovered and fall-through edge labelled
10552  * 0x12 - discovered and fall-through and branch edges labelled
10553  * 0x20 - explored
10554  */
10555 
10556 enum {
10557 	DISCOVERED = 0x10,
10558 	EXPLORED = 0x20,
10559 	FALLTHROUGH = 1,
10560 	BRANCH = 2,
10561 };
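
/* Sketch of the insn_state transitions for a trivial two-insn program
 * "r0 = 0; exit":
 *	insn 0: DISCOVERED -> DISCOVERED | FALLTHROUGH -> EXPLORED
 *	insn 1: DISCOVERED -> EXPLORED
 * A back-edge (i.e. a loop) is reported when push_insn() reaches a
 * target that is still DISCOVERED but not yet EXPLORED.
 */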
10562 
10563 static u32 state_htab_size(struct bpf_verifier_env *env)
10564 {
10565 	return env->prog->len;
10566 }
10567 
10568 static struct bpf_verifier_state_list **explored_state(
10569 					struct bpf_verifier_env *env,
10570 					int idx)
10571 {
10572 	struct bpf_verifier_state *cur = env->cur_state;
10573 	struct bpf_func_state *state = cur->frame[cur->curframe];
10574 
10575 	return &env->explored_states[(idx ^ state->callsite) % state_htab_size(env)];
10576 }
10577 
10578 static void init_explored_state(struct bpf_verifier_env *env, int idx)
10579 {
10580 	env->insn_aux_data[idx].prune_point = true;
10581 }
10582 
10583 enum {
10584 	DONE_EXPLORING = 0,
10585 	KEEP_EXPLORING = 1,
10586 };
10587 
10588 /* t, w, e - match pseudo-code above:
10589  * t - index of current instruction
10590  * w - next instruction
10591  * e - edge
10592  */
10593 static int push_insn(int t, int w, int e, struct bpf_verifier_env *env,
10594 		     bool loop_ok)
10595 {
10596 	int *insn_stack = env->cfg.insn_stack;
10597 	int *insn_state = env->cfg.insn_state;
10598 
10599 	if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH))
10600 		return DONE_EXPLORING;
10601 
10602 	if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH))
10603 		return DONE_EXPLORING;
10604 
10605 	if (w < 0 || w >= env->prog->len) {
10606 		verbose_linfo(env, t, "%d: ", t);
10607 		verbose(env, "jump out of range from insn %d to %d\n", t, w);
10608 		return -EINVAL;
10609 	}
10610 
10611 	if (e == BRANCH)
10612 		/* mark branch target for state pruning */
10613 		init_explored_state(env, w);
10614 
10615 	if (insn_state[w] == 0) {
10616 		/* tree-edge */
10617 		insn_state[t] = DISCOVERED | e;
10618 		insn_state[w] = DISCOVERED;
10619 		if (env->cfg.cur_stack >= env->prog->len)
10620 			return -E2BIG;
10621 		insn_stack[env->cfg.cur_stack++] = w;
10622 		return KEEP_EXPLORING;
10623 	} else if ((insn_state[w] & 0xF0) == DISCOVERED) {
10624 		if (loop_ok && env->bpf_capable)
10625 			return DONE_EXPLORING;
10626 		verbose_linfo(env, t, "%d: ", t);
10627 		verbose_linfo(env, w, "%d: ", w);
10628 		verbose(env, "back-edge from insn %d to %d\n", t, w);
10629 		return -EINVAL;
10630 	} else if (insn_state[w] == EXPLORED) {
10631 		/* forward- or cross-edge */
10632 		insn_state[t] = DISCOVERED | e;
10633 	} else {
10634 		verbose(env, "insn state internal bug\n");
10635 		return -EFAULT;
10636 	}
10637 	return DONE_EXPLORING;
10638 }
10639 
10640 static int visit_func_call_insn(int t, int insn_cnt,
10641 				struct bpf_insn *insns,
10642 				struct bpf_verifier_env *env,
10643 				bool visit_callee)
10644 {
10645 	int ret;
10646 
10647 	ret = push_insn(t, t + 1, FALLTHROUGH, env, false);
10648 	if (ret)
10649 		return ret;
10650 
10651 	if (t + 1 < insn_cnt)
10652 		init_explored_state(env, t + 1);
10653 	if (visit_callee) {
10654 		init_explored_state(env, t);
10655 		ret = push_insn(t, t + insns[t].imm + 1, BRANCH, env,
10656 				/* It's ok to allow recursion from CFG point of
10657 				 * view. __check_func_call() will do the actual
10658 				 * check.
10659 				 */
10660 				bpf_pseudo_func(insns + t));
10661 	}
10662 	return ret;
10663 }
10664 
10665 /* Visits the instruction at index t and returns one of the following:
10666  *  < 0 - an error occurred
10667  *  DONE_EXPLORING - the instruction was fully explored
10668  *  KEEP_EXPLORING - there is still work to be done before it is fully explored
10669  */
10670 static int visit_insn(int t, int insn_cnt, struct bpf_verifier_env *env)
10671 {
10672 	struct bpf_insn *insns = env->prog->insnsi;
10673 	int ret;
10674 
10675 	if (bpf_pseudo_func(insns + t))
10676 		return visit_func_call_insn(t, insn_cnt, insns, env, true);
10677 
10678 	/* All non-branch instructions have a single fall-through edge. */
10679 	if (BPF_CLASS(insns[t].code) != BPF_JMP &&
10680 	    BPF_CLASS(insns[t].code) != BPF_JMP32)
10681 		return push_insn(t, t + 1, FALLTHROUGH, env, false);
10682 
10683 	switch (BPF_OP(insns[t].code)) {
10684 	case BPF_EXIT:
10685 		return DONE_EXPLORING;
10686 
10687 	case BPF_CALL:
10688 		if (insns[t].imm == BPF_FUNC_timer_set_callback)
10689 			/* Mark this call insn to trigger is_state_visited() check
10690 			 * before call itself is processed by __check_func_call().
10691 			 * Otherwise new async state will be pushed for further
10692 			 * exploration.
10693 			 */
10694 			init_explored_state(env, t);
10695 		return visit_func_call_insn(t, insn_cnt, insns, env,
10696 					    insns[t].src_reg == BPF_PSEUDO_CALL);
10697 
10698 	case BPF_JA:
10699 		if (BPF_SRC(insns[t].code) != BPF_K)
10700 			return -EINVAL;
10701 
10702 		/* unconditional jump with single edge */
10703 		ret = push_insn(t, t + insns[t].off + 1, FALLTHROUGH, env,
10704 				true);
10705 		if (ret)
10706 			return ret;
10707 
10708 		/* unconditional jmp is not a good pruning point,
10709 		 * but it's marked, since backtracking needs
10710 		 * to record jmp history in is_state_visited().
10711 		 */
10712 		init_explored_state(env, t + insns[t].off + 1);
10713 		/* tell verifier to check for equivalent states
10714 		 * after every call and jump
10715 		 */
10716 		if (t + 1 < insn_cnt)
10717 			init_explored_state(env, t + 1);
10718 
10719 		return ret;
10720 
10721 	default:
10722 		/* conditional jump with two edges */
10723 		init_explored_state(env, t);
10724 		ret = push_insn(t, t + 1, FALLTHROUGH, env, true);
10725 		if (ret)
10726 			return ret;
10727 
10728 		return push_insn(t, t + insns[t].off + 1, BRANCH, env, true);
10729 	}
10730 }
10731 
10732 /* non-recursive depth-first-search to detect loops in BPF program
10733  * loop == back-edge in directed graph
10734  */
10735 static int check_cfg(struct bpf_verifier_env *env)
10736 {
10737 	int insn_cnt = env->prog->len;
10738 	int *insn_stack, *insn_state;
10739 	int ret = 0;
10740 	int i;
10741 
10742 	insn_state = env->cfg.insn_state = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
10743 	if (!insn_state)
10744 		return -ENOMEM;
10745 
10746 	insn_stack = env->cfg.insn_stack = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
10747 	if (!insn_stack) {
10748 		kvfree(insn_state);
10749 		return -ENOMEM;
10750 	}
10751 
10752 	insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */
10753 	insn_stack[0] = 0; /* 0 is the first instruction */
10754 	env->cfg.cur_stack = 1;
10755 
10756 	while (env->cfg.cur_stack > 0) {
10757 		int t = insn_stack[env->cfg.cur_stack - 1];
10758 
10759 		ret = visit_insn(t, insn_cnt, env);
10760 		switch (ret) {
10761 		case DONE_EXPLORING:
10762 			insn_state[t] = EXPLORED;
10763 			env->cfg.cur_stack--;
10764 			break;
10765 		case KEEP_EXPLORING:
10766 			break;
10767 		default:
10768 			if (ret > 0) {
10769 				verbose(env, "visit_insn internal bug\n");
10770 				ret = -EFAULT;
10771 			}
10772 			goto err_free;
10773 		}
10774 	}
10775 
10776 	if (env->cfg.cur_stack < 0) {
10777 		verbose(env, "pop stack internal bug\n");
10778 		ret = -EFAULT;
10779 		goto err_free;
10780 	}
10781 
10782 	for (i = 0; i < insn_cnt; i++) {
10783 		if (insn_state[i] != EXPLORED) {
10784 			verbose(env, "unreachable insn %d\n", i);
10785 			ret = -EINVAL;
10786 			goto err_free;
10787 		}
10788 	}
10789 	ret = 0; /* cfg looks good */
10790 
10791 err_free:
10792 	kvfree(insn_state);
10793 	kvfree(insn_stack);
10794 	env->cfg.insn_state = env->cfg.insn_stack = NULL;
10795 	return ret;
10796 }
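
/* Sketch of a program this pass rejects for an unprivileged load
 * (illustrative insn indices):
 *	0: r0 = 0
 *	1: r0 += 1
 *	2: if r0 != 10 goto pc-2	// back to insn 1
 *	3: exit
 * Following the branch edge, push_insn() finds insn 1 still DISCOVERED
 * and reports "back-edge from insn 2 to 1". With bpf_capable, the
 * back-edge is tolerated here and bounded-loop checking is deferred to
 * the state-exploration pass.
 */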
10797 
10798 static int check_abnormal_return(struct bpf_verifier_env *env)
10799 {
10800 	int i;
10801 
10802 	for (i = 1; i < env->subprog_cnt; i++) {
10803 		if (env->subprog_info[i].has_ld_abs) {
10804 			verbose(env, "LD_ABS is not allowed in subprogs without BTF\n");
10805 			return -EINVAL;
10806 		}
10807 		if (env->subprog_info[i].has_tail_call) {
10808 			verbose(env, "tail_call is not allowed in subprogs without BTF\n");
10809 			return -EINVAL;
10810 		}
10811 	}
10812 	return 0;
10813 }
10814 
10815 /* The minimum supported BTF func info size */
10816 #define MIN_BPF_FUNCINFO_SIZE	8
10817 #define MAX_FUNCINFO_REC_SIZE	252
10818 
10819 static int check_btf_func(struct bpf_verifier_env *env,
10820 			  const union bpf_attr *attr,
10821 			  bpfptr_t uattr)
10822 {
10823 	const struct btf_type *type, *func_proto, *ret_type;
10824 	u32 i, nfuncs, urec_size, min_size;
10825 	u32 krec_size = sizeof(struct bpf_func_info);
10826 	struct bpf_func_info *krecord;
10827 	struct bpf_func_info_aux *info_aux = NULL;
10828 	struct bpf_prog *prog;
10829 	const struct btf *btf;
10830 	bpfptr_t urecord;
10831 	u32 prev_offset = 0;
10832 	bool scalar_return;
10833 	int ret = -ENOMEM;
10834 
10835 	nfuncs = attr->func_info_cnt;
10836 	if (!nfuncs) {
10837 		if (check_abnormal_return(env))
10838 			return -EINVAL;
10839 		return 0;
10840 	}
10841 
10842 	if (nfuncs != env->subprog_cnt) {
10843 		verbose(env, "number of funcs in func_info doesn't match number of subprogs\n");
10844 		return -EINVAL;
10845 	}
10846 
10847 	urec_size = attr->func_info_rec_size;
10848 	if (urec_size < MIN_BPF_FUNCINFO_SIZE ||
10849 	    urec_size > MAX_FUNCINFO_REC_SIZE ||
10850 	    urec_size % sizeof(u32)) {
10851 		verbose(env, "invalid func info rec size %u\n", urec_size);
10852 		return -EINVAL;
10853 	}
10854 
10855 	prog = env->prog;
10856 	btf = prog->aux->btf;
10857 
10858 	urecord = make_bpfptr(attr->func_info, uattr.is_kernel);
10859 	min_size = min_t(u32, krec_size, urec_size);
10860 
10861 	krecord = kvcalloc(nfuncs, krec_size, GFP_KERNEL | __GFP_NOWARN);
10862 	if (!krecord)
10863 		return -ENOMEM;
10864 	info_aux = kcalloc(nfuncs, sizeof(*info_aux), GFP_KERNEL | __GFP_NOWARN);
10865 	if (!info_aux)
10866 		goto err_free;
10867 
10868 	for (i = 0; i < nfuncs; i++) {
10869 		ret = bpf_check_uarg_tail_zero(urecord, krec_size, urec_size);
10870 		if (ret) {
10871 			if (ret == -E2BIG) {
10872 				verbose(env, "nonzero trailing record in func info");
10873 				/* set the size the kernel expects so the loader can zero
10874 				 * out the rest of the record.
10875 				 */
10876 				if (copy_to_bpfptr_offset(uattr,
10877 							  offsetof(union bpf_attr, func_info_rec_size),
10878 							  &min_size, sizeof(min_size)))
10879 					ret = -EFAULT;
10880 			}
10881 			goto err_free;
10882 		}
10883 
10884 		if (copy_from_bpfptr(&krecord[i], urecord, min_size)) {
10885 			ret = -EFAULT;
10886 			goto err_free;
10887 		}
10888 
10889 		/* check insn_off */
10890 		ret = -EINVAL;
10891 		if (i == 0) {
10892 			if (krecord[i].insn_off) {
10893 				verbose(env,
10894 					"nonzero insn_off %u for the first func info record",
10895 					krecord[i].insn_off);
10896 				goto err_free;
10897 			}
10898 		} else if (krecord[i].insn_off <= prev_offset) {
10899 			verbose(env,
10900 				"same or smaller insn offset (%u) than previous func info record (%u)",
10901 				krecord[i].insn_off, prev_offset);
10902 			goto err_free;
10903 		}
10904 
10905 		if (env->subprog_info[i].start != krecord[i].insn_off) {
10906 			verbose(env, "func_info BTF section doesn't match subprog layout in BPF program\n");
10907 			goto err_free;
10908 		}
10909 
10910 		/* check type_id */
10911 		type = btf_type_by_id(btf, krecord[i].type_id);
10912 		if (!type || !btf_type_is_func(type)) {
10913 			verbose(env, "invalid type id %d in func info",
10914 				krecord[i].type_id);
10915 			goto err_free;
10916 		}
10917 		info_aux[i].linkage = BTF_INFO_VLEN(type->info);
10918 
10919 		func_proto = btf_type_by_id(btf, type->type);
10920 		if (unlikely(!func_proto || !btf_type_is_func_proto(func_proto)))
10921 			/* btf_func_check() already verified it during BTF load */
10922 			goto err_free;
10923 		ret_type = btf_type_skip_modifiers(btf, func_proto->type, NULL);
10924 		scalar_return =
10925 			btf_type_is_small_int(ret_type) || btf_is_any_enum(ret_type);
10926 		if (i && !scalar_return && env->subprog_info[i].has_ld_abs) {
10927 			verbose(env, "LD_ABS is only allowed in functions that return 'int'.\n");
10928 			goto err_free;
10929 		}
10930 		if (i && !scalar_return && env->subprog_info[i].has_tail_call) {
10931 			verbose(env, "tail_call is only allowed in functions that return 'int'.\n");
10932 			goto err_free;
10933 		}
10934 
10935 		prev_offset = krecord[i].insn_off;
10936 		bpfptr_add(&urecord, urec_size);
10937 	}
10938 
10939 	prog->aux->func_info = krecord;
10940 	prog->aux->func_info_cnt = nfuncs;
10941 	prog->aux->func_info_aux = info_aux;
10942 	return 0;
10943 
10944 err_free:
10945 	kvfree(krecord);
10946 	kfree(info_aux);
10947 	return ret;
10948 }
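
/* Sketch of a func_info layout this function accepts for a prog with
 * two subprogs starting at insns 0 and 12 (BTF type ids are made up):
 *	{ .insn_off = 0,  .type_id = 4 }	// main prog
 *	{ .insn_off = 12, .type_id = 7 }	// subprog #1
 * insn_off values must be strictly increasing and must match
 * subprog_info[i].start exactly.
 */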
10949 
10950 static void adjust_btf_func(struct bpf_verifier_env *env)
10951 {
10952 	struct bpf_prog_aux *aux = env->prog->aux;
10953 	int i;
10954 
10955 	if (!aux->func_info)
10956 		return;
10957 
10958 	for (i = 0; i < env->subprog_cnt; i++)
10959 		aux->func_info[i].insn_off = env->subprog_info[i].start;
10960 }
10961 
10962 #define MIN_BPF_LINEINFO_SIZE	offsetofend(struct bpf_line_info, line_col)
10963 #define MAX_LINEINFO_REC_SIZE	MAX_FUNCINFO_REC_SIZE
10964 
10965 static int check_btf_line(struct bpf_verifier_env *env,
10966 			  const union bpf_attr *attr,
10967 			  bpfptr_t uattr)
10968 {
10969 	u32 i, s, nr_linfo, ncopy, expected_size, rec_size, prev_offset = 0;
10970 	struct bpf_subprog_info *sub;
10971 	struct bpf_line_info *linfo;
10972 	struct bpf_prog *prog;
10973 	const struct btf *btf;
10974 	bpfptr_t ulinfo;
10975 	int err;
10976 
10977 	nr_linfo = attr->line_info_cnt;
10978 	if (!nr_linfo)
10979 		return 0;
10980 	if (nr_linfo > INT_MAX / sizeof(struct bpf_line_info))
10981 		return -EINVAL;
10982 
10983 	rec_size = attr->line_info_rec_size;
10984 	if (rec_size < MIN_BPF_LINEINFO_SIZE ||
10985 	    rec_size > MAX_LINEINFO_REC_SIZE ||
10986 	    rec_size & (sizeof(u32) - 1))
10987 		return -EINVAL;
10988 
10989 	/* Need to zero it in case userspace passes
10990 	 * in a smaller bpf_line_info object.
10991 	 */
10992 	linfo = kvcalloc(nr_linfo, sizeof(struct bpf_line_info),
10993 			 GFP_KERNEL | __GFP_NOWARN);
10994 	if (!linfo)
10995 		return -ENOMEM;
10996 
10997 	prog = env->prog;
10998 	btf = prog->aux->btf;
10999 
11000 	s = 0;
11001 	sub = env->subprog_info;
11002 	ulinfo = make_bpfptr(attr->line_info, uattr.is_kernel);
11003 	expected_size = sizeof(struct bpf_line_info);
11004 	ncopy = min_t(u32, expected_size, rec_size);
11005 	for (i = 0; i < nr_linfo; i++) {
11006 		err = bpf_check_uarg_tail_zero(ulinfo, expected_size, rec_size);
11007 		if (err) {
11008 			if (err == -E2BIG) {
11009 				verbose(env, "nonzero trailing record in line_info");
11010 				if (copy_to_bpfptr_offset(uattr,
11011 							  offsetof(union bpf_attr, line_info_rec_size),
11012 							  &expected_size, sizeof(expected_size)))
11013 					err = -EFAULT;
11014 			}
11015 			goto err_free;
11016 		}
11017 
11018 		if (copy_from_bpfptr(&linfo[i], ulinfo, ncopy)) {
11019 			err = -EFAULT;
11020 			goto err_free;
11021 		}
11022 
11023 		/*
11024 		 * Check insn_off to ensure
11025 		 * 1) strictly increasing AND
11026 		 * 2) bounded by prog->len
11027 		 *
11028 		 * The linfo[0].insn_off == 0 check logically falls into
11029 		 * the later "missing bpf_line_info for func..." case
11030 		 * because linfo[0].insn_off must also be the start of the
11031 		 * first subprog, and the first subprog must have
11032 		 * subprog_info[0].start == 0.
11033 		 */
11034 		if ((i && linfo[i].insn_off <= prev_offset) ||
11035 		    linfo[i].insn_off >= prog->len) {
11036 			verbose(env, "Invalid line_info[%u].insn_off:%u (prev_offset:%u prog->len:%u)\n",
11037 				i, linfo[i].insn_off, prev_offset,
11038 				prog->len);
11039 			err = -EINVAL;
11040 			goto err_free;
11041 		}
11042 
11043 		if (!prog->insnsi[linfo[i].insn_off].code) {
11044 			verbose(env,
11045 				"Invalid insn code at line_info[%u].insn_off\n",
11046 				i);
11047 			err = -EINVAL;
11048 			goto err_free;
11049 		}
11050 
11051 		if (!btf_name_by_offset(btf, linfo[i].line_off) ||
11052 		    !btf_name_by_offset(btf, linfo[i].file_name_off)) {
11053 			verbose(env, "Invalid line_info[%u].line_off or .file_name_off\n", i);
11054 			err = -EINVAL;
11055 			goto err_free;
11056 		}
11057 
11058 		if (s != env->subprog_cnt) {
11059 			if (linfo[i].insn_off == sub[s].start) {
11060 				sub[s].linfo_idx = i;
11061 				s++;
11062 			} else if (sub[s].start < linfo[i].insn_off) {
11063 				verbose(env, "missing bpf_line_info for func#%u\n", s);
11064 				err = -EINVAL;
11065 				goto err_free;
11066 			}
11067 		}
11068 
11069 		prev_offset = linfo[i].insn_off;
11070 		bpfptr_add(&ulinfo, rec_size);
11071 	}
11072 
11073 	if (s != env->subprog_cnt) {
11074 		verbose(env, "missing bpf_line_info for %u funcs starting from func#%u\n",
11075 			env->subprog_cnt - s, s);
11076 		err = -EINVAL;
11077 		goto err_free;
11078 	}
11079 
11080 	prog->aux->linfo = linfo;
11081 	prog->aux->nr_linfo = nr_linfo;
11082 
11083 	return 0;
11084 
11085 err_free:
11086 	kvfree(linfo);
11087 	return err;
11088 }
11089 
11090 #define MIN_CORE_RELO_SIZE	sizeof(struct bpf_core_relo)
11091 #define MAX_CORE_RELO_SIZE	MAX_FUNCINFO_REC_SIZE
11092 
11093 static int check_core_relo(struct bpf_verifier_env *env,
11094 			   const union bpf_attr *attr,
11095 			   bpfptr_t uattr)
11096 {
11097 	u32 i, nr_core_relo, ncopy, expected_size, rec_size;
11098 	struct bpf_core_relo core_relo = {};
11099 	struct bpf_prog *prog = env->prog;
11100 	const struct btf *btf = prog->aux->btf;
11101 	struct bpf_core_ctx ctx = {
11102 		.log = &env->log,
11103 		.btf = btf,
11104 	};
11105 	bpfptr_t u_core_relo;
11106 	int err;
11107 
11108 	nr_core_relo = attr->core_relo_cnt;
11109 	if (!nr_core_relo)
11110 		return 0;
11111 	if (nr_core_relo > INT_MAX / sizeof(struct bpf_core_relo))
11112 		return -EINVAL;
11113 
11114 	rec_size = attr->core_relo_rec_size;
11115 	if (rec_size < MIN_CORE_RELO_SIZE ||
11116 	    rec_size > MAX_CORE_RELO_SIZE ||
11117 	    rec_size % sizeof(u32))
11118 		return -EINVAL;
11119 
11120 	u_core_relo = make_bpfptr(attr->core_relos, uattr.is_kernel);
11121 	expected_size = sizeof(struct bpf_core_relo);
11122 	ncopy = min_t(u32, expected_size, rec_size);
11123 
11124 	/* Unlike func_info and line_info, copy and apply each CO-RE
11125 	 * relocation record one at a time.
11126 	 */
11127 	for (i = 0; i < nr_core_relo; i++) {
11128 		/* future proofing when sizeof(bpf_core_relo) changes */
11129 		err = bpf_check_uarg_tail_zero(u_core_relo, expected_size, rec_size);
11130 		if (err) {
11131 			if (err == -E2BIG) {
11132 				verbose(env, "nonzero trailing record in core_relo");
11133 				if (copy_to_bpfptr_offset(uattr,
11134 							  offsetof(union bpf_attr, core_relo_rec_size),
11135 							  &expected_size, sizeof(expected_size)))
11136 					err = -EFAULT;
11137 			}
11138 			break;
11139 		}
11140 
11141 		if (copy_from_bpfptr(&core_relo, u_core_relo, ncopy)) {
11142 			err = -EFAULT;
11143 			break;
11144 		}
11145 
11146 		if (core_relo.insn_off % 8 || core_relo.insn_off / 8 >= prog->len) {
11147 			verbose(env, "Invalid core_relo[%u].insn_off:%u prog->len:%u\n",
11148 				i, core_relo.insn_off, prog->len);
11149 			err = -EINVAL;
11150 			break;
11151 		}
11152 
11153 		err = bpf_core_apply(&ctx, &core_relo, i,
11154 				     &prog->insnsi[core_relo.insn_off / 8]);
11155 		if (err)
11156 			break;
11157 		bpfptr_add(&u_core_relo, rec_size);
11158 	}
11159 	return err;
11160 }
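
/* Note on the insn_off handling above: unlike func_info and line_info,
 * core_relo.insn_off is a byte offset into the instruction stream,
 * hence the "% 8" alignment check and the "/ 8" conversion. For
 * example, a record with insn_off == 24 patches prog->insnsi[3].
 */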
11161 
11162 static int check_btf_info(struct bpf_verifier_env *env,
11163 			  const union bpf_attr *attr,
11164 			  bpfptr_t uattr)
11165 {
11166 	struct btf *btf;
11167 	int err;
11168 
11169 	if (!attr->func_info_cnt && !attr->line_info_cnt) {
11170 		if (check_abnormal_return(env))
11171 			return -EINVAL;
11172 		return 0;
11173 	}
11174 
11175 	btf = btf_get_by_fd(attr->prog_btf_fd);
11176 	if (IS_ERR(btf))
11177 		return PTR_ERR(btf);
11178 	if (btf_is_kernel(btf)) {
11179 		btf_put(btf);
11180 		return -EACCES;
11181 	}
11182 	env->prog->aux->btf = btf;
11183 
11184 	err = check_btf_func(env, attr, uattr);
11185 	if (err)
11186 		return err;
11187 
11188 	err = check_btf_line(env, attr, uattr);
11189 	if (err)
11190 		return err;
11191 
11192 	err = check_core_relo(env, attr, uattr);
11193 	if (err)
11194 		return err;
11195 
11196 	return 0;
11197 }
11198 
11199 /* check %cur's range satisfies %old's */
11200 static bool range_within(struct bpf_reg_state *old,
11201 			 struct bpf_reg_state *cur)
11202 {
11203 	return old->umin_value <= cur->umin_value &&
11204 	       old->umax_value >= cur->umax_value &&
11205 	       old->smin_value <= cur->smin_value &&
11206 	       old->smax_value >= cur->smax_value &&
11207 	       old->u32_min_value <= cur->u32_min_value &&
11208 	       old->u32_max_value >= cur->u32_max_value &&
11209 	       old->s32_min_value <= cur->s32_min_value &&
11210 	       old->s32_max_value >= cur->s32_max_value;
11211 }
11212 
11213 /* If in the old state two registers had the same id, then they need to have
11214  * the same id in the new state as well.  But that id could be different from
11215  * the old state, so we need to track the mapping from old to new ids.
11216  * Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent
11217  * regs with old id 5 must also have new id 9 for the new state to be safe.  But
11218  * regs with a different old id could still have new id 9, we don't care about
11219  * that.
11220  * So we look through our idmap to see if this old id has been seen before.  If
11221  * so, we require the new id to match; otherwise, we add the id pair to the map.
11222  */
11223 static bool check_ids(u32 old_id, u32 cur_id, struct bpf_id_pair *idmap)
11224 {
11225 	unsigned int i;
11226 
11227 	for (i = 0; i < BPF_ID_MAP_SIZE; i++) {
11228 		if (!idmap[i].old) {
11229 			/* Reached an empty slot; haven't seen this id before */
11230 			idmap[i].old = old_id;
11231 			idmap[i].cur = cur_id;
11232 			return true;
11233 		}
11234 		if (idmap[i].old == old_id)
11235 			return idmap[i].cur == cur_id;
11236 	}
11237 	/* We ran out of idmap slots, which should be impossible */
11238 	WARN_ON_ONCE(1);
11239 	return false;
11240 }
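
/* Worked example of the idmap rule above (illustrative ids): if the
 * old state had r1.id == r2.id == 5 and the current state has
 * r1.id == 9, then check_ids(5, 9, idmap) records the pair 5 -> 9;
 * a later check_ids(5, 7, idmap) for r2 fails, since regs that were
 * linked in the old state are no longer linked in the current one.
 */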
11241 
11242 static void clean_func_state(struct bpf_verifier_env *env,
11243 			     struct bpf_func_state *st)
11244 {
11245 	enum bpf_reg_liveness live;
11246 	int i, j;
11247 
11248 	for (i = 0; i < BPF_REG_FP; i++) {
11249 		live = st->regs[i].live;
11250 		/* liveness must not touch this register anymore */
11251 		st->regs[i].live |= REG_LIVE_DONE;
11252 		if (!(live & REG_LIVE_READ))
11253 			/* since the register is unused, clear its state
11254 			 * to make further comparison simpler
11255 			 */
11256 			__mark_reg_not_init(env, &st->regs[i]);
11257 	}
11258 
11259 	for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) {
11260 		live = st->stack[i].spilled_ptr.live;
11261 		/* liveness must not touch this stack slot anymore */
11262 		st->stack[i].spilled_ptr.live |= REG_LIVE_DONE;
11263 		if (!(live & REG_LIVE_READ)) {
11264 			__mark_reg_not_init(env, &st->stack[i].spilled_ptr);
11265 			for (j = 0; j < BPF_REG_SIZE; j++)
11266 				st->stack[i].slot_type[j] = STACK_INVALID;
11267 		}
11268 	}
11269 }
11270 
11271 static void clean_verifier_state(struct bpf_verifier_env *env,
11272 				 struct bpf_verifier_state *st)
11273 {
11274 	int i;
11275 
11276 	if (st->frame[0]->regs[0].live & REG_LIVE_DONE)
11277 		/* all regs in this state in all frames were already marked */
11278 		return;
11279 
11280 	for (i = 0; i <= st->curframe; i++)
11281 		clean_func_state(env, st->frame[i]);
11282 }
11283 
11284 /* the parentage chains form a tree.
11285  * the verifier states are added to state lists at a given insn and
11286  * pushed into the state stack for future exploration.
11287  * when the verifier reaches the bpf_exit insn some of the verifier states
11288  * stored in the state lists have their final liveness state already,
11289  * but a lot of states will get revised from a liveness point of view when
11290  * the verifier explores other branches.
11291  * Example:
11292  * 1: r0 = 1
11293  * 2: if r1 == 100 goto pc+1
11294  * 3: r0 = 2
11295  * 4: exit
11296  * when the verifier reaches exit insn the register r0 in the state list of
11297  * insn 2 will be seen as !REG_LIVE_READ. Then the verifier pops the other_branch
11298  * of insn 2 and goes exploring further. At the insn 4 it will walk the
11299  * parentage chain from insn 4 into insn 2 and will mark r0 as REG_LIVE_READ.
11300  *
11301  * Since the verifier pushes the branch states as it sees them while exploring
11302  * the program, walking the branch instruction for the second
11303  * time means that all states below this branch were already explored and
11304  * their final liveness marks are already propagated.
11305  * Hence when the verifier completes the search of state list in is_state_visited()
11306  * we can call this clean_live_states() function to mark all liveness states
11307  * as REG_LIVE_DONE to indicate that 'parent' pointers of 'struct bpf_reg_state'
11308  * will not be used.
11309  * This function also clears the registers and stack for states that !READ
11310  * to simplify state merging.
11311  *
11312  * An important note here: walking the same branch instruction in the callee
11313  * doesn't mean that the states are DONE. The verifier also has to compare
11314  * the callsites.
11315  */
11316 static void clean_live_states(struct bpf_verifier_env *env, int insn,
11317 			      struct bpf_verifier_state *cur)
11318 {
11319 	struct bpf_verifier_state_list *sl;
11320 	int i;
11321 
11322 	sl = *explored_state(env, insn);
11323 	while (sl) {
11324 		if (sl->state.branches)
11325 			goto next;
11326 		if (sl->state.insn_idx != insn ||
11327 		    sl->state.curframe != cur->curframe)
11328 			goto next;
11329 		for (i = 0; i <= cur->curframe; i++)
11330 			if (sl->state.frame[i]->callsite != cur->frame[i]->callsite)
11331 				goto next;
11332 		clean_verifier_state(env, &sl->state);
11333 next:
11334 		sl = sl->next;
11335 	}
11336 }
11337 
11338 /* Returns true if (rold safe implies rcur safe) */
11339 static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold,
11340 		    struct bpf_reg_state *rcur, struct bpf_id_pair *idmap)
11341 {
11342 	bool equal;
11343 
11344 	if (!(rold->live & REG_LIVE_READ))
11345 		/* explored state didn't use this */
11346 		return true;
11347 
11348 	equal = memcmp(rold, rcur, offsetof(struct bpf_reg_state, parent)) == 0;
11349 
11350 	if (rold->type == PTR_TO_STACK)
11351 		/* two stack pointers are equal only if they're pointing to
11352 		 * the same stack frame, since fp-8 in foo != fp-8 in bar
11353 		 */
11354 		return equal && rold->frameno == rcur->frameno;
11355 
11356 	if (equal)
11357 		return true;
11358 
11359 	if (rold->type == NOT_INIT)
11360 		/* explored state can't have used this */
11361 		return true;
11362 	if (rcur->type == NOT_INIT)
11363 		return false;
11364 	switch (base_type(rold->type)) {
11365 	case SCALAR_VALUE:
11366 		if (env->explore_alu_limits)
11367 			return false;
11368 		if (rcur->type == SCALAR_VALUE) {
11369 			if (!rold->precise && !rcur->precise)
11370 				return true;
11371 			/* new val must satisfy old val knowledge */
11372 			return range_within(rold, rcur) &&
11373 			       tnum_in(rold->var_off, rcur->var_off);
11374 		} else {
11375 			/* We're trying to use a pointer in place of a scalar.
11376 			 * Even if the scalar was unbounded, this could lead to
11377 			 * pointer leaks because scalars are allowed to leak
11378 			 * while pointers are not. We could make this safe in
11379 			 * special cases if root is calling us, but it's
11380 			 * probably not worth the hassle.
11381 			 */
11382 			return false;
11383 		}
11384 	case PTR_TO_MAP_KEY:
11385 	case PTR_TO_MAP_VALUE:
11386 		/* a PTR_TO_MAP_VALUE could be safe to use as a
11387 		 * PTR_TO_MAP_VALUE_OR_NULL into the same map.
11388 		 * However, if the old PTR_TO_MAP_VALUE_OR_NULL then got NULL-
11389 		 * checked, doing so could have affected others with the same
11390 		 * id, and we can't check for that because we lost the id when
11391 		 * we converted to a PTR_TO_MAP_VALUE.
11392 		 */
11393 		if (type_may_be_null(rold->type)) {
11394 			if (!type_may_be_null(rcur->type))
11395 				return false;
11396 			if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)))
11397 				return false;
11398 			/* Check our ids match any regs they're supposed to */
11399 			return check_ids(rold->id, rcur->id, idmap);
11400 		}
11401 
11402 		/* If the new min/max/var_off satisfy the old ones and
11403 		 * everything else matches, we are OK.
11404 		 * 'id' is not compared, since it's only used for maps with
11405 		 * bpf_spin_lock inside map element and in such cases if
11406 		 * the rest of the prog is valid for one map element then
11407 		 * it's valid for all map elements regardless of the key
11408 		 * used in bpf_map_lookup()
11409 		 */
11410 		return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 &&
11411 		       range_within(rold, rcur) &&
11412 		       tnum_in(rold->var_off, rcur->var_off);
11413 	case PTR_TO_PACKET_META:
11414 	case PTR_TO_PACKET:
11415 		if (rcur->type != rold->type)
11416 			return false;
11417 		/* We must have at least as much range as the old ptr
11418 		 * did, so that any accesses which were safe before are
11419 		 * still safe.  This is true even if old range < old off,
11420 		 * since someone could have accessed through (ptr - k), or
11421 		 * even done ptr -= k in a register, to get a safe access.
11422 		 */
11423 		if (rold->range > rcur->range)
11424 			return false;
11425 		/* If the offsets don't match, we can't trust our alignment;
11426 		 * nor can we be sure that we won't fall out of range.
11427 		 */
11428 		if (rold->off != rcur->off)
11429 			return false;
11430 		/* id relations must be preserved */
11431 		if (rold->id && !check_ids(rold->id, rcur->id, idmap))
11432 			return false;
11433 		/* new val must satisfy old val knowledge */
11434 		return range_within(rold, rcur) &&
11435 		       tnum_in(rold->var_off, rcur->var_off);
11436 	case PTR_TO_CTX:
11437 	case CONST_PTR_TO_MAP:
11438 	case PTR_TO_PACKET_END:
11439 	case PTR_TO_FLOW_KEYS:
11440 	case PTR_TO_SOCKET:
11441 	case PTR_TO_SOCK_COMMON:
11442 	case PTR_TO_TCP_SOCK:
11443 	case PTR_TO_XDP_SOCK:
11444 		/* Only valid matches are exact, which memcmp() above
11445 		 * would have accepted
11446 		 */
11447 	default:
11448 		/* Don't know what's going on, just say it's not safe */
11449 		return false;
11450 	}
11451 
11452 	/* Shouldn't get here; if we do, say it's not safe */
11453 	WARN_ON_ONCE(1);
11454 	return false;
11455 }
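
/* Example of the SCALAR_VALUE rule above (illustrative ranges): an old
 * (explored) register with range [0, 100] is safe to reuse for a
 * current register with range [10, 20], because every value the
 * current reg can take was already proven safe; the reverse direction
 * fails range_within()/tnum_in() and forces further exploration.
 */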
11456 
11457 static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old,
11458 		      struct bpf_func_state *cur, struct bpf_id_pair *idmap)
11459 {
11460 	int i, spi;
11461 
11462 	/* walk slots of the explored stack and ignore any additional
11463 	 * slots in the current stack, since explored(safe) state
11464 	 * didn't use them
11465 	 */
11466 	for (i = 0; i < old->allocated_stack; i++) {
11467 		spi = i / BPF_REG_SIZE;
11468 
11469 		if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ)) {
11470 			i += BPF_REG_SIZE - 1;
11471 			/* explored state didn't use this */
11472 			continue;
11473 		}
11474 
11475 		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID)
11476 			continue;
11477 
11478 		/* explored stack has more populated slots than current stack
11479 		 * and these slots were used
11480 		 */
11481 		if (i >= cur->allocated_stack)
11482 			return false;
11483 
11484 		/* if old state was safe with misc data in the stack
11485 		 * it will be safe with zero-initialized stack.
11486 		 * The opposite is not true
11487 		 */
11488 		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC &&
11489 		    cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO)
11490 			continue;
11491 		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] !=
11492 		    cur->stack[spi].slot_type[i % BPF_REG_SIZE])
11493 			/* Ex: old explored (safe) state has STACK_SPILL in
11494 			 * this stack slot, but current has STACK_MISC ->
11495 			 * these verifier states are not equivalent,
11496 			 * return false to continue verification of this path
11497 			 */
11498 			return false;
11499 		if (i % BPF_REG_SIZE != BPF_REG_SIZE - 1)
11500 			continue;
11501 		if (!is_spilled_reg(&old->stack[spi]))
11502 			continue;
11503 		if (!regsafe(env, &old->stack[spi].spilled_ptr,
11504 			     &cur->stack[spi].spilled_ptr, idmap))
11505 			/* when explored and current stack slot are both storing
11506 			 * spilled registers, check that the stored pointer types
11507 			 * are the same as well.
11508 			 * Ex: explored safe path could have stored
11509 			 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8}
11510 			 * but current path has stored:
11511 			 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16}
11512 			 * such verifier states are not equivalent.
11513 			 * return false to continue verification of this path
11514 			 */
11515 			return false;
11516 	}
11517 	return true;
11518 }
11519 
11520 static bool refsafe(struct bpf_func_state *old, struct bpf_func_state *cur)
11521 {
11522 	if (old->acquired_refs != cur->acquired_refs)
11523 		return false;
11524 	return !memcmp(old->refs, cur->refs,
11525 		       sizeof(*old->refs) * old->acquired_refs);
11526 }
11527 
11528 /* compare two verifier states
11529  *
11530  * all states stored in state_list are known to be valid, since
11531  * verifier reached 'bpf_exit' instruction through them
11532  *
11533  * this function is called when the verifier is exploring different branches of
11534  * execution popped from the state stack. If it sees an old state that has
11535  * more strict register state and more strict stack state then this execution
11536  * branch doesn't need to be explored further, since verifier already
11537  * concluded that more strict state leads to valid finish.
11538  *
11539  * Therefore two states are equivalent if register state is more conservative
11540  * and explored stack state is more conservative than the current one.
11541  * Example:
11542  *       explored                   current
11543  * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC)
11544  * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC)
11545  *
11546  * In other words, if the current stack state (the one being explored) has more
11547  * valid slots than the old one that already passed validation, it means
11548  * the verifier can stop exploring and conclude that the current state is valid too
11549  *
11550  * Similarly with registers. If the explored state has a register type marked invalid
11551  * whereas the register type in the current state is meaningful, it means that
11552  * the current state will reach 'bpf_exit' instruction safely
11553  */
11554 static bool func_states_equal(struct bpf_verifier_env *env, struct bpf_func_state *old,
11555 			      struct bpf_func_state *cur)
11556 {
11557 	int i;
11558 
11559 	memset(env->idmap_scratch, 0, sizeof(env->idmap_scratch));
11560 	for (i = 0; i < MAX_BPF_REG; i++)
11561 		if (!regsafe(env, &old->regs[i], &cur->regs[i],
11562 			     env->idmap_scratch))
11563 			return false;
11564 
11565 	if (!stacksafe(env, old, cur, env->idmap_scratch))
11566 		return false;
11567 
11568 	if (!refsafe(old, cur))
11569 		return false;
11570 
11571 	return true;
11572 }
11573 
11574 static bool states_equal(struct bpf_verifier_env *env,
11575 			 struct bpf_verifier_state *old,
11576 			 struct bpf_verifier_state *cur)
11577 {
11578 	int i;
11579 
11580 	if (old->curframe != cur->curframe)
11581 		return false;
11582 
11583 	/* Verification state from speculative execution simulation
11584 	 * must never prune a non-speculative execution one.
11585 	 */
11586 	if (old->speculative && !cur->speculative)
11587 		return false;
11588 
11589 	if (old->active_spin_lock != cur->active_spin_lock)
11590 		return false;
11591 
11592 	/* for states to be equal callsites have to be the same
11593 	 * and all frame states need to be equivalent
11594 	 */
11595 	for (i = 0; i <= old->curframe; i++) {
11596 		if (old->frame[i]->callsite != cur->frame[i]->callsite)
11597 			return false;
11598 		if (!func_states_equal(env, old->frame[i], cur->frame[i]))
11599 			return false;
11600 	}
11601 	return true;
11602 }
11603 
11604 /* Return 0 if no propagation happened. Return negative error code if error
11605  * happened. Otherwise, return the propagated bit.
11606  */
11607 static int propagate_liveness_reg(struct bpf_verifier_env *env,
11608 				  struct bpf_reg_state *reg,
11609 				  struct bpf_reg_state *parent_reg)
11610 {
11611 	u8 parent_flag = parent_reg->live & REG_LIVE_READ;
11612 	u8 flag = reg->live & REG_LIVE_READ;
11613 	int err;
11614 
11615 	/* When we get here, the read flags of PARENT_REG or REG could be any of
11616 	 * REG_LIVE_READ64, REG_LIVE_READ32, REG_LIVE_NONE. There is no need
11617 	 * for propagation if PARENT_REG already has the strongest REG_LIVE_READ64.
11618 	 */
11619 	if (parent_flag == REG_LIVE_READ64 ||
11620 	    /* Or if there is no read flag from REG. */
11621 	    !flag ||
11622 	    /* Or if the read flag from REG is the same as PARENT_REG. */
11623 	    parent_flag == flag)
11624 		return 0;
11625 
11626 	err = mark_reg_read(env, reg, parent_reg, flag);
11627 	if (err)
11628 		return err;
11629 
11630 	return flag;
11631 }
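
/* Illustrative example (a sketch, register choices are arbitrary):
 *
 *     w2 = w6        // 32-bit read of r6 -> REG_LIVE_READ32
 *     r3 = r6        // 64-bit read of r6 -> REG_LIVE_READ64
 *
 * If the parent already carries REG_LIVE_READ64 for r6, nothing is
 * propagated; if it only carries REG_LIVE_READ32 and the child performed a
 * 64-bit read, the stronger flag is pushed up via mark_reg_read().
 */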
11632 
11633 /* A write screens off any subsequent reads; but write marks come from the
11634  * straight-line code between a state and its parent.  When we arrive at an
11635  * equivalent state (jump target or such) we didn't arrive by the straight-line
11636  * code, so read marks in the state must propagate to the parent regardless
11637  * of the state's write marks. That's what 'parent == state->parent' comparison
11638  * in mark_reg_read() is for.
11639  */
11640 static int propagate_liveness(struct bpf_verifier_env *env,
11641 			      const struct bpf_verifier_state *vstate,
11642 			      struct bpf_verifier_state *vparent)
11643 {
11644 	struct bpf_reg_state *state_reg, *parent_reg;
11645 	struct bpf_func_state *state, *parent;
11646 	int i, frame, err = 0;
11647 
11648 	if (vparent->curframe != vstate->curframe) {
11649 		WARN(1, "propagate_live: parent frame %d current frame %d\n",
11650 		     vparent->curframe, vstate->curframe);
11651 		return -EFAULT;
11652 	}
11653 	/* Propagate read liveness of registers... */
11654 	BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
11655 	for (frame = 0; frame <= vstate->curframe; frame++) {
11656 		parent = vparent->frame[frame];
11657 		state = vstate->frame[frame];
11658 		parent_reg = parent->regs;
11659 		state_reg = state->regs;
11660 		/* We don't need to worry about FP liveness, it's read-only */
11661 		for (i = frame < vstate->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++) {
11662 			err = propagate_liveness_reg(env, &state_reg[i],
11663 						     &parent_reg[i]);
11664 			if (err < 0)
11665 				return err;
11666 			if (err == REG_LIVE_READ64)
11667 				mark_insn_zext(env, &parent_reg[i]);
11668 		}
11669 
11670 		/* Propagate stack slots. */
11671 		for (i = 0; i < state->allocated_stack / BPF_REG_SIZE &&
11672 			    i < parent->allocated_stack / BPF_REG_SIZE; i++) {
11673 			parent_reg = &parent->stack[i].spilled_ptr;
11674 			state_reg = &state->stack[i].spilled_ptr;
11675 			err = propagate_liveness_reg(env, state_reg,
11676 						     parent_reg);
11677 			if (err < 0)
11678 				return err;
11679 		}
11680 	}
11681 	return 0;
11682 }
11683 
11684 /* find precise scalars in the previous equivalent state and
11685  * propagate them into the current state
11686  */
11687 static int propagate_precision(struct bpf_verifier_env *env,
11688 			       const struct bpf_verifier_state *old)
11689 {
11690 	struct bpf_reg_state *state_reg;
11691 	struct bpf_func_state *state;
11692 	int i, err = 0;
11693 
11694 	state = old->frame[old->curframe];
11695 	state_reg = state->regs;
11696 	for (i = 0; i < BPF_REG_FP; i++, state_reg++) {
11697 		if (state_reg->type != SCALAR_VALUE ||
11698 		    !state_reg->precise)
11699 			continue;
11700 		if (env->log.level & BPF_LOG_LEVEL2)
11701 			verbose(env, "propagating r%d\n", i);
11702 		err = mark_chain_precision(env, i);
11703 		if (err < 0)
11704 			return err;
11705 	}
11706 
11707 	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
11708 		if (!is_spilled_reg(&state->stack[i]))
11709 			continue;
11710 		state_reg = &state->stack[i].spilled_ptr;
11711 		if (state_reg->type != SCALAR_VALUE ||
11712 		    !state_reg->precise)
11713 			continue;
11714 		if (env->log.level & BPF_LOG_LEVEL2)
11715 			verbose(env, "propagating fp%d\n",
11716 				(-i - 1) * BPF_REG_SIZE);
11717 		err = mark_chain_precision_stack(env, i);
11718 		if (err < 0)
11719 			return err;
11720 	}
11721 	return 0;
11722 }
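
/* Sketch of why this matters (hypothetical example): if the old, already
 * verified state was only proven safe because r2 was tracked precisely
 * (say r2 == 8 was later used as a size argument of a helper call), an
 * otherwise equivalent current state must track r2 precisely as well.
 * mark_chain_precision() walks the recorded jump history backwards and
 * marks the instructions that produced r2, so the precision requirement
 * propagates into the current state.
 */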
11723 
11724 static bool states_maybe_looping(struct bpf_verifier_state *old,
11725 				 struct bpf_verifier_state *cur)
11726 {
11727 	struct bpf_func_state *fold, *fcur;
11728 	int i, fr = cur->curframe;
11729 
11730 	if (old->curframe != fr)
11731 		return false;
11732 
11733 	fold = old->frame[fr];
11734 	fcur = cur->frame[fr];
11735 	for (i = 0; i < MAX_BPF_REG; i++)
11736 		if (memcmp(&fold->regs[i], &fcur->regs[i],
11737 			   offsetof(struct bpf_reg_state, parent)))
11738 			return false;
11739 	return true;
11740 }
11741 
11742 
11743 static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
11744 {
11745 	struct bpf_verifier_state_list *new_sl;
11746 	struct bpf_verifier_state_list *sl, **pprev;
11747 	struct bpf_verifier_state *cur = env->cur_state, *new;
11748 	int i, j, err, states_cnt = 0;
11749 	bool add_new_state = env->test_state_freq ? true : false;
11750 
11751 	cur->last_insn_idx = env->prev_insn_idx;
11752 	if (!env->insn_aux_data[insn_idx].prune_point)
11753 		/* this 'insn_idx' instruction wasn't marked, so we will not
11754 		 * be doing state search here
11755 		 */
11756 		return 0;
11757 
11758 	/* bpf progs typically have a pruning point every 4 instructions
11759 	 * http://vger.kernel.org/bpfconf2019.html#session-1
11760 	 * Do not add new state for future pruning if the verifier hasn't seen
11761 	 * at least 2 jumps and at least 8 instructions.
11762 	 * This heuristic helps decrease the 'total_states' and 'peak_states' metrics.
11763 	 * In tests that amounts to up to a 50% reduction in total verifier
11764 	 * memory consumption and a 20% verifier time speedup.
11765 	 */
11766 	if (env->jmps_processed - env->prev_jmps_processed >= 2 &&
11767 	    env->insn_processed - env->prev_insn_processed >= 8)
11768 		add_new_state = true;
11769 
11770 	pprev = explored_state(env, insn_idx);
11771 	sl = *pprev;
11772 
11773 	clean_live_states(env, insn_idx, cur);
11774 
11775 	while (sl) {
11776 		states_cnt++;
11777 		if (sl->state.insn_idx != insn_idx)
11778 			goto next;
11779 
11780 		if (sl->state.branches) {
11781 			struct bpf_func_state *frame = sl->state.frame[sl->state.curframe];
11782 
11783 			if (frame->in_async_callback_fn &&
11784 			    frame->async_entry_cnt != cur->frame[cur->curframe]->async_entry_cnt) {
11785 				/* Different async_entry_cnt means that the verifier is
11786 				 * processing another entry into async callback.
11787 				 * Seeing the same state is not an indication of infinite
11788 				 * loop or infinite recursion.
11789 				 * But finding the same state doesn't mean that it's safe
11790 				 * to stop processing the current state. The previous state
11791 				 * hasn't yet reached bpf_exit, since state.branches > 0.
11792 				 * Checking in_async_callback_fn alone is not enough either,
11793 				 * since the verifier still needs to catch infinite loops
11794 				 * inside async callbacks.
11795 				 */
11796 			} else if (states_maybe_looping(&sl->state, cur) &&
11797 				   states_equal(env, &sl->state, cur)) {
11798 				verbose_linfo(env, insn_idx, "; ");
11799 				verbose(env, "infinite loop detected at insn %d\n", insn_idx);
11800 				return -EINVAL;
11801 			}
11802 			/* if the verifier is processing a loop, avoid adding new state
11803 			 * too often, since different loop iterations have distinct
11804 			 * states and may not help future pruning.
11805 			 * This threshold shouldn't be too low to make sure that
11806 			 * a loop with large bound will be rejected quickly.
11807 			 * The most abusive loop will be:
11808 			 * r1 += 1
11809 			 * if r1 < 1000000 goto pc-2
11810 			 * 1M insn_processed limit / 100 == 10k peak states.
11811 			 * This threshold shouldn't be too high either, since states
11812 			 * at the end of the loop are likely to be useful in pruning.
11813 			 */
11814 			if (env->jmps_processed - env->prev_jmps_processed < 20 &&
11815 			    env->insn_processed - env->prev_insn_processed < 100)
11816 				add_new_state = false;
11817 			goto miss;
11818 		}
11819 		if (states_equal(env, &sl->state, cur)) {
11820 			sl->hit_cnt++;
11821 			/* reached equivalent register/stack state,
11822 			 * prune the search.
11823 			 * Registers read by the continuation are read by us.
11824 			 * If we have any write marks in env->cur_state, they
11825 			 * will prevent corresponding reads in the continuation
11826 			 * from reaching our parent (an explored_state).  Our
11827 			 * own state will get the read marks recorded, but
11828 			 * they'll be immediately forgotten as we're pruning
11829 			 * this state and will pop a new one.
11830 			 */
11831 			err = propagate_liveness(env, &sl->state, cur);
11832 
11833 			/* if previous state reached the exit with precision and
11834 			 * current state is equivalent to it (except precision marks)
11835 			 * the precision needs to be propagated back in
11836 			 * the current state.
11837 			 */
11838 			err = err ? : push_jmp_history(env, cur);
11839 			err = err ? : propagate_precision(env, &sl->state);
11840 			if (err)
11841 				return err;
11842 			return 1;
11843 		}
11844 miss:
11845 		/* when a new state is not going to be added, do not increase the miss count.
11846 		 * Otherwise several loop iterations will remove the state
11847 		 * recorded earlier. The goal of these heuristics is to have
11848 		 * states from some iterations of the loop (some in the beginning
11849 		 * and some at the end) to help pruning.
11850 		 */
11851 		if (add_new_state)
11852 			sl->miss_cnt++;
11853 		/* heuristic to determine whether this state is beneficial
11854 		 * to keep checking from state equivalence point of view.
11855 		 * Higher numbers increase max_states_per_insn and verification time,
11856 		 * but do not meaningfully decrease insn_processed.
11857 		 */
11858 		if (sl->miss_cnt > sl->hit_cnt * 3 + 3) {
11859 			/* the state is unlikely to be useful. Remove it to
11860 			 * speed up verification
11861 			 */
11862 			*pprev = sl->next;
11863 			if (sl->state.frame[0]->regs[0].live & REG_LIVE_DONE) {
11864 				u32 br = sl->state.branches;
11865 
11866 				WARN_ONCE(br,
11867 					  "BUG live_done but branches_to_explore %d\n",
11868 					  br);
11869 				free_verifier_state(&sl->state, false);
11870 				kfree(sl);
11871 				env->peak_states--;
11872 			} else {
11873 				/* cannot free this state, since parentage chain may
11874 			 * walk it later. Add it to the free_list instead to
11875 				 * be freed at the end of verification
11876 				 */
11877 				sl->next = env->free_list;
11878 				env->free_list = sl;
11879 			}
11880 			sl = *pprev;
11881 			continue;
11882 		}
11883 next:
11884 		pprev = &sl->next;
11885 		sl = *pprev;
11886 	}
11887 
11888 	if (env->max_states_per_insn < states_cnt)
11889 		env->max_states_per_insn = states_cnt;
11890 
11891 	if (!env->bpf_capable && states_cnt > BPF_COMPLEXITY_LIMIT_STATES)
11892 		return push_jmp_history(env, cur);
11893 
11894 	if (!add_new_state)
11895 		return push_jmp_history(env, cur);
11896 
11897 	/* There were no equivalent states, remember the current one.
11898 	 * Technically the current state is not proven to be safe yet,
11899 	 * but it will either reach the outermost bpf_exit (which means it's safe)
11900 	 * or it will be rejected. When there are no loops the verifier won't be
11901 	 * seeing this tuple (frame[0].callsite, frame[1].callsite, .. insn_idx)
11902 	 * again on the way to bpf_exit.
11903 	 * When looping the sl->state.branches will be > 0 and this state
11904 	 * will not be considered for equivalence until branches == 0.
11905 	 */
11906 	new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL);
11907 	if (!new_sl)
11908 		return -ENOMEM;
11909 	env->total_states++;
11910 	env->peak_states++;
11911 	env->prev_jmps_processed = env->jmps_processed;
11912 	env->prev_insn_processed = env->insn_processed;
11913 
11914 	/* add new state to the head of linked list */
11915 	new = &new_sl->state;
11916 	err = copy_verifier_state(new, cur);
11917 	if (err) {
11918 		free_verifier_state(new, false);
11919 		kfree(new_sl);
11920 		return err;
11921 	}
11922 	new->insn_idx = insn_idx;
11923 	WARN_ONCE(new->branches != 1,
11924 		  "BUG is_state_visited:branches_to_explore=%d insn %d\n", new->branches, insn_idx);
11925 
11926 	cur->parent = new;
11927 	cur->first_insn_idx = insn_idx;
11928 	clear_jmp_history(cur);
11929 	new_sl->next = *explored_state(env, insn_idx);
11930 	*explored_state(env, insn_idx) = new_sl;
11931 	/* connect new state to parentage chain. Current frame needs all
11932 	 * registers connected. Only r6 - r9 of the callers are alive (pushed
11933 	 * to the stack implicitly by JITs) so in callers' frames connect just
11934 	 * r6 - r9 as an optimization. Callers will have r1 - r5 connected to
11935 	 * the state of the call instruction (with WRITTEN set), and r0 comes
11936 	 * from callee with its full parentage chain, anyway.
11937 	 */
11938 	/* clear write marks in current state: the writes we did are not writes
11939 	 * our child did, so they don't screen off its reads from us.
11940 	 * (There are no read marks in current state, because reads always mark
11941 	 * their parent and current state never has children yet.  Only
11942 	 * explored_states can get read marks.)
11943 	 */
11944 	for (j = 0; j <= cur->curframe; j++) {
11945 		for (i = j < cur->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++)
11946 			cur->frame[j]->regs[i].parent = &new->frame[j]->regs[i];
11947 		for (i = 0; i < BPF_REG_FP; i++)
11948 			cur->frame[j]->regs[i].live = REG_LIVE_NONE;
11949 	}
11950 
11951 	/* all stack frames are accessible from callee, clear them all */
11952 	for (j = 0; j <= cur->curframe; j++) {
11953 		struct bpf_func_state *frame = cur->frame[j];
11954 		struct bpf_func_state *newframe = new->frame[j];
11955 
11956 		for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++) {
11957 			frame->stack[i].spilled_ptr.live = REG_LIVE_NONE;
11958 			frame->stack[i].spilled_ptr.parent =
11959 						&newframe->stack[i].spilled_ptr;
11960 		}
11961 	}
11962 	return 0;
11963 }
11964 
11965 /* Return true if it's OK to have the same insn return a different type. */
11966 static bool reg_type_mismatch_ok(enum bpf_reg_type type)
11967 {
11968 	switch (base_type(type)) {
11969 	case PTR_TO_CTX:
11970 	case PTR_TO_SOCKET:
11971 	case PTR_TO_SOCK_COMMON:
11972 	case PTR_TO_TCP_SOCK:
11973 	case PTR_TO_XDP_SOCK:
11974 	case PTR_TO_BTF_ID:
11975 		return false;
11976 	default:
11977 		return true;
11978 	}
11979 }
11980 
11981 /* If an instruction was previously used with particular pointer types, then we
11982  * need to be careful to avoid cases such as the below, where it may be ok
11983  * for one branch to access the pointer, but not ok for the other branch:
11984  *
11985  * R1 = sock_ptr
11986  * goto X;
11987  * ...
11988  * R1 = some_other_valid_ptr;
11989  * goto X;
11990  * ...
11991  * R2 = *(u32 *)(R1 + 0);
11992  */
11993 static bool reg_type_mismatch(enum bpf_reg_type src, enum bpf_reg_type prev)
11994 {
11995 	return src != prev && (!reg_type_mismatch_ok(src) ||
11996 			       !reg_type_mismatch_ok(prev));
11997 }
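
/* Hypothetical rejected sequence (sketch only, pseudo BPF asm):
 *
 *     // path A: r1 still holds the context pointer (PTR_TO_CTX)
 *     goto L
 *     ...
 *     // path B:
 *     r1 = r10
 *     r1 += -8                // r1 is now PTR_TO_STACK
 *     goto L
 * L:  r2 = *(u32 *)(r1 + 0)   // same insn reached with ctx and stack
 *                             // pointers -> "same insn cannot be used
 *                             // with different pointers"
 */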
11998 
11999 static int do_check(struct bpf_verifier_env *env)
12000 {
12001 	bool pop_log = !(env->log.level & BPF_LOG_LEVEL2);
12002 	struct bpf_verifier_state *state = env->cur_state;
12003 	struct bpf_insn *insns = env->prog->insnsi;
12004 	struct bpf_reg_state *regs;
12005 	int insn_cnt = env->prog->len;
12006 	bool do_print_state = false;
12007 	int prev_insn_idx = -1;
12008 
12009 	for (;;) {
12010 		struct bpf_insn *insn;
12011 		u8 class;
12012 		int err;
12013 
12014 		env->prev_insn_idx = prev_insn_idx;
12015 		if (env->insn_idx >= insn_cnt) {
12016 			verbose(env, "invalid insn idx %d insn_cnt %d\n",
12017 				env->insn_idx, insn_cnt);
12018 			return -EFAULT;
12019 		}
12020 
12021 		insn = &insns[env->insn_idx];
12022 		class = BPF_CLASS(insn->code);
12023 
12024 		if (++env->insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) {
12025 			verbose(env,
12026 				"BPF program is too large. Processed %d insn\n",
12027 				env->insn_processed);
12028 			return -E2BIG;
12029 		}
12030 
12031 		err = is_state_visited(env, env->insn_idx);
12032 		if (err < 0)
12033 			return err;
12034 		if (err == 1) {
12035 			/* found equivalent state, can prune the search */
12036 			if (env->log.level & BPF_LOG_LEVEL) {
12037 				if (do_print_state)
12038 					verbose(env, "\nfrom %d to %d%s: safe\n",
12039 						env->prev_insn_idx, env->insn_idx,
12040 						env->cur_state->speculative ?
12041 						" (speculative execution)" : "");
12042 				else
12043 					verbose(env, "%d: safe\n", env->insn_idx);
12044 			}
12045 			goto process_bpf_exit;
12046 		}
12047 
12048 		if (signal_pending(current))
12049 			return -EAGAIN;
12050 
12051 		if (need_resched())
12052 			cond_resched();
12053 
12054 		if (env->log.level & BPF_LOG_LEVEL2 && do_print_state) {
12055 			verbose(env, "\nfrom %d to %d%s:",
12056 				env->prev_insn_idx, env->insn_idx,
12057 				env->cur_state->speculative ?
12058 				" (speculative execution)" : "");
12059 			print_verifier_state(env, state->frame[state->curframe], true);
12060 			do_print_state = false;
12061 		}
12062 
12063 		if (env->log.level & BPF_LOG_LEVEL) {
12064 			const struct bpf_insn_cbs cbs = {
12065 				.cb_call	= disasm_kfunc_name,
12066 				.cb_print	= verbose,
12067 				.private_data	= env,
12068 			};
12069 
12070 			if (verifier_state_scratched(env))
12071 				print_insn_state(env, state->frame[state->curframe]);
12072 
12073 			verbose_linfo(env, env->insn_idx, "; ");
12074 			env->prev_log_len = env->log.len_used;
12075 			verbose(env, "%d: ", env->insn_idx);
12076 			print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
12077 			env->prev_insn_print_len = env->log.len_used - env->prev_log_len;
12078 			env->prev_log_len = env->log.len_used;
12079 		}
12080 
12081 		if (bpf_prog_is_dev_bound(env->prog->aux)) {
12082 			err = bpf_prog_offload_verify_insn(env, env->insn_idx,
12083 							   env->prev_insn_idx);
12084 			if (err)
12085 				return err;
12086 		}
12087 
12088 		regs = cur_regs(env);
12089 		sanitize_mark_insn_seen(env);
12090 		prev_insn_idx = env->insn_idx;
12091 
12092 		if (class == BPF_ALU || class == BPF_ALU64) {
12093 			err = check_alu_op(env, insn);
12094 			if (err)
12095 				return err;
12096 
12097 		} else if (class == BPF_LDX) {
12098 			enum bpf_reg_type *prev_src_type, src_reg_type;
12099 
12100 			/* check for reserved fields is already done */
12101 
12102 			/* check src operand */
12103 			err = check_reg_arg(env, insn->src_reg, SRC_OP);
12104 			if (err)
12105 				return err;
12106 
12107 			err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
12108 			if (err)
12109 				return err;
12110 
12111 			src_reg_type = regs[insn->src_reg].type;
12112 
12113 			/* check that memory (src_reg + off) is readable,
12114 			 * the state of dst_reg will be updated by this func
12115 			 */
12116 			err = check_mem_access(env, env->insn_idx, insn->src_reg,
12117 					       insn->off, BPF_SIZE(insn->code),
12118 					       BPF_READ, insn->dst_reg, false);
12119 			if (err)
12120 				return err;
12121 
12122 			prev_src_type = &env->insn_aux_data[env->insn_idx].ptr_type;
12123 
12124 			if (*prev_src_type == NOT_INIT) {
12125 				/* saw a valid insn
12126 				 * dst_reg = *(u32 *)(src_reg + off)
12127 				 * save type to validate intersecting paths
12128 				 */
12129 				*prev_src_type = src_reg_type;
12130 
12131 			} else if (reg_type_mismatch(src_reg_type, *prev_src_type)) {
12132 				/* An abusive program is trying to use the same insn
12133 				 * dst_reg = *(u32*) (src_reg + off)
12134 				 * with different pointer types:
12135 				 * src_reg == ctx in one branch and
12136 				 * src_reg == stack|map in some other branch.
12137 				 * Reject it.
12138 				 */
12139 				verbose(env, "same insn cannot be used with different pointers\n");
12140 				return -EINVAL;
12141 			}
12142 
12143 		} else if (class == BPF_STX) {
12144 			enum bpf_reg_type *prev_dst_type, dst_reg_type;
12145 
12146 			if (BPF_MODE(insn->code) == BPF_ATOMIC) {
12147 				err = check_atomic(env, env->insn_idx, insn);
12148 				if (err)
12149 					return err;
12150 				env->insn_idx++;
12151 				continue;
12152 			}
12153 
12154 			if (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0) {
12155 				verbose(env, "BPF_STX uses reserved fields\n");
12156 				return -EINVAL;
12157 			}
12158 
12159 			/* check src1 operand */
12160 			err = check_reg_arg(env, insn->src_reg, SRC_OP);
12161 			if (err)
12162 				return err;
12163 			/* check src2 operand */
12164 			err = check_reg_arg(env, insn->dst_reg, SRC_OP);
12165 			if (err)
12166 				return err;
12167 
12168 			dst_reg_type = regs[insn->dst_reg].type;
12169 
12170 			/* check that memory (dst_reg + off) is writeable */
12171 			err = check_mem_access(env, env->insn_idx, insn->dst_reg,
12172 					       insn->off, BPF_SIZE(insn->code),
12173 					       BPF_WRITE, insn->src_reg, false);
12174 			if (err)
12175 				return err;
12176 
12177 			prev_dst_type = &env->insn_aux_data[env->insn_idx].ptr_type;
12178 
12179 			if (*prev_dst_type == NOT_INIT) {
12180 				*prev_dst_type = dst_reg_type;
12181 			} else if (reg_type_mismatch(dst_reg_type, *prev_dst_type)) {
12182 				verbose(env, "same insn cannot be used with different pointers\n");
12183 				return -EINVAL;
12184 			}
12185 
12186 		} else if (class == BPF_ST) {
12187 			if (BPF_MODE(insn->code) != BPF_MEM ||
12188 			    insn->src_reg != BPF_REG_0) {
12189 				verbose(env, "BPF_ST uses reserved fields\n");
12190 				return -EINVAL;
12191 			}
12192 			/* check src operand */
12193 			err = check_reg_arg(env, insn->dst_reg, SRC_OP);
12194 			if (err)
12195 				return err;
12196 
12197 			if (is_ctx_reg(env, insn->dst_reg)) {
12198 				verbose(env, "BPF_ST stores into R%d %s is not allowed\n",
12199 					insn->dst_reg,
12200 					reg_type_str(env, reg_state(env, insn->dst_reg)->type));
12201 				return -EACCES;
12202 			}
12203 
12204 			/* check that memory (dst_reg + off) is writeable */
12205 			err = check_mem_access(env, env->insn_idx, insn->dst_reg,
12206 					       insn->off, BPF_SIZE(insn->code),
12207 					       BPF_WRITE, -1, false);
12208 			if (err)
12209 				return err;
12210 
12211 		} else if (class == BPF_JMP || class == BPF_JMP32) {
12212 			u8 opcode = BPF_OP(insn->code);
12213 
12214 			env->jmps_processed++;
12215 			if (opcode == BPF_CALL) {
12216 				if (BPF_SRC(insn->code) != BPF_K ||
12217 				    (insn->src_reg != BPF_PSEUDO_KFUNC_CALL
12218 				     && insn->off != 0) ||
12219 				    (insn->src_reg != BPF_REG_0 &&
12220 				     insn->src_reg != BPF_PSEUDO_CALL &&
12221 				     insn->src_reg != BPF_PSEUDO_KFUNC_CALL) ||
12222 				    insn->dst_reg != BPF_REG_0 ||
12223 				    class == BPF_JMP32) {
12224 					verbose(env, "BPF_CALL uses reserved fields\n");
12225 					return -EINVAL;
12226 				}
12227 
12228 				if (env->cur_state->active_spin_lock &&
12229 				    (insn->src_reg == BPF_PSEUDO_CALL ||
12230 				     insn->imm != BPF_FUNC_spin_unlock)) {
12231 					verbose(env, "function calls are not allowed while holding a lock\n");
12232 					return -EINVAL;
12233 				}
12234 				if (insn->src_reg == BPF_PSEUDO_CALL)
12235 					err = check_func_call(env, insn, &env->insn_idx);
12236 				else if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL)
12237 					err = check_kfunc_call(env, insn, &env->insn_idx);
12238 				else
12239 					err = check_helper_call(env, insn, &env->insn_idx);
12240 				if (err)
12241 					return err;
12242 			} else if (opcode == BPF_JA) {
12243 				if (BPF_SRC(insn->code) != BPF_K ||
12244 				    insn->imm != 0 ||
12245 				    insn->src_reg != BPF_REG_0 ||
12246 				    insn->dst_reg != BPF_REG_0 ||
12247 				    class == BPF_JMP32) {
12248 					verbose(env, "BPF_JA uses reserved fields\n");
12249 					return -EINVAL;
12250 				}
12251 
12252 				env->insn_idx += insn->off + 1;
12253 				continue;
12254 
12255 			} else if (opcode == BPF_EXIT) {
12256 				if (BPF_SRC(insn->code) != BPF_K ||
12257 				    insn->imm != 0 ||
12258 				    insn->src_reg != BPF_REG_0 ||
12259 				    insn->dst_reg != BPF_REG_0 ||
12260 				    class == BPF_JMP32) {
12261 					verbose(env, "BPF_EXIT uses reserved fields\n");
12262 					return -EINVAL;
12263 				}
12264 
12265 				if (env->cur_state->active_spin_lock) {
12266 					verbose(env, "bpf_spin_unlock is missing\n");
12267 					return -EINVAL;
12268 				}
12269 
12270 				if (state->curframe) {
12271 					/* exit from nested function */
12272 					err = prepare_func_exit(env, &env->insn_idx);
12273 					if (err)
12274 						return err;
12275 					do_print_state = true;
12276 					continue;
12277 				}
12278 
12279 				err = check_reference_leak(env);
12280 				if (err)
12281 					return err;
12282 
12283 				err = check_return_code(env);
12284 				if (err)
12285 					return err;
12286 process_bpf_exit:
12287 				mark_verifier_state_scratched(env);
12288 				update_branch_counts(env, env->cur_state);
12289 				err = pop_stack(env, &prev_insn_idx,
12290 						&env->insn_idx, pop_log);
12291 				if (err < 0) {
12292 					if (err != -ENOENT)
12293 						return err;
12294 					break;
12295 				} else {
12296 					do_print_state = true;
12297 					continue;
12298 				}
12299 			} else {
12300 				err = check_cond_jmp_op(env, insn, &env->insn_idx);
12301 				if (err)
12302 					return err;
12303 			}
12304 		} else if (class == BPF_LD) {
12305 			u8 mode = BPF_MODE(insn->code);
12306 
12307 			if (mode == BPF_ABS || mode == BPF_IND) {
12308 				err = check_ld_abs(env, insn);
12309 				if (err)
12310 					return err;
12311 
12312 			} else if (mode == BPF_IMM) {
12313 				err = check_ld_imm(env, insn);
12314 				if (err)
12315 					return err;
12316 
12317 				env->insn_idx++;
12318 				sanitize_mark_insn_seen(env);
12319 			} else {
12320 				verbose(env, "invalid BPF_LD mode\n");
12321 				return -EINVAL;
12322 			}
12323 		} else {
12324 			verbose(env, "unknown insn class %d\n", class);
12325 			return -EINVAL;
12326 		}
12327 
12328 		env->insn_idx++;
12329 	}
12330 
12331 	return 0;
12332 }
12333 
12334 static int find_btf_percpu_datasec(struct btf *btf)
12335 {
12336 	const struct btf_type *t;
12337 	const char *tname;
12338 	int i, n;
12339 
12340 	/*
12341 	 * Both vmlinux and each module have their own ".data..percpu"
12342 	 * DATASEC in BTF. So in the module case, we need to skip vmlinux BTF
12343 	 * types and look only at the module's own BTF types.
12344 	 */
12345 	n = btf_nr_types(btf);
12346 	if (btf_is_module(btf))
12347 		i = btf_nr_types(btf_vmlinux);
12348 	else
12349 		i = 1;
12350 
12351 	for (; i < n; i++) {
12352 		t = btf_type_by_id(btf, i);
12353 		if (BTF_INFO_KIND(t->info) != BTF_KIND_DATASEC)
12354 			continue;
12355 
12356 		tname = btf_name_by_offset(btf, t->name_off);
12357 		if (!strcmp(tname, ".data..percpu"))
12358 			return i;
12359 	}
12360 
12361 	return -ENOENT;
12362 }
12363 
12364 /* replace pseudo btf_id with kernel symbol address */
12365 static int check_pseudo_btf_id(struct bpf_verifier_env *env,
12366 			       struct bpf_insn *insn,
12367 			       struct bpf_insn_aux_data *aux)
12368 {
12369 	const struct btf_var_secinfo *vsi;
12370 	const struct btf_type *datasec;
12371 	struct btf_mod_pair *btf_mod;
12372 	const struct btf_type *t;
12373 	const char *sym_name;
12374 	bool percpu = false;
12375 	u32 type, id = insn->imm;
12376 	struct btf *btf;
12377 	s32 datasec_id;
12378 	u64 addr;
12379 	int i, btf_fd, err;
12380 
12381 	btf_fd = insn[1].imm;
12382 	if (btf_fd) {
12383 		btf = btf_get_by_fd(btf_fd);
12384 		if (IS_ERR(btf)) {
12385 			verbose(env, "invalid module BTF object FD specified.\n");
12386 			return -EINVAL;
12387 		}
12388 	} else {
12389 		if (!btf_vmlinux) {
12390 			verbose(env, "kernel is missing BTF, make sure CONFIG_DEBUG_INFO_BTF=y is specified in Kconfig.\n");
12391 			return -EINVAL;
12392 		}
12393 		btf = btf_vmlinux;
12394 		btf_get(btf);
12395 	}
12396 
12397 	t = btf_type_by_id(btf, id);
12398 	if (!t) {
12399 		verbose(env, "ldimm64 insn specifies invalid btf_id %d.\n", id);
12400 		err = -ENOENT;
12401 		goto err_put;
12402 	}
12403 
12404 	if (!btf_type_is_var(t)) {
12405 		verbose(env, "pseudo btf_id %d in ldimm64 isn't KIND_VAR.\n", id);
12406 		err = -EINVAL;
12407 		goto err_put;
12408 	}
12409 
12410 	sym_name = btf_name_by_offset(btf, t->name_off);
12411 	addr = kallsyms_lookup_name(sym_name);
12412 	if (!addr) {
12413 		verbose(env, "ldimm64 failed to find the address for kernel symbol '%s'.\n",
12414 			sym_name);
12415 		err = -ENOENT;
12416 		goto err_put;
12417 	}
12418 
12419 	datasec_id = find_btf_percpu_datasec(btf);
12420 	if (datasec_id > 0) {
12421 		datasec = btf_type_by_id(btf, datasec_id);
12422 		for_each_vsi(i, datasec, vsi) {
12423 			if (vsi->type == id) {
12424 				percpu = true;
12425 				break;
12426 			}
12427 		}
12428 	}
12429 
12430 	insn[0].imm = (u32)addr;
12431 	insn[1].imm = addr >> 32;
12432 
12433 	type = t->type;
12434 	t = btf_type_skip_modifiers(btf, type, NULL);
12435 	if (percpu) {
12436 		aux->btf_var.reg_type = PTR_TO_BTF_ID | MEM_PERCPU;
12437 		aux->btf_var.btf = btf;
12438 		aux->btf_var.btf_id = type;
12439 	} else if (!btf_type_is_struct(t)) {
12440 		const struct btf_type *ret;
12441 		const char *tname;
12442 		u32 tsize;
12443 
12444 		/* resolve the type size of ksym. */
12445 		ret = btf_resolve_size(btf, t, &tsize);
12446 		if (IS_ERR(ret)) {
12447 			tname = btf_name_by_offset(btf, t->name_off);
12448 			verbose(env, "ldimm64 unable to resolve the size of type '%s': %ld\n",
12449 				tname, PTR_ERR(ret));
12450 			err = -EINVAL;
12451 			goto err_put;
12452 		}
12453 		aux->btf_var.reg_type = PTR_TO_MEM | MEM_RDONLY;
12454 		aux->btf_var.mem_size = tsize;
12455 	} else {
12456 		aux->btf_var.reg_type = PTR_TO_BTF_ID;
12457 		aux->btf_var.btf = btf;
12458 		aux->btf_var.btf_id = type;
12459 	}
12460 
12461 	/* check whether we recorded this BTF (and maybe module) already */
12462 	for (i = 0; i < env->used_btf_cnt; i++) {
12463 		if (env->used_btfs[i].btf == btf) {
12464 			btf_put(btf);
12465 			return 0;
12466 		}
12467 	}
12468 
12469 	if (env->used_btf_cnt >= MAX_USED_BTFS) {
12470 		err = -E2BIG;
12471 		goto err_put;
12472 	}
12473 
12474 	btf_mod = &env->used_btfs[env->used_btf_cnt];
12475 	btf_mod->btf = btf;
12476 	btf_mod->module = NULL;
12477 
12478 	/* if we reference variables from kernel module, bump its refcount */
12479 	if (btf_is_module(btf)) {
12480 		btf_mod->module = btf_try_get_module(btf);
12481 		if (!btf_mod->module) {
12482 			err = -ENXIO;
12483 			goto err_put;
12484 		}
12485 	}
12486 
12487 	env->used_btf_cnt++;
12488 
12489 	return 0;
12490 err_put:
12491 	btf_put(btf);
12492 	return err;
12493 }
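
/* Sketch of the BPF-side usage that produces such an ldimm64 (assuming the
 * usual libbpf conventions; the symbol name is only an example):
 *
 *     extern const int bpf_prog_active __ksym;
 *
 * libbpf emits a two-insn BPF_LD_IMM64 with src_reg == BPF_PSEUDO_BTF_ID,
 * insn[0].imm holding the variable's btf_id and insn[1].imm the BTF object
 * FD (0 for vmlinux). The code above patches in the kernel address and
 * records the resulting reg_type, e.g. PTR_TO_BTF_ID | MEM_PERCPU for a
 * per-cpu variable.
 */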
12494 
12495 static int check_map_prealloc(struct bpf_map *map)
12496 {
12497 	return (map->map_type != BPF_MAP_TYPE_HASH &&
12498 		map->map_type != BPF_MAP_TYPE_PERCPU_HASH &&
12499 		map->map_type != BPF_MAP_TYPE_HASH_OF_MAPS) ||
12500 		!(map->map_flags & BPF_F_NO_PREALLOC);
12501 }
12502 
12503 static bool is_tracing_prog_type(enum bpf_prog_type type)
12504 {
12505 	switch (type) {
12506 	case BPF_PROG_TYPE_KPROBE:
12507 	case BPF_PROG_TYPE_TRACEPOINT:
12508 	case BPF_PROG_TYPE_PERF_EVENT:
12509 	case BPF_PROG_TYPE_RAW_TRACEPOINT:
12510 		return true;
12511 	default:
12512 		return false;
12513 	}
12514 }
12515 
12516 static bool is_preallocated_map(struct bpf_map *map)
12517 {
12518 	if (!check_map_prealloc(map))
12519 		return false;
12520 	if (map->inner_map_meta && !check_map_prealloc(map->inner_map_meta))
12521 		return false;
12522 	return true;
12523 }
12524 
12525 static int check_map_prog_compatibility(struct bpf_verifier_env *env,
12526 					struct bpf_map *map,
12527 					struct bpf_prog *prog)
12528 
12529 {
12530 	enum bpf_prog_type prog_type = resolve_prog_type(prog);
12531 	/*
12532 	 * Validate that trace type programs use preallocated hash maps.
12533 	 *
12534 	 * For programs attached to PERF events this is mandatory as the
12535 	 * perf NMI can hit any arbitrary code sequence.
12536 	 *
12537 	 * All other trace types using non-preallocated hash maps are unsafe as
12538 	 * well because tracepoints or kprobes can be inside locked regions
12539 	 * of the memory allocator or at a place where a recursion into the
12540 	 * memory allocator would see inconsistent state.
12541 	 *
12542 	 * On RT enabled kernels run-time allocation of all trace type
12543 	 * programs is strictly prohibited due to lock type constraints. On
12544 	 * !RT kernels it is allowed for backwards compatibility reasons for
12545 	 * now, but warnings are emitted so developers are made aware of
12546 	 * the unsafety and can fix their programs before this is enforced.
12547 	 */
12548 	if (is_tracing_prog_type(prog_type) && !is_preallocated_map(map)) {
12549 		if (prog_type == BPF_PROG_TYPE_PERF_EVENT) {
12550 			verbose(env, "perf_event programs can only use preallocated hash map\n");
12551 			return -EINVAL;
12552 		}
12553 		if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
12554 			verbose(env, "trace type programs can only use preallocated hash map\n");
12555 			return -EINVAL;
12556 		}
12557 		WARN_ONCE(1, "trace type BPF program uses run-time allocation\n");
12558 		verbose(env, "trace type programs with run-time allocated hash maps are unsafe. Switch to preallocated hash maps.\n");
12559 	}
12560 
12561 	if (map_value_has_spin_lock(map)) {
12562 		if (prog_type == BPF_PROG_TYPE_SOCKET_FILTER) {
12563 			verbose(env, "socket filter progs cannot use bpf_spin_lock yet\n");
12564 			return -EINVAL;
12565 		}
12566 
12567 		if (is_tracing_prog_type(prog_type)) {
12568 			verbose(env, "tracing progs cannot use bpf_spin_lock yet\n");
12569 			return -EINVAL;
12570 		}
12571 
12572 		if (prog->aux->sleepable) {
12573 			verbose(env, "sleepable progs cannot use bpf_spin_lock yet\n");
12574 			return -EINVAL;
12575 		}
12576 	}
12577 
12578 	if (map_value_has_timer(map)) {
12579 		if (is_tracing_prog_type(prog_type)) {
12580 			verbose(env, "tracing progs cannot use bpf_timer yet\n");
12581 			return -EINVAL;
12582 		}
12583 	}
12584 
12585 	if ((bpf_prog_is_dev_bound(prog->aux) || bpf_map_is_dev_bound(map)) &&
12586 	    !bpf_offload_prog_map_match(prog, map)) {
12587 		verbose(env, "offload device mismatch between prog and map\n");
12588 		return -EINVAL;
12589 	}
12590 
12591 	if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
12592 		verbose(env, "bpf_struct_ops map cannot be used in prog\n");
12593 		return -EINVAL;
12594 	}
12595 
12596 	if (prog->aux->sleepable)
12597 		switch (map->map_type) {
12598 		case BPF_MAP_TYPE_HASH:
12599 		case BPF_MAP_TYPE_LRU_HASH:
12600 		case BPF_MAP_TYPE_ARRAY:
12601 		case BPF_MAP_TYPE_PERCPU_HASH:
12602 		case BPF_MAP_TYPE_PERCPU_ARRAY:
12603 		case BPF_MAP_TYPE_LRU_PERCPU_HASH:
12604 		case BPF_MAP_TYPE_ARRAY_OF_MAPS:
12605 		case BPF_MAP_TYPE_HASH_OF_MAPS:
12606 			if (!is_preallocated_map(map)) {
12607 				verbose(env,
12608 					"Sleepable programs can only use preallocated maps\n");
12609 				return -EINVAL;
12610 			}
12611 			break;
12612 		case BPF_MAP_TYPE_RINGBUF:
12613 		case BPF_MAP_TYPE_INODE_STORAGE:
12614 		case BPF_MAP_TYPE_SK_STORAGE:
12615 		case BPF_MAP_TYPE_TASK_STORAGE:
12616 			break;
12617 		default:
12618 			verbose(env,
12619 				"Sleepable programs can only use array, hash, and ringbuf maps\n");
12620 			return -EINVAL;
12621 		}
12622 
12623 	return 0;
12624 }
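
/* Illustrative libbpf-style map definitions (a sketch, not part of this
 * file), showing the distinction checked above:
 *
 *     struct {
 *             __uint(type, BPF_MAP_TYPE_HASH);
 *             __uint(max_entries, 1024);
 *             __type(key, __u32);
 *             __type(value, __u64);
 *     } prealloc_map SEC(".maps");        // preallocated: ok for tracing
 *
 *     struct {
 *             __uint(type, BPF_MAP_TYPE_HASH);
 *             __uint(map_flags, BPF_F_NO_PREALLOC);
 *             __uint(max_entries, 1024);
 *             __type(key, __u32);
 *             __type(value, __u64);
 *     } dyn_map SEC(".maps");             // rejected for perf_event progs,
 *                                         // warned about for other tracing
 */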
12625 
12626 static bool bpf_map_is_cgroup_storage(struct bpf_map *map)
12627 {
12628 	return (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE ||
12629 		map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE);
12630 }
12631 
12632 /* find and rewrite pseudo imm in ld_imm64 instructions:
12633  *
12634  * 1. if it accesses map FD, replace it with actual map pointer.
12635  * 2. if it accesses btf_id of a VAR, replace it with pointer to the var.
12636  *
12637  * NOTE: btf_vmlinux is required for converting pseudo btf_id.
12638  */
12639 static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
12640 {
12641 	struct bpf_insn *insn = env->prog->insnsi;
12642 	int insn_cnt = env->prog->len;
12643 	int i, j, err;
12644 
12645 	err = bpf_prog_calc_tag(env->prog);
12646 	if (err)
12647 		return err;
12648 
12649 	for (i = 0; i < insn_cnt; i++, insn++) {
12650 		if (BPF_CLASS(insn->code) == BPF_LDX &&
12651 		    (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) {
12652 			verbose(env, "BPF_LDX uses reserved fields\n");
12653 			return -EINVAL;
12654 		}
12655 
12656 		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) {
12657 			struct bpf_insn_aux_data *aux;
12658 			struct bpf_map *map;
12659 			struct fd f;
12660 			u64 addr;
12661 			u32 fd;
12662 
12663 			if (i == insn_cnt - 1 || insn[1].code != 0 ||
12664 			    insn[1].dst_reg != 0 || insn[1].src_reg != 0 ||
12665 			    insn[1].off != 0) {
12666 				verbose(env, "invalid bpf_ld_imm64 insn\n");
12667 				return -EINVAL;
12668 			}
12669 
12670 			if (insn[0].src_reg == 0)
12671 				/* valid generic load 64-bit imm */
12672 				goto next_insn;
12673 
12674 			if (insn[0].src_reg == BPF_PSEUDO_BTF_ID) {
12675 				aux = &env->insn_aux_data[i];
12676 				err = check_pseudo_btf_id(env, insn, aux);
12677 				if (err)
12678 					return err;
12679 				goto next_insn;
12680 			}
12681 
12682 			if (insn[0].src_reg == BPF_PSEUDO_FUNC) {
12683 				aux = &env->insn_aux_data[i];
12684 				aux->ptr_type = PTR_TO_FUNC;
12685 				goto next_insn;
12686 			}
12687 
12688 			/* In final convert_pseudo_ld_imm64() step, this is
12689 			 * converted into regular 64-bit imm load insn.
12690 			 */
12691 			switch (insn[0].src_reg) {
12692 			case BPF_PSEUDO_MAP_VALUE:
12693 			case BPF_PSEUDO_MAP_IDX_VALUE:
12694 				break;
12695 			case BPF_PSEUDO_MAP_FD:
12696 			case BPF_PSEUDO_MAP_IDX:
12697 				if (insn[1].imm == 0)
12698 					break;
12699 				fallthrough;
12700 			default:
12701 				verbose(env, "unrecognized bpf_ld_imm64 insn\n");
12702 				return -EINVAL;
12703 			}
12704 
12705 			switch (insn[0].src_reg) {
12706 			case BPF_PSEUDO_MAP_IDX_VALUE:
12707 			case BPF_PSEUDO_MAP_IDX:
12708 				if (bpfptr_is_null(env->fd_array)) {
12709 					verbose(env, "fd_idx without fd_array is invalid\n");
12710 					return -EPROTO;
12711 				}
12712 				if (copy_from_bpfptr_offset(&fd, env->fd_array,
12713 							    insn[0].imm * sizeof(fd),
12714 							    sizeof(fd)))
12715 					return -EFAULT;
12716 				break;
12717 			default:
12718 				fd = insn[0].imm;
12719 				break;
12720 			}
12721 
12722 			f = fdget(fd);
12723 			map = __bpf_map_get(f);
12724 			if (IS_ERR(map)) {
12725 				verbose(env, "fd %d is not pointing to valid bpf_map\n",
12726 					insn[0].imm);
12727 				return PTR_ERR(map);
12728 			}
12729 
12730 			err = check_map_prog_compatibility(env, map, env->prog);
12731 			if (err) {
12732 				fdput(f);
12733 				return err;
12734 			}
12735 
12736 			aux = &env->insn_aux_data[i];
12737 			if (insn[0].src_reg == BPF_PSEUDO_MAP_FD ||
12738 			    insn[0].src_reg == BPF_PSEUDO_MAP_IDX) {
12739 				addr = (unsigned long)map;
12740 			} else {
12741 				u32 off = insn[1].imm;
12742 
12743 				if (off >= BPF_MAX_VAR_OFF) {
12744 					verbose(env, "direct value offset of %u is not allowed\n", off);
12745 					fdput(f);
12746 					return -EINVAL;
12747 				}
12748 
12749 				if (!map->ops->map_direct_value_addr) {
12750 					verbose(env, "no direct value access support for this map type\n");
12751 					fdput(f);
12752 					return -EINVAL;
12753 				}
12754 
12755 				err = map->ops->map_direct_value_addr(map, &addr, off);
12756 				if (err) {
12757 					verbose(env, "invalid access to map value pointer, value_size=%u off=%u\n",
12758 						map->value_size, off);
12759 					fdput(f);
12760 					return err;
12761 				}
12762 
12763 				aux->map_off = off;
12764 				addr += off;
12765 			}
12766 
12767 			insn[0].imm = (u32)addr;
12768 			insn[1].imm = addr >> 32;
12769 
12770 			/* check whether we recorded this map already */
12771 			for (j = 0; j < env->used_map_cnt; j++) {
12772 				if (env->used_maps[j] == map) {
12773 					aux->map_index = j;
12774 					fdput(f);
12775 					goto next_insn;
12776 				}
12777 			}
12778 
12779 			if (env->used_map_cnt >= MAX_USED_MAPS) {
12780 				fdput(f);
12781 				return -E2BIG;
12782 			}
12783 
12784 			/* hold the map. If the program is rejected by verifier,
12785 			 * the map will be released by release_maps() or it
12786 			 * will be used by the valid program until it's unloaded
12787 			 * and all maps are released in free_used_maps()
12788 			 */
12789 			bpf_map_inc(map);
12790 
12791 			aux->map_index = env->used_map_cnt;
12792 			env->used_maps[env->used_map_cnt++] = map;
12793 
12794 			if (bpf_map_is_cgroup_storage(map) &&
12795 			    bpf_cgroup_storage_assign(env->prog->aux, map)) {
12796 				verbose(env, "only one cgroup storage of each type is allowed\n");
12797 				fdput(f);
12798 				return -EBUSY;
12799 			}
12800 
12801 			fdput(f);
12802 next_insn:
12803 			insn++;
12804 			i++;
12805 			continue;
12806 		}
12807 
12808 		/* Basic sanity check before we invest more work here. */
12809 		if (!bpf_opcode_in_insntable(insn->code)) {
12810 			verbose(env, "unknown opcode %02x\n", insn->code);
12811 			return -EINVAL;
12812 		}
12813 	}
12814 
12815 	/* now all pseudo BPF_LD_IMM64 instructions load valid
12816 	 * 'struct bpf_map *' into a register instead of user map_fd.
12817 	 * These pointers will be used later by verifier to validate map access.
12818 	 */
12819 	return 0;
12820 }
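
/* Sketch of the rewrite performed above (addresses are made up):
 *
 *     before:  r1 = map[fd 5]           // BPF_LD_IMM64, src_reg=BPF_PSEUDO_MAP_FD
 *     after:   r1 = 0xffff888123450000  // same two insns, the imm pair now
 *                                       // holds the 'struct bpf_map *' address
 *
 * convert_pseudo_ld_imm64() later clears src_reg, so JITs only ever see a
 * plain 64-bit immediate load.
 */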
12821 
12822 /* drop refcnt of maps used by the rejected program */
12823 static void release_maps(struct bpf_verifier_env *env)
12824 {
12825 	__bpf_free_used_maps(env->prog->aux, env->used_maps,
12826 			     env->used_map_cnt);
12827 }
12828 
12829 /* drop refcnt of maps used by the rejected program */
12830 static void release_btfs(struct bpf_verifier_env *env)
12831 {
12832 	__bpf_free_used_btfs(env->prog->aux, env->used_btfs,
12833 			     env->used_btf_cnt);
12834 }
12835 
12836 /* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */
12837 static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env)
12838 {
12839 	struct bpf_insn *insn = env->prog->insnsi;
12840 	int insn_cnt = env->prog->len;
12841 	int i;
12842 
12843 	for (i = 0; i < insn_cnt; i++, insn++) {
12844 		if (insn->code != (BPF_LD | BPF_IMM | BPF_DW))
12845 			continue;
12846 		if (insn->src_reg == BPF_PSEUDO_FUNC)
12847 			continue;
12848 		insn->src_reg = 0;
12849 	}
12850 }
12851 
12852 /* single env->prog->insnsi[off] instruction was replaced with the range
12853  * insnsi[off, off + cnt).  Adjust corresponding insn_aux_data by copying
12854  * [0, off) and [off, end) to new locations, so the patched range stays zero
12855  */
12856 static void adjust_insn_aux_data(struct bpf_verifier_env *env,
12857 				 struct bpf_insn_aux_data *new_data,
12858 				 struct bpf_prog *new_prog, u32 off, u32 cnt)
12859 {
12860 	struct bpf_insn_aux_data *old_data = env->insn_aux_data;
12861 	struct bpf_insn *insn = new_prog->insnsi;
12862 	u32 old_seen = old_data[off].seen;
12863 	u32 prog_len;
12864 	int i;
12865 
12866 	/* aux info at OFF always needs adjustment, no matter whether the fast path
12867 	 * (cnt == 1) is taken or not. There is no guarantee the INSN at OFF is the
12868 	 * original insn in the old prog.
12869 	 */
12870 	old_data[off].zext_dst = insn_has_def32(env, insn + off + cnt - 1);
12871 
12872 	if (cnt == 1)
12873 		return;
12874 	prog_len = new_prog->len;
12875 
12876 	memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
12877 	memcpy(new_data + off + cnt - 1, old_data + off,
12878 	       sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
12879 	for (i = off; i < off + cnt - 1; i++) {
12880 		/* Expand insni[off]'s seen count to the patched range. */
12881 		new_data[i].seen = old_seen;
12882 		new_data[i].zext_dst = insn_has_def32(env, insn + i);
12883 	}
12884 	env->insn_aux_data = new_data;
12885 	vfree(old_data);
12886 }
12887 
12888 static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len)
12889 {
12890 	int i;
12891 
12892 	if (len == 1)
12893 		return;
12894 	/* NOTE: fake 'exit' subprog should be updated as well. */
12895 	for (i = 0; i <= env->subprog_cnt; i++) {
12896 		if (env->subprog_info[i].start <= off)
12897 			continue;
12898 		env->subprog_info[i].start += len - 1;
12899 	}
12900 }
12901 
12902 static void adjust_poke_descs(struct bpf_prog *prog, u32 off, u32 len)
12903 {
12904 	struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab;
12905 	int i, sz = prog->aux->size_poke_tab;
12906 	struct bpf_jit_poke_descriptor *desc;
12907 
12908 	for (i = 0; i < sz; i++) {
12909 		desc = &tab[i];
12910 		if (desc->insn_idx <= off)
12911 			continue;
12912 		desc->insn_idx += len - 1;
12913 	}
12914 }
12915 
12916 static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off,
12917 					    const struct bpf_insn *patch, u32 len)
12918 {
12919 	struct bpf_prog *new_prog;
12920 	struct bpf_insn_aux_data *new_data = NULL;
12921 
12922 	if (len > 1) {
12923 		new_data = vzalloc(array_size(env->prog->len + len - 1,
12924 					      sizeof(struct bpf_insn_aux_data)));
12925 		if (!new_data)
12926 			return NULL;
12927 	}
12928 
12929 	new_prog = bpf_patch_insn_single(env->prog, off, patch, len);
12930 	if (IS_ERR(new_prog)) {
12931 		if (PTR_ERR(new_prog) == -ERANGE)
12932 			verbose(env,
12933 				"insn %d cannot be patched due to 16-bit range\n",
12934 				env->insn_aux_data[off].orig_idx);
12935 		vfree(new_data);
12936 		return NULL;
12937 	}
12938 	adjust_insn_aux_data(env, new_data, new_prog, off, len);
12939 	adjust_subprog_starts(env, off, len);
12940 	adjust_poke_descs(new_prog, off, len);
12941 	return new_prog;
12942 }
12943 
12944 static int adjust_subprog_starts_after_remove(struct bpf_verifier_env *env,
12945 					      u32 off, u32 cnt)
12946 {
12947 	int i, j;
12948 
12949 	/* find first prog starting at or after off (first to remove) */
12950 	for (i = 0; i < env->subprog_cnt; i++)
12951 		if (env->subprog_info[i].start >= off)
12952 			break;
12953 	/* find first prog starting at or after off + cnt (first to stay) */
12954 	for (j = i; j < env->subprog_cnt; j++)
12955 		if (env->subprog_info[j].start >= off + cnt)
12956 			break;
12957 	/* if j doesn't start exactly at off + cnt, we are just removing
12958 	 * the front of previous prog
12959 	 */
12960 	if (env->subprog_info[j].start != off + cnt)
12961 		j--;
12962 
12963 	if (j > i) {
12964 		struct bpf_prog_aux *aux = env->prog->aux;
12965 		int move;
12966 
12967 		/* move fake 'exit' subprog as well */
12968 		move = env->subprog_cnt + 1 - j;
12969 
12970 		memmove(env->subprog_info + i,
12971 			env->subprog_info + j,
12972 			sizeof(*env->subprog_info) * move);
12973 		env->subprog_cnt -= j - i;
12974 
12975 		/* remove func_info */
12976 		if (aux->func_info) {
12977 			move = aux->func_info_cnt - j;
12978 
12979 			memmove(aux->func_info + i,
12980 				aux->func_info + j,
12981 				sizeof(*aux->func_info) * move);
12982 			aux->func_info_cnt -= j - i;
12983 			/* func_info->insn_off is set after all code rewrites,
12984 			 * in adjust_btf_func() - no need to adjust
12985 			 */
12986 		}
12987 	} else {
12988 		/* convert i from "first prog to remove" to "first to adjust" */
12989 		if (env->subprog_info[i].start == off)
12990 			i++;
12991 	}
12992 
12993 	/* update fake 'exit' subprog as well */
12994 	for (; i <= env->subprog_cnt; i++)
12995 		env->subprog_info[i].start -= cnt;
12996 
12997 	return 0;
12998 }
12999 
13000 static int bpf_adj_linfo_after_remove(struct bpf_verifier_env *env, u32 off,
13001 				      u32 cnt)
13002 {
13003 	struct bpf_prog *prog = env->prog;
13004 	u32 i, l_off, l_cnt, nr_linfo;
13005 	struct bpf_line_info *linfo;
13006 
13007 	nr_linfo = prog->aux->nr_linfo;
13008 	if (!nr_linfo)
13009 		return 0;
13010 
13011 	linfo = prog->aux->linfo;
13012 
13013 	/* find first line info to remove, count lines to be removed */
13014 	for (i = 0; i < nr_linfo; i++)
13015 		if (linfo[i].insn_off >= off)
13016 			break;
13017 
13018 	l_off = i;
13019 	l_cnt = 0;
13020 	for (; i < nr_linfo; i++)
13021 		if (linfo[i].insn_off < off + cnt)
13022 			l_cnt++;
13023 		else
13024 			break;
13025 
13026 	/* If the first live insn doesn't match the first live linfo, it needs to
13027 	 * "inherit" the last removed linfo.  prog is already modified, so prog->len == off
13028 	 * means no live instructions after (tail of the program was removed).
13029 	 */
13030 	if (prog->len != off && l_cnt &&
13031 	    (i == nr_linfo || linfo[i].insn_off != off + cnt)) {
13032 		l_cnt--;
13033 		linfo[--i].insn_off = off + cnt;
13034 	}
13035 
13036 	/* remove the line info entries which refer to the removed instructions */
13037 	if (l_cnt) {
13038 		memmove(linfo + l_off, linfo + i,
13039 			sizeof(*linfo) * (nr_linfo - i));
13040 
13041 		prog->aux->nr_linfo -= l_cnt;
13042 		nr_linfo = prog->aux->nr_linfo;
13043 	}
13044 
13045 	/* pull all linfo[i].insn_off >= off + cnt in by cnt */
13046 	for (i = l_off; i < nr_linfo; i++)
13047 		linfo[i].insn_off -= cnt;
13048 
13049 	/* fix up all subprogs (incl. 'exit') which start >= off */
13050 	for (i = 0; i <= env->subprog_cnt; i++)
13051 		if (env->subprog_info[i].linfo_idx > l_off) {
13052 			/* program may have started in the removed region but
13053 			 * may not be fully removed
13054 			 */
13055 			if (env->subprog_info[i].linfo_idx >= l_off + l_cnt)
13056 				env->subprog_info[i].linfo_idx -= l_cnt;
13057 			else
13058 				env->subprog_info[i].linfo_idx = l_off;
13059 		}
13060 
13061 	return 0;
13062 }
13063 
13064 static int verifier_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt)
13065 {
13066 	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
13067 	unsigned int orig_prog_len = env->prog->len;
13068 	int err;
13069 
13070 	if (bpf_prog_is_dev_bound(env->prog->aux))
13071 		bpf_prog_offload_remove_insns(env, off, cnt);
13072 
13073 	err = bpf_remove_insns(env->prog, off, cnt);
13074 	if (err)
13075 		return err;
13076 
13077 	err = adjust_subprog_starts_after_remove(env, off, cnt);
13078 	if (err)
13079 		return err;
13080 
13081 	err = bpf_adj_linfo_after_remove(env, off, cnt);
13082 	if (err)
13083 		return err;
13084 
13085 	memmove(aux_data + off,	aux_data + off + cnt,
13086 		sizeof(*aux_data) * (orig_prog_len - off - cnt));
13087 
13088 	return 0;
13089 }
13090 
13091 /* The verifier does more data flow analysis than llvm and will not
13092  * explore branches that are dead at run time. Malicious programs can
13093  * have dead code too. Therefore replace all dead at-run-time code
13094  * with 'ja -1'.
13095  *
13096  * Plain nops are not optimal: e.g. if they sat at the end of the
13097  * program and through another bug we managed to jump there, we would
13098  * execute beyond program memory. Returning an exception
13099  * code also wouldn't work, since we can have subprogs where the dead
13100  * code could be located.
13101  */
13102 static void sanitize_dead_code(struct bpf_verifier_env *env)
13103 {
13104 	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
13105 	struct bpf_insn trap = BPF_JMP_IMM(BPF_JA, 0, 0, -1);
13106 	struct bpf_insn *insn = env->prog->insnsi;
13107 	const int insn_cnt = env->prog->len;
13108 	int i;
13109 
13110 	for (i = 0; i < insn_cnt; i++) {
13111 		if (aux_data[i].seen)
13112 			continue;
13113 		memcpy(insn + i, &trap, sizeof(trap));
13114 		aux_data[i].zext_dst = false;
13115 	}
13116 }
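
/* Sketch of the effect (insn indexes are illustrative): a region that was
 * never marked 'seen', e.g.
 *
 *     12: r0 = 1
 *     13: exit
 *
 * is overwritten with BPF_JA off=-1 at both indexes, i.e. each dead insn
 * jumps to itself, so even a stray jump into the region can only spin in
 * place instead of falling through into unrelated code.
 */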
13117 
13118 static bool insn_is_cond_jump(u8 code)
13119 {
13120 	u8 op;
13121 
13122 	if (BPF_CLASS(code) == BPF_JMP32)
13123 		return true;
13124 
13125 	if (BPF_CLASS(code) != BPF_JMP)
13126 		return false;
13127 
13128 	op = BPF_OP(code);
13129 	return op != BPF_JA && op != BPF_EXIT && op != BPF_CALL;
13130 }
13131 
13132 static void opt_hard_wire_dead_code_branches(struct bpf_verifier_env *env)
13133 {
13134 	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
13135 	struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
13136 	struct bpf_insn *insn = env->prog->insnsi;
13137 	const int insn_cnt = env->prog->len;
13138 	int i;
13139 
13140 	for (i = 0; i < insn_cnt; i++, insn++) {
13141 		if (!insn_is_cond_jump(insn->code))
13142 			continue;
13143 
13144 		if (!aux_data[i + 1].seen)
13145 			ja.off = insn->off;
13146 		else if (!aux_data[i + 1 + insn->off].seen)
13147 			ja.off = 0;
13148 		else
13149 			continue;
13150 
13151 		if (bpf_prog_is_dev_bound(env->prog->aux))
13152 			bpf_prog_offload_replace_insn(env, i, &ja);
13153 
13154 		memcpy(insn, &ja, sizeof(ja));
13155 	}
13156 }
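
/* Sketch (offsets are illustrative): for a conditional jump
 *
 *     if r1 > 7 goto +4
 *
 * whose fall-through insn was never 'seen', the insn is hard-wired into an
 * unconditional 'goto +4'; if instead the jump target is dead, it becomes
 * 'goto +0', a nop that opt_remove_nops() below can strip entirely.
 */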
13157 
13158 static int opt_remove_dead_code(struct bpf_verifier_env *env)
13159 {
13160 	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
13161 	int insn_cnt = env->prog->len;
13162 	int i, err;
13163 
13164 	for (i = 0; i < insn_cnt; i++) {
13165 		int j;
13166 
13167 		j = 0;
13168 		while (i + j < insn_cnt && !aux_data[i + j].seen)
13169 			j++;
13170 		if (!j)
13171 			continue;
13172 
13173 		err = verifier_remove_insns(env, i, j);
13174 		if (err)
13175 			return err;
13176 		insn_cnt = env->prog->len;
13177 	}
13178 
13179 	return 0;
13180 }
13181 
13182 static int opt_remove_nops(struct bpf_verifier_env *env)
13183 {
13184 	const struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
13185 	struct bpf_insn *insn = env->prog->insnsi;
13186 	int insn_cnt = env->prog->len;
13187 	int i, err;
13188 
13189 	for (i = 0; i < insn_cnt; i++) {
13190 		if (memcmp(&insn[i], &ja, sizeof(ja)))
13191 			continue;
13192 
13193 		err = verifier_remove_insns(env, i, 1);
13194 		if (err)
13195 			return err;
13196 		insn_cnt--;
13197 		i--;
13198 	}
13199 
13200 	return 0;
13201 }
13202 
13203 static int opt_subreg_zext_lo32_rnd_hi32(struct bpf_verifier_env *env,
13204 					 const union bpf_attr *attr)
13205 {
13206 	struct bpf_insn *patch, zext_patch[2], rnd_hi32_patch[4];
13207 	struct bpf_insn_aux_data *aux = env->insn_aux_data;
13208 	int i, patch_len, delta = 0, len = env->prog->len;
13209 	struct bpf_insn *insns = env->prog->insnsi;
13210 	struct bpf_prog *new_prog;
13211 	bool rnd_hi32;
13212 
13213 	rnd_hi32 = attr->prog_flags & BPF_F_TEST_RND_HI32;
13214 	zext_patch[1] = BPF_ZEXT_REG(0);
13215 	rnd_hi32_patch[1] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, 0);
13216 	rnd_hi32_patch[2] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
13217 	rnd_hi32_patch[3] = BPF_ALU64_REG(BPF_OR, 0, BPF_REG_AX);
13218 	for (i = 0; i < len; i++) {
13219 		int adj_idx = i + delta;
13220 		struct bpf_insn insn;
13221 		int load_reg;
13222 
13223 		insn = insns[adj_idx];
13224 		load_reg = insn_def_regno(&insn);
13225 		if (!aux[adj_idx].zext_dst) {
13226 			u8 code, class;
13227 			u32 imm_rnd;
13228 
13229 			if (!rnd_hi32)
13230 				continue;
13231 
13232 			code = insn.code;
13233 			class = BPF_CLASS(code);
13234 			if (load_reg == -1)
13235 				continue;
13236 
13237 			/* NOTE: arg "reg" (the fourth one) is only used for
13238 			 *       BPF_STX + SRC_OP, so it is safe to pass NULL
13239 			 *       here.
13240 			 */
13241 			if (is_reg64(env, &insn, load_reg, NULL, DST_OP)) {
13242 				if (class == BPF_LD &&
13243 				    BPF_MODE(code) == BPF_IMM)
13244 					i++;
13245 				continue;
13246 			}
13247 
13248 			/* ctx load could be transformed into wider load. */
13249 			if (class == BPF_LDX &&
13250 			    aux[adj_idx].ptr_type == PTR_TO_CTX)
13251 				continue;
13252 
13253 			imm_rnd = get_random_int();
13254 			rnd_hi32_patch[0] = insn;
13255 			rnd_hi32_patch[1].imm = imm_rnd;
13256 			rnd_hi32_patch[3].dst_reg = load_reg;
13257 			patch = rnd_hi32_patch;
13258 			patch_len = 4;
13259 			goto apply_patch_buffer;
13260 		}
13261 
13262 		/* Add in a zero-extend instruction if a) the JIT has requested
13263 		 * it or b) it's a CMPXCHG.
13264 		 *
13265 		 * The latter is because: BPF_CMPXCHG always loads a value into
13266 		 * R0, therefore always zero-extends. However some archs'
13267 		 * equivalent instruction only does this load when the
13268 		 * comparison is successful. This detail of CMPXCHG is
13269 		 * orthogonal to the general zero-extension behaviour of the
13270 		 * CPU, so it's treated independently of bpf_jit_needs_zext.
13271 		 */
13272 		if (!bpf_jit_needs_zext() && !is_cmpxchg_insn(&insn))
13273 			continue;
13274 
13275 		if (WARN_ON(load_reg == -1)) {
13276 			verbose(env, "verifier bug. zext_dst is set, but no reg is defined\n");
13277 			return -EFAULT;
13278 		}
13279 
13280 		zext_patch[0] = insn;
13281 		zext_patch[1].dst_reg = load_reg;
13282 		zext_patch[1].src_reg = load_reg;
13283 		patch = zext_patch;
13284 		patch_len = 2;
13285 apply_patch_buffer:
13286 		new_prog = bpf_patch_insn_data(env, adj_idx, patch, patch_len);
13287 		if (!new_prog)
13288 			return -ENOMEM;
13289 		env->prog = new_prog;
13290 		insns = new_prog->insnsi;
13291 		aux = env->insn_aux_data;
13292 		delta += patch_len - 1;
13293 	}
13294 
13295 	return 0;
13296 }
13297 
13298 /* convert load instructions that access fields of a context type into a
13299  * sequence of instructions that access fields of the underlying structure:
13300  *     struct __sk_buff    -> struct sk_buff
13301  *     struct bpf_sock_ops -> struct sock
13302  */
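/* For example (illustrative only, the real offsets come from the program
 * type's convert_ctx_access callback):
 *     r0 = *(u32 *)(r1 + offsetof(struct __sk_buff, len))
 * is rewritten for a socket filter to roughly
 *     r0 = *(u32 *)(r1 + offsetof(struct sk_buff, len))
 * i.e. the user-visible ctx offset is translated to the offset of the
 * corresponding field in the underlying kernel structure.
 */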
13303 static int convert_ctx_accesses(struct bpf_verifier_env *env)
13304 {
13305 	const struct bpf_verifier_ops *ops = env->ops;
13306 	int i, cnt, size, ctx_field_size, delta = 0;
13307 	const int insn_cnt = env->prog->len;
13308 	struct bpf_insn insn_buf[16], *insn;
13309 	u32 target_size, size_default, off;
13310 	struct bpf_prog *new_prog;
13311 	enum bpf_access_type type;
13312 	bool is_narrower_load;
13313 
13314 	if (ops->gen_prologue || env->seen_direct_write) {
13315 		if (!ops->gen_prologue) {
13316 			verbose(env, "bpf verifier is misconfigured\n");
13317 			return -EINVAL;
13318 		}
13319 		cnt = ops->gen_prologue(insn_buf, env->seen_direct_write,
13320 					env->prog);
13321 		if (cnt >= ARRAY_SIZE(insn_buf)) {
13322 			verbose(env, "bpf verifier is misconfigured\n");
13323 			return -EINVAL;
13324 		} else if (cnt) {
13325 			new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt);
13326 			if (!new_prog)
13327 				return -ENOMEM;
13328 
13329 			env->prog = new_prog;
13330 			delta += cnt - 1;
13331 		}
13332 	}
13333 
13334 	if (bpf_prog_is_dev_bound(env->prog->aux))
13335 		return 0;
13336 
13337 	insn = env->prog->insnsi + delta;
13338 
13339 	for (i = 0; i < insn_cnt; i++, insn++) {
13340 		bpf_convert_ctx_access_t convert_ctx_access;
13341 		bool ctx_access;
13342 
13343 		if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) ||
13344 		    insn->code == (BPF_LDX | BPF_MEM | BPF_H) ||
13345 		    insn->code == (BPF_LDX | BPF_MEM | BPF_W) ||
13346 		    insn->code == (BPF_LDX | BPF_MEM | BPF_DW)) {
13347 			type = BPF_READ;
13348 			ctx_access = true;
13349 		} else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) ||
13350 			   insn->code == (BPF_STX | BPF_MEM | BPF_H) ||
13351 			   insn->code == (BPF_STX | BPF_MEM | BPF_W) ||
13352 			   insn->code == (BPF_STX | BPF_MEM | BPF_DW) ||
13353 			   insn->code == (BPF_ST | BPF_MEM | BPF_B) ||
13354 			   insn->code == (BPF_ST | BPF_MEM | BPF_H) ||
13355 			   insn->code == (BPF_ST | BPF_MEM | BPF_W) ||
13356 			   insn->code == (BPF_ST | BPF_MEM | BPF_DW)) {
13357 			type = BPF_WRITE;
13358 			ctx_access = BPF_CLASS(insn->code) == BPF_STX;
13359 		} else {
13360 			continue;
13361 		}
13362 
13363 		if (type == BPF_WRITE &&
13364 		    env->insn_aux_data[i + delta].sanitize_stack_spill) {
13365 			struct bpf_insn patch[] = {
13366 				*insn,
13367 				BPF_ST_NOSPEC(),
13368 			};
13369 
13370 			cnt = ARRAY_SIZE(patch);
13371 			new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt);
13372 			if (!new_prog)
13373 				return -ENOMEM;
13374 
13375 			delta    += cnt - 1;
13376 			env->prog = new_prog;
13377 			insn      = new_prog->insnsi + i + delta;
13378 			continue;
13379 		}
13380 
13381 		if (!ctx_access)
13382 			continue;
13383 
13384 		switch ((int)env->insn_aux_data[i + delta].ptr_type) {
13385 		case PTR_TO_CTX:
13386 			if (!ops->convert_ctx_access)
13387 				continue;
13388 			convert_ctx_access = ops->convert_ctx_access;
13389 			break;
13390 		case PTR_TO_SOCKET:
13391 		case PTR_TO_SOCK_COMMON:
13392 			convert_ctx_access = bpf_sock_convert_ctx_access;
13393 			break;
13394 		case PTR_TO_TCP_SOCK:
13395 			convert_ctx_access = bpf_tcp_sock_convert_ctx_access;
13396 			break;
13397 		case PTR_TO_XDP_SOCK:
13398 			convert_ctx_access = bpf_xdp_sock_convert_ctx_access;
13399 			break;
13400 		case PTR_TO_BTF_ID:
13401 		case PTR_TO_BTF_ID | PTR_UNTRUSTED:
13402 			if (type == BPF_READ) {
13403 				insn->code = BPF_LDX | BPF_PROBE_MEM |
13404 					BPF_SIZE((insn)->code);
13405 				env->prog->aux->num_exentries++;
13406 			} else if (resolve_prog_type(env->prog) != BPF_PROG_TYPE_STRUCT_OPS) {
13407 				verbose(env, "Writes through BTF pointers are not allowed\n");
13408 				return -EINVAL;
13409 			}
13410 			continue;
13411 		default:
13412 			continue;
13413 		}
13414 
13415 		ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size;
13416 		size = BPF_LDST_BYTES(insn);
13417 
13418 		/* If the read access is a narrower load of the field,
13419 		 * convert it to a 4/8-byte load, to minimize program type
13420 		 * specific convert_ctx_access changes. If the conversion is
13421 		 * successful, we will apply the proper mask to the result.
13422 		 */
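		/* For example (illustrative, little-endian layout assumed):
		 * a 1-byte read at offset 2 of a 4-byte ctx field is widened
		 * to a 4-byte load of the whole field, and then
		 *     dst >>= 16; dst &= 0xff;
		 * is emitted below once the access has been converted.
		 */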
13423 		is_narrower_load = size < ctx_field_size;
13424 		size_default = bpf_ctx_off_adjust_machine(ctx_field_size);
13425 		off = insn->off;
13426 		if (is_narrower_load) {
13427 			u8 size_code;
13428 
13429 			if (type == BPF_WRITE) {
13430 				verbose(env, "bpf verifier narrow ctx access misconfigured\n");
13431 				return -EINVAL;
13432 			}
13433 
13434 			size_code = BPF_H;
13435 			if (ctx_field_size == 4)
13436 				size_code = BPF_W;
13437 			else if (ctx_field_size == 8)
13438 				size_code = BPF_DW;
13439 
13440 			insn->off = off & ~(size_default - 1);
13441 			insn->code = BPF_LDX | BPF_MEM | size_code;
13442 		}
13443 
13444 		target_size = 0;
13445 		cnt = convert_ctx_access(type, insn, insn_buf, env->prog,
13446 					 &target_size);
13447 		if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) ||
13448 		    (ctx_field_size && !target_size)) {
13449 			verbose(env, "bpf verifier is misconfigured\n");
13450 			return -EINVAL;
13451 		}
13452 
13453 		if (is_narrower_load && size < target_size) {
13454 			u8 shift = bpf_ctx_narrow_access_offset(
13455 				off, size, size_default) * 8;
13456 			if (shift && cnt + 1 >= ARRAY_SIZE(insn_buf)) {
13457 				verbose(env, "bpf verifier narrow ctx load misconfigured\n");
13458 				return -EINVAL;
13459 			}
13460 			if (ctx_field_size <= 4) {
13461 				if (shift)
13462 					insn_buf[cnt++] = BPF_ALU32_IMM(BPF_RSH,
13463 									insn->dst_reg,
13464 									shift);
13465 				insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
13466 								(1 << size * 8) - 1);
13467 			} else {
13468 				if (shift)
13469 					insn_buf[cnt++] = BPF_ALU64_IMM(BPF_RSH,
13470 									insn->dst_reg,
13471 									shift);
13472 				insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg,
13473 								(1ULL << size * 8) - 1);
13474 			}
13475 		}
13476 
13477 		new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
13478 		if (!new_prog)
13479 			return -ENOMEM;
13480 
13481 		delta += cnt - 1;
13482 
13483 		/* keep walking new program and skip insns we just inserted */
13484 		env->prog = new_prog;
13485 		insn      = new_prog->insnsi + i + delta;
13486 	}
13487 
13488 	return 0;
13489 }
13490 
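/* JIT bpf-to-bpf calls: split the main program into one bpf_prog per
 * subprogram, JIT each of them, then patch every pseudo call with the
 * address of the callee's JITed image. On failure (other than a verifier
 * bug) the insns are restored so the whole program can fall back to the
 * interpreter.
 */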
13491 static int jit_subprogs(struct bpf_verifier_env *env)
13492 {
13493 	struct bpf_prog *prog = env->prog, **func, *tmp;
13494 	int i, j, subprog_start, subprog_end = 0, len, subprog;
13495 	struct bpf_map *map_ptr;
13496 	struct bpf_insn *insn;
13497 	void *old_bpf_func;
13498 	int err, num_exentries;
13499 
13500 	if (env->subprog_cnt <= 1)
13501 		return 0;
13502 
13503 	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
13504 		if (!bpf_pseudo_func(insn) && !bpf_pseudo_call(insn))
13505 			continue;
13506 
13507 		/* Upon error here we cannot fall back to interpreter but
13508 		 * need a hard reject of the program. Thus -EFAULT is
13509 		 * propagated in any case.
13510 		 */
13511 		subprog = find_subprog(env, i + insn->imm + 1);
13512 		if (subprog < 0) {
13513 			WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
13514 				  i + insn->imm + 1);
13515 			return -EFAULT;
13516 		}
13517 		/* temporarily remember subprog id inside insn instead of
13518 		 * aux_data, since next loop will split up all insns into funcs
13519 		 */
13520 		insn->off = subprog;
13521 		/* remember original imm in case JIT fails and fallback
13522 		 * to interpreter will be needed
13523 		 */
13524 		env->insn_aux_data[i].call_imm = insn->imm;
13525 		/* point imm to __bpf_call_base+1 from JITs point of view */
13526 		insn->imm = 1;
13527 		if (bpf_pseudo_func(insn))
13528 			/* jit (e.g. x86_64) may emit fewer instructions
13529 			 * if it learns a u32 imm is the same as a u64 imm.
13530 			 * Force a non-zero value here.
13531 			 */
13532 			insn[1].imm = 1;
13533 	}
13534 
13535 	err = bpf_prog_alloc_jited_linfo(prog);
13536 	if (err)
13537 		goto out_undo_insn;
13538 
13539 	err = -ENOMEM;
13540 	func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL);
13541 	if (!func)
13542 		goto out_undo_insn;
13543 
13544 	for (i = 0; i < env->subprog_cnt; i++) {
13545 		subprog_start = subprog_end;
13546 		subprog_end = env->subprog_info[i + 1].start;
13547 
13548 		len = subprog_end - subprog_start;
13549 		/* bpf_prog_run() doesn't call subprogs directly,
13550 		 * hence main prog stats include the runtime of subprogs.
13551 		 * subprogs don't have IDs and are not reachable via prog_get_next_id,
13552 		 * so func[i]->stats will never be accessed and stays NULL
13553 		 */
13554 		func[i] = bpf_prog_alloc_no_stats(bpf_prog_size(len), GFP_USER);
13555 		if (!func[i])
13556 			goto out_free;
13557 		memcpy(func[i]->insnsi, &prog->insnsi[subprog_start],
13558 		       len * sizeof(struct bpf_insn));
13559 		func[i]->type = prog->type;
13560 		func[i]->len = len;
13561 		if (bpf_prog_calc_tag(func[i]))
13562 			goto out_free;
13563 		func[i]->is_func = 1;
13564 		func[i]->aux->func_idx = i;
13565 		/* Below members will be freed only at prog->aux */
13566 		func[i]->aux->btf = prog->aux->btf;
13567 		func[i]->aux->func_info = prog->aux->func_info;
13568 		func[i]->aux->poke_tab = prog->aux->poke_tab;
13569 		func[i]->aux->size_poke_tab = prog->aux->size_poke_tab;
13570 
13571 		for (j = 0; j < prog->aux->size_poke_tab; j++) {
13572 			struct bpf_jit_poke_descriptor *poke;
13573 
13574 			poke = &prog->aux->poke_tab[j];
13575 			if (poke->insn_idx < subprog_end &&
13576 			    poke->insn_idx >= subprog_start)
13577 				poke->aux = func[i]->aux;
13578 		}
13579 
13580 		/* Use bpf_prog_F_tag to indicate functions in stack traces.
13581 		 * Long term, debug info would be needed to populate the names
13582 		 */
13583 		func[i]->aux->name[0] = 'F';
13584 		func[i]->aux->stack_depth = env->subprog_info[i].stack_depth;
13585 		func[i]->jit_requested = 1;
13586 		func[i]->blinding_requested = prog->blinding_requested;
13587 		func[i]->aux->kfunc_tab = prog->aux->kfunc_tab;
13588 		func[i]->aux->kfunc_btf_tab = prog->aux->kfunc_btf_tab;
13589 		func[i]->aux->linfo = prog->aux->linfo;
13590 		func[i]->aux->nr_linfo = prog->aux->nr_linfo;
13591 		func[i]->aux->jited_linfo = prog->aux->jited_linfo;
13592 		func[i]->aux->linfo_idx = env->subprog_info[i].linfo_idx;
13593 		num_exentries = 0;
13594 		insn = func[i]->insnsi;
13595 		for (j = 0; j < func[i]->len; j++, insn++) {
13596 			if (BPF_CLASS(insn->code) == BPF_LDX &&
13597 			    BPF_MODE(insn->code) == BPF_PROBE_MEM)
13598 				num_exentries++;
13599 		}
13600 		func[i]->aux->num_exentries = num_exentries;
13601 		func[i]->aux->tail_call_reachable = env->subprog_info[i].tail_call_reachable;
13602 		func[i] = bpf_int_jit_compile(func[i]);
13603 		if (!func[i]->jited) {
13604 			err = -ENOTSUPP;
13605 			goto out_free;
13606 		}
13607 		cond_resched();
13608 	}
13609 
13610 	/* at this point all bpf functions were successfully JITed
13611 	 * now populate all bpf_calls with correct addresses and
13612 	 * run last pass of JIT
13613 	 */
13614 	for (i = 0; i < env->subprog_cnt; i++) {
13615 		insn = func[i]->insnsi;
13616 		for (j = 0; j < func[i]->len; j++, insn++) {
13617 			if (bpf_pseudo_func(insn)) {
13618 				subprog = insn->off;
13619 				insn[0].imm = (u32)(long)func[subprog]->bpf_func;
13620 				insn[1].imm = ((u64)(long)func[subprog]->bpf_func) >> 32;
13621 				continue;
13622 			}
13623 			if (!bpf_pseudo_call(insn))
13624 				continue;
13625 			subprog = insn->off;
13626 			insn->imm = BPF_CALL_IMM(func[subprog]->bpf_func);
13627 		}
13628 
13629 		/* we use the aux data to keep a list of the start addresses
13630 		 * of the JITed images for each function in the program
13631 		 *
13632 		 * for some architectures, such as powerpc64, the imm field
13633 		 * might not be large enough to hold the offset of the start
13634 		 * address of the callee's JITed image from __bpf_call_base
13635 		 *
13636 		 * in such cases, we can lookup the start address of a callee
13637 		 * by using its subprog id, available from the off field of
13638 		 * the call instruction, as an index for this list
13639 		 */
13640 		func[i]->aux->func = func;
13641 		func[i]->aux->func_cnt = env->subprog_cnt;
13642 	}
13643 	for (i = 0; i < env->subprog_cnt; i++) {
13644 		old_bpf_func = func[i]->bpf_func;
13645 		tmp = bpf_int_jit_compile(func[i]);
13646 		if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) {
13647 			verbose(env, "JIT doesn't support bpf-to-bpf calls\n");
13648 			err = -ENOTSUPP;
13649 			goto out_free;
13650 		}
13651 		cond_resched();
13652 	}
13653 
13654 	/* finally lock prog and jit images for all functions and
13655 	 * populate kallsyms
13656 	 */
13657 	for (i = 0; i < env->subprog_cnt; i++) {
13658 		bpf_prog_lock_ro(func[i]);
13659 		bpf_prog_kallsyms_add(func[i]);
13660 	}
13661 
13662 	/* Last step: make the now unused interpreter insns from the main
13663 	 * prog consistent for later dump requests, so they look the same
13664 	 * as if they had only been interpreted.
13665 	 */
13666 	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
13667 		if (bpf_pseudo_func(insn)) {
13668 			insn[0].imm = env->insn_aux_data[i].call_imm;
13669 			insn[1].imm = insn->off;
13670 			insn->off = 0;
13671 			continue;
13672 		}
13673 		if (!bpf_pseudo_call(insn))
13674 			continue;
13675 		insn->off = env->insn_aux_data[i].call_imm;
13676 		subprog = find_subprog(env, i + insn->off + 1);
13677 		insn->imm = subprog;
13678 	}
13679 
13680 	prog->jited = 1;
13681 	prog->bpf_func = func[0]->bpf_func;
13682 	prog->jited_len = func[0]->jited_len;
13683 	prog->aux->func = func;
13684 	prog->aux->func_cnt = env->subprog_cnt;
13685 	bpf_prog_jit_attempt_done(prog);
13686 	return 0;
13687 out_free:
13688 	/* We failed JIT'ing, so at this point we need to unregister poke
13689 	 * descriptors from subprogs, so that the kernel is not attempting to
13690 	 * patch them anymore as we're freeing the subprog JIT memory.
13691 	 */
13692 	for (i = 0; i < prog->aux->size_poke_tab; i++) {
13693 		map_ptr = prog->aux->poke_tab[i].tail_call.map;
13694 		map_ptr->ops->map_poke_untrack(map_ptr, prog->aux);
13695 	}
13696 	/* At this point we're guaranteed that poke descriptors are not
13697 	 * live anymore. We can just unlink each subprog's descriptor table
13698 	 * as it's released with the main prog.
13699 	 */
13700 	for (i = 0; i < env->subprog_cnt; i++) {
13701 		if (!func[i])
13702 			continue;
13703 		func[i]->aux->poke_tab = NULL;
13704 		bpf_jit_free(func[i]);
13705 	}
13706 	kfree(func);
13707 out_undo_insn:
13708 	/* cleanup main prog to be interpreted */
13709 	prog->jit_requested = 0;
13710 	prog->blinding_requested = 0;
13711 	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
13712 		if (!bpf_pseudo_call(insn))
13713 			continue;
13714 		insn->off = 0;
13715 		insn->imm = env->insn_aux_data[i].call_imm;
13716 	}
13717 	bpf_prog_jit_attempt_done(prog);
13718 	return err;
13719 }
13720 
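/* Resolve bpf-to-bpf calls: try to JIT all subprograms via jit_subprogs();
 * if that is not possible and the interpreter is available, rewrite the
 * pseudo calls with bpf_patch_call_args() so the interpreter can execute
 * them. kfunc calls, callbacks, and tail calls combined with bpf-to-bpf
 * calls are not supported by the interpreter and are rejected in that case.
 */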
13721 static int fixup_call_args(struct bpf_verifier_env *env)
13722 {
13723 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
13724 	struct bpf_prog *prog = env->prog;
13725 	struct bpf_insn *insn = prog->insnsi;
13726 	bool has_kfunc_call = bpf_prog_has_kfunc_call(prog);
13727 	int i, depth;
13728 #endif
13729 	int err = 0;
13730 
13731 	if (env->prog->jit_requested &&
13732 	    !bpf_prog_is_dev_bound(env->prog->aux)) {
13733 		err = jit_subprogs(env);
13734 		if (err == 0)
13735 			return 0;
13736 		if (err == -EFAULT)
13737 			return err;
13738 	}
13739 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
13740 	if (has_kfunc_call) {
13741 		verbose(env, "calling kernel functions are not allowed in non-JITed programs\n");
13742 		return -EINVAL;
13743 	}
13744 	if (env->subprog_cnt > 1 && env->prog->aux->tail_call_reachable) {
13745 		/* When the JIT fails, progs with bpf2bpf calls and tail_calls
13746 		 * have to be rejected, since the interpreter doesn't support them yet.
13747 		 */
13748 		verbose(env, "tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls\n");
13749 		return -EINVAL;
13750 	}
13751 	for (i = 0; i < prog->len; i++, insn++) {
13752 		if (bpf_pseudo_func(insn)) {
13753 			/* When the JIT fails, progs with callback calls
13754 			 * have to be rejected, since the interpreter doesn't support them yet.
13755 			 */
13756 			verbose(env, "callbacks are not allowed in non-JITed programs\n");
13757 			return -EINVAL;
13758 		}
13759 
13760 		if (!bpf_pseudo_call(insn))
13761 			continue;
13762 		depth = get_callee_stack_depth(env, insn, i);
13763 		if (depth < 0)
13764 			return depth;
13765 		bpf_patch_call_args(insn, depth);
13766 	}
13767 	err = 0;
13768 #endif
13769 	return err;
13770 }
13771 
13772 static int fixup_kfunc_call(struct bpf_verifier_env *env,
13773 			    struct bpf_insn *insn)
13774 {
13775 	const struct bpf_kfunc_desc *desc;
13776 
13777 	if (!insn->imm) {
13778 		verbose(env, "invalid kernel function call not eliminated in verifier pass\n");
13779 		return -EINVAL;
13780 	}
13781 
13782 	/* insn->imm has the btf func_id. Replace it with
13783 	 * an address (relative to __bpf_call_base).
13784 	 */
13785 	desc = find_kfunc_desc(env->prog, insn->imm, insn->off);
13786 	if (!desc) {
13787 		verbose(env, "verifier internal error: kernel function descriptor not found for func_id %u\n",
13788 			insn->imm);
13789 		return -EFAULT;
13790 	}
13791 
13792 	insn->imm = desc->imm;
13793 
13794 	return 0;
13795 }
13796 
13797 /* Do various post-verification rewrites in a single program pass.
13798  * These rewrites simplify JIT and interpreter implementations.
13799  */
13800 static int do_misc_fixups(struct bpf_verifier_env *env)
13801 {
13802 	struct bpf_prog *prog = env->prog;
13803 	enum bpf_attach_type eatype = prog->expected_attach_type;
13804 	enum bpf_prog_type prog_type = resolve_prog_type(prog);
13805 	struct bpf_insn *insn = prog->insnsi;
13806 	const struct bpf_func_proto *fn;
13807 	const int insn_cnt = prog->len;
13808 	const struct bpf_map_ops *ops;
13809 	struct bpf_insn_aux_data *aux;
13810 	struct bpf_insn insn_buf[16];
13811 	struct bpf_prog *new_prog;
13812 	struct bpf_map *map_ptr;
13813 	int i, ret, cnt, delta = 0;
13814 
13815 	for (i = 0; i < insn_cnt; i++, insn++) {
13816 		/* Make divide-by-zero exceptions impossible. */
13817 		if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) ||
13818 		    insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
13819 		    insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
13820 		    insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
13821 			bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
13822 			bool isdiv = BPF_OP(insn->code) == BPF_DIV;
13823 			struct bpf_insn *patchlet;
13824 			struct bpf_insn chk_and_div[] = {
13825 				/* [R,W]x div 0 -> 0 */
13826 				BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
13827 					     BPF_JNE | BPF_K, insn->src_reg,
13828 					     0, 2, 0),
13829 				BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg),
13830 				BPF_JMP_IMM(BPF_JA, 0, 0, 1),
13831 				*insn,
13832 			};
13833 			struct bpf_insn chk_and_mod[] = {
13834 				/* [R,W]x mod 0 -> [R,W]x */
13835 				BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
13836 					     BPF_JEQ | BPF_K, insn->src_reg,
13837 					     0, 1 + (is64 ? 0 : 1), 0),
13838 				*insn,
13839 				BPF_JMP_IMM(BPF_JA, 0, 0, 1),
13840 				BPF_MOV32_REG(insn->dst_reg, insn->dst_reg),
13841 			};
13842 
13843 			patchlet = isdiv ? chk_and_div : chk_and_mod;
13844 			cnt = isdiv ? ARRAY_SIZE(chk_and_div) :
13845 				      ARRAY_SIZE(chk_and_mod) - (is64 ? 2 : 0);
13846 
13847 			new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
13848 			if (!new_prog)
13849 				return -ENOMEM;
13850 
13851 			delta    += cnt - 1;
13852 			env->prog = prog = new_prog;
13853 			insn      = new_prog->insnsi + i + delta;
13854 			continue;
13855 		}
13856 
13857 		/* Implement LD_ABS and LD_IND with a rewrite, if supported by the program type. */
13858 		if (BPF_CLASS(insn->code) == BPF_LD &&
13859 		    (BPF_MODE(insn->code) == BPF_ABS ||
13860 		     BPF_MODE(insn->code) == BPF_IND)) {
13861 			cnt = env->ops->gen_ld_abs(insn, insn_buf);
13862 			if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
13863 				verbose(env, "bpf verifier is misconfigured\n");
13864 				return -EINVAL;
13865 			}
13866 
13867 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
13868 			if (!new_prog)
13869 				return -ENOMEM;
13870 
13871 			delta    += cnt - 1;
13872 			env->prog = prog = new_prog;
13873 			insn      = new_prog->insnsi + i + delta;
13874 			continue;
13875 		}
13876 
13877 		/* Rewrite pointer arithmetic to mitigate speculation attacks. */
13878 		if (insn->code == (BPF_ALU64 | BPF_ADD | BPF_X) ||
13879 		    insn->code == (BPF_ALU64 | BPF_SUB | BPF_X)) {
13880 			const u8 code_add = BPF_ALU64 | BPF_ADD | BPF_X;
13881 			const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X;
13882 			struct bpf_insn *patch = &insn_buf[0];
13883 			bool issrc, isneg, isimm;
13884 			u32 off_reg;
13885 
13886 			aux = &env->insn_aux_data[i + delta];
13887 			if (!aux->alu_state ||
13888 			    aux->alu_state == BPF_ALU_NON_POINTER)
13889 				continue;
13890 
13891 			isneg = aux->alu_state & BPF_ALU_NEG_VALUE;
13892 			issrc = (aux->alu_state & BPF_ALU_SANITIZE) ==
13893 				BPF_ALU_SANITIZE_SRC;
13894 			isimm = aux->alu_state & BPF_ALU_IMMEDIATE;
13895 
13896 			off_reg = issrc ? insn->src_reg : insn->dst_reg;
13897 			if (isimm) {
13898 				*patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit);
13899 			} else {
13900 				if (isneg)
13901 					*patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
13902 				*patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit);
13903 				*patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg);
13904 				*patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg);
13905 				*patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);
13906 				*patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63);
13907 				*patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX, off_reg);
13908 			}
13909 			if (!issrc)
13910 				*patch++ = BPF_MOV64_REG(insn->dst_reg, insn->src_reg);
13911 			insn->src_reg = BPF_REG_AX;
13912 			if (isneg)
13913 				insn->code = insn->code == code_add ?
13914 					     code_sub : code_add;
13915 			*patch++ = *insn;
13916 			if (issrc && isneg && !isimm)
13917 				*patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
13918 			cnt = patch - insn_buf;
13919 
13920 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
13921 			if (!new_prog)
13922 				return -ENOMEM;
13923 
13924 			delta    += cnt - 1;
13925 			env->prog = prog = new_prog;
13926 			insn      = new_prog->insnsi + i + delta;
13927 			continue;
13928 		}
13929 
13930 		if (insn->code != (BPF_JMP | BPF_CALL))
13931 			continue;
13932 		if (insn->src_reg == BPF_PSEUDO_CALL)
13933 			continue;
13934 		if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) {
13935 			ret = fixup_kfunc_call(env, insn);
13936 			if (ret)
13937 				return ret;
13938 			continue;
13939 		}
13940 
13941 		if (insn->imm == BPF_FUNC_get_route_realm)
13942 			prog->dst_needed = 1;
13943 		if (insn->imm == BPF_FUNC_get_prandom_u32)
13944 			bpf_user_rnd_init_once();
13945 		if (insn->imm == BPF_FUNC_override_return)
13946 			prog->kprobe_override = 1;
13947 		if (insn->imm == BPF_FUNC_tail_call) {
13948 			/* If we tail call into other programs, we
13949 			 * cannot make any assumptions since they can
13950 			 * be replaced dynamically during runtime in
13951 			 * the program array.
13952 			 */
13953 			prog->cb_access = 1;
13954 			if (!allow_tail_call_in_subprogs(env))
13955 				prog->aux->stack_depth = MAX_BPF_STACK;
13956 			prog->aux->max_pkt_offset = MAX_PACKET_OFF;
13957 
13958 			/* mark bpf_tail_call as different opcode to avoid
13959 			 * conditional branch in the interpreter for every normal
13960 			 * call and to prevent accidental JITing by JIT compiler
13961 			 * that doesn't support bpf_tail_call yet
13962 			 */
13963 			insn->imm = 0;
13964 			insn->code = BPF_JMP | BPF_TAIL_CALL;
13965 
13966 			aux = &env->insn_aux_data[i + delta];
13967 			if (env->bpf_capable && !prog->blinding_requested &&
13968 			    prog->jit_requested &&
13969 			    !bpf_map_key_poisoned(aux) &&
13970 			    !bpf_map_ptr_poisoned(aux) &&
13971 			    !bpf_map_ptr_unpriv(aux)) {
13972 				struct bpf_jit_poke_descriptor desc = {
13973 					.reason = BPF_POKE_REASON_TAIL_CALL,
13974 					.tail_call.map = BPF_MAP_PTR(aux->map_ptr_state),
13975 					.tail_call.key = bpf_map_key_immediate(aux),
13976 					.insn_idx = i + delta,
13977 				};
13978 
13979 				ret = bpf_jit_add_poke_descriptor(prog, &desc);
13980 				if (ret < 0) {
13981 					verbose(env, "adding tail call poke descriptor failed\n");
13982 					return ret;
13983 				}
13984 
13985 				insn->imm = ret + 1;
13986 				continue;
13987 			}
13988 
13989 			if (!bpf_map_ptr_unpriv(aux))
13990 				continue;
13991 
13992 			/* instead of changing every JIT dealing with tail_call
13993 			 * emit two extra insns:
13994 			 * if (index >= max_entries) goto out;
13995 			 * index &= array->index_mask;
13996 			 * to avoid out-of-bounds cpu speculation
13997 			 */
13998 			if (bpf_map_ptr_poisoned(aux)) {
13999 				verbose(env, "tail_call abusing map_ptr\n");
14000 				return -EINVAL;
14001 			}
14002 
14003 			map_ptr = BPF_MAP_PTR(aux->map_ptr_state);
14004 			insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3,
14005 						  map_ptr->max_entries, 2);
14006 			insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3,
14007 						    container_of(map_ptr,
14008 								 struct bpf_array,
14009 								 map)->index_mask);
14010 			insn_buf[2] = *insn;
14011 			cnt = 3;
14012 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
14013 			if (!new_prog)
14014 				return -ENOMEM;
14015 
14016 			delta    += cnt - 1;
14017 			env->prog = prog = new_prog;
14018 			insn      = new_prog->insnsi + i + delta;
14019 			continue;
14020 		}
14021 
14022 		if (insn->imm == BPF_FUNC_timer_set_callback) {
14023 			/* The verifier will process callback_fn as many times as necessary
14024 			 * with different maps and the register states prepared by
14025 			 * set_timer_callback_state will be accurate.
14026 			 *
14027 			 * The following use case is valid:
14028 			 *   map1 is shared by prog1, prog2, prog3.
14029 			 *   prog1 calls bpf_timer_init for some map1 elements
14030 			 *   prog2 calls bpf_timer_set_callback for some map1 elements.
14031 			 *     Those that were not bpf_timer_init-ed will return -EINVAL.
14032 			 *   prog3 calls bpf_timer_start for some map1 elements.
14033 			 *     Those that were not both bpf_timer_init-ed and
14034 			 *     bpf_timer_set_callback-ed will return -EINVAL.
14035 			 */
14036 			struct bpf_insn ld_addrs[2] = {
14037 				BPF_LD_IMM64(BPF_REG_3, (long)prog->aux),
14038 			};
14039 
14040 			insn_buf[0] = ld_addrs[0];
14041 			insn_buf[1] = ld_addrs[1];
14042 			insn_buf[2] = *insn;
14043 			cnt = 3;
14044 
14045 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
14046 			if (!new_prog)
14047 				return -ENOMEM;
14048 
14049 			delta    += cnt - 1;
14050 			env->prog = prog = new_prog;
14051 			insn      = new_prog->insnsi + i + delta;
14052 			goto patch_call_imm;
14053 		}
14054 
14055 		if (insn->imm == BPF_FUNC_task_storage_get ||
14056 		    insn->imm == BPF_FUNC_sk_storage_get ||
14057 		    insn->imm == BPF_FUNC_inode_storage_get) {
14058 			if (env->prog->aux->sleepable)
14059 				insn_buf[0] = BPF_MOV64_IMM(BPF_REG_5, (__force __s32)GFP_KERNEL);
14060 			else
14061 				insn_buf[0] = BPF_MOV64_IMM(BPF_REG_5, (__force __s32)GFP_ATOMIC);
14062 			insn_buf[1] = *insn;
14063 			cnt = 2;
14064 
14065 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
14066 			if (!new_prog)
14067 				return -ENOMEM;
14068 
14069 			delta += cnt - 1;
14070 			env->prog = prog = new_prog;
14071 			insn = new_prog->insnsi + i + delta;
14072 			goto patch_call_imm;
14073 		}
14074 
14075 		/* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup
14076 		 * and other inlining handlers are currently limited to 64 bit
14077 		 * only.
14078 		 */
14079 		if (prog->jit_requested && BITS_PER_LONG == 64 &&
14080 		    (insn->imm == BPF_FUNC_map_lookup_elem ||
14081 		     insn->imm == BPF_FUNC_map_update_elem ||
14082 		     insn->imm == BPF_FUNC_map_delete_elem ||
14083 		     insn->imm == BPF_FUNC_map_push_elem   ||
14084 		     insn->imm == BPF_FUNC_map_pop_elem    ||
14085 		     insn->imm == BPF_FUNC_map_peek_elem   ||
14086 		     insn->imm == BPF_FUNC_redirect_map    ||
14087 		     insn->imm == BPF_FUNC_for_each_map_elem ||
14088 		     insn->imm == BPF_FUNC_map_lookup_percpu_elem)) {
14089 			aux = &env->insn_aux_data[i + delta];
14090 			if (bpf_map_ptr_poisoned(aux))
14091 				goto patch_call_imm;
14092 
14093 			map_ptr = BPF_MAP_PTR(aux->map_ptr_state);
14094 			ops = map_ptr->ops;
14095 			if (insn->imm == BPF_FUNC_map_lookup_elem &&
14096 			    ops->map_gen_lookup) {
14097 				cnt = ops->map_gen_lookup(map_ptr, insn_buf);
14098 				if (cnt == -EOPNOTSUPP)
14099 					goto patch_map_ops_generic;
14100 				if (cnt <= 0 || cnt >= ARRAY_SIZE(insn_buf)) {
14101 					verbose(env, "bpf verifier is misconfigured\n");
14102 					return -EINVAL;
14103 				}
14104 
14105 				new_prog = bpf_patch_insn_data(env, i + delta,
14106 							       insn_buf, cnt);
14107 				if (!new_prog)
14108 					return -ENOMEM;
14109 
14110 				delta    += cnt - 1;
14111 				env->prog = prog = new_prog;
14112 				insn      = new_prog->insnsi + i + delta;
14113 				continue;
14114 			}
14115 
14116 			BUILD_BUG_ON(!__same_type(ops->map_lookup_elem,
14117 				     (void *(*)(struct bpf_map *map, void *key))NULL));
14118 			BUILD_BUG_ON(!__same_type(ops->map_delete_elem,
14119 				     (int (*)(struct bpf_map *map, void *key))NULL));
14120 			BUILD_BUG_ON(!__same_type(ops->map_update_elem,
14121 				     (int (*)(struct bpf_map *map, void *key, void *value,
14122 					      u64 flags))NULL));
14123 			BUILD_BUG_ON(!__same_type(ops->map_push_elem,
14124 				     (int (*)(struct bpf_map *map, void *value,
14125 					      u64 flags))NULL));
14126 			BUILD_BUG_ON(!__same_type(ops->map_pop_elem,
14127 				     (int (*)(struct bpf_map *map, void *value))NULL));
14128 			BUILD_BUG_ON(!__same_type(ops->map_peek_elem,
14129 				     (int (*)(struct bpf_map *map, void *value))NULL));
14130 			BUILD_BUG_ON(!__same_type(ops->map_redirect,
14131 				     (int (*)(struct bpf_map *map, u32 ifindex, u64 flags))NULL));
14132 			BUILD_BUG_ON(!__same_type(ops->map_for_each_callback,
14133 				     (int (*)(struct bpf_map *map,
14134 					      bpf_callback_t callback_fn,
14135 					      void *callback_ctx,
14136 					      u64 flags))NULL));
14137 			BUILD_BUG_ON(!__same_type(ops->map_lookup_percpu_elem,
14138 				     (void *(*)(struct bpf_map *map, void *key, u32 cpu))NULL));
14139 
14140 patch_map_ops_generic:
14141 			switch (insn->imm) {
14142 			case BPF_FUNC_map_lookup_elem:
14143 				insn->imm = BPF_CALL_IMM(ops->map_lookup_elem);
14144 				continue;
14145 			case BPF_FUNC_map_update_elem:
14146 				insn->imm = BPF_CALL_IMM(ops->map_update_elem);
14147 				continue;
14148 			case BPF_FUNC_map_delete_elem:
14149 				insn->imm = BPF_CALL_IMM(ops->map_delete_elem);
14150 				continue;
14151 			case BPF_FUNC_map_push_elem:
14152 				insn->imm = BPF_CALL_IMM(ops->map_push_elem);
14153 				continue;
14154 			case BPF_FUNC_map_pop_elem:
14155 				insn->imm = BPF_CALL_IMM(ops->map_pop_elem);
14156 				continue;
14157 			case BPF_FUNC_map_peek_elem:
14158 				insn->imm = BPF_CALL_IMM(ops->map_peek_elem);
14159 				continue;
14160 			case BPF_FUNC_redirect_map:
14161 				insn->imm = BPF_CALL_IMM(ops->map_redirect);
14162 				continue;
14163 			case BPF_FUNC_for_each_map_elem:
14164 				insn->imm = BPF_CALL_IMM(ops->map_for_each_callback);
14165 				continue;
14166 			case BPF_FUNC_map_lookup_percpu_elem:
14167 				insn->imm = BPF_CALL_IMM(ops->map_lookup_percpu_elem);
14168 				continue;
14169 			}
14170 
14171 			goto patch_call_imm;
14172 		}
14173 
14174 		/* Implement bpf_jiffies64 inline. */
14175 		if (prog->jit_requested && BITS_PER_LONG == 64 &&
14176 		    insn->imm == BPF_FUNC_jiffies64) {
14177 			struct bpf_insn ld_jiffies_addr[2] = {
14178 				BPF_LD_IMM64(BPF_REG_0,
14179 					     (unsigned long)&jiffies),
14180 			};
14181 
14182 			insn_buf[0] = ld_jiffies_addr[0];
14183 			insn_buf[1] = ld_jiffies_addr[1];
14184 			insn_buf[2] = BPF_LDX_MEM(BPF_DW, BPF_REG_0,
14185 						  BPF_REG_0, 0);
14186 			cnt = 3;
14187 
14188 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf,
14189 						       cnt);
14190 			if (!new_prog)
14191 				return -ENOMEM;
14192 
14193 			delta    += cnt - 1;
14194 			env->prog = prog = new_prog;
14195 			insn      = new_prog->insnsi + i + delta;
14196 			continue;
14197 		}
14198 
14199 		/* Implement bpf_get_func_arg inline. */
14200 		if (prog_type == BPF_PROG_TYPE_TRACING &&
14201 		    insn->imm == BPF_FUNC_get_func_arg) {
14202 			/* Load nr_args from ctx - 8 */
14203 			insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8);
14204 			insn_buf[1] = BPF_JMP32_REG(BPF_JGE, BPF_REG_2, BPF_REG_0, 6);
14205 			insn_buf[2] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 3);
14206 			insn_buf[3] = BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1);
14207 			insn_buf[4] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0);
14208 			insn_buf[5] = BPF_STX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0);
14209 			insn_buf[6] = BPF_MOV64_IMM(BPF_REG_0, 0);
14210 			insn_buf[7] = BPF_JMP_A(1);
14211 			insn_buf[8] = BPF_MOV64_IMM(BPF_REG_0, -EINVAL);
14212 			cnt = 9;
14213 
14214 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
14215 			if (!new_prog)
14216 				return -ENOMEM;
14217 
14218 			delta    += cnt - 1;
14219 			env->prog = prog = new_prog;
14220 			insn      = new_prog->insnsi + i + delta;
14221 			continue;
14222 		}
14223 
14224 		/* Implement bpf_get_func_ret inline. */
14225 		if (prog_type == BPF_PROG_TYPE_TRACING &&
14226 		    insn->imm == BPF_FUNC_get_func_ret) {
14227 			if (eatype == BPF_TRACE_FEXIT ||
14228 			    eatype == BPF_MODIFY_RETURN) {
14229 				/* Load nr_args from ctx - 8 */
14230 				insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8);
14231 				insn_buf[1] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_0, 3);
14232 				insn_buf[2] = BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1);
14233 				insn_buf[3] = BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0);
14234 				insn_buf[4] = BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, 0);
14235 				insn_buf[5] = BPF_MOV64_IMM(BPF_REG_0, 0);
14236 				cnt = 6;
14237 			} else {
14238 				insn_buf[0] = BPF_MOV64_IMM(BPF_REG_0, -EOPNOTSUPP);
14239 				cnt = 1;
14240 			}
14241 
14242 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
14243 			if (!new_prog)
14244 				return -ENOMEM;
14245 
14246 			delta    += cnt - 1;
14247 			env->prog = prog = new_prog;
14248 			insn      = new_prog->insnsi + i + delta;
14249 			continue;
14250 		}
14251 
14252 		/* Implement bpf_get_func_arg_cnt inline. */
14253 		if (prog_type == BPF_PROG_TYPE_TRACING &&
14254 		    insn->imm == BPF_FUNC_get_func_arg_cnt) {
14255 			/* Load nr_args from ctx - 8 */
14256 			insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8);
14257 
14258 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 1);
14259 			if (!new_prog)
14260 				return -ENOMEM;
14261 
14262 			env->prog = prog = new_prog;
14263 			insn      = new_prog->insnsi + i + delta;
14264 			continue;
14265 		}
14266 
14267 		/* Implement bpf_get_func_ip inline. */
14268 		if (prog_type == BPF_PROG_TYPE_TRACING &&
14269 		    insn->imm == BPF_FUNC_get_func_ip) {
14270 			/* Load IP address from ctx - 16 */
14271 			insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -16);
14272 
14273 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 1);
14274 			if (!new_prog)
14275 				return -ENOMEM;
14276 
14277 			env->prog = prog = new_prog;
14278 			insn      = new_prog->insnsi + i + delta;
14279 			continue;
14280 		}
14281 
14282 patch_call_imm:
14283 		fn = env->ops->get_func_proto(insn->imm, env->prog);
14284 		/* all functions that have a prototype and that the verifier
14285 		 * allowed programs to call must be real in-kernel functions
14286 		 */
14287 		if (!fn->func) {
14288 			verbose(env,
14289 				"kernel subsystem misconfigured func %s#%d\n",
14290 				func_id_name(insn->imm), insn->imm);
14291 			return -EFAULT;
14292 		}
14293 		insn->imm = fn->func - __bpf_call_base;
14294 	}
14295 
14296 	/* Since poke tab is now finalized, publish aux to tracker. */
14297 	for (i = 0; i < prog->aux->size_poke_tab; i++) {
14298 		map_ptr = prog->aux->poke_tab[i].tail_call.map;
14299 		if (!map_ptr->ops->map_poke_track ||
14300 		    !map_ptr->ops->map_poke_untrack ||
14301 		    !map_ptr->ops->map_poke_run) {
14302 			verbose(env, "bpf verifier is misconfigured\n");
14303 			return -EINVAL;
14304 		}
14305 
14306 		ret = map_ptr->ops->map_poke_track(map_ptr, prog->aux);
14307 		if (ret < 0) {
14308 			verbose(env, "tracking tail call prog failed\n");
14309 			return ret;
14310 		}
14311 	}
14312 
14313 	sort_kfunc_descs_by_imm(env->prog);
14314 
14315 	return 0;
14316 }
14317 
14318 static void free_states(struct bpf_verifier_env *env)
14319 {
14320 	struct bpf_verifier_state_list *sl, *sln;
14321 	int i;
14322 
14323 	sl = env->free_list;
14324 	while (sl) {
14325 		sln = sl->next;
14326 		free_verifier_state(&sl->state, false);
14327 		kfree(sl);
14328 		sl = sln;
14329 	}
14330 	env->free_list = NULL;
14331 
14332 	if (!env->explored_states)
14333 		return;
14334 
14335 	for (i = 0; i < state_htab_size(env); i++) {
14336 		sl = env->explored_states[i];
14337 
14338 		while (sl) {
14339 			sln = sl->next;
14340 			free_verifier_state(&sl->state, false);
14341 			kfree(sl);
14342 			sl = sln;
14343 		}
14344 		env->explored_states[i] = NULL;
14345 	}
14346 }
14347 
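/* Common entry point for verifying the main program (subprog 0) or a single
 * global subprogram: allocate the initial verifier state, set up frame 0
 * (R1 = PTR_TO_CTX for the main program, argument registers according to
 * their BTF types for global functions) and run do_check() from there.
 */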
14348 static int do_check_common(struct bpf_verifier_env *env, int subprog)
14349 {
14350 	bool pop_log = !(env->log.level & BPF_LOG_LEVEL2);
14351 	struct bpf_verifier_state *state;
14352 	struct bpf_reg_state *regs;
14353 	int ret, i;
14354 
14355 	env->prev_linfo = NULL;
14356 	env->pass_cnt++;
14357 
14358 	state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL);
14359 	if (!state)
14360 		return -ENOMEM;
14361 	state->curframe = 0;
14362 	state->speculative = false;
14363 	state->branches = 1;
14364 	state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL);
14365 	if (!state->frame[0]) {
14366 		kfree(state);
14367 		return -ENOMEM;
14368 	}
14369 	env->cur_state = state;
14370 	init_func_state(env, state->frame[0],
14371 			BPF_MAIN_FUNC /* callsite */,
14372 			0 /* frameno */,
14373 			subprog);
14374 
14375 	regs = state->frame[state->curframe]->regs;
14376 	if (subprog || env->prog->type == BPF_PROG_TYPE_EXT) {
14377 		ret = btf_prepare_func_args(env, subprog, regs);
14378 		if (ret)
14379 			goto out;
14380 		for (i = BPF_REG_1; i <= BPF_REG_5; i++) {
14381 			if (regs[i].type == PTR_TO_CTX)
14382 				mark_reg_known_zero(env, regs, i);
14383 			else if (regs[i].type == SCALAR_VALUE)
14384 				mark_reg_unknown(env, regs, i);
14385 			else if (base_type(regs[i].type) == PTR_TO_MEM) {
14386 				const u32 mem_size = regs[i].mem_size;
14387 
14388 				mark_reg_known_zero(env, regs, i);
14389 				regs[i].mem_size = mem_size;
14390 				regs[i].id = ++env->id_gen;
14391 			}
14392 		}
14393 	} else {
14394 		/* 1st arg to a function */
14395 		regs[BPF_REG_1].type = PTR_TO_CTX;
14396 		mark_reg_known_zero(env, regs, BPF_REG_1);
14397 		ret = btf_check_subprog_arg_match(env, subprog, regs);
14398 		if (ret == -EFAULT)
14399 			/* unlikely verifier bug. abort.
14400 			 * ret == 0 and other ret < 0 values are sadly acceptable for
14401 			 * the main() function due to backward compatibility.
14402 			 * E.g. a socket filter program may be written as:
14403 			 * int bpf_prog(struct pt_regs *ctx)
14404 			 * and never dereference that ctx in the program.
14405 			 * 'struct pt_regs' is a type mismatch for a socket
14406 			 * filter, which should be using 'struct __sk_buff'.
14407 			 */
14408 			goto out;
14409 	}
14410 
14411 	ret = do_check(env);
14412 out:
14413 	/* check for NULL is necessary, since cur_state can be freed inside
14414 	 * do_check() under memory pressure.
14415 	 */
14416 	if (env->cur_state) {
14417 		free_verifier_state(env->cur_state, true);
14418 		env->cur_state = NULL;
14419 	}
14420 	while (!pop_stack(env, NULL, NULL, false));
14421 	if (!ret && pop_log)
14422 		bpf_vlog_reset(&env->log, 0);
14423 	free_states(env);
14424 	return ret;
14425 }
14426 
14427 /* Verify all global functions in a BPF program one by one based on their BTF.
14428  * All global functions must pass verification. Otherwise the whole program is rejected.
14429  * Consider:
14430  * int bar(int);
14431  * int foo(int f)
14432  * {
14433  *    return bar(f);
14434  * }
14435  * int bar(int b)
14436  * {
14437  *    ...
14438  * }
14439  * foo() will be verified first for R1=any_scalar_value. During verification it
14440  * will be assumed that bar() has already been verified successfully and the
14441  * call to bar() from foo() will be checked for type match only. Later bar()
14442  * will be verified independently to check that it's safe for R1=any_scalar_value.
14443  */
14444 static int do_check_subprogs(struct bpf_verifier_env *env)
14445 {
14446 	struct bpf_prog_aux *aux = env->prog->aux;
14447 	int i, ret;
14448 
14449 	if (!aux->func_info)
14450 		return 0;
14451 
14452 	for (i = 1; i < env->subprog_cnt; i++) {
14453 		if (aux->func_info_aux[i].linkage != BTF_FUNC_GLOBAL)
14454 			continue;
14455 		env->insn_idx = env->subprog_info[i].start;
14456 		WARN_ON_ONCE(env->insn_idx == 0);
14457 		ret = do_check_common(env, i);
14458 		if (ret) {
14459 			return ret;
14460 		} else if (env->log.level & BPF_LOG_LEVEL) {
14461 			verbose(env,
14462 				"Func#%d is safe for any args that match its prototype\n",
14463 				i);
14464 		}
14465 	}
14466 	return 0;
14467 }
14468 
14469 static int do_check_main(struct bpf_verifier_env *env)
14470 {
14471 	int ret;
14472 
14473 	env->insn_idx = 0;
14474 	ret = do_check_common(env, 0);
14475 	if (!ret)
14476 		env->prog->aux->stack_depth = env->subprog_info[0].stack_depth;
14477 	return ret;
14478 }
14479 
14480 
14481 static void print_verification_stats(struct bpf_verifier_env *env)
14482 {
14483 	int i;
14484 
14485 	if (env->log.level & BPF_LOG_STATS) {
14486 		verbose(env, "verification time %lld usec\n",
14487 			div_u64(env->verification_time, 1000));
14488 		verbose(env, "stack depth ");
14489 		for (i = 0; i < env->subprog_cnt; i++) {
14490 			u32 depth = env->subprog_info[i].stack_depth;
14491 
14492 			verbose(env, "%d", depth);
14493 			if (i + 1 < env->subprog_cnt)
14494 				verbose(env, "+");
14495 		}
14496 		verbose(env, "\n");
14497 	}
14498 	verbose(env, "processed %d insns (limit %d) max_states_per_insn %d "
14499 		"total_states %d peak_states %d mark_read %d\n",
14500 		env->insn_processed, BPF_COMPLEXITY_LIMIT_INSNS,
14501 		env->max_states_per_insn, env->total_states,
14502 		env->peak_states, env->longest_mark_read_walk);
14503 }
14504 
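/* For BPF_PROG_TYPE_STRUCT_OPS programs, expected_attach_type is the index
 * of the struct_ops member being implemented. Resolve that member's func
 * proto from BTF and switch env->ops to the struct_ops' verifier_ops.
 */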
14505 static int check_struct_ops_btf_id(struct bpf_verifier_env *env)
14506 {
14507 	const struct btf_type *t, *func_proto;
14508 	const struct bpf_struct_ops *st_ops;
14509 	const struct btf_member *member;
14510 	struct bpf_prog *prog = env->prog;
14511 	u32 btf_id, member_idx;
14512 	const char *mname;
14513 
14514 	if (!prog->gpl_compatible) {
14515 		verbose(env, "struct ops programs must have a GPL compatible license\n");
14516 		return -EINVAL;
14517 	}
14518 
14519 	btf_id = prog->aux->attach_btf_id;
14520 	st_ops = bpf_struct_ops_find(btf_id);
14521 	if (!st_ops) {
14522 		verbose(env, "attach_btf_id %u is not a supported struct\n",
14523 			btf_id);
14524 		return -ENOTSUPP;
14525 	}
14526 
14527 	t = st_ops->type;
14528 	member_idx = prog->expected_attach_type;
14529 	if (member_idx >= btf_type_vlen(t)) {
14530 		verbose(env, "attach to invalid member idx %u of struct %s\n",
14531 			member_idx, st_ops->name);
14532 		return -EINVAL;
14533 	}
14534 
14535 	member = &btf_type_member(t)[member_idx];
14536 	mname = btf_name_by_offset(btf_vmlinux, member->name_off);
14537 	func_proto = btf_type_resolve_func_ptr(btf_vmlinux, member->type,
14538 					       NULL);
14539 	if (!func_proto) {
14540 		verbose(env, "attach to invalid member %s(@idx %u) of struct %s\n",
14541 			mname, member_idx, st_ops->name);
14542 		return -EINVAL;
14543 	}
14544 
14545 	if (st_ops->check_member) {
14546 		int err = st_ops->check_member(t, member);
14547 
14548 		if (err) {
14549 			verbose(env, "attach to unsupported member %s of struct %s\n",
14550 				mname, st_ops->name);
14551 			return err;
14552 		}
14553 	}
14554 
14555 	prog->aux->attach_func_proto = func_proto;
14556 	prog->aux->attach_func_name = mname;
14557 	env->ops = st_ops->verifier_ops;
14558 
14559 	return 0;
14560 }
14561 #define SECURITY_PREFIX "security_"
14562 
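/* BPF_MODIFY_RETURN programs may only attach to functions that are on the
 * error injection list or to security_*() LSM hooks.
 */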
14563 static int check_attach_modify_return(unsigned long addr, const char *func_name)
14564 {
14565 	if (within_error_injection_list(addr) ||
14566 	    !strncmp(SECURITY_PREFIX, func_name, sizeof(SECURITY_PREFIX) - 1))
14567 		return 0;
14568 
14569 	return -EINVAL;
14570 }
14571 
14572 /* list of non-sleepable functions that are otherwise on
14573  * ALLOW_ERROR_INJECTION list
14574  */
14575 BTF_SET_START(btf_non_sleepable_error_inject)
14576 /* The three functions below can be called from both sleepable and non-sleepable
14577  * contexts. Assume non-sleepable from the BPF safety point of view.
14578  */
14579 BTF_ID(func, __filemap_add_folio)
14580 BTF_ID(func, should_fail_alloc_page)
14581 BTF_ID(func, should_failslab)
14582 BTF_SET_END(btf_non_sleepable_error_inject)
14583 
14584 static int check_non_sleepable_error_inject(u32 btf_id)
14585 {
14586 	return btf_id_set_contains(&btf_non_sleepable_error_inject, btf_id);
14587 }
14588 
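/* Validate the attach target of a tracing/LSM/EXT program. The target is
 * either a kernel symbol or another BPF program's (sub)function, identified
 * by btf_id. On success, fill tgt_info with the target's address, name,
 * BTF type and function model, which are later used to set up the
 * trampoline.
 */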
14589 int bpf_check_attach_target(struct bpf_verifier_log *log,
14590 			    const struct bpf_prog *prog,
14591 			    const struct bpf_prog *tgt_prog,
14592 			    u32 btf_id,
14593 			    struct bpf_attach_target_info *tgt_info)
14594 {
14595 	bool prog_extension = prog->type == BPF_PROG_TYPE_EXT;
14596 	const char prefix[] = "btf_trace_";
14597 	int ret = 0, subprog = -1, i;
14598 	const struct btf_type *t;
14599 	bool conservative = true;
14600 	const char *tname;
14601 	struct btf *btf;
14602 	long addr = 0;
14603 
14604 	if (!btf_id) {
14605 		bpf_log(log, "Tracing programs must provide btf_id\n");
14606 		return -EINVAL;
14607 	}
14608 	btf = tgt_prog ? tgt_prog->aux->btf : prog->aux->attach_btf;
14609 	if (!btf) {
14610 		bpf_log(log,
14611 			"FENTRY/FEXIT program can only be attached to another program annotated with BTF\n");
14612 		return -EINVAL;
14613 	}
14614 	t = btf_type_by_id(btf, btf_id);
14615 	if (!t) {
14616 		bpf_log(log, "attach_btf_id %u is invalid\n", btf_id);
14617 		return -EINVAL;
14618 	}
14619 	tname = btf_name_by_offset(btf, t->name_off);
14620 	if (!tname) {
14621 		bpf_log(log, "attach_btf_id %u doesn't have a name\n", btf_id);
14622 		return -EINVAL;
14623 	}
14624 	if (tgt_prog) {
14625 		struct bpf_prog_aux *aux = tgt_prog->aux;
14626 
14627 		for (i = 0; i < aux->func_info_cnt; i++)
14628 			if (aux->func_info[i].type_id == btf_id) {
14629 				subprog = i;
14630 				break;
14631 			}
14632 		if (subprog == -1) {
14633 			bpf_log(log, "Subprog %s doesn't exist\n", tname);
14634 			return -EINVAL;
14635 		}
14636 		conservative = aux->func_info_aux[subprog].unreliable;
14637 		if (prog_extension) {
14638 			if (conservative) {
14639 				bpf_log(log,
14640 					"Cannot replace static functions\n");
14641 				return -EINVAL;
14642 			}
14643 			if (!prog->jit_requested) {
14644 				bpf_log(log,
14645 					"Extension programs should be JITed\n");
14646 				return -EINVAL;
14647 			}
14648 		}
14649 		if (!tgt_prog->jited) {
14650 			bpf_log(log, "Can attach to only JITed progs\n");
14651 			return -EINVAL;
14652 		}
14653 		if (tgt_prog->type == prog->type) {
14654 			/* Cannot fentry/fexit another fentry/fexit program.
14655 			 * Cannot attach a program extension to another extension.
14656 			 * It's ok to attach fentry/fexit to an extension program.
14657 			 */
14658 			bpf_log(log, "Cannot recursively attach\n");
14659 			return -EINVAL;
14660 		}
14661 		if (tgt_prog->type == BPF_PROG_TYPE_TRACING &&
14662 		    prog_extension &&
14663 		    (tgt_prog->expected_attach_type == BPF_TRACE_FENTRY ||
14664 		     tgt_prog->expected_attach_type == BPF_TRACE_FEXIT)) {
14665 			/* Program extensions can extend all program types
14666 			 * except fentry/fexit. The reason is the following.
14667 			 * The fentry/fexit programs are used for performance
14668 			 * analysis and stats, and can be attached to any program
14669 			 * type except themselves. When an extension program
14670 			 * replaces an XDP function it is necessary to allow
14671 			 * performance analysis of all functions: both the original
14672 			 * XDP program and its program extension. Hence
14673 			 * attaching fentry/fexit to BPF_PROG_TYPE_EXT is
14674 			 * allowed. If extending fentry/fexit were allowed it
14675 			 * would be possible to create a long call chain
14676 			 * fentry->extension->fentry->extension beyond a
14677 			 * reasonable stack size. Hence extending fentry is not
14678 			 * allowed.
14679 			 */
14680 			bpf_log(log, "Cannot extend fentry/fexit\n");
14681 			return -EINVAL;
14682 		}
14683 	} else {
14684 		if (prog_extension) {
14685 			bpf_log(log, "Cannot replace kernel functions\n");
14686 			return -EINVAL;
14687 		}
14688 	}
14689 
14690 	switch (prog->expected_attach_type) {
14691 	case BPF_TRACE_RAW_TP:
14692 		if (tgt_prog) {
14693 			bpf_log(log,
14694 				"Only FENTRY/FEXIT progs are attachable to another BPF prog\n");
14695 			return -EINVAL;
14696 		}
14697 		if (!btf_type_is_typedef(t)) {
14698 			bpf_log(log, "attach_btf_id %u is not a typedef\n",
14699 				btf_id);
14700 			return -EINVAL;
14701 		}
14702 		if (strncmp(prefix, tname, sizeof(prefix) - 1)) {
14703 			bpf_log(log, "attach_btf_id %u points to wrong type name %s\n",
14704 				btf_id, tname);
14705 			return -EINVAL;
14706 		}
14707 		tname += sizeof(prefix) - 1;
14708 		t = btf_type_by_id(btf, t->type);
14709 		if (!btf_type_is_ptr(t))
14710 			/* should never happen in valid vmlinux build */
14711 			return -EINVAL;
14712 		t = btf_type_by_id(btf, t->type);
14713 		if (!btf_type_is_func_proto(t))
14714 			/* should never happen in valid vmlinux build */
14715 			return -EINVAL;
14716 
14717 		break;
14718 	case BPF_TRACE_ITER:
14719 		if (!btf_type_is_func(t)) {
14720 			bpf_log(log, "attach_btf_id %u is not a function\n",
14721 				btf_id);
14722 			return -EINVAL;
14723 		}
14724 		t = btf_type_by_id(btf, t->type);
14725 		if (!btf_type_is_func_proto(t))
14726 			return -EINVAL;
14727 		ret = btf_distill_func_proto(log, btf, t, tname, &tgt_info->fmodel);
14728 		if (ret)
14729 			return ret;
14730 		break;
14731 	default:
14732 		if (!prog_extension)
14733 			return -EINVAL;
14734 		fallthrough;
14735 	case BPF_MODIFY_RETURN:
14736 	case BPF_LSM_MAC:
14737 	case BPF_TRACE_FENTRY:
14738 	case BPF_TRACE_FEXIT:
14739 		if (!btf_type_is_func(t)) {
14740 			bpf_log(log, "attach_btf_id %u is not a function\n",
14741 				btf_id);
14742 			return -EINVAL;
14743 		}
14744 		if (prog_extension &&
14745 		    btf_check_type_match(log, prog, btf, t))
14746 			return -EINVAL;
14747 		t = btf_type_by_id(btf, t->type);
14748 		if (!btf_type_is_func_proto(t))
14749 			return -EINVAL;
14750 
14751 		if ((prog->aux->saved_dst_prog_type || prog->aux->saved_dst_attach_type) &&
14752 		    (!tgt_prog || prog->aux->saved_dst_prog_type != tgt_prog->type ||
14753 		     prog->aux->saved_dst_attach_type != tgt_prog->expected_attach_type))
14754 			return -EINVAL;
14755 
14756 		if (tgt_prog && conservative)
14757 			t = NULL;
14758 
14759 		ret = btf_distill_func_proto(log, btf, t, tname, &tgt_info->fmodel);
14760 		if (ret < 0)
14761 			return ret;
14762 
14763 		if (tgt_prog) {
14764 			if (subprog == 0)
14765 				addr = (long) tgt_prog->bpf_func;
14766 			else
14767 				addr = (long) tgt_prog->aux->func[subprog]->bpf_func;
14768 		} else {
14769 			addr = kallsyms_lookup_name(tname);
14770 			if (!addr) {
14771 				bpf_log(log,
14772 					"The address of function %s cannot be found\n",
14773 					tname);
14774 				return -ENOENT;
14775 			}
14776 		}
14777 
14778 		if (prog->aux->sleepable) {
14779 			ret = -EINVAL;
14780 			switch (prog->type) {
14781 			case BPF_PROG_TYPE_TRACING:
14782 				/* fentry/fexit/fmod_ret progs can be sleepable only if they are
14783 				 * attached to ALLOW_ERROR_INJECTION and are not in denylist.
14784 				 */
14785 				if (!check_non_sleepable_error_inject(btf_id) &&
14786 				    within_error_injection_list(addr))
14787 					ret = 0;
14788 				break;
14789 			case BPF_PROG_TYPE_LSM:
14790 				/* LSM progs check that they are attached to bpf_lsm_*() funcs.
14791 				 * Only some of them are sleepable.
14792 				 */
14793 				if (bpf_lsm_is_sleepable_hook(btf_id))
14794 					ret = 0;
14795 				break;
14796 			default:
14797 				break;
14798 			}
14799 			if (ret) {
14800 				bpf_log(log, "%s is not sleepable\n", tname);
14801 				return ret;
14802 			}
14803 		} else if (prog->expected_attach_type == BPF_MODIFY_RETURN) {
14804 			if (tgt_prog) {
14805 				bpf_log(log, "can't modify return codes of BPF programs\n");
14806 				return -EINVAL;
14807 			}
14808 			ret = check_attach_modify_return(addr, tname);
14809 			if (ret) {
14810 				bpf_log(log, "%s() is not modifiable\n", tname);
14811 				return ret;
14812 			}
14813 		}
14814 
14815 		break;
14816 	}
14817 	tgt_info->tgt_addr = addr;
14818 	tgt_info->tgt_name = tname;
14819 	tgt_info->tgt_type = t;
14820 	return 0;
14821 }
14822 
14823 BTF_SET_START(btf_id_deny)
14824 BTF_ID_UNUSED
14825 #ifdef CONFIG_SMP
14826 BTF_ID(func, migrate_disable)
14827 BTF_ID(func, migrate_enable)
14828 #endif
14829 #if !defined CONFIG_PREEMPT_RCU && !defined CONFIG_TINY_RCU
14830 BTF_ID(func, rcu_read_unlock_strict)
14831 #endif
14832 BTF_SET_END(btf_id_deny)
14833 
14834 static int check_attach_btf_id(struct bpf_verifier_env *env)
14835 {
14836 	struct bpf_prog *prog = env->prog;
14837 	struct bpf_prog *tgt_prog = prog->aux->dst_prog;
14838 	struct bpf_attach_target_info tgt_info = {};
14839 	u32 btf_id = prog->aux->attach_btf_id;
14840 	struct bpf_trampoline *tr;
14841 	int ret;
14842 	u64 key;
14843 
14844 	if (prog->type == BPF_PROG_TYPE_SYSCALL) {
14845 		if (prog->aux->sleepable)
14846 			/* attach_btf_id checked to be zero already */
14847 			return 0;
14848 		verbose(env, "Syscall programs can only be sleepable\n");
14849 		return -EINVAL;
14850 	}
14851 
14852 	if (prog->aux->sleepable && prog->type != BPF_PROG_TYPE_TRACING &&
14853 	    prog->type != BPF_PROG_TYPE_LSM && prog->type != BPF_PROG_TYPE_KPROBE) {
14854 		verbose(env, "Only fentry/fexit/fmod_ret, lsm, and kprobe/uprobe programs can be sleepable\n");
14855 		return -EINVAL;
14856 	}
14857 
14858 	if (prog->type == BPF_PROG_TYPE_STRUCT_OPS)
14859 		return check_struct_ops_btf_id(env);
14860 
14861 	if (prog->type != BPF_PROG_TYPE_TRACING &&
14862 	    prog->type != BPF_PROG_TYPE_LSM &&
14863 	    prog->type != BPF_PROG_TYPE_EXT)
14864 		return 0;
14865 
14866 	ret = bpf_check_attach_target(&env->log, prog, tgt_prog, btf_id, &tgt_info);
14867 	if (ret)
14868 		return ret;
14869 
14870 	if (tgt_prog && prog->type == BPF_PROG_TYPE_EXT) {
14871 		/* to make an freplace program equivalent to its target, it needs to
14872 		 * inherit env->ops and expected_attach_type for the rest of the
14873 		 * verification
14874 		 */
14875 		env->ops = bpf_verifier_ops[tgt_prog->type];
14876 		prog->expected_attach_type = tgt_prog->expected_attach_type;
14877 	}
14878 
14879 	/* store info about the attachment target that will be used later */
14880 	prog->aux->attach_func_proto = tgt_info.tgt_type;
14881 	prog->aux->attach_func_name = tgt_info.tgt_name;
14882 
14883 	if (tgt_prog) {
14884 		prog->aux->saved_dst_prog_type = tgt_prog->type;
14885 		prog->aux->saved_dst_attach_type = tgt_prog->expected_attach_type;
14886 	}
14887 
14888 	if (prog->expected_attach_type == BPF_TRACE_RAW_TP) {
14889 		prog->aux->attach_btf_trace = true;
14890 		return 0;
14891 	} else if (prog->expected_attach_type == BPF_TRACE_ITER) {
14892 		if (!bpf_iter_prog_supported(prog))
14893 			return -EINVAL;
14894 		return 0;
14895 	}
14896 
14897 	if (prog->type == BPF_PROG_TYPE_LSM) {
14898 		ret = bpf_lsm_verify_prog(&env->log, prog);
14899 		if (ret < 0)
14900 			return ret;
14901 	} else if (prog->type == BPF_PROG_TYPE_TRACING &&
14902 		   btf_id_set_contains(&btf_id_deny, btf_id)) {
14903 		return -EINVAL;
14904 	}
14905 
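	/* look up or create the trampoline for this attach target and remember
	 * it for the later attach step
	 */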
14906 	key = bpf_trampoline_compute_key(tgt_prog, prog->aux->attach_btf, btf_id);
14907 	tr = bpf_trampoline_get(key, &tgt_info);
14908 	if (!tr)
14909 		return -ENOMEM;
14910 
14911 	prog->aux->dst_trampoline = tr;
14912 	return 0;
14913 }
14914 
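/* Parse the vmlinux BTF on first use; bpf_verifier_lock serializes the
 * one-time initialization.
 */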
14915 struct btf *bpf_get_btf_vmlinux(void)
14916 {
14917 	if (!btf_vmlinux && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) {
14918 		mutex_lock(&bpf_verifier_lock);
14919 		if (!btf_vmlinux)
14920 			btf_vmlinux = btf_parse_vmlinux();
14921 		mutex_unlock(&bpf_verifier_lock);
14922 	}
14923 	return btf_vmlinux;
14924 }
14925 
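/* Main entry point of the verifier, invoked when a program is loaded. On
 * success the (possibly rewritten) program is returned through @prog.
 */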
14926 int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr)
14927 {
14928 	u64 start_time = ktime_get_ns();
14929 	struct bpf_verifier_env *env;
14930 	struct bpf_verifier_log *log;
14931 	int i, len, ret = -EINVAL;
14932 	bool is_priv;
14933 
14934 	/* if no program types are registered, no program can be valid */
14935 	if (ARRAY_SIZE(bpf_verifier_ops) == 0)
14936 		return -EINVAL;
14937 
14938 	/* 'struct bpf_verifier_env' could be a global, but since it's not small,
14939 	 * allocate/free it on every bpf_check() call
14940 	 */
14941 	env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
14942 	if (!env)
14943 		return -ENOMEM;
14944 	log = &env->log;
14945 
14946 	len = (*prog)->len;
14947 	env->insn_aux_data =
14948 		vzalloc(array_size(sizeof(struct bpf_insn_aux_data), len));
14949 	ret = -ENOMEM;
14950 	if (!env->insn_aux_data)
14951 		goto err_free_env;
14952 	for (i = 0; i < len; i++)
14953 		env->insn_aux_data[i].orig_idx = i;
14954 	env->prog = *prog;
14955 	env->ops = bpf_verifier_ops[env->prog->type];
14956 	env->fd_array = make_bpfptr(attr->fd_array, uattr.is_kernel);
14957 	is_priv = bpf_capable();
14958 
14959 	bpf_get_btf_vmlinux();
14960 
14961 	/* grab the mutex to protect a few globals used by the verifier */
14962 	if (!is_priv)
14963 		mutex_lock(&bpf_verifier_lock);
14964 
14965 	if (attr->log_level || attr->log_buf || attr->log_size) {
14966 		/* user requested verbose verifier output
14967 		 * and supplied a buffer to store the verification trace
14968 		 */
14969 		log->level = attr->log_level;
14970 		log->ubuf = (char __user *) (unsigned long) attr->log_buf;
14971 		log->len_total = attr->log_size;
14972 
14973 		/* log attributes have to be sane */
14974 		if (!bpf_verifier_log_attr_valid(log)) {
14975 			ret = -EINVAL;
14976 			goto err_unlock;
14977 		}
14978 	}
14979 
14980 	mark_verifier_state_clean(env);
14981 
14982 	if (IS_ERR(btf_vmlinux)) {
14983 		/* Either gcc or pahole or the kernel is broken. */
14984 		verbose(env, "in-kernel BTF is malformed\n");
14985 		ret = PTR_ERR(btf_vmlinux);
14986 		goto skip_full_check;
14987 	}
14988 
14989 	env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT);
14990 	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
14991 		env->strict_alignment = true;
14992 	if (attr->prog_flags & BPF_F_ANY_ALIGNMENT)
14993 		env->strict_alignment = false;
14994 
14995 	env->allow_ptr_leaks = bpf_allow_ptr_leaks();
14996 	env->allow_uninit_stack = bpf_allow_uninit_stack();
14997 	env->allow_ptr_to_map_access = bpf_allow_ptr_to_map_access();
14998 	env->bypass_spec_v1 = bpf_bypass_spec_v1();
14999 	env->bypass_spec_v4 = bpf_bypass_spec_v4();
15000 	env->bpf_capable = bpf_capable();
15001 
15002 	if (is_priv)
15003 		env->test_state_freq = attr->prog_flags & BPF_F_TEST_STATE_FREQ;
15004 
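	/* hash table of already-explored states, used to prune the search */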
15005 	env->explored_states = kvcalloc(state_htab_size(env),
15006 				       sizeof(struct bpf_verifier_state_list *),
15007 				       GFP_USER);
15008 	ret = -ENOMEM;
15009 	if (!env->explored_states)
15010 		goto skip_full_check;
15011 
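	/* preparatory passes: discover subprograms and kfunc calls, validate
	 * BTF info and the attach target, and resolve map/BTF references in
	 * ld_imm64 instructions before the main instruction walk
	 */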
15012 	ret = add_subprog_and_kfunc(env);
15013 	if (ret < 0)
15014 		goto skip_full_check;
15015 
15016 	ret = check_subprogs(env);
15017 	if (ret < 0)
15018 		goto skip_full_check;
15019 
15020 	ret = check_btf_info(env, attr, uattr);
15021 	if (ret < 0)
15022 		goto skip_full_check;
15023 
15024 	ret = check_attach_btf_id(env);
15025 	if (ret)
15026 		goto skip_full_check;
15027 
15028 	ret = resolve_pseudo_ldimm64(env);
15029 	if (ret < 0)
15030 		goto skip_full_check;
15031 
15032 	if (bpf_prog_is_dev_bound(env->prog->aux)) {
15033 		ret = bpf_prog_offload_verifier_prep(env->prog);
15034 		if (ret)
15035 			goto skip_full_check;
15036 	}
15037 
15038 	ret = check_cfg(env);
15039 	if (ret < 0)
15040 		goto skip_full_check;
15041 
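	/* the main verification walk: check global subprograms on their own,
	 * then the main program together with any static subprograms it calls
	 */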
15042 	ret = do_check_subprogs(env);
15043 	ret = ret ?: do_check_main(env);
15044 
15045 	if (ret == 0 && bpf_prog_is_dev_bound(env->prog->aux))
15046 		ret = bpf_prog_offload_finalize(env);
15047 
15048 skip_full_check:
15049 	kvfree(env->explored_states);
15050 
15051 	if (ret == 0)
15052 		ret = check_max_stack_depth(env);
15053 
15054 	/* instruction rewrites happen after this point */
15055 	if (is_priv) {
15056 		if (ret == 0)
15057 			opt_hard_wire_dead_code_branches(env);
15058 		if (ret == 0)
15059 			ret = opt_remove_dead_code(env);
15060 		if (ret == 0)
15061 			ret = opt_remove_nops(env);
15062 	} else {
15063 		if (ret == 0)
15064 			sanitize_dead_code(env);
15065 	}
15066 
15067 	if (ret == 0)
15068 		/* program is valid, convert *(u32*)(ctx + off) accesses */
15069 		ret = convert_ctx_accesses(env);
15070 
15071 	if (ret == 0)
15072 		ret = do_misc_fixups(env);
15073 
15074 	/* do the 32-bit optimization after insn patching is done so that the
15075 	 * patched insns are handled correctly.
15076 	 */
15077 	if (ret == 0 && !bpf_prog_is_dev_bound(env->prog->aux)) {
15078 		ret = opt_subreg_zext_lo32_rnd_hi32(env, attr);
15079 		env->prog->aux->verifier_zext = bpf_jit_needs_zext() ? !ret
15080 								     : false;
15081 	}
15082 
15083 	if (ret == 0)
15084 		ret = fixup_call_args(env);
15085 
15086 	env->verification_time = ktime_get_ns() - start_time;
15087 	print_verification_stats(env);
15088 	env->prog->aux->verified_insns = env->insn_processed;
15089 
15090 	if (log->level && bpf_verifier_log_full(log))
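	/* a full log means the verifier trace was truncated; a cleared ubuf
	 * means copying the log to user space failed
	 */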
15091 		ret = -ENOSPC;
15092 	if (log->level && !log->ubuf) {
15093 		ret = -EFAULT;
15094 		goto err_release_maps;
15095 	}
15096 
15097 	if (ret)
15098 		goto err_release_maps;
15099 
15100 	if (env->used_map_cnt) {
15101 		/* if program passed verifier, update used_maps in bpf_prog_info */
15102 		env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt,
15103 							  sizeof(env->used_maps[0]),
15104 							  GFP_KERNEL);
15105 
15106 		if (!env->prog->aux->used_maps) {
15107 			ret = -ENOMEM;
15108 			goto err_release_maps;
15109 		}
15110 
15111 		memcpy(env->prog->aux->used_maps, env->used_maps,
15112 		       sizeof(env->used_maps[0]) * env->used_map_cnt);
15113 		env->prog->aux->used_map_cnt = env->used_map_cnt;
15114 	}
15115 	if (env->used_btf_cnt) {
15116 		/* if program passed verifier, update used_btfs in bpf_prog_aux */
15117 		env->prog->aux->used_btfs = kmalloc_array(env->used_btf_cnt,
15118 							  sizeof(env->used_btfs[0]),
15119 							  GFP_KERNEL);
15120 		if (!env->prog->aux->used_btfs) {
15121 			ret = -ENOMEM;
15122 			goto err_release_maps;
15123 		}
15124 
15125 		memcpy(env->prog->aux->used_btfs, env->used_btfs,
15126 		       sizeof(env->used_btfs[0]) * env->used_btf_cnt);
15127 		env->prog->aux->used_btf_cnt = env->used_btf_cnt;
15128 	}
15129 	if (env->used_map_cnt || env->used_btf_cnt) {
15130 		/* program is valid. Convert pseudo bpf_ld_imm64 into generic
15131 		 * bpf_ld_imm64 instructions
15132 		 */
15133 		convert_pseudo_ld_imm64(env);
15134 	}
15135 
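	/* re-sync the BTF func_info offsets with the rewritten instruction stream */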
15136 	adjust_btf_func(env);
15137 
15138 err_release_maps:
15139 	if (!env->prog->aux->used_maps)
15140 		/* if we didn't copy map pointers into bpf_prog_info, release
15141 		 * them now. Otherwise free_used_maps() will release them.
15142 		 */
15143 		release_maps(env);
15144 	if (!env->prog->aux->used_btfs)
15145 		release_btfs(env);
15146 
15147 	/* extension progs temporarily inherit the attach_type of their targets
15148 	 * for verification purposes, so set it back to zero before returning
15149 	 */
15150 	if (env->prog->type == BPF_PROG_TYPE_EXT)
15151 		env->prog->expected_attach_type = 0;
15152 
15153 	*prog = env->prog;
15154 err_unlock:
15155 	if (!is_priv)
15156 		mutex_unlock(&bpf_verifier_lock);
15157 	vfree(env->insn_aux_data);
15158 err_free_env:
15159 	kfree(env);
15160 	return ret;
15161 }
15162