xref: /openbmc/linux/kernel/bpf/verifier.c (revision c10d12e3)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
3  * Copyright (c) 2016 Facebook
4  * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
5  */
6 #include <uapi/linux/btf.h>
7 #include <linux/bpf-cgroup.h>
8 #include <linux/kernel.h>
9 #include <linux/types.h>
10 #include <linux/slab.h>
11 #include <linux/bpf.h>
12 #include <linux/btf.h>
13 #include <linux/bpf_verifier.h>
14 #include <linux/filter.h>
15 #include <net/netlink.h>
16 #include <linux/file.h>
17 #include <linux/vmalloc.h>
18 #include <linux/stringify.h>
19 #include <linux/bsearch.h>
20 #include <linux/sort.h>
21 #include <linux/perf_event.h>
22 #include <linux/ctype.h>
23 #include <linux/error-injection.h>
24 #include <linux/bpf_lsm.h>
25 #include <linux/btf_ids.h>
26 
27 #include "disasm.h"
28 
29 static const struct bpf_verifier_ops * const bpf_verifier_ops[] = {
30 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
31 	[_id] = & _name ## _verifier_ops,
32 #define BPF_MAP_TYPE(_id, _ops)
33 #define BPF_LINK_TYPE(_id, _name)
34 #include <linux/bpf_types.h>
35 #undef BPF_PROG_TYPE
36 #undef BPF_MAP_TYPE
37 #undef BPF_LINK_TYPE
38 };
39 
40 /* bpf_check() is a static code analyzer that walks eBPF program
41  * instruction by instruction and updates register/stack state.
42  * All paths of conditional branches are analyzed until 'bpf_exit' insn.
43  *
44  * The first pass is depth-first-search to check that the program is a DAG.
45  * It rejects the following programs:
46  * - larger than BPF_MAXINSNS insns
47  * - if loop is present (detected via back-edge)
48  * - unreachable insns exist (shouldn't be a forest. program = one function)
49  * - out of bounds or malformed jumps
50  * The second pass is all possible path descent from the 1st insn.
51  * Since it's analyzing all paths through the program, the length of the
52  * analysis is limited to 64k insn, which may be hit even if total number of
53  * insn is less than 4K, but there are too many branches that change stack/regs.
54  * Number of 'branches to be analyzed' is limited to 1k
55  *
56  * On entry to each instruction, each register has a type, and the instruction
57  * changes the types of the registers depending on instruction semantics.
58  * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is
59  * copied to R1.
60  *
61  * All registers are 64-bit.
62  * R0 - return register
63  * R1-R5 argument passing registers
64  * R6-R9 callee saved registers
65  * R10 - frame pointer read-only
66  *
67  * At the start of BPF program the register R1 contains a pointer to bpf_context
68  * and has type PTR_TO_CTX.
69  *
70  * Verifier tracks arithmetic operations on pointers in case:
71  *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
72  *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
73  * 1st insn copies R10 (which has FRAME_PTR) type into R1
74  * and 2nd arithmetic instruction is pattern matched to recognize
75  * that it wants to construct a pointer to some element within stack.
76  * So after 2nd insn, the register R1 has type PTR_TO_STACK
77  * (and -20 constant is saved for further stack bounds checking).
78  * Meaning that this reg is a pointer to stack plus known immediate constant.
79  *
80  * Most of the time the registers have SCALAR_VALUE type, which
81  * means the register has some value, but it's not a valid pointer.
82  * (like pointer plus pointer becomes SCALAR_VALUE type)
83  *
84  * When verifier sees load or store instructions the type of base register
85  * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK, PTR_TO_SOCKET. These are
86  * four pointer types recognized by check_mem_access() function.
87  *
88  * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value'
89  * and the range of [ptr, ptr + map's value_size) is accessible.
90  *
91  * Registers used to pass values to function calls are checked against
92  * function argument constraints.
93  *
94  * ARG_PTR_TO_MAP_KEY is one such argument constraint.
95  * It means that the register type passed to this function must be
96  * PTR_TO_STACK and it will be used inside the function as
97  * 'pointer to map element key'
98  *
99  * For example the argument constraints for bpf_map_lookup_elem():
100  *   .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
101  *   .arg1_type = ARG_CONST_MAP_PTR,
102  *   .arg2_type = ARG_PTR_TO_MAP_KEY,
103  *
104  * ret_type says that this function returns 'pointer to map elem value or null',
105  * the function expects the 1st argument to be a const pointer to 'struct bpf_map'
106  * and the 2nd argument to be a pointer to the stack, which will be used inside
107  * the helper function as a pointer to the map element key.
108  *
109  * On the kernel side the helper function looks like:
110  * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
111  * {
112  *    struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
113  *    void *key = (void *) (unsigned long) r2;
114  *    void *value;
115  *
116  *    here kernel can access 'key' and 'map' pointers safely, knowing that
117  *    [key, key + map->key_size) bytes are valid and were initialized on
118  *    the stack of eBPF program.
119  * }
120  *
121  * Corresponding eBPF program may look like:
122  *    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),  // after this insn R2 type is FRAME_PTR
123  *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
124  *    BPF_LD_MAP_FD(BPF_REG_1, map_fd),      // after this insn R1 type is CONST_PTR_TO_MAP
125  *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
126  * here verifier looks at prototype of map_lookup_elem() and sees:
127  * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok,
128  * Now verifier knows that this map has key of R1->map_ptr->key_size bytes
129  *
130  * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far,
131  * Now verifier checks that [R2, R2 + map's key_size) are within stack limits
132  * and were initialized prior to this call.
133  * If it's ok, then verifier allows this BPF_CALL insn and looks at
134  * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets
135  * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function
136  * returns either pointer to map value or NULL.
137  *
138  * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
139  * insn, the register holding that pointer in the true branch changes state to
140  * PTR_TO_MAP_VALUE and the same register becomes a known-zero SCALAR_VALUE in
141  * the false branch. See check_cond_jmp_op().
142  *
143  * After the call R0 is set to return type of the function and registers R1-R5
144  * are set to NOT_INIT to indicate that they are no longer readable.
145  *
146  * The following reference types represent a potential reference to a kernel
147  * resource which, after first being allocated, must be checked and freed by
148  * the BPF program:
149  * - PTR_TO_SOCKET_OR_NULL, PTR_TO_SOCKET
150  *
151  * When the verifier sees a helper call return a reference type, it allocates a
152  * pointer id for the reference and stores it in the current function state.
153  * Similar to the way that PTR_TO_MAP_VALUE_OR_NULL is converted into
154  * PTR_TO_MAP_VALUE, PTR_TO_SOCKET_OR_NULL becomes PTR_TO_SOCKET when the type
155  * passes through a NULL-check conditional. For the branch in which the pointer
156  * is known to be NULL (a known-zero scalar), the verifier releases the reference.
157  *
158  * For each helper function that allocates a reference, such as
159  * bpf_sk_lookup_tcp(), there is a corresponding release function, such as
160  * bpf_sk_release(). When a reference type passes into the release function,
161  * the verifier also releases the reference. If any unchecked or unreleased
162  * reference remains at the end of the program, the verifier rejects it.
163  */
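
/* An illustrative sketch (not a real program) of the acquire/check/release
 * pattern described above, assuming the bpf_sk_lookup_tcp()/bpf_sk_release()
 * helper pair:
 *
 *    struct bpf_sock *sk;
 *
 *    sk = bpf_sk_lookup_tcp(ctx, &tuple, sizeof(tuple.ipv4),
 *                           BPF_F_CURRENT_NETNS, 0);
 *    // R0 is PTR_TO_SOCKET_OR_NULL and a reference id is acquired
 *    if (!sk)
 *        return 0;       // NULL branch: the verifier releases the reference id
 *    ...                 // here sk has type PTR_TO_SOCKET and can be used
 *    bpf_sk_release(sk); // release function: the reference id is released
 *    return 0;
 */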
164 
165 /* verifier_state + insn_idx are pushed to stack when branch is encountered */
166 struct bpf_verifier_stack_elem {
167 	/* verifier state is 'st'
168 	 * before processing instruction 'insn_idx'
169 	 * and after processing instruction 'prev_insn_idx'
170 	 */
171 	struct bpf_verifier_state st;
172 	int insn_idx;
173 	int prev_insn_idx;
174 	struct bpf_verifier_stack_elem *next;
175 	/* length of verifier log at the time this state was pushed on stack */
176 	u32 log_pos;
177 };
178 
179 #define BPF_COMPLEXITY_LIMIT_JMP_SEQ	8192
180 #define BPF_COMPLEXITY_LIMIT_STATES	64
181 
182 #define BPF_MAP_KEY_POISON	(1ULL << 63)
183 #define BPF_MAP_KEY_SEEN	(1ULL << 62)
184 
185 #define BPF_MAP_PTR_UNPRIV	1UL
186 #define BPF_MAP_PTR_POISON	((void *)((0xeB9FUL << 1) +	\
187 					  POISON_POINTER_DELTA))
188 #define BPF_MAP_PTR(X)		((struct bpf_map *)((X) & ~BPF_MAP_PTR_UNPRIV))
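
/* A rough sketch of the encodings used by the macros above (derived from the
 * accessors below, not a separate spec): aux->map_ptr_state holds the
 * struct bpf_map pointer with BPF_MAP_PTR_UNPRIV stashed in bit 0 (the pointer
 * is sufficiently aligned for that bit to be free), or BPF_MAP_PTR_POISON when
 * the map pointer can not be trusted; aux->map_key_state holds a constant key
 * value in the low bits, with BPF_MAP_KEY_SEEN (bit 62) and BPF_MAP_KEY_POISON
 * (bit 63) used as flags.
 */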
189 
190 static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux)
191 {
192 	return BPF_MAP_PTR(aux->map_ptr_state) == BPF_MAP_PTR_POISON;
193 }
194 
195 static bool bpf_map_ptr_unpriv(const struct bpf_insn_aux_data *aux)
196 {
197 	return aux->map_ptr_state & BPF_MAP_PTR_UNPRIV;
198 }
199 
200 static void bpf_map_ptr_store(struct bpf_insn_aux_data *aux,
201 			      const struct bpf_map *map, bool unpriv)
202 {
203 	BUILD_BUG_ON((unsigned long)BPF_MAP_PTR_POISON & BPF_MAP_PTR_UNPRIV);
204 	unpriv |= bpf_map_ptr_unpriv(aux);
205 	aux->map_ptr_state = (unsigned long)map |
206 			     (unpriv ? BPF_MAP_PTR_UNPRIV : 0UL);
207 }
208 
209 static bool bpf_map_key_poisoned(const struct bpf_insn_aux_data *aux)
210 {
211 	return aux->map_key_state & BPF_MAP_KEY_POISON;
212 }
213 
214 static bool bpf_map_key_unseen(const struct bpf_insn_aux_data *aux)
215 {
216 	return !(aux->map_key_state & BPF_MAP_KEY_SEEN);
217 }
218 
219 static u64 bpf_map_key_immediate(const struct bpf_insn_aux_data *aux)
220 {
221 	return aux->map_key_state & ~(BPF_MAP_KEY_SEEN | BPF_MAP_KEY_POISON);
222 }
223 
224 static void bpf_map_key_store(struct bpf_insn_aux_data *aux, u64 state)
225 {
226 	bool poisoned = bpf_map_key_poisoned(aux);
227 
228 	aux->map_key_state = state | BPF_MAP_KEY_SEEN |
229 			     (poisoned ? BPF_MAP_KEY_POISON : 0ULL);
230 }
231 
232 static bool bpf_pseudo_call(const struct bpf_insn *insn)
233 {
234 	return insn->code == (BPF_JMP | BPF_CALL) &&
235 	       insn->src_reg == BPF_PSEUDO_CALL;
236 }
237 
238 static bool bpf_pseudo_kfunc_call(const struct bpf_insn *insn)
239 {
240 	return insn->code == (BPF_JMP | BPF_CALL) &&
241 	       insn->src_reg == BPF_PSEUDO_KFUNC_CALL;
242 }
243 
244 struct bpf_call_arg_meta {
245 	struct bpf_map *map_ptr;
246 	bool raw_mode;
247 	bool pkt_access;
248 	int regno;
249 	int access_size;
250 	int mem_size;
251 	u64 msize_max_value;
252 	int ref_obj_id;
253 	int map_uid;
254 	int func_id;
255 	struct btf *btf;
256 	u32 btf_id;
257 	struct btf *ret_btf;
258 	u32 ret_btf_id;
259 	u32 subprogno;
260 };
261 
262 struct btf *btf_vmlinux;
263 
264 static DEFINE_MUTEX(bpf_verifier_lock);
265 
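/* Return the line info entry covering insn_off, i.e. the entry with the
 * largest insn_off that does not exceed the requested offset, or NULL if the
 * program has no line info or insn_off is out of range.
 */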
266 static const struct bpf_line_info *
267 find_linfo(const struct bpf_verifier_env *env, u32 insn_off)
268 {
269 	const struct bpf_line_info *linfo;
270 	const struct bpf_prog *prog;
271 	u32 i, nr_linfo;
272 
273 	prog = env->prog;
274 	nr_linfo = prog->aux->nr_linfo;
275 
276 	if (!nr_linfo || insn_off >= prog->len)
277 		return NULL;
278 
279 	linfo = prog->aux->linfo;
280 	for (i = 1; i < nr_linfo; i++)
281 		if (insn_off < linfo[i].insn_off)
282 			break;
283 
284 	return &linfo[i - 1];
285 }
286 
287 void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt,
288 		       va_list args)
289 {
290 	unsigned int n;
291 
292 	n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args);
293 
294 	WARN_ONCE(n >= BPF_VERIFIER_TMP_LOG_SIZE - 1,
295 		  "verifier log line truncated - local buffer too short\n");
296 
297 	if (log->level == BPF_LOG_KERNEL) {
298 		bool newline = n > 0 && log->kbuf[n - 1] == '\n';
299 
300 		pr_err("BPF: %s%s", log->kbuf, newline ? "" : "\n");
301 		return;
302 	}
303 
304 	n = min(log->len_total - log->len_used - 1, n);
305 	log->kbuf[n] = '\0';
306 	if (!copy_to_user(log->ubuf + log->len_used, log->kbuf, n + 1))
307 		log->len_used += n;
308 	else
309 		log->ubuf = NULL;
310 }
311 
312 static void bpf_vlog_reset(struct bpf_verifier_log *log, u32 new_pos)
313 {
314 	char zero = 0;
315 
316 	if (!bpf_verifier_log_needed(log))
317 		return;
318 
319 	log->len_used = new_pos;
320 	if (put_user(zero, log->ubuf + new_pos))
321 		log->ubuf = NULL;
322 }
323 
324 /* log_level controls the verbosity level of the eBPF verifier.
325  * bpf_verifier_log_write() is used to dump the verification trace to the log,
326  * so the user can figure out what's wrong with the program.
327  */
328 __printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
329 					   const char *fmt, ...)
330 {
331 	va_list args;
332 
333 	if (!bpf_verifier_log_needed(&env->log))
334 		return;
335 
336 	va_start(args, fmt);
337 	bpf_verifier_vlog(&env->log, fmt, args);
338 	va_end(args);
339 }
340 EXPORT_SYMBOL_GPL(bpf_verifier_log_write);
341 
342 __printf(2, 3) static void verbose(void *private_data, const char *fmt, ...)
343 {
344 	struct bpf_verifier_env *env = private_data;
345 	va_list args;
346 
347 	if (!bpf_verifier_log_needed(&env->log))
348 		return;
349 
350 	va_start(args, fmt);
351 	bpf_verifier_vlog(&env->log, fmt, args);
352 	va_end(args);
353 }
354 
355 __printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
356 			    const char *fmt, ...)
357 {
358 	va_list args;
359 
360 	if (!bpf_verifier_log_needed(log))
361 		return;
362 
363 	va_start(args, fmt);
364 	bpf_verifier_vlog(log, fmt, args);
365 	va_end(args);
366 }
367 
368 static const char *ltrim(const char *s)
369 {
370 	while (isspace(*s))
371 		s++;
372 
373 	return s;
374 }
375 
376 __printf(3, 4) static void verbose_linfo(struct bpf_verifier_env *env,
377 					 u32 insn_off,
378 					 const char *prefix_fmt, ...)
379 {
380 	const struct bpf_line_info *linfo;
381 
382 	if (!bpf_verifier_log_needed(&env->log))
383 		return;
384 
385 	linfo = find_linfo(env, insn_off);
386 	if (!linfo || linfo == env->prev_linfo)
387 		return;
388 
389 	if (prefix_fmt) {
390 		va_list args;
391 
392 		va_start(args, prefix_fmt);
393 		bpf_verifier_vlog(&env->log, prefix_fmt, args);
394 		va_end(args);
395 	}
396 
397 	verbose(env, "%s\n",
398 		ltrim(btf_name_by_offset(env->prog->aux->btf,
399 					 linfo->line_off)));
400 
401 	env->prev_linfo = linfo;
402 }
403 
404 static void verbose_invalid_scalar(struct bpf_verifier_env *env,
405 				   struct bpf_reg_state *reg,
406 				   struct tnum *range, const char *ctx,
407 				   const char *reg_name)
408 {
409 	char tn_buf[48];
410 
411 	verbose(env, "At %s the register %s ", ctx, reg_name);
412 	if (!tnum_is_unknown(reg->var_off)) {
413 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
414 		verbose(env, "has value %s", tn_buf);
415 	} else {
416 		verbose(env, "has unknown scalar value");
417 	}
418 	tnum_strn(tn_buf, sizeof(tn_buf), *range);
419 	verbose(env, " should have been in %s\n", tn_buf);
420 }
421 
422 static bool type_is_pkt_pointer(enum bpf_reg_type type)
423 {
424 	return type == PTR_TO_PACKET ||
425 	       type == PTR_TO_PACKET_META;
426 }
427 
428 static bool type_is_sk_pointer(enum bpf_reg_type type)
429 {
430 	return type == PTR_TO_SOCKET ||
431 		type == PTR_TO_SOCK_COMMON ||
432 		type == PTR_TO_TCP_SOCK ||
433 		type == PTR_TO_XDP_SOCK;
434 }
435 
436 static bool reg_type_not_null(enum bpf_reg_type type)
437 {
438 	return type == PTR_TO_SOCKET ||
439 		type == PTR_TO_TCP_SOCK ||
440 		type == PTR_TO_MAP_VALUE ||
441 		type == PTR_TO_MAP_KEY ||
442 		type == PTR_TO_SOCK_COMMON;
443 }
444 
445 static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg)
446 {
447 	return reg->type == PTR_TO_MAP_VALUE &&
448 		map_value_has_spin_lock(reg->map_ptr);
449 }
450 
451 static bool reg_type_may_be_refcounted_or_null(enum bpf_reg_type type)
452 {
453 	return base_type(type) == PTR_TO_SOCKET ||
454 		base_type(type) == PTR_TO_TCP_SOCK ||
455 		base_type(type) == PTR_TO_MEM ||
456 		base_type(type) == PTR_TO_BTF_ID;
457 }
458 
459 static bool type_is_rdonly_mem(u32 type)
460 {
461 	return type & MEM_RDONLY;
462 }
463 
464 static bool arg_type_may_be_refcounted(enum bpf_arg_type type)
465 {
466 	return type == ARG_PTR_TO_SOCK_COMMON;
467 }
468 
469 static bool type_may_be_null(u32 type)
470 {
471 	return type & PTR_MAYBE_NULL;
472 }
473 
474 /* Determine whether the function releases some resources allocated by another
475  * function call. The first reference type argument will be assumed to be
476  * released by release_reference().
477  */
478 static bool is_release_function(enum bpf_func_id func_id)
479 {
480 	return func_id == BPF_FUNC_sk_release ||
481 	       func_id == BPF_FUNC_ringbuf_submit ||
482 	       func_id == BPF_FUNC_ringbuf_discard;
483 }
484 
485 static bool may_be_acquire_function(enum bpf_func_id func_id)
486 {
487 	return func_id == BPF_FUNC_sk_lookup_tcp ||
488 		func_id == BPF_FUNC_sk_lookup_udp ||
489 		func_id == BPF_FUNC_skc_lookup_tcp ||
490 		func_id == BPF_FUNC_map_lookup_elem ||
491 	        func_id == BPF_FUNC_ringbuf_reserve;
492 }
493 
494 static bool is_acquire_function(enum bpf_func_id func_id,
495 				const struct bpf_map *map)
496 {
497 	enum bpf_map_type map_type = map ? map->map_type : BPF_MAP_TYPE_UNSPEC;
498 
499 	if (func_id == BPF_FUNC_sk_lookup_tcp ||
500 	    func_id == BPF_FUNC_sk_lookup_udp ||
501 	    func_id == BPF_FUNC_skc_lookup_tcp ||
502 	    func_id == BPF_FUNC_ringbuf_reserve)
503 		return true;
504 
505 	if (func_id == BPF_FUNC_map_lookup_elem &&
506 	    (map_type == BPF_MAP_TYPE_SOCKMAP ||
507 	     map_type == BPF_MAP_TYPE_SOCKHASH))
508 		return true;
509 
510 	return false;
511 }
512 
513 static bool is_ptr_cast_function(enum bpf_func_id func_id)
514 {
515 	return func_id == BPF_FUNC_tcp_sock ||
516 		func_id == BPF_FUNC_sk_fullsock ||
517 		func_id == BPF_FUNC_skc_to_tcp_sock ||
518 		func_id == BPF_FUNC_skc_to_tcp6_sock ||
519 		func_id == BPF_FUNC_skc_to_udp6_sock ||
520 		func_id == BPF_FUNC_skc_to_tcp_timewait_sock ||
521 		func_id == BPF_FUNC_skc_to_tcp_request_sock;
522 }
523 
524 static bool is_cmpxchg_insn(const struct bpf_insn *insn)
525 {
526 	return BPF_CLASS(insn->code) == BPF_STX &&
527 	       BPF_MODE(insn->code) == BPF_ATOMIC &&
528 	       insn->imm == BPF_CMPXCHG;
529 }
530 
531 /* string representation of 'enum bpf_reg_type'
532  *
533  * Note that reg_type_str() cannot appear more than once in a single verbose()
534  * statement, since each call formats into the single shared env->type_str_buf.
535  */
536 static const char *reg_type_str(struct bpf_verifier_env *env,
537 				enum bpf_reg_type type)
538 {
539 	char postfix[16] = {0}, prefix[32] = {0};
540 	static const char * const str[] = {
541 		[NOT_INIT]		= "?",
542 		[SCALAR_VALUE]		= "scalar",
543 		[PTR_TO_CTX]		= "ctx",
544 		[CONST_PTR_TO_MAP]	= "map_ptr",
545 		[PTR_TO_MAP_VALUE]	= "map_value",
546 		[PTR_TO_STACK]		= "fp",
547 		[PTR_TO_PACKET]		= "pkt",
548 		[PTR_TO_PACKET_META]	= "pkt_meta",
549 		[PTR_TO_PACKET_END]	= "pkt_end",
550 		[PTR_TO_FLOW_KEYS]	= "flow_keys",
551 		[PTR_TO_SOCKET]		= "sock",
552 		[PTR_TO_SOCK_COMMON]	= "sock_common",
553 		[PTR_TO_TCP_SOCK]	= "tcp_sock",
554 		[PTR_TO_TP_BUFFER]	= "tp_buffer",
555 		[PTR_TO_XDP_SOCK]	= "xdp_sock",
556 		[PTR_TO_BTF_ID]		= "ptr_",
557 		[PTR_TO_PERCPU_BTF_ID]	= "percpu_ptr_",
558 		[PTR_TO_MEM]		= "mem",
559 		[PTR_TO_BUF]		= "buf",
560 		[PTR_TO_FUNC]		= "func",
561 		[PTR_TO_MAP_KEY]	= "map_key",
562 	};
563 
564 	if (type & PTR_MAYBE_NULL) {
565 		if (base_type(type) == PTR_TO_BTF_ID ||
566 		    base_type(type) == PTR_TO_PERCPU_BTF_ID)
567 			strncpy(postfix, "or_null_", 16);
568 		else
569 			strncpy(postfix, "_or_null", 16);
570 	}
571 
572 	if (type & MEM_RDONLY)
573 		strncpy(prefix, "rdonly_", 32);
574 	if (type & MEM_ALLOC)
575 		strncpy(prefix, "alloc_", 32);
576 	if (type & MEM_USER)
577 		strncpy(prefix, "user_", 32);
578 
579 	snprintf(env->type_str_buf, TYPE_STR_BUF_LEN, "%s%s%s",
580 		 prefix, str[base_type(type)], postfix);
581 	return env->type_str_buf;
582 }
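
/* For example (illustrative, following the tables above):
 * PTR_TO_MEM | MEM_RDONLY | PTR_MAYBE_NULL is printed as "rdonly_mem_or_null",
 * while PTR_TO_BTF_ID | PTR_MAYBE_NULL is printed as "ptr_or_null_" and
 * callers such as print_verifier_state() append the BTF type name to it.
 */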
583 
584 static char slot_type_char[] = {
585 	[STACK_INVALID]	= '?',
586 	[STACK_SPILL]	= 'r',
587 	[STACK_MISC]	= 'm',
588 	[STACK_ZERO]	= '0',
589 };
590 
591 static void print_liveness(struct bpf_verifier_env *env,
592 			   enum bpf_reg_liveness live)
593 {
594 	if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN | REG_LIVE_DONE))
595 	    verbose(env, "_");
596 	if (live & REG_LIVE_READ)
597 		verbose(env, "r");
598 	if (live & REG_LIVE_WRITTEN)
599 		verbose(env, "w");
600 	if (live & REG_LIVE_DONE)
601 		verbose(env, "D");
602 }
603 
604 static struct bpf_func_state *func(struct bpf_verifier_env *env,
605 				   const struct bpf_reg_state *reg)
606 {
607 	struct bpf_verifier_state *cur = env->cur_state;
608 
609 	return cur->frame[reg->frameno];
610 }
611 
612 static const char *kernel_type_name(const struct btf *btf, u32 id)
613 {
614 	return btf_name_by_offset(btf, btf_type_by_id(btf, id)->name_off);
615 }
616 
617 static void mark_reg_scratched(struct bpf_verifier_env *env, u32 regno)
618 {
619 	env->scratched_regs |= 1U << regno;
620 }
621 
622 static void mark_stack_slot_scratched(struct bpf_verifier_env *env, u32 spi)
623 {
624 	env->scratched_stack_slots |= 1ULL << spi;
625 }
626 
627 static bool reg_scratched(const struct bpf_verifier_env *env, u32 regno)
628 {
629 	return (env->scratched_regs >> regno) & 1;
630 }
631 
632 static bool stack_slot_scratched(const struct bpf_verifier_env *env, u64 regno)
633 {
634 	return (env->scratched_stack_slots >> regno) & 1;
635 }
636 
637 static bool verifier_state_scratched(const struct bpf_verifier_env *env)
638 {
639 	return env->scratched_regs || env->scratched_stack_slots;
640 }
641 
642 static void mark_verifier_state_clean(struct bpf_verifier_env *env)
643 {
644 	env->scratched_regs = 0U;
645 	env->scratched_stack_slots = 0ULL;
646 }
647 
648 /* Used for printing the entire verifier state. */
649 static void mark_verifier_state_scratched(struct bpf_verifier_env *env)
650 {
651 	env->scratched_regs = ~0U;
652 	env->scratched_stack_slots = ~0ULL;
653 }
654 
655 /* The reg state of a pointer or a bounded scalar was saved when
656  * it was spilled to the stack.
657  */
658 static bool is_spilled_reg(const struct bpf_stack_state *stack)
659 {
660 	return stack->slot_type[BPF_REG_SIZE - 1] == STACK_SPILL;
661 }
662 
663 static void scrub_spilled_slot(u8 *stype)
664 {
665 	if (*stype != STACK_INVALID)
666 		*stype = STACK_MISC;
667 }
668 
669 static void print_verifier_state(struct bpf_verifier_env *env,
670 				 const struct bpf_func_state *state,
671 				 bool print_all)
672 {
673 	const struct bpf_reg_state *reg;
674 	enum bpf_reg_type t;
675 	int i;
676 
677 	if (state->frameno)
678 		verbose(env, " frame%d:", state->frameno);
679 	for (i = 0; i < MAX_BPF_REG; i++) {
680 		reg = &state->regs[i];
681 		t = reg->type;
682 		if (t == NOT_INIT)
683 			continue;
684 		if (!print_all && !reg_scratched(env, i))
685 			continue;
686 		verbose(env, " R%d", i);
687 		print_liveness(env, reg->live);
688 		verbose(env, "=");
689 		if (t == SCALAR_VALUE && reg->precise)
690 			verbose(env, "P");
691 		if ((t == SCALAR_VALUE || t == PTR_TO_STACK) &&
692 		    tnum_is_const(reg->var_off)) {
693 			/* reg->off should be 0 for SCALAR_VALUE */
694 			verbose(env, "%s", t == SCALAR_VALUE ? "" : reg_type_str(env, t));
695 			verbose(env, "%lld", reg->var_off.value + reg->off);
696 		} else {
697 			const char *sep = "";
698 
699 			verbose(env, "%s", reg_type_str(env, t));
700 			if (base_type(t) == PTR_TO_BTF_ID ||
701 			    base_type(t) == PTR_TO_PERCPU_BTF_ID)
702 				verbose(env, "%s", kernel_type_name(reg->btf, reg->btf_id));
703 			verbose(env, "(");
704 /*
705  * _a stands for append; the name was shortened to avoid multiline statements
706  * below. This macro is used to output a comma-separated list of attributes.
707  */
708 #define verbose_a(fmt, ...) ({ verbose(env, "%s" fmt, sep, __VA_ARGS__); sep = ","; })
709 
710 			if (reg->id)
711 				verbose_a("id=%d", reg->id);
712 			if (reg_type_may_be_refcounted_or_null(t) && reg->ref_obj_id)
713 				verbose_a("ref_obj_id=%d", reg->ref_obj_id);
714 			if (t != SCALAR_VALUE)
715 				verbose_a("off=%d", reg->off);
716 			if (type_is_pkt_pointer(t))
717 				verbose_a("r=%d", reg->range);
718 			else if (base_type(t) == CONST_PTR_TO_MAP ||
719 				 base_type(t) == PTR_TO_MAP_KEY ||
720 				 base_type(t) == PTR_TO_MAP_VALUE)
721 				verbose_a("ks=%d,vs=%d",
722 					  reg->map_ptr->key_size,
723 					  reg->map_ptr->value_size);
724 			if (tnum_is_const(reg->var_off)) {
725 				/* Typically an immediate SCALAR_VALUE, but
726 				 * could be a pointer whose offset is too big
727 				 * for reg->off
728 				 */
729 				verbose_a("imm=%llx", reg->var_off.value);
730 			} else {
731 				if (reg->smin_value != reg->umin_value &&
732 				    reg->smin_value != S64_MIN)
733 					verbose_a("smin=%lld", (long long)reg->smin_value);
734 				if (reg->smax_value != reg->umax_value &&
735 				    reg->smax_value != S64_MAX)
736 					verbose_a("smax=%lld", (long long)reg->smax_value);
737 				if (reg->umin_value != 0)
738 					verbose_a("umin=%llu", (unsigned long long)reg->umin_value);
739 				if (reg->umax_value != U64_MAX)
740 					verbose_a("umax=%llu", (unsigned long long)reg->umax_value);
741 				if (!tnum_is_unknown(reg->var_off)) {
742 					char tn_buf[48];
743 
744 					tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
745 					verbose_a("var_off=%s", tn_buf);
746 				}
747 				if (reg->s32_min_value != reg->smin_value &&
748 				    reg->s32_min_value != S32_MIN)
749 					verbose_a("s32_min=%d", (int)(reg->s32_min_value));
750 				if (reg->s32_max_value != reg->smax_value &&
751 				    reg->s32_max_value != S32_MAX)
752 					verbose_a("s32_max=%d", (int)(reg->s32_max_value));
753 				if (reg->u32_min_value != reg->umin_value &&
754 				    reg->u32_min_value != U32_MIN)
755 					verbose_a("u32_min=%d", (int)(reg->u32_min_value));
756 				if (reg->u32_max_value != reg->umax_value &&
757 				    reg->u32_max_value != U32_MAX)
758 					verbose_a("u32_max=%d", (int)(reg->u32_max_value));
759 			}
760 #undef verbose_a
761 
762 			verbose(env, ")");
763 		}
764 	}
765 	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
766 		char types_buf[BPF_REG_SIZE + 1];
767 		bool valid = false;
768 		int j;
769 
770 		for (j = 0; j < BPF_REG_SIZE; j++) {
771 			if (state->stack[i].slot_type[j] != STACK_INVALID)
772 				valid = true;
773 			types_buf[j] = slot_type_char[
774 					state->stack[i].slot_type[j]];
775 		}
776 		types_buf[BPF_REG_SIZE] = 0;
777 		if (!valid)
778 			continue;
779 		if (!print_all && !stack_slot_scratched(env, i))
780 			continue;
781 		verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
782 		print_liveness(env, state->stack[i].spilled_ptr.live);
783 		if (is_spilled_reg(&state->stack[i])) {
784 			reg = &state->stack[i].spilled_ptr;
785 			t = reg->type;
786 			verbose(env, "=%s", t == SCALAR_VALUE ? "" : reg_type_str(env, t));
787 			if (t == SCALAR_VALUE && reg->precise)
788 				verbose(env, "P");
789 			if (t == SCALAR_VALUE && tnum_is_const(reg->var_off))
790 				verbose(env, "%lld", reg->var_off.value + reg->off);
791 		} else {
792 			verbose(env, "=%s", types_buf);
793 		}
794 	}
795 	if (state->acquired_refs && state->refs[0].id) {
796 		verbose(env, " refs=%d", state->refs[0].id);
797 		for (i = 1; i < state->acquired_refs; i++)
798 			if (state->refs[i].id)
799 				verbose(env, ",%d", state->refs[i].id);
800 	}
801 	if (state->in_callback_fn)
802 		verbose(env, " cb");
803 	if (state->in_async_callback_fn)
804 		verbose(env, " async_cb");
805 	verbose(env, "\n");
806 	mark_verifier_state_clean(env);
807 }
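
/* An approximate example of the output produced above: at the entry of a
 * program the state line might look roughly like
 *
 *    R1=ctx(off=0,imm=0) R10=fp0
 *
 * with print_insn_state() below prefixing the instruction index; which
 * attributes appear depends on the fields set in each register.
 */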
808 
809 static inline u32 vlog_alignment(u32 pos)
810 {
811 	return round_up(max(pos + BPF_LOG_MIN_ALIGNMENT / 2, BPF_LOG_ALIGNMENT),
812 			BPF_LOG_MIN_ALIGNMENT) - pos - 1;
813 }
814 
815 static void print_insn_state(struct bpf_verifier_env *env,
816 			     const struct bpf_func_state *state)
817 {
818 	if (env->prev_log_len && env->prev_log_len == env->log.len_used) {
819 		/* remove new line character */
820 		bpf_vlog_reset(&env->log, env->prev_log_len - 1);
821 		verbose(env, "%*c;", vlog_alignment(env->prev_insn_print_len), ' ');
822 	} else {
823 		verbose(env, "%d:", env->insn_idx);
824 	}
825 	print_verifier_state(env, state, false);
826 }
827 
828 /* copy array src of length n * size bytes to dst. dst is reallocated if it's too
829  * small to hold src. This is different from krealloc since we don't want to preserve
830  * the contents of dst.
831  *
832  * Leaves dst untouched if src is NULL or length is zero. Returns NULL if memory could
833  * not be allocated.
834  */
835 static void *copy_array(void *dst, const void *src, size_t n, size_t size, gfp_t flags)
836 {
837 	size_t bytes;
838 
839 	if (ZERO_OR_NULL_PTR(src))
840 		goto out;
841 
842 	if (unlikely(check_mul_overflow(n, size, &bytes)))
843 		return NULL;
844 
845 	if (ksize(dst) < bytes) {
846 		kfree(dst);
847 		dst = kmalloc_track_caller(bytes, flags);
848 		if (!dst)
849 			return NULL;
850 	}
851 
852 	memcpy(dst, src, bytes);
853 out:
854 	return dst ? dst : ZERO_SIZE_PTR;
855 }
856 
857 /* resize an array from old_n items to new_n items. the array is reallocated if it's too
858  * small to hold new_n items. new items are zeroed out if the array grows.
859  *
860  * Contrary to krealloc_array, does not free arr if new_n is zero.
861  */
862 static void *realloc_array(void *arr, size_t old_n, size_t new_n, size_t size)
863 {
864 	if (!new_n || old_n == new_n)
865 		goto out;
866 
867 	arr = krealloc_array(arr, new_n, size, GFP_KERNEL);
868 	if (!arr)
869 		return NULL;
870 
871 	if (new_n > old_n)
872 		memset(arr + old_n * size, 0, (new_n - old_n) * size);
873 
874 out:
875 	return arr ? arr : ZERO_SIZE_PTR;
876 }
877 
878 static int copy_reference_state(struct bpf_func_state *dst, const struct bpf_func_state *src)
879 {
880 	dst->refs = copy_array(dst->refs, src->refs, src->acquired_refs,
881 			       sizeof(struct bpf_reference_state), GFP_KERNEL);
882 	if (!dst->refs)
883 		return -ENOMEM;
884 
885 	dst->acquired_refs = src->acquired_refs;
886 	return 0;
887 }
888 
889 static int copy_stack_state(struct bpf_func_state *dst, const struct bpf_func_state *src)
890 {
891 	size_t n = src->allocated_stack / BPF_REG_SIZE;
892 
893 	dst->stack = copy_array(dst->stack, src->stack, n, sizeof(struct bpf_stack_state),
894 				GFP_KERNEL);
895 	if (!dst->stack)
896 		return -ENOMEM;
897 
898 	dst->allocated_stack = src->allocated_stack;
899 	return 0;
900 }
901 
902 static int resize_reference_state(struct bpf_func_state *state, size_t n)
903 {
904 	state->refs = realloc_array(state->refs, state->acquired_refs, n,
905 				    sizeof(struct bpf_reference_state));
906 	if (!state->refs)
907 		return -ENOMEM;
908 
909 	state->acquired_refs = n;
910 	return 0;
911 }
912 
913 static int grow_stack_state(struct bpf_func_state *state, int size)
914 {
915 	size_t old_n = state->allocated_stack / BPF_REG_SIZE, n = size / BPF_REG_SIZE;
916 
917 	if (old_n >= n)
918 		return 0;
919 
920 	state->stack = realloc_array(state->stack, old_n, n, sizeof(struct bpf_stack_state));
921 	if (!state->stack)
922 		return -ENOMEM;
923 
924 	state->allocated_stack = size;
925 	return 0;
926 }
927 
928 /* Acquire a pointer id from the env and update the state->refs to include
929  * this new pointer reference.
930  * On success, returns a valid pointer id to associate with the register
931  * On failure, returns a negative errno.
932  */
933 static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx)
934 {
935 	struct bpf_func_state *state = cur_func(env);
936 	int new_ofs = state->acquired_refs;
937 	int id, err;
938 
939 	err = resize_reference_state(state, state->acquired_refs + 1);
940 	if (err)
941 		return err;
942 	id = ++env->id_gen;
943 	state->refs[new_ofs].id = id;
944 	state->refs[new_ofs].insn_idx = insn_idx;
945 
946 	return id;
947 }
948 
949 /* release function corresponding to acquire_reference_state(). Idempotent. */
950 static int release_reference_state(struct bpf_func_state *state, int ptr_id)
951 {
952 	int i, last_idx;
953 
954 	last_idx = state->acquired_refs - 1;
955 	for (i = 0; i < state->acquired_refs; i++) {
956 		if (state->refs[i].id == ptr_id) {
957 			if (last_idx && i != last_idx)
958 				memcpy(&state->refs[i], &state->refs[last_idx],
959 				       sizeof(*state->refs));
960 			memset(&state->refs[last_idx], 0, sizeof(*state->refs));
961 			state->acquired_refs--;
962 			return 0;
963 		}
964 	}
965 	return -EINVAL;
966 }
967 
968 static void free_func_state(struct bpf_func_state *state)
969 {
970 	if (!state)
971 		return;
972 	kfree(state->refs);
973 	kfree(state->stack);
974 	kfree(state);
975 }
976 
977 static void clear_jmp_history(struct bpf_verifier_state *state)
978 {
979 	kfree(state->jmp_history);
980 	state->jmp_history = NULL;
981 	state->jmp_history_cnt = 0;
982 }
983 
984 static void free_verifier_state(struct bpf_verifier_state *state,
985 				bool free_self)
986 {
987 	int i;
988 
989 	for (i = 0; i <= state->curframe; i++) {
990 		free_func_state(state->frame[i]);
991 		state->frame[i] = NULL;
992 	}
993 	clear_jmp_history(state);
994 	if (free_self)
995 		kfree(state);
996 }
997 
998 /* copy verifier state from src to dst growing dst stack space
999  * when necessary to accommodate larger src stack
1000  */
1001 static int copy_func_state(struct bpf_func_state *dst,
1002 			   const struct bpf_func_state *src)
1003 {
1004 	int err;
1005 
1006 	memcpy(dst, src, offsetof(struct bpf_func_state, acquired_refs));
1007 	err = copy_reference_state(dst, src);
1008 	if (err)
1009 		return err;
1010 	return copy_stack_state(dst, src);
1011 }
1012 
1013 static int copy_verifier_state(struct bpf_verifier_state *dst_state,
1014 			       const struct bpf_verifier_state *src)
1015 {
1016 	struct bpf_func_state *dst;
1017 	int i, err;
1018 
1019 	dst_state->jmp_history = copy_array(dst_state->jmp_history, src->jmp_history,
1020 					    src->jmp_history_cnt, sizeof(struct bpf_idx_pair),
1021 					    GFP_USER);
1022 	if (!dst_state->jmp_history)
1023 		return -ENOMEM;
1024 	dst_state->jmp_history_cnt = src->jmp_history_cnt;
1025 
1026 	/* if dst has more stack frames than src, free them */
1027 	for (i = src->curframe + 1; i <= dst_state->curframe; i++) {
1028 		free_func_state(dst_state->frame[i]);
1029 		dst_state->frame[i] = NULL;
1030 	}
1031 	dst_state->speculative = src->speculative;
1032 	dst_state->curframe = src->curframe;
1033 	dst_state->active_spin_lock = src->active_spin_lock;
1034 	dst_state->branches = src->branches;
1035 	dst_state->parent = src->parent;
1036 	dst_state->first_insn_idx = src->first_insn_idx;
1037 	dst_state->last_insn_idx = src->last_insn_idx;
1038 	for (i = 0; i <= src->curframe; i++) {
1039 		dst = dst_state->frame[i];
1040 		if (!dst) {
1041 			dst = kzalloc(sizeof(*dst), GFP_KERNEL);
1042 			if (!dst)
1043 				return -ENOMEM;
1044 			dst_state->frame[i] = dst;
1045 		}
1046 		err = copy_func_state(dst, src->frame[i]);
1047 		if (err)
1048 			return err;
1049 	}
1050 	return 0;
1051 }
1052 
1053 static void update_branch_counts(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
1054 {
1055 	while (st) {
1056 		u32 br = --st->branches;
1057 
1058 		/* WARN_ON(br > 1) technically makes sense here,
1059 		 * but see comment in push_stack(), hence:
1060 		 */
1061 		WARN_ONCE((int)br < 0,
1062 			  "BUG update_branch_counts:branches_to_explore=%d\n",
1063 			  br);
1064 		if (br)
1065 			break;
1066 		st = st->parent;
1067 	}
1068 }
1069 
1070 static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx,
1071 		     int *insn_idx, bool pop_log)
1072 {
1073 	struct bpf_verifier_state *cur = env->cur_state;
1074 	struct bpf_verifier_stack_elem *elem, *head = env->head;
1075 	int err;
1076 
1077 	if (env->head == NULL)
1078 		return -ENOENT;
1079 
1080 	if (cur) {
1081 		err = copy_verifier_state(cur, &head->st);
1082 		if (err)
1083 			return err;
1084 	}
1085 	if (pop_log)
1086 		bpf_vlog_reset(&env->log, head->log_pos);
1087 	if (insn_idx)
1088 		*insn_idx = head->insn_idx;
1089 	if (prev_insn_idx)
1090 		*prev_insn_idx = head->prev_insn_idx;
1091 	elem = head->next;
1092 	free_verifier_state(&head->st, false);
1093 	kfree(head);
1094 	env->head = elem;
1095 	env->stack_size--;
1096 	return 0;
1097 }
1098 
1099 static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
1100 					     int insn_idx, int prev_insn_idx,
1101 					     bool speculative)
1102 {
1103 	struct bpf_verifier_state *cur = env->cur_state;
1104 	struct bpf_verifier_stack_elem *elem;
1105 	int err;
1106 
1107 	elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL);
1108 	if (!elem)
1109 		goto err;
1110 
1111 	elem->insn_idx = insn_idx;
1112 	elem->prev_insn_idx = prev_insn_idx;
1113 	elem->next = env->head;
1114 	elem->log_pos = env->log.len_used;
1115 	env->head = elem;
1116 	env->stack_size++;
1117 	err = copy_verifier_state(&elem->st, cur);
1118 	if (err)
1119 		goto err;
1120 	elem->st.speculative |= speculative;
1121 	if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) {
1122 		verbose(env, "The sequence of %d jumps is too complex.\n",
1123 			env->stack_size);
1124 		goto err;
1125 	}
1126 	if (elem->st.parent) {
1127 		++elem->st.parent->branches;
1128 		/* WARN_ON(branches > 2) technically makes sense here,
1129 		 * but
1130 		 * 1. speculative states will bump 'branches' for non-branch
1131 		 * instructions
1132 		 * 2. is_state_visited() heuristics may decide not to create
1133 		 * a new state for a sequence of branches and all such current
1134 		 * and cloned states will be pointing to a single parent state
1135 		 * which might have large 'branches' count.
1136 		 */
1137 	}
1138 	return &elem->st;
1139 err:
1140 	free_verifier_state(env->cur_state, true);
1141 	env->cur_state = NULL;
1142 	/* pop all elements and return */
1143 	while (!pop_stack(env, NULL, NULL, false));
1144 	return NULL;
1145 }
1146 
1147 #define CALLER_SAVED_REGS 6
1148 static const int caller_saved[CALLER_SAVED_REGS] = {
1149 	BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
1150 };
1151 
1152 static void __mark_reg_not_init(const struct bpf_verifier_env *env,
1153 				struct bpf_reg_state *reg);
1154 
1155 /* This helper doesn't clear reg->id */
1156 static void ___mark_reg_known(struct bpf_reg_state *reg, u64 imm)
1157 {
1158 	reg->var_off = tnum_const(imm);
1159 	reg->smin_value = (s64)imm;
1160 	reg->smax_value = (s64)imm;
1161 	reg->umin_value = imm;
1162 	reg->umax_value = imm;
1163 
1164 	reg->s32_min_value = (s32)imm;
1165 	reg->s32_max_value = (s32)imm;
1166 	reg->u32_min_value = (u32)imm;
1167 	reg->u32_max_value = (u32)imm;
1168 }
1169 
1170 /* Mark the unknown part of a register (variable offset or scalar value) as
1171  * known to have the value @imm.
1172  */
1173 static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm)
1174 {
1175 	/* Clear id, off, and union(map_ptr, range) */
1176 	memset(((u8 *)reg) + sizeof(reg->type), 0,
1177 	       offsetof(struct bpf_reg_state, var_off) - sizeof(reg->type));
1178 	___mark_reg_known(reg, imm);
1179 }
1180 
1181 static void __mark_reg32_known(struct bpf_reg_state *reg, u64 imm)
1182 {
1183 	reg->var_off = tnum_const_subreg(reg->var_off, imm);
1184 	reg->s32_min_value = (s32)imm;
1185 	reg->s32_max_value = (s32)imm;
1186 	reg->u32_min_value = (u32)imm;
1187 	reg->u32_max_value = (u32)imm;
1188 }
1189 
1190 /* Mark the 'variable offset' part of a register as zero.  This should be
1191  * used only on registers holding a pointer type.
1192  */
1193 static void __mark_reg_known_zero(struct bpf_reg_state *reg)
1194 {
1195 	__mark_reg_known(reg, 0);
1196 }
1197 
1198 static void __mark_reg_const_zero(struct bpf_reg_state *reg)
1199 {
1200 	__mark_reg_known(reg, 0);
1201 	reg->type = SCALAR_VALUE;
1202 }
1203 
1204 static void mark_reg_known_zero(struct bpf_verifier_env *env,
1205 				struct bpf_reg_state *regs, u32 regno)
1206 {
1207 	if (WARN_ON(regno >= MAX_BPF_REG)) {
1208 		verbose(env, "mark_reg_known_zero(regs, %u)\n", regno);
1209 		/* Something bad happened, let's kill all regs */
1210 		for (regno = 0; regno < MAX_BPF_REG; regno++)
1211 			__mark_reg_not_init(env, regs + regno);
1212 		return;
1213 	}
1214 	__mark_reg_known_zero(regs + regno);
1215 }
1216 
1217 static void mark_ptr_not_null_reg(struct bpf_reg_state *reg)
1218 {
1219 	if (base_type(reg->type) == PTR_TO_MAP_VALUE) {
1220 		const struct bpf_map *map = reg->map_ptr;
1221 
1222 		if (map->inner_map_meta) {
1223 			reg->type = CONST_PTR_TO_MAP;
1224 			reg->map_ptr = map->inner_map_meta;
1225 			/* Transfer the reg's id, which is unique for every map_lookup_elem,
1226 			 * as the UID of the inner map.
1227 			 */
1228 			if (map_value_has_timer(map->inner_map_meta))
1229 				reg->map_uid = reg->id;
1230 		} else if (map->map_type == BPF_MAP_TYPE_XSKMAP) {
1231 			reg->type = PTR_TO_XDP_SOCK;
1232 		} else if (map->map_type == BPF_MAP_TYPE_SOCKMAP ||
1233 			   map->map_type == BPF_MAP_TYPE_SOCKHASH) {
1234 			reg->type = PTR_TO_SOCKET;
1235 		} else {
1236 			reg->type = PTR_TO_MAP_VALUE;
1237 		}
1238 		return;
1239 	}
1240 
1241 	reg->type &= ~PTR_MAYBE_NULL;
1242 }
1243 
1244 static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg)
1245 {
1246 	return type_is_pkt_pointer(reg->type);
1247 }
1248 
1249 static bool reg_is_pkt_pointer_any(const struct bpf_reg_state *reg)
1250 {
1251 	return reg_is_pkt_pointer(reg) ||
1252 	       reg->type == PTR_TO_PACKET_END;
1253 }
1254 
1255 /* Unmodified PTR_TO_PACKET[_META,_END] register from ctx access. */
1256 static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg,
1257 				    enum bpf_reg_type which)
1258 {
1259 	/* The register can already have a range from prior markings.
1260 	 * This is fine as long as it hasn't been advanced from its
1261 	 * origin.
1262 	 */
1263 	return reg->type == which &&
1264 	       reg->id == 0 &&
1265 	       reg->off == 0 &&
1266 	       tnum_equals_const(reg->var_off, 0);
1267 }
1268 
1269 /* Reset the min/max bounds of a register */
1270 static void __mark_reg_unbounded(struct bpf_reg_state *reg)
1271 {
1272 	reg->smin_value = S64_MIN;
1273 	reg->smax_value = S64_MAX;
1274 	reg->umin_value = 0;
1275 	reg->umax_value = U64_MAX;
1276 
1277 	reg->s32_min_value = S32_MIN;
1278 	reg->s32_max_value = S32_MAX;
1279 	reg->u32_min_value = 0;
1280 	reg->u32_max_value = U32_MAX;
1281 }
1282 
1283 static void __mark_reg64_unbounded(struct bpf_reg_state *reg)
1284 {
1285 	reg->smin_value = S64_MIN;
1286 	reg->smax_value = S64_MAX;
1287 	reg->umin_value = 0;
1288 	reg->umax_value = U64_MAX;
1289 }
1290 
1291 static void __mark_reg32_unbounded(struct bpf_reg_state *reg)
1292 {
1293 	reg->s32_min_value = S32_MIN;
1294 	reg->s32_max_value = S32_MAX;
1295 	reg->u32_min_value = 0;
1296 	reg->u32_max_value = U32_MAX;
1297 }
1298 
1299 static void __update_reg32_bounds(struct bpf_reg_state *reg)
1300 {
1301 	struct tnum var32_off = tnum_subreg(reg->var_off);
1302 
1303 	/* min signed is max(sign bit) | min(other bits) */
1304 	reg->s32_min_value = max_t(s32, reg->s32_min_value,
1305 			var32_off.value | (var32_off.mask & S32_MIN));
1306 	/* max signed is min(sign bit) | max(other bits) */
1307 	reg->s32_max_value = min_t(s32, reg->s32_max_value,
1308 			var32_off.value | (var32_off.mask & S32_MAX));
1309 	reg->u32_min_value = max_t(u32, reg->u32_min_value, (u32)var32_off.value);
1310 	reg->u32_max_value = min(reg->u32_max_value,
1311 				 (u32)(var32_off.value | var32_off.mask));
1312 }
1313 
1314 static void __update_reg64_bounds(struct bpf_reg_state *reg)
1315 {
1316 	/* min signed is max(sign bit) | min(other bits) */
1317 	reg->smin_value = max_t(s64, reg->smin_value,
1318 				reg->var_off.value | (reg->var_off.mask & S64_MIN));
1319 	/* max signed is min(sign bit) | max(other bits) */
1320 	reg->smax_value = min_t(s64, reg->smax_value,
1321 				reg->var_off.value | (reg->var_off.mask & S64_MAX));
1322 	reg->umin_value = max(reg->umin_value, reg->var_off.value);
1323 	reg->umax_value = min(reg->umax_value,
1324 			      reg->var_off.value | reg->var_off.mask);
1325 }
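
/* A worked example for __update_reg64_bounds() above (illustrative numbers;
 * the 32-bit variant is analogous): with var_off = (value=0x0,
 * mask=0x8000000000000003), i.e. the sign bit and the two low bits unknown,
 * the candidate smin is S64_MIN (sign bit assumed set, other unknown bits
 * clear) and the candidate smax is 3 (sign bit assumed clear, other unknown
 * bits set). The max_t()/min_t() calls then only ever tighten the existing
 * bounds, never widen them.
 */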
1326 
1327 static void __update_reg_bounds(struct bpf_reg_state *reg)
1328 {
1329 	__update_reg32_bounds(reg);
1330 	__update_reg64_bounds(reg);
1331 }
1332 
1333 /* Uses signed min/max values to inform unsigned, and vice-versa */
1334 static void __reg32_deduce_bounds(struct bpf_reg_state *reg)
1335 {
1336 	/* Learn sign from signed bounds.
1337 	 * If we cannot cross the sign boundary, then signed and unsigned bounds
1338 	 * are the same, so combine.  This works even in the negative case, e.g.
1339 	 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
1340 	 */
1341 	if (reg->s32_min_value >= 0 || reg->s32_max_value < 0) {
1342 		reg->s32_min_value = reg->u32_min_value =
1343 			max_t(u32, reg->s32_min_value, reg->u32_min_value);
1344 		reg->s32_max_value = reg->u32_max_value =
1345 			min_t(u32, reg->s32_max_value, reg->u32_max_value);
1346 		return;
1347 	}
1348 	/* Learn sign from unsigned bounds.  Signed bounds cross the sign
1349 	 * boundary, so we must be careful.
1350 	 */
1351 	if ((s32)reg->u32_max_value >= 0) {
1352 		/* Positive.  We can't learn anything from the smin, but smax
1353 		 * is positive, hence safe.
1354 		 */
1355 		reg->s32_min_value = reg->u32_min_value;
1356 		reg->s32_max_value = reg->u32_max_value =
1357 			min_t(u32, reg->s32_max_value, reg->u32_max_value);
1358 	} else if ((s32)reg->u32_min_value < 0) {
1359 		/* Negative.  We can't learn anything from the smax, but smin
1360 		 * is negative, hence safe.
1361 		 */
1362 		reg->s32_min_value = reg->u32_min_value =
1363 			max_t(u32, reg->s32_min_value, reg->u32_min_value);
1364 		reg->s32_max_value = reg->u32_max_value;
1365 	}
1366 }
1367 
1368 static void __reg64_deduce_bounds(struct bpf_reg_state *reg)
1369 {
1370 	/* Learn sign from signed bounds.
1371 	 * If we cannot cross the sign boundary, then signed and unsigned bounds
1372 	 * are the same, so combine.  This works even in the negative case, e.g.
1373 	 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
1374 	 */
1375 	if (reg->smin_value >= 0 || reg->smax_value < 0) {
1376 		reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
1377 							  reg->umin_value);
1378 		reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
1379 							  reg->umax_value);
1380 		return;
1381 	}
1382 	/* Learn sign from unsigned bounds.  Signed bounds cross the sign
1383 	 * boundary, so we must be careful.
1384 	 */
1385 	if ((s64)reg->umax_value >= 0) {
1386 		/* Positive.  We can't learn anything from the smin, but smax
1387 		 * is positive, hence safe.
1388 		 */
1389 		reg->smin_value = reg->umin_value;
1390 		reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
1391 							  reg->umax_value);
1392 	} else if ((s64)reg->umin_value < 0) {
1393 		/* Negative.  We can't learn anything from the smax, but smin
1394 		 * is negative, hence safe.
1395 		 */
1396 		reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
1397 							  reg->umin_value);
1398 		reg->smax_value = reg->umax_value;
1399 	}
1400 }
1401 
1402 static void __reg_deduce_bounds(struct bpf_reg_state *reg)
1403 {
1404 	__reg32_deduce_bounds(reg);
1405 	__reg64_deduce_bounds(reg);
1406 }
1407 
1408 /* Attempts to improve var_off based on unsigned min/max information */
1409 static void __reg_bound_offset(struct bpf_reg_state *reg)
1410 {
1411 	struct tnum var64_off = tnum_intersect(reg->var_off,
1412 					       tnum_range(reg->umin_value,
1413 							  reg->umax_value));
1414 	struct tnum var32_off = tnum_intersect(tnum_subreg(reg->var_off),
1415 						tnum_range(reg->u32_min_value,
1416 							   reg->u32_max_value));
1417 
1418 	reg->var_off = tnum_or(tnum_clear_subreg(var64_off), var32_off);
1419 }
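
/* For example (illustrative): with umin_value = 0 and umax_value = 15,
 * tnum_range(0, 15) is the tnum (value=0, mask=0xf), i.e. the low four bits
 * unknown and all higher bits known zero, so the intersection clears every
 * unknown bit above bit 3 in var_off.
 */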
1420 
1421 static bool __reg32_bound_s64(s32 a)
1422 {
1423 	return a >= 0 && a <= S32_MAX;
1424 }
1425 
1426 static void __reg_assign_32_into_64(struct bpf_reg_state *reg)
1427 {
1428 	reg->umin_value = reg->u32_min_value;
1429 	reg->umax_value = reg->u32_max_value;
1430 
1431 	/* Attempt to pull the 32-bit signed bounds into the 64-bit bounds, but
1432 	 * only if they are non-negative; otherwise set worst-case bounds and
1433 	 * refine them later from the tnum.
1434 	 */
1435 	if (__reg32_bound_s64(reg->s32_min_value) &&
1436 	    __reg32_bound_s64(reg->s32_max_value)) {
1437 		reg->smin_value = reg->s32_min_value;
1438 		reg->smax_value = reg->s32_max_value;
1439 	} else {
1440 		reg->smin_value = 0;
1441 		reg->smax_value = U32_MAX;
1442 	}
1443 }
1444 
1445 static void __reg_combine_32_into_64(struct bpf_reg_state *reg)
1446 {
1447 	/* Special case when the 64-bit register has its upper 32 bits zeroed.
1448 	 * Typically happens after a zext or <<32, >>32 sequence, allowing us
1449 	 * to use the 32-bit bounds directly.
1450 	 */
1451 	if (tnum_equals_const(tnum_clear_subreg(reg->var_off), 0)) {
1452 		__reg_assign_32_into_64(reg);
1453 		/* Otherwise the best we can do is push the lower 32-bit known and
1454 		 * unknown bits into the register (var_off set from jmp logic) and
1455 		 * then learn as much as possible from the 64-bit tnum's known and
1456 		 * unknown bits. The previous smin/smax bounds are invalid here
1457 		 * because of the jmp32 compare, so mark them unknown so they do
1458 		 * not impact the tnum bounds calculation.
1459 		 * so they do not impact tnum bounds calculation.
1460 		 */
1461 		__mark_reg64_unbounded(reg);
1462 		__update_reg_bounds(reg);
1463 	}
1464 
1465 	/* Intersecting with the old var_off might have improved our bounds
1466 	 * slightly.  e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
1467 	 * then new var_off is (0; 0x7f...fc) which improves our umax.
1468 	 */
1469 	__reg_deduce_bounds(reg);
1470 	__reg_bound_offset(reg);
1471 	__update_reg_bounds(reg);
1472 }
1473 
1474 static bool __reg64_bound_s32(s64 a)
1475 {
1476 	return a >= S32_MIN && a <= S32_MAX;
1477 }
1478 
1479 static bool __reg64_bound_u32(u64 a)
1480 {
1481 	return a >= U32_MIN && a <= U32_MAX;
1482 }
1483 
1484 static void __reg_combine_64_into_32(struct bpf_reg_state *reg)
1485 {
1486 	__mark_reg32_unbounded(reg);
1487 
1488 	if (__reg64_bound_s32(reg->smin_value) && __reg64_bound_s32(reg->smax_value)) {
1489 		reg->s32_min_value = (s32)reg->smin_value;
1490 		reg->s32_max_value = (s32)reg->smax_value;
1491 	}
1492 	if (__reg64_bound_u32(reg->umin_value) && __reg64_bound_u32(reg->umax_value)) {
1493 		reg->u32_min_value = (u32)reg->umin_value;
1494 		reg->u32_max_value = (u32)reg->umax_value;
1495 	}
1496 
1497 	/* Intersecting with the old var_off might have improved our bounds
1498 	 * slightly.  e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
1499 	 * then new var_off is (0; 0x7f...fc) which improves our umax.
1500 	 */
1501 	__reg_deduce_bounds(reg);
1502 	__reg_bound_offset(reg);
1503 	__update_reg_bounds(reg);
1504 }
1505 
1506 /* Mark a register as having a completely unknown (scalar) value. */
1507 static void __mark_reg_unknown(const struct bpf_verifier_env *env,
1508 			       struct bpf_reg_state *reg)
1509 {
1510 	/*
1511 	 * Clear type, id, off, union(map_ptr, range), and the
1512 	 * padding between 'type' and the union.
1513 	 */
1514 	memset(reg, 0, offsetof(struct bpf_reg_state, var_off));
1515 	reg->type = SCALAR_VALUE;
1516 	reg->var_off = tnum_unknown;
1517 	reg->frameno = 0;
1518 	reg->precise = env->subprog_cnt > 1 || !env->bpf_capable;
1519 	__mark_reg_unbounded(reg);
1520 }
1521 
1522 static void mark_reg_unknown(struct bpf_verifier_env *env,
1523 			     struct bpf_reg_state *regs, u32 regno)
1524 {
1525 	if (WARN_ON(regno >= MAX_BPF_REG)) {
1526 		verbose(env, "mark_reg_unknown(regs, %u)\n", regno);
1527 		/* Something bad happened, let's kill all regs except FP */
1528 		for (regno = 0; regno < BPF_REG_FP; regno++)
1529 			__mark_reg_not_init(env, regs + regno);
1530 		return;
1531 	}
1532 	__mark_reg_unknown(env, regs + regno);
1533 }
1534 
1535 static void __mark_reg_not_init(const struct bpf_verifier_env *env,
1536 				struct bpf_reg_state *reg)
1537 {
1538 	__mark_reg_unknown(env, reg);
1539 	reg->type = NOT_INIT;
1540 }
1541 
1542 static void mark_reg_not_init(struct bpf_verifier_env *env,
1543 			      struct bpf_reg_state *regs, u32 regno)
1544 {
1545 	if (WARN_ON(regno >= MAX_BPF_REG)) {
1546 		verbose(env, "mark_reg_not_init(regs, %u)\n", regno);
1547 		/* Something bad happened, let's kill all regs except FP */
1548 		for (regno = 0; regno < BPF_REG_FP; regno++)
1549 			__mark_reg_not_init(env, regs + regno);
1550 		return;
1551 	}
1552 	__mark_reg_not_init(env, regs + regno);
1553 }
1554 
1555 static void mark_btf_ld_reg(struct bpf_verifier_env *env,
1556 			    struct bpf_reg_state *regs, u32 regno,
1557 			    enum bpf_reg_type reg_type,
1558 			    struct btf *btf, u32 btf_id,
1559 			    enum bpf_type_flag flag)
1560 {
1561 	if (reg_type == SCALAR_VALUE) {
1562 		mark_reg_unknown(env, regs, regno);
1563 		return;
1564 	}
1565 	mark_reg_known_zero(env, regs, regno);
1566 	regs[regno].type = PTR_TO_BTF_ID | flag;
1567 	regs[regno].btf = btf;
1568 	regs[regno].btf_id = btf_id;
1569 }
1570 
1571 #define DEF_NOT_SUBREG	(0)
1572 static void init_reg_state(struct bpf_verifier_env *env,
1573 			   struct bpf_func_state *state)
1574 {
1575 	struct bpf_reg_state *regs = state->regs;
1576 	int i;
1577 
1578 	for (i = 0; i < MAX_BPF_REG; i++) {
1579 		mark_reg_not_init(env, regs, i);
1580 		regs[i].live = REG_LIVE_NONE;
1581 		regs[i].parent = NULL;
1582 		regs[i].subreg_def = DEF_NOT_SUBREG;
1583 	}
1584 
1585 	/* frame pointer */
1586 	regs[BPF_REG_FP].type = PTR_TO_STACK;
1587 	mark_reg_known_zero(env, regs, BPF_REG_FP);
1588 	regs[BPF_REG_FP].frameno = state->frameno;
1589 }
1590 
1591 #define BPF_MAIN_FUNC (-1)
1592 static void init_func_state(struct bpf_verifier_env *env,
1593 			    struct bpf_func_state *state,
1594 			    int callsite, int frameno, int subprogno)
1595 {
1596 	state->callsite = callsite;
1597 	state->frameno = frameno;
1598 	state->subprogno = subprogno;
1599 	init_reg_state(env, state);
1600 	mark_verifier_state_scratched(env);
1601 }
1602 
1603 /* Similar to push_stack(), but for async callbacks */
1604 static struct bpf_verifier_state *push_async_cb(struct bpf_verifier_env *env,
1605 						int insn_idx, int prev_insn_idx,
1606 						int subprog)
1607 {
1608 	struct bpf_verifier_stack_elem *elem;
1609 	struct bpf_func_state *frame;
1610 
1611 	elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL);
1612 	if (!elem)
1613 		goto err;
1614 
1615 	elem->insn_idx = insn_idx;
1616 	elem->prev_insn_idx = prev_insn_idx;
1617 	elem->next = env->head;
1618 	elem->log_pos = env->log.len_used;
1619 	env->head = elem;
1620 	env->stack_size++;
1621 	if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) {
1622 		verbose(env,
1623 			"The sequence of %d jumps is too complex for async cb.\n",
1624 			env->stack_size);
1625 		goto err;
1626 	}
1627 	/* Unlike push_stack() do not copy_verifier_state().
1628 	 * The caller state doesn't matter.
1629 	 * This is async callback. It starts in a fresh stack.
1630 	 * Initialize it similar to do_check_common().
1631 	 */
1632 	elem->st.branches = 1;
1633 	frame = kzalloc(sizeof(*frame), GFP_KERNEL);
1634 	if (!frame)
1635 		goto err;
1636 	init_func_state(env, frame,
1637 			BPF_MAIN_FUNC /* callsite */,
1638 			0 /* frameno within this callchain */,
1639 			subprog /* subprog number within this prog */);
1640 	elem->st.frame[0] = frame;
1641 	return &elem->st;
1642 err:
1643 	free_verifier_state(env->cur_state, true);
1644 	env->cur_state = NULL;
1645 	/* pop all elements and return */
1646 	while (!pop_stack(env, NULL, NULL, false));
1647 	return NULL;
1648 }
1649 
1650 
1651 enum reg_arg_type {
1652 	SRC_OP,		/* register is used as source operand */
1653 	DST_OP,		/* register is used as destination operand */
1654 	DST_OP_NO_MARK	/* same as above, check only, don't mark */
1655 };
1656 
1657 static int cmp_subprogs(const void *a, const void *b)
1658 {
1659 	return ((struct bpf_subprog_info *)a)->start -
1660 	       ((struct bpf_subprog_info *)b)->start;
1661 }
1662 
1663 static int find_subprog(struct bpf_verifier_env *env, int off)
1664 {
1665 	struct bpf_subprog_info *p;
1666 
1667 	p = bsearch(&off, env->subprog_info, env->subprog_cnt,
1668 		    sizeof(env->subprog_info[0]), cmp_subprogs);
1669 	if (!p)
1670 		return -ENOENT;
1671 	return p - env->subprog_info;
1672 
1673 }
1674 
1675 static int add_subprog(struct bpf_verifier_env *env, int off)
1676 {
1677 	int insn_cnt = env->prog->len;
1678 	int ret;
1679 
1680 	if (off >= insn_cnt || off < 0) {
1681 		verbose(env, "call to invalid destination\n");
1682 		return -EINVAL;
1683 	}
1684 	ret = find_subprog(env, off);
1685 	if (ret >= 0)
1686 		return ret;
1687 	if (env->subprog_cnt >= BPF_MAX_SUBPROGS) {
1688 		verbose(env, "too many subprograms\n");
1689 		return -E2BIG;
1690 	}
1691 	/* determine subprog starts. The end is one before the next starts */
1692 	env->subprog_info[env->subprog_cnt++].start = off;
1693 	sort(env->subprog_info, env->subprog_cnt,
1694 	     sizeof(env->subprog_info[0]), cmp_subprogs, NULL);
1695 	return env->subprog_cnt - 1;
1696 }
1697 
1698 #define MAX_KFUNC_DESCS 256
1699 #define MAX_KFUNC_BTFS	256
1700 
1701 struct bpf_kfunc_desc {
1702 	struct btf_func_model func_model;
1703 	u32 func_id;
1704 	s32 imm;
1705 	u16 offset;
1706 };
1707 
1708 struct bpf_kfunc_btf {
1709 	struct btf *btf;
1710 	struct module *module;
1711 	u16 offset;
1712 };
1713 
1714 struct bpf_kfunc_desc_tab {
1715 	struct bpf_kfunc_desc descs[MAX_KFUNC_DESCS];
1716 	u32 nr_descs;
1717 };
1718 
1719 struct bpf_kfunc_btf_tab {
1720 	struct bpf_kfunc_btf descs[MAX_KFUNC_BTFS];
1721 	u32 nr_descs;
1722 };
1723 
1724 static int kfunc_desc_cmp_by_id_off(const void *a, const void *b)
1725 {
1726 	const struct bpf_kfunc_desc *d0 = a;
1727 	const struct bpf_kfunc_desc *d1 = b;
1728 
1729 	/* func_id is not greater than BTF_MAX_TYPE */
1730 	return d0->func_id - d1->func_id ?: d0->offset - d1->offset;
1731 }
1732 
1733 static int kfunc_btf_cmp_by_off(const void *a, const void *b)
1734 {
1735 	const struct bpf_kfunc_btf *d0 = a;
1736 	const struct bpf_kfunc_btf *d1 = b;
1737 
1738 	return d0->offset - d1->offset;
1739 }
1740 
1741 static const struct bpf_kfunc_desc *
1742 find_kfunc_desc(const struct bpf_prog *prog, u32 func_id, u16 offset)
1743 {
1744 	struct bpf_kfunc_desc desc = {
1745 		.func_id = func_id,
1746 		.offset = offset,
1747 	};
1748 	struct bpf_kfunc_desc_tab *tab;
1749 
1750 	tab = prog->aux->kfunc_tab;
1751 	return bsearch(&desc, tab->descs, tab->nr_descs,
1752 		       sizeof(tab->descs[0]), kfunc_desc_cmp_by_id_off);
1753 }
1754 
1755 static struct btf *__find_kfunc_desc_btf(struct bpf_verifier_env *env,
1756 					 s16 offset)
1757 {
1758 	struct bpf_kfunc_btf kf_btf = { .offset = offset };
1759 	struct bpf_kfunc_btf_tab *tab;
1760 	struct bpf_kfunc_btf *b;
1761 	struct module *mod;
1762 	struct btf *btf;
1763 	int btf_fd;
1764 
1765 	tab = env->prog->aux->kfunc_btf_tab;
1766 	b = bsearch(&kf_btf, tab->descs, tab->nr_descs,
1767 		    sizeof(tab->descs[0]), kfunc_btf_cmp_by_off);
1768 	if (!b) {
1769 		if (tab->nr_descs == MAX_KFUNC_BTFS) {
1770 			verbose(env, "too many different module BTFs\n");
1771 			return ERR_PTR(-E2BIG);
1772 		}
1773 
1774 		if (bpfptr_is_null(env->fd_array)) {
1775 			verbose(env, "kfunc offset > 0 without fd_array is invalid\n");
1776 			return ERR_PTR(-EPROTO);
1777 		}
1778 
1779 		if (copy_from_bpfptr_offset(&btf_fd, env->fd_array,
1780 					    offset * sizeof(btf_fd),
1781 					    sizeof(btf_fd)))
1782 			return ERR_PTR(-EFAULT);
1783 
1784 		btf = btf_get_by_fd(btf_fd);
1785 		if (IS_ERR(btf)) {
1786 			verbose(env, "invalid module BTF fd specified\n");
1787 			return btf;
1788 		}
1789 
1790 		if (!btf_is_module(btf)) {
1791 			verbose(env, "BTF fd for kfunc is not a module BTF\n");
1792 			btf_put(btf);
1793 			return ERR_PTR(-EINVAL);
1794 		}
1795 
1796 		mod = btf_try_get_module(btf);
1797 		if (!mod) {
1798 			btf_put(btf);
1799 			return ERR_PTR(-ENXIO);
1800 		}
1801 
1802 		b = &tab->descs[tab->nr_descs++];
1803 		b->btf = btf;
1804 		b->module = mod;
1805 		b->offset = offset;
1806 
1807 		sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]),
1808 		     kfunc_btf_cmp_by_off, NULL);
1809 	}
1810 	return b->btf;
1811 }
1812 
1813 void bpf_free_kfunc_btf_tab(struct bpf_kfunc_btf_tab *tab)
1814 {
1815 	if (!tab)
1816 		return;
1817 
1818 	while (tab->nr_descs--) {
1819 		module_put(tab->descs[tab->nr_descs].module);
1820 		btf_put(tab->descs[tab->nr_descs].btf);
1821 	}
1822 	kfree(tab);
1823 }
1824 
1825 static struct btf *find_kfunc_desc_btf(struct bpf_verifier_env *env,
1826 				       u32 func_id, s16 offset)
1827 {
1828 	if (offset) {
1829 		if (offset < 0) {
1830 			/* In the future, this could be allowed to increase the limit
1831 			 * of the fd index into fd_array, by interpreting it as a u16.
1832 			 */
1833 			verbose(env, "negative offset disallowed for kernel module function call\n");
1834 			return ERR_PTR(-EINVAL);
1835 		}
1836 
1837 		return __find_kfunc_desc_btf(env, offset);
1838 	}
1839 	return btf_vmlinux ?: ERR_PTR(-ENOENT);
1840 }
1841 
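/* Editorial illustration (not part of the original source): a hypothetical
 * loader calling a kfunc defined in a module would pass the module BTF fd
 * through the prog-load fd_array and encode its index in the call insn:
 *
 *	int fds[2] = { -1, module_btf_fd };	// hypothetical fd_array
 *	attr.fd_array = (u64)(unsigned long)fds;
 *	// kfunc call insn: imm = BTF id of the func inside the module BTF,
 *	//                  off = 1, so __find_kfunc_desc_btf() reads fds[1];
 *	//                  off = 0 would resolve to btf_vmlinux instead.
 */
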
1842 static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id, s16 offset)
1843 {
1844 	const struct btf_type *func, *func_proto;
1845 	struct bpf_kfunc_btf_tab *btf_tab;
1846 	struct bpf_kfunc_desc_tab *tab;
1847 	struct bpf_prog_aux *prog_aux;
1848 	struct bpf_kfunc_desc *desc;
1849 	const char *func_name;
1850 	struct btf *desc_btf;
1851 	unsigned long call_imm;
1852 	unsigned long addr;
1853 	int err;
1854 
1855 	prog_aux = env->prog->aux;
1856 	tab = prog_aux->kfunc_tab;
1857 	btf_tab = prog_aux->kfunc_btf_tab;
1858 	if (!tab) {
1859 		if (!btf_vmlinux) {
1860 			verbose(env, "calling kernel function is not supported without CONFIG_DEBUG_INFO_BTF\n");
1861 			return -ENOTSUPP;
1862 		}
1863 
1864 		if (!env->prog->jit_requested) {
1865 			verbose(env, "JIT is required for calling kernel function\n");
1866 			return -ENOTSUPP;
1867 		}
1868 
1869 		if (!bpf_jit_supports_kfunc_call()) {
1870 			verbose(env, "JIT does not support calling kernel function\n");
1871 			return -ENOTSUPP;
1872 		}
1873 
1874 		if (!env->prog->gpl_compatible) {
1875 			verbose(env, "cannot call kernel function from non-GPL compatible program\n");
1876 			return -EINVAL;
1877 		}
1878 
1879 		tab = kzalloc(sizeof(*tab), GFP_KERNEL);
1880 		if (!tab)
1881 			return -ENOMEM;
1882 		prog_aux->kfunc_tab = tab;
1883 	}
1884 
1885 	/* func_id == 0 is always invalid, but instead of returning an error, be
1886 	 * conservative and wait until the code elimination pass before returning
1887 	 * an error, so that BPF programs loaded from userspace may contain such
1888 	 * invalid calls as long as they get pruned out.  It is also required that
1889 	 * offset be left untouched for such calls.
1890 	 */
1891 	if (!func_id && !offset)
1892 		return 0;
1893 
1894 	if (!btf_tab && offset) {
1895 		btf_tab = kzalloc(sizeof(*btf_tab), GFP_KERNEL);
1896 		if (!btf_tab)
1897 			return -ENOMEM;
1898 		prog_aux->kfunc_btf_tab = btf_tab;
1899 	}
1900 
1901 	desc_btf = find_kfunc_desc_btf(env, func_id, offset);
1902 	if (IS_ERR(desc_btf)) {
1903 		verbose(env, "failed to find BTF for kernel function\n");
1904 		return PTR_ERR(desc_btf);
1905 	}
1906 
1907 	if (find_kfunc_desc(env->prog, func_id, offset))
1908 		return 0;
1909 
1910 	if (tab->nr_descs == MAX_KFUNC_DESCS) {
1911 		verbose(env, "too many different kernel function calls\n");
1912 		return -E2BIG;
1913 	}
1914 
1915 	func = btf_type_by_id(desc_btf, func_id);
1916 	if (!func || !btf_type_is_func(func)) {
1917 		verbose(env, "kernel btf_id %u is not a function\n",
1918 			func_id);
1919 		return -EINVAL;
1920 	}
1921 	func_proto = btf_type_by_id(desc_btf, func->type);
1922 	if (!func_proto || !btf_type_is_func_proto(func_proto)) {
1923 		verbose(env, "kernel function btf_id %u does not have a valid func_proto\n",
1924 			func_id);
1925 		return -EINVAL;
1926 	}
1927 
1928 	func_name = btf_name_by_offset(desc_btf, func->name_off);
1929 	addr = kallsyms_lookup_name(func_name);
1930 	if (!addr) {
1931 		verbose(env, "cannot find address for kernel function %s\n",
1932 			func_name);
1933 		return -EINVAL;
1934 	}
1935 
1936 	call_imm = BPF_CALL_IMM(addr);
1937 	/* Check whether or not the relative offset overflows desc->imm */
1938 	if ((unsigned long)(s32)call_imm != call_imm) {
1939 		verbose(env, "address of kernel function %s is out of range\n",
1940 			func_name);
1941 		return -EINVAL;
1942 	}
1943 
1944 	desc = &tab->descs[tab->nr_descs++];
1945 	desc->func_id = func_id;
1946 	desc->imm = call_imm;
1947 	desc->offset = offset;
1948 	err = btf_distill_func_proto(&env->log, desc_btf,
1949 				     func_proto, func_name,
1950 				     &desc->func_model);
1951 	if (!err)
1952 		sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]),
1953 		     kfunc_desc_cmp_by_id_off, NULL);
1954 	return err;
1955 }
1956 
1957 static int kfunc_desc_cmp_by_imm(const void *a, const void *b)
1958 {
1959 	const struct bpf_kfunc_desc *d0 = a;
1960 	const struct bpf_kfunc_desc *d1 = b;
1961 
1962 	if (d0->imm > d1->imm)
1963 		return 1;
1964 	else if (d0->imm < d1->imm)
1965 		return -1;
1966 	return 0;
1967 }
1968 
1969 static void sort_kfunc_descs_by_imm(struct bpf_prog *prog)
1970 {
1971 	struct bpf_kfunc_desc_tab *tab;
1972 
1973 	tab = prog->aux->kfunc_tab;
1974 	if (!tab)
1975 		return;
1976 
1977 	sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]),
1978 	     kfunc_desc_cmp_by_imm, NULL);
1979 }
1980 
1981 bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog)
1982 {
1983 	return !!prog->aux->kfunc_tab;
1984 }
1985 
1986 const struct btf_func_model *
1987 bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
1988 			 const struct bpf_insn *insn)
1989 {
1990 	const struct bpf_kfunc_desc desc = {
1991 		.imm = insn->imm,
1992 	};
1993 	const struct bpf_kfunc_desc *res;
1994 	struct bpf_kfunc_desc_tab *tab;
1995 
1996 	tab = prog->aux->kfunc_tab;
1997 	res = bsearch(&desc, tab->descs, tab->nr_descs,
1998 		      sizeof(tab->descs[0]), kfunc_desc_cmp_by_imm);
1999 
2000 	return res ? &res->func_model : NULL;
2001 }
2002 
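/* Editorial sketch (assumptions noted, not from the original source): a JIT
 * emitting a kfunc call site could fetch the argument/return layout via the
 * insn's imm, which by this point holds the BPF_CALL_IMM()-encoded address:
 *
 *	const struct btf_func_model *m;
 *
 *	m = bpf_jit_find_kfunc_model(prog, insn);
 *	if (!m)
 *		return -EINVAL;
 *
 * This only works after sort_kfunc_descs_by_imm() has re-sorted the table,
 * since the bsearch() above compares by imm rather than by func_id/offset.
 */
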
2003 static int add_subprog_and_kfunc(struct bpf_verifier_env *env)
2004 {
2005 	struct bpf_subprog_info *subprog = env->subprog_info;
2006 	struct bpf_insn *insn = env->prog->insnsi;
2007 	int i, ret, insn_cnt = env->prog->len;
2008 
2009 	/* Add entry function. */
2010 	ret = add_subprog(env, 0);
2011 	if (ret)
2012 		return ret;
2013 
2014 	for (i = 0; i < insn_cnt; i++, insn++) {
2015 		if (!bpf_pseudo_func(insn) && !bpf_pseudo_call(insn) &&
2016 		    !bpf_pseudo_kfunc_call(insn))
2017 			continue;
2018 
2019 		if (!env->bpf_capable) {
2020 			verbose(env, "loading/calling other bpf or kernel functions are allowed for CAP_BPF and CAP_SYS_ADMIN\n");
2021 			return -EPERM;
2022 		}
2023 
2024 		if (bpf_pseudo_func(insn) || bpf_pseudo_call(insn))
2025 			ret = add_subprog(env, i + insn->imm + 1);
2026 		else
2027 			ret = add_kfunc_call(env, insn->imm, insn->off);
2028 
2029 		if (ret < 0)
2030 			return ret;
2031 	}
2032 
2033 	/* Add a fake 'exit' subprog which could simplify subprog iteration
2034 	 * logic. 'subprog_cnt' should not be increased.
2035 	 */
2036 	subprog[env->subprog_cnt].start = insn_cnt;
2037 
2038 	if (env->log.level & BPF_LOG_LEVEL2)
2039 		for (i = 0; i < env->subprog_cnt; i++)
2040 			verbose(env, "func#%d @%d\n", i, subprog[i].start);
2041 
2042 	return 0;
2043 }
2044 
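/* Editorial worked example (not part of the original source): for
 *
 *	0: r6 = r1
 *	1: call pc+2		// bpf_pseudo_call(), imm == 2
 *	2: r0 = 0
 *	3: exit
 *	4: r0 = 1		// callee entry: add_subprog(env, 1 + 2 + 1)
 *	5: exit
 *
 * add_subprog_and_kfunc() leaves subprog_cnt == 2 with
 * subprog_info[] = { {start=0}, {start=4}, {start=6 (fake exit)} }.
 */
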
2045 static int check_subprogs(struct bpf_verifier_env *env)
2046 {
2047 	int i, subprog_start, subprog_end, off, cur_subprog = 0;
2048 	struct bpf_subprog_info *subprog = env->subprog_info;
2049 	struct bpf_insn *insn = env->prog->insnsi;
2050 	int insn_cnt = env->prog->len;
2051 
2052 	/* now check that all jumps are within the same subprog */
2053 	subprog_start = subprog[cur_subprog].start;
2054 	subprog_end = subprog[cur_subprog + 1].start;
2055 	for (i = 0; i < insn_cnt; i++) {
2056 		u8 code = insn[i].code;
2057 
2058 		if (code == (BPF_JMP | BPF_CALL) &&
2059 		    insn[i].imm == BPF_FUNC_tail_call &&
2060 		    insn[i].src_reg != BPF_PSEUDO_CALL)
2061 			subprog[cur_subprog].has_tail_call = true;
2062 		if (BPF_CLASS(code) == BPF_LD &&
2063 		    (BPF_MODE(code) == BPF_ABS || BPF_MODE(code) == BPF_IND))
2064 			subprog[cur_subprog].has_ld_abs = true;
2065 		if (BPF_CLASS(code) != BPF_JMP && BPF_CLASS(code) != BPF_JMP32)
2066 			goto next;
2067 		if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL)
2068 			goto next;
2069 		off = i + insn[i].off + 1;
2070 		if (off < subprog_start || off >= subprog_end) {
2071 			verbose(env, "jump out of range from insn %d to %d\n", i, off);
2072 			return -EINVAL;
2073 		}
2074 next:
2075 		if (i == subprog_end - 1) {
2076 			/* to avoid fall-through from one subprog into another
2077 			 * the last insn of the subprog should be either exit
2078 			 * or unconditional jump back
2079 			 */
2080 			if (code != (BPF_JMP | BPF_EXIT) &&
2081 			    code != (BPF_JMP | BPF_JA)) {
2082 				verbose(env, "last insn is not an exit or jmp\n");
2083 				return -EINVAL;
2084 			}
2085 			subprog_start = subprog_end;
2086 			cur_subprog++;
2087 			if (cur_subprog < env->subprog_cnt)
2088 				subprog_end = subprog[cur_subprog + 1].start;
2089 		}
2090 	}
2091 	return 0;
2092 }
2093 
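/* Editorial example: with the layout sketched above, subprog 0 spans insns
 * [0, 4).  A jump such as
 *
 *	2: goto pc+1		// would land on insn 4 inside subprog 1
 *
 * is rejected here with "jump out of range from insn 2 to 4", and a subprog
 * whose last insn is neither an exit nor an unconditional jump triggers
 * "last insn is not an exit or jmp".
 */
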
2094 /* Parentage chain of this register (or stack slot) should take care of all
2095  * issues like callee-saved registers, stack slot allocation time, etc.
2096  */
2097 static int mark_reg_read(struct bpf_verifier_env *env,
2098 			 const struct bpf_reg_state *state,
2099 			 struct bpf_reg_state *parent, u8 flag)
2100 {
2101 	bool writes = parent == state->parent; /* Observe write marks */
2102 	int cnt = 0;
2103 
2104 	while (parent) {
2105 		/* if read wasn't screened by an earlier write ... */
2106 		if (writes && state->live & REG_LIVE_WRITTEN)
2107 			break;
2108 		if (parent->live & REG_LIVE_DONE) {
2109 			verbose(env, "verifier BUG type %s var_off %lld off %d\n",
2110 				reg_type_str(env, parent->type),
2111 				parent->var_off.value, parent->off);
2112 			return -EFAULT;
2113 		}
2114 		/* The first condition is more likely to be true than the
2115 		 * second, checked it first.
2116 		 */
2117 		if ((parent->live & REG_LIVE_READ) == flag ||
2118 		    parent->live & REG_LIVE_READ64)
2119 			/* The parentage chain never changes and
2120 			 * this parent was already marked as LIVE_READ.
2121 			 * There is no need to keep walking the chain again and
2122 			 * keep re-marking all parents as LIVE_READ.
2123 			 * This case happens when the same register is read
2124 			 * multiple times without writes into it in-between.
2125 			 * Also, if parent has the stronger REG_LIVE_READ64 set,
2126 			 * then no need to set the weak REG_LIVE_READ32.
2127 			 */
2128 			break;
2129 		/* ... then we depend on parent's value */
2130 		parent->live |= flag;
2131 		/* REG_LIVE_READ64 overrides REG_LIVE_READ32. */
2132 		if (flag == REG_LIVE_READ64)
2133 			parent->live &= ~REG_LIVE_READ32;
2134 		state = parent;
2135 		parent = state->parent;
2136 		writes = true;
2137 		cnt++;
2138 	}
2139 
2140 	if (env->longest_mark_read_walk < cnt)
2141 		env->longest_mark_read_walk = cnt;
2142 	return 0;
2143 }
2144 
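/* Editorial note: the read mark added above propagates up the parentage
 * chain until it either hits a state that wrote this register/slot itself
 * (REG_LIVE_WRITTEN screens the read) or a parent already carrying an equal
 * or stronger read mark, so re-reading the same register stays cheap;
 * env->longest_mark_read_walk only records the deepest such walk.
 */
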
2145 /* This function is supposed to be used by the following 32-bit optimization
2146  * code only. It returns TRUE if the source or destination register operates
2147  * on 64-bit, otherwise it returns FALSE.
2148  */
2149 static bool is_reg64(struct bpf_verifier_env *env, struct bpf_insn *insn,
2150 		     u32 regno, struct bpf_reg_state *reg, enum reg_arg_type t)
2151 {
2152 	u8 code, class, op;
2153 
2154 	code = insn->code;
2155 	class = BPF_CLASS(code);
2156 	op = BPF_OP(code);
2157 	if (class == BPF_JMP) {
2158 		/* BPF_EXIT for "main" will reach here. Return TRUE
2159 		 * conservatively.
2160 		 */
2161 		if (op == BPF_EXIT)
2162 			return true;
2163 		if (op == BPF_CALL) {
2164 			/* BPF to BPF call will reach here because of marking
2165 			 * caller saved clobbers with DST_OP_NO_MARK, for which we
2166 			 * don't care about the register def because they are
2167 			 * already marked as NOT_INIT.
2168 			 */
2169 			if (insn->src_reg == BPF_PSEUDO_CALL)
2170 				return false;
2171 			/* Helper call will reach here because of arg type
2172 			 * check, conservatively return TRUE.
2173 			 */
2174 			if (t == SRC_OP)
2175 				return true;
2176 
2177 			return false;
2178 		}
2179 	}
2180 
2181 	if (class == BPF_ALU64 || class == BPF_JMP ||
2182 	    /* BPF_END always use BPF_ALU class. */
2183 	    (class == BPF_ALU && op == BPF_END && insn->imm == 64))
2184 		return true;
2185 
2186 	if (class == BPF_ALU || class == BPF_JMP32)
2187 		return false;
2188 
2189 	if (class == BPF_LDX) {
2190 		if (t != SRC_OP)
2191 			return BPF_SIZE(code) == BPF_DW;
2192 		/* LDX source must be ptr. */
2193 		return true;
2194 	}
2195 
2196 	if (class == BPF_STX) {
2197 		/* BPF_STX (including atomic variants) has multiple source
2198 		 * operands, one of which is a ptr. Check whether the caller is
2199 		 * asking about it.
2200 		 */
2201 		if (t == SRC_OP && reg->type != SCALAR_VALUE)
2202 			return true;
2203 		return BPF_SIZE(code) == BPF_DW;
2204 	}
2205 
2206 	if (class == BPF_LD) {
2207 		u8 mode = BPF_MODE(code);
2208 
2209 		/* LD_IMM64 */
2210 		if (mode == BPF_IMM)
2211 			return true;
2212 
2213 		/* Both LD_IND and LD_ABS return 32-bit data. */
2214 		if (t != SRC_OP)
2215 			return false;
2216 
2217 		/* Implicit ctx ptr. */
2218 		if (regno == BPF_REG_6)
2219 			return true;
2220 
2221 		/* Explicit source could be any width. */
2222 		return true;
2223 	}
2224 
2225 	if (class == BPF_ST)
2226 		/* The only source register for BPF_ST is a ptr. */
2227 		return true;
2228 
2229 	/* Conservatively return true at default. */
2230 	return true;
2231 }
2232 
2233 /* Return the regno defined by the insn, or -1. */
2234 static int insn_def_regno(const struct bpf_insn *insn)
2235 {
2236 	switch (BPF_CLASS(insn->code)) {
2237 	case BPF_JMP:
2238 	case BPF_JMP32:
2239 	case BPF_ST:
2240 		return -1;
2241 	case BPF_STX:
2242 		if (BPF_MODE(insn->code) == BPF_ATOMIC &&
2243 		    (insn->imm & BPF_FETCH)) {
2244 			if (insn->imm == BPF_CMPXCHG)
2245 				return BPF_REG_0;
2246 			else
2247 				return insn->src_reg;
2248 		} else {
2249 			return -1;
2250 		}
2251 	default:
2252 		return insn->dst_reg;
2253 	}
2254 }
2255 
2256 /* Return TRUE if INSN has defined any 32-bit value explicitly. */
2257 static bool insn_has_def32(struct bpf_verifier_env *env, struct bpf_insn *insn)
2258 {
2259 	int dst_reg = insn_def_regno(insn);
2260 
2261 	if (dst_reg == -1)
2262 		return false;
2263 
2264 	return !is_reg64(env, insn, dst_reg, NULL, DST_OP);
2265 }
2266 
2267 static void mark_insn_zext(struct bpf_verifier_env *env,
2268 			   struct bpf_reg_state *reg)
2269 {
2270 	s32 def_idx = reg->subreg_def;
2271 
2272 	if (def_idx == DEF_NOT_SUBREG)
2273 		return;
2274 
2275 	env->insn_aux_data[def_idx - 1].zext_dst = true;
2276 	/* The dst will be zero extended, so won't be sub-register anymore. */
2277 	reg->subreg_def = DEF_NOT_SUBREG;
2278 }
2279 
2280 static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
2281 			 enum reg_arg_type t)
2282 {
2283 	struct bpf_verifier_state *vstate = env->cur_state;
2284 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
2285 	struct bpf_insn *insn = env->prog->insnsi + env->insn_idx;
2286 	struct bpf_reg_state *reg, *regs = state->regs;
2287 	bool rw64;
2288 
2289 	if (regno >= MAX_BPF_REG) {
2290 		verbose(env, "R%d is invalid\n", regno);
2291 		return -EINVAL;
2292 	}
2293 
2294 	mark_reg_scratched(env, regno);
2295 
2296 	reg = &regs[regno];
2297 	rw64 = is_reg64(env, insn, regno, reg, t);
2298 	if (t == SRC_OP) {
2299 		/* check whether register used as source operand can be read */
2300 		if (reg->type == NOT_INIT) {
2301 			verbose(env, "R%d !read_ok\n", regno);
2302 			return -EACCES;
2303 		}
2304 		/* We don't need to worry about FP liveness because it's read-only */
2305 		if (regno == BPF_REG_FP)
2306 			return 0;
2307 
2308 		if (rw64)
2309 			mark_insn_zext(env, reg);
2310 
2311 		return mark_reg_read(env, reg, reg->parent,
2312 				     rw64 ? REG_LIVE_READ64 : REG_LIVE_READ32);
2313 	} else {
2314 		/* check whether register used as dest operand can be written to */
2315 		if (regno == BPF_REG_FP) {
2316 			verbose(env, "frame pointer is read only\n");
2317 			return -EACCES;
2318 		}
2319 		reg->live |= REG_LIVE_WRITTEN;
2320 		reg->subreg_def = rw64 ? DEF_NOT_SUBREG : env->insn_idx + 1;
2321 		if (t == DST_OP)
2322 			mark_reg_unknown(env, regs, regno);
2323 	}
2324 	return 0;
2325 }
2326 
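/* Editorial example (illustrative only): for the sequence
 *
 *	3: w6 = w1		// 32-bit ALU def: r6.subreg_def = 4 (insn_idx + 1)
 *	4: r0 = r6		// 64-bit read: mark_insn_zext() sets
 *				//   insn_aux_data[3].zext_dst = true
 *
 * a later verifier pass can then insert an explicit zero-extension after
 * insn 3 on architectures whose JIT does not implicitly clear the upper
 * 32 bits of sub-register writes.
 */
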
2327 /* for any branch, call, exit record the history of jmps in the given state */
2328 static int push_jmp_history(struct bpf_verifier_env *env,
2329 			    struct bpf_verifier_state *cur)
2330 {
2331 	u32 cnt = cur->jmp_history_cnt;
2332 	struct bpf_idx_pair *p;
2333 
2334 	cnt++;
2335 	p = krealloc(cur->jmp_history, cnt * sizeof(*p), GFP_USER);
2336 	if (!p)
2337 		return -ENOMEM;
2338 	p[cnt - 1].idx = env->insn_idx;
2339 	p[cnt - 1].prev_idx = env->prev_insn_idx;
2340 	cur->jmp_history = p;
2341 	cur->jmp_history_cnt = cnt;
2342 	return 0;
2343 }
2344 
2345 /* Backtrack one insn at a time. If idx is not at the top of recorded
2346  * history then previous instruction came from straight line execution.
2347  */
2348 static int get_prev_insn_idx(struct bpf_verifier_state *st, int i,
2349 			     u32 *history)
2350 {
2351 	u32 cnt = *history;
2352 
2353 	if (cnt && st->jmp_history[cnt - 1].idx == i) {
2354 		i = st->jmp_history[cnt - 1].prev_idx;
2355 		(*history)--;
2356 	} else {
2357 		i--;
2358 	}
2359 	return i;
2360 }
2361 
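/* Editorial example: if this path recorded jmp_history = [{idx=7, prev_idx=3}]
 * and backtracking sits at insn 7, get_prev_insn_idx() follows the recorded
 * jump back to insn 3 (and consumes the history entry); from insn 6, 5, ...
 * it simply returns i - 1, since those insns were reached by straight-line
 * execution.
 */
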
2362 static const char *disasm_kfunc_name(void *data, const struct bpf_insn *insn)
2363 {
2364 	const struct btf_type *func;
2365 	struct btf *desc_btf;
2366 
2367 	if (insn->src_reg != BPF_PSEUDO_KFUNC_CALL)
2368 		return NULL;
2369 
2370 	desc_btf = find_kfunc_desc_btf(data, insn->imm, insn->off);
2371 	if (IS_ERR(desc_btf))
2372 		return "<error>";
2373 
2374 	func = btf_type_by_id(desc_btf, insn->imm);
2375 	return btf_name_by_offset(desc_btf, func->name_off);
2376 }
2377 
2378 /* For given verifier state backtrack_insn() is called from the last insn to
2379  * the first insn. Its purpose is to compute a bitmask of registers and
2380  * stack slots that needs precision in the parent verifier state.
2381  */
2382 static int backtrack_insn(struct bpf_verifier_env *env, int idx,
2383 			  u32 *reg_mask, u64 *stack_mask)
2384 {
2385 	const struct bpf_insn_cbs cbs = {
2386 		.cb_call	= disasm_kfunc_name,
2387 		.cb_print	= verbose,
2388 		.private_data	= env,
2389 	};
2390 	struct bpf_insn *insn = env->prog->insnsi + idx;
2391 	u8 class = BPF_CLASS(insn->code);
2392 	u8 opcode = BPF_OP(insn->code);
2393 	u8 mode = BPF_MODE(insn->code);
2394 	u32 dreg = 1u << insn->dst_reg;
2395 	u32 sreg = 1u << insn->src_reg;
2396 	u32 spi;
2397 
2398 	if (insn->code == 0)
2399 		return 0;
2400 	if (env->log.level & BPF_LOG_LEVEL2) {
2401 		verbose(env, "regs=%x stack=%llx before ", *reg_mask, *stack_mask);
2402 		verbose(env, "%d: ", idx);
2403 		print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
2404 	}
2405 
2406 	if (class == BPF_ALU || class == BPF_ALU64) {
2407 		if (!(*reg_mask & dreg))
2408 			return 0;
2409 		if (opcode == BPF_MOV) {
2410 			if (BPF_SRC(insn->code) == BPF_X) {
2411 				/* dreg = sreg
2412 				 * dreg needs precision after this insn
2413 				 * sreg needs precision before this insn
2414 				 */
2415 				*reg_mask &= ~dreg;
2416 				*reg_mask |= sreg;
2417 			} else {
2418 				/* dreg = K
2419 				 * dreg needs precision after this insn.
2420 				 * Corresponding register is already marked
2421 				 * as precise=true in this verifier state.
2422 				 * No further markings in parent are necessary
2423 				 */
2424 				*reg_mask &= ~dreg;
2425 			}
2426 		} else {
2427 			if (BPF_SRC(insn->code) == BPF_X) {
2428 				/* dreg += sreg
2429 				 * both dreg and sreg need precision
2430 				 * before this insn
2431 				 */
2432 				*reg_mask |= sreg;
2433 			} /* else dreg += K
2434 			   * dreg still needs precision before this insn
2435 			   */
2436 		}
2437 	} else if (class == BPF_LDX) {
2438 		if (!(*reg_mask & dreg))
2439 			return 0;
2440 		*reg_mask &= ~dreg;
2441 
2442 		/* scalars can only be spilled into stack w/o losing precision.
2443 		 * Load from any other memory can be zero extended.
2444 		 * The desire to keep that precision is already indicated
2445 		 * by 'precise' mark in corresponding register of this state.
2446 		 * No further tracking necessary.
2447 		 */
2448 		if (insn->src_reg != BPF_REG_FP)
2449 			return 0;
2450 
2451 		/* dreg = *(u64 *)[fp - off] was a fill from the stack.
2452 		 * that [fp - off] slot contains scalar that needs to be
2453 		 * tracked with precision
2454 		 */
2455 		spi = (-insn->off - 1) / BPF_REG_SIZE;
2456 		if (spi >= 64) {
2457 			verbose(env, "BUG spi %d\n", spi);
2458 			WARN_ONCE(1, "verifier backtracking bug");
2459 			return -EFAULT;
2460 		}
2461 		*stack_mask |= 1ull << spi;
2462 	} else if (class == BPF_STX || class == BPF_ST) {
2463 		if (*reg_mask & dreg)
2464 			/* stx & st shouldn't be using _scalar_ dst_reg
2465 			 * to access memory. It means backtracking
2466 			 * encountered a case of pointer subtraction.
2467 			 */
2468 			return -ENOTSUPP;
2469 		/* scalars can only be spilled into stack */
2470 		if (insn->dst_reg != BPF_REG_FP)
2471 			return 0;
2472 		spi = (-insn->off - 1) / BPF_REG_SIZE;
2473 		if (spi >= 64) {
2474 			verbose(env, "BUG spi %d\n", spi);
2475 			WARN_ONCE(1, "verifier backtracking bug");
2476 			return -EFAULT;
2477 		}
2478 		if (!(*stack_mask & (1ull << spi)))
2479 			return 0;
2480 		*stack_mask &= ~(1ull << spi);
2481 		if (class == BPF_STX)
2482 			*reg_mask |= sreg;
2483 	} else if (class == BPF_JMP || class == BPF_JMP32) {
2484 		if (opcode == BPF_CALL) {
2485 			if (insn->src_reg == BPF_PSEUDO_CALL)
2486 				return -ENOTSUPP;
2487 			/* regular helper call sets R0 */
2488 			*reg_mask &= ~1;
2489 			if (*reg_mask & 0x3f) {
2490 				/* if backtracing was looking for registers R1-R5
2491 				 * they should have been found already.
2492 				 */
2493 				verbose(env, "BUG regs %x\n", *reg_mask);
2494 				WARN_ONCE(1, "verifier backtracking bug");
2495 				return -EFAULT;
2496 			}
2497 		} else if (opcode == BPF_EXIT) {
2498 			return -ENOTSUPP;
2499 		}
2500 	} else if (class == BPF_LD) {
2501 		if (!(*reg_mask & dreg))
2502 			return 0;
2503 		*reg_mask &= ~dreg;
2504 		/* It's ld_imm64 or ld_abs or ld_ind.
2505 		 * For ld_imm64 no further tracking of precision
2506 		 * into parent is necessary
2507 		 */
2508 		if (mode == BPF_IND || mode == BPF_ABS)
2509 			/* to be analyzed */
2510 			return -ENOTSUPP;
2511 	}
2512 	return 0;
2513 }
2514 
2515 /* the scalar precision tracking algorithm:
2516  * . at the start all registers have precise=false.
2517  * . scalar ranges are tracked as normal through alu and jmp insns.
2518  * . once precise value of the scalar register is used in:
2519  *   .  ptr + scalar alu
2520  *   . if (scalar cond K|scalar)
2521  *   .  helper_call(.., scalar, ...) where ARG_CONST is expected
2522  *   backtrack through the verifier states and mark all registers and
2523  *   stack slots with spilled constants that these scalar registers
2524  *   should be precise.
2525  * . during state pruning two registers (or spilled stack slots)
2526  *   are equivalent if both are not precise.
2527  *
2528  * Note the verifier cannot simply walk the register parentage chain,
2529  * since many different registers and stack slots could have been
2530  * used to compute a single precise scalar.
2531  *
2532  * The approach of starting with precise=true for all registers and then
2533  * backtrack to mark a register as not precise when the verifier detects
2534  * that program doesn't care about specific value (e.g., when helper
2535  * takes register as ARG_ANYTHING parameter) is not safe.
2536  *
2537  * It's ok to walk single parentage chain of the verifier states.
2538  * It's possible that this backtracking will go all the way till 1st insn.
2539  * All other branches will be explored for needing precision later.
2540  *
2541  * The backtracking needs to deal with cases like:
2542  *   R8=map_value(id=0,off=0,ks=4,vs=1952,imm=0) R9_w=map_value(id=0,off=40,ks=4,vs=1952,imm=0)
2543  * r9 -= r8
2544  * r5 = r9
2545  * if r5 > 0x79f goto pc+7
2546  *    R5_w=inv(id=0,umax_value=1951,var_off=(0x0; 0x7ff))
2547  * r5 += 1
2548  * ...
2549  * call bpf_perf_event_output#25
2550  *   where .arg5_type = ARG_CONST_SIZE_OR_ZERO
2551  *
2552  * and this case:
2553  * r6 = 1
2554  * call foo // uses callee's r6 inside to compute r0
2555  * r0 += r6
2556  * if r0 == 0 goto
2557  *
2558  * to track above reg_mask/stack_mask needs to be independent for each frame.
2559  *
2560  * Also if parent's curframe > frame where backtracking started,
2561  * the verifier needs to mark registers in both frames, otherwise callees
2562  * may incorrectly prune callers. This is similar to
2563  * commit 7640ead93924 ("bpf: verifier: make sure callees don't prune with caller differences")
2564  *
2565  * For now backtracking falls back into conservative marking.
2566  */
2567 static void mark_all_scalars_precise(struct bpf_verifier_env *env,
2568 				     struct bpf_verifier_state *st)
2569 {
2570 	struct bpf_func_state *func;
2571 	struct bpf_reg_state *reg;
2572 	int i, j;
2573 
2574 	/* big hammer: mark all scalars precise in this path.
2575 	 * pop_stack may still get !precise scalars.
2576 	 */
2577 	for (; st; st = st->parent)
2578 		for (i = 0; i <= st->curframe; i++) {
2579 			func = st->frame[i];
2580 			for (j = 0; j < BPF_REG_FP; j++) {
2581 				reg = &func->regs[j];
2582 				if (reg->type != SCALAR_VALUE)
2583 					continue;
2584 				reg->precise = true;
2585 			}
2586 			for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) {
2587 				if (!is_spilled_reg(&func->stack[j]))
2588 					continue;
2589 				reg = &func->stack[j].spilled_ptr;
2590 				if (reg->type != SCALAR_VALUE)
2591 					continue;
2592 				reg->precise = true;
2593 			}
2594 		}
2595 }
2596 
2597 static int __mark_chain_precision(struct bpf_verifier_env *env, int regno,
2598 				  int spi)
2599 {
2600 	struct bpf_verifier_state *st = env->cur_state;
2601 	int first_idx = st->first_insn_idx;
2602 	int last_idx = env->insn_idx;
2603 	struct bpf_func_state *func;
2604 	struct bpf_reg_state *reg;
2605 	u32 reg_mask = regno >= 0 ? 1u << regno : 0;
2606 	u64 stack_mask = spi >= 0 ? 1ull << spi : 0;
2607 	bool skip_first = true;
2608 	bool new_marks = false;
2609 	int i, err;
2610 
2611 	if (!env->bpf_capable)
2612 		return 0;
2613 
2614 	func = st->frame[st->curframe];
2615 	if (regno >= 0) {
2616 		reg = &func->regs[regno];
2617 		if (reg->type != SCALAR_VALUE) {
2618 			WARN_ONCE(1, "backtracing misuse");
2619 			return -EFAULT;
2620 		}
2621 		if (!reg->precise)
2622 			new_marks = true;
2623 		else
2624 			reg_mask = 0;
2625 		reg->precise = true;
2626 	}
2627 
2628 	while (spi >= 0) {
2629 		if (!is_spilled_reg(&func->stack[spi])) {
2630 			stack_mask = 0;
2631 			break;
2632 		}
2633 		reg = &func->stack[spi].spilled_ptr;
2634 		if (reg->type != SCALAR_VALUE) {
2635 			stack_mask = 0;
2636 			break;
2637 		}
2638 		if (!reg->precise)
2639 			new_marks = true;
2640 		else
2641 			stack_mask = 0;
2642 		reg->precise = true;
2643 		break;
2644 	}
2645 
2646 	if (!new_marks)
2647 		return 0;
2648 	if (!reg_mask && !stack_mask)
2649 		return 0;
2650 	for (;;) {
2651 		DECLARE_BITMAP(mask, 64);
2652 		u32 history = st->jmp_history_cnt;
2653 
2654 		if (env->log.level & BPF_LOG_LEVEL2)
2655 			verbose(env, "last_idx %d first_idx %d\n", last_idx, first_idx);
2656 		for (i = last_idx;;) {
2657 			if (skip_first) {
2658 				err = 0;
2659 				skip_first = false;
2660 			} else {
2661 				err = backtrack_insn(env, i, &reg_mask, &stack_mask);
2662 			}
2663 			if (err == -ENOTSUPP) {
2664 				mark_all_scalars_precise(env, st);
2665 				return 0;
2666 			} else if (err) {
2667 				return err;
2668 			}
2669 			if (!reg_mask && !stack_mask)
2670 				/* Found assignment(s) into tracked register in this state.
2671 				 * Since this state is already marked, just return.
2672 				 * Nothing to be tracked further in the parent state.
2673 				 */
2674 				return 0;
2675 			if (i == first_idx)
2676 				break;
2677 			i = get_prev_insn_idx(st, i, &history);
2678 			if (i >= env->prog->len) {
2679 				/* This can happen if backtracking reached insn 0
2680 				 * and there are still reg_mask or stack_mask
2681 				 * to backtrack.
2682 				 * It means the backtracking missed the spot where
2683 				 * particular register was initialized with a constant.
2684 				 */
2685 				verbose(env, "BUG backtracking idx %d\n", i);
2686 				WARN_ONCE(1, "verifier backtracking bug");
2687 				return -EFAULT;
2688 			}
2689 		}
2690 		st = st->parent;
2691 		if (!st)
2692 			break;
2693 
2694 		new_marks = false;
2695 		func = st->frame[st->curframe];
2696 		bitmap_from_u64(mask, reg_mask);
2697 		for_each_set_bit(i, mask, 32) {
2698 			reg = &func->regs[i];
2699 			if (reg->type != SCALAR_VALUE) {
2700 				reg_mask &= ~(1u << i);
2701 				continue;
2702 			}
2703 			if (!reg->precise)
2704 				new_marks = true;
2705 			reg->precise = true;
2706 		}
2707 
2708 		bitmap_from_u64(mask, stack_mask);
2709 		for_each_set_bit(i, mask, 64) {
2710 			if (i >= func->allocated_stack / BPF_REG_SIZE) {
2711 				/* the sequence of instructions:
2712 				 * 2: (bf) r3 = r10
2713 				 * 3: (7b) *(u64 *)(r3 -8) = r0
2714 				 * 4: (79) r4 = *(u64 *)(r10 -8)
2715 				 * doesn't contain jmps. It's backtracked
2716 				 * as a single block.
2717 				 * During backtracking insn 3 is not recognized as
2718 				 * stack access, so at the end of backtracking
2719 				 * stack slot fp-8 is still marked in stack_mask.
2720 				 * However the parent state may not have accessed
2721 				 * fp-8 and it's "unallocated" stack space.
2722 				 * In such case fallback to conservative.
2723 				 */
2724 				mark_all_scalars_precise(env, st);
2725 				return 0;
2726 			}
2727 
2728 			if (!is_spilled_reg(&func->stack[i])) {
2729 				stack_mask &= ~(1ull << i);
2730 				continue;
2731 			}
2732 			reg = &func->stack[i].spilled_ptr;
2733 			if (reg->type != SCALAR_VALUE) {
2734 				stack_mask &= ~(1ull << i);
2735 				continue;
2736 			}
2737 			if (!reg->precise)
2738 				new_marks = true;
2739 			reg->precise = true;
2740 		}
2741 		if (env->log.level & BPF_LOG_LEVEL2) {
2742 			verbose(env, "parent %s regs=%x stack=%llx marks:",
2743 				new_marks ? "didn't have" : "already had",
2744 				reg_mask, stack_mask);
2745 			print_verifier_state(env, func, true);
2746 		}
2747 
2748 		if (!reg_mask && !stack_mask)
2749 			break;
2750 		if (!new_marks)
2751 			break;
2752 
2753 		last_idx = st->last_insn_idx;
2754 		first_idx = st->first_insn_idx;
2755 	}
2756 	return 0;
2757 }
2758 
2759 static int mark_chain_precision(struct bpf_verifier_env *env, int regno)
2760 {
2761 	return __mark_chain_precision(env, regno, -1);
2762 }
2763 
2764 static int mark_chain_precision_stack(struct bpf_verifier_env *env, int spi)
2765 {
2766 	return __mark_chain_precision(env, -1, spi);
2767 }
2768 
2769 static bool is_spillable_regtype(enum bpf_reg_type type)
2770 {
2771 	switch (base_type(type)) {
2772 	case PTR_TO_MAP_VALUE:
2773 	case PTR_TO_STACK:
2774 	case PTR_TO_CTX:
2775 	case PTR_TO_PACKET:
2776 	case PTR_TO_PACKET_META:
2777 	case PTR_TO_PACKET_END:
2778 	case PTR_TO_FLOW_KEYS:
2779 	case CONST_PTR_TO_MAP:
2780 	case PTR_TO_SOCKET:
2781 	case PTR_TO_SOCK_COMMON:
2782 	case PTR_TO_TCP_SOCK:
2783 	case PTR_TO_XDP_SOCK:
2784 	case PTR_TO_BTF_ID:
2785 	case PTR_TO_BUF:
2786 	case PTR_TO_PERCPU_BTF_ID:
2787 	case PTR_TO_MEM:
2788 	case PTR_TO_FUNC:
2789 	case PTR_TO_MAP_KEY:
2790 		return true;
2791 	default:
2792 		return false;
2793 	}
2794 }
2795 
2796 /* Does this register contain a constant zero? */
2797 static bool register_is_null(struct bpf_reg_state *reg)
2798 {
2799 	return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0);
2800 }
2801 
2802 static bool register_is_const(struct bpf_reg_state *reg)
2803 {
2804 	return reg->type == SCALAR_VALUE && tnum_is_const(reg->var_off);
2805 }
2806 
2807 static bool __is_scalar_unbounded(struct bpf_reg_state *reg)
2808 {
2809 	return tnum_is_unknown(reg->var_off) &&
2810 	       reg->smin_value == S64_MIN && reg->smax_value == S64_MAX &&
2811 	       reg->umin_value == 0 && reg->umax_value == U64_MAX &&
2812 	       reg->s32_min_value == S32_MIN && reg->s32_max_value == S32_MAX &&
2813 	       reg->u32_min_value == 0 && reg->u32_max_value == U32_MAX;
2814 }
2815 
2816 static bool register_is_bounded(struct bpf_reg_state *reg)
2817 {
2818 	return reg->type == SCALAR_VALUE && !__is_scalar_unbounded(reg);
2819 }
2820 
2821 static bool __is_pointer_value(bool allow_ptr_leaks,
2822 			       const struct bpf_reg_state *reg)
2823 {
2824 	if (allow_ptr_leaks)
2825 		return false;
2826 
2827 	return reg->type != SCALAR_VALUE;
2828 }
2829 
2830 static void save_register_state(struct bpf_func_state *state,
2831 				int spi, struct bpf_reg_state *reg,
2832 				int size)
2833 {
2834 	int i;
2835 
2836 	state->stack[spi].spilled_ptr = *reg;
2837 	if (size == BPF_REG_SIZE)
2838 		state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
2839 
2840 	for (i = BPF_REG_SIZE; i > BPF_REG_SIZE - size; i--)
2841 		state->stack[spi].slot_type[i - 1] = STACK_SPILL;
2842 
2843 	/* size < 8 bytes spill */
2844 	for (; i; i--)
2845 		scrub_spilled_slot(&state->stack[spi].slot_type[i - 1]);
2846 }
2847 
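/* Editorial example: spilling a 4-byte bounded scalar with
 * save_register_state(state, spi, reg, 4) copies the register into
 * spilled_ptr, marks slot_type[7..4] as STACK_SPILL and scrubs
 * slot_type[3..0] via scrub_spilled_slot(); REG_LIVE_WRITTEN is only set
 * for full 8-byte spills so that partial writes do not terminate read
 * propagation too early.
 */
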
2848 /* check_stack_{read,write}_fixed_off functions track spill/fill of registers,
2849  * stack boundary and alignment are checked in check_mem_access()
2850  */
2851 static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
2852 				       /* stack frame we're writing to */
2853 				       struct bpf_func_state *state,
2854 				       int off, int size, int value_regno,
2855 				       int insn_idx)
2856 {
2857 	struct bpf_func_state *cur; /* state of the current function */
2858 	int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err;
2859 	u32 dst_reg = env->prog->insnsi[insn_idx].dst_reg;
2860 	struct bpf_reg_state *reg = NULL;
2861 
2862 	err = grow_stack_state(state, round_up(slot + 1, BPF_REG_SIZE));
2863 	if (err)
2864 		return err;
2865 	/* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
2866 	 * so it's aligned access and [off, off + size) are within stack limits
2867 	 */
2868 	if (!env->allow_ptr_leaks &&
2869 	    state->stack[spi].slot_type[0] == STACK_SPILL &&
2870 	    size != BPF_REG_SIZE) {
2871 		verbose(env, "attempt to corrupt spilled pointer on stack\n");
2872 		return -EACCES;
2873 	}
2874 
2875 	cur = env->cur_state->frame[env->cur_state->curframe];
2876 	if (value_regno >= 0)
2877 		reg = &cur->regs[value_regno];
2878 	if (!env->bypass_spec_v4) {
2879 		bool sanitize = reg && is_spillable_regtype(reg->type);
2880 
2881 		for (i = 0; i < size; i++) {
2882 			if (state->stack[spi].slot_type[i] == STACK_INVALID) {
2883 				sanitize = true;
2884 				break;
2885 			}
2886 		}
2887 
2888 		if (sanitize)
2889 			env->insn_aux_data[insn_idx].sanitize_stack_spill = true;
2890 	}
2891 
2892 	mark_stack_slot_scratched(env, spi);
2893 	if (reg && !(off % BPF_REG_SIZE) && register_is_bounded(reg) &&
2894 	    !register_is_null(reg) && env->bpf_capable) {
2895 		if (dst_reg != BPF_REG_FP) {
2896 			/* The backtracking logic can only recognize explicit
2897 			 * stack slot addresses like [fp - 8]. Spills of a
2898 			 * scalar via a different register have to be conservative.
2899 			 * Backtrack from here and mark all registers as precise
2900 			 * that contributed into 'reg' being a constant.
2901 			 */
2902 			err = mark_chain_precision(env, value_regno);
2903 			if (err)
2904 				return err;
2905 		}
2906 		save_register_state(state, spi, reg, size);
2907 	} else if (reg && is_spillable_regtype(reg->type)) {
2908 		/* register containing pointer is being spilled into stack */
2909 		if (size != BPF_REG_SIZE) {
2910 			verbose_linfo(env, insn_idx, "; ");
2911 			verbose(env, "invalid size of register spill\n");
2912 			return -EACCES;
2913 		}
2914 		if (state != cur && reg->type == PTR_TO_STACK) {
2915 			verbose(env, "cannot spill pointers to stack into stack frame of the caller\n");
2916 			return -EINVAL;
2917 		}
2918 		save_register_state(state, spi, reg, size);
2919 	} else {
2920 		u8 type = STACK_MISC;
2921 
2922 		/* regular write of data into stack destroys any spilled ptr */
2923 		state->stack[spi].spilled_ptr.type = NOT_INIT;
2924 		/* Mark slots as STACK_MISC if they belonged to spilled ptr. */
2925 		if (is_spilled_reg(&state->stack[spi]))
2926 			for (i = 0; i < BPF_REG_SIZE; i++)
2927 				scrub_spilled_slot(&state->stack[spi].slot_type[i]);
2928 
2929 		/* only mark the slot as written if all 8 bytes were written
2930 		 * otherwise read propagation may incorrectly stop too soon
2931 		 * when stack slots are partially written.
2932 		 * This heuristic means that read propagation will be
2933 		 * conservative, since it will add reg_live_read marks
2934 		 * to stack slots all the way to the first state when a program
2935 		 * writes+reads less than 8 bytes.
2936 		 */
2937 		if (size == BPF_REG_SIZE)
2938 			state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
2939 
2940 		/* when we zero initialize stack slots mark them as such */
2941 		if (reg && register_is_null(reg)) {
2942 			/* backtracking doesn't work for STACK_ZERO yet. */
2943 			err = mark_chain_precision(env, value_regno);
2944 			if (err)
2945 				return err;
2946 			type = STACK_ZERO;
2947 		}
2948 
2949 		/* Mark slots affected by this stack write. */
2950 		for (i = 0; i < size; i++)
2951 			state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] =
2952 				type;
2953 	}
2954 	return 0;
2955 }
2956 
2957 /* Write the stack: 'stack[ptr_regno + off] = value_regno'. 'ptr_regno' is
2958  * known to contain a variable offset.
2959  * This function checks whether the write is permitted and conservatively
2960  * tracks the effects of the write, considering that each stack slot in the
2961  * dynamic range is potentially written to.
2962  *
2963  * 'off' includes 'regno->off'.
2964  * 'value_regno' can be -1, meaning that an unknown value is being written to
2965  * the stack.
2966  *
2967  * Spilled pointers in range are not marked as written because we don't know
2968  * what's going to be actually written. This means that read propagation for
2969  * future reads cannot be terminated by this write.
2970  *
2971  * For privileged programs, uninitialized stack slots are considered
2972  * initialized by this write (even though we don't know exactly what offsets
2973  * are going to be written to). The idea is that we don't want the verifier to
2974  * reject future reads that access slots written to through variable offsets.
2975  */
2976 static int check_stack_write_var_off(struct bpf_verifier_env *env,
2977 				     /* func where register points to */
2978 				     struct bpf_func_state *state,
2979 				     int ptr_regno, int off, int size,
2980 				     int value_regno, int insn_idx)
2981 {
2982 	struct bpf_func_state *cur; /* state of the current function */
2983 	int min_off, max_off;
2984 	int i, err;
2985 	struct bpf_reg_state *ptr_reg = NULL, *value_reg = NULL;
2986 	bool writing_zero = false;
2987 	/* set if the fact that we're writing a zero is used to let any
2988 	 * stack slots remain STACK_ZERO
2989 	 */
2990 	bool zero_used = false;
2991 
2992 	cur = env->cur_state->frame[env->cur_state->curframe];
2993 	ptr_reg = &cur->regs[ptr_regno];
2994 	min_off = ptr_reg->smin_value + off;
2995 	max_off = ptr_reg->smax_value + off + size;
2996 	if (value_regno >= 0)
2997 		value_reg = &cur->regs[value_regno];
2998 	if (value_reg && register_is_null(value_reg))
2999 		writing_zero = true;
3000 
3001 	err = grow_stack_state(state, round_up(-min_off, BPF_REG_SIZE));
3002 	if (err)
3003 		return err;
3004 
3005 
3006 	/* Variable offset writes destroy any spilled pointers in range. */
3007 	for (i = min_off; i < max_off; i++) {
3008 		u8 new_type, *stype;
3009 		int slot, spi;
3010 
3011 		slot = -i - 1;
3012 		spi = slot / BPF_REG_SIZE;
3013 		stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
3014 		mark_stack_slot_scratched(env, spi);
3015 
3016 		if (!env->allow_ptr_leaks
3017 				&& *stype != NOT_INIT
3018 				&& *stype != SCALAR_VALUE) {
3019 			/* Reject the write if there are spilled pointers in
3020 			 * range. If we didn't reject here, the ptr status
3021 			 * would be erased below (even though not all slots are
3022 			 * actually overwritten), possibly opening the door to
3023 			 * leaks.
3024 			 */
3025 			verbose(env, "spilled ptr in range of var-offset stack write; insn %d, ptr off: %d",
3026 				insn_idx, i);
3027 			return -EINVAL;
3028 		}
3029 
3030 		/* Erase all spilled pointers. */
3031 		state->stack[spi].spilled_ptr.type = NOT_INIT;
3032 
3033 		/* Update the slot type. */
3034 		new_type = STACK_MISC;
3035 		if (writing_zero && *stype == STACK_ZERO) {
3036 			new_type = STACK_ZERO;
3037 			zero_used = true;
3038 		}
3039 		/* If the slot is STACK_INVALID, we check whether it's OK to
3040 		 * pretend that it will be initialized by this write. The slot
3041 		 * might not actually be written to, and so if we mark it as
3042 		 * initialized future reads might leak uninitialized memory.
3043 		 * For privileged programs, we will accept such reads to slots
3044 		 * that may or may not be written because, if we rejected
3045 		 * them, the error would be too confusing.
3046 		 */
3047 		if (*stype == STACK_INVALID && !env->allow_uninit_stack) {
3048 			verbose(env, "uninit stack in range of var-offset write prohibited for !root; insn %d, off: %d",
3049 					insn_idx, i);
3050 			return -EINVAL;
3051 		}
3052 		*stype = new_type;
3053 	}
3054 	if (zero_used) {
3055 		/* backtracking doesn't work for STACK_ZERO yet. */
3056 		err = mark_chain_precision(env, value_regno);
3057 		if (err)
3058 			return err;
3059 	}
3060 	return 0;
3061 }
3062 
3063 /* When register 'dst_regno' is assigned some values from stack[min_off,
3064  * max_off), we set the register's type according to the types of the
3065  * respective stack slots. If all the stack values are known to be zeros, then
3066  * so is the destination reg. Otherwise, the register is considered to be
3067  * SCALAR. This function does not deal with register filling; the caller must
3068  * ensure that all spilled registers in the stack range have been marked as
3069  * read.
3070  */
3071 static void mark_reg_stack_read(struct bpf_verifier_env *env,
3072 				/* func where src register points to */
3073 				struct bpf_func_state *ptr_state,
3074 				int min_off, int max_off, int dst_regno)
3075 {
3076 	struct bpf_verifier_state *vstate = env->cur_state;
3077 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
3078 	int i, slot, spi;
3079 	u8 *stype;
3080 	int zeros = 0;
3081 
3082 	for (i = min_off; i < max_off; i++) {
3083 		slot = -i - 1;
3084 		spi = slot / BPF_REG_SIZE;
3085 		stype = ptr_state->stack[spi].slot_type;
3086 		if (stype[slot % BPF_REG_SIZE] != STACK_ZERO)
3087 			break;
3088 		zeros++;
3089 	}
3090 	if (zeros == max_off - min_off) {
3091 		/* any access_size read into register is zero extended,
3092 		 * so the whole register == const_zero
3093 		 */
3094 		__mark_reg_const_zero(&state->regs[dst_regno]);
3095 		/* backtracking doesn't support STACK_ZERO yet,
3096 		 * so mark it precise here, so that later
3097 		 * backtracking can stop here.
3098 		 * Backtracking may not need this if this register
3099 		 * doesn't participate in pointer adjustment.
3100 		 * Forward propagation of precise flag is not
3101 		 * necessary either. This mark is only to stop
3102 		 * backtracking. Any register that contributed
3103 		 * to const 0 was marked precise before spill.
3104 		 */
3105 		state->regs[dst_regno].precise = true;
3106 	} else {
3107 		/* have read misc data from the stack */
3108 		mark_reg_unknown(env, state->regs, dst_regno);
3109 	}
3110 	state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
3111 }
3112 
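/* Editorial example: if the four bytes at fp-8..fp-5 are all STACK_ZERO and
 * a 4-byte load reads that range, mark_reg_stack_read() makes dst_regno a
 * known constant zero and marks it precise; if any byte in the range is
 * STACK_MISC instead, dst_regno becomes an unknown SCALAR_VALUE.
 */
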
3113 /* Read the stack at 'off' and put the results into the register indicated by
3114  * 'dst_regno'. It handles reg filling if the addressed stack slot is a
3115  * spilled reg.
3116  *
3117  * 'dst_regno' can be -1, meaning that the read value is not going to a
3118  * register.
3119  *
3120  * The access is assumed to be within the current stack bounds.
3121  */
3122 static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
3123 				      /* func where src register points to */
3124 				      struct bpf_func_state *reg_state,
3125 				      int off, int size, int dst_regno)
3126 {
3127 	struct bpf_verifier_state *vstate = env->cur_state;
3128 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
3129 	int i, slot = -off - 1, spi = slot / BPF_REG_SIZE;
3130 	struct bpf_reg_state *reg;
3131 	u8 *stype, type;
3132 
3133 	stype = reg_state->stack[spi].slot_type;
3134 	reg = &reg_state->stack[spi].spilled_ptr;
3135 
3136 	if (is_spilled_reg(&reg_state->stack[spi])) {
3137 		u8 spill_size = 1;
3138 
3139 		for (i = BPF_REG_SIZE - 1; i > 0 && stype[i - 1] == STACK_SPILL; i--)
3140 			spill_size++;
3141 
3142 		if (size != BPF_REG_SIZE || spill_size != BPF_REG_SIZE) {
3143 			if (reg->type != SCALAR_VALUE) {
3144 				verbose_linfo(env, env->insn_idx, "; ");
3145 				verbose(env, "invalid size of register fill\n");
3146 				return -EACCES;
3147 			}
3148 
3149 			mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
3150 			if (dst_regno < 0)
3151 				return 0;
3152 
3153 			if (!(off % BPF_REG_SIZE) && size == spill_size) {
3154 				/* The earlier check_reg_arg() has decided the
3155 				 * subreg_def for this insn.  Save it first.
3156 				 */
3157 				s32 subreg_def = state->regs[dst_regno].subreg_def;
3158 
3159 				state->regs[dst_regno] = *reg;
3160 				state->regs[dst_regno].subreg_def = subreg_def;
3161 			} else {
3162 				for (i = 0; i < size; i++) {
3163 					type = stype[(slot - i) % BPF_REG_SIZE];
3164 					if (type == STACK_SPILL)
3165 						continue;
3166 					if (type == STACK_MISC)
3167 						continue;
3168 					verbose(env, "invalid read from stack off %d+%d size %d\n",
3169 						off, i, size);
3170 					return -EACCES;
3171 				}
3172 				mark_reg_unknown(env, state->regs, dst_regno);
3173 			}
3174 			state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
3175 			return 0;
3176 		}
3177 
3178 		if (dst_regno >= 0) {
3179 			/* restore register state from stack */
3180 			state->regs[dst_regno] = *reg;
3181 			/* mark reg as written since spilled pointer state likely
3182 			 * has its liveness marks cleared by is_state_visited()
3183 			 * which resets stack/reg liveness for state transitions
3184 			 */
3185 			state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
3186 		} else if (__is_pointer_value(env->allow_ptr_leaks, reg)) {
3187 			/* If dst_regno==-1, the caller is asking us whether
3188 			 * it is acceptable to use this value as a SCALAR_VALUE
3189 			 * (e.g. for XADD).
3190 			 * We must not allow unprivileged callers to do that
3191 			 * with spilled pointers.
3192 			 */
3193 			verbose(env, "leaking pointer from stack off %d\n",
3194 				off);
3195 			return -EACCES;
3196 		}
3197 		mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
3198 	} else {
3199 		for (i = 0; i < size; i++) {
3200 			type = stype[(slot - i) % BPF_REG_SIZE];
3201 			if (type == STACK_MISC)
3202 				continue;
3203 			if (type == STACK_ZERO)
3204 				continue;
3205 			verbose(env, "invalid read from stack off %d+%d size %d\n",
3206 				off, i, size);
3207 			return -EACCES;
3208 		}
3209 		mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
3210 		if (dst_regno >= 0)
3211 			mark_reg_stack_read(env, reg_state, off, off + size, dst_regno);
3212 	}
3213 	return 0;
3214 }
3215 
3216 enum stack_access_src {
3217 	ACCESS_DIRECT = 1,  /* the access is performed by an instruction */
3218 	ACCESS_HELPER = 2,  /* the access is performed by a helper */
3219 };
3220 
3221 static int check_stack_range_initialized(struct bpf_verifier_env *env,
3222 					 int regno, int off, int access_size,
3223 					 bool zero_size_allowed,
3224 					 enum stack_access_src type,
3225 					 struct bpf_call_arg_meta *meta);
3226 
3227 static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno)
3228 {
3229 	return cur_regs(env) + regno;
3230 }
3231 
3232 /* Read the stack at 'ptr_regno + off' and put the result into the register
3233  * 'dst_regno'.
3234  * 'off' includes the pointer register's fixed offset(i.e. 'ptr_regno.off'),
3235  * but not its variable offset.
3236  * 'size' is assumed to be <= reg size and the access is assumed to be aligned.
3237  *
3238  * As opposed to check_stack_read_fixed_off, this function doesn't deal with
3239  * filling registers (i.e. reads of spilled register cannot be detected when
3240  * the offset is not fixed). We conservatively mark 'dst_regno' as containing
3241  * SCALAR_VALUE. That's why we assert that the 'ptr_regno' has a variable
3242  * offset; for a fixed offset check_stack_read_fixed_off should be used
3243  * instead.
3244  */
3245 static int check_stack_read_var_off(struct bpf_verifier_env *env,
3246 				    int ptr_regno, int off, int size, int dst_regno)
3247 {
3248 	/* The state of the source register. */
3249 	struct bpf_reg_state *reg = reg_state(env, ptr_regno);
3250 	struct bpf_func_state *ptr_state = func(env, reg);
3251 	int err;
3252 	int min_off, max_off;
3253 
3254 	/* Note that we pass a NULL meta, so raw access will not be permitted.
3255 	 */
3256 	err = check_stack_range_initialized(env, ptr_regno, off, size,
3257 					    false, ACCESS_DIRECT, NULL);
3258 	if (err)
3259 		return err;
3260 
3261 	min_off = reg->smin_value + off;
3262 	max_off = reg->smax_value + off;
3263 	mark_reg_stack_read(env, ptr_state, min_off, max_off + size, dst_regno);
3264 	return 0;
3265 }
3266 
3267 /* check_stack_read dispatches to check_stack_read_fixed_off or
3268  * check_stack_read_var_off.
3269  *
3270  * The caller must ensure that the offset falls within the allocated stack
3271  * bounds.
3272  *
3273  * 'dst_regno' is a register which will receive the value from the stack. It
3274  * can be -1, meaning that the read value is not going to a register.
3275  */
3276 static int check_stack_read(struct bpf_verifier_env *env,
3277 			    int ptr_regno, int off, int size,
3278 			    int dst_regno)
3279 {
3280 	struct bpf_reg_state *reg = reg_state(env, ptr_regno);
3281 	struct bpf_func_state *state = func(env, reg);
3282 	int err;
3283 	/* Some accesses are only permitted with a static offset. */
3284 	bool var_off = !tnum_is_const(reg->var_off);
3285 
3286 	/* The offset is required to be static when reads don't go to a
3287 	 * register, in order to not leak pointers (see
3288 	 * check_stack_read_fixed_off).
3289 	 */
3290 	if (dst_regno < 0 && var_off) {
3291 		char tn_buf[48];
3292 
3293 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3294 		verbose(env, "variable offset stack pointer cannot be passed into helper function; var_off=%s off=%d size=%d\n",
3295 			tn_buf, off, size);
3296 		return -EACCES;
3297 	}
3298 	/* Variable offset is prohibited for unprivileged mode for simplicity
3299 	 * since it requires corresponding support in Spectre masking for stack
3300 	 * ALU. See also retrieve_ptr_limit().
3301 	 */
3302 	if (!env->bypass_spec_v1 && var_off) {
3303 		char tn_buf[48];
3304 
3305 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3306 		verbose(env, "R%d variable offset stack access prohibited for !root, var_off=%s\n",
3307 				ptr_regno, tn_buf);
3308 		return -EACCES;
3309 	}
3310 
3311 	if (!var_off) {
3312 		off += reg->var_off.value;
3313 		err = check_stack_read_fixed_off(env, state, off, size,
3314 						 dst_regno);
3315 	} else {
3316 		/* Variable offset stack reads need more conservative handling
3317 		 * than fixed offset ones. Note that dst_regno >= 0 on this
3318 		 * branch.
3319 		 */
3320 		err = check_stack_read_var_off(env, ptr_regno, off, size,
3321 					       dst_regno);
3322 	}
3323 	return err;
3324 }
3325 
3326 
3327 /* check_stack_write dispatches to check_stack_write_fixed_off or
3328  * check_stack_write_var_off.
3329  *
3330  * 'ptr_regno' is the register used as a pointer into the stack.
3331  * 'off' includes 'ptr_regno->off', but not its variable offset (if any).
3332  * 'value_regno' is the register whose value we're writing to the stack. It can
3333  * be -1, meaning that we're not writing from a register.
3334  *
3335  * The caller must ensure that the offset falls within the maximum stack size.
3336  */
3337 static int check_stack_write(struct bpf_verifier_env *env,
3338 			     int ptr_regno, int off, int size,
3339 			     int value_regno, int insn_idx)
3340 {
3341 	struct bpf_reg_state *reg = reg_state(env, ptr_regno);
3342 	struct bpf_func_state *state = func(env, reg);
3343 	int err;
3344 
3345 	if (tnum_is_const(reg->var_off)) {
3346 		off += reg->var_off.value;
3347 		err = check_stack_write_fixed_off(env, state, off, size,
3348 						  value_regno, insn_idx);
3349 	} else {
3350 		/* Variable offset stack writes need more conservative handling
3351 		 * than fixed offset ones.
3352 		 */
3353 		err = check_stack_write_var_off(env, state,
3354 						ptr_regno, off, size,
3355 						value_regno, insn_idx);
3356 	}
3357 	return err;
3358 }
3359 
3360 static int check_map_access_type(struct bpf_verifier_env *env, u32 regno,
3361 				 int off, int size, enum bpf_access_type type)
3362 {
3363 	struct bpf_reg_state *regs = cur_regs(env);
3364 	struct bpf_map *map = regs[regno].map_ptr;
3365 	u32 cap = bpf_map_flags_to_cap(map);
3366 
3367 	if (type == BPF_WRITE && !(cap & BPF_MAP_CAN_WRITE)) {
3368 		verbose(env, "write into map forbidden, value_size=%d off=%d size=%d\n",
3369 			map->value_size, off, size);
3370 		return -EACCES;
3371 	}
3372 
3373 	if (type == BPF_READ && !(cap & BPF_MAP_CAN_READ)) {
3374 		verbose(env, "read from map forbidden, value_size=%d off=%d size=%d\n",
3375 			map->value_size, off, size);
3376 		return -EACCES;
3377 	}
3378 
3379 	return 0;
3380 }
3381 
3382 /* check read/write into memory region (e.g., map value, ringbuf sample, etc) */
3383 static int __check_mem_access(struct bpf_verifier_env *env, int regno,
3384 			      int off, int size, u32 mem_size,
3385 			      bool zero_size_allowed)
3386 {
3387 	bool size_ok = size > 0 || (size == 0 && zero_size_allowed);
3388 	struct bpf_reg_state *reg;
3389 
3390 	if (off >= 0 && size_ok && (u64)off + size <= mem_size)
3391 		return 0;
3392 
3393 	reg = &cur_regs(env)[regno];
3394 	switch (reg->type) {
3395 	case PTR_TO_MAP_KEY:
3396 		verbose(env, "invalid access to map key, key_size=%d off=%d size=%d\n",
3397 			mem_size, off, size);
3398 		break;
3399 	case PTR_TO_MAP_VALUE:
3400 		verbose(env, "invalid access to map value, value_size=%d off=%d size=%d\n",
3401 			mem_size, off, size);
3402 		break;
3403 	case PTR_TO_PACKET:
3404 	case PTR_TO_PACKET_META:
3405 	case PTR_TO_PACKET_END:
3406 		verbose(env, "invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n",
3407 			off, size, regno, reg->id, off, mem_size);
3408 		break;
3409 	case PTR_TO_MEM:
3410 	default:
3411 		verbose(env, "invalid access to memory, mem_size=%u off=%d size=%d\n",
3412 			mem_size, off, size);
3413 	}
3414 
3415 	return -EACCES;
3416 }
3417 
3418 /* check read/write into a memory region with possible variable offset */
3419 static int check_mem_region_access(struct bpf_verifier_env *env, u32 regno,
3420 				   int off, int size, u32 mem_size,
3421 				   bool zero_size_allowed)
3422 {
3423 	struct bpf_verifier_state *vstate = env->cur_state;
3424 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
3425 	struct bpf_reg_state *reg = &state->regs[regno];
3426 	int err;
3427 
3428 	/* We may have adjusted the register pointing to memory region, so we
3429 	 * need to try adding each of min_value and max_value to off
3430 	 * to make sure our theoretical access will be safe.
3431 	 *
3432 	 * The minimum value is only important with signed
3433 	 * comparisons where we can't assume the floor of a
3434 	 * value is 0.  If we are using signed variables for our
3435 	 * indexes we need to make sure that whatever we use
3436 	 * will have a set floor within our range.
3437 	 */
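	/* Illustrative example (made-up numbers, not from this file): with
	 * off == 4 and a register known to lie in [smin, umax] == [-2, 10],
	 * the checks below validate the access at offset 4 + (-2) == 2 and
	 * at 4 + 10 == 14 against mem_size, covering every offset the
	 * program could actually use at runtime.
	 */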
3438 	if (reg->smin_value < 0 &&
3439 	    (reg->smin_value == S64_MIN ||
3440 	     (off + reg->smin_value != (s64)(s32)(off + reg->smin_value)) ||
3441 	      reg->smin_value + off < 0)) {
3442 		verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
3443 			regno);
3444 		return -EACCES;
3445 	}
3446 	err = __check_mem_access(env, regno, reg->smin_value + off, size,
3447 				 mem_size, zero_size_allowed);
3448 	if (err) {
3449 		verbose(env, "R%d min value is outside of the allowed memory range\n",
3450 			regno);
3451 		return err;
3452 	}
3453 
3454 	/* If we haven't set a max value then we need to bail since we can't be
3455 	 * sure we won't do bad things.
3456 	 * If reg->umax_value + off could overflow, treat that as unbounded too.
3457 	 */
3458 	if (reg->umax_value >= BPF_MAX_VAR_OFF) {
3459 		verbose(env, "R%d unbounded memory access, make sure to bounds check any such access\n",
3460 			regno);
3461 		return -EACCES;
3462 	}
3463 	err = __check_mem_access(env, regno, reg->umax_value + off, size,
3464 				 mem_size, zero_size_allowed);
3465 	if (err) {
3466 		verbose(env, "R%d max value is outside of the allowed memory range\n",
3467 			regno);
3468 		return err;
3469 	}
3470 
3471 	return 0;
3472 }
3473 
3474 /* check read/write into a map element with possible variable offset */
3475 static int check_map_access(struct bpf_verifier_env *env, u32 regno,
3476 			    int off, int size, bool zero_size_allowed)
3477 {
3478 	struct bpf_verifier_state *vstate = env->cur_state;
3479 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
3480 	struct bpf_reg_state *reg = &state->regs[regno];
3481 	struct bpf_map *map = reg->map_ptr;
3482 	int err;
3483 
3484 	err = check_mem_region_access(env, regno, off, size, map->value_size,
3485 				      zero_size_allowed);
3486 	if (err)
3487 		return err;
3488 
3489 	if (map_value_has_spin_lock(map)) {
3490 		u32 lock = map->spin_lock_off;
3491 
3492 		/* if any part of struct bpf_spin_lock can be touched by
3493 		 * load/store, reject this program.
3494 		 * To check that [x1, x2) overlaps with [y1, y2)
3495 		 * it is sufficient to check x1 < y2 && y1 < x2.
3496 		 */
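		/* Illustrative numbers (assuming a 4-byte bpf_spin_lock at
		 * value offset 16, i.e. occupying [16, 20)): an 8-byte access
		 * over [12, 20) satisfies 12 < 20 && 16 < 20 and is rejected,
		 * while an access over [8, 16) fails 16 < 16 and is allowed.
		 */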
3497 		if (reg->smin_value + off < lock + sizeof(struct bpf_spin_lock) &&
3498 		     lock < reg->umax_value + off + size) {
3499 			verbose(env, "bpf_spin_lock cannot be accessed directly by load/store\n");
3500 			return -EACCES;
3501 		}
3502 	}
3503 	if (map_value_has_timer(map)) {
3504 		u32 t = map->timer_off;
3505 
3506 		if (reg->smin_value + off < t + sizeof(struct bpf_timer) &&
3507 		     t < reg->umax_value + off + size) {
3508 			verbose(env, "bpf_timer cannot be accessed directly by load/store\n");
3509 			return -EACCES;
3510 		}
3511 	}
3512 	return err;
3513 }
3514 
3515 #define MAX_PACKET_OFF 0xffff
3516 
3517 static bool may_access_direct_pkt_data(struct bpf_verifier_env *env,
3518 				       const struct bpf_call_arg_meta *meta,
3519 				       enum bpf_access_type t)
3520 {
3521 	enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
3522 
3523 	switch (prog_type) {
3524 	/* Program types only with direct read access go here! */
3525 	case BPF_PROG_TYPE_LWT_IN:
3526 	case BPF_PROG_TYPE_LWT_OUT:
3527 	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
3528 	case BPF_PROG_TYPE_SK_REUSEPORT:
3529 	case BPF_PROG_TYPE_FLOW_DISSECTOR:
3530 	case BPF_PROG_TYPE_CGROUP_SKB:
3531 		if (t == BPF_WRITE)
3532 			return false;
3533 		fallthrough;
3534 
3535 	/* Program types with direct read + write access go here! */
3536 	case BPF_PROG_TYPE_SCHED_CLS:
3537 	case BPF_PROG_TYPE_SCHED_ACT:
3538 	case BPF_PROG_TYPE_XDP:
3539 	case BPF_PROG_TYPE_LWT_XMIT:
3540 	case BPF_PROG_TYPE_SK_SKB:
3541 	case BPF_PROG_TYPE_SK_MSG:
3542 		if (meta)
3543 			return meta->pkt_access;
3544 
3545 		env->seen_direct_write = true;
3546 		return true;
3547 
3548 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
3549 		if (t == BPF_WRITE)
3550 			env->seen_direct_write = true;
3551 
3552 		return true;
3553 
3554 	default:
3555 		return false;
3556 	}
3557 }
3558 
3559 static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
3560 			       int size, bool zero_size_allowed)
3561 {
3562 	struct bpf_reg_state *regs = cur_regs(env);
3563 	struct bpf_reg_state *reg = &regs[regno];
3564 	int err;
3565 
3566 	/* We may have added a variable offset to the packet pointer; but any
3567 	 * reg->range we have comes after that.  We are only checking the fixed
3568 	 * offset.
3569 	 */
3570 
3571 	/* We don't allow negative numbers, because we aren't tracking enough
3572 	 * detail to prove they're safe.
3573 	 */
3574 	if (reg->smin_value < 0) {
3575 		verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
3576 			regno);
3577 		return -EACCES;
3578 	}
3579 
3580 	err = reg->range < 0 ? -EINVAL :
3581 	      __check_mem_access(env, regno, off, size, reg->range,
3582 				 zero_size_allowed);
3583 	if (err) {
3584 		verbose(env, "R%d offset is outside of the packet\n", regno);
3585 		return err;
3586 	}
3587 
3588 	/* __check_mem_access has made sure "off + size - 1" is within u16.
3589 	 * reg->umax_value can't be bigger than MAX_PACKET_OFF which is 0xffff,
3590 	 * otherwise find_good_pkt_pointers would have refused to set range info,
3591 	 * in which case __check_mem_access would have rejected this pkt access.
3592 	 * Therefore, "off + reg->umax_value + size - 1" won't overflow u32.
3593 	 */
3594 	env->prog->aux->max_pkt_offset =
3595 		max_t(u32, env->prog->aux->max_pkt_offset,
3596 		      off + reg->umax_value + size - 1);
3597 
3598 	return err;
3599 }
3600 
3601 /* check access to 'struct bpf_context' fields.  Supports fixed offsets only */
3602 static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size,
3603 			    enum bpf_access_type t, enum bpf_reg_type *reg_type,
3604 			    struct btf **btf, u32 *btf_id)
3605 {
3606 	struct bpf_insn_access_aux info = {
3607 		.reg_type = *reg_type,
3608 		.log = &env->log,
3609 	};
3610 
3611 	if (env->ops->is_valid_access &&
3612 	    env->ops->is_valid_access(off, size, t, env->prog, &info)) {
3613 		/* A non-zero info.ctx_field_size indicates that this field is a
3614 		 * candidate for later verifier transformation to load the whole
3615 		 * field and then apply a mask when accessed with a narrower
3616 		 * access than actual ctx access size. A zero info.ctx_field_size
3617 		 * will only allow for whole field access and rejects any other
3618 		 * type of narrower access.
3619 		 */
3620 		*reg_type = info.reg_type;
3621 
3622 		if (base_type(*reg_type) == PTR_TO_BTF_ID) {
3623 			*btf = info.btf;
3624 			*btf_id = info.btf_id;
3625 		} else {
3626 			env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
3627 		}
3628 		/* remember the offset of last byte accessed in ctx */
3629 		if (env->prog->aux->max_ctx_offset < off + size)
3630 			env->prog->aux->max_ctx_offset = off + size;
3631 		return 0;
3632 	}
3633 
3634 	verbose(env, "invalid bpf_context access off=%d size=%d\n", off, size);
3635 	return -EACCES;
3636 }
3637 
3638 static int check_flow_keys_access(struct bpf_verifier_env *env, int off,
3639 				  int size)
3640 {
3641 	if (size < 0 || off < 0 ||
3642 	    (u64)off + size > sizeof(struct bpf_flow_keys)) {
3643 		verbose(env, "invalid access to flow keys off=%d size=%d\n",
3644 			off, size);
3645 		return -EACCES;
3646 	}
3647 	return 0;
3648 }
3649 
3650 static int check_sock_access(struct bpf_verifier_env *env, int insn_idx,
3651 			     u32 regno, int off, int size,
3652 			     enum bpf_access_type t)
3653 {
3654 	struct bpf_reg_state *regs = cur_regs(env);
3655 	struct bpf_reg_state *reg = &regs[regno];
3656 	struct bpf_insn_access_aux info = {};
3657 	bool valid;
3658 
3659 	if (reg->smin_value < 0) {
3660 		verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
3661 			regno);
3662 		return -EACCES;
3663 	}
3664 
3665 	switch (reg->type) {
3666 	case PTR_TO_SOCK_COMMON:
3667 		valid = bpf_sock_common_is_valid_access(off, size, t, &info);
3668 		break;
3669 	case PTR_TO_SOCKET:
3670 		valid = bpf_sock_is_valid_access(off, size, t, &info);
3671 		break;
3672 	case PTR_TO_TCP_SOCK:
3673 		valid = bpf_tcp_sock_is_valid_access(off, size, t, &info);
3674 		break;
3675 	case PTR_TO_XDP_SOCK:
3676 		valid = bpf_xdp_sock_is_valid_access(off, size, t, &info);
3677 		break;
3678 	default:
3679 		valid = false;
3680 	}
3681 
3682 
3683 	if (valid) {
3684 		env->insn_aux_data[insn_idx].ctx_field_size =
3685 			info.ctx_field_size;
3686 		return 0;
3687 	}
3688 
3689 	verbose(env, "R%d invalid %s access off=%d size=%d\n",
3690 		regno, reg_type_str(env, reg->type), off, size);
3691 
3692 	return -EACCES;
3693 }
3694 
3695 static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
3696 {
3697 	return __is_pointer_value(env->allow_ptr_leaks, reg_state(env, regno));
3698 }
3699 
3700 static bool is_ctx_reg(struct bpf_verifier_env *env, int regno)
3701 {
3702 	const struct bpf_reg_state *reg = reg_state(env, regno);
3703 
3704 	return reg->type == PTR_TO_CTX;
3705 }
3706 
3707 static bool is_sk_reg(struct bpf_verifier_env *env, int regno)
3708 {
3709 	const struct bpf_reg_state *reg = reg_state(env, regno);
3710 
3711 	return type_is_sk_pointer(reg->type);
3712 }
3713 
3714 static bool is_pkt_reg(struct bpf_verifier_env *env, int regno)
3715 {
3716 	const struct bpf_reg_state *reg = reg_state(env, regno);
3717 
3718 	return type_is_pkt_pointer(reg->type);
3719 }
3720 
3721 static bool is_flow_key_reg(struct bpf_verifier_env *env, int regno)
3722 {
3723 	const struct bpf_reg_state *reg = reg_state(env, regno);
3724 
3725 	/* Separate to is_ctx_reg() since we still want to allow BPF_ST here. */
3726 	return reg->type == PTR_TO_FLOW_KEYS;
3727 }
3728 
3729 static int check_pkt_ptr_alignment(struct bpf_verifier_env *env,
3730 				   const struct bpf_reg_state *reg,
3731 				   int off, int size, bool strict)
3732 {
3733 	struct tnum reg_off;
3734 	int ip_align;
3735 
3736 	/* Byte size accesses are always allowed. */
3737 	if (!strict || size == 1)
3738 		return 0;
3739 
3740 	/* For platforms that do not have a Kconfig enabling
3741 	 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the value of
3742 	 * NET_IP_ALIGN is universally set to '2'.  And on platforms
3743 	 * that do set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, we get
3744 	 * to this code only in strict mode where we want to emulate
3745 	 * the NET_IP_ALIGN==2 checking.  Therefore use an
3746 	 * unconditional IP align value of '2'.
3747 	 */
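	/* Illustrative example (hypothetical offsets): a 4-byte load at
	 * reg->off + off == 14 (e.g. the start of an IPv4 header behind a
	 * 14-byte Ethernet header) yields reg_off == 2 + 14 == 16, which is
	 * 4-byte aligned and passes; the same load at offset 15 would not.
	 */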
3748 	ip_align = 2;
3749 
3750 	reg_off = tnum_add(reg->var_off, tnum_const(ip_align + reg->off + off));
3751 	if (!tnum_is_aligned(reg_off, size)) {
3752 		char tn_buf[48];
3753 
3754 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3755 		verbose(env,
3756 			"misaligned packet access off %d+%s+%d+%d size %d\n",
3757 			ip_align, tn_buf, reg->off, off, size);
3758 		return -EACCES;
3759 	}
3760 
3761 	return 0;
3762 }
3763 
3764 static int check_generic_ptr_alignment(struct bpf_verifier_env *env,
3765 				       const struct bpf_reg_state *reg,
3766 				       const char *pointer_desc,
3767 				       int off, int size, bool strict)
3768 {
3769 	struct tnum reg_off;
3770 
3771 	/* Byte size accesses are always allowed. */
3772 	if (!strict || size == 1)
3773 		return 0;
3774 
3775 	reg_off = tnum_add(reg->var_off, tnum_const(reg->off + off));
3776 	if (!tnum_is_aligned(reg_off, size)) {
3777 		char tn_buf[48];
3778 
3779 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3780 		verbose(env, "misaligned %saccess off %s+%d+%d size %d\n",
3781 			pointer_desc, tn_buf, reg->off, off, size);
3782 		return -EACCES;
3783 	}
3784 
3785 	return 0;
3786 }
3787 
3788 static int check_ptr_alignment(struct bpf_verifier_env *env,
3789 			       const struct bpf_reg_state *reg, int off,
3790 			       int size, bool strict_alignment_once)
3791 {
3792 	bool strict = env->strict_alignment || strict_alignment_once;
3793 	const char *pointer_desc = "";
3794 
3795 	switch (reg->type) {
3796 	case PTR_TO_PACKET:
3797 	case PTR_TO_PACKET_META:
3798 		/* Special case, because of NET_IP_ALIGN. Given metadata sits
3799 		 * right in front, treat it the very same way.
3800 		 */
3801 		return check_pkt_ptr_alignment(env, reg, off, size, strict);
3802 	case PTR_TO_FLOW_KEYS:
3803 		pointer_desc = "flow keys ";
3804 		break;
3805 	case PTR_TO_MAP_KEY:
3806 		pointer_desc = "key ";
3807 		break;
3808 	case PTR_TO_MAP_VALUE:
3809 		pointer_desc = "value ";
3810 		break;
3811 	case PTR_TO_CTX:
3812 		pointer_desc = "context ";
3813 		break;
3814 	case PTR_TO_STACK:
3815 		pointer_desc = "stack ";
3816 		/* The stack spill tracking logic in check_stack_write_fixed_off()
3817 		 * and check_stack_read_fixed_off() relies on stack accesses being
3818 		 * aligned.
3819 		 */
3820 		strict = true;
3821 		break;
3822 	case PTR_TO_SOCKET:
3823 		pointer_desc = "sock ";
3824 		break;
3825 	case PTR_TO_SOCK_COMMON:
3826 		pointer_desc = "sock_common ";
3827 		break;
3828 	case PTR_TO_TCP_SOCK:
3829 		pointer_desc = "tcp_sock ";
3830 		break;
3831 	case PTR_TO_XDP_SOCK:
3832 		pointer_desc = "xdp_sock ";
3833 		break;
3834 	default:
3835 		break;
3836 	}
3837 	return check_generic_ptr_alignment(env, reg, pointer_desc, off, size,
3838 					   strict);
3839 }
3840 
3841 static int update_stack_depth(struct bpf_verifier_env *env,
3842 			      const struct bpf_func_state *func,
3843 			      int off)
3844 {
3845 	u16 stack = env->subprog_info[func->subprogno].stack_depth;
3846 
3847 	if (stack >= -off)
3848 		return 0;
3849 
3850 	/* update known max for given subprogram */
3851 	env->subprog_info[func->subprogno].stack_depth = -off;
3852 	return 0;
3853 }
3854 
3855 /* starting from main bpf function walk all instructions of the function
3856  * and recursively walk all callees that the given function can call.
3857  * Ignore jump and exit insns.
3858  * Since recursion is prevented by check_cfg() this algorithm
3859  * only needs a local stack of MAX_CALL_FRAMES to remember callsites
3860  */
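/* Illustrative accounting (made-up stack depths): a main prog using 100 bytes
 * rounds up to 128 and a callee using 40 bytes rounds up to 64, so walking
 * main -> callee gives depth == 192, well under MAX_BPF_STACK (512).
 */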
3861 static int check_max_stack_depth(struct bpf_verifier_env *env)
3862 {
3863 	int depth = 0, frame = 0, idx = 0, i = 0, subprog_end;
3864 	struct bpf_subprog_info *subprog = env->subprog_info;
3865 	struct bpf_insn *insn = env->prog->insnsi;
3866 	bool tail_call_reachable = false;
3867 	int ret_insn[MAX_CALL_FRAMES];
3868 	int ret_prog[MAX_CALL_FRAMES];
3869 	int j;
3870 
3871 process_func:
3872 	/* protect against potential stack overflow that might happen when
3873 	 * bpf2bpf calls get combined with tailcalls. Limit the caller's stack
3874 	 * depth in that case down to 256 so that the worst case scenario
3875 	 * would result in an 8k stack size (the tail call limit of 32 * 256 =
3876 	 * 8k).
3877 	 *
3878 	 * To get the idea what might happen, see an example:
3879 	 * func1 -> sub rsp, 128
3880 	 *  subfunc1 -> sub rsp, 256
3881 	 *  tailcall1 -> add rsp, 256
3882 	 *   func2 -> sub rsp, 192 (total stack size = 128 + 192 = 320)
3883 	 *   subfunc2 -> sub rsp, 64
3884 	 *   subfunc22 -> sub rsp, 128
3885 	 *   tailcall2 -> add rsp, 128
3886 	 *    func3 -> sub rsp, 32 (total stack size 128 + 192 + 64 + 32 = 416)
3887 	 *
3888 	 * tailcall will unwind the current stack frame but it will not get rid
3889 	 * of the caller's stack as shown in the example above.
3890 	 */
3891 	if (idx && subprog[idx].has_tail_call && depth >= 256) {
3892 		verbose(env,
3893 			"tail_calls are not allowed when call stack of previous frames is %d bytes. Too large\n",
3894 			depth);
3895 		return -EACCES;
3896 	}
3897 	/* round up to 32 bytes, since this is the granularity
3898 	 * of the interpreter stack size
3899 	 */
3900 	depth += round_up(max_t(u32, subprog[idx].stack_depth, 1), 32);
3901 	if (depth > MAX_BPF_STACK) {
3902 		verbose(env, "combined stack size of %d calls is %d. Too large\n",
3903 			frame + 1, depth);
3904 		return -EACCES;
3905 	}
3906 continue_func:
3907 	subprog_end = subprog[idx + 1].start;
3908 	for (; i < subprog_end; i++) {
3909 		int next_insn;
3910 
3911 		if (!bpf_pseudo_call(insn + i) && !bpf_pseudo_func(insn + i))
3912 			continue;
3913 		/* remember insn and function to return to */
3914 		ret_insn[frame] = i + 1;
3915 		ret_prog[frame] = idx;
3916 
3917 		/* find the callee */
3918 		next_insn = i + insn[i].imm + 1;
3919 		idx = find_subprog(env, next_insn);
3920 		if (idx < 0) {
3921 			WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
3922 				  next_insn);
3923 			return -EFAULT;
3924 		}
3925 		if (subprog[idx].is_async_cb) {
3926 			if (subprog[idx].has_tail_call) {
3927 				verbose(env, "verifier bug. subprog has tail_call and async cb\n");
3928 				return -EFAULT;
3929 			}
3930 			 /* async callbacks don't increase bpf prog stack size */
3931 			continue;
3932 		}
3933 		i = next_insn;
3934 
3935 		if (subprog[idx].has_tail_call)
3936 			tail_call_reachable = true;
3937 
3938 		frame++;
3939 		if (frame >= MAX_CALL_FRAMES) {
3940 			verbose(env, "the call stack of %d frames is too deep !\n",
3941 				frame);
3942 			return -E2BIG;
3943 		}
3944 		goto process_func;
3945 	}
3946 	/* if tail call got detected across bpf2bpf calls then mark each of the
3947 	 * currently present subprog frames as tail call reachable subprogs;
3948 	 * this info will be utilized by JIT so that we will be preserving the
3949 	 * tail call counter throughout bpf2bpf calls combined with tailcalls
3950 	 */
3951 	if (tail_call_reachable)
3952 		for (j = 0; j < frame; j++)
3953 			subprog[ret_prog[j]].tail_call_reachable = true;
3954 	if (subprog[0].tail_call_reachable)
3955 		env->prog->aux->tail_call_reachable = true;
3956 
3957 	/* end of for() loop means the last insn of the 'subprog'
3958 	 * was reached. Doesn't matter whether it was JA or EXIT
3959 	 */
3960 	if (frame == 0)
3961 		return 0;
3962 	depth -= round_up(max_t(u32, subprog[idx].stack_depth, 1), 32);
3963 	frame--;
3964 	i = ret_insn[frame];
3965 	idx = ret_prog[frame];
3966 	goto continue_func;
3967 }
3968 
3969 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
3970 static int get_callee_stack_depth(struct bpf_verifier_env *env,
3971 				  const struct bpf_insn *insn, int idx)
3972 {
3973 	int start = idx + insn->imm + 1, subprog;
3974 
3975 	subprog = find_subprog(env, start);
3976 	if (subprog < 0) {
3977 		WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
3978 			  start);
3979 		return -EFAULT;
3980 	}
3981 	return env->subprog_info[subprog].stack_depth;
3982 }
3983 #endif
3984 
3985 static int __check_ptr_off_reg(struct bpf_verifier_env *env,
3986 			       const struct bpf_reg_state *reg, int regno,
3987 			       bool fixed_off_ok)
3988 {
3989 	/* Access to this pointer-typed register or passing it to a helper
3990 	 * is only allowed in its original, unmodified form.
3991 	 */
3992 
3993 	if (!fixed_off_ok && reg->off) {
3994 		verbose(env, "dereference of modified %s ptr R%d off=%d disallowed\n",
3995 			reg_type_str(env, reg->type), regno, reg->off);
3996 		return -EACCES;
3997 	}
3998 
3999 	if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
4000 		char tn_buf[48];
4001 
4002 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
4003 		verbose(env, "variable %s access var_off=%s disallowed\n",
4004 			reg_type_str(env, reg->type), tn_buf);
4005 		return -EACCES;
4006 	}
4007 
4008 	return 0;
4009 }
4010 
4011 int check_ptr_off_reg(struct bpf_verifier_env *env,
4012 		      const struct bpf_reg_state *reg, int regno)
4013 {
4014 	return __check_ptr_off_reg(env, reg, regno, false);
4015 }
4016 
4017 static int __check_buffer_access(struct bpf_verifier_env *env,
4018 				 const char *buf_info,
4019 				 const struct bpf_reg_state *reg,
4020 				 int regno, int off, int size)
4021 {
4022 	if (off < 0) {
4023 		verbose(env,
4024 			"R%d invalid %s buffer access: off=%d, size=%d\n",
4025 			regno, buf_info, off, size);
4026 		return -EACCES;
4027 	}
4028 	if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
4029 		char tn_buf[48];
4030 
4031 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
4032 		verbose(env,
4033 			"R%d invalid variable buffer offset: off=%d, var_off=%s\n",
4034 			regno, off, tn_buf);
4035 		return -EACCES;
4036 	}
4037 
4038 	return 0;
4039 }
4040 
4041 static int check_tp_buffer_access(struct bpf_verifier_env *env,
4042 				  const struct bpf_reg_state *reg,
4043 				  int regno, int off, int size)
4044 {
4045 	int err;
4046 
4047 	err = __check_buffer_access(env, "tracepoint", reg, regno, off, size);
4048 	if (err)
4049 		return err;
4050 
4051 	if (off + size > env->prog->aux->max_tp_access)
4052 		env->prog->aux->max_tp_access = off + size;
4053 
4054 	return 0;
4055 }
4056 
4057 static int check_buffer_access(struct bpf_verifier_env *env,
4058 			       const struct bpf_reg_state *reg,
4059 			       int regno, int off, int size,
4060 			       bool zero_size_allowed,
4061 			       const char *buf_info,
4062 			       u32 *max_access)
4063 {
4064 	int err;
4065 
4066 	err = __check_buffer_access(env, buf_info, reg, regno, off, size);
4067 	if (err)
4068 		return err;
4069 
4070 	if (off + size > *max_access)
4071 		*max_access = off + size;
4072 
4073 	return 0;
4074 }
4075 
4076 /* BPF architecture zero extends alu32 ops into 64-bit registers */
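/* For example (illustrative), a 32-bit ALU op such as
 * BPF_MOV32_IMM(BPF_REG_1, -1) leaves the upper 32 bits of r1 cleared, so the
 * 64-bit value of r1 is 0x00000000ffffffff.
 */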
4077 static void zext_32_to_64(struct bpf_reg_state *reg)
4078 {
4079 	reg->var_off = tnum_subreg(reg->var_off);
4080 	__reg_assign_32_into_64(reg);
4081 }
4082 
4083 /* truncate register to smaller size (in bytes)
4084  * must be called with size < BPF_REG_SIZE
4085  */
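/* Worked example (hypothetical bounds): for size == 2, mask == 0xffff; a
 * register with [umin, umax] == [0x10000, 0x100ff] has identical bits above
 * the mask, so its bounds become [0x0, 0xff]; had the high bits differed, the
 * bounds would be reset to [0, 0xffff].
 */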
4086 static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
4087 {
4088 	u64 mask;
4089 
4090 	/* clear high bits in bit representation */
4091 	reg->var_off = tnum_cast(reg->var_off, size);
4092 
4093 	/* fix arithmetic bounds */
4094 	mask = ((u64)1 << (size * 8)) - 1;
4095 	if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) {
4096 		reg->umin_value &= mask;
4097 		reg->umax_value &= mask;
4098 	} else {
4099 		reg->umin_value = 0;
4100 		reg->umax_value = mask;
4101 	}
4102 	reg->smin_value = reg->umin_value;
4103 	reg->smax_value = reg->umax_value;
4104 
4105 	/* If the size is smaller than the 32-bit subregister, the 32-bit
4106 	 * values are truncated as well, so we push the 64-bit bounds into
4107 	 * the 32-bit bounds. Sizes below 32 bits were already truncated above.
4108 	 */
4109 	if (size >= 4)
4110 		return;
4111 	__reg_combine_64_into_32(reg);
4112 }
4113 
4114 static bool bpf_map_is_rdonly(const struct bpf_map *map)
4115 {
4116 	/* A map is considered read-only if the following conditions are true:
4117 	 *
4118 	 * 1) BPF program side cannot change any of the map content. The
4119 	 *    BPF_F_RDONLY_PROG flag was set at map creation time and stays
4120 	 *    in effect throughout the lifetime of the map.
4121 	 * 2) The map value(s) have been initialized from user space by a
4122 	 *    loader and then "frozen", such that no new map update/delete
4123 	 *    operations from syscall side are possible for the rest of
4124 	 *    the map's lifetime from that point onwards.
4125 	 * 3) Any parallel/pending map update/delete operations from syscall
4126 	 *    side have been completed. Only after that point, it's safe to
4127 	 *    assume that map value(s) are immutable.
4128 	 */
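	/* A typical sequence that satisfies all three (illustrative, from the
	 * loader's side): create the map with BPF_F_RDONLY_PROG, populate it,
	 * then freeze it with the BPF_MAP_FREEZE command so no further
	 * user-space writes are possible.
	 */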
4129 	return (map->map_flags & BPF_F_RDONLY_PROG) &&
4130 	       READ_ONCE(map->frozen) &&
4131 	       !bpf_map_write_active(map);
4132 }
4133 
4134 static int bpf_map_direct_read(struct bpf_map *map, int off, int size, u64 *val)
4135 {
4136 	void *ptr;
4137 	u64 addr;
4138 	int err;
4139 
4140 	err = map->ops->map_direct_value_addr(map, &addr, off);
4141 	if (err)
4142 		return err;
4143 	ptr = (void *)(long)addr + off;
4144 
4145 	switch (size) {
4146 	case sizeof(u8):
4147 		*val = (u64)*(u8 *)ptr;
4148 		break;
4149 	case sizeof(u16):
4150 		*val = (u64)*(u16 *)ptr;
4151 		break;
4152 	case sizeof(u32):
4153 		*val = (u64)*(u32 *)ptr;
4154 		break;
4155 	case sizeof(u64):
4156 		*val = *(u64 *)ptr;
4157 		break;
4158 	default:
4159 		return -EINVAL;
4160 	}
4161 	return 0;
4162 }
4163 
4164 static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
4165 				   struct bpf_reg_state *regs,
4166 				   int regno, int off, int size,
4167 				   enum bpf_access_type atype,
4168 				   int value_regno)
4169 {
4170 	struct bpf_reg_state *reg = regs + regno;
4171 	const struct btf_type *t = btf_type_by_id(reg->btf, reg->btf_id);
4172 	const char *tname = btf_name_by_offset(reg->btf, t->name_off);
4173 	enum bpf_type_flag flag = 0;
4174 	u32 btf_id;
4175 	int ret;
4176 
4177 	if (off < 0) {
4178 		verbose(env,
4179 			"R%d is ptr_%s invalid negative access: off=%d\n",
4180 			regno, tname, off);
4181 		return -EACCES;
4182 	}
4183 	if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
4184 		char tn_buf[48];
4185 
4186 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
4187 		verbose(env,
4188 			"R%d is ptr_%s invalid variable offset: off=%d, var_off=%s\n",
4189 			regno, tname, off, tn_buf);
4190 		return -EACCES;
4191 	}
4192 
4193 	if (reg->type & MEM_USER) {
4194 		verbose(env,
4195 			"R%d is ptr_%s access user memory: off=%d\n",
4196 			regno, tname, off);
4197 		return -EACCES;
4198 	}
4199 
4200 	if (env->ops->btf_struct_access) {
4201 		ret = env->ops->btf_struct_access(&env->log, reg->btf, t,
4202 						  off, size, atype, &btf_id, &flag);
4203 	} else {
4204 		if (atype != BPF_READ) {
4205 			verbose(env, "only read is supported\n");
4206 			return -EACCES;
4207 		}
4208 
4209 		ret = btf_struct_access(&env->log, reg->btf, t, off, size,
4210 					atype, &btf_id, &flag);
4211 	}
4212 
4213 	if (ret < 0)
4214 		return ret;
4215 
4216 	if (atype == BPF_READ && value_regno >= 0)
4217 		mark_btf_ld_reg(env, regs, value_regno, ret, reg->btf, btf_id, flag);
4218 
4219 	return 0;
4220 }
4221 
4222 static int check_ptr_to_map_access(struct bpf_verifier_env *env,
4223 				   struct bpf_reg_state *regs,
4224 				   int regno, int off, int size,
4225 				   enum bpf_access_type atype,
4226 				   int value_regno)
4227 {
4228 	struct bpf_reg_state *reg = regs + regno;
4229 	struct bpf_map *map = reg->map_ptr;
4230 	enum bpf_type_flag flag = 0;
4231 	const struct btf_type *t;
4232 	const char *tname;
4233 	u32 btf_id;
4234 	int ret;
4235 
4236 	if (!btf_vmlinux) {
4237 		verbose(env, "map_ptr access not supported without CONFIG_DEBUG_INFO_BTF\n");
4238 		return -ENOTSUPP;
4239 	}
4240 
4241 	if (!map->ops->map_btf_id || !*map->ops->map_btf_id) {
4242 		verbose(env, "map_ptr access not supported for map type %d\n",
4243 			map->map_type);
4244 		return -ENOTSUPP;
4245 	}
4246 
4247 	t = btf_type_by_id(btf_vmlinux, *map->ops->map_btf_id);
4248 	tname = btf_name_by_offset(btf_vmlinux, t->name_off);
4249 
4250 	if (!env->allow_ptr_to_map_access) {
4251 		verbose(env,
4252 			"%s access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN\n",
4253 			tname);
4254 		return -EPERM;
4255 	}
4256 
4257 	if (off < 0) {
4258 		verbose(env, "R%d is %s invalid negative access: off=%d\n",
4259 			regno, tname, off);
4260 		return -EACCES;
4261 	}
4262 
4263 	if (atype != BPF_READ) {
4264 		verbose(env, "only read from %s is supported\n", tname);
4265 		return -EACCES;
4266 	}
4267 
4268 	ret = btf_struct_access(&env->log, btf_vmlinux, t, off, size, atype, &btf_id, &flag);
4269 	if (ret < 0)
4270 		return ret;
4271 
4272 	if (value_regno >= 0)
4273 		mark_btf_ld_reg(env, regs, value_regno, ret, btf_vmlinux, btf_id, flag);
4274 
4275 	return 0;
4276 }
4277 
4278 /* Check that the stack access at the given offset is within bounds. The
4279  * maximum valid offset is -1.
4280  *
4281  * The minimum valid offset is -MAX_BPF_STACK for writes, and
4282  * -state->allocated_stack for reads.
4283  */
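/* For instance (illustrative offsets): with MAX_BPF_STACK == 512, a write at
 * off == -8 is accepted, while off == 0 (above the frame) or off == -520
 * (beyond the maximum stack) is rejected.
 */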
4284 static int check_stack_slot_within_bounds(int off,
4285 					  struct bpf_func_state *state,
4286 					  enum bpf_access_type t)
4287 {
4288 	int min_valid_off;
4289 
4290 	if (t == BPF_WRITE)
4291 		min_valid_off = -MAX_BPF_STACK;
4292 	else
4293 		min_valid_off = -state->allocated_stack;
4294 
4295 	if (off < min_valid_off || off > -1)
4296 		return -EACCES;
4297 	return 0;
4298 }
4299 
4300 /* Check that the stack access at 'regno + off' falls within the maximum stack
4301  * bounds.
4302  *
4303  * 'off' includes 'regno->off', but not its dynamic part (if any).
4304  */
4305 static int check_stack_access_within_bounds(
4306 		struct bpf_verifier_env *env,
4307 		int regno, int off, int access_size,
4308 		enum stack_access_src src, enum bpf_access_type type)
4309 {
4310 	struct bpf_reg_state *regs = cur_regs(env);
4311 	struct bpf_reg_state *reg = regs + regno;
4312 	struct bpf_func_state *state = func(env, reg);
4313 	int min_off, max_off;
4314 	int err;
4315 	char *err_extra;
4316 
4317 	if (src == ACCESS_HELPER)
4318 		/* We don't know if helpers are reading or writing (or both). */
4319 		err_extra = " indirect access to";
4320 	else if (type == BPF_READ)
4321 		err_extra = " read from";
4322 	else
4323 		err_extra = " write to";
4324 
4325 	if (tnum_is_const(reg->var_off)) {
4326 		min_off = reg->var_off.value + off;
4327 		if (access_size > 0)
4328 			max_off = min_off + access_size - 1;
4329 		else
4330 			max_off = min_off;
4331 	} else {
4332 		if (reg->smax_value >= BPF_MAX_VAR_OFF ||
4333 		    reg->smin_value <= -BPF_MAX_VAR_OFF) {
4334 			verbose(env, "invalid unbounded variable-offset%s stack R%d\n",
4335 				err_extra, regno);
4336 			return -EACCES;
4337 		}
4338 		min_off = reg->smin_value + off;
4339 		if (access_size > 0)
4340 			max_off = reg->smax_value + off + access_size - 1;
4341 		else
4342 			max_off = min_off;
4343 	}
4344 
4345 	err = check_stack_slot_within_bounds(min_off, state, type);
4346 	if (!err)
4347 		err = check_stack_slot_within_bounds(max_off, state, type);
4348 
4349 	if (err) {
4350 		if (tnum_is_const(reg->var_off)) {
4351 			verbose(env, "invalid%s stack R%d off=%d size=%d\n",
4352 				err_extra, regno, off, access_size);
4353 		} else {
4354 			char tn_buf[48];
4355 
4356 			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
4357 			verbose(env, "invalid variable-offset%s stack R%d var_off=%s size=%d\n",
4358 				err_extra, regno, tn_buf, access_size);
4359 		}
4360 	}
4361 	return err;
4362 }
4363 
4364 /* check whether memory at (regno + off) is accessible for t = (read | write)
4365  * if t==write, value_regno is a register whose value is stored into memory
4366  * if t==read, value_regno is a register which will receive the value from memory
4367  * if t==write && value_regno==-1, some unknown value is stored into memory
4368  * if t==read && value_regno==-1, don't care what we read from memory
4369  */
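/* Concretely (a sketch of the calling convention, not an exhaustive list):
 * a BPF_LDX load passes t == BPF_READ with value_regno == dst_reg, a BPF_STX
 * store passes t == BPF_WRITE with value_regno == src_reg, and a BPF_ST store
 * of an immediate passes t == BPF_WRITE with value_regno == -1.
 */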
4370 static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno,
4371 			    int off, int bpf_size, enum bpf_access_type t,
4372 			    int value_regno, bool strict_alignment_once)
4373 {
4374 	struct bpf_reg_state *regs = cur_regs(env);
4375 	struct bpf_reg_state *reg = regs + regno;
4376 	struct bpf_func_state *state;
4377 	int size, err = 0;
4378 
4379 	size = bpf_size_to_bytes(bpf_size);
4380 	if (size < 0)
4381 		return size;
4382 
4383 	/* alignment checks will add in reg->off themselves */
4384 	err = check_ptr_alignment(env, reg, off, size, strict_alignment_once);
4385 	if (err)
4386 		return err;
4387 
4388 	/* for access checks, reg->off is just part of off */
4389 	off += reg->off;
4390 
4391 	if (reg->type == PTR_TO_MAP_KEY) {
4392 		if (t == BPF_WRITE) {
4393 			verbose(env, "write to change key R%d not allowed\n", regno);
4394 			return -EACCES;
4395 		}
4396 
4397 		err = check_mem_region_access(env, regno, off, size,
4398 					      reg->map_ptr->key_size, false);
4399 		if (err)
4400 			return err;
4401 		if (value_regno >= 0)
4402 			mark_reg_unknown(env, regs, value_regno);
4403 	} else if (reg->type == PTR_TO_MAP_VALUE) {
4404 		if (t == BPF_WRITE && value_regno >= 0 &&
4405 		    is_pointer_value(env, value_regno)) {
4406 			verbose(env, "R%d leaks addr into map\n", value_regno);
4407 			return -EACCES;
4408 		}
4409 		err = check_map_access_type(env, regno, off, size, t);
4410 		if (err)
4411 			return err;
4412 		err = check_map_access(env, regno, off, size, false);
4413 		if (!err && t == BPF_READ && value_regno >= 0) {
4414 			struct bpf_map *map = reg->map_ptr;
4415 
4416 			/* if map is read-only, track its contents as scalars */
4417 			if (tnum_is_const(reg->var_off) &&
4418 			    bpf_map_is_rdonly(map) &&
4419 			    map->ops->map_direct_value_addr) {
4420 				int map_off = off + reg->var_off.value;
4421 				u64 val = 0;
4422 
4423 				err = bpf_map_direct_read(map, map_off, size,
4424 							  &val);
4425 				if (err)
4426 					return err;
4427 
4428 				regs[value_regno].type = SCALAR_VALUE;
4429 				__mark_reg_known(&regs[value_regno], val);
4430 			} else {
4431 				mark_reg_unknown(env, regs, value_regno);
4432 			}
4433 		}
4434 	} else if (base_type(reg->type) == PTR_TO_MEM) {
4435 		bool rdonly_mem = type_is_rdonly_mem(reg->type);
4436 
4437 		if (type_may_be_null(reg->type)) {
4438 			verbose(env, "R%d invalid mem access '%s'\n", regno,
4439 				reg_type_str(env, reg->type));
4440 			return -EACCES;
4441 		}
4442 
4443 		if (t == BPF_WRITE && rdonly_mem) {
4444 			verbose(env, "R%d cannot write into %s\n",
4445 				regno, reg_type_str(env, reg->type));
4446 			return -EACCES;
4447 		}
4448 
4449 		if (t == BPF_WRITE && value_regno >= 0 &&
4450 		    is_pointer_value(env, value_regno)) {
4451 			verbose(env, "R%d leaks addr into mem\n", value_regno);
4452 			return -EACCES;
4453 		}
4454 
4455 		err = check_mem_region_access(env, regno, off, size,
4456 					      reg->mem_size, false);
4457 		if (!err && value_regno >= 0 && (t == BPF_READ || rdonly_mem))
4458 			mark_reg_unknown(env, regs, value_regno);
4459 	} else if (reg->type == PTR_TO_CTX) {
4460 		enum bpf_reg_type reg_type = SCALAR_VALUE;
4461 		struct btf *btf = NULL;
4462 		u32 btf_id = 0;
4463 
4464 		if (t == BPF_WRITE && value_regno >= 0 &&
4465 		    is_pointer_value(env, value_regno)) {
4466 			verbose(env, "R%d leaks addr into ctx\n", value_regno);
4467 			return -EACCES;
4468 		}
4469 
4470 		err = check_ptr_off_reg(env, reg, regno);
4471 		if (err < 0)
4472 			return err;
4473 
4474 		err = check_ctx_access(env, insn_idx, off, size, t, &reg_type, &btf,
4475 				       &btf_id);
4476 		if (err)
4477 			verbose_linfo(env, insn_idx, "; ");
4478 		if (!err && t == BPF_READ && value_regno >= 0) {
4479 			/* ctx access returns either a scalar, or a
4480 			 * PTR_TO_PACKET[_META,_END]. In the latter
4481 			 * case, we know the offset is zero.
4482 			 */
4483 			if (reg_type == SCALAR_VALUE) {
4484 				mark_reg_unknown(env, regs, value_regno);
4485 			} else {
4486 				mark_reg_known_zero(env, regs,
4487 						    value_regno);
4488 				if (type_may_be_null(reg_type))
4489 					regs[value_regno].id = ++env->id_gen;
4490 				/* A load of ctx field could have different
4491 				 * actual load size from the one encoded in the
4492 				 * insn. When the dst is PTR, it is for sure not
4493 				 * a sub-register.
4494 				 */
4495 				regs[value_regno].subreg_def = DEF_NOT_SUBREG;
4496 				if (base_type(reg_type) == PTR_TO_BTF_ID) {
4497 					regs[value_regno].btf = btf;
4498 					regs[value_regno].btf_id = btf_id;
4499 				}
4500 			}
4501 			regs[value_regno].type = reg_type;
4502 		}
4503 
4504 	} else if (reg->type == PTR_TO_STACK) {
4505 		/* Basic bounds checks. */
4506 		err = check_stack_access_within_bounds(env, regno, off, size, ACCESS_DIRECT, t);
4507 		if (err)
4508 			return err;
4509 
4510 		state = func(env, reg);
4511 		err = update_stack_depth(env, state, off);
4512 		if (err)
4513 			return err;
4514 
4515 		if (t == BPF_READ)
4516 			err = check_stack_read(env, regno, off, size,
4517 					       value_regno);
4518 		else
4519 			err = check_stack_write(env, regno, off, size,
4520 						value_regno, insn_idx);
4521 	} else if (reg_is_pkt_pointer(reg)) {
4522 		if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) {
4523 			verbose(env, "cannot write into packet\n");
4524 			return -EACCES;
4525 		}
4526 		if (t == BPF_WRITE && value_regno >= 0 &&
4527 		    is_pointer_value(env, value_regno)) {
4528 			verbose(env, "R%d leaks addr into packet\n",
4529 				value_regno);
4530 			return -EACCES;
4531 		}
4532 		err = check_packet_access(env, regno, off, size, false);
4533 		if (!err && t == BPF_READ && value_regno >= 0)
4534 			mark_reg_unknown(env, regs, value_regno);
4535 	} else if (reg->type == PTR_TO_FLOW_KEYS) {
4536 		if (t == BPF_WRITE && value_regno >= 0 &&
4537 		    is_pointer_value(env, value_regno)) {
4538 			verbose(env, "R%d leaks addr into flow keys\n",
4539 				value_regno);
4540 			return -EACCES;
4541 		}
4542 
4543 		err = check_flow_keys_access(env, off, size);
4544 		if (!err && t == BPF_READ && value_regno >= 0)
4545 			mark_reg_unknown(env, regs, value_regno);
4546 	} else if (type_is_sk_pointer(reg->type)) {
4547 		if (t == BPF_WRITE) {
4548 			verbose(env, "R%d cannot write into %s\n",
4549 				regno, reg_type_str(env, reg->type));
4550 			return -EACCES;
4551 		}
4552 		err = check_sock_access(env, insn_idx, regno, off, size, t);
4553 		if (!err && value_regno >= 0)
4554 			mark_reg_unknown(env, regs, value_regno);
4555 	} else if (reg->type == PTR_TO_TP_BUFFER) {
4556 		err = check_tp_buffer_access(env, reg, regno, off, size);
4557 		if (!err && t == BPF_READ && value_regno >= 0)
4558 			mark_reg_unknown(env, regs, value_regno);
4559 	} else if (reg->type == PTR_TO_BTF_ID) {
4560 		err = check_ptr_to_btf_access(env, regs, regno, off, size, t,
4561 					      value_regno);
4562 	} else if (reg->type == CONST_PTR_TO_MAP) {
4563 		err = check_ptr_to_map_access(env, regs, regno, off, size, t,
4564 					      value_regno);
4565 	} else if (base_type(reg->type) == PTR_TO_BUF) {
4566 		bool rdonly_mem = type_is_rdonly_mem(reg->type);
4567 		const char *buf_info;
4568 		u32 *max_access;
4569 
4570 		if (rdonly_mem) {
4571 			if (t == BPF_WRITE) {
4572 				verbose(env, "R%d cannot write into %s\n",
4573 					regno, reg_type_str(env, reg->type));
4574 				return -EACCES;
4575 			}
4576 			buf_info = "rdonly";
4577 			max_access = &env->prog->aux->max_rdonly_access;
4578 		} else {
4579 			buf_info = "rdwr";
4580 			max_access = &env->prog->aux->max_rdwr_access;
4581 		}
4582 
4583 		err = check_buffer_access(env, reg, regno, off, size, false,
4584 					  buf_info, max_access);
4585 
4586 		if (!err && value_regno >= 0 && (rdonly_mem || t == BPF_READ))
4587 			mark_reg_unknown(env, regs, value_regno);
4588 	} else {
4589 		verbose(env, "R%d invalid mem access '%s'\n", regno,
4590 			reg_type_str(env, reg->type));
4591 		return -EACCES;
4592 	}
4593 
4594 	if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ &&
4595 	    regs[value_regno].type == SCALAR_VALUE) {
4596 		/* b/h/w load zero-extends, mark upper bits as known 0 */
4597 		coerce_reg_to_size(&regs[value_regno], size);
4598 	}
4599 	return err;
4600 }
4601 
4602 static int check_atomic(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn)
4603 {
4604 	int load_reg;
4605 	int err;
4606 
4607 	switch (insn->imm) {
4608 	case BPF_ADD:
4609 	case BPF_ADD | BPF_FETCH:
4610 	case BPF_AND:
4611 	case BPF_AND | BPF_FETCH:
4612 	case BPF_OR:
4613 	case BPF_OR | BPF_FETCH:
4614 	case BPF_XOR:
4615 	case BPF_XOR | BPF_FETCH:
4616 	case BPF_XCHG:
4617 	case BPF_CMPXCHG:
4618 		break;
4619 	default:
4620 		verbose(env, "BPF_ATOMIC uses invalid atomic opcode %02x\n", insn->imm);
4621 		return -EINVAL;
4622 	}
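	/* For example (illustrative), BPF_ATOMIC_OP(BPF_DW, BPF_ADD | BPF_FETCH,
	 * dst_reg, src_reg, off) encodes an atomic fetch-and-add that matches
	 * the accepted opcodes above.
	 */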
4623 
4624 	if (BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) {
4625 		verbose(env, "invalid atomic operand size\n");
4626 		return -EINVAL;
4627 	}
4628 
4629 	/* check src1 operand */
4630 	err = check_reg_arg(env, insn->src_reg, SRC_OP);
4631 	if (err)
4632 		return err;
4633 
4634 	/* check src2 operand */
4635 	err = check_reg_arg(env, insn->dst_reg, SRC_OP);
4636 	if (err)
4637 		return err;
4638 
4639 	if (insn->imm == BPF_CMPXCHG) {
4640 		/* Check comparison of R0 with memory location */
4641 		const u32 aux_reg = BPF_REG_0;
4642 
4643 		err = check_reg_arg(env, aux_reg, SRC_OP);
4644 		if (err)
4645 			return err;
4646 
4647 		if (is_pointer_value(env, aux_reg)) {
4648 			verbose(env, "R%d leaks addr into mem\n", aux_reg);
4649 			return -EACCES;
4650 		}
4651 	}
4652 
4653 	if (is_pointer_value(env, insn->src_reg)) {
4654 		verbose(env, "R%d leaks addr into mem\n", insn->src_reg);
4655 		return -EACCES;
4656 	}
4657 
4658 	if (is_ctx_reg(env, insn->dst_reg) ||
4659 	    is_pkt_reg(env, insn->dst_reg) ||
4660 	    is_flow_key_reg(env, insn->dst_reg) ||
4661 	    is_sk_reg(env, insn->dst_reg)) {
4662 		verbose(env, "BPF_ATOMIC stores into R%d %s is not allowed\n",
4663 			insn->dst_reg,
4664 			reg_type_str(env, reg_state(env, insn->dst_reg)->type));
4665 		return -EACCES;
4666 	}
4667 
4668 	if (insn->imm & BPF_FETCH) {
4669 		if (insn->imm == BPF_CMPXCHG)
4670 			load_reg = BPF_REG_0;
4671 		else
4672 			load_reg = insn->src_reg;
4673 
4674 		/* check and record load of old value */
4675 		err = check_reg_arg(env, load_reg, DST_OP);
4676 		if (err)
4677 			return err;
4678 	} else {
4679 		/* This instruction accesses a memory location but doesn't
4680 		 * actually load it into a register.
4681 		 */
4682 		load_reg = -1;
4683 	}
4684 
4685 	/* Check whether we can read the memory, with second call for fetch
4686 	 * case to simulate the register fill.
4687 	 */
4688 	err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
4689 			       BPF_SIZE(insn->code), BPF_READ, -1, true);
4690 	if (!err && load_reg >= 0)
4691 		err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
4692 				       BPF_SIZE(insn->code), BPF_READ, load_reg,
4693 				       true);
4694 	if (err)
4695 		return err;
4696 
4697 	/* Check whether we can write into the same memory. */
4698 	err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
4699 			       BPF_SIZE(insn->code), BPF_WRITE, -1, true);
4700 	if (err)
4701 		return err;
4702 
4703 	return 0;
4704 }
4705 
4706 /* When register 'regno' is used to read the stack (either directly or through
4707  * a helper function) make sure that it's within stack boundary and, depending
4708  * on the access type, that all elements of the stack are initialized.
4709  *
4710  * 'off' includes 'regno->off', but not its dynamic part (if any).
4711  *
4712  * All registers that have been spilled on the stack in the slots within the
4713  * read offsets are marked as read.
4714  */
4715 static int check_stack_range_initialized(
4716 		struct bpf_verifier_env *env, int regno, int off,
4717 		int access_size, bool zero_size_allowed,
4718 		enum stack_access_src type, struct bpf_call_arg_meta *meta)
4719 {
4720 	struct bpf_reg_state *reg = reg_state(env, regno);
4721 	struct bpf_func_state *state = func(env, reg);
4722 	int err, min_off, max_off, i, j, slot, spi;
4723 	char *err_extra = type == ACCESS_HELPER ? " indirect" : "";
4724 	enum bpf_access_type bounds_check_type;
4725 	/* Some accesses can write anything into the stack, others are
4726 	 * read-only.
4727 	 */
4728 	bool clobber = false;
4729 
4730 	if (access_size == 0 && !zero_size_allowed) {
4731 		verbose(env, "invalid zero-sized read\n");
4732 		return -EACCES;
4733 	}
4734 
4735 	if (type == ACCESS_HELPER) {
4736 		/* The bounds checks for writes are more permissive than for
4737 		 * reads. However, if raw_mode is not set, we'll do extra
4738 		 * checks below.
4739 		 */
4740 		bounds_check_type = BPF_WRITE;
4741 		clobber = true;
4742 	} else {
4743 		bounds_check_type = BPF_READ;
4744 	}
4745 	err = check_stack_access_within_bounds(env, regno, off, access_size,
4746 					       type, bounds_check_type);
4747 	if (err)
4748 		return err;
4749 
4750 
4751 	if (tnum_is_const(reg->var_off)) {
4752 		min_off = max_off = reg->var_off.value + off;
4753 	} else {
4754 		/* Variable offset is prohibited for unprivileged mode for
4755 		 * simplicity since it requires corresponding support in
4756 		 * Spectre masking for stack ALU.
4757 		 * See also retrieve_ptr_limit().
4758 		 */
4759 		if (!env->bypass_spec_v1) {
4760 			char tn_buf[48];
4761 
4762 			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
4763 			verbose(env, "R%d%s variable offset stack access prohibited for !root, var_off=%s\n",
4764 				regno, err_extra, tn_buf);
4765 			return -EACCES;
4766 		}
4767 		/* Only an initialized buffer on the stack is allowed to be
4768 		 * accessed with a variable offset. With an uninitialized buffer
4769 		 * it's hard to guarantee that the whole memory is marked as
4770 		 * initialized on helper return, since the exact bounds are
4771 		 * unknown, which may cause uninitialized stack data to leak.
4772 		 */
4773 		if (meta && meta->raw_mode)
4774 			meta = NULL;
4775 
4776 		min_off = reg->smin_value + off;
4777 		max_off = reg->smax_value + off;
4778 	}
4779 
4780 	if (meta && meta->raw_mode) {
4781 		meta->access_size = access_size;
4782 		meta->regno = regno;
4783 		return 0;
4784 	}
4785 
4786 	for (i = min_off; i < max_off + access_size; i++) {
4787 		u8 *stype;
4788 
4789 		slot = -i - 1;
4790 		spi = slot / BPF_REG_SIZE;
4791 		if (state->allocated_stack <= slot)
4792 			goto err;
4793 		stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
4794 		if (*stype == STACK_MISC)
4795 			goto mark;
4796 		if (*stype == STACK_ZERO) {
4797 			if (clobber) {
4798 				/* helper can write anything into the stack */
4799 				*stype = STACK_MISC;
4800 			}
4801 			goto mark;
4802 		}
4803 
4804 		if (is_spilled_reg(&state->stack[spi]) &&
4805 		    state->stack[spi].spilled_ptr.type == PTR_TO_BTF_ID)
4806 			goto mark;
4807 
4808 		if (is_spilled_reg(&state->stack[spi]) &&
4809 		    (state->stack[spi].spilled_ptr.type == SCALAR_VALUE ||
4810 		     env->allow_ptr_leaks)) {
4811 			if (clobber) {
4812 				__mark_reg_unknown(env, &state->stack[spi].spilled_ptr);
4813 				for (j = 0; j < BPF_REG_SIZE; j++)
4814 					scrub_spilled_slot(&state->stack[spi].slot_type[j]);
4815 			}
4816 			goto mark;
4817 		}
4818 
4819 err:
4820 		if (tnum_is_const(reg->var_off)) {
4821 			verbose(env, "invalid%s read from stack R%d off %d+%d size %d\n",
4822 				err_extra, regno, min_off, i - min_off, access_size);
4823 		} else {
4824 			char tn_buf[48];
4825 
4826 			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
4827 			verbose(env, "invalid%s read from stack R%d var_off %s+%d size %d\n",
4828 				err_extra, regno, tn_buf, i - min_off, access_size);
4829 		}
4830 		return -EACCES;
4831 mark:
4832 		/* reading any byte out of 8-byte 'spill_slot' will cause
4833 		 * the whole slot to be marked as 'read'
4834 		 */
4835 		mark_reg_read(env, &state->stack[spi].spilled_ptr,
4836 			      state->stack[spi].spilled_ptr.parent,
4837 			      REG_LIVE_READ64);
4838 	}
4839 	return update_stack_depth(env, state, min_off);
4840 }
4841 
4842 static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
4843 				   int access_size, bool zero_size_allowed,
4844 				   struct bpf_call_arg_meta *meta)
4845 {
4846 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
4847 	const char *buf_info;
4848 	u32 *max_access;
4849 
4850 	switch (base_type(reg->type)) {
4851 	case PTR_TO_PACKET:
4852 	case PTR_TO_PACKET_META:
4853 		return check_packet_access(env, regno, reg->off, access_size,
4854 					   zero_size_allowed);
4855 	case PTR_TO_MAP_KEY:
4856 		return check_mem_region_access(env, regno, reg->off, access_size,
4857 					       reg->map_ptr->key_size, false);
4858 	case PTR_TO_MAP_VALUE:
4859 		if (check_map_access_type(env, regno, reg->off, access_size,
4860 					  meta && meta->raw_mode ? BPF_WRITE :
4861 					  BPF_READ))
4862 			return -EACCES;
4863 		return check_map_access(env, regno, reg->off, access_size,
4864 					zero_size_allowed);
4865 	case PTR_TO_MEM:
4866 		return check_mem_region_access(env, regno, reg->off,
4867 					       access_size, reg->mem_size,
4868 					       zero_size_allowed);
4869 	case PTR_TO_BUF:
4870 		if (type_is_rdonly_mem(reg->type)) {
4871 			if (meta && meta->raw_mode)
4872 				return -EACCES;
4873 
4874 			buf_info = "rdonly";
4875 			max_access = &env->prog->aux->max_rdonly_access;
4876 		} else {
4877 			buf_info = "rdwr";
4878 			max_access = &env->prog->aux->max_rdwr_access;
4879 		}
4880 		return check_buffer_access(env, reg, regno, reg->off,
4881 					   access_size, zero_size_allowed,
4882 					   buf_info, max_access);
4883 	case PTR_TO_STACK:
4884 		return check_stack_range_initialized(
4885 				env,
4886 				regno, reg->off, access_size,
4887 				zero_size_allowed, ACCESS_HELPER, meta);
4888 	default: /* scalar_value or invalid ptr */
4889 		/* Allow zero-byte read from NULL, regardless of pointer type */
4890 		if (zero_size_allowed && access_size == 0 &&
4891 		    register_is_null(reg))
4892 			return 0;
4893 
4894 		verbose(env, "R%d type=%s ", regno,
4895 			reg_type_str(env, reg->type));
4896 		verbose(env, "expected=%s\n", reg_type_str(env, PTR_TO_STACK));
4897 		return -EACCES;
4898 	}
4899 }
4900 
4901 static int check_mem_size_reg(struct bpf_verifier_env *env,
4902 			      struct bpf_reg_state *reg, u32 regno,
4903 			      bool zero_size_allowed,
4904 			      struct bpf_call_arg_meta *meta)
4905 {
4906 	int err;
4907 
4908 	/* This is used to refine r0 return value bounds for helpers
4909 	 * that enforce this value as an upper bound on return values.
4910 	 * See do_refine_retval_range() for helpers that can refine
4911 	 * the return value. The C type of the helper's size argument is u32,
4912 	 * so we pull the register bound from umax_value; if it can be
4913 	 * negative the verifier errors out. Only upper bounds can be learned
4914 	 * because retval is an int type and negative retvals are allowed.
4915 	 */
4916 	if (meta)
4917 		meta->msize_max_value = reg->umax_value;
4918 
4919 	/* The register is SCALAR_VALUE; the access check
4920 	 * happens using its boundaries.
4921 	 */
4922 	if (!tnum_is_const(reg->var_off))
4923 		/* For unprivileged variable accesses, disable raw
4924 		 * mode so that the program is required to
4925 		 * initialize all the memory that the helper could
4926 		 * just partially fill up.
4927 		 */
4928 		meta = NULL;
4929 
4930 	if (reg->smin_value < 0) {
4931 		verbose(env, "R%d min value is negative, either use unsigned or 'var &= const'\n",
4932 			regno);
4933 		return -EACCES;
4934 	}
4935 
4936 	if (reg->umin_value == 0) {
4937 		err = check_helper_mem_access(env, regno - 1, 0,
4938 					      zero_size_allowed,
4939 					      meta);
4940 		if (err)
4941 			return err;
4942 	}
4943 
4944 	if (reg->umax_value >= BPF_MAX_VAR_SIZ) {
4945 		verbose(env, "R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n",
4946 			regno);
4947 		return -EACCES;
4948 	}
4949 	err = check_helper_mem_access(env, regno - 1,
4950 				      reg->umax_value,
4951 				      zero_size_allowed, meta);
4952 	if (!err)
4953 		err = mark_chain_precision(env, regno);
4954 	return err;
4955 }
4956 
4957 int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
4958 		   u32 regno, u32 mem_size)
4959 {
4960 	if (register_is_null(reg))
4961 		return 0;
4962 
4963 	if (type_may_be_null(reg->type)) {
4964 		/* Assuming that the register contains a value, check if the memory
4965 		 * access is safe. Temporarily save and restore the register's state as
4966 		 * the conversion shouldn't be visible to a caller.
4967 		 */
4968 		const struct bpf_reg_state saved_reg = *reg;
4969 		int rv;
4970 
4971 		mark_ptr_not_null_reg(reg);
4972 		rv = check_helper_mem_access(env, regno, mem_size, true, NULL);
4973 		*reg = saved_reg;
4974 		return rv;
4975 	}
4976 
4977 	return check_helper_mem_access(env, regno, mem_size, true, NULL);
4978 }
4979 
4980 int check_kfunc_mem_size_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
4981 			     u32 regno)
4982 {
4983 	struct bpf_reg_state *mem_reg = &cur_regs(env)[regno - 1];
4984 	bool may_be_null = type_may_be_null(mem_reg->type);
4985 	struct bpf_reg_state saved_reg;
4986 	int err;
4987 
4988 	WARN_ON_ONCE(regno < BPF_REG_2 || regno > BPF_REG_5);
4989 
4990 	if (may_be_null) {
4991 		saved_reg = *mem_reg;
4992 		mark_ptr_not_null_reg(mem_reg);
4993 	}
4994 
4995 	err = check_mem_size_reg(env, reg, regno, true, NULL);
4996 
4997 	if (may_be_null)
4998 		*mem_reg = saved_reg;
4999 	return err;
5000 }
5001 
5002 /* Implementation details:
5003  * bpf_map_lookup returns PTR_TO_MAP_VALUE_OR_NULL
5004  * Two bpf_map_lookups (even with the same key) will have different reg->id.
5005  * For traditional PTR_TO_MAP_VALUE the verifier clears reg->id after
5006  * value_or_null->value transition, since the verifier only cares about
5007  * the range of access to valid map value pointer and doesn't care about actual
5008  * address of the map element.
5009  * For maps with 'struct bpf_spin_lock' inside map value the verifier keeps
5010  * reg->id > 0 after value_or_null->value transition. By doing so
5011  * two bpf_map_lookups will be considered two different pointers that
5012  * point to different bpf_spin_locks.
5013  * The verifier allows taking only one bpf_spin_lock at a time to avoid
5014  * deadlocks.
5015  * Since only one bpf_spin_lock is allowed the checks are simpler than
5016  * reg_is_refcounted() logic. The verifier needs to remember only
5017  * one spin_lock instead of array of acquired_refs.
5018  * cur_state->active_spin_lock remembers which map value element got locked
5019  * and clears it after bpf_spin_unlock.
5020  */
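/* The program-side pattern this enforces looks roughly like (sketch; the
 * value layout and field names are hypothetical):
 *
 *	struct val { struct bpf_spin_lock lock; int data; };
 *	...
 *	val = bpf_map_lookup_elem(&map, &key);
 *	if (val) {
 *		bpf_spin_lock(&val->lock);
 *		val->data++;
 *		bpf_spin_unlock(&val->lock);
 *	}
 */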
5021 static int process_spin_lock(struct bpf_verifier_env *env, int regno,
5022 			     bool is_lock)
5023 {
5024 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
5025 	struct bpf_verifier_state *cur = env->cur_state;
5026 	bool is_const = tnum_is_const(reg->var_off);
5027 	struct bpf_map *map = reg->map_ptr;
5028 	u64 val = reg->var_off.value;
5029 
5030 	if (!is_const) {
5031 		verbose(env,
5032 			"R%d doesn't have constant offset. bpf_spin_lock has to be at the constant offset\n",
5033 			regno);
5034 		return -EINVAL;
5035 	}
5036 	if (!map->btf) {
5037 		verbose(env,
5038 			"map '%s' has to have BTF in order to use bpf_spin_lock\n",
5039 			map->name);
5040 		return -EINVAL;
5041 	}
5042 	if (!map_value_has_spin_lock(map)) {
5043 		if (map->spin_lock_off == -E2BIG)
5044 			verbose(env,
5045 				"map '%s' has more than one 'struct bpf_spin_lock'\n",
5046 				map->name);
5047 		else if (map->spin_lock_off == -ENOENT)
5048 			verbose(env,
5049 				"map '%s' doesn't have 'struct bpf_spin_lock'\n",
5050 				map->name);
5051 		else
5052 			verbose(env,
5053 				"map '%s' is not a struct type or bpf_spin_lock is mangled\n",
5054 				map->name);
5055 		return -EINVAL;
5056 	}
5057 	if (map->spin_lock_off != val + reg->off) {
5058 		verbose(env, "off %lld doesn't point to 'struct bpf_spin_lock'\n",
5059 			val + reg->off);
5060 		return -EINVAL;
5061 	}
5062 	if (is_lock) {
5063 		if (cur->active_spin_lock) {
5064 			verbose(env,
5065 				"Locking two bpf_spin_locks is not allowed\n");
5066 			return -EINVAL;
5067 		}
5068 		cur->active_spin_lock = reg->id;
5069 	} else {
5070 		if (!cur->active_spin_lock) {
5071 			verbose(env, "bpf_spin_unlock without taking a lock\n");
5072 			return -EINVAL;
5073 		}
5074 		if (cur->active_spin_lock != reg->id) {
5075 			verbose(env, "bpf_spin_unlock of different lock\n");
5076 			return -EINVAL;
5077 		}
5078 		cur->active_spin_lock = 0;
5079 	}
5080 	return 0;
5081 }
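
/* Editorial illustration (not part of the original verifier source): the
 * checks above accept the canonical spin-lock pattern, sketched here with
 * hypothetical names ('struct val', 'vals', 'key'):
 *
 *   struct val {
 *           struct bpf_spin_lock lock;
 *           int data;
 *   };
 *   ...
 *   struct val *v = bpf_map_lookup_elem(&vals, &key);
 *   if (v) {
 *           bpf_spin_lock(&v->lock);
 *           v->data++;
 *           bpf_spin_unlock(&v->lock);
 *   }
 *
 * The register passed to bpf_spin_lock() must be PTR_TO_MAP_VALUE with a
 * constant offset equal to map->spin_lock_off, the map's BTF must describe
 * exactly one struct bpf_spin_lock, and only one lock may be held at a time.
 */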
5082 
5083 static int process_timer_func(struct bpf_verifier_env *env, int regno,
5084 			      struct bpf_call_arg_meta *meta)
5085 {
5086 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
5087 	bool is_const = tnum_is_const(reg->var_off);
5088 	struct bpf_map *map = reg->map_ptr;
5089 	u64 val = reg->var_off.value;
5090 
5091 	if (!is_const) {
5092 		verbose(env,
5093 			"R%d doesn't have constant offset. bpf_timer has to be at the constant offset\n",
5094 			regno);
5095 		return -EINVAL;
5096 	}
5097 	if (!map->btf) {
5098 		verbose(env, "map '%s' has to have BTF in order to use bpf_timer\n",
5099 			map->name);
5100 		return -EINVAL;
5101 	}
5102 	if (!map_value_has_timer(map)) {
5103 		if (map->timer_off == -E2BIG)
5104 			verbose(env,
5105 				"map '%s' has more than one 'struct bpf_timer'\n",
5106 				map->name);
5107 		else if (map->timer_off == -ENOENT)
5108 			verbose(env,
5109 				"map '%s' doesn't have 'struct bpf_timer'\n",
5110 				map->name);
5111 		else
5112 			verbose(env,
5113 				"map '%s' is not a struct type or bpf_timer is mangled\n",
5114 				map->name);
5115 		return -EINVAL;
5116 	}
5117 	if (map->timer_off != val + reg->off) {
5118 		verbose(env, "off %lld doesn't point to 'struct bpf_timer' that is at %d\n",
5119 			val + reg->off, map->timer_off);
5120 		return -EINVAL;
5121 	}
5122 	if (meta->map_ptr) {
5123 		verbose(env, "verifier bug. Two map pointers in a timer helper\n");
5124 		return -EFAULT;
5125 	}
5126 	meta->map_uid = reg->map_uid;
5127 	meta->map_ptr = map;
5128 	return 0;
5129 }
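
/* Editorial illustration (not part of the original verifier source): a
 * hedged sketch of the BPF-side pattern these checks expect, with
 * hypothetical names ('struct elem', 'timer_map', 'timer_cb', 'key'):
 *
 *   struct elem { struct bpf_timer t; };
 *   ...
 *   struct elem *e = bpf_map_lookup_elem(&timer_map, &key);
 *   if (e) {
 *           bpf_timer_init(&e->t, &timer_map, CLOCK_MONOTONIC);
 *           bpf_timer_set_callback(&e->t, timer_cb);
 *           bpf_timer_start(&e->t, 0, 0);
 *   }
 *
 * The timer argument must be PTR_TO_MAP_VALUE at the constant offset
 * map->timer_off, and the meta->map_ptr/map_uid recorded here is later
 * compared against the ARG_CONST_MAP_PTR argument of bpf_timer_init().
 */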
5130 
5131 static bool arg_type_is_mem_ptr(enum bpf_arg_type type)
5132 {
5133 	return base_type(type) == ARG_PTR_TO_MEM ||
5134 	       base_type(type) == ARG_PTR_TO_UNINIT_MEM;
5135 }
5136 
5137 static bool arg_type_is_mem_size(enum bpf_arg_type type)
5138 {
5139 	return type == ARG_CONST_SIZE ||
5140 	       type == ARG_CONST_SIZE_OR_ZERO;
5141 }
5142 
5143 static bool arg_type_is_alloc_size(enum bpf_arg_type type)
5144 {
5145 	return type == ARG_CONST_ALLOC_SIZE_OR_ZERO;
5146 }
5147 
5148 static bool arg_type_is_int_ptr(enum bpf_arg_type type)
5149 {
5150 	return type == ARG_PTR_TO_INT ||
5151 	       type == ARG_PTR_TO_LONG;
5152 }
5153 
5154 static int int_ptr_type_to_size(enum bpf_arg_type type)
5155 {
5156 	if (type == ARG_PTR_TO_INT)
5157 		return sizeof(u32);
5158 	else if (type == ARG_PTR_TO_LONG)
5159 		return sizeof(u64);
5160 
5161 	return -EINVAL;
5162 }
5163 
5164 static int resolve_map_arg_type(struct bpf_verifier_env *env,
5165 				 const struct bpf_call_arg_meta *meta,
5166 				 enum bpf_arg_type *arg_type)
5167 {
5168 	if (!meta->map_ptr) {
5169 		/* kernel subsystem misconfigured verifier */
5170 		verbose(env, "invalid map_ptr to access map->type\n");
5171 		return -EACCES;
5172 	}
5173 
5174 	switch (meta->map_ptr->map_type) {
5175 	case BPF_MAP_TYPE_SOCKMAP:
5176 	case BPF_MAP_TYPE_SOCKHASH:
5177 		if (*arg_type == ARG_PTR_TO_MAP_VALUE) {
5178 			*arg_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON;
5179 		} else {
5180 			verbose(env, "invalid arg_type for sockmap/sockhash\n");
5181 			return -EINVAL;
5182 		}
5183 		break;
5184 	case BPF_MAP_TYPE_BLOOM_FILTER:
5185 		if (meta->func_id == BPF_FUNC_map_peek_elem)
5186 			*arg_type = ARG_PTR_TO_MAP_VALUE;
5187 		break;
5188 	default:
5189 		break;
5190 	}
5191 	return 0;
5192 }
5193 
5194 struct bpf_reg_types {
5195 	const enum bpf_reg_type types[10];
5196 	u32 *btf_id;
5197 };
5198 
5199 static const struct bpf_reg_types map_key_value_types = {
5200 	.types = {
5201 		PTR_TO_STACK,
5202 		PTR_TO_PACKET,
5203 		PTR_TO_PACKET_META,
5204 		PTR_TO_MAP_KEY,
5205 		PTR_TO_MAP_VALUE,
5206 	},
5207 };
5208 
5209 static const struct bpf_reg_types sock_types = {
5210 	.types = {
5211 		PTR_TO_SOCK_COMMON,
5212 		PTR_TO_SOCKET,
5213 		PTR_TO_TCP_SOCK,
5214 		PTR_TO_XDP_SOCK,
5215 	},
5216 };
5217 
5218 #ifdef CONFIG_NET
5219 static const struct bpf_reg_types btf_id_sock_common_types = {
5220 	.types = {
5221 		PTR_TO_SOCK_COMMON,
5222 		PTR_TO_SOCKET,
5223 		PTR_TO_TCP_SOCK,
5224 		PTR_TO_XDP_SOCK,
5225 		PTR_TO_BTF_ID,
5226 	},
5227 	.btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
5228 };
5229 #endif
5230 
5231 static const struct bpf_reg_types mem_types = {
5232 	.types = {
5233 		PTR_TO_STACK,
5234 		PTR_TO_PACKET,
5235 		PTR_TO_PACKET_META,
5236 		PTR_TO_MAP_KEY,
5237 		PTR_TO_MAP_VALUE,
5238 		PTR_TO_MEM,
5239 		PTR_TO_MEM | MEM_ALLOC,
5240 		PTR_TO_BUF,
5241 	},
5242 };
5243 
5244 static const struct bpf_reg_types int_ptr_types = {
5245 	.types = {
5246 		PTR_TO_STACK,
5247 		PTR_TO_PACKET,
5248 		PTR_TO_PACKET_META,
5249 		PTR_TO_MAP_KEY,
5250 		PTR_TO_MAP_VALUE,
5251 	},
5252 };
5253 
5254 static const struct bpf_reg_types fullsock_types = { .types = { PTR_TO_SOCKET } };
5255 static const struct bpf_reg_types scalar_types = { .types = { SCALAR_VALUE } };
5256 static const struct bpf_reg_types context_types = { .types = { PTR_TO_CTX } };
5257 static const struct bpf_reg_types alloc_mem_types = { .types = { PTR_TO_MEM | MEM_ALLOC } };
5258 static const struct bpf_reg_types const_map_ptr_types = { .types = { CONST_PTR_TO_MAP } };
5259 static const struct bpf_reg_types btf_ptr_types = { .types = { PTR_TO_BTF_ID } };
5260 static const struct bpf_reg_types spin_lock_types = { .types = { PTR_TO_MAP_VALUE } };
5261 static const struct bpf_reg_types percpu_btf_ptr_types = { .types = { PTR_TO_PERCPU_BTF_ID } };
5262 static const struct bpf_reg_types func_ptr_types = { .types = { PTR_TO_FUNC } };
5263 static const struct bpf_reg_types stack_ptr_types = { .types = { PTR_TO_STACK } };
5264 static const struct bpf_reg_types const_str_ptr_types = { .types = { PTR_TO_MAP_VALUE } };
5265 static const struct bpf_reg_types timer_types = { .types = { PTR_TO_MAP_VALUE } };
5266 
5267 static const struct bpf_reg_types *compatible_reg_types[__BPF_ARG_TYPE_MAX] = {
5268 	[ARG_PTR_TO_MAP_KEY]		= &map_key_value_types,
5269 	[ARG_PTR_TO_MAP_VALUE]		= &map_key_value_types,
5270 	[ARG_PTR_TO_UNINIT_MAP_VALUE]	= &map_key_value_types,
5271 	[ARG_CONST_SIZE]		= &scalar_types,
5272 	[ARG_CONST_SIZE_OR_ZERO]	= &scalar_types,
5273 	[ARG_CONST_ALLOC_SIZE_OR_ZERO]	= &scalar_types,
5274 	[ARG_CONST_MAP_PTR]		= &const_map_ptr_types,
5275 	[ARG_PTR_TO_CTX]		= &context_types,
5276 	[ARG_PTR_TO_SOCK_COMMON]	= &sock_types,
5277 #ifdef CONFIG_NET
5278 	[ARG_PTR_TO_BTF_ID_SOCK_COMMON]	= &btf_id_sock_common_types,
5279 #endif
5280 	[ARG_PTR_TO_SOCKET]		= &fullsock_types,
5281 	[ARG_PTR_TO_BTF_ID]		= &btf_ptr_types,
5282 	[ARG_PTR_TO_SPIN_LOCK]		= &spin_lock_types,
5283 	[ARG_PTR_TO_MEM]		= &mem_types,
5284 	[ARG_PTR_TO_UNINIT_MEM]		= &mem_types,
5285 	[ARG_PTR_TO_ALLOC_MEM]		= &alloc_mem_types,
5286 	[ARG_PTR_TO_INT]		= &int_ptr_types,
5287 	[ARG_PTR_TO_LONG]		= &int_ptr_types,
5288 	[ARG_PTR_TO_PERCPU_BTF_ID]	= &percpu_btf_ptr_types,
5289 	[ARG_PTR_TO_FUNC]		= &func_ptr_types,
5290 	[ARG_PTR_TO_STACK]		= &stack_ptr_types,
5291 	[ARG_PTR_TO_CONST_STR]		= &const_str_ptr_types,
5292 	[ARG_PTR_TO_TIMER]		= &timer_types,
5293 };
5294 
5295 static int check_reg_type(struct bpf_verifier_env *env, u32 regno,
5296 			  enum bpf_arg_type arg_type,
5297 			  const u32 *arg_btf_id)
5298 {
5299 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
5300 	enum bpf_reg_type expected, type = reg->type;
5301 	const struct bpf_reg_types *compatible;
5302 	int i, j;
5303 
5304 	compatible = compatible_reg_types[base_type(arg_type)];
5305 	if (!compatible) {
5306 		verbose(env, "verifier internal error: unsupported arg type %d\n", arg_type);
5307 		return -EFAULT;
5308 	}
5309 
5310 	/* ARG_PTR_TO_MEM + RDONLY is compatible with PTR_TO_MEM and PTR_TO_MEM + RDONLY,
5311 	 * but ARG_PTR_TO_MEM is compatible only with PTR_TO_MEM and NOT with PTR_TO_MEM + RDONLY
5312 	 *
5313 	 * Same for MAYBE_NULL:
5314 	 *
5315 	 * ARG_PTR_TO_MEM + MAYBE_NULL is compatible with PTR_TO_MEM and PTR_TO_MEM + MAYBE_NULL,
5316 	 * but ARG_PTR_TO_MEM is compatible only with PTR_TO_MEM but NOT with PTR_TO_MEM + MAYBE_NULL
5317 	 *
5318 	 * Therefore we fold these flags depending on the arg_type before comparison.
5319 	 */
5320 	if (arg_type & MEM_RDONLY)
5321 		type &= ~MEM_RDONLY;
5322 	if (arg_type & PTR_MAYBE_NULL)
5323 		type &= ~PTR_MAYBE_NULL;
5324 
5325 	for (i = 0; i < ARRAY_SIZE(compatible->types); i++) {
5326 		expected = compatible->types[i];
5327 		if (expected == NOT_INIT)
5328 			break;
5329 
5330 		if (type == expected)
5331 			goto found;
5332 	}
5333 
5334 	verbose(env, "R%d type=%s expected=", regno, reg_type_str(env, reg->type));
5335 	for (j = 0; j + 1 < i; j++)
5336 		verbose(env, "%s, ", reg_type_str(env, compatible->types[j]));
5337 	verbose(env, "%s\n", reg_type_str(env, compatible->types[j]));
5338 	return -EACCES;
5339 
5340 found:
5341 	if (reg->type == PTR_TO_BTF_ID) {
5342 		if (!arg_btf_id) {
5343 			if (!compatible->btf_id) {
5344 				verbose(env, "verifier internal error: missing arg compatible BTF ID\n");
5345 				return -EFAULT;
5346 			}
5347 			arg_btf_id = compatible->btf_id;
5348 		}
5349 
5350 		if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, reg->off,
5351 					  btf_vmlinux, *arg_btf_id)) {
5352 			verbose(env, "R%d is of type %s but %s is expected\n",
5353 				regno, kernel_type_name(reg->btf, reg->btf_id),
5354 				kernel_type_name(btf_vmlinux, *arg_btf_id));
5355 			return -EACCES;
5356 		}
5357 	}
5358 
5359 	return 0;
5360 }
5361 
5362 static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
5363 			  struct bpf_call_arg_meta *meta,
5364 			  const struct bpf_func_proto *fn)
5365 {
5366 	u32 regno = BPF_REG_1 + arg;
5367 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
5368 	enum bpf_arg_type arg_type = fn->arg_type[arg];
5369 	enum bpf_reg_type type = reg->type;
5370 	int err = 0;
5371 
5372 	if (arg_type == ARG_DONTCARE)
5373 		return 0;
5374 
5375 	err = check_reg_arg(env, regno, SRC_OP);
5376 	if (err)
5377 		return err;
5378 
5379 	if (arg_type == ARG_ANYTHING) {
5380 		if (is_pointer_value(env, regno)) {
5381 			verbose(env, "R%d leaks addr into helper function\n",
5382 				regno);
5383 			return -EACCES;
5384 		}
5385 		return 0;
5386 	}
5387 
5388 	if (type_is_pkt_pointer(type) &&
5389 	    !may_access_direct_pkt_data(env, meta, BPF_READ)) {
5390 		verbose(env, "helper access to the packet is not allowed\n");
5391 		return -EACCES;
5392 	}
5393 
5394 	if (base_type(arg_type) == ARG_PTR_TO_MAP_VALUE ||
5395 	    base_type(arg_type) == ARG_PTR_TO_UNINIT_MAP_VALUE) {
5396 		err = resolve_map_arg_type(env, meta, &arg_type);
5397 		if (err)
5398 			return err;
5399 	}
5400 
5401 	if (register_is_null(reg) && type_may_be_null(arg_type))
5402 		/* A NULL register has a SCALAR_VALUE type, so skip
5403 		 * type checking.
5404 		 */
5405 		goto skip_type_check;
5406 
5407 	err = check_reg_type(env, regno, arg_type, fn->arg_btf_id[arg]);
5408 	if (err)
5409 		return err;
5410 
5411 	switch ((u32)type) {
5412 	case SCALAR_VALUE:
5413 	/* Pointer types where reg offset is explicitly allowed: */
5414 	case PTR_TO_PACKET:
5415 	case PTR_TO_PACKET_META:
5416 	case PTR_TO_MAP_KEY:
5417 	case PTR_TO_MAP_VALUE:
5418 	case PTR_TO_MEM:
5419 	case PTR_TO_MEM | MEM_RDONLY:
5420 	case PTR_TO_MEM | MEM_ALLOC:
5421 	case PTR_TO_BUF:
5422 	case PTR_TO_BUF | MEM_RDONLY:
5423 	case PTR_TO_STACK:
5424 		/* Some of the argument types nevertheless require a
5425 		 * zero register offset.
5426 		 */
5427 		if (arg_type == ARG_PTR_TO_ALLOC_MEM)
5428 			goto force_off_check;
5429 		break;
5430 	/* All the rest must be rejected: */
5431 	default:
5432 force_off_check:
5433 		err = __check_ptr_off_reg(env, reg, regno,
5434 					  type == PTR_TO_BTF_ID);
5435 		if (err < 0)
5436 			return err;
5437 		break;
5438 	}
5439 
5440 skip_type_check:
5441 	if (reg->ref_obj_id) {
5442 		if (meta->ref_obj_id) {
5443 			verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n",
5444 				regno, reg->ref_obj_id,
5445 				meta->ref_obj_id);
5446 			return -EFAULT;
5447 		}
5448 		meta->ref_obj_id = reg->ref_obj_id;
5449 	}
5450 
5451 	if (arg_type == ARG_CONST_MAP_PTR) {
5452 		/* bpf_map_xxx(map_ptr) call: remember that map_ptr */
5453 		if (meta->map_ptr) {
5454 			/* Use map_uid (which is unique id of inner map) to reject:
5455 			 * inner_map1 = bpf_map_lookup_elem(outer_map, key1)
5456 			 * inner_map2 = bpf_map_lookup_elem(outer_map, key2)
5457 			 * if (inner_map1 && inner_map2) {
5458 			 *     timer = bpf_map_lookup_elem(inner_map1);
5459 			 *     if (timer)
5460 			 *         // mismatch would have been allowed
5461 			 *         bpf_timer_init(timer, inner_map2);
5462 			 * }
5463 			 *
5464 			 * Comparing map_ptr is enough to distinguish normal and outer maps.
5465 			 */
5466 			if (meta->map_ptr != reg->map_ptr ||
5467 			    meta->map_uid != reg->map_uid) {
5468 				verbose(env,
5469 					"timer pointer in R1 map_uid=%d doesn't match map pointer in R2 map_uid=%d\n",
5470 					meta->map_uid, reg->map_uid);
5471 				return -EINVAL;
5472 			}
5473 		}
5474 		meta->map_ptr = reg->map_ptr;
5475 		meta->map_uid = reg->map_uid;
5476 	} else if (arg_type == ARG_PTR_TO_MAP_KEY) {
5477 		/* bpf_map_xxx(..., map_ptr, ..., key) call:
5478 		 * check that [key, key + map->key_size) are within
5479 		 * stack limits and initialized
5480 		 */
5481 		if (!meta->map_ptr) {
5482 			/* in function declaration map_ptr must come before
5483 			 * map_key, so that it's verified and known before
5484 			 * we have to check map_key here. Otherwise it means
5485 			 * that kernel subsystem misconfigured verifier
5486 			 */
5487 			verbose(env, "invalid map_ptr to access map->key\n");
5488 			return -EACCES;
5489 		}
5490 		err = check_helper_mem_access(env, regno,
5491 					      meta->map_ptr->key_size, false,
5492 					      NULL);
5493 	} else if (base_type(arg_type) == ARG_PTR_TO_MAP_VALUE ||
5494 		   base_type(arg_type) == ARG_PTR_TO_UNINIT_MAP_VALUE) {
5495 		if (type_may_be_null(arg_type) && register_is_null(reg))
5496 			return 0;
5497 
5498 		/* bpf_map_xxx(..., map_ptr, ..., value) call:
5499 		 * check [value, value + map->value_size) validity
5500 		 */
5501 		if (!meta->map_ptr) {
5502 			/* kernel subsystem misconfigured verifier */
5503 			verbose(env, "invalid map_ptr to access map->value\n");
5504 			return -EACCES;
5505 		}
5506 		meta->raw_mode = (arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE);
5507 		err = check_helper_mem_access(env, regno,
5508 					      meta->map_ptr->value_size, false,
5509 					      meta);
5510 	} else if (arg_type == ARG_PTR_TO_PERCPU_BTF_ID) {
5511 		if (!reg->btf_id) {
5512 			verbose(env, "Helper has invalid btf_id in R%d\n", regno);
5513 			return -EACCES;
5514 		}
5515 		meta->ret_btf = reg->btf;
5516 		meta->ret_btf_id = reg->btf_id;
5517 	} else if (arg_type == ARG_PTR_TO_SPIN_LOCK) {
5518 		if (meta->func_id == BPF_FUNC_spin_lock) {
5519 			if (process_spin_lock(env, regno, true))
5520 				return -EACCES;
5521 		} else if (meta->func_id == BPF_FUNC_spin_unlock) {
5522 			if (process_spin_lock(env, regno, false))
5523 				return -EACCES;
5524 		} else {
5525 			verbose(env, "verifier internal error\n");
5526 			return -EFAULT;
5527 		}
5528 	} else if (arg_type == ARG_PTR_TO_TIMER) {
5529 		if (process_timer_func(env, regno, meta))
5530 			return -EACCES;
5531 	} else if (arg_type == ARG_PTR_TO_FUNC) {
5532 		meta->subprogno = reg->subprogno;
5533 	} else if (arg_type_is_mem_ptr(arg_type)) {
5534 		/* The access to this pointer is only checked when we hit the
5535 		 * next is_mem_size argument below.
5536 		 */
5537 		meta->raw_mode = (arg_type == ARG_PTR_TO_UNINIT_MEM);
5538 	} else if (arg_type_is_mem_size(arg_type)) {
5539 		bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO);
5540 
5541 		err = check_mem_size_reg(env, reg, regno, zero_size_allowed, meta);
5542 	} else if (arg_type_is_alloc_size(arg_type)) {
5543 		if (!tnum_is_const(reg->var_off)) {
5544 			verbose(env, "R%d is not a known constant\n",
5545 				regno);
5546 			return -EACCES;
5547 		}
5548 		meta->mem_size = reg->var_off.value;
5549 	} else if (arg_type_is_int_ptr(arg_type)) {
5550 		int size = int_ptr_type_to_size(arg_type);
5551 
5552 		err = check_helper_mem_access(env, regno, size, false, meta);
5553 		if (err)
5554 			return err;
5555 		err = check_ptr_alignment(env, reg, 0, size, true);
5556 	} else if (arg_type == ARG_PTR_TO_CONST_STR) {
5557 		struct bpf_map *map = reg->map_ptr;
5558 		int map_off;
5559 		u64 map_addr;
5560 		char *str_ptr;
5561 
5562 		if (!bpf_map_is_rdonly(map)) {
5563 			verbose(env, "R%d does not point to a readonly map\n", regno);
5564 			return -EACCES;
5565 		}
5566 
5567 		if (!tnum_is_const(reg->var_off)) {
5568 			verbose(env, "R%d is not a constant address\n", regno);
5569 			return -EACCES;
5570 		}
5571 
5572 		if (!map->ops->map_direct_value_addr) {
5573 			verbose(env, "no direct value access support for this map type\n");
5574 			return -EACCES;
5575 		}
5576 
5577 		err = check_map_access(env, regno, reg->off,
5578 				       map->value_size - reg->off, false);
5579 		if (err)
5580 			return err;
5581 
5582 		map_off = reg->off + reg->var_off.value;
5583 		err = map->ops->map_direct_value_addr(map, &map_addr, map_off);
5584 		if (err) {
5585 			verbose(env, "direct value access on string failed\n");
5586 			return err;
5587 		}
5588 
5589 		str_ptr = (char *)(long)(map_addr);
5590 		if (!strnchr(str_ptr + map_off, map->value_size - map_off, 0)) {
5591 			verbose(env, "string is not zero-terminated\n");
5592 			return -EINVAL;
5593 		}
5594 	}
5595 
5596 	return err;
5597 }
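
/* Editorial illustration (not part of the original verifier source): the
 * ARG_PTR_TO_CONST_STR handling above matches the usual bpf_snprintf() /
 * bpf_seq_printf() pattern, where the format string lives in a read-only
 * map. Names below ('fmt', 'out', 'args', 'pid', 'comm') are hypothetical:
 *
 *   static const char fmt[] = "pid=%d comm=%s\n";
 *   char out[64];
 *   u64 args[2] = { pid, (u64)(long)comm };
 *   ...
 *   bpf_snprintf(out, sizeof(out), fmt, args, sizeof(args));
 *
 * libbpf typically places 'fmt' in a read-only array map (.rodata), so the
 * register is PTR_TO_MAP_VALUE with a constant var_off into a map that
 * implements map_direct_value_addr(), and the string is checked above to be
 * zero-terminated within the map value.
 */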
5598 
5599 static bool may_update_sockmap(struct bpf_verifier_env *env, int func_id)
5600 {
5601 	enum bpf_attach_type eatype = env->prog->expected_attach_type;
5602 	enum bpf_prog_type type = resolve_prog_type(env->prog);
5603 
5604 	if (func_id != BPF_FUNC_map_update_elem)
5605 		return false;
5606 
5607 	/* It's not possible to get access to a locked struct sock in these
5608 	 * contexts, so updating is safe.
5609 	 */
5610 	switch (type) {
5611 	case BPF_PROG_TYPE_TRACING:
5612 		if (eatype == BPF_TRACE_ITER)
5613 			return true;
5614 		break;
5615 	case BPF_PROG_TYPE_SOCKET_FILTER:
5616 	case BPF_PROG_TYPE_SCHED_CLS:
5617 	case BPF_PROG_TYPE_SCHED_ACT:
5618 	case BPF_PROG_TYPE_XDP:
5619 	case BPF_PROG_TYPE_SK_REUSEPORT:
5620 	case BPF_PROG_TYPE_FLOW_DISSECTOR:
5621 	case BPF_PROG_TYPE_SK_LOOKUP:
5622 		return true;
5623 	default:
5624 		break;
5625 	}
5626 
5627 	verbose(env, "cannot update sockmap in this context\n");
5628 	return false;
5629 }
5630 
5631 static bool allow_tail_call_in_subprogs(struct bpf_verifier_env *env)
5632 {
5633 	return env->prog->jit_requested && IS_ENABLED(CONFIG_X86_64);
5634 }
5635 
5636 static int check_map_func_compatibility(struct bpf_verifier_env *env,
5637 					struct bpf_map *map, int func_id)
5638 {
5639 	if (!map)
5640 		return 0;
5641 
5642 	/* We need a two way check, first is from map perspective ... */
5643 	switch (map->map_type) {
5644 	case BPF_MAP_TYPE_PROG_ARRAY:
5645 		if (func_id != BPF_FUNC_tail_call)
5646 			goto error;
5647 		break;
5648 	case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
5649 		if (func_id != BPF_FUNC_perf_event_read &&
5650 		    func_id != BPF_FUNC_perf_event_output &&
5651 		    func_id != BPF_FUNC_skb_output &&
5652 		    func_id != BPF_FUNC_perf_event_read_value &&
5653 		    func_id != BPF_FUNC_xdp_output)
5654 			goto error;
5655 		break;
5656 	case BPF_MAP_TYPE_RINGBUF:
5657 		if (func_id != BPF_FUNC_ringbuf_output &&
5658 		    func_id != BPF_FUNC_ringbuf_reserve &&
5659 		    func_id != BPF_FUNC_ringbuf_query)
5660 			goto error;
5661 		break;
5662 	case BPF_MAP_TYPE_STACK_TRACE:
5663 		if (func_id != BPF_FUNC_get_stackid)
5664 			goto error;
5665 		break;
5666 	case BPF_MAP_TYPE_CGROUP_ARRAY:
5667 		if (func_id != BPF_FUNC_skb_under_cgroup &&
5668 		    func_id != BPF_FUNC_current_task_under_cgroup)
5669 			goto error;
5670 		break;
5671 	case BPF_MAP_TYPE_CGROUP_STORAGE:
5672 	case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
5673 		if (func_id != BPF_FUNC_get_local_storage)
5674 			goto error;
5675 		break;
5676 	case BPF_MAP_TYPE_DEVMAP:
5677 	case BPF_MAP_TYPE_DEVMAP_HASH:
5678 		if (func_id != BPF_FUNC_redirect_map &&
5679 		    func_id != BPF_FUNC_map_lookup_elem)
5680 			goto error;
5681 		break;
5682 	/* Restrict bpf side of cpumap and xskmap, open when use-cases
5683 	 * appear.
5684 	 */
5685 	case BPF_MAP_TYPE_CPUMAP:
5686 		if (func_id != BPF_FUNC_redirect_map)
5687 			goto error;
5688 		break;
5689 	case BPF_MAP_TYPE_XSKMAP:
5690 		if (func_id != BPF_FUNC_redirect_map &&
5691 		    func_id != BPF_FUNC_map_lookup_elem)
5692 			goto error;
5693 		break;
5694 	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
5695 	case BPF_MAP_TYPE_HASH_OF_MAPS:
5696 		if (func_id != BPF_FUNC_map_lookup_elem)
5697 			goto error;
5698 		break;
5699 	case BPF_MAP_TYPE_SOCKMAP:
5700 		if (func_id != BPF_FUNC_sk_redirect_map &&
5701 		    func_id != BPF_FUNC_sock_map_update &&
5702 		    func_id != BPF_FUNC_map_delete_elem &&
5703 		    func_id != BPF_FUNC_msg_redirect_map &&
5704 		    func_id != BPF_FUNC_sk_select_reuseport &&
5705 		    func_id != BPF_FUNC_map_lookup_elem &&
5706 		    !may_update_sockmap(env, func_id))
5707 			goto error;
5708 		break;
5709 	case BPF_MAP_TYPE_SOCKHASH:
5710 		if (func_id != BPF_FUNC_sk_redirect_hash &&
5711 		    func_id != BPF_FUNC_sock_hash_update &&
5712 		    func_id != BPF_FUNC_map_delete_elem &&
5713 		    func_id != BPF_FUNC_msg_redirect_hash &&
5714 		    func_id != BPF_FUNC_sk_select_reuseport &&
5715 		    func_id != BPF_FUNC_map_lookup_elem &&
5716 		    !may_update_sockmap(env, func_id))
5717 			goto error;
5718 		break;
5719 	case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
5720 		if (func_id != BPF_FUNC_sk_select_reuseport)
5721 			goto error;
5722 		break;
5723 	case BPF_MAP_TYPE_QUEUE:
5724 	case BPF_MAP_TYPE_STACK:
5725 		if (func_id != BPF_FUNC_map_peek_elem &&
5726 		    func_id != BPF_FUNC_map_pop_elem &&
5727 		    func_id != BPF_FUNC_map_push_elem)
5728 			goto error;
5729 		break;
5730 	case BPF_MAP_TYPE_SK_STORAGE:
5731 		if (func_id != BPF_FUNC_sk_storage_get &&
5732 		    func_id != BPF_FUNC_sk_storage_delete)
5733 			goto error;
5734 		break;
5735 	case BPF_MAP_TYPE_INODE_STORAGE:
5736 		if (func_id != BPF_FUNC_inode_storage_get &&
5737 		    func_id != BPF_FUNC_inode_storage_delete)
5738 			goto error;
5739 		break;
5740 	case BPF_MAP_TYPE_TASK_STORAGE:
5741 		if (func_id != BPF_FUNC_task_storage_get &&
5742 		    func_id != BPF_FUNC_task_storage_delete)
5743 			goto error;
5744 		break;
5745 	case BPF_MAP_TYPE_BLOOM_FILTER:
5746 		if (func_id != BPF_FUNC_map_peek_elem &&
5747 		    func_id != BPF_FUNC_map_push_elem)
5748 			goto error;
5749 		break;
5750 	default:
5751 		break;
5752 	}
5753 
5754 	/* ... and second from the function itself. */
5755 	switch (func_id) {
5756 	case BPF_FUNC_tail_call:
5757 		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
5758 			goto error;
5759 		if (env->subprog_cnt > 1 && !allow_tail_call_in_subprogs(env)) {
5760 			verbose(env, "tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls\n");
5761 			return -EINVAL;
5762 		}
5763 		break;
5764 	case BPF_FUNC_perf_event_read:
5765 	case BPF_FUNC_perf_event_output:
5766 	case BPF_FUNC_perf_event_read_value:
5767 	case BPF_FUNC_skb_output:
5768 	case BPF_FUNC_xdp_output:
5769 		if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
5770 			goto error;
5771 		break;
5772 	case BPF_FUNC_ringbuf_output:
5773 	case BPF_FUNC_ringbuf_reserve:
5774 	case BPF_FUNC_ringbuf_query:
5775 		if (map->map_type != BPF_MAP_TYPE_RINGBUF)
5776 			goto error;
5777 		break;
5778 	case BPF_FUNC_get_stackid:
5779 		if (map->map_type != BPF_MAP_TYPE_STACK_TRACE)
5780 			goto error;
5781 		break;
5782 	case BPF_FUNC_current_task_under_cgroup:
5783 	case BPF_FUNC_skb_under_cgroup:
5784 		if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY)
5785 			goto error;
5786 		break;
5787 	case BPF_FUNC_redirect_map:
5788 		if (map->map_type != BPF_MAP_TYPE_DEVMAP &&
5789 		    map->map_type != BPF_MAP_TYPE_DEVMAP_HASH &&
5790 		    map->map_type != BPF_MAP_TYPE_CPUMAP &&
5791 		    map->map_type != BPF_MAP_TYPE_XSKMAP)
5792 			goto error;
5793 		break;
5794 	case BPF_FUNC_sk_redirect_map:
5795 	case BPF_FUNC_msg_redirect_map:
5796 	case BPF_FUNC_sock_map_update:
5797 		if (map->map_type != BPF_MAP_TYPE_SOCKMAP)
5798 			goto error;
5799 		break;
5800 	case BPF_FUNC_sk_redirect_hash:
5801 	case BPF_FUNC_msg_redirect_hash:
5802 	case BPF_FUNC_sock_hash_update:
5803 		if (map->map_type != BPF_MAP_TYPE_SOCKHASH)
5804 			goto error;
5805 		break;
5806 	case BPF_FUNC_get_local_storage:
5807 		if (map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
5808 		    map->map_type != BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
5809 			goto error;
5810 		break;
5811 	case BPF_FUNC_sk_select_reuseport:
5812 		if (map->map_type != BPF_MAP_TYPE_REUSEPORT_SOCKARRAY &&
5813 		    map->map_type != BPF_MAP_TYPE_SOCKMAP &&
5814 		    map->map_type != BPF_MAP_TYPE_SOCKHASH)
5815 			goto error;
5816 		break;
5817 	case BPF_FUNC_map_pop_elem:
5818 		if (map->map_type != BPF_MAP_TYPE_QUEUE &&
5819 		    map->map_type != BPF_MAP_TYPE_STACK)
5820 			goto error;
5821 		break;
5822 	case BPF_FUNC_map_peek_elem:
5823 	case BPF_FUNC_map_push_elem:
5824 		if (map->map_type != BPF_MAP_TYPE_QUEUE &&
5825 		    map->map_type != BPF_MAP_TYPE_STACK &&
5826 		    map->map_type != BPF_MAP_TYPE_BLOOM_FILTER)
5827 			goto error;
5828 		break;
5829 	case BPF_FUNC_sk_storage_get:
5830 	case BPF_FUNC_sk_storage_delete:
5831 		if (map->map_type != BPF_MAP_TYPE_SK_STORAGE)
5832 			goto error;
5833 		break;
5834 	case BPF_FUNC_inode_storage_get:
5835 	case BPF_FUNC_inode_storage_delete:
5836 		if (map->map_type != BPF_MAP_TYPE_INODE_STORAGE)
5837 			goto error;
5838 		break;
5839 	case BPF_FUNC_task_storage_get:
5840 	case BPF_FUNC_task_storage_delete:
5841 		if (map->map_type != BPF_MAP_TYPE_TASK_STORAGE)
5842 			goto error;
5843 		break;
5844 	default:
5845 		break;
5846 	}
5847 
5848 	return 0;
5849 error:
5850 	verbose(env, "cannot pass map_type %d into func %s#%d\n",
5851 		map->map_type, func_id_name(func_id), func_id);
5852 	return -EINVAL;
5853 }
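
/* Editorial illustration (not part of the original verifier source): the
 * two-way check above means, for a hypothetical PROG_ARRAY map 'jmp_table':
 *
 *   bpf_tail_call(ctx, &jmp_table, idx);           // accepted by both switches
 *   bpf_map_update_elem(&jmp_table, &k, &v, 0);    // rejected by the first one
 *
 * and, conversely, bpf_tail_call() on any non-PROG_ARRAY map is rejected by
 * the second switch.
 */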
5854 
5855 static bool check_raw_mode_ok(const struct bpf_func_proto *fn)
5856 {
5857 	int count = 0;
5858 
5859 	if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM)
5860 		count++;
5861 	if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM)
5862 		count++;
5863 	if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM)
5864 		count++;
5865 	if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM)
5866 		count++;
5867 	if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM)
5868 		count++;
5869 
5870 	/* We only support one arg being in raw mode at the moment,
5871 	 * which is sufficient for the helper functions we have
5872 	 * right now.
5873 	 */
5874 	return count <= 1;
5875 }
5876 
5877 static bool check_args_pair_invalid(enum bpf_arg_type arg_curr,
5878 				    enum bpf_arg_type arg_next)
5879 {
5880 	return (arg_type_is_mem_ptr(arg_curr) &&
5881 	        !arg_type_is_mem_size(arg_next)) ||
5882 	       (!arg_type_is_mem_ptr(arg_curr) &&
5883 		arg_type_is_mem_size(arg_next));
5884 }
5885 
5886 static bool check_arg_pair_ok(const struct bpf_func_proto *fn)
5887 {
5888 	/* bpf_xxx(..., buf, len) call will access 'len'
5889 	 * bytes from memory 'buf'. Both arg types need
5890 	 * to be paired, so make sure there's no buggy
5891 	 * helper function specification.
5892 	 */
5893 	if (arg_type_is_mem_size(fn->arg1_type) ||
5894 	    arg_type_is_mem_ptr(fn->arg5_type)  ||
5895 	    check_args_pair_invalid(fn->arg1_type, fn->arg2_type) ||
5896 	    check_args_pair_invalid(fn->arg2_type, fn->arg3_type) ||
5897 	    check_args_pair_invalid(fn->arg3_type, fn->arg4_type) ||
5898 	    check_args_pair_invalid(fn->arg4_type, fn->arg5_type))
5899 		return false;
5900 
5901 	return true;
5902 }
5903 
5904 static bool check_refcount_ok(const struct bpf_func_proto *fn, int func_id)
5905 {
5906 	int count = 0;
5907 
5908 	if (arg_type_may_be_refcounted(fn->arg1_type))
5909 		count++;
5910 	if (arg_type_may_be_refcounted(fn->arg2_type))
5911 		count++;
5912 	if (arg_type_may_be_refcounted(fn->arg3_type))
5913 		count++;
5914 	if (arg_type_may_be_refcounted(fn->arg4_type))
5915 		count++;
5916 	if (arg_type_may_be_refcounted(fn->arg5_type))
5917 		count++;
5918 
5919 	/* A reference acquiring function cannot acquire
5920 	 * another refcounted ptr.
5921 	 */
5922 	if (may_be_acquire_function(func_id) && count)
5923 		return false;
5924 
5925 	/* We only support one arg being unreferenced at the moment,
5926 	 * which is sufficient for the helper functions we have right now.
5927 	 */
5928 	return count <= 1;
5929 }
5930 
5931 static bool check_btf_id_ok(const struct bpf_func_proto *fn)
5932 {
5933 	int i;
5934 
5935 	for (i = 0; i < ARRAY_SIZE(fn->arg_type); i++) {
5936 		if (fn->arg_type[i] == ARG_PTR_TO_BTF_ID && !fn->arg_btf_id[i])
5937 			return false;
5938 
5939 		if (fn->arg_type[i] != ARG_PTR_TO_BTF_ID && fn->arg_btf_id[i])
5940 			return false;
5941 	}
5942 
5943 	return true;
5944 }
5945 
5946 static int check_func_proto(const struct bpf_func_proto *fn, int func_id)
5947 {
5948 	return check_raw_mode_ok(fn) &&
5949 	       check_arg_pair_ok(fn) &&
5950 	       check_btf_id_ok(fn) &&
5951 	       check_refcount_ok(fn, func_id) ? 0 : -EINVAL;
5952 }
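
/* Editorial illustration (not part of the original verifier source): the
 * raw-mode and pairing rules above are about helper prototypes of the form
 * below; the field layout is sketched from memory and should be treated as
 * an assumption, not a verbatim copy:
 *
 *   const struct bpf_func_proto bpf_probe_read_kernel_proto = {
 *           .func      = bpf_probe_read_kernel,
 *           .ret_type  = RET_INTEGER,
 *           .arg1_type = ARG_PTR_TO_UNINIT_MEM,    // destination buffer
 *           .arg2_type = ARG_CONST_SIZE_OR_ZERO,   // its size, must follow
 *           .arg3_type = ARG_ANYTHING,
 *   };
 *
 * check_raw_mode_ok() allows at most one ARG_PTR_TO_UNINIT_MEM argument and
 * check_arg_pair_ok() requires each mem pointer to be immediately followed
 * by its size argument.
 */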
5953 
5954 /* Packet data might have moved, so any old PTR_TO_PACKET[_META,_END]
5955  * pointers are now invalid; turn them into unknown SCALAR_VALUE.
5956  */
5957 static void __clear_all_pkt_pointers(struct bpf_verifier_env *env,
5958 				     struct bpf_func_state *state)
5959 {
5960 	struct bpf_reg_state *regs = state->regs, *reg;
5961 	int i;
5962 
5963 	for (i = 0; i < MAX_BPF_REG; i++)
5964 		if (reg_is_pkt_pointer_any(&regs[i]))
5965 			mark_reg_unknown(env, regs, i);
5966 
5967 	bpf_for_each_spilled_reg(i, state, reg) {
5968 		if (!reg)
5969 			continue;
5970 		if (reg_is_pkt_pointer_any(reg))
5971 			__mark_reg_unknown(env, reg);
5972 	}
5973 }
5974 
5975 static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
5976 {
5977 	struct bpf_verifier_state *vstate = env->cur_state;
5978 	int i;
5979 
5980 	for (i = 0; i <= vstate->curframe; i++)
5981 		__clear_all_pkt_pointers(env, vstate->frame[i]);
5982 }
5983 
5984 enum {
5985 	AT_PKT_END = -1,
5986 	BEYOND_PKT_END = -2,
5987 };
5988 
5989 static void mark_pkt_end(struct bpf_verifier_state *vstate, int regn, bool range_open)
5990 {
5991 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
5992 	struct bpf_reg_state *reg = &state->regs[regn];
5993 
5994 	if (reg->type != PTR_TO_PACKET)
5995 		/* PTR_TO_PACKET_META is not supported yet */
5996 		return;
5997 
5998 	/* The 'reg' is pkt > pkt_end or pkt >= pkt_end.
5999 	 * How far beyond pkt_end it goes is unknown.
6000 	 * if (!range_open) it's the case of pkt >= pkt_end
6001 	 * if (range_open) it's the case of pkt > pkt_end
6002 	 * hence in the range_open case this pointer is at least 1 byte beyond pkt_end
6003 	 */
6004 	if (range_open)
6005 		reg->range = BEYOND_PKT_END;
6006 	else
6007 		reg->range = AT_PKT_END;
6008 }
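
/* Editorial illustration (not part of the original verifier source): a
 * hedged sketch of the comparison shape mark_pkt_end() handles, using a
 * hypothetical XDP snippet:
 *
 *   void *data = (void *)(long)ctx->data;
 *   void *data_end = (void *)(long)ctx->data_end;
 *   struct ethhdr *eth = data;
 *
 *   if (data + sizeof(*eth) > data_end)     // pkt pointer compared to pkt_end
 *           return XDP_DROP;
 *
 * In the taken (true) branch 'data + sizeof(*eth)' is marked BEYOND_PKT_END
 * ('>' is range_open); with '>=' it would be AT_PKT_END. The fall-through
 * branch instead gets a positive reg->range allowing direct packet access.
 */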
6009 
6010 static void release_reg_references(struct bpf_verifier_env *env,
6011 				   struct bpf_func_state *state,
6012 				   int ref_obj_id)
6013 {
6014 	struct bpf_reg_state *regs = state->regs, *reg;
6015 	int i;
6016 
6017 	for (i = 0; i < MAX_BPF_REG; i++)
6018 		if (regs[i].ref_obj_id == ref_obj_id)
6019 			mark_reg_unknown(env, regs, i);
6020 
6021 	bpf_for_each_spilled_reg(i, state, reg) {
6022 		if (!reg)
6023 			continue;
6024 		if (reg->ref_obj_id == ref_obj_id)
6025 			__mark_reg_unknown(env, reg);
6026 	}
6027 }
6028 
6029 /* The pointer with the specified id has released its reference to kernel
6030  * resources. Identify all copies of the same pointer and clear the reference.
6031  */
6032 static int release_reference(struct bpf_verifier_env *env,
6033 			     int ref_obj_id)
6034 {
6035 	struct bpf_verifier_state *vstate = env->cur_state;
6036 	int err;
6037 	int i;
6038 
6039 	err = release_reference_state(cur_func(env), ref_obj_id);
6040 	if (err)
6041 		return err;
6042 
6043 	for (i = 0; i <= vstate->curframe; i++)
6044 		release_reg_references(env, vstate->frame[i], ref_obj_id);
6045 
6046 	return 0;
6047 }
6048 
6049 static void clear_caller_saved_regs(struct bpf_verifier_env *env,
6050 				    struct bpf_reg_state *regs)
6051 {
6052 	int i;
6053 
6054 	/* after the call registers r0 - r5 were scratched */
6055 	for (i = 0; i < CALLER_SAVED_REGS; i++) {
6056 		mark_reg_not_init(env, regs, caller_saved[i]);
6057 		check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
6058 	}
6059 }
6060 
6061 typedef int (*set_callee_state_fn)(struct bpf_verifier_env *env,
6062 				   struct bpf_func_state *caller,
6063 				   struct bpf_func_state *callee,
6064 				   int insn_idx);
6065 
6066 static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
6067 			     int *insn_idx, int subprog,
6068 			     set_callee_state_fn set_callee_state_cb)
6069 {
6070 	struct bpf_verifier_state *state = env->cur_state;
6071 	struct bpf_func_info_aux *func_info_aux;
6072 	struct bpf_func_state *caller, *callee;
6073 	int err;
6074 	bool is_global = false;
6075 
6076 	if (state->curframe + 1 >= MAX_CALL_FRAMES) {
6077 		verbose(env, "the call stack of %d frames is too deep\n",
6078 			state->curframe + 2);
6079 		return -E2BIG;
6080 	}
6081 
6082 	caller = state->frame[state->curframe];
6083 	if (state->frame[state->curframe + 1]) {
6084 		verbose(env, "verifier bug. Frame %d already allocated\n",
6085 			state->curframe + 1);
6086 		return -EFAULT;
6087 	}
6088 
6089 	func_info_aux = env->prog->aux->func_info_aux;
6090 	if (func_info_aux)
6091 		is_global = func_info_aux[subprog].linkage == BTF_FUNC_GLOBAL;
6092 	err = btf_check_subprog_arg_match(env, subprog, caller->regs);
6093 	if (err == -EFAULT)
6094 		return err;
6095 	if (is_global) {
6096 		if (err) {
6097 			verbose(env, "Caller passes invalid args into func#%d\n",
6098 				subprog);
6099 			return err;
6100 		} else {
6101 			if (env->log.level & BPF_LOG_LEVEL)
6102 				verbose(env,
6103 					"Func#%d is global and valid. Skipping.\n",
6104 					subprog);
6105 			clear_caller_saved_regs(env, caller->regs);
6106 
6107 			/* All global functions return a 64-bit SCALAR_VALUE */
6108 			mark_reg_unknown(env, caller->regs, BPF_REG_0);
6109 			caller->regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG;
6110 
6111 			/* continue with next insn after call */
6112 			return 0;
6113 		}
6114 	}
6115 
6116 	if (insn->code == (BPF_JMP | BPF_CALL) &&
6117 	    insn->src_reg == 0 &&
6118 	    insn->imm == BPF_FUNC_timer_set_callback) {
6119 		struct bpf_verifier_state *async_cb;
6120 
6121 		/* there is no real recursion here. timer callbacks are async */
6122 		env->subprog_info[subprog].is_async_cb = true;
6123 		async_cb = push_async_cb(env, env->subprog_info[subprog].start,
6124 					 *insn_idx, subprog);
6125 		if (!async_cb)
6126 			return -EFAULT;
6127 		callee = async_cb->frame[0];
6128 		callee->async_entry_cnt = caller->async_entry_cnt + 1;
6129 
6130 		/* Convert bpf_timer_set_callback() args into timer callback args */
6131 		err = set_callee_state_cb(env, caller, callee, *insn_idx);
6132 		if (err)
6133 			return err;
6134 
6135 		clear_caller_saved_regs(env, caller->regs);
6136 		mark_reg_unknown(env, caller->regs, BPF_REG_0);
6137 		caller->regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG;
6138 		/* continue with next insn after call */
6139 		return 0;
6140 	}
6141 
6142 	callee = kzalloc(sizeof(*callee), GFP_KERNEL);
6143 	if (!callee)
6144 		return -ENOMEM;
6145 	state->frame[state->curframe + 1] = callee;
6146 
6147 	/* callee cannot access r0, r6 - r9 for reading and has to write
6148 	 * into its own stack before reading from it.
6149 	 * callee can read/write into caller's stack
6150 	 */
6151 	init_func_state(env, callee,
6152 			/* remember the callsite, it will be used by bpf_exit */
6153 			*insn_idx /* callsite */,
6154 			state->curframe + 1 /* frameno within this callchain */,
6155 			subprog /* subprog number within this prog */);
6156 
6157 	/* Transfer references to the callee */
6158 	err = copy_reference_state(callee, caller);
6159 	if (err)
6160 		return err;
6161 
6162 	err = set_callee_state_cb(env, caller, callee, *insn_idx);
6163 	if (err)
6164 		return err;
6165 
6166 	clear_caller_saved_regs(env, caller->regs);
6167 
6168 	/* only increment it after check_reg_arg() finished */
6169 	state->curframe++;
6170 
6171 	/* and go analyze first insn of the callee */
6172 	*insn_idx = env->subprog_info[subprog].start - 1;
6173 
6174 	if (env->log.level & BPF_LOG_LEVEL) {
6175 		verbose(env, "caller:\n");
6176 		print_verifier_state(env, caller, true);
6177 		verbose(env, "callee:\n");
6178 		print_verifier_state(env, callee, true);
6179 	}
6180 	return 0;
6181 }
6182 
6183 int map_set_for_each_callback_args(struct bpf_verifier_env *env,
6184 				   struct bpf_func_state *caller,
6185 				   struct bpf_func_state *callee)
6186 {
6187 	/* bpf_for_each_map_elem(struct bpf_map *map, void *callback_fn,
6188 	 *      void *callback_ctx, u64 flags);
6189 	 * callback_fn(struct bpf_map *map, void *key, void *value,
6190 	 *      void *callback_ctx);
6191 	 */
6192 	callee->regs[BPF_REG_1] = caller->regs[BPF_REG_1];
6193 
6194 	callee->regs[BPF_REG_2].type = PTR_TO_MAP_KEY;
6195 	__mark_reg_known_zero(&callee->regs[BPF_REG_2]);
6196 	callee->regs[BPF_REG_2].map_ptr = caller->regs[BPF_REG_1].map_ptr;
6197 
6198 	callee->regs[BPF_REG_3].type = PTR_TO_MAP_VALUE;
6199 	__mark_reg_known_zero(&callee->regs[BPF_REG_3]);
6200 	callee->regs[BPF_REG_3].map_ptr = caller->regs[BPF_REG_1].map_ptr;
6201 
6202 	/* pointer to stack or null */
6203 	callee->regs[BPF_REG_4] = caller->regs[BPF_REG_3];
6204 
6205 	/* unused */
6206 	__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
6207 	return 0;
6208 }
6209 
6210 static int set_callee_state(struct bpf_verifier_env *env,
6211 			    struct bpf_func_state *caller,
6212 			    struct bpf_func_state *callee, int insn_idx)
6213 {
6214 	int i;
6215 
6216 	/* copy r1 - r5 args that callee can access.  The copy includes parent
6217 	 * pointers, which connects us up to the liveness chain
6218 	 */
6219 	for (i = BPF_REG_1; i <= BPF_REG_5; i++)
6220 		callee->regs[i] = caller->regs[i];
6221 	return 0;
6222 }
6223 
6224 static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
6225 			   int *insn_idx)
6226 {
6227 	int subprog, target_insn;
6228 
6229 	target_insn = *insn_idx + insn->imm + 1;
6230 	subprog = find_subprog(env, target_insn);
6231 	if (subprog < 0) {
6232 		verbose(env, "verifier bug. No program starts at insn %d\n",
6233 			target_insn);
6234 		return -EFAULT;
6235 	}
6236 
6237 	return __check_func_call(env, insn, insn_idx, subprog, set_callee_state);
6238 }
6239 
6240 static int set_map_elem_callback_state(struct bpf_verifier_env *env,
6241 				       struct bpf_func_state *caller,
6242 				       struct bpf_func_state *callee,
6243 				       int insn_idx)
6244 {
6245 	struct bpf_insn_aux_data *insn_aux = &env->insn_aux_data[insn_idx];
6246 	struct bpf_map *map;
6247 	int err;
6248 
6249 	if (bpf_map_ptr_poisoned(insn_aux)) {
6250 		verbose(env, "tail_call abusing map_ptr\n");
6251 		return -EINVAL;
6252 	}
6253 
6254 	map = BPF_MAP_PTR(insn_aux->map_ptr_state);
6255 	if (!map->ops->map_set_for_each_callback_args ||
6256 	    !map->ops->map_for_each_callback) {
6257 		verbose(env, "callback function not allowed for map\n");
6258 		return -ENOTSUPP;
6259 	}
6260 
6261 	err = map->ops->map_set_for_each_callback_args(env, caller, callee);
6262 	if (err)
6263 		return err;
6264 
6265 	callee->in_callback_fn = true;
6266 	return 0;
6267 }
6268 
6269 static int set_loop_callback_state(struct bpf_verifier_env *env,
6270 				   struct bpf_func_state *caller,
6271 				   struct bpf_func_state *callee,
6272 				   int insn_idx)
6273 {
6274 	/* bpf_loop(u32 nr_loops, void *callback_fn, void *callback_ctx,
6275 	 *	    u64 flags);
6276 	 * callback_fn(u32 index, void *callback_ctx);
6277 	 */
6278 	callee->regs[BPF_REG_1].type = SCALAR_VALUE;
6279 	callee->regs[BPF_REG_2] = caller->regs[BPF_REG_3];
6280 
6281 	/* unused */
6282 	__mark_reg_not_init(env, &callee->regs[BPF_REG_3]);
6283 	__mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
6284 	__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
6285 
6286 	callee->in_callback_fn = true;
6287 	return 0;
6288 }
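
/* Editorial illustration (not part of the original verifier source): the
 * register setup above mirrors the bpf_loop() contract; names below
 * ('loop_cb', 'struct loop_ctx') are hypothetical:
 *
 *   static long loop_cb(u32 index, void *ctx)   // R1 = index, R2 = ctx
 *   {
 *           struct loop_ctx *c = ctx;
 *
 *           c->sum += index;
 *           return 0;                           // 0 = continue, 1 = break
 *   }
 *   ...
 *   struct loop_ctx c = {};
 *
 *   bpf_loop(100, loop_cb, &c, 0);
 *
 * R3-R5 are not part of the callback signature, hence marked not-init above,
 * and prepare_func_exit() later enforces the [0, 1] return range for the
 * callback.
 */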
6289 
6290 static int set_timer_callback_state(struct bpf_verifier_env *env,
6291 				    struct bpf_func_state *caller,
6292 				    struct bpf_func_state *callee,
6293 				    int insn_idx)
6294 {
6295 	struct bpf_map *map_ptr = caller->regs[BPF_REG_1].map_ptr;
6296 
6297 	/* bpf_timer_set_callback(struct bpf_timer *timer, void *callback_fn);
6298 	 * callback_fn(struct bpf_map *map, void *key, void *value);
6299 	 */
6300 	callee->regs[BPF_REG_1].type = CONST_PTR_TO_MAP;
6301 	__mark_reg_known_zero(&callee->regs[BPF_REG_1]);
6302 	callee->regs[BPF_REG_1].map_ptr = map_ptr;
6303 
6304 	callee->regs[BPF_REG_2].type = PTR_TO_MAP_KEY;
6305 	__mark_reg_known_zero(&callee->regs[BPF_REG_2]);
6306 	callee->regs[BPF_REG_2].map_ptr = map_ptr;
6307 
6308 	callee->regs[BPF_REG_3].type = PTR_TO_MAP_VALUE;
6309 	__mark_reg_known_zero(&callee->regs[BPF_REG_3]);
6310 	callee->regs[BPF_REG_3].map_ptr = map_ptr;
6311 
6312 	/* unused */
6313 	__mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
6314 	__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
6315 	callee->in_async_callback_fn = true;
6316 	return 0;
6317 }
6318 
6319 static int set_find_vma_callback_state(struct bpf_verifier_env *env,
6320 				       struct bpf_func_state *caller,
6321 				       struct bpf_func_state *callee,
6322 				       int insn_idx)
6323 {
6324 	/* bpf_find_vma(struct task_struct *task, u64 addr,
6325 	 *               void *callback_fn, void *callback_ctx, u64 flags)
6326 	 * (callback_fn)(struct task_struct *task,
6327 	 *               struct vm_area_struct *vma, void *callback_ctx);
6328 	 */
6329 	callee->regs[BPF_REG_1] = caller->regs[BPF_REG_1];
6330 
6331 	callee->regs[BPF_REG_2].type = PTR_TO_BTF_ID;
6332 	__mark_reg_known_zero(&callee->regs[BPF_REG_2]);
6333 	callee->regs[BPF_REG_2].btf = btf_vmlinux;
6334 	callee->regs[BPF_REG_2].btf_id = btf_tracing_ids[BTF_TRACING_TYPE_VMA];
6335 
6336 	/* pointer to stack or null */
6337 	callee->regs[BPF_REG_3] = caller->regs[BPF_REG_4];
6338 
6339 	/* unused */
6340 	__mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
6341 	__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
6342 	callee->in_callback_fn = true;
6343 	return 0;
6344 }
6345 
6346 static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
6347 {
6348 	struct bpf_verifier_state *state = env->cur_state;
6349 	struct bpf_func_state *caller, *callee;
6350 	struct bpf_reg_state *r0;
6351 	int err;
6352 
6353 	callee = state->frame[state->curframe];
6354 	r0 = &callee->regs[BPF_REG_0];
6355 	if (r0->type == PTR_TO_STACK) {
6356 		/* technically it's ok to return caller's stack pointer
6357 		 * (or caller's caller's pointer) back to the caller,
6358 		 * since these pointers are valid. Only current stack
6359 		 * pointer will be invalid as soon as function exits,
6360 		 * but let's be conservative
6361 		 */
6362 		verbose(env, "cannot return stack pointer to the caller\n");
6363 		return -EINVAL;
6364 	}
6365 
6366 	state->curframe--;
6367 	caller = state->frame[state->curframe];
6368 	if (callee->in_callback_fn) {
6369 		/* enforce R0 return value range [0, 1]. */
6370 		struct tnum range = tnum_range(0, 1);
6371 
6372 		if (r0->type != SCALAR_VALUE) {
6373 			verbose(env, "R0 not a scalar value\n");
6374 			return -EACCES;
6375 		}
6376 		if (!tnum_in(range, r0->var_off)) {
6377 			verbose_invalid_scalar(env, r0, &range, "callback return", "R0");
6378 			return -EINVAL;
6379 		}
6380 	} else {
6381 		/* return to the caller whatever r0 had in the callee */
6382 		caller->regs[BPF_REG_0] = *r0;
6383 	}
6384 
6385 	/* Transfer references to the caller */
6386 	err = copy_reference_state(caller, callee);
6387 	if (err)
6388 		return err;
6389 
6390 	*insn_idx = callee->callsite + 1;
6391 	if (env->log.level & BPF_LOG_LEVEL) {
6392 		verbose(env, "returning from callee:\n");
6393 		print_verifier_state(env, callee, true);
6394 		verbose(env, "to caller at %d:\n", *insn_idx);
6395 		print_verifier_state(env, caller, true);
6396 	}
6397 	/* clear everything in the callee */
6398 	free_func_state(callee);
6399 	state->frame[state->curframe + 1] = NULL;
6400 	return 0;
6401 }
6402 
6403 static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type,
6404 				   int func_id,
6405 				   struct bpf_call_arg_meta *meta)
6406 {
6407 	struct bpf_reg_state *ret_reg = &regs[BPF_REG_0];
6408 
6409 	if (ret_type != RET_INTEGER ||
6410 	    (func_id != BPF_FUNC_get_stack &&
6411 	     func_id != BPF_FUNC_get_task_stack &&
6412 	     func_id != BPF_FUNC_probe_read_str &&
6413 	     func_id != BPF_FUNC_probe_read_kernel_str &&
6414 	     func_id != BPF_FUNC_probe_read_user_str))
6415 		return;
6416 
6417 	ret_reg->smax_value = meta->msize_max_value;
6418 	ret_reg->s32_max_value = meta->msize_max_value;
6419 	ret_reg->smin_value = -MAX_ERRNO;
6420 	ret_reg->s32_min_value = -MAX_ERRNO;
6421 	__reg_deduce_bounds(ret_reg);
6422 	__reg_bound_offset(ret_reg);
6423 	__update_reg_bounds(ret_reg);
6424 }
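
/* Editorial illustration (not part of the original verifier source): the
 * refinement above is what lets patterns like the following pass later
 * bounds checks (names hypothetical):
 *
 *   char stack[256];
 *   long n = bpf_get_stack(ctx, stack, sizeof(stack), 0);
 *
 *   if (n < 0)
 *           return 0;
 *   // here R0 is known to be in [0, 256]: smax_value was clamped to
 *   // meta->msize_max_value (the buffer size) and smin_value to -MAX_ERRNO,
 *   // so 'n' can be used as a size for a subsequent helper call.
 *
 * Without the refinement, R0 would be a fully unknown scalar and such reuse
 * of the return value would be rejected as unbounded.
 */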
6425 
6426 static int
6427 record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
6428 		int func_id, int insn_idx)
6429 {
6430 	struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
6431 	struct bpf_map *map = meta->map_ptr;
6432 
6433 	if (func_id != BPF_FUNC_tail_call &&
6434 	    func_id != BPF_FUNC_map_lookup_elem &&
6435 	    func_id != BPF_FUNC_map_update_elem &&
6436 	    func_id != BPF_FUNC_map_delete_elem &&
6437 	    func_id != BPF_FUNC_map_push_elem &&
6438 	    func_id != BPF_FUNC_map_pop_elem &&
6439 	    func_id != BPF_FUNC_map_peek_elem &&
6440 	    func_id != BPF_FUNC_for_each_map_elem &&
6441 	    func_id != BPF_FUNC_redirect_map)
6442 		return 0;
6443 
6444 	if (map == NULL) {
6445 		verbose(env, "kernel subsystem misconfigured verifier\n");
6446 		return -EINVAL;
6447 	}
6448 
6449 	/* In case of read-only, some additional restrictions
6450 	 * need to be applied in order to prevent altering the
6451 	 * state of the map from program side.
6452 	 */
6453 	if ((map->map_flags & BPF_F_RDONLY_PROG) &&
6454 	    (func_id == BPF_FUNC_map_delete_elem ||
6455 	     func_id == BPF_FUNC_map_update_elem ||
6456 	     func_id == BPF_FUNC_map_push_elem ||
6457 	     func_id == BPF_FUNC_map_pop_elem)) {
6458 		verbose(env, "write into map forbidden\n");
6459 		return -EACCES;
6460 	}
6461 
6462 	if (!BPF_MAP_PTR(aux->map_ptr_state))
6463 		bpf_map_ptr_store(aux, meta->map_ptr,
6464 				  !meta->map_ptr->bypass_spec_v1);
6465 	else if (BPF_MAP_PTR(aux->map_ptr_state) != meta->map_ptr)
6466 		bpf_map_ptr_store(aux, BPF_MAP_PTR_POISON,
6467 				  !meta->map_ptr->bypass_spec_v1);
6468 	return 0;
6469 }
6470 
6471 static int
6472 record_func_key(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
6473 		int func_id, int insn_idx)
6474 {
6475 	struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
6476 	struct bpf_reg_state *regs = cur_regs(env), *reg;
6477 	struct bpf_map *map = meta->map_ptr;
6478 	struct tnum range;
6479 	u64 val;
6480 	int err;
6481 
6482 	if (func_id != BPF_FUNC_tail_call)
6483 		return 0;
6484 	if (!map || map->map_type != BPF_MAP_TYPE_PROG_ARRAY) {
6485 		verbose(env, "kernel subsystem misconfigured verifier\n");
6486 		return -EINVAL;
6487 	}
6488 
6489 	range = tnum_range(0, map->max_entries - 1);
6490 	reg = &regs[BPF_REG_3];
6491 
6492 	if (!register_is_const(reg) || !tnum_in(range, reg->var_off)) {
6493 		bpf_map_key_store(aux, BPF_MAP_KEY_POISON);
6494 		return 0;
6495 	}
6496 
6497 	err = mark_chain_precision(env, BPF_REG_3);
6498 	if (err)
6499 		return err;
6500 
6501 	val = reg->var_off.value;
6502 	if (bpf_map_key_unseen(aux))
6503 		bpf_map_key_store(aux, val);
6504 	else if (!bpf_map_key_poisoned(aux) &&
6505 		  bpf_map_key_immediate(aux) != val)
6506 		bpf_map_key_store(aux, BPF_MAP_KEY_POISON);
6507 	return 0;
6508 }
6509 
6510 static int check_reference_leak(struct bpf_verifier_env *env)
6511 {
6512 	struct bpf_func_state *state = cur_func(env);
6513 	int i;
6514 
6515 	for (i = 0; i < state->acquired_refs; i++) {
6516 		verbose(env, "Unreleased reference id=%d alloc_insn=%d\n",
6517 			state->refs[i].id, state->refs[i].insn_idx);
6518 	}
6519 	return state->acquired_refs ? -EINVAL : 0;
6520 }
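
/* Editorial illustration (not part of the original verifier source): a
 * reference leak here is, for example, a program that acquires a socket and
 * exits without releasing it on some path (names hypothetical):
 *
 *   struct bpf_sock *sk;
 *
 *   sk = bpf_sk_lookup_tcp(ctx, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
 *   if (!sk)
 *           return 0;
 *   return 0;        // leak: missing bpf_sk_release(sk) on this path
 *
 * Each acquired reference is tracked in state->refs[] together with the insn
 * that created it, which is what the message above reports.
 */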
6521 
6522 static int check_bpf_snprintf_call(struct bpf_verifier_env *env,
6523 				   struct bpf_reg_state *regs)
6524 {
6525 	struct bpf_reg_state *fmt_reg = &regs[BPF_REG_3];
6526 	struct bpf_reg_state *data_len_reg = &regs[BPF_REG_5];
6527 	struct bpf_map *fmt_map = fmt_reg->map_ptr;
6528 	int err, fmt_map_off, num_args;
6529 	u64 fmt_addr;
6530 	char *fmt;
6531 
6532 	/* data must be an array of u64 */
6533 	if (data_len_reg->var_off.value % 8)
6534 		return -EINVAL;
6535 	num_args = data_len_reg->var_off.value / 8;
6536 
6537 	/* fmt being ARG_PTR_TO_CONST_STR guarantees that var_off is const
6538 	 * and map_direct_value_addr is set.
6539 	 */
6540 	fmt_map_off = fmt_reg->off + fmt_reg->var_off.value;
6541 	err = fmt_map->ops->map_direct_value_addr(fmt_map, &fmt_addr,
6542 						  fmt_map_off);
6543 	if (err) {
6544 		verbose(env, "verifier bug\n");
6545 		return -EFAULT;
6546 	}
6547 	fmt = (char *)(long)fmt_addr + fmt_map_off;
6548 
6549 	/* We are also guaranteed that fmt+fmt_map_off is NULL terminated, so we
6550 	 * can focus on validating the format specifiers.
6551 	 */
6552 	err = bpf_bprintf_prepare(fmt, UINT_MAX, NULL, NULL, num_args);
6553 	if (err < 0)
6554 		verbose(env, "Invalid format string\n");
6555 
6556 	return err;
6557 }
6558 
6559 static int check_get_func_ip(struct bpf_verifier_env *env)
6560 {
6561 	enum bpf_prog_type type = resolve_prog_type(env->prog);
6562 	int func_id = BPF_FUNC_get_func_ip;
6563 
6564 	if (type == BPF_PROG_TYPE_TRACING) {
6565 		if (!bpf_prog_has_trampoline(env->prog)) {
6566 			verbose(env, "func %s#%d supported only for fentry/fexit/fmod_ret programs\n",
6567 				func_id_name(func_id), func_id);
6568 			return -ENOTSUPP;
6569 		}
6570 		return 0;
6571 	} else if (type == BPF_PROG_TYPE_KPROBE) {
6572 		return 0;
6573 	}
6574 
6575 	verbose(env, "func %s#%d not supported for program type %d\n",
6576 		func_id_name(func_id), func_id, type);
6577 	return -ENOTSUPP;
6578 }
6579 
6580 static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
6581 			     int *insn_idx_p)
6582 {
6583 	const struct bpf_func_proto *fn = NULL;
6584 	enum bpf_return_type ret_type;
6585 	enum bpf_type_flag ret_flag;
6586 	struct bpf_reg_state *regs;
6587 	struct bpf_call_arg_meta meta;
6588 	int insn_idx = *insn_idx_p;
6589 	bool changes_data;
6590 	int i, err, func_id;
6591 
6592 	/* find function prototype */
6593 	func_id = insn->imm;
6594 	if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) {
6595 		verbose(env, "invalid func %s#%d\n", func_id_name(func_id),
6596 			func_id);
6597 		return -EINVAL;
6598 	}
6599 
6600 	if (env->ops->get_func_proto)
6601 		fn = env->ops->get_func_proto(func_id, env->prog);
6602 	if (!fn) {
6603 		verbose(env, "unknown func %s#%d\n", func_id_name(func_id),
6604 			func_id);
6605 		return -EINVAL;
6606 	}
6607 
6608 	/* eBPF programs must be GPL compatible to use GPL-ed functions */
6609 	if (!env->prog->gpl_compatible && fn->gpl_only) {
6610 		verbose(env, "cannot call GPL-restricted function from non-GPL compatible program\n");
6611 		return -EINVAL;
6612 	}
6613 
6614 	if (fn->allowed && !fn->allowed(env->prog)) {
6615 		verbose(env, "helper call is not allowed in probe\n");
6616 		return -EINVAL;
6617 	}
6618 
6619 	/* With LD_ABS/IND some JITs save/restore skb from r1. */
6620 	changes_data = bpf_helper_changes_pkt_data(fn->func);
6621 	if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) {
6622 		verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n",
6623 			func_id_name(func_id), func_id);
6624 		return -EINVAL;
6625 	}
6626 
6627 	memset(&meta, 0, sizeof(meta));
6628 	meta.pkt_access = fn->pkt_access;
6629 
6630 	err = check_func_proto(fn, func_id);
6631 	if (err) {
6632 		verbose(env, "kernel subsystem misconfigured func %s#%d\n",
6633 			func_id_name(func_id), func_id);
6634 		return err;
6635 	}
6636 
6637 	meta.func_id = func_id;
6638 	/* check args */
6639 	for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++) {
6640 		err = check_func_arg(env, i, &meta, fn);
6641 		if (err)
6642 			return err;
6643 	}
6644 
6645 	err = record_func_map(env, &meta, func_id, insn_idx);
6646 	if (err)
6647 		return err;
6648 
6649 	err = record_func_key(env, &meta, func_id, insn_idx);
6650 	if (err)
6651 		return err;
6652 
6653 	/* Mark slots with STACK_MISC in case of raw mode; stack offset
6654 	 * is inferred from register state.
6655 	 */
6656 	for (i = 0; i < meta.access_size; i++) {
6657 		err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B,
6658 				       BPF_WRITE, -1, false);
6659 		if (err)
6660 			return err;
6661 	}
6662 
6663 	if (is_release_function(func_id)) {
6664 		err = release_reference(env, meta.ref_obj_id);
6665 		if (err) {
6666 			verbose(env, "func %s#%d reference has not been acquired before\n",
6667 				func_id_name(func_id), func_id);
6668 			return err;
6669 		}
6670 	}
6671 
6672 	regs = cur_regs(env);
6673 
6674 	switch (func_id) {
6675 	case BPF_FUNC_tail_call:
6676 		err = check_reference_leak(env);
6677 		if (err) {
6678 			verbose(env, "tail_call would lead to reference leak\n");
6679 			return err;
6680 		}
6681 		break;
6682 	case BPF_FUNC_get_local_storage:
6683 		/* check that flags argument in get_local_storage(map, flags) is 0;
6684 		 * this is required because get_local_storage() can't return an error.
6685 		 */
6686 		if (!register_is_null(&regs[BPF_REG_2])) {
6687 			verbose(env, "get_local_storage() doesn't support non-zero flags\n");
6688 			return -EINVAL;
6689 		}
6690 		break;
6691 	case BPF_FUNC_for_each_map_elem:
6692 		err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
6693 					set_map_elem_callback_state);
6694 		break;
6695 	case BPF_FUNC_timer_set_callback:
6696 		err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
6697 					set_timer_callback_state);
6698 		break;
6699 	case BPF_FUNC_find_vma:
6700 		err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
6701 					set_find_vma_callback_state);
6702 		break;
6703 	case BPF_FUNC_snprintf:
6704 		err = check_bpf_snprintf_call(env, regs);
6705 		break;
6706 	case BPF_FUNC_loop:
6707 		err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
6708 					set_loop_callback_state);
6709 		break;
6710 	}
6711 
6712 	if (err)
6713 		return err;
6714 
6715 	/* reset caller saved regs */
6716 	for (i = 0; i < CALLER_SAVED_REGS; i++) {
6717 		mark_reg_not_init(env, regs, caller_saved[i]);
6718 		check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
6719 	}
6720 
6721 	/* helper call returns 64-bit value. */
6722 	regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG;
6723 
6724 	/* update return register (already marked as written above) */
6725 	ret_type = fn->ret_type;
6726 	ret_flag = type_flag(fn->ret_type);
6727 	if (ret_type == RET_INTEGER) {
6728 		/* sets type to SCALAR_VALUE */
6729 		mark_reg_unknown(env, regs, BPF_REG_0);
6730 	} else if (ret_type == RET_VOID) {
6731 		regs[BPF_REG_0].type = NOT_INIT;
6732 	} else if (base_type(ret_type) == RET_PTR_TO_MAP_VALUE) {
6733 		/* There is no offset yet applied, variable or fixed */
6734 		mark_reg_known_zero(env, regs, BPF_REG_0);
6735 		/* remember map_ptr, so that check_map_access()
6736 		 * can check 'value_size' boundary of memory access
6737 		 * to map element returned from bpf_map_lookup_elem()
6738 		 */
6739 		if (meta.map_ptr == NULL) {
6740 			verbose(env,
6741 				"kernel subsystem misconfigured verifier\n");
6742 			return -EINVAL;
6743 		}
6744 		regs[BPF_REG_0].map_ptr = meta.map_ptr;
6745 		regs[BPF_REG_0].map_uid = meta.map_uid;
6746 		regs[BPF_REG_0].type = PTR_TO_MAP_VALUE | ret_flag;
6747 		if (!type_may_be_null(ret_type) &&
6748 		    map_value_has_spin_lock(meta.map_ptr)) {
6749 			regs[BPF_REG_0].id = ++env->id_gen;
6750 		}
6751 	} else if (base_type(ret_type) == RET_PTR_TO_SOCKET) {
6752 		mark_reg_known_zero(env, regs, BPF_REG_0);
6753 		regs[BPF_REG_0].type = PTR_TO_SOCKET | ret_flag;
6754 	} else if (base_type(ret_type) == RET_PTR_TO_SOCK_COMMON) {
6755 		mark_reg_known_zero(env, regs, BPF_REG_0);
6756 		regs[BPF_REG_0].type = PTR_TO_SOCK_COMMON | ret_flag;
6757 	} else if (base_type(ret_type) == RET_PTR_TO_TCP_SOCK) {
6758 		mark_reg_known_zero(env, regs, BPF_REG_0);
6759 		regs[BPF_REG_0].type = PTR_TO_TCP_SOCK | ret_flag;
6760 	} else if (base_type(ret_type) == RET_PTR_TO_ALLOC_MEM) {
6761 		mark_reg_known_zero(env, regs, BPF_REG_0);
6762 		regs[BPF_REG_0].type = PTR_TO_MEM | ret_flag;
6763 		regs[BPF_REG_0].mem_size = meta.mem_size;
6764 	} else if (base_type(ret_type) == RET_PTR_TO_MEM_OR_BTF_ID) {
6765 		const struct btf_type *t;
6766 
6767 		mark_reg_known_zero(env, regs, BPF_REG_0);
6768 		t = btf_type_skip_modifiers(meta.ret_btf, meta.ret_btf_id, NULL);
6769 		if (!btf_type_is_struct(t)) {
6770 			u32 tsize;
6771 			const struct btf_type *ret;
6772 			const char *tname;
6773 
6774 			/* resolve the type size of ksym. */
6775 			ret = btf_resolve_size(meta.ret_btf, t, &tsize);
6776 			if (IS_ERR(ret)) {
6777 				tname = btf_name_by_offset(meta.ret_btf, t->name_off);
6778 				verbose(env, "unable to resolve the size of type '%s': %ld\n",
6779 					tname, PTR_ERR(ret));
6780 				return -EINVAL;
6781 			}
6782 			regs[BPF_REG_0].type = PTR_TO_MEM | ret_flag;
6783 			regs[BPF_REG_0].mem_size = tsize;
6784 		} else {
6785 			/* MEM_RDONLY may be carried from ret_flag, but it
6786 			 * doesn't apply on PTR_TO_BTF_ID. Fold it, otherwise
6787 			 * it will confuse the check of PTR_TO_BTF_ID in
6788 			 * check_mem_access().
6789 			 */
6790 			ret_flag &= ~MEM_RDONLY;
6791 
6792 			regs[BPF_REG_0].type = PTR_TO_BTF_ID | ret_flag;
6793 			regs[BPF_REG_0].btf = meta.ret_btf;
6794 			regs[BPF_REG_0].btf_id = meta.ret_btf_id;
6795 		}
6796 	} else if (base_type(ret_type) == RET_PTR_TO_BTF_ID) {
6797 		int ret_btf_id;
6798 
6799 		mark_reg_known_zero(env, regs, BPF_REG_0);
6800 		regs[BPF_REG_0].type = PTR_TO_BTF_ID | ret_flag;
6801 		ret_btf_id = *fn->ret_btf_id;
6802 		if (ret_btf_id == 0) {
6803 			verbose(env, "invalid return type %u of func %s#%d\n",
6804 				base_type(ret_type), func_id_name(func_id),
6805 				func_id);
6806 			return -EINVAL;
6807 		}
6808 		/* current BPF helper definitions are only coming from
6809 		 * built-in code with type IDs from vmlinux BTF
6810 		 */
6811 		regs[BPF_REG_0].btf = btf_vmlinux;
6812 		regs[BPF_REG_0].btf_id = ret_btf_id;
6813 	} else {
6814 		verbose(env, "unknown return type %u of func %s#%d\n",
6815 			base_type(ret_type), func_id_name(func_id), func_id);
6816 		return -EINVAL;
6817 	}
6818 
6819 	if (type_may_be_null(regs[BPF_REG_0].type))
6820 		regs[BPF_REG_0].id = ++env->id_gen;
6821 
6822 	if (is_ptr_cast_function(func_id)) {
6823 		/* For release_reference() */
6824 		regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id;
6825 	} else if (is_acquire_function(func_id, meta.map_ptr)) {
6826 		int id = acquire_reference_state(env, insn_idx);
6827 
6828 		if (id < 0)
6829 			return id;
6830 		/* For mark_ptr_or_null_reg() */
6831 		regs[BPF_REG_0].id = id;
6832 		/* For release_reference() */
6833 		regs[BPF_REG_0].ref_obj_id = id;
6834 	}
6835 
6836 	do_refine_retval_range(regs, fn->ret_type, func_id, &meta);
6837 
6838 	err = check_map_func_compatibility(env, meta.map_ptr, func_id);
6839 	if (err)
6840 		return err;
6841 
6842 	if ((func_id == BPF_FUNC_get_stack ||
6843 	     func_id == BPF_FUNC_get_task_stack) &&
6844 	    !env->prog->has_callchain_buf) {
6845 		const char *err_str;
6846 
6847 #ifdef CONFIG_PERF_EVENTS
6848 		err = get_callchain_buffers(sysctl_perf_event_max_stack);
6849 		err_str = "cannot get callchain buffer for func %s#%d\n";
6850 #else
6851 		err = -ENOTSUPP;
6852 		err_str = "func %s#%d not supported without CONFIG_PERF_EVENTS\n";
6853 #endif
6854 		if (err) {
6855 			verbose(env, err_str, func_id_name(func_id), func_id);
6856 			return err;
6857 		}
6858 
6859 		env->prog->has_callchain_buf = true;
6860 	}
6861 
6862 	if (func_id == BPF_FUNC_get_stackid || func_id == BPF_FUNC_get_stack)
6863 		env->prog->call_get_stack = true;
6864 
6865 	if (func_id == BPF_FUNC_get_func_ip) {
6866 		if (check_get_func_ip(env))
6867 			return -ENOTSUPP;
6868 		env->prog->call_get_func_ip = true;
6869 	}
6870 
6871 	if (changes_data)
6872 		clear_all_pkt_pointers(env);
6873 	return 0;
6874 }
6875 
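/* A short worked example of the return-type handling above, with
 * illustrative register state: for a helper whose ret_type is
 * RET_PTR_TO_MAP_VALUE_OR_NULL (e.g. bpf_map_lookup_elem()), R0 ends up
 * roughly as
 *
 *   r0.type    = PTR_TO_MAP_VALUE | PTR_MAYBE_NULL
 *   r0.map_ptr = meta.map_ptr    (used later by check_map_access())
 *   r0.id      = ++env->id_gen   (ties all copies of this pointer together)
 *
 * so that a subsequent 'if r0 == 0' check lets mark_ptr_or_null_reg()
 * drop PTR_MAYBE_NULL on the non-NULL path before the value is accessed.
 */
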
6876 /* mark_btf_func_reg_size() is used when the reg size is determined by
6877  * the BTF func_proto's return value size and argument.
6878  */
6879 static void mark_btf_func_reg_size(struct bpf_verifier_env *env, u32 regno,
6880 				   size_t reg_size)
6881 {
6882 	struct bpf_reg_state *reg = &cur_regs(env)[regno];
6883 
6884 	if (regno == BPF_REG_0) {
6885 		/* Function return value */
6886 		reg->live |= REG_LIVE_WRITTEN;
6887 		reg->subreg_def = reg_size == sizeof(u64) ?
6888 			DEF_NOT_SUBREG : env->insn_idx + 1;
6889 	} else {
6890 		/* Function argument */
6891 		if (reg_size == sizeof(u64)) {
6892 			mark_insn_zext(env, reg);
6893 			mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
6894 		} else {
6895 			mark_reg_read(env, reg, reg->parent, REG_LIVE_READ32);
6896 		}
6897 	}
6898 }
6899 
6900 static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
6901 			    int *insn_idx_p)
6902 {
6903 	const struct btf_type *t, *func, *func_proto, *ptr_type;
6904 	struct bpf_reg_state *regs = cur_regs(env);
6905 	const char *func_name, *ptr_type_name;
6906 	u32 i, nargs, func_id, ptr_type_id;
6907 	int err, insn_idx = *insn_idx_p;
6908 	const struct btf_param *args;
6909 	struct btf *desc_btf;
6910 	bool acq;
6911 
6912 	/* skip for now, but return error when we find this in fixup_kfunc_call */
6913 	if (!insn->imm)
6914 		return 0;
6915 
6916 	desc_btf = find_kfunc_desc_btf(env, insn->imm, insn->off);
6917 	if (IS_ERR(desc_btf))
6918 		return PTR_ERR(desc_btf);
6919 
6920 	func_id = insn->imm;
6921 	func = btf_type_by_id(desc_btf, func_id);
6922 	func_name = btf_name_by_offset(desc_btf, func->name_off);
6923 	func_proto = btf_type_by_id(desc_btf, func->type);
6924 
6925 	if (!btf_kfunc_id_set_contains(desc_btf, resolve_prog_type(env->prog),
6926 				      BTF_KFUNC_TYPE_CHECK, func_id)) {
6927 		verbose(env, "calling kernel function %s is not allowed\n",
6928 			func_name);
6929 		return -EACCES;
6930 	}
6931 
6932 	acq = btf_kfunc_id_set_contains(desc_btf, resolve_prog_type(env->prog),
6933 					BTF_KFUNC_TYPE_ACQUIRE, func_id);
6934 
6935 	/* Check the arguments */
6936 	err = btf_check_kfunc_arg_match(env, desc_btf, func_id, regs);
6937 	if (err < 0)
6938 		return err;
6939 	/* In case of release function, we get register number of refcounted
6940 	 * PTR_TO_BTF_ID back from btf_check_kfunc_arg_match, do the release now
6941 	 */
6942 	if (err) {
6943 		err = release_reference(env, regs[err].ref_obj_id);
6944 		if (err) {
6945 			verbose(env, "kfunc %s#%d reference has not been acquired before\n",
6946 				func_name, func_id);
6947 			return err;
6948 		}
6949 	}
6950 
6951 	for (i = 0; i < CALLER_SAVED_REGS; i++)
6952 		mark_reg_not_init(env, regs, caller_saved[i]);
6953 
6954 	/* Check return type */
6955 	t = btf_type_skip_modifiers(desc_btf, func_proto->type, NULL);
6956 
6957 	if (acq && !btf_type_is_ptr(t)) {
6958 		verbose(env, "acquire kernel function does not return PTR_TO_BTF_ID\n");
6959 		return -EINVAL;
6960 	}
6961 
6962 	if (btf_type_is_scalar(t)) {
6963 		mark_reg_unknown(env, regs, BPF_REG_0);
6964 		mark_btf_func_reg_size(env, BPF_REG_0, t->size);
6965 	} else if (btf_type_is_ptr(t)) {
6966 		ptr_type = btf_type_skip_modifiers(desc_btf, t->type,
6967 						   &ptr_type_id);
6968 		if (!btf_type_is_struct(ptr_type)) {
6969 			ptr_type_name = btf_name_by_offset(desc_btf,
6970 							   ptr_type->name_off);
6971 			verbose(env, "kernel function %s returns pointer type %s %s is not supported\n",
6972 				func_name, btf_type_str(ptr_type),
6973 				ptr_type_name);
6974 			return -EINVAL;
6975 		}
6976 		mark_reg_known_zero(env, regs, BPF_REG_0);
6977 		regs[BPF_REG_0].btf = desc_btf;
6978 		regs[BPF_REG_0].type = PTR_TO_BTF_ID;
6979 		regs[BPF_REG_0].btf_id = ptr_type_id;
6980 		if (btf_kfunc_id_set_contains(desc_btf, resolve_prog_type(env->prog),
6981 					      BTF_KFUNC_TYPE_RET_NULL, func_id)) {
6982 			regs[BPF_REG_0].type |= PTR_MAYBE_NULL;
6983 			/* For mark_ptr_or_null_reg, see 93c230e3f5bd6 */
6984 			regs[BPF_REG_0].id = ++env->id_gen;
6985 		}
6986 		mark_btf_func_reg_size(env, BPF_REG_0, sizeof(void *));
6987 		if (acq) {
6988 			int id = acquire_reference_state(env, insn_idx);
6989 
6990 			if (id < 0)
6991 				return id;
6992 			regs[BPF_REG_0].id = id;
6993 			regs[BPF_REG_0].ref_obj_id = id;
6994 		}
6995 	} /* else { add_kfunc_call() ensures it is btf_type_is_void(t) } */
6996 
6997 	nargs = btf_type_vlen(func_proto);
6998 	args = (const struct btf_param *)(func_proto + 1);
6999 	for (i = 0; i < nargs; i++) {
7000 		u32 regno = i + 1;
7001 
7002 		t = btf_type_skip_modifiers(desc_btf, args[i].type, NULL);
7003 		if (btf_type_is_ptr(t))
7004 			mark_btf_func_reg_size(env, regno, sizeof(void *));
7005 		else
7006 			/* scalar. ensured by btf_check_kfunc_arg_match() */
7007 			mark_btf_func_reg_size(env, regno, t->size);
7008 	}
7009 
7010 	return 0;
7011 }
7012 
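/* For illustration only (the register state below is sketched, not taken
 * from a specific kfunc): if the BTF id set marks the callee as both
 * ACQUIRE and RET_NULL, the code above leaves
 *
 *   r0.type               = PTR_TO_BTF_ID | PTR_MAYBE_NULL
 *   r0.btf / r0.btf_id    = the returned struct type in desc_btf
 *   r0.id = r0.ref_obj_id = id of the newly acquired reference state
 *
 * so the program must NULL-check r0 and eventually pass it to the matching
 * release kfunc, otherwise the reference leak check rejects the program.
 */
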
7013 static bool signed_add_overflows(s64 a, s64 b)
7014 {
7015 	/* Do the add in u64, where overflow is well-defined */
7016 	s64 res = (s64)((u64)a + (u64)b);
7017 
7018 	if (b < 0)
7019 		return res > a;
7020 	return res < a;
7021 }
7022 
7023 static bool signed_add32_overflows(s32 a, s32 b)
7024 {
7025 	/* Do the add in u32, where overflow is well-defined */
7026 	s32 res = (s32)((u32)a + (u32)b);
7027 
7028 	if (b < 0)
7029 		return res > a;
7030 	return res < a;
7031 }
7032 
7033 static bool signed_sub_overflows(s64 a, s64 b)
7034 {
7035 	/* Do the sub in u64, where overflow is well-defined */
7036 	s64 res = (s64)((u64)a - (u64)b);
7037 
7038 	if (b < 0)
7039 		return res < a;
7040 	return res > a;
7041 }
7042 
7043 static bool signed_sub32_overflows(s32 a, s32 b)
7044 {
7045 	/* Do the sub in u32, where overflow is well-defined */
7046 	s32 res = (s32)((u32)a - (u32)b);
7047 
7048 	if (b < 0)
7049 		return res < a;
7050 	return res > a;
7051 }
7052 
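/* Worked example for the overflow helpers above (illustrative values):
 * with a = S64_MAX and b = 1 the u64 addition wraps to S64_MIN and, since
 * b >= 0, the 'res < a' test fires; with a = -1 and b = S64_MIN the sum
 * wraps to S64_MAX and, since b < 0, the 'res > a' test fires.  Callers
 * use these checks to clamp the affected bound to S64_MIN/S64_MAX (or the
 * 32-bit equivalents) rather than propagating a wrapped value.
 */
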
7053 static bool check_reg_sane_offset(struct bpf_verifier_env *env,
7054 				  const struct bpf_reg_state *reg,
7055 				  enum bpf_reg_type type)
7056 {
7057 	bool known = tnum_is_const(reg->var_off);
7058 	s64 val = reg->var_off.value;
7059 	s64 smin = reg->smin_value;
7060 
7061 	if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) {
7062 		verbose(env, "math between %s pointer and %lld is not allowed\n",
7063 			reg_type_str(env, type), val);
7064 		return false;
7065 	}
7066 
7067 	if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) {
7068 		verbose(env, "%s pointer offset %d is not allowed\n",
7069 			reg_type_str(env, type), reg->off);
7070 		return false;
7071 	}
7072 
7073 	if (smin == S64_MIN) {
7074 		verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n",
7075 			reg_type_str(env, type));
7076 		return false;
7077 	}
7078 
7079 	if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) {
7080 		verbose(env, "value %lld makes %s pointer be out of bounds\n",
7081 			smin, reg_type_str(env, type));
7082 		return false;
7083 	}
7084 
7085 	return true;
7086 }
7087 
7088 static struct bpf_insn_aux_data *cur_aux(struct bpf_verifier_env *env)
7089 {
7090 	return &env->insn_aux_data[env->insn_idx];
7091 }
7092 
7093 enum {
7094 	REASON_BOUNDS	= -1,
7095 	REASON_TYPE	= -2,
7096 	REASON_PATHS	= -3,
7097 	REASON_LIMIT	= -4,
7098 	REASON_STACK	= -5,
7099 };
7100 
7101 static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
7102 			      u32 *alu_limit, bool mask_to_left)
7103 {
7104 	u32 max = 0, ptr_limit = 0;
7105 
7106 	switch (ptr_reg->type) {
7107 	case PTR_TO_STACK:
7108 		/* Offset 0 is out-of-bounds, but acceptable start for the
7109 		 * left direction, see BPF_REG_FP. Also, unknown scalar
7110 		 * offset where we would need to deal with min/max bounds is
7111 		 * currently prohibited for unprivileged.
7112 		 */
7113 		max = MAX_BPF_STACK + mask_to_left;
7114 		ptr_limit = -(ptr_reg->var_off.value + ptr_reg->off);
7115 		break;
7116 	case PTR_TO_MAP_VALUE:
7117 		max = ptr_reg->map_ptr->value_size;
7118 		ptr_limit = (mask_to_left ?
7119 			     ptr_reg->smin_value :
7120 			     ptr_reg->umax_value) + ptr_reg->off;
7121 		break;
7122 	default:
7123 		return REASON_TYPE;
7124 	}
7125 
7126 	if (ptr_limit >= max)
7127 		return REASON_LIMIT;
7128 	*alu_limit = ptr_limit;
7129 	return 0;
7130 }
7131 
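/* Two worked examples for the limit computation above (register values
 * are illustrative): a PTR_TO_STACK reg at fp-16 (off = -16, constant
 * var_off of 0) yields ptr_limit = 16, well under MAX_BPF_STACK, so 16
 * becomes the alu_limit used for the runtime masking.  A PTR_TO_MAP_VALUE
 * reg with off = 0 and umax_value = 32 into a 64-byte value yields
 * ptr_limit = 32 when not masking to the left, again under value_size.
 */
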
7132 static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env,
7133 				    const struct bpf_insn *insn)
7134 {
7135 	return env->bypass_spec_v1 || BPF_SRC(insn->code) == BPF_K;
7136 }
7137 
7138 static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux,
7139 				       u32 alu_state, u32 alu_limit)
7140 {
7141 	/* If we arrived here from different branches with different
7142 	 * state or limits to sanitize, then this won't work.
7143 	 */
7144 	if (aux->alu_state &&
7145 	    (aux->alu_state != alu_state ||
7146 	     aux->alu_limit != alu_limit))
7147 		return REASON_PATHS;
7148 
7149 	/* Corresponding fixup done in do_misc_fixups(). */
7150 	aux->alu_state = alu_state;
7151 	aux->alu_limit = alu_limit;
7152 	return 0;
7153 }
7154 
7155 static int sanitize_val_alu(struct bpf_verifier_env *env,
7156 			    struct bpf_insn *insn)
7157 {
7158 	struct bpf_insn_aux_data *aux = cur_aux(env);
7159 
7160 	if (can_skip_alu_sanitation(env, insn))
7161 		return 0;
7162 
7163 	return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0);
7164 }
7165 
7166 static bool sanitize_needed(u8 opcode)
7167 {
7168 	return opcode == BPF_ADD || opcode == BPF_SUB;
7169 }
7170 
7171 struct bpf_sanitize_info {
7172 	struct bpf_insn_aux_data aux;
7173 	bool mask_to_left;
7174 };
7175 
7176 static struct bpf_verifier_state *
7177 sanitize_speculative_path(struct bpf_verifier_env *env,
7178 			  const struct bpf_insn *insn,
7179 			  u32 next_idx, u32 curr_idx)
7180 {
7181 	struct bpf_verifier_state *branch;
7182 	struct bpf_reg_state *regs;
7183 
7184 	branch = push_stack(env, next_idx, curr_idx, true);
7185 	if (branch && insn) {
7186 		regs = branch->frame[branch->curframe]->regs;
7187 		if (BPF_SRC(insn->code) == BPF_K) {
7188 			mark_reg_unknown(env, regs, insn->dst_reg);
7189 		} else if (BPF_SRC(insn->code) == BPF_X) {
7190 			mark_reg_unknown(env, regs, insn->dst_reg);
7191 			mark_reg_unknown(env, regs, insn->src_reg);
7192 		}
7193 	}
7194 	return branch;
7195 }
7196 
7197 static int sanitize_ptr_alu(struct bpf_verifier_env *env,
7198 			    struct bpf_insn *insn,
7199 			    const struct bpf_reg_state *ptr_reg,
7200 			    const struct bpf_reg_state *off_reg,
7201 			    struct bpf_reg_state *dst_reg,
7202 			    struct bpf_sanitize_info *info,
7203 			    const bool commit_window)
7204 {
7205 	struct bpf_insn_aux_data *aux = commit_window ? cur_aux(env) : &info->aux;
7206 	struct bpf_verifier_state *vstate = env->cur_state;
7207 	bool off_is_imm = tnum_is_const(off_reg->var_off);
7208 	bool off_is_neg = off_reg->smin_value < 0;
7209 	bool ptr_is_dst_reg = ptr_reg == dst_reg;
7210 	u8 opcode = BPF_OP(insn->code);
7211 	u32 alu_state, alu_limit;
7212 	struct bpf_reg_state tmp;
7213 	bool ret;
7214 	int err;
7215 
7216 	if (can_skip_alu_sanitation(env, insn))
7217 		return 0;
7218 
7219 	/* We already marked aux for masking from non-speculative
7220 	 * paths, thus we got here in the first place. We only care
7221 	 * to explore bad access from here.
7222 	 */
7223 	if (vstate->speculative)
7224 		goto do_sim;
7225 
7226 	if (!commit_window) {
7227 		if (!tnum_is_const(off_reg->var_off) &&
7228 		    (off_reg->smin_value < 0) != (off_reg->smax_value < 0))
7229 			return REASON_BOUNDS;
7230 
7231 		info->mask_to_left = (opcode == BPF_ADD &&  off_is_neg) ||
7232 				     (opcode == BPF_SUB && !off_is_neg);
7233 	}
7234 
7235 	err = retrieve_ptr_limit(ptr_reg, &alu_limit, info->mask_to_left);
7236 	if (err < 0)
7237 		return err;
7238 
7239 	if (commit_window) {
7240 		/* In commit phase we narrow the masking window based on
7241 		 * the observed pointer move after the simulated operation.
7242 		 */
7243 		alu_state = info->aux.alu_state;
7244 		alu_limit = abs(info->aux.alu_limit - alu_limit);
7245 	} else {
7246 		alu_state  = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
7247 		alu_state |= off_is_imm ? BPF_ALU_IMMEDIATE : 0;
7248 		alu_state |= ptr_is_dst_reg ?
7249 			     BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
7250 
7251 		/* Limit pruning on unknown scalars to enable deep search for
7252 		 * potential masking differences from other program paths.
7253 		 */
7254 		if (!off_is_imm)
7255 			env->explore_alu_limits = true;
7256 	}
7257 
7258 	err = update_alu_sanitation_state(aux, alu_state, alu_limit);
7259 	if (err < 0)
7260 		return err;
7261 do_sim:
7262 	/* If we're in commit phase, we're done here given we already
7263 	 * pushed the truncated dst_reg into the speculative verification
7264 	 * stack.
7265 	 *
7266 	 * Also, when register is a known constant, we rewrite register-based
7267 	 * operation to immediate-based, and thus do not need masking (and as
7268 	 * a consequence, do not need to simulate the zero-truncation either).
7269 	 */
7270 	if (commit_window || off_is_imm)
7271 		return 0;
7272 
7273 	/* Simulate and find potential out-of-bounds access under
7274 	 * speculative execution from truncation as a result of
7275 	 * masking when off was not within expected range. If off
7276 	 * sits in dst, then we temporarily need to move ptr there
7277 	 * to simulate dst (== 0) +/-= ptr. Needed, for example,
7278 	 * for cases where we use K-based arithmetic in one direction
7279 	 * and truncated reg-based in the other in order to explore
7280 	 * bad access.
7281 	 */
7282 	if (!ptr_is_dst_reg) {
7283 		tmp = *dst_reg;
7284 		*dst_reg = *ptr_reg;
7285 	}
7286 	ret = sanitize_speculative_path(env, NULL, env->insn_idx + 1,
7287 					env->insn_idx);
7288 	if (!ptr_is_dst_reg && ret)
7289 		*dst_reg = tmp;
7290 	return !ret ? REASON_STACK : 0;
7291 }
7292 
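/* Concrete case for the dst/ptr swap above (register numbers are only
 * illustrative): for 'r1 += r2' with the scalar offset in r1 (dst) and the
 * pointer in r2, ptr_is_dst_reg is false, so r1 temporarily receives a copy
 * of the pointer before the speculative path is pushed.  That path then
 * continues as if the masked offset had been truncated to 0 and r1 were the
 * bare pointer; the saved scalar is restored afterwards.
 */
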
7293 static void sanitize_mark_insn_seen(struct bpf_verifier_env *env)
7294 {
7295 	struct bpf_verifier_state *vstate = env->cur_state;
7296 
7297 	/* If we simulate paths under speculation, we don't update the
7298 	 * insn as 'seen' such that when we verify unreachable paths in
7299 	 * the non-speculative domain, sanitize_dead_code() can still
7300 	 * rewrite/sanitize them.
7301 	 */
7302 	if (!vstate->speculative)
7303 		env->insn_aux_data[env->insn_idx].seen = env->pass_cnt;
7304 }
7305 
7306 static int sanitize_err(struct bpf_verifier_env *env,
7307 			const struct bpf_insn *insn, int reason,
7308 			const struct bpf_reg_state *off_reg,
7309 			const struct bpf_reg_state *dst_reg)
7310 {
7311 	static const char *err = "pointer arithmetic with it prohibited for !root";
7312 	const char *op = BPF_OP(insn->code) == BPF_ADD ? "add" : "sub";
7313 	u32 dst = insn->dst_reg, src = insn->src_reg;
7314 
7315 	switch (reason) {
7316 	case REASON_BOUNDS:
7317 		verbose(env, "R%d has unknown scalar with mixed signed bounds, %s\n",
7318 			off_reg == dst_reg ? dst : src, err);
7319 		break;
7320 	case REASON_TYPE:
7321 		verbose(env, "R%d has pointer with unsupported alu operation, %s\n",
7322 			off_reg == dst_reg ? src : dst, err);
7323 		break;
7324 	case REASON_PATHS:
7325 		verbose(env, "R%d tried to %s from different maps, paths or scalars, %s\n",
7326 			dst, op, err);
7327 		break;
7328 	case REASON_LIMIT:
7329 		verbose(env, "R%d tried to %s beyond pointer bounds, %s\n",
7330 			dst, op, err);
7331 		break;
7332 	case REASON_STACK:
7333 		verbose(env, "R%d could not be pushed for speculative verification, %s\n",
7334 			dst, err);
7335 		break;
7336 	default:
7337 		verbose(env, "verifier internal error: unknown reason (%d)\n",
7338 			reason);
7339 		break;
7340 	}
7341 
7342 	return -EACCES;
7343 }
7344 
7345 /* check that stack access falls within stack limits and that 'reg' doesn't
7346  * have a variable offset.
7347  *
7348  * Variable offset is prohibited for unprivileged mode for simplicity since it
7349  * requires corresponding support in Spectre masking for stack ALU.  See also
7350  * retrieve_ptr_limit().
7351  *
7352  *
7353  * 'off' includes 'reg->off'.
7354  */
7355 static int check_stack_access_for_ptr_arithmetic(
7356 				struct bpf_verifier_env *env,
7357 				int regno,
7358 				const struct bpf_reg_state *reg,
7359 				int off)
7360 {
7361 	if (!tnum_is_const(reg->var_off)) {
7362 		char tn_buf[48];
7363 
7364 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
7365 		verbose(env, "R%d variable stack access prohibited for !root, var_off=%s off=%d\n",
7366 			regno, tn_buf, off);
7367 		return -EACCES;
7368 	}
7369 
7370 	if (off >= 0 || off < -MAX_BPF_STACK) {
7371 		verbose(env, "R%d stack pointer arithmetic goes out of range, "
7372 			"prohibited for !root; off=%d\n", regno, off);
7373 		return -EACCES;
7374 	}
7375 
7376 	return 0;
7377 }
7378 
7379 static int sanitize_check_bounds(struct bpf_verifier_env *env,
7380 				 const struct bpf_insn *insn,
7381 				 const struct bpf_reg_state *dst_reg)
7382 {
7383 	u32 dst = insn->dst_reg;
7384 
7385 	/* For unprivileged we require that resulting offset must be in bounds
7386 	 * in order to be able to sanitize access later on.
7387 	 */
7388 	if (env->bypass_spec_v1)
7389 		return 0;
7390 
7391 	switch (dst_reg->type) {
7392 	case PTR_TO_STACK:
7393 		if (check_stack_access_for_ptr_arithmetic(env, dst, dst_reg,
7394 					dst_reg->off + dst_reg->var_off.value))
7395 			return -EACCES;
7396 		break;
7397 	case PTR_TO_MAP_VALUE:
7398 		if (check_map_access(env, dst, dst_reg->off, 1, false)) {
7399 			verbose(env, "R%d pointer arithmetic of map value goes out of range, "
7400 				"prohibited for !root\n", dst);
7401 			return -EACCES;
7402 		}
7403 		break;
7404 	default:
7405 		break;
7406 	}
7407 
7408 	return 0;
7409 }
7410 
7411 /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
7412  * Caller should also handle BPF_MOV case separately.
7413  * If we return -EACCES, caller may want to try again treating pointer as a
7414  * scalar.  So we only emit a diagnostic if !env->allow_ptr_leaks.
7415  */
7416 static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
7417 				   struct bpf_insn *insn,
7418 				   const struct bpf_reg_state *ptr_reg,
7419 				   const struct bpf_reg_state *off_reg)
7420 {
7421 	struct bpf_verifier_state *vstate = env->cur_state;
7422 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
7423 	struct bpf_reg_state *regs = state->regs, *dst_reg;
7424 	bool known = tnum_is_const(off_reg->var_off);
7425 	s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value,
7426 	    smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
7427 	u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
7428 	    umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
7429 	struct bpf_sanitize_info info = {};
7430 	u8 opcode = BPF_OP(insn->code);
7431 	u32 dst = insn->dst_reg;
7432 	int ret;
7433 
7434 	dst_reg = &regs[dst];
7435 
7436 	if ((known && (smin_val != smax_val || umin_val != umax_val)) ||
7437 	    smin_val > smax_val || umin_val > umax_val) {
7438 		/* Taint dst register if offset had invalid bounds derived from
7439 		 * e.g. dead branches.
7440 		 */
7441 		__mark_reg_unknown(env, dst_reg);
7442 		return 0;
7443 	}
7444 
7445 	if (BPF_CLASS(insn->code) != BPF_ALU64) {
7446 		/* 32-bit ALU ops on pointers produce (meaningless) scalars */
7447 		if (opcode == BPF_SUB && env->allow_ptr_leaks) {
7448 			__mark_reg_unknown(env, dst_reg);
7449 			return 0;
7450 		}
7451 
7452 		verbose(env,
7453 			"R%d 32-bit pointer arithmetic prohibited\n",
7454 			dst);
7455 		return -EACCES;
7456 	}
7457 
7458 	if (ptr_reg->type & PTR_MAYBE_NULL) {
7459 		verbose(env, "R%d pointer arithmetic on %s prohibited, null-check it first\n",
7460 			dst, reg_type_str(env, ptr_reg->type));
7461 		return -EACCES;
7462 	}
7463 
7464 	switch (base_type(ptr_reg->type)) {
7465 	case CONST_PTR_TO_MAP:
7466 		/* smin_val represents the known value */
7467 		if (known && smin_val == 0 && opcode == BPF_ADD)
7468 			break;
7469 		fallthrough;
7470 	case PTR_TO_PACKET_END:
7471 	case PTR_TO_SOCKET:
7472 	case PTR_TO_SOCK_COMMON:
7473 	case PTR_TO_TCP_SOCK:
7474 	case PTR_TO_XDP_SOCK:
7475 		verbose(env, "R%d pointer arithmetic on %s prohibited\n",
7476 			dst, reg_type_str(env, ptr_reg->type));
7477 		return -EACCES;
7478 	default:
7479 		break;
7480 	}
7481 
7482 	/* In case of 'scalar += pointer', dst_reg inherits pointer type and id.
7483 	 * The id may be overwritten later if we create a new variable offset.
7484 	 */
7485 	dst_reg->type = ptr_reg->type;
7486 	dst_reg->id = ptr_reg->id;
7487 
7488 	if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) ||
7489 	    !check_reg_sane_offset(env, ptr_reg, ptr_reg->type))
7490 		return -EINVAL;
7491 
7492 	/* pointer types do not carry 32-bit bounds at the moment. */
7493 	__mark_reg32_unbounded(dst_reg);
7494 
7495 	if (sanitize_needed(opcode)) {
7496 		ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg,
7497 				       &info, false);
7498 		if (ret < 0)
7499 			return sanitize_err(env, insn, ret, off_reg, dst_reg);
7500 	}
7501 
7502 	switch (opcode) {
7503 	case BPF_ADD:
7504 		/* We can take a fixed offset as long as it doesn't overflow
7505 		 * the s32 'off' field
7506 		 */
7507 		if (known && (ptr_reg->off + smin_val ==
7508 			      (s64)(s32)(ptr_reg->off + smin_val))) {
7509 			/* pointer += K.  Accumulate it into fixed offset */
7510 			dst_reg->smin_value = smin_ptr;
7511 			dst_reg->smax_value = smax_ptr;
7512 			dst_reg->umin_value = umin_ptr;
7513 			dst_reg->umax_value = umax_ptr;
7514 			dst_reg->var_off = ptr_reg->var_off;
7515 			dst_reg->off = ptr_reg->off + smin_val;
7516 			dst_reg->raw = ptr_reg->raw;
7517 			break;
7518 		}
7519 		/* A new variable offset is created.  Note that off_reg->off
7520 		 * == 0, since it's a scalar.
7521 		 * dst_reg gets the pointer type and since some positive
7522 		 * integer value was added to the pointer, give it a new 'id'
7523 		 * if it's a PTR_TO_PACKET.
7524 		 * this creates a new 'base' pointer, off_reg (variable) gets
7525 		 * added into the variable offset, and we copy the fixed offset
7526 		 * from ptr_reg.
7527 		 */
7528 		if (signed_add_overflows(smin_ptr, smin_val) ||
7529 		    signed_add_overflows(smax_ptr, smax_val)) {
7530 			dst_reg->smin_value = S64_MIN;
7531 			dst_reg->smax_value = S64_MAX;
7532 		} else {
7533 			dst_reg->smin_value = smin_ptr + smin_val;
7534 			dst_reg->smax_value = smax_ptr + smax_val;
7535 		}
7536 		if (umin_ptr + umin_val < umin_ptr ||
7537 		    umax_ptr + umax_val < umax_ptr) {
7538 			dst_reg->umin_value = 0;
7539 			dst_reg->umax_value = U64_MAX;
7540 		} else {
7541 			dst_reg->umin_value = umin_ptr + umin_val;
7542 			dst_reg->umax_value = umax_ptr + umax_val;
7543 		}
7544 		dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off);
7545 		dst_reg->off = ptr_reg->off;
7546 		dst_reg->raw = ptr_reg->raw;
7547 		if (reg_is_pkt_pointer(ptr_reg)) {
7548 			dst_reg->id = ++env->id_gen;
7549 			/* something was added to pkt_ptr, set range to zero */
7550 			memset(&dst_reg->raw, 0, sizeof(dst_reg->raw));
7551 		}
7552 		break;
7553 	case BPF_SUB:
7554 		if (dst_reg == off_reg) {
7555 			/* scalar -= pointer.  Creates an unknown scalar */
7556 			verbose(env, "R%d tried to subtract pointer from scalar\n",
7557 				dst);
7558 			return -EACCES;
7559 		}
7560 		/* We don't allow subtraction from FP, because (according to
7561 		 * test_verifier.c test "invalid fp arithmetic") JITs might not
7562 		 * be able to deal with it.
7563 		 */
7564 		if (ptr_reg->type == PTR_TO_STACK) {
7565 			verbose(env, "R%d subtraction from stack pointer prohibited\n",
7566 				dst);
7567 			return -EACCES;
7568 		}
7569 		if (known && (ptr_reg->off - smin_val ==
7570 			      (s64)(s32)(ptr_reg->off - smin_val))) {
7571 			/* pointer -= K.  Subtract it from fixed offset */
7572 			dst_reg->smin_value = smin_ptr;
7573 			dst_reg->smax_value = smax_ptr;
7574 			dst_reg->umin_value = umin_ptr;
7575 			dst_reg->umax_value = umax_ptr;
7576 			dst_reg->var_off = ptr_reg->var_off;
7577 			dst_reg->id = ptr_reg->id;
7578 			dst_reg->off = ptr_reg->off - smin_val;
7579 			dst_reg->raw = ptr_reg->raw;
7580 			break;
7581 		}
7582 		/* A new variable offset is created.  If the subtrahend is known
7583 		 * nonnegative, then any reg->range we had before is still good.
7584 		 */
7585 		if (signed_sub_overflows(smin_ptr, smax_val) ||
7586 		    signed_sub_overflows(smax_ptr, smin_val)) {
7587 			/* Overflow possible, we know nothing */
7588 			dst_reg->smin_value = S64_MIN;
7589 			dst_reg->smax_value = S64_MAX;
7590 		} else {
7591 			dst_reg->smin_value = smin_ptr - smax_val;
7592 			dst_reg->smax_value = smax_ptr - smin_val;
7593 		}
7594 		if (umin_ptr < umax_val) {
7595 			/* Overflow possible, we know nothing */
7596 			dst_reg->umin_value = 0;
7597 			dst_reg->umax_value = U64_MAX;
7598 		} else {
7599 			/* Cannot overflow (as long as bounds are consistent) */
7600 			dst_reg->umin_value = umin_ptr - umax_val;
7601 			dst_reg->umax_value = umax_ptr - umin_val;
7602 		}
7603 		dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off);
7604 		dst_reg->off = ptr_reg->off;
7605 		dst_reg->raw = ptr_reg->raw;
7606 		if (reg_is_pkt_pointer(ptr_reg)) {
7607 			dst_reg->id = ++env->id_gen;
7608 			/* something was added to pkt_ptr, set range to zero */
7609 			if (smin_val < 0)
7610 				memset(&dst_reg->raw, 0, sizeof(dst_reg->raw));
7611 		}
7612 		break;
7613 	case BPF_AND:
7614 	case BPF_OR:
7615 	case BPF_XOR:
7616 		/* bitwise ops on pointers are troublesome, prohibit. */
7617 		verbose(env, "R%d bitwise operator %s on pointer prohibited\n",
7618 			dst, bpf_alu_string[opcode >> 4]);
7619 		return -EACCES;
7620 	default:
7621 		/* other operators (e.g. MUL,LSH) produce non-pointer results */
7622 		verbose(env, "R%d pointer arithmetic with %s operator prohibited\n",
7623 			dst, bpf_alu_string[opcode >> 4]);
7624 		return -EACCES;
7625 	}
7626 
7627 	if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type))
7628 		return -EINVAL;
7629 
7630 	__update_reg_bounds(dst_reg);
7631 	__reg_deduce_bounds(dst_reg);
7632 	__reg_bound_offset(dst_reg);
7633 
7634 	if (sanitize_check_bounds(env, insn, dst_reg) < 0)
7635 		return -EACCES;
7636 	if (sanitize_needed(opcode)) {
7637 		ret = sanitize_ptr_alu(env, insn, dst_reg, off_reg, dst_reg,
7638 				       &info, true);
7639 		if (ret < 0)
7640 			return sanitize_err(env, insn, ret, off_reg, dst_reg);
7641 	}
7642 
7643 	return 0;
7644 }
7645 
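/* A short example of the two BPF_ADD paths above, assuming R0 holds a
 * NULL-checked PTR_TO_MAP_VALUE and R2 a scalar known to be in [0, 16]:
 *
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
 *
 * takes the 'pointer += K' fast path and folds 8 into dst_reg->off, while
 *
 *    BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
 *
 * creates a variable offset: the scalar's min/max bounds and var_off are
 * added to the pointer's, and check_map_access() later checks the combined
 * offset against the map's value_size.
 */
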
7646 static void scalar32_min_max_add(struct bpf_reg_state *dst_reg,
7647 				 struct bpf_reg_state *src_reg)
7648 {
7649 	s32 smin_val = src_reg->s32_min_value;
7650 	s32 smax_val = src_reg->s32_max_value;
7651 	u32 umin_val = src_reg->u32_min_value;
7652 	u32 umax_val = src_reg->u32_max_value;
7653 
7654 	if (signed_add32_overflows(dst_reg->s32_min_value, smin_val) ||
7655 	    signed_add32_overflows(dst_reg->s32_max_value, smax_val)) {
7656 		dst_reg->s32_min_value = S32_MIN;
7657 		dst_reg->s32_max_value = S32_MAX;
7658 	} else {
7659 		dst_reg->s32_min_value += smin_val;
7660 		dst_reg->s32_max_value += smax_val;
7661 	}
7662 	if (dst_reg->u32_min_value + umin_val < umin_val ||
7663 	    dst_reg->u32_max_value + umax_val < umax_val) {
7664 		dst_reg->u32_min_value = 0;
7665 		dst_reg->u32_max_value = U32_MAX;
7666 	} else {
7667 		dst_reg->u32_min_value += umin_val;
7668 		dst_reg->u32_max_value += umax_val;
7669 	}
7670 }
7671 
7672 static void scalar_min_max_add(struct bpf_reg_state *dst_reg,
7673 			       struct bpf_reg_state *src_reg)
7674 {
7675 	s64 smin_val = src_reg->smin_value;
7676 	s64 smax_val = src_reg->smax_value;
7677 	u64 umin_val = src_reg->umin_value;
7678 	u64 umax_val = src_reg->umax_value;
7679 
7680 	if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
7681 	    signed_add_overflows(dst_reg->smax_value, smax_val)) {
7682 		dst_reg->smin_value = S64_MIN;
7683 		dst_reg->smax_value = S64_MAX;
7684 	} else {
7685 		dst_reg->smin_value += smin_val;
7686 		dst_reg->smax_value += smax_val;
7687 	}
7688 	if (dst_reg->umin_value + umin_val < umin_val ||
7689 	    dst_reg->umax_value + umax_val < umax_val) {
7690 		dst_reg->umin_value = 0;
7691 		dst_reg->umax_value = U64_MAX;
7692 	} else {
7693 		dst_reg->umin_value += umin_val;
7694 		dst_reg->umax_value += umax_val;
7695 	}
7696 }
7697 
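/* Worked example for the 64-bit add above (illustrative bounds): dst in
 * [10, 20] plus src in [0, 5] gives [10, 25] in both the signed and the
 * unsigned domain, whereas dst with smax_value == S64_MAX plus src with
 * smax_val == 1 trips signed_add_overflows() and the signed bounds collapse
 * to [S64_MIN, S64_MAX], leaving var_off to carry whatever is still known.
 */
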
7698 static void scalar32_min_max_sub(struct bpf_reg_state *dst_reg,
7699 				 struct bpf_reg_state *src_reg)
7700 {
7701 	s32 smin_val = src_reg->s32_min_value;
7702 	s32 smax_val = src_reg->s32_max_value;
7703 	u32 umin_val = src_reg->u32_min_value;
7704 	u32 umax_val = src_reg->u32_max_value;
7705 
7706 	if (signed_sub32_overflows(dst_reg->s32_min_value, smax_val) ||
7707 	    signed_sub32_overflows(dst_reg->s32_max_value, smin_val)) {
7708 		/* Overflow possible, we know nothing */
7709 		dst_reg->s32_min_value = S32_MIN;
7710 		dst_reg->s32_max_value = S32_MAX;
7711 	} else {
7712 		dst_reg->s32_min_value -= smax_val;
7713 		dst_reg->s32_max_value -= smin_val;
7714 	}
7715 	if (dst_reg->u32_min_value < umax_val) {
7716 		/* Overflow possible, we know nothing */
7717 		dst_reg->u32_min_value = 0;
7718 		dst_reg->u32_max_value = U32_MAX;
7719 	} else {
7720 		/* Cannot overflow (as long as bounds are consistent) */
7721 		dst_reg->u32_min_value -= umax_val;
7722 		dst_reg->u32_max_value -= umin_val;
7723 	}
7724 }
7725 
7726 static void scalar_min_max_sub(struct bpf_reg_state *dst_reg,
7727 			       struct bpf_reg_state *src_reg)
7728 {
7729 	s64 smin_val = src_reg->smin_value;
7730 	s64 smax_val = src_reg->smax_value;
7731 	u64 umin_val = src_reg->umin_value;
7732 	u64 umax_val = src_reg->umax_value;
7733 
7734 	if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
7735 	    signed_sub_overflows(dst_reg->smax_value, smin_val)) {
7736 		/* Overflow possible, we know nothing */
7737 		dst_reg->smin_value = S64_MIN;
7738 		dst_reg->smax_value = S64_MAX;
7739 	} else {
7740 		dst_reg->smin_value -= smax_val;
7741 		dst_reg->smax_value -= smin_val;
7742 	}
7743 	if (dst_reg->umin_value < umax_val) {
7744 		/* Overflow possible, we know nothing */
7745 		dst_reg->umin_value = 0;
7746 		dst_reg->umax_value = U64_MAX;
7747 	} else {
7748 		/* Cannot overflow (as long as bounds are consistent) */
7749 		dst_reg->umin_value -= umax_val;
7750 		dst_reg->umax_value -= umin_val;
7751 	}
7752 }
7753 
7754 static void scalar32_min_max_mul(struct bpf_reg_state *dst_reg,
7755 				 struct bpf_reg_state *src_reg)
7756 {
7757 	s32 smin_val = src_reg->s32_min_value;
7758 	u32 umin_val = src_reg->u32_min_value;
7759 	u32 umax_val = src_reg->u32_max_value;
7760 
7761 	if (smin_val < 0 || dst_reg->s32_min_value < 0) {
7762 		/* Ain't nobody got time to multiply that sign */
7763 		__mark_reg32_unbounded(dst_reg);
7764 		return;
7765 	}
7766 	/* Both values are positive, so we can work with unsigned and
7767 	 * copy the result to signed (unless it exceeds S32_MAX).
7768 	 */
7769 	if (umax_val > U16_MAX || dst_reg->u32_max_value > U16_MAX) {
7770 		/* Potential overflow, we know nothing */
7771 		__mark_reg32_unbounded(dst_reg);
7772 		return;
7773 	}
7774 	dst_reg->u32_min_value *= umin_val;
7775 	dst_reg->u32_max_value *= umax_val;
7776 	if (dst_reg->u32_max_value > S32_MAX) {
7777 		/* Overflow possible, we know nothing */
7778 		dst_reg->s32_min_value = S32_MIN;
7779 		dst_reg->s32_max_value = S32_MAX;
7780 	} else {
7781 		dst_reg->s32_min_value = dst_reg->u32_min_value;
7782 		dst_reg->s32_max_value = dst_reg->u32_max_value;
7783 	}
7784 }
7785 
7786 static void scalar_min_max_mul(struct bpf_reg_state *dst_reg,
7787 			       struct bpf_reg_state *src_reg)
7788 {
7789 	s64 smin_val = src_reg->smin_value;
7790 	u64 umin_val = src_reg->umin_value;
7791 	u64 umax_val = src_reg->umax_value;
7792 
7793 	if (smin_val < 0 || dst_reg->smin_value < 0) {
7794 		/* Ain't nobody got time to multiply that sign */
7795 		__mark_reg64_unbounded(dst_reg);
7796 		return;
7797 	}
7798 	/* Both values are positive, so we can work with unsigned and
7799 	 * copy the result to signed (unless it exceeds S64_MAX).
7800 	 */
7801 	if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) {
7802 		/* Potential overflow, we know nothing */
7803 		__mark_reg64_unbounded(dst_reg);
7804 		return;
7805 	}
7806 	dst_reg->umin_value *= umin_val;
7807 	dst_reg->umax_value *= umax_val;
7808 	if (dst_reg->umax_value > S64_MAX) {
7809 		/* Overflow possible, we know nothing */
7810 		dst_reg->smin_value = S64_MIN;
7811 		dst_reg->smax_value = S64_MAX;
7812 	} else {
7813 		dst_reg->smin_value = dst_reg->umin_value;
7814 		dst_reg->smax_value = dst_reg->umax_value;
7815 	}
7816 }
7817 
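/* The U32_MAX cut-off above is what makes the unsigned multiply safe: with
 * both umax values at most 2^32 - 1 the product stays below 2^64, so
 * neither umin_value * umin_val nor umax_value * umax_val can wrap.  E.g.
 * dst in [2, 10] times src in [3, 4] (illustrative bounds) gives [6, 40],
 * which is also copied into the signed bounds since 40 <= S64_MAX.
 */
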
7818 static void scalar32_min_max_and(struct bpf_reg_state *dst_reg,
7819 				 struct bpf_reg_state *src_reg)
7820 {
7821 	bool src_known = tnum_subreg_is_const(src_reg->var_off);
7822 	bool dst_known = tnum_subreg_is_const(dst_reg->var_off);
7823 	struct tnum var32_off = tnum_subreg(dst_reg->var_off);
7824 	s32 smin_val = src_reg->s32_min_value;
7825 	u32 umax_val = src_reg->u32_max_value;
7826 
7827 	if (src_known && dst_known) {
7828 		__mark_reg32_known(dst_reg, var32_off.value);
7829 		return;
7830 	}
7831 
7832 	/* We get our minimum from the var_off, since that's inherently
7833 	 * bitwise.  Our maximum is the minimum of the operands' maxima.
7834 	 */
7835 	dst_reg->u32_min_value = var32_off.value;
7836 	dst_reg->u32_max_value = min(dst_reg->u32_max_value, umax_val);
7837 	if (dst_reg->s32_min_value < 0 || smin_val < 0) {
7838 		/* Lose signed bounds when ANDing negative numbers,
7839 		 * ain't nobody got time for that.
7840 		 */
7841 		dst_reg->s32_min_value = S32_MIN;
7842 		dst_reg->s32_max_value = S32_MAX;
7843 	} else {
7844 		/* ANDing two positives gives a positive, so safe to
7845 		 * cast result into s32.
7846 		 */
7847 		dst_reg->s32_min_value = dst_reg->u32_min_value;
7848 		dst_reg->s32_max_value = dst_reg->u32_max_value;
7849 	}
7850 }
7851 
7852 static void scalar_min_max_and(struct bpf_reg_state *dst_reg,
7853 			       struct bpf_reg_state *src_reg)
7854 {
7855 	bool src_known = tnum_is_const(src_reg->var_off);
7856 	bool dst_known = tnum_is_const(dst_reg->var_off);
7857 	s64 smin_val = src_reg->smin_value;
7858 	u64 umax_val = src_reg->umax_value;
7859 
7860 	if (src_known && dst_known) {
7861 		__mark_reg_known(dst_reg, dst_reg->var_off.value);
7862 		return;
7863 	}
7864 
7865 	/* We get our minimum from the var_off, since that's inherently
7866 	 * bitwise.  Our maximum is the minimum of the operands' maxima.
7867 	 */
7868 	dst_reg->umin_value = dst_reg->var_off.value;
7869 	dst_reg->umax_value = min(dst_reg->umax_value, umax_val);
7870 	if (dst_reg->smin_value < 0 || smin_val < 0) {
7871 		/* Lose signed bounds when ANDing negative numbers,
7872 		 * ain't nobody got time for that.
7873 		 */
7874 		dst_reg->smin_value = S64_MIN;
7875 		dst_reg->smax_value = S64_MAX;
7876 	} else {
7877 		/* ANDing two positives gives a positive, so safe to
7878 		 * cast result into s64.
7879 		 */
7880 		dst_reg->smin_value = dst_reg->umin_value;
7881 		dst_reg->smax_value = dst_reg->umax_value;
7882 	}
7883 	/* We may learn something more from the var_off */
7884 	__update_reg_bounds(dst_reg);
7885 }
7886 
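/* Worked example (illustrative values): if dst is an unknown low byte
 * (var_off = {value 0, mask 0xff}, bounds [0, 255]) and src is the constant
 * 0x10, the caller has already folded the tnums so that dst->var_off is
 * {value 0, mask 0x10}.  The result is therefore either 0 or 0x10:
 * umin_value becomes 0 (the known-one bits) and umax_value becomes
 * min(255, 0x10) = 0x10.
 */
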
7887 static void scalar32_min_max_or(struct bpf_reg_state *dst_reg,
7888 				struct bpf_reg_state *src_reg)
7889 {
7890 	bool src_known = tnum_subreg_is_const(src_reg->var_off);
7891 	bool dst_known = tnum_subreg_is_const(dst_reg->var_off);
7892 	struct tnum var32_off = tnum_subreg(dst_reg->var_off);
7893 	s32 smin_val = src_reg->s32_min_value;
7894 	u32 umin_val = src_reg->u32_min_value;
7895 
7896 	if (src_known && dst_known) {
7897 		__mark_reg32_known(dst_reg, var32_off.value);
7898 		return;
7899 	}
7900 
7901 	/* We get our maximum from the var_off, and our minimum is the
7902 	 * maximum of the operands' minima
7903 	 */
7904 	dst_reg->u32_min_value = max(dst_reg->u32_min_value, umin_val);
7905 	dst_reg->u32_max_value = var32_off.value | var32_off.mask;
7906 	if (dst_reg->s32_min_value < 0 || smin_val < 0) {
7907 		/* Lose signed bounds when ORing negative numbers,
7908 		 * ain't nobody got time for that.
7909 		 */
7910 		dst_reg->s32_min_value = S32_MIN;
7911 		dst_reg->s32_max_value = S32_MAX;
7912 	} else {
7913 		/* ORing two positives gives a positive, so safe to
7914 		 * cast result into s32.
7915 		 */
7916 		dst_reg->s32_min_value = dst_reg->u32_min_value;
7917 		dst_reg->s32_max_value = dst_reg->u32_max_value;
7918 	}
7919 }
7920 
7921 static void scalar_min_max_or(struct bpf_reg_state *dst_reg,
7922 			      struct bpf_reg_state *src_reg)
7923 {
7924 	bool src_known = tnum_is_const(src_reg->var_off);
7925 	bool dst_known = tnum_is_const(dst_reg->var_off);
7926 	s64 smin_val = src_reg->smin_value;
7927 	u64 umin_val = src_reg->umin_value;
7928 
7929 	if (src_known && dst_known) {
7930 		__mark_reg_known(dst_reg, dst_reg->var_off.value);
7931 		return;
7932 	}
7933 
7934 	/* We get our maximum from the var_off, and our minimum is the
7935 	 * maximum of the operands' minima
7936 	 */
7937 	dst_reg->umin_value = max(dst_reg->umin_value, umin_val);
7938 	dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask;
7939 	if (dst_reg->smin_value < 0 || smin_val < 0) {
7940 		/* Lose signed bounds when ORing negative numbers,
7941 		 * ain't nobody got time for that.
7942 		 */
7943 		dst_reg->smin_value = S64_MIN;
7944 		dst_reg->smax_value = S64_MAX;
7945 	} else {
7946 		/* ORing two positives gives a positive, so safe to
7947 		 * cast result into s64.
7948 		 */
7949 		dst_reg->smin_value = dst_reg->umin_value;
7950 		dst_reg->smax_value = dst_reg->umax_value;
7951 	}
7952 	/* We may learn something more from the var_off */
7953 	__update_reg_bounds(dst_reg);
7954 }
7955 
7956 static void scalar32_min_max_xor(struct bpf_reg_state *dst_reg,
7957 				 struct bpf_reg_state *src_reg)
7958 {
7959 	bool src_known = tnum_subreg_is_const(src_reg->var_off);
7960 	bool dst_known = tnum_subreg_is_const(dst_reg->var_off);
7961 	struct tnum var32_off = tnum_subreg(dst_reg->var_off);
7962 	s32 smin_val = src_reg->s32_min_value;
7963 
7964 	if (src_known && dst_known) {
7965 		__mark_reg32_known(dst_reg, var32_off.value);
7966 		return;
7967 	}
7968 
7969 	/* We get both minimum and maximum from the var32_off. */
7970 	dst_reg->u32_min_value = var32_off.value;
7971 	dst_reg->u32_max_value = var32_off.value | var32_off.mask;
7972 
7973 	if (dst_reg->s32_min_value >= 0 && smin_val >= 0) {
7974 		/* XORing two positive sign numbers gives a positive,
7975 		 * so safe to cast u32 result into s32.
7976 		 */
7977 		dst_reg->s32_min_value = dst_reg->u32_min_value;
7978 		dst_reg->s32_max_value = dst_reg->u32_max_value;
7979 	} else {
7980 		dst_reg->s32_min_value = S32_MIN;
7981 		dst_reg->s32_max_value = S32_MAX;
7982 	}
7983 }
7984 
7985 static void scalar_min_max_xor(struct bpf_reg_state *dst_reg,
7986 			       struct bpf_reg_state *src_reg)
7987 {
7988 	bool src_known = tnum_is_const(src_reg->var_off);
7989 	bool dst_known = tnum_is_const(dst_reg->var_off);
7990 	s64 smin_val = src_reg->smin_value;
7991 
7992 	if (src_known && dst_known) {
7993 		/* dst_reg->var_off.value has been updated earlier */
7994 		__mark_reg_known(dst_reg, dst_reg->var_off.value);
7995 		return;
7996 	}
7997 
7998 	/* We get both minimum and maximum from the var_off. */
7999 	dst_reg->umin_value = dst_reg->var_off.value;
8000 	dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask;
8001 
8002 	if (dst_reg->smin_value >= 0 && smin_val >= 0) {
8003 		/* XORing two positive sign numbers gives a positive,
8004 		 * so safe to cast u64 result into s64.
8005 		 */
8006 		dst_reg->smin_value = dst_reg->umin_value;
8007 		dst_reg->smax_value = dst_reg->umax_value;
8008 	} else {
8009 		dst_reg->smin_value = S64_MIN;
8010 		dst_reg->smax_value = S64_MAX;
8011 	}
8012 
8013 	__update_reg_bounds(dst_reg);
8014 }
8015 
8016 static void __scalar32_min_max_lsh(struct bpf_reg_state *dst_reg,
8017 				   u64 umin_val, u64 umax_val)
8018 {
8019 	/* We lose all sign bit information (except what we can pick
8020 	 * up from var_off)
8021 	 */
8022 	dst_reg->s32_min_value = S32_MIN;
8023 	dst_reg->s32_max_value = S32_MAX;
8024 	/* If we might shift our top bit out, then we know nothing */
8025 	if (umax_val > 31 || dst_reg->u32_max_value > 1ULL << (31 - umax_val)) {
8026 		dst_reg->u32_min_value = 0;
8027 		dst_reg->u32_max_value = U32_MAX;
8028 	} else {
8029 		dst_reg->u32_min_value <<= umin_val;
8030 		dst_reg->u32_max_value <<= umax_val;
8031 	}
8032 }
8033 
8034 static void scalar32_min_max_lsh(struct bpf_reg_state *dst_reg,
8035 				 struct bpf_reg_state *src_reg)
8036 {
8037 	u32 umax_val = src_reg->u32_max_value;
8038 	u32 umin_val = src_reg->u32_min_value;
8039 	/* u32 alu operation will zext upper bits */
8040 	struct tnum subreg = tnum_subreg(dst_reg->var_off);
8041 
8042 	__scalar32_min_max_lsh(dst_reg, umin_val, umax_val);
8043 	dst_reg->var_off = tnum_subreg(tnum_lshift(subreg, umin_val));
8044 	/* Not strictly required, but to be careful mark the reg64 bounds as
8045 	 * unknown so that we are forced to pick them up from the tnum and
8046 	 * zext later; if some path skips this step we are still safe.
8047 	 */
8048 	__mark_reg64_unbounded(dst_reg);
8049 	__update_reg32_bounds(dst_reg);
8050 }
8051 
8052 static void __scalar64_min_max_lsh(struct bpf_reg_state *dst_reg,
8053 				   u64 umin_val, u64 umax_val)
8054 {
8055 	/* Special case <<32 because it is a common compiler pattern to sign
8056 	 * extend subreg by doing <<32 s>>32. In this case if 32bit bounds are
8057 	 * positive we know this shift will also be positive so we can track
8058 	 * bounds correctly. Otherwise we lose all sign bit information except
8059 	 * what we can pick up from var_off. Perhaps we can generalize this
8060 	 * later to shifts of any length.
8061 	 */
8062 	if (umin_val == 32 && umax_val == 32 && dst_reg->s32_max_value >= 0)
8063 		dst_reg->smax_value = (s64)dst_reg->s32_max_value << 32;
8064 	else
8065 		dst_reg->smax_value = S64_MAX;
8066 
8067 	if (umin_val == 32 && umax_val == 32 && dst_reg->s32_min_value >= 0)
8068 		dst_reg->smin_value = (s64)dst_reg->s32_min_value << 32;
8069 	else
8070 		dst_reg->smin_value = S64_MIN;
8071 
8072 	/* If we might shift our top bit out, then we know nothing */
8073 	if (dst_reg->umax_value > 1ULL << (63 - umax_val)) {
8074 		dst_reg->umin_value = 0;
8075 		dst_reg->umax_value = U64_MAX;
8076 	} else {
8077 		dst_reg->umin_value <<= umin_val;
8078 		dst_reg->umax_value <<= umax_val;
8079 	}
8080 }
8081 
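/* For example, compilers commonly sign-extend a 32-bit subreg with the pair
 * 'r1 <<= 32; r1 s>>= 32'.  With 32-bit bounds of [0, 100] before the
 * shift, the special case above keeps smax_value = 100 << 32 and
 * smin_value = 0 instead of giving up entirely, and the following
 * arithmetic right shift recovers the [0, 100] range.
 */
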
8082 static void scalar_min_max_lsh(struct bpf_reg_state *dst_reg,
8083 			       struct bpf_reg_state *src_reg)
8084 {
8085 	u64 umax_val = src_reg->umax_value;
8086 	u64 umin_val = src_reg->umin_value;
8087 
8088 	/* scalar64 calc uses 32bit unshifted bounds so must be called first */
8089 	__scalar64_min_max_lsh(dst_reg, umin_val, umax_val);
8090 	__scalar32_min_max_lsh(dst_reg, umin_val, umax_val);
8091 
8092 	dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val);
8093 	/* We may learn something more from the var_off */
8094 	__update_reg_bounds(dst_reg);
8095 }
8096 
8097 static void scalar32_min_max_rsh(struct bpf_reg_state *dst_reg,
8098 				 struct bpf_reg_state *src_reg)
8099 {
8100 	struct tnum subreg = tnum_subreg(dst_reg->var_off);
8101 	u32 umax_val = src_reg->u32_max_value;
8102 	u32 umin_val = src_reg->u32_min_value;
8103 
8104 	/* BPF_RSH is an unsigned shift.  If the value in dst_reg might
8105 	 * be negative, then either:
8106 	 * 1) src_reg might be zero, so the sign bit of the result is
8107 	 *    unknown, so we lose our signed bounds
8108 	 * 2) it's known negative, thus the unsigned bounds capture the
8109 	 *    signed bounds
8110 	 * 3) the signed bounds cross zero, so they tell us nothing
8111 	 *    about the result
8112 	 * If the value in dst_reg is known nonnegative, then again the
8113 	 * unsigned bounds capture the signed bounds.
8114 	 * Thus, in all cases it suffices to blow away our signed bounds
8115 	 * and rely on inferring new ones from the unsigned bounds and
8116 	 * var_off of the result.
8117 	 */
8118 	dst_reg->s32_min_value = S32_MIN;
8119 	dst_reg->s32_max_value = S32_MAX;
8120 
8121 	dst_reg->var_off = tnum_rshift(subreg, umin_val);
8122 	dst_reg->u32_min_value >>= umax_val;
8123 	dst_reg->u32_max_value >>= umin_val;
8124 
8125 	__mark_reg64_unbounded(dst_reg);
8126 	__update_reg32_bounds(dst_reg);
8127 }
8128 
8129 static void scalar_min_max_rsh(struct bpf_reg_state *dst_reg,
8130 			       struct bpf_reg_state *src_reg)
8131 {
8132 	u64 umax_val = src_reg->umax_value;
8133 	u64 umin_val = src_reg->umin_value;
8134 
8135 	/* BPF_RSH is an unsigned shift.  If the value in dst_reg might
8136 	 * be negative, then either:
8137 	 * 1) src_reg might be zero, so the sign bit of the result is
8138 	 *    unknown, so we lose our signed bounds
8139 	 * 2) it's known negative, thus the unsigned bounds capture the
8140 	 *    signed bounds
8141 	 * 3) the signed bounds cross zero, so they tell us nothing
8142 	 *    about the result
8143 	 * If the value in dst_reg is known nonnegative, then again the
8144 	 * unsigned bounds capture the signed bounds.
8145 	 * Thus, in all cases it suffices to blow away our signed bounds
8146 	 * and rely on inferring new ones from the unsigned bounds and
8147 	 * var_off of the result.
8148 	 */
8149 	dst_reg->smin_value = S64_MIN;
8150 	dst_reg->smax_value = S64_MAX;
8151 	dst_reg->var_off = tnum_rshift(dst_reg->var_off, umin_val);
8152 	dst_reg->umin_value >>= umax_val;
8153 	dst_reg->umax_value >>= umin_val;
8154 
8155 	/* It's not easy to operate on alu32 bounds here because it depends
8156 	 * on bits being shifted in. Take the easy way out and mark unbounded
8157 	 * so we can recalculate later from tnum.
8158 	 */
8159 	__mark_reg32_unbounded(dst_reg);
8160 	__update_reg_bounds(dst_reg);
8161 }
8162 
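/* A small example of why the signed bounds are discarded above: with dst in
 * [-4, 4] signed, 'dst >>= 1' yields 2 for dst == 4 but
 * 0x7ffffffffffffffe for dst == -4, so no useful signed bound survives the
 * logical shift; only the unsigned bounds and var_off remain meaningful.
 */
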
8163 static void scalar32_min_max_arsh(struct bpf_reg_state *dst_reg,
8164 				  struct bpf_reg_state *src_reg)
8165 {
8166 	u64 umin_val = src_reg->u32_min_value;
8167 
8168 	/* Upon reaching here, src_known is true and
8169 	 * umax_val is equal to umin_val.
8170 	 */
8171 	dst_reg->s32_min_value = (u32)(((s32)dst_reg->s32_min_value) >> umin_val);
8172 	dst_reg->s32_max_value = (u32)(((s32)dst_reg->s32_max_value) >> umin_val);
8173 
8174 	dst_reg->var_off = tnum_arshift(tnum_subreg(dst_reg->var_off), umin_val, 32);
8175 
8176 	/* blow away the dst_reg umin_value/umax_value and rely on
8177 	 * dst_reg var_off to refine the result.
8178 	 */
8179 	dst_reg->u32_min_value = 0;
8180 	dst_reg->u32_max_value = U32_MAX;
8181 
8182 	__mark_reg64_unbounded(dst_reg);
8183 	__update_reg32_bounds(dst_reg);
8184 }
8185 
8186 static void scalar_min_max_arsh(struct bpf_reg_state *dst_reg,
8187 				struct bpf_reg_state *src_reg)
8188 {
8189 	u64 umin_val = src_reg->umin_value;
8190 
8191 	/* Upon reaching here, src_known is true and umax_val is equal
8192 	 * to umin_val.
8193 	 */
8194 	dst_reg->smin_value >>= umin_val;
8195 	dst_reg->smax_value >>= umin_val;
8196 
8197 	dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val, 64);
8198 
8199 	/* blow away the dst_reg umin_value/umax_value and rely on
8200 	 * dst_reg var_off to refine the result.
8201 	 */
8202 	dst_reg->umin_value = 0;
8203 	dst_reg->umax_value = U64_MAX;
8204 
8205 	/* It's not easy to operate on alu32 bounds here because it depends
8206 	 * on bits being shifted in from the upper 32 bits. Take the easy way out
8207 	 * and mark unbounded so we can recalculate later from tnum.
8208 	 */
8209 	__mark_reg32_unbounded(dst_reg);
8210 	__update_reg_bounds(dst_reg);
8211 }
8212 
8213 /* WARNING: This function does calculations on 64-bit values, but the actual
8214  * execution may occur on 32-bit values. Therefore, things like bitshifts
8215  * need extra checks in the 32-bit case.
8216  */
8217 static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
8218 				      struct bpf_insn *insn,
8219 				      struct bpf_reg_state *dst_reg,
8220 				      struct bpf_reg_state src_reg)
8221 {
8222 	struct bpf_reg_state *regs = cur_regs(env);
8223 	u8 opcode = BPF_OP(insn->code);
8224 	bool src_known;
8225 	s64 smin_val, smax_val;
8226 	u64 umin_val, umax_val;
8227 	s32 s32_min_val, s32_max_val;
8228 	u32 u32_min_val, u32_max_val;
8229 	u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
8230 	bool alu32 = (BPF_CLASS(insn->code) != BPF_ALU64);
8231 	int ret;
8232 
8233 	smin_val = src_reg.smin_value;
8234 	smax_val = src_reg.smax_value;
8235 	umin_val = src_reg.umin_value;
8236 	umax_val = src_reg.umax_value;
8237 
8238 	s32_min_val = src_reg.s32_min_value;
8239 	s32_max_val = src_reg.s32_max_value;
8240 	u32_min_val = src_reg.u32_min_value;
8241 	u32_max_val = src_reg.u32_max_value;
8242 
8243 	if (alu32) {
8244 		src_known = tnum_subreg_is_const(src_reg.var_off);
8245 		if ((src_known &&
8246 		     (s32_min_val != s32_max_val || u32_min_val != u32_max_val)) ||
8247 		    s32_min_val > s32_max_val || u32_min_val > u32_max_val) {
8248 			/* Taint dst register if offset had invalid bounds
8249 			 * derived from e.g. dead branches.
8250 			 */
8251 			__mark_reg_unknown(env, dst_reg);
8252 			return 0;
8253 		}
8254 	} else {
8255 		src_known = tnum_is_const(src_reg.var_off);
8256 		if ((src_known &&
8257 		     (smin_val != smax_val || umin_val != umax_val)) ||
8258 		    smin_val > smax_val || umin_val > umax_val) {
8259 			/* Taint dst register if offset had invalid bounds
8260 			 * derived from e.g. dead branches.
8261 			 */
8262 			__mark_reg_unknown(env, dst_reg);
8263 			return 0;
8264 		}
8265 	}
8266 
8267 	if (!src_known &&
8268 	    opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) {
8269 		__mark_reg_unknown(env, dst_reg);
8270 		return 0;
8271 	}
8272 
8273 	if (sanitize_needed(opcode)) {
8274 		ret = sanitize_val_alu(env, insn);
8275 		if (ret < 0)
8276 			return sanitize_err(env, insn, ret, NULL, NULL);
8277 	}
8278 
8279 	/* Calculate sign/unsigned bounds and tnum for alu32 and alu64 bit ops.
8280 	 * There are two classes of instructions. For the first class we track
8281 	 * both alu32 and alu64 sign/unsigned bounds independently; this provides
8282 	 * the greatest amount of precision when alu operations are mixed with
8283 	 * jmp32 operations. These operations are BPF_ADD, BPF_SUB, BPF_MUL,
8284 	 * BPF_AND, BPF_OR and BPF_XOR. This is possible because these ops have
8285 	 * fairly easy to understand and calculate behavior in both 32-bit and
8286 	 * 64-bit alu ops. See alu32 verifier tests for examples. The second
8287 	 * class of operations, BPF_LSH, BPF_RSH and BPF_ARSH, is not so easy
8288 	 * with regards to tracking sign/unsigned bounds because the bits may
8289 	 * cross subreg boundaries in the alu64 case. When this happens we mark
8290 	 * the reg unbounded in the subreg bound space and use the resulting
8291 	 * tnum to calculate an approximation of the sign/unsigned bounds.
8292 	 */
8293 	switch (opcode) {
8294 	case BPF_ADD:
8295 		scalar32_min_max_add(dst_reg, &src_reg);
8296 		scalar_min_max_add(dst_reg, &src_reg);
8297 		dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);
8298 		break;
8299 	case BPF_SUB:
8300 		scalar32_min_max_sub(dst_reg, &src_reg);
8301 		scalar_min_max_sub(dst_reg, &src_reg);
8302 		dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off);
8303 		break;
8304 	case BPF_MUL:
8305 		dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off);
8306 		scalar32_min_max_mul(dst_reg, &src_reg);
8307 		scalar_min_max_mul(dst_reg, &src_reg);
8308 		break;
8309 	case BPF_AND:
8310 		dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off);
8311 		scalar32_min_max_and(dst_reg, &src_reg);
8312 		scalar_min_max_and(dst_reg, &src_reg);
8313 		break;
8314 	case BPF_OR:
8315 		dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off);
8316 		scalar32_min_max_or(dst_reg, &src_reg);
8317 		scalar_min_max_or(dst_reg, &src_reg);
8318 		break;
8319 	case BPF_XOR:
8320 		dst_reg->var_off = tnum_xor(dst_reg->var_off, src_reg.var_off);
8321 		scalar32_min_max_xor(dst_reg, &src_reg);
8322 		scalar_min_max_xor(dst_reg, &src_reg);
8323 		break;
8324 	case BPF_LSH:
8325 		if (umax_val >= insn_bitness) {
8326 			/* Shifts greater than 31 or 63 are undefined.
8327 			 * This includes shifts by a negative number.
8328 			 */
8329 			mark_reg_unknown(env, regs, insn->dst_reg);
8330 			break;
8331 		}
8332 		if (alu32)
8333 			scalar32_min_max_lsh(dst_reg, &src_reg);
8334 		else
8335 			scalar_min_max_lsh(dst_reg, &src_reg);
8336 		break;
8337 	case BPF_RSH:
8338 		if (umax_val >= insn_bitness) {
8339 			/* Shifts greater than 31 or 63 are undefined.
8340 			 * This includes shifts by a negative number.
8341 			 */
8342 			mark_reg_unknown(env, regs, insn->dst_reg);
8343 			break;
8344 		}
8345 		if (alu32)
8346 			scalar32_min_max_rsh(dst_reg, &src_reg);
8347 		else
8348 			scalar_min_max_rsh(dst_reg, &src_reg);
8349 		break;
8350 	case BPF_ARSH:
8351 		if (umax_val >= insn_bitness) {
8352 			/* Shifts greater than 31 or 63 are undefined.
8353 			 * This includes shifts by a negative number.
8354 			 */
8355 			mark_reg_unknown(env, regs, insn->dst_reg);
8356 			break;
8357 		}
8358 		if (alu32)
8359 			scalar32_min_max_arsh(dst_reg, &src_reg);
8360 		else
8361 			scalar_min_max_arsh(dst_reg, &src_reg);
8362 		break;
8363 	default:
8364 		mark_reg_unknown(env, regs, insn->dst_reg);
8365 		break;
8366 	}
8367 
8368 	/* ALU32 ops are zero-extended into the 64-bit register */
8369 	if (alu32)
8370 		zext_32_to_64(dst_reg);
8371 
8372 	__update_reg_bounds(dst_reg);
8373 	__reg_deduce_bounds(dst_reg);
8374 	__reg_bound_offset(dst_reg);
8375 	return 0;
8376 }
8377 
8378 /* Handles ALU ops other than BPF_END, BPF_NEG and BPF_MOV: computes new min/max
8379  * and var_off.
8380  */
8381 static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
8382 				   struct bpf_insn *insn)
8383 {
8384 	struct bpf_verifier_state *vstate = env->cur_state;
8385 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
8386 	struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg;
8387 	struct bpf_reg_state *ptr_reg = NULL, off_reg = {0};
8388 	u8 opcode = BPF_OP(insn->code);
8389 	int err;
8390 
8391 	dst_reg = &regs[insn->dst_reg];
8392 	src_reg = NULL;
8393 	if (dst_reg->type != SCALAR_VALUE)
8394 		ptr_reg = dst_reg;
8395 	else
8396 		/* Make sure ID is cleared otherwise dst_reg min/max could be
8397 		 * incorrectly propagated into other registers by find_equal_scalars()
8398 		 */
8399 		dst_reg->id = 0;
8400 	if (BPF_SRC(insn->code) == BPF_X) {
8401 		src_reg = &regs[insn->src_reg];
8402 		if (src_reg->type != SCALAR_VALUE) {
8403 			if (dst_reg->type != SCALAR_VALUE) {
8404 				/* Combining two pointers by any ALU op yields
8405 				 * an arbitrary scalar. Disallow all math except
8406 				 * pointer subtraction
8407 				 */
8408 				if (opcode == BPF_SUB && env->allow_ptr_leaks) {
8409 					mark_reg_unknown(env, regs, insn->dst_reg);
8410 					return 0;
8411 				}
8412 				verbose(env, "R%d pointer %s pointer prohibited\n",
8413 					insn->dst_reg,
8414 					bpf_alu_string[opcode >> 4]);
8415 				return -EACCES;
8416 			} else {
8417 				/* scalar += pointer
8418 				 * This is legal, but we have to reverse our
8419 				 * src/dest handling in computing the range
8420 				 */
8421 				err = mark_chain_precision(env, insn->dst_reg);
8422 				if (err)
8423 					return err;
8424 				return adjust_ptr_min_max_vals(env, insn,
8425 							       src_reg, dst_reg);
8426 			}
8427 		} else if (ptr_reg) {
8428 			/* pointer += scalar */
8429 			err = mark_chain_precision(env, insn->src_reg);
8430 			if (err)
8431 				return err;
8432 			return adjust_ptr_min_max_vals(env, insn,
8433 						       dst_reg, src_reg);
8434 		}
8435 	} else {
8436 		/* Pretend the src is a reg with a known value, since we only
8437 		 * need to be able to read from this state.
8438 		 */
8439 		off_reg.type = SCALAR_VALUE;
8440 		__mark_reg_known(&off_reg, insn->imm);
8441 		src_reg = &off_reg;
8442 		if (ptr_reg) /* pointer += K */
8443 			return adjust_ptr_min_max_vals(env, insn,
8444 						       ptr_reg, src_reg);
8445 	}
8446 
8447 	/* Got here implies adding two SCALAR_VALUEs */
8448 	if (WARN_ON_ONCE(ptr_reg)) {
8449 		print_verifier_state(env, state, true);
8450 		verbose(env, "verifier internal error: unexpected ptr_reg\n");
8451 		return -EINVAL;
8452 	}
8453 	if (WARN_ON(!src_reg)) {
8454 		print_verifier_state(env, state, true);
8455 		verbose(env, "verifier internal error: no src_reg\n");
8456 		return -EINVAL;
8457 	}
8458 	return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg);
8459 }
8460 
8461 /* check validity of 32-bit and 64-bit arithmetic operations */
8462 static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
8463 {
8464 	struct bpf_reg_state *regs = cur_regs(env);
8465 	u8 opcode = BPF_OP(insn->code);
8466 	int err;
8467 
8468 	if (opcode == BPF_END || opcode == BPF_NEG) {
8469 		if (opcode == BPF_NEG) {
8470 			if (BPF_SRC(insn->code) != 0 ||
8471 			    insn->src_reg != BPF_REG_0 ||
8472 			    insn->off != 0 || insn->imm != 0) {
8473 				verbose(env, "BPF_NEG uses reserved fields\n");
8474 				return -EINVAL;
8475 			}
8476 		} else {
8477 			if (insn->src_reg != BPF_REG_0 || insn->off != 0 ||
8478 			    (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) ||
8479 			    BPF_CLASS(insn->code) == BPF_ALU64) {
8480 				verbose(env, "BPF_END uses reserved fields\n");
8481 				return -EINVAL;
8482 			}
8483 		}
8484 
8485 		/* check src operand */
8486 		err = check_reg_arg(env, insn->dst_reg, SRC_OP);
8487 		if (err)
8488 			return err;
8489 
8490 		if (is_pointer_value(env, insn->dst_reg)) {
8491 			verbose(env, "R%d pointer arithmetic prohibited\n",
8492 				insn->dst_reg);
8493 			return -EACCES;
8494 		}
8495 
8496 		/* check dest operand */
8497 		err = check_reg_arg(env, insn->dst_reg, DST_OP);
8498 		if (err)
8499 			return err;
8500 
8501 	} else if (opcode == BPF_MOV) {
8502 
8503 		if (BPF_SRC(insn->code) == BPF_X) {
8504 			if (insn->imm != 0 || insn->off != 0) {
8505 				verbose(env, "BPF_MOV uses reserved fields\n");
8506 				return -EINVAL;
8507 			}
8508 
8509 			/* check src operand */
8510 			err = check_reg_arg(env, insn->src_reg, SRC_OP);
8511 			if (err)
8512 				return err;
8513 		} else {
8514 			if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
8515 				verbose(env, "BPF_MOV uses reserved fields\n");
8516 				return -EINVAL;
8517 			}
8518 		}
8519 
8520 		/* check dest operand, mark as required later */
8521 		err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
8522 		if (err)
8523 			return err;
8524 
8525 		if (BPF_SRC(insn->code) == BPF_X) {
8526 			struct bpf_reg_state *src_reg = regs + insn->src_reg;
8527 			struct bpf_reg_state *dst_reg = regs + insn->dst_reg;
8528 
8529 			if (BPF_CLASS(insn->code) == BPF_ALU64) {
8530 				/* case: R1 = R2
8531 				 * copy register state to dest reg
8532 				 */
8533 				if (src_reg->type == SCALAR_VALUE && !src_reg->id)
8534 					/* Assign src and dst registers the same ID
8535 					 * that will be used by find_equal_scalars()
8536 					 * to propagate min/max range.
8537 					 */
8538 					src_reg->id = ++env->id_gen;
8539 				*dst_reg = *src_reg;
8540 				dst_reg->live |= REG_LIVE_WRITTEN;
8541 				dst_reg->subreg_def = DEF_NOT_SUBREG;
8542 			} else {
8543 				/* R1 = (u32) R2 */
8544 				if (is_pointer_value(env, insn->src_reg)) {
8545 					verbose(env,
8546 						"R%d partial copy of pointer\n",
8547 						insn->src_reg);
8548 					return -EACCES;
8549 				} else if (src_reg->type == SCALAR_VALUE) {
8550 					*dst_reg = *src_reg;
8551 					/* Make sure ID is cleared otherwise
8552 					 * dst_reg min/max could be incorrectly
8553 					 * propagated into src_reg by find_equal_scalars()
8554 					 */
8555 					dst_reg->id = 0;
8556 					dst_reg->live |= REG_LIVE_WRITTEN;
8557 					dst_reg->subreg_def = env->insn_idx + 1;
8558 				} else {
8559 					mark_reg_unknown(env, regs,
8560 							 insn->dst_reg);
8561 				}
8562 				zext_32_to_64(dst_reg);
8563 
8564 				__update_reg_bounds(dst_reg);
8565 				__reg_deduce_bounds(dst_reg);
8566 				__reg_bound_offset(dst_reg);
8567 			}
8568 		} else {
8569 			/* case: R = imm
8570 			 * remember the value we stored into this reg
8571 			 */
8572 			/* clear any state __mark_reg_known doesn't set */
8573 			mark_reg_unknown(env, regs, insn->dst_reg);
8574 			regs[insn->dst_reg].type = SCALAR_VALUE;
8575 			if (BPF_CLASS(insn->code) == BPF_ALU64) {
8576 				__mark_reg_known(regs + insn->dst_reg,
8577 						 insn->imm);
8578 			} else {
8579 				__mark_reg_known(regs + insn->dst_reg,
8580 						 (u32)insn->imm);
8581 			}
8582 		}
8583 
8584 	} else if (opcode > BPF_END) {
8585 		verbose(env, "invalid BPF_ALU opcode %x\n", opcode);
8586 		return -EINVAL;
8587 
8588 	} else {	/* all other ALU ops: and, sub, xor, add, ... */
8589 
8590 		if (BPF_SRC(insn->code) == BPF_X) {
8591 			if (insn->imm != 0 || insn->off != 0) {
8592 				verbose(env, "BPF_ALU uses reserved fields\n");
8593 				return -EINVAL;
8594 			}
8595 			/* check src1 operand */
8596 			err = check_reg_arg(env, insn->src_reg, SRC_OP);
8597 			if (err)
8598 				return err;
8599 		} else {
8600 			if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
8601 				verbose(env, "BPF_ALU uses reserved fields\n");
8602 				return -EINVAL;
8603 			}
8604 		}
8605 
8606 		/* check src2 operand */
8607 		err = check_reg_arg(env, insn->dst_reg, SRC_OP);
8608 		if (err)
8609 			return err;
8610 
8611 		if ((opcode == BPF_MOD || opcode == BPF_DIV) &&
8612 		    BPF_SRC(insn->code) == BPF_K && insn->imm == 0) {
8613 			verbose(env, "div by zero\n");
8614 			return -EINVAL;
8615 		}
8616 
8617 		if ((opcode == BPF_LSH || opcode == BPF_RSH ||
8618 		     opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
8619 			int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;
8620 
8621 			if (insn->imm < 0 || insn->imm >= size) {
8622 				verbose(env, "invalid shift %d\n", insn->imm);
8623 				return -EINVAL;
8624 			}
8625 		}
8626 
8627 		/* check dest operand */
8628 		err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
8629 		if (err)
8630 			return err;
8631 
8632 		return adjust_reg_min_max_vals(env, insn);
8633 	}
8634 
8635 	return 0;
8636 }
8637 
8638 static void __find_good_pkt_pointers(struct bpf_func_state *state,
8639 				     struct bpf_reg_state *dst_reg,
8640 				     enum bpf_reg_type type, int new_range)
8641 {
8642 	struct bpf_reg_state *reg;
8643 	int i;
8644 
8645 	for (i = 0; i < MAX_BPF_REG; i++) {
8646 		reg = &state->regs[i];
8647 		if (reg->type == type && reg->id == dst_reg->id)
8648 			/* keep the maximum range already checked */
8649 			reg->range = max(reg->range, new_range);
8650 	}
8651 
8652 	bpf_for_each_spilled_reg(i, state, reg) {
8653 		if (!reg)
8654 			continue;
8655 		if (reg->type == type && reg->id == dst_reg->id)
8656 			reg->range = max(reg->range, new_range);
8657 	}
8658 }
8659 
8660 static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
8661 				   struct bpf_reg_state *dst_reg,
8662 				   enum bpf_reg_type type,
8663 				   bool range_right_open)
8664 {
8665 	int new_range, i;
8666 
8667 	if (dst_reg->off < 0 ||
8668 	    (dst_reg->off == 0 && range_right_open))
8669 		/* This doesn't give us any range */
8670 		return;
8671 
8672 	if (dst_reg->umax_value > MAX_PACKET_OFF ||
8673 	    dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF)
8674 		/* Risk of overflow.  For instance, ptr + (1<<63) may be less
8675 		 * than pkt_end, but that's because it's also less than pkt.
8676 		 */
8677 		return;
8678 
8679 	new_range = dst_reg->off;
8680 	if (range_right_open)
8681 		new_range++;
8682 
8683 	/* Examples for register markings:
8684 	 *
8685 	 * pkt_data in dst register:
8686 	 *
8687 	 *   r2 = r3;
8688 	 *   r2 += 8;
8689 	 *   if (r2 > pkt_end) goto <handle exception>
8690 	 *   <access okay>
8691 	 *
8692 	 *   r2 = r3;
8693 	 *   r2 += 8;
8694 	 *   if (r2 < pkt_end) goto <access okay>
8695 	 *   <handle exception>
8696 	 *
8697 	 *   Where:
8698 	 *     r2 == dst_reg, pkt_end == src_reg
8699 	 *     r2=pkt(id=n,off=8,r=0)
8700 	 *     r3=pkt(id=n,off=0,r=0)
8701 	 *
8702 	 * pkt_data in src register:
8703 	 *
8704 	 *   r2 = r3;
8705 	 *   r2 += 8;
8706 	 *   if (pkt_end >= r2) goto <access okay>
8707 	 *   <handle exception>
8708 	 *
8709 	 *   r2 = r3;
8710 	 *   r2 += 8;
8711 	 *   if (pkt_end <= r2) goto <handle exception>
8712 	 *   <access okay>
8713 	 *
8714 	 *   Where:
8715 	 *     pkt_end == dst_reg, r2 == src_reg
8716 	 *     r2=pkt(id=n,off=8,r=0)
8717 	 *     r3=pkt(id=n,off=0,r=0)
8718 	 *
8719 	 * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8)
8720 	 * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8)
8721 	 * and [r3, r3 + 8-1) respectively is safe to access depending on
8722 	 * the check.
8723 	 */
8724 
8725 	/* If our ids match, then we must have the same max_value.  And we
8726 	 * don't care about the other reg's fixed offset, since if it's too big
8727 	 * the range won't allow anything.
8728 	 * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16.
8729 	 */
8730 	for (i = 0; i <= vstate->curframe; i++)
8731 		__find_good_pkt_pointers(vstate->frame[i], dst_reg, type,
8732 					 new_range);
8733 }
8734 
8735 static int is_branch32_taken(struct bpf_reg_state *reg, u32 val, u8 opcode)
8736 {
8737 	struct tnum subreg = tnum_subreg(reg->var_off);
8738 	s32 sval = (s32)val;
8739 
8740 	switch (opcode) {
8741 	case BPF_JEQ:
8742 		if (tnum_is_const(subreg))
8743 			return !!tnum_equals_const(subreg, val);
8744 		break;
8745 	case BPF_JNE:
8746 		if (tnum_is_const(subreg))
8747 			return !tnum_equals_const(subreg, val);
8748 		break;
8749 	case BPF_JSET:
8750 		if ((~subreg.mask & subreg.value) & val)
8751 			return 1;
8752 		if (!((subreg.mask | subreg.value) & val))
8753 			return 0;
8754 		break;
8755 	case BPF_JGT:
8756 		if (reg->u32_min_value > val)
8757 			return 1;
8758 		else if (reg->u32_max_value <= val)
8759 			return 0;
8760 		break;
8761 	case BPF_JSGT:
8762 		if (reg->s32_min_value > sval)
8763 			return 1;
8764 		else if (reg->s32_max_value <= sval)
8765 			return 0;
8766 		break;
8767 	case BPF_JLT:
8768 		if (reg->u32_max_value < val)
8769 			return 1;
8770 		else if (reg->u32_min_value >= val)
8771 			return 0;
8772 		break;
8773 	case BPF_JSLT:
8774 		if (reg->s32_max_value < sval)
8775 			return 1;
8776 		else if (reg->s32_min_value >= sval)
8777 			return 0;
8778 		break;
8779 	case BPF_JGE:
8780 		if (reg->u32_min_value >= val)
8781 			return 1;
8782 		else if (reg->u32_max_value < val)
8783 			return 0;
8784 		break;
8785 	case BPF_JSGE:
8786 		if (reg->s32_min_value >= sval)
8787 			return 1;
8788 		else if (reg->s32_max_value < sval)
8789 			return 0;
8790 		break;
8791 	case BPF_JLE:
8792 		if (reg->u32_max_value <= val)
8793 			return 1;
8794 		else if (reg->u32_min_value > val)
8795 			return 0;
8796 		break;
8797 	case BPF_JSLE:
8798 		if (reg->s32_max_value <= sval)
8799 			return 1;
8800 		else if (reg->s32_min_value > sval)
8801 			return 0;
8802 		break;
8803 	}
8804 
8805 	return -1;
8806 }
8807 
8808 
8809 static int is_branch64_taken(struct bpf_reg_state *reg, u64 val, u8 opcode)
8810 {
8811 	s64 sval = (s64)val;
8812 
8813 	switch (opcode) {
8814 	case BPF_JEQ:
8815 		if (tnum_is_const(reg->var_off))
8816 			return !!tnum_equals_const(reg->var_off, val);
8817 		break;
8818 	case BPF_JNE:
8819 		if (tnum_is_const(reg->var_off))
8820 			return !tnum_equals_const(reg->var_off, val);
8821 		break;
8822 	case BPF_JSET:
8823 		if ((~reg->var_off.mask & reg->var_off.value) & val)
8824 			return 1;
8825 		if (!((reg->var_off.mask | reg->var_off.value) & val))
8826 			return 0;
8827 		break;
8828 	case BPF_JGT:
8829 		if (reg->umin_value > val)
8830 			return 1;
8831 		else if (reg->umax_value <= val)
8832 			return 0;
8833 		break;
8834 	case BPF_JSGT:
8835 		if (reg->smin_value > sval)
8836 			return 1;
8837 		else if (reg->smax_value <= sval)
8838 			return 0;
8839 		break;
8840 	case BPF_JLT:
8841 		if (reg->umax_value < val)
8842 			return 1;
8843 		else if (reg->umin_value >= val)
8844 			return 0;
8845 		break;
8846 	case BPF_JSLT:
8847 		if (reg->smax_value < sval)
8848 			return 1;
8849 		else if (reg->smin_value >= sval)
8850 			return 0;
8851 		break;
8852 	case BPF_JGE:
8853 		if (reg->umin_value >= val)
8854 			return 1;
8855 		else if (reg->umax_value < val)
8856 			return 0;
8857 		break;
8858 	case BPF_JSGE:
8859 		if (reg->smin_value >= sval)
8860 			return 1;
8861 		else if (reg->smax_value < sval)
8862 			return 0;
8863 		break;
8864 	case BPF_JLE:
8865 		if (reg->umax_value <= val)
8866 			return 1;
8867 		else if (reg->umin_value > val)
8868 			return 0;
8869 		break;
8870 	case BPF_JSLE:
8871 		if (reg->smax_value <= sval)
8872 			return 1;
8873 		else if (reg->smin_value > sval)
8874 			return 0;
8875 		break;
8876 	}
8877 
8878 	return -1;
8879 }
8880 
8881 /* compute branch direction of the expression "if (reg opcode val) goto target;"
8882  * and return:
8883  *  1 - branch will be taken and "goto target" will be executed
8884  *  0 - branch will not be taken and fall-through to next insn
8885  * -1 - unknown. Example: "if (reg < 5)" is unknown when register value
8886  *      range [0,10]
8887  */
8888 static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode,
8889 			   bool is_jmp32)
8890 {
8891 	if (__is_pointer_value(false, reg)) {
8892 		if (!reg_type_not_null(reg->type))
8893 			return -1;
8894 
8895 		/* If the pointer is valid, tests against zero will fail, so we
8896 		 * can use this to determine the branch taken.
8897 		 */
8898 		if (val != 0)
8899 			return -1;
8900 
8901 		switch (opcode) {
8902 		case BPF_JEQ:
8903 			return 0;
8904 		case BPF_JNE:
8905 			return 1;
8906 		default:
8907 			return -1;
8908 		}
8909 	}
8910 
8911 	if (is_jmp32)
8912 		return is_branch32_taken(reg, val, opcode);
8913 	return is_branch64_taken(reg, val, opcode);
8914 }
8915 
8916 static int flip_opcode(u32 opcode)
8917 {
8918 	/* How can we transform "a <op> b" into "b <op> a"? */
8919 	static const u8 opcode_flip[16] = {
8920 		/* these stay the same */
8921 		[BPF_JEQ  >> 4] = BPF_JEQ,
8922 		[BPF_JNE  >> 4] = BPF_JNE,
8923 		[BPF_JSET >> 4] = BPF_JSET,
8924 		/* these swap "lesser" and "greater" (L and G in the opcodes) */
8925 		[BPF_JGE  >> 4] = BPF_JLE,
8926 		[BPF_JGT  >> 4] = BPF_JLT,
8927 		[BPF_JLE  >> 4] = BPF_JGE,
8928 		[BPF_JLT  >> 4] = BPF_JGT,
8929 		[BPF_JSGE >> 4] = BPF_JSLE,
8930 		[BPF_JSGT >> 4] = BPF_JSLT,
8931 		[BPF_JSLE >> 4] = BPF_JSGE,
8932 		[BPF_JSLT >> 4] = BPF_JSGT
8933 	};
8934 	return opcode_flip[opcode >> 4];
8935 }
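/* Example: "r1 < r2" tests the same condition as "r2 > r1", so
 * flip_opcode(BPF_JLT) == BPF_JGT and flip_opcode(BPF_JSGE) == BPF_JSLE.
 * Callers use this when the roles of dst_reg and src_reg are swapped.
 */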
8936 
8937 static int is_pkt_ptr_branch_taken(struct bpf_reg_state *dst_reg,
8938 				   struct bpf_reg_state *src_reg,
8939 				   u8 opcode)
8940 {
8941 	struct bpf_reg_state *pkt;
8942 
8943 	if (src_reg->type == PTR_TO_PACKET_END) {
8944 		pkt = dst_reg;
8945 	} else if (dst_reg->type == PTR_TO_PACKET_END) {
8946 		pkt = src_reg;
8947 		opcode = flip_opcode(opcode);
8948 	} else {
8949 		return -1;
8950 	}
8951 
8952 	if (pkt->range >= 0)
8953 		return -1;
8954 
8955 	switch (opcode) {
8956 	case BPF_JLE:
8957 		/* pkt <= pkt_end */
8958 		fallthrough;
8959 	case BPF_JGT:
8960 		/* pkt > pkt_end */
8961 		if (pkt->range == BEYOND_PKT_END)
8962 			/* pkt has at least one extra byte beyond pkt_end */
8963 			return opcode == BPF_JGT;
8964 		break;
8965 	case BPF_JLT:
8966 		/* pkt < pkt_end */
8967 		fallthrough;
8968 	case BPF_JGE:
8969 		/* pkt >= pkt_end */
8970 		if (pkt->range == BEYOND_PKT_END || pkt->range == AT_PKT_END)
8971 			return opcode == BPF_JGE;
8972 		break;
8973 	}
8974 	return -1;
8975 }
8976 
8977 /* Adjusts the register min/max values in the case that the dst_reg is the
8978  * variable register that we are working on, and src_reg is a constant or we're
8979  * simply doing a BPF_K check.
8980  * In JEQ/JNE cases we also adjust the var_off values.
8981  */
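/* Worked example (for illustration): for "if (r1 > 5)" with val == 5 and
 * opcode == BPF_JGT, the BPF_JGE/BPF_JGT case below tightens
 *   true_reg->umin_value  to max(umin_value, 6)  (taken implies r1 >= 6)
 *   false_reg->umax_value to min(umax_value, 5)  (fall-through implies r1 <= 5)
 * The signed and 32-bit bounds are refined analogously by the other cases,
 * and the 32-bit and 64-bit views are reconciled at the end via
 * __reg_combine_32_into_64()/__reg_combine_64_into_32().
 */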
8982 static void reg_set_min_max(struct bpf_reg_state *true_reg,
8983 			    struct bpf_reg_state *false_reg,
8984 			    u64 val, u32 val32,
8985 			    u8 opcode, bool is_jmp32)
8986 {
8987 	struct tnum false_32off = tnum_subreg(false_reg->var_off);
8988 	struct tnum false_64off = false_reg->var_off;
8989 	struct tnum true_32off = tnum_subreg(true_reg->var_off);
8990 	struct tnum true_64off = true_reg->var_off;
8991 	s64 sval = (s64)val;
8992 	s32 sval32 = (s32)val32;
8993 
8994 	/* If the dst_reg is a pointer, we can't learn anything about its
8995 	 * variable offset from the compare (unless src_reg were a pointer into
8996 	 * the same object, but we don't bother with that).
8997 	 * Since false_reg and true_reg have the same type by construction, we
8998 	 * only need to check one of them for pointerness.
8999 	 */
9000 	if (__is_pointer_value(false, false_reg))
9001 		return;
9002 
9003 	switch (opcode) {
9004 	case BPF_JEQ:
9005 	case BPF_JNE:
9006 	{
9007 		struct bpf_reg_state *reg =
9008 			opcode == BPF_JEQ ? true_reg : false_reg;
9009 
9010 		/* JEQ/JNE comparison doesn't change the register equivalence.
9011 		 * r1 = r2;
9012 		 * if (r1 == 42) goto label;
9013 		 * ...
9014 		 * label: // here both r1 and r2 are known to be 42.
9015 		 *
9016 		 * Hence when marking a register as known, preserve its ID.
9017 		 */
9018 		if (is_jmp32)
9019 			__mark_reg32_known(reg, val32);
9020 		else
9021 			___mark_reg_known(reg, val);
9022 		break;
9023 	}
9024 	case BPF_JSET:
9025 		if (is_jmp32) {
9026 			false_32off = tnum_and(false_32off, tnum_const(~val32));
9027 			if (is_power_of_2(val32))
9028 				true_32off = tnum_or(true_32off,
9029 						     tnum_const(val32));
9030 		} else {
9031 			false_64off = tnum_and(false_64off, tnum_const(~val));
9032 			if (is_power_of_2(val))
9033 				true_64off = tnum_or(true_64off,
9034 						     tnum_const(val));
9035 		}
9036 		break;
9037 	case BPF_JGE:
9038 	case BPF_JGT:
9039 	{
9040 		if (is_jmp32) {
9041 			u32 false_umax = opcode == BPF_JGT ? val32  : val32 - 1;
9042 			u32 true_umin = opcode == BPF_JGT ? val32 + 1 : val32;
9043 
9044 			false_reg->u32_max_value = min(false_reg->u32_max_value,
9045 						       false_umax);
9046 			true_reg->u32_min_value = max(true_reg->u32_min_value,
9047 						      true_umin);
9048 		} else {
9049 			u64 false_umax = opcode == BPF_JGT ? val    : val - 1;
9050 			u64 true_umin = opcode == BPF_JGT ? val + 1 : val;
9051 
9052 			false_reg->umax_value = min(false_reg->umax_value, false_umax);
9053 			true_reg->umin_value = max(true_reg->umin_value, true_umin);
9054 		}
9055 		break;
9056 	}
9057 	case BPF_JSGE:
9058 	case BPF_JSGT:
9059 	{
9060 		if (is_jmp32) {
9061 			s32 false_smax = opcode == BPF_JSGT ? sval32    : sval32 - 1;
9062 			s32 true_smin = opcode == BPF_JSGT ? sval32 + 1 : sval32;
9063 
9064 			false_reg->s32_max_value = min(false_reg->s32_max_value, false_smax);
9065 			true_reg->s32_min_value = max(true_reg->s32_min_value, true_smin);
9066 		} else {
9067 			s64 false_smax = opcode == BPF_JSGT ? sval    : sval - 1;
9068 			s64 true_smin = opcode == BPF_JSGT ? sval + 1 : sval;
9069 
9070 			false_reg->smax_value = min(false_reg->smax_value, false_smax);
9071 			true_reg->smin_value = max(true_reg->smin_value, true_smin);
9072 		}
9073 		break;
9074 	}
9075 	case BPF_JLE:
9076 	case BPF_JLT:
9077 	{
9078 		if (is_jmp32) {
9079 			u32 false_umin = opcode == BPF_JLT ? val32  : val32 + 1;
9080 			u32 true_umax = opcode == BPF_JLT ? val32 - 1 : val32;
9081 
9082 			false_reg->u32_min_value = max(false_reg->u32_min_value,
9083 						       false_umin);
9084 			true_reg->u32_max_value = min(true_reg->u32_max_value,
9085 						      true_umax);
9086 		} else {
9087 			u64 false_umin = opcode == BPF_JLT ? val    : val + 1;
9088 			u64 true_umax = opcode == BPF_JLT ? val - 1 : val;
9089 
9090 			false_reg->umin_value = max(false_reg->umin_value, false_umin);
9091 			true_reg->umax_value = min(true_reg->umax_value, true_umax);
9092 		}
9093 		break;
9094 	}
9095 	case BPF_JSLE:
9096 	case BPF_JSLT:
9097 	{
9098 		if (is_jmp32) {
9099 			s32 false_smin = opcode == BPF_JSLT ? sval32    : sval32 + 1;
9100 			s32 true_smax = opcode == BPF_JSLT ? sval32 - 1 : sval32;
9101 
9102 			false_reg->s32_min_value = max(false_reg->s32_min_value, false_smin);
9103 			true_reg->s32_max_value = min(true_reg->s32_max_value, true_smax);
9104 		} else {
9105 			s64 false_smin = opcode == BPF_JSLT ? sval    : sval + 1;
9106 			s64 true_smax = opcode == BPF_JSLT ? sval - 1 : sval;
9107 
9108 			false_reg->smin_value = max(false_reg->smin_value, false_smin);
9109 			true_reg->smax_value = min(true_reg->smax_value, true_smax);
9110 		}
9111 		break;
9112 	}
9113 	default:
9114 		return;
9115 	}
9116 
9117 	if (is_jmp32) {
9118 		false_reg->var_off = tnum_or(tnum_clear_subreg(false_64off),
9119 					     tnum_subreg(false_32off));
9120 		true_reg->var_off = tnum_or(tnum_clear_subreg(true_64off),
9121 					    tnum_subreg(true_32off));
9122 		__reg_combine_32_into_64(false_reg);
9123 		__reg_combine_32_into_64(true_reg);
9124 	} else {
9125 		false_reg->var_off = false_64off;
9126 		true_reg->var_off = true_64off;
9127 		__reg_combine_64_into_32(false_reg);
9128 		__reg_combine_64_into_32(true_reg);
9129 	}
9130 }
9131 
9132 /* Same as above, but for the case that dst_reg holds a constant and src_reg is
9133  * the variable reg.
9134  */
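/* E.g. when dst_reg is known to hold the constant 10, "if (r_dst > r_src)"
 * is equivalent to "if (r_src < 10)": the opcode is flipped and
 * reg_set_min_max() then refines the variable register's bounds as usual.
 */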
9135 static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
9136 				struct bpf_reg_state *false_reg,
9137 				u64 val, u32 val32,
9138 				u8 opcode, bool is_jmp32)
9139 {
9140 	opcode = flip_opcode(opcode);
9141 	/* This uses zero as "not present in table"; luckily the zero opcode,
9142 	 * BPF_JA, can't get here.
9143 	 */
9144 	if (opcode)
9145 		reg_set_min_max(true_reg, false_reg, val, val32, opcode, is_jmp32);
9146 }
9147 
9148 /* Regs are known to be equal, so intersect their min/max/var_off */
9149 static void __reg_combine_min_max(struct bpf_reg_state *src_reg,
9150 				  struct bpf_reg_state *dst_reg)
9151 {
9152 	src_reg->umin_value = dst_reg->umin_value = max(src_reg->umin_value,
9153 							dst_reg->umin_value);
9154 	src_reg->umax_value = dst_reg->umax_value = min(src_reg->umax_value,
9155 							dst_reg->umax_value);
9156 	src_reg->smin_value = dst_reg->smin_value = max(src_reg->smin_value,
9157 							dst_reg->smin_value);
9158 	src_reg->smax_value = dst_reg->smax_value = min(src_reg->smax_value,
9159 							dst_reg->smax_value);
9160 	src_reg->var_off = dst_reg->var_off = tnum_intersect(src_reg->var_off,
9161 							     dst_reg->var_off);
9162 	/* We might have learned new bounds from the var_off. */
9163 	__update_reg_bounds(src_reg);
9164 	__update_reg_bounds(dst_reg);
9165 	/* We might have learned something about the sign bit. */
9166 	__reg_deduce_bounds(src_reg);
9167 	__reg_deduce_bounds(dst_reg);
9168 	/* We might have learned some bits from the bounds. */
9169 	__reg_bound_offset(src_reg);
9170 	__reg_bound_offset(dst_reg);
9171 	/* Intersecting with the old var_off might have improved our bounds
9172 	 * slightly.  e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
9173 	 * then new var_off is (0; 0x7f...fc) which improves our umax.
9174 	 */
9175 	__update_reg_bounds(src_reg);
9176 	__update_reg_bounds(dst_reg);
9177 }
9178 
9179 static void reg_combine_min_max(struct bpf_reg_state *true_src,
9180 				struct bpf_reg_state *true_dst,
9181 				struct bpf_reg_state *false_src,
9182 				struct bpf_reg_state *false_dst,
9183 				u8 opcode)
9184 {
9185 	switch (opcode) {
9186 	case BPF_JEQ:
9187 		__reg_combine_min_max(true_src, true_dst);
9188 		break;
9189 	case BPF_JNE:
9190 		__reg_combine_min_max(false_src, false_dst);
9191 		break;
9192 	}
9193 }
9194 
9195 static void mark_ptr_or_null_reg(struct bpf_func_state *state,
9196 				 struct bpf_reg_state *reg, u32 id,
9197 				 bool is_null)
9198 {
9199 	if (type_may_be_null(reg->type) && reg->id == id &&
9200 	    !WARN_ON_ONCE(!reg->id)) {
9201 		if (WARN_ON_ONCE(reg->smin_value || reg->smax_value ||
9202 				 !tnum_equals_const(reg->var_off, 0) ||
9203 				 reg->off)) {
9204 			/* Old offset (both fixed and variable parts) should
9205 			 * have been known-zero, because we don't allow pointer
9206 			 * arithmetic on pointers that might be NULL. If we
9207 			 * see this happening, don't convert the register.
9208 			 */
9209 			return;
9210 		}
9211 		if (is_null) {
9212 			reg->type = SCALAR_VALUE;
9213 			/* We don't need id and ref_obj_id from this point
9214 			 * onwards anymore, so reset them to give state
9215 			 * pruning a chance to take effect.
9216 			 */
9217 			reg->id = 0;
9218 			reg->ref_obj_id = 0;
9219 
9220 			return;
9221 		}
9222 
9223 		mark_ptr_not_null_reg(reg);
9224 
9225 		if (!reg_may_point_to_spin_lock(reg)) {
9226 			/* For not-NULL ptr, reg->ref_obj_id will be reset
9227 			 * in release_reg_references().
9228 			 *
9229 			 * reg->id is still used by spin_lock ptr. Other
9230 			 * than spin_lock ptr type, reg->id can be reset.
9231 			 */
9232 			reg->id = 0;
9233 		}
9234 	}
9235 }
9236 
9237 static void __mark_ptr_or_null_regs(struct bpf_func_state *state, u32 id,
9238 				    bool is_null)
9239 {
9240 	struct bpf_reg_state *reg;
9241 	int i;
9242 
9243 	for (i = 0; i < MAX_BPF_REG; i++)
9244 		mark_ptr_or_null_reg(state, &state->regs[i], id, is_null);
9245 
9246 	bpf_for_each_spilled_reg(i, state, reg) {
9247 		if (!reg)
9248 			continue;
9249 		mark_ptr_or_null_reg(state, reg, id, is_null);
9250 	}
9251 }
9252 
9253 /* The logic is similar to find_good_pkt_pointers(), both could eventually
9254  * be folded together at some point.
9255  */
9256 static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno,
9257 				  bool is_null)
9258 {
9259 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
9260 	struct bpf_reg_state *regs = state->regs;
9261 	u32 ref_obj_id = regs[regno].ref_obj_id;
9262 	u32 id = regs[regno].id;
9263 	int i;
9264 
9265 	if (ref_obj_id && ref_obj_id == id && is_null)
9266 		/* regs[regno] is in the " == NULL" branch.
9267 		 * No one could have freed the reference state before
9268 		 * doing the NULL check.
9269 		 */
9270 		WARN_ON_ONCE(release_reference_state(state, id));
9271 
9272 	for (i = 0; i <= vstate->curframe; i++)
9273 		__mark_ptr_or_null_regs(vstate->frame[i], id, is_null);
9274 }
9275 
9276 static bool try_match_pkt_pointers(const struct bpf_insn *insn,
9277 				   struct bpf_reg_state *dst_reg,
9278 				   struct bpf_reg_state *src_reg,
9279 				   struct bpf_verifier_state *this_branch,
9280 				   struct bpf_verifier_state *other_branch)
9281 {
9282 	if (BPF_SRC(insn->code) != BPF_X)
9283 		return false;
9284 
9285 	/* Pointers are always 64-bit. */
9286 	if (BPF_CLASS(insn->code) == BPF_JMP32)
9287 		return false;
9288 
9289 	switch (BPF_OP(insn->code)) {
9290 	case BPF_JGT:
9291 		if ((dst_reg->type == PTR_TO_PACKET &&
9292 		     src_reg->type == PTR_TO_PACKET_END) ||
9293 		    (dst_reg->type == PTR_TO_PACKET_META &&
9294 		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
9295 			/* pkt_data' > pkt_end, pkt_meta' > pkt_data */
9296 			find_good_pkt_pointers(this_branch, dst_reg,
9297 					       dst_reg->type, false);
9298 			mark_pkt_end(other_branch, insn->dst_reg, true);
9299 		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
9300 			    src_reg->type == PTR_TO_PACKET) ||
9301 			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
9302 			    src_reg->type == PTR_TO_PACKET_META)) {
9303 			/* pkt_end > pkt_data', pkt_data > pkt_meta' */
9304 			find_good_pkt_pointers(other_branch, src_reg,
9305 					       src_reg->type, true);
9306 			mark_pkt_end(this_branch, insn->src_reg, false);
9307 		} else {
9308 			return false;
9309 		}
9310 		break;
9311 	case BPF_JLT:
9312 		if ((dst_reg->type == PTR_TO_PACKET &&
9313 		     src_reg->type == PTR_TO_PACKET_END) ||
9314 		    (dst_reg->type == PTR_TO_PACKET_META &&
9315 		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
9316 			/* pkt_data' < pkt_end, pkt_meta' < pkt_data */
9317 			find_good_pkt_pointers(other_branch, dst_reg,
9318 					       dst_reg->type, true);
9319 			mark_pkt_end(this_branch, insn->dst_reg, false);
9320 		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
9321 			    src_reg->type == PTR_TO_PACKET) ||
9322 			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
9323 			    src_reg->type == PTR_TO_PACKET_META)) {
9324 			/* pkt_end < pkt_data', pkt_data < pkt_meta' */
9325 			find_good_pkt_pointers(this_branch, src_reg,
9326 					       src_reg->type, false);
9327 			mark_pkt_end(other_branch, insn->src_reg, true);
9328 		} else {
9329 			return false;
9330 		}
9331 		break;
9332 	case BPF_JGE:
9333 		if ((dst_reg->type == PTR_TO_PACKET &&
9334 		     src_reg->type == PTR_TO_PACKET_END) ||
9335 		    (dst_reg->type == PTR_TO_PACKET_META &&
9336 		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
9337 			/* pkt_data' >= pkt_end, pkt_meta' >= pkt_data */
9338 			find_good_pkt_pointers(this_branch, dst_reg,
9339 					       dst_reg->type, true);
9340 			mark_pkt_end(other_branch, insn->dst_reg, false);
9341 		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
9342 			    src_reg->type == PTR_TO_PACKET) ||
9343 			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
9344 			    src_reg->type == PTR_TO_PACKET_META)) {
9345 			/* pkt_end >= pkt_data', pkt_data >= pkt_meta' */
9346 			find_good_pkt_pointers(other_branch, src_reg,
9347 					       src_reg->type, false);
9348 			mark_pkt_end(this_branch, insn->src_reg, true);
9349 		} else {
9350 			return false;
9351 		}
9352 		break;
9353 	case BPF_JLE:
9354 		if ((dst_reg->type == PTR_TO_PACKET &&
9355 		     src_reg->type == PTR_TO_PACKET_END) ||
9356 		    (dst_reg->type == PTR_TO_PACKET_META &&
9357 		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
9358 			/* pkt_data' <= pkt_end, pkt_meta' <= pkt_data */
9359 			find_good_pkt_pointers(other_branch, dst_reg,
9360 					       dst_reg->type, false);
9361 			mark_pkt_end(this_branch, insn->dst_reg, true);
9362 		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
9363 			    src_reg->type == PTR_TO_PACKET) ||
9364 			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
9365 			    src_reg->type == PTR_TO_PACKET_META)) {
9366 			/* pkt_end <= pkt_data', pkt_data <= pkt_meta' */
9367 			find_good_pkt_pointers(this_branch, src_reg,
9368 					       src_reg->type, true);
9369 			mark_pkt_end(other_branch, insn->src_reg, false);
9370 		} else {
9371 			return false;
9372 		}
9373 		break;
9374 	default:
9375 		return false;
9376 	}
9377 
9378 	return true;
9379 }
9380 
9381 static void find_equal_scalars(struct bpf_verifier_state *vstate,
9382 			       struct bpf_reg_state *known_reg)
9383 {
9384 	struct bpf_func_state *state;
9385 	struct bpf_reg_state *reg;
9386 	int i, j;
9387 
9388 	for (i = 0; i <= vstate->curframe; i++) {
9389 		state = vstate->frame[i];
9390 		for (j = 0; j < MAX_BPF_REG; j++) {
9391 			reg = &state->regs[j];
9392 			if (reg->type == SCALAR_VALUE && reg->id == known_reg->id)
9393 				*reg = *known_reg;
9394 		}
9395 
9396 		bpf_for_each_spilled_reg(j, state, reg) {
9397 			if (!reg)
9398 				continue;
9399 			if (reg->type == SCALAR_VALUE && reg->id == known_reg->id)
9400 				*reg = *known_reg;
9401 		}
9402 	}
9403 }
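/* Example: after "r2 = r1" both registers share an id, so when a branch
 * such as "if (r1 > 5)" refines r1's bounds via reg_set_min_max(), the
 * caller below passes the refined register to find_equal_scalars() and the
 * new state is copied to r2 (and any spilled copies) in every frame.
 */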
9404 
9405 static int check_cond_jmp_op(struct bpf_verifier_env *env,
9406 			     struct bpf_insn *insn, int *insn_idx)
9407 {
9408 	struct bpf_verifier_state *this_branch = env->cur_state;
9409 	struct bpf_verifier_state *other_branch;
9410 	struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs;
9411 	struct bpf_reg_state *dst_reg, *other_branch_regs, *src_reg = NULL;
9412 	u8 opcode = BPF_OP(insn->code);
9413 	bool is_jmp32;
9414 	int pred = -1;
9415 	int err;
9416 
9417 	/* Only conditional jumps are expected to reach here. */
9418 	if (opcode == BPF_JA || opcode > BPF_JSLE) {
9419 		verbose(env, "invalid BPF_JMP/JMP32 opcode %x\n", opcode);
9420 		return -EINVAL;
9421 	}
9422 
9423 	if (BPF_SRC(insn->code) == BPF_X) {
9424 		if (insn->imm != 0) {
9425 			verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
9426 			return -EINVAL;
9427 		}
9428 
9429 		/* check src1 operand */
9430 		err = check_reg_arg(env, insn->src_reg, SRC_OP);
9431 		if (err)
9432 			return err;
9433 
9434 		if (is_pointer_value(env, insn->src_reg)) {
9435 			verbose(env, "R%d pointer comparison prohibited\n",
9436 				insn->src_reg);
9437 			return -EACCES;
9438 		}
9439 		src_reg = &regs[insn->src_reg];
9440 	} else {
9441 		if (insn->src_reg != BPF_REG_0) {
9442 			verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
9443 			return -EINVAL;
9444 		}
9445 	}
9446 
9447 	/* check src2 operand */
9448 	err = check_reg_arg(env, insn->dst_reg, SRC_OP);
9449 	if (err)
9450 		return err;
9451 
9452 	dst_reg = &regs[insn->dst_reg];
9453 	is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
9454 
9455 	if (BPF_SRC(insn->code) == BPF_K) {
9456 		pred = is_branch_taken(dst_reg, insn->imm, opcode, is_jmp32);
9457 	} else if (src_reg->type == SCALAR_VALUE &&
9458 		   is_jmp32 && tnum_is_const(tnum_subreg(src_reg->var_off))) {
9459 		pred = is_branch_taken(dst_reg,
9460 				       tnum_subreg(src_reg->var_off).value,
9461 				       opcode,
9462 				       is_jmp32);
9463 	} else if (src_reg->type == SCALAR_VALUE &&
9464 		   !is_jmp32 && tnum_is_const(src_reg->var_off)) {
9465 		pred = is_branch_taken(dst_reg,
9466 				       src_reg->var_off.value,
9467 				       opcode,
9468 				       is_jmp32);
9469 	} else if (reg_is_pkt_pointer_any(dst_reg) &&
9470 		   reg_is_pkt_pointer_any(src_reg) &&
9471 		   !is_jmp32) {
9472 		pred = is_pkt_ptr_branch_taken(dst_reg, src_reg, opcode);
9473 	}
9474 
9475 	if (pred >= 0) {
9476 		/* If we get here with a dst_reg pointer type, it is because
9477 		 * is_branch_taken() above special-cased the 0 comparison.
9478 		 */
9479 		if (!__is_pointer_value(false, dst_reg))
9480 			err = mark_chain_precision(env, insn->dst_reg);
9481 		if (BPF_SRC(insn->code) == BPF_X && !err &&
9482 		    !__is_pointer_value(false, src_reg))
9483 			err = mark_chain_precision(env, insn->src_reg);
9484 		if (err)
9485 			return err;
9486 	}
9487 
9488 	if (pred == 1) {
9489 		/* Only follow the goto, ignore fall-through. If needed, push
9490 		 * the fall-through branch for simulation under speculative
9491 		 * execution.
9492 		 */
9493 		if (!env->bypass_spec_v1 &&
9494 		    !sanitize_speculative_path(env, insn, *insn_idx + 1,
9495 					       *insn_idx))
9496 			return -EFAULT;
9497 		*insn_idx += insn->off;
9498 		return 0;
9499 	} else if (pred == 0) {
9500 		/* Only follow the fall-through branch, since that's where the
9501 		 * program will go. If needed, push the goto branch for
9502 		 * simulation under speculative execution.
9503 		 */
9504 		if (!env->bypass_spec_v1 &&
9505 		    !sanitize_speculative_path(env, insn,
9506 					       *insn_idx + insn->off + 1,
9507 					       *insn_idx))
9508 			return -EFAULT;
9509 		return 0;
9510 	}
9511 
9512 	other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx,
9513 				  false);
9514 	if (!other_branch)
9515 		return -EFAULT;
9516 	other_branch_regs = other_branch->frame[other_branch->curframe]->regs;
9517 
9518 	/* detect if we are comparing against a constant value so we can adjust
9519 	 * our min/max values for our dst register.
9520 	 * this is only legit if both are scalars (or pointers to the same
9521 	 * object, I suppose, but we don't support that right now), because
9522 	 * otherwise the different base pointers mean the offsets aren't
9523 	 * comparable.
9524 	 */
9525 	if (BPF_SRC(insn->code) == BPF_X) {
9526 		struct bpf_reg_state *src_reg = &regs[insn->src_reg];
9527 
9528 		if (dst_reg->type == SCALAR_VALUE &&
9529 		    src_reg->type == SCALAR_VALUE) {
9530 			if (tnum_is_const(src_reg->var_off) ||
9531 			    (is_jmp32 &&
9532 			     tnum_is_const(tnum_subreg(src_reg->var_off))))
9533 				reg_set_min_max(&other_branch_regs[insn->dst_reg],
9534 						dst_reg,
9535 						src_reg->var_off.value,
9536 						tnum_subreg(src_reg->var_off).value,
9537 						opcode, is_jmp32);
9538 			else if (tnum_is_const(dst_reg->var_off) ||
9539 				 (is_jmp32 &&
9540 				  tnum_is_const(tnum_subreg(dst_reg->var_off))))
9541 				reg_set_min_max_inv(&other_branch_regs[insn->src_reg],
9542 						    src_reg,
9543 						    dst_reg->var_off.value,
9544 						    tnum_subreg(dst_reg->var_off).value,
9545 						    opcode, is_jmp32);
9546 			else if (!is_jmp32 &&
9547 				 (opcode == BPF_JEQ || opcode == BPF_JNE))
9548 				/* Comparing for equality, we can combine knowledge */
9549 				reg_combine_min_max(&other_branch_regs[insn->src_reg],
9550 						    &other_branch_regs[insn->dst_reg],
9551 						    src_reg, dst_reg, opcode);
9552 			if (src_reg->id &&
9553 			    !WARN_ON_ONCE(src_reg->id != other_branch_regs[insn->src_reg].id)) {
9554 				find_equal_scalars(this_branch, src_reg);
9555 				find_equal_scalars(other_branch, &other_branch_regs[insn->src_reg]);
9556 			}
9557 
9558 		}
9559 	} else if (dst_reg->type == SCALAR_VALUE) {
9560 		reg_set_min_max(&other_branch_regs[insn->dst_reg],
9561 					dst_reg, insn->imm, (u32)insn->imm,
9562 					opcode, is_jmp32);
9563 	}
9564 
9565 	if (dst_reg->type == SCALAR_VALUE && dst_reg->id &&
9566 	    !WARN_ON_ONCE(dst_reg->id != other_branch_regs[insn->dst_reg].id)) {
9567 		find_equal_scalars(this_branch, dst_reg);
9568 		find_equal_scalars(other_branch, &other_branch_regs[insn->dst_reg]);
9569 	}
9570 
9571 	/* detect if R == 0 where R is returned from bpf_map_lookup_elem().
9572 	 * NOTE: the optimizations below relate to pointer comparisons,
9573 	 *       which will never be JMP32.
9574 	 */
9575 	if (!is_jmp32 && BPF_SRC(insn->code) == BPF_K &&
9576 	    insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) &&
9577 	    type_may_be_null(dst_reg->type)) {
9578 		/* Mark all identical registers in each branch as either
9579 		 * safe or unknown, depending on the R == 0 or R != 0 conditional.
9580 		 */
9581 		mark_ptr_or_null_regs(this_branch, insn->dst_reg,
9582 				      opcode == BPF_JNE);
9583 		mark_ptr_or_null_regs(other_branch, insn->dst_reg,
9584 				      opcode == BPF_JEQ);
9585 	} else if (!try_match_pkt_pointers(insn, dst_reg, &regs[insn->src_reg],
9586 					   this_branch, other_branch) &&
9587 		   is_pointer_value(env, insn->dst_reg)) {
9588 		verbose(env, "R%d pointer comparison prohibited\n",
9589 			insn->dst_reg);
9590 		return -EACCES;
9591 	}
9592 	if (env->log.level & BPF_LOG_LEVEL)
9593 		print_insn_state(env, this_branch->frame[this_branch->curframe]);
9594 	return 0;
9595 }
9596 
9597 /* verify BPF_LD_IMM64 instruction */
9598 static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
9599 {
9600 	struct bpf_insn_aux_data *aux = cur_aux(env);
9601 	struct bpf_reg_state *regs = cur_regs(env);
9602 	struct bpf_reg_state *dst_reg;
9603 	struct bpf_map *map;
9604 	int err;
9605 
9606 	if (BPF_SIZE(insn->code) != BPF_DW) {
9607 		verbose(env, "invalid BPF_LD_IMM insn\n");
9608 		return -EINVAL;
9609 	}
9610 	if (insn->off != 0) {
9611 		verbose(env, "BPF_LD_IMM64 uses reserved fields\n");
9612 		return -EINVAL;
9613 	}
9614 
9615 	err = check_reg_arg(env, insn->dst_reg, DST_OP);
9616 	if (err)
9617 		return err;
9618 
9619 	dst_reg = &regs[insn->dst_reg];
9620 	if (insn->src_reg == 0) {
9621 		u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
9622 
9623 		dst_reg->type = SCALAR_VALUE;
9624 		__mark_reg_known(&regs[insn->dst_reg], imm);
9625 		return 0;
9626 	}
9627 
9628 	/* All special src_reg cases are listed below. From this point onwards
9629 	 * we either succeed and assign a corresponding dst_reg->type after
9630 	 * zeroing the offset, or fail and reject the program.
9631 	 */
9632 	mark_reg_known_zero(env, regs, insn->dst_reg);
9633 
9634 	if (insn->src_reg == BPF_PSEUDO_BTF_ID) {
9635 		dst_reg->type = aux->btf_var.reg_type;
9636 		switch (base_type(dst_reg->type)) {
9637 		case PTR_TO_MEM:
9638 			dst_reg->mem_size = aux->btf_var.mem_size;
9639 			break;
9640 		case PTR_TO_BTF_ID:
9641 		case PTR_TO_PERCPU_BTF_ID:
9642 			dst_reg->btf = aux->btf_var.btf;
9643 			dst_reg->btf_id = aux->btf_var.btf_id;
9644 			break;
9645 		default:
9646 			verbose(env, "bpf verifier is misconfigured\n");
9647 			return -EFAULT;
9648 		}
9649 		return 0;
9650 	}
9651 
9652 	if (insn->src_reg == BPF_PSEUDO_FUNC) {
9653 		struct bpf_prog_aux *aux = env->prog->aux;
9654 		u32 subprogno = find_subprog(env,
9655 					     env->insn_idx + insn->imm + 1);
9656 
9657 		if (!aux->func_info) {
9658 			verbose(env, "missing btf func_info\n");
9659 			return -EINVAL;
9660 		}
9661 		if (aux->func_info_aux[subprogno].linkage != BTF_FUNC_STATIC) {
9662 			verbose(env, "callback function not static\n");
9663 			return -EINVAL;
9664 		}
9665 
9666 		dst_reg->type = PTR_TO_FUNC;
9667 		dst_reg->subprogno = subprogno;
9668 		return 0;
9669 	}
9670 
9671 	map = env->used_maps[aux->map_index];
9672 	dst_reg->map_ptr = map;
9673 
9674 	if (insn->src_reg == BPF_PSEUDO_MAP_VALUE ||
9675 	    insn->src_reg == BPF_PSEUDO_MAP_IDX_VALUE) {
9676 		dst_reg->type = PTR_TO_MAP_VALUE;
9677 		dst_reg->off = aux->map_off;
9678 		if (map_value_has_spin_lock(map))
9679 			dst_reg->id = ++env->id_gen;
9680 	} else if (insn->src_reg == BPF_PSEUDO_MAP_FD ||
9681 		   insn->src_reg == BPF_PSEUDO_MAP_IDX) {
9682 		dst_reg->type = CONST_PTR_TO_MAP;
9683 	} else {
9684 		verbose(env, "bpf verifier is misconfigured\n");
9685 		return -EINVAL;
9686 	}
9687 
9688 	return 0;
9689 }
9690 
9691 static bool may_access_skb(enum bpf_prog_type type)
9692 {
9693 	switch (type) {
9694 	case BPF_PROG_TYPE_SOCKET_FILTER:
9695 	case BPF_PROG_TYPE_SCHED_CLS:
9696 	case BPF_PROG_TYPE_SCHED_ACT:
9697 		return true;
9698 	default:
9699 		return false;
9700 	}
9701 }
9702 
9703 /* verify safety of LD_ABS|LD_IND instructions:
9704  * - they can only appear in the programs where ctx == skb
9705  * - since they are wrappers of function calls, they scratch R1-R5 registers,
9706  *   preserve R6-R9, and store return value into R0
9707  *
9708  * Implicit input:
9709  *   ctx == skb == R6 == CTX
9710  *
9711  * Explicit input:
9712  *   SRC == any register
9713  *   IMM == 32-bit immediate
9714  *
9715  * Output:
9716  *   R0 - 8/16/32-bit skb data converted to cpu endianness
9717  */
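/* For illustration, a classic socket-filter style usage:
 *   BPF_LD_ABS(BPF_H, 12)           R0 = ntohs(*(u16 *)(skb->data + 12))
 *   BPF_LD_IND(BPF_B, BPF_REG_7, 0) R0 = *(u8 *)(skb->data + R7 + 0)
 * Both forms implicitly use R6 as the skb (ctx) pointer and scratch R1-R5,
 * as described above.
 */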
9718 static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
9719 {
9720 	struct bpf_reg_state *regs = cur_regs(env);
9721 	static const int ctx_reg = BPF_REG_6;
9722 	u8 mode = BPF_MODE(insn->code);
9723 	int i, err;
9724 
9725 	if (!may_access_skb(resolve_prog_type(env->prog))) {
9726 		verbose(env, "BPF_LD_[ABS|IND] instructions not allowed for this program type\n");
9727 		return -EINVAL;
9728 	}
9729 
9730 	if (!env->ops->gen_ld_abs) {
9731 		verbose(env, "bpf verifier is misconfigured\n");
9732 		return -EINVAL;
9733 	}
9734 
9735 	if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
9736 	    BPF_SIZE(insn->code) == BPF_DW ||
9737 	    (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
9738 		verbose(env, "BPF_LD_[ABS|IND] uses reserved fields\n");
9739 		return -EINVAL;
9740 	}
9741 
9742 	/* check whether implicit source operand (register R6) is readable */
9743 	err = check_reg_arg(env, ctx_reg, SRC_OP);
9744 	if (err)
9745 		return err;
9746 
9747 	/* Disallow usage of BPF_LD_[ABS|IND] with reference tracking, as
9748 	 * gen_ld_abs() may terminate the program at runtime, leading to
9749 	 * reference leak.
9750 	 */
9751 	err = check_reference_leak(env);
9752 	if (err) {
9753 		verbose(env, "BPF_LD_[ABS|IND] cannot be mixed with socket references\n");
9754 		return err;
9755 	}
9756 
9757 	if (env->cur_state->active_spin_lock) {
9758 		verbose(env, "BPF_LD_[ABS|IND] cannot be used inside bpf_spin_lock-ed region\n");
9759 		return -EINVAL;
9760 	}
9761 
9762 	if (regs[ctx_reg].type != PTR_TO_CTX) {
9763 		verbose(env,
9764 			"at the time of BPF_LD_ABS|IND R6 != pointer to skb\n");
9765 		return -EINVAL;
9766 	}
9767 
9768 	if (mode == BPF_IND) {
9769 		/* check explicit source operand */
9770 		err = check_reg_arg(env, insn->src_reg, SRC_OP);
9771 		if (err)
9772 			return err;
9773 	}
9774 
9775 	err = check_ptr_off_reg(env, &regs[ctx_reg], ctx_reg);
9776 	if (err < 0)
9777 		return err;
9778 
9779 	/* reset caller saved regs to unreadable */
9780 	for (i = 0; i < CALLER_SAVED_REGS; i++) {
9781 		mark_reg_not_init(env, regs, caller_saved[i]);
9782 		check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
9783 	}
9784 
9785 	/* mark destination R0 register as readable, since it contains
9786 	 * the value fetched from the packet.
9787 	 * Already marked as written above.
9788 	 */
9789 	mark_reg_unknown(env, regs, BPF_REG_0);
9790 	/* ld_abs loads up to 32 bits of skb data. */
9791 	regs[BPF_REG_0].subreg_def = env->insn_idx + 1;
9792 	return 0;
9793 }
9794 
9795 static int check_return_code(struct bpf_verifier_env *env)
9796 {
9797 	struct tnum enforce_attach_type_range = tnum_unknown;
9798 	const struct bpf_prog *prog = env->prog;
9799 	struct bpf_reg_state *reg;
9800 	struct tnum range = tnum_range(0, 1);
9801 	enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
9802 	int err;
9803 	struct bpf_func_state *frame = env->cur_state->frame[0];
9804 	const bool is_subprog = frame->subprogno;
9805 
9806 	/* LSM and struct_ops func-ptr's return type could be "void" */
9807 	if (!is_subprog &&
9808 	    (prog_type == BPF_PROG_TYPE_STRUCT_OPS ||
9809 	     prog_type == BPF_PROG_TYPE_LSM) &&
9810 	    !prog->aux->attach_func_proto->type)
9811 		return 0;
9812 
9813 	/* eBPF calling convention is such that R0 is used
9814 	 * to return the value from an eBPF program.
9815 	 * Make sure that it is readable at this point
9816 	 * (the bpf_exit), which means the program wrote
9817 	 * something into it earlier.
9818 	 */
9819 	err = check_reg_arg(env, BPF_REG_0, SRC_OP);
9820 	if (err)
9821 		return err;
9822 
9823 	if (is_pointer_value(env, BPF_REG_0)) {
9824 		verbose(env, "R0 leaks addr as return value\n");
9825 		return -EACCES;
9826 	}
9827 
9828 	reg = cur_regs(env) + BPF_REG_0;
9829 
9830 	if (frame->in_async_callback_fn) {
9831 		/* enforce return zero from async callbacks like timer */
9832 		if (reg->type != SCALAR_VALUE) {
9833 			verbose(env, "In async callback the register R0 is not a known value (%s)\n",
9834 				reg_type_str(env, reg->type));
9835 			return -EINVAL;
9836 		}
9837 
9838 		if (!tnum_in(tnum_const(0), reg->var_off)) {
9839 			verbose_invalid_scalar(env, reg, &range, "async callback", "R0");
9840 			return -EINVAL;
9841 		}
9842 		return 0;
9843 	}
9844 
9845 	if (is_subprog) {
9846 		if (reg->type != SCALAR_VALUE) {
9847 			verbose(env, "At subprogram exit the register R0 is not a scalar value (%s)\n",
9848 				reg_type_str(env, reg->type));
9849 			return -EINVAL;
9850 		}
9851 		return 0;
9852 	}
9853 
9854 	switch (prog_type) {
9855 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
9856 		if (env->prog->expected_attach_type == BPF_CGROUP_UDP4_RECVMSG ||
9857 		    env->prog->expected_attach_type == BPF_CGROUP_UDP6_RECVMSG ||
9858 		    env->prog->expected_attach_type == BPF_CGROUP_INET4_GETPEERNAME ||
9859 		    env->prog->expected_attach_type == BPF_CGROUP_INET6_GETPEERNAME ||
9860 		    env->prog->expected_attach_type == BPF_CGROUP_INET4_GETSOCKNAME ||
9861 		    env->prog->expected_attach_type == BPF_CGROUP_INET6_GETSOCKNAME)
9862 			range = tnum_range(1, 1);
9863 		if (env->prog->expected_attach_type == BPF_CGROUP_INET4_BIND ||
9864 		    env->prog->expected_attach_type == BPF_CGROUP_INET6_BIND)
9865 			range = tnum_range(0, 3);
9866 		break;
9867 	case BPF_PROG_TYPE_CGROUP_SKB:
9868 		if (env->prog->expected_attach_type == BPF_CGROUP_INET_EGRESS) {
9869 			range = tnum_range(0, 3);
9870 			enforce_attach_type_range = tnum_range(2, 3);
9871 		}
9872 		break;
9873 	case BPF_PROG_TYPE_CGROUP_SOCK:
9874 	case BPF_PROG_TYPE_SOCK_OPS:
9875 	case BPF_PROG_TYPE_CGROUP_DEVICE:
9876 	case BPF_PROG_TYPE_CGROUP_SYSCTL:
9877 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
9878 		break;
9879 	case BPF_PROG_TYPE_RAW_TRACEPOINT:
9880 		if (!env->prog->aux->attach_btf_id)
9881 			return 0;
9882 		range = tnum_const(0);
9883 		break;
9884 	case BPF_PROG_TYPE_TRACING:
9885 		switch (env->prog->expected_attach_type) {
9886 		case BPF_TRACE_FENTRY:
9887 		case BPF_TRACE_FEXIT:
9888 			range = tnum_const(0);
9889 			break;
9890 		case BPF_TRACE_RAW_TP:
9891 		case BPF_MODIFY_RETURN:
9892 			return 0;
9893 		case BPF_TRACE_ITER:
9894 			break;
9895 		default:
9896 			return -ENOTSUPP;
9897 		}
9898 		break;
9899 	case BPF_PROG_TYPE_SK_LOOKUP:
9900 		range = tnum_range(SK_DROP, SK_PASS);
9901 		break;
9902 	case BPF_PROG_TYPE_EXT:
9903 		/* freplace program can return anything, as its return value
9904 		 * depends on the to-be-replaced kernel func or bpf program.
9905 		 */
9906 	default:
9907 		return 0;
9908 	}
9909 
9910 	if (reg->type != SCALAR_VALUE) {
9911 		verbose(env, "At program exit the register R0 is not a known value (%s)\n",
9912 			reg_type_str(env, reg->type));
9913 		return -EINVAL;
9914 	}
9915 
9916 	if (!tnum_in(range, reg->var_off)) {
9917 		verbose_invalid_scalar(env, reg, &range, "program exit", "R0");
9918 		return -EINVAL;
9919 	}
9920 
9921 	if (!tnum_is_unknown(enforce_attach_type_range) &&
9922 	    tnum_in(enforce_attach_type_range, reg->var_off))
9923 		env->prog->enforce_expected_attach_type = 1;
9924 	return 0;
9925 }
9926 
9927 /* non-recursive DFS pseudo code
9928  * 1  procedure DFS-iterative(G,v):
9929  * 2      label v as discovered
9930  * 3      let S be a stack
9931  * 4      S.push(v)
9932  * 5      while S is not empty
9933  * 6            t <- S.pop()
9934  * 7            if t is what we're looking for:
9935  * 8                return t
9936  * 9            for all edges e in G.adjacentEdges(t) do
9937  * 10               if edge e is already labelled
9938  * 11                   continue with the next edge
9939  * 12               w <- G.adjacentVertex(t,e)
9940  * 13               if vertex w is not discovered and not explored
9941  * 14                   label e as tree-edge
9942  * 15                   label w as discovered
9943  * 16                   S.push(w)
9944  * 17                   continue at 5
9945  * 18               else if vertex w is discovered
9946  * 19                   label e as back-edge
9947  * 20               else
9948  * 21                   // vertex w is explored
9949  * 22                   label e as forward- or cross-edge
9950  * 23           label t as explored
9951  * 24           S.pop()
9952  *
9953  * convention:
9954  * 0x10 - discovered
9955  * 0x11 - discovered and fall-through edge labelled
9956  * 0x12 - discovered and fall-through and branch edges labelled
9957  * 0x20 - explored
9958  */
9959 
9960 enum {
9961 	DISCOVERED = 0x10,
9962 	EXPLORED = 0x20,
9963 	FALLTHROUGH = 1,
9964 	BRANCH = 2,
9965 };
9966 
9967 static u32 state_htab_size(struct bpf_verifier_env *env)
9968 {
9969 	return env->prog->len;
9970 }
9971 
9972 static struct bpf_verifier_state_list **explored_state(
9973 					struct bpf_verifier_env *env,
9974 					int idx)
9975 {
9976 	struct bpf_verifier_state *cur = env->cur_state;
9977 	struct bpf_func_state *state = cur->frame[cur->curframe];
9978 
9979 	return &env->explored_states[(idx ^ state->callsite) % state_htab_size(env)];
9980 }
9981 
9982 static void init_explored_state(struct bpf_verifier_env *env, int idx)
9983 {
9984 	env->insn_aux_data[idx].prune_point = true;
9985 }
9986 
9987 enum {
9988 	DONE_EXPLORING = 0,
9989 	KEEP_EXPLORING = 1,
9990 };
9991 
9992 /* t, w, e - match pseudo-code above:
9993  * t - index of current instruction
9994  * w - next instruction
9995  * e - edge
9996  */
9997 static int push_insn(int t, int w, int e, struct bpf_verifier_env *env,
9998 		     bool loop_ok)
9999 {
10000 	int *insn_stack = env->cfg.insn_stack;
10001 	int *insn_state = env->cfg.insn_state;
10002 
10003 	if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH))
10004 		return DONE_EXPLORING;
10005 
10006 	if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH))
10007 		return DONE_EXPLORING;
10008 
10009 	if (w < 0 || w >= env->prog->len) {
10010 		verbose_linfo(env, t, "%d: ", t);
10011 		verbose(env, "jump out of range from insn %d to %d\n", t, w);
10012 		return -EINVAL;
10013 	}
10014 
10015 	if (e == BRANCH)
10016 		/* mark branch target for state pruning */
10017 		init_explored_state(env, w);
10018 
10019 	if (insn_state[w] == 0) {
10020 		/* tree-edge */
10021 		insn_state[t] = DISCOVERED | e;
10022 		insn_state[w] = DISCOVERED;
10023 		if (env->cfg.cur_stack >= env->prog->len)
10024 			return -E2BIG;
10025 		insn_stack[env->cfg.cur_stack++] = w;
10026 		return KEEP_EXPLORING;
10027 	} else if ((insn_state[w] & 0xF0) == DISCOVERED) {
10028 		if (loop_ok && env->bpf_capable)
10029 			return DONE_EXPLORING;
10030 		verbose_linfo(env, t, "%d: ", t);
10031 		verbose_linfo(env, w, "%d: ", w);
10032 		verbose(env, "back-edge from insn %d to %d\n", t, w);
10033 		return -EINVAL;
10034 	} else if (insn_state[w] == EXPLORED) {
10035 		/* forward- or cross-edge */
10036 		insn_state[t] = DISCOVERED | e;
10037 	} else {
10038 		verbose(env, "insn state internal bug\n");
10039 		return -EFAULT;
10040 	}
10041 	return DONE_EXPLORING;
10042 }
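
/* Editorial note (not part of the original source): a target w that is
 * still DISCOVERED sits below t on insn_stack, so the edge t -> w closes a
 * cycle (a back-edge). Such loops are tolerated here only when the caller
 * passed loop_ok and the loader is bpf_capable; the actual bounded-loop
 * verification happens later, during path exploration.
 */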
10043 
10044 static int visit_func_call_insn(int t, int insn_cnt,
10045 				struct bpf_insn *insns,
10046 				struct bpf_verifier_env *env,
10047 				bool visit_callee)
10048 {
10049 	int ret;
10050 
10051 	ret = push_insn(t, t + 1, FALLTHROUGH, env, false);
10052 	if (ret)
10053 		return ret;
10054 
10055 	if (t + 1 < insn_cnt)
10056 		init_explored_state(env, t + 1);
10057 	if (visit_callee) {
10058 		init_explored_state(env, t);
10059 		ret = push_insn(t, t + insns[t].imm + 1, BRANCH, env,
10060 				/* It's ok to allow recursion from CFG point of
10061 				 * view. __check_func_call() will do the actual
10062 				 * check.
10063 				 */
10064 				bpf_pseudo_func(insns + t));
10065 	}
10066 	return ret;
10067 }
10068 
10069 /* Visits the instruction at index t and returns one of the following:
10070  *  < 0 - an error occurred
10071  *  DONE_EXPLORING - the instruction was fully explored
10072  *  KEEP_EXPLORING - there is still work to be done before it is fully explored
10073  */
10074 static int visit_insn(int t, int insn_cnt, struct bpf_verifier_env *env)
10075 {
10076 	struct bpf_insn *insns = env->prog->insnsi;
10077 	int ret;
10078 
10079 	if (bpf_pseudo_func(insns + t))
10080 		return visit_func_call_insn(t, insn_cnt, insns, env, true);
10081 
10082 	/* All non-branch instructions have a single fall-through edge. */
10083 	if (BPF_CLASS(insns[t].code) != BPF_JMP &&
10084 	    BPF_CLASS(insns[t].code) != BPF_JMP32)
10085 		return push_insn(t, t + 1, FALLTHROUGH, env, false);
10086 
10087 	switch (BPF_OP(insns[t].code)) {
10088 	case BPF_EXIT:
10089 		return DONE_EXPLORING;
10090 
10091 	case BPF_CALL:
10092 		if (insns[t].imm == BPF_FUNC_timer_set_callback)
10093 			/* Mark this call insn to trigger is_state_visited() check
10094 			 * before call itself is processed by __check_func_call().
10095 			 * Otherwise new async state will be pushed for further
10096 			 * exploration.
10097 			 */
10098 			init_explored_state(env, t);
10099 		return visit_func_call_insn(t, insn_cnt, insns, env,
10100 					    insns[t].src_reg == BPF_PSEUDO_CALL);
10101 
10102 	case BPF_JA:
10103 		if (BPF_SRC(insns[t].code) != BPF_K)
10104 			return -EINVAL;
10105 
10106 		/* unconditional jump with single edge */
10107 		ret = push_insn(t, t + insns[t].off + 1, FALLTHROUGH, env,
10108 				true);
10109 		if (ret)
10110 			return ret;
10111 
10112 		/* unconditional jmp is not a good pruning point,
10113 		 * but it's marked, since backtracking needs
10114 		 * to record jmp history in is_state_visited().
10115 		 */
10116 		init_explored_state(env, t + insns[t].off + 1);
10117 		/* tell verifier to check for equivalent states
10118 		 * after every call and jump
10119 		 */
10120 		if (t + 1 < insn_cnt)
10121 			init_explored_state(env, t + 1);
10122 
10123 		return ret;
10124 
10125 	default:
10126 		/* conditional jump with two edges */
10127 		init_explored_state(env, t);
10128 		ret = push_insn(t, t + 1, FALLTHROUGH, env, true);
10129 		if (ret)
10130 			return ret;
10131 
10132 		return push_insn(t, t + insns[t].off + 1, BRANCH, env, true);
10133 	}
10134 }
10135 
10136 /* non-recursive depth-first-search to detect loops in BPF program
10137  * loop == back-edge in directed graph
10138  */
10139 static int check_cfg(struct bpf_verifier_env *env)
10140 {
10141 	int insn_cnt = env->prog->len;
10142 	int *insn_stack, *insn_state;
10143 	int ret = 0;
10144 	int i;
10145 
10146 	insn_state = env->cfg.insn_state = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
10147 	if (!insn_state)
10148 		return -ENOMEM;
10149 
10150 	insn_stack = env->cfg.insn_stack = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
10151 	if (!insn_stack) {
10152 		kvfree(insn_state);
10153 		return -ENOMEM;
10154 	}
10155 
10156 	insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */
10157 	insn_stack[0] = 0; /* 0 is the first instruction */
10158 	env->cfg.cur_stack = 1;
10159 
10160 	while (env->cfg.cur_stack > 0) {
10161 		int t = insn_stack[env->cfg.cur_stack - 1];
10162 
10163 		ret = visit_insn(t, insn_cnt, env);
10164 		switch (ret) {
10165 		case DONE_EXPLORING:
10166 			insn_state[t] = EXPLORED;
10167 			env->cfg.cur_stack--;
10168 			break;
10169 		case KEEP_EXPLORING:
10170 			break;
10171 		default:
10172 			if (ret > 0) {
10173 				verbose(env, "visit_insn internal bug\n");
10174 				ret = -EFAULT;
10175 			}
10176 			goto err_free;
10177 		}
10178 	}
10179 
10180 	if (env->cfg.cur_stack < 0) {
10181 		verbose(env, "pop stack internal bug\n");
10182 		ret = -EFAULT;
10183 		goto err_free;
10184 	}
10185 
10186 	for (i = 0; i < insn_cnt; i++) {
10187 		if (insn_state[i] != EXPLORED) {
10188 			verbose(env, "unreachable insn %d\n", i);
10189 			ret = -EINVAL;
10190 			goto err_free;
10191 		}
10192 	}
10193 	ret = 0; /* cfg looks good */
10194 
10195 err_free:
10196 	kvfree(insn_state);
10197 	kvfree(insn_stack);
10198 	env->cfg.insn_state = env->cfg.insn_stack = NULL;
10199 	return ret;
10200 }
10201 
10202 static int check_abnormal_return(struct bpf_verifier_env *env)
10203 {
10204 	int i;
10205 
10206 	for (i = 1; i < env->subprog_cnt; i++) {
10207 		if (env->subprog_info[i].has_ld_abs) {
10208 			verbose(env, "LD_ABS is not allowed in subprogs without BTF\n");
10209 			return -EINVAL;
10210 		}
10211 		if (env->subprog_info[i].has_tail_call) {
10212 			verbose(env, "tail_call is not allowed in subprogs without BTF\n");
10213 			return -EINVAL;
10214 		}
10215 	}
10216 	return 0;
10217 }
10218 
10219 /* The minimum supported BTF func info size */
10220 #define MIN_BPF_FUNCINFO_SIZE	8
10221 #define MAX_FUNCINFO_REC_SIZE	252
10222 
10223 static int check_btf_func(struct bpf_verifier_env *env,
10224 			  const union bpf_attr *attr,
10225 			  bpfptr_t uattr)
10226 {
10227 	const struct btf_type *type, *func_proto, *ret_type;
10228 	u32 i, nfuncs, urec_size, min_size;
10229 	u32 krec_size = sizeof(struct bpf_func_info);
10230 	struct bpf_func_info *krecord;
10231 	struct bpf_func_info_aux *info_aux = NULL;
10232 	struct bpf_prog *prog;
10233 	const struct btf *btf;
10234 	bpfptr_t urecord;
10235 	u32 prev_offset = 0;
10236 	bool scalar_return;
10237 	int ret = -ENOMEM;
10238 
10239 	nfuncs = attr->func_info_cnt;
10240 	if (!nfuncs) {
10241 		if (check_abnormal_return(env))
10242 			return -EINVAL;
10243 		return 0;
10244 	}
10245 
10246 	if (nfuncs != env->subprog_cnt) {
10247 		verbose(env, "number of funcs in func_info doesn't match number of subprogs\n");
10248 		return -EINVAL;
10249 	}
10250 
10251 	urec_size = attr->func_info_rec_size;
10252 	if (urec_size < MIN_BPF_FUNCINFO_SIZE ||
10253 	    urec_size > MAX_FUNCINFO_REC_SIZE ||
10254 	    urec_size % sizeof(u32)) {
10255 		verbose(env, "invalid func info rec size %u\n", urec_size);
10256 		return -EINVAL;
10257 	}
10258 
10259 	prog = env->prog;
10260 	btf = prog->aux->btf;
10261 
10262 	urecord = make_bpfptr(attr->func_info, uattr.is_kernel);
10263 	min_size = min_t(u32, krec_size, urec_size);
10264 
10265 	krecord = kvcalloc(nfuncs, krec_size, GFP_KERNEL | __GFP_NOWARN);
10266 	if (!krecord)
10267 		return -ENOMEM;
10268 	info_aux = kcalloc(nfuncs, sizeof(*info_aux), GFP_KERNEL | __GFP_NOWARN);
10269 	if (!info_aux)
10270 		goto err_free;
10271 
10272 	for (i = 0; i < nfuncs; i++) {
10273 		ret = bpf_check_uarg_tail_zero(urecord, krec_size, urec_size);
10274 		if (ret) {
10275 			if (ret == -E2BIG) {
10276 				verbose(env, "nonzero trailing record in func info");
10277 				/* set the size kernel expects so loader can zero
10278 				 * out the rest of the record.
10279 				 */
10280 				if (copy_to_bpfptr_offset(uattr,
10281 							  offsetof(union bpf_attr, func_info_rec_size),
10282 							  &min_size, sizeof(min_size)))
10283 					ret = -EFAULT;
10284 			}
10285 			goto err_free;
10286 		}
10287 
10288 		if (copy_from_bpfptr(&krecord[i], urecord, min_size)) {
10289 			ret = -EFAULT;
10290 			goto err_free;
10291 		}
10292 
10293 		/* check insn_off */
10294 		ret = -EINVAL;
10295 		if (i == 0) {
10296 			if (krecord[i].insn_off) {
10297 				verbose(env,
10298 					"nonzero insn_off %u for the first func info record",
10299 					krecord[i].insn_off);
10300 				goto err_free;
10301 			}
10302 		} else if (krecord[i].insn_off <= prev_offset) {
10303 			verbose(env,
10304 				"same or smaller insn offset (%u) than previous func info record (%u)",
10305 				krecord[i].insn_off, prev_offset);
10306 			goto err_free;
10307 		}
10308 
10309 		if (env->subprog_info[i].start != krecord[i].insn_off) {
10310 			verbose(env, "func_info BTF section doesn't match subprog layout in BPF program\n");
10311 			goto err_free;
10312 		}
10313 
10314 		/* check type_id */
10315 		type = btf_type_by_id(btf, krecord[i].type_id);
10316 		if (!type || !btf_type_is_func(type)) {
10317 			verbose(env, "invalid type id %d in func info",
10318 				krecord[i].type_id);
10319 			goto err_free;
10320 		}
10321 		info_aux[i].linkage = BTF_INFO_VLEN(type->info);
10322 
10323 		func_proto = btf_type_by_id(btf, type->type);
10324 		if (unlikely(!func_proto || !btf_type_is_func_proto(func_proto)))
10325 			/* btf_func_check() already verified it during BTF load */
10326 			goto err_free;
10327 		ret_type = btf_type_skip_modifiers(btf, func_proto->type, NULL);
10328 		scalar_return =
10329 			btf_type_is_small_int(ret_type) || btf_type_is_enum(ret_type);
10330 		if (i && !scalar_return && env->subprog_info[i].has_ld_abs) {
10331 			verbose(env, "LD_ABS is only allowed in functions that return 'int'.\n");
10332 			goto err_free;
10333 		}
10334 		if (i && !scalar_return && env->subprog_info[i].has_tail_call) {
10335 			verbose(env, "tail_call is only allowed in functions that return 'int'.\n");
10336 			goto err_free;
10337 		}
10338 
10339 		prev_offset = krecord[i].insn_off;
10340 		bpfptr_add(&urecord, urec_size);
10341 	}
10342 
10343 	prog->aux->func_info = krecord;
10344 	prog->aux->func_info_cnt = nfuncs;
10345 	prog->aux->func_info_aux = info_aux;
10346 	return 0;
10347 
10348 err_free:
10349 	kvfree(krecord);
10350 	kfree(info_aux);
10351 	return ret;
10352 }
10353 
10354 static void adjust_btf_func(struct bpf_verifier_env *env)
10355 {
10356 	struct bpf_prog_aux *aux = env->prog->aux;
10357 	int i;
10358 
10359 	if (!aux->func_info)
10360 		return;
10361 
10362 	for (i = 0; i < env->subprog_cnt; i++)
10363 		aux->func_info[i].insn_off = env->subprog_info[i].start;
10364 }
10365 
10366 #define MIN_BPF_LINEINFO_SIZE	(offsetof(struct bpf_line_info, line_col) + \
10367 		sizeof(((struct bpf_line_info *)(0))->line_col))
10368 #define MAX_LINEINFO_REC_SIZE	MAX_FUNCINFO_REC_SIZE
10369 
10370 static int check_btf_line(struct bpf_verifier_env *env,
10371 			  const union bpf_attr *attr,
10372 			  bpfptr_t uattr)
10373 {
10374 	u32 i, s, nr_linfo, ncopy, expected_size, rec_size, prev_offset = 0;
10375 	struct bpf_subprog_info *sub;
10376 	struct bpf_line_info *linfo;
10377 	struct bpf_prog *prog;
10378 	const struct btf *btf;
10379 	bpfptr_t ulinfo;
10380 	int err;
10381 
10382 	nr_linfo = attr->line_info_cnt;
10383 	if (!nr_linfo)
10384 		return 0;
10385 	if (nr_linfo > INT_MAX / sizeof(struct bpf_line_info))
10386 		return -EINVAL;
10387 
10388 	rec_size = attr->line_info_rec_size;
10389 	if (rec_size < MIN_BPF_LINEINFO_SIZE ||
10390 	    rec_size > MAX_LINEINFO_REC_SIZE ||
10391 	    rec_size & (sizeof(u32) - 1))
10392 		return -EINVAL;
10393 
10394 	/* Need to zero it in case userspace passes in a
10395 	 * smaller bpf_line_info object.
10396 	 */
10397 	linfo = kvcalloc(nr_linfo, sizeof(struct bpf_line_info),
10398 			 GFP_KERNEL | __GFP_NOWARN);
10399 	if (!linfo)
10400 		return -ENOMEM;
10401 
10402 	prog = env->prog;
10403 	btf = prog->aux->btf;
10404 
10405 	s = 0;
10406 	sub = env->subprog_info;
10407 	ulinfo = make_bpfptr(attr->line_info, uattr.is_kernel);
10408 	expected_size = sizeof(struct bpf_line_info);
10409 	ncopy = min_t(u32, expected_size, rec_size);
10410 	for (i = 0; i < nr_linfo; i++) {
10411 		err = bpf_check_uarg_tail_zero(ulinfo, expected_size, rec_size);
10412 		if (err) {
10413 			if (err == -E2BIG) {
10414 				verbose(env, "nonzero trailing record in line_info");
10415 				if (copy_to_bpfptr_offset(uattr,
10416 							  offsetof(union bpf_attr, line_info_rec_size),
10417 							  &expected_size, sizeof(expected_size)))
10418 					err = -EFAULT;
10419 			}
10420 			goto err_free;
10421 		}
10422 
10423 		if (copy_from_bpfptr(&linfo[i], ulinfo, ncopy)) {
10424 			err = -EFAULT;
10425 			goto err_free;
10426 		}
10427 
10428 		/*
10429 		 * Check insn_off to ensure
10430 		 * 1) strictly increasing AND
10431 		 * 2) bounded by prog->len
10432 		 *
10433 		 * The linfo[0].insn_off == 0 check logically falls into
10434 		 * the later "missing bpf_line_info for func..." case
10435 		 * because the first linfo[0].insn_off must belong to the
10436 		 * first subprog as well, and the first subprog must have
10437 		 * subprog_info[0].start == 0.
10438 		 */
10439 		if ((i && linfo[i].insn_off <= prev_offset) ||
10440 		    linfo[i].insn_off >= prog->len) {
10441 			verbose(env, "Invalid line_info[%u].insn_off:%u (prev_offset:%u prog->len:%u)\n",
10442 				i, linfo[i].insn_off, prev_offset,
10443 				prog->len);
10444 			err = -EINVAL;
10445 			goto err_free;
10446 		}
10447 
10448 		if (!prog->insnsi[linfo[i].insn_off].code) {
10449 			verbose(env,
10450 				"Invalid insn code at line_info[%u].insn_off\n",
10451 				i);
10452 			err = -EINVAL;
10453 			goto err_free;
10454 		}
10455 
10456 		if (!btf_name_by_offset(btf, linfo[i].line_off) ||
10457 		    !btf_name_by_offset(btf, linfo[i].file_name_off)) {
10458 			verbose(env, "Invalid line_info[%u].line_off or .file_name_off\n", i);
10459 			err = -EINVAL;
10460 			goto err_free;
10461 		}
10462 
10463 		if (s != env->subprog_cnt) {
10464 			if (linfo[i].insn_off == sub[s].start) {
10465 				sub[s].linfo_idx = i;
10466 				s++;
10467 			} else if (sub[s].start < linfo[i].insn_off) {
10468 				verbose(env, "missing bpf_line_info for func#%u\n", s);
10469 				err = -EINVAL;
10470 				goto err_free;
10471 			}
10472 		}
10473 
10474 		prev_offset = linfo[i].insn_off;
10475 		bpfptr_add(&ulinfo, rec_size);
10476 	}
10477 
10478 	if (s != env->subprog_cnt) {
10479 		verbose(env, "missing bpf_line_info for %u funcs starting from func#%u\n",
10480 			env->subprog_cnt - s, s);
10481 		err = -EINVAL;
10482 		goto err_free;
10483 	}
10484 
10485 	prog->aux->linfo = linfo;
10486 	prog->aux->nr_linfo = nr_linfo;
10487 
10488 	return 0;
10489 
10490 err_free:
10491 	kvfree(linfo);
10492 	return err;
10493 }
10494 
10495 #define MIN_CORE_RELO_SIZE	sizeof(struct bpf_core_relo)
10496 #define MAX_CORE_RELO_SIZE	MAX_FUNCINFO_REC_SIZE
10497 
10498 static int check_core_relo(struct bpf_verifier_env *env,
10499 			   const union bpf_attr *attr,
10500 			   bpfptr_t uattr)
10501 {
10502 	u32 i, nr_core_relo, ncopy, expected_size, rec_size;
10503 	struct bpf_core_relo core_relo = {};
10504 	struct bpf_prog *prog = env->prog;
10505 	const struct btf *btf = prog->aux->btf;
10506 	struct bpf_core_ctx ctx = {
10507 		.log = &env->log,
10508 		.btf = btf,
10509 	};
10510 	bpfptr_t u_core_relo;
10511 	int err;
10512 
10513 	nr_core_relo = attr->core_relo_cnt;
10514 	if (!nr_core_relo)
10515 		return 0;
10516 	if (nr_core_relo > INT_MAX / sizeof(struct bpf_core_relo))
10517 		return -EINVAL;
10518 
10519 	rec_size = attr->core_relo_rec_size;
10520 	if (rec_size < MIN_CORE_RELO_SIZE ||
10521 	    rec_size > MAX_CORE_RELO_SIZE ||
10522 	    rec_size % sizeof(u32))
10523 		return -EINVAL;
10524 
10525 	u_core_relo = make_bpfptr(attr->core_relos, uattr.is_kernel);
10526 	expected_size = sizeof(struct bpf_core_relo);
10527 	ncopy = min_t(u32, expected_size, rec_size);
10528 
10529 	/* Unlike func_info and line_info, copy and apply each CO-RE
10530 	 * relocation record one at a time.
10531 	 */
10532 	for (i = 0; i < nr_core_relo; i++) {
10533 		/* future proofing when sizeof(bpf_core_relo) changes */
10534 		err = bpf_check_uarg_tail_zero(u_core_relo, expected_size, rec_size);
10535 		if (err) {
10536 			if (err == -E2BIG) {
10537 				verbose(env, "nonzero trailing record in core_relo");
10538 				if (copy_to_bpfptr_offset(uattr,
10539 							  offsetof(union bpf_attr, core_relo_rec_size),
10540 							  &expected_size, sizeof(expected_size)))
10541 					err = -EFAULT;
10542 			}
10543 			break;
10544 		}
10545 
10546 		if (copy_from_bpfptr(&core_relo, u_core_relo, ncopy)) {
10547 			err = -EFAULT;
10548 			break;
10549 		}
10550 
10551 		if (core_relo.insn_off % 8 || core_relo.insn_off / 8 >= prog->len) {
10552 			verbose(env, "Invalid core_relo[%u].insn_off:%u prog->len:%u\n",
10553 				i, core_relo.insn_off, prog->len);
10554 			err = -EINVAL;
10555 			break;
10556 		}
10557 
10558 		err = bpf_core_apply(&ctx, &core_relo, i,
10559 				     &prog->insnsi[core_relo.insn_off / 8]);
10560 		if (err)
10561 			break;
10562 		bpfptr_add(&u_core_relo, rec_size);
10563 	}
10564 	return err;
10565 }
10566 
10567 static int check_btf_info(struct bpf_verifier_env *env,
10568 			  const union bpf_attr *attr,
10569 			  bpfptr_t uattr)
10570 {
10571 	struct btf *btf;
10572 	int err;
10573 
10574 	if (!attr->func_info_cnt && !attr->line_info_cnt) {
10575 		if (check_abnormal_return(env))
10576 			return -EINVAL;
10577 		return 0;
10578 	}
10579 
10580 	btf = btf_get_by_fd(attr->prog_btf_fd);
10581 	if (IS_ERR(btf))
10582 		return PTR_ERR(btf);
10583 	if (btf_is_kernel(btf)) {
10584 		btf_put(btf);
10585 		return -EACCES;
10586 	}
10587 	env->prog->aux->btf = btf;
10588 
10589 	err = check_btf_func(env, attr, uattr);
10590 	if (err)
10591 		return err;
10592 
10593 	err = check_btf_line(env, attr, uattr);
10594 	if (err)
10595 		return err;
10596 
10597 	err = check_core_relo(env, attr, uattr);
10598 	if (err)
10599 		return err;
10600 
10601 	return 0;
10602 }
10603 
10604 /* check %cur's range satisfies %old's */
10605 static bool range_within(struct bpf_reg_state *old,
10606 			 struct bpf_reg_state *cur)
10607 {
10608 	return old->umin_value <= cur->umin_value &&
10609 	       old->umax_value >= cur->umax_value &&
10610 	       old->smin_value <= cur->smin_value &&
10611 	       old->smax_value >= cur->smax_value &&
10612 	       old->u32_min_value <= cur->u32_min_value &&
10613 	       old->u32_max_value >= cur->u32_max_value &&
10614 	       old->s32_min_value <= cur->s32_min_value &&
10615 	       old->s32_max_value >= cur->s32_max_value;
10616 }
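
/* Editorial example (not part of the original source): if the old,
 * already-verified register had all of its ranges equal to [0, 100] and the
 * current register has all of its ranges within [10, 20], range_within()
 * returns true; a current range of [0, 200] would fail the umax/smax checks.
 * regsafe() below combines this with tnum_in() on var_off to accept a
 * "narrower" scalar in place of a previously verified wider one.
 */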
10617 
10618 /* If in the old state two registers had the same id, then they need to have
10619  * the same id in the new state as well.  But that id could be different from
10620  * the old state, so we need to track the mapping from old to new ids.
10621  * Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent
10622  * regs with old id 5 must also have new id 9 for the new state to be safe.  But
10623  * regs with a different old id could still have new id 9, we don't care about
10624  * regs with a different old id could still have new id 9; we don't care about
10625  * that.
10626  * so, we require the new id to match; otherwise, we add the id pair to the map.
10627  */
10628 static bool check_ids(u32 old_id, u32 cur_id, struct bpf_id_pair *idmap)
10629 {
10630 	unsigned int i;
10631 
10632 	for (i = 0; i < BPF_ID_MAP_SIZE; i++) {
10633 		if (!idmap[i].old) {
10634 			/* Reached an empty slot; haven't seen this id before */
10635 			idmap[i].old = old_id;
10636 			idmap[i].cur = cur_id;
10637 			return true;
10638 		}
10639 		if (idmap[i].old == old_id)
10640 			return idmap[i].cur == cur_id;
10641 	}
10642 	/* We ran out of idmap slots, which should be impossible */
10643 	WARN_ON_ONCE(1);
10644 	return false;
10645 }
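
/* Editorial illustration (not part of the original source): starting from a
 * zeroed idmap,
 *   check_ids(5, 9, idmap) -> true,  records the mapping 5 -> 9
 *   check_ids(5, 9, idmap) -> true,  matches the recorded mapping
 *   check_ids(5, 3, idmap) -> false, old id 5 is already mapped to 9
 *   check_ids(7, 9, idmap) -> true,  a different old id may share cur id 9
 */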
10646 
10647 static void clean_func_state(struct bpf_verifier_env *env,
10648 			     struct bpf_func_state *st)
10649 {
10650 	enum bpf_reg_liveness live;
10651 	int i, j;
10652 
10653 	for (i = 0; i < BPF_REG_FP; i++) {
10654 		live = st->regs[i].live;
10655 		/* liveness must not touch this register anymore */
10656 		st->regs[i].live |= REG_LIVE_DONE;
10657 		if (!(live & REG_LIVE_READ))
10658 			/* since the register is unused, clear its state
10659 			 * to make further comparison simpler
10660 			 */
10661 			__mark_reg_not_init(env, &st->regs[i]);
10662 	}
10663 
10664 	for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) {
10665 		live = st->stack[i].spilled_ptr.live;
10666 		/* liveness must not touch this stack slot anymore */
10667 		st->stack[i].spilled_ptr.live |= REG_LIVE_DONE;
10668 		if (!(live & REG_LIVE_READ)) {
10669 			__mark_reg_not_init(env, &st->stack[i].spilled_ptr);
10670 			for (j = 0; j < BPF_REG_SIZE; j++)
10671 				st->stack[i].slot_type[j] = STACK_INVALID;
10672 		}
10673 	}
10674 }
10675 
10676 static void clean_verifier_state(struct bpf_verifier_env *env,
10677 				 struct bpf_verifier_state *st)
10678 {
10679 	int i;
10680 
10681 	if (st->frame[0]->regs[0].live & REG_LIVE_DONE)
10682 		/* all regs in this state in all frames were already marked */
10683 		return;
10684 
10685 	for (i = 0; i <= st->curframe; i++)
10686 		clean_func_state(env, st->frame[i]);
10687 }
10688 
10689 /* the parentage chains form a tree.
10690  * the verifier states are added to state lists at given insn and
10691  * pushed into state stack for future exploration.
10692  * when the verifier reaches bpf_exit insn some of the verifier states
10693  * stored in the state lists have their final liveness state already,
10694  * but a lot of states will get revised from liveness point of view when
10695  * the verifier explores other branches.
10696  * Example:
10697  * 1: r0 = 1
10698  * 2: if r1 == 100 goto pc+1
10699  * 3: r0 = 2
10700  * 4: exit
10701  * when the verifier reaches exit insn the register r0 in the state list of
10702  * insn 2 will be seen as !REG_LIVE_READ. Then the verifier pops the other_branch
10703  * of insn 2 and goes exploring further. At the insn 4 it will walk the
10704  * parentage chain from insn 4 into insn 2 and will mark r0 as REG_LIVE_READ.
10705  *
10706  * Since the verifier pushes the branch states as it sees them while exploring
10707  * the program the condition of walking the branch instruction for the second
10708  * time means that all states below this branch were already explored and
10709  * their final liveness marks are already propagated.
10710  * Hence when the verifier completes the search of state list in is_state_visited()
10711  * we can call this clean_live_states() function to mark all liveness states
10712  * as REG_LIVE_DONE to indicate that 'parent' pointers of 'struct bpf_reg_state'
10713  * will not be used.
10714  * This function also clears the registers and stack slots of states that
10715  * were never READ, to simplify state merging.
10716  *
10717  * An important note here: walking the same branch instruction in the callee
10718  * doesn't mean that the states are DONE. The verifier also has to compare
10719  * the callsites.
10720  */
10721 static void clean_live_states(struct bpf_verifier_env *env, int insn,
10722 			      struct bpf_verifier_state *cur)
10723 {
10724 	struct bpf_verifier_state_list *sl;
10725 	int i;
10726 
10727 	sl = *explored_state(env, insn);
10728 	while (sl) {
10729 		if (sl->state.branches)
10730 			goto next;
10731 		if (sl->state.insn_idx != insn ||
10732 		    sl->state.curframe != cur->curframe)
10733 			goto next;
10734 		for (i = 0; i <= cur->curframe; i++)
10735 			if (sl->state.frame[i]->callsite != cur->frame[i]->callsite)
10736 				goto next;
10737 		clean_verifier_state(env, &sl->state);
10738 next:
10739 		sl = sl->next;
10740 	}
10741 }
10742 
10743 /* Returns true if (rold safe implies rcur safe) */
10744 static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold,
10745 		    struct bpf_reg_state *rcur, struct bpf_id_pair *idmap)
10746 {
10747 	bool equal;
10748 
10749 	if (!(rold->live & REG_LIVE_READ))
10750 		/* explored state didn't use this */
10751 		return true;
10752 
10753 	equal = memcmp(rold, rcur, offsetof(struct bpf_reg_state, parent)) == 0;
10754 
10755 	if (rold->type == PTR_TO_STACK)
10756 		/* two stack pointers are equal only if they're pointing to
10757 		 * the same stack frame, since fp-8 in foo != fp-8 in bar
10758 		 */
10759 		return equal && rold->frameno == rcur->frameno;
10760 
10761 	if (equal)
10762 		return true;
10763 
10764 	if (rold->type == NOT_INIT)
10765 		/* explored state can't have used this */
10766 		return true;
10767 	if (rcur->type == NOT_INIT)
10768 		return false;
10769 	switch (base_type(rold->type)) {
10770 	case SCALAR_VALUE:
10771 		if (env->explore_alu_limits)
10772 			return false;
10773 		if (rcur->type == SCALAR_VALUE) {
10774 			if (!rold->precise && !rcur->precise)
10775 				return true;
10776 			/* new val must satisfy old val knowledge */
10777 			return range_within(rold, rcur) &&
10778 			       tnum_in(rold->var_off, rcur->var_off);
10779 		} else {
10780 			/* We're trying to use a pointer in place of a scalar.
10781 			 * Even if the scalar was unbounded, this could lead to
10782 			 * pointer leaks because scalars are allowed to leak
10783 			 * while pointers are not. We could make this safe in
10784 			 * special cases if root is calling us, but it's
10785 			 * probably not worth the hassle.
10786 			 */
10787 			return false;
10788 		}
10789 	case PTR_TO_MAP_KEY:
10790 	case PTR_TO_MAP_VALUE:
10791 		/* a PTR_TO_MAP_VALUE could be safe to use as a
10792 		 * PTR_TO_MAP_VALUE_OR_NULL into the same map.
10793 		 * However, if the old PTR_TO_MAP_VALUE_OR_NULL then got NULL-
10794 		 * checked, doing so could have affected others with the same
10795 		 * id, and we can't check for that because we lost the id when
10796 		 * we converted to a PTR_TO_MAP_VALUE.
10797 		 */
10798 		if (type_may_be_null(rold->type)) {
10799 			if (!type_may_be_null(rcur->type))
10800 				return false;
10801 			if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)))
10802 				return false;
10803 			/* Check our ids match any regs they're supposed to */
10804 			return check_ids(rold->id, rcur->id, idmap);
10805 		}
10806 
10807 		/* If the new min/max/var_off satisfy the old ones and
10808 		 * everything else matches, we are OK.
10809 		 * 'id' is not compared, since it's only used for maps with
10810 		 * bpf_spin_lock inside map element and in such cases if
10811 		 * the rest of the prog is valid for one map element then
10812 		 * it's valid for all map elements regardless of the key
10813 		 * used in bpf_map_lookup()
10814 		 */
10815 		return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 &&
10816 		       range_within(rold, rcur) &&
10817 		       tnum_in(rold->var_off, rcur->var_off);
10818 	case PTR_TO_PACKET_META:
10819 	case PTR_TO_PACKET:
10820 		if (rcur->type != rold->type)
10821 			return false;
10822 		/* We must have at least as much range as the old ptr
10823 		 * did, so that any accesses which were safe before are
10824 		 * still safe.  This is true even if old range < old off,
10825 		 * since someone could have accessed through (ptr - k), or
10826 		 * even done ptr -= k in a register, to get a safe access.
10827 		 */
10828 		if (rold->range > rcur->range)
10829 			return false;
10830 		/* If the offsets don't match, we can't trust our alignment;
10831 		 * nor can we be sure that we won't fall out of range.
10832 		 */
10833 		if (rold->off != rcur->off)
10834 			return false;
10835 		/* id relations must be preserved */
10836 		if (rold->id && !check_ids(rold->id, rcur->id, idmap))
10837 			return false;
10838 		/* new val must satisfy old val knowledge */
10839 		return range_within(rold, rcur) &&
10840 		       tnum_in(rold->var_off, rcur->var_off);
10841 	case PTR_TO_CTX:
10842 	case CONST_PTR_TO_MAP:
10843 	case PTR_TO_PACKET_END:
10844 	case PTR_TO_FLOW_KEYS:
10845 	case PTR_TO_SOCKET:
10846 	case PTR_TO_SOCK_COMMON:
10847 	case PTR_TO_TCP_SOCK:
10848 	case PTR_TO_XDP_SOCK:
10849 		/* Only valid matches are exact, which memcmp() above
10850 		 * would have accepted
10851 		 */
10852 	default:
10853 		/* Don't know what's going on, just say it's not safe */
10854 		return false;
10855 	}
10856 
10857 	/* Shouldn't get here; if we do, say it's not safe */
10858 	WARN_ON_ONCE(1);
10859 	return false;
10860 }
10861 
10862 static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old,
10863 		      struct bpf_func_state *cur, struct bpf_id_pair *idmap)
10864 {
10865 	int i, spi;
10866 
10867 	/* walk slots of the explored stack and ignore any additional
10868 	 * slots in the current stack, since explored(safe) state
10869 	 * didn't use them
10870 	 */
10871 	for (i = 0; i < old->allocated_stack; i++) {
10872 		spi = i / BPF_REG_SIZE;
10873 
10874 		if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ)) {
10875 			i += BPF_REG_SIZE - 1;
10876 			/* explored state didn't use this */
10877 			continue;
10878 		}
10879 
10880 		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID)
10881 			continue;
10882 
10883 		/* explored stack has more populated slots than current stack
10884 		 * and these slots were used
10885 		 */
10886 		if (i >= cur->allocated_stack)
10887 			return false;
10888 
10889 		/* if old state was safe with misc data in the stack
10890 		 * it will be safe with zero-initialized stack.
10891 		 * The opposite is not true
10892 		 */
10893 		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC &&
10894 		    cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO)
10895 			continue;
10896 		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] !=
10897 		    cur->stack[spi].slot_type[i % BPF_REG_SIZE])
10898 			/* Ex: old explored (safe) state has STACK_SPILL in
10899 			 * this stack slot, but current has STACK_MISC ->
10900 			 * these verifier states are not equivalent;
10901 			 * return false to continue verification of this path
10902 			 */
10903 			return false;
10904 		if (i % BPF_REG_SIZE != BPF_REG_SIZE - 1)
10905 			continue;
10906 		if (!is_spilled_reg(&old->stack[spi]))
10907 			continue;
10908 		if (!regsafe(env, &old->stack[spi].spilled_ptr,
10909 			     &cur->stack[spi].spilled_ptr, idmap))
10910 			/* when explored and current stack slots are both storing
10911 			 * spilled registers, check that the stored pointers' types
10912 			 * are the same as well.
10913 			 * Ex: explored safe path could have stored
10914 			 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8}
10915 			 * but current path has stored:
10916 			 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16}
10917 			 * such verifier states are not equivalent.
10918 			 * return false to continue verification of this path
10919 			 */
10920 			return false;
10921 	}
10922 	return true;
10923 }
10924 
10925 static bool refsafe(struct bpf_func_state *old, struct bpf_func_state *cur)
10926 {
10927 	if (old->acquired_refs != cur->acquired_refs)
10928 		return false;
10929 	return !memcmp(old->refs, cur->refs,
10930 		       sizeof(*old->refs) * old->acquired_refs);
10931 }
10932 
10933 /* compare two verifier states
10934  *
10935  * all states stored in state_list are known to be valid, since
10936  * verifier reached 'bpf_exit' instruction through them
10937  *
10938  * this function is called when the verifier explores different branches of
10939  * execution popped from the state stack. If it sees an old state that has
10940  * a more strict register state and a more strict stack state, then this
10941  * execution branch doesn't need to be explored further, since the verifier
10942  * already concluded that the more strict state leads to a valid finish.
10943  *
10944  * Therefore two states are equivalent if register state is more conservative
10945  * and explored stack state is more conservative than the current one.
10946  * Example:
10947  *       explored                   current
10948  * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC)
10949  * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC)
10950  *
10951  * In other words if current stack state (one being explored) has more
10952  * valid slots than old one that already passed validation, it means
10953  * the verifier can stop exploring and conclude that current state is valid too
10954  *
10955  * Similarly with registers. If explored state has register type as invalid
10956  * whereas register type in current state is meaningful, it means that
10957  * the current state will reach 'bpf_exit' instruction safely
10958  */
10959 static bool func_states_equal(struct bpf_verifier_env *env, struct bpf_func_state *old,
10960 			      struct bpf_func_state *cur)
10961 {
10962 	int i;
10963 
10964 	memset(env->idmap_scratch, 0, sizeof(env->idmap_scratch));
10965 	for (i = 0; i < MAX_BPF_REG; i++)
10966 		if (!regsafe(env, &old->regs[i], &cur->regs[i],
10967 			     env->idmap_scratch))
10968 			return false;
10969 
10970 	if (!stacksafe(env, old, cur, env->idmap_scratch))
10971 		return false;
10972 
10973 	if (!refsafe(old, cur))
10974 		return false;
10975 
10976 	return true;
10977 }
10978 
10979 static bool states_equal(struct bpf_verifier_env *env,
10980 			 struct bpf_verifier_state *old,
10981 			 struct bpf_verifier_state *cur)
10982 {
10983 	int i;
10984 
10985 	if (old->curframe != cur->curframe)
10986 		return false;
10987 
10988 	/* Verification state from speculative execution simulation
10989 	 * must never prune a non-speculative execution one.
10990 	 */
10991 	if (old->speculative && !cur->speculative)
10992 		return false;
10993 
10994 	if (old->active_spin_lock != cur->active_spin_lock)
10995 		return false;
10996 
10997 	/* for states to be equal callsites have to be the same
10998 	 * and all frame states need to be equivalent
10999 	 */
11000 	for (i = 0; i <= old->curframe; i++) {
11001 		if (old->frame[i]->callsite != cur->frame[i]->callsite)
11002 			return false;
11003 		if (!func_states_equal(env, old->frame[i], cur->frame[i]))
11004 			return false;
11005 	}
11006 	return true;
11007 }
11008 
11009 /* Return 0 if no propagation happened. Return negative error code if error
11010  * happened. Otherwise, return the propagated bit.
11011  */
11012 static int propagate_liveness_reg(struct bpf_verifier_env *env,
11013 				  struct bpf_reg_state *reg,
11014 				  struct bpf_reg_state *parent_reg)
11015 {
11016 	u8 parent_flag = parent_reg->live & REG_LIVE_READ;
11017 	u8 flag = reg->live & REG_LIVE_READ;
11018 	int err;
11019 
11020 	/* When we get here, the read flags of PARENT_REG or REG could be any of
11021 	 * REG_LIVE_READ64, REG_LIVE_READ32, REG_LIVE_NONE. There is no need
11022 	 * for propagation if PARENT_REG already has the strongest flag, REG_LIVE_READ64.
11023 	 */
11024 	if (parent_flag == REG_LIVE_READ64 ||
11025 	    /* Or if there is no read flag from REG. */
11026 	    !flag ||
11027 	    /* Or if the read flag from REG is the same as PARENT_REG. */
11028 	    parent_flag == flag)
11029 		return 0;
11030 
11031 	err = mark_reg_read(env, reg, parent_reg, flag);
11032 	if (err)
11033 		return err;
11034 
11035 	return flag;
11036 }
11037 
11038 /* A write screens off any subsequent reads; but write marks come from the
11039  * straight-line code between a state and its parent.  When we arrive at an
11040  * equivalent state (jump target or such) we didn't arrive by the straight-line
11041  * code, so read marks in the state must propagate to the parent regardless
11042  * of the state's write marks. That's what 'parent == state->parent' comparison
11043  * in mark_reg_read() is for.
11044  */
11045 static int propagate_liveness(struct bpf_verifier_env *env,
11046 			      const struct bpf_verifier_state *vstate,
11047 			      struct bpf_verifier_state *vparent)
11048 {
11049 	struct bpf_reg_state *state_reg, *parent_reg;
11050 	struct bpf_func_state *state, *parent;
11051 	int i, frame, err = 0;
11052 
11053 	if (vparent->curframe != vstate->curframe) {
11054 		WARN(1, "propagate_live: parent frame %d current frame %d\n",
11055 		     vparent->curframe, vstate->curframe);
11056 		return -EFAULT;
11057 	}
11058 	/* Propagate read liveness of registers... */
11059 	BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
11060 	for (frame = 0; frame <= vstate->curframe; frame++) {
11061 		parent = vparent->frame[frame];
11062 		state = vstate->frame[frame];
11063 		parent_reg = parent->regs;
11064 		state_reg = state->regs;
11065 		/* We don't need to worry about FP liveness, it's read-only */
11066 		for (i = frame < vstate->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++) {
11067 			err = propagate_liveness_reg(env, &state_reg[i],
11068 						     &parent_reg[i]);
11069 			if (err < 0)
11070 				return err;
11071 			if (err == REG_LIVE_READ64)
11072 				mark_insn_zext(env, &parent_reg[i]);
11073 		}
11074 
11075 		/* Propagate stack slots. */
11076 		for (i = 0; i < state->allocated_stack / BPF_REG_SIZE &&
11077 			    i < parent->allocated_stack / BPF_REG_SIZE; i++) {
11078 			parent_reg = &parent->stack[i].spilled_ptr;
11079 			state_reg = &state->stack[i].spilled_ptr;
11080 			err = propagate_liveness_reg(env, state_reg,
11081 						     parent_reg);
11082 			if (err < 0)
11083 				return err;
11084 		}
11085 	}
11086 	return 0;
11087 }
11088 
11089 /* find precise scalars in the previous equivalent state and
11090  * propagate them into the current state
11091  */
11092 static int propagate_precision(struct bpf_verifier_env *env,
11093 			       const struct bpf_verifier_state *old)
11094 {
11095 	struct bpf_reg_state *state_reg;
11096 	struct bpf_func_state *state;
11097 	int i, err = 0;
11098 
11099 	state = old->frame[old->curframe];
11100 	state_reg = state->regs;
11101 	for (i = 0; i < BPF_REG_FP; i++, state_reg++) {
11102 		if (state_reg->type != SCALAR_VALUE ||
11103 		    !state_reg->precise)
11104 			continue;
11105 		if (env->log.level & BPF_LOG_LEVEL2)
11106 			verbose(env, "propagating r%d\n", i);
11107 		err = mark_chain_precision(env, i);
11108 		if (err < 0)
11109 			return err;
11110 	}
11111 
11112 	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
11113 		if (!is_spilled_reg(&state->stack[i]))
11114 			continue;
11115 		state_reg = &state->stack[i].spilled_ptr;
11116 		if (state_reg->type != SCALAR_VALUE ||
11117 		    !state_reg->precise)
11118 			continue;
11119 		if (env->log.level & BPF_LOG_LEVEL2)
11120 			verbose(env, "propagating fp%d\n",
11121 				(-i - 1) * BPF_REG_SIZE);
11122 		err = mark_chain_precision_stack(env, i);
11123 		if (err < 0)
11124 			return err;
11125 	}
11126 	return 0;
11127 }
11128 
11129 static bool states_maybe_looping(struct bpf_verifier_state *old,
11130 				 struct bpf_verifier_state *cur)
11131 {
11132 	struct bpf_func_state *fold, *fcur;
11133 	int i, fr = cur->curframe;
11134 
11135 	if (old->curframe != fr)
11136 		return false;
11137 
11138 	fold = old->frame[fr];
11139 	fcur = cur->frame[fr];
11140 	for (i = 0; i < MAX_BPF_REG; i++)
11141 		if (memcmp(&fold->regs[i], &fcur->regs[i],
11142 			   offsetof(struct bpf_reg_state, parent)))
11143 			return false;
11144 	return true;
11145 }
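
/* Editorial note (not part of the original source): states_maybe_looping()
 * is only a cheap pre-filter. Two states whose registers in the current
 * frame are bit-for-bit identical (up to the 'parent' pointer) may be
 * iterations of the same loop; is_state_visited() below reports an infinite
 * loop only when such a pair also passes the full states_equal() check.
 */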
11146 
11147 
11148 static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
11149 {
11150 	struct bpf_verifier_state_list *new_sl;
11151 	struct bpf_verifier_state_list *sl, **pprev;
11152 	struct bpf_verifier_state *cur = env->cur_state, *new;
11153 	int i, j, err, states_cnt = 0;
11154 	bool add_new_state = env->test_state_freq ? true : false;
11155 
11156 	cur->last_insn_idx = env->prev_insn_idx;
11157 	if (!env->insn_aux_data[insn_idx].prune_point)
11158 		/* this 'insn_idx' instruction wasn't marked, so we will not
11159 		 * be doing state search here
11160 		 */
11161 		return 0;
11162 
11163 	/* bpf progs typically have a pruning point every 4 instructions
11164 	 * http://vger.kernel.org/bpfconf2019.html#session-1
11165 	 * Do not add a new state for future pruning if the verifier hasn't seen
11166 	 * at least 2 jumps and at least 8 instructions.
11167 	 * This heuristic helps decrease the 'total_states' and 'peak_states' metrics.
11168 	 * In tests that amounts to up to a 50% reduction in total verifier
11169 	 * memory consumption and a 20% verifier time speedup.
11170 	 */
11171 	if (env->jmps_processed - env->prev_jmps_processed >= 2 &&
11172 	    env->insn_processed - env->prev_insn_processed >= 8)
11173 		add_new_state = true;
11174 
11175 	pprev = explored_state(env, insn_idx);
11176 	sl = *pprev;
11177 
11178 	clean_live_states(env, insn_idx, cur);
11179 
11180 	while (sl) {
11181 		states_cnt++;
11182 		if (sl->state.insn_idx != insn_idx)
11183 			goto next;
11184 
11185 		if (sl->state.branches) {
11186 			struct bpf_func_state *frame = sl->state.frame[sl->state.curframe];
11187 
11188 			if (frame->in_async_callback_fn &&
11189 			    frame->async_entry_cnt != cur->frame[cur->curframe]->async_entry_cnt) {
11190 				/* Different async_entry_cnt means that the verifier is
11191 				 * processing another entry into async callback.
11192 				 * Seeing the same state is not an indication of infinite
11193 				 * loop or infinite recursion.
11194 				 * But finding the same state doesn't mean that it's safe
11195 				 * to stop processing the current state. The previous state
11196 				 * hasn't yet reached bpf_exit, since state.branches > 0.
11197 				 * Checking in_async_callback_fn alone is not enough either,
11198 				 * since the verifier still needs to catch infinite loops
11199 				 * inside async callbacks.
11200 				 */
11201 			} else if (states_maybe_looping(&sl->state, cur) &&
11202 				   states_equal(env, &sl->state, cur)) {
11203 				verbose_linfo(env, insn_idx, "; ");
11204 				verbose(env, "infinite loop detected at insn %d\n", insn_idx);
11205 				return -EINVAL;
11206 			}
11207 			/* if the verifier is processing a loop, avoid adding new state
11208 			 * too often, since different loop iterations have distinct
11209 			 * states and may not help future pruning.
11210 			 * This threshold shouldn't be too low to make sure that
11211 			 * a loop with large bound will be rejected quickly.
11212 			 * The most abusive loop will be:
11213 			 * r1 += 1
11214 			 * if r1 < 1000000 goto pc-2
11215 			 * 1M insn_processed limit / 100 == 10k peak states.
11216 			 * This threshold shouldn't be too high either, since states
11217 			 * at the end of the loop are likely to be useful in pruning.
11218 			 */
11219 			if (env->jmps_processed - env->prev_jmps_processed < 20 &&
11220 			    env->insn_processed - env->prev_insn_processed < 100)
11221 				add_new_state = false;
11222 			goto miss;
11223 		}
11224 		if (states_equal(env, &sl->state, cur)) {
11225 			sl->hit_cnt++;
11226 			/* reached equivalent register/stack state,
11227 			 * prune the search.
11228 			 * Registers read by the continuation are read by us.
11229 			 * If we have any write marks in env->cur_state, they
11230 			 * will prevent corresponding reads in the continuation
11231 			 * from reaching our parent (an explored_state).  Our
11232 			 * own state will get the read marks recorded, but
11233 			 * they'll be immediately forgotten as we're pruning
11234 			 * this state and will pop a new one.
11235 			 */
11236 			err = propagate_liveness(env, &sl->state, cur);
11237 
11238 			/* if the previous state reached the exit with precision and
11239 			 * the current state is equivalent to it (except for precision
11240 			 * marks), the precision needs to be propagated back into
11241 			 * the current state.
11242 			 */
11243 			err = err ? : push_jmp_history(env, cur);
11244 			err = err ? : propagate_precision(env, &sl->state);
11245 			if (err)
11246 				return err;
11247 			return 1;
11248 		}
11249 miss:
11250 		/* when new state is not going to be added do not increase miss count.
11251 		 * Otherwise several loop iterations will remove the state
11252 		 * recorded earlier. The goal of these heuristics is to have
11253 		 * states from some iterations of the loop (some in the beginning
11254 		 * and some at the end) to help pruning.
11255 		 */
11256 		if (add_new_state)
11257 			sl->miss_cnt++;
11258 		/* heuristic to determine whether this state is beneficial
11259 		 * to keep checking from state equivalence point of view.
11260 		 * Higher numbers increase max_states_per_insn and verification time,
11261 		 * but do not meaningfully decrease insn_processed.
11262 		 */
11263 		if (sl->miss_cnt > sl->hit_cnt * 3 + 3) {
11264 			/* the state is unlikely to be useful. Remove it to
11265 			 * speed up verification
11266 			 */
11267 			*pprev = sl->next;
11268 			if (sl->state.frame[0]->regs[0].live & REG_LIVE_DONE) {
11269 				u32 br = sl->state.branches;
11270 
11271 				WARN_ONCE(br,
11272 					  "BUG live_done but branches_to_explore %d\n",
11273 					  br);
11274 				free_verifier_state(&sl->state, false);
11275 				kfree(sl);
11276 				env->peak_states--;
11277 			} else {
11278 				/* cannot free this state, since the parentage chain may
11279 				 * walk it later. Add it to the free_list instead, to
11280 				 * be freed at the end of verification
11281 				 */
11282 				sl->next = env->free_list;
11283 				env->free_list = sl;
11284 			}
11285 			sl = *pprev;
11286 			continue;
11287 		}
11288 next:
11289 		pprev = &sl->next;
11290 		sl = *pprev;
11291 	}
11292 
11293 	if (env->max_states_per_insn < states_cnt)
11294 		env->max_states_per_insn = states_cnt;
11295 
11296 	if (!env->bpf_capable && states_cnt > BPF_COMPLEXITY_LIMIT_STATES)
11297 		return push_jmp_history(env, cur);
11298 
11299 	if (!add_new_state)
11300 		return push_jmp_history(env, cur);
11301 
11302 	/* There were no equivalent states, remember the current one.
11303 	 * Technically the current state is not proven to be safe yet,
11304 	 * but it will either reach the outermost bpf_exit (which means it's safe)
11305 	 * or it will be rejected. When there are no loops the verifier won't be
11306 	 * seeing this tuple (frame[0].callsite, frame[1].callsite, .. insn_idx)
11307 	 * again on the way to bpf_exit.
11308 	 * When looping the sl->state.branches will be > 0 and this state
11309 	 * will not be considered for equivalence until branches == 0.
11310 	 */
11311 	new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL);
11312 	if (!new_sl)
11313 		return -ENOMEM;
11314 	env->total_states++;
11315 	env->peak_states++;
11316 	env->prev_jmps_processed = env->jmps_processed;
11317 	env->prev_insn_processed = env->insn_processed;
11318 
11319 	/* add new state to the head of linked list */
11320 	new = &new_sl->state;
11321 	err = copy_verifier_state(new, cur);
11322 	if (err) {
11323 		free_verifier_state(new, false);
11324 		kfree(new_sl);
11325 		return err;
11326 	}
11327 	new->insn_idx = insn_idx;
11328 	WARN_ONCE(new->branches != 1,
11329 		  "BUG is_state_visited:branches_to_explore=%d insn %d\n", new->branches, insn_idx);
11330 
11331 	cur->parent = new;
11332 	cur->first_insn_idx = insn_idx;
11333 	clear_jmp_history(cur);
11334 	new_sl->next = *explored_state(env, insn_idx);
11335 	*explored_state(env, insn_idx) = new_sl;
11336 	/* connect new state to parentage chain. Current frame needs all
11337 	 * registers connected. Only r6 - r9 of the callers are alive (pushed
11338 	 * to the stack implicitly by JITs) so in callers' frames connect just
11339 	 * r6 - r9 as an optimization. Callers will have r1 - r5 connected to
11340 	 * the state of the call instruction (with WRITTEN set), and r0 comes
11341 	 * from callee with its full parentage chain, anyway.
11342 	 */
11343 	/* clear write marks in current state: the writes we did are not writes
11344 	 * our child did, so they don't screen off its reads from us.
11345 	 * (There are no read marks in current state, because reads always mark
11346 	 * their parent and current state never has children yet.  Only
11347 	 * explored_states can get read marks.)
11348 	 */
11349 	for (j = 0; j <= cur->curframe; j++) {
11350 		for (i = j < cur->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++)
11351 			cur->frame[j]->regs[i].parent = &new->frame[j]->regs[i];
11352 		for (i = 0; i < BPF_REG_FP; i++)
11353 			cur->frame[j]->regs[i].live = REG_LIVE_NONE;
11354 	}
11355 
11356 	/* all stack frames are accessible from callee, clear them all */
11357 	for (j = 0; j <= cur->curframe; j++) {
11358 		struct bpf_func_state *frame = cur->frame[j];
11359 		struct bpf_func_state *newframe = new->frame[j];
11360 
11361 		for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++) {
11362 			frame->stack[i].spilled_ptr.live = REG_LIVE_NONE;
11363 			frame->stack[i].spilled_ptr.parent =
11364 						&newframe->stack[i].spilled_ptr;
11365 		}
11366 	}
11367 	return 0;
11368 }
11369 
11370 /* Return true if it's OK to have the same insn return a different type. */
11371 static bool reg_type_mismatch_ok(enum bpf_reg_type type)
11372 {
11373 	switch (base_type(type)) {
11374 	case PTR_TO_CTX:
11375 	case PTR_TO_SOCKET:
11376 	case PTR_TO_SOCK_COMMON:
11377 	case PTR_TO_TCP_SOCK:
11378 	case PTR_TO_XDP_SOCK:
11379 	case PTR_TO_BTF_ID:
11380 		return false;
11381 	default:
11382 		return true;
11383 	}
11384 }
11385 
11386 /* If an instruction was previously used with particular pointer types, then we
11387  * need to be careful to avoid cases such as the below, where it may be ok
11388  * for one branch accessing the pointer, but not ok for the other branch:
11389  *
11390  * R1 = sock_ptr
11391  * goto X;
11392  * ...
11393  * R1 = some_other_valid_ptr;
11394  * goto X;
11395  * ...
11396  * R2 = *(u32 *)(R1 + 0);
11397  */
11398 static bool reg_type_mismatch(enum bpf_reg_type src, enum bpf_reg_type prev)
11399 {
11400 	return src != prev && (!reg_type_mismatch_ok(src) ||
11401 			       !reg_type_mismatch_ok(prev));
11402 }
11403 
11404 static int do_check(struct bpf_verifier_env *env)
11405 {
11406 	bool pop_log = !(env->log.level & BPF_LOG_LEVEL2);
11407 	struct bpf_verifier_state *state = env->cur_state;
11408 	struct bpf_insn *insns = env->prog->insnsi;
11409 	struct bpf_reg_state *regs;
11410 	int insn_cnt = env->prog->len;
11411 	bool do_print_state = false;
11412 	int prev_insn_idx = -1;
11413 
11414 	for (;;) {
11415 		struct bpf_insn *insn;
11416 		u8 class;
11417 		int err;
11418 
11419 		env->prev_insn_idx = prev_insn_idx;
11420 		if (env->insn_idx >= insn_cnt) {
11421 			verbose(env, "invalid insn idx %d insn_cnt %d\n",
11422 				env->insn_idx, insn_cnt);
11423 			return -EFAULT;
11424 		}
11425 
11426 		insn = &insns[env->insn_idx];
11427 		class = BPF_CLASS(insn->code);
11428 
11429 		if (++env->insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) {
11430 			verbose(env,
11431 				"BPF program is too large. Processed %d insn\n",
11432 				env->insn_processed);
11433 			return -E2BIG;
11434 		}
11435 
11436 		err = is_state_visited(env, env->insn_idx);
11437 		if (err < 0)
11438 			return err;
11439 		if (err == 1) {
11440 			/* found equivalent state, can prune the search */
11441 			if (env->log.level & BPF_LOG_LEVEL) {
11442 				if (do_print_state)
11443 					verbose(env, "\nfrom %d to %d%s: safe\n",
11444 						env->prev_insn_idx, env->insn_idx,
11445 						env->cur_state->speculative ?
11446 						" (speculative execution)" : "");
11447 				else
11448 					verbose(env, "%d: safe\n", env->insn_idx);
11449 			}
11450 			goto process_bpf_exit;
11451 		}
11452 
11453 		if (signal_pending(current))
11454 			return -EAGAIN;
11455 
11456 		if (need_resched())
11457 			cond_resched();
11458 
11459 		if (env->log.level & BPF_LOG_LEVEL2 && do_print_state) {
11460 			verbose(env, "\nfrom %d to %d%s:",
11461 				env->prev_insn_idx, env->insn_idx,
11462 				env->cur_state->speculative ?
11463 				" (speculative execution)" : "");
11464 			print_verifier_state(env, state->frame[state->curframe], true);
11465 			do_print_state = false;
11466 		}
11467 
11468 		if (env->log.level & BPF_LOG_LEVEL) {
11469 			const struct bpf_insn_cbs cbs = {
11470 				.cb_call	= disasm_kfunc_name,
11471 				.cb_print	= verbose,
11472 				.private_data	= env,
11473 			};
11474 
11475 			if (verifier_state_scratched(env))
11476 				print_insn_state(env, state->frame[state->curframe]);
11477 
11478 			verbose_linfo(env, env->insn_idx, "; ");
11479 			env->prev_log_len = env->log.len_used;
11480 			verbose(env, "%d: ", env->insn_idx);
11481 			print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
11482 			env->prev_insn_print_len = env->log.len_used - env->prev_log_len;
11483 			env->prev_log_len = env->log.len_used;
11484 		}
11485 
11486 		if (bpf_prog_is_dev_bound(env->prog->aux)) {
11487 			err = bpf_prog_offload_verify_insn(env, env->insn_idx,
11488 							   env->prev_insn_idx);
11489 			if (err)
11490 				return err;
11491 		}
11492 
11493 		regs = cur_regs(env);
11494 		sanitize_mark_insn_seen(env);
11495 		prev_insn_idx = env->insn_idx;
11496 
11497 		if (class == BPF_ALU || class == BPF_ALU64) {
11498 			err = check_alu_op(env, insn);
11499 			if (err)
11500 				return err;
11501 
11502 		} else if (class == BPF_LDX) {
11503 			enum bpf_reg_type *prev_src_type, src_reg_type;
11504 
11505 			/* check for reserved fields is already done */
11506 
11507 			/* check src operand */
11508 			err = check_reg_arg(env, insn->src_reg, SRC_OP);
11509 			if (err)
11510 				return err;
11511 
11512 			err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
11513 			if (err)
11514 				return err;
11515 
11516 			src_reg_type = regs[insn->src_reg].type;
11517 
11518 			/* check that memory (src_reg + off) is readable,
11519 			 * the state of dst_reg will be updated by this func
11520 			 */
11521 			err = check_mem_access(env, env->insn_idx, insn->src_reg,
11522 					       insn->off, BPF_SIZE(insn->code),
11523 					       BPF_READ, insn->dst_reg, false);
11524 			if (err)
11525 				return err;
11526 
11527 			prev_src_type = &env->insn_aux_data[env->insn_idx].ptr_type;
11528 
11529 			if (*prev_src_type == NOT_INIT) {
11530 				/* saw a valid insn
11531 				 * dst_reg = *(u32 *)(src_reg + off)
11532 				 * save type to validate intersecting paths
11533 				 */
11534 				*prev_src_type = src_reg_type;
11535 
11536 			} else if (reg_type_mismatch(src_reg_type, *prev_src_type)) {
11537 				/* Abuser program is trying to use the same insn
11538 				 * dst_reg = *(u32*) (src_reg + off)
11539 				 * with different pointer types:
11540 				 * src_reg == ctx in one branch and
11541 				 * src_reg == stack|map in some other branch.
11542 				 * Reject it.
11543 				 */
11544 				verbose(env, "same insn cannot be used with different pointers\n");
11545 				return -EINVAL;
11546 			}
11547 
11548 		} else if (class == BPF_STX) {
11549 			enum bpf_reg_type *prev_dst_type, dst_reg_type;
11550 
11551 			if (BPF_MODE(insn->code) == BPF_ATOMIC) {
11552 				err = check_atomic(env, env->insn_idx, insn);
11553 				if (err)
11554 					return err;
11555 				env->insn_idx++;
11556 				continue;
11557 			}
11558 
11559 			if (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0) {
11560 				verbose(env, "BPF_STX uses reserved fields\n");
11561 				return -EINVAL;
11562 			}
11563 
11564 			/* check src1 operand */
11565 			err = check_reg_arg(env, insn->src_reg, SRC_OP);
11566 			if (err)
11567 				return err;
11568 			/* check src2 operand */
11569 			err = check_reg_arg(env, insn->dst_reg, SRC_OP);
11570 			if (err)
11571 				return err;
11572 
11573 			dst_reg_type = regs[insn->dst_reg].type;
11574 
11575 			/* check that memory (dst_reg + off) is writeable */
11576 			err = check_mem_access(env, env->insn_idx, insn->dst_reg,
11577 					       insn->off, BPF_SIZE(insn->code),
11578 					       BPF_WRITE, insn->src_reg, false);
11579 			if (err)
11580 				return err;
11581 
11582 			prev_dst_type = &env->insn_aux_data[env->insn_idx].ptr_type;
11583 
11584 			if (*prev_dst_type == NOT_INIT) {
11585 				*prev_dst_type = dst_reg_type;
11586 			} else if (reg_type_mismatch(dst_reg_type, *prev_dst_type)) {
11587 				verbose(env, "same insn cannot be used with different pointers\n");
11588 				return -EINVAL;
11589 			}
11590 
11591 		} else if (class == BPF_ST) {
11592 			if (BPF_MODE(insn->code) != BPF_MEM ||
11593 			    insn->src_reg != BPF_REG_0) {
11594 				verbose(env, "BPF_ST uses reserved fields\n");
11595 				return -EINVAL;
11596 			}
11597 			/* check src operand */
11598 			err = check_reg_arg(env, insn->dst_reg, SRC_OP);
11599 			if (err)
11600 				return err;
11601 
11602 			if (is_ctx_reg(env, insn->dst_reg)) {
11603 				verbose(env, "BPF_ST stores into R%d %s is not allowed\n",
11604 					insn->dst_reg,
11605 					reg_type_str(env, reg_state(env, insn->dst_reg)->type));
11606 				return -EACCES;
11607 			}
11608 
11609 			/* check that memory (dst_reg + off) is writeable */
11610 			err = check_mem_access(env, env->insn_idx, insn->dst_reg,
11611 					       insn->off, BPF_SIZE(insn->code),
11612 					       BPF_WRITE, -1, false);
11613 			if (err)
11614 				return err;
11615 
11616 		} else if (class == BPF_JMP || class == BPF_JMP32) {
11617 			u8 opcode = BPF_OP(insn->code);
11618 
11619 			env->jmps_processed++;
11620 			if (opcode == BPF_CALL) {
11621 				if (BPF_SRC(insn->code) != BPF_K ||
11622 				    (insn->src_reg != BPF_PSEUDO_KFUNC_CALL
11623 				     && insn->off != 0) ||
11624 				    (insn->src_reg != BPF_REG_0 &&
11625 				     insn->src_reg != BPF_PSEUDO_CALL &&
11626 				     insn->src_reg != BPF_PSEUDO_KFUNC_CALL) ||
11627 				    insn->dst_reg != BPF_REG_0 ||
11628 				    class == BPF_JMP32) {
11629 					verbose(env, "BPF_CALL uses reserved fields\n");
11630 					return -EINVAL;
11631 				}
11632 
11633 				if (env->cur_state->active_spin_lock &&
11634 				    (insn->src_reg == BPF_PSEUDO_CALL ||
11635 				     insn->imm != BPF_FUNC_spin_unlock)) {
11636 					verbose(env, "function calls are not allowed while holding a lock\n");
11637 					return -EINVAL;
11638 				}
11639 				if (insn->src_reg == BPF_PSEUDO_CALL)
11640 					err = check_func_call(env, insn, &env->insn_idx);
11641 				else if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL)
11642 					err = check_kfunc_call(env, insn, &env->insn_idx);
11643 				else
11644 					err = check_helper_call(env, insn, &env->insn_idx);
11645 				if (err)
11646 					return err;
11647 			} else if (opcode == BPF_JA) {
11648 				if (BPF_SRC(insn->code) != BPF_K ||
11649 				    insn->imm != 0 ||
11650 				    insn->src_reg != BPF_REG_0 ||
11651 				    insn->dst_reg != BPF_REG_0 ||
11652 				    class == BPF_JMP32) {
11653 					verbose(env, "BPF_JA uses reserved fields\n");
11654 					return -EINVAL;
11655 				}
11656 
11657 				env->insn_idx += insn->off + 1;
11658 				continue;
11659 
11660 			} else if (opcode == BPF_EXIT) {
11661 				if (BPF_SRC(insn->code) != BPF_K ||
11662 				    insn->imm != 0 ||
11663 				    insn->src_reg != BPF_REG_0 ||
11664 				    insn->dst_reg != BPF_REG_0 ||
11665 				    class == BPF_JMP32) {
11666 					verbose(env, "BPF_EXIT uses reserved fields\n");
11667 					return -EINVAL;
11668 				}
11669 
11670 				if (env->cur_state->active_spin_lock) {
11671 					verbose(env, "bpf_spin_unlock is missing\n");
11672 					return -EINVAL;
11673 				}
11674 
11675 				if (state->curframe) {
11676 					/* exit from nested function */
11677 					err = prepare_func_exit(env, &env->insn_idx);
11678 					if (err)
11679 						return err;
11680 					do_print_state = true;
11681 					continue;
11682 				}
11683 
11684 				err = check_reference_leak(env);
11685 				if (err)
11686 					return err;
11687 
11688 				err = check_return_code(env);
11689 				if (err)
11690 					return err;
11691 process_bpf_exit:
11692 				mark_verifier_state_scratched(env);
11693 				update_branch_counts(env, env->cur_state);
11694 				err = pop_stack(env, &prev_insn_idx,
11695 						&env->insn_idx, pop_log);
11696 				if (err < 0) {
11697 					if (err != -ENOENT)
11698 						return err;
11699 					break;
11700 				} else {
11701 					do_print_state = true;
11702 					continue;
11703 				}
11704 			} else {
11705 				err = check_cond_jmp_op(env, insn, &env->insn_idx);
11706 				if (err)
11707 					return err;
11708 			}
11709 		} else if (class == BPF_LD) {
11710 			u8 mode = BPF_MODE(insn->code);
11711 
11712 			if (mode == BPF_ABS || mode == BPF_IND) {
11713 				err = check_ld_abs(env, insn);
11714 				if (err)
11715 					return err;
11716 
11717 			} else if (mode == BPF_IMM) {
11718 				err = check_ld_imm(env, insn);
11719 				if (err)
11720 					return err;
11721 
11722 				env->insn_idx++;
11723 				sanitize_mark_insn_seen(env);
11724 			} else {
11725 				verbose(env, "invalid BPF_LD mode\n");
11726 				return -EINVAL;
11727 			}
11728 		} else {
11729 			verbose(env, "unknown insn class %d\n", class);
11730 			return -EINVAL;
11731 		}
11732 
11733 		env->insn_idx++;
11734 	}
11735 
11736 	return 0;
11737 }
11738 
11739 static int find_btf_percpu_datasec(struct btf *btf)
11740 {
11741 	const struct btf_type *t;
11742 	const char *tname;
11743 	int i, n;
11744 
11745 	/*
11746 	 * vmlinux and each module have their own ".data..percpu" DATASEC in
11747 	 * BTF. So in the module case, we need to skip the vmlinux BTF types
11748 	 * and look only at the module's own BTF types.
11749 	 */
11750 	n = btf_nr_types(btf);
11751 	if (btf_is_module(btf))
11752 		i = btf_nr_types(btf_vmlinux);
11753 	else
11754 		i = 1;
11755 
11756 	for (; i < n; i++) {
11757 		t = btf_type_by_id(btf, i);
11758 		if (BTF_INFO_KIND(t->info) != BTF_KIND_DATASEC)
11759 			continue;
11760 
11761 		tname = btf_name_by_offset(btf, t->name_off);
11762 		if (!strcmp(tname, ".data..percpu"))
11763 			return i;
11764 	}
11765 
11766 	return -ENOENT;
11767 }
11768 
11769 /* replace pseudo btf_id with kernel symbol address */
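/*
 * A sketch of the ld_imm64 layout this function consumes (illustrative; this
 * is how loaders such as libbpf typically encode an extern ksym):
 *
 *	insn[0]: BPF_LD | BPF_IMM | BPF_DW, src_reg = BPF_PSEUDO_BTF_ID,
 *		 imm = btf_id of the KIND_VAR describing the symbol
 *	insn[1]: imm = FD of the module BTF object, or 0 for vmlinux BTF
 *
 * On success the pair is rewritten to hold the resolved kernel address:
 * insn[0].imm = lower 32 bits, insn[1].imm = upper 32 bits.
 */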
11770 static int check_pseudo_btf_id(struct bpf_verifier_env *env,
11771 			       struct bpf_insn *insn,
11772 			       struct bpf_insn_aux_data *aux)
11773 {
11774 	const struct btf_var_secinfo *vsi;
11775 	const struct btf_type *datasec;
11776 	struct btf_mod_pair *btf_mod;
11777 	const struct btf_type *t;
11778 	const char *sym_name;
11779 	bool percpu = false;
11780 	u32 type, id = insn->imm;
11781 	struct btf *btf;
11782 	s32 datasec_id;
11783 	u64 addr;
11784 	int i, btf_fd, err;
11785 
11786 	btf_fd = insn[1].imm;
11787 	if (btf_fd) {
11788 		btf = btf_get_by_fd(btf_fd);
11789 		if (IS_ERR(btf)) {
11790 			verbose(env, "invalid module BTF object FD specified.\n");
11791 			return -EINVAL;
11792 		}
11793 	} else {
11794 		if (!btf_vmlinux) {
11795 			verbose(env, "kernel is missing BTF, make sure CONFIG_DEBUG_INFO_BTF=y is specified in Kconfig.\n");
11796 			return -EINVAL;
11797 		}
11798 		btf = btf_vmlinux;
11799 		btf_get(btf);
11800 	}
11801 
11802 	t = btf_type_by_id(btf, id);
11803 	if (!t) {
11804 		verbose(env, "ldimm64 insn specifies invalid btf_id %d.\n", id);
11805 		err = -ENOENT;
11806 		goto err_put;
11807 	}
11808 
11809 	if (!btf_type_is_var(t)) {
11810 		verbose(env, "pseudo btf_id %d in ldimm64 isn't KIND_VAR.\n", id);
11811 		err = -EINVAL;
11812 		goto err_put;
11813 	}
11814 
11815 	sym_name = btf_name_by_offset(btf, t->name_off);
11816 	addr = kallsyms_lookup_name(sym_name);
11817 	if (!addr) {
11818 		verbose(env, "ldimm64 failed to find the address for kernel symbol '%s'.\n",
11819 			sym_name);
11820 		err = -ENOENT;
11821 		goto err_put;
11822 	}
11823 
11824 	datasec_id = find_btf_percpu_datasec(btf);
11825 	if (datasec_id > 0) {
11826 		datasec = btf_type_by_id(btf, datasec_id);
11827 		for_each_vsi(i, datasec, vsi) {
11828 			if (vsi->type == id) {
11829 				percpu = true;
11830 				break;
11831 			}
11832 		}
11833 	}
11834 
11835 	insn[0].imm = (u32)addr;
11836 	insn[1].imm = addr >> 32;
11837 
11838 	type = t->type;
11839 	t = btf_type_skip_modifiers(btf, type, NULL);
11840 	if (percpu) {
11841 		aux->btf_var.reg_type = PTR_TO_PERCPU_BTF_ID;
11842 		aux->btf_var.btf = btf;
11843 		aux->btf_var.btf_id = type;
11844 	} else if (!btf_type_is_struct(t)) {
11845 		const struct btf_type *ret;
11846 		const char *tname;
11847 		u32 tsize;
11848 
11849 		/* resolve the type size of ksym. */
11850 		ret = btf_resolve_size(btf, t, &tsize);
11851 		if (IS_ERR(ret)) {
11852 			tname = btf_name_by_offset(btf, t->name_off);
11853 			verbose(env, "ldimm64 unable to resolve the size of type '%s': %ld\n",
11854 				tname, PTR_ERR(ret));
11855 			err = -EINVAL;
11856 			goto err_put;
11857 		}
11858 		aux->btf_var.reg_type = PTR_TO_MEM | MEM_RDONLY;
11859 		aux->btf_var.mem_size = tsize;
11860 	} else {
11861 		aux->btf_var.reg_type = PTR_TO_BTF_ID;
11862 		aux->btf_var.btf = btf;
11863 		aux->btf_var.btf_id = type;
11864 	}
11865 
11866 	/* check whether we recorded this BTF (and maybe module) already */
11867 	for (i = 0; i < env->used_btf_cnt; i++) {
11868 		if (env->used_btfs[i].btf == btf) {
11869 			btf_put(btf);
11870 			return 0;
11871 		}
11872 	}
11873 
11874 	if (env->used_btf_cnt >= MAX_USED_BTFS) {
11875 		err = -E2BIG;
11876 		goto err_put;
11877 	}
11878 
11879 	btf_mod = &env->used_btfs[env->used_btf_cnt];
11880 	btf_mod->btf = btf;
11881 	btf_mod->module = NULL;
11882 
11883 	/* if we reference variables from kernel module, bump its refcount */
11884 	if (btf_is_module(btf)) {
11885 		btf_mod->module = btf_try_get_module(btf);
11886 		if (!btf_mod->module) {
11887 			err = -ENXIO;
11888 			goto err_put;
11889 		}
11890 	}
11891 
11892 	env->used_btf_cnt++;
11893 
11894 	return 0;
11895 err_put:
11896 	btf_put(btf);
11897 	return err;
11898 }
11899 
11900 static int check_map_prealloc(struct bpf_map *map)
11901 {
11902 	return (map->map_type != BPF_MAP_TYPE_HASH &&
11903 		map->map_type != BPF_MAP_TYPE_PERCPU_HASH &&
11904 		map->map_type != BPF_MAP_TYPE_HASH_OF_MAPS) ||
11905 		!(map->map_flags & BPF_F_NO_PREALLOC);
11906 }
11907 
11908 static bool is_tracing_prog_type(enum bpf_prog_type type)
11909 {
11910 	switch (type) {
11911 	case BPF_PROG_TYPE_KPROBE:
11912 	case BPF_PROG_TYPE_TRACEPOINT:
11913 	case BPF_PROG_TYPE_PERF_EVENT:
11914 	case BPF_PROG_TYPE_RAW_TRACEPOINT:
11915 		return true;
11916 	default:
11917 		return false;
11918 	}
11919 }
11920 
11921 static bool is_preallocated_map(struct bpf_map *map)
11922 {
11923 	if (!check_map_prealloc(map))
11924 		return false;
11925 	if (map->inner_map_meta && !check_map_prealloc(map->inner_map_meta))
11926 		return false;
11927 	return true;
11928 }
11929 
11930 static int check_map_prog_compatibility(struct bpf_verifier_env *env,
11931 					struct bpf_map *map,
11932 					struct bpf_prog *prog)
11933 
11934 {
11935 	enum bpf_prog_type prog_type = resolve_prog_type(prog);
11936 	/*
11937 	 * Validate that trace type programs use preallocated hash maps.
11938 	 *
11939 	 * For programs attached to PERF events this is mandatory as the
11940 	 * perf NMI can hit any arbitrary code sequence.
11941 	 *
11942 	 * All other trace types using run-time allocated hash maps are unsafe as
11943 	 * well because tracepoint or kprobes can be inside locked regions
11944 	 * of the memory allocator or at a place where a recursion into the
11945 	 * memory allocator would see inconsistent state.
11946 	 *
11947 	 * On RT enabled kernels run-time allocation of all trace type
11948 	 * programs is strictly prohibited due to lock type constraints. On
11949 	 * !RT kernels it is allowed for backwards compatibility reasons for
11950 	 * now, but warnings are emitted so developers are made aware of
11951 	 * the unsafety and can fix their programs before this is enforced.
11952 	 */
11953 	if (is_tracing_prog_type(prog_type) && !is_preallocated_map(map)) {
11954 		if (prog_type == BPF_PROG_TYPE_PERF_EVENT) {
11955 			verbose(env, "perf_event programs can only use preallocated hash map\n");
11956 			return -EINVAL;
11957 		}
11958 		if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
11959 			verbose(env, "trace type programs can only use preallocated hash map\n");
11960 			return -EINVAL;
11961 		}
11962 		WARN_ONCE(1, "trace type BPF program uses run-time allocation\n");
11963 		verbose(env, "trace type programs with run-time allocated hash maps are unsafe. Switch to preallocated hash maps.\n");
11964 	}
11965 
11966 	if (map_value_has_spin_lock(map)) {
11967 		if (prog_type == BPF_PROG_TYPE_SOCKET_FILTER) {
11968 			verbose(env, "socket filter progs cannot use bpf_spin_lock yet\n");
11969 			return -EINVAL;
11970 		}
11971 
11972 		if (is_tracing_prog_type(prog_type)) {
11973 			verbose(env, "tracing progs cannot use bpf_spin_lock yet\n");
11974 			return -EINVAL;
11975 		}
11976 
11977 		if (prog->aux->sleepable) {
11978 			verbose(env, "sleepable progs cannot use bpf_spin_lock yet\n");
11979 			return -EINVAL;
11980 		}
11981 	}
11982 
11983 	if (map_value_has_timer(map)) {
11984 		if (is_tracing_prog_type(prog_type)) {
11985 			verbose(env, "tracing progs cannot use bpf_timer yet\n");
11986 			return -EINVAL;
11987 		}
11988 	}
11989 
11990 	if ((bpf_prog_is_dev_bound(prog->aux) || bpf_map_is_dev_bound(map)) &&
11991 	    !bpf_offload_prog_map_match(prog, map)) {
11992 		verbose(env, "offload device mismatch between prog and map\n");
11993 		return -EINVAL;
11994 	}
11995 
11996 	if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
11997 		verbose(env, "bpf_struct_ops map cannot be used in prog\n");
11998 		return -EINVAL;
11999 	}
12000 
12001 	if (prog->aux->sleepable)
12002 		switch (map->map_type) {
12003 		case BPF_MAP_TYPE_HASH:
12004 		case BPF_MAP_TYPE_LRU_HASH:
12005 		case BPF_MAP_TYPE_ARRAY:
12006 		case BPF_MAP_TYPE_PERCPU_HASH:
12007 		case BPF_MAP_TYPE_PERCPU_ARRAY:
12008 		case BPF_MAP_TYPE_LRU_PERCPU_HASH:
12009 		case BPF_MAP_TYPE_ARRAY_OF_MAPS:
12010 		case BPF_MAP_TYPE_HASH_OF_MAPS:
12011 			if (!is_preallocated_map(map)) {
12012 				verbose(env,
12013 					"Sleepable programs can only use preallocated maps\n");
12014 				return -EINVAL;
12015 			}
12016 			break;
12017 		case BPF_MAP_TYPE_RINGBUF:
12018 		case BPF_MAP_TYPE_INODE_STORAGE:
12019 		case BPF_MAP_TYPE_SK_STORAGE:
12020 		case BPF_MAP_TYPE_TASK_STORAGE:
12021 			break;
12022 		default:
12023 			verbose(env,
12024 				"Sleepable programs can only use array, hash, and ringbuf maps\n");
12025 			return -EINVAL;
12026 		}
12027 
12028 	return 0;
12029 }
12030 
12031 static bool bpf_map_is_cgroup_storage(struct bpf_map *map)
12032 {
12033 	return (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE ||
12034 		map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE);
12035 }
12036 
12037 /* find and rewrite pseudo imm in ld_imm64 instructions:
12038  *
12039  * 1. if it accesses map FD, replace it with actual map pointer.
12040  * 2. if it accesses btf_id of a VAR, replace it with pointer to the var.
12041  *
12042  * NOTE: btf_vmlinux is required for converting pseudo btf_id.
12043  */
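/*
 * For illustration (a hypothetical snippet, not taken from this file): a
 * loader-built instruction such as
 *
 *	BPF_LD_IMM64_RAW(BPF_REG_1, BPF_PSEUDO_MAP_FD, map_fd)
 *
 * carries a user-space map FD in its imm field.  Below, that FD is resolved
 * via fdget()/__bpf_map_get() and the pair of imm fields is rewritten to hold
 * the low/high 32 bits of the in-kernel 'struct bpf_map *' pointer.
 */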
12044 static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
12045 {
12046 	struct bpf_insn *insn = env->prog->insnsi;
12047 	int insn_cnt = env->prog->len;
12048 	int i, j, err;
12049 
12050 	err = bpf_prog_calc_tag(env->prog);
12051 	if (err)
12052 		return err;
12053 
12054 	for (i = 0; i < insn_cnt; i++, insn++) {
12055 		if (BPF_CLASS(insn->code) == BPF_LDX &&
12056 		    (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) {
12057 			verbose(env, "BPF_LDX uses reserved fields\n");
12058 			return -EINVAL;
12059 		}
12060 
12061 		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) {
12062 			struct bpf_insn_aux_data *aux;
12063 			struct bpf_map *map;
12064 			struct fd f;
12065 			u64 addr;
12066 			u32 fd;
12067 
12068 			if (i == insn_cnt - 1 || insn[1].code != 0 ||
12069 			    insn[1].dst_reg != 0 || insn[1].src_reg != 0 ||
12070 			    insn[1].off != 0) {
12071 				verbose(env, "invalid bpf_ld_imm64 insn\n");
12072 				return -EINVAL;
12073 			}
12074 
12075 			if (insn[0].src_reg == 0)
12076 				/* valid generic load 64-bit imm */
12077 				goto next_insn;
12078 
12079 			if (insn[0].src_reg == BPF_PSEUDO_BTF_ID) {
12080 				aux = &env->insn_aux_data[i];
12081 				err = check_pseudo_btf_id(env, insn, aux);
12082 				if (err)
12083 					return err;
12084 				goto next_insn;
12085 			}
12086 
12087 			if (insn[0].src_reg == BPF_PSEUDO_FUNC) {
12088 				aux = &env->insn_aux_data[i];
12089 				aux->ptr_type = PTR_TO_FUNC;
12090 				goto next_insn;
12091 			}
12092 
12093 			/* In final convert_pseudo_ld_imm64() step, this is
12094 			 * converted into regular 64-bit imm load insn.
12095 			 */
12096 			switch (insn[0].src_reg) {
12097 			case BPF_PSEUDO_MAP_VALUE:
12098 			case BPF_PSEUDO_MAP_IDX_VALUE:
12099 				break;
12100 			case BPF_PSEUDO_MAP_FD:
12101 			case BPF_PSEUDO_MAP_IDX:
12102 				if (insn[1].imm == 0)
12103 					break;
12104 				fallthrough;
12105 			default:
12106 				verbose(env, "unrecognized bpf_ld_imm64 insn\n");
12107 				return -EINVAL;
12108 			}
12109 
12110 			switch (insn[0].src_reg) {
12111 			case BPF_PSEUDO_MAP_IDX_VALUE:
12112 			case BPF_PSEUDO_MAP_IDX:
12113 				if (bpfptr_is_null(env->fd_array)) {
12114 					verbose(env, "fd_idx without fd_array is invalid\n");
12115 					return -EPROTO;
12116 				}
12117 				if (copy_from_bpfptr_offset(&fd, env->fd_array,
12118 							    insn[0].imm * sizeof(fd),
12119 							    sizeof(fd)))
12120 					return -EFAULT;
12121 				break;
12122 			default:
12123 				fd = insn[0].imm;
12124 				break;
12125 			}
12126 
12127 			f = fdget(fd);
12128 			map = __bpf_map_get(f);
12129 			if (IS_ERR(map)) {
12130 				verbose(env, "fd %d is not pointing to valid bpf_map\n",
12131 					insn[0].imm);
12132 				return PTR_ERR(map);
12133 			}
12134 
12135 			err = check_map_prog_compatibility(env, map, env->prog);
12136 			if (err) {
12137 				fdput(f);
12138 				return err;
12139 			}
12140 
12141 			aux = &env->insn_aux_data[i];
12142 			if (insn[0].src_reg == BPF_PSEUDO_MAP_FD ||
12143 			    insn[0].src_reg == BPF_PSEUDO_MAP_IDX) {
12144 				addr = (unsigned long)map;
12145 			} else {
12146 				u32 off = insn[1].imm;
12147 
12148 				if (off >= BPF_MAX_VAR_OFF) {
12149 					verbose(env, "direct value offset of %u is not allowed\n", off);
12150 					fdput(f);
12151 					return -EINVAL;
12152 				}
12153 
12154 				if (!map->ops->map_direct_value_addr) {
12155 					verbose(env, "no direct value access support for this map type\n");
12156 					fdput(f);
12157 					return -EINVAL;
12158 				}
12159 
12160 				err = map->ops->map_direct_value_addr(map, &addr, off);
12161 				if (err) {
12162 					verbose(env, "invalid access to map value pointer, value_size=%u off=%u\n",
12163 						map->value_size, off);
12164 					fdput(f);
12165 					return err;
12166 				}
12167 
12168 				aux->map_off = off;
12169 				addr += off;
12170 			}
12171 
12172 			insn[0].imm = (u32)addr;
12173 			insn[1].imm = addr >> 32;
12174 
12175 			/* check whether we recorded this map already */
12176 			for (j = 0; j < env->used_map_cnt; j++) {
12177 				if (env->used_maps[j] == map) {
12178 					aux->map_index = j;
12179 					fdput(f);
12180 					goto next_insn;
12181 				}
12182 			}
12183 
12184 			if (env->used_map_cnt >= MAX_USED_MAPS) {
12185 				fdput(f);
12186 				return -E2BIG;
12187 			}
12188 
12189 			/* hold the map. If the program is rejected by verifier,
12190 			 * the map will be released by release_maps() or it
12191 			 * will be used by the valid program until it's unloaded
12192 			 * and all maps are released in free_used_maps()
12193 			 */
12194 			bpf_map_inc(map);
12195 
12196 			aux->map_index = env->used_map_cnt;
12197 			env->used_maps[env->used_map_cnt++] = map;
12198 
12199 			if (bpf_map_is_cgroup_storage(map) &&
12200 			    bpf_cgroup_storage_assign(env->prog->aux, map)) {
12201 				verbose(env, "only one cgroup storage of each type is allowed\n");
12202 				fdput(f);
12203 				return -EBUSY;
12204 			}
12205 
12206 			fdput(f);
12207 next_insn:
12208 			insn++;
12209 			i++;
12210 			continue;
12211 		}
12212 
12213 		/* Basic sanity check before we invest more work here. */
12214 		if (!bpf_opcode_in_insntable(insn->code)) {
12215 			verbose(env, "unknown opcode %02x\n", insn->code);
12216 			return -EINVAL;
12217 		}
12218 	}
12219 
12220 	/* now all pseudo BPF_LD_IMM64 instructions load valid
12221 	 * 'struct bpf_map *' into a register instead of user map_fd.
12222 	 * These pointers will be used later by verifier to validate map access.
12223 	 */
12224 	return 0;
12225 }
12226 
12227 /* drop refcnt of maps used by the rejected program */
12228 static void release_maps(struct bpf_verifier_env *env)
12229 {
12230 	__bpf_free_used_maps(env->prog->aux, env->used_maps,
12231 			     env->used_map_cnt);
12232 }
12233 
12234 /* drop refcnt of maps used by the rejected program */
12235 static void release_btfs(struct bpf_verifier_env *env)
12236 {
12237 	__bpf_free_used_btfs(env->prog->aux, env->used_btfs,
12238 			     env->used_btf_cnt);
12239 }
12240 
12241 /* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */
12242 static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env)
12243 {
12244 	struct bpf_insn *insn = env->prog->insnsi;
12245 	int insn_cnt = env->prog->len;
12246 	int i;
12247 
12248 	for (i = 0; i < insn_cnt; i++, insn++) {
12249 		if (insn->code != (BPF_LD | BPF_IMM | BPF_DW))
12250 			continue;
12251 		if (insn->src_reg == BPF_PSEUDO_FUNC)
12252 			continue;
12253 		insn->src_reg = 0;
12254 	}
12255 }
12256 
12257 /* single env->prog->insnsi[off] instruction was replaced with the range
12258  * insnsi[off, off + cnt).  Adjust corresponding insn_aux_data by copying
12259  * [0, off) and [off, end) to new locations, so the patched range stays zero
12260  */
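/*
 * A worked example (illustrative): patching the insn at off = 5 with a
 * cnt = 3 sequence grows the program by two insns.  Old aux[0..4] are copied
 * to new aux[0..4], old aux[5..] land at new aux[7..], and the two fresh
 * slots new aux[5] and aux[6] inherit old aux[5]'s 'seen' mark and get their
 * zext_dst recomputed.
 */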
12261 static void adjust_insn_aux_data(struct bpf_verifier_env *env,
12262 				 struct bpf_insn_aux_data *new_data,
12263 				 struct bpf_prog *new_prog, u32 off, u32 cnt)
12264 {
12265 	struct bpf_insn_aux_data *old_data = env->insn_aux_data;
12266 	struct bpf_insn *insn = new_prog->insnsi;
12267 	u32 old_seen = old_data[off].seen;
12268 	u32 prog_len;
12269 	int i;
12270 
12271 	/* aux info at OFF always needs adjustment, no matter whether the fast
12272 	 * path (cnt == 1) is taken or not. There is no guarantee that the insn
12273 	 * at OFF is still the original insn from the old prog.
12274 	 */
12275 	old_data[off].zext_dst = insn_has_def32(env, insn + off + cnt - 1);
12276 
12277 	if (cnt == 1)
12278 		return;
12279 	prog_len = new_prog->len;
12280 
12281 	memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
12282 	memcpy(new_data + off + cnt - 1, old_data + off,
12283 	       sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
12284 	for (i = off; i < off + cnt - 1; i++) {
12285 		/* Expand insnsi[off]'s seen count to the patched range. */
12286 		new_data[i].seen = old_seen;
12287 		new_data[i].zext_dst = insn_has_def32(env, insn + i);
12288 	}
12289 	env->insn_aux_data = new_data;
12290 	vfree(old_data);
12291 }
12292 
12293 static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len)
12294 {
12295 	int i;
12296 
12297 	if (len == 1)
12298 		return;
12299 	/* NOTE: fake 'exit' subprog should be updated as well. */
12300 	for (i = 0; i <= env->subprog_cnt; i++) {
12301 		if (env->subprog_info[i].start <= off)
12302 			continue;
12303 		env->subprog_info[i].start += len - 1;
12304 	}
12305 }
12306 
12307 static void adjust_poke_descs(struct bpf_prog *prog, u32 off, u32 len)
12308 {
12309 	struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab;
12310 	int i, sz = prog->aux->size_poke_tab;
12311 	struct bpf_jit_poke_descriptor *desc;
12312 
12313 	for (i = 0; i < sz; i++) {
12314 		desc = &tab[i];
12315 		if (desc->insn_idx <= off)
12316 			continue;
12317 		desc->insn_idx += len - 1;
12318 	}
12319 }
12320 
12321 static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off,
12322 					    const struct bpf_insn *patch, u32 len)
12323 {
12324 	struct bpf_prog *new_prog;
12325 	struct bpf_insn_aux_data *new_data = NULL;
12326 
12327 	if (len > 1) {
12328 		new_data = vzalloc(array_size(env->prog->len + len - 1,
12329 					      sizeof(struct bpf_insn_aux_data)));
12330 		if (!new_data)
12331 			return NULL;
12332 	}
12333 
12334 	new_prog = bpf_patch_insn_single(env->prog, off, patch, len);
12335 	if (IS_ERR(new_prog)) {
12336 		if (PTR_ERR(new_prog) == -ERANGE)
12337 			verbose(env,
12338 				"insn %d cannot be patched due to 16-bit range\n",
12339 				env->insn_aux_data[off].orig_idx);
12340 		vfree(new_data);
12341 		return NULL;
12342 	}
12343 	adjust_insn_aux_data(env, new_data, new_prog, off, len);
12344 	adjust_subprog_starts(env, off, len);
12345 	adjust_poke_descs(new_prog, off, len);
12346 	return new_prog;
12347 }
12348 
12349 static int adjust_subprog_starts_after_remove(struct bpf_verifier_env *env,
12350 					      u32 off, u32 cnt)
12351 {
12352 	int i, j;
12353 
12354 	/* find first prog starting at or after off (first to remove) */
12355 	for (i = 0; i < env->subprog_cnt; i++)
12356 		if (env->subprog_info[i].start >= off)
12357 			break;
12358 	/* find first prog starting at or after off + cnt (first to stay) */
12359 	for (j = i; j < env->subprog_cnt; j++)
12360 		if (env->subprog_info[j].start >= off + cnt)
12361 			break;
12362 	/* if j doesn't start exactly at off + cnt, we are just removing
12363 	 * the front of previous prog
12364 	 */
12365 	if (env->subprog_info[j].start != off + cnt)
12366 		j--;
12367 
12368 	if (j > i) {
12369 		struct bpf_prog_aux *aux = env->prog->aux;
12370 		int move;
12371 
12372 		/* move fake 'exit' subprog as well */
12373 		move = env->subprog_cnt + 1 - j;
12374 
12375 		memmove(env->subprog_info + i,
12376 			env->subprog_info + j,
12377 			sizeof(*env->subprog_info) * move);
12378 		env->subprog_cnt -= j - i;
12379 
12380 		/* remove func_info */
12381 		if (aux->func_info) {
12382 			move = aux->func_info_cnt - j;
12383 
12384 			memmove(aux->func_info + i,
12385 				aux->func_info + j,
12386 				sizeof(*aux->func_info) * move);
12387 			aux->func_info_cnt -= j - i;
12388 			/* func_info->insn_off is set after all code rewrites,
12389 			 * in adjust_btf_func() - no need to adjust
12390 			 */
12391 		}
12392 	} else {
12393 		/* convert i from "first prog to remove" to "first to adjust" */
12394 		if (env->subprog_info[i].start == off)
12395 			i++;
12396 	}
12397 
12398 	/* update fake 'exit' subprog as well */
12399 	for (; i <= env->subprog_cnt; i++)
12400 		env->subprog_info[i].start -= cnt;
12401 
12402 	return 0;
12403 }
12404 
12405 static int bpf_adj_linfo_after_remove(struct bpf_verifier_env *env, u32 off,
12406 				      u32 cnt)
12407 {
12408 	struct bpf_prog *prog = env->prog;
12409 	u32 i, l_off, l_cnt, nr_linfo;
12410 	struct bpf_line_info *linfo;
12411 
12412 	nr_linfo = prog->aux->nr_linfo;
12413 	if (!nr_linfo)
12414 		return 0;
12415 
12416 	linfo = prog->aux->linfo;
12417 
12418 	/* find first line info to remove, count lines to be removed */
12419 	for (i = 0; i < nr_linfo; i++)
12420 		if (linfo[i].insn_off >= off)
12421 			break;
12422 
12423 	l_off = i;
12424 	l_cnt = 0;
12425 	for (; i < nr_linfo; i++)
12426 		if (linfo[i].insn_off < off + cnt)
12427 			l_cnt++;
12428 		else
12429 			break;
12430 
12431 	/* If the first live insn doesn't match the first live linfo, it needs
12432 	 * to "inherit" the last removed linfo.  prog is already modified, so
12433 	 * prog->len == off means no live insns follow (the program tail was removed).
12434 	 */
12435 	if (prog->len != off && l_cnt &&
12436 	    (i == nr_linfo || linfo[i].insn_off != off + cnt)) {
12437 		l_cnt--;
12438 		linfo[--i].insn_off = off + cnt;
12439 	}
12440 
12441 	/* remove the line info which refer to the removed instructions */
12442 	if (l_cnt) {
12443 		memmove(linfo + l_off, linfo + i,
12444 			sizeof(*linfo) * (nr_linfo - i));
12445 
12446 		prog->aux->nr_linfo -= l_cnt;
12447 		nr_linfo = prog->aux->nr_linfo;
12448 	}
12449 
12450 	/* pull all linfo[i].insn_off >= off + cnt in by cnt */
12451 	for (i = l_off; i < nr_linfo; i++)
12452 		linfo[i].insn_off -= cnt;
12453 
12454 	/* fix up all subprogs (incl. 'exit') which start >= off */
12455 	for (i = 0; i <= env->subprog_cnt; i++)
12456 		if (env->subprog_info[i].linfo_idx > l_off) {
12457 			/* program may have started in the removed region but
12458 			 * may not be fully removed
12459 			 */
12460 			if (env->subprog_info[i].linfo_idx >= l_off + l_cnt)
12461 				env->subprog_info[i].linfo_idx -= l_cnt;
12462 			else
12463 				env->subprog_info[i].linfo_idx = l_off;
12464 		}
12465 
12466 	return 0;
12467 }
12468 
12469 static int verifier_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt)
12470 {
12471 	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
12472 	unsigned int orig_prog_len = env->prog->len;
12473 	int err;
12474 
12475 	if (bpf_prog_is_dev_bound(env->prog->aux))
12476 		bpf_prog_offload_remove_insns(env, off, cnt);
12477 
12478 	err = bpf_remove_insns(env->prog, off, cnt);
12479 	if (err)
12480 		return err;
12481 
12482 	err = adjust_subprog_starts_after_remove(env, off, cnt);
12483 	if (err)
12484 		return err;
12485 
12486 	err = bpf_adj_linfo_after_remove(env, off, cnt);
12487 	if (err)
12488 		return err;
12489 
12490 	memmove(aux_data + off,	aux_data + off + cnt,
12491 		sizeof(*aux_data) * (orig_prog_len - off - cnt));
12492 
12493 	return 0;
12494 }
12495 
12496 /* The verifier does more data flow analysis than llvm and will not
12497  * explore branches that are dead at run time. Malicious programs can
12498  * have dead code too. Therefore replace all dead at-run-time code
12499  * with 'ja -1'.
12500  *
12501  * Plain nops would not be optimal: if they sat at the end of the
12502  * program and, through another bug, we managed to jump there, we
12503  * would execute beyond program memory. Returning an exception
12504  * code also wouldn't work, since the dead code could be located
12505  * inside subprogs.
12506  */
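/*
 * Concretely, the trap used below is BPF_JMP_IMM(BPF_JA, 0, 0, -1): with BPF
 * jump semantics (next pc = pc + off + 1) an offset of -1 targets the insn
 * itself, so even a stray jump into a dead region stays confined there
 * instead of running past the end of the program.
 */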
12507 static void sanitize_dead_code(struct bpf_verifier_env *env)
12508 {
12509 	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
12510 	struct bpf_insn trap = BPF_JMP_IMM(BPF_JA, 0, 0, -1);
12511 	struct bpf_insn *insn = env->prog->insnsi;
12512 	const int insn_cnt = env->prog->len;
12513 	int i;
12514 
12515 	for (i = 0; i < insn_cnt; i++) {
12516 		if (aux_data[i].seen)
12517 			continue;
12518 		memcpy(insn + i, &trap, sizeof(trap));
12519 		aux_data[i].zext_dst = false;
12520 	}
12521 }
12522 
12523 static bool insn_is_cond_jump(u8 code)
12524 {
12525 	u8 op;
12526 
12527 	if (BPF_CLASS(code) == BPF_JMP32)
12528 		return true;
12529 
12530 	if (BPF_CLASS(code) != BPF_JMP)
12531 		return false;
12532 
12533 	op = BPF_OP(code);
12534 	return op != BPF_JA && op != BPF_EXIT && op != BPF_CALL;
12535 }
12536 
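/* If verification proved that one arm of a conditional jump is never taken
 * (its target insn, or the fall-through insn, was never marked 'seen'),
 * hard-wire the branch: rewrite it into an unconditional BPF_JA to the live
 * arm (ja +off when the fall-through is dead, ja +0 when the jump target is
 * dead), so dead branches cannot be taken at run time.
 */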
12537 static void opt_hard_wire_dead_code_branches(struct bpf_verifier_env *env)
12538 {
12539 	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
12540 	struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
12541 	struct bpf_insn *insn = env->prog->insnsi;
12542 	const int insn_cnt = env->prog->len;
12543 	int i;
12544 
12545 	for (i = 0; i < insn_cnt; i++, insn++) {
12546 		if (!insn_is_cond_jump(insn->code))
12547 			continue;
12548 
12549 		if (!aux_data[i + 1].seen)
12550 			ja.off = insn->off;
12551 		else if (!aux_data[i + 1 + insn->off].seen)
12552 			ja.off = 0;
12553 		else
12554 			continue;
12555 
12556 		if (bpf_prog_is_dev_bound(env->prog->aux))
12557 			bpf_prog_offload_replace_insn(env, i, &ja);
12558 
12559 		memcpy(insn, &ja, sizeof(ja));
12560 	}
12561 }
12562 
12563 static int opt_remove_dead_code(struct bpf_verifier_env *env)
12564 {
12565 	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
12566 	int insn_cnt = env->prog->len;
12567 	int i, err;
12568 
12569 	for (i = 0; i < insn_cnt; i++) {
12570 		int j;
12571 
12572 		j = 0;
12573 		while (i + j < insn_cnt && !aux_data[i + j].seen)
12574 			j++;
12575 		if (!j)
12576 			continue;
12577 
12578 		err = verifier_remove_insns(env, i, j);
12579 		if (err)
12580 			return err;
12581 		insn_cnt = env->prog->len;
12582 	}
12583 
12584 	return 0;
12585 }
12586 
12587 static int opt_remove_nops(struct bpf_verifier_env *env)
12588 {
12589 	const struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
12590 	struct bpf_insn *insn = env->prog->insnsi;
12591 	int insn_cnt = env->prog->len;
12592 	int i, err;
12593 
12594 	for (i = 0; i < insn_cnt; i++) {
12595 		if (memcmp(&insn[i], &ja, sizeof(ja)))
12596 			continue;
12597 
12598 		err = verifier_remove_insns(env, i, 1);
12599 		if (err)
12600 			return err;
12601 		insn_cnt--;
12602 		i--;
12603 	}
12604 
12605 	return 0;
12606 }
12607 
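/* Roughly, this pass does two things to insns that define a 32-bit
 * subregister:
 *
 * 1) if the JIT reports via bpf_jit_needs_zext() that it does not zero the
 *    upper 32 bits implicitly (or the insn is a CMPXCHG, see below), append
 *    an explicit zero-extension, e.g.
 *
 *	zext_patch[0] = insn;
 *	zext_patch[1] = BPF_ZEXT_REG(dst_reg);
 *
 * 2) if BPF_F_TEST_RND_HI32 was passed, scribble a random value into the
 *    upper 32 bits of defs that do not need the zero extension, so test
 *    programs that wrongly rely on implicit zero extension fail loudly.
 */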
12608 static int opt_subreg_zext_lo32_rnd_hi32(struct bpf_verifier_env *env,
12609 					 const union bpf_attr *attr)
12610 {
12611 	struct bpf_insn *patch, zext_patch[2], rnd_hi32_patch[4];
12612 	struct bpf_insn_aux_data *aux = env->insn_aux_data;
12613 	int i, patch_len, delta = 0, len = env->prog->len;
12614 	struct bpf_insn *insns = env->prog->insnsi;
12615 	struct bpf_prog *new_prog;
12616 	bool rnd_hi32;
12617 
12618 	rnd_hi32 = attr->prog_flags & BPF_F_TEST_RND_HI32;
12619 	zext_patch[1] = BPF_ZEXT_REG(0);
12620 	rnd_hi32_patch[1] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, 0);
12621 	rnd_hi32_patch[2] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
12622 	rnd_hi32_patch[3] = BPF_ALU64_REG(BPF_OR, 0, BPF_REG_AX);
12623 	for (i = 0; i < len; i++) {
12624 		int adj_idx = i + delta;
12625 		struct bpf_insn insn;
12626 		int load_reg;
12627 
12628 		insn = insns[adj_idx];
12629 		load_reg = insn_def_regno(&insn);
12630 		if (!aux[adj_idx].zext_dst) {
12631 			u8 code, class;
12632 			u32 imm_rnd;
12633 
12634 			if (!rnd_hi32)
12635 				continue;
12636 
12637 			code = insn.code;
12638 			class = BPF_CLASS(code);
12639 			if (load_reg == -1)
12640 				continue;
12641 
12642 			/* NOTE: arg "reg" (the fourth one) is only used for
12643 			 *       BPF_STX + SRC_OP, so it is safe to pass NULL
12644 			 *       here.
12645 			 */
12646 			if (is_reg64(env, &insn, load_reg, NULL, DST_OP)) {
12647 				if (class == BPF_LD &&
12648 				    BPF_MODE(code) == BPF_IMM)
12649 					i++;
12650 				continue;
12651 			}
12652 
12653 			/* ctx load could be transformed into wider load. */
12654 			if (class == BPF_LDX &&
12655 			    aux[adj_idx].ptr_type == PTR_TO_CTX)
12656 				continue;
12657 
12658 			imm_rnd = get_random_int();
12659 			rnd_hi32_patch[0] = insn;
12660 			rnd_hi32_patch[1].imm = imm_rnd;
12661 			rnd_hi32_patch[3].dst_reg = load_reg;
12662 			patch = rnd_hi32_patch;
12663 			patch_len = 4;
12664 			goto apply_patch_buffer;
12665 		}
12666 
12667 		/* Add in a zero-extend instruction if a) the JIT has requested
12668 		 * it or b) it's a CMPXCHG.
12669 		 *
12670 		 * The latter is because: BPF_CMPXCHG always loads a value into
12671 		 * R0, therefore always zero-extends. However some archs'
12672 		 * equivalent instruction only does this load when the
12673 		 * comparison is successful. This detail of CMPXCHG is
12674 		 * orthogonal to the general zero-extension behaviour of the
12675 		 * CPU, so it's treated independently of bpf_jit_needs_zext.
12676 		 */
12677 		if (!bpf_jit_needs_zext() && !is_cmpxchg_insn(&insn))
12678 			continue;
12679 
12680 		if (WARN_ON(load_reg == -1)) {
12681 			verbose(env, "verifier bug. zext_dst is set, but no reg is defined\n");
12682 			return -EFAULT;
12683 		}
12684 
12685 		zext_patch[0] = insn;
12686 		zext_patch[1].dst_reg = load_reg;
12687 		zext_patch[1].src_reg = load_reg;
12688 		patch = zext_patch;
12689 		patch_len = 2;
12690 apply_patch_buffer:
12691 		new_prog = bpf_patch_insn_data(env, adj_idx, patch, patch_len);
12692 		if (!new_prog)
12693 			return -ENOMEM;
12694 		env->prog = new_prog;
12695 		insns = new_prog->insnsi;
12696 		aux = env->insn_aux_data;
12697 		delta += patch_len - 1;
12698 	}
12699 
12700 	return 0;
12701 }
12702 
12703 /* convert load instructions that access fields of a context type into a
12704  * sequence of instructions that access fields of the underlying structure:
12705  *     struct __sk_buff    -> struct sk_buff
12706  *     struct bpf_sock_ops -> struct sock
12707  */
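/*
 * For example (illustrative; the exact rewrite is supplied by the program
 * type's convert_ctx_access callback), a socket filter reading
 *
 *	r0 = *(u32 *)(r1 + offsetof(struct __sk_buff, len))
 *
 * is rewritten into an equivalent load from the real 'len' field of the
 * underlying 'struct sk_buff' that r1 points to at run time.
 */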
12708 static int convert_ctx_accesses(struct bpf_verifier_env *env)
12709 {
12710 	const struct bpf_verifier_ops *ops = env->ops;
12711 	int i, cnt, size, ctx_field_size, delta = 0;
12712 	const int insn_cnt = env->prog->len;
12713 	struct bpf_insn insn_buf[16], *insn;
12714 	u32 target_size, size_default, off;
12715 	struct bpf_prog *new_prog;
12716 	enum bpf_access_type type;
12717 	bool is_narrower_load;
12718 
12719 	if (ops->gen_prologue || env->seen_direct_write) {
12720 		if (!ops->gen_prologue) {
12721 			verbose(env, "bpf verifier is misconfigured\n");
12722 			return -EINVAL;
12723 		}
12724 		cnt = ops->gen_prologue(insn_buf, env->seen_direct_write,
12725 					env->prog);
12726 		if (cnt >= ARRAY_SIZE(insn_buf)) {
12727 			verbose(env, "bpf verifier is misconfigured\n");
12728 			return -EINVAL;
12729 		} else if (cnt) {
12730 			new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt);
12731 			if (!new_prog)
12732 				return -ENOMEM;
12733 
12734 			env->prog = new_prog;
12735 			delta += cnt - 1;
12736 		}
12737 	}
12738 
12739 	if (bpf_prog_is_dev_bound(env->prog->aux))
12740 		return 0;
12741 
12742 	insn = env->prog->insnsi + delta;
12743 
12744 	for (i = 0; i < insn_cnt; i++, insn++) {
12745 		bpf_convert_ctx_access_t convert_ctx_access;
12746 		bool ctx_access;
12747 
12748 		if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) ||
12749 		    insn->code == (BPF_LDX | BPF_MEM | BPF_H) ||
12750 		    insn->code == (BPF_LDX | BPF_MEM | BPF_W) ||
12751 		    insn->code == (BPF_LDX | BPF_MEM | BPF_DW)) {
12752 			type = BPF_READ;
12753 			ctx_access = true;
12754 		} else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) ||
12755 			   insn->code == (BPF_STX | BPF_MEM | BPF_H) ||
12756 			   insn->code == (BPF_STX | BPF_MEM | BPF_W) ||
12757 			   insn->code == (BPF_STX | BPF_MEM | BPF_DW) ||
12758 			   insn->code == (BPF_ST | BPF_MEM | BPF_B) ||
12759 			   insn->code == (BPF_ST | BPF_MEM | BPF_H) ||
12760 			   insn->code == (BPF_ST | BPF_MEM | BPF_W) ||
12761 			   insn->code == (BPF_ST | BPF_MEM | BPF_DW)) {
12762 			type = BPF_WRITE;
12763 			ctx_access = BPF_CLASS(insn->code) == BPF_STX;
12764 		} else {
12765 			continue;
12766 		}
12767 
12768 		if (type == BPF_WRITE &&
12769 		    env->insn_aux_data[i + delta].sanitize_stack_spill) {
12770 			struct bpf_insn patch[] = {
12771 				*insn,
12772 				BPF_ST_NOSPEC(),
12773 			};
12774 
12775 			cnt = ARRAY_SIZE(patch);
12776 			new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt);
12777 			if (!new_prog)
12778 				return -ENOMEM;
12779 
12780 			delta    += cnt - 1;
12781 			env->prog = new_prog;
12782 			insn      = new_prog->insnsi + i + delta;
12783 			continue;
12784 		}
12785 
12786 		if (!ctx_access)
12787 			continue;
12788 
12789 		switch (env->insn_aux_data[i + delta].ptr_type) {
12790 		case PTR_TO_CTX:
12791 			if (!ops->convert_ctx_access)
12792 				continue;
12793 			convert_ctx_access = ops->convert_ctx_access;
12794 			break;
12795 		case PTR_TO_SOCKET:
12796 		case PTR_TO_SOCK_COMMON:
12797 			convert_ctx_access = bpf_sock_convert_ctx_access;
12798 			break;
12799 		case PTR_TO_TCP_SOCK:
12800 			convert_ctx_access = bpf_tcp_sock_convert_ctx_access;
12801 			break;
12802 		case PTR_TO_XDP_SOCK:
12803 			convert_ctx_access = bpf_xdp_sock_convert_ctx_access;
12804 			break;
12805 		case PTR_TO_BTF_ID:
12806 			if (type == BPF_READ) {
12807 				insn->code = BPF_LDX | BPF_PROBE_MEM |
12808 					BPF_SIZE((insn)->code);
12809 				env->prog->aux->num_exentries++;
12810 			} else if (resolve_prog_type(env->prog) != BPF_PROG_TYPE_STRUCT_OPS) {
12811 				verbose(env, "Writes through BTF pointers are not allowed\n");
12812 				return -EINVAL;
12813 			}
12814 			continue;
12815 		default:
12816 			continue;
12817 		}
12818 
12819 		ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size;
12820 		size = BPF_LDST_BYTES(insn);
12821 
12822 		/* If the read access is a narrower load of the field,
12823 		 * convert to a 4/8-byte load, to minimize program type specific
12824 		 * convert_ctx_access changes. If conversion is successful,
12825 		 * we will apply proper mask to the result.
12826 		 */
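		/* A worked example (little-endian, illustrative): a BPF_B read
		 * of byte 2 of a 4-byte context field is converted into a
		 * BPF_W read of the whole field; the code emitted further down
		 * then shifts the result right by 16 and masks it with 0xff so
		 * the program still observes only the requested byte.
		 */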
12827 		is_narrower_load = size < ctx_field_size;
12828 		size_default = bpf_ctx_off_adjust_machine(ctx_field_size);
12829 		off = insn->off;
12830 		if (is_narrower_load) {
12831 			u8 size_code;
12832 
12833 			if (type == BPF_WRITE) {
12834 				verbose(env, "bpf verifier narrow ctx access misconfigured\n");
12835 				return -EINVAL;
12836 			}
12837 
12838 			size_code = BPF_H;
12839 			if (ctx_field_size == 4)
12840 				size_code = BPF_W;
12841 			else if (ctx_field_size == 8)
12842 				size_code = BPF_DW;
12843 
12844 			insn->off = off & ~(size_default - 1);
12845 			insn->code = BPF_LDX | BPF_MEM | size_code;
12846 		}
12847 
12848 		target_size = 0;
12849 		cnt = convert_ctx_access(type, insn, insn_buf, env->prog,
12850 					 &target_size);
12851 		if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) ||
12852 		    (ctx_field_size && !target_size)) {
12853 			verbose(env, "bpf verifier is misconfigured\n");
12854 			return -EINVAL;
12855 		}
12856 
12857 		if (is_narrower_load && size < target_size) {
12858 			u8 shift = bpf_ctx_narrow_access_offset(
12859 				off, size, size_default) * 8;
12860 			if (shift && cnt + 1 >= ARRAY_SIZE(insn_buf)) {
12861 				verbose(env, "bpf verifier narrow ctx load misconfigured\n");
12862 				return -EINVAL;
12863 			}
12864 			if (ctx_field_size <= 4) {
12865 				if (shift)
12866 					insn_buf[cnt++] = BPF_ALU32_IMM(BPF_RSH,
12867 									insn->dst_reg,
12868 									shift);
12869 				insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
12870 								(1 << size * 8) - 1);
12871 			} else {
12872 				if (shift)
12873 					insn_buf[cnt++] = BPF_ALU64_IMM(BPF_RSH,
12874 									insn->dst_reg,
12875 									shift);
12876 				insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg,
12877 								(1ULL << size * 8) - 1);
12878 			}
12879 		}
12880 
12881 		new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
12882 		if (!new_prog)
12883 			return -ENOMEM;
12884 
12885 		delta += cnt - 1;
12886 
12887 		/* keep walking new program and skip insns we just inserted */
12888 		env->prog = new_prog;
12889 		insn      = new_prog->insnsi + i + delta;
12890 	}
12891 
12892 	return 0;
12893 }
12894 
12895 static int jit_subprogs(struct bpf_verifier_env *env)
12896 {
12897 	struct bpf_prog *prog = env->prog, **func, *tmp;
12898 	int i, j, subprog_start, subprog_end = 0, len, subprog;
12899 	struct bpf_map *map_ptr;
12900 	struct bpf_insn *insn;
12901 	void *old_bpf_func;
12902 	int err, num_exentries;
12903 
12904 	if (env->subprog_cnt <= 1)
12905 		return 0;
12906 
12907 	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
12908 		if (!bpf_pseudo_func(insn) && !bpf_pseudo_call(insn))
12909 			continue;
12910 
12911 		/* Upon error here we cannot fall back to interpreter but
12912 		 * need a hard reject of the program. Thus -EFAULT is
12913 		 * propagated in any case.
12914 		 */
12915 		subprog = find_subprog(env, i + insn->imm + 1);
12916 		if (subprog < 0) {
12917 			WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
12918 				  i + insn->imm + 1);
12919 			return -EFAULT;
12920 		}
12921 		/* temporarily remember subprog id inside insn instead of
12922 		 * aux_data, since next loop will split up all insns into funcs
12923 		 */
12924 		insn->off = subprog;
12925 		/* remember original imm in case JIT fails and fallback
12926 		 * to interpreter will be needed
12927 		 */
12928 		env->insn_aux_data[i].call_imm = insn->imm;
12929 		/* point imm to __bpf_call_base+1 from JITs point of view */
12930 		insn->imm = 1;
12931 		if (bpf_pseudo_func(insn))
12932 			/* jit (e.g. x86_64) may emit fewer instructions
12933 			 * if it learns a u32 imm is the same as a u64 imm.
12934 			 * Force a non-zero value here.
12935 			 */
12936 			insn[1].imm = 1;
12937 	}
12938 
12939 	err = bpf_prog_alloc_jited_linfo(prog);
12940 	if (err)
12941 		goto out_undo_insn;
12942 
12943 	err = -ENOMEM;
12944 	func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL);
12945 	if (!func)
12946 		goto out_undo_insn;
12947 
12948 	for (i = 0; i < env->subprog_cnt; i++) {
12949 		subprog_start = subprog_end;
12950 		subprog_end = env->subprog_info[i + 1].start;
12951 
12952 		len = subprog_end - subprog_start;
12953 		/* bpf_prog_run() doesn't call subprogs directly,
12954 		 * hence main prog stats include the runtime of subprogs.
12955 		 * subprogs don't have IDs and are not reachable via prog_get_next_id,
12956 		 * so func[i]->stats will never be accessed and stays NULL
12957 		 */
12958 		func[i] = bpf_prog_alloc_no_stats(bpf_prog_size(len), GFP_USER);
12959 		if (!func[i])
12960 			goto out_free;
12961 		memcpy(func[i]->insnsi, &prog->insnsi[subprog_start],
12962 		       len * sizeof(struct bpf_insn));
12963 		func[i]->type = prog->type;
12964 		func[i]->len = len;
12965 		if (bpf_prog_calc_tag(func[i]))
12966 			goto out_free;
12967 		func[i]->is_func = 1;
12968 		func[i]->aux->func_idx = i;
12969 		/* Below members will be freed only at prog->aux */
12970 		func[i]->aux->btf = prog->aux->btf;
12971 		func[i]->aux->func_info = prog->aux->func_info;
12972 		func[i]->aux->poke_tab = prog->aux->poke_tab;
12973 		func[i]->aux->size_poke_tab = prog->aux->size_poke_tab;
12974 
12975 		for (j = 0; j < prog->aux->size_poke_tab; j++) {
12976 			struct bpf_jit_poke_descriptor *poke;
12977 
12978 			poke = &prog->aux->poke_tab[j];
12979 			if (poke->insn_idx < subprog_end &&
12980 			    poke->insn_idx >= subprog_start)
12981 				poke->aux = func[i]->aux;
12982 		}
12983 
12984 		/* Use bpf_prog_F_tag to indicate functions in stack traces.
12985 		 * Long term would need debug info to populate names
12986 		 */
12987 		func[i]->aux->name[0] = 'F';
12988 		func[i]->aux->stack_depth = env->subprog_info[i].stack_depth;
12989 		func[i]->jit_requested = 1;
12990 		func[i]->aux->kfunc_tab = prog->aux->kfunc_tab;
12991 		func[i]->aux->kfunc_btf_tab = prog->aux->kfunc_btf_tab;
12992 		func[i]->aux->linfo = prog->aux->linfo;
12993 		func[i]->aux->nr_linfo = prog->aux->nr_linfo;
12994 		func[i]->aux->jited_linfo = prog->aux->jited_linfo;
12995 		func[i]->aux->linfo_idx = env->subprog_info[i].linfo_idx;
12996 		num_exentries = 0;
12997 		insn = func[i]->insnsi;
12998 		for (j = 0; j < func[i]->len; j++, insn++) {
12999 			if (BPF_CLASS(insn->code) == BPF_LDX &&
13000 			    BPF_MODE(insn->code) == BPF_PROBE_MEM)
13001 				num_exentries++;
13002 		}
13003 		func[i]->aux->num_exentries = num_exentries;
13004 		func[i]->aux->tail_call_reachable = env->subprog_info[i].tail_call_reachable;
13005 		func[i] = bpf_int_jit_compile(func[i]);
13006 		if (!func[i]->jited) {
13007 			err = -ENOTSUPP;
13008 			goto out_free;
13009 		}
13010 		cond_resched();
13011 	}
13012 
13013 	/* at this point all bpf functions were successfully JITed
13014 	 * now populate all bpf_calls with correct addresses and
13015 	 * run last pass of JIT
13016 	 */
13017 	for (i = 0; i < env->subprog_cnt; i++) {
13018 		insn = func[i]->insnsi;
13019 		for (j = 0; j < func[i]->len; j++, insn++) {
13020 			if (bpf_pseudo_func(insn)) {
13021 				subprog = insn->off;
13022 				insn[0].imm = (u32)(long)func[subprog]->bpf_func;
13023 				insn[1].imm = ((u64)(long)func[subprog]->bpf_func) >> 32;
13024 				continue;
13025 			}
13026 			if (!bpf_pseudo_call(insn))
13027 				continue;
13028 			subprog = insn->off;
13029 			insn->imm = BPF_CALL_IMM(func[subprog]->bpf_func);
13030 		}
13031 
13032 		/* we use the aux data to keep a list of the start addresses
13033 		 * of the JITed images for each function in the program
13034 		 *
13035 		 * for some architectures, such as powerpc64, the imm field
13036 		 * might not be large enough to hold the offset of the start
13037 		 * address of the callee's JITed image from __bpf_call_base
13038 		 *
13039 		 * in such cases, we can lookup the start address of a callee
13040 		 * by using its subprog id, available from the off field of
13041 		 * the call instruction, as an index for this list
13042 		 */
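		/* E.g. (illustrative) for a call insn whose off was set to 2 in
		 * the first loop above, such a JIT can look up the target as
		 * aux->func[2]->bpf_func instead of deriving it from imm.
		 */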
13043 		func[i]->aux->func = func;
13044 		func[i]->aux->func_cnt = env->subprog_cnt;
13045 	}
13046 	for (i = 0; i < env->subprog_cnt; i++) {
13047 		old_bpf_func = func[i]->bpf_func;
13048 		tmp = bpf_int_jit_compile(func[i]);
13049 		if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) {
13050 			verbose(env, "JIT doesn't support bpf-to-bpf calls\n");
13051 			err = -ENOTSUPP;
13052 			goto out_free;
13053 		}
13054 		cond_resched();
13055 	}
13056 
13057 	/* finally lock prog and jit images for all functions and
13058 	 * populate kallsyms
13059 	 */
13060 	for (i = 0; i < env->subprog_cnt; i++) {
13061 		bpf_prog_lock_ro(func[i]);
13062 		bpf_prog_kallsyms_add(func[i]);
13063 	}
13064 
13065 	/* Last step: make now unused interpreter insns from main
13066 	 * prog consistent for later dump requests, so they can
13067 	 * later look the same as if they were interpreted only.
13068 	 */
13069 	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
13070 		if (bpf_pseudo_func(insn)) {
13071 			insn[0].imm = env->insn_aux_data[i].call_imm;
13072 			insn[1].imm = insn->off;
13073 			insn->off = 0;
13074 			continue;
13075 		}
13076 		if (!bpf_pseudo_call(insn))
13077 			continue;
13078 		insn->off = env->insn_aux_data[i].call_imm;
13079 		subprog = find_subprog(env, i + insn->off + 1);
13080 		insn->imm = subprog;
13081 	}
13082 
13083 	prog->jited = 1;
13084 	prog->bpf_func = func[0]->bpf_func;
13085 	prog->jited_len = func[0]->jited_len;
13086 	prog->aux->func = func;
13087 	prog->aux->func_cnt = env->subprog_cnt;
13088 	bpf_prog_jit_attempt_done(prog);
13089 	return 0;
13090 out_free:
13091 	/* We failed JIT'ing, so at this point we need to unregister poke
13092 	 * descriptors from subprogs, so that the kernel is not attempting to
13093 	 * patch them anymore as we're freeing the subprog JIT memory.
13094 	 */
13095 	for (i = 0; i < prog->aux->size_poke_tab; i++) {
13096 		map_ptr = prog->aux->poke_tab[i].tail_call.map;
13097 		map_ptr->ops->map_poke_untrack(map_ptr, prog->aux);
13098 	}
13099 	/* At this point we're guaranteed that poke descriptors are not
13100 	 * live anymore. We can just unlink its descriptor table as it's
13101 	 * released with the main prog.
13102 	 */
13103 	for (i = 0; i < env->subprog_cnt; i++) {
13104 		if (!func[i])
13105 			continue;
13106 		func[i]->aux->poke_tab = NULL;
13107 		bpf_jit_free(func[i]);
13108 	}
13109 	kfree(func);
13110 out_undo_insn:
13111 	/* cleanup main prog to be interpreted */
13112 	prog->jit_requested = 0;
13113 	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
13114 		if (!bpf_pseudo_call(insn))
13115 			continue;
13116 		insn->off = 0;
13117 		insn->imm = env->insn_aux_data[i].call_imm;
13118 	}
13119 	bpf_prog_jit_attempt_done(prog);
13120 	return err;
13121 }
13122 
13123 static int fixup_call_args(struct bpf_verifier_env *env)
13124 {
13125 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
13126 	struct bpf_prog *prog = env->prog;
13127 	struct bpf_insn *insn = prog->insnsi;
13128 	bool has_kfunc_call = bpf_prog_has_kfunc_call(prog);
13129 	int i, depth;
13130 #endif
13131 	int err = 0;
13132 
13133 	if (env->prog->jit_requested &&
13134 	    !bpf_prog_is_dev_bound(env->prog->aux)) {
13135 		err = jit_subprogs(env);
13136 		if (err == 0)
13137 			return 0;
13138 		if (err == -EFAULT)
13139 			return err;
13140 	}
13141 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
13142 	if (has_kfunc_call) {
13143 		verbose(env, "calling kernel functions is not allowed in non-JITed programs\n");
13144 		return -EINVAL;
13145 	}
13146 	if (env->subprog_cnt > 1 && env->prog->aux->tail_call_reachable) {
13147 		/* When JIT fails the progs with bpf2bpf calls and tail_calls
13148 		 * have to be rejected, since interpreter doesn't support them yet.
13149 		 */
13150 		verbose(env, "tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls\n");
13151 		return -EINVAL;
13152 	}
13153 	for (i = 0; i < prog->len; i++, insn++) {
13154 		if (bpf_pseudo_func(insn)) {
13155 			/* When JIT fails the progs with callback calls
13156 			 * have to be rejected, since interpreter doesn't support them yet.
13157 			 */
13158 			verbose(env, "callbacks are not allowed in non-JITed programs\n");
13159 			return -EINVAL;
13160 		}
13161 
13162 		if (!bpf_pseudo_call(insn))
13163 			continue;
13164 		depth = get_callee_stack_depth(env, insn, i);
13165 		if (depth < 0)
13166 			return depth;
13167 		bpf_patch_call_args(insn, depth);
13168 	}
13169 	err = 0;
13170 #endif
13171 	return err;
13172 }
13173 
13174 static int fixup_kfunc_call(struct bpf_verifier_env *env,
13175 			    struct bpf_insn *insn)
13176 {
13177 	const struct bpf_kfunc_desc *desc;
13178 
13179 	if (!insn->imm) {
13180 		verbose(env, "invalid kernel function call not eliminated in verifier pass\n");
13181 		return -EINVAL;
13182 	}
13183 
13184 	/* insn->imm has the btf func_id. Replace it with
13185 	 * an address (relative to __bpf_call_base).
13186 	 */
13187 	desc = find_kfunc_desc(env->prog, insn->imm, insn->off);
13188 	if (!desc) {
13189 		verbose(env, "verifier internal error: kernel function descriptor not found for func_id %u\n",
13190 			insn->imm);
13191 		return -EFAULT;
13192 	}
13193 
13194 	insn->imm = desc->imm;
13195 
13196 	return 0;
13197 }
13198 
13199 /* Do various post-verification rewrites in a single program pass.
13200  * These rewrites simplify JIT and interpreter implementations.
13201  */
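/*
 * For instance (illustrative), the first rewrite below turns
 *
 *	r0 /= r1
 *
 * into a guarded sequence that skips the division and forces r0 to 0 when
 * r1 == 0, so neither the interpreter nor the JITs ever have to raise or
 * emulate a divide-by-zero exception.
 */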
13202 static int do_misc_fixups(struct bpf_verifier_env *env)
13203 {
13204 	struct bpf_prog *prog = env->prog;
13205 	enum bpf_attach_type eatype = prog->expected_attach_type;
13206 	bool expect_blinding = bpf_jit_blinding_enabled(prog);
13207 	enum bpf_prog_type prog_type = resolve_prog_type(prog);
13208 	struct bpf_insn *insn = prog->insnsi;
13209 	const struct bpf_func_proto *fn;
13210 	const int insn_cnt = prog->len;
13211 	const struct bpf_map_ops *ops;
13212 	struct bpf_insn_aux_data *aux;
13213 	struct bpf_insn insn_buf[16];
13214 	struct bpf_prog *new_prog;
13215 	struct bpf_map *map_ptr;
13216 	int i, ret, cnt, delta = 0;
13217 
13218 	for (i = 0; i < insn_cnt; i++, insn++) {
13219 		/* Make divide-by-zero exceptions impossible. */
13220 		if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) ||
13221 		    insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
13222 		    insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
13223 		    insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
13224 			bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
13225 			bool isdiv = BPF_OP(insn->code) == BPF_DIV;
13226 			struct bpf_insn *patchlet;
13227 			struct bpf_insn chk_and_div[] = {
13228 				/* [R,W]x div 0 -> 0 */
13229 				BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
13230 					     BPF_JNE | BPF_K, insn->src_reg,
13231 					     0, 2, 0),
13232 				BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg),
13233 				BPF_JMP_IMM(BPF_JA, 0, 0, 1),
13234 				*insn,
13235 			};
13236 			struct bpf_insn chk_and_mod[] = {
13237 				/* [R,W]x mod 0 -> [R,W]x */
13238 				BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
13239 					     BPF_JEQ | BPF_K, insn->src_reg,
13240 					     0, 1 + (is64 ? 0 : 1), 0),
13241 				*insn,
13242 				BPF_JMP_IMM(BPF_JA, 0, 0, 1),
13243 				BPF_MOV32_REG(insn->dst_reg, insn->dst_reg),
13244 			};
13245 
13246 			patchlet = isdiv ? chk_and_div : chk_and_mod;
13247 			cnt = isdiv ? ARRAY_SIZE(chk_and_div) :
13248 				      ARRAY_SIZE(chk_and_mod) - (is64 ? 2 : 0);
13249 
13250 			new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
13251 			if (!new_prog)
13252 				return -ENOMEM;
13253 
13254 			delta    += cnt - 1;
13255 			env->prog = prog = new_prog;
13256 			insn      = new_prog->insnsi + i + delta;
13257 			continue;
13258 		}
13259 
13260 		/* Implement LD_ABS and LD_IND with a rewrite, if supported by the program type. */
13261 		if (BPF_CLASS(insn->code) == BPF_LD &&
13262 		    (BPF_MODE(insn->code) == BPF_ABS ||
13263 		     BPF_MODE(insn->code) == BPF_IND)) {
13264 			cnt = env->ops->gen_ld_abs(insn, insn_buf);
13265 			if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
13266 				verbose(env, "bpf verifier is misconfigured\n");
13267 				return -EINVAL;
13268 			}
13269 
13270 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
13271 			if (!new_prog)
13272 				return -ENOMEM;
13273 
13274 			delta    += cnt - 1;
13275 			env->prog = prog = new_prog;
13276 			insn      = new_prog->insnsi + i + delta;
13277 			continue;
13278 		}
13279 
13280 		/* Rewrite pointer arithmetic to mitigate speculation attacks. */
13281 		if (insn->code == (BPF_ALU64 | BPF_ADD | BPF_X) ||
13282 		    insn->code == (BPF_ALU64 | BPF_SUB | BPF_X)) {
13283 			const u8 code_add = BPF_ALU64 | BPF_ADD | BPF_X;
13284 			const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X;
13285 			struct bpf_insn *patch = &insn_buf[0];
13286 			bool issrc, isneg, isimm;
13287 			u32 off_reg;
13288 
13289 			aux = &env->insn_aux_data[i + delta];
13290 			if (!aux->alu_state ||
13291 			    aux->alu_state == BPF_ALU_NON_POINTER)
13292 				continue;
13293 
13294 			isneg = aux->alu_state & BPF_ALU_NEG_VALUE;
13295 			issrc = (aux->alu_state & BPF_ALU_SANITIZE) ==
13296 				BPF_ALU_SANITIZE_SRC;
13297 			isimm = aux->alu_state & BPF_ALU_IMMEDIATE;
13298 
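			/* A worked sketch of the non-immediate case below:
			 * aux->alu_limit is, roughly, the largest offset the
			 * verifier determined to be safe for this pointer op.
			 * The sequence
			 *
			 *   AX = alu_limit
			 *   AX -= off_reg
			 *   AX |= off_reg
			 *   AX = -AX
			 *   AX s>>= 63
			 *   AX &= off_reg
			 *
			 * leaves AX == off_reg when 0 <= off_reg <= alu_limit
			 * and AX == 0 otherwise, so the pointer ALU below is
			 * forced to use a bounded offset even under
			 * (mis)speculation.  (When the offset is known to be
			 * negative, it is negated first and the ADD/SUB opcode
			 * of the patched insn is flipped accordingly.)
			 */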
13299 			off_reg = issrc ? insn->src_reg : insn->dst_reg;
13300 			if (isimm) {
13301 				*patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit);
13302 			} else {
13303 				if (isneg)
13304 					*patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
13305 				*patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit);
13306 				*patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg);
13307 				*patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg);
13308 				*patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);
13309 				*patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63);
13310 				*patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX, off_reg);
13311 			}
13312 			if (!issrc)
13313 				*patch++ = BPF_MOV64_REG(insn->dst_reg, insn->src_reg);
13314 			insn->src_reg = BPF_REG_AX;
13315 			if (isneg)
13316 				insn->code = insn->code == code_add ?
13317 					     code_sub : code_add;
13318 			*patch++ = *insn;
13319 			if (issrc && isneg && !isimm)
13320 				*patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
13321 			cnt = patch - insn_buf;
13322 
13323 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
13324 			if (!new_prog)
13325 				return -ENOMEM;
13326 
13327 			delta    += cnt - 1;
13328 			env->prog = prog = new_prog;
13329 			insn      = new_prog->insnsi + i + delta;
13330 			continue;
13331 		}
13332 
13333 		if (insn->code != (BPF_JMP | BPF_CALL))
13334 			continue;
13335 		if (insn->src_reg == BPF_PSEUDO_CALL)
13336 			continue;
13337 		if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) {
13338 			ret = fixup_kfunc_call(env, insn);
13339 			if (ret)
13340 				return ret;
13341 			continue;
13342 		}
13343 
13344 		if (insn->imm == BPF_FUNC_get_route_realm)
13345 			prog->dst_needed = 1;
13346 		if (insn->imm == BPF_FUNC_get_prandom_u32)
13347 			bpf_user_rnd_init_once();
13348 		if (insn->imm == BPF_FUNC_override_return)
13349 			prog->kprobe_override = 1;
13350 		if (insn->imm == BPF_FUNC_tail_call) {
13351 			/* If we tail call into other programs, we
13352 			 * cannot make any assumptions, since they can
13353 			 * be replaced at runtime in the program
13354 			 * array.
13355 			 */
13356 			prog->cb_access = 1;
13357 			if (!allow_tail_call_in_subprogs(env))
13358 				prog->aux->stack_depth = MAX_BPF_STACK;
13359 			prog->aux->max_pkt_offset = MAX_PACKET_OFF;
13360 
13361 			/* Mark bpf_tail_call with a distinct opcode to avoid a
13362 			 * conditional branch in the interpreter on every normal
13363 			 * call and to prevent accidental JITing by a JIT compiler
13364 			 * that doesn't support bpf_tail_call yet.
13365 			 */
13366 			insn->imm = 0;
13367 			insn->code = BPF_JMP | BPF_TAIL_CALL;
13368 
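			/* Sketch of what follows: if the map pointer and key are
			 * constant and unpoisoned and the prog will be JITed
			 * without blinding, register a poke descriptor so the
			 * JIT can later patch this tail call into a direct jump;
			 * insn->imm then carries the descriptor index + 1.
			 * Otherwise, for map pointers set up by an unprivileged
			 * loader, a bounds check and index mask are emitted
			 * inline further below.
			 */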
13369 			aux = &env->insn_aux_data[i + delta];
13370 			if (env->bpf_capable && !expect_blinding &&
13371 			    prog->jit_requested &&
13372 			    !bpf_map_key_poisoned(aux) &&
13373 			    !bpf_map_ptr_poisoned(aux) &&
13374 			    !bpf_map_ptr_unpriv(aux)) {
13375 				struct bpf_jit_poke_descriptor desc = {
13376 					.reason = BPF_POKE_REASON_TAIL_CALL,
13377 					.tail_call.map = BPF_MAP_PTR(aux->map_ptr_state),
13378 					.tail_call.key = bpf_map_key_immediate(aux),
13379 					.insn_idx = i + delta,
13380 				};
13381 
13382 				ret = bpf_jit_add_poke_descriptor(prog, &desc);
13383 				if (ret < 0) {
13384 					verbose(env, "adding tail call poke descriptor failed\n");
13385 					return ret;
13386 				}
13387 
13388 				insn->imm = ret + 1;
13389 				continue;
13390 			}
13391 
13392 			if (!bpf_map_ptr_unpriv(aux))
13393 				continue;
13394 
13395 			/* Instead of changing every JIT that deals with tail_call,
13396 			 * emit two extra insns:
13397 			 *   if (index >= max_entries) goto out;
13398 			 *   index &= array->index_mask;
13399 			 * to avoid out-of-bounds CPU speculation.
13400 			 */
13401 			if (bpf_map_ptr_poisoned(aux)) {
13402 				verbose(env, "tail_call abusing map_ptr\n");
13403 				return -EINVAL;
13404 			}
13405 
13406 			map_ptr = BPF_MAP_PTR(aux->map_ptr_state);
13407 			insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3,
13408 						  map_ptr->max_entries, 2);
13409 			insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3,
13410 						    container_of(map_ptr,
13411 								 struct bpf_array,
13412 								 map)->index_mask);
13413 			insn_buf[2] = *insn;
13414 			cnt = 3;
13415 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
13416 			if (!new_prog)
13417 				return -ENOMEM;
13418 
13419 			delta    += cnt - 1;
13420 			env->prog = prog = new_prog;
13421 			insn      = new_prog->insnsi + i + delta;
13422 			continue;
13423 		}
13424 
13425 		if (insn->imm == BPF_FUNC_timer_set_callback) {
13426 			/* The verifier will process callback_fn as many times as necessary
13427 			 * with different maps and the register states prepared by
13428 			 * set_timer_callback_state will be accurate.
13429 			 *
13430 			 * The following use case is valid:
13431 			 *   map1 is shared by prog1, prog2, prog3.
13432 			 *   prog1 calls bpf_timer_init for some map1 elements
13433 			 *   prog2 calls bpf_timer_set_callback for some map1 elements.
13434 			 *     Those that were not bpf_timer_init-ed will return -EINVAL.
13435 			 *   prog3 calls bpf_timer_start for some map1 elements.
13436 			 *     Those that were not both bpf_timer_init-ed and
13437 			 *     bpf_timer_set_callback-ed will return -EINVAL.
13438 			 */
13439 			struct bpf_insn ld_addrs[2] = {
13440 				BPF_LD_IMM64(BPF_REG_3, (long)prog->aux),
13441 			};
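			/* BPF_LD_IMM64() expands to two insns, which is why
			 * ld_addrs[] has two slots and both halves are copied
			 * below.  The load puts prog->aux into R3, which the
			 * in-kernel bpf_timer_set_callback() implementation
			 * takes as a hidden extra argument to associate the
			 * callback with its owning program.
			 */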
13442 
13443 			insn_buf[0] = ld_addrs[0];
13444 			insn_buf[1] = ld_addrs[1];
13445 			insn_buf[2] = *insn;
13446 			cnt = 3;
13447 
13448 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
13449 			if (!new_prog)
13450 				return -ENOMEM;
13451 
13452 			delta    += cnt - 1;
13453 			env->prog = prog = new_prog;
13454 			insn      = new_prog->insnsi + i + delta;
13455 			goto patch_call_imm;
13456 		}
13457 
13458 		/* The BPF_EMIT_CALL() assumptions made by map_gen_lookup and
13459 		 * the other inlining handlers below currently hold for 64-bit
13460 		 * only.
13461 		 */
13462 		if (prog->jit_requested && BITS_PER_LONG == 64 &&
13463 		    (insn->imm == BPF_FUNC_map_lookup_elem ||
13464 		     insn->imm == BPF_FUNC_map_update_elem ||
13465 		     insn->imm == BPF_FUNC_map_delete_elem ||
13466 		     insn->imm == BPF_FUNC_map_push_elem   ||
13467 		     insn->imm == BPF_FUNC_map_pop_elem    ||
13468 		     insn->imm == BPF_FUNC_map_peek_elem   ||
13469 		     insn->imm == BPF_FUNC_redirect_map    ||
13470 		     insn->imm == BPF_FUNC_for_each_map_elem)) {
13471 			aux = &env->insn_aux_data[i + delta];
13472 			if (bpf_map_ptr_poisoned(aux))
13473 				goto patch_call_imm;
13474 
13475 			map_ptr = BPF_MAP_PTR(aux->map_ptr_state);
13476 			ops = map_ptr->ops;
13477 			if (insn->imm == BPF_FUNC_map_lookup_elem &&
13478 			    ops->map_gen_lookup) {
13479 				cnt = ops->map_gen_lookup(map_ptr, insn_buf);
13480 				if (cnt == -EOPNOTSUPP)
13481 					goto patch_map_ops_generic;
13482 				if (cnt <= 0 || cnt >= ARRAY_SIZE(insn_buf)) {
13483 					verbose(env, "bpf verifier is misconfigured\n");
13484 					return -EINVAL;
13485 				}
13486 
13487 				new_prog = bpf_patch_insn_data(env, i + delta,
13488 							       insn_buf, cnt);
13489 				if (!new_prog)
13490 					return -ENOMEM;
13491 
13492 				delta    += cnt - 1;
13493 				env->prog = prog = new_prog;
13494 				insn      = new_prog->insnsi + i + delta;
13495 				continue;
13496 			}
13497 
13498 			BUILD_BUG_ON(!__same_type(ops->map_lookup_elem,
13499 				     (void *(*)(struct bpf_map *map, void *key))NULL));
13500 			BUILD_BUG_ON(!__same_type(ops->map_delete_elem,
13501 				     (int (*)(struct bpf_map *map, void *key))NULL));
13502 			BUILD_BUG_ON(!__same_type(ops->map_update_elem,
13503 				     (int (*)(struct bpf_map *map, void *key, void *value,
13504 					      u64 flags))NULL));
13505 			BUILD_BUG_ON(!__same_type(ops->map_push_elem,
13506 				     (int (*)(struct bpf_map *map, void *value,
13507 					      u64 flags))NULL));
13508 			BUILD_BUG_ON(!__same_type(ops->map_pop_elem,
13509 				     (int (*)(struct bpf_map *map, void *value))NULL));
13510 			BUILD_BUG_ON(!__same_type(ops->map_peek_elem,
13511 				     (int (*)(struct bpf_map *map, void *value))NULL));
13512 			BUILD_BUG_ON(!__same_type(ops->map_redirect,
13513 				     (int (*)(struct bpf_map *map, u32 ifindex, u64 flags))NULL));
13514 			BUILD_BUG_ON(!__same_type(ops->map_for_each_callback,
13515 				     (int (*)(struct bpf_map *map,
13516 					      bpf_callback_t callback_fn,
13517 					      void *callback_ctx,
13518 					      u64 flags))NULL));
13519 
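			/* The BUILD_BUG_ON()s above pin down the signatures of
			 * the map ops, so that patching insn->imm below via
			 * BPF_CALL_IMM() (i.e. the op's address relative to
			 * __bpf_call_base) really ends up calling a function
			 * with the calling convention the original helper had.
			 */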
13520 patch_map_ops_generic:
13521 			switch (insn->imm) {
13522 			case BPF_FUNC_map_lookup_elem:
13523 				insn->imm = BPF_CALL_IMM(ops->map_lookup_elem);
13524 				continue;
13525 			case BPF_FUNC_map_update_elem:
13526 				insn->imm = BPF_CALL_IMM(ops->map_update_elem);
13527 				continue;
13528 			case BPF_FUNC_map_delete_elem:
13529 				insn->imm = BPF_CALL_IMM(ops->map_delete_elem);
13530 				continue;
13531 			case BPF_FUNC_map_push_elem:
13532 				insn->imm = BPF_CALL_IMM(ops->map_push_elem);
13533 				continue;
13534 			case BPF_FUNC_map_pop_elem:
13535 				insn->imm = BPF_CALL_IMM(ops->map_pop_elem);
13536 				continue;
13537 			case BPF_FUNC_map_peek_elem:
13538 				insn->imm = BPF_CALL_IMM(ops->map_peek_elem);
13539 				continue;
13540 			case BPF_FUNC_redirect_map:
13541 				insn->imm = BPF_CALL_IMM(ops->map_redirect);
13542 				continue;
13543 			case BPF_FUNC_for_each_map_elem:
13544 				insn->imm = BPF_CALL_IMM(ops->map_for_each_callback);
13545 				continue;
13546 			}
13547 
13548 			goto patch_call_imm;
13549 		}
13550 
13551 		/* Implement bpf_jiffies64 inline. */
13552 		if (prog->jit_requested && BITS_PER_LONG == 64 &&
13553 		    insn->imm == BPF_FUNC_jiffies64) {
13554 			struct bpf_insn ld_jiffies_addr[2] = {
13555 				BPF_LD_IMM64(BPF_REG_0,
13556 					     (unsigned long)&jiffies),
13557 			};
13558 
13559 			insn_buf[0] = ld_jiffies_addr[0];
13560 			insn_buf[1] = ld_jiffies_addr[1];
13561 			insn_buf[2] = BPF_LDX_MEM(BPF_DW, BPF_REG_0,
13562 						  BPF_REG_0, 0);
13563 			cnt = 3;
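			/* Resulting inline sequence (sketch):
			 *   r0 = &jiffies           (BPF_LD_IMM64, two insns)
			 *   r0 = *(u64 *)(r0 + 0)
			 * replacing the helper call with a direct load of the
			 * jiffies counter.
			 */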
13564 
13565 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf,
13566 						       cnt);
13567 			if (!new_prog)
13568 				return -ENOMEM;
13569 
13570 			delta    += cnt - 1;
13571 			env->prog = prog = new_prog;
13572 			insn      = new_prog->insnsi + i + delta;
13573 			continue;
13574 		}
13575 
13576 		/* Implement bpf_get_func_arg inline. */
13577 		if (prog_type == BPF_PROG_TYPE_TRACING &&
13578 		    insn->imm == BPF_FUNC_get_func_arg) {
13579 			/* Load nr_args from ctx - 8 */
13580 			insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8);
13581 			insn_buf[1] = BPF_JMP32_REG(BPF_JGE, BPF_REG_2, BPF_REG_0, 6);
13582 			insn_buf[2] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 3);
13583 			insn_buf[3] = BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1);
13584 			insn_buf[4] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0);
13585 			insn_buf[5] = BPF_STX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0);
13586 			insn_buf[6] = BPF_MOV64_IMM(BPF_REG_0, 0);
13587 			insn_buf[7] = BPF_JMP_A(1);
13588 			insn_buf[8] = BPF_MOV64_IMM(BPF_REG_0, -EINVAL);
13589 			cnt = 9;
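			/* Rough pseudo-C of the nine insns above:
			 *   n = ((u64 *)ctx)[-1];                    // nr_args
			 *   if ((u32)arg_n >= (u32)n) { r0 = -EINVAL; }
			 *   else { *(u64 *)r3 = ((u64 *)ctx)[arg_n]; r0 = 0; }
			 * where arg_n is in r2 and r3 points to the output slot.
			 */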
13590 
13591 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
13592 			if (!new_prog)
13593 				return -ENOMEM;
13594 
13595 			delta    += cnt - 1;
13596 			env->prog = prog = new_prog;
13597 			insn      = new_prog->insnsi + i + delta;
13598 			continue;
13599 		}
13600 
13601 		/* Implement bpf_get_func_ret inline. */
13602 		if (prog_type == BPF_PROG_TYPE_TRACING &&
13603 		    insn->imm == BPF_FUNC_get_func_ret) {
13604 			if (eatype == BPF_TRACE_FEXIT ||
13605 			    eatype == BPF_MODIFY_RETURN) {
13606 				/* Load nr_args from ctx - 8 */
13607 				insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8);
13608 				insn_buf[1] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_0, 3);
13609 				insn_buf[2] = BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1);
13610 				insn_buf[3] = BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0);
13611 				insn_buf[4] = BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, 0);
13612 				insn_buf[5] = BPF_MOV64_IMM(BPF_REG_0, 0);
13613 				cnt = 6;
13614 			} else {
13615 				insn_buf[0] = BPF_MOV64_IMM(BPF_REG_0, -EOPNOTSUPP);
13616 				cnt = 1;
13617 			}
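			/* In the fexit/fmod_ret case the slot at ctx[nr_args],
			 * as set up by the BPF trampoline, holds the traced
			 * function's return value, so the sequence above amounts
			 * to:
			 *   *(u64 *)r2 = ((u64 *)ctx)[nr_args]; r0 = 0;
			 * Other attach types simply get -EOPNOTSUPP in r0.
			 */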
13618 
13619 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
13620 			if (!new_prog)
13621 				return -ENOMEM;
13622 
13623 			delta    += cnt - 1;
13624 			env->prog = prog = new_prog;
13625 			insn      = new_prog->insnsi + i + delta;
13626 			continue;
13627 		}
13628 
13629 		/* Implement get_func_arg_cnt inline. */
13630 		if (prog_type == BPF_PROG_TYPE_TRACING &&
13631 		    insn->imm == BPF_FUNC_get_func_arg_cnt) {
13632 			/* Load nr_args from ctx - 8 */
13633 			insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8);
13634 
13635 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 1);
13636 			if (!new_prog)
13637 				return -ENOMEM;
13638 
13639 			env->prog = prog = new_prog;
13640 			insn      = new_prog->insnsi + i + delta;
13641 			continue;
13642 		}
13643 
13644 		/* Implement bpf_get_func_ip inline. */
13645 		if (prog_type == BPF_PROG_TYPE_TRACING &&
13646 		    insn->imm == BPF_FUNC_get_func_ip) {
13647 			/* Load IP address from ctx - 16 */
13648 			insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -16);
13649 
13650 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 1);
13651 			if (!new_prog)
13652 				return -ENOMEM;
13653 
13654 			env->prog = prog = new_prog;
13655 			insn      = new_prog->insnsi + i + delta;
13656 			continue;
13657 		}
13658 
13659 patch_call_imm:
13660 		fn = env->ops->get_func_proto(insn->imm, env->prog);
13661 		/* All functions that have a prototype and that the verifier
13662 		 * allowed programs to call must be real in-kernel functions.
13663 		 */
13664 		if (!fn->func) {
13665 			verbose(env,
13666 				"kernel subsystem misconfigured func %s#%d\n",
13667 				func_id_name(insn->imm), insn->imm);
13668 			return -EFAULT;
13669 		}
13670 		insn->imm = fn->func - __bpf_call_base;
13671 	}
13672 
13673 	/* Since poke tab is now finalized, publish aux to tracker. */
13674 	for (i = 0; i < prog->aux->size_poke_tab; i++) {
13675 		map_ptr = prog->aux->poke_tab[i].tail_call.map;
13676 		if (!map_ptr->ops->map_poke_track ||
13677 		    !map_ptr->ops->map_poke_untrack ||
13678 		    !map_ptr->ops->map_poke_run) {
13679 			verbose(env, "bpf verifier is misconfigured\n");
13680 			return -EINVAL;
13681 		}
13682 
13683 		ret = map_ptr->ops->map_poke_track(map_ptr, prog->aux);
13684 		if (ret < 0) {
13685 			verbose(env, "tracking tail call prog failed\n");
13686 			return ret;
13687 		}
13688 	}
13689 
13690 	sort_kfunc_descs_by_imm(env->prog);
13691 
13692 	return 0;
13693 }
13694 
13695 static void free_states(struct bpf_verifier_env *env)
13696 {
13697 	struct bpf_verifier_state_list *sl, *sln;
13698 	int i;
13699 
13700 	sl = env->free_list;
13701 	while (sl) {
13702 		sln = sl->next;
13703 		free_verifier_state(&sl->state, false);
13704 		kfree(sl);
13705 		sl = sln;
13706 	}
13707 	env->free_list = NULL;
13708 
13709 	if (!env->explored_states)
13710 		return;
13711 
13712 	for (i = 0; i < state_htab_size(env); i++) {
13713 		sl = env->explored_states[i];
13714 
13715 		while (sl) {
13716 			sln = sl->next;
13717 			free_verifier_state(&sl->state, false);
13718 			kfree(sl);
13719 			sl = sln;
13720 		}
13721 		env->explored_states[i] = NULL;
13722 	}
13723 }
13724 
13725 static int do_check_common(struct bpf_verifier_env *env, int subprog)
13726 {
13727 	bool pop_log = !(env->log.level & BPF_LOG_LEVEL2);
13728 	struct bpf_verifier_state *state;
13729 	struct bpf_reg_state *regs;
13730 	int ret, i;
13731 
13732 	env->prev_linfo = NULL;
13733 	env->pass_cnt++;
13734 
13735 	state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL);
13736 	if (!state)
13737 		return -ENOMEM;
13738 	state->curframe = 0;
13739 	state->speculative = false;
13740 	state->branches = 1;
13741 	state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL);
13742 	if (!state->frame[0]) {
13743 		kfree(state);
13744 		return -ENOMEM;
13745 	}
13746 	env->cur_state = state;
13747 	init_func_state(env, state->frame[0],
13748 			BPF_MAIN_FUNC /* callsite */,
13749 			0 /* frameno */,
13750 			subprog);
13751 
13752 	regs = state->frame[state->curframe]->regs;
13753 	if (subprog || env->prog->type == BPF_PROG_TYPE_EXT) {
13754 		ret = btf_prepare_func_args(env, subprog, regs);
13755 		if (ret)
13756 			goto out;
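		/* For a global subprog the argument registers are seeded from
		 * its BTF prototype (filled in by btf_prepare_func_args()
		 * above): ctx pointers become known-zero PTR_TO_CTX, plain
		 * scalars become unknown SCALAR_VALUEs, and PTR_TO_MEM args
		 * keep their size and get a fresh id, so the body is verified
		 * for any type-matching caller.
		 */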
13757 		for (i = BPF_REG_1; i <= BPF_REG_5; i++) {
13758 			if (regs[i].type == PTR_TO_CTX)
13759 				mark_reg_known_zero(env, regs, i);
13760 			else if (regs[i].type == SCALAR_VALUE)
13761 				mark_reg_unknown(env, regs, i);
13762 			else if (base_type(regs[i].type) == PTR_TO_MEM) {
13763 				const u32 mem_size = regs[i].mem_size;
13764 
13765 				mark_reg_known_zero(env, regs, i);
13766 				regs[i].mem_size = mem_size;
13767 				regs[i].id = ++env->id_gen;
13768 			}
13769 		}
13770 	} else {
13771 		/* 1st arg to a function */
13772 		regs[BPF_REG_1].type = PTR_TO_CTX;
13773 		mark_reg_known_zero(env, regs, BPF_REG_1);
13774 		ret = btf_check_subprog_arg_match(env, subprog, regs);
13775 		if (ret == -EFAULT)
13776 			/* Unlikely verifier bug; abort.
13777 			 * ret == 0 and ret < 0 are, sadly, both acceptable for
13778 			 * the main() function due to backward compatibility.
13779 			 * E.g. a socket filter program may be written as:
13780 			 *   int bpf_prog(struct pt_regs *ctx)
13781 			 * and never dereference that ctx in the program;
13782 			 * 'struct pt_regs' is a type mismatch for a socket
13783 			 * filter, which should be using 'struct __sk_buff'.
13784 			 */
13785 			goto out;
13786 	}
13787 
13788 	ret = do_check(env);
13789 out:
13790 	/* check for NULL is necessary, since cur_state can be freed inside
13791 	 * do_check() under memory pressure.
13792 	 */
13793 	if (env->cur_state) {
13794 		free_verifier_state(env->cur_state, true);
13795 		env->cur_state = NULL;
13796 	}
13797 	while (!pop_stack(env, NULL, NULL, false));
13798 	if (!ret && pop_log)
13799 		bpf_vlog_reset(&env->log, 0);
13800 	free_states(env);
13801 	return ret;
13802 }
13803 
13804 /* Verify all global functions in a BPF program one by one based on their BTF.
13805  * All global functions must pass verification. Otherwise the whole program is rejected.
13806  * Consider:
13807  * int bar(int);
13808  * int foo(int f)
13809  * {
13810  *    return bar(f);
13811  * }
13812  * int bar(int b)
13813  * {
13814  *    ...
13815  * }
13816  * foo() will be verified first for R1=any_scalar_value. During its verification
13817  * it is assumed that bar() has already been verified successfully, so the call
13818  * to bar() from foo() is checked for type match only. Later bar() will be
13819  * verified independently to check that it's safe for R1=any_scalar_value.
13820  */
13821 static int do_check_subprogs(struct bpf_verifier_env *env)
13822 {
13823 	struct bpf_prog_aux *aux = env->prog->aux;
13824 	int i, ret;
13825 
13826 	if (!aux->func_info)
13827 		return 0;
13828 
13829 	for (i = 1; i < env->subprog_cnt; i++) {
13830 		if (aux->func_info_aux[i].linkage != BTF_FUNC_GLOBAL)
13831 			continue;
13832 		env->insn_idx = env->subprog_info[i].start;
13833 		WARN_ON_ONCE(env->insn_idx == 0);
13834 		ret = do_check_common(env, i);
13835 		if (ret) {
13836 			return ret;
13837 		} else if (env->log.level & BPF_LOG_LEVEL) {
13838 			verbose(env,
13839 				"Func#%d is safe for any args that match its prototype\n",
13840 				i);
13841 		}
13842 	}
13843 	return 0;
13844 }
13845 
13846 static int do_check_main(struct bpf_verifier_env *env)
13847 {
13848 	int ret;
13849 
13850 	env->insn_idx = 0;
13851 	ret = do_check_common(env, 0);
13852 	if (!ret)
13853 		env->prog->aux->stack_depth = env->subprog_info[0].stack_depth;
13854 	return ret;
13855 }
13856 
13857 
13858 static void print_verification_stats(struct bpf_verifier_env *env)
13859 {
13860 	int i;
13861 
13862 	if (env->log.level & BPF_LOG_STATS) {
13863 		verbose(env, "verification time %lld usec\n",
13864 			div_u64(env->verification_time, 1000));
13865 		verbose(env, "stack depth ");
13866 		for (i = 0; i < env->subprog_cnt; i++) {
13867 			u32 depth = env->subprog_info[i].stack_depth;
13868 
13869 			verbose(env, "%d", depth);
13870 			if (i + 1 < env->subprog_cnt)
13871 				verbose(env, "+");
13872 		}
13873 		verbose(env, "\n");
13874 	}
13875 	verbose(env, "processed %d insns (limit %d) max_states_per_insn %d "
13876 		"total_states %d peak_states %d mark_read %d\n",
13877 		env->insn_processed, BPF_COMPLEXITY_LIMIT_INSNS,
13878 		env->max_states_per_insn, env->total_states,
13879 		env->peak_states, env->longest_mark_read_walk);
13880 }
13881 
13882 static int check_struct_ops_btf_id(struct bpf_verifier_env *env)
13883 {
13884 	const struct btf_type *t, *func_proto;
13885 	const struct bpf_struct_ops *st_ops;
13886 	const struct btf_member *member;
13887 	struct bpf_prog *prog = env->prog;
13888 	u32 btf_id, member_idx;
13889 	const char *mname;
13890 
13891 	if (!prog->gpl_compatible) {
13892 		verbose(env, "struct ops programs must have a GPL compatible license\n");
13893 		return -EINVAL;
13894 	}
13895 
13896 	btf_id = prog->aux->attach_btf_id;
13897 	st_ops = bpf_struct_ops_find(btf_id);
13898 	if (!st_ops) {
13899 		verbose(env, "attach_btf_id %u is not a supported struct\n",
13900 			btf_id);
13901 		return -ENOTSUPP;
13902 	}
13903 
13904 	t = st_ops->type;
13905 	member_idx = prog->expected_attach_type;
13906 	if (member_idx >= btf_type_vlen(t)) {
13907 		verbose(env, "attach to invalid member idx %u of struct %s\n",
13908 			member_idx, st_ops->name);
13909 		return -EINVAL;
13910 	}
13911 
13912 	member = &btf_type_member(t)[member_idx];
13913 	mname = btf_name_by_offset(btf_vmlinux, member->name_off);
13914 	func_proto = btf_type_resolve_func_ptr(btf_vmlinux, member->type,
13915 					       NULL);
13916 	if (!func_proto) {
13917 		verbose(env, "attach to invalid member %s(@idx %u) of struct %s\n",
13918 			mname, member_idx, st_ops->name);
13919 		return -EINVAL;
13920 	}
13921 
13922 	if (st_ops->check_member) {
13923 		int err = st_ops->check_member(t, member);
13924 
13925 		if (err) {
13926 			verbose(env, "attach to unsupported member %s of struct %s\n",
13927 				mname, st_ops->name);
13928 			return err;
13929 		}
13930 	}
13931 
13932 	prog->aux->attach_func_proto = func_proto;
13933 	prog->aux->attach_func_name = mname;
13934 	env->ops = st_ops->verifier_ops;
13935 
13936 	return 0;
13937 }
13938 #define SECURITY_PREFIX "security_"
13939 
13940 static int check_attach_modify_return(unsigned long addr, const char *func_name)
13941 {
13942 	if (within_error_injection_list(addr) ||
13943 	    !strncmp(SECURITY_PREFIX, func_name, sizeof(SECURITY_PREFIX) - 1))
13944 		return 0;
13945 
13946 	return -EINVAL;
13947 }
13948 
13949 /* list of non-sleepable functions that are otherwise on
13950  * ALLOW_ERROR_INJECTION list
13951  */
13952 BTF_SET_START(btf_non_sleepable_error_inject)
13953 /* The three functions below can be called from both sleepable and non-sleepable
13954  * contexts. Assume non-sleepable from the BPF safety point of view.
13955  */
13956 BTF_ID(func, __filemap_add_folio)
13957 BTF_ID(func, should_fail_alloc_page)
13958 BTF_ID(func, should_failslab)
13959 BTF_SET_END(btf_non_sleepable_error_inject)
13960 
13961 static int check_non_sleepable_error_inject(u32 btf_id)
13962 {
13963 	return btf_id_set_contains(&btf_non_sleepable_error_inject, btf_id);
13964 }
13965 
13966 int bpf_check_attach_target(struct bpf_verifier_log *log,
13967 			    const struct bpf_prog *prog,
13968 			    const struct bpf_prog *tgt_prog,
13969 			    u32 btf_id,
13970 			    struct bpf_attach_target_info *tgt_info)
13971 {
13972 	bool prog_extension = prog->type == BPF_PROG_TYPE_EXT;
13973 	const char prefix[] = "btf_trace_";
13974 	int ret = 0, subprog = -1, i;
13975 	const struct btf_type *t;
13976 	bool conservative = true;
13977 	const char *tname;
13978 	struct btf *btf;
13979 	long addr = 0;
13980 
13981 	if (!btf_id) {
13982 		bpf_log(log, "Tracing programs must provide btf_id\n");
13983 		return -EINVAL;
13984 	}
13985 	btf = tgt_prog ? tgt_prog->aux->btf : prog->aux->attach_btf;
13986 	if (!btf) {
13987 		bpf_log(log,
13988 			"FENTRY/FEXIT program can only be attached to another program annotated with BTF\n");
13989 		return -EINVAL;
13990 	}
13991 	t = btf_type_by_id(btf, btf_id);
13992 	if (!t) {
13993 		bpf_log(log, "attach_btf_id %u is invalid\n", btf_id);
13994 		return -EINVAL;
13995 	}
13996 	tname = btf_name_by_offset(btf, t->name_off);
13997 	if (!tname) {
13998 		bpf_log(log, "attach_btf_id %u doesn't have a name\n", btf_id);
13999 		return -EINVAL;
14000 	}
14001 	if (tgt_prog) {
14002 		struct bpf_prog_aux *aux = tgt_prog->aux;
14003 
14004 		for (i = 0; i < aux->func_info_cnt; i++)
14005 			if (aux->func_info[i].type_id == btf_id) {
14006 				subprog = i;
14007 				break;
14008 			}
14009 		if (subprog == -1) {
14010 			bpf_log(log, "Subprog %s doesn't exist\n", tname);
14011 			return -EINVAL;
14012 		}
14013 		conservative = aux->func_info_aux[subprog].unreliable;
14014 		if (prog_extension) {
14015 			if (conservative) {
14016 				bpf_log(log,
14017 					"Cannot replace static functions\n");
14018 				return -EINVAL;
14019 			}
14020 			if (!prog->jit_requested) {
14021 				bpf_log(log,
14022 					"Extension programs should be JITed\n");
14023 				return -EINVAL;
14024 			}
14025 		}
14026 		if (!tgt_prog->jited) {
14027 			bpf_log(log, "Can attach to only JITed progs\n");
14028 			return -EINVAL;
14029 		}
14030 		if (tgt_prog->type == prog->type) {
14031 			/* Cannot fentry/fexit another fentry/fexit program.
14032 			 * Cannot attach program extension to another extension.
14033 			 * It's ok to attach fentry/fexit to extension program.
14034 			 */
14035 			bpf_log(log, "Cannot recursively attach\n");
14036 			return -EINVAL;
14037 		}
14038 		if (tgt_prog->type == BPF_PROG_TYPE_TRACING &&
14039 		    prog_extension &&
14040 		    (tgt_prog->expected_attach_type == BPF_TRACE_FENTRY ||
14041 		     tgt_prog->expected_attach_type == BPF_TRACE_FEXIT)) {
14042 			/* Program extensions can extend all program types
14043 			 * except fentry/fexit. The reason is the following:
14044 			 * fentry/fexit programs are used for performance
14045 			 * analysis and stats and can be attached to any
14046 			 * program type except themselves. When an extension
14047 			 * program replaces an XDP function, it must remain
14048 			 * possible to analyze the performance of all functions,
14049 			 * both the original XDP program and its extension.
14050 			 * Hence attaching fentry/fexit to BPF_PROG_TYPE_EXT is
14051 			 * allowed. If extending fentry/fexit were allowed, it
14052 			 * would be possible to create a long call chain
14053 			 * fentry->extension->fentry->extension beyond any
14054 			 * reasonable stack size. Hence extending fentry is not
14055 			 * allowed.
14056 			 */
14057 			bpf_log(log, "Cannot extend fentry/fexit\n");
14058 			return -EINVAL;
14059 		}
14060 	} else {
14061 		if (prog_extension) {
14062 			bpf_log(log, "Cannot replace kernel functions\n");
14063 			return -EINVAL;
14064 		}
14065 	}
14066 
14067 	switch (prog->expected_attach_type) {
14068 	case BPF_TRACE_RAW_TP:
14069 		if (tgt_prog) {
14070 			bpf_log(log,
14071 				"Only FENTRY/FEXIT progs are attachable to another BPF prog\n");
14072 			return -EINVAL;
14073 		}
14074 		if (!btf_type_is_typedef(t)) {
14075 			bpf_log(log, "attach_btf_id %u is not a typedef\n",
14076 				btf_id);
14077 			return -EINVAL;
14078 		}
14079 		if (strncmp(prefix, tname, sizeof(prefix) - 1)) {
14080 			bpf_log(log, "attach_btf_id %u points to wrong type name %s\n",
14081 				btf_id, tname);
14082 			return -EINVAL;
14083 		}
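		/* Sketch of the shape being checked: a raw tracepoint's attach
		 * point is a BTF typedef roughly of the form
		 *   typedef void (*btf_trace_<name>)(void *, <tp args>...);
		 * so strip the "btf_trace_" prefix to get the tracepoint name
		 * and then resolve typedef -> pointer -> func_proto below.
		 */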
14084 		tname += sizeof(prefix) - 1;
14085 		t = btf_type_by_id(btf, t->type);
14086 		if (!btf_type_is_ptr(t))
14087 			/* should never happen in valid vmlinux build */
14088 			return -EINVAL;
14089 		t = btf_type_by_id(btf, t->type);
14090 		if (!btf_type_is_func_proto(t))
14091 			/* should never happen in valid vmlinux build */
14092 			return -EINVAL;
14093 
14094 		break;
14095 	case BPF_TRACE_ITER:
14096 		if (!btf_type_is_func(t)) {
14097 			bpf_log(log, "attach_btf_id %u is not a function\n",
14098 				btf_id);
14099 			return -EINVAL;
14100 		}
14101 		t = btf_type_by_id(btf, t->type);
14102 		if (!btf_type_is_func_proto(t))
14103 			return -EINVAL;
14104 		ret = btf_distill_func_proto(log, btf, t, tname, &tgt_info->fmodel);
14105 		if (ret)
14106 			return ret;
14107 		break;
14108 	default:
14109 		if (!prog_extension)
14110 			return -EINVAL;
14111 		fallthrough;
14112 	case BPF_MODIFY_RETURN:
14113 	case BPF_LSM_MAC:
14114 	case BPF_TRACE_FENTRY:
14115 	case BPF_TRACE_FEXIT:
14116 		if (!btf_type_is_func(t)) {
14117 			bpf_log(log, "attach_btf_id %u is not a function\n",
14118 				btf_id);
14119 			return -EINVAL;
14120 		}
14121 		if (prog_extension &&
14122 		    btf_check_type_match(log, prog, btf, t))
14123 			return -EINVAL;
14124 		t = btf_type_by_id(btf, t->type);
14125 		if (!btf_type_is_func_proto(t))
14126 			return -EINVAL;
14127 
14128 		if ((prog->aux->saved_dst_prog_type || prog->aux->saved_dst_attach_type) &&
14129 		    (!tgt_prog || prog->aux->saved_dst_prog_type != tgt_prog->type ||
14130 		     prog->aux->saved_dst_attach_type != tgt_prog->expected_attach_type))
14131 			return -EINVAL;
14132 
14133 		if (tgt_prog && conservative)
14134 			t = NULL;
14135 
14136 		ret = btf_distill_func_proto(log, btf, t, tname, &tgt_info->fmodel);
14137 		if (ret < 0)
14138 			return ret;
14139 
14140 		if (tgt_prog) {
14141 			if (subprog == 0)
14142 				addr = (long) tgt_prog->bpf_func;
14143 			else
14144 				addr = (long) tgt_prog->aux->func[subprog]->bpf_func;
14145 		} else {
14146 			addr = kallsyms_lookup_name(tname);
14147 			if (!addr) {
14148 				bpf_log(log,
14149 					"The address of function %s cannot be found\n",
14150 					tname);
14151 				return -ENOENT;
14152 			}
14153 		}
14154 
14155 		if (prog->aux->sleepable) {
14156 			ret = -EINVAL;
14157 			switch (prog->type) {
14158 			case BPF_PROG_TYPE_TRACING:
14159 				/* fentry/fexit/fmod_ret progs can be sleepable only if they are
14160 				 * attached to functions on the ALLOW_ERROR_INJECTION list and not denylisted.
14161 				 */
14162 				if (!check_non_sleepable_error_inject(btf_id) &&
14163 				    within_error_injection_list(addr))
14164 					ret = 0;
14165 				break;
14166 			case BPF_PROG_TYPE_LSM:
14167 				/* LSM progs check that they are attached to bpf_lsm_*() funcs.
14168 				 * Only some of them are sleepable.
14169 				 */
14170 				if (bpf_lsm_is_sleepable_hook(btf_id))
14171 					ret = 0;
14172 				break;
14173 			default:
14174 				break;
14175 			}
14176 			if (ret) {
14177 				bpf_log(log, "%s is not sleepable\n", tname);
14178 				return ret;
14179 			}
14180 		} else if (prog->expected_attach_type == BPF_MODIFY_RETURN) {
14181 			if (tgt_prog) {
14182 				bpf_log(log, "can't modify return codes of BPF programs\n");
14183 				return -EINVAL;
14184 			}
14185 			ret = check_attach_modify_return(addr, tname);
14186 			if (ret) {
14187 				bpf_log(log, "%s() is not modifiable\n", tname);
14188 				return ret;
14189 			}
14190 		}
14191 
14192 		break;
14193 	}
14194 	tgt_info->tgt_addr = addr;
14195 	tgt_info->tgt_name = tname;
14196 	tgt_info->tgt_type = t;
14197 	return 0;
14198 }
14199 
14200 BTF_SET_START(btf_id_deny)
14201 BTF_ID_UNUSED
14202 #ifdef CONFIG_SMP
14203 BTF_ID(func, migrate_disable)
14204 BTF_ID(func, migrate_enable)
14205 #endif
14206 #if !defined CONFIG_PREEMPT_RCU && !defined CONFIG_TINY_RCU
14207 BTF_ID(func, rcu_read_unlock_strict)
14208 #endif
14209 BTF_SET_END(btf_id_deny)
14210 
14211 static int check_attach_btf_id(struct bpf_verifier_env *env)
14212 {
14213 	struct bpf_prog *prog = env->prog;
14214 	struct bpf_prog *tgt_prog = prog->aux->dst_prog;
14215 	struct bpf_attach_target_info tgt_info = {};
14216 	u32 btf_id = prog->aux->attach_btf_id;
14217 	struct bpf_trampoline *tr;
14218 	int ret;
14219 	u64 key;
14220 
14221 	if (prog->type == BPF_PROG_TYPE_SYSCALL) {
14222 		if (prog->aux->sleepable)
14223 			/* attach_btf_id checked to be zero already */
14224 			return 0;
14225 		verbose(env, "Syscall programs can only be sleepable\n");
14226 		return -EINVAL;
14227 	}
14228 
14229 	if (prog->aux->sleepable && prog->type != BPF_PROG_TYPE_TRACING &&
14230 	    prog->type != BPF_PROG_TYPE_LSM) {
14231 		verbose(env, "Only fentry/fexit/fmod_ret and lsm programs can be sleepable\n");
14232 		return -EINVAL;
14233 	}
14234 
14235 	if (prog->type == BPF_PROG_TYPE_STRUCT_OPS)
14236 		return check_struct_ops_btf_id(env);
14237 
14238 	if (prog->type != BPF_PROG_TYPE_TRACING &&
14239 	    prog->type != BPF_PROG_TYPE_LSM &&
14240 	    prog->type != BPF_PROG_TYPE_EXT)
14241 		return 0;
14242 
14243 	ret = bpf_check_attach_target(&env->log, prog, tgt_prog, btf_id, &tgt_info);
14244 	if (ret)
14245 		return ret;
14246 
14247 	if (tgt_prog && prog->type == BPF_PROG_TYPE_EXT) {
14248 		/* To make freplace programs equivalent to their targets, they
14249 		 * need to inherit env->ops and expected_attach_type for the
14250 		 * rest of the verification.
14251 		 */
14252 		env->ops = bpf_verifier_ops[tgt_prog->type];
14253 		prog->expected_attach_type = tgt_prog->expected_attach_type;
14254 	}
14255 
14256 	/* store info about the attachment target that will be used later */
14257 	prog->aux->attach_func_proto = tgt_info.tgt_type;
14258 	prog->aux->attach_func_name = tgt_info.tgt_name;
14259 
14260 	if (tgt_prog) {
14261 		prog->aux->saved_dst_prog_type = tgt_prog->type;
14262 		prog->aux->saved_dst_attach_type = tgt_prog->expected_attach_type;
14263 	}
14264 
14265 	if (prog->expected_attach_type == BPF_TRACE_RAW_TP) {
14266 		prog->aux->attach_btf_trace = true;
14267 		return 0;
14268 	} else if (prog->expected_attach_type == BPF_TRACE_ITER) {
14269 		if (!bpf_iter_prog_supported(prog))
14270 			return -EINVAL;
14271 		return 0;
14272 	}
14273 
14274 	if (prog->type == BPF_PROG_TYPE_LSM) {
14275 		ret = bpf_lsm_verify_prog(&env->log, prog);
14276 		if (ret < 0)
14277 			return ret;
14278 	} else if (prog->type == BPF_PROG_TYPE_TRACING &&
14279 		   btf_id_set_contains(&btf_id_deny, btf_id)) {
14280 		return -EINVAL;
14281 	}
14282 
14283 	key = bpf_trampoline_compute_key(tgt_prog, prog->aux->attach_btf, btf_id);
14284 	tr = bpf_trampoline_get(key, &tgt_info);
14285 	if (!tr)
14286 		return -ENOMEM;
14287 
14288 	prog->aux->dst_trampoline = tr;
14289 	return 0;
14290 }
14291 
14292 struct btf *bpf_get_btf_vmlinux(void)
14293 {
14294 	if (!btf_vmlinux && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) {
14295 		mutex_lock(&bpf_verifier_lock);
14296 		if (!btf_vmlinux)
14297 			btf_vmlinux = btf_parse_vmlinux();
14298 		mutex_unlock(&bpf_verifier_lock);
14299 	}
14300 	return btf_vmlinux;
14301 }
14302 
14303 int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr)
14304 {
14305 	u64 start_time = ktime_get_ns();
14306 	struct bpf_verifier_env *env;
14307 	struct bpf_verifier_log *log;
14308 	int i, len, ret = -EINVAL;
14309 	bool is_priv;
14310 
14311 	/* no program is valid */
14312 	if (ARRAY_SIZE(bpf_verifier_ops) == 0)
14313 		return -EINVAL;
14314 
14315 	/* 'struct bpf_verifier_env' can be global, but since it's not small,
14316 	 * allocate/free it every time bpf_check() is called
14317 	 */
14318 	env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
14319 	if (!env)
14320 		return -ENOMEM;
14321 	log = &env->log;
14322 
14323 	len = (*prog)->len;
14324 	env->insn_aux_data =
14325 		vzalloc(array_size(sizeof(struct bpf_insn_aux_data), len));
14326 	ret = -ENOMEM;
14327 	if (!env->insn_aux_data)
14328 		goto err_free_env;
14329 	for (i = 0; i < len; i++)
14330 		env->insn_aux_data[i].orig_idx = i;
14331 	env->prog = *prog;
14332 	env->ops = bpf_verifier_ops[env->prog->type];
14333 	env->fd_array = make_bpfptr(attr->fd_array, uattr.is_kernel);
14334 	is_priv = bpf_capable();
14335 
14336 	bpf_get_btf_vmlinux();
14337 
14338 	/* grab the mutex to protect a few globals used by the verifier */
14339 	if (!is_priv)
14340 		mutex_lock(&bpf_verifier_lock);
14341 
14342 	if (attr->log_level || attr->log_buf || attr->log_size) {
14343 		/* user requested verbose verifier output
14344 		 * and supplied buffer to store the verification trace
14345 		 */
14346 		log->level = attr->log_level;
14347 		log->ubuf = (char __user *) (unsigned long) attr->log_buf;
14348 		log->len_total = attr->log_size;
14349 
14350 		/* log attributes have to be sane */
14351 		if (!bpf_verifier_log_attr_valid(log)) {
14352 			ret = -EINVAL;
14353 			goto err_unlock;
14354 		}
14355 	}
14356 
14357 	mark_verifier_state_clean(env);
14358 
14359 	if (IS_ERR(btf_vmlinux)) {
14360 		/* Either gcc or pahole or the kernel is broken. */
14361 		verbose(env, "in-kernel BTF is malformed\n");
14362 		ret = PTR_ERR(btf_vmlinux);
14363 		goto skip_full_check;
14364 	}
14365 
14366 	env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT);
14367 	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
14368 		env->strict_alignment = true;
14369 	if (attr->prog_flags & BPF_F_ANY_ALIGNMENT)
14370 		env->strict_alignment = false;
14371 
14372 	env->allow_ptr_leaks = bpf_allow_ptr_leaks();
14373 	env->allow_uninit_stack = bpf_allow_uninit_stack();
14374 	env->allow_ptr_to_map_access = bpf_allow_ptr_to_map_access();
14375 	env->bypass_spec_v1 = bpf_bypass_spec_v1();
14376 	env->bypass_spec_v4 = bpf_bypass_spec_v4();
14377 	env->bpf_capable = bpf_capable();
14378 
14379 	if (is_priv)
14380 		env->test_state_freq = attr->prog_flags & BPF_F_TEST_STATE_FREQ;
14381 
14382 	env->explored_states = kvcalloc(state_htab_size(env),
14383 				       sizeof(struct bpf_verifier_state_list *),
14384 				       GFP_USER);
14385 	ret = -ENOMEM;
14386 	if (!env->explored_states)
14387 		goto skip_full_check;
14388 
14389 	ret = add_subprog_and_kfunc(env);
14390 	if (ret < 0)
14391 		goto skip_full_check;
14392 
14393 	ret = check_subprogs(env);
14394 	if (ret < 0)
14395 		goto skip_full_check;
14396 
14397 	ret = check_btf_info(env, attr, uattr);
14398 	if (ret < 0)
14399 		goto skip_full_check;
14400 
14401 	ret = check_attach_btf_id(env);
14402 	if (ret)
14403 		goto skip_full_check;
14404 
14405 	ret = resolve_pseudo_ldimm64(env);
14406 	if (ret < 0)
14407 		goto skip_full_check;
14408 
14409 	if (bpf_prog_is_dev_bound(env->prog->aux)) {
14410 		ret = bpf_prog_offload_verifier_prep(env->prog);
14411 		if (ret)
14412 			goto skip_full_check;
14413 	}
14414 
14415 	ret = check_cfg(env);
14416 	if (ret < 0)
14417 		goto skip_full_check;
14418 
14419 	ret = do_check_subprogs(env);
14420 	ret = ret ?: do_check_main(env);
14421 
14422 	if (ret == 0 && bpf_prog_is_dev_bound(env->prog->aux))
14423 		ret = bpf_prog_offload_finalize(env);
14424 
14425 skip_full_check:
14426 	kvfree(env->explored_states);
14427 
14428 	if (ret == 0)
14429 		ret = check_max_stack_depth(env);
14430 
14431 	/* instruction rewrites happen after this point */
14432 	if (is_priv) {
14433 		if (ret == 0)
14434 			opt_hard_wire_dead_code_branches(env);
14435 		if (ret == 0)
14436 			ret = opt_remove_dead_code(env);
14437 		if (ret == 0)
14438 			ret = opt_remove_nops(env);
14439 	} else {
14440 		if (ret == 0)
14441 			sanitize_dead_code(env);
14442 	}
14443 
14444 	if (ret == 0)
14445 		/* program is valid, convert *(u32*)(ctx + off) accesses */
14446 		ret = convert_ctx_accesses(env);
14447 
14448 	if (ret == 0)
14449 		ret = do_misc_fixups(env);
14450 
14451 	/* Do the 32-bit optimization after insn patching is done, so the
14452 	 * patched insns can be handled correctly.
14453 	 */
14454 	if (ret == 0 && !bpf_prog_is_dev_bound(env->prog->aux)) {
14455 		ret = opt_subreg_zext_lo32_rnd_hi32(env, attr);
14456 		env->prog->aux->verifier_zext = bpf_jit_needs_zext() ? !ret
14457 								     : false;
14458 	}
14459 
14460 	if (ret == 0)
14461 		ret = fixup_call_args(env);
14462 
14463 	env->verification_time = ktime_get_ns() - start_time;
14464 	print_verification_stats(env);
14465 	env->prog->aux->verified_insns = env->insn_processed;
14466 
14467 	if (log->level && bpf_verifier_log_full(log))
14468 		ret = -ENOSPC;
14469 	if (log->level && !log->ubuf) {
14470 		ret = -EFAULT;
14471 		goto err_release_maps;
14472 	}
14473 
14474 	if (ret)
14475 		goto err_release_maps;
14476 
14477 	if (env->used_map_cnt) {
14478 		/* if program passed verifier, update used_maps in bpf_prog_info */
14479 		env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt,
14480 							  sizeof(env->used_maps[0]),
14481 							  GFP_KERNEL);
14482 
14483 		if (!env->prog->aux->used_maps) {
14484 			ret = -ENOMEM;
14485 			goto err_release_maps;
14486 		}
14487 
14488 		memcpy(env->prog->aux->used_maps, env->used_maps,
14489 		       sizeof(env->used_maps[0]) * env->used_map_cnt);
14490 		env->prog->aux->used_map_cnt = env->used_map_cnt;
14491 	}
14492 	if (env->used_btf_cnt) {
14493 		/* if program passed verifier, update used_btfs in bpf_prog_aux */
14494 		env->prog->aux->used_btfs = kmalloc_array(env->used_btf_cnt,
14495 							  sizeof(env->used_btfs[0]),
14496 							  GFP_KERNEL);
14497 		if (!env->prog->aux->used_btfs) {
14498 			ret = -ENOMEM;
14499 			goto err_release_maps;
14500 		}
14501 
14502 		memcpy(env->prog->aux->used_btfs, env->used_btfs,
14503 		       sizeof(env->used_btfs[0]) * env->used_btf_cnt);
14504 		env->prog->aux->used_btf_cnt = env->used_btf_cnt;
14505 	}
14506 	if (env->used_map_cnt || env->used_btf_cnt) {
14507 		/* program is valid. Convert pseudo bpf_ld_imm64 into generic
14508 		 * bpf_ld_imm64 instructions
14509 		 */
14510 		convert_pseudo_ld_imm64(env);
14511 	}
14512 
14513 	adjust_btf_func(env);
14514 
14515 err_release_maps:
14516 	if (!env->prog->aux->used_maps)
14517 		/* if we didn't copy map pointers into bpf_prog_info, release
14518 		 * them now. Otherwise free_used_maps() will release them.
14519 		 */
14520 		release_maps(env);
14521 	if (!env->prog->aux->used_btfs)
14522 		release_btfs(env);
14523 
14524 	/* extension progs temporarily inherit the attach_type of their targets
14525 	 * for verification purposes, so set it back to zero before returning
14526 	 */
14527 	if (env->prog->type == BPF_PROG_TYPE_EXT)
14528 		env->prog->expected_attach_type = 0;
14529 
14530 	*prog = env->prog;
14531 err_unlock:
14532 	if (!is_priv)
14533 		mutex_unlock(&bpf_verifier_lock);
14534 	vfree(env->insn_aux_data);
14535 err_free_env:
14536 	kfree(env);
14537 	return ret;
14538 }
14539