xref: /openbmc/linux/kernel/bpf/verifier.c (revision a48acad7)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
3  * Copyright (c) 2016 Facebook
4  * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
5  */
6 #include <uapi/linux/btf.h>
7 #include <linux/bpf-cgroup.h>
8 #include <linux/kernel.h>
9 #include <linux/types.h>
10 #include <linux/slab.h>
11 #include <linux/bpf.h>
12 #include <linux/btf.h>
13 #include <linux/bpf_verifier.h>
14 #include <linux/filter.h>
15 #include <net/netlink.h>
16 #include <linux/file.h>
17 #include <linux/vmalloc.h>
18 #include <linux/stringify.h>
19 #include <linux/bsearch.h>
20 #include <linux/sort.h>
21 #include <linux/perf_event.h>
22 #include <linux/ctype.h>
23 #include <linux/error-injection.h>
24 #include <linux/bpf_lsm.h>
25 #include <linux/btf_ids.h>
26 #include <linux/poison.h>
27 
28 #include "disasm.h"
29 
30 static const struct bpf_verifier_ops * const bpf_verifier_ops[] = {
31 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
32 	[_id] = & _name ## _verifier_ops,
33 #define BPF_MAP_TYPE(_id, _ops)
34 #define BPF_LINK_TYPE(_id, _name)
35 #include <linux/bpf_types.h>
36 #undef BPF_PROG_TYPE
37 #undef BPF_MAP_TYPE
38 #undef BPF_LINK_TYPE
39 };
40 
41 /* bpf_check() is a static code analyzer that walks eBPF program
42  * instruction by instruction and updates register/stack state.
43  * All paths of conditional branches are analyzed until 'bpf_exit' insn.
44  *
45  * The first pass is depth-first-search to check that the program is a DAG.
46  * It rejects the following programs:
47  * - larger than BPF_MAXINSNS insns
48  * - if a loop is present (detected via a back-edge)
49  * - unreachable insns exist (shouldn't be a forest; program = one function)
50  * - out of bounds or malformed jumps
51  * The second pass is all possible path descent from the 1st insn.
52  * Since it's analyzing all paths through the program, the length of the
53  * analysis is limited to 64k insn, which may be hit even if the total number
54  * of insns is less than 4K, when there are too many branches that change
55  * stack/regs. The number of 'branches to be analyzed' is limited to 1k.
56  *
57  * On entry to each instruction, each register has a type, and the instruction
58  * changes the types of the registers depending on instruction semantics.
59  * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is
60  * copied to R1.
61  *
62  * All registers are 64-bit.
63  * R0 - return register
64  * R1-R5 argument passing registers
65  * R6-R9 callee saved registers
66  * R10 - frame pointer read-only
67  *
68  * At the start of BPF program the register R1 contains a pointer to bpf_context
69  * and has type PTR_TO_CTX.
70  *
71  * Verifier tracks arithmetic operations on pointers in case:
72  *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
73  *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
74  * 1st insn copies R10 (which has FRAME_PTR) type into R1
75  * and 2nd arithmetic instruction is pattern matched to recognize
76  * that it wants to construct a pointer to some element within stack.
77  * So after 2nd insn, the register R1 has type PTR_TO_STACK
78  * (and -20 constant is saved for further stack bounds checking).
79  * Meaning that this reg is a pointer to stack plus known immediate constant.
80  *
81  * Most of the time the registers have SCALAR_VALUE type, which
82  * means the register has some value, but it's not a valid pointer.
83  * (e.g. a pointer minus a pointer becomes a SCALAR_VALUE)
84  *
85  * When verifier sees load or store instructions the type of base register
86  * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK, PTR_TO_SOCKET. These are
87  * four pointer types recognized by check_mem_access() function.
88  *
89  * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value'
90  * and the range of [ptr, ptr + map's value_size) is accessible.
91  *
92  * Registers used to pass values to function calls are checked against
93  * the function's argument constraints.
94  *
95  * ARG_PTR_TO_MAP_KEY is one such argument constraint.
96  * It means that the register type passed to this function must be
97  * PTR_TO_STACK and it will be used inside the function as a
98  * 'pointer to map element key'.
99  *
100  * For example the argument constraints for bpf_map_lookup_elem():
101  *   .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
102  *   .arg1_type = ARG_CONST_MAP_PTR,
103  *   .arg2_type = ARG_PTR_TO_MAP_KEY,
104  *
105  * ret_type says that this function returns 'pointer to map elem value or null'.
106  * The function expects the 1st argument to be a const pointer to 'struct bpf_map'
107  * and the 2nd argument to be a pointer to stack, which will be used inside
108  * the helper function as a pointer to map element key.
109  *
110  * On the kernel side the helper function looks like:
111  * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
112  * {
113  *    struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
114  *    void *key = (void *) (unsigned long) r2;
115  *    void *value;
116  *
117  *    here kernel can access 'key' and 'map' pointers safely, knowing that
118  *    [key, key + map->key_size) bytes are valid and were initialized on
119  *    the stack of eBPF program.
120  * }
121  *
122  * Corresponding eBPF program may look like:
123  *    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),  // after this insn R2 type is FRAME_PTR
124  *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
125  *    BPF_LD_MAP_FD(BPF_REG_1, map_fd),      // after this insn R1 type is CONST_PTR_TO_MAP
126  *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
127  * Here the verifier looks at the prototype of map_lookup_elem() and sees:
128  * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok.
129  * Now the verifier knows that this map has a key of R1->map_ptr->key_size bytes.
130  *
131  * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far.
132  * Now the verifier checks that [R2, R2 + map's key_size) are within stack limits
133  * and were initialized prior to this call.
134  * If it's ok, then the verifier allows this BPF_CALL insn and looks at
135  * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets
136  * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function
137  * returns either pointer to map value or NULL.
138  *
139  * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
140  * insn, the register holding that pointer in the true branch changes state to
141  * PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false
142  * branch. See check_cond_jmp_op().
143  *
144  * After the call R0 is set to return type of the function and registers R1-R5
145  * are set to NOT_INIT to indicate that they are no longer readable.
146  *
147  * The following reference types represent a potential reference to a kernel
148  * resource which, after first being allocated, must be checked and freed by
149  * the BPF program:
150  * - PTR_TO_SOCKET_OR_NULL, PTR_TO_SOCKET
151  *
152  * When the verifier sees a helper call return a reference type, it allocates a
153  * pointer id for the reference and stores it in the current function state.
154  * Similar to the way that PTR_TO_MAP_VALUE_OR_NULL is converted into
155  * PTR_TO_MAP_VALUE, PTR_TO_SOCKET_OR_NULL becomes PTR_TO_SOCKET when the type
156  * passes through a NULL-check conditional. For the branch wherein the state is
157  * changed to CONST_IMM, the verifier releases the reference.
158  *
159  * For each helper function that allocates a reference, such as
160  * bpf_sk_lookup_tcp(), there is a corresponding release function, such as
161  * bpf_sk_release(). When a reference type passes into the release function,
162  * the verifier also releases the reference. If any unchecked or unreleased
163  * reference remains at the end of the program, the verifier rejects it.
164  */
165 
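/* As a rough illustration of the reference-tracking rules above, a conforming
 * program fragment might look like the following sketch (argument setup for
 * the lookup helper is omitted for brevity; this is not a complete program):
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_sk_lookup_tcp),
 *                                            // R0 = PTR_TO_SOCKET_OR_NULL + ref id
 *    BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),  // NULL check; known-NULL branch drops the ref
 *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),    // R1 = PTR_TO_SOCKET (checked)
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_sk_release),
 *                                            // reference released
 *    BPF_EXIT_INSN(),
 * Omitting the bpf_sk_release() call (or the NULL check) would leave an
 * unreleased or unchecked reference at BPF_EXIT and the program would be
 * rejected.
 */
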
166 /* verifier_state + insn_idx are pushed to stack when branch is encountered */
167 struct bpf_verifier_stack_elem {
168 	/* verifier state is 'st'
169 	 * before processing instruction 'insn_idx'
170 	 * and after processing instruction 'prev_insn_idx'
171 	 */
172 	struct bpf_verifier_state st;
173 	int insn_idx;
174 	int prev_insn_idx;
175 	struct bpf_verifier_stack_elem *next;
176 	/* length of verifier log at the time this state was pushed on stack */
177 	u32 log_pos;
178 };
179 
180 #define BPF_COMPLEXITY_LIMIT_JMP_SEQ	8192
181 #define BPF_COMPLEXITY_LIMIT_STATES	64
182 
183 #define BPF_MAP_KEY_POISON	(1ULL << 63)
184 #define BPF_MAP_KEY_SEEN	(1ULL << 62)
185 
186 #define BPF_MAP_PTR_UNPRIV	1UL
187 #define BPF_MAP_PTR_POISON	((void *)((0xeB9FUL << 1) +	\
188 					  POISON_POINTER_DELTA))
189 #define BPF_MAP_PTR(X)		((struct bpf_map *)((X) & ~BPF_MAP_PTR_UNPRIV))
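/* A rough sketch of how these bits are packed: map_ptr_state stores the
 * struct bpf_map pointer itself and reuses its (alignment-guaranteed) low bit
 * to remember whether an unprivileged program used the map, while
 * BPF_MAP_PTR_POISON marks instructions whose map pointer is not one known
 * constant.  For example:
 *    aux->map_ptr_state = (unsigned long)map | BPF_MAP_PTR_UNPRIV;
 *    BPF_MAP_PTR(aux->map_ptr_state);   // recovers 'map'
 *    bpf_map_ptr_unpriv(aux);           // true
 * map_key_state similarly keeps a constant key value in the low bits and uses
 * bits 62 and 63 as the SEEN and POISON flags.
 */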
190 
191 static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx);
192 static int release_reference(struct bpf_verifier_env *env, int ref_obj_id);
193 
194 static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux)
195 {
196 	return BPF_MAP_PTR(aux->map_ptr_state) == BPF_MAP_PTR_POISON;
197 }
198 
199 static bool bpf_map_ptr_unpriv(const struct bpf_insn_aux_data *aux)
200 {
201 	return aux->map_ptr_state & BPF_MAP_PTR_UNPRIV;
202 }
203 
204 static void bpf_map_ptr_store(struct bpf_insn_aux_data *aux,
205 			      const struct bpf_map *map, bool unpriv)
206 {
207 	BUILD_BUG_ON((unsigned long)BPF_MAP_PTR_POISON & BPF_MAP_PTR_UNPRIV);
208 	unpriv |= bpf_map_ptr_unpriv(aux);
209 	aux->map_ptr_state = (unsigned long)map |
210 			     (unpriv ? BPF_MAP_PTR_UNPRIV : 0UL);
211 }
212 
213 static bool bpf_map_key_poisoned(const struct bpf_insn_aux_data *aux)
214 {
215 	return aux->map_key_state & BPF_MAP_KEY_POISON;
216 }
217 
218 static bool bpf_map_key_unseen(const struct bpf_insn_aux_data *aux)
219 {
220 	return !(aux->map_key_state & BPF_MAP_KEY_SEEN);
221 }
222 
223 static u64 bpf_map_key_immediate(const struct bpf_insn_aux_data *aux)
224 {
225 	return aux->map_key_state & ~(BPF_MAP_KEY_SEEN | BPF_MAP_KEY_POISON);
226 }
227 
228 static void bpf_map_key_store(struct bpf_insn_aux_data *aux, u64 state)
229 {
230 	bool poisoned = bpf_map_key_poisoned(aux);
231 
232 	aux->map_key_state = state | BPF_MAP_KEY_SEEN |
233 			     (poisoned ? BPF_MAP_KEY_POISON : 0ULL);
234 }
235 
236 static bool bpf_pseudo_call(const struct bpf_insn *insn)
237 {
238 	return insn->code == (BPF_JMP | BPF_CALL) &&
239 	       insn->src_reg == BPF_PSEUDO_CALL;
240 }
241 
242 static bool bpf_pseudo_kfunc_call(const struct bpf_insn *insn)
243 {
244 	return insn->code == (BPF_JMP | BPF_CALL) &&
245 	       insn->src_reg == BPF_PSEUDO_KFUNC_CALL;
246 }
247 
248 struct bpf_call_arg_meta {
249 	struct bpf_map *map_ptr;
250 	bool raw_mode;
251 	bool pkt_access;
252 	u8 release_regno;
253 	int regno;
254 	int access_size;
255 	int mem_size;
256 	u64 msize_max_value;
257 	int ref_obj_id;
258 	int map_uid;
259 	int func_id;
260 	struct btf *btf;
261 	u32 btf_id;
262 	struct btf *ret_btf;
263 	u32 ret_btf_id;
264 	u32 subprogno;
265 	struct btf_field *kptr_field;
266 	u8 uninit_dynptr_regno;
267 };
268 
269 struct btf *btf_vmlinux;
270 
271 static DEFINE_MUTEX(bpf_verifier_lock);
272 
273 static const struct bpf_line_info *
274 find_linfo(const struct bpf_verifier_env *env, u32 insn_off)
275 {
276 	const struct bpf_line_info *linfo;
277 	const struct bpf_prog *prog;
278 	u32 i, nr_linfo;
279 
280 	prog = env->prog;
281 	nr_linfo = prog->aux->nr_linfo;
282 
283 	if (!nr_linfo || insn_off >= prog->len)
284 		return NULL;
285 
286 	linfo = prog->aux->linfo;
287 	for (i = 1; i < nr_linfo; i++)
288 		if (insn_off < linfo[i].insn_off)
289 			break;
290 
291 	return &linfo[i - 1];
292 }
293 
294 void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt,
295 		       va_list args)
296 {
297 	unsigned int n;
298 
299 	n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args);
300 
301 	WARN_ONCE(n >= BPF_VERIFIER_TMP_LOG_SIZE - 1,
302 		  "verifier log line truncated - local buffer too short\n");
303 
304 	if (log->level == BPF_LOG_KERNEL) {
305 		bool newline = n > 0 && log->kbuf[n - 1] == '\n';
306 
307 		pr_err("BPF: %s%s", log->kbuf, newline ? "" : "\n");
308 		return;
309 	}
310 
311 	n = min(log->len_total - log->len_used - 1, n);
312 	log->kbuf[n] = '\0';
313 	if (!copy_to_user(log->ubuf + log->len_used, log->kbuf, n + 1))
314 		log->len_used += n;
315 	else
316 		log->ubuf = NULL;
317 }
318 
319 static void bpf_vlog_reset(struct bpf_verifier_log *log, u32 new_pos)
320 {
321 	char zero = 0;
322 
323 	if (!bpf_verifier_log_needed(log))
324 		return;
325 
326 	log->len_used = new_pos;
327 	if (put_user(zero, log->ubuf + new_pos))
328 		log->ubuf = NULL;
329 }
330 
331 /* log_level controls verbosity level of eBPF verifier.
332  * bpf_verifier_log_write() is used to dump the verification trace to the log,
333  * so the user can figure out what's wrong with the program
334  */
335 __printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
336 					   const char *fmt, ...)
337 {
338 	va_list args;
339 
340 	if (!bpf_verifier_log_needed(&env->log))
341 		return;
342 
343 	va_start(args, fmt);
344 	bpf_verifier_vlog(&env->log, fmt, args);
345 	va_end(args);
346 }
347 EXPORT_SYMBOL_GPL(bpf_verifier_log_write);
348 
349 __printf(2, 3) static void verbose(void *private_data, const char *fmt, ...)
350 {
351 	struct bpf_verifier_env *env = private_data;
352 	va_list args;
353 
354 	if (!bpf_verifier_log_needed(&env->log))
355 		return;
356 
357 	va_start(args, fmt);
358 	bpf_verifier_vlog(&env->log, fmt, args);
359 	va_end(args);
360 }
361 
362 __printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
363 			    const char *fmt, ...)
364 {
365 	va_list args;
366 
367 	if (!bpf_verifier_log_needed(log))
368 		return;
369 
370 	va_start(args, fmt);
371 	bpf_verifier_vlog(log, fmt, args);
372 	va_end(args);
373 }
374 EXPORT_SYMBOL_GPL(bpf_log);
375 
376 static const char *ltrim(const char *s)
377 {
378 	while (isspace(*s))
379 		s++;
380 
381 	return s;
382 }
383 
384 __printf(3, 4) static void verbose_linfo(struct bpf_verifier_env *env,
385 					 u32 insn_off,
386 					 const char *prefix_fmt, ...)
387 {
388 	const struct bpf_line_info *linfo;
389 
390 	if (!bpf_verifier_log_needed(&env->log))
391 		return;
392 
393 	linfo = find_linfo(env, insn_off);
394 	if (!linfo || linfo == env->prev_linfo)
395 		return;
396 
397 	if (prefix_fmt) {
398 		va_list args;
399 
400 		va_start(args, prefix_fmt);
401 		bpf_verifier_vlog(&env->log, prefix_fmt, args);
402 		va_end(args);
403 	}
404 
405 	verbose(env, "%s\n",
406 		ltrim(btf_name_by_offset(env->prog->aux->btf,
407 					 linfo->line_off)));
408 
409 	env->prev_linfo = linfo;
410 }
411 
412 static void verbose_invalid_scalar(struct bpf_verifier_env *env,
413 				   struct bpf_reg_state *reg,
414 				   struct tnum *range, const char *ctx,
415 				   const char *reg_name)
416 {
417 	char tn_buf[48];
418 
419 	verbose(env, "At %s the register %s ", ctx, reg_name);
420 	if (!tnum_is_unknown(reg->var_off)) {
421 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
422 		verbose(env, "has value %s", tn_buf);
423 	} else {
424 		verbose(env, "has unknown scalar value");
425 	}
426 	tnum_strn(tn_buf, sizeof(tn_buf), *range);
427 	verbose(env, " should have been in %s\n", tn_buf);
428 }
429 
430 static bool type_is_pkt_pointer(enum bpf_reg_type type)
431 {
432 	type = base_type(type);
433 	return type == PTR_TO_PACKET ||
434 	       type == PTR_TO_PACKET_META;
435 }
436 
437 static bool type_is_sk_pointer(enum bpf_reg_type type)
438 {
439 	return type == PTR_TO_SOCKET ||
440 		type == PTR_TO_SOCK_COMMON ||
441 		type == PTR_TO_TCP_SOCK ||
442 		type == PTR_TO_XDP_SOCK;
443 }
444 
445 static bool reg_type_not_null(enum bpf_reg_type type)
446 {
447 	return type == PTR_TO_SOCKET ||
448 		type == PTR_TO_TCP_SOCK ||
449 		type == PTR_TO_MAP_VALUE ||
450 		type == PTR_TO_MAP_KEY ||
451 		type == PTR_TO_SOCK_COMMON;
452 }
453 
454 static struct btf_record *reg_btf_record(const struct bpf_reg_state *reg)
455 {
456 	struct btf_record *rec = NULL;
457 	struct btf_struct_meta *meta;
458 
459 	if (reg->type == PTR_TO_MAP_VALUE) {
460 		rec = reg->map_ptr->record;
461 	} else if (reg->type == (PTR_TO_BTF_ID | MEM_ALLOC)) {
462 		meta = btf_find_struct_meta(reg->btf, reg->btf_id);
463 		if (meta)
464 			rec = meta->record;
465 	}
466 	return rec;
467 }
468 
469 static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg)
470 {
471 	return btf_record_has_field(reg_btf_record(reg), BPF_SPIN_LOCK);
472 }
473 
474 static bool type_is_rdonly_mem(u32 type)
475 {
476 	return type & MEM_RDONLY;
477 }
478 
479 static bool type_may_be_null(u32 type)
480 {
481 	return type & PTR_MAYBE_NULL;
482 }
483 
484 static bool is_acquire_function(enum bpf_func_id func_id,
485 				const struct bpf_map *map)
486 {
487 	enum bpf_map_type map_type = map ? map->map_type : BPF_MAP_TYPE_UNSPEC;
488 
489 	if (func_id == BPF_FUNC_sk_lookup_tcp ||
490 	    func_id == BPF_FUNC_sk_lookup_udp ||
491 	    func_id == BPF_FUNC_skc_lookup_tcp ||
492 	    func_id == BPF_FUNC_ringbuf_reserve ||
493 	    func_id == BPF_FUNC_kptr_xchg)
494 		return true;
495 
496 	if (func_id == BPF_FUNC_map_lookup_elem &&
497 	    (map_type == BPF_MAP_TYPE_SOCKMAP ||
498 	     map_type == BPF_MAP_TYPE_SOCKHASH))
499 		return true;
500 
501 	return false;
502 }
503 
504 static bool is_ptr_cast_function(enum bpf_func_id func_id)
505 {
506 	return func_id == BPF_FUNC_tcp_sock ||
507 		func_id == BPF_FUNC_sk_fullsock ||
508 		func_id == BPF_FUNC_skc_to_tcp_sock ||
509 		func_id == BPF_FUNC_skc_to_tcp6_sock ||
510 		func_id == BPF_FUNC_skc_to_udp6_sock ||
511 		func_id == BPF_FUNC_skc_to_mptcp_sock ||
512 		func_id == BPF_FUNC_skc_to_tcp_timewait_sock ||
513 		func_id == BPF_FUNC_skc_to_tcp_request_sock;
514 }
515 
516 static bool is_dynptr_ref_function(enum bpf_func_id func_id)
517 {
518 	return func_id == BPF_FUNC_dynptr_data;
519 }
520 
521 static bool is_callback_calling_function(enum bpf_func_id func_id)
522 {
523 	return func_id == BPF_FUNC_for_each_map_elem ||
524 	       func_id == BPF_FUNC_timer_set_callback ||
525 	       func_id == BPF_FUNC_find_vma ||
526 	       func_id == BPF_FUNC_loop ||
527 	       func_id == BPF_FUNC_user_ringbuf_drain;
528 }
529 
530 static bool is_storage_get_function(enum bpf_func_id func_id)
531 {
532 	return func_id == BPF_FUNC_sk_storage_get ||
533 	       func_id == BPF_FUNC_inode_storage_get ||
534 	       func_id == BPF_FUNC_task_storage_get ||
535 	       func_id == BPF_FUNC_cgrp_storage_get;
536 }
537 
538 static bool helper_multiple_ref_obj_use(enum bpf_func_id func_id,
539 					const struct bpf_map *map)
540 {
541 	int ref_obj_uses = 0;
542 
543 	if (is_ptr_cast_function(func_id))
544 		ref_obj_uses++;
545 	if (is_acquire_function(func_id, map))
546 		ref_obj_uses++;
547 	if (is_dynptr_ref_function(func_id))
548 		ref_obj_uses++;
549 
550 	return ref_obj_uses > 1;
551 }
552 
553 static bool is_cmpxchg_insn(const struct bpf_insn *insn)
554 {
555 	return BPF_CLASS(insn->code) == BPF_STX &&
556 	       BPF_MODE(insn->code) == BPF_ATOMIC &&
557 	       insn->imm == BPF_CMPXCHG;
558 }
559 
560 /* string representation of 'enum bpf_reg_type'
561  *
562  * Note that reg_type_str() cannot appear more than once in a single verbose()
563  * statement.
564  */
565 static const char *reg_type_str(struct bpf_verifier_env *env,
566 				enum bpf_reg_type type)
567 {
568 	char postfix[16] = {0}, prefix[64] = {0};
569 	static const char * const str[] = {
570 		[NOT_INIT]		= "?",
571 		[SCALAR_VALUE]		= "scalar",
572 		[PTR_TO_CTX]		= "ctx",
573 		[CONST_PTR_TO_MAP]	= "map_ptr",
574 		[PTR_TO_MAP_VALUE]	= "map_value",
575 		[PTR_TO_STACK]		= "fp",
576 		[PTR_TO_PACKET]		= "pkt",
577 		[PTR_TO_PACKET_META]	= "pkt_meta",
578 		[PTR_TO_PACKET_END]	= "pkt_end",
579 		[PTR_TO_FLOW_KEYS]	= "flow_keys",
580 		[PTR_TO_SOCKET]		= "sock",
581 		[PTR_TO_SOCK_COMMON]	= "sock_common",
582 		[PTR_TO_TCP_SOCK]	= "tcp_sock",
583 		[PTR_TO_TP_BUFFER]	= "tp_buffer",
584 		[PTR_TO_XDP_SOCK]	= "xdp_sock",
585 		[PTR_TO_BTF_ID]		= "ptr_",
586 		[PTR_TO_MEM]		= "mem",
587 		[PTR_TO_BUF]		= "buf",
588 		[PTR_TO_FUNC]		= "func",
589 		[PTR_TO_MAP_KEY]	= "map_key",
590 		[PTR_TO_DYNPTR]		= "dynptr_ptr",
591 	};
592 
593 	if (type & PTR_MAYBE_NULL) {
594 		if (base_type(type) == PTR_TO_BTF_ID)
595 			strncpy(postfix, "or_null_", 16);
596 		else
597 			strncpy(postfix, "_or_null", 16);
598 	}
599 
600 	snprintf(prefix, sizeof(prefix), "%s%s%s%s%s%s%s",
601 		 type & MEM_RDONLY ? "rdonly_" : "",
602 		 type & MEM_RINGBUF ? "ringbuf_" : "",
603 		 type & MEM_USER ? "user_" : "",
604 		 type & MEM_PERCPU ? "percpu_" : "",
605 		 type & MEM_RCU ? "rcu_" : "",
606 		 type & PTR_UNTRUSTED ? "untrusted_" : "",
607 		 type & PTR_TRUSTED ? "trusted_" : ""
608 	);
609 
610 	snprintf(env->type_str_buf, TYPE_STR_BUF_LEN, "%s%s%s",
611 		 prefix, str[base_type(type)], postfix);
612 	return env->type_str_buf;
613 }
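
/* A few illustrative renderings (approximate): PTR_TO_MAP_VALUE | PTR_MAYBE_NULL
 * prints as "map_value_or_null", PTR_TO_MEM | MEM_RDONLY as "rdonly_mem", and
 * PTR_TO_BTF_ID | PTR_MAYBE_NULL as "ptr_or_null_" (the struct name is appended
 * by the caller, see print_verifier_state()).
 */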
614 
615 static char slot_type_char[] = {
616 	[STACK_INVALID]	= '?',
617 	[STACK_SPILL]	= 'r',
618 	[STACK_MISC]	= 'm',
619 	[STACK_ZERO]	= '0',
620 	[STACK_DYNPTR]	= 'd',
621 };
622 
623 static void print_liveness(struct bpf_verifier_env *env,
624 			   enum bpf_reg_liveness live)
625 {
626 	if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN | REG_LIVE_DONE))
627 		verbose(env, "_");
628 	if (live & REG_LIVE_READ)
629 		verbose(env, "r");
630 	if (live & REG_LIVE_WRITTEN)
631 		verbose(env, "w");
632 	if (live & REG_LIVE_DONE)
633 		verbose(env, "D");
634 }
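
/* In the verifier log this shows up as a short suffix on the register name,
 * e.g. "R1_w=ctx(...)" for a register that was written in this state, or
 * "R6_rw=..." for one that was both read and written (rough sketch of the
 * output format).
 */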
635 
636 static int get_spi(s32 off)
637 {
638 	return (-off - 1) / BPF_REG_SIZE;
639 }
640 
641 static bool is_spi_bounds_valid(struct bpf_func_state *state, int spi, int nr_slots)
642 {
643 	int allocated_slots = state->allocated_stack / BPF_REG_SIZE;
644 
645 	/* We need to check that slots between [spi - nr_slots + 1, spi] are
646 	 * within [0, allocated_stack).
647 	 *
648 	 * Please note that the spi grows downwards. For example, a dynptr
649 	 * takes the size of two stack slots; the first slot will be at
650 	 * spi and the second slot will be at spi - 1.
651 	 */
652 	return spi - nr_slots + 1 >= 0 && spi < allocated_slots;
653 }
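
/* Worked example (sketch): a dynptr at stack offset -16 gives
 * spi = get_spi(-16) = (16 - 1) / BPF_REG_SIZE = 1, so its two 8-byte slots
 * live at spi = 1 and spi - 1 = 0; is_spi_bounds_valid() then requires
 * allocated_stack to cover at least 2 slots (16 bytes).
 */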
654 
655 static struct bpf_func_state *func(struct bpf_verifier_env *env,
656 				   const struct bpf_reg_state *reg)
657 {
658 	struct bpf_verifier_state *cur = env->cur_state;
659 
660 	return cur->frame[reg->frameno];
661 }
662 
663 static const char *kernel_type_name(const struct btf *btf, u32 id)
664 {
665 	return btf_name_by_offset(btf, btf_type_by_id(btf, id)->name_off);
666 }
667 
668 static void mark_reg_scratched(struct bpf_verifier_env *env, u32 regno)
669 {
670 	env->scratched_regs |= 1U << regno;
671 }
672 
673 static void mark_stack_slot_scratched(struct bpf_verifier_env *env, u32 spi)
674 {
675 	env->scratched_stack_slots |= 1ULL << spi;
676 }
677 
678 static bool reg_scratched(const struct bpf_verifier_env *env, u32 regno)
679 {
680 	return (env->scratched_regs >> regno) & 1;
681 }
682 
683 static bool stack_slot_scratched(const struct bpf_verifier_env *env, u64 regno)
684 {
685 	return (env->scratched_stack_slots >> regno) & 1;
686 }
687 
688 static bool verifier_state_scratched(const struct bpf_verifier_env *env)
689 {
690 	return env->scratched_regs || env->scratched_stack_slots;
691 }
692 
693 static void mark_verifier_state_clean(struct bpf_verifier_env *env)
694 {
695 	env->scratched_regs = 0U;
696 	env->scratched_stack_slots = 0ULL;
697 }
698 
699 /* Used for printing the entire verifier state. */
700 static void mark_verifier_state_scratched(struct bpf_verifier_env *env)
701 {
702 	env->scratched_regs = ~0U;
703 	env->scratched_stack_slots = ~0ULL;
704 }
705 
706 static enum bpf_dynptr_type arg_to_dynptr_type(enum bpf_arg_type arg_type)
707 {
708 	switch (arg_type & DYNPTR_TYPE_FLAG_MASK) {
709 	case DYNPTR_TYPE_LOCAL:
710 		return BPF_DYNPTR_TYPE_LOCAL;
711 	case DYNPTR_TYPE_RINGBUF:
712 		return BPF_DYNPTR_TYPE_RINGBUF;
713 	default:
714 		return BPF_DYNPTR_TYPE_INVALID;
715 	}
716 }
717 
718 static bool dynptr_type_refcounted(enum bpf_dynptr_type type)
719 {
720 	return type == BPF_DYNPTR_TYPE_RINGBUF;
721 }
722 
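/* Mark the two stack slots backing a dynptr as STACK_DYNPTR and, for
 * refcounted dynptr types (e.g. ringbuf), acquire a reference id so that a
 * missing release can be detected later (brief summary of the function below).
 */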
723 static int mark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
724 				   enum bpf_arg_type arg_type, int insn_idx)
725 {
726 	struct bpf_func_state *state = func(env, reg);
727 	enum bpf_dynptr_type type;
728 	int spi, i, id;
729 
730 	spi = get_spi(reg->off);
731 
732 	if (!is_spi_bounds_valid(state, spi, BPF_DYNPTR_NR_SLOTS))
733 		return -EINVAL;
734 
735 	for (i = 0; i < BPF_REG_SIZE; i++) {
736 		state->stack[spi].slot_type[i] = STACK_DYNPTR;
737 		state->stack[spi - 1].slot_type[i] = STACK_DYNPTR;
738 	}
739 
740 	type = arg_to_dynptr_type(arg_type);
741 	if (type == BPF_DYNPTR_TYPE_INVALID)
742 		return -EINVAL;
743 
744 	state->stack[spi].spilled_ptr.dynptr.first_slot = true;
745 	state->stack[spi].spilled_ptr.dynptr.type = type;
746 	state->stack[spi - 1].spilled_ptr.dynptr.type = type;
747 
748 	if (dynptr_type_refcounted(type)) {
749 		/* The id is used to track proper releasing */
750 		id = acquire_reference_state(env, insn_idx);
751 		if (id < 0)
752 			return id;
753 
754 		state->stack[spi].spilled_ptr.id = id;
755 		state->stack[spi - 1].spilled_ptr.id = id;
756 	}
757 
758 	return 0;
759 }
760 
761 static int unmark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
762 {
763 	struct bpf_func_state *state = func(env, reg);
764 	int spi, i;
765 
766 	spi = get_spi(reg->off);
767 
768 	if (!is_spi_bounds_valid(state, spi, BPF_DYNPTR_NR_SLOTS))
769 		return -EINVAL;
770 
771 	for (i = 0; i < BPF_REG_SIZE; i++) {
772 		state->stack[spi].slot_type[i] = STACK_INVALID;
773 		state->stack[spi - 1].slot_type[i] = STACK_INVALID;
774 	}
775 
776 	/* Invalidate any slices associated with this dynptr */
777 	if (dynptr_type_refcounted(state->stack[spi].spilled_ptr.dynptr.type)) {
778 		release_reference(env, state->stack[spi].spilled_ptr.id);
779 		state->stack[spi].spilled_ptr.id = 0;
780 		state->stack[spi - 1].spilled_ptr.id = 0;
781 	}
782 
783 	state->stack[spi].spilled_ptr.dynptr.first_slot = false;
784 	state->stack[spi].spilled_ptr.dynptr.type = 0;
785 	state->stack[spi - 1].spilled_ptr.dynptr.type = 0;
786 
787 	return 0;
788 }
789 
790 static bool is_dynptr_reg_valid_uninit(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
791 {
792 	struct bpf_func_state *state = func(env, reg);
793 	int spi = get_spi(reg->off);
794 	int i;
795 
796 	if (!is_spi_bounds_valid(state, spi, BPF_DYNPTR_NR_SLOTS))
797 		return true;
798 
799 	for (i = 0; i < BPF_REG_SIZE; i++) {
800 		if (state->stack[spi].slot_type[i] == STACK_DYNPTR ||
801 		    state->stack[spi - 1].slot_type[i] == STACK_DYNPTR)
802 			return false;
803 	}
804 
805 	return true;
806 }
807 
808 bool is_dynptr_reg_valid_init(struct bpf_verifier_env *env,
809 			      struct bpf_reg_state *reg)
810 {
811 	struct bpf_func_state *state = func(env, reg);
812 	int spi = get_spi(reg->off);
813 	int i;
814 
815 	if (!is_spi_bounds_valid(state, spi, BPF_DYNPTR_NR_SLOTS) ||
816 	    !state->stack[spi].spilled_ptr.dynptr.first_slot)
817 		return false;
818 
819 	for (i = 0; i < BPF_REG_SIZE; i++) {
820 		if (state->stack[spi].slot_type[i] != STACK_DYNPTR ||
821 		    state->stack[spi - 1].slot_type[i] != STACK_DYNPTR)
822 			return false;
823 	}
824 
825 	return true;
826 }
827 
828 bool is_dynptr_type_expected(struct bpf_verifier_env *env,
829 			     struct bpf_reg_state *reg,
830 			     enum bpf_arg_type arg_type)
831 {
832 	struct bpf_func_state *state = func(env, reg);
833 	enum bpf_dynptr_type dynptr_type;
834 	int spi = get_spi(reg->off);
835 
836 	/* ARG_PTR_TO_DYNPTR takes any type of dynptr */
837 	if (arg_type == ARG_PTR_TO_DYNPTR)
838 		return true;
839 
840 	dynptr_type = arg_to_dynptr_type(arg_type);
841 
842 	return state->stack[spi].spilled_ptr.dynptr.type == dynptr_type;
843 }
844 
845 /* The reg state of a pointer or a bounded scalar was saved when
846  * it was spilled to the stack.
847  */
848 static bool is_spilled_reg(const struct bpf_stack_state *stack)
849 {
850 	return stack->slot_type[BPF_REG_SIZE - 1] == STACK_SPILL;
851 }
852 
853 static void scrub_spilled_slot(u8 *stype)
854 {
855 	if (*stype != STACK_INVALID)
856 		*stype = STACK_MISC;
857 }
858 
859 static void print_verifier_state(struct bpf_verifier_env *env,
860 				 const struct bpf_func_state *state,
861 				 bool print_all)
862 {
863 	const struct bpf_reg_state *reg;
864 	enum bpf_reg_type t;
865 	int i;
866 
867 	if (state->frameno)
868 		verbose(env, " frame%d:", state->frameno);
869 	for (i = 0; i < MAX_BPF_REG; i++) {
870 		reg = &state->regs[i];
871 		t = reg->type;
872 		if (t == NOT_INIT)
873 			continue;
874 		if (!print_all && !reg_scratched(env, i))
875 			continue;
876 		verbose(env, " R%d", i);
877 		print_liveness(env, reg->live);
878 		verbose(env, "=");
879 		if (t == SCALAR_VALUE && reg->precise)
880 			verbose(env, "P");
881 		if ((t == SCALAR_VALUE || t == PTR_TO_STACK) &&
882 		    tnum_is_const(reg->var_off)) {
883 			/* reg->off should be 0 for SCALAR_VALUE */
884 			verbose(env, "%s", t == SCALAR_VALUE ? "" : reg_type_str(env, t));
885 			verbose(env, "%lld", reg->var_off.value + reg->off);
886 		} else {
887 			const char *sep = "";
888 
889 			verbose(env, "%s", reg_type_str(env, t));
890 			if (base_type(t) == PTR_TO_BTF_ID)
891 				verbose(env, "%s", kernel_type_name(reg->btf, reg->btf_id));
892 			verbose(env, "(");
893 /*
894  * _a stands for append, was shortened to avoid multiline statements below.
895  * This macro is used to output a comma separated list of attributes.
896  */
897 #define verbose_a(fmt, ...) ({ verbose(env, "%s" fmt, sep, __VA_ARGS__); sep = ","; })
898 
899 			if (reg->id)
900 				verbose_a("id=%d", reg->id);
901 			if (reg->ref_obj_id)
902 				verbose_a("ref_obj_id=%d", reg->ref_obj_id);
903 			if (t != SCALAR_VALUE)
904 				verbose_a("off=%d", reg->off);
905 			if (type_is_pkt_pointer(t))
906 				verbose_a("r=%d", reg->range);
907 			else if (base_type(t) == CONST_PTR_TO_MAP ||
908 				 base_type(t) == PTR_TO_MAP_KEY ||
909 				 base_type(t) == PTR_TO_MAP_VALUE)
910 				verbose_a("ks=%d,vs=%d",
911 					  reg->map_ptr->key_size,
912 					  reg->map_ptr->value_size);
913 			if (tnum_is_const(reg->var_off)) {
914 				/* Typically an immediate SCALAR_VALUE, but
915 				 * could be a pointer whose offset is too big
916 				 * for reg->off
917 				 */
918 				verbose_a("imm=%llx", reg->var_off.value);
919 			} else {
920 				if (reg->smin_value != reg->umin_value &&
921 				    reg->smin_value != S64_MIN)
922 					verbose_a("smin=%lld", (long long)reg->smin_value);
923 				if (reg->smax_value != reg->umax_value &&
924 				    reg->smax_value != S64_MAX)
925 					verbose_a("smax=%lld", (long long)reg->smax_value);
926 				if (reg->umin_value != 0)
927 					verbose_a("umin=%llu", (unsigned long long)reg->umin_value);
928 				if (reg->umax_value != U64_MAX)
929 					verbose_a("umax=%llu", (unsigned long long)reg->umax_value);
930 				if (!tnum_is_unknown(reg->var_off)) {
931 					char tn_buf[48];
932 
933 					tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
934 					verbose_a("var_off=%s", tn_buf);
935 				}
936 				if (reg->s32_min_value != reg->smin_value &&
937 				    reg->s32_min_value != S32_MIN)
938 					verbose_a("s32_min=%d", (int)(reg->s32_min_value));
939 				if (reg->s32_max_value != reg->smax_value &&
940 				    reg->s32_max_value != S32_MAX)
941 					verbose_a("s32_max=%d", (int)(reg->s32_max_value));
942 				if (reg->u32_min_value != reg->umin_value &&
943 				    reg->u32_min_value != U32_MIN)
944 					verbose_a("u32_min=%d", (int)(reg->u32_min_value));
945 				if (reg->u32_max_value != reg->umax_value &&
946 				    reg->u32_max_value != U32_MAX)
947 					verbose_a("u32_max=%d", (int)(reg->u32_max_value));
948 			}
949 #undef verbose_a
950 
951 			verbose(env, ")");
952 		}
953 	}
954 	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
955 		char types_buf[BPF_REG_SIZE + 1];
956 		bool valid = false;
957 		int j;
958 
959 		for (j = 0; j < BPF_REG_SIZE; j++) {
960 			if (state->stack[i].slot_type[j] != STACK_INVALID)
961 				valid = true;
962 			types_buf[j] = slot_type_char[
963 					state->stack[i].slot_type[j]];
964 		}
965 		types_buf[BPF_REG_SIZE] = 0;
966 		if (!valid)
967 			continue;
968 		if (!print_all && !stack_slot_scratched(env, i))
969 			continue;
970 		verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
971 		print_liveness(env, state->stack[i].spilled_ptr.live);
972 		if (is_spilled_reg(&state->stack[i])) {
973 			reg = &state->stack[i].spilled_ptr;
974 			t = reg->type;
975 			verbose(env, "=%s", t == SCALAR_VALUE ? "" : reg_type_str(env, t));
976 			if (t == SCALAR_VALUE && reg->precise)
977 				verbose(env, "P");
978 			if (t == SCALAR_VALUE && tnum_is_const(reg->var_off))
979 				verbose(env, "%lld", reg->var_off.value + reg->off);
980 		} else {
981 			verbose(env, "=%s", types_buf);
982 		}
983 	}
984 	if (state->acquired_refs && state->refs[0].id) {
985 		verbose(env, " refs=%d", state->refs[0].id);
986 		for (i = 1; i < state->acquired_refs; i++)
987 			if (state->refs[i].id)
988 				verbose(env, ",%d", state->refs[i].id);
989 	}
990 	if (state->in_callback_fn)
991 		verbose(env, " cb");
992 	if (state->in_async_callback_fn)
993 		verbose(env, " async_cb");
994 	verbose(env, "\n");
995 	mark_verifier_state_clean(env);
996 }
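
/* For reference, a (roughly) typical line emitted by the function above looks
 * like:
 *   R0_w=map_value_or_null(id=1,off=0,ks=4,vs=8,imm=0) R10=fp0 fp-8=mmmm????
 * i.e. scratched registers with type, liveness and attributes, followed by the
 * in-use stack slots.
 */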
997 
998 static inline u32 vlog_alignment(u32 pos)
999 {
1000 	return round_up(max(pos + BPF_LOG_MIN_ALIGNMENT / 2, BPF_LOG_ALIGNMENT),
1001 			BPF_LOG_MIN_ALIGNMENT) - pos - 1;
1002 }
1003 
1004 static void print_insn_state(struct bpf_verifier_env *env,
1005 			     const struct bpf_func_state *state)
1006 {
1007 	if (env->prev_log_len && env->prev_log_len == env->log.len_used) {
1008 		/* remove new line character */
1009 		bpf_vlog_reset(&env->log, env->prev_log_len - 1);
1010 		verbose(env, "%*c;", vlog_alignment(env->prev_insn_print_len), ' ');
1011 	} else {
1012 		verbose(env, "%d:", env->insn_idx);
1013 	}
1014 	print_verifier_state(env, state, false);
1015 }
1016 
1017 /* copy array src of length n * size bytes to dst. dst is reallocated if it's too
1018  * small to hold src. This is different from krealloc since we don't want to preserve
1019  * the contents of dst.
1020  *
1021  * Leaves dst untouched if src is NULL or length is zero. Returns NULL if memory could
1022  * not be allocated.
1023  */
1024 static void *copy_array(void *dst, const void *src, size_t n, size_t size, gfp_t flags)
1025 {
1026 	size_t bytes;
1027 
1028 	if (ZERO_OR_NULL_PTR(src))
1029 		goto out;
1030 
1031 	if (unlikely(check_mul_overflow(n, size, &bytes)))
1032 		return NULL;
1033 
1034 	if (ksize(dst) < ksize(src)) {
1035 		kfree(dst);
1036 		dst = kmalloc_track_caller(kmalloc_size_roundup(bytes), flags);
1037 		if (!dst)
1038 			return NULL;
1039 	}
1040 
1041 	memcpy(dst, src, bytes);
1042 out:
1043 	return dst ? dst : ZERO_SIZE_PTR;
1044 }
1045 
1046 /* resize an array from old_n items to new_n items. the array is reallocated if it's too
1047  * small to hold new_n items. new items are zeroed out if the array grows.
1048  *
1049  * Contrary to krealloc_array, does not free arr if new_n is zero.
1050  */
1051 static void *realloc_array(void *arr, size_t old_n, size_t new_n, size_t size)
1052 {
1053 	size_t alloc_size;
1054 	void *new_arr;
1055 
1056 	if (!new_n || old_n == new_n)
1057 		goto out;
1058 
1059 	alloc_size = kmalloc_size_roundup(size_mul(new_n, size));
1060 	new_arr = krealloc(arr, alloc_size, GFP_KERNEL);
1061 	if (!new_arr) {
1062 		kfree(arr);
1063 		return NULL;
1064 	}
1065 	arr = new_arr;
1066 
1067 	if (new_n > old_n)
1068 		memset(arr + old_n * size, 0, (new_n - old_n) * size);
1069 
1070 out:
1071 	return arr ? arr : ZERO_SIZE_PTR;
1072 }
1073 
1074 static int copy_reference_state(struct bpf_func_state *dst, const struct bpf_func_state *src)
1075 {
1076 	dst->refs = copy_array(dst->refs, src->refs, src->acquired_refs,
1077 			       sizeof(struct bpf_reference_state), GFP_KERNEL);
1078 	if (!dst->refs)
1079 		return -ENOMEM;
1080 
1081 	dst->acquired_refs = src->acquired_refs;
1082 	return 0;
1083 }
1084 
1085 static int copy_stack_state(struct bpf_func_state *dst, const struct bpf_func_state *src)
1086 {
1087 	size_t n = src->allocated_stack / BPF_REG_SIZE;
1088 
1089 	dst->stack = copy_array(dst->stack, src->stack, n, sizeof(struct bpf_stack_state),
1090 				GFP_KERNEL);
1091 	if (!dst->stack)
1092 		return -ENOMEM;
1093 
1094 	dst->allocated_stack = src->allocated_stack;
1095 	return 0;
1096 }
1097 
1098 static int resize_reference_state(struct bpf_func_state *state, size_t n)
1099 {
1100 	state->refs = realloc_array(state->refs, state->acquired_refs, n,
1101 				    sizeof(struct bpf_reference_state));
1102 	if (!state->refs)
1103 		return -ENOMEM;
1104 
1105 	state->acquired_refs = n;
1106 	return 0;
1107 }
1108 
1109 static int grow_stack_state(struct bpf_func_state *state, int size)
1110 {
1111 	size_t old_n = state->allocated_stack / BPF_REG_SIZE, n = size / BPF_REG_SIZE;
1112 
1113 	if (old_n >= n)
1114 		return 0;
1115 
1116 	state->stack = realloc_array(state->stack, old_n, n, sizeof(struct bpf_stack_state));
1117 	if (!state->stack)
1118 		return -ENOMEM;
1119 
1120 	state->allocated_stack = size;
1121 	return 0;
1122 }
1123 
1124 /* Acquire a pointer id from the env and update the state->refs to include
1125  * this new pointer reference.
1126  * On success, returns a valid pointer id to associate with the register.
1127  * On failure, returns a negative errno.
1128  */
1129 static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx)
1130 {
1131 	struct bpf_func_state *state = cur_func(env);
1132 	int new_ofs = state->acquired_refs;
1133 	int id, err;
1134 
1135 	err = resize_reference_state(state, state->acquired_refs + 1);
1136 	if (err)
1137 		return err;
1138 	id = ++env->id_gen;
1139 	state->refs[new_ofs].id = id;
1140 	state->refs[new_ofs].insn_idx = insn_idx;
1141 	state->refs[new_ofs].callback_ref = state->in_callback_fn ? state->frameno : 0;
1142 
1143 	return id;
1144 }
1145 
1146 /* release function corresponding to acquire_reference_state(). Idempotent. */
1147 static int release_reference_state(struct bpf_func_state *state, int ptr_id)
1148 {
1149 	int i, last_idx;
1150 
1151 	last_idx = state->acquired_refs - 1;
1152 	for (i = 0; i < state->acquired_refs; i++) {
1153 		if (state->refs[i].id == ptr_id) {
1154 			/* Cannot release caller references in callbacks */
1155 			if (state->in_callback_fn && state->refs[i].callback_ref != state->frameno)
1156 				return -EINVAL;
1157 			if (last_idx && i != last_idx)
1158 				memcpy(&state->refs[i], &state->refs[last_idx],
1159 				       sizeof(*state->refs));
1160 			memset(&state->refs[last_idx], 0, sizeof(*state->refs));
1161 			state->acquired_refs--;
1162 			return 0;
1163 		}
1164 	}
1165 	return -EINVAL;
1166 }
1167 
1168 static void free_func_state(struct bpf_func_state *state)
1169 {
1170 	if (!state)
1171 		return;
1172 	kfree(state->refs);
1173 	kfree(state->stack);
1174 	kfree(state);
1175 }
1176 
1177 static void clear_jmp_history(struct bpf_verifier_state *state)
1178 {
1179 	kfree(state->jmp_history);
1180 	state->jmp_history = NULL;
1181 	state->jmp_history_cnt = 0;
1182 }
1183 
1184 static void free_verifier_state(struct bpf_verifier_state *state,
1185 				bool free_self)
1186 {
1187 	int i;
1188 
1189 	for (i = 0; i <= state->curframe; i++) {
1190 		free_func_state(state->frame[i]);
1191 		state->frame[i] = NULL;
1192 	}
1193 	clear_jmp_history(state);
1194 	if (free_self)
1195 		kfree(state);
1196 }
1197 
1198 /* copy verifier state from src to dst growing dst stack space
1199  * when necessary to accommodate larger src stack
1200  */
1201 static int copy_func_state(struct bpf_func_state *dst,
1202 			   const struct bpf_func_state *src)
1203 {
1204 	int err;
1205 
1206 	memcpy(dst, src, offsetof(struct bpf_func_state, acquired_refs));
1207 	err = copy_reference_state(dst, src);
1208 	if (err)
1209 		return err;
1210 	return copy_stack_state(dst, src);
1211 }
1212 
1213 static int copy_verifier_state(struct bpf_verifier_state *dst_state,
1214 			       const struct bpf_verifier_state *src)
1215 {
1216 	struct bpf_func_state *dst;
1217 	int i, err;
1218 
1219 	dst_state->jmp_history = copy_array(dst_state->jmp_history, src->jmp_history,
1220 					    src->jmp_history_cnt, sizeof(struct bpf_idx_pair),
1221 					    GFP_USER);
1222 	if (!dst_state->jmp_history)
1223 		return -ENOMEM;
1224 	dst_state->jmp_history_cnt = src->jmp_history_cnt;
1225 
1226 	/* if dst has more stack frames than src, free them */
1227 	for (i = src->curframe + 1; i <= dst_state->curframe; i++) {
1228 		free_func_state(dst_state->frame[i]);
1229 		dst_state->frame[i] = NULL;
1230 	}
1231 	dst_state->speculative = src->speculative;
1232 	dst_state->active_rcu_lock = src->active_rcu_lock;
1233 	dst_state->curframe = src->curframe;
1234 	dst_state->active_lock.ptr = src->active_lock.ptr;
1235 	dst_state->active_lock.id = src->active_lock.id;
1236 	dst_state->branches = src->branches;
1237 	dst_state->parent = src->parent;
1238 	dst_state->first_insn_idx = src->first_insn_idx;
1239 	dst_state->last_insn_idx = src->last_insn_idx;
1240 	for (i = 0; i <= src->curframe; i++) {
1241 		dst = dst_state->frame[i];
1242 		if (!dst) {
1243 			dst = kzalloc(sizeof(*dst), GFP_KERNEL);
1244 			if (!dst)
1245 				return -ENOMEM;
1246 			dst_state->frame[i] = dst;
1247 		}
1248 		err = copy_func_state(dst, src->frame[i]);
1249 		if (err)
1250 			return err;
1251 	}
1252 	return 0;
1253 }
1254 
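/* A state whose 'branches' count has dropped to zero has been fully explored,
 * so it can safely be considered for state pruning; the decrement below is
 * propagated up through the parent states (brief summary).
 */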
1255 static void update_branch_counts(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
1256 {
1257 	while (st) {
1258 		u32 br = --st->branches;
1259 
1260 		/* WARN_ON(br > 1) technically makes sense here,
1261 		 * but see comment in push_stack(), hence:
1262 		 */
1263 		WARN_ONCE((int)br < 0,
1264 			  "BUG update_branch_counts:branches_to_explore=%d\n",
1265 			  br);
1266 		if (br)
1267 			break;
1268 		st = st->parent;
1269 	}
1270 }
1271 
1272 static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx,
1273 		     int *insn_idx, bool pop_log)
1274 {
1275 	struct bpf_verifier_state *cur = env->cur_state;
1276 	struct bpf_verifier_stack_elem *elem, *head = env->head;
1277 	int err;
1278 
1279 	if (env->head == NULL)
1280 		return -ENOENT;
1281 
1282 	if (cur) {
1283 		err = copy_verifier_state(cur, &head->st);
1284 		if (err)
1285 			return err;
1286 	}
1287 	if (pop_log)
1288 		bpf_vlog_reset(&env->log, head->log_pos);
1289 	if (insn_idx)
1290 		*insn_idx = head->insn_idx;
1291 	if (prev_insn_idx)
1292 		*prev_insn_idx = head->prev_insn_idx;
1293 	elem = head->next;
1294 	free_verifier_state(&head->st, false);
1295 	kfree(head);
1296 	env->head = elem;
1297 	env->stack_size--;
1298 	return 0;
1299 }
1300 
1301 static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
1302 					     int insn_idx, int prev_insn_idx,
1303 					     bool speculative)
1304 {
1305 	struct bpf_verifier_state *cur = env->cur_state;
1306 	struct bpf_verifier_stack_elem *elem;
1307 	int err;
1308 
1309 	elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL);
1310 	if (!elem)
1311 		goto err;
1312 
1313 	elem->insn_idx = insn_idx;
1314 	elem->prev_insn_idx = prev_insn_idx;
1315 	elem->next = env->head;
1316 	elem->log_pos = env->log.len_used;
1317 	env->head = elem;
1318 	env->stack_size++;
1319 	err = copy_verifier_state(&elem->st, cur);
1320 	if (err)
1321 		goto err;
1322 	elem->st.speculative |= speculative;
1323 	if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) {
1324 		verbose(env, "The sequence of %d jumps is too complex.\n",
1325 			env->stack_size);
1326 		goto err;
1327 	}
1328 	if (elem->st.parent) {
1329 		++elem->st.parent->branches;
1330 		/* WARN_ON(branches > 2) technically makes sense here,
1331 		 * but
1332 		 * 1. speculative states will bump 'branches' for non-branch
1333 		 * instructions
1334 		 * 2. is_state_visited() heuristics may decide not to create
1335 		 * a new state for a sequence of branches and all such current
1336 		 * and cloned states will be pointing to a single parent state
1337 		 * which might have large 'branches' count.
1338 		 */
1339 	}
1340 	return &elem->st;
1341 err:
1342 	free_verifier_state(env->cur_state, true);
1343 	env->cur_state = NULL;
1344 	/* pop all elements and return */
1345 	while (!pop_stack(env, NULL, NULL, false));
1346 	return NULL;
1347 }
1348 
1349 #define CALLER_SAVED_REGS 6
1350 static const int caller_saved[CALLER_SAVED_REGS] = {
1351 	BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
1352 };
1353 
1354 static void __mark_reg_not_init(const struct bpf_verifier_env *env,
1355 				struct bpf_reg_state *reg);
1356 
1357 /* This helper doesn't clear reg->id */
1358 static void ___mark_reg_known(struct bpf_reg_state *reg, u64 imm)
1359 {
1360 	reg->var_off = tnum_const(imm);
1361 	reg->smin_value = (s64)imm;
1362 	reg->smax_value = (s64)imm;
1363 	reg->umin_value = imm;
1364 	reg->umax_value = imm;
1365 
1366 	reg->s32_min_value = (s32)imm;
1367 	reg->s32_max_value = (s32)imm;
1368 	reg->u32_min_value = (u32)imm;
1369 	reg->u32_max_value = (u32)imm;
1370 }
1371 
1372 /* Mark the unknown part of a register (variable offset or scalar value) as
1373  * known to have the value @imm.
1374  */
1375 static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm)
1376 {
1377 	/* Clear id, off, and union(map_ptr, range) */
1378 	memset(((u8 *)reg) + sizeof(reg->type), 0,
1379 	       offsetof(struct bpf_reg_state, var_off) - sizeof(reg->type));
1380 	___mark_reg_known(reg, imm);
1381 }
1382 
1383 static void __mark_reg32_known(struct bpf_reg_state *reg, u64 imm)
1384 {
1385 	reg->var_off = tnum_const_subreg(reg->var_off, imm);
1386 	reg->s32_min_value = (s32)imm;
1387 	reg->s32_max_value = (s32)imm;
1388 	reg->u32_min_value = (u32)imm;
1389 	reg->u32_max_value = (u32)imm;
1390 }
1391 
1392 /* Mark the 'variable offset' part of a register as zero.  This should be
1393  * used only on registers holding a pointer type.
1394  */
1395 static void __mark_reg_known_zero(struct bpf_reg_state *reg)
1396 {
1397 	__mark_reg_known(reg, 0);
1398 }
1399 
1400 static void __mark_reg_const_zero(struct bpf_reg_state *reg)
1401 {
1402 	__mark_reg_known(reg, 0);
1403 	reg->type = SCALAR_VALUE;
1404 }
1405 
1406 static void mark_reg_known_zero(struct bpf_verifier_env *env,
1407 				struct bpf_reg_state *regs, u32 regno)
1408 {
1409 	if (WARN_ON(regno >= MAX_BPF_REG)) {
1410 		verbose(env, "mark_reg_known_zero(regs, %u)\n", regno);
1411 		/* Something bad happened, let's kill all regs */
1412 		for (regno = 0; regno < MAX_BPF_REG; regno++)
1413 			__mark_reg_not_init(env, regs + regno);
1414 		return;
1415 	}
1416 	__mark_reg_known_zero(regs + regno);
1417 }
1418 
1419 static void mark_ptr_not_null_reg(struct bpf_reg_state *reg)
1420 {
1421 	if (base_type(reg->type) == PTR_TO_MAP_VALUE) {
1422 		const struct bpf_map *map = reg->map_ptr;
1423 
1424 		if (map->inner_map_meta) {
1425 			reg->type = CONST_PTR_TO_MAP;
1426 			reg->map_ptr = map->inner_map_meta;
1427 			/* transfer reg's id, which is unique for every map_lookup_elem,
1428 			 * as the UID of the inner map.
1429 			 */
1430 			if (btf_record_has_field(map->inner_map_meta->record, BPF_TIMER))
1431 				reg->map_uid = reg->id;
1432 		} else if (map->map_type == BPF_MAP_TYPE_XSKMAP) {
1433 			reg->type = PTR_TO_XDP_SOCK;
1434 		} else if (map->map_type == BPF_MAP_TYPE_SOCKMAP ||
1435 			   map->map_type == BPF_MAP_TYPE_SOCKHASH) {
1436 			reg->type = PTR_TO_SOCKET;
1437 		} else {
1438 			reg->type = PTR_TO_MAP_VALUE;
1439 		}
1440 		return;
1441 	}
1442 
1443 	reg->type &= ~PTR_MAYBE_NULL;
1444 }
1445 
1446 static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg)
1447 {
1448 	return type_is_pkt_pointer(reg->type);
1449 }
1450 
1451 static bool reg_is_pkt_pointer_any(const struct bpf_reg_state *reg)
1452 {
1453 	return reg_is_pkt_pointer(reg) ||
1454 	       reg->type == PTR_TO_PACKET_END;
1455 }
1456 
1457 /* Unmodified PTR_TO_PACKET[_META,_END] register from ctx access. */
1458 static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg,
1459 				    enum bpf_reg_type which)
1460 {
1461 	/* The register can already have a range from prior markings.
1462 	 * This is fine as long as it hasn't been advanced from its
1463 	 * origin.
1464 	 */
1465 	return reg->type == which &&
1466 	       reg->id == 0 &&
1467 	       reg->off == 0 &&
1468 	       tnum_equals_const(reg->var_off, 0);
1469 }
1470 
1471 /* Reset the min/max bounds of a register */
1472 static void __mark_reg_unbounded(struct bpf_reg_state *reg)
1473 {
1474 	reg->smin_value = S64_MIN;
1475 	reg->smax_value = S64_MAX;
1476 	reg->umin_value = 0;
1477 	reg->umax_value = U64_MAX;
1478 
1479 	reg->s32_min_value = S32_MIN;
1480 	reg->s32_max_value = S32_MAX;
1481 	reg->u32_min_value = 0;
1482 	reg->u32_max_value = U32_MAX;
1483 }
1484 
1485 static void __mark_reg64_unbounded(struct bpf_reg_state *reg)
1486 {
1487 	reg->smin_value = S64_MIN;
1488 	reg->smax_value = S64_MAX;
1489 	reg->umin_value = 0;
1490 	reg->umax_value = U64_MAX;
1491 }
1492 
1493 static void __mark_reg32_unbounded(struct bpf_reg_state *reg)
1494 {
1495 	reg->s32_min_value = S32_MIN;
1496 	reg->s32_max_value = S32_MAX;
1497 	reg->u32_min_value = 0;
1498 	reg->u32_max_value = U32_MAX;
1499 }
1500 
1501 static void __update_reg32_bounds(struct bpf_reg_state *reg)
1502 {
1503 	struct tnum var32_off = tnum_subreg(reg->var_off);
1504 
1505 	/* min signed is max(sign bit) | min(other bits) */
1506 	reg->s32_min_value = max_t(s32, reg->s32_min_value,
1507 			var32_off.value | (var32_off.mask & S32_MIN));
1508 	/* max signed is min(sign bit) | max(other bits) */
1509 	reg->s32_max_value = min_t(s32, reg->s32_max_value,
1510 			var32_off.value | (var32_off.mask & S32_MAX));
1511 	reg->u32_min_value = max_t(u32, reg->u32_min_value, (u32)var32_off.value);
1512 	reg->u32_max_value = min(reg->u32_max_value,
1513 				 (u32)(var32_off.value | var32_off.mask));
1514 }
1515 
1516 static void __update_reg64_bounds(struct bpf_reg_state *reg)
1517 {
1518 	/* min signed is max(sign bit) | min(other bits) */
1519 	reg->smin_value = max_t(s64, reg->smin_value,
1520 				reg->var_off.value | (reg->var_off.mask & S64_MIN));
1521 	/* max signed is min(sign bit) | max(other bits) */
1522 	reg->smax_value = min_t(s64, reg->smax_value,
1523 				reg->var_off.value | (reg->var_off.mask & S64_MAX));
1524 	reg->umin_value = max(reg->umin_value, reg->var_off.value);
1525 	reg->umax_value = min(reg->umax_value,
1526 			      reg->var_off.value | reg->var_off.mask);
1527 }
1528 
1529 static void __update_reg_bounds(struct bpf_reg_state *reg)
1530 {
1531 	__update_reg32_bounds(reg);
1532 	__update_reg64_bounds(reg);
1533 }
1534 
1535 /* Uses signed min/max values to inform unsigned, and vice-versa */
1536 static void __reg32_deduce_bounds(struct bpf_reg_state *reg)
1537 {
1538 	/* Learn sign from signed bounds.
1539 	 * If we cannot cross the sign boundary, then signed and unsigned bounds
1540 	 * are the same, so combine.  This works even in the negative case, e.g.
1541 	 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
1542 	 */
1543 	if (reg->s32_min_value >= 0 || reg->s32_max_value < 0) {
1544 		reg->s32_min_value = reg->u32_min_value =
1545 			max_t(u32, reg->s32_min_value, reg->u32_min_value);
1546 		reg->s32_max_value = reg->u32_max_value =
1547 			min_t(u32, reg->s32_max_value, reg->u32_max_value);
1548 		return;
1549 	}
1550 	/* Learn sign from unsigned bounds.  Signed bounds cross the sign
1551 	 * boundary, so we must be careful.
1552 	 */
1553 	if ((s32)reg->u32_max_value >= 0) {
1554 		/* Positive.  We can't learn anything from the smin, but smax
1555 		 * is positive, hence safe.
1556 		 */
1557 		reg->s32_min_value = reg->u32_min_value;
1558 		reg->s32_max_value = reg->u32_max_value =
1559 			min_t(u32, reg->s32_max_value, reg->u32_max_value);
1560 	} else if ((s32)reg->u32_min_value < 0) {
1561 		/* Negative.  We can't learn anything from the smax, but smin
1562 		 * is negative, hence safe.
1563 		 */
1564 		reg->s32_min_value = reg->u32_min_value =
1565 			max_t(u32, reg->s32_min_value, reg->u32_min_value);
1566 		reg->s32_max_value = reg->u32_max_value;
1567 	}
1568 }
1569 
1570 static void __reg64_deduce_bounds(struct bpf_reg_state *reg)
1571 {
1572 	/* Learn sign from signed bounds.
1573 	 * If we cannot cross the sign boundary, then signed and unsigned bounds
1574 	 * are the same, so combine.  This works even in the negative case, e.g.
1575 	 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
1576 	 */
1577 	if (reg->smin_value >= 0 || reg->smax_value < 0) {
1578 		reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
1579 							  reg->umin_value);
1580 		reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
1581 							  reg->umax_value);
1582 		return;
1583 	}
1584 	/* Learn sign from unsigned bounds.  Signed bounds cross the sign
1585 	 * boundary, so we must be careful.
1586 	 */
1587 	if ((s64)reg->umax_value >= 0) {
1588 		/* Positive.  We can't learn anything from the smin, but smax
1589 		 * is positive, hence safe.
1590 		 */
1591 		reg->smin_value = reg->umin_value;
1592 		reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
1593 							  reg->umax_value);
1594 	} else if ((s64)reg->umin_value < 0) {
1595 		/* Negative.  We can't learn anything from the smax, but smin
1596 		 * is negative, hence safe.
1597 		 */
1598 		reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
1599 							  reg->umin_value);
1600 		reg->smax_value = reg->umax_value;
1601 	}
1602 }
1603 
1604 static void __reg_deduce_bounds(struct bpf_reg_state *reg)
1605 {
1606 	__reg32_deduce_bounds(reg);
1607 	__reg64_deduce_bounds(reg);
1608 }
1609 
1610 /* Attempts to improve var_off based on unsigned min/max information */
1611 static void __reg_bound_offset(struct bpf_reg_state *reg)
1612 {
1613 	struct tnum var64_off = tnum_intersect(reg->var_off,
1614 					       tnum_range(reg->umin_value,
1615 							  reg->umax_value));
1616 	struct tnum var32_off = tnum_intersect(tnum_subreg(reg->var_off),
1617 						tnum_range(reg->u32_min_value,
1618 							   reg->u32_max_value));
1619 
1620 	reg->var_off = tnum_or(tnum_clear_subreg(var64_off), var32_off);
1621 }
1622 
1623 static void reg_bounds_sync(struct bpf_reg_state *reg)
1624 {
1625 	/* We might have learned new bounds from the var_off. */
1626 	__update_reg_bounds(reg);
1627 	/* We might have learned something about the sign bit. */
1628 	__reg_deduce_bounds(reg);
1629 	/* We might have learned some bits from the bounds. */
1630 	__reg_bound_offset(reg);
1631 	/* Intersecting with the old var_off might have improved our bounds
1632 	 * slightly, e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
1633 	 * then new var_off is (0; 0x7f...fc) which improves our umax.
1634 	 */
1635 	__update_reg_bounds(reg);
1636 }
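
/* A small worked example of the sequence above (sketch): suppose
 * umax_value = 0x7f and var_off = (value 0, mask 0xfc).  __reg_bound_offset()
 * intersects var_off with tnum_range(0, 0x7f) = (0; 0x7f), giving
 * var_off = (0; 0x7c), and the final __update_reg_bounds() then tightens
 * umax_value to 0x7c.  (The 32-bit subregister is handled analogously.)
 */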
1637 
1638 static bool __reg32_bound_s64(s32 a)
1639 {
1640 	return a >= 0 && a <= S32_MAX;
1641 }
1642 
1643 static void __reg_assign_32_into_64(struct bpf_reg_state *reg)
1644 {
1645 	reg->umin_value = reg->u32_min_value;
1646 	reg->umax_value = reg->u32_max_value;
1647 
1648 	/* Attempt to pull the 32-bit signed bounds into the 64-bit bounds, but
1649 	 * they must be positive; otherwise set worst-case bounds and refine
1650 	 * them later from the tnum.
1651 	 */
1652 	if (__reg32_bound_s64(reg->s32_min_value) &&
1653 	    __reg32_bound_s64(reg->s32_max_value)) {
1654 		reg->smin_value = reg->s32_min_value;
1655 		reg->smax_value = reg->s32_max_value;
1656 	} else {
1657 		reg->smin_value = 0;
1658 		reg->smax_value = U32_MAX;
1659 	}
1660 }
1661 
1662 static void __reg_combine_32_into_64(struct bpf_reg_state *reg)
1663 {
1664 	/* Special case when the 64-bit register has its upper 32 bits
1665 	 * zeroed. Typically happens after a zext or <<32, >>32 sequence,
1666 	 * allowing us to use the 32-bit bounds directly.
1667 	 */
1668 	if (tnum_equals_const(tnum_clear_subreg(reg->var_off), 0)) {
1669 		__reg_assign_32_into_64(reg);
1670 	} else {
1671 		/* Otherwise the best we can do is push lower 32bit known and
1672 		 * unknown bits into register (var_off set from jmp logic)
1673 		 * then learn as much as possible from the 64-bit tnum
1674 		 * known and unknown bits. The previous smin/smax bounds are
1675 		 * invalid here because of jmp32 compare so mark them unknown
1676 		 * so they do not impact tnum bounds calculation.
1677 		 */
1678 		__mark_reg64_unbounded(reg);
1679 	}
1680 	reg_bounds_sync(reg);
1681 }
1682 
1683 static bool __reg64_bound_s32(s64 a)
1684 {
1685 	return a >= S32_MIN && a <= S32_MAX;
1686 }
1687 
1688 static bool __reg64_bound_u32(u64 a)
1689 {
1690 	return a >= U32_MIN && a <= U32_MAX;
1691 }
1692 
1693 static void __reg_combine_64_into_32(struct bpf_reg_state *reg)
1694 {
1695 	__mark_reg32_unbounded(reg);
1696 	if (__reg64_bound_s32(reg->smin_value) && __reg64_bound_s32(reg->smax_value)) {
1697 		reg->s32_min_value = (s32)reg->smin_value;
1698 		reg->s32_max_value = (s32)reg->smax_value;
1699 	}
1700 	if (__reg64_bound_u32(reg->umin_value) && __reg64_bound_u32(reg->umax_value)) {
1701 		reg->u32_min_value = (u32)reg->umin_value;
1702 		reg->u32_max_value = (u32)reg->umax_value;
1703 	}
1704 	reg_bounds_sync(reg);
1705 }
1706 
1707 /* Mark a register as having a completely unknown (scalar) value. */
1708 static void __mark_reg_unknown(const struct bpf_verifier_env *env,
1709 			       struct bpf_reg_state *reg)
1710 {
1711 	/*
1712 	 * Clear type, id, off, and union(map_ptr, range) and
1713 	 * padding between 'type' and union
1714 	 */
1715 	memset(reg, 0, offsetof(struct bpf_reg_state, var_off));
1716 	reg->type = SCALAR_VALUE;
1717 	reg->var_off = tnum_unknown;
1718 	reg->frameno = 0;
1719 	reg->precise = !env->bpf_capable;
1720 	__mark_reg_unbounded(reg);
1721 }
1722 
1723 static void mark_reg_unknown(struct bpf_verifier_env *env,
1724 			     struct bpf_reg_state *regs, u32 regno)
1725 {
1726 	if (WARN_ON(regno >= MAX_BPF_REG)) {
1727 		verbose(env, "mark_reg_unknown(regs, %u)\n", regno);
1728 		/* Something bad happened, let's kill all regs except FP */
1729 		for (regno = 0; regno < BPF_REG_FP; regno++)
1730 			__mark_reg_not_init(env, regs + regno);
1731 		return;
1732 	}
1733 	__mark_reg_unknown(env, regs + regno);
1734 }
1735 
1736 static void __mark_reg_not_init(const struct bpf_verifier_env *env,
1737 				struct bpf_reg_state *reg)
1738 {
1739 	__mark_reg_unknown(env, reg);
1740 	reg->type = NOT_INIT;
1741 }
1742 
1743 static void mark_reg_not_init(struct bpf_verifier_env *env,
1744 			      struct bpf_reg_state *regs, u32 regno)
1745 {
1746 	if (WARN_ON(regno >= MAX_BPF_REG)) {
1747 		verbose(env, "mark_reg_not_init(regs, %u)\n", regno);
1748 		/* Something bad happened, let's kill all regs except FP */
1749 		for (regno = 0; regno < BPF_REG_FP; regno++)
1750 			__mark_reg_not_init(env, regs + regno);
1751 		return;
1752 	}
1753 	__mark_reg_not_init(env, regs + regno);
1754 }
1755 
1756 static void mark_btf_ld_reg(struct bpf_verifier_env *env,
1757 			    struct bpf_reg_state *regs, u32 regno,
1758 			    enum bpf_reg_type reg_type,
1759 			    struct btf *btf, u32 btf_id,
1760 			    enum bpf_type_flag flag)
1761 {
1762 	if (reg_type == SCALAR_VALUE) {
1763 		mark_reg_unknown(env, regs, regno);
1764 		return;
1765 	}
1766 	mark_reg_known_zero(env, regs, regno);
1767 	regs[regno].type = PTR_TO_BTF_ID | flag;
1768 	regs[regno].btf = btf;
1769 	regs[regno].btf_id = btf_id;
1770 }
1771 
1772 #define DEF_NOT_SUBREG	(0)
1773 static void init_reg_state(struct bpf_verifier_env *env,
1774 			   struct bpf_func_state *state)
1775 {
1776 	struct bpf_reg_state *regs = state->regs;
1777 	int i;
1778 
1779 	for (i = 0; i < MAX_BPF_REG; i++) {
1780 		mark_reg_not_init(env, regs, i);
1781 		regs[i].live = REG_LIVE_NONE;
1782 		regs[i].parent = NULL;
1783 		regs[i].subreg_def = DEF_NOT_SUBREG;
1784 	}
1785 
1786 	/* frame pointer */
1787 	regs[BPF_REG_FP].type = PTR_TO_STACK;
1788 	mark_reg_known_zero(env, regs, BPF_REG_FP);
1789 	regs[BPF_REG_FP].frameno = state->frameno;
1790 }
1791 
1792 #define BPF_MAIN_FUNC (-1)
1793 static void init_func_state(struct bpf_verifier_env *env,
1794 			    struct bpf_func_state *state,
1795 			    int callsite, int frameno, int subprogno)
1796 {
1797 	state->callsite = callsite;
1798 	state->frameno = frameno;
1799 	state->subprogno = subprogno;
1800 	state->callback_ret_range = tnum_range(0, 0);
1801 	init_reg_state(env, state);
1802 	mark_verifier_state_scratched(env);
1803 }
1804 
1805 /* Similar to push_stack(), but for async callbacks */
1806 static struct bpf_verifier_state *push_async_cb(struct bpf_verifier_env *env,
1807 						int insn_idx, int prev_insn_idx,
1808 						int subprog)
1809 {
1810 	struct bpf_verifier_stack_elem *elem;
1811 	struct bpf_func_state *frame;
1812 
1813 	elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL);
1814 	if (!elem)
1815 		goto err;
1816 
1817 	elem->insn_idx = insn_idx;
1818 	elem->prev_insn_idx = prev_insn_idx;
1819 	elem->next = env->head;
1820 	elem->log_pos = env->log.len_used;
1821 	env->head = elem;
1822 	env->stack_size++;
1823 	if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) {
1824 		verbose(env,
1825 			"The sequence of %d jumps is too complex for async cb.\n",
1826 			env->stack_size);
1827 		goto err;
1828 	}
1829 	/* Unlike push_stack() do not copy_verifier_state().
1830 	 * The caller state doesn't matter.
1831 	 * This is an async callback. It starts with a fresh stack.
1832 	 * Initialize it similar to do_check_common().
1833 	 */
1834 	elem->st.branches = 1;
1835 	frame = kzalloc(sizeof(*frame), GFP_KERNEL);
1836 	if (!frame)
1837 		goto err;
1838 	init_func_state(env, frame,
1839 			BPF_MAIN_FUNC /* callsite */,
1840 			0 /* frameno within this callchain */,
1841 			subprog /* subprog number within this prog */);
1842 	elem->st.frame[0] = frame;
1843 	return &elem->st;
1844 err:
1845 	free_verifier_state(env->cur_state, true);
1846 	env->cur_state = NULL;
1847 	/* pop all elements and return */
1848 	while (!pop_stack(env, NULL, NULL, false));
1849 	return NULL;
1850 }
1851 
1852 
1853 enum reg_arg_type {
1854 	SRC_OP,		/* register is used as source operand */
1855 	DST_OP,		/* register is used as destination operand */
1856 	DST_OP_NO_MARK	/* same as above, check only, don't mark */
1857 };
1858 
1859 static int cmp_subprogs(const void *a, const void *b)
1860 {
1861 	return ((struct bpf_subprog_info *)a)->start -
1862 	       ((struct bpf_subprog_info *)b)->start;
1863 }
1864 
1865 static int find_subprog(struct bpf_verifier_env *env, int off)
1866 {
1867 	struct bpf_subprog_info *p;
1868 
1869 	p = bsearch(&off, env->subprog_info, env->subprog_cnt,
1870 		    sizeof(env->subprog_info[0]), cmp_subprogs);
1871 	if (!p)
1872 		return -ENOENT;
1873 	return p - env->subprog_info;
1874 
1875 }
1876 
1877 static int add_subprog(struct bpf_verifier_env *env, int off)
1878 {
1879 	int insn_cnt = env->prog->len;
1880 	int ret;
1881 
1882 	if (off >= insn_cnt || off < 0) {
1883 		verbose(env, "call to invalid destination\n");
1884 		return -EINVAL;
1885 	}
1886 	ret = find_subprog(env, off);
1887 	if (ret >= 0)
1888 		return ret;
1889 	if (env->subprog_cnt >= BPF_MAX_SUBPROGS) {
1890 		verbose(env, "too many subprograms\n");
1891 		return -E2BIG;
1892 	}
1893 	/* determine subprog starts. The end is one before the next starts */
1894 	env->subprog_info[env->subprog_cnt++].start = off;
1895 	sort(env->subprog_info, env->subprog_cnt,
1896 	     sizeof(env->subprog_info[0]), cmp_subprogs, NULL);
1897 	return env->subprog_cnt - 1;
1898 }
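
/* A hypothetical layout, for illustration: a program whose bpf-to-bpf calls
 * target insns 7 and 20 ends up with subprog_info[].start == {0, 7, 20} after
 * sorting; add_subprog_and_kfunc() later appends a fake entry whose start is
 * prog->len, so that subprog i always spans
 * [subprog_info[i].start, subprog_info[i + 1].start).
 */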
1899 
1900 #define MAX_KFUNC_DESCS 256
1901 #define MAX_KFUNC_BTFS	256
1902 
1903 struct bpf_kfunc_desc {
1904 	struct btf_func_model func_model;
1905 	u32 func_id;
1906 	s32 imm;
1907 	u16 offset;
1908 };
1909 
1910 struct bpf_kfunc_btf {
1911 	struct btf *btf;
1912 	struct module *module;
1913 	u16 offset;
1914 };
1915 
1916 struct bpf_kfunc_desc_tab {
1917 	struct bpf_kfunc_desc descs[MAX_KFUNC_DESCS];
1918 	u32 nr_descs;
1919 };
1920 
1921 struct bpf_kfunc_btf_tab {
1922 	struct bpf_kfunc_btf descs[MAX_KFUNC_BTFS];
1923 	u32 nr_descs;
1924 };
1925 
1926 static int kfunc_desc_cmp_by_id_off(const void *a, const void *b)
1927 {
1928 	const struct bpf_kfunc_desc *d0 = a;
1929 	const struct bpf_kfunc_desc *d1 = b;
1930 
1931 	/* func_id is not greater than BTF_MAX_TYPE */
1932 	return d0->func_id - d1->func_id ?: d0->offset - d1->offset;
1933 }
1934 
1935 static int kfunc_btf_cmp_by_off(const void *a, const void *b)
1936 {
1937 	const struct bpf_kfunc_btf *d0 = a;
1938 	const struct bpf_kfunc_btf *d1 = b;
1939 
1940 	return d0->offset - d1->offset;
1941 }
1942 
1943 static const struct bpf_kfunc_desc *
1944 find_kfunc_desc(const struct bpf_prog *prog, u32 func_id, u16 offset)
1945 {
1946 	struct bpf_kfunc_desc desc = {
1947 		.func_id = func_id,
1948 		.offset = offset,
1949 	};
1950 	struct bpf_kfunc_desc_tab *tab;
1951 
1952 	tab = prog->aux->kfunc_tab;
1953 	return bsearch(&desc, tab->descs, tab->nr_descs,
1954 		       sizeof(tab->descs[0]), kfunc_desc_cmp_by_id_off);
1955 }
1956 
1957 static struct btf *__find_kfunc_desc_btf(struct bpf_verifier_env *env,
1958 					 s16 offset)
1959 {
1960 	struct bpf_kfunc_btf kf_btf = { .offset = offset };
1961 	struct bpf_kfunc_btf_tab *tab;
1962 	struct bpf_kfunc_btf *b;
1963 	struct module *mod;
1964 	struct btf *btf;
1965 	int btf_fd;
1966 
1967 	tab = env->prog->aux->kfunc_btf_tab;
1968 	b = bsearch(&kf_btf, tab->descs, tab->nr_descs,
1969 		    sizeof(tab->descs[0]), kfunc_btf_cmp_by_off);
1970 	if (!b) {
1971 		if (tab->nr_descs == MAX_KFUNC_BTFS) {
1972 			verbose(env, "too many different module BTFs\n");
1973 			return ERR_PTR(-E2BIG);
1974 		}
1975 
1976 		if (bpfptr_is_null(env->fd_array)) {
1977 			verbose(env, "kfunc offset > 0 without fd_array is invalid\n");
1978 			return ERR_PTR(-EPROTO);
1979 		}
1980 
1981 		if (copy_from_bpfptr_offset(&btf_fd, env->fd_array,
1982 					    offset * sizeof(btf_fd),
1983 					    sizeof(btf_fd)))
1984 			return ERR_PTR(-EFAULT);
1985 
1986 		btf = btf_get_by_fd(btf_fd);
1987 		if (IS_ERR(btf)) {
1988 			verbose(env, "invalid module BTF fd specified\n");
1989 			return btf;
1990 		}
1991 
1992 		if (!btf_is_module(btf)) {
1993 			verbose(env, "BTF fd for kfunc is not a module BTF\n");
1994 			btf_put(btf);
1995 			return ERR_PTR(-EINVAL);
1996 		}
1997 
1998 		mod = btf_try_get_module(btf);
1999 		if (!mod) {
2000 			btf_put(btf);
2001 			return ERR_PTR(-ENXIO);
2002 		}
2003 
2004 		b = &tab->descs[tab->nr_descs++];
2005 		b->btf = btf;
2006 		b->module = mod;
2007 		b->offset = offset;
2008 
2009 		sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]),
2010 		     kfunc_btf_cmp_by_off, NULL);
2011 	}
2012 	return b->btf;
2013 }
2014 
2015 void bpf_free_kfunc_btf_tab(struct bpf_kfunc_btf_tab *tab)
2016 {
2017 	if (!tab)
2018 		return;
2019 
2020 	while (tab->nr_descs--) {
2021 		module_put(tab->descs[tab->nr_descs].module);
2022 		btf_put(tab->descs[tab->nr_descs].btf);
2023 	}
2024 	kfree(tab);
2025 }
2026 
2027 static struct btf *find_kfunc_desc_btf(struct bpf_verifier_env *env, s16 offset)
2028 {
2029 	if (offset) {
2030 		if (offset < 0) {
2031 			/* In the future, this could be allowed by interpreting the
2032 			 * offset as a u16, increasing the limit of the fd index into fd_array.
2033 			 */
2034 			verbose(env, "negative offset disallowed for kernel module function call\n");
2035 			return ERR_PTR(-EINVAL);
2036 		}
2037 
2038 		return __find_kfunc_desc_btf(env, offset);
2039 	}
2040 	return btf_vmlinux ?: ERR_PTR(-ENOENT);
2041 }
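
/* Usage sketch: for a kfunc call instruction, insn->off selects which BTF to
 * resolve insn->imm against. off == 0 means vmlinux BTF; a positive off is an
 * index into the fd_array supplied at program load time, and the fd found at
 * that index must refer to a module BTF (see __find_kfunc_desc_btf() above).
 */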
2042 
2043 static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id, s16 offset)
2044 {
2045 	const struct btf_type *func, *func_proto;
2046 	struct bpf_kfunc_btf_tab *btf_tab;
2047 	struct bpf_kfunc_desc_tab *tab;
2048 	struct bpf_prog_aux *prog_aux;
2049 	struct bpf_kfunc_desc *desc;
2050 	const char *func_name;
2051 	struct btf *desc_btf;
2052 	unsigned long call_imm;
2053 	unsigned long addr;
2054 	int err;
2055 
2056 	prog_aux = env->prog->aux;
2057 	tab = prog_aux->kfunc_tab;
2058 	btf_tab = prog_aux->kfunc_btf_tab;
2059 	if (!tab) {
2060 		if (!btf_vmlinux) {
2061 			verbose(env, "calling kernel function is not supported without CONFIG_DEBUG_INFO_BTF\n");
2062 			return -ENOTSUPP;
2063 		}
2064 
2065 		if (!env->prog->jit_requested) {
2066 			verbose(env, "JIT is required for calling kernel function\n");
2067 			return -ENOTSUPP;
2068 		}
2069 
2070 		if (!bpf_jit_supports_kfunc_call()) {
2071 			verbose(env, "JIT does not support calling kernel function\n");
2072 			return -ENOTSUPP;
2073 		}
2074 
2075 		if (!env->prog->gpl_compatible) {
2076 			verbose(env, "cannot call kernel function from non-GPL compatible program\n");
2077 			return -EINVAL;
2078 		}
2079 
2080 		tab = kzalloc(sizeof(*tab), GFP_KERNEL);
2081 		if (!tab)
2082 			return -ENOMEM;
2083 		prog_aux->kfunc_tab = tab;
2084 	}
2085 
2086 	/* func_id == 0 is always invalid, but instead of returning an error, be
2087 	 * conservative and defer the error until the dead code elimination pass,
2088 	 * so that invalid calls that get pruned out may still appear in BPF programs
2089 	 * loaded from userspace.  It is also required that offset be untouched
2090 	 * for such calls.
2091 	 */
2092 	if (!func_id && !offset)
2093 		return 0;
2094 
2095 	if (!btf_tab && offset) {
2096 		btf_tab = kzalloc(sizeof(*btf_tab), GFP_KERNEL);
2097 		if (!btf_tab)
2098 			return -ENOMEM;
2099 		prog_aux->kfunc_btf_tab = btf_tab;
2100 	}
2101 
2102 	desc_btf = find_kfunc_desc_btf(env, offset);
2103 	if (IS_ERR(desc_btf)) {
2104 		verbose(env, "failed to find BTF for kernel function\n");
2105 		return PTR_ERR(desc_btf);
2106 	}
2107 
2108 	if (find_kfunc_desc(env->prog, func_id, offset))
2109 		return 0;
2110 
2111 	if (tab->nr_descs == MAX_KFUNC_DESCS) {
2112 		verbose(env, "too many different kernel function calls\n");
2113 		return -E2BIG;
2114 	}
2115 
2116 	func = btf_type_by_id(desc_btf, func_id);
2117 	if (!func || !btf_type_is_func(func)) {
2118 		verbose(env, "kernel btf_id %u is not a function\n",
2119 			func_id);
2120 		return -EINVAL;
2121 	}
2122 	func_proto = btf_type_by_id(desc_btf, func->type);
2123 	if (!func_proto || !btf_type_is_func_proto(func_proto)) {
2124 		verbose(env, "kernel function btf_id %u does not have a valid func_proto\n",
2125 			func_id);
2126 		return -EINVAL;
2127 	}
2128 
2129 	func_name = btf_name_by_offset(desc_btf, func->name_off);
2130 	addr = kallsyms_lookup_name(func_name);
2131 	if (!addr) {
2132 		verbose(env, "cannot find address for kernel function %s\n",
2133 			func_name);
2134 		return -EINVAL;
2135 	}
2136 
2137 	call_imm = BPF_CALL_IMM(addr);
2138 	/* Check whether or not the relative offset overflows desc->imm */
2139 	if ((unsigned long)(s32)call_imm != call_imm) {
2140 		verbose(env, "address of kernel function %s is out of range\n",
2141 			func_name);
2142 		return -EINVAL;
2143 	}
2144 
2145 	desc = &tab->descs[tab->nr_descs++];
2146 	desc->func_id = func_id;
2147 	desc->imm = call_imm;
2148 	desc->offset = offset;
2149 	err = btf_distill_func_proto(&env->log, desc_btf,
2150 				     func_proto, func_name,
2151 				     &desc->func_model);
2152 	if (!err)
2153 		sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]),
2154 		     kfunc_desc_cmp_by_id_off, NULL);
2155 	return err;
2156 }
2157 
2158 static int kfunc_desc_cmp_by_imm(const void *a, const void *b)
2159 {
2160 	const struct bpf_kfunc_desc *d0 = a;
2161 	const struct bpf_kfunc_desc *d1 = b;
2162 
2163 	if (d0->imm > d1->imm)
2164 		return 1;
2165 	else if (d0->imm < d1->imm)
2166 		return -1;
2167 	return 0;
2168 }
2169 
2170 static void sort_kfunc_descs_by_imm(struct bpf_prog *prog)
2171 {
2172 	struct bpf_kfunc_desc_tab *tab;
2173 
2174 	tab = prog->aux->kfunc_tab;
2175 	if (!tab)
2176 		return;
2177 
2178 	sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]),
2179 	     kfunc_desc_cmp_by_imm, NULL);
2180 }
2181 
2182 bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog)
2183 {
2184 	return !!prog->aux->kfunc_tab;
2185 }
2186 
2187 const struct btf_func_model *
2188 bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
2189 			 const struct bpf_insn *insn)
2190 {
2191 	const struct bpf_kfunc_desc desc = {
2192 		.imm = insn->imm,
2193 	};
2194 	const struct bpf_kfunc_desc *res;
2195 	struct bpf_kfunc_desc_tab *tab;
2196 
2197 	tab = prog->aux->kfunc_tab;
2198 	res = bsearch(&desc, tab->descs, tab->nr_descs,
2199 		      sizeof(tab->descs[0]), kfunc_desc_cmp_by_imm);
2200 
2201 	return res ? &res->func_model : NULL;
2202 }
2203 
2204 static int add_subprog_and_kfunc(struct bpf_verifier_env *env)
2205 {
2206 	struct bpf_subprog_info *subprog = env->subprog_info;
2207 	struct bpf_insn *insn = env->prog->insnsi;
2208 	int i, ret, insn_cnt = env->prog->len;
2209 
2210 	/* Add entry function. */
2211 	ret = add_subprog(env, 0);
2212 	if (ret)
2213 		return ret;
2214 
2215 	for (i = 0; i < insn_cnt; i++, insn++) {
2216 		if (!bpf_pseudo_func(insn) && !bpf_pseudo_call(insn) &&
2217 		    !bpf_pseudo_kfunc_call(insn))
2218 			continue;
2219 
2220 		if (!env->bpf_capable) {
2221 			verbose(env, "loading/calling other bpf or kernel functions are allowed for CAP_BPF and CAP_SYS_ADMIN\n");
2222 			return -EPERM;
2223 		}
2224 
2225 		if (bpf_pseudo_func(insn) || bpf_pseudo_call(insn))
2226 			ret = add_subprog(env, i + insn->imm + 1);
2227 		else
2228 			ret = add_kfunc_call(env, insn->imm, insn->off);
2229 
2230 		if (ret < 0)
2231 			return ret;
2232 	}
2233 
2234 	/* Add a fake 'exit' subprog which could simplify subprog iteration
2235 	 * logic. 'subprog_cnt' should not be increased.
2236 	 */
2237 	subprog[env->subprog_cnt].start = insn_cnt;
2238 
2239 	if (env->log.level & BPF_LOG_LEVEL2)
2240 		for (i = 0; i < env->subprog_cnt; i++)
2241 			verbose(env, "func#%d @%d\n", i, subprog[i].start);
2242 
2243 	return 0;
2244 }
2245 
2246 static int check_subprogs(struct bpf_verifier_env *env)
2247 {
2248 	int i, subprog_start, subprog_end, off, cur_subprog = 0;
2249 	struct bpf_subprog_info *subprog = env->subprog_info;
2250 	struct bpf_insn *insn = env->prog->insnsi;
2251 	int insn_cnt = env->prog->len;
2252 
2253 	/* now check that all jumps are within the same subprog */
2254 	subprog_start = subprog[cur_subprog].start;
2255 	subprog_end = subprog[cur_subprog + 1].start;
2256 	for (i = 0; i < insn_cnt; i++) {
2257 		u8 code = insn[i].code;
2258 
2259 		if (code == (BPF_JMP | BPF_CALL) &&
2260 		    insn[i].imm == BPF_FUNC_tail_call &&
2261 		    insn[i].src_reg != BPF_PSEUDO_CALL)
2262 			subprog[cur_subprog].has_tail_call = true;
2263 		if (BPF_CLASS(code) == BPF_LD &&
2264 		    (BPF_MODE(code) == BPF_ABS || BPF_MODE(code) == BPF_IND))
2265 			subprog[cur_subprog].has_ld_abs = true;
2266 		if (BPF_CLASS(code) != BPF_JMP && BPF_CLASS(code) != BPF_JMP32)
2267 			goto next;
2268 		if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL)
2269 			goto next;
2270 		off = i + insn[i].off + 1;
2271 		if (off < subprog_start || off >= subprog_end) {
2272 			verbose(env, "jump out of range from insn %d to %d\n", i, off);
2273 			return -EINVAL;
2274 		}
2275 next:
2276 		if (i == subprog_end - 1) {
2277 			/* to avoid fall-through from one subprog into another
2278 			 * the last insn of the subprog should be either exit
2279 			 * or unconditional jump back
2280 			 */
2281 			if (code != (BPF_JMP | BPF_EXIT) &&
2282 			    code != (BPF_JMP | BPF_JA)) {
2283 				verbose(env, "last insn is not an exit or jmp\n");
2284 				return -EINVAL;
2285 			}
2286 			subprog_start = subprog_end;
2287 			cur_subprog++;
2288 			if (cur_subprog < env->subprog_cnt)
2289 				subprog_end = subprog[cur_subprog + 1].start;
2290 		}
2291 	}
2292 	return 0;
2293 }
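
/* For instance (hypothetical numbers): if subprog #1 spans insns [7, 20) and
 * insn 10 within it is a conditional jump with off == +15, the computed
 * target 10 + 15 + 1 == 26 falls outside [subprog_start, subprog_end) and the
 * program is rejected with "jump out of range".
 */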
2294 
2295 /* Parentage chain of this register (or stack slot) should take care of all
2296  * issues like callee-saved registers, stack slot allocation time, etc.
2297  */
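/* A small sketch of the walk below: when a register is read in a state where
 * it has not been written, mark_reg_read() follows reg->parent up the chain,
 * setting REG_LIVE_READ32 or REG_LIVE_READ64 on each parent, and stops once
 * it hits a write mark (REG_LIVE_WRITTEN) or an equally strong read mark.
 */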
2298 static int mark_reg_read(struct bpf_verifier_env *env,
2299 			 const struct bpf_reg_state *state,
2300 			 struct bpf_reg_state *parent, u8 flag)
2301 {
2302 	bool writes = parent == state->parent; /* Observe write marks */
2303 	int cnt = 0;
2304 
2305 	while (parent) {
2306 		/* if read wasn't screened by an earlier write ... */
2307 		if (writes && state->live & REG_LIVE_WRITTEN)
2308 			break;
2309 		if (parent->live & REG_LIVE_DONE) {
2310 			verbose(env, "verifier BUG type %s var_off %lld off %d\n",
2311 				reg_type_str(env, parent->type),
2312 				parent->var_off.value, parent->off);
2313 			return -EFAULT;
2314 		}
2315 		/* The first condition is more likely to be true than the
2316 		 * second, so check it first.
2317 		 */
2318 		if ((parent->live & REG_LIVE_READ) == flag ||
2319 		    parent->live & REG_LIVE_READ64)
2320 			/* The parentage chain never changes and
2321 			 * this parent was already marked as LIVE_READ.
2322 			 * There is no need to keep walking the chain again and
2323 			 * keep re-marking all parents as LIVE_READ.
2324 			 * This case happens when the same register is read
2325 			 * multiple times without writes into it in-between.
2326 			 * Also, if parent has the stronger REG_LIVE_READ64 set,
2327 			 * then no need to set the weak REG_LIVE_READ32.
2328 			 */
2329 			break;
2330 		/* ... then we depend on parent's value */
2331 		parent->live |= flag;
2332 		/* REG_LIVE_READ64 overrides REG_LIVE_READ32. */
2333 		if (flag == REG_LIVE_READ64)
2334 			parent->live &= ~REG_LIVE_READ32;
2335 		state = parent;
2336 		parent = state->parent;
2337 		writes = true;
2338 		cnt++;
2339 	}
2340 
2341 	if (env->longest_mark_read_walk < cnt)
2342 		env->longest_mark_read_walk = cnt;
2343 	return 0;
2344 }
2345 
2346 /* This function is supposed to be used by the following 32-bit optimization
2347  * code only. It returns TRUE if the source or destination register operates
2348  * on 64 bits, otherwise it returns FALSE.
2349  */
2350 static bool is_reg64(struct bpf_verifier_env *env, struct bpf_insn *insn,
2351 		     u32 regno, struct bpf_reg_state *reg, enum reg_arg_type t)
2352 {
2353 	u8 code, class, op;
2354 
2355 	code = insn->code;
2356 	class = BPF_CLASS(code);
2357 	op = BPF_OP(code);
2358 	if (class == BPF_JMP) {
2359 		/* BPF_EXIT for "main" will reach here. Return TRUE
2360 		 * conservatively.
2361 		 */
2362 		if (op == BPF_EXIT)
2363 			return true;
2364 		if (op == BPF_CALL) {
2365 			/* A BPF-to-BPF call will reach here because caller-saved
2366 			 * clobbers are marked with DST_OP_NO_MARK, for which we
2367 			 * don't care about the register definition because they
2368 			 * are already marked as NOT_INIT anyway.
2369 			 */
2370 			if (insn->src_reg == BPF_PSEUDO_CALL)
2371 				return false;
2372 			/* Helper call will reach here because of arg type
2373 			 * check, conservatively return TRUE.
2374 			 */
2375 			if (t == SRC_OP)
2376 				return true;
2377 
2378 			return false;
2379 		}
2380 	}
2381 
2382 	if (class == BPF_ALU64 || class == BPF_JMP ||
2383 	    /* BPF_END always uses the BPF_ALU class. */
2384 	    (class == BPF_ALU && op == BPF_END && insn->imm == 64))
2385 		return true;
2386 
2387 	if (class == BPF_ALU || class == BPF_JMP32)
2388 		return false;
2389 
2390 	if (class == BPF_LDX) {
2391 		if (t != SRC_OP)
2392 			return BPF_SIZE(code) == BPF_DW;
2393 		/* LDX source must be ptr. */
2394 		return true;
2395 	}
2396 
2397 	if (class == BPF_STX) {
2398 		/* BPF_STX (including atomic variants) has multiple source
2399 		 * operands, one of which is a ptr. Check whether the caller is
2400 		 * asking about it.
2401 		 */
2402 		if (t == SRC_OP && reg->type != SCALAR_VALUE)
2403 			return true;
2404 		return BPF_SIZE(code) == BPF_DW;
2405 	}
2406 
2407 	if (class == BPF_LD) {
2408 		u8 mode = BPF_MODE(code);
2409 
2410 		/* LD_IMM64 */
2411 		if (mode == BPF_IMM)
2412 			return true;
2413 
2414 		/* Both LD_IND and LD_ABS return 32-bit data. */
2415 		if (t != SRC_OP)
2416 			return false;
2417 
2418 		/* Implicit ctx ptr. */
2419 		if (regno == BPF_REG_6)
2420 			return true;
2421 
2422 		/* Explicit source could be any width. */
2423 		return true;
2424 	}
2425 
2426 	if (class == BPF_ST)
2427 		/* The only source register for BPF_ST is a ptr. */
2428 		return true;
2429 
2430 	/* Conservatively return true at default. */
2431 	return true;
2432 }
2433 
2434 /* Return the regno defined by the insn, or -1. */
2435 static int insn_def_regno(const struct bpf_insn *insn)
2436 {
2437 	switch (BPF_CLASS(insn->code)) {
2438 	case BPF_JMP:
2439 	case BPF_JMP32:
2440 	case BPF_ST:
2441 		return -1;
2442 	case BPF_STX:
2443 		if (BPF_MODE(insn->code) == BPF_ATOMIC &&
2444 		    (insn->imm & BPF_FETCH)) {
2445 			if (insn->imm == BPF_CMPXCHG)
2446 				return BPF_REG_0;
2447 			else
2448 				return insn->src_reg;
2449 		} else {
2450 			return -1;
2451 		}
2452 	default:
2453 		return insn->dst_reg;
2454 	}
2455 }
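
/* For example, an atomic BPF_XCHG or BPF_ADD | BPF_FETCH defines
 * insn->src_reg (it receives the old memory value), BPF_CMPXCHG always
 * defines BPF_REG_0, and a plain store defines no register at all (-1).
 */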
2456 
2457 /* Return TRUE if INSN has defined any 32-bit value explicitly. */
2458 static bool insn_has_def32(struct bpf_verifier_env *env, struct bpf_insn *insn)
2459 {
2460 	int dst_reg = insn_def_regno(insn);
2461 
2462 	if (dst_reg == -1)
2463 		return false;
2464 
2465 	return !is_reg64(env, insn, dst_reg, NULL, DST_OP);
2466 }
2467 
2468 static void mark_insn_zext(struct bpf_verifier_env *env,
2469 			   struct bpf_reg_state *reg)
2470 {
2471 	s32 def_idx = reg->subreg_def;
2472 
2473 	if (def_idx == DEF_NOT_SUBREG)
2474 		return;
2475 
2476 	env->insn_aux_data[def_idx - 1].zext_dst = true;
2477 	/* The dst will be zero extended, so won't be sub-register anymore. */
2478 	reg->subreg_def = DEF_NOT_SUBREG;
2479 }
2480 
2481 static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
2482 			 enum reg_arg_type t)
2483 {
2484 	struct bpf_verifier_state *vstate = env->cur_state;
2485 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
2486 	struct bpf_insn *insn = env->prog->insnsi + env->insn_idx;
2487 	struct bpf_reg_state *reg, *regs = state->regs;
2488 	bool rw64;
2489 
2490 	if (regno >= MAX_BPF_REG) {
2491 		verbose(env, "R%d is invalid\n", regno);
2492 		return -EINVAL;
2493 	}
2494 
2495 	mark_reg_scratched(env, regno);
2496 
2497 	reg = &regs[regno];
2498 	rw64 = is_reg64(env, insn, regno, reg, t);
2499 	if (t == SRC_OP) {
2500 		/* check whether register used as source operand can be read */
2501 		if (reg->type == NOT_INIT) {
2502 			verbose(env, "R%d !read_ok\n", regno);
2503 			return -EACCES;
2504 		}
2505 		/* We don't need to worry about FP liveness because it's read-only */
2506 		if (regno == BPF_REG_FP)
2507 			return 0;
2508 
2509 		if (rw64)
2510 			mark_insn_zext(env, reg);
2511 
2512 		return mark_reg_read(env, reg, reg->parent,
2513 				     rw64 ? REG_LIVE_READ64 : REG_LIVE_READ32);
2514 	} else {
2515 		/* check whether register used as dest operand can be written to */
2516 		if (regno == BPF_REG_FP) {
2517 			verbose(env, "frame pointer is read only\n");
2518 			return -EACCES;
2519 		}
2520 		reg->live |= REG_LIVE_WRITTEN;
2521 		reg->subreg_def = rw64 ? DEF_NOT_SUBREG : env->insn_idx + 1;
2522 		if (t == DST_OP)
2523 			mark_reg_unknown(env, regs, regno);
2524 	}
2525 	return 0;
2526 }
2527 
2528 /* for any branch, call, exit record the history of jmps in the given state */
2529 static int push_jmp_history(struct bpf_verifier_env *env,
2530 			    struct bpf_verifier_state *cur)
2531 {
2532 	u32 cnt = cur->jmp_history_cnt;
2533 	struct bpf_idx_pair *p;
2534 	size_t alloc_size;
2535 
2536 	cnt++;
2537 	alloc_size = kmalloc_size_roundup(size_mul(cnt, sizeof(*p)));
2538 	p = krealloc(cur->jmp_history, alloc_size, GFP_USER);
2539 	if (!p)
2540 		return -ENOMEM;
2541 	p[cnt - 1].idx = env->insn_idx;
2542 	p[cnt - 1].prev_idx = env->prev_insn_idx;
2543 	cur->jmp_history = p;
2544 	cur->jmp_history_cnt = cnt;
2545 	return 0;
2546 }
2547 
2548 /* Backtrack one insn at a time. If idx is not at the top of recorded
2549  * history then the previous instruction came from straight-line execution.
2550  */
2551 static int get_prev_insn_idx(struct bpf_verifier_state *st, int i,
2552 			     u32 *history)
2553 {
2554 	u32 cnt = *history;
2555 
2556 	if (cnt && st->jmp_history[cnt - 1].idx == i) {
2557 		i = st->jmp_history[cnt - 1].prev_idx;
2558 		(*history)--;
2559 	} else {
2560 		i--;
2561 	}
2562 	return i;
2563 }
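
/* A small worked example: with jmp_history == [{idx = 5, prev_idx = 2},
 * {idx = 9, prev_idx = 7}] and i == 9, the walk jumps back to 7 and pops the
 * last entry; for any other i it simply steps to i - 1, i.e. straight-line
 * execution.
 */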
2564 
2565 static const char *disasm_kfunc_name(void *data, const struct bpf_insn *insn)
2566 {
2567 	const struct btf_type *func;
2568 	struct btf *desc_btf;
2569 
2570 	if (insn->src_reg != BPF_PSEUDO_KFUNC_CALL)
2571 		return NULL;
2572 
2573 	desc_btf = find_kfunc_desc_btf(data, insn->off);
2574 	if (IS_ERR(desc_btf))
2575 		return "<error>";
2576 
2577 	func = btf_type_by_id(desc_btf, insn->imm);
2578 	return btf_name_by_offset(desc_btf, func->name_off);
2579 }
2580 
2581 /* For given verifier state backtrack_insn() is called from the last insn to
2582  * the first insn. Its purpose is to compute a bitmask of registers and
2583  * stack slots that needs precision in the parent verifier state.
2584  */
2585 static int backtrack_insn(struct bpf_verifier_env *env, int idx,
2586 			  u32 *reg_mask, u64 *stack_mask)
2587 {
2588 	const struct bpf_insn_cbs cbs = {
2589 		.cb_call	= disasm_kfunc_name,
2590 		.cb_print	= verbose,
2591 		.private_data	= env,
2592 	};
2593 	struct bpf_insn *insn = env->prog->insnsi + idx;
2594 	u8 class = BPF_CLASS(insn->code);
2595 	u8 opcode = BPF_OP(insn->code);
2596 	u8 mode = BPF_MODE(insn->code);
2597 	u32 dreg = 1u << insn->dst_reg;
2598 	u32 sreg = 1u << insn->src_reg;
2599 	u32 spi;
2600 
2601 	if (insn->code == 0)
2602 		return 0;
2603 	if (env->log.level & BPF_LOG_LEVEL2) {
2604 		verbose(env, "regs=%x stack=%llx before ", *reg_mask, *stack_mask);
2605 		verbose(env, "%d: ", idx);
2606 		print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
2607 	}
2608 
2609 	if (class == BPF_ALU || class == BPF_ALU64) {
2610 		if (!(*reg_mask & dreg))
2611 			return 0;
2612 		if (opcode == BPF_MOV) {
2613 			if (BPF_SRC(insn->code) == BPF_X) {
2614 				/* dreg = sreg
2615 				 * dreg needs precision after this insn
2616 				 * sreg needs precision before this insn
2617 				 */
2618 				*reg_mask &= ~dreg;
2619 				*reg_mask |= sreg;
2620 			} else {
2621 				/* dreg = K
2622 				 * dreg needs precision after this insn.
2623 				 * Corresponding register is already marked
2624 				 * as precise=true in this verifier state.
2625 				 * No further markings in parent are necessary
2626 				 */
2627 				*reg_mask &= ~dreg;
2628 			}
2629 		} else {
2630 			if (BPF_SRC(insn->code) == BPF_X) {
2631 				/* dreg += sreg
2632 				 * both dreg and sreg need precision
2633 				 * before this insn
2634 				 */
2635 				*reg_mask |= sreg;
2636 			} /* else dreg += K
2637 			   * dreg still needs precision before this insn
2638 			   */
2639 		}
2640 	} else if (class == BPF_LDX) {
2641 		if (!(*reg_mask & dreg))
2642 			return 0;
2643 		*reg_mask &= ~dreg;
2644 
2645 		/* scalars can only be spilled into stack w/o losing precision.
2646 		 * Load from any other memory can be zero extended.
2647 		 * The desire to keep that precision is already indicated
2648 		 * by 'precise' mark in corresponding register of this state.
2649 		 * No further tracking necessary.
2650 		 */
2651 		if (insn->src_reg != BPF_REG_FP)
2652 			return 0;
2653 
2654 		/* dreg = *(u64 *)[fp - off] was a fill from the stack.
2655 		 * that [fp - off] slot contains scalar that needs to be
2656 		 * tracked with precision
2657 		 */
2658 		spi = (-insn->off - 1) / BPF_REG_SIZE;
2659 		if (spi >= 64) {
2660 			verbose(env, "BUG spi %d\n", spi);
2661 			WARN_ONCE(1, "verifier backtracking bug");
2662 			return -EFAULT;
2663 		}
2664 		*stack_mask |= 1ull << spi;
2665 	} else if (class == BPF_STX || class == BPF_ST) {
2666 		if (*reg_mask & dreg)
2667 			/* stx & st shouldn't be using _scalar_ dst_reg
2668 			 * to access memory. It means backtracking
2669 			 * encountered a case of pointer subtraction.
2670 			 */
2671 			return -ENOTSUPP;
2672 		/* scalars can only be spilled into stack */
2673 		if (insn->dst_reg != BPF_REG_FP)
2674 			return 0;
2675 		spi = (-insn->off - 1) / BPF_REG_SIZE;
2676 		if (spi >= 64) {
2677 			verbose(env, "BUG spi %d\n", spi);
2678 			WARN_ONCE(1, "verifier backtracking bug");
2679 			return -EFAULT;
2680 		}
2681 		if (!(*stack_mask & (1ull << spi)))
2682 			return 0;
2683 		*stack_mask &= ~(1ull << spi);
2684 		if (class == BPF_STX)
2685 			*reg_mask |= sreg;
2686 	} else if (class == BPF_JMP || class == BPF_JMP32) {
2687 		if (opcode == BPF_CALL) {
2688 			if (insn->src_reg == BPF_PSEUDO_CALL)
2689 				return -ENOTSUPP;
2690 			/* BPF helpers that invoke callback subprogs are
2691 			 * equivalent to BPF_PSEUDO_CALL above
2692 			 */
2693 			if (insn->src_reg == 0 && is_callback_calling_function(insn->imm))
2694 				return -ENOTSUPP;
2695 			/* regular helper call sets R0 */
2696 			*reg_mask &= ~1;
2697 			if (*reg_mask & 0x3f) {
2698 				/* if backtracing was looking for registers R1-R5
2699 				 * they should have been found already.
2700 				 */
2701 				verbose(env, "BUG regs %x\n", *reg_mask);
2702 				WARN_ONCE(1, "verifier backtracking bug");
2703 				return -EFAULT;
2704 			}
2705 		} else if (opcode == BPF_EXIT) {
2706 			return -ENOTSUPP;
2707 		}
2708 	} else if (class == BPF_LD) {
2709 		if (!(*reg_mask & dreg))
2710 			return 0;
2711 		*reg_mask &= ~dreg;
2712 		/* It's ld_imm64 or ld_abs or ld_ind.
2713 		 * For ld_imm64 no further tracking of precision
2714 		 * into parent is necessary
2715 		 */
2716 		if (mode == BPF_IND || mode == BPF_ABS)
2717 			/* to be analyzed */
2718 			return -ENOTSUPP;
2719 	}
2720 	return 0;
2721 }
2722 
2723 /* the scalar precision tracking algorithm:
2724  * . at the start all registers have precise=false.
2725  * . scalar ranges are tracked as normal through alu and jmp insns.
2726  * . once precise value of the scalar register is used in:
2727  *   .  ptr + scalar alu
2728  *   . if (scalar cond K|scalar)
2729  *   .  helper_call(.., scalar, ...) where ARG_CONST is expected
2730  *   backtrack through the verifier states and mark as precise all
2731  *   registers and stack slots with spilled constants that these scalar
2732  *   registers were computed from.
2733  * . during state pruning two registers (or spilled stack slots)
2734  *   are equivalent if both are not precise.
2735  *
2736  * Note the verifier cannot simply walk register parentage chain,
2737  * since many different registers and stack slots could have been
2738  * used to compute single precise scalar.
2739  *
2740  * The approach of starting with precise=true for all registers and then
2741  * backtrack to mark a register as not precise when the verifier detects
2742  * that the program doesn't care about the specific value (e.g., when a helper
2743  * takes the register as an ARG_ANYTHING parameter) is not safe.
2744  *
2745  * It's ok to walk single parentage chain of the verifier states.
2746  * It's possible that this backtracking will go all the way till 1st insn.
2747  * All other branches will be explored for needing precision later.
2748  *
2749  * The backtracking needs to deal with cases like:
2750  *   R8=map_value(id=0,off=0,ks=4,vs=1952,imm=0) R9_w=map_value(id=0,off=40,ks=4,vs=1952,imm=0)
2751  * r9 -= r8
2752  * r5 = r9
2753  * if r5 > 0x79f goto pc+7
2754  *    R5_w=inv(id=0,umax_value=1951,var_off=(0x0; 0x7ff))
2755  * r5 += 1
2756  * ...
2757  * call bpf_perf_event_output#25
2758  *   where .arg5_type = ARG_CONST_SIZE_OR_ZERO
2759  *
2760  * and this case:
2761  * r6 = 1
2762  * call foo // uses callee's r6 inside to compute r0
2763  * r0 += r6
2764  * if r0 == 0 goto
2765  *
2766  * to track above reg_mask/stack_mask needs to be independent for each frame.
2767  *
2768  * Also if parent's curframe > frame where backtracking started,
2769  * the verifier need to mark registers in both frames, otherwise callees
2770  * may incorrectly prune callers. This is similar to
2771  * commit 7640ead93924 ("bpf: verifier: make sure callees don't prune with caller differences")
2772  *
2773  * For now backtracking falls back into conservative marking.
2774  */
2775 static void mark_all_scalars_precise(struct bpf_verifier_env *env,
2776 				     struct bpf_verifier_state *st)
2777 {
2778 	struct bpf_func_state *func;
2779 	struct bpf_reg_state *reg;
2780 	int i, j;
2781 
2782 	/* big hammer: mark all scalars precise in this path.
2783 	 * pop_stack may still get !precise scalars.
2784 	 * We also skip current state and go straight to first parent state,
2785 	 * because precision markings in current non-checkpointed state are
2786 	 * not needed. See why in the comment in __mark_chain_precision below.
2787 	 */
2788 	for (st = st->parent; st; st = st->parent) {
2789 		for (i = 0; i <= st->curframe; i++) {
2790 			func = st->frame[i];
2791 			for (j = 0; j < BPF_REG_FP; j++) {
2792 				reg = &func->regs[j];
2793 				if (reg->type != SCALAR_VALUE)
2794 					continue;
2795 				reg->precise = true;
2796 			}
2797 			for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) {
2798 				if (!is_spilled_reg(&func->stack[j]))
2799 					continue;
2800 				reg = &func->stack[j].spilled_ptr;
2801 				if (reg->type != SCALAR_VALUE)
2802 					continue;
2803 				reg->precise = true;
2804 			}
2805 		}
2806 	}
2807 }
2808 
2809 static void mark_all_scalars_imprecise(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
2810 {
2811 	struct bpf_func_state *func;
2812 	struct bpf_reg_state *reg;
2813 	int i, j;
2814 
2815 	for (i = 0; i <= st->curframe; i++) {
2816 		func = st->frame[i];
2817 		for (j = 0; j < BPF_REG_FP; j++) {
2818 			reg = &func->regs[j];
2819 			if (reg->type != SCALAR_VALUE)
2820 				continue;
2821 			reg->precise = false;
2822 		}
2823 		for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) {
2824 			if (!is_spilled_reg(&func->stack[j]))
2825 				continue;
2826 			reg = &func->stack[j].spilled_ptr;
2827 			if (reg->type != SCALAR_VALUE)
2828 				continue;
2829 			reg->precise = false;
2830 		}
2831 	}
2832 }
2833 
2834 /*
2835  * __mark_chain_precision() backtracks BPF program instruction sequence and
2836  * chain of verifier states making sure that register *regno* (if regno >= 0)
2837  * and/or stack slot *spi* (if spi >= 0) are marked as precisely tracked
2838  * SCALARS, as well as any other registers and slots that contribute to
2839  * a tracked state of given registers/stack slots, depending on specific BPF
2840  * assembly instructions (see backtrack_insns() for exact instruction handling
2841  * logic). This backtracking relies on recorded jmp_history and is able to
2842  * traverse entire chain of parent states. This process ends only when all the
2843  * necessary registers/slots and their transitive dependencies are marked as
2844  * precise.
2845  *
2846  * One important and subtle aspect is that precise marks *do not matter* in
2847  * the currently verified state (current state). It is important to understand
2848  * why this is the case.
2849  *
2850  * First, note that current state is the state that is not yet "checkpointed",
2851  * i.e., it is not yet put into env->explored_states, and it has no children
2852  * states as well. It's ephemeral, and can end up either a) being discarded if
2853  * compatible explored state is found at some point or BPF_EXIT instruction is
2854  * reached or b) checkpointed and put into env->explored_states, branching out
2855  * into one or more children states.
2856  *
2857  * In the former case, precise markings in current state are completely
2858  * ignored by state comparison code (see regsafe() for details). Only
2859  * checkpointed ("old") state precise markings are important, and if old
2860  * state's register/slot is precise, regsafe() assumes current state's
2861  * register/slot as precise and checks value ranges exactly and precisely. If
2862  * states turn out to be compatible, current state's necessary precise
2863  * markings and any required parent states' precise markings are enforced
2864  * after the fact with propagate_precision() logic. But it's
2865  * important to realize that in this case, even after marking current state
2866  * registers/slots as precise, we immediately discard current state. So what
2867  * actually matters is any of the precise markings propagated into current
2868  * state's parent states, which are always checkpointed (due to b) case above).
2869  * As such, for scenario a) it doesn't matter if current state has precise
2870  * markings set or not.
2871  *
2872  * Now, for the scenario b), checkpointing and forking into child(ren)
2873  * state(s). Note that before current state gets to checkpointing step, any
2874  * processed instruction always assumes precise SCALAR register/slot
2875  * knowledge: if precise value or range is useful to prune jump branch, BPF
2876  * verifier takes this opportunity enthusiastically. Similarly, when
2877  * register's value is used to calculate offset or memory address, exact
2878  * knowledge of SCALAR range is assumed, checked, and enforced. So, similar to
2879  * what we mentioned above about state comparison ignoring precise markings
2880  * during state comparison, BPF verifier ignores and also assumes precise
2881  * markings *at will* during instruction verification process. But as verifier
2882  * assumes precision, it also propagates any precision dependencies across
2883  * parent states, which are not yet finalized, so can be further restricted
2884  * based on new knowledge gained from restrictions enforced by their children
2885  * states. This is so that once those parent states are finalized, i.e., when
2886  * they have no more active children state, state comparison logic in
2887  * is_state_visited() would enforce strict and precise SCALAR ranges, if
2888  * required for correctness.
2889  *
2890  * To build a bit more intuition, note also that once a state is checkpointed,
2891  * the path we took to get to that state is not important. This is crucial
2892  * property for state pruning. When state is checkpointed and finalized at
2893  * some instruction index, it can be correctly and safely used to "short
2894  * circuit" any *compatible* state that reaches exactly the same instruction
2895  * index. I.e., if we jumped to that instruction from a completely different
2896  * code path than original finalized state was derived from, it doesn't
2897  * matter, current state can be discarded because from that instruction
2898  * forward having a compatible state will ensure we will safely reach the
2899  * exit. States describe preconditions for further exploration, but completely
2900  * forget the history of how we got here.
2901  *
2902  * This also means that even if we needed precise SCALAR range to get to
2903  * finalized state, but from that point forward *that same* SCALAR register is
2904  * never used in a precise context (i.e., its precise value is not needed for
2905  * correctness), it's correct and safe to mark such register as "imprecise"
2906  * (i.e., precise marking set to false). This is what we rely on when we do
2907  * not set precise marking in current state. If no child state requires
2908  * precision for any given SCALAR register, it's safe to dictate that it can
2909  * be imprecise. If any child state does require this register to be precise,
2910  * we'll mark it precise later retroactively during precise markings
2911  * propagation from child state to parent states.
2912  *
2913  * Skipping precise marking setting in current state is a mild version of
2914  * relying on the above observation. But we can utilize this property even
2915  * more aggressively by proactively forgetting any precise marking in the
2916  * current state (which we inherited from the parent state), right before we
2917  * checkpoint it and branch off into new child state. This is done by
2918  * mark_all_scalars_imprecise() to hopefully get more permissive and generic
2919  * finalized states which help in short circuiting more future states.
2920  */
2921 static int __mark_chain_precision(struct bpf_verifier_env *env, int frame, int regno,
2922 				  int spi)
2923 {
2924 	struct bpf_verifier_state *st = env->cur_state;
2925 	int first_idx = st->first_insn_idx;
2926 	int last_idx = env->insn_idx;
2927 	struct bpf_func_state *func;
2928 	struct bpf_reg_state *reg;
2929 	u32 reg_mask = regno >= 0 ? 1u << regno : 0;
2930 	u64 stack_mask = spi >= 0 ? 1ull << spi : 0;
2931 	bool skip_first = true;
2932 	bool new_marks = false;
2933 	int i, err;
2934 
2935 	if (!env->bpf_capable)
2936 		return 0;
2937 
2938 	/* Do sanity checks against current state of register and/or stack
2939 	 * slot, but don't set precise flag in current state, as precision
2940 	 * tracking in the current state is unnecessary.
2941 	 */
2942 	func = st->frame[frame];
2943 	if (regno >= 0) {
2944 		reg = &func->regs[regno];
2945 		if (reg->type != SCALAR_VALUE) {
2946 			WARN_ONCE(1, "backtracing misuse");
2947 			return -EFAULT;
2948 		}
2949 		new_marks = true;
2950 	}
2951 
2952 	while (spi >= 0) {
2953 		if (!is_spilled_reg(&func->stack[spi])) {
2954 			stack_mask = 0;
2955 			break;
2956 		}
2957 		reg = &func->stack[spi].spilled_ptr;
2958 		if (reg->type != SCALAR_VALUE) {
2959 			stack_mask = 0;
2960 			break;
2961 		}
2962 		new_marks = true;
2963 		break;
2964 	}
2965 
2966 	if (!new_marks)
2967 		return 0;
2968 	if (!reg_mask && !stack_mask)
2969 		return 0;
2970 
2971 	for (;;) {
2972 		DECLARE_BITMAP(mask, 64);
2973 		u32 history = st->jmp_history_cnt;
2974 
2975 		if (env->log.level & BPF_LOG_LEVEL2)
2976 			verbose(env, "last_idx %d first_idx %d\n", last_idx, first_idx);
2977 
2978 		if (last_idx < 0) {
2979 			/* we are at the entry into subprog, which
2980 			 * is expected for global funcs, but only if
2981 			 * requested precise registers are R1-R5
2982 			 * (which are global func's input arguments)
2983 			 */
2984 			if (st->curframe == 0 &&
2985 			    st->frame[0]->subprogno > 0 &&
2986 			    st->frame[0]->callsite == BPF_MAIN_FUNC &&
2987 			    stack_mask == 0 && (reg_mask & ~0x3e) == 0) {
2988 				bitmap_from_u64(mask, reg_mask);
2989 				for_each_set_bit(i, mask, 32) {
2990 					reg = &st->frame[0]->regs[i];
2991 					if (reg->type != SCALAR_VALUE) {
2992 						reg_mask &= ~(1u << i);
2993 						continue;
2994 					}
2995 					reg->precise = true;
2996 				}
2997 				return 0;
2998 			}
2999 
3000 			verbose(env, "BUG backtracing func entry subprog %d reg_mask %x stack_mask %llx\n",
3001 				st->frame[0]->subprogno, reg_mask, stack_mask);
3002 			WARN_ONCE(1, "verifier backtracking bug");
3003 			return -EFAULT;
3004 		}
3005 
3006 		for (i = last_idx;;) {
3007 			if (skip_first) {
3008 				err = 0;
3009 				skip_first = false;
3010 			} else {
3011 				err = backtrack_insn(env, i, &reg_mask, &stack_mask);
3012 			}
3013 			if (err == -ENOTSUPP) {
3014 				mark_all_scalars_precise(env, st);
3015 				return 0;
3016 			} else if (err) {
3017 				return err;
3018 			}
3019 			if (!reg_mask && !stack_mask)
3020 				/* Found assignment(s) into tracked register in this state.
3021 				 * Since this state is already marked, just return.
3022 				 * Nothing to be tracked further in the parent state.
3023 				 */
3024 				return 0;
3025 			if (i == first_idx)
3026 				break;
3027 			i = get_prev_insn_idx(st, i, &history);
3028 			if (i >= env->prog->len) {
3029 				/* This can happen if backtracking reached insn 0
3030 				 * and there are still reg_mask or stack_mask
3031 				 * to backtrack.
3032 				 * It means the backtracking missed the spot where
3033 				 * particular register was initialized with a constant.
3034 				 */
3035 				verbose(env, "BUG backtracking idx %d\n", i);
3036 				WARN_ONCE(1, "verifier backtracking bug");
3037 				return -EFAULT;
3038 			}
3039 		}
3040 		st = st->parent;
3041 		if (!st)
3042 			break;
3043 
3044 		new_marks = false;
3045 		func = st->frame[frame];
3046 		bitmap_from_u64(mask, reg_mask);
3047 		for_each_set_bit(i, mask, 32) {
3048 			reg = &func->regs[i];
3049 			if (reg->type != SCALAR_VALUE) {
3050 				reg_mask &= ~(1u << i);
3051 				continue;
3052 			}
3053 			if (!reg->precise)
3054 				new_marks = true;
3055 			reg->precise = true;
3056 		}
3057 
3058 		bitmap_from_u64(mask, stack_mask);
3059 		for_each_set_bit(i, mask, 64) {
3060 			if (i >= func->allocated_stack / BPF_REG_SIZE) {
3061 				/* the sequence of instructions:
3062 				 * 2: (bf) r3 = r10
3063 				 * 3: (7b) *(u64 *)(r3 -8) = r0
3064 				 * 4: (79) r4 = *(u64 *)(r10 -8)
3065 				 * doesn't contain jmps. It's backtracked
3066 				 * as a single block.
3067 				 * During backtracking insn 3 is not recognized as
3068 				 * stack access, so at the end of backtracking
3069 				 * stack slot fp-8 is still marked in stack_mask.
3070 				 * However the parent state may not have accessed
3071 				 * fp-8 and it's "unallocated" stack space.
3072 				 * In such case fallback to conservative.
3073 				 */
3074 				mark_all_scalars_precise(env, st);
3075 				return 0;
3076 			}
3077 
3078 			if (!is_spilled_reg(&func->stack[i])) {
3079 				stack_mask &= ~(1ull << i);
3080 				continue;
3081 			}
3082 			reg = &func->stack[i].spilled_ptr;
3083 			if (reg->type != SCALAR_VALUE) {
3084 				stack_mask &= ~(1ull << i);
3085 				continue;
3086 			}
3087 			if (!reg->precise)
3088 				new_marks = true;
3089 			reg->precise = true;
3090 		}
3091 		if (env->log.level & BPF_LOG_LEVEL2) {
3092 			verbose(env, "parent %s regs=%x stack=%llx marks:",
3093 				new_marks ? "didn't have" : "already had",
3094 				reg_mask, stack_mask);
3095 			print_verifier_state(env, func, true);
3096 		}
3097 
3098 		if (!reg_mask && !stack_mask)
3099 			break;
3100 		if (!new_marks)
3101 			break;
3102 
3103 		last_idx = st->last_insn_idx;
3104 		first_idx = st->first_insn_idx;
3105 	}
3106 	return 0;
3107 }
3108 
3109 int mark_chain_precision(struct bpf_verifier_env *env, int regno)
3110 {
3111 	return __mark_chain_precision(env, env->cur_state->curframe, regno, -1);
3112 }
3113 
3114 static int mark_chain_precision_frame(struct bpf_verifier_env *env, int frame, int regno)
3115 {
3116 	return __mark_chain_precision(env, frame, regno, -1);
3117 }
3118 
3119 static int mark_chain_precision_stack_frame(struct bpf_verifier_env *env, int frame, int spi)
3120 {
3121 	return __mark_chain_precision(env, frame, -1, spi);
3122 }
3123 
3124 static bool is_spillable_regtype(enum bpf_reg_type type)
3125 {
3126 	switch (base_type(type)) {
3127 	case PTR_TO_MAP_VALUE:
3128 	case PTR_TO_STACK:
3129 	case PTR_TO_CTX:
3130 	case PTR_TO_PACKET:
3131 	case PTR_TO_PACKET_META:
3132 	case PTR_TO_PACKET_END:
3133 	case PTR_TO_FLOW_KEYS:
3134 	case CONST_PTR_TO_MAP:
3135 	case PTR_TO_SOCKET:
3136 	case PTR_TO_SOCK_COMMON:
3137 	case PTR_TO_TCP_SOCK:
3138 	case PTR_TO_XDP_SOCK:
3139 	case PTR_TO_BTF_ID:
3140 	case PTR_TO_BUF:
3141 	case PTR_TO_MEM:
3142 	case PTR_TO_FUNC:
3143 	case PTR_TO_MAP_KEY:
3144 		return true;
3145 	default:
3146 		return false;
3147 	}
3148 }
3149 
3150 /* Does this register contain a constant zero? */
3151 static bool register_is_null(struct bpf_reg_state *reg)
3152 {
3153 	return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0);
3154 }
3155 
3156 static bool register_is_const(struct bpf_reg_state *reg)
3157 {
3158 	return reg->type == SCALAR_VALUE && tnum_is_const(reg->var_off);
3159 }
3160 
3161 static bool __is_scalar_unbounded(struct bpf_reg_state *reg)
3162 {
3163 	return tnum_is_unknown(reg->var_off) &&
3164 	       reg->smin_value == S64_MIN && reg->smax_value == S64_MAX &&
3165 	       reg->umin_value == 0 && reg->umax_value == U64_MAX &&
3166 	       reg->s32_min_value == S32_MIN && reg->s32_max_value == S32_MAX &&
3167 	       reg->u32_min_value == 0 && reg->u32_max_value == U32_MAX;
3168 }
3169 
3170 static bool register_is_bounded(struct bpf_reg_state *reg)
3171 {
3172 	return reg->type == SCALAR_VALUE && !__is_scalar_unbounded(reg);
3173 }
3174 
3175 static bool __is_pointer_value(bool allow_ptr_leaks,
3176 			       const struct bpf_reg_state *reg)
3177 {
3178 	if (allow_ptr_leaks)
3179 		return false;
3180 
3181 	return reg->type != SCALAR_VALUE;
3182 }
3183 
3184 static void save_register_state(struct bpf_func_state *state,
3185 				int spi, struct bpf_reg_state *reg,
3186 				int size)
3187 {
3188 	int i;
3189 
3190 	state->stack[spi].spilled_ptr = *reg;
3191 	if (size == BPF_REG_SIZE)
3192 		state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
3193 
3194 	for (i = BPF_REG_SIZE; i > BPF_REG_SIZE - size; i--)
3195 		state->stack[spi].slot_type[i - 1] = STACK_SPILL;
3196 
3197 	/* size < 8 bytes spill */
3198 	for (; i; i--)
3199 		scrub_spilled_slot(&state->stack[spi].slot_type[i - 1]);
3200 }
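
/* For example, spilling a 4-byte bounded scalar fills slot_type[7..4] with
 * STACK_SPILL and passes slot_type[3..0] to scrub_spilled_slot(); only a full
 * 8-byte spill gets the REG_LIVE_WRITTEN mark above.
 */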
3201 
3202 /* check_stack_{read,write}_fixed_off functions track spill/fill of registers,
3203  * stack boundary and alignment are checked in check_mem_access()
3204  */
3205 static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
3206 				       /* stack frame we're writing to */
3207 				       struct bpf_func_state *state,
3208 				       int off, int size, int value_regno,
3209 				       int insn_idx)
3210 {
3211 	struct bpf_func_state *cur; /* state of the current function */
3212 	int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err;
3213 	u32 dst_reg = env->prog->insnsi[insn_idx].dst_reg;
3214 	struct bpf_reg_state *reg = NULL;
3215 
3216 	err = grow_stack_state(state, round_up(slot + 1, BPF_REG_SIZE));
3217 	if (err)
3218 		return err;
3219 	/* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
3220 	 * so it's aligned access and [off, off + size) are within stack limits
3221 	 */
3222 	if (!env->allow_ptr_leaks &&
3223 	    state->stack[spi].slot_type[0] == STACK_SPILL &&
3224 	    size != BPF_REG_SIZE) {
3225 		verbose(env, "attempt to corrupt spilled pointer on stack\n");
3226 		return -EACCES;
3227 	}
3228 
3229 	cur = env->cur_state->frame[env->cur_state->curframe];
3230 	if (value_regno >= 0)
3231 		reg = &cur->regs[value_regno];
3232 	if (!env->bypass_spec_v4) {
3233 		bool sanitize = reg && is_spillable_regtype(reg->type);
3234 
3235 		for (i = 0; i < size; i++) {
3236 			if (state->stack[spi].slot_type[i] == STACK_INVALID) {
3237 				sanitize = true;
3238 				break;
3239 			}
3240 		}
3241 
3242 		if (sanitize)
3243 			env->insn_aux_data[insn_idx].sanitize_stack_spill = true;
3244 	}
3245 
3246 	mark_stack_slot_scratched(env, spi);
3247 	if (reg && !(off % BPF_REG_SIZE) && register_is_bounded(reg) &&
3248 	    !register_is_null(reg) && env->bpf_capable) {
3249 		if (dst_reg != BPF_REG_FP) {
3250 			/* The backtracking logic can only recognize explicit
3251 			 * stack slot address like [fp - 8]. Other spill of
3252 			 * scalar via different register has to be conservative.
3253 			 * Backtrack from here and mark all registers as precise
3254 			 * that contributed into 'reg' being a constant.
3255 			 */
3256 			err = mark_chain_precision(env, value_regno);
3257 			if (err)
3258 				return err;
3259 		}
3260 		save_register_state(state, spi, reg, size);
3261 	} else if (reg && is_spillable_regtype(reg->type)) {
3262 		/* register containing pointer is being spilled into stack */
3263 		if (size != BPF_REG_SIZE) {
3264 			verbose_linfo(env, insn_idx, "; ");
3265 			verbose(env, "invalid size of register spill\n");
3266 			return -EACCES;
3267 		}
3268 		if (state != cur && reg->type == PTR_TO_STACK) {
3269 			verbose(env, "cannot spill pointers to stack into stack frame of the caller\n");
3270 			return -EINVAL;
3271 		}
3272 		save_register_state(state, spi, reg, size);
3273 	} else {
3274 		u8 type = STACK_MISC;
3275 
3276 		/* regular write of data into stack destroys any spilled ptr */
3277 		state->stack[spi].spilled_ptr.type = NOT_INIT;
3278 		/* Mark slots as STACK_MISC if they belonged to spilled ptr. */
3279 		if (is_spilled_reg(&state->stack[spi]))
3280 			for (i = 0; i < BPF_REG_SIZE; i++)
3281 				scrub_spilled_slot(&state->stack[spi].slot_type[i]);
3282 
3283 		/* only mark the slot as written if all 8 bytes were written
3284 		 * otherwise read propagation may incorrectly stop too soon
3285 		 * when stack slots are partially written.
3286 		 * This heuristic means that read propagation will be
3287 		 * conservative, since it will add reg_live_read marks
3288 		 * to stack slots all the way to the first state when a program
3289 		 * writes+reads less than 8 bytes.
3290 		 */
3291 		if (size == BPF_REG_SIZE)
3292 			state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
3293 
3294 		/* when we zero initialize stack slots mark them as such */
3295 		if (reg && register_is_null(reg)) {
3296 			/* backtracking doesn't work for STACK_ZERO yet. */
3297 			err = mark_chain_precision(env, value_regno);
3298 			if (err)
3299 				return err;
3300 			type = STACK_ZERO;
3301 		}
3302 
3303 		/* Mark slots affected by this stack write. */
3304 		for (i = 0; i < size; i++)
3305 			state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] =
3306 				type;
3307 	}
3308 	return 0;
3309 }
3310 
3311 /* Write the stack: 'stack[ptr_regno + off] = value_regno'. 'ptr_regno' is
3312  * known to contain a variable offset.
3313  * This function checks whether the write is permitted and conservatively
3314  * tracks the effects of the write, considering that each stack slot in the
3315  * dynamic range is potentially written to.
3316  *
3317  * 'off' includes 'regno->off'.
3318  * 'value_regno' can be -1, meaning that an unknown value is being written to
3319  * the stack.
3320  *
3321  * Spilled pointers in range are not marked as written because we don't know
3322  * what's going to be actually written. This means that read propagation for
3323  * future reads cannot be terminated by this write.
3324  *
3325  * For privileged programs, uninitialized stack slots are considered
3326  * initialized by this write (even though we don't know exactly what offsets
3327  * are going to be written to). The idea is that we don't want the verifier to
3328  * reject future reads that access slots written to through variable offsets.
3329  */
3330 static int check_stack_write_var_off(struct bpf_verifier_env *env,
3331 				     /* func where register points to */
3332 				     struct bpf_func_state *state,
3333 				     int ptr_regno, int off, int size,
3334 				     int value_regno, int insn_idx)
3335 {
3336 	struct bpf_func_state *cur; /* state of the current function */
3337 	int min_off, max_off;
3338 	int i, err;
3339 	struct bpf_reg_state *ptr_reg = NULL, *value_reg = NULL;
3340 	bool writing_zero = false;
3341 	/* set if the fact that we're writing a zero is used to let any
3342 	 * stack slots remain STACK_ZERO
3343 	 */
3344 	bool zero_used = false;
3345 
3346 	cur = env->cur_state->frame[env->cur_state->curframe];
3347 	ptr_reg = &cur->regs[ptr_regno];
3348 	min_off = ptr_reg->smin_value + off;
3349 	max_off = ptr_reg->smax_value + off + size;
3350 	if (value_regno >= 0)
3351 		value_reg = &cur->regs[value_regno];
3352 	if (value_reg && register_is_null(value_reg))
3353 		writing_zero = true;
3354 
3355 	err = grow_stack_state(state, round_up(-min_off, BPF_REG_SIZE));
3356 	if (err)
3357 		return err;
3358 
3359 
3360 	/* Variable offset writes destroy any spilled pointers in range. */
3361 	for (i = min_off; i < max_off; i++) {
3362 		u8 new_type, *stype;
3363 		int slot, spi;
3364 
3365 		slot = -i - 1;
3366 		spi = slot / BPF_REG_SIZE;
3367 		stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
3368 		mark_stack_slot_scratched(env, spi);
3369 
3370 		if (!env->allow_ptr_leaks && *stype != STACK_MISC && *stype != STACK_ZERO) {
3371 			/* Reject the write if the range we may write to has not
3372 			 * been initialized beforehand. If we didn't reject
3373 			 * here, the ptr status would be erased below (even
3374 			 * though not all slots are actually overwritten),
3375 			 * possibly opening the door to leaks.
3376 			 *
3377 			 * We do however catch the STACK_INVALID case below, and
3378 			 * only allow reading possibly uninitialized memory
3379 			 * later for CAP_PERFMON, as the write may not happen to
3380 			 * that slot.
3381 			 */
3382 			verbose(env, "spilled ptr in range of var-offset stack write; insn %d, ptr off: %d",
3383 				insn_idx, i);
3384 			return -EINVAL;
3385 		}
3386 
3387 		/* Erase all spilled pointers. */
3388 		state->stack[spi].spilled_ptr.type = NOT_INIT;
3389 
3390 		/* Update the slot type. */
3391 		new_type = STACK_MISC;
3392 		if (writing_zero && *stype == STACK_ZERO) {
3393 			new_type = STACK_ZERO;
3394 			zero_used = true;
3395 		}
3396 		/* If the slot is STACK_INVALID, we check whether it's OK to
3397 		 * pretend that it will be initialized by this write. The slot
3398 		 * might not actually be written to, and so if we mark it as
3399 		 * initialized future reads might leak uninitialized memory.
3400 		 * For privileged programs, we will accept such reads to slots
3401 		 * that may or may not be written because, if we rejected
3402 		 * them, the error would be too confusing.
3403 		 */
3404 		if (*stype == STACK_INVALID && !env->allow_uninit_stack) {
3405 			verbose(env, "uninit stack in range of var-offset write prohibited for !root; insn %d, off: %d",
3406 					insn_idx, i);
3407 			return -EINVAL;
3408 		}
3409 		*stype = new_type;
3410 	}
3411 	if (zero_used) {
3412 		/* backtracking doesn't work for STACK_ZERO yet. */
3413 		err = mark_chain_precision(env, value_regno);
3414 		if (err)
3415 			return err;
3416 	}
3417 	return 0;
3418 }
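
/* As a sketch of the variable-offset case above (illustrative only, assuming
 * a privileged loader): if R2 is a scalar known to be in [0, 8] and the
 * program does
 *    BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -16),
 *    BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
 *    BPF_ST_MEM(BPF_B, BPF_REG_3, 0, 0),
 * the single byte may land anywhere in [fp - 16, fp - 8], so every slot in
 * that range is treated as potentially written: spilled pointers there are
 * erased and the touched slot types become STACK_MISC. Only a store of a
 * register known to be zero may leave already-STACK_ZERO bytes as
 * STACK_ZERO.
 */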
3419 
3420 /* When register 'dst_regno' is assigned some values from stack[min_off,
3421  * max_off), we set the register's type according to the types of the
3422  * respective stack slots. If all the stack values are known to be zeros, then
3423  * so is the destination reg. Otherwise, the register is considered to be
3424  * SCALAR. This function does not deal with register filling; the caller must
3425  * ensure that all spilled registers in the stack range have been marked as
3426  * read.
3427  */
3428 static void mark_reg_stack_read(struct bpf_verifier_env *env,
3429 				/* func where src register points to */
3430 				struct bpf_func_state *ptr_state,
3431 				int min_off, int max_off, int dst_regno)
3432 {
3433 	struct bpf_verifier_state *vstate = env->cur_state;
3434 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
3435 	int i, slot, spi;
3436 	u8 *stype;
3437 	int zeros = 0;
3438 
3439 	for (i = min_off; i < max_off; i++) {
3440 		slot = -i - 1;
3441 		spi = slot / BPF_REG_SIZE;
3442 		stype = ptr_state->stack[spi].slot_type;
3443 		if (stype[slot % BPF_REG_SIZE] != STACK_ZERO)
3444 			break;
3445 		zeros++;
3446 	}
3447 	if (zeros == max_off - min_off) {
3448 		/* any access_size read into register is zero extended,
3449 		 * so the whole register == const_zero
3450 		 */
3451 		__mark_reg_const_zero(&state->regs[dst_regno]);
3452 		/* backtracking doesn't support STACK_ZERO yet,
3453 		 * so mark it precise here, so that later
3454 		 * backtracking can stop here.
3455 		 * Backtracking may not need this if this register
3456 		 * doesn't participate in pointer adjustment.
3457 		 * Forward propagation of precise flag is not
3458 		 * necessary either. This mark is only to stop
3459 		 * backtracking. Any register that contributed
3460 		 * to const 0 was marked precise before spill.
3461 		 */
3462 		state->regs[dst_regno].precise = true;
3463 	} else {
3464 		/* have read misc data from the stack */
3465 		mark_reg_unknown(env, state->regs, dst_regno);
3466 	}
3467 	state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
3468 }
3469 
3470 /* Read the stack at 'off' and put the results into the register indicated by
3471  * 'dst_regno'. It handles reg filling if the addressed stack slot is a
3472  * spilled reg.
3473  *
3474  * 'dst_regno' can be -1, meaning that the read value is not going to a
3475  * register.
3476  *
3477  * The access is assumed to be within the current stack bounds.
3478  */
3479 static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
3480 				      /* func where src register points to */
3481 				      struct bpf_func_state *reg_state,
3482 				      int off, int size, int dst_regno)
3483 {
3484 	struct bpf_verifier_state *vstate = env->cur_state;
3485 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
3486 	int i, slot = -off - 1, spi = slot / BPF_REG_SIZE;
3487 	struct bpf_reg_state *reg;
3488 	u8 *stype, type;
3489 
3490 	stype = reg_state->stack[spi].slot_type;
3491 	reg = &reg_state->stack[spi].spilled_ptr;
3492 
3493 	if (is_spilled_reg(&reg_state->stack[spi])) {
3494 		u8 spill_size = 1;
3495 
3496 		for (i = BPF_REG_SIZE - 1; i > 0 && stype[i - 1] == STACK_SPILL; i--)
3497 			spill_size++;
3498 
3499 		if (size != BPF_REG_SIZE || spill_size != BPF_REG_SIZE) {
3500 			if (reg->type != SCALAR_VALUE) {
3501 				verbose_linfo(env, env->insn_idx, "; ");
3502 				verbose(env, "invalid size of register fill\n");
3503 				return -EACCES;
3504 			}
3505 
3506 			mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
3507 			if (dst_regno < 0)
3508 				return 0;
3509 
3510 			if (!(off % BPF_REG_SIZE) && size == spill_size) {
3511 				/* The earlier check_reg_arg() has decided the
3512 				 * subreg_def for this insn.  Save it first.
3513 				 */
3514 				s32 subreg_def = state->regs[dst_regno].subreg_def;
3515 
3516 				state->regs[dst_regno] = *reg;
3517 				state->regs[dst_regno].subreg_def = subreg_def;
3518 			} else {
3519 				for (i = 0; i < size; i++) {
3520 					type = stype[(slot - i) % BPF_REG_SIZE];
3521 					if (type == STACK_SPILL)
3522 						continue;
3523 					if (type == STACK_MISC)
3524 						continue;
3525 					verbose(env, "invalid read from stack off %d+%d size %d\n",
3526 						off, i, size);
3527 					return -EACCES;
3528 				}
3529 				mark_reg_unknown(env, state->regs, dst_regno);
3530 			}
3531 			state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
3532 			return 0;
3533 		}
3534 
3535 		if (dst_regno >= 0) {
3536 			/* restore register state from stack */
3537 			state->regs[dst_regno] = *reg;
3538 			/* mark reg as written since spilled pointer state likely
3539 			 * has its liveness marks cleared by is_state_visited()
3540 			 * which resets stack/reg liveness for state transitions
3541 			 */
3542 			state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
3543 		} else if (__is_pointer_value(env->allow_ptr_leaks, reg)) {
3544 			/* If dst_regno==-1, the caller is asking us whether
3545 			 * it is acceptable to use this value as a SCALAR_VALUE
3546 			 * (e.g. for XADD).
3547 			 * We must not allow unprivileged callers to do that
3548 			 * with spilled pointers.
3549 			 */
3550 			verbose(env, "leaking pointer from stack off %d\n",
3551 				off);
3552 			return -EACCES;
3553 		}
3554 		mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
3555 	} else {
3556 		for (i = 0; i < size; i++) {
3557 			type = stype[(slot - i) % BPF_REG_SIZE];
3558 			if (type == STACK_MISC)
3559 				continue;
3560 			if (type == STACK_ZERO)
3561 				continue;
3562 			verbose(env, "invalid read from stack off %d+%d size %d\n",
3563 				off, i, size);
3564 			return -EACCES;
3565 		}
3566 		mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
3567 		if (dst_regno >= 0)
3568 			mark_reg_stack_read(env, reg_state, off, off + size, dst_regno);
3569 	}
3570 	return 0;
3571 }
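
/* Fill sketch for the fixed-offset read above (illustrative only): after a
 * pointer spill such as
 *    BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),   with R1 of type PTR_TO_CTX
 * an 8-byte read
 *    BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
 * restores the full spilled register state into R2. A narrower read like
 *    BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_10, -8),
 * is a partial fill: for a spilled pointer it fails with "invalid size of
 * register fill", while for a spilled scalar R2 is simply marked unknown.
 */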
3572 
3573 enum bpf_access_src {
3574 	ACCESS_DIRECT = 1,  /* the access is performed by an instruction */
3575 	ACCESS_HELPER = 2,  /* the access is performed by a helper */
3576 };
3577 
3578 static int check_stack_range_initialized(struct bpf_verifier_env *env,
3579 					 int regno, int off, int access_size,
3580 					 bool zero_size_allowed,
3581 					 enum bpf_access_src type,
3582 					 struct bpf_call_arg_meta *meta);
3583 
3584 static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno)
3585 {
3586 	return cur_regs(env) + regno;
3587 }
3588 
3589 /* Read the stack at 'ptr_regno + off' and put the result into the register
3590  * 'dst_regno'.
3591  * 'off' includes the pointer register's fixed offset (i.e. 'ptr_regno.off'),
3592  * but not its variable offset.
3593  * 'size' is assumed to be <= reg size and the access is assumed to be aligned.
3594  *
3595  * As opposed to check_stack_read_fixed_off, this function doesn't deal with
3596  * filling registers (i.e. reads of a spilled register cannot be detected when
3597  * the offset is not fixed). We conservatively mark 'dst_regno' as containing
3598  * SCALAR_VALUE. That's why we assert that the 'ptr_regno' has a variable
3599  * offset; for a fixed offset check_stack_read_fixed_off should be used
3600  * instead.
3601  */
3602 static int check_stack_read_var_off(struct bpf_verifier_env *env,
3603 				    int ptr_regno, int off, int size, int dst_regno)
3604 {
3605 	/* The state of the source register. */
3606 	struct bpf_reg_state *reg = reg_state(env, ptr_regno);
3607 	struct bpf_func_state *ptr_state = func(env, reg);
3608 	int err;
3609 	int min_off, max_off;
3610 
3611 	/* Note that we pass a NULL meta, so raw access will not be permitted.
3612 	 */
3613 	err = check_stack_range_initialized(env, ptr_regno, off, size,
3614 					    false, ACCESS_DIRECT, NULL);
3615 	if (err)
3616 		return err;
3617 
3618 	min_off = reg->smin_value + off;
3619 	max_off = reg->smax_value + off;
3620 	mark_reg_stack_read(env, ptr_state, min_off, max_off + size, dst_regno);
3621 	return 0;
3622 }
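
/* For example (illustrative only), with R3 = fp - 16 plus a scalar in [0, 8]
 * as in the write case above,
 *    BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_3, 0),
 * first requires the whole [fp - 16, fp - 8] range to be initialized (the
 * NULL meta means no raw-mode exception), and then R4 becomes a
 * SCALAR_VALUE: const zero if every byte in range is STACK_ZERO, unknown
 * otherwise. Spilled registers in the range are never filled here.
 */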
3623 
3624 /* check_stack_read dispatches to check_stack_read_fixed_off or
3625  * check_stack_read_var_off.
3626  *
3627  * The caller must ensure that the offset falls within the allocated stack
3628  * bounds.
3629  *
3630  * 'dst_regno' is a register which will receive the value from the stack. It
3631  * can be -1, meaning that the read value is not going to a register.
3632  */
3633 static int check_stack_read(struct bpf_verifier_env *env,
3634 			    int ptr_regno, int off, int size,
3635 			    int dst_regno)
3636 {
3637 	struct bpf_reg_state *reg = reg_state(env, ptr_regno);
3638 	struct bpf_func_state *state = func(env, reg);
3639 	int err;
3640 	/* Some accesses are only permitted with a static offset. */
3641 	bool var_off = !tnum_is_const(reg->var_off);
3642 
3643 	/* The offset is required to be static when reads don't go to a
3644 	 * register, in order to not leak pointers (see
3645 	 * check_stack_read_fixed_off).
3646 	 */
3647 	if (dst_regno < 0 && var_off) {
3648 		char tn_buf[48];
3649 
3650 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3651 		verbose(env, "variable offset stack pointer cannot be passed into helper function; var_off=%s off=%d size=%d\n",
3652 			tn_buf, off, size);
3653 		return -EACCES;
3654 	}
3655 	/* Variable offset is prohibited for unprivileged mode for simplicity
3656 	 * since it requires corresponding support in Spectre masking for stack
3657 	 * ALU. See also retrieve_ptr_limit().
3658 	 */
3659 	if (!env->bypass_spec_v1 && var_off) {
3660 		char tn_buf[48];
3661 
3662 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3663 		verbose(env, "R%d variable offset stack access prohibited for !root, var_off=%s\n",
3664 				ptr_regno, tn_buf);
3665 		return -EACCES;
3666 	}
3667 
3668 	if (!var_off) {
3669 		off += reg->var_off.value;
3670 		err = check_stack_read_fixed_off(env, state, off, size,
3671 						 dst_regno);
3672 	} else {
3673 		/* Variable offset stack reads need more conservative handling
3674 		 * than fixed offset ones. Note that dst_regno >= 0 on this
3675 		 * branch.
3676 		 */
3677 		err = check_stack_read_var_off(env, ptr_regno, off, size,
3678 					       dst_regno);
3679 	}
3680 	return err;
3681 }
3682 
3683 
3684 /* check_stack_write dispatches to check_stack_write_fixed_off or
3685  * check_stack_write_var_off.
3686  *
3687  * 'ptr_regno' is the register used as a pointer into the stack.
3688  * 'off' includes 'ptr_regno->off', but not its variable offset (if any).
3689  * 'value_regno' is the register whose value we're writing to the stack. It can
3690  * be -1, meaning that we're not writing from a register.
3691  *
3692  * The caller must ensure that the offset falls within the maximum stack size.
3693  */
3694 static int check_stack_write(struct bpf_verifier_env *env,
3695 			     int ptr_regno, int off, int size,
3696 			     int value_regno, int insn_idx)
3697 {
3698 	struct bpf_reg_state *reg = reg_state(env, ptr_regno);
3699 	struct bpf_func_state *state = func(env, reg);
3700 	int err;
3701 
3702 	if (tnum_is_const(reg->var_off)) {
3703 		off += reg->var_off.value;
3704 		err = check_stack_write_fixed_off(env, state, off, size,
3705 						  value_regno, insn_idx);
3706 	} else {
3707 		/* Variable offset stack writes need more conservative handling
3708 		 * than fixed offset ones.
3709 		 */
3710 		err = check_stack_write_var_off(env, state,
3711 						ptr_regno, off, size,
3712 						value_regno, insn_idx);
3713 	}
3714 	return err;
3715 }
3716 
3717 static int check_map_access_type(struct bpf_verifier_env *env, u32 regno,
3718 				 int off, int size, enum bpf_access_type type)
3719 {
3720 	struct bpf_reg_state *regs = cur_regs(env);
3721 	struct bpf_map *map = regs[regno].map_ptr;
3722 	u32 cap = bpf_map_flags_to_cap(map);
3723 
3724 	if (type == BPF_WRITE && !(cap & BPF_MAP_CAN_WRITE)) {
3725 		verbose(env, "write into map forbidden, value_size=%d off=%d size=%d\n",
3726 			map->value_size, off, size);
3727 		return -EACCES;
3728 	}
3729 
3730 	if (type == BPF_READ && !(cap & BPF_MAP_CAN_READ)) {
3731 		verbose(env, "read from map forbidden, value_size=%d off=%d size=%d\n",
3732 			map->value_size, off, size);
3733 		return -EACCES;
3734 	}
3735 
3736 	return 0;
3737 }
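
/* Sketch (illustrative only): a map created with BPF_F_RDONLY_PROG in its
 * map_flags only gets BPF_MAP_CAN_READ from bpf_map_flags_to_cap(), so with
 * R0 holding the looked-up value pointer
 *    BPF_ST_MEM(BPF_W, BPF_REG_0, 0, 1),
 * fails here with "write into map forbidden", while the corresponding
 *    BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
 * read is still accepted.
 */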
3738 
3739 /* check read/write into memory region (e.g., map value, ringbuf sample, etc) */
3740 static int __check_mem_access(struct bpf_verifier_env *env, int regno,
3741 			      int off, int size, u32 mem_size,
3742 			      bool zero_size_allowed)
3743 {
3744 	bool size_ok = size > 0 || (size == 0 && zero_size_allowed);
3745 	struct bpf_reg_state *reg;
3746 
3747 	if (off >= 0 && size_ok && (u64)off + size <= mem_size)
3748 		return 0;
3749 
3750 	reg = &cur_regs(env)[regno];
3751 	switch (reg->type) {
3752 	case PTR_TO_MAP_KEY:
3753 		verbose(env, "invalid access to map key, key_size=%d off=%d size=%d\n",
3754 			mem_size, off, size);
3755 		break;
3756 	case PTR_TO_MAP_VALUE:
3757 		verbose(env, "invalid access to map value, value_size=%d off=%d size=%d\n",
3758 			mem_size, off, size);
3759 		break;
3760 	case PTR_TO_PACKET:
3761 	case PTR_TO_PACKET_META:
3762 	case PTR_TO_PACKET_END:
3763 		verbose(env, "invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n",
3764 			off, size, regno, reg->id, off, mem_size);
3765 		break;
3766 	case PTR_TO_MEM:
3767 	default:
3768 		verbose(env, "invalid access to memory, mem_size=%u off=%d size=%d\n",
3769 			mem_size, off, size);
3770 	}
3771 
3772 	return -EACCES;
3773 }
3774 
3775 /* check read/write into a memory region with possible variable offset */
3776 static int check_mem_region_access(struct bpf_verifier_env *env, u32 regno,
3777 				   int off, int size, u32 mem_size,
3778 				   bool zero_size_allowed)
3779 {
3780 	struct bpf_verifier_state *vstate = env->cur_state;
3781 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
3782 	struct bpf_reg_state *reg = &state->regs[regno];
3783 	int err;
3784 
3785 	/* We may have adjusted the register pointing to memory region, so we
3786 	 * need to try adding each of min_value and max_value to off
3787 	 * to make sure our theoretical access will be safe.
3788 	 *
3789 	 * The minimum value is only important with signed
3790 	 * comparisons where we can't assume the floor of a
3791 	 * value is 0.  If we are using signed variables for our
3792 	 * index'es we need to make sure that whatever we use
3793 	 * will have a set floor within our range.
3794 	 */
3795 	if (reg->smin_value < 0 &&
3796 	    (reg->smin_value == S64_MIN ||
3797 	     (off + reg->smin_value != (s64)(s32)(off + reg->smin_value)) ||
3798 	      reg->smin_value + off < 0)) {
3799 		verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
3800 			regno);
3801 		return -EACCES;
3802 	}
3803 	err = __check_mem_access(env, regno, reg->smin_value + off, size,
3804 				 mem_size, zero_size_allowed);
3805 	if (err) {
3806 		verbose(env, "R%d min value is outside of the allowed memory range\n",
3807 			regno);
3808 		return err;
3809 	}
3810 
3811 	/* If we haven't set a max value then we need to bail since we can't be
3812 	 * sure we won't do bad things.
3813 	 * If reg->umax_value + off could overflow, treat that as unbounded too.
3814 	 */
3815 	if (reg->umax_value >= BPF_MAX_VAR_OFF) {
3816 		verbose(env, "R%d unbounded memory access, make sure to bounds check any such access\n",
3817 			regno);
3818 		return -EACCES;
3819 	}
3820 	err = __check_mem_access(env, regno, reg->umax_value + off, size,
3821 				 mem_size, zero_size_allowed);
3822 	if (err) {
3823 		verbose(env, "R%d max value is outside of the allowed memory range\n",
3824 			regno);
3825 		return err;
3826 	}
3827 
3828 	return 0;
3829 }
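
/* Worked example for the two-sided check above (illustrative only): with a
 * map value of value_size 64 and a value pointer whose variable part is
 * known to be in [0, 60], an 8-byte access at fixed off 0 passes the
 * smin-based check (0 + 0 + 8 <= 64) but fails the umax-based one
 * (60 + 0 + 8 > 64), so it is rejected as being outside the allowed memory
 * range.
 */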
3830 
3831 static int __check_ptr_off_reg(struct bpf_verifier_env *env,
3832 			       const struct bpf_reg_state *reg, int regno,
3833 			       bool fixed_off_ok)
3834 {
3835 	/* Access to this pointer-typed register or passing it to a helper
3836 	 * is only allowed in its original, unmodified form.
3837 	 */
3838 
3839 	if (reg->off < 0) {
3840 		verbose(env, "negative offset %s ptr R%d off=%d disallowed\n",
3841 			reg_type_str(env, reg->type), regno, reg->off);
3842 		return -EACCES;
3843 	}
3844 
3845 	if (!fixed_off_ok && reg->off) {
3846 		verbose(env, "dereference of modified %s ptr R%d off=%d disallowed\n",
3847 			reg_type_str(env, reg->type), regno, reg->off);
3848 		return -EACCES;
3849 	}
3850 
3851 	if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
3852 		char tn_buf[48];
3853 
3854 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3855 		verbose(env, "variable %s access var_off=%s disallowed\n",
3856 			reg_type_str(env, reg->type), tn_buf);
3857 		return -EACCES;
3858 	}
3859 
3860 	return 0;
3861 }
3862 
3863 int check_ptr_off_reg(struct bpf_verifier_env *env,
3864 		      const struct bpf_reg_state *reg, int regno)
3865 {
3866 	return __check_ptr_off_reg(env, reg, regno, false);
3867 }
3868 
3869 static int map_kptr_match_type(struct bpf_verifier_env *env,
3870 			       struct btf_field *kptr_field,
3871 			       struct bpf_reg_state *reg, u32 regno)
3872 {
3873 	const char *targ_name = kernel_type_name(kptr_field->kptr.btf, kptr_field->kptr.btf_id);
3874 	int perm_flags = PTR_MAYBE_NULL | PTR_TRUSTED;
3875 	const char *reg_name = "";
3876 
3877 	/* Only unreferenced case accepts untrusted pointers */
3878 	if (kptr_field->type == BPF_KPTR_UNREF)
3879 		perm_flags |= PTR_UNTRUSTED;
3880 
3881 	if (base_type(reg->type) != PTR_TO_BTF_ID || (type_flag(reg->type) & ~perm_flags))
3882 		goto bad_type;
3883 
3884 	if (!btf_is_kernel(reg->btf)) {
3885 		verbose(env, "R%d must point to kernel BTF\n", regno);
3886 		return -EINVAL;
3887 	}
3888 	/* We need to verify reg->type and reg->btf, before accessing reg->btf */
3889 	reg_name = kernel_type_name(reg->btf, reg->btf_id);
3890 
3891 	/* For ref_ptr case, release function check should ensure we get one
3892 	 * referenced PTR_TO_BTF_ID, and that its fixed offset is 0. For the
3893 	 * normal store of unreferenced kptr, we must ensure var_off is zero.
3894 	 * Since ref_ptr cannot be accessed directly by BPF insns, checks for
3895 	 * reg->off and reg->ref_obj_id are not needed here.
3896 	 */
3897 	if (__check_ptr_off_reg(env, reg, regno, true))
3898 		return -EACCES;
3899 
3900 	/* A full type match is needed, as BTF can be vmlinux or module BTF, and
3901 	 * we also need to take into account the reg->off.
3902 	 *
3903 	 * We want to support cases like:
3904 	 *
3905 	 * struct foo {
3906 	 *         struct bar br;
3907 	 *         struct baz bz;
3908 	 * };
3909 	 *
3910 	 * struct foo *v;
3911 	 * v = func();	      // PTR_TO_BTF_ID
3912 	 * val->foo = v;      // reg->off is zero, btf and btf_id match type
3913 	 * val->bar = &v->br; // reg->off is still zero, but we need to retry with
3914 	 *                    // first member type of struct after comparison fails
3915 	 * val->baz = &v->bz; // reg->off is non-zero, so struct needs to be walked
3916 	 *                    // to match type
3917 	 *
3918 	 * In the kptr_ref case, check_func_arg_reg_off already ensures reg->off
3919 	 * is zero. We must also ensure that btf_struct_ids_match does not walk
3920 	 * the struct to match type against first member of struct, i.e. reject
3921 	 * second case from above. Hence, when type is BPF_KPTR_REF, we set
3922 	 * strict mode to true for type match.
3923 	 */
3924 	if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, reg->off,
3925 				  kptr_field->kptr.btf, kptr_field->kptr.btf_id,
3926 				  kptr_field->type == BPF_KPTR_REF))
3927 		goto bad_type;
3928 	return 0;
3929 bad_type:
3930 	verbose(env, "invalid kptr access, R%d type=%s%s ", regno,
3931 		reg_type_str(env, reg->type), reg_name);
3932 	verbose(env, "expected=%s%s", reg_type_str(env, PTR_TO_BTF_ID), targ_name);
3933 	if (kptr_field->type == BPF_KPTR_UNREF)
3934 		verbose(env, " or %s%s\n", reg_type_str(env, PTR_TO_BTF_ID | PTR_UNTRUSTED),
3935 			targ_name);
3936 	else
3937 		verbose(env, "\n");
3938 	return -EINVAL;
3939 }
3940 
3941 static int check_map_kptr_access(struct bpf_verifier_env *env, u32 regno,
3942 				 int value_regno, int insn_idx,
3943 				 struct btf_field *kptr_field)
3944 {
3945 	struct bpf_insn *insn = &env->prog->insnsi[insn_idx];
3946 	int class = BPF_CLASS(insn->code);
3947 	struct bpf_reg_state *val_reg;
3948 
3949 	/* Things we already checked for in check_map_access and caller:
3950 	 *  - Reject cases where variable offset may touch kptr
3951 	 *  - size of access (must be BPF_DW)
3952 	 *  - tnum_is_const(reg->var_off)
3953 	 *  - kptr_field->offset == off + reg->var_off.value
3954 	 */
3955 	/* Only BPF_[LDX,STX,ST] | BPF_MEM | BPF_DW is supported */
3956 	if (BPF_MODE(insn->code) != BPF_MEM) {
3957 		verbose(env, "kptr in map can only be accessed using BPF_MEM instruction mode\n");
3958 		return -EACCES;
3959 	}
3960 
3961 	/* We only allow loading referenced kptr, since it will be marked as
3962 	 * untrusted, similar to unreferenced kptr.
3963 	 */
3964 	if (class != BPF_LDX && kptr_field->type == BPF_KPTR_REF) {
3965 		verbose(env, "store to referenced kptr disallowed\n");
3966 		return -EACCES;
3967 	}
3968 
3969 	if (class == BPF_LDX) {
3970 		val_reg = reg_state(env, value_regno);
3971 		/* We can simply mark the value_regno receiving the pointer
3972 		 * value from map as PTR_TO_BTF_ID, with the correct type.
3973 		 */
3974 		mark_btf_ld_reg(env, cur_regs(env), value_regno, PTR_TO_BTF_ID, kptr_field->kptr.btf,
3975 				kptr_field->kptr.btf_id, PTR_MAYBE_NULL | PTR_UNTRUSTED);
3976 		/* For mark_ptr_or_null_reg */
3977 		val_reg->id = ++env->id_gen;
3978 	} else if (class == BPF_STX) {
3979 		val_reg = reg_state(env, value_regno);
3980 		if (!register_is_null(val_reg) &&
3981 		    map_kptr_match_type(env, kptr_field, val_reg, value_regno))
3982 			return -EACCES;
3983 	} else if (class == BPF_ST) {
3984 		if (insn->imm) {
3985 			verbose(env, "BPF_ST imm must be 0 when storing to kptr at off=%u\n",
3986 				kptr_field->offset);
3987 			return -EACCES;
3988 		}
3989 	} else {
3990 		verbose(env, "kptr in map can only be accessed using BPF_LDX/BPF_STX/BPF_ST\n");
3991 		return -EACCES;
3992 	}
3993 	return 0;
3994 }
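
/* Accepted forms, sketched (illustrative only), assuming R0 points at a map
 * value with a kptr field at offset 0:
 *    BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
 * loads the kptr and marks R1 as PTR_TO_BTF_ID | PTR_MAYBE_NULL |
 * PTR_UNTRUSTED;
 *    BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
 * stores NULL and is accepted only for an unreferenced kptr and only with
 * imm 0; a non-zero imm or any non-LDX access to a referenced kptr is
 * rejected above.
 */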
3995 
3996 /* check read/write into a map element with possible variable offset */
3997 static int check_map_access(struct bpf_verifier_env *env, u32 regno,
3998 			    int off, int size, bool zero_size_allowed,
3999 			    enum bpf_access_src src)
4000 {
4001 	struct bpf_verifier_state *vstate = env->cur_state;
4002 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
4003 	struct bpf_reg_state *reg = &state->regs[regno];
4004 	struct bpf_map *map = reg->map_ptr;
4005 	struct btf_record *rec;
4006 	int err, i;
4007 
4008 	err = check_mem_region_access(env, regno, off, size, map->value_size,
4009 				      zero_size_allowed);
4010 	if (err)
4011 		return err;
4012 
4013 	if (IS_ERR_OR_NULL(map->record))
4014 		return 0;
4015 	rec = map->record;
4016 	for (i = 0; i < rec->cnt; i++) {
4017 		struct btf_field *field = &rec->fields[i];
4018 		u32 p = field->offset;
4019 
4020 		/* If any part of a field can be touched by load/store, reject
4021 		 * this program. To check that [x1, x2) overlaps with [y1, y2),
4022 		 * it is sufficient to check x1 < y2 && y1 < x2.
4023 		 */
4024 		if (reg->smin_value + off < p + btf_field_type_size(field->type) &&
4025 		    p < reg->umax_value + off + size) {
4026 			switch (field->type) {
4027 			case BPF_KPTR_UNREF:
4028 			case BPF_KPTR_REF:
4029 				if (src != ACCESS_DIRECT) {
4030 					verbose(env, "kptr cannot be accessed indirectly by helper\n");
4031 					return -EACCES;
4032 				}
4033 				if (!tnum_is_const(reg->var_off)) {
4034 					verbose(env, "kptr access cannot have variable offset\n");
4035 					return -EACCES;
4036 				}
4037 				if (p != off + reg->var_off.value) {
4038 					verbose(env, "kptr access misaligned expected=%u off=%llu\n",
4039 						p, off + reg->var_off.value);
4040 					return -EACCES;
4041 				}
4042 				if (size != bpf_size_to_bytes(BPF_DW)) {
4043 					verbose(env, "kptr access size must be BPF_DW\n");
4044 					return -EACCES;
4045 				}
4046 				break;
4047 			default:
4048 				verbose(env, "%s cannot be accessed directly by load/store\n",
4049 					btf_field_type_name(field->type));
4050 				return -EACCES;
4051 			}
4052 		}
4053 	}
4054 	return 0;
4055 }
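
/* Overlap sketch for the field check above (illustrative only): with a kptr
 * field at offset 16 (8 bytes wide), an 8-byte access whose lowest possible
 * offset is 12 satisfies 12 < 16 + 8 and 16 < 12 + 8, so it overlaps the
 * field and is rejected unless it is an exact, constant-offset BPF_DW
 * access at offset 16 that passes the kptr-specific checks.
 */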
4056 
4057 #define MAX_PACKET_OFF 0xffff
4058 
4059 static bool may_access_direct_pkt_data(struct bpf_verifier_env *env,
4060 				       const struct bpf_call_arg_meta *meta,
4061 				       enum bpf_access_type t)
4062 {
4063 	enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
4064 
4065 	switch (prog_type) {
4066 	/* Program types only with direct read access go here! */
4067 	case BPF_PROG_TYPE_LWT_IN:
4068 	case BPF_PROG_TYPE_LWT_OUT:
4069 	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
4070 	case BPF_PROG_TYPE_SK_REUSEPORT:
4071 	case BPF_PROG_TYPE_FLOW_DISSECTOR:
4072 	case BPF_PROG_TYPE_CGROUP_SKB:
4073 		if (t == BPF_WRITE)
4074 			return false;
4075 		fallthrough;
4076 
4077 	/* Program types with direct read + write access go here! */
4078 	case BPF_PROG_TYPE_SCHED_CLS:
4079 	case BPF_PROG_TYPE_SCHED_ACT:
4080 	case BPF_PROG_TYPE_XDP:
4081 	case BPF_PROG_TYPE_LWT_XMIT:
4082 	case BPF_PROG_TYPE_SK_SKB:
4083 	case BPF_PROG_TYPE_SK_MSG:
4084 		if (meta)
4085 			return meta->pkt_access;
4086 
4087 		env->seen_direct_write = true;
4088 		return true;
4089 
4090 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
4091 		if (t == BPF_WRITE)
4092 			env->seen_direct_write = true;
4093 
4094 		return true;
4095 
4096 	default:
4097 		return false;
4098 	}
4099 }
4100 
4101 static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
4102 			       int size, bool zero_size_allowed)
4103 {
4104 	struct bpf_reg_state *regs = cur_regs(env);
4105 	struct bpf_reg_state *reg = &regs[regno];
4106 	int err;
4107 
4108 	/* We may have added a variable offset to the packet pointer; but any
4109 	 * reg->range we have comes after that.  We are only checking the fixed
4110 	 * offset.
4111 	 */
4112 
4113 	/* We don't allow negative numbers, because we aren't tracking enough
4114 	 * detail to prove they're safe.
4115 	 */
4116 	if (reg->smin_value < 0) {
4117 		verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
4118 			regno);
4119 		return -EACCES;
4120 	}
4121 
4122 	err = reg->range < 0 ? -EINVAL :
4123 	      __check_mem_access(env, regno, off, size, reg->range,
4124 				 zero_size_allowed);
4125 	if (err) {
4126 		verbose(env, "R%d offset is outside of the packet\n", regno);
4127 		return err;
4128 	}
4129 
4130 	/* __check_mem_access has made sure "off + size - 1" is within u16.
4131 	 * reg->umax_value can't be bigger than MAX_PACKET_OFF which is 0xffff,
4132 	 * otherwise find_good_pkt_pointers would have refused to set the range
4133 	 * info and __check_mem_access would have rejected this pkt access.
4134 	 * Therefore, "off + reg->umax_value + size - 1" won't overflow u32.
4135 	 */
4136 	env->prog->aux->max_pkt_offset =
4137 		max_t(u32, env->prog->aux->max_pkt_offset,
4138 		      off + reg->umax_value + size - 1);
4139 
4140 	return err;
4141 }
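
/* Typical sequence that gives a packet pointer a non-zero reg->range before
 * this check (illustrative only, XDP flavour):
 *    BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
 *    BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data_end)),
 *    BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 14),
 *    BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, <exit>),
 * On the fall-through path find_good_pkt_pointers() sets range = 14 on R2,
 * so a 1-byte load at off 13 passes __check_mem_access() here while the
 * same load at off 14 is rejected.
 */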
4142 
4143 /* check access to 'struct bpf_context' fields.  Supports fixed offsets only */
4144 static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size,
4145 			    enum bpf_access_type t, enum bpf_reg_type *reg_type,
4146 			    struct btf **btf, u32 *btf_id)
4147 {
4148 	struct bpf_insn_access_aux info = {
4149 		.reg_type = *reg_type,
4150 		.log = &env->log,
4151 	};
4152 
4153 	if (env->ops->is_valid_access &&
4154 	    env->ops->is_valid_access(off, size, t, env->prog, &info)) {
4155 		/* A non zero info.ctx_field_size indicates that this field is a
4156 		 * candidate for later verifier transformation to load the whole
4157 		 * field and then apply a mask when accessed with a narrower
4158 		 * access than actual ctx access size. A zero info.ctx_field_size
4159 		 * will only allow for whole field access and rejects any other
4160 		 * type of narrower access.
4161 		 */
4162 		*reg_type = info.reg_type;
4163 
4164 		if (base_type(*reg_type) == PTR_TO_BTF_ID) {
4165 			*btf = info.btf;
4166 			*btf_id = info.btf_id;
4167 		} else {
4168 			env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
4169 		}
4170 		/* remember the offset of last byte accessed in ctx */
4171 		if (env->prog->aux->max_ctx_offset < off + size)
4172 			env->prog->aux->max_ctx_offset = off + size;
4173 		return 0;
4174 	}
4175 
4176 	verbose(env, "invalid bpf_context access off=%d size=%d\n", off, size);
4177 	return -EACCES;
4178 }
4179 
4180 static int check_flow_keys_access(struct bpf_verifier_env *env, int off,
4181 				  int size)
4182 {
4183 	if (size < 0 || off < 0 ||
4184 	    (u64)off + size > sizeof(struct bpf_flow_keys)) {
4185 		verbose(env, "invalid access to flow keys off=%d size=%d\n",
4186 			off, size);
4187 		return -EACCES;
4188 	}
4189 	return 0;
4190 }
4191 
4192 static int check_sock_access(struct bpf_verifier_env *env, int insn_idx,
4193 			     u32 regno, int off, int size,
4194 			     enum bpf_access_type t)
4195 {
4196 	struct bpf_reg_state *regs = cur_regs(env);
4197 	struct bpf_reg_state *reg = &regs[regno];
4198 	struct bpf_insn_access_aux info = {};
4199 	bool valid;
4200 
4201 	if (reg->smin_value < 0) {
4202 		verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
4203 			regno);
4204 		return -EACCES;
4205 	}
4206 
4207 	switch (reg->type) {
4208 	case PTR_TO_SOCK_COMMON:
4209 		valid = bpf_sock_common_is_valid_access(off, size, t, &info);
4210 		break;
4211 	case PTR_TO_SOCKET:
4212 		valid = bpf_sock_is_valid_access(off, size, t, &info);
4213 		break;
4214 	case PTR_TO_TCP_SOCK:
4215 		valid = bpf_tcp_sock_is_valid_access(off, size, t, &info);
4216 		break;
4217 	case PTR_TO_XDP_SOCK:
4218 		valid = bpf_xdp_sock_is_valid_access(off, size, t, &info);
4219 		break;
4220 	default:
4221 		valid = false;
4222 	}
4223 
4224 
4225 	if (valid) {
4226 		env->insn_aux_data[insn_idx].ctx_field_size =
4227 			info.ctx_field_size;
4228 		return 0;
4229 	}
4230 
4231 	verbose(env, "R%d invalid %s access off=%d size=%d\n",
4232 		regno, reg_type_str(env, reg->type), off, size);
4233 
4234 	return -EACCES;
4235 }
4236 
4237 static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
4238 {
4239 	return __is_pointer_value(env->allow_ptr_leaks, reg_state(env, regno));
4240 }
4241 
4242 static bool is_ctx_reg(struct bpf_verifier_env *env, int regno)
4243 {
4244 	const struct bpf_reg_state *reg = reg_state(env, regno);
4245 
4246 	return reg->type == PTR_TO_CTX;
4247 }
4248 
4249 static bool is_sk_reg(struct bpf_verifier_env *env, int regno)
4250 {
4251 	const struct bpf_reg_state *reg = reg_state(env, regno);
4252 
4253 	return type_is_sk_pointer(reg->type);
4254 }
4255 
4256 static bool is_pkt_reg(struct bpf_verifier_env *env, int regno)
4257 {
4258 	const struct bpf_reg_state *reg = reg_state(env, regno);
4259 
4260 	return type_is_pkt_pointer(reg->type);
4261 }
4262 
4263 static bool is_flow_key_reg(struct bpf_verifier_env *env, int regno)
4264 {
4265 	const struct bpf_reg_state *reg = reg_state(env, regno);
4266 
4267 	/* Kept separate from is_ctx_reg() since we still want to allow BPF_ST here. */
4268 	return reg->type == PTR_TO_FLOW_KEYS;
4269 }
4270 
4271 static bool is_trusted_reg(const struct bpf_reg_state *reg)
4272 {
4273 	/* A referenced register is always trusted. */
4274 	if (reg->ref_obj_id)
4275 		return true;
4276 
4277 	/* If a register is not referenced, it is trusted if it has the
4278 	 * MEM_ALLOC, MEM_RCU or PTR_TRUSTED type modifiers, and no others. Some of the
4279 	 * other type modifiers may be safe, but we elect to take an opt-in
4280 	 * approach here as some (e.g. PTR_UNTRUSTED and PTR_MAYBE_NULL) are
4281 	 * not.
4282 	 *
4283 	 * Eventually, we should make PTR_TRUSTED the single source of truth
4284 	 * for whether a register is trusted.
4285 	 */
4286 	return type_flag(reg->type) & BPF_REG_TRUSTED_MODIFIERS &&
4287 	       !bpf_type_has_unsafe_modifiers(reg->type);
4288 }
4289 
4290 static int check_pkt_ptr_alignment(struct bpf_verifier_env *env,
4291 				   const struct bpf_reg_state *reg,
4292 				   int off, int size, bool strict)
4293 {
4294 	struct tnum reg_off;
4295 	int ip_align;
4296 
4297 	/* Byte size accesses are always allowed. */
4298 	if (!strict || size == 1)
4299 		return 0;
4300 
4301 	/* For platforms that do not have a Kconfig enabling
4302 	 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the value of
4303 	 * NET_IP_ALIGN is universally set to '2'.  And on platforms
4304 	 * that do set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, we get
4305 	 * to this code only in strict mode where we want to emulate
4306 	 * the NET_IP_ALIGN==2 checking.  Therefore use an
4307 	 * unconditional IP align value of '2'.
4308 	 */
4309 	ip_align = 2;
4310 
4311 	reg_off = tnum_add(reg->var_off, tnum_const(ip_align + reg->off + off));
4312 	if (!tnum_is_aligned(reg_off, size)) {
4313 		char tn_buf[48];
4314 
4315 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
4316 		verbose(env,
4317 			"misaligned packet access off %d+%s+%d+%d size %d\n",
4318 			ip_align, tn_buf, reg->off, off, size);
4319 		return -EACCES;
4320 	}
4321 
4322 	return 0;
4323 }
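
/* Worked numbers for the strict case above: with reg->off 0 and a constant
 * var_off of 0, a 2-byte access at off 14 gives reg_off = 2 + 0 + 14 = 16,
 * which is 2-byte aligned and passes; a 4-byte access at off 12 gives
 * 2 + 0 + 12 = 14, which is not 4-byte aligned and is rejected.
 */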
4324 
4325 static int check_generic_ptr_alignment(struct bpf_verifier_env *env,
4326 				       const struct bpf_reg_state *reg,
4327 				       const char *pointer_desc,
4328 				       int off, int size, bool strict)
4329 {
4330 	struct tnum reg_off;
4331 
4332 	/* Byte size accesses are always allowed. */
4333 	if (!strict || size == 1)
4334 		return 0;
4335 
4336 	reg_off = tnum_add(reg->var_off, tnum_const(reg->off + off));
4337 	if (!tnum_is_aligned(reg_off, size)) {
4338 		char tn_buf[48];
4339 
4340 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
4341 		verbose(env, "misaligned %saccess off %s+%d+%d size %d\n",
4342 			pointer_desc, tn_buf, reg->off, off, size);
4343 		return -EACCES;
4344 	}
4345 
4346 	return 0;
4347 }
4348 
4349 static int check_ptr_alignment(struct bpf_verifier_env *env,
4350 			       const struct bpf_reg_state *reg, int off,
4351 			       int size, bool strict_alignment_once)
4352 {
4353 	bool strict = env->strict_alignment || strict_alignment_once;
4354 	const char *pointer_desc = "";
4355 
4356 	switch (reg->type) {
4357 	case PTR_TO_PACKET:
4358 	case PTR_TO_PACKET_META:
4359 		/* Special case, because of NET_IP_ALIGN. Given metadata sits
4360 		 * right in front, treat it the very same way.
4361 		 */
4362 		return check_pkt_ptr_alignment(env, reg, off, size, strict);
4363 	case PTR_TO_FLOW_KEYS:
4364 		pointer_desc = "flow keys ";
4365 		break;
4366 	case PTR_TO_MAP_KEY:
4367 		pointer_desc = "key ";
4368 		break;
4369 	case PTR_TO_MAP_VALUE:
4370 		pointer_desc = "value ";
4371 		break;
4372 	case PTR_TO_CTX:
4373 		pointer_desc = "context ";
4374 		break;
4375 	case PTR_TO_STACK:
4376 		pointer_desc = "stack ";
4377 		/* The stack spill tracking logic in check_stack_write_fixed_off()
4378 		 * and check_stack_read_fixed_off() relies on stack accesses being
4379 		 * aligned.
4380 		 */
4381 		strict = true;
4382 		break;
4383 	case PTR_TO_SOCKET:
4384 		pointer_desc = "sock ";
4385 		break;
4386 	case PTR_TO_SOCK_COMMON:
4387 		pointer_desc = "sock_common ";
4388 		break;
4389 	case PTR_TO_TCP_SOCK:
4390 		pointer_desc = "tcp_sock ";
4391 		break;
4392 	case PTR_TO_XDP_SOCK:
4393 		pointer_desc = "xdp_sock ";
4394 		break;
4395 	default:
4396 		break;
4397 	}
4398 	return check_generic_ptr_alignment(env, reg, pointer_desc, off, size,
4399 					   strict);
4400 }
4401 
4402 static int update_stack_depth(struct bpf_verifier_env *env,
4403 			      const struct bpf_func_state *func,
4404 			      int off)
4405 {
4406 	u16 stack = env->subprog_info[func->subprogno].stack_depth;
4407 
4408 	if (stack >= -off)
4409 		return 0;
4410 
4411 	/* update known max for given subprogram */
4412 	env->subprog_info[func->subprogno].stack_depth = -off;
4413 	return 0;
4414 }
4415 
4416 /* starting from main bpf function walk all instructions of the function
4417  * and recursively walk all callees that given function can call.
4418  * Ignore jump and exit insns.
4419  * Since recursion is prevented by check_cfg() this algorithm
4420  * only needs a local stack of MAX_CALL_FRAMES to remember callsites
4421  */
4422 static int check_max_stack_depth(struct bpf_verifier_env *env)
4423 {
4424 	int depth = 0, frame = 0, idx = 0, i = 0, subprog_end;
4425 	struct bpf_subprog_info *subprog = env->subprog_info;
4426 	struct bpf_insn *insn = env->prog->insnsi;
4427 	bool tail_call_reachable = false;
4428 	int ret_insn[MAX_CALL_FRAMES];
4429 	int ret_prog[MAX_CALL_FRAMES];
4430 	int j;
4431 
4432 process_func:
4433 	/* protect against potential stack overflow that might happen when
4434 	 * bpf2bpf calls get combined with tailcalls. Limit the caller's stack
4435 	 * depth for such a case down to 256 so that the worst case scenario
4436 	 * would result in an 8k stack size (32, which is the tailcall limit,
4437 	 * times 256 = 8k).
4438 	 *
4439 	 * To get the idea what might happen, see an example:
4440 	 * func1 -> sub rsp, 128
4441 	 *  subfunc1 -> sub rsp, 256
4442 	 *  tailcall1 -> add rsp, 256
4443 	 *   func2 -> sub rsp, 192 (total stack size = 128 + 192 = 320)
4444 	 *   subfunc2 -> sub rsp, 64
4445 	 *   subfunc22 -> sub rsp, 128
4446 	 *   tailcall2 -> add rsp, 128
4447 	 *    func3 -> sub rsp, 32 (total stack size 128 + 192 + 64 + 32 = 416)
4448 	 *
4449 	 * tailcall will unwind the current stack frame but it will not get rid
4450 	 * of caller's stack as shown on the example above.
4451 	 */
4452 	if (idx && subprog[idx].has_tail_call && depth >= 256) {
4453 		verbose(env,
4454 			"tail_calls are not allowed when call stack of previous frames is %d bytes. Too large\n",
4455 			depth);
4456 		return -EACCES;
4457 	}
4458 	/* round up to 32 bytes, since this is the granularity
4459 	 * of the interpreter stack size
4460 	 */
4461 	depth += round_up(max_t(u32, subprog[idx].stack_depth, 1), 32);
4462 	if (depth > MAX_BPF_STACK) {
4463 		verbose(env, "combined stack size of %d calls is %d. Too large\n",
4464 			frame + 1, depth);
4465 		return -EACCES;
4466 	}
4467 continue_func:
4468 	subprog_end = subprog[idx + 1].start;
4469 	for (; i < subprog_end; i++) {
4470 		int next_insn;
4471 
4472 		if (!bpf_pseudo_call(insn + i) && !bpf_pseudo_func(insn + i))
4473 			continue;
4474 		/* remember insn and function to return to */
4475 		ret_insn[frame] = i + 1;
4476 		ret_prog[frame] = idx;
4477 
4478 		/* find the callee */
4479 		next_insn = i + insn[i].imm + 1;
4480 		idx = find_subprog(env, next_insn);
4481 		if (idx < 0) {
4482 			WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
4483 				  next_insn);
4484 			return -EFAULT;
4485 		}
4486 		if (subprog[idx].is_async_cb) {
4487 			if (subprog[idx].has_tail_call) {
4488 				verbose(env, "verifier bug. subprog has tail_call and async cb\n");
4489 				return -EFAULT;
4490 			}
4491 			 /* async callbacks don't increase bpf prog stack size */
4492 			continue;
4493 		}
4494 		i = next_insn;
4495 
4496 		if (subprog[idx].has_tail_call)
4497 			tail_call_reachable = true;
4498 
4499 		frame++;
4500 		if (frame >= MAX_CALL_FRAMES) {
4501 			verbose(env, "the call stack of %d frames is too deep !\n",
4502 				frame);
4503 			return -E2BIG;
4504 		}
4505 		goto process_func;
4506 	}
4507 	/* if tail call got detected across bpf2bpf calls then mark each of the
4508 	 * currently present subprog frames as tail call reachable subprogs;
4509 	 * this info will be utilized by JIT so that we will be preserving the
4510 	 * tail call counter throughout bpf2bpf calls combined with tailcalls
4511 	 */
4512 	if (tail_call_reachable)
4513 		for (j = 0; j < frame; j++)
4514 			subprog[ret_prog[j]].tail_call_reachable = true;
4515 	if (subprog[0].tail_call_reachable)
4516 		env->prog->aux->tail_call_reachable = true;
4517 
4518 	/* end of for() loop means the last insn of the 'subprog'
4519 	 * was reached. Doesn't matter whether it was JA or EXIT
4520 	 */
4521 	if (frame == 0)
4522 		return 0;
4523 	depth -= round_up(max_t(u32, subprog[idx].stack_depth, 1), 32);
4524 	frame--;
4525 	i = ret_insn[frame];
4526 	idx = ret_prog[frame];
4527 	goto continue_func;
4528 }
4529 
4530 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
4531 static int get_callee_stack_depth(struct bpf_verifier_env *env,
4532 				  const struct bpf_insn *insn, int idx)
4533 {
4534 	int start = idx + insn->imm + 1, subprog;
4535 
4536 	subprog = find_subprog(env, start);
4537 	if (subprog < 0) {
4538 		WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
4539 			  start);
4540 		return -EFAULT;
4541 	}
4542 	return env->subprog_info[subprog].stack_depth;
4543 }
4544 #endif
4545 
4546 static int __check_buffer_access(struct bpf_verifier_env *env,
4547 				 const char *buf_info,
4548 				 const struct bpf_reg_state *reg,
4549 				 int regno, int off, int size)
4550 {
4551 	if (off < 0) {
4552 		verbose(env,
4553 			"R%d invalid %s buffer access: off=%d, size=%d\n",
4554 			regno, buf_info, off, size);
4555 		return -EACCES;
4556 	}
4557 	if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
4558 		char tn_buf[48];
4559 
4560 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
4561 		verbose(env,
4562 			"R%d invalid variable buffer offset: off=%d, var_off=%s\n",
4563 			regno, off, tn_buf);
4564 		return -EACCES;
4565 	}
4566 
4567 	return 0;
4568 }
4569 
4570 static int check_tp_buffer_access(struct bpf_verifier_env *env,
4571 				  const struct bpf_reg_state *reg,
4572 				  int regno, int off, int size)
4573 {
4574 	int err;
4575 
4576 	err = __check_buffer_access(env, "tracepoint", reg, regno, off, size);
4577 	if (err)
4578 		return err;
4579 
4580 	if (off + size > env->prog->aux->max_tp_access)
4581 		env->prog->aux->max_tp_access = off + size;
4582 
4583 	return 0;
4584 }
4585 
4586 static int check_buffer_access(struct bpf_verifier_env *env,
4587 			       const struct bpf_reg_state *reg,
4588 			       int regno, int off, int size,
4589 			       bool zero_size_allowed,
4590 			       u32 *max_access)
4591 {
4592 	const char *buf_info = type_is_rdonly_mem(reg->type) ? "rdonly" : "rdwr";
4593 	int err;
4594 
4595 	err = __check_buffer_access(env, buf_info, reg, regno, off, size);
4596 	if (err)
4597 		return err;
4598 
4599 	if (off + size > *max_access)
4600 		*max_access = off + size;
4601 
4602 	return 0;
4603 }
4604 
4605 /* BPF architecture zero extends alu32 ops into 64-bit registers */
4606 static void zext_32_to_64(struct bpf_reg_state *reg)
4607 {
4608 	reg->var_off = tnum_subreg(reg->var_off);
4609 	__reg_assign_32_into_64(reg);
4610 }
4611 
4612 /* truncate register to smaller size (in bytes)
4613  * must be called with size < BPF_REG_SIZE
4614  */
4615 static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
4616 {
4617 	u64 mask;
4618 
4619 	/* clear high bits in bit representation */
4620 	reg->var_off = tnum_cast(reg->var_off, size);
4621 
4622 	/* fix arithmetic bounds */
4623 	mask = ((u64)1 << (size * 8)) - 1;
4624 	if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) {
4625 		reg->umin_value &= mask;
4626 		reg->umax_value &= mask;
4627 	} else {
4628 		reg->umin_value = 0;
4629 		reg->umax_value = mask;
4630 	}
4631 	reg->smin_value = reg->umin_value;
4632 	reg->smax_value = reg->umax_value;
4633 
4634 	/* If size is smaller than 32bit register the 32bit register
4635 	 * values are also truncated so we push 64-bit bounds into
4636 	 * 32-bit bounds. Above were truncated < 32-bits already.
4637 	 */
4638 	if (size >= 4)
4639 		return;
4640 	__reg_combine_64_into_32(reg);
4641 }
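
/* Worked example for the bounds fix-up above: truncating to size 1
 * (mask 0xff), a register with [umin, umax] = [0x101, 0x10f] keeps matching
 * high bits, so the range becomes [0x01, 0x0f]; with [0xff, 0x100] the high
 * bits differ and the range widens to the full [0x00, 0xff].
 */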
4642 
4643 static bool bpf_map_is_rdonly(const struct bpf_map *map)
4644 {
4645 	/* A map is considered read-only if the following conditions are true:
4646 	 *
4647 	 * 1) BPF program side cannot change any of the map content. The
4648 	 *    BPF_F_RDONLY_PROG flag is set at map creation time and stays
4649 	 *    in effect throughout the map's lifetime.
4650 	 * 2) The map value(s) have been initialized from user space by a
4651 	 *    loader and then "frozen", such that no new map update/delete
4652 	 *    operations from syscall side are possible for the rest of
4653 	 *    the map's lifetime from that point onwards.
4654 	 * 3) Any parallel/pending map update/delete operations from syscall
4655 	 *    side have been completed. Only after that point, it's safe to
4656 	 *    assume that map value(s) are immutable.
4657 	 */
4658 	return (map->map_flags & BPF_F_RDONLY_PROG) &&
4659 	       READ_ONCE(map->frozen) &&
4660 	       !bpf_map_write_active(map);
4661 }
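
/* A map typically reaches this state when it is created with
 * BPF_F_RDONLY_PROG, populated from user space and then frozen via the
 * BPF_MAP_FREEZE command of the bpf(2) syscall; once the last pending
 * writer has finished, bpf_map_direct_read() below may treat its values as
 * constants.
 */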
4662 
4663 static int bpf_map_direct_read(struct bpf_map *map, int off, int size, u64 *val)
4664 {
4665 	void *ptr;
4666 	u64 addr;
4667 	int err;
4668 
4669 	err = map->ops->map_direct_value_addr(map, &addr, off);
4670 	if (err)
4671 		return err;
4672 	ptr = (void *)(long)addr + off;
4673 
4674 	switch (size) {
4675 	case sizeof(u8):
4676 		*val = (u64)*(u8 *)ptr;
4677 		break;
4678 	case sizeof(u16):
4679 		*val = (u64)*(u16 *)ptr;
4680 		break;
4681 	case sizeof(u32):
4682 		*val = (u64)*(u32 *)ptr;
4683 		break;
4684 	case sizeof(u64):
4685 		*val = *(u64 *)ptr;
4686 		break;
4687 	default:
4688 		return -EINVAL;
4689 	}
4690 	return 0;
4691 }
4692 
4693 static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
4694 				   struct bpf_reg_state *regs,
4695 				   int regno, int off, int size,
4696 				   enum bpf_access_type atype,
4697 				   int value_regno)
4698 {
4699 	struct bpf_reg_state *reg = regs + regno;
4700 	const struct btf_type *t = btf_type_by_id(reg->btf, reg->btf_id);
4701 	const char *tname = btf_name_by_offset(reg->btf, t->name_off);
4702 	enum bpf_type_flag flag = 0;
4703 	u32 btf_id;
4704 	int ret;
4705 
4706 	if (off < 0) {
4707 		verbose(env,
4708 			"R%d is ptr_%s invalid negative access: off=%d\n",
4709 			regno, tname, off);
4710 		return -EACCES;
4711 	}
4712 	if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
4713 		char tn_buf[48];
4714 
4715 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
4716 		verbose(env,
4717 			"R%d is ptr_%s invalid variable offset: off=%d, var_off=%s\n",
4718 			regno, tname, off, tn_buf);
4719 		return -EACCES;
4720 	}
4721 
4722 	if (reg->type & MEM_USER) {
4723 		verbose(env,
4724 			"R%d is ptr_%s access user memory: off=%d\n",
4725 			regno, tname, off);
4726 		return -EACCES;
4727 	}
4728 
4729 	if (reg->type & MEM_PERCPU) {
4730 		verbose(env,
4731 			"R%d is ptr_%s access percpu memory: off=%d\n",
4732 			regno, tname, off);
4733 		return -EACCES;
4734 	}
4735 
4736 	if (env->ops->btf_struct_access && !type_is_alloc(reg->type)) {
4737 		if (!btf_is_kernel(reg->btf)) {
4738 			verbose(env, "verifier internal error: reg->btf must be kernel btf\n");
4739 			return -EFAULT;
4740 		}
4741 		ret = env->ops->btf_struct_access(&env->log, reg, off, size, atype, &btf_id, &flag);
4742 	} else {
4743 		/* Writes are permitted with default btf_struct_access for
4744 		 * program allocated objects (which always have ref_obj_id > 0),
4745 		 * but not for untrusted PTR_TO_BTF_ID | MEM_ALLOC.
4746 		 */
4747 		if (atype != BPF_READ && reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) {
4748 			verbose(env, "only read is supported\n");
4749 			return -EACCES;
4750 		}
4751 
4752 		if (type_is_alloc(reg->type) && !reg->ref_obj_id) {
4753 			verbose(env, "verifier internal error: ref_obj_id for allocated object must be non-zero\n");
4754 			return -EFAULT;
4755 		}
4756 
4757 		ret = btf_struct_access(&env->log, reg, off, size, atype, &btf_id, &flag);
4758 	}
4759 
4760 	if (ret < 0)
4761 		return ret;
4762 
4763 	/* If this is an untrusted pointer, all pointers formed by walking it
4764 	 * also inherit the untrusted flag.
4765 	 */
4766 	if (type_flag(reg->type) & PTR_UNTRUSTED)
4767 		flag |= PTR_UNTRUSTED;
4768 
4769 	/* By default any pointer obtained from walking a trusted pointer is
4770 	 * no longer trusted except the rcu case below.
4771 	 */
4772 	flag &= ~PTR_TRUSTED;
4773 
4774 	if (flag & MEM_RCU) {
4775 		/* Mark value register as MEM_RCU only if it is protected by
4776 		 * bpf_rcu_read_lock() and the ptr reg is trusted. MEM_RCU
4777 		 * itself can already indicate trustedness inside the rcu
4778 		 * read lock region. Also mark it as PTR_TRUSTED.
4779 		 */
4780 		if (!env->cur_state->active_rcu_lock || !is_trusted_reg(reg))
4781 			flag &= ~MEM_RCU;
4782 		else
4783 			flag |= PTR_TRUSTED;
4784 	} else if (reg->type & MEM_RCU) {
4785 		/* ptr (reg) is marked as MEM_RCU, but the struct field is not tagged
4786 		 * with __rcu. Mark the flag as PTR_UNTRUSTED conservatively.
4787 		 */
4788 		flag |= PTR_UNTRUSTED;
4789 	}
4790 
4791 	if (atype == BPF_READ && value_regno >= 0)
4792 		mark_btf_ld_reg(env, regs, value_regno, ret, reg->btf, btf_id, flag);
4793 
4794 	return 0;
4795 }
4796 
4797 static int check_ptr_to_map_access(struct bpf_verifier_env *env,
4798 				   struct bpf_reg_state *regs,
4799 				   int regno, int off, int size,
4800 				   enum bpf_access_type atype,
4801 				   int value_regno)
4802 {
4803 	struct bpf_reg_state *reg = regs + regno;
4804 	struct bpf_map *map = reg->map_ptr;
4805 	struct bpf_reg_state map_reg;
4806 	enum bpf_type_flag flag = 0;
4807 	const struct btf_type *t;
4808 	const char *tname;
4809 	u32 btf_id;
4810 	int ret;
4811 
4812 	if (!btf_vmlinux) {
4813 		verbose(env, "map_ptr access not supported without CONFIG_DEBUG_INFO_BTF\n");
4814 		return -ENOTSUPP;
4815 	}
4816 
4817 	if (!map->ops->map_btf_id || !*map->ops->map_btf_id) {
4818 		verbose(env, "map_ptr access not supported for map type %d\n",
4819 			map->map_type);
4820 		return -ENOTSUPP;
4821 	}
4822 
4823 	t = btf_type_by_id(btf_vmlinux, *map->ops->map_btf_id);
4824 	tname = btf_name_by_offset(btf_vmlinux, t->name_off);
4825 
4826 	if (!env->allow_ptr_to_map_access) {
4827 		verbose(env,
4828 			"%s access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN\n",
4829 			tname);
4830 		return -EPERM;
4831 	}
4832 
4833 	if (off < 0) {
4834 		verbose(env, "R%d is %s invalid negative access: off=%d\n",
4835 			regno, tname, off);
4836 		return -EACCES;
4837 	}
4838 
4839 	if (atype != BPF_READ) {
4840 		verbose(env, "only read from %s is supported\n", tname);
4841 		return -EACCES;
4842 	}
4843 
4844 	/* Simulate access to a PTR_TO_BTF_ID */
4845 	memset(&map_reg, 0, sizeof(map_reg));
4846 	mark_btf_ld_reg(env, &map_reg, 0, PTR_TO_BTF_ID, btf_vmlinux, *map->ops->map_btf_id, 0);
4847 	ret = btf_struct_access(&env->log, &map_reg, off, size, atype, &btf_id, &flag);
4848 	if (ret < 0)
4849 		return ret;
4850 
4851 	if (value_regno >= 0)
4852 		mark_btf_ld_reg(env, regs, value_regno, ret, btf_vmlinux, btf_id, flag);
4853 
4854 	return 0;
4855 }
4856 
4857 /* Check that the stack access at the given offset is within bounds. The
4858  * maximum valid offset is -1.
4859  *
4860  * The minimum valid offset is -MAX_BPF_STACK for writes, and
4861  * -state->allocated_stack for reads.
4862  */
4863 static int check_stack_slot_within_bounds(int off,
4864 					  struct bpf_func_state *state,
4865 					  enum bpf_access_type t)
4866 {
4867 	int min_valid_off;
4868 
4869 	if (t == BPF_WRITE)
4870 		min_valid_off = -MAX_BPF_STACK;
4871 	else
4872 		min_valid_off = -state->allocated_stack;
4873 
4874 	if (off < min_valid_off || off > -1)
4875 		return -EACCES;
4876 	return 0;
4877 }
4878 
4879 /* Check that the stack access at 'regno + off' falls within the maximum stack
4880  * bounds.
4881  *
4882  * 'off' includes `regno->offset`, but not its dynamic part (if any).
4883  */
4884 static int check_stack_access_within_bounds(
4885 		struct bpf_verifier_env *env,
4886 		int regno, int off, int access_size,
4887 		enum bpf_access_src src, enum bpf_access_type type)
4888 {
4889 	struct bpf_reg_state *regs = cur_regs(env);
4890 	struct bpf_reg_state *reg = regs + regno;
4891 	struct bpf_func_state *state = func(env, reg);
4892 	int min_off, max_off;
4893 	int err;
4894 	char *err_extra;
4895 
4896 	if (src == ACCESS_HELPER)
4897 		/* We don't know if helpers are reading or writing (or both). */
4898 		err_extra = " indirect access to";
4899 	else if (type == BPF_READ)
4900 		err_extra = " read from";
4901 	else
4902 		err_extra = " write to";
4903 
4904 	if (tnum_is_const(reg->var_off)) {
4905 		min_off = reg->var_off.value + off;
4906 		if (access_size > 0)
4907 			max_off = min_off + access_size - 1;
4908 		else
4909 			max_off = min_off;
4910 	} else {
4911 		if (reg->smax_value >= BPF_MAX_VAR_OFF ||
4912 		    reg->smin_value <= -BPF_MAX_VAR_OFF) {
4913 			verbose(env, "invalid unbounded variable-offset%s stack R%d\n",
4914 				err_extra, regno);
4915 			return -EACCES;
4916 		}
4917 		min_off = reg->smin_value + off;
4918 		if (access_size > 0)
4919 			max_off = reg->smax_value + off + access_size - 1;
4920 		else
4921 			max_off = min_off;
4922 	}
4923 
4924 	err = check_stack_slot_within_bounds(min_off, state, type);
4925 	if (!err)
4926 		err = check_stack_slot_within_bounds(max_off, state, type);
4927 
4928 	if (err) {
4929 		if (tnum_is_const(reg->var_off)) {
4930 			verbose(env, "invalid%s stack R%d off=%d size=%d\n",
4931 				err_extra, regno, off, access_size);
4932 		} else {
4933 			char tn_buf[48];
4934 
4935 			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
4936 			verbose(env, "invalid variable-offset%s stack R%d var_off=%s size=%d\n",
4937 				err_extra, regno, tn_buf, access_size);
4938 		}
4939 	}
4940 	return err;
4941 }
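
/* Illustrative worked example (assumed values, for clarity only): for a
 * register with a non-constant var_off, smin_value == -16, smax_value == -8,
 * off == 0 and access_size == 8, the code above computes min_off == -16 and
 * max_off == -8 + 8 - 1 == -1, so both ends of the access fall inside
 * [-MAX_BPF_STACK, -1] and the access is accepted.
 */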
4942 
4943 /* check whether memory at (regno + off) is accessible for t = (read | write)
4944  * if t==write, value_regno is a register whose value is stored into memory
4945  * if t==read, value_regno is a register which will receive the value from memory
4946  * if t==write && value_regno==-1, some unknown value is stored into memory
4947  * if t==read && value_regno==-1, don't care what we read from memory
4948  */
4949 static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno,
4950 			    int off, int bpf_size, enum bpf_access_type t,
4951 			    int value_regno, bool strict_alignment_once)
4952 {
4953 	struct bpf_reg_state *regs = cur_regs(env);
4954 	struct bpf_reg_state *reg = regs + regno;
4955 	struct bpf_func_state *state;
4956 	int size, err = 0;
4957 
4958 	size = bpf_size_to_bytes(bpf_size);
4959 	if (size < 0)
4960 		return size;
4961 
4962 	/* alignment checks will add in reg->off themselves */
4963 	err = check_ptr_alignment(env, reg, off, size, strict_alignment_once);
4964 	if (err)
4965 		return err;
4966 
4967 	/* for access checks, reg->off is just part of off */
4968 	off += reg->off;
4969 
4970 	if (reg->type == PTR_TO_MAP_KEY) {
4971 		if (t == BPF_WRITE) {
4972 			verbose(env, "write to change key R%d not allowed\n", regno);
4973 			return -EACCES;
4974 		}
4975 
4976 		err = check_mem_region_access(env, regno, off, size,
4977 					      reg->map_ptr->key_size, false);
4978 		if (err)
4979 			return err;
4980 		if (value_regno >= 0)
4981 			mark_reg_unknown(env, regs, value_regno);
4982 	} else if (reg->type == PTR_TO_MAP_VALUE) {
4983 		struct btf_field *kptr_field = NULL;
4984 
4985 		if (t == BPF_WRITE && value_regno >= 0 &&
4986 		    is_pointer_value(env, value_regno)) {
4987 			verbose(env, "R%d leaks addr into map\n", value_regno);
4988 			return -EACCES;
4989 		}
4990 		err = check_map_access_type(env, regno, off, size, t);
4991 		if (err)
4992 			return err;
4993 		err = check_map_access(env, regno, off, size, false, ACCESS_DIRECT);
4994 		if (err)
4995 			return err;
4996 		if (tnum_is_const(reg->var_off))
4997 			kptr_field = btf_record_find(reg->map_ptr->record,
4998 						     off + reg->var_off.value, BPF_KPTR);
4999 		if (kptr_field) {
5000 			err = check_map_kptr_access(env, regno, value_regno, insn_idx, kptr_field);
5001 		} else if (t == BPF_READ && value_regno >= 0) {
5002 			struct bpf_map *map = reg->map_ptr;
5003 
5004 			/* if map is read-only, track its contents as scalars */
5005 			if (tnum_is_const(reg->var_off) &&
5006 			    bpf_map_is_rdonly(map) &&
5007 			    map->ops->map_direct_value_addr) {
5008 				int map_off = off + reg->var_off.value;
5009 				u64 val = 0;
5010 
5011 				err = bpf_map_direct_read(map, map_off, size,
5012 							  &val);
5013 				if (err)
5014 					return err;
5015 
5016 				regs[value_regno].type = SCALAR_VALUE;
5017 				__mark_reg_known(&regs[value_regno], val);
5018 			} else {
5019 				mark_reg_unknown(env, regs, value_regno);
5020 			}
5021 		}
5022 	} else if (base_type(reg->type) == PTR_TO_MEM) {
5023 		bool rdonly_mem = type_is_rdonly_mem(reg->type);
5024 
5025 		if (type_may_be_null(reg->type)) {
5026 			verbose(env, "R%d invalid mem access '%s'\n", regno,
5027 				reg_type_str(env, reg->type));
5028 			return -EACCES;
5029 		}
5030 
5031 		if (t == BPF_WRITE && rdonly_mem) {
5032 			verbose(env, "R%d cannot write into %s\n",
5033 				regno, reg_type_str(env, reg->type));
5034 			return -EACCES;
5035 		}
5036 
5037 		if (t == BPF_WRITE && value_regno >= 0 &&
5038 		    is_pointer_value(env, value_regno)) {
5039 			verbose(env, "R%d leaks addr into mem\n", value_regno);
5040 			return -EACCES;
5041 		}
5042 
5043 		err = check_mem_region_access(env, regno, off, size,
5044 					      reg->mem_size, false);
5045 		if (!err && value_regno >= 0 && (t == BPF_READ || rdonly_mem))
5046 			mark_reg_unknown(env, regs, value_regno);
5047 	} else if (reg->type == PTR_TO_CTX) {
5048 		enum bpf_reg_type reg_type = SCALAR_VALUE;
5049 		struct btf *btf = NULL;
5050 		u32 btf_id = 0;
5051 
5052 		if (t == BPF_WRITE && value_regno >= 0 &&
5053 		    is_pointer_value(env, value_regno)) {
5054 			verbose(env, "R%d leaks addr into ctx\n", value_regno);
5055 			return -EACCES;
5056 		}
5057 
5058 		err = check_ptr_off_reg(env, reg, regno);
5059 		if (err < 0)
5060 			return err;
5061 
5062 		err = check_ctx_access(env, insn_idx, off, size, t, &reg_type, &btf,
5063 				       &btf_id);
5064 		if (err)
5065 			verbose_linfo(env, insn_idx, "; ");
5066 		if (!err && t == BPF_READ && value_regno >= 0) {
5067 			/* ctx access returns either a scalar, or a
5068 			 * PTR_TO_PACKET[_META,_END]. In the latter
5069 			 * case, we know the offset is zero.
5070 			 */
5071 			if (reg_type == SCALAR_VALUE) {
5072 				mark_reg_unknown(env, regs, value_regno);
5073 			} else {
5074 				mark_reg_known_zero(env, regs,
5075 						    value_regno);
5076 				if (type_may_be_null(reg_type))
5077 					regs[value_regno].id = ++env->id_gen;
5078 				/* A load of a ctx field can have an actual load
5079 				 * size different from the one encoded in the
5080 				 * insn. When the dst is a pointer, it is
5081 				 * certainly not a sub-register.
5082 				 */
5083 				regs[value_regno].subreg_def = DEF_NOT_SUBREG;
5084 				if (base_type(reg_type) == PTR_TO_BTF_ID) {
5085 					regs[value_regno].btf = btf;
5086 					regs[value_regno].btf_id = btf_id;
5087 				}
5088 			}
5089 			regs[value_regno].type = reg_type;
5090 		}
5091 
5092 	} else if (reg->type == PTR_TO_STACK) {
5093 		/* Basic bounds checks. */
5094 		err = check_stack_access_within_bounds(env, regno, off, size, ACCESS_DIRECT, t);
5095 		if (err)
5096 			return err;
5097 
5098 		state = func(env, reg);
5099 		err = update_stack_depth(env, state, off);
5100 		if (err)
5101 			return err;
5102 
5103 		if (t == BPF_READ)
5104 			err = check_stack_read(env, regno, off, size,
5105 					       value_regno);
5106 		else
5107 			err = check_stack_write(env, regno, off, size,
5108 						value_regno, insn_idx);
5109 	} else if (reg_is_pkt_pointer(reg)) {
5110 		if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) {
5111 			verbose(env, "cannot write into packet\n");
5112 			return -EACCES;
5113 		}
5114 		if (t == BPF_WRITE && value_regno >= 0 &&
5115 		    is_pointer_value(env, value_regno)) {
5116 			verbose(env, "R%d leaks addr into packet\n",
5117 				value_regno);
5118 			return -EACCES;
5119 		}
5120 		err = check_packet_access(env, regno, off, size, false);
5121 		if (!err && t == BPF_READ && value_regno >= 0)
5122 			mark_reg_unknown(env, regs, value_regno);
5123 	} else if (reg->type == PTR_TO_FLOW_KEYS) {
5124 		if (t == BPF_WRITE && value_regno >= 0 &&
5125 		    is_pointer_value(env, value_regno)) {
5126 			verbose(env, "R%d leaks addr into flow keys\n",
5127 				value_regno);
5128 			return -EACCES;
5129 		}
5130 
5131 		err = check_flow_keys_access(env, off, size);
5132 		if (!err && t == BPF_READ && value_regno >= 0)
5133 			mark_reg_unknown(env, regs, value_regno);
5134 	} else if (type_is_sk_pointer(reg->type)) {
5135 		if (t == BPF_WRITE) {
5136 			verbose(env, "R%d cannot write into %s\n",
5137 				regno, reg_type_str(env, reg->type));
5138 			return -EACCES;
5139 		}
5140 		err = check_sock_access(env, insn_idx, regno, off, size, t);
5141 		if (!err && value_regno >= 0)
5142 			mark_reg_unknown(env, regs, value_regno);
5143 	} else if (reg->type == PTR_TO_TP_BUFFER) {
5144 		err = check_tp_buffer_access(env, reg, regno, off, size);
5145 		if (!err && t == BPF_READ && value_regno >= 0)
5146 			mark_reg_unknown(env, regs, value_regno);
5147 	} else if (base_type(reg->type) == PTR_TO_BTF_ID &&
5148 		   !type_may_be_null(reg->type)) {
5149 		err = check_ptr_to_btf_access(env, regs, regno, off, size, t,
5150 					      value_regno);
5151 	} else if (reg->type == CONST_PTR_TO_MAP) {
5152 		err = check_ptr_to_map_access(env, regs, regno, off, size, t,
5153 					      value_regno);
5154 	} else if (base_type(reg->type) == PTR_TO_BUF) {
5155 		bool rdonly_mem = type_is_rdonly_mem(reg->type);
5156 		u32 *max_access;
5157 
5158 		if (rdonly_mem) {
5159 			if (t == BPF_WRITE) {
5160 				verbose(env, "R%d cannot write into %s\n",
5161 					regno, reg_type_str(env, reg->type));
5162 				return -EACCES;
5163 			}
5164 			max_access = &env->prog->aux->max_rdonly_access;
5165 		} else {
5166 			max_access = &env->prog->aux->max_rdwr_access;
5167 		}
5168 
5169 		err = check_buffer_access(env, reg, regno, off, size, false,
5170 					  max_access);
5171 
5172 		if (!err && value_regno >= 0 && (rdonly_mem || t == BPF_READ))
5173 			mark_reg_unknown(env, regs, value_regno);
5174 	} else {
5175 		verbose(env, "R%d invalid mem access '%s'\n", regno,
5176 			reg_type_str(env, reg->type));
5177 		return -EACCES;
5178 	}
5179 
5180 	if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ &&
5181 	    regs[value_regno].type == SCALAR_VALUE) {
5182 		/* b/h/w load zero-extends, mark upper bits as known 0 */
5183 		coerce_reg_to_size(&regs[value_regno], size);
5184 	}
5185 	return err;
5186 }
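
/* A hedged sketch of the zero-extension rule at the end of check_mem_access()
 * (illustrative only, using the insn macros from include/linux/filter.h):
 *
 *	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0)
 *
 * loads 4 bytes into R0; when the result is a SCALAR_VALUE, the upper 32 bits
 * are marked as known zero, so a later unsigned comparison of R0 against a
 * constant larger than U32_MAX can typically be proven never to be true.
 */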
5187 
5188 static int check_atomic(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn)
5189 {
5190 	int load_reg;
5191 	int err;
5192 
5193 	switch (insn->imm) {
5194 	case BPF_ADD:
5195 	case BPF_ADD | BPF_FETCH:
5196 	case BPF_AND:
5197 	case BPF_AND | BPF_FETCH:
5198 	case BPF_OR:
5199 	case BPF_OR | BPF_FETCH:
5200 	case BPF_XOR:
5201 	case BPF_XOR | BPF_FETCH:
5202 	case BPF_XCHG:
5203 	case BPF_CMPXCHG:
5204 		break;
5205 	default:
5206 		verbose(env, "BPF_ATOMIC uses invalid atomic opcode %02x\n", insn->imm);
5207 		return -EINVAL;
5208 	}
5209 
5210 	if (BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) {
5211 		verbose(env, "invalid atomic operand size\n");
5212 		return -EINVAL;
5213 	}
5214 
5215 	/* check src1 operand */
5216 	err = check_reg_arg(env, insn->src_reg, SRC_OP);
5217 	if (err)
5218 		return err;
5219 
5220 	/* check src2 operand */
5221 	err = check_reg_arg(env, insn->dst_reg, SRC_OP);
5222 	if (err)
5223 		return err;
5224 
5225 	if (insn->imm == BPF_CMPXCHG) {
5226 		/* Check comparison of R0 with memory location */
5227 		const u32 aux_reg = BPF_REG_0;
5228 
5229 		err = check_reg_arg(env, aux_reg, SRC_OP);
5230 		if (err)
5231 			return err;
5232 
5233 		if (is_pointer_value(env, aux_reg)) {
5234 			verbose(env, "R%d leaks addr into mem\n", aux_reg);
5235 			return -EACCES;
5236 		}
5237 	}
5238 
5239 	if (is_pointer_value(env, insn->src_reg)) {
5240 		verbose(env, "R%d leaks addr into mem\n", insn->src_reg);
5241 		return -EACCES;
5242 	}
5243 
5244 	if (is_ctx_reg(env, insn->dst_reg) ||
5245 	    is_pkt_reg(env, insn->dst_reg) ||
5246 	    is_flow_key_reg(env, insn->dst_reg) ||
5247 	    is_sk_reg(env, insn->dst_reg)) {
5248 		verbose(env, "BPF_ATOMIC stores into R%d %s is not allowed\n",
5249 			insn->dst_reg,
5250 			reg_type_str(env, reg_state(env, insn->dst_reg)->type));
5251 		return -EACCES;
5252 	}
5253 
5254 	if (insn->imm & BPF_FETCH) {
5255 		if (insn->imm == BPF_CMPXCHG)
5256 			load_reg = BPF_REG_0;
5257 		else
5258 			load_reg = insn->src_reg;
5259 
5260 		/* check and record load of old value */
5261 		err = check_reg_arg(env, load_reg, DST_OP);
5262 		if (err)
5263 			return err;
5264 	} else {
5265 		/* This instruction accesses a memory location but doesn't
5266 		 * actually load it into a register.
5267 		 */
5268 		load_reg = -1;
5269 	}
5270 
5271 	/* Check whether we can read the memory; a second call for the fetch
5272 	 * case simulates the register fill.
5273 	 */
5274 	err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
5275 			       BPF_SIZE(insn->code), BPF_READ, -1, true);
5276 	if (!err && load_reg >= 0)
5277 		err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
5278 				       BPF_SIZE(insn->code), BPF_READ, load_reg,
5279 				       true);
5280 	if (err)
5281 		return err;
5282 
5283 	/* Check whether we can write into the same memory. */
5284 	err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
5285 			       BPF_SIZE(insn->code), BPF_WRITE, -1, true);
5286 	if (err)
5287 		return err;
5288 
5289 	return 0;
5290 }
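
/* A hedged sketch of the instruction forms check_atomic() handles (using
 * BPF_ATOMIC_OP() from include/linux/filter.h; the semantics shown are the
 * documented ones, repeated here only for illustration):
 *
 *	BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_1, BPF_REG_2, 0)
 *		// *(u64 *)(R1 + 0) += R2; nothing is fetched, load_reg == -1
 *	BPF_ATOMIC_OP(BPF_DW, BPF_ADD | BPF_FETCH, BPF_REG_1, BPF_REG_2, 0)
 *		// R2 = atomic_fetch_add(R1 + 0, R2); R2 is re-marked via DST_OP
 *	BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_1, BPF_REG_2, 0)
 *		// R0 = atomic_cmpxchg(R1 + 0, R0, R2); the old value lands in R0
 *
 * In every case R1 must point to plain memory (ctx, packet, flow-keys and
 * socket pointers are rejected above), and the same location is checked for
 * both read and write access.
 */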
5291 
5292 /* When register 'regno' is used to read the stack (either directly or through
5293  * a helper function) make sure that it's within stack boundary and, depending
5294  * on the access type, that all elements of the stack are initialized.
5295  *
5296  * 'off' includes 'regno->off', but not its dynamic part (if any).
5297  *
5298  * All registers that have been spilled on the stack in the slots within the
5299  * read offsets are marked as read.
5300  */
5301 static int check_stack_range_initialized(
5302 		struct bpf_verifier_env *env, int regno, int off,
5303 		int access_size, bool zero_size_allowed,
5304 		enum bpf_access_src type, struct bpf_call_arg_meta *meta)
5305 {
5306 	struct bpf_reg_state *reg = reg_state(env, regno);
5307 	struct bpf_func_state *state = func(env, reg);
5308 	int err, min_off, max_off, i, j, slot, spi;
5309 	char *err_extra = type == ACCESS_HELPER ? " indirect" : "";
5310 	enum bpf_access_type bounds_check_type;
5311 	/* Some accesses can write anything into the stack, others are
5312 	 * read-only.
5313 	 */
5314 	bool clobber = false;
5315 
5316 	if (access_size == 0 && !zero_size_allowed) {
5317 		verbose(env, "invalid zero-sized read\n");
5318 		return -EACCES;
5319 	}
5320 
5321 	if (type == ACCESS_HELPER) {
5322 		/* The bounds checks for writes are more permissive than for
5323 		 * reads. However, if raw_mode is not set, we'll do extra
5324 		 * checks below.
5325 		 */
5326 		bounds_check_type = BPF_WRITE;
5327 		clobber = true;
5328 	} else {
5329 		bounds_check_type = BPF_READ;
5330 	}
5331 	err = check_stack_access_within_bounds(env, regno, off, access_size,
5332 					       type, bounds_check_type);
5333 	if (err)
5334 		return err;
5335 
5336 
5337 	if (tnum_is_const(reg->var_off)) {
5338 		min_off = max_off = reg->var_off.value + off;
5339 	} else {
5340 		/* Variable offset is prohibited for unprivileged mode for
5341 		 * simplicity since it requires corresponding support in
5342 		 * Spectre masking for stack ALU.
5343 		 * See also retrieve_ptr_limit().
5344 		 */
5345 		if (!env->bypass_spec_v1) {
5346 			char tn_buf[48];
5347 
5348 			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
5349 			verbose(env, "R%d%s variable offset stack access prohibited for !root, var_off=%s\n",
5350 				regno, err_extra, tn_buf);
5351 			return -EACCES;
5352 		}
5353 		/* Only an initialized buffer on the stack may be accessed with
5354 		 * a variable offset. With an uninitialized buffer it's hard to
5355 		 * guarantee that the whole memory is marked as initialized on
5356 		 * helper return, since the exact bounds are unknown, which may
5357 		 * cause an uninitialized stack leak.
5358 		 */
5359 		if (meta && meta->raw_mode)
5360 			meta = NULL;
5361 
5362 		min_off = reg->smin_value + off;
5363 		max_off = reg->smax_value + off;
5364 	}
5365 
5366 	if (meta && meta->raw_mode) {
5367 		meta->access_size = access_size;
5368 		meta->regno = regno;
5369 		return 0;
5370 	}
5371 
5372 	for (i = min_off; i < max_off + access_size; i++) {
5373 		u8 *stype;
5374 
5375 		slot = -i - 1;
5376 		spi = slot / BPF_REG_SIZE;
5377 		if (state->allocated_stack <= slot)
5378 			goto err;
5379 		stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
5380 		if (*stype == STACK_MISC)
5381 			goto mark;
5382 		if (*stype == STACK_ZERO) {
5383 			if (clobber) {
5384 				/* helper can write anything into the stack */
5385 				*stype = STACK_MISC;
5386 			}
5387 			goto mark;
5388 		}
5389 
5390 		if (is_spilled_reg(&state->stack[spi]) &&
5391 		    (state->stack[spi].spilled_ptr.type == SCALAR_VALUE ||
5392 		     env->allow_ptr_leaks)) {
5393 			if (clobber) {
5394 				__mark_reg_unknown(env, &state->stack[spi].spilled_ptr);
5395 				for (j = 0; j < BPF_REG_SIZE; j++)
5396 					scrub_spilled_slot(&state->stack[spi].slot_type[j]);
5397 			}
5398 			goto mark;
5399 		}
5400 
5401 err:
5402 		if (tnum_is_const(reg->var_off)) {
5403 			verbose(env, "invalid%s read from stack R%d off %d+%d size %d\n",
5404 				err_extra, regno, min_off, i - min_off, access_size);
5405 		} else {
5406 			char tn_buf[48];
5407 
5408 			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
5409 			verbose(env, "invalid%s read from stack R%d var_off %s+%d size %d\n",
5410 				err_extra, regno, tn_buf, i - min_off, access_size);
5411 		}
5412 		return -EACCES;
5413 mark:
5414 		/* reading any byte out of 8-byte 'spill_slot' will cause
5415 		 * the whole slot to be marked as 'read'
5416 		 */
5417 		mark_reg_read(env, &state->stack[spi].spilled_ptr,
5418 			      state->stack[spi].spilled_ptr.parent,
5419 			      REG_LIVE_READ64);
5420 		/* We do not set REG_LIVE_WRITTEN for the stack slot, as we
5421 		 * cannot be sure whether the slot is written to or not. Hence,
5422 		 * we must still conservatively propagate reads upwards even if
5423 		 * the helper may write to the entire memory range.
5424 		 */
5425 	}
5426 	return update_stack_depth(env, state, min_off);
5427 }
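
/* A hedged example of the two helper-access modes that
 * check_stack_range_initialized() distinguishes (BPF-side pseudo C; the
 * helpers named are assumptions chosen only because their argument types
 * illustrate the point):
 *
 *	char buf[16];				// stack buffer
 *	bpf_probe_read_kernel(buf, sizeof(buf), src);
 *		// dst arg is ARG_PTR_TO_UNINIT_MEM: raw_mode, only bounds
 *		// are checked, the bytes may be uninitialized
 *	bpf_ringbuf_output(&rb, buf, sizeof(buf), 0);
 *		// data arg is read by the helper: every byte must be
 *		// STACK_MISC, STACK_ZERO or part of a readable spill
 */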
5428 
5429 static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
5430 				   int access_size, bool zero_size_allowed,
5431 				   struct bpf_call_arg_meta *meta)
5432 {
5433 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
5434 	u32 *max_access;
5435 
5436 	switch (base_type(reg->type)) {
5437 	case PTR_TO_PACKET:
5438 	case PTR_TO_PACKET_META:
5439 		return check_packet_access(env, regno, reg->off, access_size,
5440 					   zero_size_allowed);
5441 	case PTR_TO_MAP_KEY:
5442 		if (meta && meta->raw_mode) {
5443 			verbose(env, "R%d cannot write into %s\n", regno,
5444 				reg_type_str(env, reg->type));
5445 			return -EACCES;
5446 		}
5447 		return check_mem_region_access(env, regno, reg->off, access_size,
5448 					       reg->map_ptr->key_size, false);
5449 	case PTR_TO_MAP_VALUE:
5450 		if (check_map_access_type(env, regno, reg->off, access_size,
5451 					  meta && meta->raw_mode ? BPF_WRITE :
5452 					  BPF_READ))
5453 			return -EACCES;
5454 		return check_map_access(env, regno, reg->off, access_size,
5455 					zero_size_allowed, ACCESS_HELPER);
5456 	case PTR_TO_MEM:
5457 		if (type_is_rdonly_mem(reg->type)) {
5458 			if (meta && meta->raw_mode) {
5459 				verbose(env, "R%d cannot write into %s\n", regno,
5460 					reg_type_str(env, reg->type));
5461 				return -EACCES;
5462 			}
5463 		}
5464 		return check_mem_region_access(env, regno, reg->off,
5465 					       access_size, reg->mem_size,
5466 					       zero_size_allowed);
5467 	case PTR_TO_BUF:
5468 		if (type_is_rdonly_mem(reg->type)) {
5469 			if (meta && meta->raw_mode) {
5470 				verbose(env, "R%d cannot write into %s\n", regno,
5471 					reg_type_str(env, reg->type));
5472 				return -EACCES;
5473 			}
5474 
5475 			max_access = &env->prog->aux->max_rdonly_access;
5476 		} else {
5477 			max_access = &env->prog->aux->max_rdwr_access;
5478 		}
5479 		return check_buffer_access(env, reg, regno, reg->off,
5480 					   access_size, zero_size_allowed,
5481 					   max_access);
5482 	case PTR_TO_STACK:
5483 		return check_stack_range_initialized(
5484 				env,
5485 				regno, reg->off, access_size,
5486 				zero_size_allowed, ACCESS_HELPER, meta);
5487 	case PTR_TO_CTX:
5488 		/* If the program type doesn't know how to access the context
5489 		 * (because we are in a program of type SYSCALL, for example),
5490 		 * we cannot statically check the access size.
5491 		 * Check it dynamically now.
5492 		 */
5493 		if (!env->ops->convert_ctx_access) {
5494 			enum bpf_access_type atype = meta && meta->raw_mode ? BPF_WRITE : BPF_READ;
5495 			int offset = access_size - 1;
5496 
5497 			/* Allow zero-byte read from PTR_TO_CTX */
5498 			if (access_size == 0)
5499 				return zero_size_allowed ? 0 : -EACCES;
5500 
5501 			return check_mem_access(env, env->insn_idx, regno, offset, BPF_B,
5502 						atype, -1, false);
5503 		}
5504 
5505 		fallthrough;
5506 	default: /* scalar_value or invalid ptr */
5507 		/* Allow zero-byte read from NULL, regardless of pointer type */
5508 		if (zero_size_allowed && access_size == 0 &&
5509 		    register_is_null(reg))
5510 			return 0;
5511 
5512 		verbose(env, "R%d type=%s ", regno,
5513 			reg_type_str(env, reg->type));
5514 		verbose(env, "expected=%s\n", reg_type_str(env, PTR_TO_STACK));
5515 		return -EACCES;
5516 	}
5517 }
5518 
5519 static int check_mem_size_reg(struct bpf_verifier_env *env,
5520 			      struct bpf_reg_state *reg, u32 regno,
5521 			      bool zero_size_allowed,
5522 			      struct bpf_call_arg_meta *meta)
5523 {
5524 	int err;
5525 
5526 	/* This is used to refine r0 return value bounds for helpers
5527 	 * that enforce this value as an upper bound on return values.
5528 	 * See do_refine_retval_range() for helpers that can refine
5529 	 * the return value. The helper's C type is u32, so we pull the
5530 	 * register bound from umax_value; if the value can be negative, the
5531 	 * verifier errors out. Only upper bounds can be learned because the
5532 	 * retval is an int type and negative retvals are allowed.
5533 	 */
5534 	meta->msize_max_value = reg->umax_value;
5535 
5536 	/* The register is SCALAR_VALUE; the access check
5537 	 * happens using its boundaries.
5538 	 */
5539 	if (!tnum_is_const(reg->var_off))
5540 		/* For unprivileged variable accesses, disable raw
5541 		 * mode so that the program is required to
5542 		 * initialize all the memory that the helper could
5543 		 * just partially fill up.
5544 		 */
5545 		meta = NULL;
5546 
5547 	if (reg->smin_value < 0) {
5548 		verbose(env, "R%d min value is negative, either use unsigned or 'var &= const'\n",
5549 			regno);
5550 		return -EACCES;
5551 	}
5552 
5553 	if (reg->umin_value == 0) {
5554 		err = check_helper_mem_access(env, regno - 1, 0,
5555 					      zero_size_allowed,
5556 					      meta);
5557 		if (err)
5558 			return err;
5559 	}
5560 
5561 	if (reg->umax_value >= BPF_MAX_VAR_SIZ) {
5562 		verbose(env, "R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n",
5563 			regno);
5564 		return -EACCES;
5565 	}
5566 	err = check_helper_mem_access(env, regno - 1,
5567 				      reg->umax_value,
5568 				      zero_size_allowed, meta);
5569 	if (!err)
5570 		err = mark_chain_precision(env, regno);
5571 	return err;
5572 }
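
/* The shape of program code check_mem_size_reg() accepts (an illustrative
 * sketch; 'buf', 'len' and 'src' are made-up names):
 *
 *	u32 len = ...;				// unknown scalar
 *	if (len > sizeof(buf))			// bounds len: umax <= sizeof(buf)
 *		return 0;
 *	bpf_probe_read_kernel(buf, len, src);	// checked against umax_value
 *
 * Without the bounding branch, umax_value stays at U32_MAX and the access is
 * rejected as an unbounded memory access.
 */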
5573 
5574 int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
5575 		   u32 regno, u32 mem_size)
5576 {
5577 	bool may_be_null = type_may_be_null(reg->type);
5578 	struct bpf_reg_state saved_reg;
5579 	struct bpf_call_arg_meta meta;
5580 	int err;
5581 
5582 	if (register_is_null(reg))
5583 		return 0;
5584 
5585 	memset(&meta, 0, sizeof(meta));
5586 	/* Assuming that the register contains a value, check if the memory
5587 	 * access is safe. Temporarily save and restore the register's state as
5588 	 * the conversion shouldn't be visible to a caller.
5589 	 */
5590 	if (may_be_null) {
5591 		saved_reg = *reg;
5592 		mark_ptr_not_null_reg(reg);
5593 	}
5594 
5595 	err = check_helper_mem_access(env, regno, mem_size, true, &meta);
5596 	/* Check access for BPF_WRITE */
5597 	meta.raw_mode = true;
5598 	err = err ?: check_helper_mem_access(env, regno, mem_size, true, &meta);
5599 
5600 	if (may_be_null)
5601 		*reg = saved_reg;
5602 
5603 	return err;
5604 }
5605 
5606 static int check_kfunc_mem_size_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
5607 				    u32 regno)
5608 {
5609 	struct bpf_reg_state *mem_reg = &cur_regs(env)[regno - 1];
5610 	bool may_be_null = type_may_be_null(mem_reg->type);
5611 	struct bpf_reg_state saved_reg;
5612 	struct bpf_call_arg_meta meta;
5613 	int err;
5614 
5615 	WARN_ON_ONCE(regno < BPF_REG_2 || regno > BPF_REG_5);
5616 
5617 	memset(&meta, 0, sizeof(meta));
5618 
5619 	if (may_be_null) {
5620 		saved_reg = *mem_reg;
5621 		mark_ptr_not_null_reg(mem_reg);
5622 	}
5623 
5624 	err = check_mem_size_reg(env, reg, regno, true, &meta);
5625 	/* Check access for BPF_WRITE */
5626 	meta.raw_mode = true;
5627 	err = err ?: check_mem_size_reg(env, reg, regno, true, &meta);
5628 
5629 	if (may_be_null)
5630 		*mem_reg = saved_reg;
5631 	return err;
5632 }
5633 
5634 /* Implementation details:
5635  * bpf_map_lookup returns PTR_TO_MAP_VALUE_OR_NULL.
5636  * bpf_obj_new returns PTR_TO_BTF_ID | MEM_ALLOC | PTR_MAYBE_NULL.
5637  * Two bpf_map_lookups (even with the same key) will have different reg->id.
5638  * Two separate bpf_obj_new will also have different reg->id.
5639  * For traditional PTR_TO_MAP_VALUE or PTR_TO_BTF_ID | MEM_ALLOC, the verifier
5640  * clears reg->id after value_or_null->value transition, since the verifier only
5641  * cares about the range of access to valid map value pointer and doesn't care
5642  * about actual address of the map element.
5643  * For maps with 'struct bpf_spin_lock' inside map value the verifier keeps
5644  * reg->id > 0 after value_or_null->value transition. By doing so
5645  * two bpf_map_lookups will be considered two different pointers that
5646  * point to different bpf_spin_locks. Likewise for pointers to allocated objects
5647  * returned from bpf_obj_new.
5648  * The verifier allows taking only one bpf_spin_lock at a time to avoid
5649  * deadlocks.
5650  * Since only one bpf_spin_lock is allowed the checks are simpler than
5651  * reg_is_refcounted() logic. The verifier needs to remember only
5652  * one spin_lock instead of array of acquired_refs.
5653  * cur_state->active_lock remembers which map value element or allocated
5654  * object got locked and clears it after bpf_spin_unlock.
5655  */
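
/* An illustrative sketch of the pattern this enforces (BPF-side pseudo C,
 * assuming a map value type that embeds a struct bpf_spin_lock named 'lock';
 * the names are made up):
 *
 *	struct val *v = bpf_map_lookup_elem(&map, &key);
 *	if (v) {
 *		bpf_spin_lock(&v->lock);	// active_lock.ptr = map, .id = reg->id
 *		v->cnt++;
 *		bpf_spin_unlock(&v->lock);	// active_lock cleared
 *	}
 *
 * Taking a second bpf_spin_lock before the unlock, or unlocking through a
 * pointer obtained from a different lookup (different reg->id), is rejected
 * by process_spin_lock() below.
 */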
5656 static int process_spin_lock(struct bpf_verifier_env *env, int regno,
5657 			     bool is_lock)
5658 {
5659 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
5660 	struct bpf_verifier_state *cur = env->cur_state;
5661 	bool is_const = tnum_is_const(reg->var_off);
5662 	u64 val = reg->var_off.value;
5663 	struct bpf_map *map = NULL;
5664 	struct btf *btf = NULL;
5665 	struct btf_record *rec;
5666 
5667 	if (!is_const) {
5668 		verbose(env,
5669 			"R%d doesn't have constant offset. bpf_spin_lock has to be at the constant offset\n",
5670 			regno);
5671 		return -EINVAL;
5672 	}
5673 	if (reg->type == PTR_TO_MAP_VALUE) {
5674 		map = reg->map_ptr;
5675 		if (!map->btf) {
5676 			verbose(env,
5677 				"map '%s' has to have BTF in order to use bpf_spin_lock\n",
5678 				map->name);
5679 			return -EINVAL;
5680 		}
5681 	} else {
5682 		btf = reg->btf;
5683 	}
5684 
5685 	rec = reg_btf_record(reg);
5686 	if (!btf_record_has_field(rec, BPF_SPIN_LOCK)) {
5687 		verbose(env, "%s '%s' has no valid bpf_spin_lock\n", map ? "map" : "local",
5688 			map ? map->name : "kptr");
5689 		return -EINVAL;
5690 	}
5691 	if (rec->spin_lock_off != val + reg->off) {
5692 		verbose(env, "off %lld doesn't point to 'struct bpf_spin_lock' that is at %d\n",
5693 			val + reg->off, rec->spin_lock_off);
5694 		return -EINVAL;
5695 	}
5696 	if (is_lock) {
5697 		if (cur->active_lock.ptr) {
5698 			verbose(env,
5699 				"Locking two bpf_spin_locks is not allowed\n");
5700 			return -EINVAL;
5701 		}
5702 		if (map)
5703 			cur->active_lock.ptr = map;
5704 		else
5705 			cur->active_lock.ptr = btf;
5706 		cur->active_lock.id = reg->id;
5707 	} else {
5708 		struct bpf_func_state *fstate = cur_func(env);
5709 		void *ptr;
5710 		int i;
5711 
5712 		if (map)
5713 			ptr = map;
5714 		else
5715 			ptr = btf;
5716 
5717 		if (!cur->active_lock.ptr) {
5718 			verbose(env, "bpf_spin_unlock without taking a lock\n");
5719 			return -EINVAL;
5720 		}
5721 		if (cur->active_lock.ptr != ptr ||
5722 		    cur->active_lock.id != reg->id) {
5723 			verbose(env, "bpf_spin_unlock of different lock\n");
5724 			return -EINVAL;
5725 		}
5726 		cur->active_lock.ptr = NULL;
5727 		cur->active_lock.id = 0;
5728 
5729 		for (i = 0; i < fstate->acquired_refs; i++) {
5730 			int err;
5731 
5732 			/* Complain on error because this reference state cannot
5733 			 * be freed before this point, as bpf_spin_lock critical
5734 			 * section does not allow functions that release the
5735 			 * allocated object immediately.
5736 			 */
5737 			if (!fstate->refs[i].release_on_unlock)
5738 				continue;
5739 			err = release_reference(env, fstate->refs[i].id);
5740 			if (err) {
5741 				verbose(env, "failed to release release_on_unlock reference");
5742 				return err;
5743 			}
5744 		}
5745 	}
5746 	return 0;
5747 }
5748 
5749 static int process_timer_func(struct bpf_verifier_env *env, int regno,
5750 			      struct bpf_call_arg_meta *meta)
5751 {
5752 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
5753 	bool is_const = tnum_is_const(reg->var_off);
5754 	struct bpf_map *map = reg->map_ptr;
5755 	u64 val = reg->var_off.value;
5756 
5757 	if (!is_const) {
5758 		verbose(env,
5759 			"R%d doesn't have constant offset. bpf_timer has to be at the constant offset\n",
5760 			regno);
5761 		return -EINVAL;
5762 	}
5763 	if (!map->btf) {
5764 		verbose(env, "map '%s' has to have BTF in order to use bpf_timer\n",
5765 			map->name);
5766 		return -EINVAL;
5767 	}
5768 	if (!btf_record_has_field(map->record, BPF_TIMER)) {
5769 		verbose(env, "map '%s' has no valid bpf_timer\n", map->name);
5770 		return -EINVAL;
5771 	}
5772 	if (map->record->timer_off != val + reg->off) {
5773 		verbose(env, "off %lld doesn't point to 'struct bpf_timer' that is at %d\n",
5774 			val + reg->off, map->record->timer_off);
5775 		return -EINVAL;
5776 	}
5777 	if (meta->map_ptr) {
5778 		verbose(env, "verifier bug. Two map pointers in a timer helper\n");
5779 		return -EFAULT;
5780 	}
5781 	meta->map_uid = reg->map_uid;
5782 	meta->map_ptr = map;
5783 	return 0;
5784 }
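
/* An illustrative usage sketch for process_timer_func() above (BPF-side
 * pseudo C; the map and field names are made up):
 *
 *	struct elem { struct bpf_timer t; };
 *	...
 *	struct elem *val = bpf_map_lookup_elem(&timer_map, &key);
 *	if (val)
 *		bpf_timer_init(&val->t, &timer_map, CLOCK_MONOTONIC);
 *
 * &val->t must be a constant offset that matches the bpf_timer offset
 * recorded in the map's BTF (map->record->timer_off).
 */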
5785 
5786 static int process_kptr_func(struct bpf_verifier_env *env, int regno,
5787 			     struct bpf_call_arg_meta *meta)
5788 {
5789 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
5790 	struct bpf_map *map_ptr = reg->map_ptr;
5791 	struct btf_field *kptr_field;
5792 	u32 kptr_off;
5793 
5794 	if (!tnum_is_const(reg->var_off)) {
5795 		verbose(env,
5796 			"R%d doesn't have constant offset. kptr has to be at the constant offset\n",
5797 			regno);
5798 		return -EINVAL;
5799 	}
5800 	if (!map_ptr->btf) {
5801 		verbose(env, "map '%s' has to have BTF in order to use bpf_kptr_xchg\n",
5802 			map_ptr->name);
5803 		return -EINVAL;
5804 	}
5805 	if (!btf_record_has_field(map_ptr->record, BPF_KPTR)) {
5806 		verbose(env, "map '%s' has no valid kptr\n", map_ptr->name);
5807 		return -EINVAL;
5808 	}
5809 
5810 	meta->map_ptr = map_ptr;
5811 	kptr_off = reg->off + reg->var_off.value;
5812 	kptr_field = btf_record_find(map_ptr->record, kptr_off, BPF_KPTR);
5813 	if (!kptr_field) {
5814 		verbose(env, "off=%d doesn't point to kptr\n", kptr_off);
5815 		return -EACCES;
5816 	}
5817 	if (kptr_field->type != BPF_KPTR_REF) {
5818 		verbose(env, "off=%d kptr isn't referenced kptr\n", kptr_off);
5819 		return -EACCES;
5820 	}
5821 	meta->kptr_field = kptr_field;
5822 	return 0;
5823 }
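
/* An illustrative usage sketch for process_kptr_func() above (BPF-side
 * pseudo C; the type and field names, and the __kptr_ref annotation style,
 * are assumptions for illustration only):
 *
 *	struct map_val { struct task_struct __kptr_ref *task; };
 *	...
 *	old = bpf_kptr_xchg(&v->task, new);
 *
 * The first argument must land exactly on a referenced kptr field
 * (BPF_KPTR_REF) recorded in the map's BTF; unreferenced kptr fields are
 * rejected.
 */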
5824 
5825 static bool arg_type_is_mem_size(enum bpf_arg_type type)
5826 {
5827 	return type == ARG_CONST_SIZE ||
5828 	       type == ARG_CONST_SIZE_OR_ZERO;
5829 }
5830 
5831 static bool arg_type_is_release(enum bpf_arg_type type)
5832 {
5833 	return type & OBJ_RELEASE;
5834 }
5835 
5836 static bool arg_type_is_dynptr(enum bpf_arg_type type)
5837 {
5838 	return base_type(type) == ARG_PTR_TO_DYNPTR;
5839 }
5840 
5841 static int int_ptr_type_to_size(enum bpf_arg_type type)
5842 {
5843 	if (type == ARG_PTR_TO_INT)
5844 		return sizeof(u32);
5845 	else if (type == ARG_PTR_TO_LONG)
5846 		return sizeof(u64);
5847 
5848 	return -EINVAL;
5849 }
5850 
5851 static int resolve_map_arg_type(struct bpf_verifier_env *env,
5852 				 const struct bpf_call_arg_meta *meta,
5853 				 enum bpf_arg_type *arg_type)
5854 {
5855 	if (!meta->map_ptr) {
5856 		/* kernel subsystem misconfigured verifier */
5857 		verbose(env, "invalid map_ptr to access map->type\n");
5858 		return -EACCES;
5859 	}
5860 
5861 	switch (meta->map_ptr->map_type) {
5862 	case BPF_MAP_TYPE_SOCKMAP:
5863 	case BPF_MAP_TYPE_SOCKHASH:
5864 		if (*arg_type == ARG_PTR_TO_MAP_VALUE) {
5865 			*arg_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON;
5866 		} else {
5867 			verbose(env, "invalid arg_type for sockmap/sockhash\n");
5868 			return -EINVAL;
5869 		}
5870 		break;
5871 	case BPF_MAP_TYPE_BLOOM_FILTER:
5872 		if (meta->func_id == BPF_FUNC_map_peek_elem)
5873 			*arg_type = ARG_PTR_TO_MAP_VALUE;
5874 		break;
5875 	default:
5876 		break;
5877 	}
5878 	return 0;
5879 }
5880 
5881 struct bpf_reg_types {
5882 	const enum bpf_reg_type types[10];
5883 	u32 *btf_id;
5884 };
5885 
5886 static const struct bpf_reg_types sock_types = {
5887 	.types = {
5888 		PTR_TO_SOCK_COMMON,
5889 		PTR_TO_SOCKET,
5890 		PTR_TO_TCP_SOCK,
5891 		PTR_TO_XDP_SOCK,
5892 	},
5893 };
5894 
5895 #ifdef CONFIG_NET
5896 static const struct bpf_reg_types btf_id_sock_common_types = {
5897 	.types = {
5898 		PTR_TO_SOCK_COMMON,
5899 		PTR_TO_SOCKET,
5900 		PTR_TO_TCP_SOCK,
5901 		PTR_TO_XDP_SOCK,
5902 		PTR_TO_BTF_ID,
5903 		PTR_TO_BTF_ID | PTR_TRUSTED,
5904 	},
5905 	.btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
5906 };
5907 #endif
5908 
5909 static const struct bpf_reg_types mem_types = {
5910 	.types = {
5911 		PTR_TO_STACK,
5912 		PTR_TO_PACKET,
5913 		PTR_TO_PACKET_META,
5914 		PTR_TO_MAP_KEY,
5915 		PTR_TO_MAP_VALUE,
5916 		PTR_TO_MEM,
5917 		PTR_TO_MEM | MEM_RINGBUF,
5918 		PTR_TO_BUF,
5919 	},
5920 };
5921 
5922 static const struct bpf_reg_types int_ptr_types = {
5923 	.types = {
5924 		PTR_TO_STACK,
5925 		PTR_TO_PACKET,
5926 		PTR_TO_PACKET_META,
5927 		PTR_TO_MAP_KEY,
5928 		PTR_TO_MAP_VALUE,
5929 	},
5930 };
5931 
5932 static const struct bpf_reg_types spin_lock_types = {
5933 	.types = {
5934 		PTR_TO_MAP_VALUE,
5935 		PTR_TO_BTF_ID | MEM_ALLOC,
5936 	}
5937 };
5938 
5939 static const struct bpf_reg_types fullsock_types = { .types = { PTR_TO_SOCKET } };
5940 static const struct bpf_reg_types scalar_types = { .types = { SCALAR_VALUE } };
5941 static const struct bpf_reg_types context_types = { .types = { PTR_TO_CTX } };
5942 static const struct bpf_reg_types ringbuf_mem_types = { .types = { PTR_TO_MEM | MEM_RINGBUF } };
5943 static const struct bpf_reg_types const_map_ptr_types = { .types = { CONST_PTR_TO_MAP } };
5944 static const struct bpf_reg_types btf_ptr_types = {
5945 	.types = {
5946 		PTR_TO_BTF_ID,
5947 		PTR_TO_BTF_ID | PTR_TRUSTED,
5948 		PTR_TO_BTF_ID | MEM_RCU | PTR_TRUSTED,
5949 	},
5950 };
5951 static const struct bpf_reg_types percpu_btf_ptr_types = {
5952 	.types = {
5953 		PTR_TO_BTF_ID | MEM_PERCPU,
5954 		PTR_TO_BTF_ID | MEM_PERCPU | PTR_TRUSTED,
5955 	}
5956 };
5957 static const struct bpf_reg_types func_ptr_types = { .types = { PTR_TO_FUNC } };
5958 static const struct bpf_reg_types stack_ptr_types = { .types = { PTR_TO_STACK } };
5959 static const struct bpf_reg_types const_str_ptr_types = { .types = { PTR_TO_MAP_VALUE } };
5960 static const struct bpf_reg_types timer_types = { .types = { PTR_TO_MAP_VALUE } };
5961 static const struct bpf_reg_types kptr_types = { .types = { PTR_TO_MAP_VALUE } };
5962 static const struct bpf_reg_types dynptr_types = {
5963 	.types = {
5964 		PTR_TO_STACK,
5965 		PTR_TO_DYNPTR | DYNPTR_TYPE_LOCAL,
5966 	}
5967 };
5968 
5969 static const struct bpf_reg_types *compatible_reg_types[__BPF_ARG_TYPE_MAX] = {
5970 	[ARG_PTR_TO_MAP_KEY]		= &mem_types,
5971 	[ARG_PTR_TO_MAP_VALUE]		= &mem_types,
5972 	[ARG_CONST_SIZE]		= &scalar_types,
5973 	[ARG_CONST_SIZE_OR_ZERO]	= &scalar_types,
5974 	[ARG_CONST_ALLOC_SIZE_OR_ZERO]	= &scalar_types,
5975 	[ARG_CONST_MAP_PTR]		= &const_map_ptr_types,
5976 	[ARG_PTR_TO_CTX]		= &context_types,
5977 	[ARG_PTR_TO_SOCK_COMMON]	= &sock_types,
5978 #ifdef CONFIG_NET
5979 	[ARG_PTR_TO_BTF_ID_SOCK_COMMON]	= &btf_id_sock_common_types,
5980 #endif
5981 	[ARG_PTR_TO_SOCKET]		= &fullsock_types,
5982 	[ARG_PTR_TO_BTF_ID]		= &btf_ptr_types,
5983 	[ARG_PTR_TO_SPIN_LOCK]		= &spin_lock_types,
5984 	[ARG_PTR_TO_MEM]		= &mem_types,
5985 	[ARG_PTR_TO_RINGBUF_MEM]	= &ringbuf_mem_types,
5986 	[ARG_PTR_TO_INT]		= &int_ptr_types,
5987 	[ARG_PTR_TO_LONG]		= &int_ptr_types,
5988 	[ARG_PTR_TO_PERCPU_BTF_ID]	= &percpu_btf_ptr_types,
5989 	[ARG_PTR_TO_FUNC]		= &func_ptr_types,
5990 	[ARG_PTR_TO_STACK]		= &stack_ptr_types,
5991 	[ARG_PTR_TO_CONST_STR]		= &const_str_ptr_types,
5992 	[ARG_PTR_TO_TIMER]		= &timer_types,
5993 	[ARG_PTR_TO_KPTR]		= &kptr_types,
5994 	[ARG_PTR_TO_DYNPTR]		= &dynptr_types,
5995 };
5996 
5997 static int check_reg_type(struct bpf_verifier_env *env, u32 regno,
5998 			  enum bpf_arg_type arg_type,
5999 			  const u32 *arg_btf_id,
6000 			  struct bpf_call_arg_meta *meta)
6001 {
6002 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
6003 	enum bpf_reg_type expected, type = reg->type;
6004 	const struct bpf_reg_types *compatible;
6005 	int i, j;
6006 
6007 	compatible = compatible_reg_types[base_type(arg_type)];
6008 	if (!compatible) {
6009 		verbose(env, "verifier internal error: unsupported arg type %d\n", arg_type);
6010 		return -EFAULT;
6011 	}
6012 
6013 	/* ARG_PTR_TO_MEM + RDONLY is compatible with PTR_TO_MEM and PTR_TO_MEM + RDONLY,
6014 	 * but ARG_PTR_TO_MEM is compatible only with PTR_TO_MEM and NOT with PTR_TO_MEM + RDONLY
6015 	 *
6016 	 * Same for MAYBE_NULL:
6017 	 *
6018 	 * ARG_PTR_TO_MEM + MAYBE_NULL is compatible with PTR_TO_MEM and PTR_TO_MEM + MAYBE_NULL,
6019 	 * but ARG_PTR_TO_MEM is compatible only with PTR_TO_MEM but NOT with PTR_TO_MEM + MAYBE_NULL
6020 	 *
6021 	 * Therefore we fold these flags depending on the arg_type before comparison.
6022 	 */
6023 	if (arg_type & MEM_RDONLY)
6024 		type &= ~MEM_RDONLY;
6025 	if (arg_type & PTR_MAYBE_NULL)
6026 		type &= ~PTR_MAYBE_NULL;
6027 
6028 	for (i = 0; i < ARRAY_SIZE(compatible->types); i++) {
6029 		expected = compatible->types[i];
6030 		if (expected == NOT_INIT)
6031 			break;
6032 
6033 		if (type == expected)
6034 			goto found;
6035 	}
6036 
6037 	verbose(env, "R%d type=%s expected=", regno, reg_type_str(env, reg->type));
6038 	for (j = 0; j + 1 < i; j++)
6039 		verbose(env, "%s, ", reg_type_str(env, compatible->types[j]));
6040 	verbose(env, "%s\n", reg_type_str(env, compatible->types[j]));
6041 	return -EACCES;
6042 
6043 found:
6044 	if (reg->type == PTR_TO_BTF_ID || reg->type & PTR_TRUSTED) {
6045 		/* For bpf_sk_release, it needs to match against first member
6046 		 * 'struct sock_common', hence make an exception for it. This
6047 		 * allows bpf_sk_release to work for multiple socket types.
6048 		 */
6049 		bool strict_type_match = arg_type_is_release(arg_type) &&
6050 					 meta->func_id != BPF_FUNC_sk_release;
6051 
6052 		if (!arg_btf_id) {
6053 			if (!compatible->btf_id) {
6054 				verbose(env, "verifier internal error: missing arg compatible BTF ID\n");
6055 				return -EFAULT;
6056 			}
6057 			arg_btf_id = compatible->btf_id;
6058 		}
6059 
6060 		if (meta->func_id == BPF_FUNC_kptr_xchg) {
6061 			if (map_kptr_match_type(env, meta->kptr_field, reg, regno))
6062 				return -EACCES;
6063 		} else {
6064 			if (arg_btf_id == BPF_PTR_POISON) {
6065 				verbose(env, "verifier internal error:");
6066 				verbose(env, "R%d has non-overwritten BPF_PTR_POISON type\n",
6067 					regno);
6068 				return -EACCES;
6069 			}
6070 
6071 			if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, reg->off,
6072 						  btf_vmlinux, *arg_btf_id,
6073 						  strict_type_match)) {
6074 				verbose(env, "R%d is of type %s but %s is expected\n",
6075 					regno, kernel_type_name(reg->btf, reg->btf_id),
6076 					kernel_type_name(btf_vmlinux, *arg_btf_id));
6077 				return -EACCES;
6078 			}
6079 		}
6080 	} else if (type_is_alloc(reg->type)) {
6081 		if (meta->func_id != BPF_FUNC_spin_lock && meta->func_id != BPF_FUNC_spin_unlock) {
6082 			verbose(env, "verifier internal error: unimplemented handling of MEM_ALLOC\n");
6083 			return -EFAULT;
6084 		}
6085 	}
6086 
6087 	return 0;
6088 }
6089 
6090 int check_func_arg_reg_off(struct bpf_verifier_env *env,
6091 			   const struct bpf_reg_state *reg, int regno,
6092 			   enum bpf_arg_type arg_type)
6093 {
6094 	enum bpf_reg_type type = reg->type;
6095 	bool fixed_off_ok = false;
6096 
6097 	switch ((u32)type) {
6098 	/* Pointer types where reg offset is explicitly allowed: */
6099 	case PTR_TO_STACK:
6100 		if (arg_type_is_dynptr(arg_type) && reg->off % BPF_REG_SIZE) {
6101 			verbose(env, "cannot pass in dynptr at an offset\n");
6102 			return -EINVAL;
6103 		}
6104 		fallthrough;
6105 	case PTR_TO_PACKET:
6106 	case PTR_TO_PACKET_META:
6107 	case PTR_TO_MAP_KEY:
6108 	case PTR_TO_MAP_VALUE:
6109 	case PTR_TO_MEM:
6110 	case PTR_TO_MEM | MEM_RDONLY:
6111 	case PTR_TO_MEM | MEM_RINGBUF:
6112 	case PTR_TO_BUF:
6113 	case PTR_TO_BUF | MEM_RDONLY:
6114 	case SCALAR_VALUE:
6115 		/* Some of the argument types nevertheless require a
6116 		 * zero register offset.
6117 		 */
6118 		if (base_type(arg_type) != ARG_PTR_TO_RINGBUF_MEM)
6119 			return 0;
6120 		break;
6121 	/* All the rest must be rejected, except PTR_TO_BTF_ID which allows
6122 	 * fixed offset.
6123 	 */
6124 	case PTR_TO_BTF_ID:
6125 	case PTR_TO_BTF_ID | MEM_ALLOC:
6126 	case PTR_TO_BTF_ID | PTR_TRUSTED:
6127 	case PTR_TO_BTF_ID | MEM_RCU | PTR_TRUSTED:
6128 	case PTR_TO_BTF_ID | MEM_ALLOC | PTR_TRUSTED:
6129 		/* When referenced PTR_TO_BTF_ID is passed to release function,
6130 		 * its fixed offset must be 0. In the other cases, fixed offset
6131 		 * can be non-zero.
6132 		 */
6133 		if (arg_type_is_release(arg_type) && reg->off) {
6134 			verbose(env, "R%d must have zero offset when passed to release func\n",
6135 				regno);
6136 			return -EINVAL;
6137 		}
6138 		/* When the arg is a release pointer, fixed_off_ok must be false, but
6139 		 * we already checked and rejected reg->off != 0 above, so set
6140 		 * to true to allow fixed offset for all other cases.
6141 		 */
6142 		fixed_off_ok = true;
6143 		break;
6144 	default:
6145 		break;
6146 	}
6147 	return __check_ptr_off_reg(env, reg, regno, fixed_off_ok);
6148 }
6149 
6150 static u32 stack_slot_get_id(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
6151 {
6152 	struct bpf_func_state *state = func(env, reg);
6153 	int spi = get_spi(reg->off);
6154 
6155 	return state->stack[spi].spilled_ptr.id;
6156 }
6157 
6158 static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
6159 			  struct bpf_call_arg_meta *meta,
6160 			  const struct bpf_func_proto *fn)
6161 {
6162 	u32 regno = BPF_REG_1 + arg;
6163 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
6164 	enum bpf_arg_type arg_type = fn->arg_type[arg];
6165 	enum bpf_reg_type type = reg->type;
6166 	u32 *arg_btf_id = NULL;
6167 	int err = 0;
6168 
6169 	if (arg_type == ARG_DONTCARE)
6170 		return 0;
6171 
6172 	err = check_reg_arg(env, regno, SRC_OP);
6173 	if (err)
6174 		return err;
6175 
6176 	if (arg_type == ARG_ANYTHING) {
6177 		if (is_pointer_value(env, regno)) {
6178 			verbose(env, "R%d leaks addr into helper function\n",
6179 				regno);
6180 			return -EACCES;
6181 		}
6182 		return 0;
6183 	}
6184 
6185 	if (type_is_pkt_pointer(type) &&
6186 	    !may_access_direct_pkt_data(env, meta, BPF_READ)) {
6187 		verbose(env, "helper access to the packet is not allowed\n");
6188 		return -EACCES;
6189 	}
6190 
6191 	if (base_type(arg_type) == ARG_PTR_TO_MAP_VALUE) {
6192 		err = resolve_map_arg_type(env, meta, &arg_type);
6193 		if (err)
6194 			return err;
6195 	}
6196 
6197 	if (register_is_null(reg) && type_may_be_null(arg_type))
6198 		/* A NULL register has a SCALAR_VALUE type, so skip
6199 		 * type checking.
6200 		 */
6201 		goto skip_type_check;
6202 
6203 	/* arg_btf_id and arg_size are in a union. */
6204 	if (base_type(arg_type) == ARG_PTR_TO_BTF_ID ||
6205 	    base_type(arg_type) == ARG_PTR_TO_SPIN_LOCK)
6206 		arg_btf_id = fn->arg_btf_id[arg];
6207 
6208 	err = check_reg_type(env, regno, arg_type, arg_btf_id, meta);
6209 	if (err)
6210 		return err;
6211 
6212 	err = check_func_arg_reg_off(env, reg, regno, arg_type);
6213 	if (err)
6214 		return err;
6215 
6216 skip_type_check:
6217 	if (arg_type_is_release(arg_type)) {
6218 		if (arg_type_is_dynptr(arg_type)) {
6219 			struct bpf_func_state *state = func(env, reg);
6220 			int spi = get_spi(reg->off);
6221 
6222 			if (!is_spi_bounds_valid(state, spi, BPF_DYNPTR_NR_SLOTS) ||
6223 			    !state->stack[spi].spilled_ptr.id) {
6224 				verbose(env, "arg %d is an unacquired reference\n", regno);
6225 				return -EINVAL;
6226 			}
6227 		} else if (!reg->ref_obj_id && !register_is_null(reg)) {
6228 			verbose(env, "R%d must be referenced when passed to release function\n",
6229 				regno);
6230 			return -EINVAL;
6231 		}
6232 		if (meta->release_regno) {
6233 			verbose(env, "verifier internal error: more than one release argument\n");
6234 			return -EFAULT;
6235 		}
6236 		meta->release_regno = regno;
6237 	}
6238 
6239 	if (reg->ref_obj_id) {
6240 		if (meta->ref_obj_id) {
6241 			verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n",
6242 				regno, reg->ref_obj_id,
6243 				meta->ref_obj_id);
6244 			return -EFAULT;
6245 		}
6246 		meta->ref_obj_id = reg->ref_obj_id;
6247 	}
6248 
6249 	switch (base_type(arg_type)) {
6250 	case ARG_CONST_MAP_PTR:
6251 		/* bpf_map_xxx(map_ptr) call: remember that map_ptr */
6252 		if (meta->map_ptr) {
6253 			/* Use map_uid (which is unique id of inner map) to reject:
6254 			 * inner_map1 = bpf_map_lookup_elem(outer_map, key1)
6255 			 * inner_map2 = bpf_map_lookup_elem(outer_map, key2)
6256 			 * if (inner_map1 && inner_map2) {
6257 			 *     timer = bpf_map_lookup_elem(inner_map1);
6258 			 *     if (timer)
6259 			 *         // mismatch would have been allowed
6260 			 *         bpf_timer_init(timer, inner_map2);
6261 			 * }
6262 			 *
6263 			 * Comparing map_ptr is enough to distinguish normal and outer maps.
6264 			 */
6265 			if (meta->map_ptr != reg->map_ptr ||
6266 			    meta->map_uid != reg->map_uid) {
6267 				verbose(env,
6268 					"timer pointer in R1 map_uid=%d doesn't match map pointer in R2 map_uid=%d\n",
6269 					meta->map_uid, reg->map_uid);
6270 				return -EINVAL;
6271 			}
6272 		}
6273 		meta->map_ptr = reg->map_ptr;
6274 		meta->map_uid = reg->map_uid;
6275 		break;
6276 	case ARG_PTR_TO_MAP_KEY:
6277 		/* bpf_map_xxx(..., map_ptr, ..., key) call:
6278 		 * check that [key, key + map->key_size) are within
6279 		 * stack limits and initialized
6280 		 */
6281 		if (!meta->map_ptr) {
6282 			/* In the function declaration, map_ptr must come before
6283 			 * map_key, so that it's verified and known before
6284 			 * we have to check map_key here. Otherwise it means
6285 			 * that the kernel subsystem misconfigured the verifier.
6286 			 */
6287 			verbose(env, "invalid map_ptr to access map->key\n");
6288 			return -EACCES;
6289 		}
6290 		err = check_helper_mem_access(env, regno,
6291 					      meta->map_ptr->key_size, false,
6292 					      NULL);
6293 		break;
6294 	case ARG_PTR_TO_MAP_VALUE:
6295 		if (type_may_be_null(arg_type) && register_is_null(reg))
6296 			return 0;
6297 
6298 		/* bpf_map_xxx(..., map_ptr, ..., value) call:
6299 		 * check [value, value + map->value_size) validity
6300 		 */
6301 		if (!meta->map_ptr) {
6302 			/* kernel subsystem misconfigured verifier */
6303 			verbose(env, "invalid map_ptr to access map->value\n");
6304 			return -EACCES;
6305 		}
6306 		meta->raw_mode = arg_type & MEM_UNINIT;
6307 		err = check_helper_mem_access(env, regno,
6308 					      meta->map_ptr->value_size, false,
6309 					      meta);
6310 		break;
6311 	case ARG_PTR_TO_PERCPU_BTF_ID:
6312 		if (!reg->btf_id) {
6313 			verbose(env, "Helper has invalid btf_id in R%d\n", regno);
6314 			return -EACCES;
6315 		}
6316 		meta->ret_btf = reg->btf;
6317 		meta->ret_btf_id = reg->btf_id;
6318 		break;
6319 	case ARG_PTR_TO_SPIN_LOCK:
6320 		if (meta->func_id == BPF_FUNC_spin_lock) {
6321 			if (process_spin_lock(env, regno, true))
6322 				return -EACCES;
6323 		} else if (meta->func_id == BPF_FUNC_spin_unlock) {
6324 			if (process_spin_lock(env, regno, false))
6325 				return -EACCES;
6326 		} else {
6327 			verbose(env, "verifier internal error\n");
6328 			return -EFAULT;
6329 		}
6330 		break;
6331 	case ARG_PTR_TO_TIMER:
6332 		if (process_timer_func(env, regno, meta))
6333 			return -EACCES;
6334 		break;
6335 	case ARG_PTR_TO_FUNC:
6336 		meta->subprogno = reg->subprogno;
6337 		break;
6338 	case ARG_PTR_TO_MEM:
6339 		/* The access to this pointer is only checked when we hit the
6340 		 * next is_mem_size argument below.
6341 		 */
6342 		meta->raw_mode = arg_type & MEM_UNINIT;
6343 		if (arg_type & MEM_FIXED_SIZE) {
6344 			err = check_helper_mem_access(env, regno,
6345 						      fn->arg_size[arg], false,
6346 						      meta);
6347 		}
6348 		break;
6349 	case ARG_CONST_SIZE:
6350 		err = check_mem_size_reg(env, reg, regno, false, meta);
6351 		break;
6352 	case ARG_CONST_SIZE_OR_ZERO:
6353 		err = check_mem_size_reg(env, reg, regno, true, meta);
6354 		break;
6355 	case ARG_PTR_TO_DYNPTR:
6356 		/* We only need to check for initialized / uninitialized helper
6357 		 * dynptr args if the dynptr is not PTR_TO_DYNPTR, as the
6358 		 * assumption is that if it is, a helper function
6359 		 * initialized the dynptr on behalf of the BPF program.
6360 		 */
6361 		if (base_type(reg->type) == PTR_TO_DYNPTR)
6362 			break;
6363 		if (arg_type & MEM_UNINIT) {
6364 			if (!is_dynptr_reg_valid_uninit(env, reg)) {
6365 				verbose(env, "Dynptr has to be an uninitialized dynptr\n");
6366 				return -EINVAL;
6367 			}
6368 
6369 			/* We only support one dynptr being uninitialized at the moment,
6370 			 * which is sufficient for the helper functions we have right now.
6371 			 */
6372 			if (meta->uninit_dynptr_regno) {
6373 				verbose(env, "verifier internal error: multiple uninitialized dynptr args\n");
6374 				return -EFAULT;
6375 			}
6376 
6377 			meta->uninit_dynptr_regno = regno;
6378 		} else if (!is_dynptr_reg_valid_init(env, reg)) {
6379 			verbose(env,
6380 				"Expected an initialized dynptr as arg #%d\n",
6381 				arg + 1);
6382 			return -EINVAL;
6383 		} else if (!is_dynptr_type_expected(env, reg, arg_type)) {
6384 			const char *err_extra = "";
6385 
6386 			switch (arg_type & DYNPTR_TYPE_FLAG_MASK) {
6387 			case DYNPTR_TYPE_LOCAL:
6388 				err_extra = "local";
6389 				break;
6390 			case DYNPTR_TYPE_RINGBUF:
6391 				err_extra = "ringbuf";
6392 				break;
6393 			default:
6394 				err_extra = "<unknown>";
6395 				break;
6396 			}
6397 			verbose(env,
6398 				"Expected a dynptr of type %s as arg #%d\n",
6399 				err_extra, arg + 1);
6400 			return -EINVAL;
6401 		}
6402 		break;
6403 	case ARG_CONST_ALLOC_SIZE_OR_ZERO:
6404 		if (!tnum_is_const(reg->var_off)) {
6405 			verbose(env, "R%d is not a known constant\n",
6406 				regno);
6407 			return -EACCES;
6408 		}
6409 		meta->mem_size = reg->var_off.value;
6410 		err = mark_chain_precision(env, regno);
6411 		if (err)
6412 			return err;
6413 		break;
6414 	case ARG_PTR_TO_INT:
6415 	case ARG_PTR_TO_LONG:
6416 	{
6417 		int size = int_ptr_type_to_size(arg_type);
6418 
6419 		err = check_helper_mem_access(env, regno, size, false, meta);
6420 		if (err)
6421 			return err;
6422 		err = check_ptr_alignment(env, reg, 0, size, true);
6423 		break;
6424 	}
6425 	case ARG_PTR_TO_CONST_STR:
6426 	{
6427 		struct bpf_map *map = reg->map_ptr;
6428 		int map_off;
6429 		u64 map_addr;
6430 		char *str_ptr;
6431 
6432 		if (!bpf_map_is_rdonly(map)) {
6433 			verbose(env, "R%d does not point to a readonly map\n", regno);
6434 			return -EACCES;
6435 		}
6436 
6437 		if (!tnum_is_const(reg->var_off)) {
6438 			verbose(env, "R%d is not a constant address\n", regno);
6439 			return -EACCES;
6440 		}
6441 
6442 		if (!map->ops->map_direct_value_addr) {
6443 			verbose(env, "no direct value access support for this map type\n");
6444 			return -EACCES;
6445 		}
6446 
6447 		err = check_map_access(env, regno, reg->off,
6448 				       map->value_size - reg->off, false,
6449 				       ACCESS_HELPER);
6450 		if (err)
6451 			return err;
6452 
6453 		map_off = reg->off + reg->var_off.value;
6454 		err = map->ops->map_direct_value_addr(map, &map_addr, map_off);
6455 		if (err) {
6456 			verbose(env, "direct value access on string failed\n");
6457 			return err;
6458 		}
6459 
6460 		str_ptr = (char *)(long)(map_addr);
6461 		if (!strnchr(str_ptr + map_off, map->value_size - map_off, 0)) {
6462 			verbose(env, "string is not zero-terminated\n");
6463 			return -EINVAL;
6464 		}
6465 		break;
6466 	}
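	/* An illustrative note (an assumption drawn from the checks above):
	 * ARG_PTR_TO_CONST_STR is what lets helpers such as bpf_snprintf()
	 * take their format string from a read-only map, e.g.
	 *
	 *	const char fmt[] = "pid=%d";	// placed in a rdonly .rodata map
	 *	bpf_snprintf(out, sizeof(out), fmt, args, sizeof(args));
	 *
	 * The verifier requires a constant offset into a read-only map and
	 * walks the bytes above to ensure the string is NUL-terminated before
	 * the program is accepted.
	 */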
6467 	case ARG_PTR_TO_KPTR:
6468 		if (process_kptr_func(env, regno, meta))
6469 			return -EACCES;
6470 		break;
6471 	}
6472 
6473 	return err;
6474 }
6475 
6476 static bool may_update_sockmap(struct bpf_verifier_env *env, int func_id)
6477 {
6478 	enum bpf_attach_type eatype = env->prog->expected_attach_type;
6479 	enum bpf_prog_type type = resolve_prog_type(env->prog);
6480 
6481 	if (func_id != BPF_FUNC_map_update_elem)
6482 		return false;
6483 
6484 	/* It's not possible to get access to a locked struct sock in these
6485 	 * contexts, so updating is safe.
6486 	 */
6487 	switch (type) {
6488 	case BPF_PROG_TYPE_TRACING:
6489 		if (eatype == BPF_TRACE_ITER)
6490 			return true;
6491 		break;
6492 	case BPF_PROG_TYPE_SOCKET_FILTER:
6493 	case BPF_PROG_TYPE_SCHED_CLS:
6494 	case BPF_PROG_TYPE_SCHED_ACT:
6495 	case BPF_PROG_TYPE_XDP:
6496 	case BPF_PROG_TYPE_SK_REUSEPORT:
6497 	case BPF_PROG_TYPE_FLOW_DISSECTOR:
6498 	case BPF_PROG_TYPE_SK_LOOKUP:
6499 		return true;
6500 	default:
6501 		break;
6502 	}
6503 
6504 	verbose(env, "cannot update sockmap in this context\n");
6505 	return false;
6506 }
6507 
6508 static bool allow_tail_call_in_subprogs(struct bpf_verifier_env *env)
6509 {
6510 	return env->prog->jit_requested &&
6511 	       bpf_jit_supports_subprog_tailcalls();
6512 }
6513 
6514 static int check_map_func_compatibility(struct bpf_verifier_env *env,
6515 					struct bpf_map *map, int func_id)
6516 {
6517 	if (!map)
6518 		return 0;
6519 
6520 	/* We need a two-way check: first from the map's perspective ... */
6521 	switch (map->map_type) {
6522 	case BPF_MAP_TYPE_PROG_ARRAY:
6523 		if (func_id != BPF_FUNC_tail_call)
6524 			goto error;
6525 		break;
6526 	case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
6527 		if (func_id != BPF_FUNC_perf_event_read &&
6528 		    func_id != BPF_FUNC_perf_event_output &&
6529 		    func_id != BPF_FUNC_skb_output &&
6530 		    func_id != BPF_FUNC_perf_event_read_value &&
6531 		    func_id != BPF_FUNC_xdp_output)
6532 			goto error;
6533 		break;
6534 	case BPF_MAP_TYPE_RINGBUF:
6535 		if (func_id != BPF_FUNC_ringbuf_output &&
6536 		    func_id != BPF_FUNC_ringbuf_reserve &&
6537 		    func_id != BPF_FUNC_ringbuf_query &&
6538 		    func_id != BPF_FUNC_ringbuf_reserve_dynptr &&
6539 		    func_id != BPF_FUNC_ringbuf_submit_dynptr &&
6540 		    func_id != BPF_FUNC_ringbuf_discard_dynptr)
6541 			goto error;
6542 		break;
6543 	case BPF_MAP_TYPE_USER_RINGBUF:
6544 		if (func_id != BPF_FUNC_user_ringbuf_drain)
6545 			goto error;
6546 		break;
6547 	case BPF_MAP_TYPE_STACK_TRACE:
6548 		if (func_id != BPF_FUNC_get_stackid)
6549 			goto error;
6550 		break;
6551 	case BPF_MAP_TYPE_CGROUP_ARRAY:
6552 		if (func_id != BPF_FUNC_skb_under_cgroup &&
6553 		    func_id != BPF_FUNC_current_task_under_cgroup)
6554 			goto error;
6555 		break;
6556 	case BPF_MAP_TYPE_CGROUP_STORAGE:
6557 	case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
6558 		if (func_id != BPF_FUNC_get_local_storage)
6559 			goto error;
6560 		break;
6561 	case BPF_MAP_TYPE_DEVMAP:
6562 	case BPF_MAP_TYPE_DEVMAP_HASH:
6563 		if (func_id != BPF_FUNC_redirect_map &&
6564 		    func_id != BPF_FUNC_map_lookup_elem)
6565 			goto error;
6566 		break;
6567 	/* Restrict bpf side of cpumap and xskmap, open when use-cases
6568 	 * appear.
6569 	 */
6570 	case BPF_MAP_TYPE_CPUMAP:
6571 		if (func_id != BPF_FUNC_redirect_map)
6572 			goto error;
6573 		break;
6574 	case BPF_MAP_TYPE_XSKMAP:
6575 		if (func_id != BPF_FUNC_redirect_map &&
6576 		    func_id != BPF_FUNC_map_lookup_elem)
6577 			goto error;
6578 		break;
6579 	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
6580 	case BPF_MAP_TYPE_HASH_OF_MAPS:
6581 		if (func_id != BPF_FUNC_map_lookup_elem)
6582 			goto error;
6583 		break;
6584 	case BPF_MAP_TYPE_SOCKMAP:
6585 		if (func_id != BPF_FUNC_sk_redirect_map &&
6586 		    func_id != BPF_FUNC_sock_map_update &&
6587 		    func_id != BPF_FUNC_map_delete_elem &&
6588 		    func_id != BPF_FUNC_msg_redirect_map &&
6589 		    func_id != BPF_FUNC_sk_select_reuseport &&
6590 		    func_id != BPF_FUNC_map_lookup_elem &&
6591 		    !may_update_sockmap(env, func_id))
6592 			goto error;
6593 		break;
6594 	case BPF_MAP_TYPE_SOCKHASH:
6595 		if (func_id != BPF_FUNC_sk_redirect_hash &&
6596 		    func_id != BPF_FUNC_sock_hash_update &&
6597 		    func_id != BPF_FUNC_map_delete_elem &&
6598 		    func_id != BPF_FUNC_msg_redirect_hash &&
6599 		    func_id != BPF_FUNC_sk_select_reuseport &&
6600 		    func_id != BPF_FUNC_map_lookup_elem &&
6601 		    !may_update_sockmap(env, func_id))
6602 			goto error;
6603 		break;
6604 	case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
6605 		if (func_id != BPF_FUNC_sk_select_reuseport)
6606 			goto error;
6607 		break;
6608 	case BPF_MAP_TYPE_QUEUE:
6609 	case BPF_MAP_TYPE_STACK:
6610 		if (func_id != BPF_FUNC_map_peek_elem &&
6611 		    func_id != BPF_FUNC_map_pop_elem &&
6612 		    func_id != BPF_FUNC_map_push_elem)
6613 			goto error;
6614 		break;
6615 	case BPF_MAP_TYPE_SK_STORAGE:
6616 		if (func_id != BPF_FUNC_sk_storage_get &&
6617 		    func_id != BPF_FUNC_sk_storage_delete)
6618 			goto error;
6619 		break;
6620 	case BPF_MAP_TYPE_INODE_STORAGE:
6621 		if (func_id != BPF_FUNC_inode_storage_get &&
6622 		    func_id != BPF_FUNC_inode_storage_delete)
6623 			goto error;
6624 		break;
6625 	case BPF_MAP_TYPE_TASK_STORAGE:
6626 		if (func_id != BPF_FUNC_task_storage_get &&
6627 		    func_id != BPF_FUNC_task_storage_delete)
6628 			goto error;
6629 		break;
6630 	case BPF_MAP_TYPE_CGRP_STORAGE:
6631 		if (func_id != BPF_FUNC_cgrp_storage_get &&
6632 		    func_id != BPF_FUNC_cgrp_storage_delete)
6633 			goto error;
6634 		break;
6635 	case BPF_MAP_TYPE_BLOOM_FILTER:
6636 		if (func_id != BPF_FUNC_map_peek_elem &&
6637 		    func_id != BPF_FUNC_map_push_elem)
6638 			goto error;
6639 		break;
6640 	default:
6641 		break;
6642 	}
6643 
6644 	/* ... and the second is from the function itself. */
6645 	switch (func_id) {
6646 	case BPF_FUNC_tail_call:
6647 		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
6648 			goto error;
6649 		if (env->subprog_cnt > 1 && !allow_tail_call_in_subprogs(env)) {
6650 			verbose(env, "tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls\n");
6651 			return -EINVAL;
6652 		}
6653 		break;
6654 	case BPF_FUNC_perf_event_read:
6655 	case BPF_FUNC_perf_event_output:
6656 	case BPF_FUNC_perf_event_read_value:
6657 	case BPF_FUNC_skb_output:
6658 	case BPF_FUNC_xdp_output:
6659 		if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
6660 			goto error;
6661 		break;
6662 	case BPF_FUNC_ringbuf_output:
6663 	case BPF_FUNC_ringbuf_reserve:
6664 	case BPF_FUNC_ringbuf_query:
6665 	case BPF_FUNC_ringbuf_reserve_dynptr:
6666 	case BPF_FUNC_ringbuf_submit_dynptr:
6667 	case BPF_FUNC_ringbuf_discard_dynptr:
6668 		if (map->map_type != BPF_MAP_TYPE_RINGBUF)
6669 			goto error;
6670 		break;
6671 	case BPF_FUNC_user_ringbuf_drain:
6672 		if (map->map_type != BPF_MAP_TYPE_USER_RINGBUF)
6673 			goto error;
6674 		break;
6675 	case BPF_FUNC_get_stackid:
6676 		if (map->map_type != BPF_MAP_TYPE_STACK_TRACE)
6677 			goto error;
6678 		break;
6679 	case BPF_FUNC_current_task_under_cgroup:
6680 	case BPF_FUNC_skb_under_cgroup:
6681 		if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY)
6682 			goto error;
6683 		break;
6684 	case BPF_FUNC_redirect_map:
6685 		if (map->map_type != BPF_MAP_TYPE_DEVMAP &&
6686 		    map->map_type != BPF_MAP_TYPE_DEVMAP_HASH &&
6687 		    map->map_type != BPF_MAP_TYPE_CPUMAP &&
6688 		    map->map_type != BPF_MAP_TYPE_XSKMAP)
6689 			goto error;
6690 		break;
6691 	case BPF_FUNC_sk_redirect_map:
6692 	case BPF_FUNC_msg_redirect_map:
6693 	case BPF_FUNC_sock_map_update:
6694 		if (map->map_type != BPF_MAP_TYPE_SOCKMAP)
6695 			goto error;
6696 		break;
6697 	case BPF_FUNC_sk_redirect_hash:
6698 	case BPF_FUNC_msg_redirect_hash:
6699 	case BPF_FUNC_sock_hash_update:
6700 		if (map->map_type != BPF_MAP_TYPE_SOCKHASH)
6701 			goto error;
6702 		break;
6703 	case BPF_FUNC_get_local_storage:
6704 		if (map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
6705 		    map->map_type != BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
6706 			goto error;
6707 		break;
6708 	case BPF_FUNC_sk_select_reuseport:
6709 		if (map->map_type != BPF_MAP_TYPE_REUSEPORT_SOCKARRAY &&
6710 		    map->map_type != BPF_MAP_TYPE_SOCKMAP &&
6711 		    map->map_type != BPF_MAP_TYPE_SOCKHASH)
6712 			goto error;
6713 		break;
6714 	case BPF_FUNC_map_pop_elem:
6715 		if (map->map_type != BPF_MAP_TYPE_QUEUE &&
6716 		    map->map_type != BPF_MAP_TYPE_STACK)
6717 			goto error;
6718 		break;
6719 	case BPF_FUNC_map_peek_elem:
6720 	case BPF_FUNC_map_push_elem:
6721 		if (map->map_type != BPF_MAP_TYPE_QUEUE &&
6722 		    map->map_type != BPF_MAP_TYPE_STACK &&
6723 		    map->map_type != BPF_MAP_TYPE_BLOOM_FILTER)
6724 			goto error;
6725 		break;
6726 	case BPF_FUNC_map_lookup_percpu_elem:
6727 		if (map->map_type != BPF_MAP_TYPE_PERCPU_ARRAY &&
6728 		    map->map_type != BPF_MAP_TYPE_PERCPU_HASH &&
6729 		    map->map_type != BPF_MAP_TYPE_LRU_PERCPU_HASH)
6730 			goto error;
6731 		break;
6732 	case BPF_FUNC_sk_storage_get:
6733 	case BPF_FUNC_sk_storage_delete:
6734 		if (map->map_type != BPF_MAP_TYPE_SK_STORAGE)
6735 			goto error;
6736 		break;
6737 	case BPF_FUNC_inode_storage_get:
6738 	case BPF_FUNC_inode_storage_delete:
6739 		if (map->map_type != BPF_MAP_TYPE_INODE_STORAGE)
6740 			goto error;
6741 		break;
6742 	case BPF_FUNC_task_storage_get:
6743 	case BPF_FUNC_task_storage_delete:
6744 		if (map->map_type != BPF_MAP_TYPE_TASK_STORAGE)
6745 			goto error;
6746 		break;
6747 	case BPF_FUNC_cgrp_storage_get:
6748 	case BPF_FUNC_cgrp_storage_delete:
6749 		if (map->map_type != BPF_MAP_TYPE_CGRP_STORAGE)
6750 			goto error;
6751 		break;
6752 	default:
6753 		break;
6754 	}
6755 
6756 	return 0;
6757 error:
6758 	verbose(env, "cannot pass map_type %d into func %s#%d\n",
6759 		map->map_type, func_id_name(func_id), func_id);
6760 	return -EINVAL;
6761 }
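
/* Illustrative sketch (not verifier code) of a pairing this two-way check
 * rejects; the program-side names and the libbpf-style map definition below
 * are hypothetical. BPF_FUNC_redirect_map is only compatible with DEVMAP,
 * DEVMAP_HASH, CPUMAP and XSKMAP:
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_HASH);
 *		__uint(max_entries, 64);
 *		__type(key, __u32);
 *		__type(value, __u32);
 *	} not_a_devmap SEC(".maps");
 *
 *	// bpf_redirect_map(&not_a_devmap, 0, 0) would be rejected with
 *	// "cannot pass map_type 1 into func bpf_redirect_map#..."
 */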
6762 
6763 static bool check_raw_mode_ok(const struct bpf_func_proto *fn)
6764 {
6765 	int count = 0;
6766 
6767 	if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM)
6768 		count++;
6769 	if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM)
6770 		count++;
6771 	if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM)
6772 		count++;
6773 	if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM)
6774 		count++;
6775 	if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM)
6776 		count++;
6777 
6778 	/* We only support one arg being in raw mode at the moment,
6779 	 * which is sufficient for the helper functions we have
6780 	 * right now.
6781 	 */
6782 	return count <= 1;
6783 }
6784 
6785 static bool check_args_pair_invalid(const struct bpf_func_proto *fn, int arg)
6786 {
6787 	bool is_fixed = fn->arg_type[arg] & MEM_FIXED_SIZE;
6788 	bool has_size = fn->arg_size[arg] != 0;
6789 	bool is_next_size = false;
6790 
6791 	if (arg + 1 < ARRAY_SIZE(fn->arg_type))
6792 		is_next_size = arg_type_is_mem_size(fn->arg_type[arg + 1]);
6793 
6794 	if (base_type(fn->arg_type[arg]) != ARG_PTR_TO_MEM)
6795 		return is_next_size;
6796 
6797 	return has_size == is_next_size || is_next_size == is_fixed;
6798 }
6799 
6800 static bool check_arg_pair_ok(const struct bpf_func_proto *fn)
6801 {
6802 	/* bpf_xxx(..., buf, len) call will access 'len'
6803 	 * bytes from memory 'buf'. Both arg types need
6804 	 * to be paired, so make sure there's no buggy
6805 	 * helper function specification.
6806 	 */
6807 	if (arg_type_is_mem_size(fn->arg1_type) ||
6808 	    check_args_pair_invalid(fn, 0) ||
6809 	    check_args_pair_invalid(fn, 1) ||
6810 	    check_args_pair_invalid(fn, 2) ||
6811 	    check_args_pair_invalid(fn, 3) ||
6812 	    check_args_pair_invalid(fn, 4))
6813 		return false;
6814 
6815 	return true;
6816 }
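
/* Minimal sketch of the buf/len pairing this check enforces; the helper name
 * below is hypothetical, only the arg_type pairing matters:
 *
 *	static const struct bpf_func_proto bpf_example_read_proto = {
 *		.func		= bpf_example_read,
 *		.gpl_only	= false,
 *		.ret_type	= RET_INTEGER,
 *		.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
 *		.arg2_type	= ARG_CONST_SIZE,
 *	};
 *
 * arg1 is the raw-mode buffer and arg2 is its size; a proto where the size
 * argument does not immediately follow its memory argument is rejected here.
 */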
6817 
6818 static bool check_btf_id_ok(const struct bpf_func_proto *fn)
6819 {
6820 	int i;
6821 
6822 	for (i = 0; i < ARRAY_SIZE(fn->arg_type); i++) {
6823 		if (base_type(fn->arg_type[i]) == ARG_PTR_TO_BTF_ID)
6824 			return !!fn->arg_btf_id[i];
6825 		if (base_type(fn->arg_type[i]) == ARG_PTR_TO_SPIN_LOCK)
6826 			return fn->arg_btf_id[i] == BPF_PTR_POISON;
6827 		if (base_type(fn->arg_type[i]) != ARG_PTR_TO_BTF_ID && fn->arg_btf_id[i] &&
6828 		    /* arg_btf_id and arg_size are in a union. */
6829 		    (base_type(fn->arg_type[i]) != ARG_PTR_TO_MEM ||
6830 		     !(fn->arg_type[i] & MEM_FIXED_SIZE)))
6831 			return false;
6832 	}
6833 
6834 	return true;
6835 }
6836 
6837 static int check_func_proto(const struct bpf_func_proto *fn, int func_id)
6838 {
6839 	return check_raw_mode_ok(fn) &&
6840 	       check_arg_pair_ok(fn) &&
6841 	       check_btf_id_ok(fn) ? 0 : -EINVAL;
6842 }
6843 
6844 /* Packet data might have moved; any old PTR_TO_PACKET[_META,_END]
6845  * pointers are now invalid, so turn them into unknown SCALAR_VALUE.
6846  */
6847 static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
6848 {
6849 	struct bpf_func_state *state;
6850 	struct bpf_reg_state *reg;
6851 
6852 	bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({
6853 		if (reg_is_pkt_pointer_any(reg))
6854 			__mark_reg_unknown(env, reg);
6855 	}));
6856 }
6857 
6858 enum {
6859 	AT_PKT_END = -1,
6860 	BEYOND_PKT_END = -2,
6861 };
6862 
6863 static void mark_pkt_end(struct bpf_verifier_state *vstate, int regn, bool range_open)
6864 {
6865 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
6866 	struct bpf_reg_state *reg = &state->regs[regn];
6867 
6868 	if (reg->type != PTR_TO_PACKET)
6869 		/* PTR_TO_PACKET_META is not supported yet */
6870 		return;
6871 
6872 	/* The 'reg' satisfies pkt > pkt_end or pkt >= pkt_end.
6873 	 * How far beyond pkt_end it goes is unknown.
6874 	 * If (!range_open), this is the pkt >= pkt_end case.
6875 	 * If (range_open), this is the pkt > pkt_end case, hence the
6876 	 * pointer is at least 1 byte bigger than pkt_end.
6877 	 */
6878 	if (range_open)
6879 		reg->range = BEYOND_PKT_END;
6880 	else
6881 		reg->range = AT_PKT_END;
6882 }
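
/* Illustrative sketch (BPF program side, not verifier code) of the branches
 * this models; 'ctx' is an assumed xdp_md-style context:
 *
 *	void *data = (void *)(long)ctx->data;
 *	void *data_end = (void *)(long)ctx->data_end;
 *
 *	if (data + 14 > data_end)
 *		return XDP_DROP;	// on this path data + 14 is BEYOND_PKT_END
 *	// here data + 14 <= data_end, so a 14-byte access is within range
 */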
6883 
6884 /* The pointer with the specified id has released its reference to kernel
6885  * resources. Identify all copies of the same pointer and clear the reference.
6886  */
6887 static int release_reference(struct bpf_verifier_env *env,
6888 			     int ref_obj_id)
6889 {
6890 	struct bpf_func_state *state;
6891 	struct bpf_reg_state *reg;
6892 	int err;
6893 
6894 	err = release_reference_state(cur_func(env), ref_obj_id);
6895 	if (err)
6896 		return err;
6897 
6898 	bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({
6899 		if (reg->ref_obj_id == ref_obj_id) {
6900 			if (!env->allow_ptr_leaks)
6901 				__mark_reg_not_init(env, reg);
6902 			else
6903 				__mark_reg_unknown(env, reg);
6904 		}
6905 	}));
6906 
6907 	return 0;
6908 }
6909 
6910 static void clear_caller_saved_regs(struct bpf_verifier_env *env,
6911 				    struct bpf_reg_state *regs)
6912 {
6913 	int i;
6914 
6915 	/* after the call registers r0 - r5 were scratched */
6916 	for (i = 0; i < CALLER_SAVED_REGS; i++) {
6917 		mark_reg_not_init(env, regs, caller_saved[i]);
6918 		check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
6919 	}
6920 }
6921 
6922 typedef int (*set_callee_state_fn)(struct bpf_verifier_env *env,
6923 				   struct bpf_func_state *caller,
6924 				   struct bpf_func_state *callee,
6925 				   int insn_idx);
6926 
6927 static int set_callee_state(struct bpf_verifier_env *env,
6928 			    struct bpf_func_state *caller,
6929 			    struct bpf_func_state *callee, int insn_idx);
6930 
6931 static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
6932 			     int *insn_idx, int subprog,
6933 			     set_callee_state_fn set_callee_state_cb)
6934 {
6935 	struct bpf_verifier_state *state = env->cur_state;
6936 	struct bpf_func_info_aux *func_info_aux;
6937 	struct bpf_func_state *caller, *callee;
6938 	int err;
6939 	bool is_global = false;
6940 
6941 	if (state->curframe + 1 >= MAX_CALL_FRAMES) {
6942 		verbose(env, "the call stack of %d frames is too deep\n",
6943 			state->curframe + 2);
6944 		return -E2BIG;
6945 	}
6946 
6947 	caller = state->frame[state->curframe];
6948 	if (state->frame[state->curframe + 1]) {
6949 		verbose(env, "verifier bug. Frame %d already allocated\n",
6950 			state->curframe + 1);
6951 		return -EFAULT;
6952 	}
6953 
6954 	func_info_aux = env->prog->aux->func_info_aux;
6955 	if (func_info_aux)
6956 		is_global = func_info_aux[subprog].linkage == BTF_FUNC_GLOBAL;
6957 	err = btf_check_subprog_call(env, subprog, caller->regs);
6958 	if (err == -EFAULT)
6959 		return err;
6960 	if (is_global) {
6961 		if (err) {
6962 			verbose(env, "Caller passes invalid args into func#%d\n",
6963 				subprog);
6964 			return err;
6965 		} else {
6966 			if (env->log.level & BPF_LOG_LEVEL)
6967 				verbose(env,
6968 					"Func#%d is global and valid. Skipping.\n",
6969 					subprog);
6970 			clear_caller_saved_regs(env, caller->regs);
6971 
6972 			/* All global functions return a 64-bit SCALAR_VALUE */
6973 			mark_reg_unknown(env, caller->regs, BPF_REG_0);
6974 			caller->regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG;
6975 
6976 			/* continue with next insn after call */
6977 			return 0;
6978 		}
6979 	}
6980 
6981 	/* set_callee_state is used for direct subprog calls, but we are
6982 	 * interested in validating only BPF helpers that can call subprogs as
6983 	 * callbacks
6984 	 */
6985 	if (set_callee_state_cb != set_callee_state && !is_callback_calling_function(insn->imm)) {
6986 		verbose(env, "verifier bug: helper %s#%d is not marked as callback-calling\n",
6987 			func_id_name(insn->imm), insn->imm);
6988 		return -EFAULT;
6989 	}
6990 
6991 	if (insn->code == (BPF_JMP | BPF_CALL) &&
6992 	    insn->src_reg == 0 &&
6993 	    insn->imm == BPF_FUNC_timer_set_callback) {
6994 		struct bpf_verifier_state *async_cb;
6995 
6996 		/* there is no real recursion here. timer callbacks are async */
6997 		env->subprog_info[subprog].is_async_cb = true;
6998 		async_cb = push_async_cb(env, env->subprog_info[subprog].start,
6999 					 *insn_idx, subprog);
7000 		if (!async_cb)
7001 			return -EFAULT;
7002 		callee = async_cb->frame[0];
7003 		callee->async_entry_cnt = caller->async_entry_cnt + 1;
7004 
7005 		/* Convert bpf_timer_set_callback() args into timer callback args */
7006 		err = set_callee_state_cb(env, caller, callee, *insn_idx);
7007 		if (err)
7008 			return err;
7009 
7010 		clear_caller_saved_regs(env, caller->regs);
7011 		mark_reg_unknown(env, caller->regs, BPF_REG_0);
7012 		caller->regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG;
7013 		/* continue with next insn after call */
7014 		return 0;
7015 	}
7016 
7017 	callee = kzalloc(sizeof(*callee), GFP_KERNEL);
7018 	if (!callee)
7019 		return -ENOMEM;
7020 	state->frame[state->curframe + 1] = callee;
7021 
7022 	/* The callee cannot access r0, r6 - r9 for reading and has to write
7023 	 * into its own stack before reading from it.
7024 	 * The callee can read/write into the caller's stack.
7025 	 */
7026 	init_func_state(env, callee,
7027 			/* remember the callsite, it will be used by bpf_exit */
7028 			*insn_idx /* callsite */,
7029 			state->curframe + 1 /* frameno within this callchain */,
7030 			subprog /* subprog number within this prog */);
7031 
7032 	/* Transfer references to the callee */
7033 	err = copy_reference_state(callee, caller);
7034 	if (err)
7035 		goto err_out;
7036 
7037 	err = set_callee_state_cb(env, caller, callee, *insn_idx);
7038 	if (err)
7039 		goto err_out;
7040 
7041 	clear_caller_saved_regs(env, caller->regs);
7042 
7043 	/* only increment it after check_reg_arg() finished */
7044 	state->curframe++;
7045 
7046 	/* and go analyze first insn of the callee */
7047 	*insn_idx = env->subprog_info[subprog].start - 1;
7048 
7049 	if (env->log.level & BPF_LOG_LEVEL) {
7050 		verbose(env, "caller:\n");
7051 		print_verifier_state(env, caller, true);
7052 		verbose(env, "callee:\n");
7053 		print_verifier_state(env, callee, true);
7054 	}
7055 	return 0;
7056 
7057 err_out:
7058 	free_func_state(callee);
7059 	state->frame[state->curframe + 1] = NULL;
7060 	return err;
7061 }
7062 
7063 int map_set_for_each_callback_args(struct bpf_verifier_env *env,
7064 				   struct bpf_func_state *caller,
7065 				   struct bpf_func_state *callee)
7066 {
7067 	/* bpf_for_each_map_elem(struct bpf_map *map, void *callback_fn,
7068 	 *      void *callback_ctx, u64 flags);
7069 	 * callback_fn(struct bpf_map *map, void *key, void *value,
7070 	 *      void *callback_ctx);
7071 	 */
7072 	callee->regs[BPF_REG_1] = caller->regs[BPF_REG_1];
7073 
7074 	callee->regs[BPF_REG_2].type = PTR_TO_MAP_KEY;
7075 	__mark_reg_known_zero(&callee->regs[BPF_REG_2]);
7076 	callee->regs[BPF_REG_2].map_ptr = caller->regs[BPF_REG_1].map_ptr;
7077 
7078 	callee->regs[BPF_REG_3].type = PTR_TO_MAP_VALUE;
7079 	__mark_reg_known_zero(&callee->regs[BPF_REG_3]);
7080 	callee->regs[BPF_REG_3].map_ptr = caller->regs[BPF_REG_1].map_ptr;
7081 
7082 	/* pointer to stack or null */
7083 	callee->regs[BPF_REG_4] = caller->regs[BPF_REG_3];
7084 
7085 	/* unused */
7086 	__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
7087 	return 0;
7088 }
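
/* Hedged sketch of the BPF-side shape these callback args correspond to;
 * the callback and map names are hypothetical:
 *
 *	static long check_elem(struct bpf_map *map, __u32 *key, __u64 *val,
 *			       void *ctx)
 *	{
 *		return *val == 0;	// returning 1 stops the iteration
 *	}
 *	...
 *	bpf_for_each_map_elem(&my_map, check_elem, &my_ctx, 0);
 */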
7089 
7090 static int set_callee_state(struct bpf_verifier_env *env,
7091 			    struct bpf_func_state *caller,
7092 			    struct bpf_func_state *callee, int insn_idx)
7093 {
7094 	int i;
7095 
7096 	/* copy r1 - r5 args that the callee can access.  The copy includes parent
7097 	 * pointers, which connect the callee up to the liveness chain.
7098 	 */
7099 	for (i = BPF_REG_1; i <= BPF_REG_5; i++)
7100 		callee->regs[i] = caller->regs[i];
7101 	return 0;
7102 }
7103 
7104 static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
7105 			   int *insn_idx)
7106 {
7107 	int subprog, target_insn;
7108 
7109 	target_insn = *insn_idx + insn->imm + 1;
7110 	subprog = find_subprog(env, target_insn);
7111 	if (subprog < 0) {
7112 		verbose(env, "verifier bug. No program starts at insn %d\n",
7113 			target_insn);
7114 		return -EFAULT;
7115 	}
7116 
7117 	return __check_func_call(env, insn, insn_idx, subprog, set_callee_state);
7118 }
7119 
7120 static int set_map_elem_callback_state(struct bpf_verifier_env *env,
7121 				       struct bpf_func_state *caller,
7122 				       struct bpf_func_state *callee,
7123 				       int insn_idx)
7124 {
7125 	struct bpf_insn_aux_data *insn_aux = &env->insn_aux_data[insn_idx];
7126 	struct bpf_map *map;
7127 	int err;
7128 
7129 	if (bpf_map_ptr_poisoned(insn_aux)) {
7130 		verbose(env, "tail_call abusing map_ptr\n");
7131 		return -EINVAL;
7132 	}
7133 
7134 	map = BPF_MAP_PTR(insn_aux->map_ptr_state);
7135 	if (!map->ops->map_set_for_each_callback_args ||
7136 	    !map->ops->map_for_each_callback) {
7137 		verbose(env, "callback function not allowed for map\n");
7138 		return -ENOTSUPP;
7139 	}
7140 
7141 	err = map->ops->map_set_for_each_callback_args(env, caller, callee);
7142 	if (err)
7143 		return err;
7144 
7145 	callee->in_callback_fn = true;
7146 	callee->callback_ret_range = tnum_range(0, 1);
7147 	return 0;
7148 }
7149 
7150 static int set_loop_callback_state(struct bpf_verifier_env *env,
7151 				   struct bpf_func_state *caller,
7152 				   struct bpf_func_state *callee,
7153 				   int insn_idx)
7154 {
7155 	/* bpf_loop(u32 nr_loops, void *callback_fn, void *callback_ctx,
7156 	 *	    u64 flags);
7157 	 * callback_fn(u32 index, void *callback_ctx);
7158 	 */
7159 	callee->regs[BPF_REG_1].type = SCALAR_VALUE;
7160 	callee->regs[BPF_REG_2] = caller->regs[BPF_REG_3];
7161 
7162 	/* unused */
7163 	__mark_reg_not_init(env, &callee->regs[BPF_REG_3]);
7164 	__mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
7165 	__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
7166 
7167 	callee->in_callback_fn = true;
7168 	callee->callback_ret_range = tnum_range(0, 1);
7169 	return 0;
7170 }
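
/* Hedged sketch of the corresponding BPF-side usage; names are hypothetical:
 *
 *	static long step(__u32 index, void *ctx)
 *	{
 *		return 0;	// 0 continues, 1 breaks out of the loop
 *	}
 *	...
 *	bpf_loop(100, step, &my_ctx, 0);
 */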
7171 
7172 static int set_timer_callback_state(struct bpf_verifier_env *env,
7173 				    struct bpf_func_state *caller,
7174 				    struct bpf_func_state *callee,
7175 				    int insn_idx)
7176 {
7177 	struct bpf_map *map_ptr = caller->regs[BPF_REG_1].map_ptr;
7178 
7179 	/* bpf_timer_set_callback(struct bpf_timer *timer, void *callback_fn);
7180 	 * callback_fn(struct bpf_map *map, void *key, void *value);
7181 	 */
7182 	callee->regs[BPF_REG_1].type = CONST_PTR_TO_MAP;
7183 	__mark_reg_known_zero(&callee->regs[BPF_REG_1]);
7184 	callee->regs[BPF_REG_1].map_ptr = map_ptr;
7185 
7186 	callee->regs[BPF_REG_2].type = PTR_TO_MAP_KEY;
7187 	__mark_reg_known_zero(&callee->regs[BPF_REG_2]);
7188 	callee->regs[BPF_REG_2].map_ptr = map_ptr;
7189 
7190 	callee->regs[BPF_REG_3].type = PTR_TO_MAP_VALUE;
7191 	__mark_reg_known_zero(&callee->regs[BPF_REG_3]);
7192 	callee->regs[BPF_REG_3].map_ptr = map_ptr;
7193 
7194 	/* unused */
7195 	__mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
7196 	__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
7197 	callee->in_async_callback_fn = true;
7198 	callee->callback_ret_range = tnum_range(0, 1);
7199 	return 0;
7200 }
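
/* Hedged sketch of the BPF-side pairing this models; names are hypothetical
 * and the timer is assumed to live in a map value:
 *
 *	static int timer_cb(void *map, __u32 *key, struct map_val *val)
 *	{
 *		return 0;
 *	}
 *	...
 *	bpf_timer_init(&val->timer, &my_map, CLOCK_MONOTONIC);
 *	bpf_timer_set_callback(&val->timer, timer_cb);
 *	bpf_timer_start(&val->timer, 1000000, 0);	// 1 ms, in nanoseconds
 */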
7201 
7202 static int set_find_vma_callback_state(struct bpf_verifier_env *env,
7203 				       struct bpf_func_state *caller,
7204 				       struct bpf_func_state *callee,
7205 				       int insn_idx)
7206 {
7207 	/* bpf_find_vma(struct task_struct *task, u64 addr,
7208 	 *               void *callback_fn, void *callback_ctx, u64 flags)
7209 	 * (callback_fn)(struct task_struct *task,
7210 	 *               struct vm_area_struct *vma, void *callback_ctx);
7211 	 */
7212 	callee->regs[BPF_REG_1] = caller->regs[BPF_REG_1];
7213 
7214 	callee->regs[BPF_REG_2].type = PTR_TO_BTF_ID;
7215 	__mark_reg_known_zero(&callee->regs[BPF_REG_2]);
7216 	callee->regs[BPF_REG_2].btf = btf_vmlinux;
7217 	callee->regs[BPF_REG_2].btf_id = btf_tracing_ids[BTF_TRACING_TYPE_VMA];
7218 
7219 	/* pointer to stack or null */
7220 	callee->regs[BPF_REG_3] = caller->regs[BPF_REG_4];
7221 
7222 	/* unused */
7223 	__mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
7224 	__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
7225 	callee->in_callback_fn = true;
7226 	callee->callback_ret_range = tnum_range(0, 1);
7227 	return 0;
7228 }
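
/* Hedged sketch of the BPF-side callback shape; names are hypothetical:
 *
 *	static long vma_cb(struct task_struct *task, struct vm_area_struct *vma,
 *			   void *ctx)
 *	{
 *		return 0;
 *	}
 *	...
 *	bpf_find_vma(task, addr, vma_cb, &my_ctx, 0);
 */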
7229 
7230 static int set_user_ringbuf_callback_state(struct bpf_verifier_env *env,
7231 					   struct bpf_func_state *caller,
7232 					   struct bpf_func_state *callee,
7233 					   int insn_idx)
7234 {
7235 	/* bpf_user_ringbuf_drain(struct bpf_map *map, void *callback_fn,
7236 	 *			  void *callback_ctx, u64 flags);
7237 	 * callback_fn(struct bpf_dynptr_t *dynptr, void *callback_ctx);
7238 	 */
7239 	__mark_reg_not_init(env, &callee->regs[BPF_REG_0]);
7240 	callee->regs[BPF_REG_1].type = PTR_TO_DYNPTR | DYNPTR_TYPE_LOCAL;
7241 	__mark_reg_known_zero(&callee->regs[BPF_REG_1]);
7242 	callee->regs[BPF_REG_2] = caller->regs[BPF_REG_3];
7243 
7244 	/* unused */
7245 	__mark_reg_not_init(env, &callee->regs[BPF_REG_3]);
7246 	__mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
7247 	__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
7248 
7249 	callee->in_callback_fn = true;
7250 	callee->callback_ret_range = tnum_range(0, 1);
7251 	return 0;
7252 }
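
/* Hedged sketch of the BPF-side drain callback; names are hypothetical:
 *
 *	static long drain_cb(struct bpf_dynptr *dynptr, void *ctx)
 *	{
 *		// each sample is only accessible through the dynptr here;
 *		// the verifier enforces a return value in [0, 1]
 *		return 0;
 *	}
 *	...
 *	bpf_user_ringbuf_drain(&user_rb, drain_cb, &my_ctx, 0);
 */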
7253 
7254 static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
7255 {
7256 	struct bpf_verifier_state *state = env->cur_state;
7257 	struct bpf_func_state *caller, *callee;
7258 	struct bpf_reg_state *r0;
7259 	int err;
7260 
7261 	callee = state->frame[state->curframe];
7262 	r0 = &callee->regs[BPF_REG_0];
7263 	if (r0->type == PTR_TO_STACK) {
7264 		/* technically it's ok to return the caller's stack pointer
7265 		 * (or the caller's caller's pointer) back to the caller,
7266 		 * since these pointers are valid. Only the current stack
7267 		 * pointer becomes invalid as soon as the function exits,
7268 		 * but let's be conservative.
7269 		 */
7270 		verbose(env, "cannot return stack pointer to the caller\n");
7271 		return -EINVAL;
7272 	}
7273 
7274 	caller = state->frame[state->curframe - 1];
7275 	if (callee->in_callback_fn) {
7276 		/* enforce R0 return value range [0, 1]. */
7277 		struct tnum range = callee->callback_ret_range;
7278 
7279 		if (r0->type != SCALAR_VALUE) {
7280 			verbose(env, "R0 not a scalar value\n");
7281 			return -EACCES;
7282 		}
7283 		if (!tnum_in(range, r0->var_off)) {
7284 			verbose_invalid_scalar(env, r0, &range, "callback return", "R0");
7285 			return -EINVAL;
7286 		}
7287 	} else {
7288 		/* return to the caller whatever r0 had in the callee */
7289 		caller->regs[BPF_REG_0] = *r0;
7290 	}
7291 
7292 	/* The callback_fn frame should have released its own additions to the
7293 	 * parent's reference state at this point, or check_reference_leak would
7294 	 * have complained; hence it must be the same as the caller's, and there
7295 	 * is no need to copy it back.
7296 	 */
7297 	if (!callee->in_callback_fn) {
7298 		/* Transfer references to the caller */
7299 		err = copy_reference_state(caller, callee);
7300 		if (err)
7301 			return err;
7302 	}
7303 
7304 	*insn_idx = callee->callsite + 1;
7305 	if (env->log.level & BPF_LOG_LEVEL) {
7306 		verbose(env, "returning from callee:\n");
7307 		print_verifier_state(env, callee, true);
7308 		verbose(env, "to caller at %d:\n", *insn_idx);
7309 		print_verifier_state(env, caller, true);
7310 	}
7311 	/* clear everything in the callee */
7312 	free_func_state(callee);
7313 	state->frame[state->curframe--] = NULL;
7314 	return 0;
7315 }
7316 
7317 static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type,
7318 				   int func_id,
7319 				   struct bpf_call_arg_meta *meta)
7320 {
7321 	struct bpf_reg_state *ret_reg = &regs[BPF_REG_0];
7322 
7323 	if (ret_type != RET_INTEGER ||
7324 	    (func_id != BPF_FUNC_get_stack &&
7325 	     func_id != BPF_FUNC_get_task_stack &&
7326 	     func_id != BPF_FUNC_probe_read_str &&
7327 	     func_id != BPF_FUNC_probe_read_kernel_str &&
7328 	     func_id != BPF_FUNC_probe_read_user_str))
7329 		return;
7330 
7331 	ret_reg->smax_value = meta->msize_max_value;
7332 	ret_reg->s32_max_value = meta->msize_max_value;
7333 	ret_reg->smin_value = -MAX_ERRNO;
7334 	ret_reg->s32_min_value = -MAX_ERRNO;
7335 	reg_bounds_sync(ret_reg);
7336 }
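
/* Hedged sketch of why this refinement matters on the BPF side; names are
 * hypothetical:
 *
 *	char buf[64];
 *	long n = bpf_probe_read_kernel_str(buf, sizeof(buf), src);
 *	if (n > 0)
 *		// the verifier now knows n <= 64, so buf[n - 1] is in bounds
 *		last = buf[n - 1];
 */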
7337 
7338 static int
7339 record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
7340 		int func_id, int insn_idx)
7341 {
7342 	struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
7343 	struct bpf_map *map = meta->map_ptr;
7344 
7345 	if (func_id != BPF_FUNC_tail_call &&
7346 	    func_id != BPF_FUNC_map_lookup_elem &&
7347 	    func_id != BPF_FUNC_map_update_elem &&
7348 	    func_id != BPF_FUNC_map_delete_elem &&
7349 	    func_id != BPF_FUNC_map_push_elem &&
7350 	    func_id != BPF_FUNC_map_pop_elem &&
7351 	    func_id != BPF_FUNC_map_peek_elem &&
7352 	    func_id != BPF_FUNC_for_each_map_elem &&
7353 	    func_id != BPF_FUNC_redirect_map &&
7354 	    func_id != BPF_FUNC_map_lookup_percpu_elem)
7355 		return 0;
7356 
7357 	if (map == NULL) {
7358 		verbose(env, "kernel subsystem misconfigured verifier\n");
7359 		return -EINVAL;
7360 	}
7361 
7362 	/* In the read-only case, some additional restrictions
7363 	 * need to be applied in order to prevent the program
7364 	 * from altering the state of the map.
7365 	 */
7366 	if ((map->map_flags & BPF_F_RDONLY_PROG) &&
7367 	    (func_id == BPF_FUNC_map_delete_elem ||
7368 	     func_id == BPF_FUNC_map_update_elem ||
7369 	     func_id == BPF_FUNC_map_push_elem ||
7370 	     func_id == BPF_FUNC_map_pop_elem)) {
7371 		verbose(env, "write into map forbidden\n");
7372 		return -EACCES;
7373 	}
7374 
7375 	if (!BPF_MAP_PTR(aux->map_ptr_state))
7376 		bpf_map_ptr_store(aux, meta->map_ptr,
7377 				  !meta->map_ptr->bypass_spec_v1);
7378 	else if (BPF_MAP_PTR(aux->map_ptr_state) != meta->map_ptr)
7379 		bpf_map_ptr_store(aux, BPF_MAP_PTR_POISON,
7380 				  !meta->map_ptr->bypass_spec_v1);
7381 	return 0;
7382 }
7383 
7384 static int
7385 record_func_key(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
7386 		int func_id, int insn_idx)
7387 {
7388 	struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
7389 	struct bpf_reg_state *regs = cur_regs(env), *reg;
7390 	struct bpf_map *map = meta->map_ptr;
7391 	u64 val, max;
7392 	int err;
7393 
7394 	if (func_id != BPF_FUNC_tail_call)
7395 		return 0;
7396 	if (!map || map->map_type != BPF_MAP_TYPE_PROG_ARRAY) {
7397 		verbose(env, "kernel subsystem misconfigured verifier\n");
7398 		return -EINVAL;
7399 	}
7400 
7401 	reg = &regs[BPF_REG_3];
7402 	val = reg->var_off.value;
7403 	max = map->max_entries;
7404 
7405 	if (!(register_is_const(reg) && val < max)) {
7406 		bpf_map_key_store(aux, BPF_MAP_KEY_POISON);
7407 		return 0;
7408 	}
7409 
7410 	err = mark_chain_precision(env, BPF_REG_3);
7411 	if (err)
7412 		return err;
7413 	if (bpf_map_key_unseen(aux))
7414 		bpf_map_key_store(aux, val);
7415 	else if (!bpf_map_key_poisoned(aux) &&
7416 		  bpf_map_key_immediate(aux) != val)
7417 		bpf_map_key_store(aux, BPF_MAP_KEY_POISON);
7418 	return 0;
7419 }
7420 
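
/* Hedged sketch: a tail call with a constant, in-range index such as
 *
 *	bpf_tail_call(ctx, &prog_array_map, 2);
 *
 * records key 2 here so that later passes may patch the call into a direct
 * jump; a variable or out-of-range index poisons the recorded key instead.
 */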
7421 static int check_reference_leak(struct bpf_verifier_env *env)
7422 {
7423 	struct bpf_func_state *state = cur_func(env);
7424 	bool refs_lingering = false;
7425 	int i;
7426 
7427 	if (state->frameno && !state->in_callback_fn)
7428 		return 0;
7429 
7430 	for (i = 0; i < state->acquired_refs; i++) {
7431 		if (state->in_callback_fn && state->refs[i].callback_ref != state->frameno)
7432 			continue;
7433 		verbose(env, "Unreleased reference id=%d alloc_insn=%d\n",
7434 			state->refs[i].id, state->refs[i].insn_idx);
7435 		refs_lingering = true;
7436 	}
7437 	return refs_lingering ? -EINVAL : 0;
7438 }
7439 
7440 static int check_bpf_snprintf_call(struct bpf_verifier_env *env,
7441 				   struct bpf_reg_state *regs)
7442 {
7443 	struct bpf_reg_state *fmt_reg = &regs[BPF_REG_3];
7444 	struct bpf_reg_state *data_len_reg = &regs[BPF_REG_5];
7445 	struct bpf_map *fmt_map = fmt_reg->map_ptr;
7446 	int err, fmt_map_off, num_args;
7447 	u64 fmt_addr;
7448 	char *fmt;
7449 
7450 	/* data must be an array of u64 */
7451 	if (data_len_reg->var_off.value % 8)
7452 		return -EINVAL;
7453 	num_args = data_len_reg->var_off.value / 8;
7454 
7455 	/* fmt being ARG_PTR_TO_CONST_STR guarantees that var_off is const
7456 	 * and map_direct_value_addr is set.
7457 	 */
7458 	fmt_map_off = fmt_reg->off + fmt_reg->var_off.value;
7459 	err = fmt_map->ops->map_direct_value_addr(fmt_map, &fmt_addr,
7460 						  fmt_map_off);
7461 	if (err) {
7462 		verbose(env, "verifier bug\n");
7463 		return -EFAULT;
7464 	}
7465 	fmt = (char *)(long)fmt_addr + fmt_map_off;
7466 
7467 	/* We are also guaranteed that fmt+fmt_map_off is NULL terminated, so
7468 	 * we can focus on validating the format specifiers.
7469 	 */
7470 	err = bpf_bprintf_prepare(fmt, UINT_MAX, NULL, NULL, num_args);
7471 	if (err < 0)
7472 		verbose(env, "Invalid format string\n");
7473 
7474 	return err;
7475 }
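
/* Hedged sketch of the BPF-side call being validated; names are hypothetical:
 *
 *	static const char fmt[] = "pid %d comm %s";	// lives in read-only map data
 *	__u64 args[] = { pid, (long)comm };		// array of u64, so len % 8 == 0
 *	char out[64];
 *
 *	bpf_snprintf(out, sizeof(out), fmt, args, sizeof(args));
 */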
7476 
7477 static int check_get_func_ip(struct bpf_verifier_env *env)
7478 {
7479 	enum bpf_prog_type type = resolve_prog_type(env->prog);
7480 	int func_id = BPF_FUNC_get_func_ip;
7481 
7482 	if (type == BPF_PROG_TYPE_TRACING) {
7483 		if (!bpf_prog_has_trampoline(env->prog)) {
7484 			verbose(env, "func %s#%d supported only for fentry/fexit/fmod_ret programs\n",
7485 				func_id_name(func_id), func_id);
7486 			return -ENOTSUPP;
7487 		}
7488 		return 0;
7489 	} else if (type == BPF_PROG_TYPE_KPROBE) {
7490 		return 0;
7491 	}
7492 
7493 	verbose(env, "func %s#%d not supported for program type %d\n",
7494 		func_id_name(func_id), func_id, type);
7495 	return -ENOTSUPP;
7496 }
7497 
7498 static struct bpf_insn_aux_data *cur_aux(struct bpf_verifier_env *env)
7499 {
7500 	return &env->insn_aux_data[env->insn_idx];
7501 }
7502 
7503 static bool loop_flag_is_zero(struct bpf_verifier_env *env)
7504 {
7505 	struct bpf_reg_state *regs = cur_regs(env);
7506 	struct bpf_reg_state *reg = &regs[BPF_REG_4];
7507 	bool reg_is_null = register_is_null(reg);
7508 
7509 	if (reg_is_null)
7510 		mark_chain_precision(env, BPF_REG_4);
7511 
7512 	return reg_is_null;
7513 }
7514 
7515 static void update_loop_inline_state(struct bpf_verifier_env *env, u32 subprogno)
7516 {
7517 	struct bpf_loop_inline_state *state = &cur_aux(env)->loop_inline_state;
7518 
7519 	if (!state->initialized) {
7520 		state->initialized = 1;
7521 		state->fit_for_inline = loop_flag_is_zero(env);
7522 		state->callback_subprogno = subprogno;
7523 		return;
7524 	}
7525 
7526 	if (!state->fit_for_inline)
7527 		return;
7528 
7529 	state->fit_for_inline = (loop_flag_is_zero(env) &&
7530 				 state->callback_subprogno == subprogno);
7531 }
7532 
7533 static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
7534 			     int *insn_idx_p)
7535 {
7536 	enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
7537 	const struct bpf_func_proto *fn = NULL;
7538 	enum bpf_return_type ret_type;
7539 	enum bpf_type_flag ret_flag;
7540 	struct bpf_reg_state *regs;
7541 	struct bpf_call_arg_meta meta;
7542 	int insn_idx = *insn_idx_p;
7543 	bool changes_data;
7544 	int i, err, func_id;
7545 
7546 	/* find function prototype */
7547 	func_id = insn->imm;
7548 	if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) {
7549 		verbose(env, "invalid func %s#%d\n", func_id_name(func_id),
7550 			func_id);
7551 		return -EINVAL;
7552 	}
7553 
7554 	if (env->ops->get_func_proto)
7555 		fn = env->ops->get_func_proto(func_id, env->prog);
7556 	if (!fn) {
7557 		verbose(env, "unknown func %s#%d\n", func_id_name(func_id),
7558 			func_id);
7559 		return -EINVAL;
7560 	}
7561 
7562 	/* eBPF programs must be GPL compatible to use GPL-ed functions */
7563 	if (!env->prog->gpl_compatible && fn->gpl_only) {
7564 		verbose(env, "cannot call GPL-restricted function from non-GPL compatible program\n");
7565 		return -EINVAL;
7566 	}
7567 
7568 	if (fn->allowed && !fn->allowed(env->prog)) {
7569 		verbose(env, "helper call is not allowed in probe\n");
7570 		return -EINVAL;
7571 	}
7572 
7573 	if (!env->prog->aux->sleepable && fn->might_sleep) {
7574 		verbose(env, "helper call might sleep in a non-sleepable prog\n");
7575 		return -EINVAL;
7576 	}
7577 
7578 	/* With LD_ABS/IND some JITs save/restore skb from r1. */
7579 	changes_data = bpf_helper_changes_pkt_data(fn->func);
7580 	if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) {
7581 		verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n",
7582 			func_id_name(func_id), func_id);
7583 		return -EINVAL;
7584 	}
7585 
7586 	memset(&meta, 0, sizeof(meta));
7587 	meta.pkt_access = fn->pkt_access;
7588 
7589 	err = check_func_proto(fn, func_id);
7590 	if (err) {
7591 		verbose(env, "kernel subsystem misconfigured func %s#%d\n",
7592 			func_id_name(func_id), func_id);
7593 		return err;
7594 	}
7595 
7596 	if (env->cur_state->active_rcu_lock) {
7597 		if (fn->might_sleep) {
7598 			verbose(env, "sleepable helper %s#%d in rcu_read_lock region\n",
7599 				func_id_name(func_id), func_id);
7600 			return -EINVAL;
7601 		}
7602 
7603 		if (env->prog->aux->sleepable && is_storage_get_function(func_id))
7604 			env->insn_aux_data[insn_idx].storage_get_func_atomic = true;
7605 	}
7606 
7607 	meta.func_id = func_id;
7608 	/* check args */
7609 	for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++) {
7610 		err = check_func_arg(env, i, &meta, fn);
7611 		if (err)
7612 			return err;
7613 	}
7614 
7615 	err = record_func_map(env, &meta, func_id, insn_idx);
7616 	if (err)
7617 		return err;
7618 
7619 	err = record_func_key(env, &meta, func_id, insn_idx);
7620 	if (err)
7621 		return err;
7622 
7623 	/* Mark slots with STACK_MISC in case of raw mode; the stack offset
7624 	 * is inferred from register state.
7625 	 */
7626 	for (i = 0; i < meta.access_size; i++) {
7627 		err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B,
7628 				       BPF_WRITE, -1, false);
7629 		if (err)
7630 			return err;
7631 	}
7632 
7633 	regs = cur_regs(env);
7634 
7635 	if (meta.uninit_dynptr_regno) {
7636 		/* we write BPF_DW bits (8 bytes) at a time */
7637 		for (i = 0; i < BPF_DYNPTR_SIZE; i += 8) {
7638 			err = check_mem_access(env, insn_idx, meta.uninit_dynptr_regno,
7639 					       i, BPF_DW, BPF_WRITE, -1, false);
7640 			if (err)
7641 				return err;
7642 		}
7643 
7644 		err = mark_stack_slots_dynptr(env, &regs[meta.uninit_dynptr_regno],
7645 					      fn->arg_type[meta.uninit_dynptr_regno - BPF_REG_1],
7646 					      insn_idx);
7647 		if (err)
7648 			return err;
7649 	}
7650 
7651 	if (meta.release_regno) {
7652 		err = -EINVAL;
7653 		if (arg_type_is_dynptr(fn->arg_type[meta.release_regno - BPF_REG_1]))
7654 			err = unmark_stack_slots_dynptr(env, &regs[meta.release_regno]);
7655 		else if (meta.ref_obj_id)
7656 			err = release_reference(env, meta.ref_obj_id);
7657 		/* meta.ref_obj_id can only be 0 if the register that is meant to
7658 		 * be released is NULL, and that register must be > R0.
7659 		 */
7660 		else if (register_is_null(&regs[meta.release_regno]))
7661 			err = 0;
7662 		if (err) {
7663 			verbose(env, "func %s#%d reference has not been acquired before\n",
7664 				func_id_name(func_id), func_id);
7665 			return err;
7666 		}
7667 	}
7668 
7669 	switch (func_id) {
7670 	case BPF_FUNC_tail_call:
7671 		err = check_reference_leak(env);
7672 		if (err) {
7673 			verbose(env, "tail_call would lead to reference leak\n");
7674 			return err;
7675 		}
7676 		break;
7677 	case BPF_FUNC_get_local_storage:
7678 		/* check that the flags argument in get_local_storage(map, flags) is 0;
7679 		 * this is required because get_local_storage() can't return an error.
7680 		 */
7681 		if (!register_is_null(&regs[BPF_REG_2])) {
7682 			verbose(env, "get_local_storage() doesn't support non-zero flags\n");
7683 			return -EINVAL;
7684 		}
7685 		break;
7686 	case BPF_FUNC_for_each_map_elem:
7687 		err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
7688 					set_map_elem_callback_state);
7689 		break;
7690 	case BPF_FUNC_timer_set_callback:
7691 		err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
7692 					set_timer_callback_state);
7693 		break;
7694 	case BPF_FUNC_find_vma:
7695 		err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
7696 					set_find_vma_callback_state);
7697 		break;
7698 	case BPF_FUNC_snprintf:
7699 		err = check_bpf_snprintf_call(env, regs);
7700 		break;
7701 	case BPF_FUNC_loop:
7702 		update_loop_inline_state(env, meta.subprogno);
7703 		err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
7704 					set_loop_callback_state);
7705 		break;
7706 	case BPF_FUNC_dynptr_from_mem:
7707 		if (regs[BPF_REG_1].type != PTR_TO_MAP_VALUE) {
7708 			verbose(env, "Unsupported reg type %s for bpf_dynptr_from_mem data\n",
7709 				reg_type_str(env, regs[BPF_REG_1].type));
7710 			return -EACCES;
7711 		}
7712 		break;
7713 	case BPF_FUNC_set_retval:
7714 		if (prog_type == BPF_PROG_TYPE_LSM &&
7715 		    env->prog->expected_attach_type == BPF_LSM_CGROUP) {
7716 			if (!env->prog->aux->attach_func_proto->type) {
7717 				/* Make sure programs that attach to void
7718 				 * hooks don't try to modify return value.
7719 				 */
7720 				verbose(env, "BPF_LSM_CGROUP that attach to void LSM hooks can't modify return value!\n");
7721 				return -EINVAL;
7722 			}
7723 		}
7724 		break;
7725 	case BPF_FUNC_dynptr_data:
7726 		for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++) {
7727 			if (arg_type_is_dynptr(fn->arg_type[i])) {
7728 				struct bpf_reg_state *reg = &regs[BPF_REG_1 + i];
7729 
7730 				if (meta.ref_obj_id) {
7731 					verbose(env, "verifier internal error: meta.ref_obj_id already set\n");
7732 					return -EFAULT;
7733 				}
7734 
7735 				if (base_type(reg->type) != PTR_TO_DYNPTR)
7736 					/* Find the id of the dynptr we're
7737 					 * tracking the reference of
7738 					 */
7739 					meta.ref_obj_id = stack_slot_get_id(env, reg);
7740 				break;
7741 			}
7742 		}
7743 		if (i == MAX_BPF_FUNC_REG_ARGS) {
7744 			verbose(env, "verifier internal error: no dynptr in bpf_dynptr_data()\n");
7745 			return -EFAULT;
7746 		}
7747 		break;
7748 	case BPF_FUNC_user_ringbuf_drain:
7749 		err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
7750 					set_user_ringbuf_callback_state);
7751 		break;
7752 	}
7753 
7754 	if (err)
7755 		return err;
7756 
7757 	/* reset caller saved regs */
7758 	for (i = 0; i < CALLER_SAVED_REGS; i++) {
7759 		mark_reg_not_init(env, regs, caller_saved[i]);
7760 		check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
7761 	}
7762 
7763 	/* helper call returns 64-bit value. */
7764 	regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG;
7765 
7766 	/* update return register (already marked as written above) */
7767 	ret_type = fn->ret_type;
7768 	ret_flag = type_flag(ret_type);
7769 
7770 	switch (base_type(ret_type)) {
7771 	case RET_INTEGER:
7772 		/* sets type to SCALAR_VALUE */
7773 		mark_reg_unknown(env, regs, BPF_REG_0);
7774 		break;
7775 	case RET_VOID:
7776 		regs[BPF_REG_0].type = NOT_INIT;
7777 		break;
7778 	case RET_PTR_TO_MAP_VALUE:
7779 		/* There is no offset yet applied, variable or fixed */
7780 		mark_reg_known_zero(env, regs, BPF_REG_0);
7781 		/* remember map_ptr, so that check_map_access()
7782 		 * can check 'value_size' boundary of memory access
7783 		 * to map element returned from bpf_map_lookup_elem()
7784 		 */
7785 		if (meta.map_ptr == NULL) {
7786 			verbose(env,
7787 				"kernel subsystem misconfigured verifier\n");
7788 			return -EINVAL;
7789 		}
7790 		regs[BPF_REG_0].map_ptr = meta.map_ptr;
7791 		regs[BPF_REG_0].map_uid = meta.map_uid;
7792 		regs[BPF_REG_0].type = PTR_TO_MAP_VALUE | ret_flag;
7793 		if (!type_may_be_null(ret_type) &&
7794 		    btf_record_has_field(meta.map_ptr->record, BPF_SPIN_LOCK)) {
7795 			regs[BPF_REG_0].id = ++env->id_gen;
7796 		}
7797 		break;
7798 	case RET_PTR_TO_SOCKET:
7799 		mark_reg_known_zero(env, regs, BPF_REG_0);
7800 		regs[BPF_REG_0].type = PTR_TO_SOCKET | ret_flag;
7801 		break;
7802 	case RET_PTR_TO_SOCK_COMMON:
7803 		mark_reg_known_zero(env, regs, BPF_REG_0);
7804 		regs[BPF_REG_0].type = PTR_TO_SOCK_COMMON | ret_flag;
7805 		break;
7806 	case RET_PTR_TO_TCP_SOCK:
7807 		mark_reg_known_zero(env, regs, BPF_REG_0);
7808 		regs[BPF_REG_0].type = PTR_TO_TCP_SOCK | ret_flag;
7809 		break;
7810 	case RET_PTR_TO_MEM:
7811 		mark_reg_known_zero(env, regs, BPF_REG_0);
7812 		regs[BPF_REG_0].type = PTR_TO_MEM | ret_flag;
7813 		regs[BPF_REG_0].mem_size = meta.mem_size;
7814 		break;
7815 	case RET_PTR_TO_MEM_OR_BTF_ID:
7816 	{
7817 		const struct btf_type *t;
7818 
7819 		mark_reg_known_zero(env, regs, BPF_REG_0);
7820 		t = btf_type_skip_modifiers(meta.ret_btf, meta.ret_btf_id, NULL);
7821 		if (!btf_type_is_struct(t)) {
7822 			u32 tsize;
7823 			const struct btf_type *ret;
7824 			const char *tname;
7825 
7826 			/* resolve the type size of ksym. */
7827 			ret = btf_resolve_size(meta.ret_btf, t, &tsize);
7828 			if (IS_ERR(ret)) {
7829 				tname = btf_name_by_offset(meta.ret_btf, t->name_off);
7830 				verbose(env, "unable to resolve the size of type '%s': %ld\n",
7831 					tname, PTR_ERR(ret));
7832 				return -EINVAL;
7833 			}
7834 			regs[BPF_REG_0].type = PTR_TO_MEM | ret_flag;
7835 			regs[BPF_REG_0].mem_size = tsize;
7836 		} else {
7837 			/* MEM_RDONLY may be carried over from ret_flag, but it
7838 			 * doesn't apply to PTR_TO_BTF_ID. Clear it, otherwise
7839 			 * it will confuse the PTR_TO_BTF_ID check in
7840 			 * check_mem_access().
7841 			 */
7842 			ret_flag &= ~MEM_RDONLY;
7843 
7844 			regs[BPF_REG_0].type = PTR_TO_BTF_ID | ret_flag;
7845 			regs[BPF_REG_0].btf = meta.ret_btf;
7846 			regs[BPF_REG_0].btf_id = meta.ret_btf_id;
7847 		}
7848 		break;
7849 	}
7850 	case RET_PTR_TO_BTF_ID:
7851 	{
7852 		struct btf *ret_btf;
7853 		int ret_btf_id;
7854 
7855 		mark_reg_known_zero(env, regs, BPF_REG_0);
7856 		regs[BPF_REG_0].type = PTR_TO_BTF_ID | ret_flag;
7857 		if (func_id == BPF_FUNC_kptr_xchg) {
7858 			ret_btf = meta.kptr_field->kptr.btf;
7859 			ret_btf_id = meta.kptr_field->kptr.btf_id;
7860 		} else {
7861 			if (fn->ret_btf_id == BPF_PTR_POISON) {
7862 				verbose(env, "verifier internal error:");
7863 				verbose(env, "func %s has non-overwritten BPF_PTR_POISON return type\n",
7864 					func_id_name(func_id));
7865 				return -EINVAL;
7866 			}
7867 			ret_btf = btf_vmlinux;
7868 			ret_btf_id = *fn->ret_btf_id;
7869 		}
7870 		if (ret_btf_id == 0) {
7871 			verbose(env, "invalid return type %u of func %s#%d\n",
7872 				base_type(ret_type), func_id_name(func_id),
7873 				func_id);
7874 			return -EINVAL;
7875 		}
7876 		regs[BPF_REG_0].btf = ret_btf;
7877 		regs[BPF_REG_0].btf_id = ret_btf_id;
7878 		break;
7879 	}
7880 	default:
7881 		verbose(env, "unknown return type %u of func %s#%d\n",
7882 			base_type(ret_type), func_id_name(func_id), func_id);
7883 		return -EINVAL;
7884 	}
7885 
7886 	if (type_may_be_null(regs[BPF_REG_0].type))
7887 		regs[BPF_REG_0].id = ++env->id_gen;
7888 
7889 	if (helper_multiple_ref_obj_use(func_id, meta.map_ptr)) {
7890 		verbose(env, "verifier internal error: func %s#%d sets ref_obj_id more than once\n",
7891 			func_id_name(func_id), func_id);
7892 		return -EFAULT;
7893 	}
7894 
7895 	if (is_ptr_cast_function(func_id) || is_dynptr_ref_function(func_id)) {
7896 		/* For release_reference() */
7897 		regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id;
7898 	} else if (is_acquire_function(func_id, meta.map_ptr)) {
7899 		int id = acquire_reference_state(env, insn_idx);
7900 
7901 		if (id < 0)
7902 			return id;
7903 		/* For mark_ptr_or_null_reg() */
7904 		regs[BPF_REG_0].id = id;
7905 		/* For release_reference() */
7906 		regs[BPF_REG_0].ref_obj_id = id;
7907 	}
7908 
7909 	do_refine_retval_range(regs, fn->ret_type, func_id, &meta);
7910 
7911 	err = check_map_func_compatibility(env, meta.map_ptr, func_id);
7912 	if (err)
7913 		return err;
7914 
7915 	if ((func_id == BPF_FUNC_get_stack ||
7916 	     func_id == BPF_FUNC_get_task_stack) &&
7917 	    !env->prog->has_callchain_buf) {
7918 		const char *err_str;
7919 
7920 #ifdef CONFIG_PERF_EVENTS
7921 		err = get_callchain_buffers(sysctl_perf_event_max_stack);
7922 		err_str = "cannot get callchain buffer for func %s#%d\n";
7923 #else
7924 		err = -ENOTSUPP;
7925 		err_str = "func %s#%d not supported without CONFIG_PERF_EVENTS\n";
7926 #endif
7927 		if (err) {
7928 			verbose(env, err_str, func_id_name(func_id), func_id);
7929 			return err;
7930 		}
7931 
7932 		env->prog->has_callchain_buf = true;
7933 	}
7934 
7935 	if (func_id == BPF_FUNC_get_stackid || func_id == BPF_FUNC_get_stack)
7936 		env->prog->call_get_stack = true;
7937 
7938 	if (func_id == BPF_FUNC_get_func_ip) {
7939 		if (check_get_func_ip(env))
7940 			return -ENOTSUPP;
7941 		env->prog->call_get_func_ip = true;
7942 	}
7943 
7944 	if (changes_data)
7945 		clear_all_pkt_pointers(env);
7946 	return 0;
7947 }
7948 
7949 /* mark_btf_func_reg_size() is used when the reg size is determined by
7950  * the BTF func_proto's return value or argument size.
7951  */
7952 static void mark_btf_func_reg_size(struct bpf_verifier_env *env, u32 regno,
7953 				   size_t reg_size)
7954 {
7955 	struct bpf_reg_state *reg = &cur_regs(env)[regno];
7956 
7957 	if (regno == BPF_REG_0) {
7958 		/* Function return value */
7959 		reg->live |= REG_LIVE_WRITTEN;
7960 		reg->subreg_def = reg_size == sizeof(u64) ?
7961 			DEF_NOT_SUBREG : env->insn_idx + 1;
7962 	} else {
7963 		/* Function argument */
7964 		if (reg_size == sizeof(u64)) {
7965 			mark_insn_zext(env, reg);
7966 			mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
7967 		} else {
7968 			mark_reg_read(env, reg, reg->parent, REG_LIVE_READ32);
7969 		}
7970 	}
7971 }
7972 
7973 struct bpf_kfunc_call_arg_meta {
7974 	/* In parameters */
7975 	struct btf *btf;
7976 	u32 func_id;
7977 	u32 kfunc_flags;
7978 	const struct btf_type *func_proto;
7979 	const char *func_name;
7980 	/* Out parameters */
7981 	u32 ref_obj_id;
7982 	u8 release_regno;
7983 	bool r0_rdonly;
7984 	u32 ret_btf_id;
7985 	u64 r0_size;
7986 	struct {
7987 		u64 value;
7988 		bool found;
7989 	} arg_constant;
7990 	struct {
7991 		struct btf *btf;
7992 		u32 btf_id;
7993 	} arg_obj_drop;
7994 	struct {
7995 		struct btf_field *field;
7996 	} arg_list_head;
7997 };
7998 
7999 static bool is_kfunc_acquire(struct bpf_kfunc_call_arg_meta *meta)
8000 {
8001 	return meta->kfunc_flags & KF_ACQUIRE;
8002 }
8003 
8004 static bool is_kfunc_ret_null(struct bpf_kfunc_call_arg_meta *meta)
8005 {
8006 	return meta->kfunc_flags & KF_RET_NULL;
8007 }
8008 
8009 static bool is_kfunc_release(struct bpf_kfunc_call_arg_meta *meta)
8010 {
8011 	return meta->kfunc_flags & KF_RELEASE;
8012 }
8013 
8014 static bool is_kfunc_trusted_args(struct bpf_kfunc_call_arg_meta *meta)
8015 {
8016 	return meta->kfunc_flags & KF_TRUSTED_ARGS;
8017 }
8018 
8019 static bool is_kfunc_sleepable(struct bpf_kfunc_call_arg_meta *meta)
8020 {
8021 	return meta->kfunc_flags & KF_SLEEPABLE;
8022 }
8023 
8024 static bool is_kfunc_destructive(struct bpf_kfunc_call_arg_meta *meta)
8025 {
8026 	return meta->kfunc_flags & KF_DESTRUCTIVE;
8027 }
8028 
8029 static bool is_kfunc_arg_kptr_get(struct bpf_kfunc_call_arg_meta *meta, int arg)
8030 {
8031 	return arg == 0 && (meta->kfunc_flags & KF_KPTR_GET);
8032 }
8033 
8034 static bool __kfunc_param_match_suffix(const struct btf *btf,
8035 				       const struct btf_param *arg,
8036 				       const char *suffix)
8037 {
8038 	int suffix_len = strlen(suffix), len;
8039 	const char *param_name;
8040 
8041 	/* In the future, this can be ported to use BTF tagging */
8042 	param_name = btf_name_by_offset(btf, arg->name_off);
8043 	if (str_is_empty(param_name))
8044 		return false;
8045 	len = strlen(param_name);
8046 	if (len < suffix_len)
8047 		return false;
8048 	param_name += len - suffix_len;
8049 	return !strncmp(param_name, suffix, suffix_len);
8050 }
8051 
8052 static bool is_kfunc_arg_mem_size(const struct btf *btf,
8053 				  const struct btf_param *arg,
8054 				  const struct bpf_reg_state *reg)
8055 {
8056 	const struct btf_type *t;
8057 
8058 	t = btf_type_skip_modifiers(btf, arg->type, NULL);
8059 	if (!btf_type_is_scalar(t) || reg->type != SCALAR_VALUE)
8060 		return false;
8061 
8062 	return __kfunc_param_match_suffix(btf, arg, "__sz");
8063 }
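
/* Hedged sketch of the naming convention matched above; the kfunc below is
 * hypothetical:
 *
 *	int bpf_example_process(void *mem, u32 mem__sz);
 *
 * The "__sz" suffix on the scalar argument marks it as the size of the
 * preceding memory argument ("__k" and "__ign" are matched the same way).
 */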
8064 
8065 static bool is_kfunc_arg_constant(const struct btf *btf, const struct btf_param *arg)
8066 {
8067 	return __kfunc_param_match_suffix(btf, arg, "__k");
8068 }
8069 
8070 static bool is_kfunc_arg_ignore(const struct btf *btf, const struct btf_param *arg)
8071 {
8072 	return __kfunc_param_match_suffix(btf, arg, "__ign");
8073 }
8074 
8075 static bool is_kfunc_arg_alloc_obj(const struct btf *btf, const struct btf_param *arg)
8076 {
8077 	return __kfunc_param_match_suffix(btf, arg, "__alloc");
8078 }
8079 
8080 static bool is_kfunc_arg_scalar_with_name(const struct btf *btf,
8081 					  const struct btf_param *arg,
8082 					  const char *name)
8083 {
8084 	int len, target_len = strlen(name);
8085 	const char *param_name;
8086 
8087 	param_name = btf_name_by_offset(btf, arg->name_off);
8088 	if (str_is_empty(param_name))
8089 		return false;
8090 	len = strlen(param_name);
8091 	if (len != target_len)
8092 		return false;
8093 	if (strcmp(param_name, name))
8094 		return false;
8095 
8096 	return true;
8097 }
8098 
8099 enum {
8100 	KF_ARG_DYNPTR_ID,
8101 	KF_ARG_LIST_HEAD_ID,
8102 	KF_ARG_LIST_NODE_ID,
8103 };
8104 
8105 BTF_ID_LIST(kf_arg_btf_ids)
8106 BTF_ID(struct, bpf_dynptr_kern)
8107 BTF_ID(struct, bpf_list_head)
8108 BTF_ID(struct, bpf_list_node)
8109 
8110 static bool __is_kfunc_ptr_arg_type(const struct btf *btf,
8111 				    const struct btf_param *arg, int type)
8112 {
8113 	const struct btf_type *t;
8114 	u32 res_id;
8115 
8116 	t = btf_type_skip_modifiers(btf, arg->type, NULL);
8117 	if (!t)
8118 		return false;
8119 	if (!btf_type_is_ptr(t))
8120 		return false;
8121 	t = btf_type_skip_modifiers(btf, t->type, &res_id);
8122 	if (!t)
8123 		return false;
8124 	return btf_types_are_same(btf, res_id, btf_vmlinux, kf_arg_btf_ids[type]);
8125 }
8126 
8127 static bool is_kfunc_arg_dynptr(const struct btf *btf, const struct btf_param *arg)
8128 {
8129 	return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_DYNPTR_ID);
8130 }
8131 
8132 static bool is_kfunc_arg_list_head(const struct btf *btf, const struct btf_param *arg)
8133 {
8134 	return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_LIST_HEAD_ID);
8135 }
8136 
8137 static bool is_kfunc_arg_list_node(const struct btf *btf, const struct btf_param *arg)
8138 {
8139 	return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_LIST_NODE_ID);
8140 }
8141 
8142 /* Returns true if the struct is composed of scalars; 4 levels of nesting are allowed */
8143 static bool __btf_type_is_scalar_struct(struct bpf_verifier_env *env,
8144 					const struct btf *btf,
8145 					const struct btf_type *t, int rec)
8146 {
8147 	const struct btf_type *member_type;
8148 	const struct btf_member *member;
8149 	u32 i;
8150 
8151 	if (!btf_type_is_struct(t))
8152 		return false;
8153 
8154 	for_each_member(i, t, member) {
8155 		const struct btf_array *array;
8156 
8157 		member_type = btf_type_skip_modifiers(btf, member->type, NULL);
8158 		if (btf_type_is_struct(member_type)) {
8159 			if (rec >= 3) {
8160 				verbose(env, "max struct nesting depth exceeded\n");
8161 				return false;
8162 			}
8163 			if (!__btf_type_is_scalar_struct(env, btf, member_type, rec + 1))
8164 				return false;
8165 			continue;
8166 		}
8167 		if (btf_type_is_array(member_type)) {
8168 			array = btf_array(member_type);
8169 			if (!array->nelems)
8170 				return false;
8171 			member_type = btf_type_skip_modifiers(btf, array->type, NULL);
8172 			if (!btf_type_is_scalar(member_type))
8173 				return false;
8174 			continue;
8175 		}
8176 		if (!btf_type_is_scalar(member_type))
8177 			return false;
8178 	}
8179 	return true;
8180 }
8181 
8182 
8183 static u32 *reg2btf_ids[__BPF_REG_TYPE_MAX] = {
8184 #ifdef CONFIG_NET
8185 	[PTR_TO_SOCKET] = &btf_sock_ids[BTF_SOCK_TYPE_SOCK],
8186 	[PTR_TO_SOCK_COMMON] = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
8187 	[PTR_TO_TCP_SOCK] = &btf_sock_ids[BTF_SOCK_TYPE_TCP],
8188 #endif
8189 };
8190 
8191 enum kfunc_ptr_arg_type {
8192 	KF_ARG_PTR_TO_CTX,
8193 	KF_ARG_PTR_TO_ALLOC_BTF_ID,  /* Allocated object */
8194 	KF_ARG_PTR_TO_KPTR,	     /* PTR_TO_KPTR but type specific */
8195 	KF_ARG_PTR_TO_DYNPTR,
8196 	KF_ARG_PTR_TO_LIST_HEAD,
8197 	KF_ARG_PTR_TO_LIST_NODE,
8198 	KF_ARG_PTR_TO_BTF_ID,	     /* Also covers reg2btf_ids conversions */
8199 	KF_ARG_PTR_TO_MEM,
8200 	KF_ARG_PTR_TO_MEM_SIZE,	     /* Size derived from next argument, skip it */
8201 };
8202 
8203 enum special_kfunc_type {
8204 	KF_bpf_obj_new_impl,
8205 	KF_bpf_obj_drop_impl,
8206 	KF_bpf_list_push_front,
8207 	KF_bpf_list_push_back,
8208 	KF_bpf_list_pop_front,
8209 	KF_bpf_list_pop_back,
8210 	KF_bpf_cast_to_kern_ctx,
8211 	KF_bpf_rdonly_cast,
8212 	KF_bpf_rcu_read_lock,
8213 	KF_bpf_rcu_read_unlock,
8214 };
8215 
8216 BTF_SET_START(special_kfunc_set)
8217 BTF_ID(func, bpf_obj_new_impl)
8218 BTF_ID(func, bpf_obj_drop_impl)
8219 BTF_ID(func, bpf_list_push_front)
8220 BTF_ID(func, bpf_list_push_back)
8221 BTF_ID(func, bpf_list_pop_front)
8222 BTF_ID(func, bpf_list_pop_back)
8223 BTF_ID(func, bpf_cast_to_kern_ctx)
8224 BTF_ID(func, bpf_rdonly_cast)
8225 BTF_SET_END(special_kfunc_set)
8226 
8227 BTF_ID_LIST(special_kfunc_list)
8228 BTF_ID(func, bpf_obj_new_impl)
8229 BTF_ID(func, bpf_obj_drop_impl)
8230 BTF_ID(func, bpf_list_push_front)
8231 BTF_ID(func, bpf_list_push_back)
8232 BTF_ID(func, bpf_list_pop_front)
8233 BTF_ID(func, bpf_list_pop_back)
8234 BTF_ID(func, bpf_cast_to_kern_ctx)
8235 BTF_ID(func, bpf_rdonly_cast)
8236 BTF_ID(func, bpf_rcu_read_lock)
8237 BTF_ID(func, bpf_rcu_read_unlock)
8238 
8239 static bool is_kfunc_bpf_rcu_read_lock(struct bpf_kfunc_call_arg_meta *meta)
8240 {
8241 	return meta->func_id == special_kfunc_list[KF_bpf_rcu_read_lock];
8242 }
8243 
8244 static bool is_kfunc_bpf_rcu_read_unlock(struct bpf_kfunc_call_arg_meta *meta)
8245 {
8246 	return meta->func_id == special_kfunc_list[KF_bpf_rcu_read_unlock];
8247 }
8248 
8249 static enum kfunc_ptr_arg_type
8250 get_kfunc_ptr_arg_type(struct bpf_verifier_env *env,
8251 		       struct bpf_kfunc_call_arg_meta *meta,
8252 		       const struct btf_type *t, const struct btf_type *ref_t,
8253 		       const char *ref_tname, const struct btf_param *args,
8254 		       int argno, int nargs)
8255 {
8256 	u32 regno = argno + 1;
8257 	struct bpf_reg_state *regs = cur_regs(env);
8258 	struct bpf_reg_state *reg = &regs[regno];
8259 	bool arg_mem_size = false;
8260 
8261 	if (meta->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx])
8262 		return KF_ARG_PTR_TO_CTX;
8263 
8264 	/* In this function, we verify the kfunc's BTF as per the argument type,
8265 	 * leaving the rest of the verification with respect to the register
8266 	 * type to our caller. When a set of conditions hold in the BTF type of
8267 	 * arguments, we resolve it to a known kfunc_ptr_arg_type.
8268 	 */
8269 	if (btf_get_prog_ctx_type(&env->log, meta->btf, t, resolve_prog_type(env->prog), argno))
8270 		return KF_ARG_PTR_TO_CTX;
8271 
8272 	if (is_kfunc_arg_alloc_obj(meta->btf, &args[argno]))
8273 		return KF_ARG_PTR_TO_ALLOC_BTF_ID;
8274 
8275 	if (is_kfunc_arg_kptr_get(meta, argno)) {
8276 		if (!btf_type_is_ptr(ref_t)) {
8277 			verbose(env, "arg#0 BTF type must be a double pointer for kptr_get kfunc\n");
8278 			return -EINVAL;
8279 		}
8280 		ref_t = btf_type_by_id(meta->btf, ref_t->type);
8281 		ref_tname = btf_name_by_offset(meta->btf, ref_t->name_off);
8282 		if (!btf_type_is_struct(ref_t)) {
8283 			verbose(env, "kernel function %s args#0 pointer type %s %s is not supported\n",
8284 				meta->func_name, btf_type_str(ref_t), ref_tname);
8285 			return -EINVAL;
8286 		}
8287 		return KF_ARG_PTR_TO_KPTR;
8288 	}
8289 
8290 	if (is_kfunc_arg_dynptr(meta->btf, &args[argno]))
8291 		return KF_ARG_PTR_TO_DYNPTR;
8292 
8293 	if (is_kfunc_arg_list_head(meta->btf, &args[argno]))
8294 		return KF_ARG_PTR_TO_LIST_HEAD;
8295 
8296 	if (is_kfunc_arg_list_node(meta->btf, &args[argno]))
8297 		return KF_ARG_PTR_TO_LIST_NODE;
8298 
8299 	if ((base_type(reg->type) == PTR_TO_BTF_ID || reg2btf_ids[base_type(reg->type)])) {
8300 		if (!btf_type_is_struct(ref_t)) {
8301 			verbose(env, "kernel function %s args#%d pointer type %s %s is not supported\n",
8302 				meta->func_name, argno, btf_type_str(ref_t), ref_tname);
8303 			return -EINVAL;
8304 		}
8305 		return KF_ARG_PTR_TO_BTF_ID;
8306 	}
8307 
8308 	if (argno + 1 < nargs && is_kfunc_arg_mem_size(meta->btf, &args[argno + 1], &regs[regno + 1]))
8309 		arg_mem_size = true;
8310 
8311 	/* This is the catch-all argument type of register types supported by
8312 	 * check_helper_mem_access. However, we only allow it when the argument type
8313 	 * is a pointer to scalar, or a struct composed (recursively) of scalars. When
8314 	 * arg_mem_size is true, the pointer can be void *.
8315 	 */
8316 	if (!btf_type_is_scalar(ref_t) && !__btf_type_is_scalar_struct(env, meta->btf, ref_t, 0) &&
8317 	    (arg_mem_size ? !btf_type_is_void(ref_t) : 1)) {
8318 		verbose(env, "arg#%d pointer type %s %s must point to %sscalar, or struct with scalar\n",
8319 			argno, btf_type_str(ref_t), ref_tname, arg_mem_size ? "void, " : "");
8320 		return -EINVAL;
8321 	}
8322 	return arg_mem_size ? KF_ARG_PTR_TO_MEM_SIZE : KF_ARG_PTR_TO_MEM;
8323 }
8324 
8325 static int process_kf_arg_ptr_to_btf_id(struct bpf_verifier_env *env,
8326 					struct bpf_reg_state *reg,
8327 					const struct btf_type *ref_t,
8328 					const char *ref_tname, u32 ref_id,
8329 					struct bpf_kfunc_call_arg_meta *meta,
8330 					int argno)
8331 {
8332 	const struct btf_type *reg_ref_t;
8333 	bool strict_type_match = false;
8334 	const struct btf *reg_btf;
8335 	const char *reg_ref_tname;
8336 	u32 reg_ref_id;
8337 
8338 	if (base_type(reg->type) == PTR_TO_BTF_ID) {
8339 		reg_btf = reg->btf;
8340 		reg_ref_id = reg->btf_id;
8341 	} else {
8342 		reg_btf = btf_vmlinux;
8343 		reg_ref_id = *reg2btf_ids[base_type(reg->type)];
8344 	}
8345 
8346 	if (is_kfunc_trusted_args(meta) || (is_kfunc_release(meta) && reg->ref_obj_id))
8347 		strict_type_match = true;
8348 
8349 	reg_ref_t = btf_type_skip_modifiers(reg_btf, reg_ref_id, &reg_ref_id);
8350 	reg_ref_tname = btf_name_by_offset(reg_btf, reg_ref_t->name_off);
8351 	if (!btf_struct_ids_match(&env->log, reg_btf, reg_ref_id, reg->off, meta->btf, ref_id, strict_type_match)) {
8352 		verbose(env, "kernel function %s args#%d expected pointer to %s %s but R%d has a pointer to %s %s\n",
8353 			meta->func_name, argno, btf_type_str(ref_t), ref_tname, argno + 1,
8354 			btf_type_str(reg_ref_t), reg_ref_tname);
8355 		return -EINVAL;
8356 	}
8357 	return 0;
8358 }
8359 
8360 static int process_kf_arg_ptr_to_kptr(struct bpf_verifier_env *env,
8361 				      struct bpf_reg_state *reg,
8362 				      const struct btf_type *ref_t,
8363 				      const char *ref_tname,
8364 				      struct bpf_kfunc_call_arg_meta *meta,
8365 				      int argno)
8366 {
8367 	struct btf_field *kptr_field;
8368 
8369 	/* check_func_arg_reg_off allows var_off for
8370 	 * PTR_TO_MAP_VALUE, but we need fixed offset to find
8371 	 * off_desc.
8372 	 */
8373 	if (!tnum_is_const(reg->var_off)) {
8374 		verbose(env, "arg#0 must have constant offset\n");
8375 		return -EINVAL;
8376 	}
8377 
8378 	kptr_field = btf_record_find(reg->map_ptr->record, reg->off + reg->var_off.value, BPF_KPTR);
8379 	if (!kptr_field || kptr_field->type != BPF_KPTR_REF) {
8380 		verbose(env, "arg#0 no referenced kptr at map value offset=%llu\n",
8381 			reg->off + reg->var_off.value);
8382 		return -EINVAL;
8383 	}
8384 
8385 	if (!btf_struct_ids_match(&env->log, meta->btf, ref_t->type, 0, kptr_field->kptr.btf,
8386 				  kptr_field->kptr.btf_id, true)) {
8387 		verbose(env, "kernel function %s args#%d expected pointer to %s %s\n",
8388 			meta->func_name, argno, btf_type_str(ref_t), ref_tname);
8389 		return -EINVAL;
8390 	}
8391 	return 0;
8392 }
8393 
8394 static int ref_set_release_on_unlock(struct bpf_verifier_env *env, u32 ref_obj_id)
8395 {
8396 	struct bpf_func_state *state = cur_func(env);
8397 	struct bpf_reg_state *reg;
8398 	int i;
8399 
8400 	/* bpf_spin_lock only allows calling list_push and list_pop, no BPF
8401 	 * subprogs, no global functions. This means that references are never
8402 	 * released inside the critical section, but they may be added to the
8403 	 * reference state, and acquired_refs are never copied out to a different
8404 	 * frame, as BPF-to-BPF calls don't work inside bpf_spin_lock critical
8405 	 * sections.
8406 	 */
8407 	if (!ref_obj_id) {
8408 		verbose(env, "verifier internal error: ref_obj_id is zero for release_on_unlock\n");
8409 		return -EFAULT;
8410 	}
8411 	for (i = 0; i < state->acquired_refs; i++) {
8412 		if (state->refs[i].id == ref_obj_id) {
8413 			if (state->refs[i].release_on_unlock) {
8414 				verbose(env, "verifier internal error: expected false release_on_unlock");
8415 				return -EFAULT;
8416 			}
8417 			state->refs[i].release_on_unlock = true;
8418 			/* Now mark everyone sharing the same ref_obj_id as untrusted */
8419 			bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({
8420 				if (reg->ref_obj_id == ref_obj_id)
8421 					reg->type |= PTR_UNTRUSTED;
8422 			}));
8423 			return 0;
8424 		}
8425 	}
8426 	verbose(env, "verifier internal error: ref state missing for ref_obj_id\n");
8427 	return -EFAULT;
8428 }
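
/* Illustrative sketch of the release_on_unlock semantics above, written as a
 * hypothetical BPF program (the object and map value layouts are assumptions):
 *
 *	struct elem { struct bpf_list_node node; u64 data; };
 *	struct map_value {
 *		struct bpf_spin_lock lock;
 *		struct bpf_list_head head __contains(elem, node);
 *	};
 *
 *	struct elem *e = bpf_obj_new(typeof(*e));
 *	...
 *	bpf_spin_lock(&v->lock);
 *	bpf_list_push_front(&v->head, &e->node); // e becomes untrusted, its ref
 *						 // is flagged release_on_unlock
 *	bpf_spin_unlock(&v->lock);		 // reference actually released here
 */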
8429 
8430 /* Implementation details:
8431  *
8432  * Each register points to some region of memory, which we define as an
8433  * allocation. Each allocation may embed a bpf_spin_lock which protects any
8434  * special BPF objects (bpf_list_head, bpf_rb_root, etc.) part of the same
8435  * allocation. The lock and the data it protects are colocated in the same
8436  * memory region.
8437  *
8438  * Hence, every time a register holds a pointer value pointing to such
8439  * allocation, the verifier preserves a unique reg->id for it.
8440  *
8441  * The verifier remembers the lock 'ptr' and the lock 'id' whenever
8442  * bpf_spin_lock is called.
8443  *
8444  * To enable this, lock state in the verifier captures two values:
8445  *	active_lock.ptr = Register's type specific pointer
8446  *	active_lock.id  = A unique ID for each register pointer value
8447  *
8448  * Currently, PTR_TO_MAP_VALUE and PTR_TO_BTF_ID | MEM_ALLOC are the two
8449  * supported register types.
8450  *
8451  * The active_lock.ptr in case of map values is the reg->map_ptr, and in case of
8452  * allocated objects is the reg->btf pointer.
8453  *
8454  * The active_lock.id is non-unique for maps supporting direct_value_addr, as we
8455  * can establish the provenance of the map value statically for each distinct
8456  * lookup into such maps. They always contain a single map value, hence unique
8457  * IDs for each pseudo load would pessimize the algorithm and reject valid programs.
8458  *
8459  * So, in case of global variables, they use array maps with max_entries = 1,
8460  * hence their active_lock.ptr becomes map_ptr and id = 0 (since they all point
8461  * into the same map value as max_entries is 1, as described above).
8462  *
8463  * In case of inner map lookups, the inner map pointer has same map_ptr as the
8464  * outer map pointer (in verifier context), but each lookup into an inner map
8465  * assigns a fresh reg->id to the lookup, so while lookups into distinct inner
8466  * maps from the same outer map share the same map_ptr as active_lock.ptr, they
8467  * will get different reg->id assigned to each lookup, hence different
8468  * active_lock.id.
8469  *
8470  * In case of allocated objects, active_lock.ptr is the reg->btf, and the
8471  * reg->id is a unique ID preserved after the NULL pointer check on the pointer
8472  * returned from bpf_obj_new. Each allocation receives a new reg->id.
8473  */
8474 static int check_reg_allocation_locked(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
8475 {
8476 	void *ptr;
8477 	u32 id;
8478 
8479 	switch ((int)reg->type) {
8480 	case PTR_TO_MAP_VALUE:
8481 		ptr = reg->map_ptr;
8482 		break;
8483 	case PTR_TO_BTF_ID | MEM_ALLOC:
8484 	case PTR_TO_BTF_ID | MEM_ALLOC | PTR_TRUSTED:
8485 		ptr = reg->btf;
8486 		break;
8487 	default:
8488 		verbose(env, "verifier internal error: unknown reg type for lock check\n");
8489 		return -EFAULT;
8490 	}
8491 	id = reg->id;
8492 
8493 	if (!env->cur_state->active_lock.ptr)
8494 		return -EINVAL;
8495 	if (env->cur_state->active_lock.ptr != ptr ||
8496 	    env->cur_state->active_lock.id != id) {
8497 		verbose(env, "held lock and object are not in the same allocation\n");
8498 		return -EINVAL;
8499 	}
8500 	return 0;
8501 }
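
/* Illustrative sketch, not part of the verifier: given two objects 'a' and 'b',
 * each embedding its own bpf_spin_lock and bpf_list_head, the check above
 * rejects a program doing
 *
 *	bpf_spin_lock(&a->lock);
 *	bpf_list_push_front(&b->head, &n->node);
 *	bpf_spin_unlock(&a->lock);
 *
 * because the active_lock.{ptr,id} recorded when locking 'a' does not match
 * the allocation that 'b' points to.
 */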
8502 
8503 static bool is_bpf_list_api_kfunc(u32 btf_id)
8504 {
8505 	return btf_id == special_kfunc_list[KF_bpf_list_push_front] ||
8506 	       btf_id == special_kfunc_list[KF_bpf_list_push_back] ||
8507 	       btf_id == special_kfunc_list[KF_bpf_list_pop_front] ||
8508 	       btf_id == special_kfunc_list[KF_bpf_list_pop_back];
8509 }
8510 
8511 static int process_kf_arg_ptr_to_list_head(struct bpf_verifier_env *env,
8512 					   struct bpf_reg_state *reg, u32 regno,
8513 					   struct bpf_kfunc_call_arg_meta *meta)
8514 {
8515 	struct btf_field *field;
8516 	struct btf_record *rec;
8517 	u32 list_head_off;
8518 
8519 	if (meta->btf != btf_vmlinux || !is_bpf_list_api_kfunc(meta->func_id)) {
8520 		verbose(env, "verifier internal error: bpf_list_head argument for unknown kfunc\n");
8521 		return -EFAULT;
8522 	}
8523 
8524 	if (!tnum_is_const(reg->var_off)) {
8525 		verbose(env,
8526 			"R%d doesn't have constant offset. bpf_list_head has to be at the constant offset\n",
8527 			regno);
8528 		return -EINVAL;
8529 	}
8530 
8531 	rec = reg_btf_record(reg);
8532 	list_head_off = reg->off + reg->var_off.value;
8533 	field = btf_record_find(rec, list_head_off, BPF_LIST_HEAD);
8534 	if (!field) {
8535 		verbose(env, "bpf_list_head not found at offset=%u\n", list_head_off);
8536 		return -EINVAL;
8537 	}
8538 
8539 	/* All functions require bpf_list_head to be protected using a bpf_spin_lock */
8540 	if (check_reg_allocation_locked(env, reg)) {
8541 		verbose(env, "bpf_spin_lock at off=%d must be held for bpf_list_head\n",
8542 			rec->spin_lock_off);
8543 		return -EINVAL;
8544 	}
8545 
8546 	if (meta->arg_list_head.field) {
8547 		verbose(env, "verifier internal error: repeating bpf_list_head arg\n");
8548 		return -EFAULT;
8549 	}
8550 	meta->arg_list_head.field = field;
8551 	return 0;
8552 }
8553 
8554 static int process_kf_arg_ptr_to_list_node(struct bpf_verifier_env *env,
8555 					   struct bpf_reg_state *reg, u32 regno,
8556 					   struct bpf_kfunc_call_arg_meta *meta)
8557 {
8558 	const struct btf_type *et, *t;
8559 	struct btf_field *field;
8560 	struct btf_record *rec;
8561 	u32 list_node_off;
8562 
8563 	if (meta->btf != btf_vmlinux ||
8564 	    (meta->func_id != special_kfunc_list[KF_bpf_list_push_front] &&
8565 	     meta->func_id != special_kfunc_list[KF_bpf_list_push_back])) {
8566 		verbose(env, "verifier internal error: bpf_list_node argument for unknown kfunc\n");
8567 		return -EFAULT;
8568 	}
8569 
8570 	if (!tnum_is_const(reg->var_off)) {
8571 		verbose(env,
8572 			"R%d doesn't have constant offset. bpf_list_node has to be at the constant offset\n",
8573 			regno);
8574 		return -EINVAL;
8575 	}
8576 
8577 	rec = reg_btf_record(reg);
8578 	list_node_off = reg->off + reg->var_off.value;
8579 	field = btf_record_find(rec, list_node_off, BPF_LIST_NODE);
8580 	if (!field || field->offset != list_node_off) {
8581 		verbose(env, "bpf_list_node not found at offset=%u\n", list_node_off);
8582 		return -EINVAL;
8583 	}
8584 
8585 	field = meta->arg_list_head.field;
8586 
8587 	et = btf_type_by_id(field->list_head.btf, field->list_head.value_btf_id);
8588 	t = btf_type_by_id(reg->btf, reg->btf_id);
8589 	if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, 0, field->list_head.btf,
8590 				  field->list_head.value_btf_id, true)) {
8591 		verbose(env, "operation on bpf_list_head expects arg#1 bpf_list_node at offset=%d "
8592 			"in struct %s, but arg is at offset=%d in struct %s\n",
8593 			field->list_head.node_offset, btf_name_by_offset(field->list_head.btf, et->name_off),
8594 			list_node_off, btf_name_by_offset(reg->btf, t->name_off));
8595 		return -EINVAL;
8596 	}
8597 
8598 	if (list_node_off != field->list_head.node_offset) {
8599 		verbose(env, "arg#1 offset=%d, but expected bpf_list_node at offset=%d in struct %s\n",
8600 			list_node_off, field->list_head.node_offset,
8601 			btf_name_by_offset(field->list_head.btf, et->name_off));
8602 		return -EINVAL;
8603 	}
8604 	/* Set arg#1 for expiration after unlock */
8605 	return ref_set_release_on_unlock(env, reg->ref_obj_id);
8606 }
8607 
8608 static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_arg_meta *meta)
8609 {
8610 	const char *func_name = meta->func_name, *ref_tname;
8611 	const struct btf *btf = meta->btf;
8612 	const struct btf_param *args;
8613 	u32 i, nargs;
8614 	int ret;
8615 
8616 	args = (const struct btf_param *)(meta->func_proto + 1);
8617 	nargs = btf_type_vlen(meta->func_proto);
8618 	if (nargs > MAX_BPF_FUNC_REG_ARGS) {
8619 		verbose(env, "Function %s has %d > %d args\n", func_name, nargs,
8620 			MAX_BPF_FUNC_REG_ARGS);
8621 		return -EINVAL;
8622 	}
8623 
8624 	/* Check that BTF function arguments match actual types that the
8625 	 * verifier sees.
8626 	 */
8627 	for (i = 0; i < nargs; i++) {
8628 		struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[i + 1];
8629 		const struct btf_type *t, *ref_t, *resolve_ret;
8630 		enum bpf_arg_type arg_type = ARG_DONTCARE;
8631 		u32 regno = i + 1, ref_id, type_size;
8632 		bool is_ret_buf_sz = false;
8633 		int kf_arg_type;
8634 
8635 		t = btf_type_skip_modifiers(btf, args[i].type, NULL);
8636 
8637 		if (is_kfunc_arg_ignore(btf, &args[i]))
8638 			continue;
8639 
8640 		if (btf_type_is_scalar(t)) {
8641 			if (reg->type != SCALAR_VALUE) {
8642 				verbose(env, "R%d is not a scalar\n", regno);
8643 				return -EINVAL;
8644 			}
8645 
8646 			if (is_kfunc_arg_constant(meta->btf, &args[i])) {
8647 				if (meta->arg_constant.found) {
8648 					verbose(env, "verifier internal error: only one constant argument permitted\n");
8649 					return -EFAULT;
8650 				}
8651 				if (!tnum_is_const(reg->var_off)) {
8652 					verbose(env, "R%d must be a known constant\n", regno);
8653 					return -EINVAL;
8654 				}
8655 				ret = mark_chain_precision(env, regno);
8656 				if (ret < 0)
8657 					return ret;
8658 				meta->arg_constant.found = true;
8659 				meta->arg_constant.value = reg->var_off.value;
8660 			} else if (is_kfunc_arg_scalar_with_name(btf, &args[i], "rdonly_buf_size")) {
8661 				meta->r0_rdonly = true;
8662 				is_ret_buf_sz = true;
8663 			} else if (is_kfunc_arg_scalar_with_name(btf, &args[i], "rdwr_buf_size")) {
8664 				is_ret_buf_sz = true;
8665 			}
8666 
8667 			if (is_ret_buf_sz) {
8668 				if (meta->r0_size) {
8669 					verbose(env, "2 or more rdonly/rdwr_buf_size parameters for kfunc");
8670 					return -EINVAL;
8671 				}
8672 
8673 				if (!tnum_is_const(reg->var_off)) {
8674 					verbose(env, "R%d is not a const\n", regno);
8675 					return -EINVAL;
8676 				}
8677 
8678 				meta->r0_size = reg->var_off.value;
8679 				ret = mark_chain_precision(env, regno);
8680 				if (ret)
8681 					return ret;
8682 			}
8683 			continue;
8684 		}
8685 
8686 		if (!btf_type_is_ptr(t)) {
8687 			verbose(env, "Unrecognized arg#%d type %s\n", i, btf_type_str(t));
8688 			return -EINVAL;
8689 		}
8690 
8691 		if (reg->ref_obj_id) {
8692 			if (is_kfunc_release(meta) && meta->ref_obj_id) {
8693 				verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n",
8694 					regno, reg->ref_obj_id,
8695 					meta->ref_obj_id);
8696 				return -EFAULT;
8697 			}
8698 			meta->ref_obj_id = reg->ref_obj_id;
8699 			if (is_kfunc_release(meta))
8700 				meta->release_regno = regno;
8701 		}
8702 
8703 		ref_t = btf_type_skip_modifiers(btf, t->type, &ref_id);
8704 		ref_tname = btf_name_by_offset(btf, ref_t->name_off);
8705 
8706 		kf_arg_type = get_kfunc_ptr_arg_type(env, meta, t, ref_t, ref_tname, args, i, nargs);
8707 		if (kf_arg_type < 0)
8708 			return kf_arg_type;
8709 
8710 		switch (kf_arg_type) {
8711 		case KF_ARG_PTR_TO_ALLOC_BTF_ID:
8712 		case KF_ARG_PTR_TO_BTF_ID:
8713 			if (!is_kfunc_trusted_args(meta))
8714 				break;
8715 
8716 			if (!is_trusted_reg(reg)) {
8717 				verbose(env, "R%d must be referenced or trusted\n", regno);
8718 				return -EINVAL;
8719 			}
8720 			fallthrough;
8721 		case KF_ARG_PTR_TO_CTX:
8722 			/* Trusted arguments have the same offset checks as release arguments */
8723 			arg_type |= OBJ_RELEASE;
8724 			break;
8725 		case KF_ARG_PTR_TO_KPTR:
8726 		case KF_ARG_PTR_TO_DYNPTR:
8727 		case KF_ARG_PTR_TO_LIST_HEAD:
8728 		case KF_ARG_PTR_TO_LIST_NODE:
8729 		case KF_ARG_PTR_TO_MEM:
8730 		case KF_ARG_PTR_TO_MEM_SIZE:
8731 			/* Trusted by default */
8732 			break;
8733 		default:
8734 			WARN_ON_ONCE(1);
8735 			return -EFAULT;
8736 		}
8737 
8738 		if (is_kfunc_release(meta) && reg->ref_obj_id)
8739 			arg_type |= OBJ_RELEASE;
8740 		ret = check_func_arg_reg_off(env, reg, regno, arg_type);
8741 		if (ret < 0)
8742 			return ret;
8743 
8744 		switch (kf_arg_type) {
8745 		case KF_ARG_PTR_TO_CTX:
8746 			if (reg->type != PTR_TO_CTX) {
8747 				verbose(env, "arg#%d expected pointer to ctx, but got %s\n", i, btf_type_str(t));
8748 				return -EINVAL;
8749 			}
8750 
8751 			if (meta->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx]) {
8752 				ret = get_kern_ctx_btf_id(&env->log, resolve_prog_type(env->prog));
8753 				if (ret < 0)
8754 					return -EINVAL;
8755 				meta->ret_btf_id  = ret;
8756 			}
8757 			break;
8758 		case KF_ARG_PTR_TO_ALLOC_BTF_ID:
8759 			if (reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) {
8760 				verbose(env, "arg#%d expected pointer to allocated object\n", i);
8761 				return -EINVAL;
8762 			}
8763 			if (!reg->ref_obj_id) {
8764 				verbose(env, "allocated object must be referenced\n");
8765 				return -EINVAL;
8766 			}
8767 			if (meta->btf == btf_vmlinux &&
8768 			    meta->func_id == special_kfunc_list[KF_bpf_obj_drop_impl]) {
8769 				meta->arg_obj_drop.btf = reg->btf;
8770 				meta->arg_obj_drop.btf_id = reg->btf_id;
8771 			}
8772 			break;
8773 		case KF_ARG_PTR_TO_KPTR:
8774 			if (reg->type != PTR_TO_MAP_VALUE) {
8775 				verbose(env, "arg#0 expected pointer to map value\n");
8776 				return -EINVAL;
8777 			}
8778 			ret = process_kf_arg_ptr_to_kptr(env, reg, ref_t, ref_tname, meta, i);
8779 			if (ret < 0)
8780 				return ret;
8781 			break;
8782 		case KF_ARG_PTR_TO_DYNPTR:
8783 			if (reg->type != PTR_TO_STACK) {
8784 				verbose(env, "arg#%d expected pointer to stack\n", i);
8785 				return -EINVAL;
8786 			}
8787 
8788 			if (!is_dynptr_reg_valid_init(env, reg)) {
8789 				verbose(env, "arg#%d pointer type %s %s must be valid and initialized\n",
8790 					i, btf_type_str(ref_t), ref_tname);
8791 				return -EINVAL;
8792 			}
8793 
8794 			if (!is_dynptr_type_expected(env, reg, ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_LOCAL)) {
8795 				verbose(env, "arg#%d pointer type %s %s points to unsupported dynamic pointer type\n",
8796 					i, btf_type_str(ref_t), ref_tname);
8797 				return -EINVAL;
8798 			}
8799 			break;
8800 		case KF_ARG_PTR_TO_LIST_HEAD:
8801 			if (reg->type != PTR_TO_MAP_VALUE &&
8802 			    reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) {
8803 				verbose(env, "arg#%d expected pointer to map value or allocated object\n", i);
8804 				return -EINVAL;
8805 			}
8806 			if (reg->type == (PTR_TO_BTF_ID | MEM_ALLOC) && !reg->ref_obj_id) {
8807 				verbose(env, "allocated object must be referenced\n");
8808 				return -EINVAL;
8809 			}
8810 			ret = process_kf_arg_ptr_to_list_head(env, reg, regno, meta);
8811 			if (ret < 0)
8812 				return ret;
8813 			break;
8814 		case KF_ARG_PTR_TO_LIST_NODE:
8815 			if (reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) {
8816 				verbose(env, "arg#%d expected pointer to allocated object\n", i);
8817 				return -EINVAL;
8818 			}
8819 			if (!reg->ref_obj_id) {
8820 				verbose(env, "allocated object must be referenced\n");
8821 				return -EINVAL;
8822 			}
8823 			ret = process_kf_arg_ptr_to_list_node(env, reg, regno, meta);
8824 			if (ret < 0)
8825 				return ret;
8826 			break;
8827 		case KF_ARG_PTR_TO_BTF_ID:
8828 			/* Only base_type is checked, further checks are done here */
8829 			if ((base_type(reg->type) != PTR_TO_BTF_ID ||
8830 			     bpf_type_has_unsafe_modifiers(reg->type)) &&
8831 			    !reg2btf_ids[base_type(reg->type)]) {
8832 				verbose(env, "arg#%d is %s ", i, reg_type_str(env, reg->type));
8833 				verbose(env, "expected %s or socket\n",
8834 					reg_type_str(env, base_type(reg->type) |
8835 							  (type_flag(reg->type) & BPF_REG_TRUSTED_MODIFIERS)));
8836 				return -EINVAL;
8837 			}
8838 			ret = process_kf_arg_ptr_to_btf_id(env, reg, ref_t, ref_tname, ref_id, meta, i);
8839 			if (ret < 0)
8840 				return ret;
8841 			break;
8842 		case KF_ARG_PTR_TO_MEM:
8843 			resolve_ret = btf_resolve_size(btf, ref_t, &type_size);
8844 			if (IS_ERR(resolve_ret)) {
8845 				verbose(env, "arg#%d reference type('%s %s') size cannot be determined: %ld\n",
8846 					i, btf_type_str(ref_t), ref_tname, PTR_ERR(resolve_ret));
8847 				return -EINVAL;
8848 			}
8849 			ret = check_mem_reg(env, reg, regno, type_size);
8850 			if (ret < 0)
8851 				return ret;
8852 			break;
8853 		case KF_ARG_PTR_TO_MEM_SIZE:
8854 			ret = check_kfunc_mem_size_reg(env, &regs[regno + 1], regno + 1);
8855 			if (ret < 0) {
8856 				verbose(env, "arg#%d arg#%d memory, len pair leads to invalid memory access\n", i, i + 1);
8857 				return ret;
8858 			}
8859 			/* Skip next '__sz' argument */
8860 			i++;
8861 			break;
8862 		}
8863 	}
8864 
8865 	if (is_kfunc_release(meta) && !meta->release_regno) {
8866 		verbose(env, "release kernel function %s expects refcounted PTR_TO_BTF_ID\n",
8867 			func_name);
8868 		return -EINVAL;
8869 	}
8870 
8871 	return 0;
8872 }
8873 
8874 static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
8875 			    int *insn_idx_p)
8876 {
8877 	const struct btf_type *t, *func, *func_proto, *ptr_type;
8878 	struct bpf_reg_state *regs = cur_regs(env);
8879 	const char *func_name, *ptr_type_name;
8880 	bool sleepable, rcu_lock, rcu_unlock;
8881 	struct bpf_kfunc_call_arg_meta meta;
8882 	u32 i, nargs, func_id, ptr_type_id;
8883 	int err, insn_idx = *insn_idx_p;
8884 	const struct btf_param *args;
8885 	const struct btf_type *ret_t;
8886 	struct btf *desc_btf;
8887 	u32 *kfunc_flags;
8888 
8889 	/* skip for now, but return error when we find this in fixup_kfunc_call */
8890 	if (!insn->imm)
8891 		return 0;
8892 
8893 	desc_btf = find_kfunc_desc_btf(env, insn->off);
8894 	if (IS_ERR(desc_btf))
8895 		return PTR_ERR(desc_btf);
8896 
8897 	func_id = insn->imm;
8898 	func = btf_type_by_id(desc_btf, func_id);
8899 	func_name = btf_name_by_offset(desc_btf, func->name_off);
8900 	func_proto = btf_type_by_id(desc_btf, func->type);
8901 
8902 	kfunc_flags = btf_kfunc_id_set_contains(desc_btf, resolve_prog_type(env->prog), func_id);
8903 	if (!kfunc_flags) {
8904 		verbose(env, "calling kernel function %s is not allowed\n",
8905 			func_name);
8906 		return -EACCES;
8907 	}
8908 
8909 	/* Prepare kfunc call metadata */
8910 	memset(&meta, 0, sizeof(meta));
8911 	meta.btf = desc_btf;
8912 	meta.func_id = func_id;
8913 	meta.kfunc_flags = *kfunc_flags;
8914 	meta.func_proto = func_proto;
8915 	meta.func_name = func_name;
8916 
8917 	if (is_kfunc_destructive(&meta) && !capable(CAP_SYS_BOOT)) {
8918 		verbose(env, "destructive kfunc calls require CAP_SYS_BOOT capability\n");
8919 		return -EACCES;
8920 	}
8921 
8922 	sleepable = is_kfunc_sleepable(&meta);
8923 	if (sleepable && !env->prog->aux->sleepable) {
8924 		verbose(env, "program must be sleepable to call sleepable kfunc %s\n", func_name);
8925 		return -EACCES;
8926 	}
8927 
8928 	rcu_lock = is_kfunc_bpf_rcu_read_lock(&meta);
8929 	rcu_unlock = is_kfunc_bpf_rcu_read_unlock(&meta);
8930 	if ((rcu_lock || rcu_unlock) && !env->rcu_tag_supported) {
8931 		verbose(env, "no vmlinux btf rcu tag support for kfunc %s\n", func_name);
8932 		return -EACCES;
8933 	}
8934 
8935 	if (env->cur_state->active_rcu_lock) {
8936 		struct bpf_func_state *state;
8937 		struct bpf_reg_state *reg;
8938 
8939 		if (rcu_lock) {
8940 			verbose(env, "nested rcu read lock (kernel function %s)\n", func_name);
8941 			return -EINVAL;
8942 		} else if (rcu_unlock) {
8943 			bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({
8944 				if (reg->type & MEM_RCU) {
8945 					reg->type &= ~(MEM_RCU | PTR_TRUSTED);
8946 					reg->type |= PTR_UNTRUSTED;
8947 				}
8948 			}));
8949 			env->cur_state->active_rcu_lock = false;
8950 		} else if (sleepable) {
8951 			verbose(env, "kernel func %s is sleepable within rcu_read_lock region\n", func_name);
8952 			return -EACCES;
8953 		}
8954 	} else if (rcu_lock) {
8955 		env->cur_state->active_rcu_lock = true;
8956 	} else if (rcu_unlock) {
8957 		verbose(env, "unmatched rcu read unlock (kernel function %s)\n", func_name);
8958 		return -EINVAL;
8959 	}
8960 
8961 	/* Check the arguments */
8962 	err = check_kfunc_args(env, &meta);
8963 	if (err < 0)
8964 		return err;
8965 	/* In case of a release function, we get the register number of the
8966 	 * refcounted PTR_TO_BTF_ID in bpf_kfunc_call_arg_meta; do the release now.
8967 	 */
8968 	if (meta.release_regno) {
8969 		err = release_reference(env, regs[meta.release_regno].ref_obj_id);
8970 		if (err) {
8971 			verbose(env, "kfunc %s#%d reference has not been acquired before\n",
8972 				func_name, func_id);
8973 			return err;
8974 		}
8975 	}
8976 
8977 	for (i = 0; i < CALLER_SAVED_REGS; i++)
8978 		mark_reg_not_init(env, regs, caller_saved[i]);
8979 
8980 	/* Check return type */
8981 	t = btf_type_skip_modifiers(desc_btf, func_proto->type, NULL);
8982 
8983 	if (is_kfunc_acquire(&meta) && !btf_type_is_struct_ptr(meta.btf, t)) {
8984 		/* Only exception is bpf_obj_new_impl */
8985 		if (meta.btf != btf_vmlinux || meta.func_id != special_kfunc_list[KF_bpf_obj_new_impl]) {
8986 			verbose(env, "acquire kernel function does not return PTR_TO_BTF_ID\n");
8987 			return -EINVAL;
8988 		}
8989 	}
8990 
8991 	if (btf_type_is_scalar(t)) {
8992 		mark_reg_unknown(env, regs, BPF_REG_0);
8993 		mark_btf_func_reg_size(env, BPF_REG_0, t->size);
8994 	} else if (btf_type_is_ptr(t)) {
8995 		ptr_type = btf_type_skip_modifiers(desc_btf, t->type, &ptr_type_id);
8996 
8997 		if (meta.btf == btf_vmlinux && btf_id_set_contains(&special_kfunc_set, meta.func_id)) {
8998 			if (meta.func_id == special_kfunc_list[KF_bpf_obj_new_impl]) {
8999 				struct btf *ret_btf;
9000 				u32 ret_btf_id;
9001 
9002 				if (unlikely(!bpf_global_ma_set))
9003 					return -ENOMEM;
9004 
9005 				if (((u64)(u32)meta.arg_constant.value) != meta.arg_constant.value) {
9006 					verbose(env, "local type ID argument must be in range [0, U32_MAX]\n");
9007 					return -EINVAL;
9008 				}
9009 
9010 				ret_btf = env->prog->aux->btf;
9011 				ret_btf_id = meta.arg_constant.value;
9012 
9013 				/* This may be NULL due to user not supplying a BTF */
9014 				if (!ret_btf) {
9015 					verbose(env, "bpf_obj_new requires prog BTF\n");
9016 					return -EINVAL;
9017 				}
9018 
9019 				ret_t = btf_type_by_id(ret_btf, ret_btf_id);
9020 				if (!ret_t || !__btf_type_is_struct(ret_t)) {
9021 					verbose(env, "bpf_obj_new type ID argument must be of a struct\n");
9022 					return -EINVAL;
9023 				}
9024 
9025 				mark_reg_known_zero(env, regs, BPF_REG_0);
9026 				regs[BPF_REG_0].type = PTR_TO_BTF_ID | MEM_ALLOC;
9027 				regs[BPF_REG_0].btf = ret_btf;
9028 				regs[BPF_REG_0].btf_id = ret_btf_id;
9029 
9030 				env->insn_aux_data[insn_idx].obj_new_size = ret_t->size;
9031 				env->insn_aux_data[insn_idx].kptr_struct_meta =
9032 					btf_find_struct_meta(ret_btf, ret_btf_id);
9033 			} else if (meta.func_id == special_kfunc_list[KF_bpf_obj_drop_impl]) {
9034 				env->insn_aux_data[insn_idx].kptr_struct_meta =
9035 					btf_find_struct_meta(meta.arg_obj_drop.btf,
9036 							     meta.arg_obj_drop.btf_id);
9037 			} else if (meta.func_id == special_kfunc_list[KF_bpf_list_pop_front] ||
9038 				   meta.func_id == special_kfunc_list[KF_bpf_list_pop_back]) {
9039 				struct btf_field *field = meta.arg_list_head.field;
9040 
9041 				mark_reg_known_zero(env, regs, BPF_REG_0);
9042 				regs[BPF_REG_0].type = PTR_TO_BTF_ID | MEM_ALLOC;
9043 				regs[BPF_REG_0].btf = field->list_head.btf;
9044 				regs[BPF_REG_0].btf_id = field->list_head.value_btf_id;
9045 				regs[BPF_REG_0].off = field->list_head.node_offset;
9046 			} else if (meta.func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx]) {
9047 				mark_reg_known_zero(env, regs, BPF_REG_0);
9048 				regs[BPF_REG_0].type = PTR_TO_BTF_ID | PTR_TRUSTED;
9049 				regs[BPF_REG_0].btf = desc_btf;
9050 				regs[BPF_REG_0].btf_id = meta.ret_btf_id;
9051 			} else if (meta.func_id == special_kfunc_list[KF_bpf_rdonly_cast]) {
9052 				ret_t = btf_type_by_id(desc_btf, meta.arg_constant.value);
9053 				if (!ret_t || !btf_type_is_struct(ret_t)) {
9054 					verbose(env,
9055 						"kfunc bpf_rdonly_cast type ID argument must be of a struct\n");
9056 					return -EINVAL;
9057 				}
9058 
9059 				mark_reg_known_zero(env, regs, BPF_REG_0);
9060 				regs[BPF_REG_0].type = PTR_TO_BTF_ID | PTR_UNTRUSTED;
9061 				regs[BPF_REG_0].btf = desc_btf;
9062 				regs[BPF_REG_0].btf_id = meta.arg_constant.value;
9063 			} else {
9064 				verbose(env, "kernel function %s unhandled dynamic return type\n",
9065 					meta.func_name);
9066 				return -EFAULT;
9067 			}
9068 		} else if (!__btf_type_is_struct(ptr_type)) {
9069 			if (!meta.r0_size) {
9070 				ptr_type_name = btf_name_by_offset(desc_btf,
9071 								   ptr_type->name_off);
9072 				verbose(env,
9073 					"kernel function %s returns pointer type %s %s is not supported\n",
9074 					func_name,
9075 					btf_type_str(ptr_type),
9076 					ptr_type_name);
9077 				return -EINVAL;
9078 			}
9079 
9080 			mark_reg_known_zero(env, regs, BPF_REG_0);
9081 			regs[BPF_REG_0].type = PTR_TO_MEM;
9082 			regs[BPF_REG_0].mem_size = meta.r0_size;
9083 
9084 			if (meta.r0_rdonly)
9085 				regs[BPF_REG_0].type |= MEM_RDONLY;
9086 
9087 			/* Ensures we don't access the memory after a release_reference() */
9088 			if (meta.ref_obj_id)
9089 				regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id;
9090 		} else {
9091 			mark_reg_known_zero(env, regs, BPF_REG_0);
9092 			regs[BPF_REG_0].btf = desc_btf;
9093 			regs[BPF_REG_0].type = PTR_TO_BTF_ID;
9094 			regs[BPF_REG_0].btf_id = ptr_type_id;
9095 		}
9096 
9097 		if (is_kfunc_ret_null(&meta)) {
9098 			regs[BPF_REG_0].type |= PTR_MAYBE_NULL;
9099 			/* For mark_ptr_or_null_reg, see 93c230e3f5bd6 */
9100 			regs[BPF_REG_0].id = ++env->id_gen;
9101 		}
9102 		mark_btf_func_reg_size(env, BPF_REG_0, sizeof(void *));
9103 		if (is_kfunc_acquire(&meta)) {
9104 			int id = acquire_reference_state(env, insn_idx);
9105 
9106 			if (id < 0)
9107 				return id;
9108 			if (is_kfunc_ret_null(&meta))
9109 				regs[BPF_REG_0].id = id;
9110 			regs[BPF_REG_0].ref_obj_id = id;
9111 		}
9112 		if (reg_may_point_to_spin_lock(&regs[BPF_REG_0]) && !regs[BPF_REG_0].id)
9113 			regs[BPF_REG_0].id = ++env->id_gen;
9114 	} /* else { add_kfunc_call() ensures it is btf_type_is_void(t) } */
9115 
9116 	nargs = btf_type_vlen(func_proto);
9117 	args = (const struct btf_param *)(func_proto + 1);
9118 	for (i = 0; i < nargs; i++) {
9119 		u32 regno = i + 1;
9120 
9121 		t = btf_type_skip_modifiers(desc_btf, args[i].type, NULL);
9122 		if (btf_type_is_ptr(t))
9123 			mark_btf_func_reg_size(env, regno, sizeof(void *));
9124 		else
9125 			/* scalar. ensured by btf_check_kfunc_arg_match() */
9126 			mark_btf_func_reg_size(env, regno, t->size);
9127 	}
9128 
9129 	return 0;
9130 }
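
/* Illustrative sketch, not part of the verifier, of the RCU handling in
 * check_kfunc_call() above (the dereferenced field is only an assumed example
 * of something the verifier would mark MEM_RCU):
 *
 *	bpf_rcu_read_lock();
 *	struct task_struct *parent = task->real_parent;	// MEM_RCU, trusted here
 *	bpf_rcu_read_unlock();	// parent loses MEM_RCU|PTR_TRUSTED and becomes
 *				// PTR_UNTRUSTED; a nested bpf_rcu_read_lock() or a
 *				// sleepable kfunc inside the section is rejected
 */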
9131 
9132 static bool signed_add_overflows(s64 a, s64 b)
9133 {
9134 	/* Do the add in u64, where overflow is well-defined */
9135 	s64 res = (s64)((u64)a + (u64)b);
9136 
9137 	if (b < 0)
9138 		return res > a;
9139 	return res < a;
9140 }
9141 
9142 static bool signed_add32_overflows(s32 a, s32 b)
9143 {
9144 	/* Do the add in u32, where overflow is well-defined */
9145 	s32 res = (s32)((u32)a + (u32)b);
9146 
9147 	if (b < 0)
9148 		return res > a;
9149 	return res < a;
9150 }
9151 
9152 static bool signed_sub_overflows(s64 a, s64 b)
9153 {
9154 	/* Do the sub in u64, where overflow is well-defined */
9155 	s64 res = (s64)((u64)a - (u64)b);
9156 
9157 	if (b < 0)
9158 		return res < a;
9159 	return res > a;
9160 }
9161 
9162 static bool signed_sub32_overflows(s32 a, s32 b)
9163 {
9164 	/* Do the sub in u32, where overflow is well-defined */
9165 	s32 res = (s32)((u32)a - (u32)b);
9166 
9167 	if (b < 0)
9168 		return res < a;
9169 	return res > a;
9170 }
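
/* Illustrative example, not part of the verifier: the helpers above detect
 * overflow by doing the arithmetic in the unsigned domain, e.g.
 * signed_add_overflows(S64_MAX, 1) computes S64_MIN and, since the positive
 * addend made the result smaller than 'a', returns true; likewise
 * signed_sub_overflows(S64_MIN, 1) wraps to S64_MAX and is reported as
 * overflow.
 */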
9171 
9172 static bool check_reg_sane_offset(struct bpf_verifier_env *env,
9173 				  const struct bpf_reg_state *reg,
9174 				  enum bpf_reg_type type)
9175 {
9176 	bool known = tnum_is_const(reg->var_off);
9177 	s64 val = reg->var_off.value;
9178 	s64 smin = reg->smin_value;
9179 
9180 	if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) {
9181 		verbose(env, "math between %s pointer and %lld is not allowed\n",
9182 			reg_type_str(env, type), val);
9183 		return false;
9184 	}
9185 
9186 	if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) {
9187 		verbose(env, "%s pointer offset %d is not allowed\n",
9188 			reg_type_str(env, type), reg->off);
9189 		return false;
9190 	}
9191 
9192 	if (smin == S64_MIN) {
9193 		verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n",
9194 			reg_type_str(env, type));
9195 		return false;
9196 	}
9197 
9198 	if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) {
9199 		verbose(env, "value %lld makes %s pointer be out of bounds\n",
9200 			smin, reg_type_str(env, type));
9201 		return false;
9202 	}
9203 
9204 	return true;
9205 }
9206 
9207 enum {
9208 	REASON_BOUNDS	= -1,
9209 	REASON_TYPE	= -2,
9210 	REASON_PATHS	= -3,
9211 	REASON_LIMIT	= -4,
9212 	REASON_STACK	= -5,
9213 };
9214 
9215 static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
9216 			      u32 *alu_limit, bool mask_to_left)
9217 {
9218 	u32 max = 0, ptr_limit = 0;
9219 
9220 	switch (ptr_reg->type) {
9221 	case PTR_TO_STACK:
9222 		/* Offset 0 is out-of-bounds, but acceptable start for the
9223 		 * left direction, see BPF_REG_FP. Also, unknown scalar
9224 		 * offset where we would need to deal with min/max bounds is
9225 		 * currently prohibited for unprivileged.
9226 		 */
9227 		max = MAX_BPF_STACK + mask_to_left;
9228 		ptr_limit = -(ptr_reg->var_off.value + ptr_reg->off);
9229 		break;
9230 	case PTR_TO_MAP_VALUE:
9231 		max = ptr_reg->map_ptr->value_size;
9232 		ptr_limit = (mask_to_left ?
9233 			     ptr_reg->smin_value :
9234 			     ptr_reg->umax_value) + ptr_reg->off;
9235 		break;
9236 	default:
9237 		return REASON_TYPE;
9238 	}
9239 
9240 	if (ptr_limit >= max)
9241 		return REASON_LIMIT;
9242 	*alu_limit = ptr_limit;
9243 	return 0;
9244 }
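
/* Illustrative arithmetic only, not part of the verifier: for a PTR_TO_STACK
 * register at fp-16 (reg->off == -16, constant var_off == 0) the code above
 * computes ptr_limit = -(0 + -16) = 16, with max = MAX_BPF_STACK + mask_to_left;
 * a ptr_limit that reaches max fails with REASON_LIMIT, and any pointer type
 * other than PTR_TO_STACK or PTR_TO_MAP_VALUE fails with REASON_TYPE.
 */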
9245 
9246 static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env,
9247 				    const struct bpf_insn *insn)
9248 {
9249 	return env->bypass_spec_v1 || BPF_SRC(insn->code) == BPF_K;
9250 }
9251 
9252 static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux,
9253 				       u32 alu_state, u32 alu_limit)
9254 {
9255 	/* If we arrived here from different branches with different
9256 	 * state or limits to sanitize, then this won't work.
9257 	 */
9258 	if (aux->alu_state &&
9259 	    (aux->alu_state != alu_state ||
9260 	     aux->alu_limit != alu_limit))
9261 		return REASON_PATHS;
9262 
9263 	/* Corresponding fixup done in do_misc_fixups(). */
9264 	aux->alu_state = alu_state;
9265 	aux->alu_limit = alu_limit;
9266 	return 0;
9267 }
9268 
9269 static int sanitize_val_alu(struct bpf_verifier_env *env,
9270 			    struct bpf_insn *insn)
9271 {
9272 	struct bpf_insn_aux_data *aux = cur_aux(env);
9273 
9274 	if (can_skip_alu_sanitation(env, insn))
9275 		return 0;
9276 
9277 	return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0);
9278 }
9279 
9280 static bool sanitize_needed(u8 opcode)
9281 {
9282 	return opcode == BPF_ADD || opcode == BPF_SUB;
9283 }
9284 
9285 struct bpf_sanitize_info {
9286 	struct bpf_insn_aux_data aux;
9287 	bool mask_to_left;
9288 };
9289 
9290 static struct bpf_verifier_state *
9291 sanitize_speculative_path(struct bpf_verifier_env *env,
9292 			  const struct bpf_insn *insn,
9293 			  u32 next_idx, u32 curr_idx)
9294 {
9295 	struct bpf_verifier_state *branch;
9296 	struct bpf_reg_state *regs;
9297 
9298 	branch = push_stack(env, next_idx, curr_idx, true);
9299 	if (branch && insn) {
9300 		regs = branch->frame[branch->curframe]->regs;
9301 		if (BPF_SRC(insn->code) == BPF_K) {
9302 			mark_reg_unknown(env, regs, insn->dst_reg);
9303 		} else if (BPF_SRC(insn->code) == BPF_X) {
9304 			mark_reg_unknown(env, regs, insn->dst_reg);
9305 			mark_reg_unknown(env, regs, insn->src_reg);
9306 		}
9307 	}
9308 	return branch;
9309 }
9310 
9311 static int sanitize_ptr_alu(struct bpf_verifier_env *env,
9312 			    struct bpf_insn *insn,
9313 			    const struct bpf_reg_state *ptr_reg,
9314 			    const struct bpf_reg_state *off_reg,
9315 			    struct bpf_reg_state *dst_reg,
9316 			    struct bpf_sanitize_info *info,
9317 			    const bool commit_window)
9318 {
9319 	struct bpf_insn_aux_data *aux = commit_window ? cur_aux(env) : &info->aux;
9320 	struct bpf_verifier_state *vstate = env->cur_state;
9321 	bool off_is_imm = tnum_is_const(off_reg->var_off);
9322 	bool off_is_neg = off_reg->smin_value < 0;
9323 	bool ptr_is_dst_reg = ptr_reg == dst_reg;
9324 	u8 opcode = BPF_OP(insn->code);
9325 	u32 alu_state, alu_limit;
9326 	struct bpf_reg_state tmp;
9327 	bool ret;
9328 	int err;
9329 
9330 	if (can_skip_alu_sanitation(env, insn))
9331 		return 0;
9332 
9333 	/* We already marked aux for masking from non-speculative
9334 	 * paths, thus we got here in the first place. We only care
9335 	 * to explore bad access from here.
9336 	 */
9337 	if (vstate->speculative)
9338 		goto do_sim;
9339 
9340 	if (!commit_window) {
9341 		if (!tnum_is_const(off_reg->var_off) &&
9342 		    (off_reg->smin_value < 0) != (off_reg->smax_value < 0))
9343 			return REASON_BOUNDS;
9344 
9345 		info->mask_to_left = (opcode == BPF_ADD &&  off_is_neg) ||
9346 				     (opcode == BPF_SUB && !off_is_neg);
9347 	}
9348 
9349 	err = retrieve_ptr_limit(ptr_reg, &alu_limit, info->mask_to_left);
9350 	if (err < 0)
9351 		return err;
9352 
9353 	if (commit_window) {
9354 		/* In commit phase we narrow the masking window based on
9355 		 * the observed pointer move after the simulated operation.
9356 		 */
9357 		alu_state = info->aux.alu_state;
9358 		alu_limit = abs(info->aux.alu_limit - alu_limit);
9359 	} else {
9360 		alu_state  = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
9361 		alu_state |= off_is_imm ? BPF_ALU_IMMEDIATE : 0;
9362 		alu_state |= ptr_is_dst_reg ?
9363 			     BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
9364 
9365 		/* Limit pruning on unknown scalars to enable deep search for
9366 		 * potential masking differences from other program paths.
9367 		 */
9368 		if (!off_is_imm)
9369 			env->explore_alu_limits = true;
9370 	}
9371 
9372 	err = update_alu_sanitation_state(aux, alu_state, alu_limit);
9373 	if (err < 0)
9374 		return err;
9375 do_sim:
9376 	/* If we're in commit phase, we're done here given we already
9377 	 * pushed the truncated dst_reg into the speculative verification
9378 	 * stack.
9379 	 *
9380 	 * Also, when register is a known constant, we rewrite register-based
9381 	 * operation to immediate-based, and thus do not need masking (and as
9382 	 * a consequence, do not need to simulate the zero-truncation either).
9383 	 */
9384 	if (commit_window || off_is_imm)
9385 		return 0;
9386 
9387 	/* Simulate and find potential out-of-bounds access under
9388 	 * speculative execution from truncation as a result of
9389 	 * masking when off was not within expected range. If off
9390 	 * sits in dst, then we temporarily need to move ptr there
9391 	 * to simulate dst (== 0) +/-= ptr. Needed, for example,
9392 	 * for cases where we use K-based arithmetic in one direction
9393 	 * and truncated reg-based in the other in order to explore
9394 	 * bad access.
9395 	 */
9396 	if (!ptr_is_dst_reg) {
9397 		tmp = *dst_reg;
9398 		*dst_reg = *ptr_reg;
9399 	}
9400 	ret = sanitize_speculative_path(env, NULL, env->insn_idx + 1,
9401 					env->insn_idx);
9402 	if (!ptr_is_dst_reg && ret)
9403 		*dst_reg = tmp;
9404 	return !ret ? REASON_STACK : 0;
9405 }
9406 
9407 static void sanitize_mark_insn_seen(struct bpf_verifier_env *env)
9408 {
9409 	struct bpf_verifier_state *vstate = env->cur_state;
9410 
9411 	/* If we simulate paths under speculation, we don't update the
9412 	 * insn as 'seen' such that when we verify unreachable paths in
9413 	 * the non-speculative domain, sanitize_dead_code() can still
9414 	 * rewrite/sanitize them.
9415 	 */
9416 	if (!vstate->speculative)
9417 		env->insn_aux_data[env->insn_idx].seen = env->pass_cnt;
9418 }
9419 
9420 static int sanitize_err(struct bpf_verifier_env *env,
9421 			const struct bpf_insn *insn, int reason,
9422 			const struct bpf_reg_state *off_reg,
9423 			const struct bpf_reg_state *dst_reg)
9424 {
9425 	static const char *err = "pointer arithmetic with it prohibited for !root";
9426 	const char *op = BPF_OP(insn->code) == BPF_ADD ? "add" : "sub";
9427 	u32 dst = insn->dst_reg, src = insn->src_reg;
9428 
9429 	switch (reason) {
9430 	case REASON_BOUNDS:
9431 		verbose(env, "R%d has unknown scalar with mixed signed bounds, %s\n",
9432 			off_reg == dst_reg ? dst : src, err);
9433 		break;
9434 	case REASON_TYPE:
9435 		verbose(env, "R%d has pointer with unsupported alu operation, %s\n",
9436 			off_reg == dst_reg ? src : dst, err);
9437 		break;
9438 	case REASON_PATHS:
9439 		verbose(env, "R%d tried to %s from different maps, paths or scalars, %s\n",
9440 			dst, op, err);
9441 		break;
9442 	case REASON_LIMIT:
9443 		verbose(env, "R%d tried to %s beyond pointer bounds, %s\n",
9444 			dst, op, err);
9445 		break;
9446 	case REASON_STACK:
9447 		verbose(env, "R%d could not be pushed for speculative verification, %s\n",
9448 			dst, err);
9449 		break;
9450 	default:
9451 		verbose(env, "verifier internal error: unknown reason (%d)\n",
9452 			reason);
9453 		break;
9454 	}
9455 
9456 	return -EACCES;
9457 }
9458 
9459 /* check that stack access falls within stack limits and that 'reg' doesn't
9460  * have a variable offset.
9461  *
9462  * Variable offset is prohibited for unprivileged mode for simplicity since it
9463  * requires corresponding support in Spectre masking for stack ALU.  See also
9464  * retrieve_ptr_limit().
9465  *
9467  * 'off' includes 'reg->off'.
9468  */
9469 static int check_stack_access_for_ptr_arithmetic(
9470 				struct bpf_verifier_env *env,
9471 				int regno,
9472 				const struct bpf_reg_state *reg,
9473 				int off)
9474 {
9475 	if (!tnum_is_const(reg->var_off)) {
9476 		char tn_buf[48];
9477 
9478 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
9479 		verbose(env, "R%d variable stack access prohibited for !root, var_off=%s off=%d\n",
9480 			regno, tn_buf, off);
9481 		return -EACCES;
9482 	}
9483 
9484 	if (off >= 0 || off < -MAX_BPF_STACK) {
9485 		verbose(env, "R%d stack pointer arithmetic goes out of range, "
9486 			"prohibited for !root; off=%d\n", regno, off);
9487 		return -EACCES;
9488 	}
9489 
9490 	return 0;
9491 }
9492 
9493 static int sanitize_check_bounds(struct bpf_verifier_env *env,
9494 				 const struct bpf_insn *insn,
9495 				 const struct bpf_reg_state *dst_reg)
9496 {
9497 	u32 dst = insn->dst_reg;
9498 
9499 	/* For unprivileged we require that resulting offset must be in bounds
9500 	 * in order to be able to sanitize access later on.
9501 	 */
9502 	if (env->bypass_spec_v1)
9503 		return 0;
9504 
9505 	switch (dst_reg->type) {
9506 	case PTR_TO_STACK:
9507 		if (check_stack_access_for_ptr_arithmetic(env, dst, dst_reg,
9508 					dst_reg->off + dst_reg->var_off.value))
9509 			return -EACCES;
9510 		break;
9511 	case PTR_TO_MAP_VALUE:
9512 		if (check_map_access(env, dst, dst_reg->off, 1, false, ACCESS_HELPER)) {
9513 			verbose(env, "R%d pointer arithmetic of map value goes out of range, "
9514 				"prohibited for !root\n", dst);
9515 			return -EACCES;
9516 		}
9517 		break;
9518 	default:
9519 		break;
9520 	}
9521 
9522 	return 0;
9523 }
9524 
9525 /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
9526  * Caller should also handle BPF_MOV case separately.
9527  * If we return -EACCES, caller may want to try again treating pointer as a
9528  * scalar.  So we only emit a diagnostic if !env->allow_ptr_leaks.
9529  */
9530 static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
9531 				   struct bpf_insn *insn,
9532 				   const struct bpf_reg_state *ptr_reg,
9533 				   const struct bpf_reg_state *off_reg)
9534 {
9535 	struct bpf_verifier_state *vstate = env->cur_state;
9536 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
9537 	struct bpf_reg_state *regs = state->regs, *dst_reg;
9538 	bool known = tnum_is_const(off_reg->var_off);
9539 	s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value,
9540 	    smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
9541 	u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
9542 	    umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
9543 	struct bpf_sanitize_info info = {};
9544 	u8 opcode = BPF_OP(insn->code);
9545 	u32 dst = insn->dst_reg;
9546 	int ret;
9547 
9548 	dst_reg = &regs[dst];
9549 
9550 	if ((known && (smin_val != smax_val || umin_val != umax_val)) ||
9551 	    smin_val > smax_val || umin_val > umax_val) {
9552 		/* Taint dst register if offset had invalid bounds derived from
9553 		 * e.g. dead branches.
9554 		 */
9555 		__mark_reg_unknown(env, dst_reg);
9556 		return 0;
9557 	}
9558 
9559 	if (BPF_CLASS(insn->code) != BPF_ALU64) {
9560 		/* 32-bit ALU ops on pointers produce (meaningless) scalars */
9561 		if (opcode == BPF_SUB && env->allow_ptr_leaks) {
9562 			__mark_reg_unknown(env, dst_reg);
9563 			return 0;
9564 		}
9565 
9566 		verbose(env,
9567 			"R%d 32-bit pointer arithmetic prohibited\n",
9568 			dst);
9569 		return -EACCES;
9570 	}
9571 
9572 	if (ptr_reg->type & PTR_MAYBE_NULL) {
9573 		verbose(env, "R%d pointer arithmetic on %s prohibited, null-check it first\n",
9574 			dst, reg_type_str(env, ptr_reg->type));
9575 		return -EACCES;
9576 	}
9577 
9578 	switch (base_type(ptr_reg->type)) {
9579 	case CONST_PTR_TO_MAP:
9580 		/* smin_val represents the known value */
9581 		if (known && smin_val == 0 && opcode == BPF_ADD)
9582 			break;
9583 		fallthrough;
9584 	case PTR_TO_PACKET_END:
9585 	case PTR_TO_SOCKET:
9586 	case PTR_TO_SOCK_COMMON:
9587 	case PTR_TO_TCP_SOCK:
9588 	case PTR_TO_XDP_SOCK:
9589 		verbose(env, "R%d pointer arithmetic on %s prohibited\n",
9590 			dst, reg_type_str(env, ptr_reg->type));
9591 		return -EACCES;
9592 	default:
9593 		break;
9594 	}
9595 
9596 	/* In case of 'scalar += pointer', dst_reg inherits pointer type and id.
9597 	 * The id may be overwritten later if we create a new variable offset.
9598 	 */
9599 	dst_reg->type = ptr_reg->type;
9600 	dst_reg->id = ptr_reg->id;
9601 
9602 	if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) ||
9603 	    !check_reg_sane_offset(env, ptr_reg, ptr_reg->type))
9604 		return -EINVAL;
9605 
9606 	/* pointer types do not carry 32-bit bounds at the moment. */
9607 	__mark_reg32_unbounded(dst_reg);
9608 
9609 	if (sanitize_needed(opcode)) {
9610 		ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg,
9611 				       &info, false);
9612 		if (ret < 0)
9613 			return sanitize_err(env, insn, ret, off_reg, dst_reg);
9614 	}
9615 
9616 	switch (opcode) {
9617 	case BPF_ADD:
9618 		/* We can take a fixed offset as long as it doesn't overflow
9619 		 * the s32 'off' field
9620 		 */
9621 		if (known && (ptr_reg->off + smin_val ==
9622 			      (s64)(s32)(ptr_reg->off + smin_val))) {
9623 			/* pointer += K.  Accumulate it into fixed offset */
9624 			dst_reg->smin_value = smin_ptr;
9625 			dst_reg->smax_value = smax_ptr;
9626 			dst_reg->umin_value = umin_ptr;
9627 			dst_reg->umax_value = umax_ptr;
9628 			dst_reg->var_off = ptr_reg->var_off;
9629 			dst_reg->off = ptr_reg->off + smin_val;
9630 			dst_reg->raw = ptr_reg->raw;
9631 			break;
9632 		}
9633 		/* A new variable offset is created.  Note that off_reg->off
9634 		 * == 0, since it's a scalar.
9635 		 * dst_reg gets the pointer type and since some positive
9636 		 * integer value was added to the pointer, give it a new 'id'
9637 		 * if it's a PTR_TO_PACKET.
9638 		 * this creates a new 'base' pointer, off_reg (variable) gets
9639 		 * added into the variable offset, and we copy the fixed offset
9640 		 * from ptr_reg.
9641 		 */
9642 		if (signed_add_overflows(smin_ptr, smin_val) ||
9643 		    signed_add_overflows(smax_ptr, smax_val)) {
9644 			dst_reg->smin_value = S64_MIN;
9645 			dst_reg->smax_value = S64_MAX;
9646 		} else {
9647 			dst_reg->smin_value = smin_ptr + smin_val;
9648 			dst_reg->smax_value = smax_ptr + smax_val;
9649 		}
9650 		if (umin_ptr + umin_val < umin_ptr ||
9651 		    umax_ptr + umax_val < umax_ptr) {
9652 			dst_reg->umin_value = 0;
9653 			dst_reg->umax_value = U64_MAX;
9654 		} else {
9655 			dst_reg->umin_value = umin_ptr + umin_val;
9656 			dst_reg->umax_value = umax_ptr + umax_val;
9657 		}
9658 		dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off);
9659 		dst_reg->off = ptr_reg->off;
9660 		dst_reg->raw = ptr_reg->raw;
9661 		if (reg_is_pkt_pointer(ptr_reg)) {
9662 			dst_reg->id = ++env->id_gen;
9663 			/* something was added to pkt_ptr, set range to zero */
9664 			memset(&dst_reg->raw, 0, sizeof(dst_reg->raw));
9665 		}
9666 		break;
9667 	case BPF_SUB:
9668 		if (dst_reg == off_reg) {
9669 			/* scalar -= pointer.  Creates an unknown scalar */
9670 			verbose(env, "R%d tried to subtract pointer from scalar\n",
9671 				dst);
9672 			return -EACCES;
9673 		}
9674 		/* We don't allow subtraction from FP, because (according to
9675 		 * test_verifier.c test "invalid fp arithmetic") JITs might not
9676 		 * be able to deal with it.
9677 		 */
9678 		if (ptr_reg->type == PTR_TO_STACK) {
9679 			verbose(env, "R%d subtraction from stack pointer prohibited\n",
9680 				dst);
9681 			return -EACCES;
9682 		}
9683 		if (known && (ptr_reg->off - smin_val ==
9684 			      (s64)(s32)(ptr_reg->off - smin_val))) {
9685 			/* pointer -= K.  Subtract it from fixed offset */
9686 			dst_reg->smin_value = smin_ptr;
9687 			dst_reg->smax_value = smax_ptr;
9688 			dst_reg->umin_value = umin_ptr;
9689 			dst_reg->umax_value = umax_ptr;
9690 			dst_reg->var_off = ptr_reg->var_off;
9691 			dst_reg->id = ptr_reg->id;
9692 			dst_reg->off = ptr_reg->off - smin_val;
9693 			dst_reg->raw = ptr_reg->raw;
9694 			break;
9695 		}
9696 		/* A new variable offset is created.  If the subtrahend is known
9697 		 * nonnegative, then any reg->range we had before is still good.
9698 		 */
9699 		if (signed_sub_overflows(smin_ptr, smax_val) ||
9700 		    signed_sub_overflows(smax_ptr, smin_val)) {
9701 			/* Overflow possible, we know nothing */
9702 			dst_reg->smin_value = S64_MIN;
9703 			dst_reg->smax_value = S64_MAX;
9704 		} else {
9705 			dst_reg->smin_value = smin_ptr - smax_val;
9706 			dst_reg->smax_value = smax_ptr - smin_val;
9707 		}
9708 		if (umin_ptr < umax_val) {
9709 			/* Overflow possible, we know nothing */
9710 			dst_reg->umin_value = 0;
9711 			dst_reg->umax_value = U64_MAX;
9712 		} else {
9713 			/* Cannot overflow (as long as bounds are consistent) */
9714 			dst_reg->umin_value = umin_ptr - umax_val;
9715 			dst_reg->umax_value = umax_ptr - umin_val;
9716 		}
9717 		dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off);
9718 		dst_reg->off = ptr_reg->off;
9719 		dst_reg->raw = ptr_reg->raw;
9720 		if (reg_is_pkt_pointer(ptr_reg)) {
9721 			dst_reg->id = ++env->id_gen;
9722 			/* a negative value subtracted, i.e. added to pkt_ptr, sets range to zero */
9723 			if (smin_val < 0)
9724 				memset(&dst_reg->raw, 0, sizeof(dst_reg->raw));
9725 		}
9726 		break;
9727 	case BPF_AND:
9728 	case BPF_OR:
9729 	case BPF_XOR:
9730 		/* bitwise ops on pointers are troublesome, prohibit. */
9731 		verbose(env, "R%d bitwise operator %s on pointer prohibited\n",
9732 			dst, bpf_alu_string[opcode >> 4]);
9733 		return -EACCES;
9734 	default:
9735 		/* other operators (e.g. MUL,LSH) produce non-pointer results */
9736 		verbose(env, "R%d pointer arithmetic with %s operator prohibited\n",
9737 			dst, bpf_alu_string[opcode >> 4]);
9738 		return -EACCES;
9739 	}
9740 
9741 	if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type))
9742 		return -EINVAL;
9743 	reg_bounds_sync(dst_reg);
9744 	if (sanitize_check_bounds(env, insn, dst_reg) < 0)
9745 		return -EACCES;
9746 	if (sanitize_needed(opcode)) {
9747 		ret = sanitize_ptr_alu(env, insn, dst_reg, off_reg, dst_reg,
9748 				       &info, true);
9749 		if (ret < 0)
9750 			return sanitize_err(env, insn, ret, off_reg, dst_reg);
9751 	}
9752 
9753 	return 0;
9754 }
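
/* Illustrative example, not part of the verifier: for 'map_value_ptr += 8'
 * with a known constant 8, the BPF_ADD case above folds the 8 into
 * dst_reg->off and keeps the pointer's bounds, whereas 'map_value_ptr += r2'
 * with r2 in [0, 16] creates a new variable offset via tnum_add() and widens
 * smin/smax/umin/umax accordingly, leaving the range to be validated at the
 * later memory access.
 */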
9755 
9756 static void scalar32_min_max_add(struct bpf_reg_state *dst_reg,
9757 				 struct bpf_reg_state *src_reg)
9758 {
9759 	s32 smin_val = src_reg->s32_min_value;
9760 	s32 smax_val = src_reg->s32_max_value;
9761 	u32 umin_val = src_reg->u32_min_value;
9762 	u32 umax_val = src_reg->u32_max_value;
9763 
9764 	if (signed_add32_overflows(dst_reg->s32_min_value, smin_val) ||
9765 	    signed_add32_overflows(dst_reg->s32_max_value, smax_val)) {
9766 		dst_reg->s32_min_value = S32_MIN;
9767 		dst_reg->s32_max_value = S32_MAX;
9768 	} else {
9769 		dst_reg->s32_min_value += smin_val;
9770 		dst_reg->s32_max_value += smax_val;
9771 	}
9772 	if (dst_reg->u32_min_value + umin_val < umin_val ||
9773 	    dst_reg->u32_max_value + umax_val < umax_val) {
9774 		dst_reg->u32_min_value = 0;
9775 		dst_reg->u32_max_value = U32_MAX;
9776 	} else {
9777 		dst_reg->u32_min_value += umin_val;
9778 		dst_reg->u32_max_value += umax_val;
9779 	}
9780 }
9781 
9782 static void scalar_min_max_add(struct bpf_reg_state *dst_reg,
9783 			       struct bpf_reg_state *src_reg)
9784 {
9785 	s64 smin_val = src_reg->smin_value;
9786 	s64 smax_val = src_reg->smax_value;
9787 	u64 umin_val = src_reg->umin_value;
9788 	u64 umax_val = src_reg->umax_value;
9789 
9790 	if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
9791 	    signed_add_overflows(dst_reg->smax_value, smax_val)) {
9792 		dst_reg->smin_value = S64_MIN;
9793 		dst_reg->smax_value = S64_MAX;
9794 	} else {
9795 		dst_reg->smin_value += smin_val;
9796 		dst_reg->smax_value += smax_val;
9797 	}
9798 	if (dst_reg->umin_value + umin_val < umin_val ||
9799 	    dst_reg->umax_value + umax_val < umax_val) {
9800 		dst_reg->umin_value = 0;
9801 		dst_reg->umax_value = U64_MAX;
9802 	} else {
9803 		dst_reg->umin_value += umin_val;
9804 		dst_reg->umax_value += umax_val;
9805 	}
9806 }
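
/* Illustrative example, not part of the verifier: adding a src_reg known to be
 * in [1, 3] to a dst_reg known to be in [10, 20] yields [11, 23] for both the
 * signed and unsigned bounds; only when an addition could wrap (e.g.
 * dst_reg->smax_value == S64_MAX) is the corresponding bound pair reset to the
 * full range.
 */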
9807 
9808 static void scalar32_min_max_sub(struct bpf_reg_state *dst_reg,
9809 				 struct bpf_reg_state *src_reg)
9810 {
9811 	s32 smin_val = src_reg->s32_min_value;
9812 	s32 smax_val = src_reg->s32_max_value;
9813 	u32 umin_val = src_reg->u32_min_value;
9814 	u32 umax_val = src_reg->u32_max_value;
9815 
9816 	if (signed_sub32_overflows(dst_reg->s32_min_value, smax_val) ||
9817 	    signed_sub32_overflows(dst_reg->s32_max_value, smin_val)) {
9818 		/* Overflow possible, we know nothing */
9819 		dst_reg->s32_min_value = S32_MIN;
9820 		dst_reg->s32_max_value = S32_MAX;
9821 	} else {
9822 		dst_reg->s32_min_value -= smax_val;
9823 		dst_reg->s32_max_value -= smin_val;
9824 	}
9825 	if (dst_reg->u32_min_value < umax_val) {
9826 		/* Overflow possible, we know nothing */
9827 		dst_reg->u32_min_value = 0;
9828 		dst_reg->u32_max_value = U32_MAX;
9829 	} else {
9830 		/* Cannot overflow (as long as bounds are consistent) */
9831 		dst_reg->u32_min_value -= umax_val;
9832 		dst_reg->u32_max_value -= umin_val;
9833 	}
9834 }
9835 
9836 static void scalar_min_max_sub(struct bpf_reg_state *dst_reg,
9837 			       struct bpf_reg_state *src_reg)
9838 {
9839 	s64 smin_val = src_reg->smin_value;
9840 	s64 smax_val = src_reg->smax_value;
9841 	u64 umin_val = src_reg->umin_value;
9842 	u64 umax_val = src_reg->umax_value;
9843 
9844 	if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
9845 	    signed_sub_overflows(dst_reg->smax_value, smin_val)) {
9846 		/* Overflow possible, we know nothing */
9847 		dst_reg->smin_value = S64_MIN;
9848 		dst_reg->smax_value = S64_MAX;
9849 	} else {
9850 		dst_reg->smin_value -= smax_val;
9851 		dst_reg->smax_value -= smin_val;
9852 	}
9853 	if (dst_reg->umin_value < umax_val) {
9854 		/* Overflow possible, we know nothing */
9855 		dst_reg->umin_value = 0;
9856 		dst_reg->umax_value = U64_MAX;
9857 	} else {
9858 		/* Cannot overflow (as long as bounds are consistent) */
9859 		dst_reg->umin_value -= umax_val;
9860 		dst_reg->umax_value -= umin_val;
9861 	}
9862 }
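/* Worked example for the sub bounds above (editor's illustration): with
 * dst_reg unsigned bounds [umin=100, umax=200] and src_reg bounds
 * [umin=10, umax=50], dst->umin_value (100) is not below umax_val (50),
 * so no underflow is possible and the result is [100 - 50, 200 - 10] =
 * [50, 190]. Had umax_val exceeded dst->umin_value, the bounds would
 * have been reset to [0, U64_MAX].
 */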
9863 
9864 static void scalar32_min_max_mul(struct bpf_reg_state *dst_reg,
9865 				 struct bpf_reg_state *src_reg)
9866 {
9867 	s32 smin_val = src_reg->s32_min_value;
9868 	u32 umin_val = src_reg->u32_min_value;
9869 	u32 umax_val = src_reg->u32_max_value;
9870 
9871 	if (smin_val < 0 || dst_reg->s32_min_value < 0) {
9872 		/* Ain't nobody got time to multiply that sign */
9873 		__mark_reg32_unbounded(dst_reg);
9874 		return;
9875 	}
9876 	/* Both values are positive, so we can work with unsigned and
9877 	 * copy the result to signed (unless it exceeds S32_MAX).
9878 	 */
9879 	if (umax_val > U16_MAX || dst_reg->u32_max_value > U16_MAX) {
9880 		/* Potential overflow, we know nothing */
9881 		__mark_reg32_unbounded(dst_reg);
9882 		return;
9883 	}
9884 	dst_reg->u32_min_value *= umin_val;
9885 	dst_reg->u32_max_value *= umax_val;
9886 	if (dst_reg->u32_max_value > S32_MAX) {
9887 		/* Overflow possible, we know nothing */
9888 		dst_reg->s32_min_value = S32_MIN;
9889 		dst_reg->s32_max_value = S32_MAX;
9890 	} else {
9891 		dst_reg->s32_min_value = dst_reg->u32_min_value;
9892 		dst_reg->s32_max_value = dst_reg->u32_max_value;
9893 	}
9894 }
9895 
9896 static void scalar_min_max_mul(struct bpf_reg_state *dst_reg,
9897 			       struct bpf_reg_state *src_reg)
9898 {
9899 	s64 smin_val = src_reg->smin_value;
9900 	u64 umin_val = src_reg->umin_value;
9901 	u64 umax_val = src_reg->umax_value;
9902 
9903 	if (smin_val < 0 || dst_reg->smin_value < 0) {
9904 		/* Ain't nobody got time to multiply that sign */
9905 		__mark_reg64_unbounded(dst_reg);
9906 		return;
9907 	}
9908 	/* Both values are positive, so we can work with unsigned and
9909 	 * copy the result to signed (unless it exceeds S64_MAX).
9910 	 */
9911 	if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) {
9912 		/* Potential overflow, we know nothing */
9913 		__mark_reg64_unbounded(dst_reg);
9914 		return;
9915 	}
9916 	dst_reg->umin_value *= umin_val;
9917 	dst_reg->umax_value *= umax_val;
9918 	if (dst_reg->umax_value > S64_MAX) {
9919 		/* Overflow possible, we know nothing */
9920 		dst_reg->smin_value = S64_MIN;
9921 		dst_reg->smax_value = S64_MAX;
9922 	} else {
9923 		dst_reg->smin_value = dst_reg->umin_value;
9924 		dst_reg->smax_value = dst_reg->umax_value;
9925 	}
9926 }
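/* Worked example for the mul bounds above (editor's illustration): both
 * operands must be non-negative with umax <= U32_MAX, so the u64 product
 * cannot wrap (it is at most (2^32 - 1)^2 < 2^64). E.g. dst [umin=3,
 * umax=1000] * src [umin=2, umax=10] gives [6, 10000], which is <=
 * S64_MAX, so the signed bounds are copied from the unsigned ones.
 */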
9927 
9928 static void scalar32_min_max_and(struct bpf_reg_state *dst_reg,
9929 				 struct bpf_reg_state *src_reg)
9930 {
9931 	bool src_known = tnum_subreg_is_const(src_reg->var_off);
9932 	bool dst_known = tnum_subreg_is_const(dst_reg->var_off);
9933 	struct tnum var32_off = tnum_subreg(dst_reg->var_off);
9934 	s32 smin_val = src_reg->s32_min_value;
9935 	u32 umax_val = src_reg->u32_max_value;
9936 
9937 	if (src_known && dst_known) {
9938 		__mark_reg32_known(dst_reg, var32_off.value);
9939 		return;
9940 	}
9941 
9942 	/* We get our minimum from the var_off, since that's inherently
9943 	 * bitwise.  Our maximum is the minimum of the operands' maxima.
9944 	 */
9945 	dst_reg->u32_min_value = var32_off.value;
9946 	dst_reg->u32_max_value = min(dst_reg->u32_max_value, umax_val);
9947 	if (dst_reg->s32_min_value < 0 || smin_val < 0) {
9948 		/* Lose signed bounds when ANDing negative numbers,
9949 		 * ain't nobody got time for that.
9950 		 */
9951 		dst_reg->s32_min_value = S32_MIN;
9952 		dst_reg->s32_max_value = S32_MAX;
9953 	} else {
9954 		/* ANDing two positives gives a positive, so safe to
9955 		 * cast result into s32.
9956 		 */
9957 		dst_reg->s32_min_value = dst_reg->u32_min_value;
9958 		dst_reg->s32_max_value = dst_reg->u32_max_value;
9959 	}
9960 }
9961 
9962 static void scalar_min_max_and(struct bpf_reg_state *dst_reg,
9963 			       struct bpf_reg_state *src_reg)
9964 {
9965 	bool src_known = tnum_is_const(src_reg->var_off);
9966 	bool dst_known = tnum_is_const(dst_reg->var_off);
9967 	s64 smin_val = src_reg->smin_value;
9968 	u64 umax_val = src_reg->umax_value;
9969 
9970 	if (src_known && dst_known) {
9971 		__mark_reg_known(dst_reg, dst_reg->var_off.value);
9972 		return;
9973 	}
9974 
9975 	/* We get our minimum from the var_off, since that's inherently
9976 	 * bitwise.  Our maximum is the minimum of the operands' maxima.
9977 	 */
9978 	dst_reg->umin_value = dst_reg->var_off.value;
9979 	dst_reg->umax_value = min(dst_reg->umax_value, umax_val);
9980 	if (dst_reg->smin_value < 0 || smin_val < 0) {
9981 		/* Lose signed bounds when ANDing negative numbers,
9982 		 * ain't nobody got time for that.
9983 		 */
9984 		dst_reg->smin_value = S64_MIN;
9985 		dst_reg->smax_value = S64_MAX;
9986 	} else {
9987 		/* ANDing two positives gives a positive, so safe to
9988 		 * cast result into s64.
9989 		 */
9990 		dst_reg->smin_value = dst_reg->umin_value;
9991 		dst_reg->smax_value = dst_reg->umax_value;
9992 	}
9993 	/* We may learn something more from the var_off */
9994 	__update_reg_bounds(dst_reg);
9995 }
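/* Worked example for the AND bounds above (editor's illustration): for
 * "r1 &= 0xff" with r1 known to be in [0, 1000] and no bits of the
 * result known set (var_off.value is 0), umin becomes 0 and umax becomes
 * min(1000, 0xff) = 0xff. Both operands are non-negative here, so the
 * signed bounds are then copied from the unsigned ones.
 */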
9996 
9997 static void scalar32_min_max_or(struct bpf_reg_state *dst_reg,
9998 				struct bpf_reg_state *src_reg)
9999 {
10000 	bool src_known = tnum_subreg_is_const(src_reg->var_off);
10001 	bool dst_known = tnum_subreg_is_const(dst_reg->var_off);
10002 	struct tnum var32_off = tnum_subreg(dst_reg->var_off);
10003 	s32 smin_val = src_reg->s32_min_value;
10004 	u32 umin_val = src_reg->u32_min_value;
10005 
10006 	if (src_known && dst_known) {
10007 		__mark_reg32_known(dst_reg, var32_off.value);
10008 		return;
10009 	}
10010 
10011 	/* We get our maximum from the var_off, and our minimum is the
10012 	 * maximum of the operands' minima
10013 	 */
10014 	dst_reg->u32_min_value = max(dst_reg->u32_min_value, umin_val);
10015 	dst_reg->u32_max_value = var32_off.value | var32_off.mask;
10016 	if (dst_reg->s32_min_value < 0 || smin_val < 0) {
10017 		/* Lose signed bounds when ORing negative numbers,
10018 		 * ain't nobody got time for that.
10019 		 */
10020 		dst_reg->s32_min_value = S32_MIN;
10021 		dst_reg->s32_max_value = S32_MAX;
10022 	} else {
10023 		/* ORing two positives gives a positive, so safe to
10024 		 * cast result into s32.
10025 		 */
10026 		dst_reg->s32_min_value = dst_reg->u32_min_value;
10027 		dst_reg->s32_max_value = dst_reg->u32_max_value;
10028 	}
10029 }
10030 
10031 static void scalar_min_max_or(struct bpf_reg_state *dst_reg,
10032 			      struct bpf_reg_state *src_reg)
10033 {
10034 	bool src_known = tnum_is_const(src_reg->var_off);
10035 	bool dst_known = tnum_is_const(dst_reg->var_off);
10036 	s64 smin_val = src_reg->smin_value;
10037 	u64 umin_val = src_reg->umin_value;
10038 
10039 	if (src_known && dst_known) {
10040 		__mark_reg_known(dst_reg, dst_reg->var_off.value);
10041 		return;
10042 	}
10043 
10044 	/* We get our maximum from the var_off, and our minimum is the
10045 	 * maximum of the operands' minima
10046 	 */
10047 	dst_reg->umin_value = max(dst_reg->umin_value, umin_val);
10048 	dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask;
10049 	if (dst_reg->smin_value < 0 || smin_val < 0) {
10050 		/* Lose signed bounds when ORing negative numbers,
10051 		 * ain't nobody got time for that.
10052 		 */
10053 		dst_reg->smin_value = S64_MIN;
10054 		dst_reg->smax_value = S64_MAX;
10055 	} else {
10056 		/* ORing two positives gives a positive, so safe to
10057 		 * cast result into s64.
10058 		 */
10059 		dst_reg->smin_value = dst_reg->umin_value;
10060 		dst_reg->smax_value = dst_reg->umax_value;
10061 	}
10062 	/* We may learn something more from the var_off */
10063 	__update_reg_bounds(dst_reg);
10064 }
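/* Worked example for the OR bounds above (editor's illustration): for
 * "r1 |= 0x3" with r1 known to be in [0, 8] (var_off value 0, mask 0xf),
 * umin becomes max(0, 0x3) = 0x3 and umax becomes the resulting
 * var_off.value | var_off.mask = 0xf. Both operands are non-negative, so
 * the signed bounds follow the unsigned ones.
 */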
10065 
10066 static void scalar32_min_max_xor(struct bpf_reg_state *dst_reg,
10067 				 struct bpf_reg_state *src_reg)
10068 {
10069 	bool src_known = tnum_subreg_is_const(src_reg->var_off);
10070 	bool dst_known = tnum_subreg_is_const(dst_reg->var_off);
10071 	struct tnum var32_off = tnum_subreg(dst_reg->var_off);
10072 	s32 smin_val = src_reg->s32_min_value;
10073 
10074 	if (src_known && dst_known) {
10075 		__mark_reg32_known(dst_reg, var32_off.value);
10076 		return;
10077 	}
10078 
10079 	/* We get both minimum and maximum from the var32_off. */
10080 	dst_reg->u32_min_value = var32_off.value;
10081 	dst_reg->u32_max_value = var32_off.value | var32_off.mask;
10082 
10083 	if (dst_reg->s32_min_value >= 0 && smin_val >= 0) {
10084 		/* XORing two positive sign numbers gives a positive,
10085 		 * so safe to cast u32 result into s32.
10086 		 */
10087 		dst_reg->s32_min_value = dst_reg->u32_min_value;
10088 		dst_reg->s32_max_value = dst_reg->u32_max_value;
10089 	} else {
10090 		dst_reg->s32_min_value = S32_MIN;
10091 		dst_reg->s32_max_value = S32_MAX;
10092 	}
10093 }
10094 
10095 static void scalar_min_max_xor(struct bpf_reg_state *dst_reg,
10096 			       struct bpf_reg_state *src_reg)
10097 {
10098 	bool src_known = tnum_is_const(src_reg->var_off);
10099 	bool dst_known = tnum_is_const(dst_reg->var_off);
10100 	s64 smin_val = src_reg->smin_value;
10101 
10102 	if (src_known && dst_known) {
10103 		/* dst_reg->var_off.value has been updated earlier */
10104 		__mark_reg_known(dst_reg, dst_reg->var_off.value);
10105 		return;
10106 	}
10107 
10108 	/* We get both minimum and maximum from the var_off. */
10109 	dst_reg->umin_value = dst_reg->var_off.value;
10110 	dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask;
10111 
10112 	if (dst_reg->smin_value >= 0 && smin_val >= 0) {
10113 		/* XORing two positive sign numbers gives a positive,
10114 		 * so safe to cast u64 result into s64.
10115 		 */
10116 		dst_reg->smin_value = dst_reg->umin_value;
10117 		dst_reg->smax_value = dst_reg->umax_value;
10118 	} else {
10119 		dst_reg->smin_value = S64_MIN;
10120 		dst_reg->smax_value = S64_MAX;
10121 	}
10122 
10123 	__update_reg_bounds(dst_reg);
10124 }
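/* Worked example for the XOR bounds above (editor's illustration): if
 * the resulting var_off is {value = 0x10, mask = 0x0f} (bit 4 known set,
 * low four bits unknown), the unsigned bounds become umin = 0x10 and
 * umax = 0x10 | 0x0f = 0x1f. With both inputs known non-negative the
 * signed bounds are copied from these, otherwise they are left unbounded
 * and later refined from var_off.
 */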
10125 
10126 static void __scalar32_min_max_lsh(struct bpf_reg_state *dst_reg,
10127 				   u64 umin_val, u64 umax_val)
10128 {
10129 	/* We lose all sign bit information (except what we can pick
10130 	 * up from var_off)
10131 	 */
10132 	dst_reg->s32_min_value = S32_MIN;
10133 	dst_reg->s32_max_value = S32_MAX;
10134 	/* If we might shift our top bit out, then we know nothing */
10135 	if (umax_val > 31 || dst_reg->u32_max_value > 1ULL << (31 - umax_val)) {
10136 		dst_reg->u32_min_value = 0;
10137 		dst_reg->u32_max_value = U32_MAX;
10138 	} else {
10139 		dst_reg->u32_min_value <<= umin_val;
10140 		dst_reg->u32_max_value <<= umax_val;
10141 	}
10142 }
10143 
10144 static void scalar32_min_max_lsh(struct bpf_reg_state *dst_reg,
10145 				 struct bpf_reg_state *src_reg)
10146 {
10147 	u32 umax_val = src_reg->u32_max_value;
10148 	u32 umin_val = src_reg->u32_min_value;
10149 	/* u32 alu operation will zext upper bits */
10150 	struct tnum subreg = tnum_subreg(dst_reg->var_off);
10151 
10152 	__scalar32_min_max_lsh(dst_reg, umin_val, umax_val);
10153 	dst_reg->var_off = tnum_subreg(tnum_lshift(subreg, umin_val));
10154 	/* Not strictly required, but to be careful mark the reg64 bounds as
10155 	 * unknown so that we are forced to pick them up from the tnum and zext
10156 	 * later; if some path skips this step we are still safe.
10157 	 */
10158 	__mark_reg64_unbounded(dst_reg);
10159 	__update_reg32_bounds(dst_reg);
10160 }
10161 
10162 static void __scalar64_min_max_lsh(struct bpf_reg_state *dst_reg,
10163 				   u64 umin_val, u64 umax_val)
10164 {
10165 	/* Special case <<32 because it is a common compiler pattern to sign
10166 	 * extend subreg by doing <<32 s>>32. In this case if 32bit bounds are
10167 	 * positive we know this shift will also be positive so we can track
10168 	 * bounds correctly. Otherwise we lose all sign bit information except
10169 	 * what we can pick up from var_off. Perhaps we can generalize this
10170 	 * later to shifts of any length.
10171 	 */
10172 	if (umin_val == 32 && umax_val == 32 && dst_reg->s32_max_value >= 0)
10173 		dst_reg->smax_value = (s64)dst_reg->s32_max_value << 32;
10174 	else
10175 		dst_reg->smax_value = S64_MAX;
10176 
10177 	if (umin_val == 32 && umax_val == 32 && dst_reg->s32_min_value >= 0)
10178 		dst_reg->smin_value = (s64)dst_reg->s32_min_value << 32;
10179 	else
10180 		dst_reg->smin_value = S64_MIN;
10181 
10182 	/* If we might shift our top bit out, then we know nothing */
10183 	if (dst_reg->umax_value > 1ULL << (63 - umax_val)) {
10184 		dst_reg->umin_value = 0;
10185 		dst_reg->umax_value = U64_MAX;
10186 	} else {
10187 		dst_reg->umin_value <<= umin_val;
10188 		dst_reg->umax_value <<= umax_val;
10189 	}
10190 }
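/* Worked example for the <<32 special case above (editor's illustration):
 * a register whose 32-bit bounds are [0, 100] that is shifted left by
 * exactly 32 gets smin_value = 0 << 32 = 0 and
 * smax_value = (s64)100 << 32, preserving usable signed bounds for the
 * common "<<32 s>>32" sign-extension pattern instead of collapsing to
 * [S64_MIN, S64_MAX].
 */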
10191 
10192 static void scalar_min_max_lsh(struct bpf_reg_state *dst_reg,
10193 			       struct bpf_reg_state *src_reg)
10194 {
10195 	u64 umax_val = src_reg->umax_value;
10196 	u64 umin_val = src_reg->umin_value;
10197 
10198 	/* scalar64 calc uses 32bit unshifted bounds so must be called first */
10199 	__scalar64_min_max_lsh(dst_reg, umin_val, umax_val);
10200 	__scalar32_min_max_lsh(dst_reg, umin_val, umax_val);
10201 
10202 	dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val);
10203 	/* We may learn something more from the var_off */
10204 	__update_reg_bounds(dst_reg);
10205 }
10206 
10207 static void scalar32_min_max_rsh(struct bpf_reg_state *dst_reg,
10208 				 struct bpf_reg_state *src_reg)
10209 {
10210 	struct tnum subreg = tnum_subreg(dst_reg->var_off);
10211 	u32 umax_val = src_reg->u32_max_value;
10212 	u32 umin_val = src_reg->u32_min_value;
10213 
10214 	/* BPF_RSH is an unsigned shift.  If the value in dst_reg might
10215 	 * be negative, then either:
10216 	 * 1) src_reg might be zero, so the sign bit of the result is
10217 	 *    unknown, so we lose our signed bounds
10218 	 * 2) it's known negative, thus the unsigned bounds capture the
10219 	 *    signed bounds
10220 	 * 3) the signed bounds cross zero, so they tell us nothing
10221 	 *    about the result
10222 	 * If the value in dst_reg is known nonnegative, then again the
10223 	 * unsigned bounds capture the signed bounds.
10224 	 * Thus, in all cases it suffices to blow away our signed bounds
10225 	 * and rely on inferring new ones from the unsigned bounds and
10226 	 * var_off of the result.
10227 	 */
10228 	dst_reg->s32_min_value = S32_MIN;
10229 	dst_reg->s32_max_value = S32_MAX;
10230 
10231 	dst_reg->var_off = tnum_rshift(subreg, umin_val);
10232 	dst_reg->u32_min_value >>= umax_val;
10233 	dst_reg->u32_max_value >>= umin_val;
10234 
10235 	__mark_reg64_unbounded(dst_reg);
10236 	__update_reg32_bounds(dst_reg);
10237 }
10238 
10239 static void scalar_min_max_rsh(struct bpf_reg_state *dst_reg,
10240 			       struct bpf_reg_state *src_reg)
10241 {
10242 	u64 umax_val = src_reg->umax_value;
10243 	u64 umin_val = src_reg->umin_value;
10244 
10245 	/* BPF_RSH is an unsigned shift.  If the value in dst_reg might
10246 	 * be negative, then either:
10247 	 * 1) src_reg might be zero, so the sign bit of the result is
10248 	 *    unknown, so we lose our signed bounds
10249 	 * 2) it's known negative, thus the unsigned bounds capture the
10250 	 *    signed bounds
10251 	 * 3) the signed bounds cross zero, so they tell us nothing
10252 	 *    about the result
10253 	 * If the value in dst_reg is known nonnegative, then again the
10254 	 * unsigned bounds capture the signed bounds.
10255 	 * Thus, in all cases it suffices to blow away our signed bounds
10256 	 * and rely on inferring new ones from the unsigned bounds and
10257 	 * var_off of the result.
10258 	 */
10259 	dst_reg->smin_value = S64_MIN;
10260 	dst_reg->smax_value = S64_MAX;
10261 	dst_reg->var_off = tnum_rshift(dst_reg->var_off, umin_val);
10262 	dst_reg->umin_value >>= umax_val;
10263 	dst_reg->umax_value >>= umin_val;
10264 
10265 	/* It's not easy to operate on alu32 bounds here because it depends
10266 	 * on bits being shifted in. Take the easy way out and mark unbounded
10267 	 * so we can recalculate later from tnum.
10268 	 */
10269 	__mark_reg32_unbounded(dst_reg);
10270 	__update_reg_bounds(dst_reg);
10271 }
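/* Worked example for the unsigned right shift above (editor's
 * illustration): with dst_reg in [umin=8, umax=64] and a shift amount in
 * [1, 3], the smallest possible result is 8 >> 3 = 1 and the largest is
 * 64 >> 1 = 32, hence umin >>= umax_val and umax >>= umin_val.
 */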
10272 
10273 static void scalar32_min_max_arsh(struct bpf_reg_state *dst_reg,
10274 				  struct bpf_reg_state *src_reg)
10275 {
10276 	u64 umin_val = src_reg->u32_min_value;
10277 
10278 	/* Upon reaching here, src_known is true and
10279 	 * umax_val is equal to umin_val.
10280 	 */
10281 	dst_reg->s32_min_value = (u32)(((s32)dst_reg->s32_min_value) >> umin_val);
10282 	dst_reg->s32_max_value = (u32)(((s32)dst_reg->s32_max_value) >> umin_val);
10283 
10284 	dst_reg->var_off = tnum_arshift(tnum_subreg(dst_reg->var_off), umin_val, 32);
10285 
10286 	/* blow away the dst_reg umin_value/umax_value and rely on
10287 	 * dst_reg var_off to refine the result.
10288 	 */
10289 	dst_reg->u32_min_value = 0;
10290 	dst_reg->u32_max_value = U32_MAX;
10291 
10292 	__mark_reg64_unbounded(dst_reg);
10293 	__update_reg32_bounds(dst_reg);
10294 }
10295 
10296 static void scalar_min_max_arsh(struct bpf_reg_state *dst_reg,
10297 				struct bpf_reg_state *src_reg)
10298 {
10299 	u64 umin_val = src_reg->umin_value;
10300 
10301 	/* Upon reaching here, src_known is true and umax_val is equal
10302 	 * to umin_val.
10303 	 */
10304 	dst_reg->smin_value >>= umin_val;
10305 	dst_reg->smax_value >>= umin_val;
10306 
10307 	dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val, 64);
10308 
10309 	/* blow away the dst_reg umin_value/umax_value and rely on
10310 	 * dst_reg var_off to refine the result.
10311 	 */
10312 	dst_reg->umin_value = 0;
10313 	dst_reg->umax_value = U64_MAX;
10314 
10315 	/* It's not easy to operate on alu32 bounds here because it depends
10316 	 * on bits being shifted in from the upper 32 bits. Take the easy way out
10317 	 * and mark unbounded so we can recalculate later from tnum.
10318 	 */
10319 	__mark_reg32_unbounded(dst_reg);
10320 	__update_reg_bounds(dst_reg);
10321 }
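/* Worked example for the arithmetic right shift above (editor's
 * illustration): the shift amount is a known constant here, so e.g. a
 * register with signed bounds [-16, 24] shifted by 2 ends up with
 * [-16 >> 2, 24 >> 2] = [-4, 6]; the unsigned bounds are discarded and
 * re-derived from var_off afterwards.
 */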
10322 
10323 /* WARNING: This function does calculations on 64-bit values, but the actual
10324  * execution may occur on 32-bit values. Therefore, things like bitshifts
10325  * need extra checks in the 32-bit case.
10326  */
10327 static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
10328 				      struct bpf_insn *insn,
10329 				      struct bpf_reg_state *dst_reg,
10330 				      struct bpf_reg_state src_reg)
10331 {
10332 	struct bpf_reg_state *regs = cur_regs(env);
10333 	u8 opcode = BPF_OP(insn->code);
10334 	bool src_known;
10335 	s64 smin_val, smax_val;
10336 	u64 umin_val, umax_val;
10337 	s32 s32_min_val, s32_max_val;
10338 	u32 u32_min_val, u32_max_val;
10339 	u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
10340 	bool alu32 = (BPF_CLASS(insn->code) != BPF_ALU64);
10341 	int ret;
10342 
10343 	smin_val = src_reg.smin_value;
10344 	smax_val = src_reg.smax_value;
10345 	umin_val = src_reg.umin_value;
10346 	umax_val = src_reg.umax_value;
10347 
10348 	s32_min_val = src_reg.s32_min_value;
10349 	s32_max_val = src_reg.s32_max_value;
10350 	u32_min_val = src_reg.u32_min_value;
10351 	u32_max_val = src_reg.u32_max_value;
10352 
10353 	if (alu32) {
10354 		src_known = tnum_subreg_is_const(src_reg.var_off);
10355 		if ((src_known &&
10356 		     (s32_min_val != s32_max_val || u32_min_val != u32_max_val)) ||
10357 		    s32_min_val > s32_max_val || u32_min_val > u32_max_val) {
10358 			/* Taint dst register if offset had invalid bounds
10359 			 * derived from e.g. dead branches.
10360 			 */
10361 			__mark_reg_unknown(env, dst_reg);
10362 			return 0;
10363 		}
10364 	} else {
10365 		src_known = tnum_is_const(src_reg.var_off);
10366 		if ((src_known &&
10367 		     (smin_val != smax_val || umin_val != umax_val)) ||
10368 		    smin_val > smax_val || umin_val > umax_val) {
10369 			/* Taint dst register if offset had invalid bounds
10370 			 * derived from e.g. dead branches.
10371 			 */
10372 			__mark_reg_unknown(env, dst_reg);
10373 			return 0;
10374 		}
10375 	}
10376 
10377 	if (!src_known &&
10378 	    opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) {
10379 		__mark_reg_unknown(env, dst_reg);
10380 		return 0;
10381 	}
10382 
10383 	if (sanitize_needed(opcode)) {
10384 		ret = sanitize_val_alu(env, insn);
10385 		if (ret < 0)
10386 			return sanitize_err(env, insn, ret, NULL, NULL);
10387 	}
10388 
10389 	/* Calculate sign/unsigned bounds and tnum for alu32 and alu64 bit ops.
10390 	 * There are two classes of instructions: for the first class we track both
10391 	 * alu32 and alu64 sign/unsigned bounds independently; this provides the
10392 	 * greatest amount of precision when alu operations are mixed with jmp32
10393 	 * operations. These operations are BPF_ADD, BPF_SUB, BPF_MUL, BPF_AND,
10394 	 * BPF_OR and BPF_XOR. This is possible because these ops have fairly easy to
10395 	 * understand and calculate behavior in both 32-bit and 64-bit alu ops.
10396 	 * See alu32 verifier tests for examples. The second class of
10397 	 * operations, BPF_LSH, BPF_RSH, and BPF_ARSH, however are not so easy
10398 	 * with regards to tracking sign/unsigned bounds because the bits may
10399 	 * cross subreg boundaries in the alu64 case. When this happens we mark
10400 	 * the reg unbounded in the subreg bound space and use the resulting
10401 	 * tnum to calculate an approximation of the sign/unsigned bounds.
10402 	 */
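	/* E.g. for a 64-bit BPF_ADD both scalar32_min_max_add() and
	 * scalar_min_max_add() run below, keeping the 32-bit subreg bounds
	 * usable for later jmp32 checks, whereas for BPF_LSH/RSH/ARSH only
	 * the width actually being shifted is tracked and the other set of
	 * bounds is recomputed from the tnum (editor's illustration).
	 */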
10403 	switch (opcode) {
10404 	case BPF_ADD:
10405 		scalar32_min_max_add(dst_reg, &src_reg);
10406 		scalar_min_max_add(dst_reg, &src_reg);
10407 		dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);
10408 		break;
10409 	case BPF_SUB:
10410 		scalar32_min_max_sub(dst_reg, &src_reg);
10411 		scalar_min_max_sub(dst_reg, &src_reg);
10412 		dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off);
10413 		break;
10414 	case BPF_MUL:
10415 		dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off);
10416 		scalar32_min_max_mul(dst_reg, &src_reg);
10417 		scalar_min_max_mul(dst_reg, &src_reg);
10418 		break;
10419 	case BPF_AND:
10420 		dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off);
10421 		scalar32_min_max_and(dst_reg, &src_reg);
10422 		scalar_min_max_and(dst_reg, &src_reg);
10423 		break;
10424 	case BPF_OR:
10425 		dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off);
10426 		scalar32_min_max_or(dst_reg, &src_reg);
10427 		scalar_min_max_or(dst_reg, &src_reg);
10428 		break;
10429 	case BPF_XOR:
10430 		dst_reg->var_off = tnum_xor(dst_reg->var_off, src_reg.var_off);
10431 		scalar32_min_max_xor(dst_reg, &src_reg);
10432 		scalar_min_max_xor(dst_reg, &src_reg);
10433 		break;
10434 	case BPF_LSH:
10435 		if (umax_val >= insn_bitness) {
10436 			/* Shifts greater than 31 or 63 are undefined.
10437 			 * This includes shifts by a negative number.
10438 			 */
10439 			mark_reg_unknown(env, regs, insn->dst_reg);
10440 			break;
10441 		}
10442 		if (alu32)
10443 			scalar32_min_max_lsh(dst_reg, &src_reg);
10444 		else
10445 			scalar_min_max_lsh(dst_reg, &src_reg);
10446 		break;
10447 	case BPF_RSH:
10448 		if (umax_val >= insn_bitness) {
10449 			/* Shifts greater than 31 or 63 are undefined.
10450 			 * This includes shifts by a negative number.
10451 			 */
10452 			mark_reg_unknown(env, regs, insn->dst_reg);
10453 			break;
10454 		}
10455 		if (alu32)
10456 			scalar32_min_max_rsh(dst_reg, &src_reg);
10457 		else
10458 			scalar_min_max_rsh(dst_reg, &src_reg);
10459 		break;
10460 	case BPF_ARSH:
10461 		if (umax_val >= insn_bitness) {
10462 			/* Shifts greater than 31 or 63 are undefined.
10463 			 * This includes shifts by a negative number.
10464 			 */
10465 			mark_reg_unknown(env, regs, insn->dst_reg);
10466 			break;
10467 		}
10468 		if (alu32)
10469 			scalar32_min_max_arsh(dst_reg, &src_reg);
10470 		else
10471 			scalar_min_max_arsh(dst_reg, &src_reg);
10472 		break;
10473 	default:
10474 		mark_reg_unknown(env, regs, insn->dst_reg);
10475 		break;
10476 	}
10477 
10478 	/* ALU32 ops are zero extended into 64bit register */
10479 	if (alu32)
10480 		zext_32_to_64(dst_reg);
10481 	reg_bounds_sync(dst_reg);
10482 	return 0;
10483 }
10484 
10485 /* Handles ALU ops other than BPF_END, BPF_NEG and BPF_MOV: computes new min/max
10486  * and var_off.
10487  */
10488 static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
10489 				   struct bpf_insn *insn)
10490 {
10491 	struct bpf_verifier_state *vstate = env->cur_state;
10492 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
10493 	struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg;
10494 	struct bpf_reg_state *ptr_reg = NULL, off_reg = {0};
10495 	u8 opcode = BPF_OP(insn->code);
10496 	int err;
10497 
10498 	dst_reg = &regs[insn->dst_reg];
10499 	src_reg = NULL;
10500 	if (dst_reg->type != SCALAR_VALUE)
10501 		ptr_reg = dst_reg;
10502 	else
10503 		/* Make sure ID is cleared otherwise dst_reg min/max could be
10504 		 * incorrectly propagated into other registers by find_equal_scalars()
10505 		 */
10506 		dst_reg->id = 0;
10507 	if (BPF_SRC(insn->code) == BPF_X) {
10508 		src_reg = &regs[insn->src_reg];
10509 		if (src_reg->type != SCALAR_VALUE) {
10510 			if (dst_reg->type != SCALAR_VALUE) {
10511 				/* Combining two pointers by any ALU op yields
10512 				 * an arbitrary scalar. Disallow all math except
10513 				 * pointer subtraction
10514 				 */
10515 				if (opcode == BPF_SUB && env->allow_ptr_leaks) {
10516 					mark_reg_unknown(env, regs, insn->dst_reg);
10517 					return 0;
10518 				}
10519 				verbose(env, "R%d pointer %s pointer prohibited\n",
10520 					insn->dst_reg,
10521 					bpf_alu_string[opcode >> 4]);
10522 				return -EACCES;
10523 			} else {
10524 				/* scalar += pointer
10525 				 * This is legal, but we have to reverse our
10526 				 * src/dest handling in computing the range
10527 				 */
10528 				err = mark_chain_precision(env, insn->dst_reg);
10529 				if (err)
10530 					return err;
10531 				return adjust_ptr_min_max_vals(env, insn,
10532 							       src_reg, dst_reg);
10533 			}
10534 		} else if (ptr_reg) {
10535 			/* pointer += scalar */
10536 			err = mark_chain_precision(env, insn->src_reg);
10537 			if (err)
10538 				return err;
10539 			return adjust_ptr_min_max_vals(env, insn,
10540 						       dst_reg, src_reg);
10541 		} else if (dst_reg->precise) {
10542 			/* if dst_reg is precise, src_reg should be precise as well */
10543 			err = mark_chain_precision(env, insn->src_reg);
10544 			if (err)
10545 				return err;
10546 		}
10547 	} else {
10548 		/* Pretend the src is a reg with a known value, since we only
10549 		 * need to be able to read from this state.
10550 		 */
10551 		off_reg.type = SCALAR_VALUE;
10552 		__mark_reg_known(&off_reg, insn->imm);
10553 		src_reg = &off_reg;
10554 		if (ptr_reg) /* pointer += K */
10555 			return adjust_ptr_min_max_vals(env, insn,
10556 						       ptr_reg, src_reg);
10557 	}
10558 
10559 	/* Got here implies adding two SCALAR_VALUEs */
10560 	if (WARN_ON_ONCE(ptr_reg)) {
10561 		print_verifier_state(env, state, true);
10562 		verbose(env, "verifier internal error: unexpected ptr_reg\n");
10563 		return -EINVAL;
10564 	}
10565 	if (WARN_ON(!src_reg)) {
10566 		print_verifier_state(env, state, true);
10567 		verbose(env, "verifier internal error: no src_reg\n");
10568 		return -EINVAL;
10569 	}
10570 	return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg);
10571 }
10572 
10573 /* check validity of 32-bit and 64-bit arithmetic operations */
10574 static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
10575 {
10576 	struct bpf_reg_state *regs = cur_regs(env);
10577 	u8 opcode = BPF_OP(insn->code);
10578 	int err;
10579 
10580 	if (opcode == BPF_END || opcode == BPF_NEG) {
10581 		if (opcode == BPF_NEG) {
10582 			if (BPF_SRC(insn->code) != BPF_K ||
10583 			    insn->src_reg != BPF_REG_0 ||
10584 			    insn->off != 0 || insn->imm != 0) {
10585 				verbose(env, "BPF_NEG uses reserved fields\n");
10586 				return -EINVAL;
10587 			}
10588 		} else {
10589 			if (insn->src_reg != BPF_REG_0 || insn->off != 0 ||
10590 			    (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) ||
10591 			    BPF_CLASS(insn->code) == BPF_ALU64) {
10592 				verbose(env, "BPF_END uses reserved fields\n");
10593 				return -EINVAL;
10594 			}
10595 		}
10596 
10597 		/* check src operand */
10598 		err = check_reg_arg(env, insn->dst_reg, SRC_OP);
10599 		if (err)
10600 			return err;
10601 
10602 		if (is_pointer_value(env, insn->dst_reg)) {
10603 			verbose(env, "R%d pointer arithmetic prohibited\n",
10604 				insn->dst_reg);
10605 			return -EACCES;
10606 		}
10607 
10608 		/* check dest operand */
10609 		err = check_reg_arg(env, insn->dst_reg, DST_OP);
10610 		if (err)
10611 			return err;
10612 
10613 	} else if (opcode == BPF_MOV) {
10614 
10615 		if (BPF_SRC(insn->code) == BPF_X) {
10616 			if (insn->imm != 0 || insn->off != 0) {
10617 				verbose(env, "BPF_MOV uses reserved fields\n");
10618 				return -EINVAL;
10619 			}
10620 
10621 			/* check src operand */
10622 			err = check_reg_arg(env, insn->src_reg, SRC_OP);
10623 			if (err)
10624 				return err;
10625 		} else {
10626 			if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
10627 				verbose(env, "BPF_MOV uses reserved fields\n");
10628 				return -EINVAL;
10629 			}
10630 		}
10631 
10632 		/* check dest operand, mark as required later */
10633 		err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
10634 		if (err)
10635 			return err;
10636 
10637 		if (BPF_SRC(insn->code) == BPF_X) {
10638 			struct bpf_reg_state *src_reg = regs + insn->src_reg;
10639 			struct bpf_reg_state *dst_reg = regs + insn->dst_reg;
10640 
10641 			if (BPF_CLASS(insn->code) == BPF_ALU64) {
10642 				/* case: R1 = R2
10643 				 * copy register state to dest reg
10644 				 */
10645 				if (src_reg->type == SCALAR_VALUE && !src_reg->id)
10646 					/* Assign src and dst registers the same ID
10647 					 * that will be used by find_equal_scalars()
10648 					 * to propagate min/max range.
10649 					 */
10650 					src_reg->id = ++env->id_gen;
10651 				*dst_reg = *src_reg;
10652 				dst_reg->live |= REG_LIVE_WRITTEN;
10653 				dst_reg->subreg_def = DEF_NOT_SUBREG;
10654 			} else {
10655 				/* R1 = (u32) R2 */
10656 				if (is_pointer_value(env, insn->src_reg)) {
10657 					verbose(env,
10658 						"R%d partial copy of pointer\n",
10659 						insn->src_reg);
10660 					return -EACCES;
10661 				} else if (src_reg->type == SCALAR_VALUE) {
10662 					*dst_reg = *src_reg;
10663 					/* Make sure ID is cleared otherwise
10664 					 * dst_reg min/max could be incorrectly
10665 					 * propagated into src_reg by find_equal_scalars()
10666 					 */
10667 					dst_reg->id = 0;
10668 					dst_reg->live |= REG_LIVE_WRITTEN;
10669 					dst_reg->subreg_def = env->insn_idx + 1;
10670 				} else {
10671 					mark_reg_unknown(env, regs,
10672 							 insn->dst_reg);
10673 				}
10674 				zext_32_to_64(dst_reg);
10675 				reg_bounds_sync(dst_reg);
10676 			}
10677 		} else {
10678 			/* case: R = imm
10679 			 * remember the value we stored into this reg
10680 			 */
10681 			/* clear any state __mark_reg_known doesn't set */
10682 			mark_reg_unknown(env, regs, insn->dst_reg);
10683 			regs[insn->dst_reg].type = SCALAR_VALUE;
10684 			if (BPF_CLASS(insn->code) == BPF_ALU64) {
10685 				__mark_reg_known(regs + insn->dst_reg,
10686 						 insn->imm);
10687 			} else {
10688 				__mark_reg_known(regs + insn->dst_reg,
10689 						 (u32)insn->imm);
10690 			}
10691 		}
10692 
10693 	} else if (opcode > BPF_END) {
10694 		verbose(env, "invalid BPF_ALU opcode %x\n", opcode);
10695 		return -EINVAL;
10696 
10697 	} else {	/* all other ALU ops: and, sub, xor, add, ... */
10698 
10699 		if (BPF_SRC(insn->code) == BPF_X) {
10700 			if (insn->imm != 0 || insn->off != 0) {
10701 				verbose(env, "BPF_ALU uses reserved fields\n");
10702 				return -EINVAL;
10703 			}
10704 			/* check src1 operand */
10705 			err = check_reg_arg(env, insn->src_reg, SRC_OP);
10706 			if (err)
10707 				return err;
10708 		} else {
10709 			if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
10710 				verbose(env, "BPF_ALU uses reserved fields\n");
10711 				return -EINVAL;
10712 			}
10713 		}
10714 
10715 		/* check src2 operand */
10716 		err = check_reg_arg(env, insn->dst_reg, SRC_OP);
10717 		if (err)
10718 			return err;
10719 
10720 		if ((opcode == BPF_MOD || opcode == BPF_DIV) &&
10721 		    BPF_SRC(insn->code) == BPF_K && insn->imm == 0) {
10722 			verbose(env, "div by zero\n");
10723 			return -EINVAL;
10724 		}
10725 
10726 		if ((opcode == BPF_LSH || opcode == BPF_RSH ||
10727 		     opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
10728 			int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;
10729 
10730 			if (insn->imm < 0 || insn->imm >= size) {
10731 				verbose(env, "invalid shift %d\n", insn->imm);
10732 				return -EINVAL;
10733 			}
10734 		}
10735 
10736 		/* check dest operand */
10737 		err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
10738 		if (err)
10739 			return err;
10740 
10741 		return adjust_reg_min_max_vals(env, insn);
10742 	}
10743 
10744 	return 0;
10745 }
10746 
10747 static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
10748 				   struct bpf_reg_state *dst_reg,
10749 				   enum bpf_reg_type type,
10750 				   bool range_right_open)
10751 {
10752 	struct bpf_func_state *state;
10753 	struct bpf_reg_state *reg;
10754 	int new_range;
10755 
10756 	if (dst_reg->off < 0 ||
10757 	    (dst_reg->off == 0 && range_right_open))
10758 		/* This doesn't give us any range */
10759 		return;
10760 
10761 	if (dst_reg->umax_value > MAX_PACKET_OFF ||
10762 	    dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF)
10763 		/* Risk of overflow.  For instance, ptr + (1<<63) may be less
10764 		 * than pkt_end, but that's because it's also less than pkt.
10765 		 */
10766 		return;
10767 
10768 	new_range = dst_reg->off;
10769 	if (range_right_open)
10770 		new_range++;
10771 
10772 	/* Examples for register markings:
10773 	 *
10774 	 * pkt_data in dst register:
10775 	 *
10776 	 *   r2 = r3;
10777 	 *   r2 += 8;
10778 	 *   if (r2 > pkt_end) goto <handle exception>
10779 	 *   <access okay>
10780 	 *
10781 	 *   r2 = r3;
10782 	 *   r2 += 8;
10783 	 *   if (r2 < pkt_end) goto <access okay>
10784 	 *   <handle exception>
10785 	 *
10786 	 *   Where:
10787 	 *     r2 == dst_reg, pkt_end == src_reg
10788 	 *     r2=pkt(id=n,off=8,r=0)
10789 	 *     r3=pkt(id=n,off=0,r=0)
10790 	 *
10791 	 * pkt_data in src register:
10792 	 *
10793 	 *   r2 = r3;
10794 	 *   r2 += 8;
10795 	 *   if (pkt_end >= r2) goto <access okay>
10796 	 *   <handle exception>
10797 	 *
10798 	 *   r2 = r3;
10799 	 *   r2 += 8;
10800 	 *   if (pkt_end <= r2) goto <handle exception>
10801 	 *   <access okay>
10802 	 *
10803 	 *   Where:
10804 	 *     pkt_end == dst_reg, r2 == src_reg
10805 	 *     r2=pkt(id=n,off=8,r=0)
10806 	 *     r3=pkt(id=n,off=0,r=0)
10807 	 *
10808 	 * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8)
10809 	 * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8)
10810 	 * and [r3, r3 + 8-1) respectively is safe to access depending on
10811 	 * the check.
10812 	 */
10813 
10814 	/* If our ids match, then we must have the same max_value.  And we
10815 	 * don't care about the other reg's fixed offset, since if it's too big
10816 	 * the range won't allow anything.
10817 	 * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16.
10818 	 */
10819 	bpf_for_each_reg_in_vstate(vstate, state, reg, ({
10820 		if (reg->type == type && reg->id == dst_reg->id)
10821 			/* keep the maximum range already checked */
10822 			reg->range = max(reg->range, new_range);
10823 	}));
10824 }
10825 
10826 static int is_branch32_taken(struct bpf_reg_state *reg, u32 val, u8 opcode)
10827 {
10828 	struct tnum subreg = tnum_subreg(reg->var_off);
10829 	s32 sval = (s32)val;
10830 
10831 	switch (opcode) {
10832 	case BPF_JEQ:
10833 		if (tnum_is_const(subreg))
10834 			return !!tnum_equals_const(subreg, val);
10835 		break;
10836 	case BPF_JNE:
10837 		if (tnum_is_const(subreg))
10838 			return !tnum_equals_const(subreg, val);
10839 		break;
10840 	case BPF_JSET:
10841 		if ((~subreg.mask & subreg.value) & val)
10842 			return 1;
10843 		if (!((subreg.mask | subreg.value) & val))
10844 			return 0;
10845 		break;
10846 	case BPF_JGT:
10847 		if (reg->u32_min_value > val)
10848 			return 1;
10849 		else if (reg->u32_max_value <= val)
10850 			return 0;
10851 		break;
10852 	case BPF_JSGT:
10853 		if (reg->s32_min_value > sval)
10854 			return 1;
10855 		else if (reg->s32_max_value <= sval)
10856 			return 0;
10857 		break;
10858 	case BPF_JLT:
10859 		if (reg->u32_max_value < val)
10860 			return 1;
10861 		else if (reg->u32_min_value >= val)
10862 			return 0;
10863 		break;
10864 	case BPF_JSLT:
10865 		if (reg->s32_max_value < sval)
10866 			return 1;
10867 		else if (reg->s32_min_value >= sval)
10868 			return 0;
10869 		break;
10870 	case BPF_JGE:
10871 		if (reg->u32_min_value >= val)
10872 			return 1;
10873 		else if (reg->u32_max_value < val)
10874 			return 0;
10875 		break;
10876 	case BPF_JSGE:
10877 		if (reg->s32_min_value >= sval)
10878 			return 1;
10879 		else if (reg->s32_max_value < sval)
10880 			return 0;
10881 		break;
10882 	case BPF_JLE:
10883 		if (reg->u32_max_value <= val)
10884 			return 1;
10885 		else if (reg->u32_min_value > val)
10886 			return 0;
10887 		break;
10888 	case BPF_JSLE:
10889 		if (reg->s32_max_value <= sval)
10890 			return 1;
10891 		else if (reg->s32_min_value > sval)
10892 			return 0;
10893 		break;
10894 	}
10895 
10896 	return -1;
10897 }
10898 
10899 
10900 static int is_branch64_taken(struct bpf_reg_state *reg, u64 val, u8 opcode)
10901 {
10902 	s64 sval = (s64)val;
10903 
10904 	switch (opcode) {
10905 	case BPF_JEQ:
10906 		if (tnum_is_const(reg->var_off))
10907 			return !!tnum_equals_const(reg->var_off, val);
10908 		break;
10909 	case BPF_JNE:
10910 		if (tnum_is_const(reg->var_off))
10911 			return !tnum_equals_const(reg->var_off, val);
10912 		break;
10913 	case BPF_JSET:
10914 		if ((~reg->var_off.mask & reg->var_off.value) & val)
10915 			return 1;
10916 		if (!((reg->var_off.mask | reg->var_off.value) & val))
10917 			return 0;
10918 		break;
10919 	case BPF_JGT:
10920 		if (reg->umin_value > val)
10921 			return 1;
10922 		else if (reg->umax_value <= val)
10923 			return 0;
10924 		break;
10925 	case BPF_JSGT:
10926 		if (reg->smin_value > sval)
10927 			return 1;
10928 		else if (reg->smax_value <= sval)
10929 			return 0;
10930 		break;
10931 	case BPF_JLT:
10932 		if (reg->umax_value < val)
10933 			return 1;
10934 		else if (reg->umin_value >= val)
10935 			return 0;
10936 		break;
10937 	case BPF_JSLT:
10938 		if (reg->smax_value < sval)
10939 			return 1;
10940 		else if (reg->smin_value >= sval)
10941 			return 0;
10942 		break;
10943 	case BPF_JGE:
10944 		if (reg->umin_value >= val)
10945 			return 1;
10946 		else if (reg->umax_value < val)
10947 			return 0;
10948 		break;
10949 	case BPF_JSGE:
10950 		if (reg->smin_value >= sval)
10951 			return 1;
10952 		else if (reg->smax_value < sval)
10953 			return 0;
10954 		break;
10955 	case BPF_JLE:
10956 		if (reg->umax_value <= val)
10957 			return 1;
10958 		else if (reg->umin_value > val)
10959 			return 0;
10960 		break;
10961 	case BPF_JSLE:
10962 		if (reg->smax_value <= sval)
10963 			return 1;
10964 		else if (reg->smin_value > sval)
10965 			return 0;
10966 		break;
10967 	}
10968 
10969 	return -1;
10970 }
10971 
10972 /* compute branch direction of the expression "if (reg opcode val) goto target;"
10973  * and return:
10974  *  1 - branch will be taken and "goto target" will be executed
10975  *  0 - branch will not be taken and fall-through to next insn
10976  * -1 - unknown. Example: "if (reg < 5)" is unknown when the register's
10977  *      value range is [0,10]
10978  */
10979 static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode,
10980 			   bool is_jmp32)
10981 {
10982 	if (__is_pointer_value(false, reg)) {
10983 		if (!reg_type_not_null(reg->type))
10984 			return -1;
10985 
10986 		/* If pointer is valid tests against zero will fail so we can
10987 		 * use this to direct branch taken.
10988 		 */
10989 		if (val != 0)
10990 			return -1;
10991 
10992 		switch (opcode) {
10993 		case BPF_JEQ:
10994 			return 0;
10995 		case BPF_JNE:
10996 			return 1;
10997 		default:
10998 			return -1;
10999 		}
11000 	}
11001 
11002 	if (is_jmp32)
11003 		return is_branch32_taken(reg, val, opcode);
11004 	return is_branch64_taken(reg, val, opcode);
11005 }
11006 
11007 static int flip_opcode(u32 opcode)
11008 {
11009 	/* How can we transform "a <op> b" into "b <op> a"? */
11010 	static const u8 opcode_flip[16] = {
11011 		/* these stay the same */
11012 		[BPF_JEQ  >> 4] = BPF_JEQ,
11013 		[BPF_JNE  >> 4] = BPF_JNE,
11014 		[BPF_JSET >> 4] = BPF_JSET,
11015 		/* these swap "lesser" and "greater" (L and G in the opcodes) */
11016 		[BPF_JGE  >> 4] = BPF_JLE,
11017 		[BPF_JGT  >> 4] = BPF_JLT,
11018 		[BPF_JLE  >> 4] = BPF_JGE,
11019 		[BPF_JLT  >> 4] = BPF_JGT,
11020 		[BPF_JSGE >> 4] = BPF_JSLE,
11021 		[BPF_JSGT >> 4] = BPF_JSLT,
11022 		[BPF_JSLE >> 4] = BPF_JSGE,
11023 		[BPF_JSLT >> 4] = BPF_JSGT
11024 	};
11025 	return opcode_flip[opcode >> 4];
11026 }
11027 
11028 static int is_pkt_ptr_branch_taken(struct bpf_reg_state *dst_reg,
11029 				   struct bpf_reg_state *src_reg,
11030 				   u8 opcode)
11031 {
11032 	struct bpf_reg_state *pkt;
11033 
11034 	if (src_reg->type == PTR_TO_PACKET_END) {
11035 		pkt = dst_reg;
11036 	} else if (dst_reg->type == PTR_TO_PACKET_END) {
11037 		pkt = src_reg;
11038 		opcode = flip_opcode(opcode);
11039 	} else {
11040 		return -1;
11041 	}
11042 
11043 	if (pkt->range >= 0)
11044 		return -1;
11045 
11046 	switch (opcode) {
11047 	case BPF_JLE:
11048 		/* pkt <= pkt_end */
11049 		fallthrough;
11050 	case BPF_JGT:
11051 		/* pkt > pkt_end */
11052 		if (pkt->range == BEYOND_PKT_END)
11053 			/* pkt has at least one extra byte beyond pkt_end */
11054 			return opcode == BPF_JGT;
11055 		break;
11056 	case BPF_JLT:
11057 		/* pkt < pkt_end */
11058 		fallthrough;
11059 	case BPF_JGE:
11060 		/* pkt >= pkt_end */
11061 		if (pkt->range == BEYOND_PKT_END || pkt->range == AT_PKT_END)
11062 			return opcode == BPF_JGE;
11063 		break;
11064 	}
11065 	return -1;
11066 }
11067 
11068 /* Adjusts the register min/max values in the case that the dst_reg is the
11069  * variable register that we are working on, and src_reg is a constant or we're
11070  * simply doing a BPF_K check.
11071  * In JEQ/JNE cases we also adjust the var_off values.
11072  */
11073 static void reg_set_min_max(struct bpf_reg_state *true_reg,
11074 			    struct bpf_reg_state *false_reg,
11075 			    u64 val, u32 val32,
11076 			    u8 opcode, bool is_jmp32)
11077 {
11078 	struct tnum false_32off = tnum_subreg(false_reg->var_off);
11079 	struct tnum false_64off = false_reg->var_off;
11080 	struct tnum true_32off = tnum_subreg(true_reg->var_off);
11081 	struct tnum true_64off = true_reg->var_off;
11082 	s64 sval = (s64)val;
11083 	s32 sval32 = (s32)val32;
11084 
11085 	/* If the dst_reg is a pointer, we can't learn anything about its
11086 	 * variable offset from the compare (unless src_reg were a pointer into
11087 	 * the same object, but we don't bother with that).
11088 	 * Since false_reg and true_reg have the same type by construction, we
11089 	 * only need to check one of them for pointerness.
11090 	 */
11091 	if (__is_pointer_value(false, false_reg))
11092 		return;
11093 
11094 	switch (opcode) {
11095 	/* JEQ/JNE comparison doesn't change the register equivalence.
11096 	 *
11097 	 * r1 = r2;
11098 	 * if (r1 == 42) goto label;
11099 	 * ...
11100 	 * label: // here both r1 and r2 are known to be 42.
11101 	 *
11102 	 * Hence when marking a register as known, preserve its ID.
11103 	 */
11104 	case BPF_JEQ:
11105 		if (is_jmp32) {
11106 			__mark_reg32_known(true_reg, val32);
11107 			true_32off = tnum_subreg(true_reg->var_off);
11108 		} else {
11109 			___mark_reg_known(true_reg, val);
11110 			true_64off = true_reg->var_off;
11111 		}
11112 		break;
11113 	case BPF_JNE:
11114 		if (is_jmp32) {
11115 			__mark_reg32_known(false_reg, val32);
11116 			false_32off = tnum_subreg(false_reg->var_off);
11117 		} else {
11118 			___mark_reg_known(false_reg, val);
11119 			false_64off = false_reg->var_off;
11120 		}
11121 		break;
11122 	case BPF_JSET:
11123 		if (is_jmp32) {
11124 			false_32off = tnum_and(false_32off, tnum_const(~val32));
11125 			if (is_power_of_2(val32))
11126 				true_32off = tnum_or(true_32off,
11127 						     tnum_const(val32));
11128 		} else {
11129 			false_64off = tnum_and(false_64off, tnum_const(~val));
11130 			if (is_power_of_2(val))
11131 				true_64off = tnum_or(true_64off,
11132 						     tnum_const(val));
11133 		}
11134 		break;
11135 	case BPF_JGE:
11136 	case BPF_JGT:
11137 	{
11138 		if (is_jmp32) {
11139 			u32 false_umax = opcode == BPF_JGT ? val32  : val32 - 1;
11140 			u32 true_umin = opcode == BPF_JGT ? val32 + 1 : val32;
11141 
11142 			false_reg->u32_max_value = min(false_reg->u32_max_value,
11143 						       false_umax);
11144 			true_reg->u32_min_value = max(true_reg->u32_min_value,
11145 						      true_umin);
11146 		} else {
11147 			u64 false_umax = opcode == BPF_JGT ? val    : val - 1;
11148 			u64 true_umin = opcode == BPF_JGT ? val + 1 : val;
11149 
11150 			false_reg->umax_value = min(false_reg->umax_value, false_umax);
11151 			true_reg->umin_value = max(true_reg->umin_value, true_umin);
11152 		}
11153 		break;
11154 	}
11155 	case BPF_JSGE:
11156 	case BPF_JSGT:
11157 	{
11158 		if (is_jmp32) {
11159 			s32 false_smax = opcode == BPF_JSGT ? sval32    : sval32 - 1;
11160 			s32 true_smin = opcode == BPF_JSGT ? sval32 + 1 : sval32;
11161 
11162 			false_reg->s32_max_value = min(false_reg->s32_max_value, false_smax);
11163 			true_reg->s32_min_value = max(true_reg->s32_min_value, true_smin);
11164 		} else {
11165 			s64 false_smax = opcode == BPF_JSGT ? sval    : sval - 1;
11166 			s64 true_smin = opcode == BPF_JSGT ? sval + 1 : sval;
11167 
11168 			false_reg->smax_value = min(false_reg->smax_value, false_smax);
11169 			true_reg->smin_value = max(true_reg->smin_value, true_smin);
11170 		}
11171 		break;
11172 	}
11173 	case BPF_JLE:
11174 	case BPF_JLT:
11175 	{
11176 		if (is_jmp32) {
11177 			u32 false_umin = opcode == BPF_JLT ? val32  : val32 + 1;
11178 			u32 true_umax = opcode == BPF_JLT ? val32 - 1 : val32;
11179 
11180 			false_reg->u32_min_value = max(false_reg->u32_min_value,
11181 						       false_umin);
11182 			true_reg->u32_max_value = min(true_reg->u32_max_value,
11183 						      true_umax);
11184 		} else {
11185 			u64 false_umin = opcode == BPF_JLT ? val    : val + 1;
11186 			u64 true_umax = opcode == BPF_JLT ? val - 1 : val;
11187 
11188 			false_reg->umin_value = max(false_reg->umin_value, false_umin);
11189 			true_reg->umax_value = min(true_reg->umax_value, true_umax);
11190 		}
11191 		break;
11192 	}
11193 	case BPF_JSLE:
11194 	case BPF_JSLT:
11195 	{
11196 		if (is_jmp32) {
11197 			s32 false_smin = opcode == BPF_JSLT ? sval32    : sval32 + 1;
11198 			s32 true_smax = opcode == BPF_JSLT ? sval32 - 1 : sval32;
11199 
11200 			false_reg->s32_min_value = max(false_reg->s32_min_value, false_smin);
11201 			true_reg->s32_max_value = min(true_reg->s32_max_value, true_smax);
11202 		} else {
11203 			s64 false_smin = opcode == BPF_JSLT ? sval    : sval + 1;
11204 			s64 true_smax = opcode == BPF_JSLT ? sval - 1 : sval;
11205 
11206 			false_reg->smin_value = max(false_reg->smin_value, false_smin);
11207 			true_reg->smax_value = min(true_reg->smax_value, true_smax);
11208 		}
11209 		break;
11210 	}
11211 	default:
11212 		return;
11213 	}
11214 
11215 	if (is_jmp32) {
11216 		false_reg->var_off = tnum_or(tnum_clear_subreg(false_64off),
11217 					     tnum_subreg(false_32off));
11218 		true_reg->var_off = tnum_or(tnum_clear_subreg(true_64off),
11219 					    tnum_subreg(true_32off));
11220 		__reg_combine_32_into_64(false_reg);
11221 		__reg_combine_32_into_64(true_reg);
11222 	} else {
11223 		false_reg->var_off = false_64off;
11224 		true_reg->var_off = true_64off;
11225 		__reg_combine_64_into_32(false_reg);
11226 		__reg_combine_64_into_32(true_reg);
11227 	}
11228 }
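/* Worked example for the branch bounds above (editor's illustration):
 * for "if (r1 > 5) goto l" with r1 a 64-bit scalar in [0, 10], the
 * BPF_JGT case sets the fall-through copy's umax to min(10, 5) = 5 and
 * the taken copy's umin to max(0, 6) = 6, i.e. r1 is [0, 5] on the
 * fall-through path and [6, 10] on the taken path.
 */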
11229 
11230 /* Same as above, but for the case that dst_reg holds a constant and src_reg is
11231  * the variable reg.
11232  */
11233 static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
11234 				struct bpf_reg_state *false_reg,
11235 				u64 val, u32 val32,
11236 				u8 opcode, bool is_jmp32)
11237 {
11238 	opcode = flip_opcode(opcode);
11239 	/* This uses zero as "not present in table"; luckily the zero opcode,
11240 	 * BPF_JA, can't get here.
11241 	 */
11242 	if (opcode)
11243 		reg_set_min_max(true_reg, false_reg, val, val32, opcode, is_jmp32);
11244 }
11245 
11246 /* Regs are known to be equal, so intersect their min/max/var_off */
11247 static void __reg_combine_min_max(struct bpf_reg_state *src_reg,
11248 				  struct bpf_reg_state *dst_reg)
11249 {
11250 	src_reg->umin_value = dst_reg->umin_value = max(src_reg->umin_value,
11251 							dst_reg->umin_value);
11252 	src_reg->umax_value = dst_reg->umax_value = min(src_reg->umax_value,
11253 							dst_reg->umax_value);
11254 	src_reg->smin_value = dst_reg->smin_value = max(src_reg->smin_value,
11255 							dst_reg->smin_value);
11256 	src_reg->smax_value = dst_reg->smax_value = min(src_reg->smax_value,
11257 							dst_reg->smax_value);
11258 	src_reg->var_off = dst_reg->var_off = tnum_intersect(src_reg->var_off,
11259 							     dst_reg->var_off);
11260 	reg_bounds_sync(src_reg);
11261 	reg_bounds_sync(dst_reg);
11262 }
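/* Worked example for the intersection above (editor's illustration): if
 * one register is known to be in [2, 10] and the other in [5, 20], both
 * end up with the intersected range [5, 10] and the intersection of
 * their var_offs.
 */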
11263 
11264 static void reg_combine_min_max(struct bpf_reg_state *true_src,
11265 				struct bpf_reg_state *true_dst,
11266 				struct bpf_reg_state *false_src,
11267 				struct bpf_reg_state *false_dst,
11268 				u8 opcode)
11269 {
11270 	switch (opcode) {
11271 	case BPF_JEQ:
11272 		__reg_combine_min_max(true_src, true_dst);
11273 		break;
11274 	case BPF_JNE:
11275 		__reg_combine_min_max(false_src, false_dst);
11276 		break;
11277 	}
11278 }
11279 
11280 static void mark_ptr_or_null_reg(struct bpf_func_state *state,
11281 				 struct bpf_reg_state *reg, u32 id,
11282 				 bool is_null)
11283 {
11284 	if (type_may_be_null(reg->type) && reg->id == id &&
11285 	    !WARN_ON_ONCE(!reg->id)) {
11286 		/* Old offset (both fixed and variable parts) should have been
11287 		 * known-zero, because we don't allow pointer arithmetic on
11288 		 * pointers that might be NULL. If we see this happening, don't
11289 		 * convert the register.
11290 		 *
11291 		 * But in some cases, some helpers that return local kptrs
11292 		 * advance offset for the returned pointer. In those cases, it
11293 		 * is fine to expect to see reg->off.
11294 		 */
11295 		if (WARN_ON_ONCE(reg->smin_value || reg->smax_value || !tnum_equals_const(reg->var_off, 0)))
11296 			return;
11297 		if (reg->type != (PTR_TO_BTF_ID | MEM_ALLOC | PTR_MAYBE_NULL) && WARN_ON_ONCE(reg->off))
11298 			return;
11299 		if (is_null) {
11300 			reg->type = SCALAR_VALUE;
11301 			/* We don't need id and ref_obj_id from this point
11302 			 * onwards anymore, so reset them to give state
11303 			 * pruning a chance to take effect.
11304 			 */
11305 			reg->id = 0;
11306 			reg->ref_obj_id = 0;
11307 
11308 			return;
11309 		}
11310 
11311 		mark_ptr_not_null_reg(reg);
11312 
11313 		if (!reg_may_point_to_spin_lock(reg)) {
11314 			/* For not-NULL ptr, reg->ref_obj_id will be reset
11315 			 * in release_reference().
11316 			 *
11317 			 * reg->id is still used by spin_lock ptr. Other
11318 			 * than spin_lock ptr type, reg->id can be reset.
11319 			 */
11320 			reg->id = 0;
11321 		}
11322 	}
11323 }
11324 
11325 /* The logic is similar to find_good_pkt_pointers(), both could eventually
11326  * be folded together at some point.
11327  */
11328 static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno,
11329 				  bool is_null)
11330 {
11331 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
11332 	struct bpf_reg_state *regs = state->regs, *reg;
11333 	u32 ref_obj_id = regs[regno].ref_obj_id;
11334 	u32 id = regs[regno].id;
11335 
11336 	if (ref_obj_id && ref_obj_id == id && is_null)
11337 		/* regs[regno] is in the " == NULL" branch.
11338 		 * No one could have freed the reference state before
11339 		 * doing the NULL check.
11340 		 */
11341 		WARN_ON_ONCE(release_reference_state(state, id));
11342 
11343 	bpf_for_each_reg_in_vstate(vstate, state, reg, ({
11344 		mark_ptr_or_null_reg(state, reg, id, is_null);
11345 	}));
11346 }
11347 
11348 static bool try_match_pkt_pointers(const struct bpf_insn *insn,
11349 				   struct bpf_reg_state *dst_reg,
11350 				   struct bpf_reg_state *src_reg,
11351 				   struct bpf_verifier_state *this_branch,
11352 				   struct bpf_verifier_state *other_branch)
11353 {
11354 	if (BPF_SRC(insn->code) != BPF_X)
11355 		return false;
11356 
11357 	/* Pointers are always 64-bit. */
11358 	if (BPF_CLASS(insn->code) == BPF_JMP32)
11359 		return false;
11360 
11361 	switch (BPF_OP(insn->code)) {
11362 	case BPF_JGT:
11363 		if ((dst_reg->type == PTR_TO_PACKET &&
11364 		     src_reg->type == PTR_TO_PACKET_END) ||
11365 		    (dst_reg->type == PTR_TO_PACKET_META &&
11366 		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
11367 			/* pkt_data' > pkt_end, pkt_meta' > pkt_data */
11368 			find_good_pkt_pointers(this_branch, dst_reg,
11369 					       dst_reg->type, false);
11370 			mark_pkt_end(other_branch, insn->dst_reg, true);
11371 		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
11372 			    src_reg->type == PTR_TO_PACKET) ||
11373 			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
11374 			    src_reg->type == PTR_TO_PACKET_META)) {
11375 			/* pkt_end > pkt_data', pkt_data > pkt_meta' */
11376 			find_good_pkt_pointers(other_branch, src_reg,
11377 					       src_reg->type, true);
11378 			mark_pkt_end(this_branch, insn->src_reg, false);
11379 		} else {
11380 			return false;
11381 		}
11382 		break;
11383 	case BPF_JLT:
11384 		if ((dst_reg->type == PTR_TO_PACKET &&
11385 		     src_reg->type == PTR_TO_PACKET_END) ||
11386 		    (dst_reg->type == PTR_TO_PACKET_META &&
11387 		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
11388 			/* pkt_data' < pkt_end, pkt_meta' < pkt_data */
11389 			find_good_pkt_pointers(other_branch, dst_reg,
11390 					       dst_reg->type, true);
11391 			mark_pkt_end(this_branch, insn->dst_reg, false);
11392 		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
11393 			    src_reg->type == PTR_TO_PACKET) ||
11394 			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
11395 			    src_reg->type == PTR_TO_PACKET_META)) {
11396 			/* pkt_end < pkt_data', pkt_data < pkt_meta' */
11397 			find_good_pkt_pointers(this_branch, src_reg,
11398 					       src_reg->type, false);
11399 			mark_pkt_end(other_branch, insn->src_reg, true);
11400 		} else {
11401 			return false;
11402 		}
11403 		break;
11404 	case BPF_JGE:
11405 		if ((dst_reg->type == PTR_TO_PACKET &&
11406 		     src_reg->type == PTR_TO_PACKET_END) ||
11407 		    (dst_reg->type == PTR_TO_PACKET_META &&
11408 		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
11409 			/* pkt_data' >= pkt_end, pkt_meta' >= pkt_data */
11410 			find_good_pkt_pointers(this_branch, dst_reg,
11411 					       dst_reg->type, true);
11412 			mark_pkt_end(other_branch, insn->dst_reg, false);
11413 		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
11414 			    src_reg->type == PTR_TO_PACKET) ||
11415 			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
11416 			    src_reg->type == PTR_TO_PACKET_META)) {
11417 			/* pkt_end >= pkt_data', pkt_data >= pkt_meta' */
11418 			find_good_pkt_pointers(other_branch, src_reg,
11419 					       src_reg->type, false);
11420 			mark_pkt_end(this_branch, insn->src_reg, true);
11421 		} else {
11422 			return false;
11423 		}
11424 		break;
11425 	case BPF_JLE:
11426 		if ((dst_reg->type == PTR_TO_PACKET &&
11427 		     src_reg->type == PTR_TO_PACKET_END) ||
11428 		    (dst_reg->type == PTR_TO_PACKET_META &&
11429 		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
11430 			/* pkt_data' <= pkt_end, pkt_meta' <= pkt_data */
11431 			find_good_pkt_pointers(other_branch, dst_reg,
11432 					       dst_reg->type, false);
11433 			mark_pkt_end(this_branch, insn->dst_reg, true);
11434 		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
11435 			    src_reg->type == PTR_TO_PACKET) ||
11436 			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
11437 			    src_reg->type == PTR_TO_PACKET_META)) {
11438 			/* pkt_end <= pkt_data', pkt_data <= pkt_meta' */
11439 			find_good_pkt_pointers(this_branch, src_reg,
11440 					       src_reg->type, true);
11441 			mark_pkt_end(other_branch, insn->src_reg, false);
11442 		} else {
11443 			return false;
11444 		}
11445 		break;
11446 	default:
11447 		return false;
11448 	}
11449 
11450 	return true;
11451 }
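
/* A hypothetical snippet (for illustration only) of the bounds-check pattern
 * these branches recognize, here the BPF_JGT case in an XDP program:
 *
 *   r2 = *(u32 *)(r1 + offsetof(struct xdp_md, data))       // pkt
 *   r3 = *(u32 *)(r1 + offsetof(struct xdp_md, data_end))   // pkt_end
 *   r4 = r2
 *   r4 += 14
 *   if r4 > r3 goto drop
 *   // fall-through branch: find_good_pkt_pointers() records that packet
 *   // pointers sharing r2's id have at least 14 bytes of valid range, so a
 *   // later load such as r5 = *(u8 *)(r2 + 13) is accepted.
 */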
11452 
11453 static void find_equal_scalars(struct bpf_verifier_state *vstate,
11454 			       struct bpf_reg_state *known_reg)
11455 {
11456 	struct bpf_func_state *state;
11457 	struct bpf_reg_state *reg;
11458 
11459 	bpf_for_each_reg_in_vstate(vstate, state, reg, ({
11460 		if (reg->type == SCALAR_VALUE && reg->id == known_reg->id)
11461 			*reg = *known_reg;
11462 	}));
11463 }
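
/* For illustration (hypothetical snippet): after a register-to-register move
 * both scalars carry the same id, so bounds learned for one are copied to the
 * other in each branch:
 *
 *   r6 = r7                  // r6 and r7 get the same id
 *   if r6 > 10 goto l        // reg_set_min_max() refines r6 per branch
 *   // find_equal_scalars() then copies the refined state to r7 as well
 */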
11464 
11465 static int check_cond_jmp_op(struct bpf_verifier_env *env,
11466 			     struct bpf_insn *insn, int *insn_idx)
11467 {
11468 	struct bpf_verifier_state *this_branch = env->cur_state;
11469 	struct bpf_verifier_state *other_branch;
11470 	struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs;
11471 	struct bpf_reg_state *dst_reg, *other_branch_regs, *src_reg = NULL;
11472 	struct bpf_reg_state *eq_branch_regs;
11473 	u8 opcode = BPF_OP(insn->code);
11474 	bool is_jmp32;
11475 	int pred = -1;
11476 	int err;
11477 
11478 	/* Only conditional jumps are expected to reach here. */
11479 	if (opcode == BPF_JA || opcode > BPF_JSLE) {
11480 		verbose(env, "invalid BPF_JMP/JMP32 opcode %x\n", opcode);
11481 		return -EINVAL;
11482 	}
11483 
11484 	if (BPF_SRC(insn->code) == BPF_X) {
11485 		if (insn->imm != 0) {
11486 			verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
11487 			return -EINVAL;
11488 		}
11489 
11490 		/* check src1 operand */
11491 		err = check_reg_arg(env, insn->src_reg, SRC_OP);
11492 		if (err)
11493 			return err;
11494 
11495 		if (is_pointer_value(env, insn->src_reg)) {
11496 			verbose(env, "R%d pointer comparison prohibited\n",
11497 				insn->src_reg);
11498 			return -EACCES;
11499 		}
11500 		src_reg = &regs[insn->src_reg];
11501 	} else {
11502 		if (insn->src_reg != BPF_REG_0) {
11503 			verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
11504 			return -EINVAL;
11505 		}
11506 	}
11507 
11508 	/* check src2 operand */
11509 	err = check_reg_arg(env, insn->dst_reg, SRC_OP);
11510 	if (err)
11511 		return err;
11512 
11513 	dst_reg = &regs[insn->dst_reg];
11514 	is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
11515 
11516 	if (BPF_SRC(insn->code) == BPF_K) {
11517 		pred = is_branch_taken(dst_reg, insn->imm, opcode, is_jmp32);
11518 	} else if (src_reg->type == SCALAR_VALUE &&
11519 		   is_jmp32 && tnum_is_const(tnum_subreg(src_reg->var_off))) {
11520 		pred = is_branch_taken(dst_reg,
11521 				       tnum_subreg(src_reg->var_off).value,
11522 				       opcode,
11523 				       is_jmp32);
11524 	} else if (src_reg->type == SCALAR_VALUE &&
11525 		   !is_jmp32 && tnum_is_const(src_reg->var_off)) {
11526 		pred = is_branch_taken(dst_reg,
11527 				       src_reg->var_off.value,
11528 				       opcode,
11529 				       is_jmp32);
11530 	} else if (reg_is_pkt_pointer_any(dst_reg) &&
11531 		   reg_is_pkt_pointer_any(src_reg) &&
11532 		   !is_jmp32) {
11533 		pred = is_pkt_ptr_branch_taken(dst_reg, src_reg, opcode);
11534 	}
11535 
11536 	if (pred >= 0) {
11537 		/* If we get here with a dst_reg pointer type it is because
11538 		 * the is_branch_taken() call above special-cased the 0 comparison.
11539 		 */
11540 		if (!__is_pointer_value(false, dst_reg))
11541 			err = mark_chain_precision(env, insn->dst_reg);
11542 		if (BPF_SRC(insn->code) == BPF_X && !err &&
11543 		    !__is_pointer_value(false, src_reg))
11544 			err = mark_chain_precision(env, insn->src_reg);
11545 		if (err)
11546 			return err;
11547 	}
11548 
11549 	if (pred == 1) {
11550 		/* Only follow the goto, ignore fall-through. If needed, push
11551 		 * the fall-through branch for simulation under speculative
11552 		 * execution.
11553 		 */
11554 		if (!env->bypass_spec_v1 &&
11555 		    !sanitize_speculative_path(env, insn, *insn_idx + 1,
11556 					       *insn_idx))
11557 			return -EFAULT;
11558 		*insn_idx += insn->off;
11559 		return 0;
11560 	} else if (pred == 0) {
11561 		/* Only follow the fall-through branch, since that's where the
11562 		 * program will go. If needed, push the goto branch for
11563 		 * simulation under speculative execution.
11564 		 */
11565 		if (!env->bypass_spec_v1 &&
11566 		    !sanitize_speculative_path(env, insn,
11567 					       *insn_idx + insn->off + 1,
11568 					       *insn_idx))
11569 			return -EFAULT;
11570 		return 0;
11571 	}
11572 
11573 	other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx,
11574 				  false);
11575 	if (!other_branch)
11576 		return -EFAULT;
11577 	other_branch_regs = other_branch->frame[other_branch->curframe]->regs;
11578 
11579 	/* detect if we are comparing against a constant value so we can adjust
11580 	 * our min/max values for our dst register.
11581 	 * this is only legit if both are scalars (or pointers to the same
11582 	 * object, I suppose, see the PTR_MAYBE_NULL related if block below),
11583 	 * because otherwise the different base pointers mean the offsets aren't
11584 	 * comparable.
11585 	 */
11586 	if (BPF_SRC(insn->code) == BPF_X) {
11587 		struct bpf_reg_state *src_reg = &regs[insn->src_reg];
11588 
11589 		if (dst_reg->type == SCALAR_VALUE &&
11590 		    src_reg->type == SCALAR_VALUE) {
11591 			if (tnum_is_const(src_reg->var_off) ||
11592 			    (is_jmp32 &&
11593 			     tnum_is_const(tnum_subreg(src_reg->var_off))))
11594 				reg_set_min_max(&other_branch_regs[insn->dst_reg],
11595 						dst_reg,
11596 						src_reg->var_off.value,
11597 						tnum_subreg(src_reg->var_off).value,
11598 						opcode, is_jmp32);
11599 			else if (tnum_is_const(dst_reg->var_off) ||
11600 				 (is_jmp32 &&
11601 				  tnum_is_const(tnum_subreg(dst_reg->var_off))))
11602 				reg_set_min_max_inv(&other_branch_regs[insn->src_reg],
11603 						    src_reg,
11604 						    dst_reg->var_off.value,
11605 						    tnum_subreg(dst_reg->var_off).value,
11606 						    opcode, is_jmp32);
11607 			else if (!is_jmp32 &&
11608 				 (opcode == BPF_JEQ || opcode == BPF_JNE))
11609 				/* Comparing for equality, we can combine knowledge */
11610 				reg_combine_min_max(&other_branch_regs[insn->src_reg],
11611 						    &other_branch_regs[insn->dst_reg],
11612 						    src_reg, dst_reg, opcode);
11613 			if (src_reg->id &&
11614 			    !WARN_ON_ONCE(src_reg->id != other_branch_regs[insn->src_reg].id)) {
11615 				find_equal_scalars(this_branch, src_reg);
11616 				find_equal_scalars(other_branch, &other_branch_regs[insn->src_reg]);
11617 			}
11618 
11619 		}
11620 	} else if (dst_reg->type == SCALAR_VALUE) {
11621 		reg_set_min_max(&other_branch_regs[insn->dst_reg],
11622 					dst_reg, insn->imm, (u32)insn->imm,
11623 					opcode, is_jmp32);
11624 	}
11625 
11626 	if (dst_reg->type == SCALAR_VALUE && dst_reg->id &&
11627 	    !WARN_ON_ONCE(dst_reg->id != other_branch_regs[insn->dst_reg].id)) {
11628 		find_equal_scalars(this_branch, dst_reg);
11629 		find_equal_scalars(other_branch, &other_branch_regs[insn->dst_reg]);
11630 	}
11631 
11632 	/* if one pointer register is compared to another pointer
11633 	 * register check if PTR_MAYBE_NULL could be lifted.
11634 	 * E.g. register A - maybe null
11635 	 *      register B - not null
11636 	 * for JNE A, B, ... - A is not null in the false branch;
11637 	 * for JEQ A, B, ... - A is not null in the true branch.
11638 	 */
11639 	if (!is_jmp32 && BPF_SRC(insn->code) == BPF_X &&
11640 	    __is_pointer_value(false, src_reg) && __is_pointer_value(false, dst_reg) &&
11641 	    type_may_be_null(src_reg->type) != type_may_be_null(dst_reg->type)) {
11642 		eq_branch_regs = NULL;
11643 		switch (opcode) {
11644 		case BPF_JEQ:
11645 			eq_branch_regs = other_branch_regs;
11646 			break;
11647 		case BPF_JNE:
11648 			eq_branch_regs = regs;
11649 			break;
11650 		default:
11651 			/* do nothing */
11652 			break;
11653 		}
11654 		if (eq_branch_regs) {
11655 			if (type_may_be_null(src_reg->type))
11656 				mark_ptr_not_null_reg(&eq_branch_regs[insn->src_reg]);
11657 			else
11658 				mark_ptr_not_null_reg(&eq_branch_regs[insn->dst_reg]);
11659 		}
11660 	}
11661 
11662 	/* detect if R == 0 where R is returned from bpf_map_lookup_elem().
11663  * NOTE: the optimizations below relate to pointer comparisons,
11664  *       which will never be JMP32.
11665 	 */
11666 	if (!is_jmp32 && BPF_SRC(insn->code) == BPF_K &&
11667 	    insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) &&
11668 	    type_may_be_null(dst_reg->type)) {
11669 		/* Mark all identical registers in each branch as either
11670 		 * safe or unknown depending on the R == 0 or R != 0 condition.
11671 		 */
11672 		mark_ptr_or_null_regs(this_branch, insn->dst_reg,
11673 				      opcode == BPF_JNE);
11674 		mark_ptr_or_null_regs(other_branch, insn->dst_reg,
11675 				      opcode == BPF_JEQ);
11676 	} else if (!try_match_pkt_pointers(insn, dst_reg, &regs[insn->src_reg],
11677 					   this_branch, other_branch) &&
11678 		   is_pointer_value(env, insn->dst_reg)) {
11679 		verbose(env, "R%d pointer comparison prohibited\n",
11680 			insn->dst_reg);
11681 		return -EACCES;
11682 	}
11683 	if (env->log.level & BPF_LOG_LEVEL)
11684 		print_insn_state(env, this_branch->frame[this_branch->curframe]);
11685 	return 0;
11686 }
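
/* An illustrative (hypothetical) sequence for the pointer-vs-pointer case
 * above, where PTR_MAYBE_NULL is lifted by comparing against a pointer that
 * is already known to be non-NULL (assuming a privileged loader, since
 * pointer comparisons are otherwise rejected):
 *
 *   r6 = bpf_map_lookup_elem(...)   // may be NULL
 *   if r6 == 0 goto out             // below this, r6 is non-NULL
 *   r7 = bpf_map_lookup_elem(...)   // may be NULL
 *   if r7 != r6 goto out
 *   // fall-through: r7 compared equal to the non-NULL r6, so r7 is marked
 *   // non-NULL here as well (the BPF_JNE "false" branch case).
 */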
11687 
11688 /* verify BPF_LD_IMM64 instruction */
11689 static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
11690 {
11691 	struct bpf_insn_aux_data *aux = cur_aux(env);
11692 	struct bpf_reg_state *regs = cur_regs(env);
11693 	struct bpf_reg_state *dst_reg;
11694 	struct bpf_map *map;
11695 	int err;
11696 
11697 	if (BPF_SIZE(insn->code) != BPF_DW) {
11698 		verbose(env, "invalid BPF_LD_IMM insn\n");
11699 		return -EINVAL;
11700 	}
11701 	if (insn->off != 0) {
11702 		verbose(env, "BPF_LD_IMM64 uses reserved fields\n");
11703 		return -EINVAL;
11704 	}
11705 
11706 	err = check_reg_arg(env, insn->dst_reg, DST_OP);
11707 	if (err)
11708 		return err;
11709 
11710 	dst_reg = &regs[insn->dst_reg];
11711 	if (insn->src_reg == 0) {
11712 		u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
11713 
11714 		dst_reg->type = SCALAR_VALUE;
11715 		__mark_reg_known(&regs[insn->dst_reg], imm);
11716 		return 0;
11717 	}
11718 
11719 	/* All special src_reg cases are listed below. From this point onwards
11720 	 * we either succeed and assign a corresponding dst_reg->type after
11721 	 * zeroing the offset, or fail and reject the program.
11722 	 */
11723 	mark_reg_known_zero(env, regs, insn->dst_reg);
11724 
11725 	if (insn->src_reg == BPF_PSEUDO_BTF_ID) {
11726 		dst_reg->type = aux->btf_var.reg_type;
11727 		switch (base_type(dst_reg->type)) {
11728 		case PTR_TO_MEM:
11729 			dst_reg->mem_size = aux->btf_var.mem_size;
11730 			break;
11731 		case PTR_TO_BTF_ID:
11732 			dst_reg->btf = aux->btf_var.btf;
11733 			dst_reg->btf_id = aux->btf_var.btf_id;
11734 			break;
11735 		default:
11736 			verbose(env, "bpf verifier is misconfigured\n");
11737 			return -EFAULT;
11738 		}
11739 		return 0;
11740 	}
11741 
11742 	if (insn->src_reg == BPF_PSEUDO_FUNC) {
11743 		struct bpf_prog_aux *aux = env->prog->aux;
11744 		u32 subprogno = find_subprog(env,
11745 					     env->insn_idx + insn->imm + 1);
11746 
11747 		if (!aux->func_info) {
11748 			verbose(env, "missing btf func_info\n");
11749 			return -EINVAL;
11750 		}
11751 		if (aux->func_info_aux[subprogno].linkage != BTF_FUNC_STATIC) {
11752 			verbose(env, "callback function not static\n");
11753 			return -EINVAL;
11754 		}
11755 
11756 		dst_reg->type = PTR_TO_FUNC;
11757 		dst_reg->subprogno = subprogno;
11758 		return 0;
11759 	}
11760 
11761 	map = env->used_maps[aux->map_index];
11762 	dst_reg->map_ptr = map;
11763 
11764 	if (insn->src_reg == BPF_PSEUDO_MAP_VALUE ||
11765 	    insn->src_reg == BPF_PSEUDO_MAP_IDX_VALUE) {
11766 		dst_reg->type = PTR_TO_MAP_VALUE;
11767 		dst_reg->off = aux->map_off;
11768 		WARN_ON_ONCE(map->max_entries != 1);
11769 		/* We want reg->id to be the same (0), as this map_value is not distinct */
11770 	} else if (insn->src_reg == BPF_PSEUDO_MAP_FD ||
11771 		   insn->src_reg == BPF_PSEUDO_MAP_IDX) {
11772 		dst_reg->type = CONST_PTR_TO_MAP;
11773 	} else {
11774 		verbose(env, "bpf verifier is misconfigured\n");
11775 		return -EINVAL;
11776 	}
11777 
11778 	return 0;
11779 }
11780 
11781 static bool may_access_skb(enum bpf_prog_type type)
11782 {
11783 	switch (type) {
11784 	case BPF_PROG_TYPE_SOCKET_FILTER:
11785 	case BPF_PROG_TYPE_SCHED_CLS:
11786 	case BPF_PROG_TYPE_SCHED_ACT:
11787 		return true;
11788 	default:
11789 		return false;
11790 	}
11791 }
11792 
11793 /* verify safety of LD_ABS|LD_IND instructions:
11794  * - they can only appear in programs where ctx == skb
11795  * - since they are wrappers of function calls, they scratch R1-R5 registers,
11796  *   preserve R6-R9, and store return value into R0
11797  *
11798  * Implicit input:
11799  *   ctx == skb == R6 == CTX
11800  *
11801  * Explicit input:
11802  *   SRC == any register
11803  *   IMM == 32-bit immediate
11804  *
11805  * Output:
11806  *   R0 - 8/16/32-bit skb data converted to cpu endianness
11807  */
11808 static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
11809 {
11810 	struct bpf_reg_state *regs = cur_regs(env);
11811 	static const int ctx_reg = BPF_REG_6;
11812 	u8 mode = BPF_MODE(insn->code);
11813 	int i, err;
11814 
11815 	if (!may_access_skb(resolve_prog_type(env->prog))) {
11816 		verbose(env, "BPF_LD_[ABS|IND] instructions not allowed for this program type\n");
11817 		return -EINVAL;
11818 	}
11819 
11820 	if (!env->ops->gen_ld_abs) {
11821 		verbose(env, "bpf verifier is misconfigured\n");
11822 		return -EINVAL;
11823 	}
11824 
11825 	if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
11826 	    BPF_SIZE(insn->code) == BPF_DW ||
11827 	    (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
11828 		verbose(env, "BPF_LD_[ABS|IND] uses reserved fields\n");
11829 		return -EINVAL;
11830 	}
11831 
11832 	/* check whether implicit source operand (register R6) is readable */
11833 	err = check_reg_arg(env, ctx_reg, SRC_OP);
11834 	if (err)
11835 		return err;
11836 
11837 	/* Disallow usage of BPF_LD_[ABS|IND] with reference tracking, as
11838 	 * gen_ld_abs() may terminate the program at runtime, leading to
11839 	 * reference leak.
11840 	 */
11841 	err = check_reference_leak(env);
11842 	if (err) {
11843 		verbose(env, "BPF_LD_[ABS|IND] cannot be mixed with socket references\n");
11844 		return err;
11845 	}
11846 
11847 	if (env->cur_state->active_lock.ptr) {
11848 		verbose(env, "BPF_LD_[ABS|IND] cannot be used inside bpf_spin_lock-ed region\n");
11849 		return -EINVAL;
11850 	}
11851 
11852 	if (env->cur_state->active_rcu_lock) {
11853 		verbose(env, "BPF_LD_[ABS|IND] cannot be used inside bpf_rcu_read_lock-ed region\n");
11854 		return -EINVAL;
11855 	}
11856 
11857 	if (regs[ctx_reg].type != PTR_TO_CTX) {
11858 		verbose(env,
11859 			"at the time of BPF_LD_ABS|IND R6 != pointer to skb\n");
11860 		return -EINVAL;
11861 	}
11862 
11863 	if (mode == BPF_IND) {
11864 		/* check explicit source operand */
11865 		err = check_reg_arg(env, insn->src_reg, SRC_OP);
11866 		if (err)
11867 			return err;
11868 	}
11869 
11870 	err = check_ptr_off_reg(env, &regs[ctx_reg], ctx_reg);
11871 	if (err < 0)
11872 		return err;
11873 
11874 	/* reset caller saved regs to unreadable */
11875 	for (i = 0; i < CALLER_SAVED_REGS; i++) {
11876 		mark_reg_not_init(env, regs, caller_saved[i]);
11877 		check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
11878 	}
11879 
11880 	/* mark destination R0 register as readable, since it contains
11881 	 * the value fetched from the packet.
11882 	 * Already marked as written above.
11883 	 */
11884 	mark_reg_unknown(env, regs, BPF_REG_0);
11885 	/* ld_abs loads at most 32 bits of skb data. */
11886 	regs[BPF_REG_0].subreg_def = env->insn_idx + 1;
11887 	return 0;
11888 }
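
/* Illustrative use of LD_ABS (a hypothetical socket filter fragment), showing
 * the implicit R6 == skb convention checked above:
 *
 *   BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),             // r6 = ctx (skb)
 *   BPF_LD_ABS(BPF_H, 12),                           // r0 = 16-bit load at skb offset 12
 *   BPF_JMP_IMM(BPF_JNE, BPF_REG_0, ETH_P_IP, ...),  // e.g. compare the EtherType
 *
 * R1-R5 are treated as scratched by the underlying helper call, and R0 holds
 * the loaded value converted to cpu endianness.
 */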
11889 
11890 static int check_return_code(struct bpf_verifier_env *env)
11891 {
11892 	struct tnum enforce_attach_type_range = tnum_unknown;
11893 	const struct bpf_prog *prog = env->prog;
11894 	struct bpf_reg_state *reg;
11895 	struct tnum range = tnum_range(0, 1);
11896 	enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
11897 	int err;
11898 	struct bpf_func_state *frame = env->cur_state->frame[0];
11899 	const bool is_subprog = frame->subprogno;
11900 
11901 	/* LSM and struct_ops func-ptr's return type could be "void" */
11902 	if (!is_subprog) {
11903 		switch (prog_type) {
11904 		case BPF_PROG_TYPE_LSM:
11905 			if (prog->expected_attach_type == BPF_LSM_CGROUP)
11906 				/* See below, can be 0 or 0-1 depending on hook. */
11907 				break;
11908 			fallthrough;
11909 		case BPF_PROG_TYPE_STRUCT_OPS:
11910 			if (!prog->aux->attach_func_proto->type)
11911 				return 0;
11912 			break;
11913 		default:
11914 			break;
11915 		}
11916 	}
11917 
11918 	/* eBPF calling convention is such that R0 is used
11919 	 * to return the value from eBPF program.
11920 	 * Make sure that it's readable at this time
11921 	 * of bpf_exit, which means that program wrote
11922 	 * something into it earlier
11923 	 */
11924 	err = check_reg_arg(env, BPF_REG_0, SRC_OP);
11925 	if (err)
11926 		return err;
11927 
11928 	if (is_pointer_value(env, BPF_REG_0)) {
11929 		verbose(env, "R0 leaks addr as return value\n");
11930 		return -EACCES;
11931 	}
11932 
11933 	reg = cur_regs(env) + BPF_REG_0;
11934 
11935 	if (frame->in_async_callback_fn) {
11936 		/* enforce return zero from async callbacks like timer */
11937 		if (reg->type != SCALAR_VALUE) {
11938 			verbose(env, "In async callback the register R0 is not a known value (%s)\n",
11939 				reg_type_str(env, reg->type));
11940 			return -EINVAL;
11941 		}
11942 
11943 		if (!tnum_in(tnum_const(0), reg->var_off)) {
11944 			verbose_invalid_scalar(env, reg, &range, "async callback", "R0");
11945 			return -EINVAL;
11946 		}
11947 		return 0;
11948 	}
11949 
11950 	if (is_subprog) {
11951 		if (reg->type != SCALAR_VALUE) {
11952 			verbose(env, "At subprogram exit the register R0 is not a scalar value (%s)\n",
11953 				reg_type_str(env, reg->type));
11954 			return -EINVAL;
11955 		}
11956 		return 0;
11957 	}
11958 
11959 	switch (prog_type) {
11960 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
11961 		if (env->prog->expected_attach_type == BPF_CGROUP_UDP4_RECVMSG ||
11962 		    env->prog->expected_attach_type == BPF_CGROUP_UDP6_RECVMSG ||
11963 		    env->prog->expected_attach_type == BPF_CGROUP_INET4_GETPEERNAME ||
11964 		    env->prog->expected_attach_type == BPF_CGROUP_INET6_GETPEERNAME ||
11965 		    env->prog->expected_attach_type == BPF_CGROUP_INET4_GETSOCKNAME ||
11966 		    env->prog->expected_attach_type == BPF_CGROUP_INET6_GETSOCKNAME)
11967 			range = tnum_range(1, 1);
11968 		if (env->prog->expected_attach_type == BPF_CGROUP_INET4_BIND ||
11969 		    env->prog->expected_attach_type == BPF_CGROUP_INET6_BIND)
11970 			range = tnum_range(0, 3);
11971 		break;
11972 	case BPF_PROG_TYPE_CGROUP_SKB:
11973 		if (env->prog->expected_attach_type == BPF_CGROUP_INET_EGRESS) {
11974 			range = tnum_range(0, 3);
11975 			enforce_attach_type_range = tnum_range(2, 3);
11976 		}
11977 		break;
11978 	case BPF_PROG_TYPE_CGROUP_SOCK:
11979 	case BPF_PROG_TYPE_SOCK_OPS:
11980 	case BPF_PROG_TYPE_CGROUP_DEVICE:
11981 	case BPF_PROG_TYPE_CGROUP_SYSCTL:
11982 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
11983 		break;
11984 	case BPF_PROG_TYPE_RAW_TRACEPOINT:
11985 		if (!env->prog->aux->attach_btf_id)
11986 			return 0;
11987 		range = tnum_const(0);
11988 		break;
11989 	case BPF_PROG_TYPE_TRACING:
11990 		switch (env->prog->expected_attach_type) {
11991 		case BPF_TRACE_FENTRY:
11992 		case BPF_TRACE_FEXIT:
11993 			range = tnum_const(0);
11994 			break;
11995 		case BPF_TRACE_RAW_TP:
11996 		case BPF_MODIFY_RETURN:
11997 			return 0;
11998 		case BPF_TRACE_ITER:
11999 			break;
12000 		default:
12001 			return -ENOTSUPP;
12002 		}
12003 		break;
12004 	case BPF_PROG_TYPE_SK_LOOKUP:
12005 		range = tnum_range(SK_DROP, SK_PASS);
12006 		break;
12007 
12008 	case BPF_PROG_TYPE_LSM:
12009 		if (env->prog->expected_attach_type != BPF_LSM_CGROUP) {
12010 			/* Regular BPF_PROG_TYPE_LSM programs can return
12011 			 * any value.
12012 			 */
12013 			return 0;
12014 		}
12015 		if (!env->prog->aux->attach_func_proto->type) {
12016 			/* Make sure programs that attach to void
12017 			 * hooks don't try to modify return value.
12018 			 */
12019 			range = tnum_range(1, 1);
12020 		}
12021 		break;
12022 
12023 	case BPF_PROG_TYPE_EXT:
12024 		/* freplace program can return anything as its return value
12025 		 * depends on the to-be-replaced kernel func or bpf program.
12026 		 */
12027 	default:
12028 		return 0;
12029 	}
12030 
12031 	if (reg->type != SCALAR_VALUE) {
12032 		verbose(env, "At program exit the register R0 is not a known value (%s)\n",
12033 			reg_type_str(env, reg->type));
12034 		return -EINVAL;
12035 	}
12036 
12037 	if (!tnum_in(range, reg->var_off)) {
12038 		verbose_invalid_scalar(env, reg, &range, "program exit", "R0");
12039 		if (prog->expected_attach_type == BPF_LSM_CGROUP &&
12040 		    prog_type == BPF_PROG_TYPE_LSM &&
12041 		    !prog->aux->attach_func_proto->type)
12042 			verbose(env, "Note, BPF_LSM_CGROUP that attach to void LSM hooks can't modify return value!\n");
12043 		return -EINVAL;
12044 	}
12045 
12046 	if (!tnum_is_unknown(enforce_attach_type_range) &&
12047 	    tnum_in(enforce_attach_type_range, reg->var_off))
12048 		env->prog->enforce_expected_attach_type = 1;
12049 	return 0;
12050 }
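
/* For illustration: with BPF_PROG_TYPE_CGROUP_SKB attached to
 * BPF_CGROUP_INET_EGRESS the accepted range above is [0, 3], so a program
 * ending in
 *
 *   r0 = 4
 *   exit
 *
 * fails the tnum_in(range, reg->var_off) check, while a return value proven
 * to lie in [2, 3] additionally sets prog->enforce_expected_attach_type.
 */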
12051 
12052 /* non-recursive DFS pseudo code
12053  * 1  procedure DFS-iterative(G,v):
12054  * 2      label v as discovered
12055  * 3      let S be a stack
12056  * 4      S.push(v)
12057  * 5      while S is not empty
12058  * 6            t <- S.peek()
12059  * 7            if t is what we're looking for:
12060  * 8                return t
12061  * 9            for all edges e in G.adjacentEdges(t) do
12062  * 10               if edge e is already labelled
12063  * 11                   continue with the next edge
12064  * 12               w <- G.adjacentVertex(t,e)
12065  * 13               if vertex w is not discovered and not explored
12066  * 14                   label e as tree-edge
12067  * 15                   label w as discovered
12068  * 16                   S.push(w)
12069  * 17                   continue at 5
12070  * 18               else if vertex w is discovered
12071  * 19                   label e as back-edge
12072  * 20               else
12073  * 21                   // vertex w is explored
12074  * 22                   label e as forward- or cross-edge
12075  * 23           label t as explored
12076  * 24           S.pop()
12077  *
12078  * convention:
12079  * 0x10 - discovered
12080  * 0x11 - discovered and fall-through edge labelled
12081  * 0x12 - discovered and fall-through and branch edges labelled
12082  * 0x20 - explored
12083  */
12084 
12085 enum {
12086 	DISCOVERED = 0x10,
12087 	EXPLORED = 0x20,
12088 	FALLTHROUGH = 1,
12089 	BRANCH = 2,
12090 };
12091 
12092 static u32 state_htab_size(struct bpf_verifier_env *env)
12093 {
12094 	return env->prog->len;
12095 }
12096 
12097 static struct bpf_verifier_state_list **explored_state(
12098 					struct bpf_verifier_env *env,
12099 					int idx)
12100 {
12101 	struct bpf_verifier_state *cur = env->cur_state;
12102 	struct bpf_func_state *state = cur->frame[cur->curframe];
12103 
12104 	return &env->explored_states[(idx ^ state->callsite) % state_htab_size(env)];
12105 }
12106 
12107 static void init_explored_state(struct bpf_verifier_env *env, int idx)
12108 {
12109 	env->insn_aux_data[idx].prune_point = true;
12110 }
12111 
12112 enum {
12113 	DONE_EXPLORING = 0,
12114 	KEEP_EXPLORING = 1,
12115 };
12116 
12117 /* t, w, e - match pseudo-code above:
12118  * t - index of current instruction
12119  * w - next instruction
12120  * e - edge
12121  */
12122 static int push_insn(int t, int w, int e, struct bpf_verifier_env *env,
12123 		     bool loop_ok)
12124 {
12125 	int *insn_stack = env->cfg.insn_stack;
12126 	int *insn_state = env->cfg.insn_state;
12127 
12128 	if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH))
12129 		return DONE_EXPLORING;
12130 
12131 	if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH))
12132 		return DONE_EXPLORING;
12133 
12134 	if (w < 0 || w >= env->prog->len) {
12135 		verbose_linfo(env, t, "%d: ", t);
12136 		verbose(env, "jump out of range from insn %d to %d\n", t, w);
12137 		return -EINVAL;
12138 	}
12139 
12140 	if (e == BRANCH)
12141 		/* mark branch target for state pruning */
12142 		init_explored_state(env, w);
12143 
12144 	if (insn_state[w] == 0) {
12145 		/* tree-edge */
12146 		insn_state[t] = DISCOVERED | e;
12147 		insn_state[w] = DISCOVERED;
12148 		if (env->cfg.cur_stack >= env->prog->len)
12149 			return -E2BIG;
12150 		insn_stack[env->cfg.cur_stack++] = w;
12151 		return KEEP_EXPLORING;
12152 	} else if ((insn_state[w] & 0xF0) == DISCOVERED) {
12153 		if (loop_ok && env->bpf_capable)
12154 			return DONE_EXPLORING;
12155 		verbose_linfo(env, t, "%d: ", t);
12156 		verbose_linfo(env, w, "%d: ", w);
12157 		verbose(env, "back-edge from insn %d to %d\n", t, w);
12158 		return -EINVAL;
12159 	} else if (insn_state[w] == EXPLORED) {
12160 		/* forward- or cross-edge */
12161 		insn_state[t] = DISCOVERED | e;
12162 	} else {
12163 		verbose(env, "insn state internal bug\n");
12164 		return -EFAULT;
12165 	}
12166 	return DONE_EXPLORING;
12167 }
12168 
12169 static int visit_func_call_insn(int t, int insn_cnt,
12170 				struct bpf_insn *insns,
12171 				struct bpf_verifier_env *env,
12172 				bool visit_callee)
12173 {
12174 	int ret;
12175 
12176 	ret = push_insn(t, t + 1, FALLTHROUGH, env, false);
12177 	if (ret)
12178 		return ret;
12179 
12180 	if (t + 1 < insn_cnt)
12181 		init_explored_state(env, t + 1);
12182 	if (visit_callee) {
12183 		init_explored_state(env, t);
12184 		ret = push_insn(t, t + insns[t].imm + 1, BRANCH, env,
12185 				/* It's ok to allow recursion from CFG point of
12186 				 * view. __check_func_call() will do the actual
12187 				 * check.
12188 				 */
12189 				bpf_pseudo_func(insns + t));
12190 	}
12191 	return ret;
12192 }
12193 
12194 /* Visits the instruction at index t and returns one of the following:
12195  *  < 0 - an error occurred
12196  *  DONE_EXPLORING - the instruction was fully explored
12197  *  KEEP_EXPLORING - there is still work to be done before it is fully explored
12198  */
12199 static int visit_insn(int t, int insn_cnt, struct bpf_verifier_env *env)
12200 {
12201 	struct bpf_insn *insns = env->prog->insnsi;
12202 	int ret;
12203 
12204 	if (bpf_pseudo_func(insns + t))
12205 		return visit_func_call_insn(t, insn_cnt, insns, env, true);
12206 
12207 	/* All non-branch instructions have a single fall-through edge. */
12208 	if (BPF_CLASS(insns[t].code) != BPF_JMP &&
12209 	    BPF_CLASS(insns[t].code) != BPF_JMP32)
12210 		return push_insn(t, t + 1, FALLTHROUGH, env, false);
12211 
12212 	switch (BPF_OP(insns[t].code)) {
12213 	case BPF_EXIT:
12214 		return DONE_EXPLORING;
12215 
12216 	case BPF_CALL:
12217 		if (insns[t].imm == BPF_FUNC_timer_set_callback)
12218 			/* Mark this call insn to trigger is_state_visited() check
12219 			 * before call itself is processed by __check_func_call().
12220 			 * Otherwise new async state will be pushed for further
12221 			 * exploration.
12222 			 */
12223 			init_explored_state(env, t);
12224 		return visit_func_call_insn(t, insn_cnt, insns, env,
12225 					    insns[t].src_reg == BPF_PSEUDO_CALL);
12226 
12227 	case BPF_JA:
12228 		if (BPF_SRC(insns[t].code) != BPF_K)
12229 			return -EINVAL;
12230 
12231 		/* unconditional jump with single edge */
12232 		ret = push_insn(t, t + insns[t].off + 1, FALLTHROUGH, env,
12233 				true);
12234 		if (ret)
12235 			return ret;
12236 
12237 		/* unconditional jmp is not a good pruning point,
12238 		 * but it's marked, since backtracking needs
12239 		 * to record jmp history in is_state_visited().
12240 		 */
12241 		init_explored_state(env, t + insns[t].off + 1);
12242 		/* tell verifier to check for equivalent states
12243 		 * after every call and jump
12244 		 */
12245 		if (t + 1 < insn_cnt)
12246 			init_explored_state(env, t + 1);
12247 
12248 		return ret;
12249 
12250 	default:
12251 		/* conditional jump with two edges */
12252 		init_explored_state(env, t);
12253 		ret = push_insn(t, t + 1, FALLTHROUGH, env, true);
12254 		if (ret)
12255 			return ret;
12256 
12257 		return push_insn(t, t + insns[t].off + 1, BRANCH, env, true);
12258 	}
12259 }
12260 
12261 /* non-recursive depth-first-search to detect loops in BPF program
12262  * loop == back-edge in directed graph
12263  */
12264 static int check_cfg(struct bpf_verifier_env *env)
12265 {
12266 	int insn_cnt = env->prog->len;
12267 	int *insn_stack, *insn_state;
12268 	int ret = 0;
12269 	int i;
12270 
12271 	insn_state = env->cfg.insn_state = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
12272 	if (!insn_state)
12273 		return -ENOMEM;
12274 
12275 	insn_stack = env->cfg.insn_stack = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
12276 	if (!insn_stack) {
12277 		kvfree(insn_state);
12278 		return -ENOMEM;
12279 	}
12280 
12281 	insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */
12282 	insn_stack[0] = 0; /* 0 is the first instruction */
12283 	env->cfg.cur_stack = 1;
12284 
12285 	while (env->cfg.cur_stack > 0) {
12286 		int t = insn_stack[env->cfg.cur_stack - 1];
12287 
12288 		ret = visit_insn(t, insn_cnt, env);
12289 		switch (ret) {
12290 		case DONE_EXPLORING:
12291 			insn_state[t] = EXPLORED;
12292 			env->cfg.cur_stack--;
12293 			break;
12294 		case KEEP_EXPLORING:
12295 			break;
12296 		default:
12297 			if (ret > 0) {
12298 				verbose(env, "visit_insn internal bug\n");
12299 				ret = -EFAULT;
12300 			}
12301 			goto err_free;
12302 		}
12303 	}
12304 
12305 	if (env->cfg.cur_stack < 0) {
12306 		verbose(env, "pop stack internal bug\n");
12307 		ret = -EFAULT;
12308 		goto err_free;
12309 	}
12310 
12311 	for (i = 0; i < insn_cnt; i++) {
12312 		if (insn_state[i] != EXPLORED) {
12313 			verbose(env, "unreachable insn %d\n", i);
12314 			ret = -EINVAL;
12315 			goto err_free;
12316 		}
12317 	}
12318 	ret = 0; /* cfg looks good */
12319 
12320 err_free:
12321 	kvfree(insn_state);
12322 	kvfree(insn_stack);
12323 	env->cfg.insn_state = env->cfg.insn_stack = NULL;
12324 	return ret;
12325 }
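
/* Example (illustrative): for a program without bpf_capable, a backwards
 * conditional jump such as
 *
 *   0: r0 = 0
 *   1: if r0 != 0 goto -1     // target is insn 1 itself
 *   2: exit
 *
 * is reported by push_insn() as "back-edge from insn 1 to 1", while an insn
 * that no edge ever reaches would be reported as unreachable by the loop
 * above.
 */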
12326 
12327 static int check_abnormal_return(struct bpf_verifier_env *env)
12328 {
12329 	int i;
12330 
12331 	for (i = 1; i < env->subprog_cnt; i++) {
12332 		if (env->subprog_info[i].has_ld_abs) {
12333 			verbose(env, "LD_ABS is not allowed in subprogs without BTF\n");
12334 			return -EINVAL;
12335 		}
12336 		if (env->subprog_info[i].has_tail_call) {
12337 			verbose(env, "tail_call is not allowed in subprogs without BTF\n");
12338 			return -EINVAL;
12339 		}
12340 	}
12341 	return 0;
12342 }
12343 
12344 /* The minimum supported BTF func info size */
12345 #define MIN_BPF_FUNCINFO_SIZE	8
12346 #define MAX_FUNCINFO_REC_SIZE	252
12347 
12348 static int check_btf_func(struct bpf_verifier_env *env,
12349 			  const union bpf_attr *attr,
12350 			  bpfptr_t uattr)
12351 {
12352 	const struct btf_type *type, *func_proto, *ret_type;
12353 	u32 i, nfuncs, urec_size, min_size;
12354 	u32 krec_size = sizeof(struct bpf_func_info);
12355 	struct bpf_func_info *krecord;
12356 	struct bpf_func_info_aux *info_aux = NULL;
12357 	struct bpf_prog *prog;
12358 	const struct btf *btf;
12359 	bpfptr_t urecord;
12360 	u32 prev_offset = 0;
12361 	bool scalar_return;
12362 	int ret = -ENOMEM;
12363 
12364 	nfuncs = attr->func_info_cnt;
12365 	if (!nfuncs) {
12366 		if (check_abnormal_return(env))
12367 			return -EINVAL;
12368 		return 0;
12369 	}
12370 
12371 	if (nfuncs != env->subprog_cnt) {
12372 		verbose(env, "number of funcs in func_info doesn't match number of subprogs\n");
12373 		return -EINVAL;
12374 	}
12375 
12376 	urec_size = attr->func_info_rec_size;
12377 	if (urec_size < MIN_BPF_FUNCINFO_SIZE ||
12378 	    urec_size > MAX_FUNCINFO_REC_SIZE ||
12379 	    urec_size % sizeof(u32)) {
12380 		verbose(env, "invalid func info rec size %u\n", urec_size);
12381 		return -EINVAL;
12382 	}
12383 
12384 	prog = env->prog;
12385 	btf = prog->aux->btf;
12386 
12387 	urecord = make_bpfptr(attr->func_info, uattr.is_kernel);
12388 	min_size = min_t(u32, krec_size, urec_size);
12389 
12390 	krecord = kvcalloc(nfuncs, krec_size, GFP_KERNEL | __GFP_NOWARN);
12391 	if (!krecord)
12392 		return -ENOMEM;
12393 	info_aux = kcalloc(nfuncs, sizeof(*info_aux), GFP_KERNEL | __GFP_NOWARN);
12394 	if (!info_aux)
12395 		goto err_free;
12396 
12397 	for (i = 0; i < nfuncs; i++) {
12398 		ret = bpf_check_uarg_tail_zero(urecord, krec_size, urec_size);
12399 		if (ret) {
12400 			if (ret == -E2BIG) {
12401 				verbose(env, "nonzero tailing record in func info");
12402 				/* set the size kernel expects so loader can zero
12403 				 * out the rest of the record.
12404 				 */
12405 				if (copy_to_bpfptr_offset(uattr,
12406 							  offsetof(union bpf_attr, func_info_rec_size),
12407 							  &min_size, sizeof(min_size)))
12408 					ret = -EFAULT;
12409 			}
12410 			goto err_free;
12411 		}
12412 
12413 		if (copy_from_bpfptr(&krecord[i], urecord, min_size)) {
12414 			ret = -EFAULT;
12415 			goto err_free;
12416 		}
12417 
12418 		/* check insn_off */
12419 		ret = -EINVAL;
12420 		if (i == 0) {
12421 			if (krecord[i].insn_off) {
12422 				verbose(env,
12423 					"nonzero insn_off %u for the first func info record",
12424 					krecord[i].insn_off);
12425 				goto err_free;
12426 			}
12427 		} else if (krecord[i].insn_off <= prev_offset) {
12428 			verbose(env,
12429 				"same or smaller insn offset (%u) than previous func info record (%u)",
12430 				krecord[i].insn_off, prev_offset);
12431 			goto err_free;
12432 		}
12433 
12434 		if (env->subprog_info[i].start != krecord[i].insn_off) {
12435 			verbose(env, "func_info BTF section doesn't match subprog layout in BPF program\n");
12436 			goto err_free;
12437 		}
12438 
12439 		/* check type_id */
12440 		type = btf_type_by_id(btf, krecord[i].type_id);
12441 		if (!type || !btf_type_is_func(type)) {
12442 			verbose(env, "invalid type id %d in func info",
12443 				krecord[i].type_id);
12444 			goto err_free;
12445 		}
12446 		info_aux[i].linkage = BTF_INFO_VLEN(type->info);
12447 
12448 		func_proto = btf_type_by_id(btf, type->type);
12449 		if (unlikely(!func_proto || !btf_type_is_func_proto(func_proto)))
12450 			/* btf_func_check() already verified it during BTF load */
12451 			goto err_free;
12452 		ret_type = btf_type_skip_modifiers(btf, func_proto->type, NULL);
12453 		scalar_return =
12454 			btf_type_is_small_int(ret_type) || btf_is_any_enum(ret_type);
12455 		if (i && !scalar_return && env->subprog_info[i].has_ld_abs) {
12456 			verbose(env, "LD_ABS is only allowed in functions that return 'int'.\n");
12457 			goto err_free;
12458 		}
12459 		if (i && !scalar_return && env->subprog_info[i].has_tail_call) {
12460 			verbose(env, "tail_call is only allowed in functions that return 'int'.\n");
12461 			goto err_free;
12462 		}
12463 
12464 		prev_offset = krecord[i].insn_off;
12465 		bpfptr_add(&urecord, urec_size);
12466 	}
12467 
12468 	prog->aux->func_info = krecord;
12469 	prog->aux->func_info_cnt = nfuncs;
12470 	prog->aux->func_info_aux = info_aux;
12471 	return 0;
12472 
12473 err_free:
12474 	kvfree(krecord);
12475 	kfree(info_aux);
12476 	return ret;
12477 }
12478 
12479 static void adjust_btf_func(struct bpf_verifier_env *env)
12480 {
12481 	struct bpf_prog_aux *aux = env->prog->aux;
12482 	int i;
12483 
12484 	if (!aux->func_info)
12485 		return;
12486 
12487 	for (i = 0; i < env->subprog_cnt; i++)
12488 		aux->func_info[i].insn_off = env->subprog_info[i].start;
12489 }
12490 
12491 #define MIN_BPF_LINEINFO_SIZE	offsetofend(struct bpf_line_info, line_col)
12492 #define MAX_LINEINFO_REC_SIZE	MAX_FUNCINFO_REC_SIZE
12493 
12494 static int check_btf_line(struct bpf_verifier_env *env,
12495 			  const union bpf_attr *attr,
12496 			  bpfptr_t uattr)
12497 {
12498 	u32 i, s, nr_linfo, ncopy, expected_size, rec_size, prev_offset = 0;
12499 	struct bpf_subprog_info *sub;
12500 	struct bpf_line_info *linfo;
12501 	struct bpf_prog *prog;
12502 	const struct btf *btf;
12503 	bpfptr_t ulinfo;
12504 	int err;
12505 
12506 	nr_linfo = attr->line_info_cnt;
12507 	if (!nr_linfo)
12508 		return 0;
12509 	if (nr_linfo > INT_MAX / sizeof(struct bpf_line_info))
12510 		return -EINVAL;
12511 
12512 	rec_size = attr->line_info_rec_size;
12513 	if (rec_size < MIN_BPF_LINEINFO_SIZE ||
12514 	    rec_size > MAX_LINEINFO_REC_SIZE ||
12515 	    rec_size & (sizeof(u32) - 1))
12516 		return -EINVAL;
12517 
12518 	/* Need to zero it in case userspace passes in
12519 	 * a smaller bpf_line_info object.
12520 	 */
12521 	linfo = kvcalloc(nr_linfo, sizeof(struct bpf_line_info),
12522 			 GFP_KERNEL | __GFP_NOWARN);
12523 	if (!linfo)
12524 		return -ENOMEM;
12525 
12526 	prog = env->prog;
12527 	btf = prog->aux->btf;
12528 
12529 	s = 0;
12530 	sub = env->subprog_info;
12531 	ulinfo = make_bpfptr(attr->line_info, uattr.is_kernel);
12532 	expected_size = sizeof(struct bpf_line_info);
12533 	ncopy = min_t(u32, expected_size, rec_size);
12534 	for (i = 0; i < nr_linfo; i++) {
12535 		err = bpf_check_uarg_tail_zero(ulinfo, expected_size, rec_size);
12536 		if (err) {
12537 			if (err == -E2BIG) {
12538 				verbose(env, "nonzero tailing record in line_info");
12539 				if (copy_to_bpfptr_offset(uattr,
12540 							  offsetof(union bpf_attr, line_info_rec_size),
12541 							  &expected_size, sizeof(expected_size)))
12542 					err = -EFAULT;
12543 			}
12544 			goto err_free;
12545 		}
12546 
12547 		if (copy_from_bpfptr(&linfo[i], ulinfo, ncopy)) {
12548 			err = -EFAULT;
12549 			goto err_free;
12550 		}
12551 
12552 		/*
12553 		 * Check insn_off to ensure
12554 		 * 1) strictly increasing AND
12555 		 * 2) bounded by prog->len
12556 		 *
12557 		 * The linfo[0].insn_off == 0 check logically falls into
12558 		 * the later "missing bpf_line_info for func..." case
12559 		 * because the first linfo[0].insn_off must also belong
12560 		 * to the first sub, and the first sub must have
12561 		 * subprog_info[0].start == 0.
12562 		 */
12563 		if ((i && linfo[i].insn_off <= prev_offset) ||
12564 		    linfo[i].insn_off >= prog->len) {
12565 			verbose(env, "Invalid line_info[%u].insn_off:%u (prev_offset:%u prog->len:%u)\n",
12566 				i, linfo[i].insn_off, prev_offset,
12567 				prog->len);
12568 			err = -EINVAL;
12569 			goto err_free;
12570 		}
12571 
12572 		if (!prog->insnsi[linfo[i].insn_off].code) {
12573 			verbose(env,
12574 				"Invalid insn code at line_info[%u].insn_off\n",
12575 				i);
12576 			err = -EINVAL;
12577 			goto err_free;
12578 		}
12579 
12580 		if (!btf_name_by_offset(btf, linfo[i].line_off) ||
12581 		    !btf_name_by_offset(btf, linfo[i].file_name_off)) {
12582 			verbose(env, "Invalid line_info[%u].line_off or .file_name_off\n", i);
12583 			err = -EINVAL;
12584 			goto err_free;
12585 		}
12586 
12587 		if (s != env->subprog_cnt) {
12588 			if (linfo[i].insn_off == sub[s].start) {
12589 				sub[s].linfo_idx = i;
12590 				s++;
12591 			} else if (sub[s].start < linfo[i].insn_off) {
12592 				verbose(env, "missing bpf_line_info for func#%u\n", s);
12593 				err = -EINVAL;
12594 				goto err_free;
12595 			}
12596 		}
12597 
12598 		prev_offset = linfo[i].insn_off;
12599 		bpfptr_add(&ulinfo, rec_size);
12600 	}
12601 
12602 	if (s != env->subprog_cnt) {
12603 		verbose(env, "missing bpf_line_info for %u funcs starting from func#%u\n",
12604 			env->subprog_cnt - s, s);
12605 		err = -EINVAL;
12606 		goto err_free;
12607 	}
12608 
12609 	prog->aux->linfo = linfo;
12610 	prog->aux->nr_linfo = nr_linfo;
12611 
12612 	return 0;
12613 
12614 err_free:
12615 	kvfree(linfo);
12616 	return err;
12617 }
12618 
12619 #define MIN_CORE_RELO_SIZE	sizeof(struct bpf_core_relo)
12620 #define MAX_CORE_RELO_SIZE	MAX_FUNCINFO_REC_SIZE
12621 
12622 static int check_core_relo(struct bpf_verifier_env *env,
12623 			   const union bpf_attr *attr,
12624 			   bpfptr_t uattr)
12625 {
12626 	u32 i, nr_core_relo, ncopy, expected_size, rec_size;
12627 	struct bpf_core_relo core_relo = {};
12628 	struct bpf_prog *prog = env->prog;
12629 	const struct btf *btf = prog->aux->btf;
12630 	struct bpf_core_ctx ctx = {
12631 		.log = &env->log,
12632 		.btf = btf,
12633 	};
12634 	bpfptr_t u_core_relo;
12635 	int err;
12636 
12637 	nr_core_relo = attr->core_relo_cnt;
12638 	if (!nr_core_relo)
12639 		return 0;
12640 	if (nr_core_relo > INT_MAX / sizeof(struct bpf_core_relo))
12641 		return -EINVAL;
12642 
12643 	rec_size = attr->core_relo_rec_size;
12644 	if (rec_size < MIN_CORE_RELO_SIZE ||
12645 	    rec_size > MAX_CORE_RELO_SIZE ||
12646 	    rec_size % sizeof(u32))
12647 		return -EINVAL;
12648 
12649 	u_core_relo = make_bpfptr(attr->core_relos, uattr.is_kernel);
12650 	expected_size = sizeof(struct bpf_core_relo);
12651 	ncopy = min_t(u32, expected_size, rec_size);
12652 
12653 	/* Unlike func_info and line_info, copy and apply each CO-RE
12654 	 * relocation record one at a time.
12655 	 */
12656 	for (i = 0; i < nr_core_relo; i++) {
12657 		/* future proofing when sizeof(bpf_core_relo) changes */
12658 		err = bpf_check_uarg_tail_zero(u_core_relo, expected_size, rec_size);
12659 		if (err) {
12660 			if (err == -E2BIG) {
12661 				verbose(env, "nonzero tailing record in core_relo");
12662 				if (copy_to_bpfptr_offset(uattr,
12663 							  offsetof(union bpf_attr, core_relo_rec_size),
12664 							  &expected_size, sizeof(expected_size)))
12665 					err = -EFAULT;
12666 			}
12667 			break;
12668 		}
12669 
12670 		if (copy_from_bpfptr(&core_relo, u_core_relo, ncopy)) {
12671 			err = -EFAULT;
12672 			break;
12673 		}
12674 
12675 		if (core_relo.insn_off % 8 || core_relo.insn_off / 8 >= prog->len) {
12676 			verbose(env, "Invalid core_relo[%u].insn_off:%u prog->len:%u\n",
12677 				i, core_relo.insn_off, prog->len);
12678 			err = -EINVAL;
12679 			break;
12680 		}
12681 
12682 		err = bpf_core_apply(&ctx, &core_relo, i,
12683 				     &prog->insnsi[core_relo.insn_off / 8]);
12684 		if (err)
12685 			break;
12686 		bpfptr_add(&u_core_relo, rec_size);
12687 	}
12688 	return err;
12689 }
12690 
12691 static int check_btf_info(struct bpf_verifier_env *env,
12692 			  const union bpf_attr *attr,
12693 			  bpfptr_t uattr)
12694 {
12695 	struct btf *btf;
12696 	int err;
12697 
12698 	if (!attr->func_info_cnt && !attr->line_info_cnt) {
12699 		if (check_abnormal_return(env))
12700 			return -EINVAL;
12701 		return 0;
12702 	}
12703 
12704 	btf = btf_get_by_fd(attr->prog_btf_fd);
12705 	if (IS_ERR(btf))
12706 		return PTR_ERR(btf);
12707 	if (btf_is_kernel(btf)) {
12708 		btf_put(btf);
12709 		return -EACCES;
12710 	}
12711 	env->prog->aux->btf = btf;
12712 
12713 	err = check_btf_func(env, attr, uattr);
12714 	if (err)
12715 		return err;
12716 
12717 	err = check_btf_line(env, attr, uattr);
12718 	if (err)
12719 		return err;
12720 
12721 	err = check_core_relo(env, attr, uattr);
12722 	if (err)
12723 		return err;
12724 
12725 	return 0;
12726 }
12727 
12728 /* check %cur's range satisfies %old's */
12729 static bool range_within(struct bpf_reg_state *old,
12730 			 struct bpf_reg_state *cur)
12731 {
12732 	return old->umin_value <= cur->umin_value &&
12733 	       old->umax_value >= cur->umax_value &&
12734 	       old->smin_value <= cur->smin_value &&
12735 	       old->smax_value >= cur->smax_value &&
12736 	       old->u32_min_value <= cur->u32_min_value &&
12737 	       old->u32_max_value >= cur->u32_max_value &&
12738 	       old->s32_min_value <= cur->s32_min_value &&
12739 	       old->s32_max_value >= cur->s32_max_value;
12740 }
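
/* Example (illustrative): an old scalar tracked as [0, 100] "covers" a current
 * scalar tracked as [10, 20] in every min/max dimension, so any access the old
 * range made safe is also safe for the narrower current range.
 */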
12741 
12742 /* If in the old state two registers had the same id, then they need to have
12743  * the same id in the new state as well.  But that id could be different from
12744  * the old state, so we need to track the mapping from old to new ids.
12745  * Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent
12746  * regs with old id 5 must also have new id 9 for the new state to be safe.  But
12747  * regs with a different old id could still have new id 9, we don't care about
12748  * that.
12749  * So we look through our idmap to see if this old id has been seen before.  If
12750  * so, we require the new id to match; otherwise, we add the id pair to the map.
12751  */
12752 static bool check_ids(u32 old_id, u32 cur_id, struct bpf_id_pair *idmap)
12753 {
12754 	unsigned int i;
12755 
12756 	for (i = 0; i < BPF_ID_MAP_SIZE; i++) {
12757 		if (!idmap[i].old) {
12758 			/* Reached an empty slot; haven't seen this id before */
12759 			idmap[i].old = old_id;
12760 			idmap[i].cur = cur_id;
12761 			return true;
12762 		}
12763 		if (idmap[i].old == old_id)
12764 			return idmap[i].cur == cur_id;
12765 	}
12766 	/* We ran out of idmap slots, which should be impossible */
12767 	WARN_ON_ONCE(1);
12768 	return false;
12769 }
12770 
12771 static void clean_func_state(struct bpf_verifier_env *env,
12772 			     struct bpf_func_state *st)
12773 {
12774 	enum bpf_reg_liveness live;
12775 	int i, j;
12776 
12777 	for (i = 0; i < BPF_REG_FP; i++) {
12778 		live = st->regs[i].live;
12779 		/* liveness must not touch this register anymore */
12780 		st->regs[i].live |= REG_LIVE_DONE;
12781 		if (!(live & REG_LIVE_READ))
12782 			/* since the register is unused, clear its state
12783 			 * to make further comparison simpler
12784 			 */
12785 			__mark_reg_not_init(env, &st->regs[i]);
12786 	}
12787 
12788 	for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) {
12789 		live = st->stack[i].spilled_ptr.live;
12790 		/* liveness must not touch this stack slot anymore */
12791 		st->stack[i].spilled_ptr.live |= REG_LIVE_DONE;
12792 		if (!(live & REG_LIVE_READ)) {
12793 			__mark_reg_not_init(env, &st->stack[i].spilled_ptr);
12794 			for (j = 0; j < BPF_REG_SIZE; j++)
12795 				st->stack[i].slot_type[j] = STACK_INVALID;
12796 		}
12797 	}
12798 }
12799 
12800 static void clean_verifier_state(struct bpf_verifier_env *env,
12801 				 struct bpf_verifier_state *st)
12802 {
12803 	int i;
12804 
12805 	if (st->frame[0]->regs[0].live & REG_LIVE_DONE)
12806 		/* all regs in this state in all frames were already marked */
12807 		return;
12808 
12809 	for (i = 0; i <= st->curframe; i++)
12810 		clean_func_state(env, st->frame[i]);
12811 }
12812 
12813 /* the parentage chains form a tree.
12814  * the verifier states are added to state lists at given insn and
12815  * pushed into state stack for future exploration.
12816  * when the verifier reaches bpf_exit insn some of the verifier states
12817  * stored in the state lists have their final liveness state already,
12818  * but a lot of states will get revised from liveness point of view when
12819  * the verifier explores other branches.
12820  * Example:
12821  * 1: r0 = 1
12822  * 2: if r1 == 100 goto pc+1
12823  * 3: r0 = 2
12824  * 4: exit
12825  * when the verifier reaches exit insn the register r0 in the state list of
12826  * insn 2 will be seen as !REG_LIVE_READ. Then the verifier pops the other_branch
12827  * of insn 2 and goes exploring further. At insn 4 it will walk the
12828  * parentage chain from insn 4 into insn 2 and will mark r0 as REG_LIVE_READ.
12829  *
12830  * Since the verifier pushes the branch states as it sees them while exploring
12831  * the program, walking the branch instruction for the second time means
12832  * that all states below this branch were already explored and
12833  * their final liveness marks are already propagated.
12834  * Hence when the verifier completes the search of state list in is_state_visited()
12835  * we can call this clean_live_states() function to mark all liveness states
12836  * as REG_LIVE_DONE to indicate that 'parent' pointers of 'struct bpf_reg_state'
12837  * will not be used.
12838  * This function also clears the registers and stack for states that !READ
12839  * to simplify state merging.
12840  *
12841  * An important note here is that walking the same branch instruction in the
12842  * callee doesn't mean that the states are DONE. The verifier also has to
12843  * compare the callsites.
12844  */
12845 static void clean_live_states(struct bpf_verifier_env *env, int insn,
12846 			      struct bpf_verifier_state *cur)
12847 {
12848 	struct bpf_verifier_state_list *sl;
12849 	int i;
12850 
12851 	sl = *explored_state(env, insn);
12852 	while (sl) {
12853 		if (sl->state.branches)
12854 			goto next;
12855 		if (sl->state.insn_idx != insn ||
12856 		    sl->state.curframe != cur->curframe)
12857 			goto next;
12858 		for (i = 0; i <= cur->curframe; i++)
12859 			if (sl->state.frame[i]->callsite != cur->frame[i]->callsite)
12860 				goto next;
12861 		clean_verifier_state(env, &sl->state);
12862 next:
12863 		sl = sl->next;
12864 	}
12865 }
12866 
12867 /* Returns true if (rold safe implies rcur safe) */
12868 static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold,
12869 		    struct bpf_reg_state *rcur, struct bpf_id_pair *idmap)
12870 {
12871 	bool equal;
12872 
12873 	if (!(rold->live & REG_LIVE_READ))
12874 		/* explored state didn't use this */
12875 		return true;
12876 
12877 	equal = memcmp(rold, rcur, offsetof(struct bpf_reg_state, parent)) == 0;
12878 
12879 	if (rold->type == PTR_TO_STACK)
12880 		/* two stack pointers are equal only if they're pointing to
12881 		 * the same stack frame, since fp-8 in foo != fp-8 in bar
12882 		 */
12883 		return equal && rold->frameno == rcur->frameno;
12884 
12885 	if (equal)
12886 		return true;
12887 
12888 	if (rold->type == NOT_INIT)
12889 		/* explored state can't have used this */
12890 		return true;
12891 	if (rcur->type == NOT_INIT)
12892 		return false;
12893 	switch (base_type(rold->type)) {
12894 	case SCALAR_VALUE:
12895 		if (env->explore_alu_limits)
12896 			return false;
12897 		if (rcur->type == SCALAR_VALUE) {
12898 			if (!rold->precise)
12899 				return true;
12900 			/* new val must satisfy old val knowledge */
12901 			return range_within(rold, rcur) &&
12902 			       tnum_in(rold->var_off, rcur->var_off);
12903 		} else {
12904 			/* We're trying to use a pointer in place of a scalar.
12905 			 * Even if the scalar was unbounded, this could lead to
12906 			 * pointer leaks because scalars are allowed to leak
12907 			 * while pointers are not. We could make this safe in
12908 			 * special cases if root is calling us, but it's
12909 			 * probably not worth the hassle.
12910 			 */
12911 			return false;
12912 		}
12913 	case PTR_TO_MAP_KEY:
12914 	case PTR_TO_MAP_VALUE:
12915 		/* a PTR_TO_MAP_VALUE could be safe to use as a
12916 		 * PTR_TO_MAP_VALUE_OR_NULL into the same map.
12917 		 * However, if the old PTR_TO_MAP_VALUE_OR_NULL then got NULL-
12918 		 * checked, doing so could have affected others with the same
12919 		 * id, and we can't check for that because we lost the id when
12920 		 * we converted to a PTR_TO_MAP_VALUE.
12921 		 */
12922 		if (type_may_be_null(rold->type)) {
12923 			if (!type_may_be_null(rcur->type))
12924 				return false;
12925 			if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)))
12926 				return false;
12927 			/* Check our ids match any regs they're supposed to */
12928 			return check_ids(rold->id, rcur->id, idmap);
12929 		}
12930 
12931 		/* If the new min/max/var_off satisfy the old ones and
12932 		 * everything else matches, we are OK.
12933 		 * 'id' is not compared, since it's only used for maps with
12934 		 * bpf_spin_lock inside map element and in such cases if
12935 		 * the rest of the prog is valid for one map element then
12936 		 * it's valid for all map elements regardless of the key
12937 		 * used in bpf_map_lookup()
12938 		 */
12939 		return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 &&
12940 		       range_within(rold, rcur) &&
12941 		       tnum_in(rold->var_off, rcur->var_off);
12942 	case PTR_TO_PACKET_META:
12943 	case PTR_TO_PACKET:
12944 		if (rcur->type != rold->type)
12945 			return false;
12946 		/* We must have at least as much range as the old ptr
12947 		 * did, so that any accesses which were safe before are
12948 		 * still safe.  This is true even if old range < old off,
12949 		 * since someone could have accessed through (ptr - k), or
12950 		 * even done ptr -= k in a register, to get a safe access.
12951 		 */
12952 		if (rold->range > rcur->range)
12953 			return false;
12954 		/* If the offsets don't match, we can't trust our alignment;
12955 		 * nor can we be sure that we won't fall out of range.
12956 		 */
12957 		if (rold->off != rcur->off)
12958 			return false;
12959 		/* id relations must be preserved */
12960 		if (rold->id && !check_ids(rold->id, rcur->id, idmap))
12961 			return false;
12962 		/* new val must satisfy old val knowledge */
12963 		return range_within(rold, rcur) &&
12964 		       tnum_in(rold->var_off, rcur->var_off);
12965 	case PTR_TO_CTX:
12966 	case CONST_PTR_TO_MAP:
12967 	case PTR_TO_PACKET_END:
12968 	case PTR_TO_FLOW_KEYS:
12969 	case PTR_TO_SOCKET:
12970 	case PTR_TO_SOCK_COMMON:
12971 	case PTR_TO_TCP_SOCK:
12972 	case PTR_TO_XDP_SOCK:
12973 		/* Only valid matches are exact, which memcmp() above
12974 		 * would have accepted
12975 		 */
12976 	default:
12977 		/* Don't know what's going on, just say it's not safe */
12978 		return false;
12979 	}
12980 
12981 	/* Shouldn't get here; if we do, say it's not safe */
12982 	WARN_ON_ONCE(1);
12983 	return false;
12984 }
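
/* For illustration: if a previously explored state reached this insn with a
 * precise r1 bounded to [0, 255] and verified successfully to bpf_exit, then a
 * new state arriving with r1 bounded to [16, 32] satisfies range_within() and
 * tnum_in() above, so regsafe() reports it as safe and the new path can be
 * pruned.
 */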
12985 
12986 static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old,
12987 		      struct bpf_func_state *cur, struct bpf_id_pair *idmap)
12988 {
12989 	int i, spi;
12990 
12991 	/* walk slots of the explored stack and ignore any additional
12992 	 * slots in the current stack, since explored(safe) state
12993 	 * didn't use them
12994 	 */
12995 	for (i = 0; i < old->allocated_stack; i++) {
12996 		spi = i / BPF_REG_SIZE;
12997 
12998 		if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ)) {
12999 			i += BPF_REG_SIZE - 1;
13000 			/* explored state didn't use this */
13001 			continue;
13002 		}
13003 
13004 		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID)
13005 			continue;
13006 
13007 		/* explored stack has more populated slots than current stack
13008 		 * and these slots were used
13009 		 */
13010 		if (i >= cur->allocated_stack)
13011 			return false;
13012 
13013 		/* if the old state was safe with misc data in the stack,
13014 		 * it will be safe with a zero-initialized stack.
13015 		 * The opposite is not true.
13016 		 */
13017 		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC &&
13018 		    cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO)
13019 			continue;
13020 		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] !=
13021 		    cur->stack[spi].slot_type[i % BPF_REG_SIZE])
13022 			/* Ex: old explored (safe) state has STACK_SPILL in
13023 			 * this stack slot, but current has STACK_MISC ->
13024 			 * these verifier states are not equivalent,
13025 			 * return false to continue verification of this path
13026 			 */
13027 			return false;
13028 		if (i % BPF_REG_SIZE != BPF_REG_SIZE - 1)
13029 			continue;
13030 		if (!is_spilled_reg(&old->stack[spi]))
13031 			continue;
13032 		if (!regsafe(env, &old->stack[spi].spilled_ptr,
13033 			     &cur->stack[spi].spilled_ptr, idmap))
13034 			/* when explored and current stack slot are both storing
13035 			 * spilled registers, check that the stored pointer types
13036 			 * are the same as well.
13037 			 * Ex: explored safe path could have stored
13038 			 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8}
13039 			 * but current path has stored:
13040 			 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16}
13041 			 * such verifier states are not equivalent.
13042 			 * return false to continue verification of this path
13043 			 */
13044 			return false;
13045 	}
13046 	return true;
13047 }
13048 
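/* Pruning is only safe if both states hold exactly the same set of acquired
 * references (e.g. sockets obtained via bpf_sk_lookup_tcp()), since the
 * continuation will release precisely the references recorded in the
 * explored state.
 */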
13049 static bool refsafe(struct bpf_func_state *old, struct bpf_func_state *cur)
13050 {
13051 	if (old->acquired_refs != cur->acquired_refs)
13052 		return false;
13053 	return !memcmp(old->refs, cur->refs,
13054 		       sizeof(*old->refs) * old->acquired_refs);
13055 }
13056 
13057 /* compare two verifier states
13058  *
13059  * all states stored in state_list are known to be valid, since
13060  * verifier reached 'bpf_exit' instruction through them
13061  *
13062  * this function is called when the verifier explores different branches of
13063  * execution popped from the state stack. If it sees an old state that has
13064  * a more strict register state and a more strict stack state, then this execution
13065  * branch doesn't need to be explored further, since verifier already
13066  * concluded that more strict state leads to valid finish.
13067  *
13068  * Therefore two states are equivalent if register state is more conservative
13069  * and explored stack state is more conservative than the current one.
13070  * Example:
13071  *       explored                   current
13072  * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC)
13073  * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC)
13074  *
13075  * In other words, if the current stack state (the one being explored) has more
13076  * valid slots than the old one that already passed validation, it means
13077  * the verifier can stop exploring and conclude that the current state is valid too
13078  *
13079  * Similarly with registers. If explored state has register type as invalid
13080  * whereas register type in current state is meaningful, it means that
13081  * the current state will reach 'bpf_exit' instruction safely
13082  */
13083 static bool func_states_equal(struct bpf_verifier_env *env, struct bpf_func_state *old,
13084 			      struct bpf_func_state *cur)
13085 {
13086 	int i;
13087 
13088 	memset(env->idmap_scratch, 0, sizeof(env->idmap_scratch));
13089 	for (i = 0; i < MAX_BPF_REG; i++)
13090 		if (!regsafe(env, &old->regs[i], &cur->regs[i],
13091 			     env->idmap_scratch))
13092 			return false;
13093 
13094 	if (!stacksafe(env, old, cur, env->idmap_scratch))
13095 		return false;
13096 
13097 	if (!refsafe(old, cur))
13098 		return false;
13099 
13100 	return true;
13101 }
13102 
13103 static bool states_equal(struct bpf_verifier_env *env,
13104 			 struct bpf_verifier_state *old,
13105 			 struct bpf_verifier_state *cur)
13106 {
13107 	int i;
13108 
13109 	if (old->curframe != cur->curframe)
13110 		return false;
13111 
13112 	/* Verification state from speculative execution simulation
13113 	 * must never prune a non-speculative execution one.
13114 	 */
13115 	if (old->speculative && !cur->speculative)
13116 		return false;
13117 
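	/* The currently held bpf_spin_lock (if any) must match as well;
	 * pruning a path that holds a different lock (or no lock) than the
	 * explored one could let an unbalanced lock/unlock sequence through.
	 */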
13118 	if (old->active_lock.ptr != cur->active_lock.ptr ||
13119 	    old->active_lock.id != cur->active_lock.id)
13120 		return false;
13121 
13122 	if (old->active_rcu_lock != cur->active_rcu_lock)
13123 		return false;
13124 
13125 	/* for states to be equal callsites have to be the same
13126 	 * and all frame states need to be equivalent
13127 	 */
13128 	for (i = 0; i <= old->curframe; i++) {
13129 		if (old->frame[i]->callsite != cur->frame[i]->callsite)
13130 			return false;
13131 		if (!func_states_equal(env, old->frame[i], cur->frame[i]))
13132 			return false;
13133 	}
13134 	return true;
13135 }
13136 
13137 /* Return 0 if no propagation happened. Return negative error code if error
13138  * happened. Otherwise, return the propagated bit.
13139  */
13140 static int propagate_liveness_reg(struct bpf_verifier_env *env,
13141 				  struct bpf_reg_state *reg,
13142 				  struct bpf_reg_state *parent_reg)
13143 {
13144 	u8 parent_flag = parent_reg->live & REG_LIVE_READ;
13145 	u8 flag = reg->live & REG_LIVE_READ;
13146 	int err;
13147 
13148 	/* When we get here, the read flags of PARENT_REG or REG could be any of
13149 	 * REG_LIVE_READ64, REG_LIVE_READ32, REG_LIVE_NONE. There is no need
13150 	 * for propagation if PARENT_REG already has the strongest REG_LIVE_READ64.
13151 	 */
13152 	if (parent_flag == REG_LIVE_READ64 ||
13153 	    /* Or if there is no read flag from REG. */
13154 	    !flag ||
13155 	    /* Or if the read flag from REG is the same as PARENT_REG. */
13156 	    parent_flag == flag)
13157 		return 0;
13158 
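	/* Remaining cases: PARENT_REG has no read mark yet, or it only has
	 * REG_LIVE_READ32 while REG was read as a 64-bit value. Propagate the
	 * stronger mark up the parentage chain.
	 */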
13159 	err = mark_reg_read(env, reg, parent_reg, flag);
13160 	if (err)
13161 		return err;
13162 
13163 	return flag;
13164 }
13165 
13166 /* A write screens off any subsequent reads; but write marks come from the
13167  * straight-line code between a state and its parent.  When we arrive at an
13168  * equivalent state (jump target or such) we didn't arrive by the straight-line
13169  * code, so read marks in the state must propagate to the parent regardless
13170  * of the state's write marks. That's what 'parent == state->parent' comparison
13171  * in mark_reg_read() is for.
13172  */
13173 static int propagate_liveness(struct bpf_verifier_env *env,
13174 			      const struct bpf_verifier_state *vstate,
13175 			      struct bpf_verifier_state *vparent)
13176 {
13177 	struct bpf_reg_state *state_reg, *parent_reg;
13178 	struct bpf_func_state *state, *parent;
13179 	int i, frame, err = 0;
13180 
13181 	if (vparent->curframe != vstate->curframe) {
13182 		WARN(1, "propagate_live: parent frame %d current frame %d\n",
13183 		     vparent->curframe, vstate->curframe);
13184 		return -EFAULT;
13185 	}
13186 	/* Propagate read liveness of registers... */
13187 	BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
13188 	for (frame = 0; frame <= vstate->curframe; frame++) {
13189 		parent = vparent->frame[frame];
13190 		state = vstate->frame[frame];
13191 		parent_reg = parent->regs;
13192 		state_reg = state->regs;
13193 		/* We don't need to worry about FP liveness, it's read-only */
13194 		for (i = frame < vstate->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++) {
13195 			err = propagate_liveness_reg(env, &state_reg[i],
13196 						     &parent_reg[i]);
13197 			if (err < 0)
13198 				return err;
13199 			if (err == REG_LIVE_READ64)
13200 				mark_insn_zext(env, &parent_reg[i]);
13201 		}
13202 
13203 		/* Propagate stack slots. */
13204 		for (i = 0; i < state->allocated_stack / BPF_REG_SIZE &&
13205 			    i < parent->allocated_stack / BPF_REG_SIZE; i++) {
13206 			parent_reg = &parent->stack[i].spilled_ptr;
13207 			state_reg = &state->stack[i].spilled_ptr;
13208 			err = propagate_liveness_reg(env, state_reg,
13209 						     parent_reg);
13210 			if (err < 0)
13211 				return err;
13212 		}
13213 	}
13214 	return 0;
13215 }
13216 
13217 /* find precise scalars in the previous equivalent state and
13218  * propagate them into the current state
13219  */
13220 static int propagate_precision(struct bpf_verifier_env *env,
13221 			       const struct bpf_verifier_state *old)
13222 {
13223 	struct bpf_reg_state *state_reg;
13224 	struct bpf_func_state *state;
13225 	int i, err = 0, fr;
13226 
13227 	for (fr = old->curframe; fr >= 0; fr--) {
13228 		state = old->frame[fr];
13229 		state_reg = state->regs;
13230 		for (i = 0; i < BPF_REG_FP; i++, state_reg++) {
13231 			if (state_reg->type != SCALAR_VALUE ||
13232 			    !state_reg->precise)
13233 				continue;
13234 			if (env->log.level & BPF_LOG_LEVEL2)
13235 				verbose(env, "frame %d: propagating r%d\n", fr, i);
13236 			err = mark_chain_precision_frame(env, fr, i);
13237 			if (err < 0)
13238 				return err;
13239 		}
13240 
13241 		for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
13242 			if (!is_spilled_reg(&state->stack[i]))
13243 				continue;
13244 			state_reg = &state->stack[i].spilled_ptr;
13245 			if (state_reg->type != SCALAR_VALUE ||
13246 			    !state_reg->precise)
13247 				continue;
13248 			if (env->log.level & BPF_LOG_LEVEL2)
13249 				verbose(env, "frame %d: propagating fp%d\n",
13250 					fr, (-i - 1) * BPF_REG_SIZE);
13251 			err = mark_chain_precision_stack_frame(env, fr, i);
13252 			if (err < 0)
13253 				return err;
13254 		}
13255 	}
13256 	return 0;
13257 }
13258 
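/* Heuristic used together with states_equal(): if every register in the
 * current frame compares equal to the explored state (ignoring the
 * liveness/parentage fields laid out from 'parent' onwards), the program is
 * likely going around a loop without making forward progress.
 */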
13259 static bool states_maybe_looping(struct bpf_verifier_state *old,
13260 				 struct bpf_verifier_state *cur)
13261 {
13262 	struct bpf_func_state *fold, *fcur;
13263 	int i, fr = cur->curframe;
13264 
13265 	if (old->curframe != fr)
13266 		return false;
13267 
13268 	fold = old->frame[fr];
13269 	fcur = cur->frame[fr];
13270 	for (i = 0; i < MAX_BPF_REG; i++)
13271 		if (memcmp(&fold->regs[i], &fcur->regs[i],
13272 			   offsetof(struct bpf_reg_state, parent)))
13273 			return false;
13274 	return true;
13275 }
13276 
13277 
13278 static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
13279 {
13280 	struct bpf_verifier_state_list *new_sl;
13281 	struct bpf_verifier_state_list *sl, **pprev;
13282 	struct bpf_verifier_state *cur = env->cur_state, *new;
13283 	int i, j, err, states_cnt = 0;
13284 	bool add_new_state = env->test_state_freq ? true : false;
13285 
13286 	cur->last_insn_idx = env->prev_insn_idx;
13287 	if (!env->insn_aux_data[insn_idx].prune_point)
13288 		/* this 'insn_idx' instruction wasn't marked, so we will not
13289 		 * be doing state search here
13290 		 */
13291 		return 0;
13292 
13293 	/* bpf progs typically have a pruning point every 4 instructions
13294 	 * http://vger.kernel.org/bpfconf2019.html#session-1
13295 	 * Do not add new state for future pruning if the verifier hasn't seen
13296 	 * at least 2 jumps and at least 8 instructions.
13297 	 * This heuristic helps decrease 'total_states' and 'peak_states' metrics.
13298 	 * In tests that amounts to up to a 50% reduction in total verifier
13299 	 * memory consumption and a 20% verifier time speedup.
13300 	 */
13301 	if (env->jmps_processed - env->prev_jmps_processed >= 2 &&
13302 	    env->insn_processed - env->prev_insn_processed >= 8)
13303 		add_new_state = true;
13304 
13305 	pprev = explored_state(env, insn_idx);
13306 	sl = *pprev;
13307 
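	/* clean_live_states() invalidates registers and stack slots that fully
	 * explored states at this insn never read, which widens those states
	 * and makes the states_equal() checks below more likely to succeed.
	 */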
13308 	clean_live_states(env, insn_idx, cur);
13309 
13310 	while (sl) {
13311 		states_cnt++;
13312 		if (sl->state.insn_idx != insn_idx)
13313 			goto next;
13314 
13315 		if (sl->state.branches) {
13316 			struct bpf_func_state *frame = sl->state.frame[sl->state.curframe];
13317 
13318 			if (frame->in_async_callback_fn &&
13319 			    frame->async_entry_cnt != cur->frame[cur->curframe]->async_entry_cnt) {
13320 				/* Different async_entry_cnt means that the verifier is
13321 				 * processing another entry into async callback.
13322 				 * Seeing the same state is not an indication of infinite
13323 				 * loop or infinite recursion.
13324 				 * But finding the same state doesn't mean that it's safe
13325 				 * to stop processing the current state. The previous state
13326 				 * hasn't yet reached bpf_exit, since state.branches > 0.
13327 				 * Checking in_async_callback_fn alone is not enough either,
13328 				 * since the verifier still needs to catch infinite loops
13329 				 * inside async callbacks.
13330 				 */
13331 			} else if (states_maybe_looping(&sl->state, cur) &&
13332 				   states_equal(env, &sl->state, cur)) {
13333 				verbose_linfo(env, insn_idx, "; ");
13334 				verbose(env, "infinite loop detected at insn %d\n", insn_idx);
13335 				return -EINVAL;
13336 			}
13337 			/* if the verifier is processing a loop, avoid adding new state
13338 			 * too often, since different loop iterations have distinct
13339 			 * states and may not help future pruning.
13340 			 * This threshold shouldn't be too low to make sure that
13341 			 * a loop with large bound will be rejected quickly.
13342 			 * The most abusive loop will be:
13343 			 * r1 += 1
13344 			 * if r1 < 1000000 goto pc-2
13345 			 * 1M insn_processed limit / 100 == 10k peak states.
13346 			 * This threshold shouldn't be too high either, since states
13347 			 * at the end of the loop are likely to be useful in pruning.
13348 			 */
13349 			if (env->jmps_processed - env->prev_jmps_processed < 20 &&
13350 			    env->insn_processed - env->prev_insn_processed < 100)
13351 				add_new_state = false;
13352 			goto miss;
13353 		}
13354 		if (states_equal(env, &sl->state, cur)) {
13355 			sl->hit_cnt++;
13356 			/* reached equivalent register/stack state,
13357 			 * prune the search.
13358 			 * Registers read by the continuation are read by us.
13359 			 * If we have any write marks in env->cur_state, they
13360 			 * will prevent corresponding reads in the continuation
13361 			 * from reaching our parent (an explored_state).  Our
13362 			 * own state will get the read marks recorded, but
13363 			 * they'll be immediately forgotten as we're pruning
13364 			 * this state and will pop a new one.
13365 			 */
13366 			err = propagate_liveness(env, &sl->state, cur);
13367 
13368 			/* if previous state reached the exit with precision and
13369 			 * current state is equivalent to it (except precision marks),
13370 			 * the precision needs to be propagated back into
13371 			 * the current state.
13372 			 */
13373 			err = err ? : push_jmp_history(env, cur);
13374 			err = err ? : propagate_precision(env, &sl->state);
13375 			if (err)
13376 				return err;
13377 			return 1;
13378 		}
13379 miss:
13380 		/* when a new state is not going to be added, do not increase the miss count.
13381 		 * Otherwise several loop iterations will remove the state
13382 		 * recorded earlier. The goal of these heuristics is to have
13383 		 * states from some iterations of the loop (some in the beginning
13384 		 * and some at the end) to help pruning.
13385 		 */
13386 		if (add_new_state)
13387 			sl->miss_cnt++;
13388 		/* heuristic to determine whether this state is beneficial
13389 		 * to keep checking from state equivalence point of view.
13390 		 * Higher numbers increase max_states_per_insn and verification time,
13391 		 * but do not meaningfully decrease insn_processed.
13392 		 */
13393 		if (sl->miss_cnt > sl->hit_cnt * 3 + 3) {
13394 			/* the state is unlikely to be useful. Remove it to
13395 			 * speed up verification
13396 			 */
13397 			*pprev = sl->next;
13398 			if (sl->state.frame[0]->regs[0].live & REG_LIVE_DONE) {
13399 				u32 br = sl->state.branches;
13400 
13401 				WARN_ONCE(br,
13402 					  "BUG live_done but branches_to_explore %d\n",
13403 					  br);
13404 				free_verifier_state(&sl->state, false);
13405 				kfree(sl);
13406 				env->peak_states--;
13407 			} else {
13408 				/* cannot free this state, since parentage chain may
13409 				 * walk it later. Add it to the free_list instead to
13410 				 * be freed at the end of verification
13411 				 */
13412 				sl->next = env->free_list;
13413 				env->free_list = sl;
13414 			}
13415 			sl = *pprev;
13416 			continue;
13417 		}
13418 next:
13419 		pprev = &sl->next;
13420 		sl = *pprev;
13421 	}
13422 
13423 	if (env->max_states_per_insn < states_cnt)
13424 		env->max_states_per_insn = states_cnt;
13425 
13426 	if (!env->bpf_capable && states_cnt > BPF_COMPLEXITY_LIMIT_STATES)
13427 		return push_jmp_history(env, cur);
13428 
13429 	if (!add_new_state)
13430 		return push_jmp_history(env, cur);
13431 
13432 	/* There were no equivalent states, remember the current one.
13433 	 * Technically the current state is not proven to be safe yet,
13434 	 * but it will either reach the outermost bpf_exit (which means it's safe)
13435 	 * or it will be rejected. When there are no loops the verifier won't be
13436 	 * seeing this tuple (frame[0].callsite, frame[1].callsite, .. insn_idx)
13437 	 * again on the way to bpf_exit.
13438 	 * When looping the sl->state.branches will be > 0 and this state
13439 	 * will not be considered for equivalence until branches == 0.
13440 	 */
13441 	new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL);
13442 	if (!new_sl)
13443 		return -ENOMEM;
13444 	env->total_states++;
13445 	env->peak_states++;
13446 	env->prev_jmps_processed = env->jmps_processed;
13447 	env->prev_insn_processed = env->insn_processed;
13448 
13449 	/* forget precise markings we inherited, see __mark_chain_precision */
13450 	if (env->bpf_capable)
13451 		mark_all_scalars_imprecise(env, cur);
13452 
13453 	/* add new state to the head of linked list */
13454 	new = &new_sl->state;
13455 	err = copy_verifier_state(new, cur);
13456 	if (err) {
13457 		free_verifier_state(new, false);
13458 		kfree(new_sl);
13459 		return err;
13460 	}
13461 	new->insn_idx = insn_idx;
13462 	WARN_ONCE(new->branches != 1,
13463 		  "BUG is_state_visited:branches_to_explore=%d insn %d\n", new->branches, insn_idx);
13464 
13465 	cur->parent = new;
13466 	cur->first_insn_idx = insn_idx;
13467 	clear_jmp_history(cur);
13468 	new_sl->next = *explored_state(env, insn_idx);
13469 	*explored_state(env, insn_idx) = new_sl;
13470 	/* connect new state to parentage chain. Current frame needs all
13471 	 * registers connected. Only r6 - r9 of the callers are alive (pushed
13472 	 * to the stack implicitly by JITs) so in callers' frames connect just
13473 	 * r6 - r9 as an optimization. Callers will have r1 - r5 connected to
13474 	 * the state of the call instruction (with WRITTEN set), and r0 comes
13475 	 * from callee with its full parentage chain, anyway.
13476 	 */
13477 	/* clear write marks in current state: the writes we did are not writes
13478 	 * our child did, so they don't screen off its reads from us.
13479 	 * (There are no read marks in current state, because reads always mark
13480 	 * their parent and current state never has children yet.  Only
13481 	 * explored_states can get read marks.)
13482 	 */
13483 	for (j = 0; j <= cur->curframe; j++) {
13484 		for (i = j < cur->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++)
13485 			cur->frame[j]->regs[i].parent = &new->frame[j]->regs[i];
13486 		for (i = 0; i < BPF_REG_FP; i++)
13487 			cur->frame[j]->regs[i].live = REG_LIVE_NONE;
13488 	}
13489 
13490 	/* all stack frames are accessible from callee, clear them all */
13491 	for (j = 0; j <= cur->curframe; j++) {
13492 		struct bpf_func_state *frame = cur->frame[j];
13493 		struct bpf_func_state *newframe = new->frame[j];
13494 
13495 		for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++) {
13496 			frame->stack[i].spilled_ptr.live = REG_LIVE_NONE;
13497 			frame->stack[i].spilled_ptr.parent =
13498 						&newframe->stack[i].spilled_ptr;
13499 		}
13500 	}
13501 	return 0;
13502 }
13503 
13504 /* Return true if it's OK to have the same insn return a different type. */
13505 static bool reg_type_mismatch_ok(enum bpf_reg_type type)
13506 {
13507 	switch (base_type(type)) {
13508 	case PTR_TO_CTX:
13509 	case PTR_TO_SOCKET:
13510 	case PTR_TO_SOCK_COMMON:
13511 	case PTR_TO_TCP_SOCK:
13512 	case PTR_TO_XDP_SOCK:
13513 	case PTR_TO_BTF_ID:
13514 		return false;
13515 	default:
13516 		return true;
13517 	}
13518 }
13519 
13520 /* If an instruction was previously used with particular pointer types, then we
13521  * need to be careful to avoid cases such as the below, where it may be ok
13522  * for one branch to access the pointer, but not ok for the other branch:
13523  *
13524  * R1 = sock_ptr
13525  * goto X;
13526  * ...
13527  * R1 = some_other_valid_ptr;
13528  * goto X;
13529  * ...
13530  * R2 = *(u32 *)(R1 + 0);
13531  */
13532 static bool reg_type_mismatch(enum bpf_reg_type src, enum bpf_reg_type prev)
13533 {
13534 	return src != prev && (!reg_type_mismatch_ok(src) ||
13535 			       !reg_type_mismatch_ok(prev));
13536 }
13537 
13538 static int do_check(struct bpf_verifier_env *env)
13539 {
13540 	bool pop_log = !(env->log.level & BPF_LOG_LEVEL2);
13541 	struct bpf_verifier_state *state = env->cur_state;
13542 	struct bpf_insn *insns = env->prog->insnsi;
13543 	struct bpf_reg_state *regs;
13544 	int insn_cnt = env->prog->len;
13545 	bool do_print_state = false;
13546 	int prev_insn_idx = -1;
13547 
13548 	for (;;) {
13549 		struct bpf_insn *insn;
13550 		u8 class;
13551 		int err;
13552 
13553 		env->prev_insn_idx = prev_insn_idx;
13554 		if (env->insn_idx >= insn_cnt) {
13555 			verbose(env, "invalid insn idx %d insn_cnt %d\n",
13556 				env->insn_idx, insn_cnt);
13557 			return -EFAULT;
13558 		}
13559 
13560 		insn = &insns[env->insn_idx];
13561 		class = BPF_CLASS(insn->code);
13562 
13563 		if (++env->insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) {
13564 			verbose(env,
13565 				"BPF program is too large. Processed %d insn\n",
13566 				env->insn_processed);
13567 			return -E2BIG;
13568 		}
13569 
13570 		err = is_state_visited(env, env->insn_idx);
13571 		if (err < 0)
13572 			return err;
13573 		if (err == 1) {
13574 			/* found equivalent state, can prune the search */
13575 			if (env->log.level & BPF_LOG_LEVEL) {
13576 				if (do_print_state)
13577 					verbose(env, "\nfrom %d to %d%s: safe\n",
13578 						env->prev_insn_idx, env->insn_idx,
13579 						env->cur_state->speculative ?
13580 						" (speculative execution)" : "");
13581 				else
13582 					verbose(env, "%d: safe\n", env->insn_idx);
13583 			}
13584 			goto process_bpf_exit;
13585 		}
13586 
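		/* Verifying a large program can take a while; bail out if a
		 * signal is pending and yield the CPU when rescheduling is
		 * needed.
		 */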
13587 		if (signal_pending(current))
13588 			return -EAGAIN;
13589 
13590 		if (need_resched())
13591 			cond_resched();
13592 
13593 		if (env->log.level & BPF_LOG_LEVEL2 && do_print_state) {
13594 			verbose(env, "\nfrom %d to %d%s:",
13595 				env->prev_insn_idx, env->insn_idx,
13596 				env->cur_state->speculative ?
13597 				" (speculative execution)" : "");
13598 			print_verifier_state(env, state->frame[state->curframe], true);
13599 			do_print_state = false;
13600 		}
13601 
13602 		if (env->log.level & BPF_LOG_LEVEL) {
13603 			const struct bpf_insn_cbs cbs = {
13604 				.cb_call	= disasm_kfunc_name,
13605 				.cb_print	= verbose,
13606 				.private_data	= env,
13607 			};
13608 
13609 			if (verifier_state_scratched(env))
13610 				print_insn_state(env, state->frame[state->curframe]);
13611 
13612 			verbose_linfo(env, env->insn_idx, "; ");
13613 			env->prev_log_len = env->log.len_used;
13614 			verbose(env, "%d: ", env->insn_idx);
13615 			print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
13616 			env->prev_insn_print_len = env->log.len_used - env->prev_log_len;
13617 			env->prev_log_len = env->log.len_used;
13618 		}
13619 
13620 		if (bpf_prog_is_dev_bound(env->prog->aux)) {
13621 			err = bpf_prog_offload_verify_insn(env, env->insn_idx,
13622 							   env->prev_insn_idx);
13623 			if (err)
13624 				return err;
13625 		}
13626 
13627 		regs = cur_regs(env);
13628 		sanitize_mark_insn_seen(env);
13629 		prev_insn_idx = env->insn_idx;
13630 
13631 		if (class == BPF_ALU || class == BPF_ALU64) {
13632 			err = check_alu_op(env, insn);
13633 			if (err)
13634 				return err;
13635 
13636 		} else if (class == BPF_LDX) {
13637 			enum bpf_reg_type *prev_src_type, src_reg_type;
13638 
13639 			/* check for reserved fields is already done */
13640 
13641 			/* check src operand */
13642 			err = check_reg_arg(env, insn->src_reg, SRC_OP);
13643 			if (err)
13644 				return err;
13645 
13646 			err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
13647 			if (err)
13648 				return err;
13649 
13650 			src_reg_type = regs[insn->src_reg].type;
13651 
13652 			/* check that memory (src_reg + off) is readable,
13653 			 * the state of dst_reg will be updated by this func
13654 			 */
13655 			err = check_mem_access(env, env->insn_idx, insn->src_reg,
13656 					       insn->off, BPF_SIZE(insn->code),
13657 					       BPF_READ, insn->dst_reg, false);
13658 			if (err)
13659 				return err;
13660 
13661 			prev_src_type = &env->insn_aux_data[env->insn_idx].ptr_type;
13662 
13663 			if (*prev_src_type == NOT_INIT) {
13664 				/* saw a valid insn
13665 				 * dst_reg = *(u32 *)(src_reg + off)
13666 				 * save type to validate intersecting paths
13667 				 */
13668 				*prev_src_type = src_reg_type;
13669 
13670 			} else if (reg_type_mismatch(src_reg_type, *prev_src_type)) {
13671 				/* Abuser program is trying to use the same insn
13672 				 * dst_reg = *(u32*) (src_reg + off)
13673 				 * with different pointer types:
13674 				 * src_reg == ctx in one branch and
13675 				 * src_reg == stack|map in some other branch.
13676 				 * Reject it.
13677 				 */
13678 				verbose(env, "same insn cannot be used with different pointers\n");
13679 				return -EINVAL;
13680 			}
13681 
13682 		} else if (class == BPF_STX) {
13683 			enum bpf_reg_type *prev_dst_type, dst_reg_type;
13684 
13685 			if (BPF_MODE(insn->code) == BPF_ATOMIC) {
13686 				err = check_atomic(env, env->insn_idx, insn);
13687 				if (err)
13688 					return err;
13689 				env->insn_idx++;
13690 				continue;
13691 			}
13692 
13693 			if (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0) {
13694 				verbose(env, "BPF_STX uses reserved fields\n");
13695 				return -EINVAL;
13696 			}
13697 
13698 			/* check src1 operand */
13699 			err = check_reg_arg(env, insn->src_reg, SRC_OP);
13700 			if (err)
13701 				return err;
13702 			/* check src2 operand */
13703 			err = check_reg_arg(env, insn->dst_reg, SRC_OP);
13704 			if (err)
13705 				return err;
13706 
13707 			dst_reg_type = regs[insn->dst_reg].type;
13708 
13709 			/* check that memory (dst_reg + off) is writeable */
13710 			err = check_mem_access(env, env->insn_idx, insn->dst_reg,
13711 					       insn->off, BPF_SIZE(insn->code),
13712 					       BPF_WRITE, insn->src_reg, false);
13713 			if (err)
13714 				return err;
13715 
13716 			prev_dst_type = &env->insn_aux_data[env->insn_idx].ptr_type;
13717 
13718 			if (*prev_dst_type == NOT_INIT) {
13719 				*prev_dst_type = dst_reg_type;
13720 			} else if (reg_type_mismatch(dst_reg_type, *prev_dst_type)) {
13721 				verbose(env, "same insn cannot be used with different pointers\n");
13722 				return -EINVAL;
13723 			}
13724 
13725 		} else if (class == BPF_ST) {
13726 			if (BPF_MODE(insn->code) != BPF_MEM ||
13727 			    insn->src_reg != BPF_REG_0) {
13728 				verbose(env, "BPF_ST uses reserved fields\n");
13729 				return -EINVAL;
13730 			}
13731 			/* check src operand */
13732 			err = check_reg_arg(env, insn->dst_reg, SRC_OP);
13733 			if (err)
13734 				return err;
13735 
13736 			if (is_ctx_reg(env, insn->dst_reg)) {
13737 				verbose(env, "BPF_ST stores into R%d %s is not allowed\n",
13738 					insn->dst_reg,
13739 					reg_type_str(env, reg_state(env, insn->dst_reg)->type));
13740 				return -EACCES;
13741 			}
13742 
13743 			/* check that memory (dst_reg + off) is writeable */
13744 			err = check_mem_access(env, env->insn_idx, insn->dst_reg,
13745 					       insn->off, BPF_SIZE(insn->code),
13746 					       BPF_WRITE, -1, false);
13747 			if (err)
13748 				return err;
13749 
13750 		} else if (class == BPF_JMP || class == BPF_JMP32) {
13751 			u8 opcode = BPF_OP(insn->code);
13752 
13753 			env->jmps_processed++;
13754 			if (opcode == BPF_CALL) {
13755 				if (BPF_SRC(insn->code) != BPF_K ||
13756 				    (insn->src_reg != BPF_PSEUDO_KFUNC_CALL
13757 				     && insn->off != 0) ||
13758 				    (insn->src_reg != BPF_REG_0 &&
13759 				     insn->src_reg != BPF_PSEUDO_CALL &&
13760 				     insn->src_reg != BPF_PSEUDO_KFUNC_CALL) ||
13761 				    insn->dst_reg != BPF_REG_0 ||
13762 				    class == BPF_JMP32) {
13763 					verbose(env, "BPF_CALL uses reserved fields\n");
13764 					return -EINVAL;
13765 				}
13766 
13767 				if (env->cur_state->active_lock.ptr) {
13768 					if ((insn->src_reg == BPF_REG_0 && insn->imm != BPF_FUNC_spin_unlock) ||
13769 					    (insn->src_reg == BPF_PSEUDO_CALL) ||
13770 					    (insn->src_reg == BPF_PSEUDO_KFUNC_CALL &&
13771 					     (insn->off != 0 || !is_bpf_list_api_kfunc(insn->imm)))) {
13772 						verbose(env, "function calls are not allowed while holding a lock\n");
13773 						return -EINVAL;
13774 					}
13775 				}
13776 				if (insn->src_reg == BPF_PSEUDO_CALL)
13777 					err = check_func_call(env, insn, &env->insn_idx);
13778 				else if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL)
13779 					err = check_kfunc_call(env, insn, &env->insn_idx);
13780 				else
13781 					err = check_helper_call(env, insn, &env->insn_idx);
13782 				if (err)
13783 					return err;
13784 			} else if (opcode == BPF_JA) {
13785 				if (BPF_SRC(insn->code) != BPF_K ||
13786 				    insn->imm != 0 ||
13787 				    insn->src_reg != BPF_REG_0 ||
13788 				    insn->dst_reg != BPF_REG_0 ||
13789 				    class == BPF_JMP32) {
13790 					verbose(env, "BPF_JA uses reserved fields\n");
13791 					return -EINVAL;
13792 				}
13793 
13794 				env->insn_idx += insn->off + 1;
13795 				continue;
13796 
13797 			} else if (opcode == BPF_EXIT) {
13798 				if (BPF_SRC(insn->code) != BPF_K ||
13799 				    insn->imm != 0 ||
13800 				    insn->src_reg != BPF_REG_0 ||
13801 				    insn->dst_reg != BPF_REG_0 ||
13802 				    class == BPF_JMP32) {
13803 					verbose(env, "BPF_EXIT uses reserved fields\n");
13804 					return -EINVAL;
13805 				}
13806 
13807 				if (env->cur_state->active_lock.ptr) {
13808 					verbose(env, "bpf_spin_unlock is missing\n");
13809 					return -EINVAL;
13810 				}
13811 
13812 				if (env->cur_state->active_rcu_lock) {
13813 					verbose(env, "bpf_rcu_read_unlock is missing\n");
13814 					return -EINVAL;
13815 				}
13816 
13817 				/* We must do check_reference_leak here before
13818 				 * prepare_func_exit to handle the case when
13819 				 * state->curframe > 0: it may be a callback
13820 				 * function, whose reference_state must
13821 				 * match the caller's reference state when it exits.
13822 				 */
13823 				err = check_reference_leak(env);
13824 				if (err)
13825 					return err;
13826 
13827 				if (state->curframe) {
13828 					/* exit from nested function */
13829 					err = prepare_func_exit(env, &env->insn_idx);
13830 					if (err)
13831 						return err;
13832 					do_print_state = true;
13833 					continue;
13834 				}
13835 
13836 				err = check_return_code(env);
13837 				if (err)
13838 					return err;
13839 process_bpf_exit:
13840 				mark_verifier_state_scratched(env);
13841 				update_branch_counts(env, env->cur_state);
13842 				err = pop_stack(env, &prev_insn_idx,
13843 						&env->insn_idx, pop_log);
13844 				if (err < 0) {
13845 					if (err != -ENOENT)
13846 						return err;
13847 					break;
13848 				} else {
13849 					do_print_state = true;
13850 					continue;
13851 				}
13852 			} else {
13853 				err = check_cond_jmp_op(env, insn, &env->insn_idx);
13854 				if (err)
13855 					return err;
13856 			}
13857 		} else if (class == BPF_LD) {
13858 			u8 mode = BPF_MODE(insn->code);
13859 
13860 			if (mode == BPF_ABS || mode == BPF_IND) {
13861 				err = check_ld_abs(env, insn);
13862 				if (err)
13863 					return err;
13864 
13865 			} else if (mode == BPF_IMM) {
13866 				err = check_ld_imm(env, insn);
13867 				if (err)
13868 					return err;
13869 
13870 				env->insn_idx++;
13871 				sanitize_mark_insn_seen(env);
13872 			} else {
13873 				verbose(env, "invalid BPF_LD mode\n");
13874 				return -EINVAL;
13875 			}
13876 		} else {
13877 			verbose(env, "unknown insn class %d\n", class);
13878 			return -EINVAL;
13879 		}
13880 
13881 		env->insn_idx++;
13882 	}
13883 
13884 	return 0;
13885 }
13886 
13887 static int find_btf_percpu_datasec(struct btf *btf)
13888 {
13889 	const struct btf_type *t;
13890 	const char *tname;
13891 	int i, n;
13892 
13893 	/*
13894 	 * Both vmlinux and module each have their own ".data..percpu"
13895 	 * DATASECs in BTF. So for module's case, we need to skip vmlinux BTF
13896 	 * types to look at only module's own BTF types.
13897 	 */
13898 	n = btf_nr_types(btf);
13899 	if (btf_is_module(btf))
13900 		i = btf_nr_types(btf_vmlinux);
13901 	else
13902 		i = 1;
13903 
13904 	for (; i < n; i++) {
13905 		t = btf_type_by_id(btf, i);
13906 		if (BTF_INFO_KIND(t->info) != BTF_KIND_DATASEC)
13907 			continue;
13908 
13909 		tname = btf_name_by_offset(btf, t->name_off);
13910 		if (!strcmp(tname, ".data..percpu"))
13911 			return i;
13912 	}
13913 
13914 	return -ENOENT;
13915 }
13916 
13917 /* replace pseudo btf_id with kernel symbol address */
13918 static int check_pseudo_btf_id(struct bpf_verifier_env *env,
13919 			       struct bpf_insn *insn,
13920 			       struct bpf_insn_aux_data *aux)
13921 {
13922 	const struct btf_var_secinfo *vsi;
13923 	const struct btf_type *datasec;
13924 	struct btf_mod_pair *btf_mod;
13925 	const struct btf_type *t;
13926 	const char *sym_name;
13927 	bool percpu = false;
13928 	u32 type, id = insn->imm;
13929 	struct btf *btf;
13930 	s32 datasec_id;
13931 	u64 addr;
13932 	int i, btf_fd, err;
13933 
13934 	btf_fd = insn[1].imm;
13935 	if (btf_fd) {
13936 		btf = btf_get_by_fd(btf_fd);
13937 		if (IS_ERR(btf)) {
13938 			verbose(env, "invalid module BTF object FD specified.\n");
13939 			return -EINVAL;
13940 		}
13941 	} else {
13942 		if (!btf_vmlinux) {
13943 			verbose(env, "kernel is missing BTF, make sure CONFIG_DEBUG_INFO_BTF=y is specified in Kconfig.\n");
13944 			return -EINVAL;
13945 		}
13946 		btf = btf_vmlinux;
13947 		btf_get(btf);
13948 	}
13949 
13950 	t = btf_type_by_id(btf, id);
13951 	if (!t) {
13952 		verbose(env, "ldimm64 insn specifies invalid btf_id %d.\n", id);
13953 		err = -ENOENT;
13954 		goto err_put;
13955 	}
13956 
13957 	if (!btf_type_is_var(t)) {
13958 		verbose(env, "pseudo btf_id %d in ldimm64 isn't KIND_VAR.\n", id);
13959 		err = -EINVAL;
13960 		goto err_put;
13961 	}
13962 
13963 	sym_name = btf_name_by_offset(btf, t->name_off);
13964 	addr = kallsyms_lookup_name(sym_name);
13965 	if (!addr) {
13966 		verbose(env, "ldimm64 failed to find the address for kernel symbol '%s'.\n",
13967 			sym_name);
13968 		err = -ENOENT;
13969 		goto err_put;
13970 	}
13971 
13972 	datasec_id = find_btf_percpu_datasec(btf);
13973 	if (datasec_id > 0) {
13974 		datasec = btf_type_by_id(btf, datasec_id);
13975 		for_each_vsi(i, datasec, vsi) {
13976 			if (vsi->type == id) {
13977 				percpu = true;
13978 				break;
13979 			}
13980 		}
13981 	}
13982 
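	/* Patch the resolved kernel symbol address into the ldimm64 pair:
	 * low 32 bits into insn[0].imm, high 32 bits into insn[1].imm.
	 */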
13983 	insn[0].imm = (u32)addr;
13984 	insn[1].imm = addr >> 32;
13985 
13986 	type = t->type;
13987 	t = btf_type_skip_modifiers(btf, type, NULL);
13988 	if (percpu) {
13989 		aux->btf_var.reg_type = PTR_TO_BTF_ID | MEM_PERCPU;
13990 		aux->btf_var.btf = btf;
13991 		aux->btf_var.btf_id = type;
13992 	} else if (!btf_type_is_struct(t)) {
13993 		const struct btf_type *ret;
13994 		const char *tname;
13995 		u32 tsize;
13996 
13997 		/* resolve the type size of ksym. */
13998 		ret = btf_resolve_size(btf, t, &tsize);
13999 		if (IS_ERR(ret)) {
14000 			tname = btf_name_by_offset(btf, t->name_off);
14001 			verbose(env, "ldimm64 unable to resolve the size of type '%s': %ld\n",
14002 				tname, PTR_ERR(ret));
14003 			err = -EINVAL;
14004 			goto err_put;
14005 		}
14006 		aux->btf_var.reg_type = PTR_TO_MEM | MEM_RDONLY;
14007 		aux->btf_var.mem_size = tsize;
14008 	} else {
14009 		aux->btf_var.reg_type = PTR_TO_BTF_ID;
14010 		aux->btf_var.btf = btf;
14011 		aux->btf_var.btf_id = type;
14012 	}
14013 
14014 	/* check whether we recorded this BTF (and maybe module) already */
14015 	for (i = 0; i < env->used_btf_cnt; i++) {
14016 		if (env->used_btfs[i].btf == btf) {
14017 			btf_put(btf);
14018 			return 0;
14019 		}
14020 	}
14021 
14022 	if (env->used_btf_cnt >= MAX_USED_BTFS) {
14023 		err = -E2BIG;
14024 		goto err_put;
14025 	}
14026 
14027 	btf_mod = &env->used_btfs[env->used_btf_cnt];
14028 	btf_mod->btf = btf;
14029 	btf_mod->module = NULL;
14030 
14031 	/* if we reference variables from kernel module, bump its refcount */
14032 	if (btf_is_module(btf)) {
14033 		btf_mod->module = btf_try_get_module(btf);
14034 		if (!btf_mod->module) {
14035 			err = -ENXIO;
14036 			goto err_put;
14037 		}
14038 	}
14039 
14040 	env->used_btf_cnt++;
14041 
14042 	return 0;
14043 err_put:
14044 	btf_put(btf);
14045 	return err;
14046 }
14047 
14048 static bool is_tracing_prog_type(enum bpf_prog_type type)
14049 {
14050 	switch (type) {
14051 	case BPF_PROG_TYPE_KPROBE:
14052 	case BPF_PROG_TYPE_TRACEPOINT:
14053 	case BPF_PROG_TYPE_PERF_EVENT:
14054 	case BPF_PROG_TYPE_RAW_TRACEPOINT:
14055 	case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
14056 		return true;
14057 	default:
14058 		return false;
14059 	}
14060 }
14061 
14062 static int check_map_prog_compatibility(struct bpf_verifier_env *env,
14063 					struct bpf_map *map,
14064 					struct bpf_prog *prog)
14065 
14066 {
14067 	enum bpf_prog_type prog_type = resolve_prog_type(prog);
14068 
14069 	if (btf_record_has_field(map->record, BPF_LIST_HEAD)) {
14070 		if (is_tracing_prog_type(prog_type)) {
14071 			verbose(env, "tracing progs cannot use bpf_list_head yet\n");
14072 			return -EINVAL;
14073 		}
14074 	}
14075 
14076 	if (btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
14077 		if (prog_type == BPF_PROG_TYPE_SOCKET_FILTER) {
14078 			verbose(env, "socket filter progs cannot use bpf_spin_lock yet\n");
14079 			return -EINVAL;
14080 		}
14081 
14082 		if (is_tracing_prog_type(prog_type)) {
14083 			verbose(env, "tracing progs cannot use bpf_spin_lock yet\n");
14084 			return -EINVAL;
14085 		}
14086 
14087 		if (prog->aux->sleepable) {
14088 			verbose(env, "sleepable progs cannot use bpf_spin_lock yet\n");
14089 			return -EINVAL;
14090 		}
14091 	}
14092 
14093 	if (btf_record_has_field(map->record, BPF_TIMER)) {
14094 		if (is_tracing_prog_type(prog_type)) {
14095 			verbose(env, "tracing progs cannot use bpf_timer yet\n");
14096 			return -EINVAL;
14097 		}
14098 	}
14099 
14100 	if ((bpf_prog_is_dev_bound(prog->aux) || bpf_map_is_dev_bound(map)) &&
14101 	    !bpf_offload_prog_map_match(prog, map)) {
14102 		verbose(env, "offload device mismatch between prog and map\n");
14103 		return -EINVAL;
14104 	}
14105 
14106 	if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
14107 		verbose(env, "bpf_struct_ops map cannot be used in prog\n");
14108 		return -EINVAL;
14109 	}
14110 
14111 	if (prog->aux->sleepable)
14112 		switch (map->map_type) {
14113 		case BPF_MAP_TYPE_HASH:
14114 		case BPF_MAP_TYPE_LRU_HASH:
14115 		case BPF_MAP_TYPE_ARRAY:
14116 		case BPF_MAP_TYPE_PERCPU_HASH:
14117 		case BPF_MAP_TYPE_PERCPU_ARRAY:
14118 		case BPF_MAP_TYPE_LRU_PERCPU_HASH:
14119 		case BPF_MAP_TYPE_ARRAY_OF_MAPS:
14120 		case BPF_MAP_TYPE_HASH_OF_MAPS:
14121 		case BPF_MAP_TYPE_RINGBUF:
14122 		case BPF_MAP_TYPE_USER_RINGBUF:
14123 		case BPF_MAP_TYPE_INODE_STORAGE:
14124 		case BPF_MAP_TYPE_SK_STORAGE:
14125 		case BPF_MAP_TYPE_TASK_STORAGE:
14126 			break;
14127 		default:
14128 			verbose(env,
14129 				"Sleepable programs can only use array, hash, and ringbuf maps\n");
14130 			return -EINVAL;
14131 		}
14132 
14133 	return 0;
14134 }
14135 
14136 static bool bpf_map_is_cgroup_storage(struct bpf_map *map)
14137 {
14138 	return (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE ||
14139 		map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE);
14140 }
14141 
14142 /* find and rewrite pseudo imm in ld_imm64 instructions:
14143  *
14144  * 1. if it accesses map FD, replace it with actual map pointer.
14145  * 2. if it accesses btf_id of a VAR, replace it with pointer to the var.
14146  *
14147  * NOTE: btf_vmlinux is required for converting pseudo btf_id.
14148  */
14149 static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
14150 {
14151 	struct bpf_insn *insn = env->prog->insnsi;
14152 	int insn_cnt = env->prog->len;
14153 	int i, j, err;
14154 
14155 	err = bpf_prog_calc_tag(env->prog);
14156 	if (err)
14157 		return err;
14158 
14159 	for (i = 0; i < insn_cnt; i++, insn++) {
14160 		if (BPF_CLASS(insn->code) == BPF_LDX &&
14161 		    (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) {
14162 			verbose(env, "BPF_LDX uses reserved fields\n");
14163 			return -EINVAL;
14164 		}
14165 
14166 		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) {
14167 			struct bpf_insn_aux_data *aux;
14168 			struct bpf_map *map;
14169 			struct fd f;
14170 			u64 addr;
14171 			u32 fd;
14172 
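			/* ld_imm64 spans two insns; the second one only carries
			 * the upper 32 bits of the immediate (or an fd/offset
			 * for the pseudo variants), all other fields must be 0.
			 */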
14173 			if (i == insn_cnt - 1 || insn[1].code != 0 ||
14174 			    insn[1].dst_reg != 0 || insn[1].src_reg != 0 ||
14175 			    insn[1].off != 0) {
14176 				verbose(env, "invalid bpf_ld_imm64 insn\n");
14177 				return -EINVAL;
14178 			}
14179 
14180 			if (insn[0].src_reg == 0)
14181 				/* valid generic load 64-bit imm */
14182 				goto next_insn;
14183 
14184 			if (insn[0].src_reg == BPF_PSEUDO_BTF_ID) {
14185 				aux = &env->insn_aux_data[i];
14186 				err = check_pseudo_btf_id(env, insn, aux);
14187 				if (err)
14188 					return err;
14189 				goto next_insn;
14190 			}
14191 
14192 			if (insn[0].src_reg == BPF_PSEUDO_FUNC) {
14193 				aux = &env->insn_aux_data[i];
14194 				aux->ptr_type = PTR_TO_FUNC;
14195 				goto next_insn;
14196 			}
14197 
14198 			/* In final convert_pseudo_ld_imm64() step, this is
14199 			 * converted into regular 64-bit imm load insn.
14200 			 */
14201 			switch (insn[0].src_reg) {
14202 			case BPF_PSEUDO_MAP_VALUE:
14203 			case BPF_PSEUDO_MAP_IDX_VALUE:
14204 				break;
14205 			case BPF_PSEUDO_MAP_FD:
14206 			case BPF_PSEUDO_MAP_IDX:
14207 				if (insn[1].imm == 0)
14208 					break;
14209 				fallthrough;
14210 			default:
14211 				verbose(env, "unrecognized bpf_ld_imm64 insn\n");
14212 				return -EINVAL;
14213 			}
14214 
14215 			switch (insn[0].src_reg) {
14216 			case BPF_PSEUDO_MAP_IDX_VALUE:
14217 			case BPF_PSEUDO_MAP_IDX:
14218 				if (bpfptr_is_null(env->fd_array)) {
14219 					verbose(env, "fd_idx without fd_array is invalid\n");
14220 					return -EPROTO;
14221 				}
14222 				if (copy_from_bpfptr_offset(&fd, env->fd_array,
14223 							    insn[0].imm * sizeof(fd),
14224 							    sizeof(fd)))
14225 					return -EFAULT;
14226 				break;
14227 			default:
14228 				fd = insn[0].imm;
14229 				break;
14230 			}
14231 
14232 			f = fdget(fd);
14233 			map = __bpf_map_get(f);
14234 			if (IS_ERR(map)) {
14235 				verbose(env, "fd %d is not pointing to valid bpf_map\n",
14236 					insn[0].imm);
14237 				return PTR_ERR(map);
14238 			}
14239 
14240 			err = check_map_prog_compatibility(env, map, env->prog);
14241 			if (err) {
14242 				fdput(f);
14243 				return err;
14244 			}
14245 
14246 			aux = &env->insn_aux_data[i];
14247 			if (insn[0].src_reg == BPF_PSEUDO_MAP_FD ||
14248 			    insn[0].src_reg == BPF_PSEUDO_MAP_IDX) {
14249 				addr = (unsigned long)map;
14250 			} else {
14251 				u32 off = insn[1].imm;
14252 
14253 				if (off >= BPF_MAX_VAR_OFF) {
14254 					verbose(env, "direct value offset of %u is not allowed\n", off);
14255 					fdput(f);
14256 					return -EINVAL;
14257 				}
14258 
14259 				if (!map->ops->map_direct_value_addr) {
14260 					verbose(env, "no direct value access support for this map type\n");
14261 					fdput(f);
14262 					return -EINVAL;
14263 				}
14264 
14265 				err = map->ops->map_direct_value_addr(map, &addr, off);
14266 				if (err) {
14267 					verbose(env, "invalid access to map value pointer, value_size=%u off=%u\n",
14268 						map->value_size, off);
14269 					fdput(f);
14270 					return err;
14271 				}
14272 
14273 				aux->map_off = off;
14274 				addr += off;
14275 			}
14276 
14277 			insn[0].imm = (u32)addr;
14278 			insn[1].imm = addr >> 32;
14279 
14280 			/* check whether we recorded this map already */
14281 			for (j = 0; j < env->used_map_cnt; j++) {
14282 				if (env->used_maps[j] == map) {
14283 					aux->map_index = j;
14284 					fdput(f);
14285 					goto next_insn;
14286 				}
14287 			}
14288 
14289 			if (env->used_map_cnt >= MAX_USED_MAPS) {
14290 				fdput(f);
14291 				return -E2BIG;
14292 			}
14293 
14294 			/* hold the map. If the program is rejected by verifier,
14295 			 * the map will be released by release_maps() or it
14296 			 * will be used by the valid program until it's unloaded
14297 			 * and all maps are released in free_used_maps()
14298 			 */
14299 			bpf_map_inc(map);
14300 
14301 			aux->map_index = env->used_map_cnt;
14302 			env->used_maps[env->used_map_cnt++] = map;
14303 
14304 			if (bpf_map_is_cgroup_storage(map) &&
14305 			    bpf_cgroup_storage_assign(env->prog->aux, map)) {
14306 				verbose(env, "only one cgroup storage of each type is allowed\n");
14307 				fdput(f);
14308 				return -EBUSY;
14309 			}
14310 
14311 			fdput(f);
14312 next_insn:
14313 			insn++;
14314 			i++;
14315 			continue;
14316 		}
14317 
14318 		/* Basic sanity check before we invest more work here. */
14319 		if (!bpf_opcode_in_insntable(insn->code)) {
14320 			verbose(env, "unknown opcode %02x\n", insn->code);
14321 			return -EINVAL;
14322 		}
14323 	}
14324 
14325 	/* now all pseudo BPF_LD_IMM64 instructions load valid
14326 	 * 'struct bpf_map *' into a register instead of user map_fd.
14327 	 * These pointers will be used later by verifier to validate map access.
14328 	 */
14329 	return 0;
14330 }
14331 
14332 /* drop refcnt of maps used by the rejected program */
14333 static void release_maps(struct bpf_verifier_env *env)
14334 {
14335 	__bpf_free_used_maps(env->prog->aux, env->used_maps,
14336 			     env->used_map_cnt);
14337 }
14338 
14339 /* drop refcnt of maps used by the rejected program */
14340 static void release_btfs(struct bpf_verifier_env *env)
14341 {
14342 	__bpf_free_used_btfs(env->prog->aux, env->used_btfs,
14343 			     env->used_btf_cnt);
14344 }
14345 
14346 /* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */
14347 static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env)
14348 {
14349 	struct bpf_insn *insn = env->prog->insnsi;
14350 	int insn_cnt = env->prog->len;
14351 	int i;
14352 
14353 	for (i = 0; i < insn_cnt; i++, insn++) {
14354 		if (insn->code != (BPF_LD | BPF_IMM | BPF_DW))
14355 			continue;
14356 		if (insn->src_reg == BPF_PSEUDO_FUNC)
14357 			continue;
14358 		insn->src_reg = 0;
14359 	}
14360 }
14361 
14362 /* single env->prog->insnsi[off] instruction was replaced with the range
14363  * insnsi[off, off + cnt).  Adjust corresponding insn_aux_data by copying
14364  * [0, off) and [off, end) to new locations, so the patched range stays zero
14365  */
14366 static void adjust_insn_aux_data(struct bpf_verifier_env *env,
14367 				 struct bpf_insn_aux_data *new_data,
14368 				 struct bpf_prog *new_prog, u32 off, u32 cnt)
14369 {
14370 	struct bpf_insn_aux_data *old_data = env->insn_aux_data;
14371 	struct bpf_insn *insn = new_prog->insnsi;
14372 	u32 old_seen = old_data[off].seen;
14373 	u32 prog_len;
14374 	int i;
14375 
14376 	/* aux info at OFF always needs adjustment, no matter whether the fast path
14377 	 * (cnt == 1) is taken or not. There is no guarantee that the INSN at OFF is
14378 	 * the original insn of the old prog.
14379 	 */
14380 	old_data[off].zext_dst = insn_has_def32(env, insn + off + cnt - 1);
14381 
14382 	if (cnt == 1)
14383 		return;
14384 	prog_len = new_prog->len;
14385 
14386 	memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
14387 	memcpy(new_data + off + cnt - 1, old_data + off,
14388 	       sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
14389 	for (i = off; i < off + cnt - 1; i++) {
14390 		/* Expand insni[off]'s seen count to the patched range. */
14391 		new_data[i].seen = old_seen;
14392 		new_data[i].zext_dst = insn_has_def32(env, insn + i);
14393 	}
14394 	env->insn_aux_data = new_data;
14395 	vfree(old_data);
14396 }
14397 
14398 static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len)
14399 {
14400 	int i;
14401 
14402 	if (len == 1)
14403 		return;
14404 	/* NOTE: fake 'exit' subprog should be updated as well. */
14405 	for (i = 0; i <= env->subprog_cnt; i++) {
14406 		if (env->subprog_info[i].start <= off)
14407 			continue;
14408 		env->subprog_info[i].start += len - 1;
14409 	}
14410 }
14411 
14412 static void adjust_poke_descs(struct bpf_prog *prog, u32 off, u32 len)
14413 {
14414 	struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab;
14415 	int i, sz = prog->aux->size_poke_tab;
14416 	struct bpf_jit_poke_descriptor *desc;
14417 
14418 	for (i = 0; i < sz; i++) {
14419 		desc = &tab[i];
14420 		if (desc->insn_idx <= off)
14421 			continue;
14422 		desc->insn_idx += len - 1;
14423 	}
14424 }
14425 
14426 static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off,
14427 					    const struct bpf_insn *patch, u32 len)
14428 {
14429 	struct bpf_prog *new_prog;
14430 	struct bpf_insn_aux_data *new_data = NULL;
14431 
14432 	if (len > 1) {
14433 		new_data = vzalloc(array_size(env->prog->len + len - 1,
14434 					      sizeof(struct bpf_insn_aux_data)));
14435 		if (!new_data)
14436 			return NULL;
14437 	}
14438 
14439 	new_prog = bpf_patch_insn_single(env->prog, off, patch, len);
14440 	if (IS_ERR(new_prog)) {
14441 		if (PTR_ERR(new_prog) == -ERANGE)
14442 			verbose(env,
14443 				"insn %d cannot be patched due to 16-bit range\n",
14444 				env->insn_aux_data[off].orig_idx);
14445 		vfree(new_data);
14446 		return NULL;
14447 	}
14448 	adjust_insn_aux_data(env, new_data, new_prog, off, len);
14449 	adjust_subprog_starts(env, off, len);
14450 	adjust_poke_descs(new_prog, off, len);
14451 	return new_prog;
14452 }
14453 
14454 static int adjust_subprog_starts_after_remove(struct bpf_verifier_env *env,
14455 					      u32 off, u32 cnt)
14456 {
14457 	int i, j;
14458 
14459 	/* find first prog starting at or after off (first to remove) */
14460 	for (i = 0; i < env->subprog_cnt; i++)
14461 		if (env->subprog_info[i].start >= off)
14462 			break;
14463 	/* find first prog starting at or after off + cnt (first to stay) */
14464 	for (j = i; j < env->subprog_cnt; j++)
14465 		if (env->subprog_info[j].start >= off + cnt)
14466 			break;
14467 	/* if j doesn't start exactly at off + cnt, we are just removing
14468 	 * the front of previous prog
14469 	 */
14470 	if (env->subprog_info[j].start != off + cnt)
14471 		j--;
14472 
14473 	if (j > i) {
14474 		struct bpf_prog_aux *aux = env->prog->aux;
14475 		int move;
14476 
14477 		/* move fake 'exit' subprog as well */
14478 		move = env->subprog_cnt + 1 - j;
14479 
14480 		memmove(env->subprog_info + i,
14481 			env->subprog_info + j,
14482 			sizeof(*env->subprog_info) * move);
14483 		env->subprog_cnt -= j - i;
14484 
14485 		/* remove func_info */
14486 		if (aux->func_info) {
14487 			move = aux->func_info_cnt - j;
14488 
14489 			memmove(aux->func_info + i,
14490 				aux->func_info + j,
14491 				sizeof(*aux->func_info) * move);
14492 			aux->func_info_cnt -= j - i;
14493 			/* func_info->insn_off is set after all code rewrites,
14494 			 * in adjust_btf_func() - no need to adjust
14495 			 */
14496 		}
14497 	} else {
14498 		/* convert i from "first prog to remove" to "first to adjust" */
14499 		if (env->subprog_info[i].start == off)
14500 			i++;
14501 	}
14502 
14503 	/* update fake 'exit' subprog as well */
14504 	for (; i <= env->subprog_cnt; i++)
14505 		env->subprog_info[i].start -= cnt;
14506 
14507 	return 0;
14508 }
14509 
14510 static int bpf_adj_linfo_after_remove(struct bpf_verifier_env *env, u32 off,
14511 				      u32 cnt)
14512 {
14513 	struct bpf_prog *prog = env->prog;
14514 	u32 i, l_off, l_cnt, nr_linfo;
14515 	struct bpf_line_info *linfo;
14516 
14517 	nr_linfo = prog->aux->nr_linfo;
14518 	if (!nr_linfo)
14519 		return 0;
14520 
14521 	linfo = prog->aux->linfo;
14522 
14523 	/* find first line info to remove, count lines to be removed */
14524 	for (i = 0; i < nr_linfo; i++)
14525 		if (linfo[i].insn_off >= off)
14526 			break;
14527 
14528 	l_off = i;
14529 	l_cnt = 0;
14530 	for (; i < nr_linfo; i++)
14531 		if (linfo[i].insn_off < off + cnt)
14532 			l_cnt++;
14533 		else
14534 			break;
14535 
14536 	/* If the first live insn doesn't match the first live linfo, it needs to
14537 	 * "inherit" the last removed linfo.  prog is already modified, so prog->len == off
14538 	 * means there are no live instructions after (the tail of the program was removed).
14539 	 */
14540 	if (prog->len != off && l_cnt &&
14541 	    (i == nr_linfo || linfo[i].insn_off != off + cnt)) {
14542 		l_cnt--;
14543 		linfo[--i].insn_off = off + cnt;
14544 	}
14545 
14546 	/* remove the line info which refer to the removed instructions */
14547 	if (l_cnt) {
14548 		memmove(linfo + l_off, linfo + i,
14549 			sizeof(*linfo) * (nr_linfo - i));
14550 
14551 		prog->aux->nr_linfo -= l_cnt;
14552 		nr_linfo = prog->aux->nr_linfo;
14553 	}
14554 
14555 	/* pull all linfo[i].insn_off >= off + cnt in by cnt */
14556 	for (i = l_off; i < nr_linfo; i++)
14557 		linfo[i].insn_off -= cnt;
14558 
14559 	/* fix up all subprogs (incl. 'exit') which start >= off */
14560 	for (i = 0; i <= env->subprog_cnt; i++)
14561 		if (env->subprog_info[i].linfo_idx > l_off) {
14562 			/* program may have started in the removed region but
14563 			 * may not be fully removed
14564 			 */
14565 			if (env->subprog_info[i].linfo_idx >= l_off + l_cnt)
14566 				env->subprog_info[i].linfo_idx -= l_cnt;
14567 			else
14568 				env->subprog_info[i].linfo_idx = l_off;
14569 		}
14570 
14571 	return 0;
14572 }
14573 
14574 static int verifier_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt)
14575 {
14576 	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
14577 	unsigned int orig_prog_len = env->prog->len;
14578 	int err;
14579 
14580 	if (bpf_prog_is_dev_bound(env->prog->aux))
14581 		bpf_prog_offload_remove_insns(env, off, cnt);
14582 
14583 	err = bpf_remove_insns(env->prog, off, cnt);
14584 	if (err)
14585 		return err;
14586 
14587 	err = adjust_subprog_starts_after_remove(env, off, cnt);
14588 	if (err)
14589 		return err;
14590 
14591 	err = bpf_adj_linfo_after_remove(env, off, cnt);
14592 	if (err)
14593 		return err;
14594 
14595 	memmove(aux_data + off,	aux_data + off + cnt,
14596 		sizeof(*aux_data) * (orig_prog_len - off - cnt));
14597 
14598 	return 0;
14599 }
14600 
14601 /* The verifier does more data flow analysis than llvm and will not
14602  * explore branches that are dead at run time. Malicious programs can
14603  * have dead code too. Therefore replace all dead at-run-time code
14604  * with 'ja -1'.
14605  *
14606  * Just nops are not optimal, e.g. if they would sit at the end of the
14607  * program and through another bug we would manage to jump there, then
14608  * we'd execute beyond program memory otherwise. Returning exception
14609  * code also wouldn't work since we can have subprogs where the dead
14610  * code could be located.
14611  */
14612 static void sanitize_dead_code(struct bpf_verifier_env *env)
14613 {
14614 	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
14615 	struct bpf_insn trap = BPF_JMP_IMM(BPF_JA, 0, 0, -1);
14616 	struct bpf_insn *insn = env->prog->insnsi;
14617 	const int insn_cnt = env->prog->len;
14618 	int i;
14619 
14620 	for (i = 0; i < insn_cnt; i++) {
14621 		if (aux_data[i].seen)
14622 			continue;
14623 		memcpy(insn + i, &trap, sizeof(trap));
14624 		aux_data[i].zext_dst = false;
14625 	}
14626 }
14627 
14628 static bool insn_is_cond_jump(u8 code)
14629 {
14630 	u8 op;
14631 
14632 	if (BPF_CLASS(code) == BPF_JMP32)
14633 		return true;
14634 
14635 	if (BPF_CLASS(code) != BPF_JMP)
14636 		return false;
14637 
14638 	op = BPF_OP(code);
14639 	return op != BPF_JA && op != BPF_EXIT && op != BPF_CALL;
14640 }
14641 
14642 static void opt_hard_wire_dead_code_branches(struct bpf_verifier_env *env)
14643 {
14644 	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
14645 	struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
14646 	struct bpf_insn *insn = env->prog->insnsi;
14647 	const int insn_cnt = env->prog->len;
14648 	int i;
14649 
14650 	for (i = 0; i < insn_cnt; i++, insn++) {
14651 		if (!insn_is_cond_jump(insn->code))
14652 			continue;
14653 
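		/* If the fall-through insn was never seen, the branch is always
		 * taken: rewrite it as an unconditional jump to the original
		 * target. If the jump target was never seen, the branch is
		 * never taken: rewrite it as a jump to the next insn (off 0).
		 */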
14654 		if (!aux_data[i + 1].seen)
14655 			ja.off = insn->off;
14656 		else if (!aux_data[i + 1 + insn->off].seen)
14657 			ja.off = 0;
14658 		else
14659 			continue;
14660 
14661 		if (bpf_prog_is_dev_bound(env->prog->aux))
14662 			bpf_prog_offload_replace_insn(env, i, &ja);
14663 
14664 		memcpy(insn, &ja, sizeof(ja));
14665 	}
14666 }
14667 
14668 static int opt_remove_dead_code(struct bpf_verifier_env *env)
14669 {
14670 	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
14671 	int insn_cnt = env->prog->len;
14672 	int i, err;
14673 
14674 	for (i = 0; i < insn_cnt; i++) {
14675 		int j;
14676 
14677 		j = 0;
14678 		while (i + j < insn_cnt && !aux_data[i + j].seen)
14679 			j++;
14680 		if (!j)
14681 			continue;
14682 
14683 		err = verifier_remove_insns(env, i, j);
14684 		if (err)
14685 			return err;
14686 		insn_cnt = env->prog->len;
14687 	}
14688 
14689 	return 0;
14690 }
14691 
14692 static int opt_remove_nops(struct bpf_verifier_env *env)
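/* Remove BPF_JA insns with offset 0 (pure no-ops), e.g. those left behind by
 * opt_hard_wire_dead_code_branches() above.
 */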
14693 {
14694 	const struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
14695 	struct bpf_insn *insn = env->prog->insnsi;
14696 	int insn_cnt = env->prog->len;
14697 	int i, err;
14698 
14699 	for (i = 0; i < insn_cnt; i++) {
14700 		if (memcmp(&insn[i], &ja, sizeof(ja)))
14701 			continue;
14702 
14703 		err = verifier_remove_insns(env, i, 1);
14704 		if (err)
14705 			return err;
14706 		insn_cnt--;
14707 		i--;
14708 	}
14709 
14710 	return 0;
14711 }
14712 
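/* When the JIT asks for explicit zero-extension (or for CMPXCHG, see below),
 * insert a zero-extend after every instruction that defines a 32-bit
 * subregister which the verifier marked as needing its upper half cleared
 * (zext_dst). With BPF_F_TEST_RND_HI32, additionally poison the upper 32
 * bits of other 32-bit definitions with a random value to flush out wrong
 * assumptions about implicit zero-extension.
 */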
14713 static int opt_subreg_zext_lo32_rnd_hi32(struct bpf_verifier_env *env,
14714 					 const union bpf_attr *attr)
14715 {
14716 	struct bpf_insn *patch, zext_patch[2], rnd_hi32_patch[4];
14717 	struct bpf_insn_aux_data *aux = env->insn_aux_data;
14718 	int i, patch_len, delta = 0, len = env->prog->len;
14719 	struct bpf_insn *insns = env->prog->insnsi;
14720 	struct bpf_prog *new_prog;
14721 	bool rnd_hi32;
14722 
14723 	rnd_hi32 = attr->prog_flags & BPF_F_TEST_RND_HI32;
14724 	zext_patch[1] = BPF_ZEXT_REG(0);
14725 	rnd_hi32_patch[1] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, 0);
14726 	rnd_hi32_patch[2] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
14727 	rnd_hi32_patch[3] = BPF_ALU64_REG(BPF_OR, 0, BPF_REG_AX);
14728 	for (i = 0; i < len; i++) {
14729 		int adj_idx = i + delta;
14730 		struct bpf_insn insn;
14731 		int load_reg;
14732 
14733 		insn = insns[adj_idx];
14734 		load_reg = insn_def_regno(&insn);
14735 		if (!aux[adj_idx].zext_dst) {
14736 			u8 code, class;
14737 			u32 imm_rnd;
14738 
14739 			if (!rnd_hi32)
14740 				continue;
14741 
14742 			code = insn.code;
14743 			class = BPF_CLASS(code);
14744 			if (load_reg == -1)
14745 				continue;
14746 
14747 			/* NOTE: arg "reg" (the fourth one) is only used for
14748 			 *       BPF_STX + SRC_OP, so it is safe to pass NULL
14749 			 *       here.
14750 			 */
14751 			if (is_reg64(env, &insn, load_reg, NULL, DST_OP)) {
14752 				if (class == BPF_LD &&
14753 				    BPF_MODE(code) == BPF_IMM)
14754 					i++;
14755 				continue;
14756 			}
14757 
14758 			/* A ctx load could be transformed into a wider load. */
14759 			if (class == BPF_LDX &&
14760 			    aux[adj_idx].ptr_type == PTR_TO_CTX)
14761 				continue;
14762 
14763 			imm_rnd = get_random_u32();
14764 			rnd_hi32_patch[0] = insn;
14765 			rnd_hi32_patch[1].imm = imm_rnd;
14766 			rnd_hi32_patch[3].dst_reg = load_reg;
14767 			patch = rnd_hi32_patch;
14768 			patch_len = 4;
14769 			goto apply_patch_buffer;
14770 		}
14771 
14772 		/* Add in a zero-extend instruction if a) the JIT has requested
14773 		 * it or b) it's a CMPXCHG.
14774 		 *
14775 		 * The latter is because: BPF_CMPXCHG always loads a value into
14776 		 * R0, therefore always zero-extends. However some archs'
14777 		 * equivalent instruction only does this load when the
14778 		 * comparison is successful. This detail of CMPXCHG is
14779 		 * orthogonal to the general zero-extension behaviour of the
14780 		 * CPU, so it's treated independently of bpf_jit_needs_zext.
14781 		 */
14782 		if (!bpf_jit_needs_zext() && !is_cmpxchg_insn(&insn))
14783 			continue;
14784 
14785 		if (WARN_ON(load_reg == -1)) {
14786 			verbose(env, "verifier bug. zext_dst is set, but no reg is defined\n");
14787 			return -EFAULT;
14788 		}
14789 
14790 		zext_patch[0] = insn;
14791 		zext_patch[1].dst_reg = load_reg;
14792 		zext_patch[1].src_reg = load_reg;
14793 		patch = zext_patch;
14794 		patch_len = 2;
14795 apply_patch_buffer:
14796 		new_prog = bpf_patch_insn_data(env, adj_idx, patch, patch_len);
14797 		if (!new_prog)
14798 			return -ENOMEM;
14799 		env->prog = new_prog;
14800 		insns = new_prog->insnsi;
14801 		aux = env->insn_aux_data;
14802 		delta += patch_len - 1;
14803 	}
14804 
14805 	return 0;
14806 }
14807 
14808 /* convert load instructions that access fields of a context type into a
14809  * sequence of instructions that access fields of the underlying structure:
14810  *     struct __sk_buff    -> struct sk_buff
14811  *     struct bpf_sock_ops -> struct sock
14812  */
14813 static int convert_ctx_accesses(struct bpf_verifier_env *env)
14814 {
14815 	const struct bpf_verifier_ops *ops = env->ops;
14816 	int i, cnt, size, ctx_field_size, delta = 0;
14817 	const int insn_cnt = env->prog->len;
14818 	struct bpf_insn insn_buf[16], *insn;
14819 	u32 target_size, size_default, off;
14820 	struct bpf_prog *new_prog;
14821 	enum bpf_access_type type;
14822 	bool is_narrower_load;
14823 
14824 	if (ops->gen_prologue || env->seen_direct_write) {
14825 		if (!ops->gen_prologue) {
14826 			verbose(env, "bpf verifier is misconfigured\n");
14827 			return -EINVAL;
14828 		}
14829 		cnt = ops->gen_prologue(insn_buf, env->seen_direct_write,
14830 					env->prog);
14831 		if (cnt >= ARRAY_SIZE(insn_buf)) {
14832 			verbose(env, "bpf verifier is misconfigured\n");
14833 			return -EINVAL;
14834 		} else if (cnt) {
14835 			new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt);
14836 			if (!new_prog)
14837 				return -ENOMEM;
14838 
14839 			env->prog = new_prog;
14840 			delta += cnt - 1;
14841 		}
14842 	}
14843 
14844 	if (bpf_prog_is_dev_bound(env->prog->aux))
14845 		return 0;
14846 
14847 	insn = env->prog->insnsi + delta;
14848 
14849 	for (i = 0; i < insn_cnt; i++, insn++) {
14850 		bpf_convert_ctx_access_t convert_ctx_access;
14851 		bool ctx_access;
14852 
14853 		if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) ||
14854 		    insn->code == (BPF_LDX | BPF_MEM | BPF_H) ||
14855 		    insn->code == (BPF_LDX | BPF_MEM | BPF_W) ||
14856 		    insn->code == (BPF_LDX | BPF_MEM | BPF_DW)) {
14857 			type = BPF_READ;
14858 			ctx_access = true;
14859 		} else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) ||
14860 			   insn->code == (BPF_STX | BPF_MEM | BPF_H) ||
14861 			   insn->code == (BPF_STX | BPF_MEM | BPF_W) ||
14862 			   insn->code == (BPF_STX | BPF_MEM | BPF_DW) ||
14863 			   insn->code == (BPF_ST | BPF_MEM | BPF_B) ||
14864 			   insn->code == (BPF_ST | BPF_MEM | BPF_H) ||
14865 			   insn->code == (BPF_ST | BPF_MEM | BPF_W) ||
14866 			   insn->code == (BPF_ST | BPF_MEM | BPF_DW)) {
14867 			type = BPF_WRITE;
14868 			ctx_access = BPF_CLASS(insn->code) == BPF_STX;
14869 		} else {
14870 			continue;
14871 		}
14872 
14873 		if (type == BPF_WRITE &&
14874 		    env->insn_aux_data[i + delta].sanitize_stack_spill) {
14875 			struct bpf_insn patch[] = {
14876 				*insn,
14877 				BPF_ST_NOSPEC(),
14878 			};
14879 
14880 			cnt = ARRAY_SIZE(patch);
14881 			new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt);
14882 			if (!new_prog)
14883 				return -ENOMEM;
14884 
14885 			delta    += cnt - 1;
14886 			env->prog = new_prog;
14887 			insn      = new_prog->insnsi + i + delta;
14888 			continue;
14889 		}
14890 
14891 		if (!ctx_access)
14892 			continue;
14893 
14894 		switch ((int)env->insn_aux_data[i + delta].ptr_type) {
14895 		case PTR_TO_CTX:
14896 			if (!ops->convert_ctx_access)
14897 				continue;
14898 			convert_ctx_access = ops->convert_ctx_access;
14899 			break;
14900 		case PTR_TO_SOCKET:
14901 		case PTR_TO_SOCK_COMMON:
14902 			convert_ctx_access = bpf_sock_convert_ctx_access;
14903 			break;
14904 		case PTR_TO_TCP_SOCK:
14905 			convert_ctx_access = bpf_tcp_sock_convert_ctx_access;
14906 			break;
14907 		case PTR_TO_XDP_SOCK:
14908 			convert_ctx_access = bpf_xdp_sock_convert_ctx_access;
14909 			break;
14910 		case PTR_TO_BTF_ID:
14911 		case PTR_TO_BTF_ID | PTR_UNTRUSTED:
14912 		/* PTR_TO_BTF_ID | MEM_ALLOC always has a valid lifetime, unlike
14913 		 * PTR_TO_BTF_ID, and an active ref_obj_id, but the same cannot
14914 		 * be said once it is marked PTR_UNTRUSTED, hence we must handle
14915 		 * any faults for loads into such types. BPF_WRITE is disallowed
14916 		 * for this case.
14917 		 */
14918 		case PTR_TO_BTF_ID | MEM_ALLOC | PTR_UNTRUSTED:
14919 			if (type == BPF_READ) {
14920 				insn->code = BPF_LDX | BPF_PROBE_MEM |
14921 					BPF_SIZE((insn)->code);
14922 				env->prog->aux->num_exentries++;
14923 			}
14924 			continue;
14925 		default:
14926 			continue;
14927 		}
14928 
14929 		ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size;
14930 		size = BPF_LDST_BYTES(insn);
14931 
14932 		/* If the read access is a narrower load of the field,
14933 		 * convert to a 4/8-byte load, to minimize program type specific
14934 		 * convert_ctx_access changes. If the conversion is successful,
14935 		 * we will apply the proper mask to the result.
14936 		 */
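		/* For example, on a little-endian machine a 1-byte read at
		 * byte offset 1 of a 4-byte ctx field is (roughly) rewritten
		 * into a 4-byte load of the whole field followed by
		 * "dst >>= 8; dst &= 0xff" on the destination register.
		 */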
14937 		is_narrower_load = size < ctx_field_size;
14938 		size_default = bpf_ctx_off_adjust_machine(ctx_field_size);
14939 		off = insn->off;
14940 		if (is_narrower_load) {
14941 			u8 size_code;
14942 
14943 			if (type == BPF_WRITE) {
14944 				verbose(env, "bpf verifier narrow ctx access misconfigured\n");
14945 				return -EINVAL;
14946 			}
14947 
14948 			size_code = BPF_H;
14949 			if (ctx_field_size == 4)
14950 				size_code = BPF_W;
14951 			else if (ctx_field_size == 8)
14952 				size_code = BPF_DW;
14953 
14954 			insn->off = off & ~(size_default - 1);
14955 			insn->code = BPF_LDX | BPF_MEM | size_code;
14956 		}
14957 
14958 		target_size = 0;
14959 		cnt = convert_ctx_access(type, insn, insn_buf, env->prog,
14960 					 &target_size);
14961 		if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) ||
14962 		    (ctx_field_size && !target_size)) {
14963 			verbose(env, "bpf verifier is misconfigured\n");
14964 			return -EINVAL;
14965 		}
14966 
14967 		if (is_narrower_load && size < target_size) {
14968 			u8 shift = bpf_ctx_narrow_access_offset(
14969 				off, size, size_default) * 8;
14970 			if (shift && cnt + 1 >= ARRAY_SIZE(insn_buf)) {
14971 				verbose(env, "bpf verifier narrow ctx load misconfigured\n");
14972 				return -EINVAL;
14973 			}
14974 			if (ctx_field_size <= 4) {
14975 				if (shift)
14976 					insn_buf[cnt++] = BPF_ALU32_IMM(BPF_RSH,
14977 									insn->dst_reg,
14978 									shift);
14979 				insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
14980 								(1 << size * 8) - 1);
14981 			} else {
14982 				if (shift)
14983 					insn_buf[cnt++] = BPF_ALU64_IMM(BPF_RSH,
14984 									insn->dst_reg,
14985 									shift);
14986 				insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg,
14987 								(1ULL << size * 8) - 1);
14988 			}
14989 		}
14990 
14991 		new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
14992 		if (!new_prog)
14993 			return -ENOMEM;
14994 
14995 		delta += cnt - 1;
14996 
14997 		/* keep walking new program and skip insns we just inserted */
14998 		env->prog = new_prog;
14999 		insn      = new_prog->insnsi + i + delta;
15000 	}
15001 
15002 	return 0;
15003 }
15004 
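/* JIT bpf-to-bpf calls: split the program into one bpf_prog per subprogram,
 * JIT each of them, then run a final JIT pass once every callee's address is
 * known so the call instructions can be fixed up. On failure, the original
 * instruction stream is restored so that the caller may fall back to the
 * interpreter where possible.
 */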
15005 static int jit_subprogs(struct bpf_verifier_env *env)
15006 {
15007 	struct bpf_prog *prog = env->prog, **func, *tmp;
15008 	int i, j, subprog_start, subprog_end = 0, len, subprog;
15009 	struct bpf_map *map_ptr;
15010 	struct bpf_insn *insn;
15011 	void *old_bpf_func;
15012 	int err, num_exentries;
15013 
15014 	if (env->subprog_cnt <= 1)
15015 		return 0;
15016 
15017 	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
15018 		if (!bpf_pseudo_func(insn) && !bpf_pseudo_call(insn))
15019 			continue;
15020 
15021 		/* Upon error here we cannot fall back to the interpreter but
15022 		 * need a hard reject of the program. Thus -EFAULT is
15023 		 * propagated in any case.
15024 		 */
15025 		subprog = find_subprog(env, i + insn->imm + 1);
15026 		if (subprog < 0) {
15027 			WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
15028 				  i + insn->imm + 1);
15029 			return -EFAULT;
15030 		}
15031 		/* temporarily remember subprog id inside insn instead of
15032 		 * aux_data, since next loop will split up all insns into funcs
15033 		 */
15034 		insn->off = subprog;
15035 		/* remember original imm in case JIT fails and fallback
15036 		 * to interpreter will be needed
15037 		 */
15038 		env->insn_aux_data[i].call_imm = insn->imm;
15039 		/* point imm to __bpf_call_base+1 from JITs point of view */
15040 		insn->imm = 1;
15041 		if (bpf_pseudo_func(insn))
15042 			/* jit (e.g. x86_64) may emit fewer instructions
15043 			 * if it learns a u32 imm is the same as a u64 imm.
15044 			 * Force a non-zero value here.
15045 			 */
15046 			insn[1].imm = 1;
15047 	}
15048 
15049 	err = bpf_prog_alloc_jited_linfo(prog);
15050 	if (err)
15051 		goto out_undo_insn;
15052 
15053 	err = -ENOMEM;
15054 	func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL);
15055 	if (!func)
15056 		goto out_undo_insn;
15057 
15058 	for (i = 0; i < env->subprog_cnt; i++) {
15059 		subprog_start = subprog_end;
15060 		subprog_end = env->subprog_info[i + 1].start;
15061 
15062 		len = subprog_end - subprog_start;
15063 		/* bpf_prog_run() doesn't call subprogs directly,
15064 		 * hence main prog stats include the runtime of subprogs.
15065 		 * subprogs don't have IDs and are not reachable via prog_get_next_id;
15066 		 * func[i]->stats will never be accessed and stays NULL
15067 		 */
15068 		func[i] = bpf_prog_alloc_no_stats(bpf_prog_size(len), GFP_USER);
15069 		if (!func[i])
15070 			goto out_free;
15071 		memcpy(func[i]->insnsi, &prog->insnsi[subprog_start],
15072 		       len * sizeof(struct bpf_insn));
15073 		func[i]->type = prog->type;
15074 		func[i]->len = len;
15075 		if (bpf_prog_calc_tag(func[i]))
15076 			goto out_free;
15077 		func[i]->is_func = 1;
15078 		func[i]->aux->func_idx = i;
15079 		/* Below members will be freed only at prog->aux */
15080 		func[i]->aux->btf = prog->aux->btf;
15081 		func[i]->aux->func_info = prog->aux->func_info;
15082 		func[i]->aux->func_info_cnt = prog->aux->func_info_cnt;
15083 		func[i]->aux->poke_tab = prog->aux->poke_tab;
15084 		func[i]->aux->size_poke_tab = prog->aux->size_poke_tab;
15085 
15086 		for (j = 0; j < prog->aux->size_poke_tab; j++) {
15087 			struct bpf_jit_poke_descriptor *poke;
15088 
15089 			poke = &prog->aux->poke_tab[j];
15090 			if (poke->insn_idx < subprog_end &&
15091 			    poke->insn_idx >= subprog_start)
15092 				poke->aux = func[i]->aux;
15093 		}
15094 
15095 		func[i]->aux->name[0] = 'F';
15096 		func[i]->aux->stack_depth = env->subprog_info[i].stack_depth;
15097 		func[i]->jit_requested = 1;
15098 		func[i]->blinding_requested = prog->blinding_requested;
15099 		func[i]->aux->kfunc_tab = prog->aux->kfunc_tab;
15100 		func[i]->aux->kfunc_btf_tab = prog->aux->kfunc_btf_tab;
15101 		func[i]->aux->linfo = prog->aux->linfo;
15102 		func[i]->aux->nr_linfo = prog->aux->nr_linfo;
15103 		func[i]->aux->jited_linfo = prog->aux->jited_linfo;
15104 		func[i]->aux->linfo_idx = env->subprog_info[i].linfo_idx;
15105 		num_exentries = 0;
15106 		insn = func[i]->insnsi;
15107 		for (j = 0; j < func[i]->len; j++, insn++) {
15108 			if (BPF_CLASS(insn->code) == BPF_LDX &&
15109 			    BPF_MODE(insn->code) == BPF_PROBE_MEM)
15110 				num_exentries++;
15111 		}
15112 		func[i]->aux->num_exentries = num_exentries;
15113 		func[i]->aux->tail_call_reachable = env->subprog_info[i].tail_call_reachable;
15114 		func[i] = bpf_int_jit_compile(func[i]);
15115 		if (!func[i]->jited) {
15116 			err = -ENOTSUPP;
15117 			goto out_free;
15118 		}
15119 		cond_resched();
15120 	}
15121 
15122 	/* at this point all bpf functions were successfully JITed
15123 	 * now populate all bpf_calls with correct addresses and
15124 	 * run last pass of JIT
15125 	 */
15126 	for (i = 0; i < env->subprog_cnt; i++) {
15127 		insn = func[i]->insnsi;
15128 		for (j = 0; j < func[i]->len; j++, insn++) {
15129 			if (bpf_pseudo_func(insn)) {
15130 				subprog = insn->off;
15131 				insn[0].imm = (u32)(long)func[subprog]->bpf_func;
15132 				insn[1].imm = ((u64)(long)func[subprog]->bpf_func) >> 32;
15133 				continue;
15134 			}
15135 			if (!bpf_pseudo_call(insn))
15136 				continue;
15137 			subprog = insn->off;
15138 			insn->imm = BPF_CALL_IMM(func[subprog]->bpf_func);
15139 		}
15140 
15141 		/* we use the aux data to keep a list of the start addresses
15142 		 * of the JITed images for each function in the program
15143 		 *
15144 		 * for some architectures, such as powerpc64, the imm field
15145 		 * might not be large enough to hold the offset of the start
15146 		 * address of the callee's JITed image from __bpf_call_base
15147 		 *
15148 		 * in such cases, we can lookup the start address of a callee
15149 		 * by using its subprog id, available from the off field of
15150 		 * the call instruction, as an index for this list
15151 		 */
15152 		func[i]->aux->func = func;
15153 		func[i]->aux->func_cnt = env->subprog_cnt;
15154 	}
15155 	for (i = 0; i < env->subprog_cnt; i++) {
15156 		old_bpf_func = func[i]->bpf_func;
15157 		tmp = bpf_int_jit_compile(func[i]);
15158 		if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) {
15159 			verbose(env, "JIT doesn't support bpf-to-bpf calls\n");
15160 			err = -ENOTSUPP;
15161 			goto out_free;
15162 		}
15163 		cond_resched();
15164 	}
15165 
15166 	/* finally lock prog and jit images for all functions and
15167 	 * populate kallsyms
15168 	 */
15169 	for (i = 0; i < env->subprog_cnt; i++) {
15170 		bpf_prog_lock_ro(func[i]);
15171 		bpf_prog_kallsyms_add(func[i]);
15172 	}
15173 
15174 	/* Last step: make now unused interpreter insns from main
15175 	 * prog consistent for later dump requests, so that they
15176 	 * look the same as if they had only ever been interpreted.
15177 	 */
15178 	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
15179 		if (bpf_pseudo_func(insn)) {
15180 			insn[0].imm = env->insn_aux_data[i].call_imm;
15181 			insn[1].imm = insn->off;
15182 			insn->off = 0;
15183 			continue;
15184 		}
15185 		if (!bpf_pseudo_call(insn))
15186 			continue;
15187 		insn->off = env->insn_aux_data[i].call_imm;
15188 		subprog = find_subprog(env, i + insn->off + 1);
15189 		insn->imm = subprog;
15190 	}
15191 
15192 	prog->jited = 1;
15193 	prog->bpf_func = func[0]->bpf_func;
15194 	prog->jited_len = func[0]->jited_len;
15195 	prog->aux->func = func;
15196 	prog->aux->func_cnt = env->subprog_cnt;
15197 	bpf_prog_jit_attempt_done(prog);
15198 	return 0;
15199 out_free:
15200 	/* We failed JIT'ing, so at this point we need to unregister poke
15201 	 * descriptors from subprogs, so that the kernel does not attempt to
15202 	 * patch them anymore as we're freeing the subprog JIT memory.
15203 	 */
15204 	for (i = 0; i < prog->aux->size_poke_tab; i++) {
15205 		map_ptr = prog->aux->poke_tab[i].tail_call.map;
15206 		map_ptr->ops->map_poke_untrack(map_ptr, prog->aux);
15207 	}
15208 	/* At this point we're guaranteed that poke descriptors are not
15209 	 * live anymore. We can just unlink each subprog's descriptor table,
15210 	 * as the table itself is released with the main prog.
15211 	 */
15212 	for (i = 0; i < env->subprog_cnt; i++) {
15213 		if (!func[i])
15214 			continue;
15215 		func[i]->aux->poke_tab = NULL;
15216 		bpf_jit_free(func[i]);
15217 	}
15218 	kfree(func);
15219 out_undo_insn:
15220 	/* cleanup main prog to be interpreted */
15221 	prog->jit_requested = 0;
15222 	prog->blinding_requested = 0;
15223 	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
15224 		if (!bpf_pseudo_call(insn))
15225 			continue;
15226 		insn->off = 0;
15227 		insn->imm = env->insn_aux_data[i].call_imm;
15228 	}
15229 	bpf_prog_jit_attempt_done(prog);
15230 	return err;
15231 }
15232 
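/* Resolve bpf-to-bpf calls. Prefer JITing all subprograms via jit_subprogs();
 * if that is not possible and the failure is not fatal, fall back to the
 * interpreter by patching each call with its callee's stack depth (this
 * fallback is unavailable when kfunc calls, tail calls in subprogs or
 * callbacks are used).
 */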
15233 static int fixup_call_args(struct bpf_verifier_env *env)
15234 {
15235 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
15236 	struct bpf_prog *prog = env->prog;
15237 	struct bpf_insn *insn = prog->insnsi;
15238 	bool has_kfunc_call = bpf_prog_has_kfunc_call(prog);
15239 	int i, depth;
15240 #endif
15241 	int err = 0;
15242 
15243 	if (env->prog->jit_requested &&
15244 	    !bpf_prog_is_dev_bound(env->prog->aux)) {
15245 		err = jit_subprogs(env);
15246 		if (err == 0)
15247 			return 0;
15248 		if (err == -EFAULT)
15249 			return err;
15250 	}
15251 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
15252 	if (has_kfunc_call) {
15253 		verbose(env, "calling kernel functions is not allowed in non-JITed programs\n");
15254 		return -EINVAL;
15255 	}
15256 	if (env->subprog_cnt > 1 && env->prog->aux->tail_call_reachable) {
15257 		/* When JIT fails the progs with bpf2bpf calls and tail_calls
15258 		 * have to be rejected, since interpreter doesn't support them yet.
15259 		 */
15260 		verbose(env, "tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls\n");
15261 		return -EINVAL;
15262 	}
15263 	for (i = 0; i < prog->len; i++, insn++) {
15264 		if (bpf_pseudo_func(insn)) {
15265 			/* When JIT fails the progs with callback calls
15266 			 * have to be rejected, since interpreter doesn't support them yet.
15267 			 */
15268 			verbose(env, "callbacks are not allowed in non-JITed programs\n");
15269 			return -EINVAL;
15270 		}
15271 
15272 		if (!bpf_pseudo_call(insn))
15273 			continue;
15274 		depth = get_callee_stack_depth(env, insn, i);
15275 		if (depth < 0)
15276 			return depth;
15277 		bpf_patch_call_args(insn, depth);
15278 	}
15279 	err = 0;
15280 #endif
15281 	return err;
15282 }
15283 
15284 static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
15285 			    struct bpf_insn *insn_buf, int insn_idx, int *cnt)
15286 {
15287 	const struct bpf_kfunc_desc *desc;
15288 
15289 	if (!insn->imm) {
15290 		verbose(env, "invalid kernel function call not eliminated in verifier pass\n");
15291 		return -EINVAL;
15292 	}
15293 
15294 	/* insn->imm has the btf func_id. Replace it with
15295 	 * an address (relative to __bpf_call_base).
15296 	 */
15297 	desc = find_kfunc_desc(env->prog, insn->imm, insn->off);
15298 	if (!desc) {
15299 		verbose(env, "verifier internal error: kernel function descriptor not found for func_id %u\n",
15300 			insn->imm);
15301 		return -EFAULT;
15302 	}
15303 
15304 	*cnt = 0;
15305 	insn->imm = desc->imm;
15306 	if (insn->off)
15307 		return 0;
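	/* A few special kfuncs need extra rewriting: bpf_obj_new_impl() and
	 * bpf_obj_drop_impl() get hidden arguments (allocation size in R1,
	 * btf_struct_meta pointer in R2) materialized in front of the call,
	 * while the cast kfuncs are reduced to a plain "r0 = r1" move.
	 */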
15308 	if (desc->func_id == special_kfunc_list[KF_bpf_obj_new_impl]) {
15309 		struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta;
15310 		struct bpf_insn addr[2] = { BPF_LD_IMM64(BPF_REG_2, (long)kptr_struct_meta) };
15311 		u64 obj_new_size = env->insn_aux_data[insn_idx].obj_new_size;
15312 
15313 		insn_buf[0] = BPF_MOV64_IMM(BPF_REG_1, obj_new_size);
15314 		insn_buf[1] = addr[0];
15315 		insn_buf[2] = addr[1];
15316 		insn_buf[3] = *insn;
15317 		*cnt = 4;
15318 	} else if (desc->func_id == special_kfunc_list[KF_bpf_obj_drop_impl]) {
15319 		struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta;
15320 		struct bpf_insn addr[2] = { BPF_LD_IMM64(BPF_REG_2, (long)kptr_struct_meta) };
15321 
15322 		insn_buf[0] = addr[0];
15323 		insn_buf[1] = addr[1];
15324 		insn_buf[2] = *insn;
15325 		*cnt = 3;
15326 	} else if (desc->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx] ||
15327 		   desc->func_id == special_kfunc_list[KF_bpf_rdonly_cast]) {
15328 		insn_buf[0] = BPF_MOV64_REG(BPF_REG_0, BPF_REG_1);
15329 		*cnt = 1;
15330 	}
15331 	return 0;
15332 }
15333 
15334 /* Do various post-verification rewrites in a single program pass.
15335  * These rewrites simplify JIT and interpreter implementations.
15336  */
15337 static int do_misc_fixups(struct bpf_verifier_env *env)
15338 {
15339 	struct bpf_prog *prog = env->prog;
15340 	enum bpf_attach_type eatype = prog->expected_attach_type;
15341 	enum bpf_prog_type prog_type = resolve_prog_type(prog);
15342 	struct bpf_insn *insn = prog->insnsi;
15343 	const struct bpf_func_proto *fn;
15344 	const int insn_cnt = prog->len;
15345 	const struct bpf_map_ops *ops;
15346 	struct bpf_insn_aux_data *aux;
15347 	struct bpf_insn insn_buf[16];
15348 	struct bpf_prog *new_prog;
15349 	struct bpf_map *map_ptr;
15350 	int i, ret, cnt, delta = 0;
15351 
15352 	for (i = 0; i < insn_cnt; i++, insn++) {
15353 		/* Make divide-by-zero exceptions impossible. */
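		/* BPF defines x / 0 == 0 and x % 0 == x, so guard each
		 * runtime divide/modulo with a check of the divisor (the
		 * 32-bit mod variant also re-truncates the destination
		 * register).
		 */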
15354 		if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) ||
15355 		    insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
15356 		    insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
15357 		    insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
15358 			bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
15359 			bool isdiv = BPF_OP(insn->code) == BPF_DIV;
15360 			struct bpf_insn *patchlet;
15361 			struct bpf_insn chk_and_div[] = {
15362 				/* [R,W]x div 0 -> 0 */
15363 				BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
15364 					     BPF_JNE | BPF_K, insn->src_reg,
15365 					     0, 2, 0),
15366 				BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg),
15367 				BPF_JMP_IMM(BPF_JA, 0, 0, 1),
15368 				*insn,
15369 			};
15370 			struct bpf_insn chk_and_mod[] = {
15371 				/* [R,W]x mod 0 -> [R,W]x */
15372 				BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
15373 					     BPF_JEQ | BPF_K, insn->src_reg,
15374 					     0, 1 + (is64 ? 0 : 1), 0),
15375 				*insn,
15376 				BPF_JMP_IMM(BPF_JA, 0, 0, 1),
15377 				BPF_MOV32_REG(insn->dst_reg, insn->dst_reg),
15378 			};
15379 
15380 			patchlet = isdiv ? chk_and_div : chk_and_mod;
15381 			cnt = isdiv ? ARRAY_SIZE(chk_and_div) :
15382 				      ARRAY_SIZE(chk_and_mod) - (is64 ? 2 : 0);
15383 
15384 			new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
15385 			if (!new_prog)
15386 				return -ENOMEM;
15387 
15388 			delta    += cnt - 1;
15389 			env->prog = prog = new_prog;
15390 			insn      = new_prog->insnsi + i + delta;
15391 			continue;
15392 		}
15393 
15394 		/* Implement LD_ABS and LD_IND with a rewrite, if supported by the program type. */
15395 		if (BPF_CLASS(insn->code) == BPF_LD &&
15396 		    (BPF_MODE(insn->code) == BPF_ABS ||
15397 		     BPF_MODE(insn->code) == BPF_IND)) {
15398 			cnt = env->ops->gen_ld_abs(insn, insn_buf);
15399 			if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
15400 				verbose(env, "bpf verifier is misconfigured\n");
15401 				return -EINVAL;
15402 			}
15403 
15404 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
15405 			if (!new_prog)
15406 				return -ENOMEM;
15407 
15408 			delta    += cnt - 1;
15409 			env->prog = prog = new_prog;
15410 			insn      = new_prog->insnsi + i + delta;
15411 			continue;
15412 		}
15413 
15414 		/* Rewrite pointer arithmetic to mitigate speculation attacks. */
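		/* For a variable offset, the patch below builds a masked copy
		 * in BPF_REG_AX that, in effect, equals off_reg when
		 * 0 <= off_reg <= alu_limit and 0 otherwise; the pointer ALU
		 * then uses AX, so the result stays in bounds even under
		 * misspeculation. Immediate offsets simply load the
		 * pre-computed alu_limit into AX.
		 */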
15415 		if (insn->code == (BPF_ALU64 | BPF_ADD | BPF_X) ||
15416 		    insn->code == (BPF_ALU64 | BPF_SUB | BPF_X)) {
15417 			const u8 code_add = BPF_ALU64 | BPF_ADD | BPF_X;
15418 			const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X;
15419 			struct bpf_insn *patch = &insn_buf[0];
15420 			bool issrc, isneg, isimm;
15421 			u32 off_reg;
15422 
15423 			aux = &env->insn_aux_data[i + delta];
15424 			if (!aux->alu_state ||
15425 			    aux->alu_state == BPF_ALU_NON_POINTER)
15426 				continue;
15427 
15428 			isneg = aux->alu_state & BPF_ALU_NEG_VALUE;
15429 			issrc = (aux->alu_state & BPF_ALU_SANITIZE) ==
15430 				BPF_ALU_SANITIZE_SRC;
15431 			isimm = aux->alu_state & BPF_ALU_IMMEDIATE;
15432 
15433 			off_reg = issrc ? insn->src_reg : insn->dst_reg;
15434 			if (isimm) {
15435 				*patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit);
15436 			} else {
15437 				if (isneg)
15438 					*patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
15439 				*patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit);
15440 				*patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg);
15441 				*patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg);
15442 				*patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);
15443 				*patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63);
15444 				*patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX, off_reg);
15445 			}
15446 			if (!issrc)
15447 				*patch++ = BPF_MOV64_REG(insn->dst_reg, insn->src_reg);
15448 			insn->src_reg = BPF_REG_AX;
15449 			if (isneg)
15450 				insn->code = insn->code == code_add ?
15451 					     code_sub : code_add;
15452 			*patch++ = *insn;
15453 			if (issrc && isneg && !isimm)
15454 				*patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
15455 			cnt = patch - insn_buf;
15456 
15457 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
15458 			if (!new_prog)
15459 				return -ENOMEM;
15460 
15461 			delta    += cnt - 1;
15462 			env->prog = prog = new_prog;
15463 			insn      = new_prog->insnsi + i + delta;
15464 			continue;
15465 		}
15466 
15467 		if (insn->code != (BPF_JMP | BPF_CALL))
15468 			continue;
15469 		if (insn->src_reg == BPF_PSEUDO_CALL)
15470 			continue;
15471 		if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) {
15472 			ret = fixup_kfunc_call(env, insn, insn_buf, i + delta, &cnt);
15473 			if (ret)
15474 				return ret;
15475 			if (cnt == 0)
15476 				continue;
15477 
15478 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
15479 			if (!new_prog)
15480 				return -ENOMEM;
15481 
15482 			delta	 += cnt - 1;
15483 			env->prog = prog = new_prog;
15484 			insn	  = new_prog->insnsi + i + delta;
15485 			continue;
15486 		}
15487 
15488 		if (insn->imm == BPF_FUNC_get_route_realm)
15489 			prog->dst_needed = 1;
15490 		if (insn->imm == BPF_FUNC_get_prandom_u32)
15491 			bpf_user_rnd_init_once();
15492 		if (insn->imm == BPF_FUNC_override_return)
15493 			prog->kprobe_override = 1;
15494 		if (insn->imm == BPF_FUNC_tail_call) {
15495 			/* If we tail call into other programs, we
15496 			 * cannot make any assumptions since they can
15497 			 * be replaced dynamically during runtime in
15498 			 * the program array.
15499 			 */
15500 			prog->cb_access = 1;
15501 			if (!allow_tail_call_in_subprogs(env))
15502 				prog->aux->stack_depth = MAX_BPF_STACK;
15503 			prog->aux->max_pkt_offset = MAX_PACKET_OFF;
15504 
15505 			/* mark bpf_tail_call as a different opcode to avoid a
15506 			 * conditional branch in the interpreter for every normal
15507 			 * call and to prevent accidental JITing by a JIT compiler
15508 			 * that doesn't support bpf_tail_call yet
15509 			 */
15510 			insn->imm = 0;
15511 			insn->code = BPF_JMP | BPF_TAIL_CALL;
15512 
15513 			aux = &env->insn_aux_data[i + delta];
15514 			if (env->bpf_capable && !prog->blinding_requested &&
15515 			    prog->jit_requested &&
15516 			    !bpf_map_key_poisoned(aux) &&
15517 			    !bpf_map_ptr_poisoned(aux) &&
15518 			    !bpf_map_ptr_unpriv(aux)) {
15519 				struct bpf_jit_poke_descriptor desc = {
15520 					.reason = BPF_POKE_REASON_TAIL_CALL,
15521 					.tail_call.map = BPF_MAP_PTR(aux->map_ptr_state),
15522 					.tail_call.key = bpf_map_key_immediate(aux),
15523 					.insn_idx = i + delta,
15524 				};
15525 
15526 				ret = bpf_jit_add_poke_descriptor(prog, &desc);
15527 				if (ret < 0) {
15528 					verbose(env, "adding tail call poke descriptor failed\n");
15529 					return ret;
15530 				}
15531 
15532 				insn->imm = ret + 1;
15533 				continue;
15534 			}
15535 
15536 			if (!bpf_map_ptr_unpriv(aux))
15537 				continue;
15538 
15539 			/* instead of changing every JIT dealing with tail_call
15540 			 * emit two extra insns:
15541 			 * if (index >= max_entries) goto out;
15542 			 * index &= array->index_mask;
15543 			 * to avoid out-of-bounds cpu speculation
15544 			 */
15545 			if (bpf_map_ptr_poisoned(aux)) {
15546 				verbose(env, "tail_call abusing map_ptr\n");
15547 				return -EINVAL;
15548 			}
15549 
15550 			map_ptr = BPF_MAP_PTR(aux->map_ptr_state);
15551 			insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3,
15552 						  map_ptr->max_entries, 2);
15553 			insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3,
15554 						    container_of(map_ptr,
15555 								 struct bpf_array,
15556 								 map)->index_mask);
15557 			insn_buf[2] = *insn;
15558 			cnt = 3;
15559 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
15560 			if (!new_prog)
15561 				return -ENOMEM;
15562 
15563 			delta    += cnt - 1;
15564 			env->prog = prog = new_prog;
15565 			insn      = new_prog->insnsi + i + delta;
15566 			continue;
15567 		}
15568 
15569 		if (insn->imm == BPF_FUNC_timer_set_callback) {
15570 			/* The verifier will process callback_fn as many times as necessary
15571 			 * with different maps and the register states prepared by
15572 			 * set_timer_callback_state will be accurate.
15573 			 *
15574 			 * The following use case is valid:
15575 			 *   map1 is shared by prog1, prog2, prog3.
15576 			 *   prog1 calls bpf_timer_init for some map1 elements
15577 			 *   prog2 calls bpf_timer_set_callback for some map1 elements.
15578 			 *     Those that were not bpf_timer_init-ed will return -EINVAL.
15579 			 *   prog3 calls bpf_timer_start for some map1 elements.
15580 			 *     Those that were not both bpf_timer_init-ed and
15581 			 *     bpf_timer_set_callback-ed will return -EINVAL.
15582 			 */
15583 			struct bpf_insn ld_addrs[2] = {
15584 				BPF_LD_IMM64(BPF_REG_3, (long)prog->aux),
15585 			};
15586 
15587 			insn_buf[0] = ld_addrs[0];
15588 			insn_buf[1] = ld_addrs[1];
15589 			insn_buf[2] = *insn;
15590 			cnt = 3;
15591 
15592 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
15593 			if (!new_prog)
15594 				return -ENOMEM;
15595 
15596 			delta    += cnt - 1;
15597 			env->prog = prog = new_prog;
15598 			insn      = new_prog->insnsi + i + delta;
15599 			goto patch_call_imm;
15600 		}
15601 
15602 		if (is_storage_get_function(insn->imm)) {
15603 			if (!env->prog->aux->sleepable ||
15604 			    env->insn_aux_data[i + delta].storage_get_func_atomic)
15605 				insn_buf[0] = BPF_MOV64_IMM(BPF_REG_5, (__force __s32)GFP_ATOMIC);
15606 			else
15607 				insn_buf[0] = BPF_MOV64_IMM(BPF_REG_5, (__force __s32)GFP_KERNEL);
15608 			insn_buf[1] = *insn;
15609 			cnt = 2;
15610 
15611 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
15612 			if (!new_prog)
15613 				return -ENOMEM;
15614 
15615 			delta += cnt - 1;
15616 			env->prog = prog = new_prog;
15617 			insn = new_prog->insnsi + i + delta;
15618 			goto patch_call_imm;
15619 		}
15620 
15621 		/* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup
15622 		 * and other inlining handlers are currently limited to 64 bit
15623 		 * only.
15624 		 */
15625 		if (prog->jit_requested && BITS_PER_LONG == 64 &&
15626 		    (insn->imm == BPF_FUNC_map_lookup_elem ||
15627 		     insn->imm == BPF_FUNC_map_update_elem ||
15628 		     insn->imm == BPF_FUNC_map_delete_elem ||
15629 		     insn->imm == BPF_FUNC_map_push_elem   ||
15630 		     insn->imm == BPF_FUNC_map_pop_elem    ||
15631 		     insn->imm == BPF_FUNC_map_peek_elem   ||
15632 		     insn->imm == BPF_FUNC_redirect_map    ||
15633 		     insn->imm == BPF_FUNC_for_each_map_elem ||
15634 		     insn->imm == BPF_FUNC_map_lookup_percpu_elem)) {
15635 			aux = &env->insn_aux_data[i + delta];
15636 			if (bpf_map_ptr_poisoned(aux))
15637 				goto patch_call_imm;
15638 
15639 			map_ptr = BPF_MAP_PTR(aux->map_ptr_state);
15640 			ops = map_ptr->ops;
15641 			if (insn->imm == BPF_FUNC_map_lookup_elem &&
15642 			    ops->map_gen_lookup) {
15643 				cnt = ops->map_gen_lookup(map_ptr, insn_buf);
15644 				if (cnt == -EOPNOTSUPP)
15645 					goto patch_map_ops_generic;
15646 				if (cnt <= 0 || cnt >= ARRAY_SIZE(insn_buf)) {
15647 					verbose(env, "bpf verifier is misconfigured\n");
15648 					return -EINVAL;
15649 				}
15650 
15651 				new_prog = bpf_patch_insn_data(env, i + delta,
15652 							       insn_buf, cnt);
15653 				if (!new_prog)
15654 					return -ENOMEM;
15655 
15656 				delta    += cnt - 1;
15657 				env->prog = prog = new_prog;
15658 				insn      = new_prog->insnsi + i + delta;
15659 				continue;
15660 			}
15661 
15662 			BUILD_BUG_ON(!__same_type(ops->map_lookup_elem,
15663 				     (void *(*)(struct bpf_map *map, void *key))NULL));
15664 			BUILD_BUG_ON(!__same_type(ops->map_delete_elem,
15665 				     (int (*)(struct bpf_map *map, void *key))NULL));
15666 			BUILD_BUG_ON(!__same_type(ops->map_update_elem,
15667 				     (int (*)(struct bpf_map *map, void *key, void *value,
15668 					      u64 flags))NULL));
15669 			BUILD_BUG_ON(!__same_type(ops->map_push_elem,
15670 				     (int (*)(struct bpf_map *map, void *value,
15671 					      u64 flags))NULL));
15672 			BUILD_BUG_ON(!__same_type(ops->map_pop_elem,
15673 				     (int (*)(struct bpf_map *map, void *value))NULL));
15674 			BUILD_BUG_ON(!__same_type(ops->map_peek_elem,
15675 				     (int (*)(struct bpf_map *map, void *value))NULL));
15676 			BUILD_BUG_ON(!__same_type(ops->map_redirect,
15677 				     (int (*)(struct bpf_map *map, u64 index, u64 flags))NULL));
15678 			BUILD_BUG_ON(!__same_type(ops->map_for_each_callback,
15679 				     (int (*)(struct bpf_map *map,
15680 					      bpf_callback_t callback_fn,
15681 					      void *callback_ctx,
15682 					      u64 flags))NULL));
15683 			BUILD_BUG_ON(!__same_type(ops->map_lookup_percpu_elem,
15684 				     (void *(*)(struct bpf_map *map, void *key, u32 cpu))NULL));
15685 
15686 patch_map_ops_generic:
15687 			switch (insn->imm) {
15688 			case BPF_FUNC_map_lookup_elem:
15689 				insn->imm = BPF_CALL_IMM(ops->map_lookup_elem);
15690 				continue;
15691 			case BPF_FUNC_map_update_elem:
15692 				insn->imm = BPF_CALL_IMM(ops->map_update_elem);
15693 				continue;
15694 			case BPF_FUNC_map_delete_elem:
15695 				insn->imm = BPF_CALL_IMM(ops->map_delete_elem);
15696 				continue;
15697 			case BPF_FUNC_map_push_elem:
15698 				insn->imm = BPF_CALL_IMM(ops->map_push_elem);
15699 				continue;
15700 			case BPF_FUNC_map_pop_elem:
15701 				insn->imm = BPF_CALL_IMM(ops->map_pop_elem);
15702 				continue;
15703 			case BPF_FUNC_map_peek_elem:
15704 				insn->imm = BPF_CALL_IMM(ops->map_peek_elem);
15705 				continue;
15706 			case BPF_FUNC_redirect_map:
15707 				insn->imm = BPF_CALL_IMM(ops->map_redirect);
15708 				continue;
15709 			case BPF_FUNC_for_each_map_elem:
15710 				insn->imm = BPF_CALL_IMM(ops->map_for_each_callback);
15711 				continue;
15712 			case BPF_FUNC_map_lookup_percpu_elem:
15713 				insn->imm = BPF_CALL_IMM(ops->map_lookup_percpu_elem);
15714 				continue;
15715 			}
15716 
15717 			goto patch_call_imm;
15718 		}
15719 
15720 		/* Implement bpf_jiffies64 inline. */
15721 		if (prog->jit_requested && BITS_PER_LONG == 64 &&
15722 		    insn->imm == BPF_FUNC_jiffies64) {
15723 			struct bpf_insn ld_jiffies_addr[2] = {
15724 				BPF_LD_IMM64(BPF_REG_0,
15725 					     (unsigned long)&jiffies),
15726 			};
15727 
15728 			insn_buf[0] = ld_jiffies_addr[0];
15729 			insn_buf[1] = ld_jiffies_addr[1];
15730 			insn_buf[2] = BPF_LDX_MEM(BPF_DW, BPF_REG_0,
15731 						  BPF_REG_0, 0);
15732 			cnt = 3;
15733 
15734 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf,
15735 						       cnt);
15736 			if (!new_prog)
15737 				return -ENOMEM;
15738 
15739 			delta    += cnt - 1;
15740 			env->prog = prog = new_prog;
15741 			insn      = new_prog->insnsi + i + delta;
15742 			continue;
15743 		}
15744 
15745 		/* Implement bpf_get_func_arg inline. */
15746 		if (prog_type == BPF_PROG_TYPE_TRACING &&
15747 		    insn->imm == BPF_FUNC_get_func_arg) {
15748 			/* Load nr_args from ctx - 8 */
15749 			insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8);
15750 			insn_buf[1] = BPF_JMP32_REG(BPF_JGE, BPF_REG_2, BPF_REG_0, 6);
15751 			insn_buf[2] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 3);
15752 			insn_buf[3] = BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1);
15753 			insn_buf[4] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0);
15754 			insn_buf[5] = BPF_STX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0);
15755 			insn_buf[6] = BPF_MOV64_IMM(BPF_REG_0, 0);
15756 			insn_buf[7] = BPF_JMP_A(1);
15757 			insn_buf[8] = BPF_MOV64_IMM(BPF_REG_0, -EINVAL);
15758 			cnt = 9;
15759 
15760 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
15761 			if (!new_prog)
15762 				return -ENOMEM;
15763 
15764 			delta    += cnt - 1;
15765 			env->prog = prog = new_prog;
15766 			insn      = new_prog->insnsi + i + delta;
15767 			continue;
15768 		}
15769 
15770 		/* Implement bpf_get_func_ret inline. */
15771 		if (prog_type == BPF_PROG_TYPE_TRACING &&
15772 		    insn->imm == BPF_FUNC_get_func_ret) {
15773 			if (eatype == BPF_TRACE_FEXIT ||
15774 			    eatype == BPF_MODIFY_RETURN) {
15775 				/* Load nr_args from ctx - 8 */
15776 				insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8);
15777 				insn_buf[1] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_0, 3);
15778 				insn_buf[2] = BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1);
15779 				insn_buf[3] = BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0);
15780 				insn_buf[4] = BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, 0);
15781 				insn_buf[5] = BPF_MOV64_IMM(BPF_REG_0, 0);
15782 				cnt = 6;
15783 			} else {
15784 				insn_buf[0] = BPF_MOV64_IMM(BPF_REG_0, -EOPNOTSUPP);
15785 				cnt = 1;
15786 			}
15787 
15788 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
15789 			if (!new_prog)
15790 				return -ENOMEM;
15791 
15792 			delta    += cnt - 1;
15793 			env->prog = prog = new_prog;
15794 			insn      = new_prog->insnsi + i + delta;
15795 			continue;
15796 		}
15797 
15798 		/* Implement get_func_arg_cnt inline. */
15799 		if (prog_type == BPF_PROG_TYPE_TRACING &&
15800 		    insn->imm == BPF_FUNC_get_func_arg_cnt) {
15801 			/* Load nr_args from ctx - 8 */
15802 			insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8);
15803 
15804 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 1);
15805 			if (!new_prog)
15806 				return -ENOMEM;
15807 
15808 			env->prog = prog = new_prog;
15809 			insn      = new_prog->insnsi + i + delta;
15810 			continue;
15811 		}
15812 
15813 		/* Implement bpf_get_func_ip inline. */
15814 		if (prog_type == BPF_PROG_TYPE_TRACING &&
15815 		    insn->imm == BPF_FUNC_get_func_ip) {
15816 			/* Load IP address from ctx - 16 */
15817 			insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -16);
15818 
15819 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 1);
15820 			if (!new_prog)
15821 				return -ENOMEM;
15822 
15823 			env->prog = prog = new_prog;
15824 			insn      = new_prog->insnsi + i + delta;
15825 			continue;
15826 		}
15827 
15828 patch_call_imm:
15829 		fn = env->ops->get_func_proto(insn->imm, env->prog);
15830 		/* all functions that have a prototype and that the verifier allowed
15831 		 * programs to call must be real in-kernel functions
15832 		 */
15833 		if (!fn->func) {
15834 			verbose(env,
15835 				"kernel subsystem misconfigured func %s#%d\n",
15836 				func_id_name(insn->imm), insn->imm);
15837 			return -EFAULT;
15838 		}
15839 		insn->imm = fn->func - __bpf_call_base;
15840 	}
15841 
15842 	/* Since poke tab is now finalized, publish aux to tracker. */
15843 	for (i = 0; i < prog->aux->size_poke_tab; i++) {
15844 		map_ptr = prog->aux->poke_tab[i].tail_call.map;
15845 		if (!map_ptr->ops->map_poke_track ||
15846 		    !map_ptr->ops->map_poke_untrack ||
15847 		    !map_ptr->ops->map_poke_run) {
15848 			verbose(env, "bpf verifier is misconfigured\n");
15849 			return -EINVAL;
15850 		}
15851 
15852 		ret = map_ptr->ops->map_poke_track(map_ptr, prog->aux);
15853 		if (ret < 0) {
15854 			verbose(env, "tracking tail call prog failed\n");
15855 			return ret;
15856 		}
15857 	}
15858 
15859 	sort_kfunc_descs_by_imm(env->prog);
15860 
15861 	return 0;
15862 }
15863 
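/* Replace the bpf_loop() helper call at 'position' with an open-coded loop
 * that calls the callback subprog directly. R6-R8 are used as loop bound,
 * counter and context, and are spilled to / restored from the stack slots at
 * 'stack_base'.
 */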
15864 static struct bpf_prog *inline_bpf_loop(struct bpf_verifier_env *env,
15865 					int position,
15866 					s32 stack_base,
15867 					u32 callback_subprogno,
15868 					u32 *cnt)
15869 {
15870 	s32 r6_offset = stack_base + 0 * BPF_REG_SIZE;
15871 	s32 r7_offset = stack_base + 1 * BPF_REG_SIZE;
15872 	s32 r8_offset = stack_base + 2 * BPF_REG_SIZE;
15873 	int reg_loop_max = BPF_REG_6;
15874 	int reg_loop_cnt = BPF_REG_7;
15875 	int reg_loop_ctx = BPF_REG_8;
15876 
15877 	struct bpf_prog *new_prog;
15878 	u32 callback_start;
15879 	u32 call_insn_offset;
15880 	s32 callback_offset;
15881 
15882 	/* This represents an inlined version of bpf_iter.c:bpf_loop,
15883 	 * so be careful to keep the two in sync when modifying either.
15884 	 */
15885 	struct bpf_insn insn_buf[] = {
15886 		/* Return error and jump to the end of the patch if
15887 		 * expected number of iterations is too big.
15888 		 */
15889 		BPF_JMP_IMM(BPF_JLE, BPF_REG_1, BPF_MAX_LOOPS, 2),
15890 		BPF_MOV32_IMM(BPF_REG_0, -E2BIG),
15891 		BPF_JMP_IMM(BPF_JA, 0, 0, 16),
15892 		/* spill R6, R7, R8 to use these as loop vars */
15893 		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, r6_offset),
15894 		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_7, r7_offset),
15895 		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_8, r8_offset),
15896 		/* initialize loop vars */
15897 		BPF_MOV64_REG(reg_loop_max, BPF_REG_1),
15898 		BPF_MOV32_IMM(reg_loop_cnt, 0),
15899 		BPF_MOV64_REG(reg_loop_ctx, BPF_REG_3),
15900 		/* loop header,
15901 		 * if reg_loop_cnt >= reg_loop_max skip the loop body
15902 		 */
15903 		BPF_JMP_REG(BPF_JGE, reg_loop_cnt, reg_loop_max, 5),
15904 		/* callback call,
15905 		 * correct callback offset would be set after patching
15906 		 */
15907 		BPF_MOV64_REG(BPF_REG_1, reg_loop_cnt),
15908 		BPF_MOV64_REG(BPF_REG_2, reg_loop_ctx),
15909 		BPF_CALL_REL(0),
15910 		/* increment loop counter */
15911 		BPF_ALU64_IMM(BPF_ADD, reg_loop_cnt, 1),
15912 		/* jump to loop header if callback returned 0 */
15913 		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -6),
15914 		/* return value of bpf_loop,
15915 		 * set R0 to the number of iterations
15916 		 */
15917 		BPF_MOV64_REG(BPF_REG_0, reg_loop_cnt),
15918 		/* restore original values of R6, R7, R8 */
15919 		BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, r6_offset),
15920 		BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_10, r7_offset),
15921 		BPF_LDX_MEM(BPF_DW, BPF_REG_8, BPF_REG_10, r8_offset),
15922 	};
15923 
15924 	*cnt = ARRAY_SIZE(insn_buf);
15925 	new_prog = bpf_patch_insn_data(env, position, insn_buf, *cnt);
15926 	if (!new_prog)
15927 		return new_prog;
15928 
15929 	/* callback start is known only after patching */
15930 	callback_start = env->subprog_info[callback_subprogno].start;
15931 	/* Note: insn_buf[12] is the offset of the BPF_CALL_REL instruction */
15932 	call_insn_offset = position + 12;
15933 	callback_offset = callback_start - call_insn_offset - 1;
15934 	new_prog->insnsi[call_insn_offset].imm = callback_offset;
15935 
15936 	return new_prog;
15937 }
15938 
15939 static bool is_bpf_loop_call(struct bpf_insn *insn)
15940 {
15941 	return insn->code == (BPF_JMP | BPF_CALL) &&
15942 		insn->src_reg == 0 &&
15943 		insn->imm == BPF_FUNC_loop;
15944 }
15945 
15946 /* For all sub-programs in the program (including main) check
15947  * insn_aux_data to see if there are bpf_loop calls that require
15948  * inlining. If such calls are found, they are replaced with a
15949  * sequence of instructions produced by the `inline_bpf_loop` function
15950  * and the subprog's stack_depth is increased by the size of 3 registers.
15951  * This stack space is used to spill the values of R6, R7 and R8.  These
15952  * registers are used to store the loop bound, counter and context
15953  * variables.
15954  */
15955 static int optimize_bpf_loop(struct bpf_verifier_env *env)
15956 {
15957 	struct bpf_subprog_info *subprogs = env->subprog_info;
15958 	int i, cur_subprog = 0, cnt, delta = 0;
15959 	struct bpf_insn *insn = env->prog->insnsi;
15960 	int insn_cnt = env->prog->len;
15961 	u16 stack_depth = subprogs[cur_subprog].stack_depth;
15962 	u16 stack_depth_roundup = round_up(stack_depth, 8) - stack_depth;
15963 	u16 stack_depth_extra = 0;
15964 
15965 	for (i = 0; i < insn_cnt; i++, insn++) {
15966 		struct bpf_loop_inline_state *inline_state =
15967 			&env->insn_aux_data[i + delta].loop_inline_state;
15968 
15969 		if (is_bpf_loop_call(insn) && inline_state->fit_for_inline) {
15970 			struct bpf_prog *new_prog;
15971 
15972 			stack_depth_extra = BPF_REG_SIZE * 3 + stack_depth_roundup;
15973 			new_prog = inline_bpf_loop(env,
15974 						   i + delta,
15975 						   -(stack_depth + stack_depth_extra),
15976 						   inline_state->callback_subprogno,
15977 						   &cnt);
15978 			if (!new_prog)
15979 				return -ENOMEM;
15980 
15981 			delta     += cnt - 1;
15982 			env->prog  = new_prog;
15983 			insn       = new_prog->insnsi + i + delta;
15984 		}
15985 
15986 		if (subprogs[cur_subprog + 1].start == i + delta + 1) {
15987 			subprogs[cur_subprog].stack_depth += stack_depth_extra;
15988 			cur_subprog++;
15989 			stack_depth = subprogs[cur_subprog].stack_depth;
15990 			stack_depth_roundup = round_up(stack_depth, 8) - stack_depth;
15991 			stack_depth_extra = 0;
15992 		}
15993 	}
15994 
15995 	env->prog->aux->stack_depth = env->subprog_info[0].stack_depth;
15996 
15997 	return 0;
15998 }
15999 
16000 static void free_states(struct bpf_verifier_env *env)
16001 {
16002 	struct bpf_verifier_state_list *sl, *sln;
16003 	int i;
16004 
16005 	sl = env->free_list;
16006 	while (sl) {
16007 		sln = sl->next;
16008 		free_verifier_state(&sl->state, false);
16009 		kfree(sl);
16010 		sl = sln;
16011 	}
16012 	env->free_list = NULL;
16013 
16014 	if (!env->explored_states)
16015 		return;
16016 
16017 	for (i = 0; i < state_htab_size(env); i++) {
16018 		sl = env->explored_states[i];
16019 
16020 		while (sl) {
16021 			sln = sl->next;
16022 			free_verifier_state(&sl->state, false);
16023 			kfree(sl);
16024 			sl = sln;
16025 		}
16026 		env->explored_states[i] = NULL;
16027 	}
16028 }
16029 
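/* Common entry point for verifying one function body (the main program or a
 * global subprog): set up a fresh verifier state with a single frame,
 * initialize the argument registers (from BTF for subprogs and EXT programs,
 * otherwise R1 = PTR_TO_CTX) and run do_check() from the function's first
 * instruction.
 */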
16030 static int do_check_common(struct bpf_verifier_env *env, int subprog)
16031 {
16032 	bool pop_log = !(env->log.level & BPF_LOG_LEVEL2);
16033 	struct bpf_verifier_state *state;
16034 	struct bpf_reg_state *regs;
16035 	int ret, i;
16036 
16037 	env->prev_linfo = NULL;
16038 	env->pass_cnt++;
16039 
16040 	state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL);
16041 	if (!state)
16042 		return -ENOMEM;
16043 	state->curframe = 0;
16044 	state->speculative = false;
16045 	state->branches = 1;
16046 	state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL);
16047 	if (!state->frame[0]) {
16048 		kfree(state);
16049 		return -ENOMEM;
16050 	}
16051 	env->cur_state = state;
16052 	init_func_state(env, state->frame[0],
16053 			BPF_MAIN_FUNC /* callsite */,
16054 			0 /* frameno */,
16055 			subprog);
16056 	state->first_insn_idx = env->subprog_info[subprog].start;
16057 	state->last_insn_idx = -1;
16058 
16059 	regs = state->frame[state->curframe]->regs;
16060 	if (subprog || env->prog->type == BPF_PROG_TYPE_EXT) {
16061 		ret = btf_prepare_func_args(env, subprog, regs);
16062 		if (ret)
16063 			goto out;
16064 		for (i = BPF_REG_1; i <= BPF_REG_5; i++) {
16065 			if (regs[i].type == PTR_TO_CTX)
16066 				mark_reg_known_zero(env, regs, i);
16067 			else if (regs[i].type == SCALAR_VALUE)
16068 				mark_reg_unknown(env, regs, i);
16069 			else if (base_type(regs[i].type) == PTR_TO_MEM) {
16070 				const u32 mem_size = regs[i].mem_size;
16071 
16072 				mark_reg_known_zero(env, regs, i);
16073 				regs[i].mem_size = mem_size;
16074 				regs[i].id = ++env->id_gen;
16075 			}
16076 		}
16077 	} else {
16078 		/* 1st arg to a function */
16079 		regs[BPF_REG_1].type = PTR_TO_CTX;
16080 		mark_reg_known_zero(env, regs, BPF_REG_1);
16081 		ret = btf_check_subprog_arg_match(env, subprog, regs);
16082 		if (ret == -EFAULT)
16083 			/* unlikely verifier bug. abort.
16084 			 * ret == 0 and ret < 0 are sadly acceptable for
16085 			 * the main() function due to backward compatibility.
16086 			 * For example, a socket filter program may be written as:
16087 			 * int bpf_prog(struct pt_regs *ctx)
16088 			 * and never dereference that ctx in the program.
16089 			 * 'struct pt_regs' is a type mismatch for a socket
16090 			 * filter, which should be using 'struct __sk_buff'.
16091 			 */
16092 			goto out;
16093 	}
16094 
16095 	ret = do_check(env);
16096 out:
16097 	/* check for NULL is necessary, since cur_state can be freed inside
16098 	 * do_check() under memory pressure.
16099 	 */
16100 	if (env->cur_state) {
16101 		free_verifier_state(env->cur_state, true);
16102 		env->cur_state = NULL;
16103 	}
16104 	while (!pop_stack(env, NULL, NULL, false));
16105 	if (!ret && pop_log)
16106 		bpf_vlog_reset(&env->log, 0);
16107 	free_states(env);
16108 	return ret;
16109 }
16110 
16111 /* Verify all global functions in a BPF program one by one based on their BTF.
16112  * All global functions must pass verification. Otherwise the whole program is rejected.
16113  * Consider:
16114  * int bar(int);
16115  * int foo(int f)
16116  * {
16117  *    return bar(f);
16118  * }
16119  * int bar(int b)
16120  * {
16121  *    ...
16122  * }
16123  * foo() will be verified first for R1=any_scalar_value. During verification it
16124  * will be assumed that bar() already verified successfully and call to bar()
16125  * from foo() will be checked for type match only. Later bar() will be verified
16126  * independently to check that it's safe for R1=any_scalar_value.
16127  */
16128 static int do_check_subprogs(struct bpf_verifier_env *env)
16129 {
16130 	struct bpf_prog_aux *aux = env->prog->aux;
16131 	int i, ret;
16132 
16133 	if (!aux->func_info)
16134 		return 0;
16135 
16136 	for (i = 1; i < env->subprog_cnt; i++) {
16137 		if (aux->func_info_aux[i].linkage != BTF_FUNC_GLOBAL)
16138 			continue;
16139 		env->insn_idx = env->subprog_info[i].start;
16140 		WARN_ON_ONCE(env->insn_idx == 0);
16141 		ret = do_check_common(env, i);
16142 		if (ret) {
16143 			return ret;
16144 		} else if (env->log.level & BPF_LOG_LEVEL) {
16145 			verbose(env,
16146 				"Func#%d is safe for any args that match its prototype\n",
16147 				i);
16148 		}
16149 	}
16150 	return 0;
16151 }
16152 
16153 static int do_check_main(struct bpf_verifier_env *env)
16154 {
16155 	int ret;
16156 
16157 	env->insn_idx = 0;
16158 	ret = do_check_common(env, 0);
16159 	if (!ret)
16160 		env->prog->aux->stack_depth = env->subprog_info[0].stack_depth;
16161 	return ret;
16162 }
16163 
16164 
16165 static void print_verification_stats(struct bpf_verifier_env *env)
16166 {
16167 	int i;
16168 
16169 	if (env->log.level & BPF_LOG_STATS) {
16170 		verbose(env, "verification time %lld usec\n",
16171 			div_u64(env->verification_time, 1000));
16172 		verbose(env, "stack depth ");
16173 		for (i = 0; i < env->subprog_cnt; i++) {
16174 			u32 depth = env->subprog_info[i].stack_depth;
16175 
16176 			verbose(env, "%d", depth);
16177 			if (i + 1 < env->subprog_cnt)
16178 				verbose(env, "+");
16179 		}
16180 		verbose(env, "\n");
16181 	}
16182 	verbose(env, "processed %d insns (limit %d) max_states_per_insn %d "
16183 		"total_states %d peak_states %d mark_read %d\n",
16184 		env->insn_processed, BPF_COMPLEXITY_LIMIT_INSNS,
16185 		env->max_states_per_insn, env->total_states,
16186 		env->peak_states, env->longest_mark_read_walk);
16187 }
16188 
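/* For struct_ops programs, resolve attach_btf_id to a supported struct_ops
 * type and expected_attach_type to a member index within it, then record the
 * member's name and function prototype that the program must implement.
 */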
16189 static int check_struct_ops_btf_id(struct bpf_verifier_env *env)
16190 {
16191 	const struct btf_type *t, *func_proto;
16192 	const struct bpf_struct_ops *st_ops;
16193 	const struct btf_member *member;
16194 	struct bpf_prog *prog = env->prog;
16195 	u32 btf_id, member_idx;
16196 	const char *mname;
16197 
16198 	if (!prog->gpl_compatible) {
16199 		verbose(env, "struct ops programs must have a GPL compatible license\n");
16200 		return -EINVAL;
16201 	}
16202 
16203 	btf_id = prog->aux->attach_btf_id;
16204 	st_ops = bpf_struct_ops_find(btf_id);
16205 	if (!st_ops) {
16206 		verbose(env, "attach_btf_id %u is not a supported struct\n",
16207 			btf_id);
16208 		return -ENOTSUPP;
16209 	}
16210 
16211 	t = st_ops->type;
16212 	member_idx = prog->expected_attach_type;
16213 	if (member_idx >= btf_type_vlen(t)) {
16214 		verbose(env, "attach to invalid member idx %u of struct %s\n",
16215 			member_idx, st_ops->name);
16216 		return -EINVAL;
16217 	}
16218 
16219 	member = &btf_type_member(t)[member_idx];
16220 	mname = btf_name_by_offset(btf_vmlinux, member->name_off);
16221 	func_proto = btf_type_resolve_func_ptr(btf_vmlinux, member->type,
16222 					       NULL);
16223 	if (!func_proto) {
16224 		verbose(env, "attach to invalid member %s(@idx %u) of struct %s\n",
16225 			mname, member_idx, st_ops->name);
16226 		return -EINVAL;
16227 	}
16228 
16229 	if (st_ops->check_member) {
16230 		int err = st_ops->check_member(t, member);
16231 
16232 		if (err) {
16233 			verbose(env, "attach to unsupported member %s of struct %s\n",
16234 				mname, st_ops->name);
16235 			return err;
16236 		}
16237 	}
16238 
16239 	prog->aux->attach_func_proto = func_proto;
16240 	prog->aux->attach_func_name = mname;
16241 	env->ops = st_ops->verifier_ops;
16242 
16243 	return 0;
16244 }
16245 #define SECURITY_PREFIX "security_"
16246 
16247 static int check_attach_modify_return(unsigned long addr, const char *func_name)
16248 {
16249 	if (within_error_injection_list(addr) ||
16250 	    !strncmp(SECURITY_PREFIX, func_name, sizeof(SECURITY_PREFIX) - 1))
16251 		return 0;
16252 
16253 	return -EINVAL;
16254 }
16255 
16256 /* list of non-sleepable functions that are otherwise on
16257  * ALLOW_ERROR_INJECTION list
16258  */
16259 BTF_SET_START(btf_non_sleepable_error_inject)
16260 /* Three functions below can be called from sleepable and non-sleepable context.
16261  * Assume non-sleepable from the BPF safety point of view.
16262  */
16263 BTF_ID(func, __filemap_add_folio)
16264 BTF_ID(func, should_fail_alloc_page)
16265 BTF_ID(func, should_failslab)
16266 BTF_SET_END(btf_non_sleepable_error_inject)
16267 
16268 static int check_non_sleepable_error_inject(u32 btf_id)
16269 {
16270 	return btf_id_set_contains(&btf_non_sleepable_error_inject, btf_id);
16271 }
16272 
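/* Validate the BTF-described attach target of a tracing/LSM/EXT program and
 * fill @tgt_info with the resolved function model, address, name and type.
 * @tgt_prog is the target BPF program for prog-to-prog attachment, or NULL
 * when attaching to a kernel function or tracepoint.
 */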
16273 int bpf_check_attach_target(struct bpf_verifier_log *log,
16274 			    const struct bpf_prog *prog,
16275 			    const struct bpf_prog *tgt_prog,
16276 			    u32 btf_id,
16277 			    struct bpf_attach_target_info *tgt_info)
16278 {
16279 	bool prog_extension = prog->type == BPF_PROG_TYPE_EXT;
16280 	const char prefix[] = "btf_trace_";
16281 	int ret = 0, subprog = -1, i;
16282 	const struct btf_type *t;
16283 	bool conservative = true;
16284 	const char *tname;
16285 	struct btf *btf;
16286 	long addr = 0;
16287 
16288 	if (!btf_id) {
16289 		bpf_log(log, "Tracing programs must provide btf_id\n");
16290 		return -EINVAL;
16291 	}
16292 	btf = tgt_prog ? tgt_prog->aux->btf : prog->aux->attach_btf;
16293 	if (!btf) {
16294 		bpf_log(log,
16295 			"FENTRY/FEXIT program can only be attached to another program annotated with BTF\n");
16296 		return -EINVAL;
16297 	}
16298 	t = btf_type_by_id(btf, btf_id);
16299 	if (!t) {
16300 		bpf_log(log, "attach_btf_id %u is invalid\n", btf_id);
16301 		return -EINVAL;
16302 	}
16303 	tname = btf_name_by_offset(btf, t->name_off);
16304 	if (!tname) {
16305 		bpf_log(log, "attach_btf_id %u doesn't have a name\n", btf_id);
16306 		return -EINVAL;
16307 	}
16308 	if (tgt_prog) {
16309 		struct bpf_prog_aux *aux = tgt_prog->aux;
16310 
16311 		for (i = 0; i < aux->func_info_cnt; i++)
16312 			if (aux->func_info[i].type_id == btf_id) {
16313 				subprog = i;
16314 				break;
16315 			}
16316 		if (subprog == -1) {
16317 			bpf_log(log, "Subprog %s doesn't exist\n", tname);
16318 			return -EINVAL;
16319 		}
16320 		conservative = aux->func_info_aux[subprog].unreliable;
16321 		if (prog_extension) {
16322 			if (conservative) {
16323 				bpf_log(log,
16324 					"Cannot replace static functions\n");
16325 				return -EINVAL;
16326 			}
16327 			if (!prog->jit_requested) {
16328 				bpf_log(log,
16329 					"Extension programs should be JITed\n");
16330 				return -EINVAL;
16331 			}
16332 		}
16333 		if (!tgt_prog->jited) {
16334 			bpf_log(log, "Can attach to only JITed progs\n");
16335 			return -EINVAL;
16336 		}
16337 		if (tgt_prog->type == prog->type) {
16338 			/* Cannot fentry/fexit another fentry/fexit program.
16339 			 * Cannot attach program extension to another extension.
16340 			 * It's ok to attach fentry/fexit to extension program.
16341 			 */
16342 			bpf_log(log, "Cannot recursively attach\n");
16343 			return -EINVAL;
16344 		}
16345 		if (tgt_prog->type == BPF_PROG_TYPE_TRACING &&
16346 		    prog_extension &&
16347 		    (tgt_prog->expected_attach_type == BPF_TRACE_FENTRY ||
16348 		     tgt_prog->expected_attach_type == BPF_TRACE_FEXIT)) {
16349 			/* Program extensions can extend all program types
16350 			 * except fentry/fexit. The reason is the following.
16351 			 * The fentry/fexit programs are used for performance
16352 			 * analysis and stats and can be attached to any
16353 			 * program type except themselves. When an extension
16354 			 * program replaces e.g. an XDP function, performance
16355 			 * analysis of all functions must stay possible: both
16356 			 * the original XDP program and its extension. Hence
16357 			 * attaching fentry/fexit to BPF_PROG_TYPE_EXT is
16358 			 * allowed. If extending fentry/fexit were allowed, it
16359 			 * would be possible to create a long call chain
16360 			 * fentry->extension->fentry->extension beyond a
16361 			 * reasonable stack size. Hence extending fentry is
16362 			 * not allowed.
16363 			 */
16364 			bpf_log(log, "Cannot extend fentry/fexit\n");
16365 			return -EINVAL;
16366 		}
16367 	} else {
16368 		if (prog_extension) {
16369 			bpf_log(log, "Cannot replace kernel functions\n");
16370 			return -EINVAL;
16371 		}
16372 	}
16373 
16374 	switch (prog->expected_attach_type) {
16375 	case BPF_TRACE_RAW_TP:
16376 		if (tgt_prog) {
16377 			bpf_log(log,
16378 				"Only FENTRY/FEXIT progs are attachable to another BPF prog\n");
16379 			return -EINVAL;
16380 		}
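		/* The attach_btf_id of a raw tracepoint must name a
		 * "btf_trace_<name>" typedef; strip the prefix to recover the
		 * tracepoint name and unwrap the pointer to its func_proto.
		 */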
16381 		if (!btf_type_is_typedef(t)) {
16382 			bpf_log(log, "attach_btf_id %u is not a typedef\n",
16383 				btf_id);
16384 			return -EINVAL;
16385 		}
16386 		if (strncmp(prefix, tname, sizeof(prefix) - 1)) {
16387 			bpf_log(log, "attach_btf_id %u points to wrong type name %s\n",
16388 				btf_id, tname);
16389 			return -EINVAL;
16390 		}
16391 		tname += sizeof(prefix) - 1;
16392 		t = btf_type_by_id(btf, t->type);
16393 		if (!btf_type_is_ptr(t))
16394 			/* should never happen in valid vmlinux build */
16395 			return -EINVAL;
16396 		t = btf_type_by_id(btf, t->type);
16397 		if (!btf_type_is_func_proto(t))
16398 			/* should never happen in valid vmlinux build */
16399 			return -EINVAL;
16400 
16401 		break;
16402 	case BPF_TRACE_ITER:
16403 		if (!btf_type_is_func(t)) {
16404 			bpf_log(log, "attach_btf_id %u is not a function\n",
16405 				btf_id);
16406 			return -EINVAL;
16407 		}
16408 		t = btf_type_by_id(btf, t->type);
16409 		if (!btf_type_is_func_proto(t))
16410 			return -EINVAL;
16411 		ret = btf_distill_func_proto(log, btf, t, tname, &tgt_info->fmodel);
16412 		if (ret)
16413 			return ret;
16414 		break;
16415 	default:
16416 		if (!prog_extension)
16417 			return -EINVAL;
16418 		fallthrough;
16419 	case BPF_MODIFY_RETURN:
16420 	case BPF_LSM_MAC:
16421 	case BPF_LSM_CGROUP:
16422 	case BPF_TRACE_FENTRY:
16423 	case BPF_TRACE_FEXIT:
16424 		if (!btf_type_is_func(t)) {
16425 			bpf_log(log, "attach_btf_id %u is not a function\n",
16426 				btf_id);
16427 			return -EINVAL;
16428 		}
16429 		if (prog_extension &&
16430 		    btf_check_type_match(log, prog, btf, t))
16431 			return -EINVAL;
16432 		t = btf_type_by_id(btf, t->type);
16433 		if (!btf_type_is_func_proto(t))
16434 			return -EINVAL;
16435 
16436 		if ((prog->aux->saved_dst_prog_type || prog->aux->saved_dst_attach_type) &&
16437 		    (!tgt_prog || prog->aux->saved_dst_prog_type != tgt_prog->type ||
16438 		     prog->aux->saved_dst_attach_type != tgt_prog->expected_attach_type))
16439 			return -EINVAL;
16440 
16441 		if (tgt_prog && conservative)
16442 			t = NULL;
16443 
16444 		ret = btf_distill_func_proto(log, btf, t, tname, &tgt_info->fmodel);
16445 		if (ret < 0)
16446 			return ret;
16447 
16448 		if (tgt_prog) {
16449 			if (subprog == 0)
16450 				addr = (long) tgt_prog->bpf_func;
16451 			else
16452 				addr = (long) tgt_prog->aux->func[subprog]->bpf_func;
16453 		} else {
16454 			addr = kallsyms_lookup_name(tname);
16455 			if (!addr) {
16456 				bpf_log(log,
16457 					"The address of function %s cannot be found\n",
16458 					tname);
16459 				return -ENOENT;
16460 			}
16461 		}
16462 
16463 		if (prog->aux->sleepable) {
16464 			ret = -EINVAL;
16465 			switch (prog->type) {
16466 			case BPF_PROG_TYPE_TRACING:
16467 				/* fentry/fexit/fmod_ret progs can be sleepable only if they are
16468 				 * attached to ALLOW_ERROR_INJECTION functions and not in the denylist.
16469 				 */
16470 				if (!check_non_sleepable_error_inject(btf_id) &&
16471 				    within_error_injection_list(addr))
16472 					ret = 0;
16473 				break;
16474 			case BPF_PROG_TYPE_LSM:
16475 				/* LSM progs are verified to attach only to bpf_lsm_*() funcs.
16476 				 * Only some of them are sleepable.
16477 				 */
16478 				if (bpf_lsm_is_sleepable_hook(btf_id))
16479 					ret = 0;
16480 				break;
16481 			default:
16482 				break;
16483 			}
16484 			if (ret) {
16485 				bpf_log(log, "%s is not sleepable\n", tname);
16486 				return ret;
16487 			}
16488 		} else if (prog->expected_attach_type == BPF_MODIFY_RETURN) {
16489 			if (tgt_prog) {
16490 				bpf_log(log, "can't modify return codes of BPF programs\n");
16491 				return -EINVAL;
16492 			}
16493 			ret = check_attach_modify_return(addr, tname);
16494 			if (ret) {
16495 				bpf_log(log, "%s() is not modifiable\n", tname);
16496 				return ret;
16497 			}
16498 		}
16499 
16500 		break;
16501 	}
16502 	tgt_info->tgt_addr = addr;
16503 	tgt_info->tgt_name = tname;
16504 	tgt_info->tgt_type = t;
16505 	return 0;
16506 }
16507 
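/* Kernel functions that BPF_PROG_TYPE_TRACING programs are never allowed to
 * attach to (enforced in check_attach_btf_id() below); presumably denied
 * because they run on the BPF program execution path itself.
 */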
16508 BTF_SET_START(btf_id_deny)
16509 BTF_ID_UNUSED
16510 #ifdef CONFIG_SMP
16511 BTF_ID(func, migrate_disable)
16512 BTF_ID(func, migrate_enable)
16513 #endif
16514 #if !defined CONFIG_PREEMPT_RCU && !defined CONFIG_TINY_RCU
16515 BTF_ID(func, rcu_read_unlock_strict)
16516 #endif
16517 BTF_SET_END(btf_id_deny)
16518 
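/* Resolve and validate attach_btf_id for the program being loaded: handle
 * struct_ops and sleepable-capability checks, look up the attach target via
 * bpf_check_attach_target(), record it in prog->aux and, for trampoline-based
 * attach types, acquire the destination trampoline.
 */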
16519 static int check_attach_btf_id(struct bpf_verifier_env *env)
16520 {
16521 	struct bpf_prog *prog = env->prog;
16522 	struct bpf_prog *tgt_prog = prog->aux->dst_prog;
16523 	struct bpf_attach_target_info tgt_info = {};
16524 	u32 btf_id = prog->aux->attach_btf_id;
16525 	struct bpf_trampoline *tr;
16526 	int ret;
16527 	u64 key;
16528 
16529 	if (prog->type == BPF_PROG_TYPE_SYSCALL) {
16530 		if (prog->aux->sleepable)
16531 			/* attach_btf_id checked to be zero already */
16532 			return 0;
16533 		verbose(env, "Syscall programs can only be sleepable\n");
16534 		return -EINVAL;
16535 	}
16536 
16537 	if (prog->aux->sleepable && prog->type != BPF_PROG_TYPE_TRACING &&
16538 	    prog->type != BPF_PROG_TYPE_LSM && prog->type != BPF_PROG_TYPE_KPROBE) {
16539 		verbose(env, "Only fentry/fexit/fmod_ret, lsm, and kprobe/uprobe programs can be sleepable\n");
16540 		return -EINVAL;
16541 	}
16542 
16543 	if (prog->type == BPF_PROG_TYPE_STRUCT_OPS)
16544 		return check_struct_ops_btf_id(env);
16545 
16546 	if (prog->type != BPF_PROG_TYPE_TRACING &&
16547 	    prog->type != BPF_PROG_TYPE_LSM &&
16548 	    prog->type != BPF_PROG_TYPE_EXT)
16549 		return 0;
16550 
16551 	ret = bpf_check_attach_target(&env->log, prog, tgt_prog, btf_id, &tgt_info);
16552 	if (ret)
16553 		return ret;
16554 
16555 	if (tgt_prog && prog->type == BPF_PROG_TYPE_EXT) {
16556 		/* to make freplace progs equivalent to their targets, they need
16557 		 * to inherit env->ops and expected_attach_type for the rest of
16558 		 * the verification
16559 		 */
16560 		env->ops = bpf_verifier_ops[tgt_prog->type];
16561 		prog->expected_attach_type = tgt_prog->expected_attach_type;
16562 	}
16563 
16564 	/* store info about the attachment target that will be used later */
16565 	prog->aux->attach_func_proto = tgt_info.tgt_type;
16566 	prog->aux->attach_func_name = tgt_info.tgt_name;
16567 
16568 	if (tgt_prog) {
16569 		prog->aux->saved_dst_prog_type = tgt_prog->type;
16570 		prog->aux->saved_dst_attach_type = tgt_prog->expected_attach_type;
16571 	}
16572 
16573 	if (prog->expected_attach_type == BPF_TRACE_RAW_TP) {
16574 		prog->aux->attach_btf_trace = true;
16575 		return 0;
16576 	} else if (prog->expected_attach_type == BPF_TRACE_ITER) {
16577 		if (!bpf_iter_prog_supported(prog))
16578 			return -EINVAL;
16579 		return 0;
16580 	}
16581 
16582 	if (prog->type == BPF_PROG_TYPE_LSM) {
16583 		ret = bpf_lsm_verify_prog(&env->log, prog);
16584 		if (ret < 0)
16585 			return ret;
16586 	} else if (prog->type == BPF_PROG_TYPE_TRACING &&
16587 		   btf_id_set_contains(&btf_id_deny, btf_id)) {
16588 		return -EINVAL;
16589 	}
16590 
16591 	key = bpf_trampoline_compute_key(tgt_prog, prog->aux->attach_btf, btf_id);
16592 	tr = bpf_trampoline_get(key, &tgt_info);
16593 	if (!tr)
16594 		return -ENOMEM;
16595 
16596 	prog->aux->dst_trampoline = tr;
16597 	return 0;
16598 }
16599 
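/* Lazily parse the vmlinux BTF once, under bpf_verifier_lock.  Returns the
 * cached btf, NULL when CONFIG_DEBUG_INFO_BTF is off, or an ERR_PTR if
 * parsing failed.
 */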
16600 struct btf *bpf_get_btf_vmlinux(void)
16601 {
16602 	if (!btf_vmlinux && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) {
16603 		mutex_lock(&bpf_verifier_lock);
16604 		if (!btf_vmlinux)
16605 			btf_vmlinux = btf_parse_vmlinux();
16606 		mutex_unlock(&bpf_verifier_lock);
16607 	}
16608 	return btf_vmlinux;
16609 }
16610 
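/* Main entry point of the verifier: set up the verification environment and
 * log, resolve subprograms, kfuncs and BTF info, check the CFG, run the
 * instruction-by-instruction analysis and finally apply the rewrite passes
 * (dead code elimination, ctx access conversion, misc fixups) before handing
 * the program back for JITing.
 */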
16611 int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr)
16612 {
16613 	u64 start_time = ktime_get_ns();
16614 	struct bpf_verifier_env *env;
16615 	struct bpf_verifier_log *log;
16616 	int i, len, ret = -EINVAL;
16617 	bool is_priv;
16618 
16619 	/* no program is valid */
16620 	if (ARRAY_SIZE(bpf_verifier_ops) == 0)
16621 		return -EINVAL;
16622 
16623 	/* 'struct bpf_verifier_env' can be global, but since it's not small,
16624 	 * allocate/free it every time bpf_check() is called
16625 	 */
16626 	env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
16627 	if (!env)
16628 		return -ENOMEM;
16629 	log = &env->log;
16630 
16631 	len = (*prog)->len;
16632 	env->insn_aux_data =
16633 		vzalloc(array_size(sizeof(struct bpf_insn_aux_data), len));
16634 	ret = -ENOMEM;
16635 	if (!env->insn_aux_data)
16636 		goto err_free_env;
16637 	for (i = 0; i < len; i++)
16638 		env->insn_aux_data[i].orig_idx = i;
16639 	env->prog = *prog;
16640 	env->ops = bpf_verifier_ops[env->prog->type];
16641 	env->fd_array = make_bpfptr(attr->fd_array, uattr.is_kernel);
16642 	is_priv = bpf_capable();
16643 
16644 	bpf_get_btf_vmlinux();
16645 
16646 	/* grab the mutex to protect a few globals used by the verifier */
16647 	if (!is_priv)
16648 		mutex_lock(&bpf_verifier_lock);
16649 
16650 	if (attr->log_level || attr->log_buf || attr->log_size) {
16651 		/* user requested verbose verifier output
16652 		 * and supplied a buffer to store the verification trace
16653 		 */
16654 		log->level = attr->log_level;
16655 		log->ubuf = (char __user *) (unsigned long) attr->log_buf;
16656 		log->len_total = attr->log_size;
16657 
16658 		/* log attributes have to be sane */
16659 		if (!bpf_verifier_log_attr_valid(log)) {
16660 			ret = -EINVAL;
16661 			goto err_unlock;
16662 		}
16663 	}
16664 
16665 	mark_verifier_state_clean(env);
16666 
16667 	if (IS_ERR(btf_vmlinux)) {
16668 		/* Either gcc or pahole or the kernel is broken. */
16669 		verbose(env, "in-kernel BTF is malformed\n");
16670 		ret = PTR_ERR(btf_vmlinux);
16671 		goto skip_full_check;
16672 	}
16673 
16674 	env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT);
16675 	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
16676 		env->strict_alignment = true;
16677 	if (attr->prog_flags & BPF_F_ANY_ALIGNMENT)
16678 		env->strict_alignment = false;
16679 
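	/* Capability-derived verifier knobs: whether pointer leaks and
	 * uninitialized stack reads are tolerated and whether the Spectre
	 * v1/v4 mitigations may be bypassed are decided once up front from
	 * the loading task's capabilities.
	 */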
16680 	env->allow_ptr_leaks = bpf_allow_ptr_leaks();
16681 	env->allow_uninit_stack = bpf_allow_uninit_stack();
16682 	env->allow_ptr_to_map_access = bpf_allow_ptr_to_map_access();
16683 	env->bypass_spec_v1 = bpf_bypass_spec_v1();
16684 	env->bypass_spec_v4 = bpf_bypass_spec_v4();
16685 	env->bpf_capable = bpf_capable();
16686 	env->rcu_tag_supported = btf_vmlinux &&
16687 		btf_find_by_name_kind(btf_vmlinux, "rcu", BTF_KIND_TYPE_TAG) > 0;
16688 
16689 	if (is_priv)
16690 		env->test_state_freq = attr->prog_flags & BPF_F_TEST_STATE_FREQ;
16691 
16692 	env->explored_states = kvcalloc(state_htab_size(env),
16693 				       sizeof(struct bpf_verifier_state_list *),
16694 				       GFP_USER);
16695 	ret = -ENOMEM;
16696 	if (!env->explored_states)
16697 		goto skip_full_check;
16698 
16699 	ret = add_subprog_and_kfunc(env);
16700 	if (ret < 0)
16701 		goto skip_full_check;
16702 
16703 	ret = check_subprogs(env);
16704 	if (ret < 0)
16705 		goto skip_full_check;
16706 
16707 	ret = check_btf_info(env, attr, uattr);
16708 	if (ret < 0)
16709 		goto skip_full_check;
16710 
16711 	ret = check_attach_btf_id(env);
16712 	if (ret)
16713 		goto skip_full_check;
16714 
16715 	ret = resolve_pseudo_ldimm64(env);
16716 	if (ret < 0)
16717 		goto skip_full_check;
16718 
16719 	if (bpf_prog_is_dev_bound(env->prog->aux)) {
16720 		ret = bpf_prog_offload_verifier_prep(env->prog);
16721 		if (ret)
16722 			goto skip_full_check;
16723 	}
16724 
16725 	ret = check_cfg(env);
16726 	if (ret < 0)
16727 		goto skip_full_check;
16728 
16729 	ret = do_check_subprogs(env);
16730 	ret = ret ?: do_check_main(env);
16731 
16732 	if (ret == 0 && bpf_prog_is_dev_bound(env->prog->aux))
16733 		ret = bpf_prog_offload_finalize(env);
16734 
16735 skip_full_check:
16736 	kvfree(env->explored_states);
16737 
16738 	if (ret == 0)
16739 		ret = check_max_stack_depth(env);
16740 
16741 	/* instruction rewrites happen after this point */
16742 	if (ret == 0)
16743 		ret = optimize_bpf_loop(env);
16744 
16745 	if (is_priv) {
16746 		if (ret == 0)
16747 			opt_hard_wire_dead_code_branches(env);
16748 		if (ret == 0)
16749 			ret = opt_remove_dead_code(env);
16750 		if (ret == 0)
16751 			ret = opt_remove_nops(env);
16752 	} else {
16753 		if (ret == 0)
16754 			sanitize_dead_code(env);
16755 	}
16756 
16757 	if (ret == 0)
16758 		/* program is valid, convert *(u32*)(ctx + off) accesses */
16759 		ret = convert_ctx_accesses(env);
16760 
16761 	if (ret == 0)
16762 		ret = do_misc_fixups(env);
16763 
16764 	/* do 32-bit optimization after insn patching is done so those patched
16765 	 * insns can be handled correctly.
16766 	 */
16767 	if (ret == 0 && !bpf_prog_is_dev_bound(env->prog->aux)) {
16768 		ret = opt_subreg_zext_lo32_rnd_hi32(env, attr);
16769 		env->prog->aux->verifier_zext = bpf_jit_needs_zext() ? !ret
16770 								     : false;
16771 	}
16772 
16773 	if (ret == 0)
16774 		ret = fixup_call_args(env);
16775 
16776 	env->verification_time = ktime_get_ns() - start_time;
16777 	print_verification_stats(env);
16778 	env->prog->aux->verified_insns = env->insn_processed;
16779 
16780 	if (log->level && bpf_verifier_log_full(log))
16781 		ret = -ENOSPC;
16782 	if (log->level && !log->ubuf) {
16783 		ret = -EFAULT;
16784 		goto err_release_maps;
16785 	}
16786 
16787 	if (ret)
16788 		goto err_release_maps;
16789 
16790 	if (env->used_map_cnt) {
16791 		/* if program passed verifier, update used_maps in bpf_prog_info */
16792 		env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt,
16793 							  sizeof(env->used_maps[0]),
16794 							  GFP_KERNEL);
16795 
16796 		if (!env->prog->aux->used_maps) {
16797 			ret = -ENOMEM;
16798 			goto err_release_maps;
16799 		}
16800 
16801 		memcpy(env->prog->aux->used_maps, env->used_maps,
16802 		       sizeof(env->used_maps[0]) * env->used_map_cnt);
16803 		env->prog->aux->used_map_cnt = env->used_map_cnt;
16804 	}
16805 	if (env->used_btf_cnt) {
16806 		/* if program passed verifier, update used_btfs in bpf_prog_aux */
16807 		env->prog->aux->used_btfs = kmalloc_array(env->used_btf_cnt,
16808 							  sizeof(env->used_btfs[0]),
16809 							  GFP_KERNEL);
16810 		if (!env->prog->aux->used_btfs) {
16811 			ret = -ENOMEM;
16812 			goto err_release_maps;
16813 		}
16814 
16815 		memcpy(env->prog->aux->used_btfs, env->used_btfs,
16816 		       sizeof(env->used_btfs[0]) * env->used_btf_cnt);
16817 		env->prog->aux->used_btf_cnt = env->used_btf_cnt;
16818 	}
16819 	if (env->used_map_cnt || env->used_btf_cnt) {
16820 		/* program is valid. Convert pseudo bpf_ld_imm64 into generic
16821 		 * bpf_ld_imm64 instructions
16822 		 */
16823 		convert_pseudo_ld_imm64(env);
16824 	}
16825 
16826 	adjust_btf_func(env);
16827 
16828 err_release_maps:
16829 	if (!env->prog->aux->used_maps)
16830 		/* if we didn't copy map pointers into bpf_prog_info, release
16831 		 * them now. Otherwise free_used_maps() will release them.
16832 		 */
16833 		release_maps(env);
16834 	if (!env->prog->aux->used_btfs)
16835 		release_btfs(env);
16836 
16837 	/* extension progs temporarily inherit the attach_type of their targets
16838 	 * for verification purposes, so set it back to zero before returning
16839 	 */
16840 	if (env->prog->type == BPF_PROG_TYPE_EXT)
16841 		env->prog->expected_attach_type = 0;
16842 
16843 	*prog = env->prog;
16844 err_unlock:
16845 	if (!is_priv)
16846 		mutex_unlock(&bpf_verifier_lock);
16847 	vfree(env->insn_aux_data);
16848 err_free_env:
16849 	kfree(env);
16850 	return ret;
16851 }
16852