xref: /openbmc/linux/kernel/bpf/verifier.c (revision 9d5dbfe0)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
3  * Copyright (c) 2016 Facebook
4  * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
5  */
6 #include <uapi/linux/btf.h>
7 #include <linux/bpf-cgroup.h>
8 #include <linux/kernel.h>
9 #include <linux/types.h>
10 #include <linux/slab.h>
11 #include <linux/bpf.h>
12 #include <linux/btf.h>
13 #include <linux/bpf_verifier.h>
14 #include <linux/filter.h>
15 #include <net/netlink.h>
16 #include <linux/file.h>
17 #include <linux/vmalloc.h>
18 #include <linux/stringify.h>
19 #include <linux/bsearch.h>
20 #include <linux/sort.h>
21 #include <linux/perf_event.h>
22 #include <linux/ctype.h>
23 #include <linux/error-injection.h>
24 #include <linux/bpf_lsm.h>
25 #include <linux/btf_ids.h>
26 #include <linux/poison.h>
27 
28 #include "disasm.h"
29 
30 static const struct bpf_verifier_ops * const bpf_verifier_ops[] = {
31 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
32 	[_id] = & _name ## _verifier_ops,
33 #define BPF_MAP_TYPE(_id, _ops)
34 #define BPF_LINK_TYPE(_id, _name)
35 #include <linux/bpf_types.h>
36 #undef BPF_PROG_TYPE
37 #undef BPF_MAP_TYPE
38 #undef BPF_LINK_TYPE
39 };
40 
41 /* bpf_check() is a static code analyzer that walks eBPF program
42  * instruction by instruction and updates register/stack state.
43  * All paths of conditional branches are analyzed until 'bpf_exit' insn.
44  *
45  * The first pass is depth-first-search to check that the program is a DAG.
46  * It rejects the following programs:
47  * - larger than BPF_MAXINSNS insns
48  * - if loop is present (detected via back-edge)
49  * - unreachable insns exist (shouldn't be a forest. program = one function)
50  * - out of bounds or malformed jumps
51  * The second pass is all possible path descent from the 1st insn.
52  * Since it's analyzing all paths through the program, the length of the
53  * analysis is limited to 64k insn, which may be hit even if total number of
54  * insn is less then 4K, but there are too many branches that change stack/regs.
55  * Number of 'branches to be analyzed' is limited to 1k
56  *
57  * On entry to each instruction, each register has a type, and the instruction
58  * changes the types of the registers depending on instruction semantics.
59  * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is
60  * copied to R1.
61  *
62  * All registers are 64-bit.
63  * R0 - return register
64  * R1-R5 argument passing registers
65  * R6-R9 callee saved registers
66  * R10 - frame pointer read-only
67  *
68  * At the start of BPF program the register R1 contains a pointer to bpf_context
69  * and has type PTR_TO_CTX.
70  *
71  * Verifier tracks arithmetic operations on pointers in case:
72  *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
73  *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
74  * 1st insn copies R10 (which has FRAME_PTR) type into R1
75  * and 2nd arithmetic instruction is pattern matched to recognize
76  * that it wants to construct a pointer to some element within stack.
77  * So after 2nd insn, the register R1 has type PTR_TO_STACK
78  * (and -20 constant is saved for further stack bounds checking).
79  * Meaning that this reg is a pointer to stack plus known immediate constant.
80  *
81  * Most of the time the registers have SCALAR_VALUE type, which
82  * means the register has some value, but it's not a valid pointer.
83  * (like pointer plus pointer becomes SCALAR_VALUE type)
84  *
85  * When verifier sees load or store instructions the type of base register
86  * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK, PTR_TO_SOCKET. These are
87  * four pointer types recognized by check_mem_access() function.
88  *
89  * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value'
90  * and the range of [ptr, ptr + map's value_size) is accessible.
91  *
92  * registers used to pass values to function calls are checked against
93  * function argument constraints.
94  *
95  * ARG_PTR_TO_MAP_KEY is one of such argument constraints.
96  * It means that the register type passed to this function must be
97  * PTR_TO_STACK and it will be used inside the function as
98  * 'pointer to map element key'
99  *
100  * For example the argument constraints for bpf_map_lookup_elem():
101  *   .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
102  *   .arg1_type = ARG_CONST_MAP_PTR,
103  *   .arg2_type = ARG_PTR_TO_MAP_KEY,
104  *
105  * ret_type says that this function returns 'pointer to map elem value or null'
106  * function expects 1st argument to be a const pointer to 'struct bpf_map' and
107  * 2nd argument should be a pointer to stack, which will be used inside
108  * the helper function as a pointer to map element key.
109  *
110  * On the kernel side the helper function looks like:
111  * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
112  * {
113  *    struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
114  *    void *key = (void *) (unsigned long) r2;
115  *    void *value;
116  *
117  *    here kernel can access 'key' and 'map' pointers safely, knowing that
118  *    [key, key + map->key_size) bytes are valid and were initialized on
119  *    the stack of eBPF program.
120  * }
121  *
122  * Corresponding eBPF program may look like:
123  *    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),  // after this insn R2 type is FRAME_PTR
124  *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
125  *    BPF_LD_MAP_FD(BPF_REG_1, map_fd),      // after this insn R1 type is CONST_PTR_TO_MAP
126  *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
127  * here verifier looks at prototype of map_lookup_elem() and sees:
128  * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok,
129  * Now verifier knows that this map has key of R1->map_ptr->key_size bytes
130  *
131  * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far,
132  * Now verifier checks that [R2, R2 + map's key_size) are within stack limits
133  * and were initialized prior to this call.
134  * If it's ok, then verifier allows this BPF_CALL insn and looks at
135  * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets
136  * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function
137  * returns either pointer to map value or NULL.
138  *
139  * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
140  * insn, the register holding that pointer in the true branch changes state to
141  * PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false
142  * branch. See check_cond_jmp_op().
143  *
144  * After the call R0 is set to return type of the function and registers R1-R5
145  * are set to NOT_INIT to indicate that they are no longer readable.
146  *
147  * The following reference types represent a potential reference to a kernel
148  * resource which, after first being allocated, must be checked and freed by
149  * the BPF program:
150  * - PTR_TO_SOCKET_OR_NULL, PTR_TO_SOCKET
151  *
152  * When the verifier sees a helper call return a reference type, it allocates a
153  * pointer id for the reference and stores it in the current function state.
154  * Similar to the way that PTR_TO_MAP_VALUE_OR_NULL is converted into
155  * PTR_TO_MAP_VALUE, PTR_TO_SOCKET_OR_NULL becomes PTR_TO_SOCKET when the type
156  * passes through a NULL-check conditional. For the branch wherein the state is
157  * changed to CONST_IMM, the verifier releases the reference.
158  *
159  * For each helper function that allocates a reference, such as
160  * bpf_sk_lookup_tcp(), there is a corresponding release function, such as
161  * bpf_sk_release(). When a reference type passes into the release function,
162  * the verifier also releases the reference. If any unchecked or unreleased
163  * reference remains at the end of the program, the verifier rejects it.
164  */
165 
166 /* verifier_state + insn_idx are pushed to stack when branch is encountered */
167 struct bpf_verifier_stack_elem {
168 	/* verifer state is 'st'
169 	 * before processing instruction 'insn_idx'
170 	 * and after processing instruction 'prev_insn_idx'
171 	 */
172 	struct bpf_verifier_state st;
173 	int insn_idx;
174 	int prev_insn_idx;
175 	struct bpf_verifier_stack_elem *next;
176 	/* length of verifier log at the time this state was pushed on stack */
177 	u32 log_pos;
178 };
179 
180 #define BPF_COMPLEXITY_LIMIT_JMP_SEQ	8192
181 #define BPF_COMPLEXITY_LIMIT_STATES	64
182 
183 #define BPF_MAP_KEY_POISON	(1ULL << 63)
184 #define BPF_MAP_KEY_SEEN	(1ULL << 62)
185 
186 #define BPF_MAP_PTR_UNPRIV	1UL
187 #define BPF_MAP_PTR_POISON	((void *)((0xeB9FUL << 1) +	\
188 					  POISON_POINTER_DELTA))
189 #define BPF_MAP_PTR(X)		((struct bpf_map *)((X) & ~BPF_MAP_PTR_UNPRIV))
190 
191 static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx);
192 static int release_reference(struct bpf_verifier_env *env, int ref_obj_id);
193 
194 static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux)
195 {
196 	return BPF_MAP_PTR(aux->map_ptr_state) == BPF_MAP_PTR_POISON;
197 }
198 
199 static bool bpf_map_ptr_unpriv(const struct bpf_insn_aux_data *aux)
200 {
201 	return aux->map_ptr_state & BPF_MAP_PTR_UNPRIV;
202 }
203 
204 static void bpf_map_ptr_store(struct bpf_insn_aux_data *aux,
205 			      const struct bpf_map *map, bool unpriv)
206 {
207 	BUILD_BUG_ON((unsigned long)BPF_MAP_PTR_POISON & BPF_MAP_PTR_UNPRIV);
208 	unpriv |= bpf_map_ptr_unpriv(aux);
209 	aux->map_ptr_state = (unsigned long)map |
210 			     (unpriv ? BPF_MAP_PTR_UNPRIV : 0UL);
211 }
212 
213 static bool bpf_map_key_poisoned(const struct bpf_insn_aux_data *aux)
214 {
215 	return aux->map_key_state & BPF_MAP_KEY_POISON;
216 }
217 
218 static bool bpf_map_key_unseen(const struct bpf_insn_aux_data *aux)
219 {
220 	return !(aux->map_key_state & BPF_MAP_KEY_SEEN);
221 }
222 
223 static u64 bpf_map_key_immediate(const struct bpf_insn_aux_data *aux)
224 {
225 	return aux->map_key_state & ~(BPF_MAP_KEY_SEEN | BPF_MAP_KEY_POISON);
226 }
227 
228 static void bpf_map_key_store(struct bpf_insn_aux_data *aux, u64 state)
229 {
230 	bool poisoned = bpf_map_key_poisoned(aux);
231 
232 	aux->map_key_state = state | BPF_MAP_KEY_SEEN |
233 			     (poisoned ? BPF_MAP_KEY_POISON : 0ULL);
234 }
235 
236 static bool bpf_pseudo_call(const struct bpf_insn *insn)
237 {
238 	return insn->code == (BPF_JMP | BPF_CALL) &&
239 	       insn->src_reg == BPF_PSEUDO_CALL;
240 }
241 
242 static bool bpf_pseudo_kfunc_call(const struct bpf_insn *insn)
243 {
244 	return insn->code == (BPF_JMP | BPF_CALL) &&
245 	       insn->src_reg == BPF_PSEUDO_KFUNC_CALL;
246 }
247 
248 struct bpf_call_arg_meta {
249 	struct bpf_map *map_ptr;
250 	bool raw_mode;
251 	bool pkt_access;
252 	u8 release_regno;
253 	int regno;
254 	int access_size;
255 	int mem_size;
256 	u64 msize_max_value;
257 	int ref_obj_id;
258 	int dynptr_id;
259 	int map_uid;
260 	int func_id;
261 	struct btf *btf;
262 	u32 btf_id;
263 	struct btf *ret_btf;
264 	u32 ret_btf_id;
265 	u32 subprogno;
266 	struct btf_field *kptr_field;
267 	u8 uninit_dynptr_regno;
268 };
269 
270 struct btf *btf_vmlinux;
271 
272 static DEFINE_MUTEX(bpf_verifier_lock);
273 
274 static const struct bpf_line_info *
275 find_linfo(const struct bpf_verifier_env *env, u32 insn_off)
276 {
277 	const struct bpf_line_info *linfo;
278 	const struct bpf_prog *prog;
279 	u32 i, nr_linfo;
280 
281 	prog = env->prog;
282 	nr_linfo = prog->aux->nr_linfo;
283 
284 	if (!nr_linfo || insn_off >= prog->len)
285 		return NULL;
286 
287 	linfo = prog->aux->linfo;
288 	for (i = 1; i < nr_linfo; i++)
289 		if (insn_off < linfo[i].insn_off)
290 			break;
291 
292 	return &linfo[i - 1];
293 }
294 
295 void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt,
296 		       va_list args)
297 {
298 	unsigned int n;
299 
300 	n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args);
301 
302 	WARN_ONCE(n >= BPF_VERIFIER_TMP_LOG_SIZE - 1,
303 		  "verifier log line truncated - local buffer too short\n");
304 
305 	if (log->level == BPF_LOG_KERNEL) {
306 		bool newline = n > 0 && log->kbuf[n - 1] == '\n';
307 
308 		pr_err("BPF: %s%s", log->kbuf, newline ? "" : "\n");
309 		return;
310 	}
311 
312 	n = min(log->len_total - log->len_used - 1, n);
313 	log->kbuf[n] = '\0';
314 	if (!copy_to_user(log->ubuf + log->len_used, log->kbuf, n + 1))
315 		log->len_used += n;
316 	else
317 		log->ubuf = NULL;
318 }
319 
320 static void bpf_vlog_reset(struct bpf_verifier_log *log, u32 new_pos)
321 {
322 	char zero = 0;
323 
324 	if (!bpf_verifier_log_needed(log))
325 		return;
326 
327 	log->len_used = new_pos;
328 	if (put_user(zero, log->ubuf + new_pos))
329 		log->ubuf = NULL;
330 }
331 
332 /* log_level controls verbosity level of eBPF verifier.
333  * bpf_verifier_log_write() is used to dump the verification trace to the log,
334  * so the user can figure out what's wrong with the program
335  */
336 __printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
337 					   const char *fmt, ...)
338 {
339 	va_list args;
340 
341 	if (!bpf_verifier_log_needed(&env->log))
342 		return;
343 
344 	va_start(args, fmt);
345 	bpf_verifier_vlog(&env->log, fmt, args);
346 	va_end(args);
347 }
348 EXPORT_SYMBOL_GPL(bpf_verifier_log_write);
349 
350 __printf(2, 3) static void verbose(void *private_data, const char *fmt, ...)
351 {
352 	struct bpf_verifier_env *env = private_data;
353 	va_list args;
354 
355 	if (!bpf_verifier_log_needed(&env->log))
356 		return;
357 
358 	va_start(args, fmt);
359 	bpf_verifier_vlog(&env->log, fmt, args);
360 	va_end(args);
361 }
362 
363 __printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
364 			    const char *fmt, ...)
365 {
366 	va_list args;
367 
368 	if (!bpf_verifier_log_needed(log))
369 		return;
370 
371 	va_start(args, fmt);
372 	bpf_verifier_vlog(log, fmt, args);
373 	va_end(args);
374 }
375 EXPORT_SYMBOL_GPL(bpf_log);
376 
377 static const char *ltrim(const char *s)
378 {
379 	while (isspace(*s))
380 		s++;
381 
382 	return s;
383 }
384 
385 __printf(3, 4) static void verbose_linfo(struct bpf_verifier_env *env,
386 					 u32 insn_off,
387 					 const char *prefix_fmt, ...)
388 {
389 	const struct bpf_line_info *linfo;
390 
391 	if (!bpf_verifier_log_needed(&env->log))
392 		return;
393 
394 	linfo = find_linfo(env, insn_off);
395 	if (!linfo || linfo == env->prev_linfo)
396 		return;
397 
398 	if (prefix_fmt) {
399 		va_list args;
400 
401 		va_start(args, prefix_fmt);
402 		bpf_verifier_vlog(&env->log, prefix_fmt, args);
403 		va_end(args);
404 	}
405 
406 	verbose(env, "%s\n",
407 		ltrim(btf_name_by_offset(env->prog->aux->btf,
408 					 linfo->line_off)));
409 
410 	env->prev_linfo = linfo;
411 }
412 
413 static void verbose_invalid_scalar(struct bpf_verifier_env *env,
414 				   struct bpf_reg_state *reg,
415 				   struct tnum *range, const char *ctx,
416 				   const char *reg_name)
417 {
418 	char tn_buf[48];
419 
420 	verbose(env, "At %s the register %s ", ctx, reg_name);
421 	if (!tnum_is_unknown(reg->var_off)) {
422 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
423 		verbose(env, "has value %s", tn_buf);
424 	} else {
425 		verbose(env, "has unknown scalar value");
426 	}
427 	tnum_strn(tn_buf, sizeof(tn_buf), *range);
428 	verbose(env, " should have been in %s\n", tn_buf);
429 }
430 
431 static bool type_is_pkt_pointer(enum bpf_reg_type type)
432 {
433 	type = base_type(type);
434 	return type == PTR_TO_PACKET ||
435 	       type == PTR_TO_PACKET_META;
436 }
437 
438 static bool type_is_sk_pointer(enum bpf_reg_type type)
439 {
440 	return type == PTR_TO_SOCKET ||
441 		type == PTR_TO_SOCK_COMMON ||
442 		type == PTR_TO_TCP_SOCK ||
443 		type == PTR_TO_XDP_SOCK;
444 }
445 
446 static bool reg_type_not_null(enum bpf_reg_type type)
447 {
448 	return type == PTR_TO_SOCKET ||
449 		type == PTR_TO_TCP_SOCK ||
450 		type == PTR_TO_MAP_VALUE ||
451 		type == PTR_TO_MAP_KEY ||
452 		type == PTR_TO_SOCK_COMMON;
453 }
454 
455 static bool type_is_ptr_alloc_obj(u32 type)
456 {
457 	return base_type(type) == PTR_TO_BTF_ID && type_flag(type) & MEM_ALLOC;
458 }
459 
460 static struct btf_record *reg_btf_record(const struct bpf_reg_state *reg)
461 {
462 	struct btf_record *rec = NULL;
463 	struct btf_struct_meta *meta;
464 
465 	if (reg->type == PTR_TO_MAP_VALUE) {
466 		rec = reg->map_ptr->record;
467 	} else if (type_is_ptr_alloc_obj(reg->type)) {
468 		meta = btf_find_struct_meta(reg->btf, reg->btf_id);
469 		if (meta)
470 			rec = meta->record;
471 	}
472 	return rec;
473 }
474 
475 static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg)
476 {
477 	return btf_record_has_field(reg_btf_record(reg), BPF_SPIN_LOCK);
478 }
479 
480 static bool type_is_rdonly_mem(u32 type)
481 {
482 	return type & MEM_RDONLY;
483 }
484 
485 static bool type_may_be_null(u32 type)
486 {
487 	return type & PTR_MAYBE_NULL;
488 }
489 
490 static bool is_acquire_function(enum bpf_func_id func_id,
491 				const struct bpf_map *map)
492 {
493 	enum bpf_map_type map_type = map ? map->map_type : BPF_MAP_TYPE_UNSPEC;
494 
495 	if (func_id == BPF_FUNC_sk_lookup_tcp ||
496 	    func_id == BPF_FUNC_sk_lookup_udp ||
497 	    func_id == BPF_FUNC_skc_lookup_tcp ||
498 	    func_id == BPF_FUNC_ringbuf_reserve ||
499 	    func_id == BPF_FUNC_kptr_xchg)
500 		return true;
501 
502 	if (func_id == BPF_FUNC_map_lookup_elem &&
503 	    (map_type == BPF_MAP_TYPE_SOCKMAP ||
504 	     map_type == BPF_MAP_TYPE_SOCKHASH))
505 		return true;
506 
507 	return false;
508 }
509 
510 static bool is_ptr_cast_function(enum bpf_func_id func_id)
511 {
512 	return func_id == BPF_FUNC_tcp_sock ||
513 		func_id == BPF_FUNC_sk_fullsock ||
514 		func_id == BPF_FUNC_skc_to_tcp_sock ||
515 		func_id == BPF_FUNC_skc_to_tcp6_sock ||
516 		func_id == BPF_FUNC_skc_to_udp6_sock ||
517 		func_id == BPF_FUNC_skc_to_mptcp_sock ||
518 		func_id == BPF_FUNC_skc_to_tcp_timewait_sock ||
519 		func_id == BPF_FUNC_skc_to_tcp_request_sock;
520 }
521 
522 static bool is_dynptr_ref_function(enum bpf_func_id func_id)
523 {
524 	return func_id == BPF_FUNC_dynptr_data;
525 }
526 
527 static bool is_callback_calling_function(enum bpf_func_id func_id)
528 {
529 	return func_id == BPF_FUNC_for_each_map_elem ||
530 	       func_id == BPF_FUNC_timer_set_callback ||
531 	       func_id == BPF_FUNC_find_vma ||
532 	       func_id == BPF_FUNC_loop ||
533 	       func_id == BPF_FUNC_user_ringbuf_drain;
534 }
535 
536 static bool is_storage_get_function(enum bpf_func_id func_id)
537 {
538 	return func_id == BPF_FUNC_sk_storage_get ||
539 	       func_id == BPF_FUNC_inode_storage_get ||
540 	       func_id == BPF_FUNC_task_storage_get ||
541 	       func_id == BPF_FUNC_cgrp_storage_get;
542 }
543 
544 static bool helper_multiple_ref_obj_use(enum bpf_func_id func_id,
545 					const struct bpf_map *map)
546 {
547 	int ref_obj_uses = 0;
548 
549 	if (is_ptr_cast_function(func_id))
550 		ref_obj_uses++;
551 	if (is_acquire_function(func_id, map))
552 		ref_obj_uses++;
553 	if (is_dynptr_ref_function(func_id))
554 		ref_obj_uses++;
555 
556 	return ref_obj_uses > 1;
557 }
558 
559 static bool is_cmpxchg_insn(const struct bpf_insn *insn)
560 {
561 	return BPF_CLASS(insn->code) == BPF_STX &&
562 	       BPF_MODE(insn->code) == BPF_ATOMIC &&
563 	       insn->imm == BPF_CMPXCHG;
564 }
565 
566 /* string representation of 'enum bpf_reg_type'
567  *
568  * Note that reg_type_str() can not appear more than once in a single verbose()
569  * statement.
570  */
571 static const char *reg_type_str(struct bpf_verifier_env *env,
572 				enum bpf_reg_type type)
573 {
574 	char postfix[16] = {0}, prefix[64] = {0};
575 	static const char * const str[] = {
576 		[NOT_INIT]		= "?",
577 		[SCALAR_VALUE]		= "scalar",
578 		[PTR_TO_CTX]		= "ctx",
579 		[CONST_PTR_TO_MAP]	= "map_ptr",
580 		[PTR_TO_MAP_VALUE]	= "map_value",
581 		[PTR_TO_STACK]		= "fp",
582 		[PTR_TO_PACKET]		= "pkt",
583 		[PTR_TO_PACKET_META]	= "pkt_meta",
584 		[PTR_TO_PACKET_END]	= "pkt_end",
585 		[PTR_TO_FLOW_KEYS]	= "flow_keys",
586 		[PTR_TO_SOCKET]		= "sock",
587 		[PTR_TO_SOCK_COMMON]	= "sock_common",
588 		[PTR_TO_TCP_SOCK]	= "tcp_sock",
589 		[PTR_TO_TP_BUFFER]	= "tp_buffer",
590 		[PTR_TO_XDP_SOCK]	= "xdp_sock",
591 		[PTR_TO_BTF_ID]		= "ptr_",
592 		[PTR_TO_MEM]		= "mem",
593 		[PTR_TO_BUF]		= "buf",
594 		[PTR_TO_FUNC]		= "func",
595 		[PTR_TO_MAP_KEY]	= "map_key",
596 		[CONST_PTR_TO_DYNPTR]	= "dynptr_ptr",
597 	};
598 
599 	if (type & PTR_MAYBE_NULL) {
600 		if (base_type(type) == PTR_TO_BTF_ID)
601 			strncpy(postfix, "or_null_", 16);
602 		else
603 			strncpy(postfix, "_or_null", 16);
604 	}
605 
606 	snprintf(prefix, sizeof(prefix), "%s%s%s%s%s%s%s",
607 		 type & MEM_RDONLY ? "rdonly_" : "",
608 		 type & MEM_RINGBUF ? "ringbuf_" : "",
609 		 type & MEM_USER ? "user_" : "",
610 		 type & MEM_PERCPU ? "percpu_" : "",
611 		 type & MEM_RCU ? "rcu_" : "",
612 		 type & PTR_UNTRUSTED ? "untrusted_" : "",
613 		 type & PTR_TRUSTED ? "trusted_" : ""
614 	);
615 
616 	snprintf(env->type_str_buf, TYPE_STR_BUF_LEN, "%s%s%s",
617 		 prefix, str[base_type(type)], postfix);
618 	return env->type_str_buf;
619 }
620 
621 static char slot_type_char[] = {
622 	[STACK_INVALID]	= '?',
623 	[STACK_SPILL]	= 'r',
624 	[STACK_MISC]	= 'm',
625 	[STACK_ZERO]	= '0',
626 	[STACK_DYNPTR]	= 'd',
627 };
628 
629 static void print_liveness(struct bpf_verifier_env *env,
630 			   enum bpf_reg_liveness live)
631 {
632 	if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN | REG_LIVE_DONE))
633 	    verbose(env, "_");
634 	if (live & REG_LIVE_READ)
635 		verbose(env, "r");
636 	if (live & REG_LIVE_WRITTEN)
637 		verbose(env, "w");
638 	if (live & REG_LIVE_DONE)
639 		verbose(env, "D");
640 }
641 
642 static int __get_spi(s32 off)
643 {
644 	return (-off - 1) / BPF_REG_SIZE;
645 }
646 
647 static struct bpf_func_state *func(struct bpf_verifier_env *env,
648 				   const struct bpf_reg_state *reg)
649 {
650 	struct bpf_verifier_state *cur = env->cur_state;
651 
652 	return cur->frame[reg->frameno];
653 }
654 
655 static bool is_spi_bounds_valid(struct bpf_func_state *state, int spi, int nr_slots)
656 {
657        int allocated_slots = state->allocated_stack / BPF_REG_SIZE;
658 
659        /* We need to check that slots between [spi - nr_slots + 1, spi] are
660 	* within [0, allocated_stack).
661 	*
662 	* Please note that the spi grows downwards. For example, a dynptr
663 	* takes the size of two stack slots; the first slot will be at
664 	* spi and the second slot will be at spi - 1.
665 	*/
666        return spi - nr_slots + 1 >= 0 && spi < allocated_slots;
667 }
668 
669 static int dynptr_get_spi(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
670 {
671 	int off, spi;
672 
673 	if (!tnum_is_const(reg->var_off)) {
674 		verbose(env, "dynptr has to be at a constant offset\n");
675 		return -EINVAL;
676 	}
677 
678 	off = reg->off + reg->var_off.value;
679 	if (off % BPF_REG_SIZE) {
680 		verbose(env, "cannot pass in dynptr at an offset=%d\n", off);
681 		return -EINVAL;
682 	}
683 
684 	spi = __get_spi(off);
685 	if (spi < 1) {
686 		verbose(env, "cannot pass in dynptr at an offset=%d\n", off);
687 		return -EINVAL;
688 	}
689 
690 	if (!is_spi_bounds_valid(func(env, reg), spi, BPF_DYNPTR_NR_SLOTS))
691 		return -ERANGE;
692 	return spi;
693 }
694 
695 static const char *kernel_type_name(const struct btf* btf, u32 id)
696 {
697 	return btf_name_by_offset(btf, btf_type_by_id(btf, id)->name_off);
698 }
699 
700 static void mark_reg_scratched(struct bpf_verifier_env *env, u32 regno)
701 {
702 	env->scratched_regs |= 1U << regno;
703 }
704 
705 static void mark_stack_slot_scratched(struct bpf_verifier_env *env, u32 spi)
706 {
707 	env->scratched_stack_slots |= 1ULL << spi;
708 }
709 
710 static bool reg_scratched(const struct bpf_verifier_env *env, u32 regno)
711 {
712 	return (env->scratched_regs >> regno) & 1;
713 }
714 
715 static bool stack_slot_scratched(const struct bpf_verifier_env *env, u64 regno)
716 {
717 	return (env->scratched_stack_slots >> regno) & 1;
718 }
719 
720 static bool verifier_state_scratched(const struct bpf_verifier_env *env)
721 {
722 	return env->scratched_regs || env->scratched_stack_slots;
723 }
724 
725 static void mark_verifier_state_clean(struct bpf_verifier_env *env)
726 {
727 	env->scratched_regs = 0U;
728 	env->scratched_stack_slots = 0ULL;
729 }
730 
731 /* Used for printing the entire verifier state. */
732 static void mark_verifier_state_scratched(struct bpf_verifier_env *env)
733 {
734 	env->scratched_regs = ~0U;
735 	env->scratched_stack_slots = ~0ULL;
736 }
737 
738 static enum bpf_dynptr_type arg_to_dynptr_type(enum bpf_arg_type arg_type)
739 {
740 	switch (arg_type & DYNPTR_TYPE_FLAG_MASK) {
741 	case DYNPTR_TYPE_LOCAL:
742 		return BPF_DYNPTR_TYPE_LOCAL;
743 	case DYNPTR_TYPE_RINGBUF:
744 		return BPF_DYNPTR_TYPE_RINGBUF;
745 	default:
746 		return BPF_DYNPTR_TYPE_INVALID;
747 	}
748 }
749 
750 static bool dynptr_type_refcounted(enum bpf_dynptr_type type)
751 {
752 	return type == BPF_DYNPTR_TYPE_RINGBUF;
753 }
754 
755 static void __mark_dynptr_reg(struct bpf_reg_state *reg,
756 			      enum bpf_dynptr_type type,
757 			      bool first_slot, int dynptr_id);
758 
759 static void __mark_reg_not_init(const struct bpf_verifier_env *env,
760 				struct bpf_reg_state *reg);
761 
762 static void mark_dynptr_stack_regs(struct bpf_verifier_env *env,
763 				   struct bpf_reg_state *sreg1,
764 				   struct bpf_reg_state *sreg2,
765 				   enum bpf_dynptr_type type)
766 {
767 	int id = ++env->id_gen;
768 
769 	__mark_dynptr_reg(sreg1, type, true, id);
770 	__mark_dynptr_reg(sreg2, type, false, id);
771 }
772 
773 static void mark_dynptr_cb_reg(struct bpf_verifier_env *env,
774 			       struct bpf_reg_state *reg,
775 			       enum bpf_dynptr_type type)
776 {
777 	__mark_dynptr_reg(reg, type, true, ++env->id_gen);
778 }
779 
780 static int destroy_if_dynptr_stack_slot(struct bpf_verifier_env *env,
781 				        struct bpf_func_state *state, int spi);
782 
783 static int mark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
784 				   enum bpf_arg_type arg_type, int insn_idx)
785 {
786 	struct bpf_func_state *state = func(env, reg);
787 	enum bpf_dynptr_type type;
788 	int spi, i, id, err;
789 
790 	spi = dynptr_get_spi(env, reg);
791 	if (spi < 0)
792 		return spi;
793 
794 	/* We cannot assume both spi and spi - 1 belong to the same dynptr,
795 	 * hence we need to call destroy_if_dynptr_stack_slot twice for both,
796 	 * to ensure that for the following example:
797 	 *	[d1][d1][d2][d2]
798 	 * spi    3   2   1   0
799 	 * So marking spi = 2 should lead to destruction of both d1 and d2. In
800 	 * case they do belong to same dynptr, second call won't see slot_type
801 	 * as STACK_DYNPTR and will simply skip destruction.
802 	 */
803 	err = destroy_if_dynptr_stack_slot(env, state, spi);
804 	if (err)
805 		return err;
806 	err = destroy_if_dynptr_stack_slot(env, state, spi - 1);
807 	if (err)
808 		return err;
809 
810 	for (i = 0; i < BPF_REG_SIZE; i++) {
811 		state->stack[spi].slot_type[i] = STACK_DYNPTR;
812 		state->stack[spi - 1].slot_type[i] = STACK_DYNPTR;
813 	}
814 
815 	type = arg_to_dynptr_type(arg_type);
816 	if (type == BPF_DYNPTR_TYPE_INVALID)
817 		return -EINVAL;
818 
819 	mark_dynptr_stack_regs(env, &state->stack[spi].spilled_ptr,
820 			       &state->stack[spi - 1].spilled_ptr, type);
821 
822 	if (dynptr_type_refcounted(type)) {
823 		/* The id is used to track proper releasing */
824 		id = acquire_reference_state(env, insn_idx);
825 		if (id < 0)
826 			return id;
827 
828 		state->stack[spi].spilled_ptr.ref_obj_id = id;
829 		state->stack[spi - 1].spilled_ptr.ref_obj_id = id;
830 	}
831 
832 	state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
833 	state->stack[spi - 1].spilled_ptr.live |= REG_LIVE_WRITTEN;
834 
835 	return 0;
836 }
837 
838 static int unmark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
839 {
840 	struct bpf_func_state *state = func(env, reg);
841 	int spi, i;
842 
843 	spi = dynptr_get_spi(env, reg);
844 	if (spi < 0)
845 		return spi;
846 
847 	for (i = 0; i < BPF_REG_SIZE; i++) {
848 		state->stack[spi].slot_type[i] = STACK_INVALID;
849 		state->stack[spi - 1].slot_type[i] = STACK_INVALID;
850 	}
851 
852 	/* Invalidate any slices associated with this dynptr */
853 	if (dynptr_type_refcounted(state->stack[spi].spilled_ptr.dynptr.type))
854 		WARN_ON_ONCE(release_reference(env, state->stack[spi].spilled_ptr.ref_obj_id));
855 
856 	__mark_reg_not_init(env, &state->stack[spi].spilled_ptr);
857 	__mark_reg_not_init(env, &state->stack[spi - 1].spilled_ptr);
858 
859 	/* Why do we need to set REG_LIVE_WRITTEN for STACK_INVALID slot?
860 	 *
861 	 * While we don't allow reading STACK_INVALID, it is still possible to
862 	 * do <8 byte writes marking some but not all slots as STACK_MISC. Then,
863 	 * helpers or insns can do partial read of that part without failing,
864 	 * but check_stack_range_initialized, check_stack_read_var_off, and
865 	 * check_stack_read_fixed_off will do mark_reg_read for all 8-bytes of
866 	 * the slot conservatively. Hence we need to prevent those liveness
867 	 * marking walks.
868 	 *
869 	 * This was not a problem before because STACK_INVALID is only set by
870 	 * default (where the default reg state has its reg->parent as NULL), or
871 	 * in clean_live_states after REG_LIVE_DONE (at which point
872 	 * mark_reg_read won't walk reg->parent chain), but not randomly during
873 	 * verifier state exploration (like we did above). Hence, for our case
874 	 * parentage chain will still be live (i.e. reg->parent may be
875 	 * non-NULL), while earlier reg->parent was NULL, so we need
876 	 * REG_LIVE_WRITTEN to screen off read marker propagation when it is
877 	 * done later on reads or by mark_dynptr_read as well to unnecessary
878 	 * mark registers in verifier state.
879 	 */
880 	state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
881 	state->stack[spi - 1].spilled_ptr.live |= REG_LIVE_WRITTEN;
882 
883 	return 0;
884 }
885 
886 static void __mark_reg_unknown(const struct bpf_verifier_env *env,
887 			       struct bpf_reg_state *reg);
888 
889 static int destroy_if_dynptr_stack_slot(struct bpf_verifier_env *env,
890 				        struct bpf_func_state *state, int spi)
891 {
892 	struct bpf_func_state *fstate;
893 	struct bpf_reg_state *dreg;
894 	int i, dynptr_id;
895 
896 	/* We always ensure that STACK_DYNPTR is never set partially,
897 	 * hence just checking for slot_type[0] is enough. This is
898 	 * different for STACK_SPILL, where it may be only set for
899 	 * 1 byte, so code has to use is_spilled_reg.
900 	 */
901 	if (state->stack[spi].slot_type[0] != STACK_DYNPTR)
902 		return 0;
903 
904 	/* Reposition spi to first slot */
905 	if (!state->stack[spi].spilled_ptr.dynptr.first_slot)
906 		spi = spi + 1;
907 
908 	if (dynptr_type_refcounted(state->stack[spi].spilled_ptr.dynptr.type)) {
909 		verbose(env, "cannot overwrite referenced dynptr\n");
910 		return -EINVAL;
911 	}
912 
913 	mark_stack_slot_scratched(env, spi);
914 	mark_stack_slot_scratched(env, spi - 1);
915 
916 	/* Writing partially to one dynptr stack slot destroys both. */
917 	for (i = 0; i < BPF_REG_SIZE; i++) {
918 		state->stack[spi].slot_type[i] = STACK_INVALID;
919 		state->stack[spi - 1].slot_type[i] = STACK_INVALID;
920 	}
921 
922 	dynptr_id = state->stack[spi].spilled_ptr.id;
923 	/* Invalidate any slices associated with this dynptr */
924 	bpf_for_each_reg_in_vstate(env->cur_state, fstate, dreg, ({
925 		/* Dynptr slices are only PTR_TO_MEM_OR_NULL and PTR_TO_MEM */
926 		if (dreg->type != (PTR_TO_MEM | PTR_MAYBE_NULL) && dreg->type != PTR_TO_MEM)
927 			continue;
928 		if (dreg->dynptr_id == dynptr_id) {
929 			if (!env->allow_ptr_leaks)
930 				__mark_reg_not_init(env, dreg);
931 			else
932 				__mark_reg_unknown(env, dreg);
933 		}
934 	}));
935 
936 	/* Do not release reference state, we are destroying dynptr on stack,
937 	 * not using some helper to release it. Just reset register.
938 	 */
939 	__mark_reg_not_init(env, &state->stack[spi].spilled_ptr);
940 	__mark_reg_not_init(env, &state->stack[spi - 1].spilled_ptr);
941 
942 	/* Same reason as unmark_stack_slots_dynptr above */
943 	state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
944 	state->stack[spi - 1].spilled_ptr.live |= REG_LIVE_WRITTEN;
945 
946 	return 0;
947 }
948 
949 static bool is_dynptr_reg_valid_uninit(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
950 				       int spi)
951 {
952 	if (reg->type == CONST_PTR_TO_DYNPTR)
953 		return false;
954 
955 	/* For -ERANGE (i.e. spi not falling into allocated stack slots), we
956 	 * will do check_mem_access to check and update stack bounds later, so
957 	 * return true for that case.
958 	 */
959 	if (spi < 0)
960 		return spi == -ERANGE;
961 	/* We allow overwriting existing unreferenced STACK_DYNPTR slots, see
962 	 * mark_stack_slots_dynptr which calls destroy_if_dynptr_stack_slot to
963 	 * ensure dynptr objects at the slots we are touching are completely
964 	 * destructed before we reinitialize them for a new one. For referenced
965 	 * ones, destroy_if_dynptr_stack_slot returns an error early instead of
966 	 * delaying it until the end where the user will get "Unreleased
967 	 * reference" error.
968 	 */
969 	return true;
970 }
971 
972 static bool is_dynptr_reg_valid_init(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
973 				     int spi)
974 {
975 	struct bpf_func_state *state = func(env, reg);
976 	int i;
977 
978 	/* This already represents first slot of initialized bpf_dynptr */
979 	if (reg->type == CONST_PTR_TO_DYNPTR)
980 		return true;
981 
982 	if (spi < 0)
983 		return false;
984 	if (!state->stack[spi].spilled_ptr.dynptr.first_slot)
985 		return false;
986 
987 	for (i = 0; i < BPF_REG_SIZE; i++) {
988 		if (state->stack[spi].slot_type[i] != STACK_DYNPTR ||
989 		    state->stack[spi - 1].slot_type[i] != STACK_DYNPTR)
990 			return false;
991 	}
992 
993 	return true;
994 }
995 
996 static bool is_dynptr_type_expected(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
997 				    enum bpf_arg_type arg_type)
998 {
999 	struct bpf_func_state *state = func(env, reg);
1000 	enum bpf_dynptr_type dynptr_type;
1001 	int spi;
1002 
1003 	/* ARG_PTR_TO_DYNPTR takes any type of dynptr */
1004 	if (arg_type == ARG_PTR_TO_DYNPTR)
1005 		return true;
1006 
1007 	dynptr_type = arg_to_dynptr_type(arg_type);
1008 	if (reg->type == CONST_PTR_TO_DYNPTR) {
1009 		return reg->dynptr.type == dynptr_type;
1010 	} else {
1011 		spi = dynptr_get_spi(env, reg);
1012 		if (spi < 0)
1013 			return false;
1014 		return state->stack[spi].spilled_ptr.dynptr.type == dynptr_type;
1015 	}
1016 }
1017 
1018 /* The reg state of a pointer or a bounded scalar was saved when
1019  * it was spilled to the stack.
1020  */
1021 static bool is_spilled_reg(const struct bpf_stack_state *stack)
1022 {
1023 	return stack->slot_type[BPF_REG_SIZE - 1] == STACK_SPILL;
1024 }
1025 
1026 static void scrub_spilled_slot(u8 *stype)
1027 {
1028 	if (*stype != STACK_INVALID)
1029 		*stype = STACK_MISC;
1030 }
1031 
1032 static void print_verifier_state(struct bpf_verifier_env *env,
1033 				 const struct bpf_func_state *state,
1034 				 bool print_all)
1035 {
1036 	const struct bpf_reg_state *reg;
1037 	enum bpf_reg_type t;
1038 	int i;
1039 
1040 	if (state->frameno)
1041 		verbose(env, " frame%d:", state->frameno);
1042 	for (i = 0; i < MAX_BPF_REG; i++) {
1043 		reg = &state->regs[i];
1044 		t = reg->type;
1045 		if (t == NOT_INIT)
1046 			continue;
1047 		if (!print_all && !reg_scratched(env, i))
1048 			continue;
1049 		verbose(env, " R%d", i);
1050 		print_liveness(env, reg->live);
1051 		verbose(env, "=");
1052 		if (t == SCALAR_VALUE && reg->precise)
1053 			verbose(env, "P");
1054 		if ((t == SCALAR_VALUE || t == PTR_TO_STACK) &&
1055 		    tnum_is_const(reg->var_off)) {
1056 			/* reg->off should be 0 for SCALAR_VALUE */
1057 			verbose(env, "%s", t == SCALAR_VALUE ? "" : reg_type_str(env, t));
1058 			verbose(env, "%lld", reg->var_off.value + reg->off);
1059 		} else {
1060 			const char *sep = "";
1061 
1062 			verbose(env, "%s", reg_type_str(env, t));
1063 			if (base_type(t) == PTR_TO_BTF_ID)
1064 				verbose(env, "%s", kernel_type_name(reg->btf, reg->btf_id));
1065 			verbose(env, "(");
1066 /*
1067  * _a stands for append, was shortened to avoid multiline statements below.
1068  * This macro is used to output a comma separated list of attributes.
1069  */
1070 #define verbose_a(fmt, ...) ({ verbose(env, "%s" fmt, sep, __VA_ARGS__); sep = ","; })
1071 
1072 			if (reg->id)
1073 				verbose_a("id=%d", reg->id);
1074 			if (reg->ref_obj_id)
1075 				verbose_a("ref_obj_id=%d", reg->ref_obj_id);
1076 			if (t != SCALAR_VALUE)
1077 				verbose_a("off=%d", reg->off);
1078 			if (type_is_pkt_pointer(t))
1079 				verbose_a("r=%d", reg->range);
1080 			else if (base_type(t) == CONST_PTR_TO_MAP ||
1081 				 base_type(t) == PTR_TO_MAP_KEY ||
1082 				 base_type(t) == PTR_TO_MAP_VALUE)
1083 				verbose_a("ks=%d,vs=%d",
1084 					  reg->map_ptr->key_size,
1085 					  reg->map_ptr->value_size);
1086 			if (tnum_is_const(reg->var_off)) {
1087 				/* Typically an immediate SCALAR_VALUE, but
1088 				 * could be a pointer whose offset is too big
1089 				 * for reg->off
1090 				 */
1091 				verbose_a("imm=%llx", reg->var_off.value);
1092 			} else {
1093 				if (reg->smin_value != reg->umin_value &&
1094 				    reg->smin_value != S64_MIN)
1095 					verbose_a("smin=%lld", (long long)reg->smin_value);
1096 				if (reg->smax_value != reg->umax_value &&
1097 				    reg->smax_value != S64_MAX)
1098 					verbose_a("smax=%lld", (long long)reg->smax_value);
1099 				if (reg->umin_value != 0)
1100 					verbose_a("umin=%llu", (unsigned long long)reg->umin_value);
1101 				if (reg->umax_value != U64_MAX)
1102 					verbose_a("umax=%llu", (unsigned long long)reg->umax_value);
1103 				if (!tnum_is_unknown(reg->var_off)) {
1104 					char tn_buf[48];
1105 
1106 					tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
1107 					verbose_a("var_off=%s", tn_buf);
1108 				}
1109 				if (reg->s32_min_value != reg->smin_value &&
1110 				    reg->s32_min_value != S32_MIN)
1111 					verbose_a("s32_min=%d", (int)(reg->s32_min_value));
1112 				if (reg->s32_max_value != reg->smax_value &&
1113 				    reg->s32_max_value != S32_MAX)
1114 					verbose_a("s32_max=%d", (int)(reg->s32_max_value));
1115 				if (reg->u32_min_value != reg->umin_value &&
1116 				    reg->u32_min_value != U32_MIN)
1117 					verbose_a("u32_min=%d", (int)(reg->u32_min_value));
1118 				if (reg->u32_max_value != reg->umax_value &&
1119 				    reg->u32_max_value != U32_MAX)
1120 					verbose_a("u32_max=%d", (int)(reg->u32_max_value));
1121 			}
1122 #undef verbose_a
1123 
1124 			verbose(env, ")");
1125 		}
1126 	}
1127 	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
1128 		char types_buf[BPF_REG_SIZE + 1];
1129 		bool valid = false;
1130 		int j;
1131 
1132 		for (j = 0; j < BPF_REG_SIZE; j++) {
1133 			if (state->stack[i].slot_type[j] != STACK_INVALID)
1134 				valid = true;
1135 			types_buf[j] = slot_type_char[
1136 					state->stack[i].slot_type[j]];
1137 		}
1138 		types_buf[BPF_REG_SIZE] = 0;
1139 		if (!valid)
1140 			continue;
1141 		if (!print_all && !stack_slot_scratched(env, i))
1142 			continue;
1143 		verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
1144 		print_liveness(env, state->stack[i].spilled_ptr.live);
1145 		if (is_spilled_reg(&state->stack[i])) {
1146 			reg = &state->stack[i].spilled_ptr;
1147 			t = reg->type;
1148 			verbose(env, "=%s", t == SCALAR_VALUE ? "" : reg_type_str(env, t));
1149 			if (t == SCALAR_VALUE && reg->precise)
1150 				verbose(env, "P");
1151 			if (t == SCALAR_VALUE && tnum_is_const(reg->var_off))
1152 				verbose(env, "%lld", reg->var_off.value + reg->off);
1153 		} else {
1154 			verbose(env, "=%s", types_buf);
1155 		}
1156 	}
1157 	if (state->acquired_refs && state->refs[0].id) {
1158 		verbose(env, " refs=%d", state->refs[0].id);
1159 		for (i = 1; i < state->acquired_refs; i++)
1160 			if (state->refs[i].id)
1161 				verbose(env, ",%d", state->refs[i].id);
1162 	}
1163 	if (state->in_callback_fn)
1164 		verbose(env, " cb");
1165 	if (state->in_async_callback_fn)
1166 		verbose(env, " async_cb");
1167 	verbose(env, "\n");
1168 	mark_verifier_state_clean(env);
1169 }
1170 
1171 static inline u32 vlog_alignment(u32 pos)
1172 {
1173 	return round_up(max(pos + BPF_LOG_MIN_ALIGNMENT / 2, BPF_LOG_ALIGNMENT),
1174 			BPF_LOG_MIN_ALIGNMENT) - pos - 1;
1175 }
1176 
1177 static void print_insn_state(struct bpf_verifier_env *env,
1178 			     const struct bpf_func_state *state)
1179 {
1180 	if (env->prev_log_len && env->prev_log_len == env->log.len_used) {
1181 		/* remove new line character */
1182 		bpf_vlog_reset(&env->log, env->prev_log_len - 1);
1183 		verbose(env, "%*c;", vlog_alignment(env->prev_insn_print_len), ' ');
1184 	} else {
1185 		verbose(env, "%d:", env->insn_idx);
1186 	}
1187 	print_verifier_state(env, state, false);
1188 }
1189 
1190 /* copy array src of length n * size bytes to dst. dst is reallocated if it's too
1191  * small to hold src. This is different from krealloc since we don't want to preserve
1192  * the contents of dst.
1193  *
1194  * Leaves dst untouched if src is NULL or length is zero. Returns NULL if memory could
1195  * not be allocated.
1196  */
1197 static void *copy_array(void *dst, const void *src, size_t n, size_t size, gfp_t flags)
1198 {
1199 	size_t alloc_bytes;
1200 	void *orig = dst;
1201 	size_t bytes;
1202 
1203 	if (ZERO_OR_NULL_PTR(src))
1204 		goto out;
1205 
1206 	if (unlikely(check_mul_overflow(n, size, &bytes)))
1207 		return NULL;
1208 
1209 	alloc_bytes = max(ksize(orig), kmalloc_size_roundup(bytes));
1210 	dst = krealloc(orig, alloc_bytes, flags);
1211 	if (!dst) {
1212 		kfree(orig);
1213 		return NULL;
1214 	}
1215 
1216 	memcpy(dst, src, bytes);
1217 out:
1218 	return dst ? dst : ZERO_SIZE_PTR;
1219 }
1220 
1221 /* resize an array from old_n items to new_n items. the array is reallocated if it's too
1222  * small to hold new_n items. new items are zeroed out if the array grows.
1223  *
1224  * Contrary to krealloc_array, does not free arr if new_n is zero.
1225  */
1226 static void *realloc_array(void *arr, size_t old_n, size_t new_n, size_t size)
1227 {
1228 	size_t alloc_size;
1229 	void *new_arr;
1230 
1231 	if (!new_n || old_n == new_n)
1232 		goto out;
1233 
1234 	alloc_size = kmalloc_size_roundup(size_mul(new_n, size));
1235 	new_arr = krealloc(arr, alloc_size, GFP_KERNEL);
1236 	if (!new_arr) {
1237 		kfree(arr);
1238 		return NULL;
1239 	}
1240 	arr = new_arr;
1241 
1242 	if (new_n > old_n)
1243 		memset(arr + old_n * size, 0, (new_n - old_n) * size);
1244 
1245 out:
1246 	return arr ? arr : ZERO_SIZE_PTR;
1247 }
1248 
1249 static int copy_reference_state(struct bpf_func_state *dst, const struct bpf_func_state *src)
1250 {
1251 	dst->refs = copy_array(dst->refs, src->refs, src->acquired_refs,
1252 			       sizeof(struct bpf_reference_state), GFP_KERNEL);
1253 	if (!dst->refs)
1254 		return -ENOMEM;
1255 
1256 	dst->acquired_refs = src->acquired_refs;
1257 	return 0;
1258 }
1259 
1260 static int copy_stack_state(struct bpf_func_state *dst, const struct bpf_func_state *src)
1261 {
1262 	size_t n = src->allocated_stack / BPF_REG_SIZE;
1263 
1264 	dst->stack = copy_array(dst->stack, src->stack, n, sizeof(struct bpf_stack_state),
1265 				GFP_KERNEL);
1266 	if (!dst->stack)
1267 		return -ENOMEM;
1268 
1269 	dst->allocated_stack = src->allocated_stack;
1270 	return 0;
1271 }
1272 
1273 static int resize_reference_state(struct bpf_func_state *state, size_t n)
1274 {
1275 	state->refs = realloc_array(state->refs, state->acquired_refs, n,
1276 				    sizeof(struct bpf_reference_state));
1277 	if (!state->refs)
1278 		return -ENOMEM;
1279 
1280 	state->acquired_refs = n;
1281 	return 0;
1282 }
1283 
1284 static int grow_stack_state(struct bpf_func_state *state, int size)
1285 {
1286 	size_t old_n = state->allocated_stack / BPF_REG_SIZE, n = size / BPF_REG_SIZE;
1287 
1288 	if (old_n >= n)
1289 		return 0;
1290 
1291 	state->stack = realloc_array(state->stack, old_n, n, sizeof(struct bpf_stack_state));
1292 	if (!state->stack)
1293 		return -ENOMEM;
1294 
1295 	state->allocated_stack = size;
1296 	return 0;
1297 }
1298 
1299 /* Acquire a pointer id from the env and update the state->refs to include
1300  * this new pointer reference.
1301  * On success, returns a valid pointer id to associate with the register
1302  * On failure, returns a negative errno.
1303  */
1304 static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx)
1305 {
1306 	struct bpf_func_state *state = cur_func(env);
1307 	int new_ofs = state->acquired_refs;
1308 	int id, err;
1309 
1310 	err = resize_reference_state(state, state->acquired_refs + 1);
1311 	if (err)
1312 		return err;
1313 	id = ++env->id_gen;
1314 	state->refs[new_ofs].id = id;
1315 	state->refs[new_ofs].insn_idx = insn_idx;
1316 	state->refs[new_ofs].callback_ref = state->in_callback_fn ? state->frameno : 0;
1317 
1318 	return id;
1319 }
1320 
1321 /* release function corresponding to acquire_reference_state(). Idempotent. */
1322 static int release_reference_state(struct bpf_func_state *state, int ptr_id)
1323 {
1324 	int i, last_idx;
1325 
1326 	last_idx = state->acquired_refs - 1;
1327 	for (i = 0; i < state->acquired_refs; i++) {
1328 		if (state->refs[i].id == ptr_id) {
1329 			/* Cannot release caller references in callbacks */
1330 			if (state->in_callback_fn && state->refs[i].callback_ref != state->frameno)
1331 				return -EINVAL;
1332 			if (last_idx && i != last_idx)
1333 				memcpy(&state->refs[i], &state->refs[last_idx],
1334 				       sizeof(*state->refs));
1335 			memset(&state->refs[last_idx], 0, sizeof(*state->refs));
1336 			state->acquired_refs--;
1337 			return 0;
1338 		}
1339 	}
1340 	return -EINVAL;
1341 }
1342 
1343 static void free_func_state(struct bpf_func_state *state)
1344 {
1345 	if (!state)
1346 		return;
1347 	kfree(state->refs);
1348 	kfree(state->stack);
1349 	kfree(state);
1350 }
1351 
1352 static void clear_jmp_history(struct bpf_verifier_state *state)
1353 {
1354 	kfree(state->jmp_history);
1355 	state->jmp_history = NULL;
1356 	state->jmp_history_cnt = 0;
1357 }
1358 
1359 static void free_verifier_state(struct bpf_verifier_state *state,
1360 				bool free_self)
1361 {
1362 	int i;
1363 
1364 	for (i = 0; i <= state->curframe; i++) {
1365 		free_func_state(state->frame[i]);
1366 		state->frame[i] = NULL;
1367 	}
1368 	clear_jmp_history(state);
1369 	if (free_self)
1370 		kfree(state);
1371 }
1372 
1373 /* copy verifier state from src to dst growing dst stack space
1374  * when necessary to accommodate larger src stack
1375  */
1376 static int copy_func_state(struct bpf_func_state *dst,
1377 			   const struct bpf_func_state *src)
1378 {
1379 	int err;
1380 
1381 	memcpy(dst, src, offsetof(struct bpf_func_state, acquired_refs));
1382 	err = copy_reference_state(dst, src);
1383 	if (err)
1384 		return err;
1385 	return copy_stack_state(dst, src);
1386 }
1387 
1388 static int copy_verifier_state(struct bpf_verifier_state *dst_state,
1389 			       const struct bpf_verifier_state *src)
1390 {
1391 	struct bpf_func_state *dst;
1392 	int i, err;
1393 
1394 	dst_state->jmp_history = copy_array(dst_state->jmp_history, src->jmp_history,
1395 					    src->jmp_history_cnt, sizeof(struct bpf_idx_pair),
1396 					    GFP_USER);
1397 	if (!dst_state->jmp_history)
1398 		return -ENOMEM;
1399 	dst_state->jmp_history_cnt = src->jmp_history_cnt;
1400 
1401 	/* if dst has more stack frames then src frame, free them */
1402 	for (i = src->curframe + 1; i <= dst_state->curframe; i++) {
1403 		free_func_state(dst_state->frame[i]);
1404 		dst_state->frame[i] = NULL;
1405 	}
1406 	dst_state->speculative = src->speculative;
1407 	dst_state->active_rcu_lock = src->active_rcu_lock;
1408 	dst_state->curframe = src->curframe;
1409 	dst_state->active_lock.ptr = src->active_lock.ptr;
1410 	dst_state->active_lock.id = src->active_lock.id;
1411 	dst_state->branches = src->branches;
1412 	dst_state->parent = src->parent;
1413 	dst_state->first_insn_idx = src->first_insn_idx;
1414 	dst_state->last_insn_idx = src->last_insn_idx;
1415 	for (i = 0; i <= src->curframe; i++) {
1416 		dst = dst_state->frame[i];
1417 		if (!dst) {
1418 			dst = kzalloc(sizeof(*dst), GFP_KERNEL);
1419 			if (!dst)
1420 				return -ENOMEM;
1421 			dst_state->frame[i] = dst;
1422 		}
1423 		err = copy_func_state(dst, src->frame[i]);
1424 		if (err)
1425 			return err;
1426 	}
1427 	return 0;
1428 }
1429 
1430 static void update_branch_counts(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
1431 {
1432 	while (st) {
1433 		u32 br = --st->branches;
1434 
1435 		/* WARN_ON(br > 1) technically makes sense here,
1436 		 * but see comment in push_stack(), hence:
1437 		 */
1438 		WARN_ONCE((int)br < 0,
1439 			  "BUG update_branch_counts:branches_to_explore=%d\n",
1440 			  br);
1441 		if (br)
1442 			break;
1443 		st = st->parent;
1444 	}
1445 }
1446 
1447 static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx,
1448 		     int *insn_idx, bool pop_log)
1449 {
1450 	struct bpf_verifier_state *cur = env->cur_state;
1451 	struct bpf_verifier_stack_elem *elem, *head = env->head;
1452 	int err;
1453 
1454 	if (env->head == NULL)
1455 		return -ENOENT;
1456 
1457 	if (cur) {
1458 		err = copy_verifier_state(cur, &head->st);
1459 		if (err)
1460 			return err;
1461 	}
1462 	if (pop_log)
1463 		bpf_vlog_reset(&env->log, head->log_pos);
1464 	if (insn_idx)
1465 		*insn_idx = head->insn_idx;
1466 	if (prev_insn_idx)
1467 		*prev_insn_idx = head->prev_insn_idx;
1468 	elem = head->next;
1469 	free_verifier_state(&head->st, false);
1470 	kfree(head);
1471 	env->head = elem;
1472 	env->stack_size--;
1473 	return 0;
1474 }
1475 
1476 static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
1477 					     int insn_idx, int prev_insn_idx,
1478 					     bool speculative)
1479 {
1480 	struct bpf_verifier_state *cur = env->cur_state;
1481 	struct bpf_verifier_stack_elem *elem;
1482 	int err;
1483 
1484 	elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL);
1485 	if (!elem)
1486 		goto err;
1487 
1488 	elem->insn_idx = insn_idx;
1489 	elem->prev_insn_idx = prev_insn_idx;
1490 	elem->next = env->head;
1491 	elem->log_pos = env->log.len_used;
1492 	env->head = elem;
1493 	env->stack_size++;
1494 	err = copy_verifier_state(&elem->st, cur);
1495 	if (err)
1496 		goto err;
1497 	elem->st.speculative |= speculative;
1498 	if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) {
1499 		verbose(env, "The sequence of %d jumps is too complex.\n",
1500 			env->stack_size);
1501 		goto err;
1502 	}
1503 	if (elem->st.parent) {
1504 		++elem->st.parent->branches;
1505 		/* WARN_ON(branches > 2) technically makes sense here,
1506 		 * but
1507 		 * 1. speculative states will bump 'branches' for non-branch
1508 		 * instructions
1509 		 * 2. is_state_visited() heuristics may decide not to create
1510 		 * a new state for a sequence of branches and all such current
1511 		 * and cloned states will be pointing to a single parent state
1512 		 * which might have large 'branches' count.
1513 		 */
1514 	}
1515 	return &elem->st;
1516 err:
1517 	free_verifier_state(env->cur_state, true);
1518 	env->cur_state = NULL;
1519 	/* pop all elements and return */
1520 	while (!pop_stack(env, NULL, NULL, false));
1521 	return NULL;
1522 }
1523 
1524 #define CALLER_SAVED_REGS 6
1525 static const int caller_saved[CALLER_SAVED_REGS] = {
1526 	BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
1527 };
1528 
1529 /* This helper doesn't clear reg->id */
1530 static void ___mark_reg_known(struct bpf_reg_state *reg, u64 imm)
1531 {
1532 	reg->var_off = tnum_const(imm);
1533 	reg->smin_value = (s64)imm;
1534 	reg->smax_value = (s64)imm;
1535 	reg->umin_value = imm;
1536 	reg->umax_value = imm;
1537 
1538 	reg->s32_min_value = (s32)imm;
1539 	reg->s32_max_value = (s32)imm;
1540 	reg->u32_min_value = (u32)imm;
1541 	reg->u32_max_value = (u32)imm;
1542 }
1543 
1544 /* Mark the unknown part of a register (variable offset or scalar value) as
1545  * known to have the value @imm.
1546  */
1547 static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm)
1548 {
1549 	/* Clear off and union(map_ptr, range) */
1550 	memset(((u8 *)reg) + sizeof(reg->type), 0,
1551 	       offsetof(struct bpf_reg_state, var_off) - sizeof(reg->type));
1552 	reg->id = 0;
1553 	reg->ref_obj_id = 0;
1554 	___mark_reg_known(reg, imm);
1555 }
1556 
1557 static void __mark_reg32_known(struct bpf_reg_state *reg, u64 imm)
1558 {
1559 	reg->var_off = tnum_const_subreg(reg->var_off, imm);
1560 	reg->s32_min_value = (s32)imm;
1561 	reg->s32_max_value = (s32)imm;
1562 	reg->u32_min_value = (u32)imm;
1563 	reg->u32_max_value = (u32)imm;
1564 }
1565 
1566 /* Mark the 'variable offset' part of a register as zero.  This should be
1567  * used only on registers holding a pointer type.
1568  */
1569 static void __mark_reg_known_zero(struct bpf_reg_state *reg)
1570 {
1571 	__mark_reg_known(reg, 0);
1572 }
1573 
1574 static void __mark_reg_const_zero(struct bpf_reg_state *reg)
1575 {
1576 	__mark_reg_known(reg, 0);
1577 	reg->type = SCALAR_VALUE;
1578 }
1579 
1580 static void mark_reg_known_zero(struct bpf_verifier_env *env,
1581 				struct bpf_reg_state *regs, u32 regno)
1582 {
1583 	if (WARN_ON(regno >= MAX_BPF_REG)) {
1584 		verbose(env, "mark_reg_known_zero(regs, %u)\n", regno);
1585 		/* Something bad happened, let's kill all regs */
1586 		for (regno = 0; regno < MAX_BPF_REG; regno++)
1587 			__mark_reg_not_init(env, regs + regno);
1588 		return;
1589 	}
1590 	__mark_reg_known_zero(regs + regno);
1591 }
1592 
1593 static void __mark_dynptr_reg(struct bpf_reg_state *reg, enum bpf_dynptr_type type,
1594 			      bool first_slot, int dynptr_id)
1595 {
1596 	/* reg->type has no meaning for STACK_DYNPTR, but when we set reg for
1597 	 * callback arguments, it does need to be CONST_PTR_TO_DYNPTR, so simply
1598 	 * set it unconditionally as it is ignored for STACK_DYNPTR anyway.
1599 	 */
1600 	__mark_reg_known_zero(reg);
1601 	reg->type = CONST_PTR_TO_DYNPTR;
1602 	/* Give each dynptr a unique id to uniquely associate slices to it. */
1603 	reg->id = dynptr_id;
1604 	reg->dynptr.type = type;
1605 	reg->dynptr.first_slot = first_slot;
1606 }
1607 
1608 static void mark_ptr_not_null_reg(struct bpf_reg_state *reg)
1609 {
1610 	if (base_type(reg->type) == PTR_TO_MAP_VALUE) {
1611 		const struct bpf_map *map = reg->map_ptr;
1612 
1613 		if (map->inner_map_meta) {
1614 			reg->type = CONST_PTR_TO_MAP;
1615 			reg->map_ptr = map->inner_map_meta;
1616 			/* transfer reg's id which is unique for every map_lookup_elem
1617 			 * as UID of the inner map.
1618 			 */
1619 			if (btf_record_has_field(map->inner_map_meta->record, BPF_TIMER))
1620 				reg->map_uid = reg->id;
1621 		} else if (map->map_type == BPF_MAP_TYPE_XSKMAP) {
1622 			reg->type = PTR_TO_XDP_SOCK;
1623 		} else if (map->map_type == BPF_MAP_TYPE_SOCKMAP ||
1624 			   map->map_type == BPF_MAP_TYPE_SOCKHASH) {
1625 			reg->type = PTR_TO_SOCKET;
1626 		} else {
1627 			reg->type = PTR_TO_MAP_VALUE;
1628 		}
1629 		return;
1630 	}
1631 
1632 	reg->type &= ~PTR_MAYBE_NULL;
1633 }
1634 
1635 static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg)
1636 {
1637 	return type_is_pkt_pointer(reg->type);
1638 }
1639 
1640 static bool reg_is_pkt_pointer_any(const struct bpf_reg_state *reg)
1641 {
1642 	return reg_is_pkt_pointer(reg) ||
1643 	       reg->type == PTR_TO_PACKET_END;
1644 }
1645 
1646 /* Unmodified PTR_TO_PACKET[_META,_END] register from ctx access. */
1647 static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg,
1648 				    enum bpf_reg_type which)
1649 {
1650 	/* The register can already have a range from prior markings.
1651 	 * This is fine as long as it hasn't been advanced from its
1652 	 * origin.
1653 	 */
1654 	return reg->type == which &&
1655 	       reg->id == 0 &&
1656 	       reg->off == 0 &&
1657 	       tnum_equals_const(reg->var_off, 0);
1658 }
1659 
1660 /* Reset the min/max bounds of a register */
1661 static void __mark_reg_unbounded(struct bpf_reg_state *reg)
1662 {
1663 	reg->smin_value = S64_MIN;
1664 	reg->smax_value = S64_MAX;
1665 	reg->umin_value = 0;
1666 	reg->umax_value = U64_MAX;
1667 
1668 	reg->s32_min_value = S32_MIN;
1669 	reg->s32_max_value = S32_MAX;
1670 	reg->u32_min_value = 0;
1671 	reg->u32_max_value = U32_MAX;
1672 }
1673 
1674 static void __mark_reg64_unbounded(struct bpf_reg_state *reg)
1675 {
1676 	reg->smin_value = S64_MIN;
1677 	reg->smax_value = S64_MAX;
1678 	reg->umin_value = 0;
1679 	reg->umax_value = U64_MAX;
1680 }
1681 
1682 static void __mark_reg32_unbounded(struct bpf_reg_state *reg)
1683 {
1684 	reg->s32_min_value = S32_MIN;
1685 	reg->s32_max_value = S32_MAX;
1686 	reg->u32_min_value = 0;
1687 	reg->u32_max_value = U32_MAX;
1688 }
1689 
1690 static void __update_reg32_bounds(struct bpf_reg_state *reg)
1691 {
1692 	struct tnum var32_off = tnum_subreg(reg->var_off);
1693 
1694 	/* min signed is max(sign bit) | min(other bits) */
1695 	reg->s32_min_value = max_t(s32, reg->s32_min_value,
1696 			var32_off.value | (var32_off.mask & S32_MIN));
1697 	/* max signed is min(sign bit) | max(other bits) */
1698 	reg->s32_max_value = min_t(s32, reg->s32_max_value,
1699 			var32_off.value | (var32_off.mask & S32_MAX));
1700 	reg->u32_min_value = max_t(u32, reg->u32_min_value, (u32)var32_off.value);
1701 	reg->u32_max_value = min(reg->u32_max_value,
1702 				 (u32)(var32_off.value | var32_off.mask));
1703 }
1704 
1705 static void __update_reg64_bounds(struct bpf_reg_state *reg)
1706 {
1707 	/* min signed is max(sign bit) | min(other bits) */
1708 	reg->smin_value = max_t(s64, reg->smin_value,
1709 				reg->var_off.value | (reg->var_off.mask & S64_MIN));
1710 	/* max signed is min(sign bit) | max(other bits) */
1711 	reg->smax_value = min_t(s64, reg->smax_value,
1712 				reg->var_off.value | (reg->var_off.mask & S64_MAX));
1713 	reg->umin_value = max(reg->umin_value, reg->var_off.value);
1714 	reg->umax_value = min(reg->umax_value,
1715 			      reg->var_off.value | reg->var_off.mask);
1716 }
1717 
1718 static void __update_reg_bounds(struct bpf_reg_state *reg)
1719 {
1720 	__update_reg32_bounds(reg);
1721 	__update_reg64_bounds(reg);
1722 }
1723 
1724 /* Uses signed min/max values to inform unsigned, and vice-versa */
1725 static void __reg32_deduce_bounds(struct bpf_reg_state *reg)
1726 {
1727 	/* Learn sign from signed bounds.
1728 	 * If we cannot cross the sign boundary, then signed and unsigned bounds
1729 	 * are the same, so combine.  This works even in the negative case, e.g.
1730 	 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
1731 	 */
1732 	if (reg->s32_min_value >= 0 || reg->s32_max_value < 0) {
1733 		reg->s32_min_value = reg->u32_min_value =
1734 			max_t(u32, reg->s32_min_value, reg->u32_min_value);
1735 		reg->s32_max_value = reg->u32_max_value =
1736 			min_t(u32, reg->s32_max_value, reg->u32_max_value);
1737 		return;
1738 	}
1739 	/* Learn sign from unsigned bounds.  Signed bounds cross the sign
1740 	 * boundary, so we must be careful.
1741 	 */
1742 	if ((s32)reg->u32_max_value >= 0) {
1743 		/* Positive.  We can't learn anything from the smin, but smax
1744 		 * is positive, hence safe.
1745 		 */
1746 		reg->s32_min_value = reg->u32_min_value;
1747 		reg->s32_max_value = reg->u32_max_value =
1748 			min_t(u32, reg->s32_max_value, reg->u32_max_value);
1749 	} else if ((s32)reg->u32_min_value < 0) {
1750 		/* Negative.  We can't learn anything from the smax, but smin
1751 		 * is negative, hence safe.
1752 		 */
1753 		reg->s32_min_value = reg->u32_min_value =
1754 			max_t(u32, reg->s32_min_value, reg->u32_min_value);
1755 		reg->s32_max_value = reg->u32_max_value;
1756 	}
1757 }
1758 
1759 static void __reg64_deduce_bounds(struct bpf_reg_state *reg)
1760 {
1761 	/* Learn sign from signed bounds.
1762 	 * If we cannot cross the sign boundary, then signed and unsigned bounds
1763 	 * are the same, so combine.  This works even in the negative case, e.g.
1764 	 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
1765 	 */
1766 	if (reg->smin_value >= 0 || reg->smax_value < 0) {
1767 		reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
1768 							  reg->umin_value);
1769 		reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
1770 							  reg->umax_value);
1771 		return;
1772 	}
1773 	/* Learn sign from unsigned bounds.  Signed bounds cross the sign
1774 	 * boundary, so we must be careful.
1775 	 */
1776 	if ((s64)reg->umax_value >= 0) {
1777 		/* Positive.  We can't learn anything from the smin, but smax
1778 		 * is positive, hence safe.
1779 		 */
1780 		reg->smin_value = reg->umin_value;
1781 		reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
1782 							  reg->umax_value);
1783 	} else if ((s64)reg->umin_value < 0) {
1784 		/* Negative.  We can't learn anything from the smax, but smin
1785 		 * is negative, hence safe.
1786 		 */
1787 		reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
1788 							  reg->umin_value);
1789 		reg->smax_value = reg->umax_value;
1790 	}
1791 }
1792 
1793 static void __reg_deduce_bounds(struct bpf_reg_state *reg)
1794 {
1795 	__reg32_deduce_bounds(reg);
1796 	__reg64_deduce_bounds(reg);
1797 }
1798 
1799 /* Attempts to improve var_off based on unsigned min/max information */
1800 static void __reg_bound_offset(struct bpf_reg_state *reg)
1801 {
1802 	struct tnum var64_off = tnum_intersect(reg->var_off,
1803 					       tnum_range(reg->umin_value,
1804 							  reg->umax_value));
1805 	struct tnum var32_off = tnum_intersect(tnum_subreg(reg->var_off),
1806 						tnum_range(reg->u32_min_value,
1807 							   reg->u32_max_value));
1808 
1809 	reg->var_off = tnum_or(tnum_clear_subreg(var64_off), var32_off);
1810 }
1811 
1812 static void reg_bounds_sync(struct bpf_reg_state *reg)
1813 {
1814 	/* We might have learned new bounds from the var_off. */
1815 	__update_reg_bounds(reg);
1816 	/* We might have learned something about the sign bit. */
1817 	__reg_deduce_bounds(reg);
1818 	/* We might have learned some bits from the bounds. */
1819 	__reg_bound_offset(reg);
1820 	/* Intersecting with the old var_off might have improved our bounds
1821 	 * slightly, e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
1822 	 * then new var_off is (0; 0x7f...fc) which improves our umax.
1823 	 */
1824 	__update_reg_bounds(reg);
1825 }
1826 
1827 static bool __reg32_bound_s64(s32 a)
1828 {
1829 	return a >= 0 && a <= S32_MAX;
1830 }
1831 
1832 static void __reg_assign_32_into_64(struct bpf_reg_state *reg)
1833 {
1834 	reg->umin_value = reg->u32_min_value;
1835 	reg->umax_value = reg->u32_max_value;
1836 
1837 	/* Attempt to pull 32-bit signed bounds into 64-bit bounds but must
1838 	 * be positive otherwise set to worse case bounds and refine later
1839 	 * from tnum.
1840 	 */
1841 	if (__reg32_bound_s64(reg->s32_min_value) &&
1842 	    __reg32_bound_s64(reg->s32_max_value)) {
1843 		reg->smin_value = reg->s32_min_value;
1844 		reg->smax_value = reg->s32_max_value;
1845 	} else {
1846 		reg->smin_value = 0;
1847 		reg->smax_value = U32_MAX;
1848 	}
1849 }
1850 
1851 static void __reg_combine_32_into_64(struct bpf_reg_state *reg)
1852 {
1853 	/* special case when 64-bit register has upper 32-bit register
1854 	 * zeroed. Typically happens after zext or <<32, >>32 sequence
1855 	 * allowing us to use 32-bit bounds directly,
1856 	 */
1857 	if (tnum_equals_const(tnum_clear_subreg(reg->var_off), 0)) {
1858 		__reg_assign_32_into_64(reg);
1859 	} else {
1860 		/* Otherwise the best we can do is push lower 32bit known and
1861 		 * unknown bits into register (var_off set from jmp logic)
1862 		 * then learn as much as possible from the 64-bit tnum
1863 		 * known and unknown bits. The previous smin/smax bounds are
1864 		 * invalid here because of jmp32 compare so mark them unknown
1865 		 * so they do not impact tnum bounds calculation.
1866 		 */
1867 		__mark_reg64_unbounded(reg);
1868 	}
1869 	reg_bounds_sync(reg);
1870 }
1871 
1872 static bool __reg64_bound_s32(s64 a)
1873 {
1874 	return a >= S32_MIN && a <= S32_MAX;
1875 }
1876 
1877 static bool __reg64_bound_u32(u64 a)
1878 {
1879 	return a >= U32_MIN && a <= U32_MAX;
1880 }
1881 
1882 static void __reg_combine_64_into_32(struct bpf_reg_state *reg)
1883 {
1884 	__mark_reg32_unbounded(reg);
1885 	if (__reg64_bound_s32(reg->smin_value) && __reg64_bound_s32(reg->smax_value)) {
1886 		reg->s32_min_value = (s32)reg->smin_value;
1887 		reg->s32_max_value = (s32)reg->smax_value;
1888 	}
1889 	if (__reg64_bound_u32(reg->umin_value) && __reg64_bound_u32(reg->umax_value)) {
1890 		reg->u32_min_value = (u32)reg->umin_value;
1891 		reg->u32_max_value = (u32)reg->umax_value;
1892 	}
1893 	reg_bounds_sync(reg);
1894 }
1895 
1896 /* Mark a register as having a completely unknown (scalar) value. */
1897 static void __mark_reg_unknown(const struct bpf_verifier_env *env,
1898 			       struct bpf_reg_state *reg)
1899 {
1900 	/*
1901 	 * Clear type, off, and union(map_ptr, range) and
1902 	 * padding between 'type' and union
1903 	 */
1904 	memset(reg, 0, offsetof(struct bpf_reg_state, var_off));
1905 	reg->type = SCALAR_VALUE;
1906 	reg->id = 0;
1907 	reg->ref_obj_id = 0;
1908 	reg->var_off = tnum_unknown;
1909 	reg->frameno = 0;
1910 	reg->precise = !env->bpf_capable;
1911 	__mark_reg_unbounded(reg);
1912 }
1913 
1914 static void mark_reg_unknown(struct bpf_verifier_env *env,
1915 			     struct bpf_reg_state *regs, u32 regno)
1916 {
1917 	if (WARN_ON(regno >= MAX_BPF_REG)) {
1918 		verbose(env, "mark_reg_unknown(regs, %u)\n", regno);
1919 		/* Something bad happened, let's kill all regs except FP */
1920 		for (regno = 0; regno < BPF_REG_FP; regno++)
1921 			__mark_reg_not_init(env, regs + regno);
1922 		return;
1923 	}
1924 	__mark_reg_unknown(env, regs + regno);
1925 }
1926 
1927 static void __mark_reg_not_init(const struct bpf_verifier_env *env,
1928 				struct bpf_reg_state *reg)
1929 {
1930 	__mark_reg_unknown(env, reg);
1931 	reg->type = NOT_INIT;
1932 }
1933 
1934 static void mark_reg_not_init(struct bpf_verifier_env *env,
1935 			      struct bpf_reg_state *regs, u32 regno)
1936 {
1937 	if (WARN_ON(regno >= MAX_BPF_REG)) {
1938 		verbose(env, "mark_reg_not_init(regs, %u)\n", regno);
1939 		/* Something bad happened, let's kill all regs except FP */
1940 		for (regno = 0; regno < BPF_REG_FP; regno++)
1941 			__mark_reg_not_init(env, regs + regno);
1942 		return;
1943 	}
1944 	__mark_reg_not_init(env, regs + regno);
1945 }
1946 
1947 static void mark_btf_ld_reg(struct bpf_verifier_env *env,
1948 			    struct bpf_reg_state *regs, u32 regno,
1949 			    enum bpf_reg_type reg_type,
1950 			    struct btf *btf, u32 btf_id,
1951 			    enum bpf_type_flag flag)
1952 {
1953 	if (reg_type == SCALAR_VALUE) {
1954 		mark_reg_unknown(env, regs, regno);
1955 		return;
1956 	}
1957 	mark_reg_known_zero(env, regs, regno);
1958 	regs[regno].type = PTR_TO_BTF_ID | flag;
1959 	regs[regno].btf = btf;
1960 	regs[regno].btf_id = btf_id;
1961 }
1962 
1963 #define DEF_NOT_SUBREG	(0)
1964 static void init_reg_state(struct bpf_verifier_env *env,
1965 			   struct bpf_func_state *state)
1966 {
1967 	struct bpf_reg_state *regs = state->regs;
1968 	int i;
1969 
1970 	for (i = 0; i < MAX_BPF_REG; i++) {
1971 		mark_reg_not_init(env, regs, i);
1972 		regs[i].live = REG_LIVE_NONE;
1973 		regs[i].parent = NULL;
1974 		regs[i].subreg_def = DEF_NOT_SUBREG;
1975 	}
1976 
1977 	/* frame pointer */
1978 	regs[BPF_REG_FP].type = PTR_TO_STACK;
1979 	mark_reg_known_zero(env, regs, BPF_REG_FP);
1980 	regs[BPF_REG_FP].frameno = state->frameno;
1981 }
1982 
1983 #define BPF_MAIN_FUNC (-1)
1984 static void init_func_state(struct bpf_verifier_env *env,
1985 			    struct bpf_func_state *state,
1986 			    int callsite, int frameno, int subprogno)
1987 {
1988 	state->callsite = callsite;
1989 	state->frameno = frameno;
1990 	state->subprogno = subprogno;
1991 	state->callback_ret_range = tnum_range(0, 0);
1992 	init_reg_state(env, state);
1993 	mark_verifier_state_scratched(env);
1994 }
1995 
1996 /* Similar to push_stack(), but for async callbacks */
1997 static struct bpf_verifier_state *push_async_cb(struct bpf_verifier_env *env,
1998 						int insn_idx, int prev_insn_idx,
1999 						int subprog)
2000 {
2001 	struct bpf_verifier_stack_elem *elem;
2002 	struct bpf_func_state *frame;
2003 
2004 	elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL);
2005 	if (!elem)
2006 		goto err;
2007 
2008 	elem->insn_idx = insn_idx;
2009 	elem->prev_insn_idx = prev_insn_idx;
2010 	elem->next = env->head;
2011 	elem->log_pos = env->log.len_used;
2012 	env->head = elem;
2013 	env->stack_size++;
2014 	if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) {
2015 		verbose(env,
2016 			"The sequence of %d jumps is too complex for async cb.\n",
2017 			env->stack_size);
2018 		goto err;
2019 	}
2020 	/* Unlike push_stack() do not copy_verifier_state().
2021 	 * The caller state doesn't matter.
2022 	 * This is async callback. It starts in a fresh stack.
2023 	 * Initialize it similar to do_check_common().
2024 	 */
2025 	elem->st.branches = 1;
2026 	frame = kzalloc(sizeof(*frame), GFP_KERNEL);
2027 	if (!frame)
2028 		goto err;
2029 	init_func_state(env, frame,
2030 			BPF_MAIN_FUNC /* callsite */,
2031 			0 /* frameno within this callchain */,
2032 			subprog /* subprog number within this prog */);
2033 	elem->st.frame[0] = frame;
2034 	return &elem->st;
2035 err:
2036 	free_verifier_state(env->cur_state, true);
2037 	env->cur_state = NULL;
2038 	/* pop all elements and return */
2039 	while (!pop_stack(env, NULL, NULL, false));
2040 	return NULL;
2041 }
2042 
2043 
2044 enum reg_arg_type {
2045 	SRC_OP,		/* register is used as source operand */
2046 	DST_OP,		/* register is used as destination operand */
2047 	DST_OP_NO_MARK	/* same as above, check only, don't mark */
2048 };
2049 
2050 static int cmp_subprogs(const void *a, const void *b)
2051 {
2052 	return ((struct bpf_subprog_info *)a)->start -
2053 	       ((struct bpf_subprog_info *)b)->start;
2054 }
2055 
2056 static int find_subprog(struct bpf_verifier_env *env, int off)
2057 {
2058 	struct bpf_subprog_info *p;
2059 
2060 	p = bsearch(&off, env->subprog_info, env->subprog_cnt,
2061 		    sizeof(env->subprog_info[0]), cmp_subprogs);
2062 	if (!p)
2063 		return -ENOENT;
2064 	return p - env->subprog_info;
2065 
2066 }
2067 
2068 static int add_subprog(struct bpf_verifier_env *env, int off)
2069 {
2070 	int insn_cnt = env->prog->len;
2071 	int ret;
2072 
2073 	if (off >= insn_cnt || off < 0) {
2074 		verbose(env, "call to invalid destination\n");
2075 		return -EINVAL;
2076 	}
2077 	ret = find_subprog(env, off);
2078 	if (ret >= 0)
2079 		return ret;
2080 	if (env->subprog_cnt >= BPF_MAX_SUBPROGS) {
2081 		verbose(env, "too many subprograms\n");
2082 		return -E2BIG;
2083 	}
2084 	/* determine subprog starts. The end is one before the next starts */
2085 	env->subprog_info[env->subprog_cnt++].start = off;
2086 	sort(env->subprog_info, env->subprog_cnt,
2087 	     sizeof(env->subprog_info[0]), cmp_subprogs, NULL);
2088 	return env->subprog_cnt - 1;
2089 }
2090 
2091 #define MAX_KFUNC_DESCS 256
2092 #define MAX_KFUNC_BTFS	256
2093 
2094 struct bpf_kfunc_desc {
2095 	struct btf_func_model func_model;
2096 	u32 func_id;
2097 	s32 imm;
2098 	u16 offset;
2099 };
2100 
2101 struct bpf_kfunc_btf {
2102 	struct btf *btf;
2103 	struct module *module;
2104 	u16 offset;
2105 };
2106 
2107 struct bpf_kfunc_desc_tab {
2108 	struct bpf_kfunc_desc descs[MAX_KFUNC_DESCS];
2109 	u32 nr_descs;
2110 };
2111 
2112 struct bpf_kfunc_btf_tab {
2113 	struct bpf_kfunc_btf descs[MAX_KFUNC_BTFS];
2114 	u32 nr_descs;
2115 };
2116 
2117 static int kfunc_desc_cmp_by_id_off(const void *a, const void *b)
2118 {
2119 	const struct bpf_kfunc_desc *d0 = a;
2120 	const struct bpf_kfunc_desc *d1 = b;
2121 
2122 	/* func_id is not greater than BTF_MAX_TYPE */
2123 	return d0->func_id - d1->func_id ?: d0->offset - d1->offset;
2124 }
2125 
2126 static int kfunc_btf_cmp_by_off(const void *a, const void *b)
2127 {
2128 	const struct bpf_kfunc_btf *d0 = a;
2129 	const struct bpf_kfunc_btf *d1 = b;
2130 
2131 	return d0->offset - d1->offset;
2132 }
2133 
2134 static const struct bpf_kfunc_desc *
2135 find_kfunc_desc(const struct bpf_prog *prog, u32 func_id, u16 offset)
2136 {
2137 	struct bpf_kfunc_desc desc = {
2138 		.func_id = func_id,
2139 		.offset = offset,
2140 	};
2141 	struct bpf_kfunc_desc_tab *tab;
2142 
2143 	tab = prog->aux->kfunc_tab;
2144 	return bsearch(&desc, tab->descs, tab->nr_descs,
2145 		       sizeof(tab->descs[0]), kfunc_desc_cmp_by_id_off);
2146 }
2147 
2148 static struct btf *__find_kfunc_desc_btf(struct bpf_verifier_env *env,
2149 					 s16 offset)
2150 {
2151 	struct bpf_kfunc_btf kf_btf = { .offset = offset };
2152 	struct bpf_kfunc_btf_tab *tab;
2153 	struct bpf_kfunc_btf *b;
2154 	struct module *mod;
2155 	struct btf *btf;
2156 	int btf_fd;
2157 
2158 	tab = env->prog->aux->kfunc_btf_tab;
2159 	b = bsearch(&kf_btf, tab->descs, tab->nr_descs,
2160 		    sizeof(tab->descs[0]), kfunc_btf_cmp_by_off);
2161 	if (!b) {
2162 		if (tab->nr_descs == MAX_KFUNC_BTFS) {
2163 			verbose(env, "too many different module BTFs\n");
2164 			return ERR_PTR(-E2BIG);
2165 		}
2166 
2167 		if (bpfptr_is_null(env->fd_array)) {
2168 			verbose(env, "kfunc offset > 0 without fd_array is invalid\n");
2169 			return ERR_PTR(-EPROTO);
2170 		}
2171 
2172 		if (copy_from_bpfptr_offset(&btf_fd, env->fd_array,
2173 					    offset * sizeof(btf_fd),
2174 					    sizeof(btf_fd)))
2175 			return ERR_PTR(-EFAULT);
2176 
2177 		btf = btf_get_by_fd(btf_fd);
2178 		if (IS_ERR(btf)) {
2179 			verbose(env, "invalid module BTF fd specified\n");
2180 			return btf;
2181 		}
2182 
2183 		if (!btf_is_module(btf)) {
2184 			verbose(env, "BTF fd for kfunc is not a module BTF\n");
2185 			btf_put(btf);
2186 			return ERR_PTR(-EINVAL);
2187 		}
2188 
2189 		mod = btf_try_get_module(btf);
2190 		if (!mod) {
2191 			btf_put(btf);
2192 			return ERR_PTR(-ENXIO);
2193 		}
2194 
2195 		b = &tab->descs[tab->nr_descs++];
2196 		b->btf = btf;
2197 		b->module = mod;
2198 		b->offset = offset;
2199 
2200 		sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]),
2201 		     kfunc_btf_cmp_by_off, NULL);
2202 	}
2203 	return b->btf;
2204 }
2205 
2206 void bpf_free_kfunc_btf_tab(struct bpf_kfunc_btf_tab *tab)
2207 {
2208 	if (!tab)
2209 		return;
2210 
2211 	while (tab->nr_descs--) {
2212 		module_put(tab->descs[tab->nr_descs].module);
2213 		btf_put(tab->descs[tab->nr_descs].btf);
2214 	}
2215 	kfree(tab);
2216 }
2217 
2218 static struct btf *find_kfunc_desc_btf(struct bpf_verifier_env *env, s16 offset)
2219 {
2220 	if (offset) {
2221 		if (offset < 0) {
2222 			/* In the future, this can be allowed to increase limit
2223 			 * of fd index into fd_array, interpreted as u16.
2224 			 */
2225 			verbose(env, "negative offset disallowed for kernel module function call\n");
2226 			return ERR_PTR(-EINVAL);
2227 		}
2228 
2229 		return __find_kfunc_desc_btf(env, offset);
2230 	}
2231 	return btf_vmlinux ?: ERR_PTR(-ENOENT);
2232 }
2233 
2234 static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id, s16 offset)
2235 {
2236 	const struct btf_type *func, *func_proto;
2237 	struct bpf_kfunc_btf_tab *btf_tab;
2238 	struct bpf_kfunc_desc_tab *tab;
2239 	struct bpf_prog_aux *prog_aux;
2240 	struct bpf_kfunc_desc *desc;
2241 	const char *func_name;
2242 	struct btf *desc_btf;
2243 	unsigned long call_imm;
2244 	unsigned long addr;
2245 	int err;
2246 
2247 	prog_aux = env->prog->aux;
2248 	tab = prog_aux->kfunc_tab;
2249 	btf_tab = prog_aux->kfunc_btf_tab;
2250 	if (!tab) {
2251 		if (!btf_vmlinux) {
2252 			verbose(env, "calling kernel function is not supported without CONFIG_DEBUG_INFO_BTF\n");
2253 			return -ENOTSUPP;
2254 		}
2255 
2256 		if (!env->prog->jit_requested) {
2257 			verbose(env, "JIT is required for calling kernel function\n");
2258 			return -ENOTSUPP;
2259 		}
2260 
2261 		if (!bpf_jit_supports_kfunc_call()) {
2262 			verbose(env, "JIT does not support calling kernel function\n");
2263 			return -ENOTSUPP;
2264 		}
2265 
2266 		if (!env->prog->gpl_compatible) {
2267 			verbose(env, "cannot call kernel function from non-GPL compatible program\n");
2268 			return -EINVAL;
2269 		}
2270 
2271 		tab = kzalloc(sizeof(*tab), GFP_KERNEL);
2272 		if (!tab)
2273 			return -ENOMEM;
2274 		prog_aux->kfunc_tab = tab;
2275 	}
2276 
2277 	/* func_id == 0 is always invalid, but instead of returning an error, be
2278 	 * conservative and wait until the code elimination pass before returning
2279 	 * error, so that invalid calls that get pruned out can be in BPF programs
2280 	 * loaded from userspace.  It is also required that offset be untouched
2281 	 * for such calls.
2282 	 */
2283 	if (!func_id && !offset)
2284 		return 0;
2285 
2286 	if (!btf_tab && offset) {
2287 		btf_tab = kzalloc(sizeof(*btf_tab), GFP_KERNEL);
2288 		if (!btf_tab)
2289 			return -ENOMEM;
2290 		prog_aux->kfunc_btf_tab = btf_tab;
2291 	}
2292 
2293 	desc_btf = find_kfunc_desc_btf(env, offset);
2294 	if (IS_ERR(desc_btf)) {
2295 		verbose(env, "failed to find BTF for kernel function\n");
2296 		return PTR_ERR(desc_btf);
2297 	}
2298 
2299 	if (find_kfunc_desc(env->prog, func_id, offset))
2300 		return 0;
2301 
2302 	if (tab->nr_descs == MAX_KFUNC_DESCS) {
2303 		verbose(env, "too many different kernel function calls\n");
2304 		return -E2BIG;
2305 	}
2306 
2307 	func = btf_type_by_id(desc_btf, func_id);
2308 	if (!func || !btf_type_is_func(func)) {
2309 		verbose(env, "kernel btf_id %u is not a function\n",
2310 			func_id);
2311 		return -EINVAL;
2312 	}
2313 	func_proto = btf_type_by_id(desc_btf, func->type);
2314 	if (!func_proto || !btf_type_is_func_proto(func_proto)) {
2315 		verbose(env, "kernel function btf_id %u does not have a valid func_proto\n",
2316 			func_id);
2317 		return -EINVAL;
2318 	}
2319 
2320 	func_name = btf_name_by_offset(desc_btf, func->name_off);
2321 	addr = kallsyms_lookup_name(func_name);
2322 	if (!addr) {
2323 		verbose(env, "cannot find address for kernel function %s\n",
2324 			func_name);
2325 		return -EINVAL;
2326 	}
2327 
2328 	call_imm = BPF_CALL_IMM(addr);
2329 	/* Check whether or not the relative offset overflows desc->imm */
2330 	if ((unsigned long)(s32)call_imm != call_imm) {
2331 		verbose(env, "address of kernel function %s is out of range\n",
2332 			func_name);
2333 		return -EINVAL;
2334 	}
2335 
2336 	if (bpf_dev_bound_kfunc_id(func_id)) {
2337 		err = bpf_dev_bound_kfunc_check(&env->log, prog_aux);
2338 		if (err)
2339 			return err;
2340 	}
2341 
2342 	desc = &tab->descs[tab->nr_descs++];
2343 	desc->func_id = func_id;
2344 	desc->imm = call_imm;
2345 	desc->offset = offset;
2346 	err = btf_distill_func_proto(&env->log, desc_btf,
2347 				     func_proto, func_name,
2348 				     &desc->func_model);
2349 	if (!err)
2350 		sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]),
2351 		     kfunc_desc_cmp_by_id_off, NULL);
2352 	return err;
2353 }
2354 
2355 static int kfunc_desc_cmp_by_imm(const void *a, const void *b)
2356 {
2357 	const struct bpf_kfunc_desc *d0 = a;
2358 	const struct bpf_kfunc_desc *d1 = b;
2359 
2360 	if (d0->imm > d1->imm)
2361 		return 1;
2362 	else if (d0->imm < d1->imm)
2363 		return -1;
2364 	return 0;
2365 }
2366 
2367 static void sort_kfunc_descs_by_imm(struct bpf_prog *prog)
2368 {
2369 	struct bpf_kfunc_desc_tab *tab;
2370 
2371 	tab = prog->aux->kfunc_tab;
2372 	if (!tab)
2373 		return;
2374 
2375 	sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]),
2376 	     kfunc_desc_cmp_by_imm, NULL);
2377 }
2378 
2379 bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog)
2380 {
2381 	return !!prog->aux->kfunc_tab;
2382 }
2383 
2384 const struct btf_func_model *
2385 bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
2386 			 const struct bpf_insn *insn)
2387 {
2388 	const struct bpf_kfunc_desc desc = {
2389 		.imm = insn->imm,
2390 	};
2391 	const struct bpf_kfunc_desc *res;
2392 	struct bpf_kfunc_desc_tab *tab;
2393 
2394 	tab = prog->aux->kfunc_tab;
2395 	res = bsearch(&desc, tab->descs, tab->nr_descs,
2396 		      sizeof(tab->descs[0]), kfunc_desc_cmp_by_imm);
2397 
2398 	return res ? &res->func_model : NULL;
2399 }
2400 
2401 static int add_subprog_and_kfunc(struct bpf_verifier_env *env)
2402 {
2403 	struct bpf_subprog_info *subprog = env->subprog_info;
2404 	struct bpf_insn *insn = env->prog->insnsi;
2405 	int i, ret, insn_cnt = env->prog->len;
2406 
2407 	/* Add entry function. */
2408 	ret = add_subprog(env, 0);
2409 	if (ret)
2410 		return ret;
2411 
2412 	for (i = 0; i < insn_cnt; i++, insn++) {
2413 		if (!bpf_pseudo_func(insn) && !bpf_pseudo_call(insn) &&
2414 		    !bpf_pseudo_kfunc_call(insn))
2415 			continue;
2416 
2417 		if (!env->bpf_capable) {
2418 			verbose(env, "loading/calling other bpf or kernel functions are allowed for CAP_BPF and CAP_SYS_ADMIN\n");
2419 			return -EPERM;
2420 		}
2421 
2422 		if (bpf_pseudo_func(insn) || bpf_pseudo_call(insn))
2423 			ret = add_subprog(env, i + insn->imm + 1);
2424 		else
2425 			ret = add_kfunc_call(env, insn->imm, insn->off);
2426 
2427 		if (ret < 0)
2428 			return ret;
2429 	}
2430 
2431 	/* Add a fake 'exit' subprog which could simplify subprog iteration
2432 	 * logic. 'subprog_cnt' should not be increased.
2433 	 */
2434 	subprog[env->subprog_cnt].start = insn_cnt;
2435 
2436 	if (env->log.level & BPF_LOG_LEVEL2)
2437 		for (i = 0; i < env->subprog_cnt; i++)
2438 			verbose(env, "func#%d @%d\n", i, subprog[i].start);
2439 
2440 	return 0;
2441 }
2442 
2443 static int check_subprogs(struct bpf_verifier_env *env)
2444 {
2445 	int i, subprog_start, subprog_end, off, cur_subprog = 0;
2446 	struct bpf_subprog_info *subprog = env->subprog_info;
2447 	struct bpf_insn *insn = env->prog->insnsi;
2448 	int insn_cnt = env->prog->len;
2449 
2450 	/* now check that all jumps are within the same subprog */
2451 	subprog_start = subprog[cur_subprog].start;
2452 	subprog_end = subprog[cur_subprog + 1].start;
2453 	for (i = 0; i < insn_cnt; i++) {
2454 		u8 code = insn[i].code;
2455 
2456 		if (code == (BPF_JMP | BPF_CALL) &&
2457 		    insn[i].imm == BPF_FUNC_tail_call &&
2458 		    insn[i].src_reg != BPF_PSEUDO_CALL)
2459 			subprog[cur_subprog].has_tail_call = true;
2460 		if (BPF_CLASS(code) == BPF_LD &&
2461 		    (BPF_MODE(code) == BPF_ABS || BPF_MODE(code) == BPF_IND))
2462 			subprog[cur_subprog].has_ld_abs = true;
2463 		if (BPF_CLASS(code) != BPF_JMP && BPF_CLASS(code) != BPF_JMP32)
2464 			goto next;
2465 		if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL)
2466 			goto next;
2467 		off = i + insn[i].off + 1;
2468 		if (off < subprog_start || off >= subprog_end) {
2469 			verbose(env, "jump out of range from insn %d to %d\n", i, off);
2470 			return -EINVAL;
2471 		}
2472 next:
2473 		if (i == subprog_end - 1) {
2474 			/* to avoid fall-through from one subprog into another
2475 			 * the last insn of the subprog should be either exit
2476 			 * or unconditional jump back
2477 			 */
2478 			if (code != (BPF_JMP | BPF_EXIT) &&
2479 			    code != (BPF_JMP | BPF_JA)) {
2480 				verbose(env, "last insn is not an exit or jmp\n");
2481 				return -EINVAL;
2482 			}
2483 			subprog_start = subprog_end;
2484 			cur_subprog++;
2485 			if (cur_subprog < env->subprog_cnt)
2486 				subprog_end = subprog[cur_subprog + 1].start;
2487 		}
2488 	}
2489 	return 0;
2490 }
2491 
2492 /* Parentage chain of this register (or stack slot) should take care of all
2493  * issues like callee-saved registers, stack slot allocation time, etc.
2494  */
2495 static int mark_reg_read(struct bpf_verifier_env *env,
2496 			 const struct bpf_reg_state *state,
2497 			 struct bpf_reg_state *parent, u8 flag)
2498 {
2499 	bool writes = parent == state->parent; /* Observe write marks */
2500 	int cnt = 0;
2501 
2502 	while (parent) {
2503 		/* if read wasn't screened by an earlier write ... */
2504 		if (writes && state->live & REG_LIVE_WRITTEN)
2505 			break;
2506 		if (parent->live & REG_LIVE_DONE) {
2507 			verbose(env, "verifier BUG type %s var_off %lld off %d\n",
2508 				reg_type_str(env, parent->type),
2509 				parent->var_off.value, parent->off);
2510 			return -EFAULT;
2511 		}
2512 		/* The first condition is more likely to be true than the
2513 		 * second, checked it first.
2514 		 */
2515 		if ((parent->live & REG_LIVE_READ) == flag ||
2516 		    parent->live & REG_LIVE_READ64)
2517 			/* The parentage chain never changes and
2518 			 * this parent was already marked as LIVE_READ.
2519 			 * There is no need to keep walking the chain again and
2520 			 * keep re-marking all parents as LIVE_READ.
2521 			 * This case happens when the same register is read
2522 			 * multiple times without writes into it in-between.
2523 			 * Also, if parent has the stronger REG_LIVE_READ64 set,
2524 			 * then no need to set the weak REG_LIVE_READ32.
2525 			 */
2526 			break;
2527 		/* ... then we depend on parent's value */
2528 		parent->live |= flag;
2529 		/* REG_LIVE_READ64 overrides REG_LIVE_READ32. */
2530 		if (flag == REG_LIVE_READ64)
2531 			parent->live &= ~REG_LIVE_READ32;
2532 		state = parent;
2533 		parent = state->parent;
2534 		writes = true;
2535 		cnt++;
2536 	}
2537 
2538 	if (env->longest_mark_read_walk < cnt)
2539 		env->longest_mark_read_walk = cnt;
2540 	return 0;
2541 }
2542 
2543 static int mark_dynptr_read(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
2544 {
2545 	struct bpf_func_state *state = func(env, reg);
2546 	int spi, ret;
2547 
2548 	/* For CONST_PTR_TO_DYNPTR, it must have already been done by
2549 	 * check_reg_arg in check_helper_call and mark_btf_func_reg_size in
2550 	 * check_kfunc_call.
2551 	 */
2552 	if (reg->type == CONST_PTR_TO_DYNPTR)
2553 		return 0;
2554 	spi = dynptr_get_spi(env, reg);
2555 	if (spi < 0)
2556 		return spi;
2557 	/* Caller ensures dynptr is valid and initialized, which means spi is in
2558 	 * bounds and spi is the first dynptr slot. Simply mark stack slot as
2559 	 * read.
2560 	 */
2561 	ret = mark_reg_read(env, &state->stack[spi].spilled_ptr,
2562 			    state->stack[spi].spilled_ptr.parent, REG_LIVE_READ64);
2563 	if (ret)
2564 		return ret;
2565 	return mark_reg_read(env, &state->stack[spi - 1].spilled_ptr,
2566 			     state->stack[spi - 1].spilled_ptr.parent, REG_LIVE_READ64);
2567 }
2568 
2569 /* This function is supposed to be used by the following 32-bit optimization
2570  * code only. It returns TRUE if the source or destination register operates
2571  * on 64-bit, otherwise return FALSE.
2572  */
2573 static bool is_reg64(struct bpf_verifier_env *env, struct bpf_insn *insn,
2574 		     u32 regno, struct bpf_reg_state *reg, enum reg_arg_type t)
2575 {
2576 	u8 code, class, op;
2577 
2578 	code = insn->code;
2579 	class = BPF_CLASS(code);
2580 	op = BPF_OP(code);
2581 	if (class == BPF_JMP) {
2582 		/* BPF_EXIT for "main" will reach here. Return TRUE
2583 		 * conservatively.
2584 		 */
2585 		if (op == BPF_EXIT)
2586 			return true;
2587 		if (op == BPF_CALL) {
2588 			/* BPF to BPF call will reach here because of marking
2589 			 * caller saved clobber with DST_OP_NO_MARK for which we
2590 			 * don't care the register def because they are anyway
2591 			 * marked as NOT_INIT already.
2592 			 */
2593 			if (insn->src_reg == BPF_PSEUDO_CALL)
2594 				return false;
2595 			/* Helper call will reach here because of arg type
2596 			 * check, conservatively return TRUE.
2597 			 */
2598 			if (t == SRC_OP)
2599 				return true;
2600 
2601 			return false;
2602 		}
2603 	}
2604 
2605 	if (class == BPF_ALU64 || class == BPF_JMP ||
2606 	    /* BPF_END always use BPF_ALU class. */
2607 	    (class == BPF_ALU && op == BPF_END && insn->imm == 64))
2608 		return true;
2609 
2610 	if (class == BPF_ALU || class == BPF_JMP32)
2611 		return false;
2612 
2613 	if (class == BPF_LDX) {
2614 		if (t != SRC_OP)
2615 			return BPF_SIZE(code) == BPF_DW;
2616 		/* LDX source must be ptr. */
2617 		return true;
2618 	}
2619 
2620 	if (class == BPF_STX) {
2621 		/* BPF_STX (including atomic variants) has multiple source
2622 		 * operands, one of which is a ptr. Check whether the caller is
2623 		 * asking about it.
2624 		 */
2625 		if (t == SRC_OP && reg->type != SCALAR_VALUE)
2626 			return true;
2627 		return BPF_SIZE(code) == BPF_DW;
2628 	}
2629 
2630 	if (class == BPF_LD) {
2631 		u8 mode = BPF_MODE(code);
2632 
2633 		/* LD_IMM64 */
2634 		if (mode == BPF_IMM)
2635 			return true;
2636 
2637 		/* Both LD_IND and LD_ABS return 32-bit data. */
2638 		if (t != SRC_OP)
2639 			return  false;
2640 
2641 		/* Implicit ctx ptr. */
2642 		if (regno == BPF_REG_6)
2643 			return true;
2644 
2645 		/* Explicit source could be any width. */
2646 		return true;
2647 	}
2648 
2649 	if (class == BPF_ST)
2650 		/* The only source register for BPF_ST is a ptr. */
2651 		return true;
2652 
2653 	/* Conservatively return true at default. */
2654 	return true;
2655 }
2656 
2657 /* Return the regno defined by the insn, or -1. */
2658 static int insn_def_regno(const struct bpf_insn *insn)
2659 {
2660 	switch (BPF_CLASS(insn->code)) {
2661 	case BPF_JMP:
2662 	case BPF_JMP32:
2663 	case BPF_ST:
2664 		return -1;
2665 	case BPF_STX:
2666 		if (BPF_MODE(insn->code) == BPF_ATOMIC &&
2667 		    (insn->imm & BPF_FETCH)) {
2668 			if (insn->imm == BPF_CMPXCHG)
2669 				return BPF_REG_0;
2670 			else
2671 				return insn->src_reg;
2672 		} else {
2673 			return -1;
2674 		}
2675 	default:
2676 		return insn->dst_reg;
2677 	}
2678 }
2679 
2680 /* Return TRUE if INSN has defined any 32-bit value explicitly. */
2681 static bool insn_has_def32(struct bpf_verifier_env *env, struct bpf_insn *insn)
2682 {
2683 	int dst_reg = insn_def_regno(insn);
2684 
2685 	if (dst_reg == -1)
2686 		return false;
2687 
2688 	return !is_reg64(env, insn, dst_reg, NULL, DST_OP);
2689 }
2690 
2691 static void mark_insn_zext(struct bpf_verifier_env *env,
2692 			   struct bpf_reg_state *reg)
2693 {
2694 	s32 def_idx = reg->subreg_def;
2695 
2696 	if (def_idx == DEF_NOT_SUBREG)
2697 		return;
2698 
2699 	env->insn_aux_data[def_idx - 1].zext_dst = true;
2700 	/* The dst will be zero extended, so won't be sub-register anymore. */
2701 	reg->subreg_def = DEF_NOT_SUBREG;
2702 }
2703 
2704 static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
2705 			 enum reg_arg_type t)
2706 {
2707 	struct bpf_verifier_state *vstate = env->cur_state;
2708 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
2709 	struct bpf_insn *insn = env->prog->insnsi + env->insn_idx;
2710 	struct bpf_reg_state *reg, *regs = state->regs;
2711 	bool rw64;
2712 
2713 	if (regno >= MAX_BPF_REG) {
2714 		verbose(env, "R%d is invalid\n", regno);
2715 		return -EINVAL;
2716 	}
2717 
2718 	mark_reg_scratched(env, regno);
2719 
2720 	reg = &regs[regno];
2721 	rw64 = is_reg64(env, insn, regno, reg, t);
2722 	if (t == SRC_OP) {
2723 		/* check whether register used as source operand can be read */
2724 		if (reg->type == NOT_INIT) {
2725 			verbose(env, "R%d !read_ok\n", regno);
2726 			return -EACCES;
2727 		}
2728 		/* We don't need to worry about FP liveness because it's read-only */
2729 		if (regno == BPF_REG_FP)
2730 			return 0;
2731 
2732 		if (rw64)
2733 			mark_insn_zext(env, reg);
2734 
2735 		return mark_reg_read(env, reg, reg->parent,
2736 				     rw64 ? REG_LIVE_READ64 : REG_LIVE_READ32);
2737 	} else {
2738 		/* check whether register used as dest operand can be written to */
2739 		if (regno == BPF_REG_FP) {
2740 			verbose(env, "frame pointer is read only\n");
2741 			return -EACCES;
2742 		}
2743 		reg->live |= REG_LIVE_WRITTEN;
2744 		reg->subreg_def = rw64 ? DEF_NOT_SUBREG : env->insn_idx + 1;
2745 		if (t == DST_OP)
2746 			mark_reg_unknown(env, regs, regno);
2747 	}
2748 	return 0;
2749 }
2750 
2751 static void mark_jmp_point(struct bpf_verifier_env *env, int idx)
2752 {
2753 	env->insn_aux_data[idx].jmp_point = true;
2754 }
2755 
2756 static bool is_jmp_point(struct bpf_verifier_env *env, int insn_idx)
2757 {
2758 	return env->insn_aux_data[insn_idx].jmp_point;
2759 }
2760 
2761 /* for any branch, call, exit record the history of jmps in the given state */
2762 static int push_jmp_history(struct bpf_verifier_env *env,
2763 			    struct bpf_verifier_state *cur)
2764 {
2765 	u32 cnt = cur->jmp_history_cnt;
2766 	struct bpf_idx_pair *p;
2767 	size_t alloc_size;
2768 
2769 	if (!is_jmp_point(env, env->insn_idx))
2770 		return 0;
2771 
2772 	cnt++;
2773 	alloc_size = kmalloc_size_roundup(size_mul(cnt, sizeof(*p)));
2774 	p = krealloc(cur->jmp_history, alloc_size, GFP_USER);
2775 	if (!p)
2776 		return -ENOMEM;
2777 	p[cnt - 1].idx = env->insn_idx;
2778 	p[cnt - 1].prev_idx = env->prev_insn_idx;
2779 	cur->jmp_history = p;
2780 	cur->jmp_history_cnt = cnt;
2781 	return 0;
2782 }
2783 
2784 /* Backtrack one insn at a time. If idx is not at the top of recorded
2785  * history then previous instruction came from straight line execution.
2786  */
2787 static int get_prev_insn_idx(struct bpf_verifier_state *st, int i,
2788 			     u32 *history)
2789 {
2790 	u32 cnt = *history;
2791 
2792 	if (cnt && st->jmp_history[cnt - 1].idx == i) {
2793 		i = st->jmp_history[cnt - 1].prev_idx;
2794 		(*history)--;
2795 	} else {
2796 		i--;
2797 	}
2798 	return i;
2799 }
2800 
2801 static const char *disasm_kfunc_name(void *data, const struct bpf_insn *insn)
2802 {
2803 	const struct btf_type *func;
2804 	struct btf *desc_btf;
2805 
2806 	if (insn->src_reg != BPF_PSEUDO_KFUNC_CALL)
2807 		return NULL;
2808 
2809 	desc_btf = find_kfunc_desc_btf(data, insn->off);
2810 	if (IS_ERR(desc_btf))
2811 		return "<error>";
2812 
2813 	func = btf_type_by_id(desc_btf, insn->imm);
2814 	return btf_name_by_offset(desc_btf, func->name_off);
2815 }
2816 
2817 /* For given verifier state backtrack_insn() is called from the last insn to
2818  * the first insn. Its purpose is to compute a bitmask of registers and
2819  * stack slots that needs precision in the parent verifier state.
2820  */
2821 static int backtrack_insn(struct bpf_verifier_env *env, int idx,
2822 			  u32 *reg_mask, u64 *stack_mask)
2823 {
2824 	const struct bpf_insn_cbs cbs = {
2825 		.cb_call	= disasm_kfunc_name,
2826 		.cb_print	= verbose,
2827 		.private_data	= env,
2828 	};
2829 	struct bpf_insn *insn = env->prog->insnsi + idx;
2830 	u8 class = BPF_CLASS(insn->code);
2831 	u8 opcode = BPF_OP(insn->code);
2832 	u8 mode = BPF_MODE(insn->code);
2833 	u32 dreg = 1u << insn->dst_reg;
2834 	u32 sreg = 1u << insn->src_reg;
2835 	u32 spi;
2836 
2837 	if (insn->code == 0)
2838 		return 0;
2839 	if (env->log.level & BPF_LOG_LEVEL2) {
2840 		verbose(env, "regs=%x stack=%llx before ", *reg_mask, *stack_mask);
2841 		verbose(env, "%d: ", idx);
2842 		print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
2843 	}
2844 
2845 	if (class == BPF_ALU || class == BPF_ALU64) {
2846 		if (!(*reg_mask & dreg))
2847 			return 0;
2848 		if (opcode == BPF_MOV) {
2849 			if (BPF_SRC(insn->code) == BPF_X) {
2850 				/* dreg = sreg
2851 				 * dreg needs precision after this insn
2852 				 * sreg needs precision before this insn
2853 				 */
2854 				*reg_mask &= ~dreg;
2855 				*reg_mask |= sreg;
2856 			} else {
2857 				/* dreg = K
2858 				 * dreg needs precision after this insn.
2859 				 * Corresponding register is already marked
2860 				 * as precise=true in this verifier state.
2861 				 * No further markings in parent are necessary
2862 				 */
2863 				*reg_mask &= ~dreg;
2864 			}
2865 		} else {
2866 			if (BPF_SRC(insn->code) == BPF_X) {
2867 				/* dreg += sreg
2868 				 * both dreg and sreg need precision
2869 				 * before this insn
2870 				 */
2871 				*reg_mask |= sreg;
2872 			} /* else dreg += K
2873 			   * dreg still needs precision before this insn
2874 			   */
2875 		}
2876 	} else if (class == BPF_LDX) {
2877 		if (!(*reg_mask & dreg))
2878 			return 0;
2879 		*reg_mask &= ~dreg;
2880 
2881 		/* scalars can only be spilled into stack w/o losing precision.
2882 		 * Load from any other memory can be zero extended.
2883 		 * The desire to keep that precision is already indicated
2884 		 * by 'precise' mark in corresponding register of this state.
2885 		 * No further tracking necessary.
2886 		 */
2887 		if (insn->src_reg != BPF_REG_FP)
2888 			return 0;
2889 
2890 		/* dreg = *(u64 *)[fp - off] was a fill from the stack.
2891 		 * that [fp - off] slot contains scalar that needs to be
2892 		 * tracked with precision
2893 		 */
2894 		spi = (-insn->off - 1) / BPF_REG_SIZE;
2895 		if (spi >= 64) {
2896 			verbose(env, "BUG spi %d\n", spi);
2897 			WARN_ONCE(1, "verifier backtracking bug");
2898 			return -EFAULT;
2899 		}
2900 		*stack_mask |= 1ull << spi;
2901 	} else if (class == BPF_STX || class == BPF_ST) {
2902 		if (*reg_mask & dreg)
2903 			/* stx & st shouldn't be using _scalar_ dst_reg
2904 			 * to access memory. It means backtracking
2905 			 * encountered a case of pointer subtraction.
2906 			 */
2907 			return -ENOTSUPP;
2908 		/* scalars can only be spilled into stack */
2909 		if (insn->dst_reg != BPF_REG_FP)
2910 			return 0;
2911 		spi = (-insn->off - 1) / BPF_REG_SIZE;
2912 		if (spi >= 64) {
2913 			verbose(env, "BUG spi %d\n", spi);
2914 			WARN_ONCE(1, "verifier backtracking bug");
2915 			return -EFAULT;
2916 		}
2917 		if (!(*stack_mask & (1ull << spi)))
2918 			return 0;
2919 		*stack_mask &= ~(1ull << spi);
2920 		if (class == BPF_STX)
2921 			*reg_mask |= sreg;
2922 	} else if (class == BPF_JMP || class == BPF_JMP32) {
2923 		if (opcode == BPF_CALL) {
2924 			if (insn->src_reg == BPF_PSEUDO_CALL)
2925 				return -ENOTSUPP;
2926 			/* BPF helpers that invoke callback subprogs are
2927 			 * equivalent to BPF_PSEUDO_CALL above
2928 			 */
2929 			if (insn->src_reg == 0 && is_callback_calling_function(insn->imm))
2930 				return -ENOTSUPP;
2931 			/* kfunc with imm==0 is invalid and fixup_kfunc_call will
2932 			 * catch this error later. Make backtracking conservative
2933 			 * with ENOTSUPP.
2934 			 */
2935 			if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL && insn->imm == 0)
2936 				return -ENOTSUPP;
2937 			/* regular helper call sets R0 */
2938 			*reg_mask &= ~1;
2939 			if (*reg_mask & 0x3f) {
2940 				/* if backtracing was looking for registers R1-R5
2941 				 * they should have been found already.
2942 				 */
2943 				verbose(env, "BUG regs %x\n", *reg_mask);
2944 				WARN_ONCE(1, "verifier backtracking bug");
2945 				return -EFAULT;
2946 			}
2947 		} else if (opcode == BPF_EXIT) {
2948 			return -ENOTSUPP;
2949 		}
2950 	} else if (class == BPF_LD) {
2951 		if (!(*reg_mask & dreg))
2952 			return 0;
2953 		*reg_mask &= ~dreg;
2954 		/* It's ld_imm64 or ld_abs or ld_ind.
2955 		 * For ld_imm64 no further tracking of precision
2956 		 * into parent is necessary
2957 		 */
2958 		if (mode == BPF_IND || mode == BPF_ABS)
2959 			/* to be analyzed */
2960 			return -ENOTSUPP;
2961 	}
2962 	return 0;
2963 }
2964 
2965 /* the scalar precision tracking algorithm:
2966  * . at the start all registers have precise=false.
2967  * . scalar ranges are tracked as normal through alu and jmp insns.
2968  * . once precise value of the scalar register is used in:
2969  *   .  ptr + scalar alu
2970  *   . if (scalar cond K|scalar)
2971  *   .  helper_call(.., scalar, ...) where ARG_CONST is expected
2972  *   backtrack through the verifier states and mark all registers and
2973  *   stack slots with spilled constants that these scalar regisers
2974  *   should be precise.
2975  * . during state pruning two registers (or spilled stack slots)
2976  *   are equivalent if both are not precise.
2977  *
2978  * Note the verifier cannot simply walk register parentage chain,
2979  * since many different registers and stack slots could have been
2980  * used to compute single precise scalar.
2981  *
2982  * The approach of starting with precise=true for all registers and then
2983  * backtrack to mark a register as not precise when the verifier detects
2984  * that program doesn't care about specific value (e.g., when helper
2985  * takes register as ARG_ANYTHING parameter) is not safe.
2986  *
2987  * It's ok to walk single parentage chain of the verifier states.
2988  * It's possible that this backtracking will go all the way till 1st insn.
2989  * All other branches will be explored for needing precision later.
2990  *
2991  * The backtracking needs to deal with cases like:
2992  *   R8=map_value(id=0,off=0,ks=4,vs=1952,imm=0) R9_w=map_value(id=0,off=40,ks=4,vs=1952,imm=0)
2993  * r9 -= r8
2994  * r5 = r9
2995  * if r5 > 0x79f goto pc+7
2996  *    R5_w=inv(id=0,umax_value=1951,var_off=(0x0; 0x7ff))
2997  * r5 += 1
2998  * ...
2999  * call bpf_perf_event_output#25
3000  *   where .arg5_type = ARG_CONST_SIZE_OR_ZERO
3001  *
3002  * and this case:
3003  * r6 = 1
3004  * call foo // uses callee's r6 inside to compute r0
3005  * r0 += r6
3006  * if r0 == 0 goto
3007  *
3008  * to track above reg_mask/stack_mask needs to be independent for each frame.
3009  *
3010  * Also if parent's curframe > frame where backtracking started,
3011  * the verifier need to mark registers in both frames, otherwise callees
3012  * may incorrectly prune callers. This is similar to
3013  * commit 7640ead93924 ("bpf: verifier: make sure callees don't prune with caller differences")
3014  *
3015  * For now backtracking falls back into conservative marking.
3016  */
3017 static void mark_all_scalars_precise(struct bpf_verifier_env *env,
3018 				     struct bpf_verifier_state *st)
3019 {
3020 	struct bpf_func_state *func;
3021 	struct bpf_reg_state *reg;
3022 	int i, j;
3023 
3024 	/* big hammer: mark all scalars precise in this path.
3025 	 * pop_stack may still get !precise scalars.
3026 	 * We also skip current state and go straight to first parent state,
3027 	 * because precision markings in current non-checkpointed state are
3028 	 * not needed. See why in the comment in __mark_chain_precision below.
3029 	 */
3030 	for (st = st->parent; st; st = st->parent) {
3031 		for (i = 0; i <= st->curframe; i++) {
3032 			func = st->frame[i];
3033 			for (j = 0; j < BPF_REG_FP; j++) {
3034 				reg = &func->regs[j];
3035 				if (reg->type != SCALAR_VALUE)
3036 					continue;
3037 				reg->precise = true;
3038 			}
3039 			for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) {
3040 				if (!is_spilled_reg(&func->stack[j]))
3041 					continue;
3042 				reg = &func->stack[j].spilled_ptr;
3043 				if (reg->type != SCALAR_VALUE)
3044 					continue;
3045 				reg->precise = true;
3046 			}
3047 		}
3048 	}
3049 }
3050 
3051 static void mark_all_scalars_imprecise(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
3052 {
3053 	struct bpf_func_state *func;
3054 	struct bpf_reg_state *reg;
3055 	int i, j;
3056 
3057 	for (i = 0; i <= st->curframe; i++) {
3058 		func = st->frame[i];
3059 		for (j = 0; j < BPF_REG_FP; j++) {
3060 			reg = &func->regs[j];
3061 			if (reg->type != SCALAR_VALUE)
3062 				continue;
3063 			reg->precise = false;
3064 		}
3065 		for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) {
3066 			if (!is_spilled_reg(&func->stack[j]))
3067 				continue;
3068 			reg = &func->stack[j].spilled_ptr;
3069 			if (reg->type != SCALAR_VALUE)
3070 				continue;
3071 			reg->precise = false;
3072 		}
3073 	}
3074 }
3075 
3076 /*
3077  * __mark_chain_precision() backtracks BPF program instruction sequence and
3078  * chain of verifier states making sure that register *regno* (if regno >= 0)
3079  * and/or stack slot *spi* (if spi >= 0) are marked as precisely tracked
3080  * SCALARS, as well as any other registers and slots that contribute to
3081  * a tracked state of given registers/stack slots, depending on specific BPF
3082  * assembly instructions (see backtrack_insns() for exact instruction handling
3083  * logic). This backtracking relies on recorded jmp_history and is able to
3084  * traverse entire chain of parent states. This process ends only when all the
3085  * necessary registers/slots and their transitive dependencies are marked as
3086  * precise.
3087  *
3088  * One important and subtle aspect is that precise marks *do not matter* in
3089  * the currently verified state (current state). It is important to understand
3090  * why this is the case.
3091  *
3092  * First, note that current state is the state that is not yet "checkpointed",
3093  * i.e., it is not yet put into env->explored_states, and it has no children
3094  * states as well. It's ephemeral, and can end up either a) being discarded if
3095  * compatible explored state is found at some point or BPF_EXIT instruction is
3096  * reached or b) checkpointed and put into env->explored_states, branching out
3097  * into one or more children states.
3098  *
3099  * In the former case, precise markings in current state are completely
3100  * ignored by state comparison code (see regsafe() for details). Only
3101  * checkpointed ("old") state precise markings are important, and if old
3102  * state's register/slot is precise, regsafe() assumes current state's
3103  * register/slot as precise and checks value ranges exactly and precisely. If
3104  * states turn out to be compatible, current state's necessary precise
3105  * markings and any required parent states' precise markings are enforced
3106  * after the fact with propagate_precision() logic, after the fact. But it's
3107  * important to realize that in this case, even after marking current state
3108  * registers/slots as precise, we immediately discard current state. So what
3109  * actually matters is any of the precise markings propagated into current
3110  * state's parent states, which are always checkpointed (due to b) case above).
3111  * As such, for scenario a) it doesn't matter if current state has precise
3112  * markings set or not.
3113  *
3114  * Now, for the scenario b), checkpointing and forking into child(ren)
3115  * state(s). Note that before current state gets to checkpointing step, any
3116  * processed instruction always assumes precise SCALAR register/slot
3117  * knowledge: if precise value or range is useful to prune jump branch, BPF
3118  * verifier takes this opportunity enthusiastically. Similarly, when
3119  * register's value is used to calculate offset or memory address, exact
3120  * knowledge of SCALAR range is assumed, checked, and enforced. So, similar to
3121  * what we mentioned above about state comparison ignoring precise markings
3122  * during state comparison, BPF verifier ignores and also assumes precise
3123  * markings *at will* during instruction verification process. But as verifier
3124  * assumes precision, it also propagates any precision dependencies across
3125  * parent states, which are not yet finalized, so can be further restricted
3126  * based on new knowledge gained from restrictions enforced by their children
3127  * states. This is so that once those parent states are finalized, i.e., when
3128  * they have no more active children state, state comparison logic in
3129  * is_state_visited() would enforce strict and precise SCALAR ranges, if
3130  * required for correctness.
3131  *
3132  * To build a bit more intuition, note also that once a state is checkpointed,
3133  * the path we took to get to that state is not important. This is crucial
3134  * property for state pruning. When state is checkpointed and finalized at
3135  * some instruction index, it can be correctly and safely used to "short
3136  * circuit" any *compatible* state that reaches exactly the same instruction
3137  * index. I.e., if we jumped to that instruction from a completely different
3138  * code path than original finalized state was derived from, it doesn't
3139  * matter, current state can be discarded because from that instruction
3140  * forward having a compatible state will ensure we will safely reach the
3141  * exit. States describe preconditions for further exploration, but completely
3142  * forget the history of how we got here.
3143  *
3144  * This also means that even if we needed precise SCALAR range to get to
3145  * finalized state, but from that point forward *that same* SCALAR register is
3146  * never used in a precise context (i.e., it's precise value is not needed for
3147  * correctness), it's correct and safe to mark such register as "imprecise"
3148  * (i.e., precise marking set to false). This is what we rely on when we do
3149  * not set precise marking in current state. If no child state requires
3150  * precision for any given SCALAR register, it's safe to dictate that it can
3151  * be imprecise. If any child state does require this register to be precise,
3152  * we'll mark it precise later retroactively during precise markings
3153  * propagation from child state to parent states.
3154  *
3155  * Skipping precise marking setting in current state is a mild version of
3156  * relying on the above observation. But we can utilize this property even
3157  * more aggressively by proactively forgetting any precise marking in the
3158  * current state (which we inherited from the parent state), right before we
3159  * checkpoint it and branch off into new child state. This is done by
3160  * mark_all_scalars_imprecise() to hopefully get more permissive and generic
3161  * finalized states which help in short circuiting more future states.
3162  */
3163 static int __mark_chain_precision(struct bpf_verifier_env *env, int frame, int regno,
3164 				  int spi)
3165 {
3166 	struct bpf_verifier_state *st = env->cur_state;
3167 	int first_idx = st->first_insn_idx;
3168 	int last_idx = env->insn_idx;
3169 	struct bpf_func_state *func;
3170 	struct bpf_reg_state *reg;
3171 	u32 reg_mask = regno >= 0 ? 1u << regno : 0;
3172 	u64 stack_mask = spi >= 0 ? 1ull << spi : 0;
3173 	bool skip_first = true;
3174 	bool new_marks = false;
3175 	int i, err;
3176 
3177 	if (!env->bpf_capable)
3178 		return 0;
3179 
3180 	/* Do sanity checks against current state of register and/or stack
3181 	 * slot, but don't set precise flag in current state, as precision
3182 	 * tracking in the current state is unnecessary.
3183 	 */
3184 	func = st->frame[frame];
3185 	if (regno >= 0) {
3186 		reg = &func->regs[regno];
3187 		if (reg->type != SCALAR_VALUE) {
3188 			WARN_ONCE(1, "backtracing misuse");
3189 			return -EFAULT;
3190 		}
3191 		new_marks = true;
3192 	}
3193 
3194 	while (spi >= 0) {
3195 		if (!is_spilled_reg(&func->stack[spi])) {
3196 			stack_mask = 0;
3197 			break;
3198 		}
3199 		reg = &func->stack[spi].spilled_ptr;
3200 		if (reg->type != SCALAR_VALUE) {
3201 			stack_mask = 0;
3202 			break;
3203 		}
3204 		new_marks = true;
3205 		break;
3206 	}
3207 
3208 	if (!new_marks)
3209 		return 0;
3210 	if (!reg_mask && !stack_mask)
3211 		return 0;
3212 
3213 	for (;;) {
3214 		DECLARE_BITMAP(mask, 64);
3215 		u32 history = st->jmp_history_cnt;
3216 
3217 		if (env->log.level & BPF_LOG_LEVEL2)
3218 			verbose(env, "last_idx %d first_idx %d\n", last_idx, first_idx);
3219 
3220 		if (last_idx < 0) {
3221 			/* we are at the entry into subprog, which
3222 			 * is expected for global funcs, but only if
3223 			 * requested precise registers are R1-R5
3224 			 * (which are global func's input arguments)
3225 			 */
3226 			if (st->curframe == 0 &&
3227 			    st->frame[0]->subprogno > 0 &&
3228 			    st->frame[0]->callsite == BPF_MAIN_FUNC &&
3229 			    stack_mask == 0 && (reg_mask & ~0x3e) == 0) {
3230 				bitmap_from_u64(mask, reg_mask);
3231 				for_each_set_bit(i, mask, 32) {
3232 					reg = &st->frame[0]->regs[i];
3233 					if (reg->type != SCALAR_VALUE) {
3234 						reg_mask &= ~(1u << i);
3235 						continue;
3236 					}
3237 					reg->precise = true;
3238 				}
3239 				return 0;
3240 			}
3241 
3242 			verbose(env, "BUG backtracing func entry subprog %d reg_mask %x stack_mask %llx\n",
3243 				st->frame[0]->subprogno, reg_mask, stack_mask);
3244 			WARN_ONCE(1, "verifier backtracking bug");
3245 			return -EFAULT;
3246 		}
3247 
3248 		for (i = last_idx;;) {
3249 			if (skip_first) {
3250 				err = 0;
3251 				skip_first = false;
3252 			} else {
3253 				err = backtrack_insn(env, i, &reg_mask, &stack_mask);
3254 			}
3255 			if (err == -ENOTSUPP) {
3256 				mark_all_scalars_precise(env, st);
3257 				return 0;
3258 			} else if (err) {
3259 				return err;
3260 			}
3261 			if (!reg_mask && !stack_mask)
3262 				/* Found assignment(s) into tracked register in this state.
3263 				 * Since this state is already marked, just return.
3264 				 * Nothing to be tracked further in the parent state.
3265 				 */
3266 				return 0;
3267 			if (i == first_idx)
3268 				break;
3269 			i = get_prev_insn_idx(st, i, &history);
3270 			if (i >= env->prog->len) {
3271 				/* This can happen if backtracking reached insn 0
3272 				 * and there are still reg_mask or stack_mask
3273 				 * to backtrack.
3274 				 * It means the backtracking missed the spot where
3275 				 * particular register was initialized with a constant.
3276 				 */
3277 				verbose(env, "BUG backtracking idx %d\n", i);
3278 				WARN_ONCE(1, "verifier backtracking bug");
3279 				return -EFAULT;
3280 			}
3281 		}
3282 		st = st->parent;
3283 		if (!st)
3284 			break;
3285 
3286 		new_marks = false;
3287 		func = st->frame[frame];
3288 		bitmap_from_u64(mask, reg_mask);
3289 		for_each_set_bit(i, mask, 32) {
3290 			reg = &func->regs[i];
3291 			if (reg->type != SCALAR_VALUE) {
3292 				reg_mask &= ~(1u << i);
3293 				continue;
3294 			}
3295 			if (!reg->precise)
3296 				new_marks = true;
3297 			reg->precise = true;
3298 		}
3299 
3300 		bitmap_from_u64(mask, stack_mask);
3301 		for_each_set_bit(i, mask, 64) {
3302 			if (i >= func->allocated_stack / BPF_REG_SIZE) {
3303 				/* the sequence of instructions:
3304 				 * 2: (bf) r3 = r10
3305 				 * 3: (7b) *(u64 *)(r3 -8) = r0
3306 				 * 4: (79) r4 = *(u64 *)(r10 -8)
3307 				 * doesn't contain jmps. It's backtracked
3308 				 * as a single block.
3309 				 * During backtracking insn 3 is not recognized as
3310 				 * stack access, so at the end of backtracking
3311 				 * stack slot fp-8 is still marked in stack_mask.
3312 				 * However the parent state may not have accessed
3313 				 * fp-8 and it's "unallocated" stack space.
3314 				 * In such case fallback to conservative.
3315 				 */
3316 				mark_all_scalars_precise(env, st);
3317 				return 0;
3318 			}
3319 
3320 			if (!is_spilled_reg(&func->stack[i])) {
3321 				stack_mask &= ~(1ull << i);
3322 				continue;
3323 			}
3324 			reg = &func->stack[i].spilled_ptr;
3325 			if (reg->type != SCALAR_VALUE) {
3326 				stack_mask &= ~(1ull << i);
3327 				continue;
3328 			}
3329 			if (!reg->precise)
3330 				new_marks = true;
3331 			reg->precise = true;
3332 		}
3333 		if (env->log.level & BPF_LOG_LEVEL2) {
3334 			verbose(env, "parent %s regs=%x stack=%llx marks:",
3335 				new_marks ? "didn't have" : "already had",
3336 				reg_mask, stack_mask);
3337 			print_verifier_state(env, func, true);
3338 		}
3339 
3340 		if (!reg_mask && !stack_mask)
3341 			break;
3342 		if (!new_marks)
3343 			break;
3344 
3345 		last_idx = st->last_insn_idx;
3346 		first_idx = st->first_insn_idx;
3347 	}
3348 	return 0;
3349 }
3350 
3351 int mark_chain_precision(struct bpf_verifier_env *env, int regno)
3352 {
3353 	return __mark_chain_precision(env, env->cur_state->curframe, regno, -1);
3354 }
3355 
3356 static int mark_chain_precision_frame(struct bpf_verifier_env *env, int frame, int regno)
3357 {
3358 	return __mark_chain_precision(env, frame, regno, -1);
3359 }
3360 
3361 static int mark_chain_precision_stack_frame(struct bpf_verifier_env *env, int frame, int spi)
3362 {
3363 	return __mark_chain_precision(env, frame, -1, spi);
3364 }
3365 
3366 static bool is_spillable_regtype(enum bpf_reg_type type)
3367 {
3368 	switch (base_type(type)) {
3369 	case PTR_TO_MAP_VALUE:
3370 	case PTR_TO_STACK:
3371 	case PTR_TO_CTX:
3372 	case PTR_TO_PACKET:
3373 	case PTR_TO_PACKET_META:
3374 	case PTR_TO_PACKET_END:
3375 	case PTR_TO_FLOW_KEYS:
3376 	case CONST_PTR_TO_MAP:
3377 	case PTR_TO_SOCKET:
3378 	case PTR_TO_SOCK_COMMON:
3379 	case PTR_TO_TCP_SOCK:
3380 	case PTR_TO_XDP_SOCK:
3381 	case PTR_TO_BTF_ID:
3382 	case PTR_TO_BUF:
3383 	case PTR_TO_MEM:
3384 	case PTR_TO_FUNC:
3385 	case PTR_TO_MAP_KEY:
3386 		return true;
3387 	default:
3388 		return false;
3389 	}
3390 }
3391 
3392 /* Does this register contain a constant zero? */
3393 static bool register_is_null(struct bpf_reg_state *reg)
3394 {
3395 	return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0);
3396 }
3397 
3398 static bool register_is_const(struct bpf_reg_state *reg)
3399 {
3400 	return reg->type == SCALAR_VALUE && tnum_is_const(reg->var_off);
3401 }
3402 
3403 static bool __is_scalar_unbounded(struct bpf_reg_state *reg)
3404 {
3405 	return tnum_is_unknown(reg->var_off) &&
3406 	       reg->smin_value == S64_MIN && reg->smax_value == S64_MAX &&
3407 	       reg->umin_value == 0 && reg->umax_value == U64_MAX &&
3408 	       reg->s32_min_value == S32_MIN && reg->s32_max_value == S32_MAX &&
3409 	       reg->u32_min_value == 0 && reg->u32_max_value == U32_MAX;
3410 }
3411 
3412 static bool register_is_bounded(struct bpf_reg_state *reg)
3413 {
3414 	return reg->type == SCALAR_VALUE && !__is_scalar_unbounded(reg);
3415 }
3416 
3417 static bool __is_pointer_value(bool allow_ptr_leaks,
3418 			       const struct bpf_reg_state *reg)
3419 {
3420 	if (allow_ptr_leaks)
3421 		return false;
3422 
3423 	return reg->type != SCALAR_VALUE;
3424 }
3425 
3426 /* Copy src state preserving dst->parent and dst->live fields */
3427 static void copy_register_state(struct bpf_reg_state *dst, const struct bpf_reg_state *src)
3428 {
3429 	struct bpf_reg_state *parent = dst->parent;
3430 	enum bpf_reg_liveness live = dst->live;
3431 
3432 	*dst = *src;
3433 	dst->parent = parent;
3434 	dst->live = live;
3435 }
3436 
3437 static void save_register_state(struct bpf_func_state *state,
3438 				int spi, struct bpf_reg_state *reg,
3439 				int size)
3440 {
3441 	int i;
3442 
3443 	copy_register_state(&state->stack[spi].spilled_ptr, reg);
3444 	if (size == BPF_REG_SIZE)
3445 		state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
3446 
3447 	for (i = BPF_REG_SIZE; i > BPF_REG_SIZE - size; i--)
3448 		state->stack[spi].slot_type[i - 1] = STACK_SPILL;
3449 
3450 	/* size < 8 bytes spill */
3451 	for (; i; i--)
3452 		scrub_spilled_slot(&state->stack[spi].slot_type[i - 1]);
3453 }
3454 
3455 /* check_stack_{read,write}_fixed_off functions track spill/fill of registers,
3456  * stack boundary and alignment are checked in check_mem_access()
3457  */
3458 static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
3459 				       /* stack frame we're writing to */
3460 				       struct bpf_func_state *state,
3461 				       int off, int size, int value_regno,
3462 				       int insn_idx)
3463 {
3464 	struct bpf_func_state *cur; /* state of the current function */
3465 	int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err;
3466 	u32 dst_reg = env->prog->insnsi[insn_idx].dst_reg;
3467 	struct bpf_reg_state *reg = NULL;
3468 
3469 	err = grow_stack_state(state, round_up(slot + 1, BPF_REG_SIZE));
3470 	if (err)
3471 		return err;
3472 	/* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
3473 	 * so it's aligned access and [off, off + size) are within stack limits
3474 	 */
3475 	if (!env->allow_ptr_leaks &&
3476 	    state->stack[spi].slot_type[0] == STACK_SPILL &&
3477 	    size != BPF_REG_SIZE) {
3478 		verbose(env, "attempt to corrupt spilled pointer on stack\n");
3479 		return -EACCES;
3480 	}
3481 
3482 	cur = env->cur_state->frame[env->cur_state->curframe];
3483 	if (value_regno >= 0)
3484 		reg = &cur->regs[value_regno];
3485 	if (!env->bypass_spec_v4) {
3486 		bool sanitize = reg && is_spillable_regtype(reg->type);
3487 
3488 		for (i = 0; i < size; i++) {
3489 			u8 type = state->stack[spi].slot_type[i];
3490 
3491 			if (type != STACK_MISC && type != STACK_ZERO) {
3492 				sanitize = true;
3493 				break;
3494 			}
3495 		}
3496 
3497 		if (sanitize)
3498 			env->insn_aux_data[insn_idx].sanitize_stack_spill = true;
3499 	}
3500 
3501 	err = destroy_if_dynptr_stack_slot(env, state, spi);
3502 	if (err)
3503 		return err;
3504 
3505 	mark_stack_slot_scratched(env, spi);
3506 	if (reg && !(off % BPF_REG_SIZE) && register_is_bounded(reg) &&
3507 	    !register_is_null(reg) && env->bpf_capable) {
3508 		if (dst_reg != BPF_REG_FP) {
3509 			/* The backtracking logic can only recognize explicit
3510 			 * stack slot address like [fp - 8]. Other spill of
3511 			 * scalar via different register has to be conservative.
3512 			 * Backtrack from here and mark all registers as precise
3513 			 * that contributed into 'reg' being a constant.
3514 			 */
3515 			err = mark_chain_precision(env, value_regno);
3516 			if (err)
3517 				return err;
3518 		}
3519 		save_register_state(state, spi, reg, size);
3520 	} else if (reg && is_spillable_regtype(reg->type)) {
3521 		/* register containing pointer is being spilled into stack */
3522 		if (size != BPF_REG_SIZE) {
3523 			verbose_linfo(env, insn_idx, "; ");
3524 			verbose(env, "invalid size of register spill\n");
3525 			return -EACCES;
3526 		}
3527 		if (state != cur && reg->type == PTR_TO_STACK) {
3528 			verbose(env, "cannot spill pointers to stack into stack frame of the caller\n");
3529 			return -EINVAL;
3530 		}
3531 		save_register_state(state, spi, reg, size);
3532 	} else {
3533 		u8 type = STACK_MISC;
3534 
3535 		/* regular write of data into stack destroys any spilled ptr */
3536 		state->stack[spi].spilled_ptr.type = NOT_INIT;
3537 		/* Mark slots as STACK_MISC if they belonged to spilled ptr. */
3538 		if (is_spilled_reg(&state->stack[spi]))
3539 			for (i = 0; i < BPF_REG_SIZE; i++)
3540 				scrub_spilled_slot(&state->stack[spi].slot_type[i]);
3541 
3542 		/* only mark the slot as written if all 8 bytes were written
3543 		 * otherwise read propagation may incorrectly stop too soon
3544 		 * when stack slots are partially written.
3545 		 * This heuristic means that read propagation will be
3546 		 * conservative, since it will add reg_live_read marks
3547 		 * to stack slots all the way to first state when programs
3548 		 * writes+reads less than 8 bytes
3549 		 */
3550 		if (size == BPF_REG_SIZE)
3551 			state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
3552 
3553 		/* when we zero initialize stack slots mark them as such */
3554 		if (reg && register_is_null(reg)) {
3555 			/* backtracking doesn't work for STACK_ZERO yet. */
3556 			err = mark_chain_precision(env, value_regno);
3557 			if (err)
3558 				return err;
3559 			type = STACK_ZERO;
3560 		}
3561 
3562 		/* Mark slots affected by this stack write. */
3563 		for (i = 0; i < size; i++)
3564 			state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] =
3565 				type;
3566 	}
3567 	return 0;
3568 }
3569 
3570 /* Write the stack: 'stack[ptr_regno + off] = value_regno'. 'ptr_regno' is
3571  * known to contain a variable offset.
3572  * This function checks whether the write is permitted and conservatively
3573  * tracks the effects of the write, considering that each stack slot in the
3574  * dynamic range is potentially written to.
3575  *
3576  * 'off' includes 'regno->off'.
3577  * 'value_regno' can be -1, meaning that an unknown value is being written to
3578  * the stack.
3579  *
3580  * Spilled pointers in range are not marked as written because we don't know
3581  * what's going to be actually written. This means that read propagation for
3582  * future reads cannot be terminated by this write.
3583  *
3584  * For privileged programs, uninitialized stack slots are considered
3585  * initialized by this write (even though we don't know exactly what offsets
3586  * are going to be written to). The idea is that we don't want the verifier to
3587  * reject future reads that access slots written to through variable offsets.
3588  */
3589 static int check_stack_write_var_off(struct bpf_verifier_env *env,
3590 				     /* func where register points to */
3591 				     struct bpf_func_state *state,
3592 				     int ptr_regno, int off, int size,
3593 				     int value_regno, int insn_idx)
3594 {
3595 	struct bpf_func_state *cur; /* state of the current function */
3596 	int min_off, max_off;
3597 	int i, err;
3598 	struct bpf_reg_state *ptr_reg = NULL, *value_reg = NULL;
3599 	bool writing_zero = false;
3600 	/* set if the fact that we're writing a zero is used to let any
3601 	 * stack slots remain STACK_ZERO
3602 	 */
3603 	bool zero_used = false;
3604 
3605 	cur = env->cur_state->frame[env->cur_state->curframe];
3606 	ptr_reg = &cur->regs[ptr_regno];
3607 	min_off = ptr_reg->smin_value + off;
3608 	max_off = ptr_reg->smax_value + off + size;
3609 	if (value_regno >= 0)
3610 		value_reg = &cur->regs[value_regno];
3611 	if (value_reg && register_is_null(value_reg))
3612 		writing_zero = true;
3613 
3614 	err = grow_stack_state(state, round_up(-min_off, BPF_REG_SIZE));
3615 	if (err)
3616 		return err;
3617 
3618 	for (i = min_off; i < max_off; i++) {
3619 		int spi;
3620 
3621 		spi = __get_spi(i);
3622 		err = destroy_if_dynptr_stack_slot(env, state, spi);
3623 		if (err)
3624 			return err;
3625 	}
3626 
3627 	/* Variable offset writes destroy any spilled pointers in range. */
3628 	for (i = min_off; i < max_off; i++) {
3629 		u8 new_type, *stype;
3630 		int slot, spi;
3631 
3632 		slot = -i - 1;
3633 		spi = slot / BPF_REG_SIZE;
3634 		stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
3635 		mark_stack_slot_scratched(env, spi);
3636 
3637 		if (!env->allow_ptr_leaks && *stype != STACK_MISC && *stype != STACK_ZERO) {
3638 			/* Reject the write if range we may write to has not
3639 			 * been initialized beforehand. If we didn't reject
3640 			 * here, the ptr status would be erased below (even
3641 			 * though not all slots are actually overwritten),
3642 			 * possibly opening the door to leaks.
3643 			 *
3644 			 * We do however catch STACK_INVALID case below, and
3645 			 * only allow reading possibly uninitialized memory
3646 			 * later for CAP_PERFMON, as the write may not happen to
3647 			 * that slot.
3648 			 */
3649 			verbose(env, "spilled ptr in range of var-offset stack write; insn %d, ptr off: %d",
3650 				insn_idx, i);
3651 			return -EINVAL;
3652 		}
3653 
3654 		/* Erase all spilled pointers. */
3655 		state->stack[spi].spilled_ptr.type = NOT_INIT;
3656 
3657 		/* Update the slot type. */
3658 		new_type = STACK_MISC;
3659 		if (writing_zero && *stype == STACK_ZERO) {
3660 			new_type = STACK_ZERO;
3661 			zero_used = true;
3662 		}
3663 		/* If the slot is STACK_INVALID, we check whether it's OK to
3664 		 * pretend that it will be initialized by this write. The slot
3665 		 * might not actually be written to, and so if we mark it as
3666 		 * initialized future reads might leak uninitialized memory.
3667 		 * For privileged programs, we will accept such reads to slots
3668 		 * that may or may not be written because, if we're reject
3669 		 * them, the error would be too confusing.
3670 		 */
3671 		if (*stype == STACK_INVALID && !env->allow_uninit_stack) {
3672 			verbose(env, "uninit stack in range of var-offset write prohibited for !root; insn %d, off: %d",
3673 					insn_idx, i);
3674 			return -EINVAL;
3675 		}
3676 		*stype = new_type;
3677 	}
3678 	if (zero_used) {
3679 		/* backtracking doesn't work for STACK_ZERO yet. */
3680 		err = mark_chain_precision(env, value_regno);
3681 		if (err)
3682 			return err;
3683 	}
3684 	return 0;
3685 }
3686 
3687 /* When register 'dst_regno' is assigned some values from stack[min_off,
3688  * max_off), we set the register's type according to the types of the
3689  * respective stack slots. If all the stack values are known to be zeros, then
3690  * so is the destination reg. Otherwise, the register is considered to be
3691  * SCALAR. This function does not deal with register filling; the caller must
3692  * ensure that all spilled registers in the stack range have been marked as
3693  * read.
3694  */
3695 static void mark_reg_stack_read(struct bpf_verifier_env *env,
3696 				/* func where src register points to */
3697 				struct bpf_func_state *ptr_state,
3698 				int min_off, int max_off, int dst_regno)
3699 {
3700 	struct bpf_verifier_state *vstate = env->cur_state;
3701 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
3702 	int i, slot, spi;
3703 	u8 *stype;
3704 	int zeros = 0;
3705 
3706 	for (i = min_off; i < max_off; i++) {
3707 		slot = -i - 1;
3708 		spi = slot / BPF_REG_SIZE;
3709 		stype = ptr_state->stack[spi].slot_type;
3710 		if (stype[slot % BPF_REG_SIZE] != STACK_ZERO)
3711 			break;
3712 		zeros++;
3713 	}
3714 	if (zeros == max_off - min_off) {
3715 		/* any access_size read into register is zero extended,
3716 		 * so the whole register == const_zero
3717 		 */
3718 		__mark_reg_const_zero(&state->regs[dst_regno]);
3719 		/* backtracking doesn't support STACK_ZERO yet,
3720 		 * so mark it precise here, so that later
3721 		 * backtracking can stop here.
3722 		 * Backtracking may not need this if this register
3723 		 * doesn't participate in pointer adjustment.
3724 		 * Forward propagation of precise flag is not
3725 		 * necessary either. This mark is only to stop
3726 		 * backtracking. Any register that contributed
3727 		 * to const 0 was marked precise before spill.
3728 		 */
3729 		state->regs[dst_regno].precise = true;
3730 	} else {
3731 		/* have read misc data from the stack */
3732 		mark_reg_unknown(env, state->regs, dst_regno);
3733 	}
3734 	state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
3735 }
3736 
3737 /* Read the stack at 'off' and put the results into the register indicated by
3738  * 'dst_regno'. It handles reg filling if the addressed stack slot is a
3739  * spilled reg.
3740  *
3741  * 'dst_regno' can be -1, meaning that the read value is not going to a
3742  * register.
3743  *
3744  * The access is assumed to be within the current stack bounds.
3745  */
3746 static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
3747 				      /* func where src register points to */
3748 				      struct bpf_func_state *reg_state,
3749 				      int off, int size, int dst_regno)
3750 {
3751 	struct bpf_verifier_state *vstate = env->cur_state;
3752 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
3753 	int i, slot = -off - 1, spi = slot / BPF_REG_SIZE;
3754 	struct bpf_reg_state *reg;
3755 	u8 *stype, type;
3756 
3757 	stype = reg_state->stack[spi].slot_type;
3758 	reg = &reg_state->stack[spi].spilled_ptr;
3759 
3760 	if (is_spilled_reg(&reg_state->stack[spi])) {
3761 		u8 spill_size = 1;
3762 
3763 		for (i = BPF_REG_SIZE - 1; i > 0 && stype[i - 1] == STACK_SPILL; i--)
3764 			spill_size++;
3765 
3766 		if (size != BPF_REG_SIZE || spill_size != BPF_REG_SIZE) {
3767 			if (reg->type != SCALAR_VALUE) {
3768 				verbose_linfo(env, env->insn_idx, "; ");
3769 				verbose(env, "invalid size of register fill\n");
3770 				return -EACCES;
3771 			}
3772 
3773 			mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
3774 			if (dst_regno < 0)
3775 				return 0;
3776 
3777 			if (!(off % BPF_REG_SIZE) && size == spill_size) {
3778 				/* The earlier check_reg_arg() has decided the
3779 				 * subreg_def for this insn.  Save it first.
3780 				 */
3781 				s32 subreg_def = state->regs[dst_regno].subreg_def;
3782 
3783 				copy_register_state(&state->regs[dst_regno], reg);
3784 				state->regs[dst_regno].subreg_def = subreg_def;
3785 			} else {
3786 				for (i = 0; i < size; i++) {
3787 					type = stype[(slot - i) % BPF_REG_SIZE];
3788 					if (type == STACK_SPILL)
3789 						continue;
3790 					if (type == STACK_MISC)
3791 						continue;
3792 					verbose(env, "invalid read from stack off %d+%d size %d\n",
3793 						off, i, size);
3794 					return -EACCES;
3795 				}
3796 				mark_reg_unknown(env, state->regs, dst_regno);
3797 			}
3798 			state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
3799 			return 0;
3800 		}
3801 
3802 		if (dst_regno >= 0) {
3803 			/* restore register state from stack */
3804 			copy_register_state(&state->regs[dst_regno], reg);
3805 			/* mark reg as written since spilled pointer state likely
3806 			 * has its liveness marks cleared by is_state_visited()
3807 			 * which resets stack/reg liveness for state transitions
3808 			 */
3809 			state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
3810 		} else if (__is_pointer_value(env->allow_ptr_leaks, reg)) {
3811 			/* If dst_regno==-1, the caller is asking us whether
3812 			 * it is acceptable to use this value as a SCALAR_VALUE
3813 			 * (e.g. for XADD).
3814 			 * We must not allow unprivileged callers to do that
3815 			 * with spilled pointers.
3816 			 */
3817 			verbose(env, "leaking pointer from stack off %d\n",
3818 				off);
3819 			return -EACCES;
3820 		}
3821 		mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
3822 	} else {
3823 		for (i = 0; i < size; i++) {
3824 			type = stype[(slot - i) % BPF_REG_SIZE];
3825 			if (type == STACK_MISC)
3826 				continue;
3827 			if (type == STACK_ZERO)
3828 				continue;
3829 			verbose(env, "invalid read from stack off %d+%d size %d\n",
3830 				off, i, size);
3831 			return -EACCES;
3832 		}
3833 		mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
3834 		if (dst_regno >= 0)
3835 			mark_reg_stack_read(env, reg_state, off, off + size, dst_regno);
3836 	}
3837 	return 0;
3838 }
3839 
3840 enum bpf_access_src {
3841 	ACCESS_DIRECT = 1,  /* the access is performed by an instruction */
3842 	ACCESS_HELPER = 2,  /* the access is performed by a helper */
3843 };
3844 
3845 static int check_stack_range_initialized(struct bpf_verifier_env *env,
3846 					 int regno, int off, int access_size,
3847 					 bool zero_size_allowed,
3848 					 enum bpf_access_src type,
3849 					 struct bpf_call_arg_meta *meta);
3850 
3851 static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno)
3852 {
3853 	return cur_regs(env) + regno;
3854 }
3855 
3856 /* Read the stack at 'ptr_regno + off' and put the result into the register
3857  * 'dst_regno'.
3858  * 'off' includes the pointer register's fixed offset(i.e. 'ptr_regno.off'),
3859  * but not its variable offset.
3860  * 'size' is assumed to be <= reg size and the access is assumed to be aligned.
3861  *
3862  * As opposed to check_stack_read_fixed_off, this function doesn't deal with
3863  * filling registers (i.e. reads of spilled register cannot be detected when
3864  * the offset is not fixed). We conservatively mark 'dst_regno' as containing
3865  * SCALAR_VALUE. That's why we assert that the 'ptr_regno' has a variable
3866  * offset; for a fixed offset check_stack_read_fixed_off should be used
3867  * instead.
3868  */
3869 static int check_stack_read_var_off(struct bpf_verifier_env *env,
3870 				    int ptr_regno, int off, int size, int dst_regno)
3871 {
3872 	/* The state of the source register. */
3873 	struct bpf_reg_state *reg = reg_state(env, ptr_regno);
3874 	struct bpf_func_state *ptr_state = func(env, reg);
3875 	int err;
3876 	int min_off, max_off;
3877 
3878 	/* Note that we pass a NULL meta, so raw access will not be permitted.
3879 	 */
3880 	err = check_stack_range_initialized(env, ptr_regno, off, size,
3881 					    false, ACCESS_DIRECT, NULL);
3882 	if (err)
3883 		return err;
3884 
3885 	min_off = reg->smin_value + off;
3886 	max_off = reg->smax_value + off;
3887 	mark_reg_stack_read(env, ptr_state, min_off, max_off + size, dst_regno);
3888 	return 0;
3889 }
3890 
3891 /* check_stack_read dispatches to check_stack_read_fixed_off or
3892  * check_stack_read_var_off.
3893  *
3894  * The caller must ensure that the offset falls within the allocated stack
3895  * bounds.
3896  *
3897  * 'dst_regno' is a register which will receive the value from the stack. It
3898  * can be -1, meaning that the read value is not going to a register.
3899  */
3900 static int check_stack_read(struct bpf_verifier_env *env,
3901 			    int ptr_regno, int off, int size,
3902 			    int dst_regno)
3903 {
3904 	struct bpf_reg_state *reg = reg_state(env, ptr_regno);
3905 	struct bpf_func_state *state = func(env, reg);
3906 	int err;
3907 	/* Some accesses are only permitted with a static offset. */
3908 	bool var_off = !tnum_is_const(reg->var_off);
3909 
3910 	/* The offset is required to be static when reads don't go to a
3911 	 * register, in order to not leak pointers (see
3912 	 * check_stack_read_fixed_off).
3913 	 */
3914 	if (dst_regno < 0 && var_off) {
3915 		char tn_buf[48];
3916 
3917 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3918 		verbose(env, "variable offset stack pointer cannot be passed into helper function; var_off=%s off=%d size=%d\n",
3919 			tn_buf, off, size);
3920 		return -EACCES;
3921 	}
3922 	/* Variable offset is prohibited for unprivileged mode for simplicity
3923 	 * since it requires corresponding support in Spectre masking for stack
3924 	 * ALU. See also retrieve_ptr_limit().
3925 	 */
3926 	if (!env->bypass_spec_v1 && var_off) {
3927 		char tn_buf[48];
3928 
3929 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3930 		verbose(env, "R%d variable offset stack access prohibited for !root, var_off=%s\n",
3931 				ptr_regno, tn_buf);
3932 		return -EACCES;
3933 	}
3934 
3935 	if (!var_off) {
3936 		off += reg->var_off.value;
3937 		err = check_stack_read_fixed_off(env, state, off, size,
3938 						 dst_regno);
3939 	} else {
3940 		/* Variable offset stack reads need more conservative handling
3941 		 * than fixed offset ones. Note that dst_regno >= 0 on this
3942 		 * branch.
3943 		 */
3944 		err = check_stack_read_var_off(env, ptr_regno, off, size,
3945 					       dst_regno);
3946 	}
3947 	return err;
3948 }
3949 
3950 
3951 /* check_stack_write dispatches to check_stack_write_fixed_off or
3952  * check_stack_write_var_off.
3953  *
3954  * 'ptr_regno' is the register used as a pointer into the stack.
3955  * 'off' includes 'ptr_regno->off', but not its variable offset (if any).
3956  * 'value_regno' is the register whose value we're writing to the stack. It can
3957  * be -1, meaning that we're not writing from a register.
3958  *
3959  * The caller must ensure that the offset falls within the maximum stack size.
3960  */
3961 static int check_stack_write(struct bpf_verifier_env *env,
3962 			     int ptr_regno, int off, int size,
3963 			     int value_regno, int insn_idx)
3964 {
3965 	struct bpf_reg_state *reg = reg_state(env, ptr_regno);
3966 	struct bpf_func_state *state = func(env, reg);
3967 	int err;
3968 
3969 	if (tnum_is_const(reg->var_off)) {
3970 		off += reg->var_off.value;
3971 		err = check_stack_write_fixed_off(env, state, off, size,
3972 						  value_regno, insn_idx);
3973 	} else {
3974 		/* Variable offset stack reads need more conservative handling
3975 		 * than fixed offset ones.
3976 		 */
3977 		err = check_stack_write_var_off(env, state,
3978 						ptr_regno, off, size,
3979 						value_regno, insn_idx);
3980 	}
3981 	return err;
3982 }
3983 
3984 static int check_map_access_type(struct bpf_verifier_env *env, u32 regno,
3985 				 int off, int size, enum bpf_access_type type)
3986 {
3987 	struct bpf_reg_state *regs = cur_regs(env);
3988 	struct bpf_map *map = regs[regno].map_ptr;
3989 	u32 cap = bpf_map_flags_to_cap(map);
3990 
3991 	if (type == BPF_WRITE && !(cap & BPF_MAP_CAN_WRITE)) {
3992 		verbose(env, "write into map forbidden, value_size=%d off=%d size=%d\n",
3993 			map->value_size, off, size);
3994 		return -EACCES;
3995 	}
3996 
3997 	if (type == BPF_READ && !(cap & BPF_MAP_CAN_READ)) {
3998 		verbose(env, "read from map forbidden, value_size=%d off=%d size=%d\n",
3999 			map->value_size, off, size);
4000 		return -EACCES;
4001 	}
4002 
4003 	return 0;
4004 }
4005 
4006 /* check read/write into memory region (e.g., map value, ringbuf sample, etc) */
4007 static int __check_mem_access(struct bpf_verifier_env *env, int regno,
4008 			      int off, int size, u32 mem_size,
4009 			      bool zero_size_allowed)
4010 {
4011 	bool size_ok = size > 0 || (size == 0 && zero_size_allowed);
4012 	struct bpf_reg_state *reg;
4013 
4014 	if (off >= 0 && size_ok && (u64)off + size <= mem_size)
4015 		return 0;
4016 
4017 	reg = &cur_regs(env)[regno];
4018 	switch (reg->type) {
4019 	case PTR_TO_MAP_KEY:
4020 		verbose(env, "invalid access to map key, key_size=%d off=%d size=%d\n",
4021 			mem_size, off, size);
4022 		break;
4023 	case PTR_TO_MAP_VALUE:
4024 		verbose(env, "invalid access to map value, value_size=%d off=%d size=%d\n",
4025 			mem_size, off, size);
4026 		break;
4027 	case PTR_TO_PACKET:
4028 	case PTR_TO_PACKET_META:
4029 	case PTR_TO_PACKET_END:
4030 		verbose(env, "invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n",
4031 			off, size, regno, reg->id, off, mem_size);
4032 		break;
4033 	case PTR_TO_MEM:
4034 	default:
4035 		verbose(env, "invalid access to memory, mem_size=%u off=%d size=%d\n",
4036 			mem_size, off, size);
4037 	}
4038 
4039 	return -EACCES;
4040 }
4041 
4042 /* check read/write into a memory region with possible variable offset */
4043 static int check_mem_region_access(struct bpf_verifier_env *env, u32 regno,
4044 				   int off, int size, u32 mem_size,
4045 				   bool zero_size_allowed)
4046 {
4047 	struct bpf_verifier_state *vstate = env->cur_state;
4048 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
4049 	struct bpf_reg_state *reg = &state->regs[regno];
4050 	int err;
4051 
4052 	/* We may have adjusted the register pointing to memory region, so we
4053 	 * need to try adding each of min_value and max_value to off
4054 	 * to make sure our theoretical access will be safe.
4055 	 *
4056 	 * The minimum value is only important with signed
4057 	 * comparisons where we can't assume the floor of a
4058 	 * value is 0.  If we are using signed variables for our
4059 	 * index'es we need to make sure that whatever we use
4060 	 * will have a set floor within our range.
4061 	 */
4062 	if (reg->smin_value < 0 &&
4063 	    (reg->smin_value == S64_MIN ||
4064 	     (off + reg->smin_value != (s64)(s32)(off + reg->smin_value)) ||
4065 	      reg->smin_value + off < 0)) {
4066 		verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
4067 			regno);
4068 		return -EACCES;
4069 	}
4070 	err = __check_mem_access(env, regno, reg->smin_value + off, size,
4071 				 mem_size, zero_size_allowed);
4072 	if (err) {
4073 		verbose(env, "R%d min value is outside of the allowed memory range\n",
4074 			regno);
4075 		return err;
4076 	}
4077 
4078 	/* If we haven't set a max value then we need to bail since we can't be
4079 	 * sure we won't do bad things.
4080 	 * If reg->umax_value + off could overflow, treat that as unbounded too.
4081 	 */
4082 	if (reg->umax_value >= BPF_MAX_VAR_OFF) {
4083 		verbose(env, "R%d unbounded memory access, make sure to bounds check any such access\n",
4084 			regno);
4085 		return -EACCES;
4086 	}
4087 	err = __check_mem_access(env, regno, reg->umax_value + off, size,
4088 				 mem_size, zero_size_allowed);
4089 	if (err) {
4090 		verbose(env, "R%d max value is outside of the allowed memory range\n",
4091 			regno);
4092 		return err;
4093 	}
4094 
4095 	return 0;
4096 }
4097 
4098 static int __check_ptr_off_reg(struct bpf_verifier_env *env,
4099 			       const struct bpf_reg_state *reg, int regno,
4100 			       bool fixed_off_ok)
4101 {
4102 	/* Access to this pointer-typed register or passing it to a helper
4103 	 * is only allowed in its original, unmodified form.
4104 	 */
4105 
4106 	if (reg->off < 0) {
4107 		verbose(env, "negative offset %s ptr R%d off=%d disallowed\n",
4108 			reg_type_str(env, reg->type), regno, reg->off);
4109 		return -EACCES;
4110 	}
4111 
4112 	if (!fixed_off_ok && reg->off) {
4113 		verbose(env, "dereference of modified %s ptr R%d off=%d disallowed\n",
4114 			reg_type_str(env, reg->type), regno, reg->off);
4115 		return -EACCES;
4116 	}
4117 
4118 	if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
4119 		char tn_buf[48];
4120 
4121 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
4122 		verbose(env, "variable %s access var_off=%s disallowed\n",
4123 			reg_type_str(env, reg->type), tn_buf);
4124 		return -EACCES;
4125 	}
4126 
4127 	return 0;
4128 }
4129 
4130 int check_ptr_off_reg(struct bpf_verifier_env *env,
4131 		      const struct bpf_reg_state *reg, int regno)
4132 {
4133 	return __check_ptr_off_reg(env, reg, regno, false);
4134 }
4135 
4136 static int map_kptr_match_type(struct bpf_verifier_env *env,
4137 			       struct btf_field *kptr_field,
4138 			       struct bpf_reg_state *reg, u32 regno)
4139 {
4140 	const char *targ_name = kernel_type_name(kptr_field->kptr.btf, kptr_field->kptr.btf_id);
4141 	int perm_flags = PTR_MAYBE_NULL | PTR_TRUSTED;
4142 	const char *reg_name = "";
4143 
4144 	/* Only unreferenced case accepts untrusted pointers */
4145 	if (kptr_field->type == BPF_KPTR_UNREF)
4146 		perm_flags |= PTR_UNTRUSTED;
4147 
4148 	if (base_type(reg->type) != PTR_TO_BTF_ID || (type_flag(reg->type) & ~perm_flags))
4149 		goto bad_type;
4150 
4151 	if (!btf_is_kernel(reg->btf)) {
4152 		verbose(env, "R%d must point to kernel BTF\n", regno);
4153 		return -EINVAL;
4154 	}
4155 	/* We need to verify reg->type and reg->btf, before accessing reg->btf */
4156 	reg_name = kernel_type_name(reg->btf, reg->btf_id);
4157 
4158 	/* For ref_ptr case, release function check should ensure we get one
4159 	 * referenced PTR_TO_BTF_ID, and that its fixed offset is 0. For the
4160 	 * normal store of unreferenced kptr, we must ensure var_off is zero.
4161 	 * Since ref_ptr cannot be accessed directly by BPF insns, checks for
4162 	 * reg->off and reg->ref_obj_id are not needed here.
4163 	 */
4164 	if (__check_ptr_off_reg(env, reg, regno, true))
4165 		return -EACCES;
4166 
4167 	/* A full type match is needed, as BTF can be vmlinux or module BTF, and
4168 	 * we also need to take into account the reg->off.
4169 	 *
4170 	 * We want to support cases like:
4171 	 *
4172 	 * struct foo {
4173 	 *         struct bar br;
4174 	 *         struct baz bz;
4175 	 * };
4176 	 *
4177 	 * struct foo *v;
4178 	 * v = func();	      // PTR_TO_BTF_ID
4179 	 * val->foo = v;      // reg->off is zero, btf and btf_id match type
4180 	 * val->bar = &v->br; // reg->off is still zero, but we need to retry with
4181 	 *                    // first member type of struct after comparison fails
4182 	 * val->baz = &v->bz; // reg->off is non-zero, so struct needs to be walked
4183 	 *                    // to match type
4184 	 *
4185 	 * In the kptr_ref case, check_func_arg_reg_off already ensures reg->off
4186 	 * is zero. We must also ensure that btf_struct_ids_match does not walk
4187 	 * the struct to match type against first member of struct, i.e. reject
4188 	 * second case from above. Hence, when type is BPF_KPTR_REF, we set
4189 	 * strict mode to true for type match.
4190 	 */
4191 	if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, reg->off,
4192 				  kptr_field->kptr.btf, kptr_field->kptr.btf_id,
4193 				  kptr_field->type == BPF_KPTR_REF))
4194 		goto bad_type;
4195 	return 0;
4196 bad_type:
4197 	verbose(env, "invalid kptr access, R%d type=%s%s ", regno,
4198 		reg_type_str(env, reg->type), reg_name);
4199 	verbose(env, "expected=%s%s", reg_type_str(env, PTR_TO_BTF_ID), targ_name);
4200 	if (kptr_field->type == BPF_KPTR_UNREF)
4201 		verbose(env, " or %s%s\n", reg_type_str(env, PTR_TO_BTF_ID | PTR_UNTRUSTED),
4202 			targ_name);
4203 	else
4204 		verbose(env, "\n");
4205 	return -EINVAL;
4206 }
4207 
4208 static int check_map_kptr_access(struct bpf_verifier_env *env, u32 regno,
4209 				 int value_regno, int insn_idx,
4210 				 struct btf_field *kptr_field)
4211 {
4212 	struct bpf_insn *insn = &env->prog->insnsi[insn_idx];
4213 	int class = BPF_CLASS(insn->code);
4214 	struct bpf_reg_state *val_reg;
4215 
4216 	/* Things we already checked for in check_map_access and caller:
4217 	 *  - Reject cases where variable offset may touch kptr
4218 	 *  - size of access (must be BPF_DW)
4219 	 *  - tnum_is_const(reg->var_off)
4220 	 *  - kptr_field->offset == off + reg->var_off.value
4221 	 */
4222 	/* Only BPF_[LDX,STX,ST] | BPF_MEM | BPF_DW is supported */
4223 	if (BPF_MODE(insn->code) != BPF_MEM) {
4224 		verbose(env, "kptr in map can only be accessed using BPF_MEM instruction mode\n");
4225 		return -EACCES;
4226 	}
4227 
4228 	/* We only allow loading referenced kptr, since it will be marked as
4229 	 * untrusted, similar to unreferenced kptr.
4230 	 */
4231 	if (class != BPF_LDX && kptr_field->type == BPF_KPTR_REF) {
4232 		verbose(env, "store to referenced kptr disallowed\n");
4233 		return -EACCES;
4234 	}
4235 
4236 	if (class == BPF_LDX) {
4237 		val_reg = reg_state(env, value_regno);
4238 		/* We can simply mark the value_regno receiving the pointer
4239 		 * value from map as PTR_TO_BTF_ID, with the correct type.
4240 		 */
4241 		mark_btf_ld_reg(env, cur_regs(env), value_regno, PTR_TO_BTF_ID, kptr_field->kptr.btf,
4242 				kptr_field->kptr.btf_id, PTR_MAYBE_NULL | PTR_UNTRUSTED);
4243 		/* For mark_ptr_or_null_reg */
4244 		val_reg->id = ++env->id_gen;
4245 	} else if (class == BPF_STX) {
4246 		val_reg = reg_state(env, value_regno);
4247 		if (!register_is_null(val_reg) &&
4248 		    map_kptr_match_type(env, kptr_field, val_reg, value_regno))
4249 			return -EACCES;
4250 	} else if (class == BPF_ST) {
4251 		if (insn->imm) {
4252 			verbose(env, "BPF_ST imm must be 0 when storing to kptr at off=%u\n",
4253 				kptr_field->offset);
4254 			return -EACCES;
4255 		}
4256 	} else {
4257 		verbose(env, "kptr in map can only be accessed using BPF_LDX/BPF_STX/BPF_ST\n");
4258 		return -EACCES;
4259 	}
4260 	return 0;
4261 }
4262 
4263 /* check read/write into a map element with possible variable offset */
4264 static int check_map_access(struct bpf_verifier_env *env, u32 regno,
4265 			    int off, int size, bool zero_size_allowed,
4266 			    enum bpf_access_src src)
4267 {
4268 	struct bpf_verifier_state *vstate = env->cur_state;
4269 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
4270 	struct bpf_reg_state *reg = &state->regs[regno];
4271 	struct bpf_map *map = reg->map_ptr;
4272 	struct btf_record *rec;
4273 	int err, i;
4274 
4275 	err = check_mem_region_access(env, regno, off, size, map->value_size,
4276 				      zero_size_allowed);
4277 	if (err)
4278 		return err;
4279 
4280 	if (IS_ERR_OR_NULL(map->record))
4281 		return 0;
4282 	rec = map->record;
4283 	for (i = 0; i < rec->cnt; i++) {
4284 		struct btf_field *field = &rec->fields[i];
4285 		u32 p = field->offset;
4286 
4287 		/* If any part of a field  can be touched by load/store, reject
4288 		 * this program. To check that [x1, x2) overlaps with [y1, y2),
4289 		 * it is sufficient to check x1 < y2 && y1 < x2.
4290 		 */
4291 		if (reg->smin_value + off < p + btf_field_type_size(field->type) &&
4292 		    p < reg->umax_value + off + size) {
4293 			switch (field->type) {
4294 			case BPF_KPTR_UNREF:
4295 			case BPF_KPTR_REF:
4296 				if (src != ACCESS_DIRECT) {
4297 					verbose(env, "kptr cannot be accessed indirectly by helper\n");
4298 					return -EACCES;
4299 				}
4300 				if (!tnum_is_const(reg->var_off)) {
4301 					verbose(env, "kptr access cannot have variable offset\n");
4302 					return -EACCES;
4303 				}
4304 				if (p != off + reg->var_off.value) {
4305 					verbose(env, "kptr access misaligned expected=%u off=%llu\n",
4306 						p, off + reg->var_off.value);
4307 					return -EACCES;
4308 				}
4309 				if (size != bpf_size_to_bytes(BPF_DW)) {
4310 					verbose(env, "kptr access size must be BPF_DW\n");
4311 					return -EACCES;
4312 				}
4313 				break;
4314 			default:
4315 				verbose(env, "%s cannot be accessed directly by load/store\n",
4316 					btf_field_type_name(field->type));
4317 				return -EACCES;
4318 			}
4319 		}
4320 	}
4321 	return 0;
4322 }
4323 
4324 #define MAX_PACKET_OFF 0xffff
4325 
4326 static bool may_access_direct_pkt_data(struct bpf_verifier_env *env,
4327 				       const struct bpf_call_arg_meta *meta,
4328 				       enum bpf_access_type t)
4329 {
4330 	enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
4331 
4332 	switch (prog_type) {
4333 	/* Program types only with direct read access go here! */
4334 	case BPF_PROG_TYPE_LWT_IN:
4335 	case BPF_PROG_TYPE_LWT_OUT:
4336 	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
4337 	case BPF_PROG_TYPE_SK_REUSEPORT:
4338 	case BPF_PROG_TYPE_FLOW_DISSECTOR:
4339 	case BPF_PROG_TYPE_CGROUP_SKB:
4340 		if (t == BPF_WRITE)
4341 			return false;
4342 		fallthrough;
4343 
4344 	/* Program types with direct read + write access go here! */
4345 	case BPF_PROG_TYPE_SCHED_CLS:
4346 	case BPF_PROG_TYPE_SCHED_ACT:
4347 	case BPF_PROG_TYPE_XDP:
4348 	case BPF_PROG_TYPE_LWT_XMIT:
4349 	case BPF_PROG_TYPE_SK_SKB:
4350 	case BPF_PROG_TYPE_SK_MSG:
4351 		if (meta)
4352 			return meta->pkt_access;
4353 
4354 		env->seen_direct_write = true;
4355 		return true;
4356 
4357 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
4358 		if (t == BPF_WRITE)
4359 			env->seen_direct_write = true;
4360 
4361 		return true;
4362 
4363 	default:
4364 		return false;
4365 	}
4366 }
4367 
4368 static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
4369 			       int size, bool zero_size_allowed)
4370 {
4371 	struct bpf_reg_state *regs = cur_regs(env);
4372 	struct bpf_reg_state *reg = &regs[regno];
4373 	int err;
4374 
4375 	/* We may have added a variable offset to the packet pointer; but any
4376 	 * reg->range we have comes after that.  We are only checking the fixed
4377 	 * offset.
4378 	 */
4379 
4380 	/* We don't allow negative numbers, because we aren't tracking enough
4381 	 * detail to prove they're safe.
4382 	 */
4383 	if (reg->smin_value < 0) {
4384 		verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
4385 			regno);
4386 		return -EACCES;
4387 	}
4388 
4389 	err = reg->range < 0 ? -EINVAL :
4390 	      __check_mem_access(env, regno, off, size, reg->range,
4391 				 zero_size_allowed);
4392 	if (err) {
4393 		verbose(env, "R%d offset is outside of the packet\n", regno);
4394 		return err;
4395 	}
4396 
4397 	/* __check_mem_access has made sure "off + size - 1" is within u16.
4398 	 * reg->umax_value can't be bigger than MAX_PACKET_OFF which is 0xffff,
4399 	 * otherwise find_good_pkt_pointers would have refused to set range info
4400 	 * that __check_mem_access would have rejected this pkt access.
4401 	 * Therefore, "off + reg->umax_value + size - 1" won't overflow u32.
4402 	 */
4403 	env->prog->aux->max_pkt_offset =
4404 		max_t(u32, env->prog->aux->max_pkt_offset,
4405 		      off + reg->umax_value + size - 1);
4406 
4407 	return err;
4408 }
4409 
4410 /* check access to 'struct bpf_context' fields.  Supports fixed offsets only */
4411 static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size,
4412 			    enum bpf_access_type t, enum bpf_reg_type *reg_type,
4413 			    struct btf **btf, u32 *btf_id)
4414 {
4415 	struct bpf_insn_access_aux info = {
4416 		.reg_type = *reg_type,
4417 		.log = &env->log,
4418 	};
4419 
4420 	if (env->ops->is_valid_access &&
4421 	    env->ops->is_valid_access(off, size, t, env->prog, &info)) {
4422 		/* A non zero info.ctx_field_size indicates that this field is a
4423 		 * candidate for later verifier transformation to load the whole
4424 		 * field and then apply a mask when accessed with a narrower
4425 		 * access than actual ctx access size. A zero info.ctx_field_size
4426 		 * will only allow for whole field access and rejects any other
4427 		 * type of narrower access.
4428 		 */
4429 		*reg_type = info.reg_type;
4430 
4431 		if (base_type(*reg_type) == PTR_TO_BTF_ID) {
4432 			*btf = info.btf;
4433 			*btf_id = info.btf_id;
4434 		} else {
4435 			env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
4436 		}
4437 		/* remember the offset of last byte accessed in ctx */
4438 		if (env->prog->aux->max_ctx_offset < off + size)
4439 			env->prog->aux->max_ctx_offset = off + size;
4440 		return 0;
4441 	}
4442 
4443 	verbose(env, "invalid bpf_context access off=%d size=%d\n", off, size);
4444 	return -EACCES;
4445 }
4446 
4447 static int check_flow_keys_access(struct bpf_verifier_env *env, int off,
4448 				  int size)
4449 {
4450 	if (size < 0 || off < 0 ||
4451 	    (u64)off + size > sizeof(struct bpf_flow_keys)) {
4452 		verbose(env, "invalid access to flow keys off=%d size=%d\n",
4453 			off, size);
4454 		return -EACCES;
4455 	}
4456 	return 0;
4457 }
4458 
4459 static int check_sock_access(struct bpf_verifier_env *env, int insn_idx,
4460 			     u32 regno, int off, int size,
4461 			     enum bpf_access_type t)
4462 {
4463 	struct bpf_reg_state *regs = cur_regs(env);
4464 	struct bpf_reg_state *reg = &regs[regno];
4465 	struct bpf_insn_access_aux info = {};
4466 	bool valid;
4467 
4468 	if (reg->smin_value < 0) {
4469 		verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
4470 			regno);
4471 		return -EACCES;
4472 	}
4473 
4474 	switch (reg->type) {
4475 	case PTR_TO_SOCK_COMMON:
4476 		valid = bpf_sock_common_is_valid_access(off, size, t, &info);
4477 		break;
4478 	case PTR_TO_SOCKET:
4479 		valid = bpf_sock_is_valid_access(off, size, t, &info);
4480 		break;
4481 	case PTR_TO_TCP_SOCK:
4482 		valid = bpf_tcp_sock_is_valid_access(off, size, t, &info);
4483 		break;
4484 	case PTR_TO_XDP_SOCK:
4485 		valid = bpf_xdp_sock_is_valid_access(off, size, t, &info);
4486 		break;
4487 	default:
4488 		valid = false;
4489 	}
4490 
4491 
4492 	if (valid) {
4493 		env->insn_aux_data[insn_idx].ctx_field_size =
4494 			info.ctx_field_size;
4495 		return 0;
4496 	}
4497 
4498 	verbose(env, "R%d invalid %s access off=%d size=%d\n",
4499 		regno, reg_type_str(env, reg->type), off, size);
4500 
4501 	return -EACCES;
4502 }
4503 
4504 static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
4505 {
4506 	return __is_pointer_value(env->allow_ptr_leaks, reg_state(env, regno));
4507 }
4508 
4509 static bool is_ctx_reg(struct bpf_verifier_env *env, int regno)
4510 {
4511 	const struct bpf_reg_state *reg = reg_state(env, regno);
4512 
4513 	return reg->type == PTR_TO_CTX;
4514 }
4515 
4516 static bool is_sk_reg(struct bpf_verifier_env *env, int regno)
4517 {
4518 	const struct bpf_reg_state *reg = reg_state(env, regno);
4519 
4520 	return type_is_sk_pointer(reg->type);
4521 }
4522 
4523 static bool is_pkt_reg(struct bpf_verifier_env *env, int regno)
4524 {
4525 	const struct bpf_reg_state *reg = reg_state(env, regno);
4526 
4527 	return type_is_pkt_pointer(reg->type);
4528 }
4529 
4530 static bool is_flow_key_reg(struct bpf_verifier_env *env, int regno)
4531 {
4532 	const struct bpf_reg_state *reg = reg_state(env, regno);
4533 
4534 	/* Separate to is_ctx_reg() since we still want to allow BPF_ST here. */
4535 	return reg->type == PTR_TO_FLOW_KEYS;
4536 }
4537 
4538 static bool is_trusted_reg(const struct bpf_reg_state *reg)
4539 {
4540 	/* A referenced register is always trusted. */
4541 	if (reg->ref_obj_id)
4542 		return true;
4543 
4544 	/* If a register is not referenced, it is trusted if it has the
4545 	 * MEM_ALLOC or PTR_TRUSTED type modifiers, and no others. Some of the
4546 	 * other type modifiers may be safe, but we elect to take an opt-in
4547 	 * approach here as some (e.g. PTR_UNTRUSTED and PTR_MAYBE_NULL) are
4548 	 * not.
4549 	 *
4550 	 * Eventually, we should make PTR_TRUSTED the single source of truth
4551 	 * for whether a register is trusted.
4552 	 */
4553 	return type_flag(reg->type) & BPF_REG_TRUSTED_MODIFIERS &&
4554 	       !bpf_type_has_unsafe_modifiers(reg->type);
4555 }
4556 
4557 static bool is_rcu_reg(const struct bpf_reg_state *reg)
4558 {
4559 	return reg->type & MEM_RCU;
4560 }
4561 
4562 static int check_pkt_ptr_alignment(struct bpf_verifier_env *env,
4563 				   const struct bpf_reg_state *reg,
4564 				   int off, int size, bool strict)
4565 {
4566 	struct tnum reg_off;
4567 	int ip_align;
4568 
4569 	/* Byte size accesses are always allowed. */
4570 	if (!strict || size == 1)
4571 		return 0;
4572 
4573 	/* For platforms that do not have a Kconfig enabling
4574 	 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the value of
4575 	 * NET_IP_ALIGN is universally set to '2'.  And on platforms
4576 	 * that do set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, we get
4577 	 * to this code only in strict mode where we want to emulate
4578 	 * the NET_IP_ALIGN==2 checking.  Therefore use an
4579 	 * unconditional IP align value of '2'.
4580 	 */
4581 	ip_align = 2;
4582 
4583 	reg_off = tnum_add(reg->var_off, tnum_const(ip_align + reg->off + off));
4584 	if (!tnum_is_aligned(reg_off, size)) {
4585 		char tn_buf[48];
4586 
4587 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
4588 		verbose(env,
4589 			"misaligned packet access off %d+%s+%d+%d size %d\n",
4590 			ip_align, tn_buf, reg->off, off, size);
4591 		return -EACCES;
4592 	}
4593 
4594 	return 0;
4595 }
4596 
4597 static int check_generic_ptr_alignment(struct bpf_verifier_env *env,
4598 				       const struct bpf_reg_state *reg,
4599 				       const char *pointer_desc,
4600 				       int off, int size, bool strict)
4601 {
4602 	struct tnum reg_off;
4603 
4604 	/* Byte size accesses are always allowed. */
4605 	if (!strict || size == 1)
4606 		return 0;
4607 
4608 	reg_off = tnum_add(reg->var_off, tnum_const(reg->off + off));
4609 	if (!tnum_is_aligned(reg_off, size)) {
4610 		char tn_buf[48];
4611 
4612 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
4613 		verbose(env, "misaligned %saccess off %s+%d+%d size %d\n",
4614 			pointer_desc, tn_buf, reg->off, off, size);
4615 		return -EACCES;
4616 	}
4617 
4618 	return 0;
4619 }
4620 
4621 static int check_ptr_alignment(struct bpf_verifier_env *env,
4622 			       const struct bpf_reg_state *reg, int off,
4623 			       int size, bool strict_alignment_once)
4624 {
4625 	bool strict = env->strict_alignment || strict_alignment_once;
4626 	const char *pointer_desc = "";
4627 
4628 	switch (reg->type) {
4629 	case PTR_TO_PACKET:
4630 	case PTR_TO_PACKET_META:
4631 		/* Special case, because of NET_IP_ALIGN. Given metadata sits
4632 		 * right in front, treat it the very same way.
4633 		 */
4634 		return check_pkt_ptr_alignment(env, reg, off, size, strict);
4635 	case PTR_TO_FLOW_KEYS:
4636 		pointer_desc = "flow keys ";
4637 		break;
4638 	case PTR_TO_MAP_KEY:
4639 		pointer_desc = "key ";
4640 		break;
4641 	case PTR_TO_MAP_VALUE:
4642 		pointer_desc = "value ";
4643 		break;
4644 	case PTR_TO_CTX:
4645 		pointer_desc = "context ";
4646 		break;
4647 	case PTR_TO_STACK:
4648 		pointer_desc = "stack ";
4649 		/* The stack spill tracking logic in check_stack_write_fixed_off()
4650 		 * and check_stack_read_fixed_off() relies on stack accesses being
4651 		 * aligned.
4652 		 */
4653 		strict = true;
4654 		break;
4655 	case PTR_TO_SOCKET:
4656 		pointer_desc = "sock ";
4657 		break;
4658 	case PTR_TO_SOCK_COMMON:
4659 		pointer_desc = "sock_common ";
4660 		break;
4661 	case PTR_TO_TCP_SOCK:
4662 		pointer_desc = "tcp_sock ";
4663 		break;
4664 	case PTR_TO_XDP_SOCK:
4665 		pointer_desc = "xdp_sock ";
4666 		break;
4667 	default:
4668 		break;
4669 	}
4670 	return check_generic_ptr_alignment(env, reg, pointer_desc, off, size,
4671 					   strict);
4672 }
4673 
4674 static int update_stack_depth(struct bpf_verifier_env *env,
4675 			      const struct bpf_func_state *func,
4676 			      int off)
4677 {
4678 	u16 stack = env->subprog_info[func->subprogno].stack_depth;
4679 
4680 	if (stack >= -off)
4681 		return 0;
4682 
4683 	/* update known max for given subprogram */
4684 	env->subprog_info[func->subprogno].stack_depth = -off;
4685 	return 0;
4686 }
4687 
4688 /* starting from main bpf function walk all instructions of the function
4689  * and recursively walk all callees that given function can call.
4690  * Ignore jump and exit insns.
4691  * Since recursion is prevented by check_cfg() this algorithm
4692  * only needs a local stack of MAX_CALL_FRAMES to remember callsites
4693  */
4694 static int check_max_stack_depth(struct bpf_verifier_env *env)
4695 {
4696 	int depth = 0, frame = 0, idx = 0, i = 0, subprog_end;
4697 	struct bpf_subprog_info *subprog = env->subprog_info;
4698 	struct bpf_insn *insn = env->prog->insnsi;
4699 	bool tail_call_reachable = false;
4700 	int ret_insn[MAX_CALL_FRAMES];
4701 	int ret_prog[MAX_CALL_FRAMES];
4702 	int j;
4703 
4704 process_func:
4705 	/* protect against potential stack overflow that might happen when
4706 	 * bpf2bpf calls get combined with tailcalls. Limit the caller's stack
4707 	 * depth for such case down to 256 so that the worst case scenario
4708 	 * would result in 8k stack size (32 which is tailcall limit * 256 =
4709 	 * 8k).
4710 	 *
4711 	 * To get the idea what might happen, see an example:
4712 	 * func1 -> sub rsp, 128
4713 	 *  subfunc1 -> sub rsp, 256
4714 	 *  tailcall1 -> add rsp, 256
4715 	 *   func2 -> sub rsp, 192 (total stack size = 128 + 192 = 320)
4716 	 *   subfunc2 -> sub rsp, 64
4717 	 *   subfunc22 -> sub rsp, 128
4718 	 *   tailcall2 -> add rsp, 128
4719 	 *    func3 -> sub rsp, 32 (total stack size 128 + 192 + 64 + 32 = 416)
4720 	 *
4721 	 * tailcall will unwind the current stack frame but it will not get rid
4722 	 * of caller's stack as shown on the example above.
4723 	 */
4724 	if (idx && subprog[idx].has_tail_call && depth >= 256) {
4725 		verbose(env,
4726 			"tail_calls are not allowed when call stack of previous frames is %d bytes. Too large\n",
4727 			depth);
4728 		return -EACCES;
4729 	}
4730 	/* round up to 32-bytes, since this is granularity
4731 	 * of interpreter stack size
4732 	 */
4733 	depth += round_up(max_t(u32, subprog[idx].stack_depth, 1), 32);
4734 	if (depth > MAX_BPF_STACK) {
4735 		verbose(env, "combined stack size of %d calls is %d. Too large\n",
4736 			frame + 1, depth);
4737 		return -EACCES;
4738 	}
4739 continue_func:
4740 	subprog_end = subprog[idx + 1].start;
4741 	for (; i < subprog_end; i++) {
4742 		int next_insn;
4743 
4744 		if (!bpf_pseudo_call(insn + i) && !bpf_pseudo_func(insn + i))
4745 			continue;
4746 		/* remember insn and function to return to */
4747 		ret_insn[frame] = i + 1;
4748 		ret_prog[frame] = idx;
4749 
4750 		/* find the callee */
4751 		next_insn = i + insn[i].imm + 1;
4752 		idx = find_subprog(env, next_insn);
4753 		if (idx < 0) {
4754 			WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
4755 				  next_insn);
4756 			return -EFAULT;
4757 		}
4758 		if (subprog[idx].is_async_cb) {
4759 			if (subprog[idx].has_tail_call) {
4760 				verbose(env, "verifier bug. subprog has tail_call and async cb\n");
4761 				return -EFAULT;
4762 			}
4763 			 /* async callbacks don't increase bpf prog stack size */
4764 			continue;
4765 		}
4766 		i = next_insn;
4767 
4768 		if (subprog[idx].has_tail_call)
4769 			tail_call_reachable = true;
4770 
4771 		frame++;
4772 		if (frame >= MAX_CALL_FRAMES) {
4773 			verbose(env, "the call stack of %d frames is too deep !\n",
4774 				frame);
4775 			return -E2BIG;
4776 		}
4777 		goto process_func;
4778 	}
4779 	/* if tail call got detected across bpf2bpf calls then mark each of the
4780 	 * currently present subprog frames as tail call reachable subprogs;
4781 	 * this info will be utilized by JIT so that we will be preserving the
4782 	 * tail call counter throughout bpf2bpf calls combined with tailcalls
4783 	 */
4784 	if (tail_call_reachable)
4785 		for (j = 0; j < frame; j++)
4786 			subprog[ret_prog[j]].tail_call_reachable = true;
4787 	if (subprog[0].tail_call_reachable)
4788 		env->prog->aux->tail_call_reachable = true;
4789 
4790 	/* end of for() loop means the last insn of the 'subprog'
4791 	 * was reached. Doesn't matter whether it was JA or EXIT
4792 	 */
4793 	if (frame == 0)
4794 		return 0;
4795 	depth -= round_up(max_t(u32, subprog[idx].stack_depth, 1), 32);
4796 	frame--;
4797 	i = ret_insn[frame];
4798 	idx = ret_prog[frame];
4799 	goto continue_func;
4800 }
4801 
4802 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
4803 static int get_callee_stack_depth(struct bpf_verifier_env *env,
4804 				  const struct bpf_insn *insn, int idx)
4805 {
4806 	int start = idx + insn->imm + 1, subprog;
4807 
4808 	subprog = find_subprog(env, start);
4809 	if (subprog < 0) {
4810 		WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
4811 			  start);
4812 		return -EFAULT;
4813 	}
4814 	return env->subprog_info[subprog].stack_depth;
4815 }
4816 #endif
4817 
4818 static int __check_buffer_access(struct bpf_verifier_env *env,
4819 				 const char *buf_info,
4820 				 const struct bpf_reg_state *reg,
4821 				 int regno, int off, int size)
4822 {
4823 	if (off < 0) {
4824 		verbose(env,
4825 			"R%d invalid %s buffer access: off=%d, size=%d\n",
4826 			regno, buf_info, off, size);
4827 		return -EACCES;
4828 	}
4829 	if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
4830 		char tn_buf[48];
4831 
4832 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
4833 		verbose(env,
4834 			"R%d invalid variable buffer offset: off=%d, var_off=%s\n",
4835 			regno, off, tn_buf);
4836 		return -EACCES;
4837 	}
4838 
4839 	return 0;
4840 }
4841 
4842 static int check_tp_buffer_access(struct bpf_verifier_env *env,
4843 				  const struct bpf_reg_state *reg,
4844 				  int regno, int off, int size)
4845 {
4846 	int err;
4847 
4848 	err = __check_buffer_access(env, "tracepoint", reg, regno, off, size);
4849 	if (err)
4850 		return err;
4851 
4852 	if (off + size > env->prog->aux->max_tp_access)
4853 		env->prog->aux->max_tp_access = off + size;
4854 
4855 	return 0;
4856 }
4857 
4858 static int check_buffer_access(struct bpf_verifier_env *env,
4859 			       const struct bpf_reg_state *reg,
4860 			       int regno, int off, int size,
4861 			       bool zero_size_allowed,
4862 			       u32 *max_access)
4863 {
4864 	const char *buf_info = type_is_rdonly_mem(reg->type) ? "rdonly" : "rdwr";
4865 	int err;
4866 
4867 	err = __check_buffer_access(env, buf_info, reg, regno, off, size);
4868 	if (err)
4869 		return err;
4870 
4871 	if (off + size > *max_access)
4872 		*max_access = off + size;
4873 
4874 	return 0;
4875 }
4876 
4877 /* BPF architecture zero extends alu32 ops into 64-bit registesr */
4878 static void zext_32_to_64(struct bpf_reg_state *reg)
4879 {
4880 	reg->var_off = tnum_subreg(reg->var_off);
4881 	__reg_assign_32_into_64(reg);
4882 }
4883 
4884 /* truncate register to smaller size (in bytes)
4885  * must be called with size < BPF_REG_SIZE
4886  */
4887 static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
4888 {
4889 	u64 mask;
4890 
4891 	/* clear high bits in bit representation */
4892 	reg->var_off = tnum_cast(reg->var_off, size);
4893 
4894 	/* fix arithmetic bounds */
4895 	mask = ((u64)1 << (size * 8)) - 1;
4896 	if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) {
4897 		reg->umin_value &= mask;
4898 		reg->umax_value &= mask;
4899 	} else {
4900 		reg->umin_value = 0;
4901 		reg->umax_value = mask;
4902 	}
4903 	reg->smin_value = reg->umin_value;
4904 	reg->smax_value = reg->umax_value;
4905 
4906 	/* If size is smaller than 32bit register the 32bit register
4907 	 * values are also truncated so we push 64-bit bounds into
4908 	 * 32-bit bounds. Above were truncated < 32-bits already.
4909 	 */
4910 	if (size >= 4)
4911 		return;
4912 	__reg_combine_64_into_32(reg);
4913 }
4914 
4915 static bool bpf_map_is_rdonly(const struct bpf_map *map)
4916 {
4917 	/* A map is considered read-only if the following condition are true:
4918 	 *
4919 	 * 1) BPF program side cannot change any of the map content. The
4920 	 *    BPF_F_RDONLY_PROG flag is throughout the lifetime of a map
4921 	 *    and was set at map creation time.
4922 	 * 2) The map value(s) have been initialized from user space by a
4923 	 *    loader and then "frozen", such that no new map update/delete
4924 	 *    operations from syscall side are possible for the rest of
4925 	 *    the map's lifetime from that point onwards.
4926 	 * 3) Any parallel/pending map update/delete operations from syscall
4927 	 *    side have been completed. Only after that point, it's safe to
4928 	 *    assume that map value(s) are immutable.
4929 	 */
4930 	return (map->map_flags & BPF_F_RDONLY_PROG) &&
4931 	       READ_ONCE(map->frozen) &&
4932 	       !bpf_map_write_active(map);
4933 }
4934 
4935 static int bpf_map_direct_read(struct bpf_map *map, int off, int size, u64 *val)
4936 {
4937 	void *ptr;
4938 	u64 addr;
4939 	int err;
4940 
4941 	err = map->ops->map_direct_value_addr(map, &addr, off);
4942 	if (err)
4943 		return err;
4944 	ptr = (void *)(long)addr + off;
4945 
4946 	switch (size) {
4947 	case sizeof(u8):
4948 		*val = (u64)*(u8 *)ptr;
4949 		break;
4950 	case sizeof(u16):
4951 		*val = (u64)*(u16 *)ptr;
4952 		break;
4953 	case sizeof(u32):
4954 		*val = (u64)*(u32 *)ptr;
4955 		break;
4956 	case sizeof(u64):
4957 		*val = *(u64 *)ptr;
4958 		break;
4959 	default:
4960 		return -EINVAL;
4961 	}
4962 	return 0;
4963 }
4964 
4965 #define BTF_TYPE_SAFE_NESTED(__type)  __PASTE(__type, __safe_fields)
4966 
4967 BTF_TYPE_SAFE_NESTED(struct task_struct) {
4968 	const cpumask_t *cpus_ptr;
4969 };
4970 
4971 static bool nested_ptr_is_trusted(struct bpf_verifier_env *env,
4972 				  struct bpf_reg_state *reg,
4973 				  int off)
4974 {
4975 	/* If its parent is not trusted, it can't regain its trusted status. */
4976 	if (!is_trusted_reg(reg))
4977 		return false;
4978 
4979 	BTF_TYPE_EMIT(BTF_TYPE_SAFE_NESTED(struct task_struct));
4980 
4981 	return btf_nested_type_is_trusted(&env->log, reg, off);
4982 }
4983 
4984 static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
4985 				   struct bpf_reg_state *regs,
4986 				   int regno, int off, int size,
4987 				   enum bpf_access_type atype,
4988 				   int value_regno)
4989 {
4990 	struct bpf_reg_state *reg = regs + regno;
4991 	const struct btf_type *t = btf_type_by_id(reg->btf, reg->btf_id);
4992 	const char *tname = btf_name_by_offset(reg->btf, t->name_off);
4993 	enum bpf_type_flag flag = 0;
4994 	u32 btf_id;
4995 	int ret;
4996 
4997 	if (!env->allow_ptr_leaks) {
4998 		verbose(env,
4999 			"'struct %s' access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN\n",
5000 			tname);
5001 		return -EPERM;
5002 	}
5003 	if (!env->prog->gpl_compatible && btf_is_kernel(reg->btf)) {
5004 		verbose(env,
5005 			"Cannot access kernel 'struct %s' from non-GPL compatible program\n",
5006 			tname);
5007 		return -EINVAL;
5008 	}
5009 	if (off < 0) {
5010 		verbose(env,
5011 			"R%d is ptr_%s invalid negative access: off=%d\n",
5012 			regno, tname, off);
5013 		return -EACCES;
5014 	}
5015 	if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
5016 		char tn_buf[48];
5017 
5018 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
5019 		verbose(env,
5020 			"R%d is ptr_%s invalid variable offset: off=%d, var_off=%s\n",
5021 			regno, tname, off, tn_buf);
5022 		return -EACCES;
5023 	}
5024 
5025 	if (reg->type & MEM_USER) {
5026 		verbose(env,
5027 			"R%d is ptr_%s access user memory: off=%d\n",
5028 			regno, tname, off);
5029 		return -EACCES;
5030 	}
5031 
5032 	if (reg->type & MEM_PERCPU) {
5033 		verbose(env,
5034 			"R%d is ptr_%s access percpu memory: off=%d\n",
5035 			regno, tname, off);
5036 		return -EACCES;
5037 	}
5038 
5039 	if (env->ops->btf_struct_access && !type_is_alloc(reg->type)) {
5040 		if (!btf_is_kernel(reg->btf)) {
5041 			verbose(env, "verifier internal error: reg->btf must be kernel btf\n");
5042 			return -EFAULT;
5043 		}
5044 		ret = env->ops->btf_struct_access(&env->log, reg, off, size, atype, &btf_id, &flag);
5045 	} else {
5046 		/* Writes are permitted with default btf_struct_access for
5047 		 * program allocated objects (which always have ref_obj_id > 0),
5048 		 * but not for untrusted PTR_TO_BTF_ID | MEM_ALLOC.
5049 		 */
5050 		if (atype != BPF_READ && reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) {
5051 			verbose(env, "only read is supported\n");
5052 			return -EACCES;
5053 		}
5054 
5055 		if (type_is_alloc(reg->type) && !reg->ref_obj_id) {
5056 			verbose(env, "verifier internal error: ref_obj_id for allocated object must be non-zero\n");
5057 			return -EFAULT;
5058 		}
5059 
5060 		ret = btf_struct_access(&env->log, reg, off, size, atype, &btf_id, &flag);
5061 	}
5062 
5063 	if (ret < 0)
5064 		return ret;
5065 
5066 	/* If this is an untrusted pointer, all pointers formed by walking it
5067 	 * also inherit the untrusted flag.
5068 	 */
5069 	if (type_flag(reg->type) & PTR_UNTRUSTED)
5070 		flag |= PTR_UNTRUSTED;
5071 
5072 	/* By default any pointer obtained from walking a trusted pointer is no
5073 	 * longer trusted, unless the field being accessed has explicitly been
5074 	 * marked as inheriting its parent's state of trust.
5075 	 *
5076 	 * An RCU-protected pointer can also be deemed trusted if we are in an
5077 	 * RCU read region. This case is handled below.
5078 	 */
5079 	if (nested_ptr_is_trusted(env, reg, off))
5080 		flag |= PTR_TRUSTED;
5081 	else
5082 		flag &= ~PTR_TRUSTED;
5083 
5084 	if (flag & MEM_RCU) {
5085 		/* Mark value register as MEM_RCU only if it is protected by
5086 		 * bpf_rcu_read_lock() and the ptr reg is rcu or trusted. MEM_RCU
5087 		 * itself can already indicate trustedness inside the rcu
5088 		 * read lock region. Also mark rcu pointer as PTR_MAYBE_NULL since
5089 		 * it could be null in some cases.
5090 		 */
5091 		if (!env->cur_state->active_rcu_lock ||
5092 		    !(is_trusted_reg(reg) || is_rcu_reg(reg)))
5093 			flag &= ~MEM_RCU;
5094 		else
5095 			flag |= PTR_MAYBE_NULL;
5096 	} else if (reg->type & MEM_RCU) {
5097 		/* ptr (reg) is marked as MEM_RCU, but the struct field is not tagged
5098 		 * with __rcu. Mark the flag as PTR_UNTRUSTED conservatively.
5099 		 */
5100 		flag |= PTR_UNTRUSTED;
5101 	}
5102 
5103 	if (atype == BPF_READ && value_regno >= 0)
5104 		mark_btf_ld_reg(env, regs, value_regno, ret, reg->btf, btf_id, flag);
5105 
5106 	return 0;
5107 }
5108 
5109 static int check_ptr_to_map_access(struct bpf_verifier_env *env,
5110 				   struct bpf_reg_state *regs,
5111 				   int regno, int off, int size,
5112 				   enum bpf_access_type atype,
5113 				   int value_regno)
5114 {
5115 	struct bpf_reg_state *reg = regs + regno;
5116 	struct bpf_map *map = reg->map_ptr;
5117 	struct bpf_reg_state map_reg;
5118 	enum bpf_type_flag flag = 0;
5119 	const struct btf_type *t;
5120 	const char *tname;
5121 	u32 btf_id;
5122 	int ret;
5123 
5124 	if (!btf_vmlinux) {
5125 		verbose(env, "map_ptr access not supported without CONFIG_DEBUG_INFO_BTF\n");
5126 		return -ENOTSUPP;
5127 	}
5128 
5129 	if (!map->ops->map_btf_id || !*map->ops->map_btf_id) {
5130 		verbose(env, "map_ptr access not supported for map type %d\n",
5131 			map->map_type);
5132 		return -ENOTSUPP;
5133 	}
5134 
5135 	t = btf_type_by_id(btf_vmlinux, *map->ops->map_btf_id);
5136 	tname = btf_name_by_offset(btf_vmlinux, t->name_off);
5137 
5138 	if (!env->allow_ptr_leaks) {
5139 		verbose(env,
5140 			"'struct %s' access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN\n",
5141 			tname);
5142 		return -EPERM;
5143 	}
5144 
5145 	if (off < 0) {
5146 		verbose(env, "R%d is %s invalid negative access: off=%d\n",
5147 			regno, tname, off);
5148 		return -EACCES;
5149 	}
5150 
5151 	if (atype != BPF_READ) {
5152 		verbose(env, "only read from %s is supported\n", tname);
5153 		return -EACCES;
5154 	}
5155 
5156 	/* Simulate access to a PTR_TO_BTF_ID */
5157 	memset(&map_reg, 0, sizeof(map_reg));
5158 	mark_btf_ld_reg(env, &map_reg, 0, PTR_TO_BTF_ID, btf_vmlinux, *map->ops->map_btf_id, 0);
5159 	ret = btf_struct_access(&env->log, &map_reg, off, size, atype, &btf_id, &flag);
5160 	if (ret < 0)
5161 		return ret;
5162 
5163 	if (value_regno >= 0)
5164 		mark_btf_ld_reg(env, regs, value_regno, ret, btf_vmlinux, btf_id, flag);
5165 
5166 	return 0;
5167 }
5168 
5169 /* Check that the stack access at the given offset is within bounds. The
5170  * maximum valid offset is -1.
5171  *
5172  * The minimum valid offset is -MAX_BPF_STACK for writes, and
5173  * -state->allocated_stack for reads.
5174  */
5175 static int check_stack_slot_within_bounds(int off,
5176 					  struct bpf_func_state *state,
5177 					  enum bpf_access_type t)
5178 {
5179 	int min_valid_off;
5180 
5181 	if (t == BPF_WRITE)
5182 		min_valid_off = -MAX_BPF_STACK;
5183 	else
5184 		min_valid_off = -state->allocated_stack;
5185 
5186 	if (off < min_valid_off || off > -1)
5187 		return -EACCES;
5188 	return 0;
5189 }
5190 
5191 /* Check that the stack access at 'regno + off' falls within the maximum stack
5192  * bounds.
5193  *
5194  * 'off' includes `regno->offset`, but not its dynamic part (if any).
5195  */
5196 static int check_stack_access_within_bounds(
5197 		struct bpf_verifier_env *env,
5198 		int regno, int off, int access_size,
5199 		enum bpf_access_src src, enum bpf_access_type type)
5200 {
5201 	struct bpf_reg_state *regs = cur_regs(env);
5202 	struct bpf_reg_state *reg = regs + regno;
5203 	struct bpf_func_state *state = func(env, reg);
5204 	int min_off, max_off;
5205 	int err;
5206 	char *err_extra;
5207 
5208 	if (src == ACCESS_HELPER)
5209 		/* We don't know if helpers are reading or writing (or both). */
5210 		err_extra = " indirect access to";
5211 	else if (type == BPF_READ)
5212 		err_extra = " read from";
5213 	else
5214 		err_extra = " write to";
5215 
5216 	if (tnum_is_const(reg->var_off)) {
5217 		min_off = reg->var_off.value + off;
5218 		if (access_size > 0)
5219 			max_off = min_off + access_size - 1;
5220 		else
5221 			max_off = min_off;
5222 	} else {
5223 		if (reg->smax_value >= BPF_MAX_VAR_OFF ||
5224 		    reg->smin_value <= -BPF_MAX_VAR_OFF) {
5225 			verbose(env, "invalid unbounded variable-offset%s stack R%d\n",
5226 				err_extra, regno);
5227 			return -EACCES;
5228 		}
5229 		min_off = reg->smin_value + off;
5230 		if (access_size > 0)
5231 			max_off = reg->smax_value + off + access_size - 1;
5232 		else
5233 			max_off = min_off;
5234 	}
5235 
5236 	err = check_stack_slot_within_bounds(min_off, state, type);
5237 	if (!err)
5238 		err = check_stack_slot_within_bounds(max_off, state, type);
5239 
5240 	if (err) {
5241 		if (tnum_is_const(reg->var_off)) {
5242 			verbose(env, "invalid%s stack R%d off=%d size=%d\n",
5243 				err_extra, regno, off, access_size);
5244 		} else {
5245 			char tn_buf[48];
5246 
5247 			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
5248 			verbose(env, "invalid variable-offset%s stack R%d var_off=%s size=%d\n",
5249 				err_extra, regno, tn_buf, access_size);
5250 		}
5251 	}
5252 	return err;
5253 }
5254 
5255 /* check whether memory at (regno + off) is accessible for t = (read | write)
5256  * if t==write, value_regno is a register which value is stored into memory
5257  * if t==read, value_regno is a register which will receive the value from memory
5258  * if t==write && value_regno==-1, some unknown value is stored into memory
5259  * if t==read && value_regno==-1, don't care what we read from memory
5260  */
5261 static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno,
5262 			    int off, int bpf_size, enum bpf_access_type t,
5263 			    int value_regno, bool strict_alignment_once)
5264 {
5265 	struct bpf_reg_state *regs = cur_regs(env);
5266 	struct bpf_reg_state *reg = regs + regno;
5267 	struct bpf_func_state *state;
5268 	int size, err = 0;
5269 
5270 	size = bpf_size_to_bytes(bpf_size);
5271 	if (size < 0)
5272 		return size;
5273 
5274 	/* alignment checks will add in reg->off themselves */
5275 	err = check_ptr_alignment(env, reg, off, size, strict_alignment_once);
5276 	if (err)
5277 		return err;
5278 
5279 	/* for access checks, reg->off is just part of off */
5280 	off += reg->off;
5281 
5282 	if (reg->type == PTR_TO_MAP_KEY) {
5283 		if (t == BPF_WRITE) {
5284 			verbose(env, "write to change key R%d not allowed\n", regno);
5285 			return -EACCES;
5286 		}
5287 
5288 		err = check_mem_region_access(env, regno, off, size,
5289 					      reg->map_ptr->key_size, false);
5290 		if (err)
5291 			return err;
5292 		if (value_regno >= 0)
5293 			mark_reg_unknown(env, regs, value_regno);
5294 	} else if (reg->type == PTR_TO_MAP_VALUE) {
5295 		struct btf_field *kptr_field = NULL;
5296 
5297 		if (t == BPF_WRITE && value_regno >= 0 &&
5298 		    is_pointer_value(env, value_regno)) {
5299 			verbose(env, "R%d leaks addr into map\n", value_regno);
5300 			return -EACCES;
5301 		}
5302 		err = check_map_access_type(env, regno, off, size, t);
5303 		if (err)
5304 			return err;
5305 		err = check_map_access(env, regno, off, size, false, ACCESS_DIRECT);
5306 		if (err)
5307 			return err;
5308 		if (tnum_is_const(reg->var_off))
5309 			kptr_field = btf_record_find(reg->map_ptr->record,
5310 						     off + reg->var_off.value, BPF_KPTR);
5311 		if (kptr_field) {
5312 			err = check_map_kptr_access(env, regno, value_regno, insn_idx, kptr_field);
5313 		} else if (t == BPF_READ && value_regno >= 0) {
5314 			struct bpf_map *map = reg->map_ptr;
5315 
5316 			/* if map is read-only, track its contents as scalars */
5317 			if (tnum_is_const(reg->var_off) &&
5318 			    bpf_map_is_rdonly(map) &&
5319 			    map->ops->map_direct_value_addr) {
5320 				int map_off = off + reg->var_off.value;
5321 				u64 val = 0;
5322 
5323 				err = bpf_map_direct_read(map, map_off, size,
5324 							  &val);
5325 				if (err)
5326 					return err;
5327 
5328 				regs[value_regno].type = SCALAR_VALUE;
5329 				__mark_reg_known(&regs[value_regno], val);
5330 			} else {
5331 				mark_reg_unknown(env, regs, value_regno);
5332 			}
5333 		}
5334 	} else if (base_type(reg->type) == PTR_TO_MEM) {
5335 		bool rdonly_mem = type_is_rdonly_mem(reg->type);
5336 
5337 		if (type_may_be_null(reg->type)) {
5338 			verbose(env, "R%d invalid mem access '%s'\n", regno,
5339 				reg_type_str(env, reg->type));
5340 			return -EACCES;
5341 		}
5342 
5343 		if (t == BPF_WRITE && rdonly_mem) {
5344 			verbose(env, "R%d cannot write into %s\n",
5345 				regno, reg_type_str(env, reg->type));
5346 			return -EACCES;
5347 		}
5348 
5349 		if (t == BPF_WRITE && value_regno >= 0 &&
5350 		    is_pointer_value(env, value_regno)) {
5351 			verbose(env, "R%d leaks addr into mem\n", value_regno);
5352 			return -EACCES;
5353 		}
5354 
5355 		err = check_mem_region_access(env, regno, off, size,
5356 					      reg->mem_size, false);
5357 		if (!err && value_regno >= 0 && (t == BPF_READ || rdonly_mem))
5358 			mark_reg_unknown(env, regs, value_regno);
5359 	} else if (reg->type == PTR_TO_CTX) {
5360 		enum bpf_reg_type reg_type = SCALAR_VALUE;
5361 		struct btf *btf = NULL;
5362 		u32 btf_id = 0;
5363 
5364 		if (t == BPF_WRITE && value_regno >= 0 &&
5365 		    is_pointer_value(env, value_regno)) {
5366 			verbose(env, "R%d leaks addr into ctx\n", value_regno);
5367 			return -EACCES;
5368 		}
5369 
5370 		err = check_ptr_off_reg(env, reg, regno);
5371 		if (err < 0)
5372 			return err;
5373 
5374 		err = check_ctx_access(env, insn_idx, off, size, t, &reg_type, &btf,
5375 				       &btf_id);
5376 		if (err)
5377 			verbose_linfo(env, insn_idx, "; ");
5378 		if (!err && t == BPF_READ && value_regno >= 0) {
5379 			/* ctx access returns either a scalar, or a
5380 			 * PTR_TO_PACKET[_META,_END]. In the latter
5381 			 * case, we know the offset is zero.
5382 			 */
5383 			if (reg_type == SCALAR_VALUE) {
5384 				mark_reg_unknown(env, regs, value_regno);
5385 			} else {
5386 				mark_reg_known_zero(env, regs,
5387 						    value_regno);
5388 				if (type_may_be_null(reg_type))
5389 					regs[value_regno].id = ++env->id_gen;
5390 				/* A load of ctx field could have different
5391 				 * actual load size with the one encoded in the
5392 				 * insn. When the dst is PTR, it is for sure not
5393 				 * a sub-register.
5394 				 */
5395 				regs[value_regno].subreg_def = DEF_NOT_SUBREG;
5396 				if (base_type(reg_type) == PTR_TO_BTF_ID) {
5397 					regs[value_regno].btf = btf;
5398 					regs[value_regno].btf_id = btf_id;
5399 				}
5400 			}
5401 			regs[value_regno].type = reg_type;
5402 		}
5403 
5404 	} else if (reg->type == PTR_TO_STACK) {
5405 		/* Basic bounds checks. */
5406 		err = check_stack_access_within_bounds(env, regno, off, size, ACCESS_DIRECT, t);
5407 		if (err)
5408 			return err;
5409 
5410 		state = func(env, reg);
5411 		err = update_stack_depth(env, state, off);
5412 		if (err)
5413 			return err;
5414 
5415 		if (t == BPF_READ)
5416 			err = check_stack_read(env, regno, off, size,
5417 					       value_regno);
5418 		else
5419 			err = check_stack_write(env, regno, off, size,
5420 						value_regno, insn_idx);
5421 	} else if (reg_is_pkt_pointer(reg)) {
5422 		if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) {
5423 			verbose(env, "cannot write into packet\n");
5424 			return -EACCES;
5425 		}
5426 		if (t == BPF_WRITE && value_regno >= 0 &&
5427 		    is_pointer_value(env, value_regno)) {
5428 			verbose(env, "R%d leaks addr into packet\n",
5429 				value_regno);
5430 			return -EACCES;
5431 		}
5432 		err = check_packet_access(env, regno, off, size, false);
5433 		if (!err && t == BPF_READ && value_regno >= 0)
5434 			mark_reg_unknown(env, regs, value_regno);
5435 	} else if (reg->type == PTR_TO_FLOW_KEYS) {
5436 		if (t == BPF_WRITE && value_regno >= 0 &&
5437 		    is_pointer_value(env, value_regno)) {
5438 			verbose(env, "R%d leaks addr into flow keys\n",
5439 				value_regno);
5440 			return -EACCES;
5441 		}
5442 
5443 		err = check_flow_keys_access(env, off, size);
5444 		if (!err && t == BPF_READ && value_regno >= 0)
5445 			mark_reg_unknown(env, regs, value_regno);
5446 	} else if (type_is_sk_pointer(reg->type)) {
5447 		if (t == BPF_WRITE) {
5448 			verbose(env, "R%d cannot write into %s\n",
5449 				regno, reg_type_str(env, reg->type));
5450 			return -EACCES;
5451 		}
5452 		err = check_sock_access(env, insn_idx, regno, off, size, t);
5453 		if (!err && value_regno >= 0)
5454 			mark_reg_unknown(env, regs, value_regno);
5455 	} else if (reg->type == PTR_TO_TP_BUFFER) {
5456 		err = check_tp_buffer_access(env, reg, regno, off, size);
5457 		if (!err && t == BPF_READ && value_regno >= 0)
5458 			mark_reg_unknown(env, regs, value_regno);
5459 	} else if (base_type(reg->type) == PTR_TO_BTF_ID &&
5460 		   !type_may_be_null(reg->type)) {
5461 		err = check_ptr_to_btf_access(env, regs, regno, off, size, t,
5462 					      value_regno);
5463 	} else if (reg->type == CONST_PTR_TO_MAP) {
5464 		err = check_ptr_to_map_access(env, regs, regno, off, size, t,
5465 					      value_regno);
5466 	} else if (base_type(reg->type) == PTR_TO_BUF) {
5467 		bool rdonly_mem = type_is_rdonly_mem(reg->type);
5468 		u32 *max_access;
5469 
5470 		if (rdonly_mem) {
5471 			if (t == BPF_WRITE) {
5472 				verbose(env, "R%d cannot write into %s\n",
5473 					regno, reg_type_str(env, reg->type));
5474 				return -EACCES;
5475 			}
5476 			max_access = &env->prog->aux->max_rdonly_access;
5477 		} else {
5478 			max_access = &env->prog->aux->max_rdwr_access;
5479 		}
5480 
5481 		err = check_buffer_access(env, reg, regno, off, size, false,
5482 					  max_access);
5483 
5484 		if (!err && value_regno >= 0 && (rdonly_mem || t == BPF_READ))
5485 			mark_reg_unknown(env, regs, value_regno);
5486 	} else {
5487 		verbose(env, "R%d invalid mem access '%s'\n", regno,
5488 			reg_type_str(env, reg->type));
5489 		return -EACCES;
5490 	}
5491 
5492 	if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ &&
5493 	    regs[value_regno].type == SCALAR_VALUE) {
5494 		/* b/h/w load zero-extends, mark upper bits as known 0 */
5495 		coerce_reg_to_size(&regs[value_regno], size);
5496 	}
5497 	return err;
5498 }
5499 
5500 static int check_atomic(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn)
5501 {
5502 	int load_reg;
5503 	int err;
5504 
5505 	switch (insn->imm) {
5506 	case BPF_ADD:
5507 	case BPF_ADD | BPF_FETCH:
5508 	case BPF_AND:
5509 	case BPF_AND | BPF_FETCH:
5510 	case BPF_OR:
5511 	case BPF_OR | BPF_FETCH:
5512 	case BPF_XOR:
5513 	case BPF_XOR | BPF_FETCH:
5514 	case BPF_XCHG:
5515 	case BPF_CMPXCHG:
5516 		break;
5517 	default:
5518 		verbose(env, "BPF_ATOMIC uses invalid atomic opcode %02x\n", insn->imm);
5519 		return -EINVAL;
5520 	}
5521 
5522 	if (BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) {
5523 		verbose(env, "invalid atomic operand size\n");
5524 		return -EINVAL;
5525 	}
5526 
5527 	/* check src1 operand */
5528 	err = check_reg_arg(env, insn->src_reg, SRC_OP);
5529 	if (err)
5530 		return err;
5531 
5532 	/* check src2 operand */
5533 	err = check_reg_arg(env, insn->dst_reg, SRC_OP);
5534 	if (err)
5535 		return err;
5536 
5537 	if (insn->imm == BPF_CMPXCHG) {
5538 		/* Check comparison of R0 with memory location */
5539 		const u32 aux_reg = BPF_REG_0;
5540 
5541 		err = check_reg_arg(env, aux_reg, SRC_OP);
5542 		if (err)
5543 			return err;
5544 
5545 		if (is_pointer_value(env, aux_reg)) {
5546 			verbose(env, "R%d leaks addr into mem\n", aux_reg);
5547 			return -EACCES;
5548 		}
5549 	}
5550 
5551 	if (is_pointer_value(env, insn->src_reg)) {
5552 		verbose(env, "R%d leaks addr into mem\n", insn->src_reg);
5553 		return -EACCES;
5554 	}
5555 
5556 	if (is_ctx_reg(env, insn->dst_reg) ||
5557 	    is_pkt_reg(env, insn->dst_reg) ||
5558 	    is_flow_key_reg(env, insn->dst_reg) ||
5559 	    is_sk_reg(env, insn->dst_reg)) {
5560 		verbose(env, "BPF_ATOMIC stores into R%d %s is not allowed\n",
5561 			insn->dst_reg,
5562 			reg_type_str(env, reg_state(env, insn->dst_reg)->type));
5563 		return -EACCES;
5564 	}
5565 
5566 	if (insn->imm & BPF_FETCH) {
5567 		if (insn->imm == BPF_CMPXCHG)
5568 			load_reg = BPF_REG_0;
5569 		else
5570 			load_reg = insn->src_reg;
5571 
5572 		/* check and record load of old value */
5573 		err = check_reg_arg(env, load_reg, DST_OP);
5574 		if (err)
5575 			return err;
5576 	} else {
5577 		/* This instruction accesses a memory location but doesn't
5578 		 * actually load it into a register.
5579 		 */
5580 		load_reg = -1;
5581 	}
5582 
5583 	/* Check whether we can read the memory, with second call for fetch
5584 	 * case to simulate the register fill.
5585 	 */
5586 	err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
5587 			       BPF_SIZE(insn->code), BPF_READ, -1, true);
5588 	if (!err && load_reg >= 0)
5589 		err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
5590 				       BPF_SIZE(insn->code), BPF_READ, load_reg,
5591 				       true);
5592 	if (err)
5593 		return err;
5594 
5595 	/* Check whether we can write into the same memory. */
5596 	err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
5597 			       BPF_SIZE(insn->code), BPF_WRITE, -1, true);
5598 	if (err)
5599 		return err;
5600 
5601 	return 0;
5602 }
5603 
5604 /* When register 'regno' is used to read the stack (either directly or through
5605  * a helper function) make sure that it's within stack boundary and, depending
5606  * on the access type, that all elements of the stack are initialized.
5607  *
5608  * 'off' includes 'regno->off', but not its dynamic part (if any).
5609  *
5610  * All registers that have been spilled on the stack in the slots within the
5611  * read offsets are marked as read.
5612  */
5613 static int check_stack_range_initialized(
5614 		struct bpf_verifier_env *env, int regno, int off,
5615 		int access_size, bool zero_size_allowed,
5616 		enum bpf_access_src type, struct bpf_call_arg_meta *meta)
5617 {
5618 	struct bpf_reg_state *reg = reg_state(env, regno);
5619 	struct bpf_func_state *state = func(env, reg);
5620 	int err, min_off, max_off, i, j, slot, spi;
5621 	char *err_extra = type == ACCESS_HELPER ? " indirect" : "";
5622 	enum bpf_access_type bounds_check_type;
5623 	/* Some accesses can write anything into the stack, others are
5624 	 * read-only.
5625 	 */
5626 	bool clobber = false;
5627 
5628 	if (access_size == 0 && !zero_size_allowed) {
5629 		verbose(env, "invalid zero-sized read\n");
5630 		return -EACCES;
5631 	}
5632 
5633 	if (type == ACCESS_HELPER) {
5634 		/* The bounds checks for writes are more permissive than for
5635 		 * reads. However, if raw_mode is not set, we'll do extra
5636 		 * checks below.
5637 		 */
5638 		bounds_check_type = BPF_WRITE;
5639 		clobber = true;
5640 	} else {
5641 		bounds_check_type = BPF_READ;
5642 	}
5643 	err = check_stack_access_within_bounds(env, regno, off, access_size,
5644 					       type, bounds_check_type);
5645 	if (err)
5646 		return err;
5647 
5648 
5649 	if (tnum_is_const(reg->var_off)) {
5650 		min_off = max_off = reg->var_off.value + off;
5651 	} else {
5652 		/* Variable offset is prohibited for unprivileged mode for
5653 		 * simplicity since it requires corresponding support in
5654 		 * Spectre masking for stack ALU.
5655 		 * See also retrieve_ptr_limit().
5656 		 */
5657 		if (!env->bypass_spec_v1) {
5658 			char tn_buf[48];
5659 
5660 			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
5661 			verbose(env, "R%d%s variable offset stack access prohibited for !root, var_off=%s\n",
5662 				regno, err_extra, tn_buf);
5663 			return -EACCES;
5664 		}
5665 		/* Only initialized buffer on stack is allowed to be accessed
5666 		 * with variable offset. With uninitialized buffer it's hard to
5667 		 * guarantee that whole memory is marked as initialized on
5668 		 * helper return since specific bounds are unknown what may
5669 		 * cause uninitialized stack leaking.
5670 		 */
5671 		if (meta && meta->raw_mode)
5672 			meta = NULL;
5673 
5674 		min_off = reg->smin_value + off;
5675 		max_off = reg->smax_value + off;
5676 	}
5677 
5678 	if (meta && meta->raw_mode) {
5679 		/* Ensure we won't be overwriting dynptrs when simulating byte
5680 		 * by byte access in check_helper_call using meta.access_size.
5681 		 * This would be a problem if we have a helper in the future
5682 		 * which takes:
5683 		 *
5684 		 *	helper(uninit_mem, len, dynptr)
5685 		 *
5686 		 * Now, uninint_mem may overlap with dynptr pointer. Hence, it
5687 		 * may end up writing to dynptr itself when touching memory from
5688 		 * arg 1. This can be relaxed on a case by case basis for known
5689 		 * safe cases, but reject due to the possibilitiy of aliasing by
5690 		 * default.
5691 		 */
5692 		for (i = min_off; i < max_off + access_size; i++) {
5693 			int stack_off = -i - 1;
5694 
5695 			spi = __get_spi(i);
5696 			/* raw_mode may write past allocated_stack */
5697 			if (state->allocated_stack <= stack_off)
5698 				continue;
5699 			if (state->stack[spi].slot_type[stack_off % BPF_REG_SIZE] == STACK_DYNPTR) {
5700 				verbose(env, "potential write to dynptr at off=%d disallowed\n", i);
5701 				return -EACCES;
5702 			}
5703 		}
5704 		meta->access_size = access_size;
5705 		meta->regno = regno;
5706 		return 0;
5707 	}
5708 
5709 	for (i = min_off; i < max_off + access_size; i++) {
5710 		u8 *stype;
5711 
5712 		slot = -i - 1;
5713 		spi = slot / BPF_REG_SIZE;
5714 		if (state->allocated_stack <= slot)
5715 			goto err;
5716 		stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
5717 		if (*stype == STACK_MISC)
5718 			goto mark;
5719 		if (*stype == STACK_ZERO) {
5720 			if (clobber) {
5721 				/* helper can write anything into the stack */
5722 				*stype = STACK_MISC;
5723 			}
5724 			goto mark;
5725 		}
5726 
5727 		if (is_spilled_reg(&state->stack[spi]) &&
5728 		    (state->stack[spi].spilled_ptr.type == SCALAR_VALUE ||
5729 		     env->allow_ptr_leaks)) {
5730 			if (clobber) {
5731 				__mark_reg_unknown(env, &state->stack[spi].spilled_ptr);
5732 				for (j = 0; j < BPF_REG_SIZE; j++)
5733 					scrub_spilled_slot(&state->stack[spi].slot_type[j]);
5734 			}
5735 			goto mark;
5736 		}
5737 
5738 err:
5739 		if (tnum_is_const(reg->var_off)) {
5740 			verbose(env, "invalid%s read from stack R%d off %d+%d size %d\n",
5741 				err_extra, regno, min_off, i - min_off, access_size);
5742 		} else {
5743 			char tn_buf[48];
5744 
5745 			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
5746 			verbose(env, "invalid%s read from stack R%d var_off %s+%d size %d\n",
5747 				err_extra, regno, tn_buf, i - min_off, access_size);
5748 		}
5749 		return -EACCES;
5750 mark:
5751 		/* reading any byte out of 8-byte 'spill_slot' will cause
5752 		 * the whole slot to be marked as 'read'
5753 		 */
5754 		mark_reg_read(env, &state->stack[spi].spilled_ptr,
5755 			      state->stack[spi].spilled_ptr.parent,
5756 			      REG_LIVE_READ64);
5757 		/* We do not set REG_LIVE_WRITTEN for stack slot, as we can not
5758 		 * be sure that whether stack slot is written to or not. Hence,
5759 		 * we must still conservatively propagate reads upwards even if
5760 		 * helper may write to the entire memory range.
5761 		 */
5762 	}
5763 	return update_stack_depth(env, state, min_off);
5764 }
5765 
5766 static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
5767 				   int access_size, bool zero_size_allowed,
5768 				   struct bpf_call_arg_meta *meta)
5769 {
5770 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
5771 	u32 *max_access;
5772 
5773 	switch (base_type(reg->type)) {
5774 	case PTR_TO_PACKET:
5775 	case PTR_TO_PACKET_META:
5776 		return check_packet_access(env, regno, reg->off, access_size,
5777 					   zero_size_allowed);
5778 	case PTR_TO_MAP_KEY:
5779 		if (meta && meta->raw_mode) {
5780 			verbose(env, "R%d cannot write into %s\n", regno,
5781 				reg_type_str(env, reg->type));
5782 			return -EACCES;
5783 		}
5784 		return check_mem_region_access(env, regno, reg->off, access_size,
5785 					       reg->map_ptr->key_size, false);
5786 	case PTR_TO_MAP_VALUE:
5787 		if (check_map_access_type(env, regno, reg->off, access_size,
5788 					  meta && meta->raw_mode ? BPF_WRITE :
5789 					  BPF_READ))
5790 			return -EACCES;
5791 		return check_map_access(env, regno, reg->off, access_size,
5792 					zero_size_allowed, ACCESS_HELPER);
5793 	case PTR_TO_MEM:
5794 		if (type_is_rdonly_mem(reg->type)) {
5795 			if (meta && meta->raw_mode) {
5796 				verbose(env, "R%d cannot write into %s\n", regno,
5797 					reg_type_str(env, reg->type));
5798 				return -EACCES;
5799 			}
5800 		}
5801 		return check_mem_region_access(env, regno, reg->off,
5802 					       access_size, reg->mem_size,
5803 					       zero_size_allowed);
5804 	case PTR_TO_BUF:
5805 		if (type_is_rdonly_mem(reg->type)) {
5806 			if (meta && meta->raw_mode) {
5807 				verbose(env, "R%d cannot write into %s\n", regno,
5808 					reg_type_str(env, reg->type));
5809 				return -EACCES;
5810 			}
5811 
5812 			max_access = &env->prog->aux->max_rdonly_access;
5813 		} else {
5814 			max_access = &env->prog->aux->max_rdwr_access;
5815 		}
5816 		return check_buffer_access(env, reg, regno, reg->off,
5817 					   access_size, zero_size_allowed,
5818 					   max_access);
5819 	case PTR_TO_STACK:
5820 		return check_stack_range_initialized(
5821 				env,
5822 				regno, reg->off, access_size,
5823 				zero_size_allowed, ACCESS_HELPER, meta);
5824 	case PTR_TO_CTX:
5825 		/* in case the function doesn't know how to access the context,
5826 		 * (because we are in a program of type SYSCALL for example), we
5827 		 * can not statically check its size.
5828 		 * Dynamically check it now.
5829 		 */
5830 		if (!env->ops->convert_ctx_access) {
5831 			enum bpf_access_type atype = meta && meta->raw_mode ? BPF_WRITE : BPF_READ;
5832 			int offset = access_size - 1;
5833 
5834 			/* Allow zero-byte read from PTR_TO_CTX */
5835 			if (access_size == 0)
5836 				return zero_size_allowed ? 0 : -EACCES;
5837 
5838 			return check_mem_access(env, env->insn_idx, regno, offset, BPF_B,
5839 						atype, -1, false);
5840 		}
5841 
5842 		fallthrough;
5843 	default: /* scalar_value or invalid ptr */
5844 		/* Allow zero-byte read from NULL, regardless of pointer type */
5845 		if (zero_size_allowed && access_size == 0 &&
5846 		    register_is_null(reg))
5847 			return 0;
5848 
5849 		verbose(env, "R%d type=%s ", regno,
5850 			reg_type_str(env, reg->type));
5851 		verbose(env, "expected=%s\n", reg_type_str(env, PTR_TO_STACK));
5852 		return -EACCES;
5853 	}
5854 }
5855 
5856 static int check_mem_size_reg(struct bpf_verifier_env *env,
5857 			      struct bpf_reg_state *reg, u32 regno,
5858 			      bool zero_size_allowed,
5859 			      struct bpf_call_arg_meta *meta)
5860 {
5861 	int err;
5862 
5863 	/* This is used to refine r0 return value bounds for helpers
5864 	 * that enforce this value as an upper bound on return values.
5865 	 * See do_refine_retval_range() for helpers that can refine
5866 	 * the return value. C type of helper is u32 so we pull register
5867 	 * bound from umax_value however, if negative verifier errors
5868 	 * out. Only upper bounds can be learned because retval is an
5869 	 * int type and negative retvals are allowed.
5870 	 */
5871 	meta->msize_max_value = reg->umax_value;
5872 
5873 	/* The register is SCALAR_VALUE; the access check
5874 	 * happens using its boundaries.
5875 	 */
5876 	if (!tnum_is_const(reg->var_off))
5877 		/* For unprivileged variable accesses, disable raw
5878 		 * mode so that the program is required to
5879 		 * initialize all the memory that the helper could
5880 		 * just partially fill up.
5881 		 */
5882 		meta = NULL;
5883 
5884 	if (reg->smin_value < 0) {
5885 		verbose(env, "R%d min value is negative, either use unsigned or 'var &= const'\n",
5886 			regno);
5887 		return -EACCES;
5888 	}
5889 
5890 	if (reg->umin_value == 0) {
5891 		err = check_helper_mem_access(env, regno - 1, 0,
5892 					      zero_size_allowed,
5893 					      meta);
5894 		if (err)
5895 			return err;
5896 	}
5897 
5898 	if (reg->umax_value >= BPF_MAX_VAR_SIZ) {
5899 		verbose(env, "R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n",
5900 			regno);
5901 		return -EACCES;
5902 	}
5903 	err = check_helper_mem_access(env, regno - 1,
5904 				      reg->umax_value,
5905 				      zero_size_allowed, meta);
5906 	if (!err)
5907 		err = mark_chain_precision(env, regno);
5908 	return err;
5909 }
5910 
5911 int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
5912 		   u32 regno, u32 mem_size)
5913 {
5914 	bool may_be_null = type_may_be_null(reg->type);
5915 	struct bpf_reg_state saved_reg;
5916 	struct bpf_call_arg_meta meta;
5917 	int err;
5918 
5919 	if (register_is_null(reg))
5920 		return 0;
5921 
5922 	memset(&meta, 0, sizeof(meta));
5923 	/* Assuming that the register contains a value check if the memory
5924 	 * access is safe. Temporarily save and restore the register's state as
5925 	 * the conversion shouldn't be visible to a caller.
5926 	 */
5927 	if (may_be_null) {
5928 		saved_reg = *reg;
5929 		mark_ptr_not_null_reg(reg);
5930 	}
5931 
5932 	err = check_helper_mem_access(env, regno, mem_size, true, &meta);
5933 	/* Check access for BPF_WRITE */
5934 	meta.raw_mode = true;
5935 	err = err ?: check_helper_mem_access(env, regno, mem_size, true, &meta);
5936 
5937 	if (may_be_null)
5938 		*reg = saved_reg;
5939 
5940 	return err;
5941 }
5942 
5943 static int check_kfunc_mem_size_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
5944 				    u32 regno)
5945 {
5946 	struct bpf_reg_state *mem_reg = &cur_regs(env)[regno - 1];
5947 	bool may_be_null = type_may_be_null(mem_reg->type);
5948 	struct bpf_reg_state saved_reg;
5949 	struct bpf_call_arg_meta meta;
5950 	int err;
5951 
5952 	WARN_ON_ONCE(regno < BPF_REG_2 || regno > BPF_REG_5);
5953 
5954 	memset(&meta, 0, sizeof(meta));
5955 
5956 	if (may_be_null) {
5957 		saved_reg = *mem_reg;
5958 		mark_ptr_not_null_reg(mem_reg);
5959 	}
5960 
5961 	err = check_mem_size_reg(env, reg, regno, true, &meta);
5962 	/* Check access for BPF_WRITE */
5963 	meta.raw_mode = true;
5964 	err = err ?: check_mem_size_reg(env, reg, regno, true, &meta);
5965 
5966 	if (may_be_null)
5967 		*mem_reg = saved_reg;
5968 	return err;
5969 }
5970 
5971 /* Implementation details:
5972  * bpf_map_lookup returns PTR_TO_MAP_VALUE_OR_NULL.
5973  * bpf_obj_new returns PTR_TO_BTF_ID | MEM_ALLOC | PTR_MAYBE_NULL.
5974  * Two bpf_map_lookups (even with the same key) will have different reg->id.
5975  * Two separate bpf_obj_new will also have different reg->id.
5976  * For traditional PTR_TO_MAP_VALUE or PTR_TO_BTF_ID | MEM_ALLOC, the verifier
5977  * clears reg->id after value_or_null->value transition, since the verifier only
5978  * cares about the range of access to valid map value pointer and doesn't care
5979  * about actual address of the map element.
5980  * For maps with 'struct bpf_spin_lock' inside map value the verifier keeps
5981  * reg->id > 0 after value_or_null->value transition. By doing so
5982  * two bpf_map_lookups will be considered two different pointers that
5983  * point to different bpf_spin_locks. Likewise for pointers to allocated objects
5984  * returned from bpf_obj_new.
5985  * The verifier allows taking only one bpf_spin_lock at a time to avoid
5986  * dead-locks.
5987  * Since only one bpf_spin_lock is allowed the checks are simpler than
5988  * reg_is_refcounted() logic. The verifier needs to remember only
5989  * one spin_lock instead of array of acquired_refs.
5990  * cur_state->active_lock remembers which map value element or allocated
5991  * object got locked and clears it after bpf_spin_unlock.
5992  */
5993 static int process_spin_lock(struct bpf_verifier_env *env, int regno,
5994 			     bool is_lock)
5995 {
5996 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
5997 	struct bpf_verifier_state *cur = env->cur_state;
5998 	bool is_const = tnum_is_const(reg->var_off);
5999 	u64 val = reg->var_off.value;
6000 	struct bpf_map *map = NULL;
6001 	struct btf *btf = NULL;
6002 	struct btf_record *rec;
6003 
6004 	if (!is_const) {
6005 		verbose(env,
6006 			"R%d doesn't have constant offset. bpf_spin_lock has to be at the constant offset\n",
6007 			regno);
6008 		return -EINVAL;
6009 	}
6010 	if (reg->type == PTR_TO_MAP_VALUE) {
6011 		map = reg->map_ptr;
6012 		if (!map->btf) {
6013 			verbose(env,
6014 				"map '%s' has to have BTF in order to use bpf_spin_lock\n",
6015 				map->name);
6016 			return -EINVAL;
6017 		}
6018 	} else {
6019 		btf = reg->btf;
6020 	}
6021 
6022 	rec = reg_btf_record(reg);
6023 	if (!btf_record_has_field(rec, BPF_SPIN_LOCK)) {
6024 		verbose(env, "%s '%s' has no valid bpf_spin_lock\n", map ? "map" : "local",
6025 			map ? map->name : "kptr");
6026 		return -EINVAL;
6027 	}
6028 	if (rec->spin_lock_off != val + reg->off) {
6029 		verbose(env, "off %lld doesn't point to 'struct bpf_spin_lock' that is at %d\n",
6030 			val + reg->off, rec->spin_lock_off);
6031 		return -EINVAL;
6032 	}
6033 	if (is_lock) {
6034 		if (cur->active_lock.ptr) {
6035 			verbose(env,
6036 				"Locking two bpf_spin_locks are not allowed\n");
6037 			return -EINVAL;
6038 		}
6039 		if (map)
6040 			cur->active_lock.ptr = map;
6041 		else
6042 			cur->active_lock.ptr = btf;
6043 		cur->active_lock.id = reg->id;
6044 	} else {
6045 		struct bpf_func_state *fstate = cur_func(env);
6046 		void *ptr;
6047 		int i;
6048 
6049 		if (map)
6050 			ptr = map;
6051 		else
6052 			ptr = btf;
6053 
6054 		if (!cur->active_lock.ptr) {
6055 			verbose(env, "bpf_spin_unlock without taking a lock\n");
6056 			return -EINVAL;
6057 		}
6058 		if (cur->active_lock.ptr != ptr ||
6059 		    cur->active_lock.id != reg->id) {
6060 			verbose(env, "bpf_spin_unlock of different lock\n");
6061 			return -EINVAL;
6062 		}
6063 		cur->active_lock.ptr = NULL;
6064 		cur->active_lock.id = 0;
6065 
6066 		for (i = fstate->acquired_refs - 1; i >= 0; i--) {
6067 			int err;
6068 
6069 			/* Complain on error because this reference state cannot
6070 			 * be freed before this point, as bpf_spin_lock critical
6071 			 * section does not allow functions that release the
6072 			 * allocated object immediately.
6073 			 */
6074 			if (!fstate->refs[i].release_on_unlock)
6075 				continue;
6076 			err = release_reference(env, fstate->refs[i].id);
6077 			if (err) {
6078 				verbose(env, "failed to release release_on_unlock reference");
6079 				return err;
6080 			}
6081 		}
6082 	}
6083 	return 0;
6084 }
6085 
6086 static int process_timer_func(struct bpf_verifier_env *env, int regno,
6087 			      struct bpf_call_arg_meta *meta)
6088 {
6089 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
6090 	bool is_const = tnum_is_const(reg->var_off);
6091 	struct bpf_map *map = reg->map_ptr;
6092 	u64 val = reg->var_off.value;
6093 
6094 	if (!is_const) {
6095 		verbose(env,
6096 			"R%d doesn't have constant offset. bpf_timer has to be at the constant offset\n",
6097 			regno);
6098 		return -EINVAL;
6099 	}
6100 	if (!map->btf) {
6101 		verbose(env, "map '%s' has to have BTF in order to use bpf_timer\n",
6102 			map->name);
6103 		return -EINVAL;
6104 	}
6105 	if (!btf_record_has_field(map->record, BPF_TIMER)) {
6106 		verbose(env, "map '%s' has no valid bpf_timer\n", map->name);
6107 		return -EINVAL;
6108 	}
6109 	if (map->record->timer_off != val + reg->off) {
6110 		verbose(env, "off %lld doesn't point to 'struct bpf_timer' that is at %d\n",
6111 			val + reg->off, map->record->timer_off);
6112 		return -EINVAL;
6113 	}
6114 	if (meta->map_ptr) {
6115 		verbose(env, "verifier bug. Two map pointers in a timer helper\n");
6116 		return -EFAULT;
6117 	}
6118 	meta->map_uid = reg->map_uid;
6119 	meta->map_ptr = map;
6120 	return 0;
6121 }
6122 
6123 static int process_kptr_func(struct bpf_verifier_env *env, int regno,
6124 			     struct bpf_call_arg_meta *meta)
6125 {
6126 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
6127 	struct bpf_map *map_ptr = reg->map_ptr;
6128 	struct btf_field *kptr_field;
6129 	u32 kptr_off;
6130 
6131 	if (!tnum_is_const(reg->var_off)) {
6132 		verbose(env,
6133 			"R%d doesn't have constant offset. kptr has to be at the constant offset\n",
6134 			regno);
6135 		return -EINVAL;
6136 	}
6137 	if (!map_ptr->btf) {
6138 		verbose(env, "map '%s' has to have BTF in order to use bpf_kptr_xchg\n",
6139 			map_ptr->name);
6140 		return -EINVAL;
6141 	}
6142 	if (!btf_record_has_field(map_ptr->record, BPF_KPTR)) {
6143 		verbose(env, "map '%s' has no valid kptr\n", map_ptr->name);
6144 		return -EINVAL;
6145 	}
6146 
6147 	meta->map_ptr = map_ptr;
6148 	kptr_off = reg->off + reg->var_off.value;
6149 	kptr_field = btf_record_find(map_ptr->record, kptr_off, BPF_KPTR);
6150 	if (!kptr_field) {
6151 		verbose(env, "off=%d doesn't point to kptr\n", kptr_off);
6152 		return -EACCES;
6153 	}
6154 	if (kptr_field->type != BPF_KPTR_REF) {
6155 		verbose(env, "off=%d kptr isn't referenced kptr\n", kptr_off);
6156 		return -EACCES;
6157 	}
6158 	meta->kptr_field = kptr_field;
6159 	return 0;
6160 }
6161 
6162 /* There are two register types representing a bpf_dynptr, one is PTR_TO_STACK
6163  * which points to a stack slot, and the other is CONST_PTR_TO_DYNPTR.
6164  *
6165  * In both cases we deal with the first 8 bytes, but need to mark the next 8
6166  * bytes as STACK_DYNPTR in case of PTR_TO_STACK. In case of
6167  * CONST_PTR_TO_DYNPTR, we are guaranteed to get the beginning of the object.
6168  *
6169  * Mutability of bpf_dynptr is at two levels, one is at the level of struct
6170  * bpf_dynptr itself, i.e. whether the helper is receiving a pointer to struct
6171  * bpf_dynptr or pointer to const struct bpf_dynptr. In the former case, it can
6172  * mutate the view of the dynptr and also possibly destroy it. In the latter
6173  * case, it cannot mutate the bpf_dynptr itself but it can still mutate the
6174  * memory that dynptr points to.
6175  *
6176  * The verifier will keep track both levels of mutation (bpf_dynptr's in
6177  * reg->type and the memory's in reg->dynptr.type), but there is no support for
6178  * readonly dynptr view yet, hence only the first case is tracked and checked.
6179  *
6180  * This is consistent with how C applies the const modifier to a struct object,
6181  * where the pointer itself inside bpf_dynptr becomes const but not what it
6182  * points to.
6183  *
6184  * Helpers which do not mutate the bpf_dynptr set MEM_RDONLY in their argument
6185  * type, and declare it as 'const struct bpf_dynptr *' in their prototype.
6186  */
6187 int process_dynptr_func(struct bpf_verifier_env *env, int regno,
6188 			enum bpf_arg_type arg_type, struct bpf_call_arg_meta *meta)
6189 {
6190 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
6191 	int spi = 0;
6192 
6193 	/* MEM_UNINIT and MEM_RDONLY are exclusive, when applied to an
6194 	 * ARG_PTR_TO_DYNPTR (or ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_*):
6195 	 */
6196 	if ((arg_type & (MEM_UNINIT | MEM_RDONLY)) == (MEM_UNINIT | MEM_RDONLY)) {
6197 		verbose(env, "verifier internal error: misconfigured dynptr helper type flags\n");
6198 		return -EFAULT;
6199 	}
6200 	/* CONST_PTR_TO_DYNPTR already has fixed and var_off as 0 due to
6201 	 * check_func_arg_reg_off's logic. We only need to check offset
6202 	 * and its alignment for PTR_TO_STACK.
6203 	 */
6204 	if (reg->type == PTR_TO_STACK) {
6205 		spi = dynptr_get_spi(env, reg);
6206 		if (spi < 0 && spi != -ERANGE)
6207 			return spi;
6208 	}
6209 
6210 	/*  MEM_UNINIT - Points to memory that is an appropriate candidate for
6211 	 *		 constructing a mutable bpf_dynptr object.
6212 	 *
6213 	 *		 Currently, this is only possible with PTR_TO_STACK
6214 	 *		 pointing to a region of at least 16 bytes which doesn't
6215 	 *		 contain an existing bpf_dynptr.
6216 	 *
6217 	 *  MEM_RDONLY - Points to a initialized bpf_dynptr that will not be
6218 	 *		 mutated or destroyed. However, the memory it points to
6219 	 *		 may be mutated.
6220 	 *
6221 	 *  None       - Points to a initialized dynptr that can be mutated and
6222 	 *		 destroyed, including mutation of the memory it points
6223 	 *		 to.
6224 	 */
6225 	if (arg_type & MEM_UNINIT) {
6226 		if (!is_dynptr_reg_valid_uninit(env, reg, spi)) {
6227 			verbose(env, "Dynptr has to be an uninitialized dynptr\n");
6228 			return -EINVAL;
6229 		}
6230 
6231 		/* We only support one dynptr being uninitialized at the moment,
6232 		 * which is sufficient for the helper functions we have right now.
6233 		 */
6234 		if (meta->uninit_dynptr_regno) {
6235 			verbose(env, "verifier internal error: multiple uninitialized dynptr args\n");
6236 			return -EFAULT;
6237 		}
6238 
6239 		meta->uninit_dynptr_regno = regno;
6240 	} else /* MEM_RDONLY and None case from above */ {
6241 		int err;
6242 
6243 		/* For the reg->type == PTR_TO_STACK case, bpf_dynptr is never const */
6244 		if (reg->type == CONST_PTR_TO_DYNPTR && !(arg_type & MEM_RDONLY)) {
6245 			verbose(env, "cannot pass pointer to const bpf_dynptr, the helper mutates it\n");
6246 			return -EINVAL;
6247 		}
6248 
6249 		if (!is_dynptr_reg_valid_init(env, reg, spi)) {
6250 			verbose(env,
6251 				"Expected an initialized dynptr as arg #%d\n",
6252 				regno);
6253 			return -EINVAL;
6254 		}
6255 
6256 		/* Fold modifiers (in this case, MEM_RDONLY) when checking expected type */
6257 		if (!is_dynptr_type_expected(env, reg, arg_type & ~MEM_RDONLY)) {
6258 			const char *err_extra = "";
6259 
6260 			switch (arg_type & DYNPTR_TYPE_FLAG_MASK) {
6261 			case DYNPTR_TYPE_LOCAL:
6262 				err_extra = "local";
6263 				break;
6264 			case DYNPTR_TYPE_RINGBUF:
6265 				err_extra = "ringbuf";
6266 				break;
6267 			default:
6268 				err_extra = "<unknown>";
6269 				break;
6270 			}
6271 			verbose(env,
6272 				"Expected a dynptr of type %s as arg #%d\n",
6273 				err_extra, regno);
6274 			return -EINVAL;
6275 		}
6276 
6277 		err = mark_dynptr_read(env, reg);
6278 		if (err)
6279 			return err;
6280 	}
6281 	return 0;
6282 }
6283 
6284 static bool arg_type_is_mem_size(enum bpf_arg_type type)
6285 {
6286 	return type == ARG_CONST_SIZE ||
6287 	       type == ARG_CONST_SIZE_OR_ZERO;
6288 }
6289 
6290 static bool arg_type_is_release(enum bpf_arg_type type)
6291 {
6292 	return type & OBJ_RELEASE;
6293 }
6294 
6295 static bool arg_type_is_dynptr(enum bpf_arg_type type)
6296 {
6297 	return base_type(type) == ARG_PTR_TO_DYNPTR;
6298 }
6299 
6300 static int int_ptr_type_to_size(enum bpf_arg_type type)
6301 {
6302 	if (type == ARG_PTR_TO_INT)
6303 		return sizeof(u32);
6304 	else if (type == ARG_PTR_TO_LONG)
6305 		return sizeof(u64);
6306 
6307 	return -EINVAL;
6308 }
6309 
6310 static int resolve_map_arg_type(struct bpf_verifier_env *env,
6311 				 const struct bpf_call_arg_meta *meta,
6312 				 enum bpf_arg_type *arg_type)
6313 {
6314 	if (!meta->map_ptr) {
6315 		/* kernel subsystem misconfigured verifier */
6316 		verbose(env, "invalid map_ptr to access map->type\n");
6317 		return -EACCES;
6318 	}
6319 
6320 	switch (meta->map_ptr->map_type) {
6321 	case BPF_MAP_TYPE_SOCKMAP:
6322 	case BPF_MAP_TYPE_SOCKHASH:
6323 		if (*arg_type == ARG_PTR_TO_MAP_VALUE) {
6324 			*arg_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON;
6325 		} else {
6326 			verbose(env, "invalid arg_type for sockmap/sockhash\n");
6327 			return -EINVAL;
6328 		}
6329 		break;
6330 	case BPF_MAP_TYPE_BLOOM_FILTER:
6331 		if (meta->func_id == BPF_FUNC_map_peek_elem)
6332 			*arg_type = ARG_PTR_TO_MAP_VALUE;
6333 		break;
6334 	default:
6335 		break;
6336 	}
6337 	return 0;
6338 }
6339 
6340 struct bpf_reg_types {
6341 	const enum bpf_reg_type types[10];
6342 	u32 *btf_id;
6343 };
6344 
6345 static const struct bpf_reg_types sock_types = {
6346 	.types = {
6347 		PTR_TO_SOCK_COMMON,
6348 		PTR_TO_SOCKET,
6349 		PTR_TO_TCP_SOCK,
6350 		PTR_TO_XDP_SOCK,
6351 	},
6352 };
6353 
6354 #ifdef CONFIG_NET
6355 static const struct bpf_reg_types btf_id_sock_common_types = {
6356 	.types = {
6357 		PTR_TO_SOCK_COMMON,
6358 		PTR_TO_SOCKET,
6359 		PTR_TO_TCP_SOCK,
6360 		PTR_TO_XDP_SOCK,
6361 		PTR_TO_BTF_ID,
6362 		PTR_TO_BTF_ID | PTR_TRUSTED,
6363 	},
6364 	.btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
6365 };
6366 #endif
6367 
6368 static const struct bpf_reg_types mem_types = {
6369 	.types = {
6370 		PTR_TO_STACK,
6371 		PTR_TO_PACKET,
6372 		PTR_TO_PACKET_META,
6373 		PTR_TO_MAP_KEY,
6374 		PTR_TO_MAP_VALUE,
6375 		PTR_TO_MEM,
6376 		PTR_TO_MEM | MEM_RINGBUF,
6377 		PTR_TO_BUF,
6378 	},
6379 };
6380 
6381 static const struct bpf_reg_types int_ptr_types = {
6382 	.types = {
6383 		PTR_TO_STACK,
6384 		PTR_TO_PACKET,
6385 		PTR_TO_PACKET_META,
6386 		PTR_TO_MAP_KEY,
6387 		PTR_TO_MAP_VALUE,
6388 	},
6389 };
6390 
6391 static const struct bpf_reg_types spin_lock_types = {
6392 	.types = {
6393 		PTR_TO_MAP_VALUE,
6394 		PTR_TO_BTF_ID | MEM_ALLOC,
6395 	}
6396 };
6397 
6398 static const struct bpf_reg_types fullsock_types = { .types = { PTR_TO_SOCKET } };
6399 static const struct bpf_reg_types scalar_types = { .types = { SCALAR_VALUE } };
6400 static const struct bpf_reg_types context_types = { .types = { PTR_TO_CTX } };
6401 static const struct bpf_reg_types ringbuf_mem_types = { .types = { PTR_TO_MEM | MEM_RINGBUF } };
6402 static const struct bpf_reg_types const_map_ptr_types = { .types = { CONST_PTR_TO_MAP } };
6403 static const struct bpf_reg_types btf_ptr_types = {
6404 	.types = {
6405 		PTR_TO_BTF_ID,
6406 		PTR_TO_BTF_ID | PTR_TRUSTED,
6407 		PTR_TO_BTF_ID | MEM_RCU,
6408 	},
6409 };
6410 static const struct bpf_reg_types percpu_btf_ptr_types = {
6411 	.types = {
6412 		PTR_TO_BTF_ID | MEM_PERCPU,
6413 		PTR_TO_BTF_ID | MEM_PERCPU | PTR_TRUSTED,
6414 	}
6415 };
6416 static const struct bpf_reg_types func_ptr_types = { .types = { PTR_TO_FUNC } };
6417 static const struct bpf_reg_types stack_ptr_types = { .types = { PTR_TO_STACK } };
6418 static const struct bpf_reg_types const_str_ptr_types = { .types = { PTR_TO_MAP_VALUE } };
6419 static const struct bpf_reg_types timer_types = { .types = { PTR_TO_MAP_VALUE } };
6420 static const struct bpf_reg_types kptr_types = { .types = { PTR_TO_MAP_VALUE } };
6421 static const struct bpf_reg_types dynptr_types = {
6422 	.types = {
6423 		PTR_TO_STACK,
6424 		CONST_PTR_TO_DYNPTR,
6425 	}
6426 };
6427 
6428 static const struct bpf_reg_types *compatible_reg_types[__BPF_ARG_TYPE_MAX] = {
6429 	[ARG_PTR_TO_MAP_KEY]		= &mem_types,
6430 	[ARG_PTR_TO_MAP_VALUE]		= &mem_types,
6431 	[ARG_CONST_SIZE]		= &scalar_types,
6432 	[ARG_CONST_SIZE_OR_ZERO]	= &scalar_types,
6433 	[ARG_CONST_ALLOC_SIZE_OR_ZERO]	= &scalar_types,
6434 	[ARG_CONST_MAP_PTR]		= &const_map_ptr_types,
6435 	[ARG_PTR_TO_CTX]		= &context_types,
6436 	[ARG_PTR_TO_SOCK_COMMON]	= &sock_types,
6437 #ifdef CONFIG_NET
6438 	[ARG_PTR_TO_BTF_ID_SOCK_COMMON]	= &btf_id_sock_common_types,
6439 #endif
6440 	[ARG_PTR_TO_SOCKET]		= &fullsock_types,
6441 	[ARG_PTR_TO_BTF_ID]		= &btf_ptr_types,
6442 	[ARG_PTR_TO_SPIN_LOCK]		= &spin_lock_types,
6443 	[ARG_PTR_TO_MEM]		= &mem_types,
6444 	[ARG_PTR_TO_RINGBUF_MEM]	= &ringbuf_mem_types,
6445 	[ARG_PTR_TO_INT]		= &int_ptr_types,
6446 	[ARG_PTR_TO_LONG]		= &int_ptr_types,
6447 	[ARG_PTR_TO_PERCPU_BTF_ID]	= &percpu_btf_ptr_types,
6448 	[ARG_PTR_TO_FUNC]		= &func_ptr_types,
6449 	[ARG_PTR_TO_STACK]		= &stack_ptr_types,
6450 	[ARG_PTR_TO_CONST_STR]		= &const_str_ptr_types,
6451 	[ARG_PTR_TO_TIMER]		= &timer_types,
6452 	[ARG_PTR_TO_KPTR]		= &kptr_types,
6453 	[ARG_PTR_TO_DYNPTR]		= &dynptr_types,
6454 };
6455 
6456 static int check_reg_type(struct bpf_verifier_env *env, u32 regno,
6457 			  enum bpf_arg_type arg_type,
6458 			  const u32 *arg_btf_id,
6459 			  struct bpf_call_arg_meta *meta)
6460 {
6461 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
6462 	enum bpf_reg_type expected, type = reg->type;
6463 	const struct bpf_reg_types *compatible;
6464 	int i, j;
6465 
6466 	compatible = compatible_reg_types[base_type(arg_type)];
6467 	if (!compatible) {
6468 		verbose(env, "verifier internal error: unsupported arg type %d\n", arg_type);
6469 		return -EFAULT;
6470 	}
6471 
6472 	/* ARG_PTR_TO_MEM + RDONLY is compatible with PTR_TO_MEM and PTR_TO_MEM + RDONLY,
6473 	 * but ARG_PTR_TO_MEM is compatible only with PTR_TO_MEM and NOT with PTR_TO_MEM + RDONLY
6474 	 *
6475 	 * Same for MAYBE_NULL:
6476 	 *
6477 	 * ARG_PTR_TO_MEM + MAYBE_NULL is compatible with PTR_TO_MEM and PTR_TO_MEM + MAYBE_NULL,
6478 	 * but ARG_PTR_TO_MEM is compatible only with PTR_TO_MEM but NOT with PTR_TO_MEM + MAYBE_NULL
6479 	 *
6480 	 * Therefore we fold these flags depending on the arg_type before comparison.
6481 	 */
6482 	if (arg_type & MEM_RDONLY)
6483 		type &= ~MEM_RDONLY;
6484 	if (arg_type & PTR_MAYBE_NULL)
6485 		type &= ~PTR_MAYBE_NULL;
6486 
6487 	for (i = 0; i < ARRAY_SIZE(compatible->types); i++) {
6488 		expected = compatible->types[i];
6489 		if (expected == NOT_INIT)
6490 			break;
6491 
6492 		if (type == expected)
6493 			goto found;
6494 	}
6495 
6496 	verbose(env, "R%d type=%s expected=", regno, reg_type_str(env, reg->type));
6497 	for (j = 0; j + 1 < i; j++)
6498 		verbose(env, "%s, ", reg_type_str(env, compatible->types[j]));
6499 	verbose(env, "%s\n", reg_type_str(env, compatible->types[j]));
6500 	return -EACCES;
6501 
6502 found:
6503 	if (reg->type == PTR_TO_BTF_ID || reg->type & PTR_TRUSTED) {
6504 		/* For bpf_sk_release, it needs to match against first member
6505 		 * 'struct sock_common', hence make an exception for it. This
6506 		 * allows bpf_sk_release to work for multiple socket types.
6507 		 */
6508 		bool strict_type_match = arg_type_is_release(arg_type) &&
6509 					 meta->func_id != BPF_FUNC_sk_release;
6510 
6511 		if (!arg_btf_id) {
6512 			if (!compatible->btf_id) {
6513 				verbose(env, "verifier internal error: missing arg compatible BTF ID\n");
6514 				return -EFAULT;
6515 			}
6516 			arg_btf_id = compatible->btf_id;
6517 		}
6518 
6519 		if (meta->func_id == BPF_FUNC_kptr_xchg) {
6520 			if (map_kptr_match_type(env, meta->kptr_field, reg, regno))
6521 				return -EACCES;
6522 		} else {
6523 			if (arg_btf_id == BPF_PTR_POISON) {
6524 				verbose(env, "verifier internal error:");
6525 				verbose(env, "R%d has non-overwritten BPF_PTR_POISON type\n",
6526 					regno);
6527 				return -EACCES;
6528 			}
6529 
6530 			if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, reg->off,
6531 						  btf_vmlinux, *arg_btf_id,
6532 						  strict_type_match)) {
6533 				verbose(env, "R%d is of type %s but %s is expected\n",
6534 					regno, kernel_type_name(reg->btf, reg->btf_id),
6535 					kernel_type_name(btf_vmlinux, *arg_btf_id));
6536 				return -EACCES;
6537 			}
6538 		}
6539 	} else if (type_is_alloc(reg->type)) {
6540 		if (meta->func_id != BPF_FUNC_spin_lock && meta->func_id != BPF_FUNC_spin_unlock) {
6541 			verbose(env, "verifier internal error: unimplemented handling of MEM_ALLOC\n");
6542 			return -EFAULT;
6543 		}
6544 	}
6545 
6546 	return 0;
6547 }
6548 
6549 int check_func_arg_reg_off(struct bpf_verifier_env *env,
6550 			   const struct bpf_reg_state *reg, int regno,
6551 			   enum bpf_arg_type arg_type)
6552 {
6553 	u32 type = reg->type;
6554 
6555 	/* When referenced register is passed to release function, its fixed
6556 	 * offset must be 0.
6557 	 *
6558 	 * We will check arg_type_is_release reg has ref_obj_id when storing
6559 	 * meta->release_regno.
6560 	 */
6561 	if (arg_type_is_release(arg_type)) {
6562 		/* ARG_PTR_TO_DYNPTR with OBJ_RELEASE is a bit special, as it
6563 		 * may not directly point to the object being released, but to
6564 		 * dynptr pointing to such object, which might be at some offset
6565 		 * on the stack. In that case, we simply to fallback to the
6566 		 * default handling.
6567 		 */
6568 		if (arg_type_is_dynptr(arg_type) && type == PTR_TO_STACK)
6569 			return 0;
6570 		/* Doing check_ptr_off_reg check for the offset will catch this
6571 		 * because fixed_off_ok is false, but checking here allows us
6572 		 * to give the user a better error message.
6573 		 */
6574 		if (reg->off) {
6575 			verbose(env, "R%d must have zero offset when passed to release func or trusted arg to kfunc\n",
6576 				regno);
6577 			return -EINVAL;
6578 		}
6579 		return __check_ptr_off_reg(env, reg, regno, false);
6580 	}
6581 
6582 	switch (type) {
6583 	/* Pointer types where both fixed and variable offset is explicitly allowed: */
6584 	case PTR_TO_STACK:
6585 	case PTR_TO_PACKET:
6586 	case PTR_TO_PACKET_META:
6587 	case PTR_TO_MAP_KEY:
6588 	case PTR_TO_MAP_VALUE:
6589 	case PTR_TO_MEM:
6590 	case PTR_TO_MEM | MEM_RDONLY:
6591 	case PTR_TO_MEM | MEM_RINGBUF:
6592 	case PTR_TO_BUF:
6593 	case PTR_TO_BUF | MEM_RDONLY:
6594 	case SCALAR_VALUE:
6595 		return 0;
6596 	/* All the rest must be rejected, except PTR_TO_BTF_ID which allows
6597 	 * fixed offset.
6598 	 */
6599 	case PTR_TO_BTF_ID:
6600 	case PTR_TO_BTF_ID | MEM_ALLOC:
6601 	case PTR_TO_BTF_ID | PTR_TRUSTED:
6602 	case PTR_TO_BTF_ID | MEM_RCU:
6603 	case PTR_TO_BTF_ID | MEM_ALLOC | PTR_TRUSTED:
6604 		/* When referenced PTR_TO_BTF_ID is passed to release function,
6605 		 * its fixed offset must be 0. In the other cases, fixed offset
6606 		 * can be non-zero. This was already checked above. So pass
6607 		 * fixed_off_ok as true to allow fixed offset for all other
6608 		 * cases. var_off always must be 0 for PTR_TO_BTF_ID, hence we
6609 		 * still need to do checks instead of returning.
6610 		 */
6611 		return __check_ptr_off_reg(env, reg, regno, true);
6612 	default:
6613 		return __check_ptr_off_reg(env, reg, regno, false);
6614 	}
6615 }
6616 
6617 static int dynptr_id(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
6618 {
6619 	struct bpf_func_state *state = func(env, reg);
6620 	int spi;
6621 
6622 	if (reg->type == CONST_PTR_TO_DYNPTR)
6623 		return reg->id;
6624 	spi = dynptr_get_spi(env, reg);
6625 	if (spi < 0)
6626 		return spi;
6627 	return state->stack[spi].spilled_ptr.id;
6628 }
6629 
6630 static int dynptr_ref_obj_id(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
6631 {
6632 	struct bpf_func_state *state = func(env, reg);
6633 	int spi;
6634 
6635 	if (reg->type == CONST_PTR_TO_DYNPTR)
6636 		return reg->ref_obj_id;
6637 	spi = dynptr_get_spi(env, reg);
6638 	if (spi < 0)
6639 		return spi;
6640 	return state->stack[spi].spilled_ptr.ref_obj_id;
6641 }
6642 
6643 static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
6644 			  struct bpf_call_arg_meta *meta,
6645 			  const struct bpf_func_proto *fn)
6646 {
6647 	u32 regno = BPF_REG_1 + arg;
6648 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
6649 	enum bpf_arg_type arg_type = fn->arg_type[arg];
6650 	enum bpf_reg_type type = reg->type;
6651 	u32 *arg_btf_id = NULL;
6652 	int err = 0;
6653 
6654 	if (arg_type == ARG_DONTCARE)
6655 		return 0;
6656 
6657 	err = check_reg_arg(env, regno, SRC_OP);
6658 	if (err)
6659 		return err;
6660 
6661 	if (arg_type == ARG_ANYTHING) {
6662 		if (is_pointer_value(env, regno)) {
6663 			verbose(env, "R%d leaks addr into helper function\n",
6664 				regno);
6665 			return -EACCES;
6666 		}
6667 		return 0;
6668 	}
6669 
6670 	if (type_is_pkt_pointer(type) &&
6671 	    !may_access_direct_pkt_data(env, meta, BPF_READ)) {
6672 		verbose(env, "helper access to the packet is not allowed\n");
6673 		return -EACCES;
6674 	}
6675 
6676 	if (base_type(arg_type) == ARG_PTR_TO_MAP_VALUE) {
6677 		err = resolve_map_arg_type(env, meta, &arg_type);
6678 		if (err)
6679 			return err;
6680 	}
6681 
6682 	if (register_is_null(reg) && type_may_be_null(arg_type))
6683 		/* A NULL register has a SCALAR_VALUE type, so skip
6684 		 * type checking.
6685 		 */
6686 		goto skip_type_check;
6687 
6688 	/* arg_btf_id and arg_size are in a union. */
6689 	if (base_type(arg_type) == ARG_PTR_TO_BTF_ID ||
6690 	    base_type(arg_type) == ARG_PTR_TO_SPIN_LOCK)
6691 		arg_btf_id = fn->arg_btf_id[arg];
6692 
6693 	err = check_reg_type(env, regno, arg_type, arg_btf_id, meta);
6694 	if (err)
6695 		return err;
6696 
6697 	err = check_func_arg_reg_off(env, reg, regno, arg_type);
6698 	if (err)
6699 		return err;
6700 
6701 skip_type_check:
6702 	if (arg_type_is_release(arg_type)) {
6703 		if (arg_type_is_dynptr(arg_type)) {
6704 			struct bpf_func_state *state = func(env, reg);
6705 			int spi;
6706 
6707 			/* Only dynptr created on stack can be released, thus
6708 			 * the get_spi and stack state checks for spilled_ptr
6709 			 * should only be done before process_dynptr_func for
6710 			 * PTR_TO_STACK.
6711 			 */
6712 			if (reg->type == PTR_TO_STACK) {
6713 				spi = dynptr_get_spi(env, reg);
6714 				if (spi < 0 || !state->stack[spi].spilled_ptr.ref_obj_id) {
6715 					verbose(env, "arg %d is an unacquired reference\n", regno);
6716 					return -EINVAL;
6717 				}
6718 			} else {
6719 				verbose(env, "cannot release unowned const bpf_dynptr\n");
6720 				return -EINVAL;
6721 			}
6722 		} else if (!reg->ref_obj_id && !register_is_null(reg)) {
6723 			verbose(env, "R%d must be referenced when passed to release function\n",
6724 				regno);
6725 			return -EINVAL;
6726 		}
6727 		if (meta->release_regno) {
6728 			verbose(env, "verifier internal error: more than one release argument\n");
6729 			return -EFAULT;
6730 		}
6731 		meta->release_regno = regno;
6732 	}
6733 
6734 	if (reg->ref_obj_id) {
6735 		if (meta->ref_obj_id) {
6736 			verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n",
6737 				regno, reg->ref_obj_id,
6738 				meta->ref_obj_id);
6739 			return -EFAULT;
6740 		}
6741 		meta->ref_obj_id = reg->ref_obj_id;
6742 	}
6743 
6744 	switch (base_type(arg_type)) {
6745 	case ARG_CONST_MAP_PTR:
6746 		/* bpf_map_xxx(map_ptr) call: remember that map_ptr */
6747 		if (meta->map_ptr) {
6748 			/* Use map_uid (which is unique id of inner map) to reject:
6749 			 * inner_map1 = bpf_map_lookup_elem(outer_map, key1)
6750 			 * inner_map2 = bpf_map_lookup_elem(outer_map, key2)
6751 			 * if (inner_map1 && inner_map2) {
6752 			 *     timer = bpf_map_lookup_elem(inner_map1);
6753 			 *     if (timer)
6754 			 *         // mismatch would have been allowed
6755 			 *         bpf_timer_init(timer, inner_map2);
6756 			 * }
6757 			 *
6758 			 * Comparing map_ptr is enough to distinguish normal and outer maps.
6759 			 */
6760 			if (meta->map_ptr != reg->map_ptr ||
6761 			    meta->map_uid != reg->map_uid) {
6762 				verbose(env,
6763 					"timer pointer in R1 map_uid=%d doesn't match map pointer in R2 map_uid=%d\n",
6764 					meta->map_uid, reg->map_uid);
6765 				return -EINVAL;
6766 			}
6767 		}
6768 		meta->map_ptr = reg->map_ptr;
6769 		meta->map_uid = reg->map_uid;
6770 		break;
6771 	case ARG_PTR_TO_MAP_KEY:
6772 		/* bpf_map_xxx(..., map_ptr, ..., key) call:
6773 		 * check that [key, key + map->key_size) are within
6774 		 * stack limits and initialized
6775 		 */
6776 		if (!meta->map_ptr) {
6777 			/* in function declaration map_ptr must come before
6778 			 * map_key, so that it's verified and known before
6779 			 * we have to check map_key here. Otherwise it means
6780 			 * that kernel subsystem misconfigured verifier
6781 			 */
6782 			verbose(env, "invalid map_ptr to access map->key\n");
6783 			return -EACCES;
6784 		}
6785 		err = check_helper_mem_access(env, regno,
6786 					      meta->map_ptr->key_size, false,
6787 					      NULL);
6788 		break;
6789 	case ARG_PTR_TO_MAP_VALUE:
6790 		if (type_may_be_null(arg_type) && register_is_null(reg))
6791 			return 0;
6792 
6793 		/* bpf_map_xxx(..., map_ptr, ..., value) call:
6794 		 * check [value, value + map->value_size) validity
6795 		 */
6796 		if (!meta->map_ptr) {
6797 			/* kernel subsystem misconfigured verifier */
6798 			verbose(env, "invalid map_ptr to access map->value\n");
6799 			return -EACCES;
6800 		}
6801 		meta->raw_mode = arg_type & MEM_UNINIT;
6802 		err = check_helper_mem_access(env, regno,
6803 					      meta->map_ptr->value_size, false,
6804 					      meta);
6805 		break;
6806 	case ARG_PTR_TO_PERCPU_BTF_ID:
6807 		if (!reg->btf_id) {
6808 			verbose(env, "Helper has invalid btf_id in R%d\n", regno);
6809 			return -EACCES;
6810 		}
6811 		meta->ret_btf = reg->btf;
6812 		meta->ret_btf_id = reg->btf_id;
6813 		break;
6814 	case ARG_PTR_TO_SPIN_LOCK:
6815 		if (meta->func_id == BPF_FUNC_spin_lock) {
6816 			err = process_spin_lock(env, regno, true);
6817 			if (err)
6818 				return err;
6819 		} else if (meta->func_id == BPF_FUNC_spin_unlock) {
6820 			err = process_spin_lock(env, regno, false);
6821 			if (err)
6822 				return err;
6823 		} else {
6824 			verbose(env, "verifier internal error\n");
6825 			return -EFAULT;
6826 		}
6827 		break;
6828 	case ARG_PTR_TO_TIMER:
6829 		err = process_timer_func(env, regno, meta);
6830 		if (err)
6831 			return err;
6832 		break;
6833 	case ARG_PTR_TO_FUNC:
6834 		meta->subprogno = reg->subprogno;
6835 		break;
6836 	case ARG_PTR_TO_MEM:
6837 		/* The access to this pointer is only checked when we hit the
6838 		 * next is_mem_size argument below.
6839 		 */
6840 		meta->raw_mode = arg_type & MEM_UNINIT;
6841 		if (arg_type & MEM_FIXED_SIZE) {
6842 			err = check_helper_mem_access(env, regno,
6843 						      fn->arg_size[arg], false,
6844 						      meta);
6845 		}
6846 		break;
6847 	case ARG_CONST_SIZE:
6848 		err = check_mem_size_reg(env, reg, regno, false, meta);
6849 		break;
6850 	case ARG_CONST_SIZE_OR_ZERO:
6851 		err = check_mem_size_reg(env, reg, regno, true, meta);
6852 		break;
6853 	case ARG_PTR_TO_DYNPTR:
6854 		err = process_dynptr_func(env, regno, arg_type, meta);
6855 		if (err)
6856 			return err;
6857 		break;
6858 	case ARG_CONST_ALLOC_SIZE_OR_ZERO:
6859 		if (!tnum_is_const(reg->var_off)) {
6860 			verbose(env, "R%d is not a known constant'\n",
6861 				regno);
6862 			return -EACCES;
6863 		}
6864 		meta->mem_size = reg->var_off.value;
6865 		err = mark_chain_precision(env, regno);
6866 		if (err)
6867 			return err;
6868 		break;
6869 	case ARG_PTR_TO_INT:
6870 	case ARG_PTR_TO_LONG:
6871 	{
6872 		int size = int_ptr_type_to_size(arg_type);
6873 
6874 		err = check_helper_mem_access(env, regno, size, false, meta);
6875 		if (err)
6876 			return err;
6877 		err = check_ptr_alignment(env, reg, 0, size, true);
6878 		break;
6879 	}
6880 	case ARG_PTR_TO_CONST_STR:
6881 	{
6882 		struct bpf_map *map = reg->map_ptr;
6883 		int map_off;
6884 		u64 map_addr;
6885 		char *str_ptr;
6886 
6887 		if (!bpf_map_is_rdonly(map)) {
6888 			verbose(env, "R%d does not point to a readonly map'\n", regno);
6889 			return -EACCES;
6890 		}
6891 
6892 		if (!tnum_is_const(reg->var_off)) {
6893 			verbose(env, "R%d is not a constant address'\n", regno);
6894 			return -EACCES;
6895 		}
6896 
6897 		if (!map->ops->map_direct_value_addr) {
6898 			verbose(env, "no direct value access support for this map type\n");
6899 			return -EACCES;
6900 		}
6901 
6902 		err = check_map_access(env, regno, reg->off,
6903 				       map->value_size - reg->off, false,
6904 				       ACCESS_HELPER);
6905 		if (err)
6906 			return err;
6907 
6908 		map_off = reg->off + reg->var_off.value;
6909 		err = map->ops->map_direct_value_addr(map, &map_addr, map_off);
6910 		if (err) {
6911 			verbose(env, "direct value access on string failed\n");
6912 			return err;
6913 		}
6914 
6915 		str_ptr = (char *)(long)(map_addr);
6916 		if (!strnchr(str_ptr + map_off, map->value_size - map_off, 0)) {
6917 			verbose(env, "string is not zero-terminated\n");
6918 			return -EINVAL;
6919 		}
6920 		break;
6921 	}
6922 	case ARG_PTR_TO_KPTR:
6923 		err = process_kptr_func(env, regno, meta);
6924 		if (err)
6925 			return err;
6926 		break;
6927 	}
6928 
6929 	return err;
6930 }
6931 
6932 static bool may_update_sockmap(struct bpf_verifier_env *env, int func_id)
6933 {
6934 	enum bpf_attach_type eatype = env->prog->expected_attach_type;
6935 	enum bpf_prog_type type = resolve_prog_type(env->prog);
6936 
6937 	if (func_id != BPF_FUNC_map_update_elem)
6938 		return false;
6939 
6940 	/* It's not possible to get access to a locked struct sock in these
6941 	 * contexts, so updating is safe.
6942 	 */
6943 	switch (type) {
6944 	case BPF_PROG_TYPE_TRACING:
6945 		if (eatype == BPF_TRACE_ITER)
6946 			return true;
6947 		break;
6948 	case BPF_PROG_TYPE_SOCKET_FILTER:
6949 	case BPF_PROG_TYPE_SCHED_CLS:
6950 	case BPF_PROG_TYPE_SCHED_ACT:
6951 	case BPF_PROG_TYPE_XDP:
6952 	case BPF_PROG_TYPE_SK_REUSEPORT:
6953 	case BPF_PROG_TYPE_FLOW_DISSECTOR:
6954 	case BPF_PROG_TYPE_SK_LOOKUP:
6955 		return true;
6956 	default:
6957 		break;
6958 	}
6959 
6960 	verbose(env, "cannot update sockmap in this context\n");
6961 	return false;
6962 }
6963 
6964 static bool allow_tail_call_in_subprogs(struct bpf_verifier_env *env)
6965 {
6966 	return env->prog->jit_requested &&
6967 	       bpf_jit_supports_subprog_tailcalls();
6968 }
6969 
6970 static int check_map_func_compatibility(struct bpf_verifier_env *env,
6971 					struct bpf_map *map, int func_id)
6972 {
6973 	if (!map)
6974 		return 0;
6975 
6976 	/* We need a two way check, first is from map perspective ... */
6977 	switch (map->map_type) {
6978 	case BPF_MAP_TYPE_PROG_ARRAY:
6979 		if (func_id != BPF_FUNC_tail_call)
6980 			goto error;
6981 		break;
6982 	case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
6983 		if (func_id != BPF_FUNC_perf_event_read &&
6984 		    func_id != BPF_FUNC_perf_event_output &&
6985 		    func_id != BPF_FUNC_skb_output &&
6986 		    func_id != BPF_FUNC_perf_event_read_value &&
6987 		    func_id != BPF_FUNC_xdp_output)
6988 			goto error;
6989 		break;
6990 	case BPF_MAP_TYPE_RINGBUF:
6991 		if (func_id != BPF_FUNC_ringbuf_output &&
6992 		    func_id != BPF_FUNC_ringbuf_reserve &&
6993 		    func_id != BPF_FUNC_ringbuf_query &&
6994 		    func_id != BPF_FUNC_ringbuf_reserve_dynptr &&
6995 		    func_id != BPF_FUNC_ringbuf_submit_dynptr &&
6996 		    func_id != BPF_FUNC_ringbuf_discard_dynptr)
6997 			goto error;
6998 		break;
6999 	case BPF_MAP_TYPE_USER_RINGBUF:
7000 		if (func_id != BPF_FUNC_user_ringbuf_drain)
7001 			goto error;
7002 		break;
7003 	case BPF_MAP_TYPE_STACK_TRACE:
7004 		if (func_id != BPF_FUNC_get_stackid)
7005 			goto error;
7006 		break;
7007 	case BPF_MAP_TYPE_CGROUP_ARRAY:
7008 		if (func_id != BPF_FUNC_skb_under_cgroup &&
7009 		    func_id != BPF_FUNC_current_task_under_cgroup)
7010 			goto error;
7011 		break;
7012 	case BPF_MAP_TYPE_CGROUP_STORAGE:
7013 	case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
7014 		if (func_id != BPF_FUNC_get_local_storage)
7015 			goto error;
7016 		break;
7017 	case BPF_MAP_TYPE_DEVMAP:
7018 	case BPF_MAP_TYPE_DEVMAP_HASH:
7019 		if (func_id != BPF_FUNC_redirect_map &&
7020 		    func_id != BPF_FUNC_map_lookup_elem)
7021 			goto error;
7022 		break;
7023 	/* Restrict bpf side of cpumap and xskmap, open when use-cases
7024 	 * appear.
7025 	 */
7026 	case BPF_MAP_TYPE_CPUMAP:
7027 		if (func_id != BPF_FUNC_redirect_map)
7028 			goto error;
7029 		break;
7030 	case BPF_MAP_TYPE_XSKMAP:
7031 		if (func_id != BPF_FUNC_redirect_map &&
7032 		    func_id != BPF_FUNC_map_lookup_elem)
7033 			goto error;
7034 		break;
7035 	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
7036 	case BPF_MAP_TYPE_HASH_OF_MAPS:
7037 		if (func_id != BPF_FUNC_map_lookup_elem)
7038 			goto error;
7039 		break;
7040 	case BPF_MAP_TYPE_SOCKMAP:
7041 		if (func_id != BPF_FUNC_sk_redirect_map &&
7042 		    func_id != BPF_FUNC_sock_map_update &&
7043 		    func_id != BPF_FUNC_map_delete_elem &&
7044 		    func_id != BPF_FUNC_msg_redirect_map &&
7045 		    func_id != BPF_FUNC_sk_select_reuseport &&
7046 		    func_id != BPF_FUNC_map_lookup_elem &&
7047 		    !may_update_sockmap(env, func_id))
7048 			goto error;
7049 		break;
7050 	case BPF_MAP_TYPE_SOCKHASH:
7051 		if (func_id != BPF_FUNC_sk_redirect_hash &&
7052 		    func_id != BPF_FUNC_sock_hash_update &&
7053 		    func_id != BPF_FUNC_map_delete_elem &&
7054 		    func_id != BPF_FUNC_msg_redirect_hash &&
7055 		    func_id != BPF_FUNC_sk_select_reuseport &&
7056 		    func_id != BPF_FUNC_map_lookup_elem &&
7057 		    !may_update_sockmap(env, func_id))
7058 			goto error;
7059 		break;
7060 	case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
7061 		if (func_id != BPF_FUNC_sk_select_reuseport)
7062 			goto error;
7063 		break;
7064 	case BPF_MAP_TYPE_QUEUE:
7065 	case BPF_MAP_TYPE_STACK:
7066 		if (func_id != BPF_FUNC_map_peek_elem &&
7067 		    func_id != BPF_FUNC_map_pop_elem &&
7068 		    func_id != BPF_FUNC_map_push_elem)
7069 			goto error;
7070 		break;
7071 	case BPF_MAP_TYPE_SK_STORAGE:
7072 		if (func_id != BPF_FUNC_sk_storage_get &&
7073 		    func_id != BPF_FUNC_sk_storage_delete)
7074 			goto error;
7075 		break;
7076 	case BPF_MAP_TYPE_INODE_STORAGE:
7077 		if (func_id != BPF_FUNC_inode_storage_get &&
7078 		    func_id != BPF_FUNC_inode_storage_delete)
7079 			goto error;
7080 		break;
7081 	case BPF_MAP_TYPE_TASK_STORAGE:
7082 		if (func_id != BPF_FUNC_task_storage_get &&
7083 		    func_id != BPF_FUNC_task_storage_delete)
7084 			goto error;
7085 		break;
7086 	case BPF_MAP_TYPE_CGRP_STORAGE:
7087 		if (func_id != BPF_FUNC_cgrp_storage_get &&
7088 		    func_id != BPF_FUNC_cgrp_storage_delete)
7089 			goto error;
7090 		break;
7091 	case BPF_MAP_TYPE_BLOOM_FILTER:
7092 		if (func_id != BPF_FUNC_map_peek_elem &&
7093 		    func_id != BPF_FUNC_map_push_elem)
7094 			goto error;
7095 		break;
7096 	default:
7097 		break;
7098 	}
7099 
7100 	/* ... and second from the function itself. */
7101 	switch (func_id) {
7102 	case BPF_FUNC_tail_call:
7103 		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
7104 			goto error;
7105 		if (env->subprog_cnt > 1 && !allow_tail_call_in_subprogs(env)) {
7106 			verbose(env, "tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls\n");
7107 			return -EINVAL;
7108 		}
7109 		break;
7110 	case BPF_FUNC_perf_event_read:
7111 	case BPF_FUNC_perf_event_output:
7112 	case BPF_FUNC_perf_event_read_value:
7113 	case BPF_FUNC_skb_output:
7114 	case BPF_FUNC_xdp_output:
7115 		if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
7116 			goto error;
7117 		break;
7118 	case BPF_FUNC_ringbuf_output:
7119 	case BPF_FUNC_ringbuf_reserve:
7120 	case BPF_FUNC_ringbuf_query:
7121 	case BPF_FUNC_ringbuf_reserve_dynptr:
7122 	case BPF_FUNC_ringbuf_submit_dynptr:
7123 	case BPF_FUNC_ringbuf_discard_dynptr:
7124 		if (map->map_type != BPF_MAP_TYPE_RINGBUF)
7125 			goto error;
7126 		break;
7127 	case BPF_FUNC_user_ringbuf_drain:
7128 		if (map->map_type != BPF_MAP_TYPE_USER_RINGBUF)
7129 			goto error;
7130 		break;
7131 	case BPF_FUNC_get_stackid:
7132 		if (map->map_type != BPF_MAP_TYPE_STACK_TRACE)
7133 			goto error;
7134 		break;
7135 	case BPF_FUNC_current_task_under_cgroup:
7136 	case BPF_FUNC_skb_under_cgroup:
7137 		if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY)
7138 			goto error;
7139 		break;
7140 	case BPF_FUNC_redirect_map:
7141 		if (map->map_type != BPF_MAP_TYPE_DEVMAP &&
7142 		    map->map_type != BPF_MAP_TYPE_DEVMAP_HASH &&
7143 		    map->map_type != BPF_MAP_TYPE_CPUMAP &&
7144 		    map->map_type != BPF_MAP_TYPE_XSKMAP)
7145 			goto error;
7146 		break;
7147 	case BPF_FUNC_sk_redirect_map:
7148 	case BPF_FUNC_msg_redirect_map:
7149 	case BPF_FUNC_sock_map_update:
7150 		if (map->map_type != BPF_MAP_TYPE_SOCKMAP)
7151 			goto error;
7152 		break;
7153 	case BPF_FUNC_sk_redirect_hash:
7154 	case BPF_FUNC_msg_redirect_hash:
7155 	case BPF_FUNC_sock_hash_update:
7156 		if (map->map_type != BPF_MAP_TYPE_SOCKHASH)
7157 			goto error;
7158 		break;
7159 	case BPF_FUNC_get_local_storage:
7160 		if (map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
7161 		    map->map_type != BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
7162 			goto error;
7163 		break;
7164 	case BPF_FUNC_sk_select_reuseport:
7165 		if (map->map_type != BPF_MAP_TYPE_REUSEPORT_SOCKARRAY &&
7166 		    map->map_type != BPF_MAP_TYPE_SOCKMAP &&
7167 		    map->map_type != BPF_MAP_TYPE_SOCKHASH)
7168 			goto error;
7169 		break;
7170 	case BPF_FUNC_map_pop_elem:
7171 		if (map->map_type != BPF_MAP_TYPE_QUEUE &&
7172 		    map->map_type != BPF_MAP_TYPE_STACK)
7173 			goto error;
7174 		break;
7175 	case BPF_FUNC_map_peek_elem:
7176 	case BPF_FUNC_map_push_elem:
7177 		if (map->map_type != BPF_MAP_TYPE_QUEUE &&
7178 		    map->map_type != BPF_MAP_TYPE_STACK &&
7179 		    map->map_type != BPF_MAP_TYPE_BLOOM_FILTER)
7180 			goto error;
7181 		break;
7182 	case BPF_FUNC_map_lookup_percpu_elem:
7183 		if (map->map_type != BPF_MAP_TYPE_PERCPU_ARRAY &&
7184 		    map->map_type != BPF_MAP_TYPE_PERCPU_HASH &&
7185 		    map->map_type != BPF_MAP_TYPE_LRU_PERCPU_HASH)
7186 			goto error;
7187 		break;
7188 	case BPF_FUNC_sk_storage_get:
7189 	case BPF_FUNC_sk_storage_delete:
7190 		if (map->map_type != BPF_MAP_TYPE_SK_STORAGE)
7191 			goto error;
7192 		break;
7193 	case BPF_FUNC_inode_storage_get:
7194 	case BPF_FUNC_inode_storage_delete:
7195 		if (map->map_type != BPF_MAP_TYPE_INODE_STORAGE)
7196 			goto error;
7197 		break;
7198 	case BPF_FUNC_task_storage_get:
7199 	case BPF_FUNC_task_storage_delete:
7200 		if (map->map_type != BPF_MAP_TYPE_TASK_STORAGE)
7201 			goto error;
7202 		break;
7203 	case BPF_FUNC_cgrp_storage_get:
7204 	case BPF_FUNC_cgrp_storage_delete:
7205 		if (map->map_type != BPF_MAP_TYPE_CGRP_STORAGE)
7206 			goto error;
7207 		break;
7208 	default:
7209 		break;
7210 	}
7211 
7212 	return 0;
7213 error:
7214 	verbose(env, "cannot pass map_type %d into func %s#%d\n",
7215 		map->map_type, func_id_name(func_id), func_id);
7216 	return -EINVAL;
7217 }
7218 
7219 static bool check_raw_mode_ok(const struct bpf_func_proto *fn)
7220 {
7221 	int count = 0;
7222 
7223 	if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM)
7224 		count++;
7225 	if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM)
7226 		count++;
7227 	if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM)
7228 		count++;
7229 	if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM)
7230 		count++;
7231 	if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM)
7232 		count++;
7233 
7234 	/* We only support one arg being in raw mode at the moment,
7235 	 * which is sufficient for the helper functions we have
7236 	 * right now.
7237 	 */
7238 	return count <= 1;
7239 }
7240 
7241 static bool check_args_pair_invalid(const struct bpf_func_proto *fn, int arg)
7242 {
7243 	bool is_fixed = fn->arg_type[arg] & MEM_FIXED_SIZE;
7244 	bool has_size = fn->arg_size[arg] != 0;
7245 	bool is_next_size = false;
7246 
7247 	if (arg + 1 < ARRAY_SIZE(fn->arg_type))
7248 		is_next_size = arg_type_is_mem_size(fn->arg_type[arg + 1]);
7249 
7250 	if (base_type(fn->arg_type[arg]) != ARG_PTR_TO_MEM)
7251 		return is_next_size;
7252 
7253 	return has_size == is_next_size || is_next_size == is_fixed;
7254 }
7255 
7256 static bool check_arg_pair_ok(const struct bpf_func_proto *fn)
7257 {
7258 	/* bpf_xxx(..., buf, len) call will access 'len'
7259 	 * bytes from memory 'buf'. Both arg types need
7260 	 * to be paired, so make sure there's no buggy
7261 	 * helper function specification.
7262 	 */
7263 	if (arg_type_is_mem_size(fn->arg1_type) ||
7264 	    check_args_pair_invalid(fn, 0) ||
7265 	    check_args_pair_invalid(fn, 1) ||
7266 	    check_args_pair_invalid(fn, 2) ||
7267 	    check_args_pair_invalid(fn, 3) ||
7268 	    check_args_pair_invalid(fn, 4))
7269 		return false;
7270 
7271 	return true;
7272 }
7273 
7274 static bool check_btf_id_ok(const struct bpf_func_proto *fn)
7275 {
7276 	int i;
7277 
7278 	for (i = 0; i < ARRAY_SIZE(fn->arg_type); i++) {
7279 		if (base_type(fn->arg_type[i]) == ARG_PTR_TO_BTF_ID)
7280 			return !!fn->arg_btf_id[i];
7281 		if (base_type(fn->arg_type[i]) == ARG_PTR_TO_SPIN_LOCK)
7282 			return fn->arg_btf_id[i] == BPF_PTR_POISON;
7283 		if (base_type(fn->arg_type[i]) != ARG_PTR_TO_BTF_ID && fn->arg_btf_id[i] &&
7284 		    /* arg_btf_id and arg_size are in a union. */
7285 		    (base_type(fn->arg_type[i]) != ARG_PTR_TO_MEM ||
7286 		     !(fn->arg_type[i] & MEM_FIXED_SIZE)))
7287 			return false;
7288 	}
7289 
7290 	return true;
7291 }
7292 
7293 static int check_func_proto(const struct bpf_func_proto *fn, int func_id)
7294 {
7295 	return check_raw_mode_ok(fn) &&
7296 	       check_arg_pair_ok(fn) &&
7297 	       check_btf_id_ok(fn) ? 0 : -EINVAL;
7298 }
7299 
7300 /* Packet data might have moved, any old PTR_TO_PACKET[_META,_END]
7301  * are now invalid, so turn them into unknown SCALAR_VALUE.
7302  */
7303 static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
7304 {
7305 	struct bpf_func_state *state;
7306 	struct bpf_reg_state *reg;
7307 
7308 	bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({
7309 		if (reg_is_pkt_pointer_any(reg))
7310 			__mark_reg_unknown(env, reg);
7311 	}));
7312 }
7313 
7314 enum {
7315 	AT_PKT_END = -1,
7316 	BEYOND_PKT_END = -2,
7317 };
7318 
7319 static void mark_pkt_end(struct bpf_verifier_state *vstate, int regn, bool range_open)
7320 {
7321 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
7322 	struct bpf_reg_state *reg = &state->regs[regn];
7323 
7324 	if (reg->type != PTR_TO_PACKET)
7325 		/* PTR_TO_PACKET_META is not supported yet */
7326 		return;
7327 
7328 	/* The 'reg' is pkt > pkt_end or pkt >= pkt_end.
7329 	 * How far beyond pkt_end it goes is unknown.
7330 	 * if (!range_open) it's the case of pkt >= pkt_end
7331 	 * if (range_open) it's the case of pkt > pkt_end
7332 	 * hence this pointer is at least 1 byte bigger than pkt_end
7333 	 */
7334 	if (range_open)
7335 		reg->range = BEYOND_PKT_END;
7336 	else
7337 		reg->range = AT_PKT_END;
7338 }
7339 
7340 /* The pointer with the specified id has released its reference to kernel
7341  * resources. Identify all copies of the same pointer and clear the reference.
7342  */
7343 static int release_reference(struct bpf_verifier_env *env,
7344 			     int ref_obj_id)
7345 {
7346 	struct bpf_func_state *state;
7347 	struct bpf_reg_state *reg;
7348 	int err;
7349 
7350 	err = release_reference_state(cur_func(env), ref_obj_id);
7351 	if (err)
7352 		return err;
7353 
7354 	bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({
7355 		if (reg->ref_obj_id == ref_obj_id) {
7356 			if (!env->allow_ptr_leaks)
7357 				__mark_reg_not_init(env, reg);
7358 			else
7359 				__mark_reg_unknown(env, reg);
7360 		}
7361 	}));
7362 
7363 	return 0;
7364 }
7365 
7366 static void clear_caller_saved_regs(struct bpf_verifier_env *env,
7367 				    struct bpf_reg_state *regs)
7368 {
7369 	int i;
7370 
7371 	/* after the call registers r0 - r5 were scratched */
7372 	for (i = 0; i < CALLER_SAVED_REGS; i++) {
7373 		mark_reg_not_init(env, regs, caller_saved[i]);
7374 		check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
7375 	}
7376 }
7377 
7378 typedef int (*set_callee_state_fn)(struct bpf_verifier_env *env,
7379 				   struct bpf_func_state *caller,
7380 				   struct bpf_func_state *callee,
7381 				   int insn_idx);
7382 
7383 static int set_callee_state(struct bpf_verifier_env *env,
7384 			    struct bpf_func_state *caller,
7385 			    struct bpf_func_state *callee, int insn_idx);
7386 
7387 static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
7388 			     int *insn_idx, int subprog,
7389 			     set_callee_state_fn set_callee_state_cb)
7390 {
7391 	struct bpf_verifier_state *state = env->cur_state;
7392 	struct bpf_func_info_aux *func_info_aux;
7393 	struct bpf_func_state *caller, *callee;
7394 	int err;
7395 	bool is_global = false;
7396 
7397 	if (state->curframe + 1 >= MAX_CALL_FRAMES) {
7398 		verbose(env, "the call stack of %d frames is too deep\n",
7399 			state->curframe + 2);
7400 		return -E2BIG;
7401 	}
7402 
7403 	caller = state->frame[state->curframe];
7404 	if (state->frame[state->curframe + 1]) {
7405 		verbose(env, "verifier bug. Frame %d already allocated\n",
7406 			state->curframe + 1);
7407 		return -EFAULT;
7408 	}
7409 
7410 	func_info_aux = env->prog->aux->func_info_aux;
7411 	if (func_info_aux)
7412 		is_global = func_info_aux[subprog].linkage == BTF_FUNC_GLOBAL;
7413 	err = btf_check_subprog_call(env, subprog, caller->regs);
7414 	if (err == -EFAULT)
7415 		return err;
7416 	if (is_global) {
7417 		if (err) {
7418 			verbose(env, "Caller passes invalid args into func#%d\n",
7419 				subprog);
7420 			return err;
7421 		} else {
7422 			if (env->log.level & BPF_LOG_LEVEL)
7423 				verbose(env,
7424 					"Func#%d is global and valid. Skipping.\n",
7425 					subprog);
7426 			clear_caller_saved_regs(env, caller->regs);
7427 
7428 			/* All global functions return a 64-bit SCALAR_VALUE */
7429 			mark_reg_unknown(env, caller->regs, BPF_REG_0);
7430 			caller->regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG;
7431 
7432 			/* continue with next insn after call */
7433 			return 0;
7434 		}
7435 	}
7436 
7437 	/* set_callee_state is used for direct subprog calls, but we are
7438 	 * interested in validating only BPF helpers that can call subprogs as
7439 	 * callbacks
7440 	 */
7441 	if (set_callee_state_cb != set_callee_state && !is_callback_calling_function(insn->imm)) {
7442 		verbose(env, "verifier bug: helper %s#%d is not marked as callback-calling\n",
7443 			func_id_name(insn->imm), insn->imm);
7444 		return -EFAULT;
7445 	}
7446 
7447 	if (insn->code == (BPF_JMP | BPF_CALL) &&
7448 	    insn->src_reg == 0 &&
7449 	    insn->imm == BPF_FUNC_timer_set_callback) {
7450 		struct bpf_verifier_state *async_cb;
7451 
7452 		/* there is no real recursion here. timer callbacks are async */
7453 		env->subprog_info[subprog].is_async_cb = true;
7454 		async_cb = push_async_cb(env, env->subprog_info[subprog].start,
7455 					 *insn_idx, subprog);
7456 		if (!async_cb)
7457 			return -EFAULT;
7458 		callee = async_cb->frame[0];
7459 		callee->async_entry_cnt = caller->async_entry_cnt + 1;
7460 
7461 		/* Convert bpf_timer_set_callback() args into timer callback args */
7462 		err = set_callee_state_cb(env, caller, callee, *insn_idx);
7463 		if (err)
7464 			return err;
7465 
7466 		clear_caller_saved_regs(env, caller->regs);
7467 		mark_reg_unknown(env, caller->regs, BPF_REG_0);
7468 		caller->regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG;
7469 		/* continue with next insn after call */
7470 		return 0;
7471 	}
7472 
7473 	callee = kzalloc(sizeof(*callee), GFP_KERNEL);
7474 	if (!callee)
7475 		return -ENOMEM;
7476 	state->frame[state->curframe + 1] = callee;
7477 
7478 	/* callee cannot access r0, r6 - r9 for reading and has to write
7479 	 * into its own stack before reading from it.
7480 	 * callee can read/write into caller's stack
7481 	 */
7482 	init_func_state(env, callee,
7483 			/* remember the callsite, it will be used by bpf_exit */
7484 			*insn_idx /* callsite */,
7485 			state->curframe + 1 /* frameno within this callchain */,
7486 			subprog /* subprog number within this prog */);
7487 
7488 	/* Transfer references to the callee */
7489 	err = copy_reference_state(callee, caller);
7490 	if (err)
7491 		goto err_out;
7492 
7493 	err = set_callee_state_cb(env, caller, callee, *insn_idx);
7494 	if (err)
7495 		goto err_out;
7496 
7497 	clear_caller_saved_regs(env, caller->regs);
7498 
7499 	/* only increment it after check_reg_arg() finished */
7500 	state->curframe++;
7501 
7502 	/* and go analyze first insn of the callee */
7503 	*insn_idx = env->subprog_info[subprog].start - 1;
7504 
7505 	if (env->log.level & BPF_LOG_LEVEL) {
7506 		verbose(env, "caller:\n");
7507 		print_verifier_state(env, caller, true);
7508 		verbose(env, "callee:\n");
7509 		print_verifier_state(env, callee, true);
7510 	}
7511 	return 0;
7512 
7513 err_out:
7514 	free_func_state(callee);
7515 	state->frame[state->curframe + 1] = NULL;
7516 	return err;
7517 }
7518 
7519 int map_set_for_each_callback_args(struct bpf_verifier_env *env,
7520 				   struct bpf_func_state *caller,
7521 				   struct bpf_func_state *callee)
7522 {
7523 	/* bpf_for_each_map_elem(struct bpf_map *map, void *callback_fn,
7524 	 *      void *callback_ctx, u64 flags);
7525 	 * callback_fn(struct bpf_map *map, void *key, void *value,
7526 	 *      void *callback_ctx);
7527 	 */
7528 	callee->regs[BPF_REG_1] = caller->regs[BPF_REG_1];
7529 
7530 	callee->regs[BPF_REG_2].type = PTR_TO_MAP_KEY;
7531 	__mark_reg_known_zero(&callee->regs[BPF_REG_2]);
7532 	callee->regs[BPF_REG_2].map_ptr = caller->regs[BPF_REG_1].map_ptr;
7533 
7534 	callee->regs[BPF_REG_3].type = PTR_TO_MAP_VALUE;
7535 	__mark_reg_known_zero(&callee->regs[BPF_REG_3]);
7536 	callee->regs[BPF_REG_3].map_ptr = caller->regs[BPF_REG_1].map_ptr;
7537 
7538 	/* pointer to stack or null */
7539 	callee->regs[BPF_REG_4] = caller->regs[BPF_REG_3];
7540 
7541 	/* unused */
7542 	__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
7543 	return 0;
7544 }
7545 
7546 static int set_callee_state(struct bpf_verifier_env *env,
7547 			    struct bpf_func_state *caller,
7548 			    struct bpf_func_state *callee, int insn_idx)
7549 {
7550 	int i;
7551 
7552 	/* copy r1 - r5 args that callee can access.  The copy includes parent
7553 	 * pointers, which connects us up to the liveness chain
7554 	 */
7555 	for (i = BPF_REG_1; i <= BPF_REG_5; i++)
7556 		callee->regs[i] = caller->regs[i];
7557 	return 0;
7558 }
7559 
7560 static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
7561 			   int *insn_idx)
7562 {
7563 	int subprog, target_insn;
7564 
7565 	target_insn = *insn_idx + insn->imm + 1;
7566 	subprog = find_subprog(env, target_insn);
7567 	if (subprog < 0) {
7568 		verbose(env, "verifier bug. No program starts at insn %d\n",
7569 			target_insn);
7570 		return -EFAULT;
7571 	}
7572 
7573 	return __check_func_call(env, insn, insn_idx, subprog, set_callee_state);
7574 }
7575 
7576 static int set_map_elem_callback_state(struct bpf_verifier_env *env,
7577 				       struct bpf_func_state *caller,
7578 				       struct bpf_func_state *callee,
7579 				       int insn_idx)
7580 {
7581 	struct bpf_insn_aux_data *insn_aux = &env->insn_aux_data[insn_idx];
7582 	struct bpf_map *map;
7583 	int err;
7584 
7585 	if (bpf_map_ptr_poisoned(insn_aux)) {
7586 		verbose(env, "tail_call abusing map_ptr\n");
7587 		return -EINVAL;
7588 	}
7589 
7590 	map = BPF_MAP_PTR(insn_aux->map_ptr_state);
7591 	if (!map->ops->map_set_for_each_callback_args ||
7592 	    !map->ops->map_for_each_callback) {
7593 		verbose(env, "callback function not allowed for map\n");
7594 		return -ENOTSUPP;
7595 	}
7596 
7597 	err = map->ops->map_set_for_each_callback_args(env, caller, callee);
7598 	if (err)
7599 		return err;
7600 
7601 	callee->in_callback_fn = true;
7602 	callee->callback_ret_range = tnum_range(0, 1);
7603 	return 0;
7604 }
7605 
7606 static int set_loop_callback_state(struct bpf_verifier_env *env,
7607 				   struct bpf_func_state *caller,
7608 				   struct bpf_func_state *callee,
7609 				   int insn_idx)
7610 {
7611 	/* bpf_loop(u32 nr_loops, void *callback_fn, void *callback_ctx,
7612 	 *	    u64 flags);
7613 	 * callback_fn(u32 index, void *callback_ctx);
7614 	 */
7615 	callee->regs[BPF_REG_1].type = SCALAR_VALUE;
7616 	callee->regs[BPF_REG_2] = caller->regs[BPF_REG_3];
7617 
7618 	/* unused */
7619 	__mark_reg_not_init(env, &callee->regs[BPF_REG_3]);
7620 	__mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
7621 	__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
7622 
7623 	callee->in_callback_fn = true;
7624 	callee->callback_ret_range = tnum_range(0, 1);
7625 	return 0;
7626 }
7627 
7628 static int set_timer_callback_state(struct bpf_verifier_env *env,
7629 				    struct bpf_func_state *caller,
7630 				    struct bpf_func_state *callee,
7631 				    int insn_idx)
7632 {
7633 	struct bpf_map *map_ptr = caller->regs[BPF_REG_1].map_ptr;
7634 
7635 	/* bpf_timer_set_callback(struct bpf_timer *timer, void *callback_fn);
7636 	 * callback_fn(struct bpf_map *map, void *key, void *value);
7637 	 */
7638 	callee->regs[BPF_REG_1].type = CONST_PTR_TO_MAP;
7639 	__mark_reg_known_zero(&callee->regs[BPF_REG_1]);
7640 	callee->regs[BPF_REG_1].map_ptr = map_ptr;
7641 
7642 	callee->regs[BPF_REG_2].type = PTR_TO_MAP_KEY;
7643 	__mark_reg_known_zero(&callee->regs[BPF_REG_2]);
7644 	callee->regs[BPF_REG_2].map_ptr = map_ptr;
7645 
7646 	callee->regs[BPF_REG_3].type = PTR_TO_MAP_VALUE;
7647 	__mark_reg_known_zero(&callee->regs[BPF_REG_3]);
7648 	callee->regs[BPF_REG_3].map_ptr = map_ptr;
7649 
7650 	/* unused */
7651 	__mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
7652 	__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
7653 	callee->in_async_callback_fn = true;
7654 	callee->callback_ret_range = tnum_range(0, 1);
7655 	return 0;
7656 }
7657 
7658 static int set_find_vma_callback_state(struct bpf_verifier_env *env,
7659 				       struct bpf_func_state *caller,
7660 				       struct bpf_func_state *callee,
7661 				       int insn_idx)
7662 {
7663 	/* bpf_find_vma(struct task_struct *task, u64 addr,
7664 	 *               void *callback_fn, void *callback_ctx, u64 flags)
7665 	 * (callback_fn)(struct task_struct *task,
7666 	 *               struct vm_area_struct *vma, void *callback_ctx);
7667 	 */
7668 	callee->regs[BPF_REG_1] = caller->regs[BPF_REG_1];
7669 
7670 	callee->regs[BPF_REG_2].type = PTR_TO_BTF_ID;
7671 	__mark_reg_known_zero(&callee->regs[BPF_REG_2]);
7672 	callee->regs[BPF_REG_2].btf =  btf_vmlinux;
7673 	callee->regs[BPF_REG_2].btf_id = btf_tracing_ids[BTF_TRACING_TYPE_VMA],
7674 
7675 	/* pointer to stack or null */
7676 	callee->regs[BPF_REG_3] = caller->regs[BPF_REG_4];
7677 
7678 	/* unused */
7679 	__mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
7680 	__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
7681 	callee->in_callback_fn = true;
7682 	callee->callback_ret_range = tnum_range(0, 1);
7683 	return 0;
7684 }
7685 
7686 static int set_user_ringbuf_callback_state(struct bpf_verifier_env *env,
7687 					   struct bpf_func_state *caller,
7688 					   struct bpf_func_state *callee,
7689 					   int insn_idx)
7690 {
7691 	/* bpf_user_ringbuf_drain(struct bpf_map *map, void *callback_fn, void
7692 	 *			  callback_ctx, u64 flags);
7693 	 * callback_fn(const struct bpf_dynptr_t* dynptr, void *callback_ctx);
7694 	 */
7695 	__mark_reg_not_init(env, &callee->regs[BPF_REG_0]);
7696 	mark_dynptr_cb_reg(env, &callee->regs[BPF_REG_1], BPF_DYNPTR_TYPE_LOCAL);
7697 	callee->regs[BPF_REG_2] = caller->regs[BPF_REG_3];
7698 
7699 	/* unused */
7700 	__mark_reg_not_init(env, &callee->regs[BPF_REG_3]);
7701 	__mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
7702 	__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
7703 
7704 	callee->in_callback_fn = true;
7705 	callee->callback_ret_range = tnum_range(0, 1);
7706 	return 0;
7707 }
7708 
7709 static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
7710 {
7711 	struct bpf_verifier_state *state = env->cur_state;
7712 	struct bpf_func_state *caller, *callee;
7713 	struct bpf_reg_state *r0;
7714 	int err;
7715 
7716 	callee = state->frame[state->curframe];
7717 	r0 = &callee->regs[BPF_REG_0];
7718 	if (r0->type == PTR_TO_STACK) {
7719 		/* technically it's ok to return caller's stack pointer
7720 		 * (or caller's caller's pointer) back to the caller,
7721 		 * since these pointers are valid. Only current stack
7722 		 * pointer will be invalid as soon as function exits,
7723 		 * but let's be conservative
7724 		 */
7725 		verbose(env, "cannot return stack pointer to the caller\n");
7726 		return -EINVAL;
7727 	}
7728 
7729 	caller = state->frame[state->curframe - 1];
7730 	if (callee->in_callback_fn) {
7731 		/* enforce R0 return value range [0, 1]. */
7732 		struct tnum range = callee->callback_ret_range;
7733 
7734 		if (r0->type != SCALAR_VALUE) {
7735 			verbose(env, "R0 not a scalar value\n");
7736 			return -EACCES;
7737 		}
7738 		if (!tnum_in(range, r0->var_off)) {
7739 			verbose_invalid_scalar(env, r0, &range, "callback return", "R0");
7740 			return -EINVAL;
7741 		}
7742 	} else {
7743 		/* return to the caller whatever r0 had in the callee */
7744 		caller->regs[BPF_REG_0] = *r0;
7745 	}
7746 
7747 	/* callback_fn frame should have released its own additions to parent's
7748 	 * reference state at this point, or check_reference_leak would
7749 	 * complain, hence it must be the same as the caller. There is no need
7750 	 * to copy it back.
7751 	 */
7752 	if (!callee->in_callback_fn) {
7753 		/* Transfer references to the caller */
7754 		err = copy_reference_state(caller, callee);
7755 		if (err)
7756 			return err;
7757 	}
7758 
7759 	*insn_idx = callee->callsite + 1;
7760 	if (env->log.level & BPF_LOG_LEVEL) {
7761 		verbose(env, "returning from callee:\n");
7762 		print_verifier_state(env, callee, true);
7763 		verbose(env, "to caller at %d:\n", *insn_idx);
7764 		print_verifier_state(env, caller, true);
7765 	}
7766 	/* clear everything in the callee */
7767 	free_func_state(callee);
7768 	state->frame[state->curframe--] = NULL;
7769 	return 0;
7770 }
7771 
7772 static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type,
7773 				   int func_id,
7774 				   struct bpf_call_arg_meta *meta)
7775 {
7776 	struct bpf_reg_state *ret_reg = &regs[BPF_REG_0];
7777 
7778 	if (ret_type != RET_INTEGER ||
7779 	    (func_id != BPF_FUNC_get_stack &&
7780 	     func_id != BPF_FUNC_get_task_stack &&
7781 	     func_id != BPF_FUNC_probe_read_str &&
7782 	     func_id != BPF_FUNC_probe_read_kernel_str &&
7783 	     func_id != BPF_FUNC_probe_read_user_str))
7784 		return;
7785 
7786 	ret_reg->smax_value = meta->msize_max_value;
7787 	ret_reg->s32_max_value = meta->msize_max_value;
7788 	ret_reg->smin_value = -MAX_ERRNO;
7789 	ret_reg->s32_min_value = -MAX_ERRNO;
7790 	reg_bounds_sync(ret_reg);
7791 }
7792 
7793 static int
7794 record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
7795 		int func_id, int insn_idx)
7796 {
7797 	struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
7798 	struct bpf_map *map = meta->map_ptr;
7799 
7800 	if (func_id != BPF_FUNC_tail_call &&
7801 	    func_id != BPF_FUNC_map_lookup_elem &&
7802 	    func_id != BPF_FUNC_map_update_elem &&
7803 	    func_id != BPF_FUNC_map_delete_elem &&
7804 	    func_id != BPF_FUNC_map_push_elem &&
7805 	    func_id != BPF_FUNC_map_pop_elem &&
7806 	    func_id != BPF_FUNC_map_peek_elem &&
7807 	    func_id != BPF_FUNC_for_each_map_elem &&
7808 	    func_id != BPF_FUNC_redirect_map &&
7809 	    func_id != BPF_FUNC_map_lookup_percpu_elem)
7810 		return 0;
7811 
7812 	if (map == NULL) {
7813 		verbose(env, "kernel subsystem misconfigured verifier\n");
7814 		return -EINVAL;
7815 	}
7816 
7817 	/* In case of read-only, some additional restrictions
7818 	 * need to be applied in order to prevent altering the
7819 	 * state of the map from program side.
7820 	 */
7821 	if ((map->map_flags & BPF_F_RDONLY_PROG) &&
7822 	    (func_id == BPF_FUNC_map_delete_elem ||
7823 	     func_id == BPF_FUNC_map_update_elem ||
7824 	     func_id == BPF_FUNC_map_push_elem ||
7825 	     func_id == BPF_FUNC_map_pop_elem)) {
7826 		verbose(env, "write into map forbidden\n");
7827 		return -EACCES;
7828 	}
7829 
7830 	if (!BPF_MAP_PTR(aux->map_ptr_state))
7831 		bpf_map_ptr_store(aux, meta->map_ptr,
7832 				  !meta->map_ptr->bypass_spec_v1);
7833 	else if (BPF_MAP_PTR(aux->map_ptr_state) != meta->map_ptr)
7834 		bpf_map_ptr_store(aux, BPF_MAP_PTR_POISON,
7835 				  !meta->map_ptr->bypass_spec_v1);
7836 	return 0;
7837 }
7838 
7839 static int
7840 record_func_key(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
7841 		int func_id, int insn_idx)
7842 {
7843 	struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
7844 	struct bpf_reg_state *regs = cur_regs(env), *reg;
7845 	struct bpf_map *map = meta->map_ptr;
7846 	u64 val, max;
7847 	int err;
7848 
7849 	if (func_id != BPF_FUNC_tail_call)
7850 		return 0;
7851 	if (!map || map->map_type != BPF_MAP_TYPE_PROG_ARRAY) {
7852 		verbose(env, "kernel subsystem misconfigured verifier\n");
7853 		return -EINVAL;
7854 	}
7855 
7856 	reg = &regs[BPF_REG_3];
7857 	val = reg->var_off.value;
7858 	max = map->max_entries;
7859 
7860 	if (!(register_is_const(reg) && val < max)) {
7861 		bpf_map_key_store(aux, BPF_MAP_KEY_POISON);
7862 		return 0;
7863 	}
7864 
7865 	err = mark_chain_precision(env, BPF_REG_3);
7866 	if (err)
7867 		return err;
7868 	if (bpf_map_key_unseen(aux))
7869 		bpf_map_key_store(aux, val);
7870 	else if (!bpf_map_key_poisoned(aux) &&
7871 		  bpf_map_key_immediate(aux) != val)
7872 		bpf_map_key_store(aux, BPF_MAP_KEY_POISON);
7873 	return 0;
7874 }
7875 
7876 static int check_reference_leak(struct bpf_verifier_env *env)
7877 {
7878 	struct bpf_func_state *state = cur_func(env);
7879 	bool refs_lingering = false;
7880 	int i;
7881 
7882 	if (state->frameno && !state->in_callback_fn)
7883 		return 0;
7884 
7885 	for (i = 0; i < state->acquired_refs; i++) {
7886 		if (state->in_callback_fn && state->refs[i].callback_ref != state->frameno)
7887 			continue;
7888 		verbose(env, "Unreleased reference id=%d alloc_insn=%d\n",
7889 			state->refs[i].id, state->refs[i].insn_idx);
7890 		refs_lingering = true;
7891 	}
7892 	return refs_lingering ? -EINVAL : 0;
7893 }
7894 
7895 static int check_bpf_snprintf_call(struct bpf_verifier_env *env,
7896 				   struct bpf_reg_state *regs)
7897 {
7898 	struct bpf_reg_state *fmt_reg = &regs[BPF_REG_3];
7899 	struct bpf_reg_state *data_len_reg = &regs[BPF_REG_5];
7900 	struct bpf_map *fmt_map = fmt_reg->map_ptr;
7901 	struct bpf_bprintf_data data = {};
7902 	int err, fmt_map_off, num_args;
7903 	u64 fmt_addr;
7904 	char *fmt;
7905 
7906 	/* data must be an array of u64 */
7907 	if (data_len_reg->var_off.value % 8)
7908 		return -EINVAL;
7909 	num_args = data_len_reg->var_off.value / 8;
7910 
7911 	/* fmt being ARG_PTR_TO_CONST_STR guarantees that var_off is const
7912 	 * and map_direct_value_addr is set.
7913 	 */
7914 	fmt_map_off = fmt_reg->off + fmt_reg->var_off.value;
7915 	err = fmt_map->ops->map_direct_value_addr(fmt_map, &fmt_addr,
7916 						  fmt_map_off);
7917 	if (err) {
7918 		verbose(env, "verifier bug\n");
7919 		return -EFAULT;
7920 	}
7921 	fmt = (char *)(long)fmt_addr + fmt_map_off;
7922 
7923 	/* We are also guaranteed that fmt+fmt_map_off is NULL terminated, we
7924 	 * can focus on validating the format specifiers.
7925 	 */
7926 	err = bpf_bprintf_prepare(fmt, UINT_MAX, NULL, num_args, &data);
7927 	if (err < 0)
7928 		verbose(env, "Invalid format string\n");
7929 
7930 	return err;
7931 }
7932 
7933 static int check_get_func_ip(struct bpf_verifier_env *env)
7934 {
7935 	enum bpf_prog_type type = resolve_prog_type(env->prog);
7936 	int func_id = BPF_FUNC_get_func_ip;
7937 
7938 	if (type == BPF_PROG_TYPE_TRACING) {
7939 		if (!bpf_prog_has_trampoline(env->prog)) {
7940 			verbose(env, "func %s#%d supported only for fentry/fexit/fmod_ret programs\n",
7941 				func_id_name(func_id), func_id);
7942 			return -ENOTSUPP;
7943 		}
7944 		return 0;
7945 	} else if (type == BPF_PROG_TYPE_KPROBE) {
7946 		return 0;
7947 	}
7948 
7949 	verbose(env, "func %s#%d not supported for program type %d\n",
7950 		func_id_name(func_id), func_id, type);
7951 	return -ENOTSUPP;
7952 }
7953 
7954 static struct bpf_insn_aux_data *cur_aux(struct bpf_verifier_env *env)
7955 {
7956 	return &env->insn_aux_data[env->insn_idx];
7957 }
7958 
7959 static bool loop_flag_is_zero(struct bpf_verifier_env *env)
7960 {
7961 	struct bpf_reg_state *regs = cur_regs(env);
7962 	struct bpf_reg_state *reg = &regs[BPF_REG_4];
7963 	bool reg_is_null = register_is_null(reg);
7964 
7965 	if (reg_is_null)
7966 		mark_chain_precision(env, BPF_REG_4);
7967 
7968 	return reg_is_null;
7969 }
7970 
7971 static void update_loop_inline_state(struct bpf_verifier_env *env, u32 subprogno)
7972 {
7973 	struct bpf_loop_inline_state *state = &cur_aux(env)->loop_inline_state;
7974 
7975 	if (!state->initialized) {
7976 		state->initialized = 1;
7977 		state->fit_for_inline = loop_flag_is_zero(env);
7978 		state->callback_subprogno = subprogno;
7979 		return;
7980 	}
7981 
7982 	if (!state->fit_for_inline)
7983 		return;
7984 
7985 	state->fit_for_inline = (loop_flag_is_zero(env) &&
7986 				 state->callback_subprogno == subprogno);
7987 }
7988 
7989 static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
7990 			     int *insn_idx_p)
7991 {
7992 	enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
7993 	const struct bpf_func_proto *fn = NULL;
7994 	enum bpf_return_type ret_type;
7995 	enum bpf_type_flag ret_flag;
7996 	struct bpf_reg_state *regs;
7997 	struct bpf_call_arg_meta meta;
7998 	int insn_idx = *insn_idx_p;
7999 	bool changes_data;
8000 	int i, err, func_id;
8001 
8002 	/* find function prototype */
8003 	func_id = insn->imm;
8004 	if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) {
8005 		verbose(env, "invalid func %s#%d\n", func_id_name(func_id),
8006 			func_id);
8007 		return -EINVAL;
8008 	}
8009 
8010 	if (env->ops->get_func_proto)
8011 		fn = env->ops->get_func_proto(func_id, env->prog);
8012 	if (!fn) {
8013 		verbose(env, "unknown func %s#%d\n", func_id_name(func_id),
8014 			func_id);
8015 		return -EINVAL;
8016 	}
8017 
8018 	/* eBPF programs must be GPL compatible to use GPL-ed functions */
8019 	if (!env->prog->gpl_compatible && fn->gpl_only) {
8020 		verbose(env, "cannot call GPL-restricted function from non-GPL compatible program\n");
8021 		return -EINVAL;
8022 	}
8023 
8024 	if (fn->allowed && !fn->allowed(env->prog)) {
8025 		verbose(env, "helper call is not allowed in probe\n");
8026 		return -EINVAL;
8027 	}
8028 
8029 	if (!env->prog->aux->sleepable && fn->might_sleep) {
8030 		verbose(env, "helper call might sleep in a non-sleepable prog\n");
8031 		return -EINVAL;
8032 	}
8033 
8034 	/* With LD_ABS/IND some JITs save/restore skb from r1. */
8035 	changes_data = bpf_helper_changes_pkt_data(fn->func);
8036 	if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) {
8037 		verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n",
8038 			func_id_name(func_id), func_id);
8039 		return -EINVAL;
8040 	}
8041 
8042 	memset(&meta, 0, sizeof(meta));
8043 	meta.pkt_access = fn->pkt_access;
8044 
8045 	err = check_func_proto(fn, func_id);
8046 	if (err) {
8047 		verbose(env, "kernel subsystem misconfigured func %s#%d\n",
8048 			func_id_name(func_id), func_id);
8049 		return err;
8050 	}
8051 
8052 	if (env->cur_state->active_rcu_lock) {
8053 		if (fn->might_sleep) {
8054 			verbose(env, "sleepable helper %s#%d in rcu_read_lock region\n",
8055 				func_id_name(func_id), func_id);
8056 			return -EINVAL;
8057 		}
8058 
8059 		if (env->prog->aux->sleepable && is_storage_get_function(func_id))
8060 			env->insn_aux_data[insn_idx].storage_get_func_atomic = true;
8061 	}
8062 
8063 	meta.func_id = func_id;
8064 	/* check args */
8065 	for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++) {
8066 		err = check_func_arg(env, i, &meta, fn);
8067 		if (err)
8068 			return err;
8069 	}
8070 
8071 	err = record_func_map(env, &meta, func_id, insn_idx);
8072 	if (err)
8073 		return err;
8074 
8075 	err = record_func_key(env, &meta, func_id, insn_idx);
8076 	if (err)
8077 		return err;
8078 
8079 	/* Mark slots with STACK_MISC in case of raw mode, stack offset
8080 	 * is inferred from register state.
8081 	 */
8082 	for (i = 0; i < meta.access_size; i++) {
8083 		err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B,
8084 				       BPF_WRITE, -1, false);
8085 		if (err)
8086 			return err;
8087 	}
8088 
8089 	regs = cur_regs(env);
8090 
8091 	/* This can only be set for PTR_TO_STACK, as CONST_PTR_TO_DYNPTR cannot
8092 	 * be reinitialized by any dynptr helper. Hence, mark_stack_slots_dynptr
8093 	 * is safe to do directly.
8094 	 */
8095 	if (meta.uninit_dynptr_regno) {
8096 		if (regs[meta.uninit_dynptr_regno].type == CONST_PTR_TO_DYNPTR) {
8097 			verbose(env, "verifier internal error: CONST_PTR_TO_DYNPTR cannot be initialized\n");
8098 			return -EFAULT;
8099 		}
8100 		/* we write BPF_DW bits (8 bytes) at a time */
8101 		for (i = 0; i < BPF_DYNPTR_SIZE; i += 8) {
8102 			err = check_mem_access(env, insn_idx, meta.uninit_dynptr_regno,
8103 					       i, BPF_DW, BPF_WRITE, -1, false);
8104 			if (err)
8105 				return err;
8106 		}
8107 
8108 		err = mark_stack_slots_dynptr(env, &regs[meta.uninit_dynptr_regno],
8109 					      fn->arg_type[meta.uninit_dynptr_regno - BPF_REG_1],
8110 					      insn_idx);
8111 		if (err)
8112 			return err;
8113 	}
8114 
8115 	if (meta.release_regno) {
8116 		err = -EINVAL;
8117 		/* This can only be set for PTR_TO_STACK, as CONST_PTR_TO_DYNPTR cannot
8118 		 * be released by any dynptr helper. Hence, unmark_stack_slots_dynptr
8119 		 * is safe to do directly.
8120 		 */
8121 		if (arg_type_is_dynptr(fn->arg_type[meta.release_regno - BPF_REG_1])) {
8122 			if (regs[meta.release_regno].type == CONST_PTR_TO_DYNPTR) {
8123 				verbose(env, "verifier internal error: CONST_PTR_TO_DYNPTR cannot be released\n");
8124 				return -EFAULT;
8125 			}
8126 			err = unmark_stack_slots_dynptr(env, &regs[meta.release_regno]);
8127 		} else if (meta.ref_obj_id) {
8128 			err = release_reference(env, meta.ref_obj_id);
8129 		} else if (register_is_null(&regs[meta.release_regno])) {
8130 			/* meta.ref_obj_id can only be 0 if register that is meant to be
8131 			 * released is NULL, which must be > R0.
8132 			 */
8133 			err = 0;
8134 		}
8135 		if (err) {
8136 			verbose(env, "func %s#%d reference has not been acquired before\n",
8137 				func_id_name(func_id), func_id);
8138 			return err;
8139 		}
8140 	}
8141 
8142 	switch (func_id) {
8143 	case BPF_FUNC_tail_call:
8144 		err = check_reference_leak(env);
8145 		if (err) {
8146 			verbose(env, "tail_call would lead to reference leak\n");
8147 			return err;
8148 		}
8149 		break;
8150 	case BPF_FUNC_get_local_storage:
8151 		/* check that flags argument in get_local_storage(map, flags) is 0,
8152 		 * this is required because get_local_storage() can't return an error.
8153 		 */
8154 		if (!register_is_null(&regs[BPF_REG_2])) {
8155 			verbose(env, "get_local_storage() doesn't support non-zero flags\n");
8156 			return -EINVAL;
8157 		}
8158 		break;
8159 	case BPF_FUNC_for_each_map_elem:
8160 		err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
8161 					set_map_elem_callback_state);
8162 		break;
8163 	case BPF_FUNC_timer_set_callback:
8164 		err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
8165 					set_timer_callback_state);
8166 		break;
8167 	case BPF_FUNC_find_vma:
8168 		err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
8169 					set_find_vma_callback_state);
8170 		break;
8171 	case BPF_FUNC_snprintf:
8172 		err = check_bpf_snprintf_call(env, regs);
8173 		break;
8174 	case BPF_FUNC_loop:
8175 		update_loop_inline_state(env, meta.subprogno);
8176 		err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
8177 					set_loop_callback_state);
8178 		break;
8179 	case BPF_FUNC_dynptr_from_mem:
8180 		if (regs[BPF_REG_1].type != PTR_TO_MAP_VALUE) {
8181 			verbose(env, "Unsupported reg type %s for bpf_dynptr_from_mem data\n",
8182 				reg_type_str(env, regs[BPF_REG_1].type));
8183 			return -EACCES;
8184 		}
8185 		break;
8186 	case BPF_FUNC_set_retval:
8187 		if (prog_type == BPF_PROG_TYPE_LSM &&
8188 		    env->prog->expected_attach_type == BPF_LSM_CGROUP) {
8189 			if (!env->prog->aux->attach_func_proto->type) {
8190 				/* Make sure programs that attach to void
8191 				 * hooks don't try to modify return value.
8192 				 */
8193 				verbose(env, "BPF_LSM_CGROUP that attach to void LSM hooks can't modify return value!\n");
8194 				return -EINVAL;
8195 			}
8196 		}
8197 		break;
8198 	case BPF_FUNC_dynptr_data:
8199 		for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++) {
8200 			if (arg_type_is_dynptr(fn->arg_type[i])) {
8201 				struct bpf_reg_state *reg = &regs[BPF_REG_1 + i];
8202 				int id, ref_obj_id;
8203 
8204 				if (meta.dynptr_id) {
8205 					verbose(env, "verifier internal error: meta.dynptr_id already set\n");
8206 					return -EFAULT;
8207 				}
8208 
8209 				if (meta.ref_obj_id) {
8210 					verbose(env, "verifier internal error: meta.ref_obj_id already set\n");
8211 					return -EFAULT;
8212 				}
8213 
8214 				id = dynptr_id(env, reg);
8215 				if (id < 0) {
8216 					verbose(env, "verifier internal error: failed to obtain dynptr id\n");
8217 					return id;
8218 				}
8219 
8220 				ref_obj_id = dynptr_ref_obj_id(env, reg);
8221 				if (ref_obj_id < 0) {
8222 					verbose(env, "verifier internal error: failed to obtain dynptr ref_obj_id\n");
8223 					return ref_obj_id;
8224 				}
8225 
8226 				meta.dynptr_id = id;
8227 				meta.ref_obj_id = ref_obj_id;
8228 				break;
8229 			}
8230 		}
8231 		if (i == MAX_BPF_FUNC_REG_ARGS) {
8232 			verbose(env, "verifier internal error: no dynptr in bpf_dynptr_data()\n");
8233 			return -EFAULT;
8234 		}
8235 		break;
8236 	case BPF_FUNC_user_ringbuf_drain:
8237 		err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
8238 					set_user_ringbuf_callback_state);
8239 		break;
8240 	}
8241 
8242 	if (err)
8243 		return err;
8244 
8245 	/* reset caller saved regs */
8246 	for (i = 0; i < CALLER_SAVED_REGS; i++) {
8247 		mark_reg_not_init(env, regs, caller_saved[i]);
8248 		check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
8249 	}
8250 
8251 	/* helper call returns 64-bit value. */
8252 	regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG;
8253 
8254 	/* update return register (already marked as written above) */
8255 	ret_type = fn->ret_type;
8256 	ret_flag = type_flag(ret_type);
8257 
8258 	switch (base_type(ret_type)) {
8259 	case RET_INTEGER:
8260 		/* sets type to SCALAR_VALUE */
8261 		mark_reg_unknown(env, regs, BPF_REG_0);
8262 		break;
8263 	case RET_VOID:
8264 		regs[BPF_REG_0].type = NOT_INIT;
8265 		break;
8266 	case RET_PTR_TO_MAP_VALUE:
8267 		/* There is no offset yet applied, variable or fixed */
8268 		mark_reg_known_zero(env, regs, BPF_REG_0);
8269 		/* remember map_ptr, so that check_map_access()
8270 		 * can check 'value_size' boundary of memory access
8271 		 * to map element returned from bpf_map_lookup_elem()
8272 		 */
8273 		if (meta.map_ptr == NULL) {
8274 			verbose(env,
8275 				"kernel subsystem misconfigured verifier\n");
8276 			return -EINVAL;
8277 		}
8278 		regs[BPF_REG_0].map_ptr = meta.map_ptr;
8279 		regs[BPF_REG_0].map_uid = meta.map_uid;
8280 		regs[BPF_REG_0].type = PTR_TO_MAP_VALUE | ret_flag;
8281 		if (!type_may_be_null(ret_type) &&
8282 		    btf_record_has_field(meta.map_ptr->record, BPF_SPIN_LOCK)) {
8283 			regs[BPF_REG_0].id = ++env->id_gen;
8284 		}
8285 		break;
8286 	case RET_PTR_TO_SOCKET:
8287 		mark_reg_known_zero(env, regs, BPF_REG_0);
8288 		regs[BPF_REG_0].type = PTR_TO_SOCKET | ret_flag;
8289 		break;
8290 	case RET_PTR_TO_SOCK_COMMON:
8291 		mark_reg_known_zero(env, regs, BPF_REG_0);
8292 		regs[BPF_REG_0].type = PTR_TO_SOCK_COMMON | ret_flag;
8293 		break;
8294 	case RET_PTR_TO_TCP_SOCK:
8295 		mark_reg_known_zero(env, regs, BPF_REG_0);
8296 		regs[BPF_REG_0].type = PTR_TO_TCP_SOCK | ret_flag;
8297 		break;
8298 	case RET_PTR_TO_MEM:
8299 		mark_reg_known_zero(env, regs, BPF_REG_0);
8300 		regs[BPF_REG_0].type = PTR_TO_MEM | ret_flag;
8301 		regs[BPF_REG_0].mem_size = meta.mem_size;
8302 		break;
8303 	case RET_PTR_TO_MEM_OR_BTF_ID:
8304 	{
8305 		const struct btf_type *t;
8306 
8307 		mark_reg_known_zero(env, regs, BPF_REG_0);
8308 		t = btf_type_skip_modifiers(meta.ret_btf, meta.ret_btf_id, NULL);
8309 		if (!btf_type_is_struct(t)) {
8310 			u32 tsize;
8311 			const struct btf_type *ret;
8312 			const char *tname;
8313 
8314 			/* resolve the type size of ksym. */
8315 			ret = btf_resolve_size(meta.ret_btf, t, &tsize);
8316 			if (IS_ERR(ret)) {
8317 				tname = btf_name_by_offset(meta.ret_btf, t->name_off);
8318 				verbose(env, "unable to resolve the size of type '%s': %ld\n",
8319 					tname, PTR_ERR(ret));
8320 				return -EINVAL;
8321 			}
8322 			regs[BPF_REG_0].type = PTR_TO_MEM | ret_flag;
8323 			regs[BPF_REG_0].mem_size = tsize;
8324 		} else {
8325 			/* MEM_RDONLY may be carried from ret_flag, but it
8326 			 * doesn't apply on PTR_TO_BTF_ID. Fold it, otherwise
8327 			 * it will confuse the check of PTR_TO_BTF_ID in
8328 			 * check_mem_access().
8329 			 */
8330 			ret_flag &= ~MEM_RDONLY;
8331 
8332 			regs[BPF_REG_0].type = PTR_TO_BTF_ID | ret_flag;
8333 			regs[BPF_REG_0].btf = meta.ret_btf;
8334 			regs[BPF_REG_0].btf_id = meta.ret_btf_id;
8335 		}
8336 		break;
8337 	}
8338 	case RET_PTR_TO_BTF_ID:
8339 	{
8340 		struct btf *ret_btf;
8341 		int ret_btf_id;
8342 
8343 		mark_reg_known_zero(env, regs, BPF_REG_0);
8344 		regs[BPF_REG_0].type = PTR_TO_BTF_ID | ret_flag;
8345 		if (func_id == BPF_FUNC_kptr_xchg) {
8346 			ret_btf = meta.kptr_field->kptr.btf;
8347 			ret_btf_id = meta.kptr_field->kptr.btf_id;
8348 		} else {
8349 			if (fn->ret_btf_id == BPF_PTR_POISON) {
8350 				verbose(env, "verifier internal error:");
8351 				verbose(env, "func %s has non-overwritten BPF_PTR_POISON return type\n",
8352 					func_id_name(func_id));
8353 				return -EINVAL;
8354 			}
8355 			ret_btf = btf_vmlinux;
8356 			ret_btf_id = *fn->ret_btf_id;
8357 		}
8358 		if (ret_btf_id == 0) {
8359 			verbose(env, "invalid return type %u of func %s#%d\n",
8360 				base_type(ret_type), func_id_name(func_id),
8361 				func_id);
8362 			return -EINVAL;
8363 		}
8364 		regs[BPF_REG_0].btf = ret_btf;
8365 		regs[BPF_REG_0].btf_id = ret_btf_id;
8366 		break;
8367 	}
8368 	default:
8369 		verbose(env, "unknown return type %u of func %s#%d\n",
8370 			base_type(ret_type), func_id_name(func_id), func_id);
8371 		return -EINVAL;
8372 	}
8373 
8374 	if (type_may_be_null(regs[BPF_REG_0].type))
8375 		regs[BPF_REG_0].id = ++env->id_gen;
8376 
8377 	if (helper_multiple_ref_obj_use(func_id, meta.map_ptr)) {
8378 		verbose(env, "verifier internal error: func %s#%d sets ref_obj_id more than once\n",
8379 			func_id_name(func_id), func_id);
8380 		return -EFAULT;
8381 	}
8382 
8383 	if (is_dynptr_ref_function(func_id))
8384 		regs[BPF_REG_0].dynptr_id = meta.dynptr_id;
8385 
8386 	if (is_ptr_cast_function(func_id) || is_dynptr_ref_function(func_id)) {
8387 		/* For release_reference() */
8388 		regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id;
8389 	} else if (is_acquire_function(func_id, meta.map_ptr)) {
8390 		int id = acquire_reference_state(env, insn_idx);
8391 
8392 		if (id < 0)
8393 			return id;
8394 		/* For mark_ptr_or_null_reg() */
8395 		regs[BPF_REG_0].id = id;
8396 		/* For release_reference() */
8397 		regs[BPF_REG_0].ref_obj_id = id;
8398 	}
8399 
8400 	do_refine_retval_range(regs, fn->ret_type, func_id, &meta);
8401 
8402 	err = check_map_func_compatibility(env, meta.map_ptr, func_id);
8403 	if (err)
8404 		return err;
8405 
8406 	if ((func_id == BPF_FUNC_get_stack ||
8407 	     func_id == BPF_FUNC_get_task_stack) &&
8408 	    !env->prog->has_callchain_buf) {
8409 		const char *err_str;
8410 
8411 #ifdef CONFIG_PERF_EVENTS
8412 		err = get_callchain_buffers(sysctl_perf_event_max_stack);
8413 		err_str = "cannot get callchain buffer for func %s#%d\n";
8414 #else
8415 		err = -ENOTSUPP;
8416 		err_str = "func %s#%d not supported without CONFIG_PERF_EVENTS\n";
8417 #endif
8418 		if (err) {
8419 			verbose(env, err_str, func_id_name(func_id), func_id);
8420 			return err;
8421 		}
8422 
8423 		env->prog->has_callchain_buf = true;
8424 	}
8425 
8426 	if (func_id == BPF_FUNC_get_stackid || func_id == BPF_FUNC_get_stack)
8427 		env->prog->call_get_stack = true;
8428 
8429 	if (func_id == BPF_FUNC_get_func_ip) {
8430 		if (check_get_func_ip(env))
8431 			return -ENOTSUPP;
8432 		env->prog->call_get_func_ip = true;
8433 	}
8434 
8435 	if (changes_data)
8436 		clear_all_pkt_pointers(env);
8437 	return 0;
8438 }
8439 
8440 /* mark_btf_func_reg_size() is used when the reg size is determined by
8441  * the BTF func_proto's return value size and argument.
8442  */
8443 static void mark_btf_func_reg_size(struct bpf_verifier_env *env, u32 regno,
8444 				   size_t reg_size)
8445 {
8446 	struct bpf_reg_state *reg = &cur_regs(env)[regno];
8447 
8448 	if (regno == BPF_REG_0) {
8449 		/* Function return value */
8450 		reg->live |= REG_LIVE_WRITTEN;
8451 		reg->subreg_def = reg_size == sizeof(u64) ?
8452 			DEF_NOT_SUBREG : env->insn_idx + 1;
8453 	} else {
8454 		/* Function argument */
8455 		if (reg_size == sizeof(u64)) {
8456 			mark_insn_zext(env, reg);
8457 			mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
8458 		} else {
8459 			mark_reg_read(env, reg, reg->parent, REG_LIVE_READ32);
8460 		}
8461 	}
8462 }
8463 
8464 struct bpf_kfunc_call_arg_meta {
8465 	/* In parameters */
8466 	struct btf *btf;
8467 	u32 func_id;
8468 	u32 kfunc_flags;
8469 	const struct btf_type *func_proto;
8470 	const char *func_name;
8471 	/* Out parameters */
8472 	u32 ref_obj_id;
8473 	u8 release_regno;
8474 	bool r0_rdonly;
8475 	u32 ret_btf_id;
8476 	u64 r0_size;
8477 	struct {
8478 		u64 value;
8479 		bool found;
8480 	} arg_constant;
8481 	struct {
8482 		struct btf *btf;
8483 		u32 btf_id;
8484 	} arg_obj_drop;
8485 	struct {
8486 		struct btf_field *field;
8487 	} arg_list_head;
8488 };
8489 
8490 static bool is_kfunc_acquire(struct bpf_kfunc_call_arg_meta *meta)
8491 {
8492 	return meta->kfunc_flags & KF_ACQUIRE;
8493 }
8494 
8495 static bool is_kfunc_ret_null(struct bpf_kfunc_call_arg_meta *meta)
8496 {
8497 	return meta->kfunc_flags & KF_RET_NULL;
8498 }
8499 
8500 static bool is_kfunc_release(struct bpf_kfunc_call_arg_meta *meta)
8501 {
8502 	return meta->kfunc_flags & KF_RELEASE;
8503 }
8504 
8505 static bool is_kfunc_trusted_args(struct bpf_kfunc_call_arg_meta *meta)
8506 {
8507 	return meta->kfunc_flags & KF_TRUSTED_ARGS;
8508 }
8509 
8510 static bool is_kfunc_sleepable(struct bpf_kfunc_call_arg_meta *meta)
8511 {
8512 	return meta->kfunc_flags & KF_SLEEPABLE;
8513 }
8514 
8515 static bool is_kfunc_destructive(struct bpf_kfunc_call_arg_meta *meta)
8516 {
8517 	return meta->kfunc_flags & KF_DESTRUCTIVE;
8518 }
8519 
8520 static bool is_kfunc_rcu(struct bpf_kfunc_call_arg_meta *meta)
8521 {
8522 	return meta->kfunc_flags & KF_RCU;
8523 }
8524 
8525 static bool is_kfunc_arg_kptr_get(struct bpf_kfunc_call_arg_meta *meta, int arg)
8526 {
8527 	return arg == 0 && (meta->kfunc_flags & KF_KPTR_GET);
8528 }
8529 
8530 static bool __kfunc_param_match_suffix(const struct btf *btf,
8531 				       const struct btf_param *arg,
8532 				       const char *suffix)
8533 {
8534 	int suffix_len = strlen(suffix), len;
8535 	const char *param_name;
8536 
8537 	/* In the future, this can be ported to use BTF tagging */
8538 	param_name = btf_name_by_offset(btf, arg->name_off);
8539 	if (str_is_empty(param_name))
8540 		return false;
8541 	len = strlen(param_name);
8542 	if (len < suffix_len)
8543 		return false;
8544 	param_name += len - suffix_len;
8545 	return !strncmp(param_name, suffix, suffix_len);
8546 }
8547 
8548 static bool is_kfunc_arg_mem_size(const struct btf *btf,
8549 				  const struct btf_param *arg,
8550 				  const struct bpf_reg_state *reg)
8551 {
8552 	const struct btf_type *t;
8553 
8554 	t = btf_type_skip_modifiers(btf, arg->type, NULL);
8555 	if (!btf_type_is_scalar(t) || reg->type != SCALAR_VALUE)
8556 		return false;
8557 
8558 	return __kfunc_param_match_suffix(btf, arg, "__sz");
8559 }
8560 
8561 static bool is_kfunc_arg_constant(const struct btf *btf, const struct btf_param *arg)
8562 {
8563 	return __kfunc_param_match_suffix(btf, arg, "__k");
8564 }
8565 
8566 static bool is_kfunc_arg_ignore(const struct btf *btf, const struct btf_param *arg)
8567 {
8568 	return __kfunc_param_match_suffix(btf, arg, "__ign");
8569 }
8570 
8571 static bool is_kfunc_arg_alloc_obj(const struct btf *btf, const struct btf_param *arg)
8572 {
8573 	return __kfunc_param_match_suffix(btf, arg, "__alloc");
8574 }
8575 
8576 static bool is_kfunc_arg_scalar_with_name(const struct btf *btf,
8577 					  const struct btf_param *arg,
8578 					  const char *name)
8579 {
8580 	int len, target_len = strlen(name);
8581 	const char *param_name;
8582 
8583 	param_name = btf_name_by_offset(btf, arg->name_off);
8584 	if (str_is_empty(param_name))
8585 		return false;
8586 	len = strlen(param_name);
8587 	if (len != target_len)
8588 		return false;
8589 	if (strcmp(param_name, name))
8590 		return false;
8591 
8592 	return true;
8593 }
8594 
8595 enum {
8596 	KF_ARG_DYNPTR_ID,
8597 	KF_ARG_LIST_HEAD_ID,
8598 	KF_ARG_LIST_NODE_ID,
8599 };
8600 
8601 BTF_ID_LIST(kf_arg_btf_ids)
8602 BTF_ID(struct, bpf_dynptr_kern)
8603 BTF_ID(struct, bpf_list_head)
8604 BTF_ID(struct, bpf_list_node)
8605 
8606 static bool __is_kfunc_ptr_arg_type(const struct btf *btf,
8607 				    const struct btf_param *arg, int type)
8608 {
8609 	const struct btf_type *t;
8610 	u32 res_id;
8611 
8612 	t = btf_type_skip_modifiers(btf, arg->type, NULL);
8613 	if (!t)
8614 		return false;
8615 	if (!btf_type_is_ptr(t))
8616 		return false;
8617 	t = btf_type_skip_modifiers(btf, t->type, &res_id);
8618 	if (!t)
8619 		return false;
8620 	return btf_types_are_same(btf, res_id, btf_vmlinux, kf_arg_btf_ids[type]);
8621 }
8622 
8623 static bool is_kfunc_arg_dynptr(const struct btf *btf, const struct btf_param *arg)
8624 {
8625 	return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_DYNPTR_ID);
8626 }
8627 
8628 static bool is_kfunc_arg_list_head(const struct btf *btf, const struct btf_param *arg)
8629 {
8630 	return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_LIST_HEAD_ID);
8631 }
8632 
8633 static bool is_kfunc_arg_list_node(const struct btf *btf, const struct btf_param *arg)
8634 {
8635 	return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_LIST_NODE_ID);
8636 }
8637 
8638 /* Returns true if struct is composed of scalars, 4 levels of nesting allowed */
8639 static bool __btf_type_is_scalar_struct(struct bpf_verifier_env *env,
8640 					const struct btf *btf,
8641 					const struct btf_type *t, int rec)
8642 {
8643 	const struct btf_type *member_type;
8644 	const struct btf_member *member;
8645 	u32 i;
8646 
8647 	if (!btf_type_is_struct(t))
8648 		return false;
8649 
8650 	for_each_member(i, t, member) {
8651 		const struct btf_array *array;
8652 
8653 		member_type = btf_type_skip_modifiers(btf, member->type, NULL);
8654 		if (btf_type_is_struct(member_type)) {
8655 			if (rec >= 3) {
8656 				verbose(env, "max struct nesting depth exceeded\n");
8657 				return false;
8658 			}
8659 			if (!__btf_type_is_scalar_struct(env, btf, member_type, rec + 1))
8660 				return false;
8661 			continue;
8662 		}
8663 		if (btf_type_is_array(member_type)) {
8664 			array = btf_array(member_type);
8665 			if (!array->nelems)
8666 				return false;
8667 			member_type = btf_type_skip_modifiers(btf, array->type, NULL);
8668 			if (!btf_type_is_scalar(member_type))
8669 				return false;
8670 			continue;
8671 		}
8672 		if (!btf_type_is_scalar(member_type))
8673 			return false;
8674 	}
8675 	return true;
8676 }
8677 
8678 
8679 static u32 *reg2btf_ids[__BPF_REG_TYPE_MAX] = {
8680 #ifdef CONFIG_NET
8681 	[PTR_TO_SOCKET] = &btf_sock_ids[BTF_SOCK_TYPE_SOCK],
8682 	[PTR_TO_SOCK_COMMON] = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
8683 	[PTR_TO_TCP_SOCK] = &btf_sock_ids[BTF_SOCK_TYPE_TCP],
8684 #endif
8685 };
8686 
8687 enum kfunc_ptr_arg_type {
8688 	KF_ARG_PTR_TO_CTX,
8689 	KF_ARG_PTR_TO_ALLOC_BTF_ID,  /* Allocated object */
8690 	KF_ARG_PTR_TO_KPTR,	     /* PTR_TO_KPTR but type specific */
8691 	KF_ARG_PTR_TO_DYNPTR,
8692 	KF_ARG_PTR_TO_LIST_HEAD,
8693 	KF_ARG_PTR_TO_LIST_NODE,
8694 	KF_ARG_PTR_TO_BTF_ID,	     /* Also covers reg2btf_ids conversions */
8695 	KF_ARG_PTR_TO_MEM,
8696 	KF_ARG_PTR_TO_MEM_SIZE,	     /* Size derived from next argument, skip it */
8697 };
8698 
8699 enum special_kfunc_type {
8700 	KF_bpf_obj_new_impl,
8701 	KF_bpf_obj_drop_impl,
8702 	KF_bpf_list_push_front,
8703 	KF_bpf_list_push_back,
8704 	KF_bpf_list_pop_front,
8705 	KF_bpf_list_pop_back,
8706 	KF_bpf_cast_to_kern_ctx,
8707 	KF_bpf_rdonly_cast,
8708 	KF_bpf_rcu_read_lock,
8709 	KF_bpf_rcu_read_unlock,
8710 };
8711 
8712 BTF_SET_START(special_kfunc_set)
8713 BTF_ID(func, bpf_obj_new_impl)
8714 BTF_ID(func, bpf_obj_drop_impl)
8715 BTF_ID(func, bpf_list_push_front)
8716 BTF_ID(func, bpf_list_push_back)
8717 BTF_ID(func, bpf_list_pop_front)
8718 BTF_ID(func, bpf_list_pop_back)
8719 BTF_ID(func, bpf_cast_to_kern_ctx)
8720 BTF_ID(func, bpf_rdonly_cast)
8721 BTF_SET_END(special_kfunc_set)
8722 
8723 BTF_ID_LIST(special_kfunc_list)
8724 BTF_ID(func, bpf_obj_new_impl)
8725 BTF_ID(func, bpf_obj_drop_impl)
8726 BTF_ID(func, bpf_list_push_front)
8727 BTF_ID(func, bpf_list_push_back)
8728 BTF_ID(func, bpf_list_pop_front)
8729 BTF_ID(func, bpf_list_pop_back)
8730 BTF_ID(func, bpf_cast_to_kern_ctx)
8731 BTF_ID(func, bpf_rdonly_cast)
8732 BTF_ID(func, bpf_rcu_read_lock)
8733 BTF_ID(func, bpf_rcu_read_unlock)
8734 
8735 static bool is_kfunc_bpf_rcu_read_lock(struct bpf_kfunc_call_arg_meta *meta)
8736 {
8737 	return meta->func_id == special_kfunc_list[KF_bpf_rcu_read_lock];
8738 }
8739 
8740 static bool is_kfunc_bpf_rcu_read_unlock(struct bpf_kfunc_call_arg_meta *meta)
8741 {
8742 	return meta->func_id == special_kfunc_list[KF_bpf_rcu_read_unlock];
8743 }
8744 
8745 static enum kfunc_ptr_arg_type
8746 get_kfunc_ptr_arg_type(struct bpf_verifier_env *env,
8747 		       struct bpf_kfunc_call_arg_meta *meta,
8748 		       const struct btf_type *t, const struct btf_type *ref_t,
8749 		       const char *ref_tname, const struct btf_param *args,
8750 		       int argno, int nargs)
8751 {
8752 	u32 regno = argno + 1;
8753 	struct bpf_reg_state *regs = cur_regs(env);
8754 	struct bpf_reg_state *reg = &regs[regno];
8755 	bool arg_mem_size = false;
8756 
8757 	if (meta->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx])
8758 		return KF_ARG_PTR_TO_CTX;
8759 
8760 	/* In this function, we verify the kfunc's BTF as per the argument type,
8761 	 * leaving the rest of the verification with respect to the register
8762 	 * type to our caller. When a set of conditions hold in the BTF type of
8763 	 * arguments, we resolve it to a known kfunc_ptr_arg_type.
8764 	 */
8765 	if (btf_get_prog_ctx_type(&env->log, meta->btf, t, resolve_prog_type(env->prog), argno))
8766 		return KF_ARG_PTR_TO_CTX;
8767 
8768 	if (is_kfunc_arg_alloc_obj(meta->btf, &args[argno]))
8769 		return KF_ARG_PTR_TO_ALLOC_BTF_ID;
8770 
8771 	if (is_kfunc_arg_kptr_get(meta, argno)) {
8772 		if (!btf_type_is_ptr(ref_t)) {
8773 			verbose(env, "arg#0 BTF type must be a double pointer for kptr_get kfunc\n");
8774 			return -EINVAL;
8775 		}
8776 		ref_t = btf_type_by_id(meta->btf, ref_t->type);
8777 		ref_tname = btf_name_by_offset(meta->btf, ref_t->name_off);
8778 		if (!btf_type_is_struct(ref_t)) {
8779 			verbose(env, "kernel function %s args#0 pointer type %s %s is not supported\n",
8780 				meta->func_name, btf_type_str(ref_t), ref_tname);
8781 			return -EINVAL;
8782 		}
8783 		return KF_ARG_PTR_TO_KPTR;
8784 	}
8785 
8786 	if (is_kfunc_arg_dynptr(meta->btf, &args[argno]))
8787 		return KF_ARG_PTR_TO_DYNPTR;
8788 
8789 	if (is_kfunc_arg_list_head(meta->btf, &args[argno]))
8790 		return KF_ARG_PTR_TO_LIST_HEAD;
8791 
8792 	if (is_kfunc_arg_list_node(meta->btf, &args[argno]))
8793 		return KF_ARG_PTR_TO_LIST_NODE;
8794 
8795 	if ((base_type(reg->type) == PTR_TO_BTF_ID || reg2btf_ids[base_type(reg->type)])) {
8796 		if (!btf_type_is_struct(ref_t)) {
8797 			verbose(env, "kernel function %s args#%d pointer type %s %s is not supported\n",
8798 				meta->func_name, argno, btf_type_str(ref_t), ref_tname);
8799 			return -EINVAL;
8800 		}
8801 		return KF_ARG_PTR_TO_BTF_ID;
8802 	}
8803 
8804 	if (argno + 1 < nargs && is_kfunc_arg_mem_size(meta->btf, &args[argno + 1], &regs[regno + 1]))
8805 		arg_mem_size = true;
8806 
8807 	/* This is the catch all argument type of register types supported by
8808 	 * check_helper_mem_access. However, we only allow when argument type is
8809 	 * pointer to scalar, or struct composed (recursively) of scalars. When
8810 	 * arg_mem_size is true, the pointer can be void *.
8811 	 */
8812 	if (!btf_type_is_scalar(ref_t) && !__btf_type_is_scalar_struct(env, meta->btf, ref_t, 0) &&
8813 	    (arg_mem_size ? !btf_type_is_void(ref_t) : 1)) {
8814 		verbose(env, "arg#%d pointer type %s %s must point to %sscalar, or struct with scalar\n",
8815 			argno, btf_type_str(ref_t), ref_tname, arg_mem_size ? "void, " : "");
8816 		return -EINVAL;
8817 	}
8818 	return arg_mem_size ? KF_ARG_PTR_TO_MEM_SIZE : KF_ARG_PTR_TO_MEM;
8819 }
8820 
8821 static int process_kf_arg_ptr_to_btf_id(struct bpf_verifier_env *env,
8822 					struct bpf_reg_state *reg,
8823 					const struct btf_type *ref_t,
8824 					const char *ref_tname, u32 ref_id,
8825 					struct bpf_kfunc_call_arg_meta *meta,
8826 					int argno)
8827 {
8828 	const struct btf_type *reg_ref_t;
8829 	bool strict_type_match = false;
8830 	const struct btf *reg_btf;
8831 	const char *reg_ref_tname;
8832 	u32 reg_ref_id;
8833 
8834 	if (base_type(reg->type) == PTR_TO_BTF_ID) {
8835 		reg_btf = reg->btf;
8836 		reg_ref_id = reg->btf_id;
8837 	} else {
8838 		reg_btf = btf_vmlinux;
8839 		reg_ref_id = *reg2btf_ids[base_type(reg->type)];
8840 	}
8841 
8842 	/* Enforce strict type matching for calls to kfuncs that are acquiring
8843 	 * or releasing a reference, or are no-cast aliases. We do _not_
8844 	 * enforce strict matching for plain KF_TRUSTED_ARGS kfuncs by default,
8845 	 * as we want to enable BPF programs to pass types that are bitwise
8846 	 * equivalent without forcing them to explicitly cast with something
8847 	 * like bpf_cast_to_kern_ctx().
8848 	 *
8849 	 * For example, say we had a type like the following:
8850 	 *
8851 	 * struct bpf_cpumask {
8852 	 *	cpumask_t cpumask;
8853 	 *	refcount_t usage;
8854 	 * };
8855 	 *
8856 	 * Note that as specified in <linux/cpumask.h>, cpumask_t is typedef'ed
8857 	 * to a struct cpumask, so it would be safe to pass a struct
8858 	 * bpf_cpumask * to a kfunc expecting a struct cpumask *.
8859 	 *
8860 	 * The philosophy here is similar to how we allow scalars of different
8861 	 * types to be passed to kfuncs as long as the size is the same. The
8862 	 * only difference here is that we're simply allowing
8863 	 * btf_struct_ids_match() to walk the struct at the 0th offset, and
8864 	 * resolve types.
8865 	 */
8866 	if (is_kfunc_acquire(meta) ||
8867 	    (is_kfunc_release(meta) && reg->ref_obj_id) ||
8868 	    btf_type_ids_nocast_alias(&env->log, reg_btf, reg_ref_id, meta->btf, ref_id))
8869 		strict_type_match = true;
8870 
8871 	WARN_ON_ONCE(is_kfunc_trusted_args(meta) && reg->off);
8872 
8873 	reg_ref_t = btf_type_skip_modifiers(reg_btf, reg_ref_id, &reg_ref_id);
8874 	reg_ref_tname = btf_name_by_offset(reg_btf, reg_ref_t->name_off);
8875 	if (!btf_struct_ids_match(&env->log, reg_btf, reg_ref_id, reg->off, meta->btf, ref_id, strict_type_match)) {
8876 		verbose(env, "kernel function %s args#%d expected pointer to %s %s but R%d has a pointer to %s %s\n",
8877 			meta->func_name, argno, btf_type_str(ref_t), ref_tname, argno + 1,
8878 			btf_type_str(reg_ref_t), reg_ref_tname);
8879 		return -EINVAL;
8880 	}
8881 	return 0;
8882 }
8883 
8884 static int process_kf_arg_ptr_to_kptr(struct bpf_verifier_env *env,
8885 				      struct bpf_reg_state *reg,
8886 				      const struct btf_type *ref_t,
8887 				      const char *ref_tname,
8888 				      struct bpf_kfunc_call_arg_meta *meta,
8889 				      int argno)
8890 {
8891 	struct btf_field *kptr_field;
8892 
8893 	/* check_func_arg_reg_off allows var_off for
8894 	 * PTR_TO_MAP_VALUE, but we need fixed offset to find
8895 	 * off_desc.
8896 	 */
8897 	if (!tnum_is_const(reg->var_off)) {
8898 		verbose(env, "arg#0 must have constant offset\n");
8899 		return -EINVAL;
8900 	}
8901 
8902 	kptr_field = btf_record_find(reg->map_ptr->record, reg->off + reg->var_off.value, BPF_KPTR);
8903 	if (!kptr_field || kptr_field->type != BPF_KPTR_REF) {
8904 		verbose(env, "arg#0 no referenced kptr at map value offset=%llu\n",
8905 			reg->off + reg->var_off.value);
8906 		return -EINVAL;
8907 	}
8908 
8909 	if (!btf_struct_ids_match(&env->log, meta->btf, ref_t->type, 0, kptr_field->kptr.btf,
8910 				  kptr_field->kptr.btf_id, true)) {
8911 		verbose(env, "kernel function %s args#%d expected pointer to %s %s\n",
8912 			meta->func_name, argno, btf_type_str(ref_t), ref_tname);
8913 		return -EINVAL;
8914 	}
8915 	return 0;
8916 }
8917 
8918 static int ref_set_release_on_unlock(struct bpf_verifier_env *env, u32 ref_obj_id)
8919 {
8920 	struct bpf_func_state *state = cur_func(env);
8921 	struct bpf_reg_state *reg;
8922 	int i;
8923 
8924 	/* bpf_spin_lock only allows calling list_push and list_pop, no BPF
8925 	 * subprogs, no global functions. This means that the references would
8926 	 * not be released inside the critical section but they may be added to
8927 	 * the reference state, and the acquired_refs are never copied out for a
8928 	 * different frame as BPF to BPF calls don't work in bpf_spin_lock
8929 	 * critical sections.
8930 	 */
8931 	if (!ref_obj_id) {
8932 		verbose(env, "verifier internal error: ref_obj_id is zero for release_on_unlock\n");
8933 		return -EFAULT;
8934 	}
8935 	for (i = 0; i < state->acquired_refs; i++) {
8936 		if (state->refs[i].id == ref_obj_id) {
8937 			if (state->refs[i].release_on_unlock) {
8938 				verbose(env, "verifier internal error: expected false release_on_unlock");
8939 				return -EFAULT;
8940 			}
8941 			state->refs[i].release_on_unlock = true;
8942 			/* Now mark everyone sharing same ref_obj_id as untrusted */
8943 			bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({
8944 				if (reg->ref_obj_id == ref_obj_id)
8945 					reg->type |= PTR_UNTRUSTED;
8946 			}));
8947 			return 0;
8948 		}
8949 	}
8950 	verbose(env, "verifier internal error: ref state missing for ref_obj_id\n");
8951 	return -EFAULT;
8952 }
8953 
8954 /* Implementation details:
8955  *
8956  * Each register points to some region of memory, which we define as an
8957  * allocation. Each allocation may embed a bpf_spin_lock which protects any
8958  * special BPF objects (bpf_list_head, bpf_rb_root, etc.) part of the same
8959  * allocation. The lock and the data it protects are colocated in the same
8960  * memory region.
8961  *
8962  * Hence, everytime a register holds a pointer value pointing to such
8963  * allocation, the verifier preserves a unique reg->id for it.
8964  *
8965  * The verifier remembers the lock 'ptr' and the lock 'id' whenever
8966  * bpf_spin_lock is called.
8967  *
8968  * To enable this, lock state in the verifier captures two values:
8969  *	active_lock.ptr = Register's type specific pointer
8970  *	active_lock.id  = A unique ID for each register pointer value
8971  *
8972  * Currently, PTR_TO_MAP_VALUE and PTR_TO_BTF_ID | MEM_ALLOC are the two
8973  * supported register types.
8974  *
8975  * The active_lock.ptr in case of map values is the reg->map_ptr, and in case of
8976  * allocated objects is the reg->btf pointer.
8977  *
8978  * The active_lock.id is non-unique for maps supporting direct_value_addr, as we
8979  * can establish the provenance of the map value statically for each distinct
8980  * lookup into such maps. They always contain a single map value hence unique
8981  * IDs for each pseudo load pessimizes the algorithm and rejects valid programs.
8982  *
8983  * So, in case of global variables, they use array maps with max_entries = 1,
8984  * hence their active_lock.ptr becomes map_ptr and id = 0 (since they all point
8985  * into the same map value as max_entries is 1, as described above).
8986  *
8987  * In case of inner map lookups, the inner map pointer has same map_ptr as the
8988  * outer map pointer (in verifier context), but each lookup into an inner map
8989  * assigns a fresh reg->id to the lookup, so while lookups into distinct inner
8990  * maps from the same outer map share the same map_ptr as active_lock.ptr, they
8991  * will get different reg->id assigned to each lookup, hence different
8992  * active_lock.id.
8993  *
8994  * In case of allocated objects, active_lock.ptr is the reg->btf, and the
8995  * reg->id is a unique ID preserved after the NULL pointer check on the pointer
8996  * returned from bpf_obj_new. Each allocation receives a new reg->id.
8997  */
8998 static int check_reg_allocation_locked(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
8999 {
9000 	void *ptr;
9001 	u32 id;
9002 
9003 	switch ((int)reg->type) {
9004 	case PTR_TO_MAP_VALUE:
9005 		ptr = reg->map_ptr;
9006 		break;
9007 	case PTR_TO_BTF_ID | MEM_ALLOC:
9008 	case PTR_TO_BTF_ID | MEM_ALLOC | PTR_TRUSTED:
9009 		ptr = reg->btf;
9010 		break;
9011 	default:
9012 		verbose(env, "verifier internal error: unknown reg type for lock check\n");
9013 		return -EFAULT;
9014 	}
9015 	id = reg->id;
9016 
9017 	if (!env->cur_state->active_lock.ptr)
9018 		return -EINVAL;
9019 	if (env->cur_state->active_lock.ptr != ptr ||
9020 	    env->cur_state->active_lock.id != id) {
9021 		verbose(env, "held lock and object are not in the same allocation\n");
9022 		return -EINVAL;
9023 	}
9024 	return 0;
9025 }
9026 
9027 static bool is_bpf_list_api_kfunc(u32 btf_id)
9028 {
9029 	return btf_id == special_kfunc_list[KF_bpf_list_push_front] ||
9030 	       btf_id == special_kfunc_list[KF_bpf_list_push_back] ||
9031 	       btf_id == special_kfunc_list[KF_bpf_list_pop_front] ||
9032 	       btf_id == special_kfunc_list[KF_bpf_list_pop_back];
9033 }
9034 
9035 static int process_kf_arg_ptr_to_list_head(struct bpf_verifier_env *env,
9036 					   struct bpf_reg_state *reg, u32 regno,
9037 					   struct bpf_kfunc_call_arg_meta *meta)
9038 {
9039 	struct btf_field *field;
9040 	struct btf_record *rec;
9041 	u32 list_head_off;
9042 
9043 	if (meta->btf != btf_vmlinux || !is_bpf_list_api_kfunc(meta->func_id)) {
9044 		verbose(env, "verifier internal error: bpf_list_head argument for unknown kfunc\n");
9045 		return -EFAULT;
9046 	}
9047 
9048 	if (!tnum_is_const(reg->var_off)) {
9049 		verbose(env,
9050 			"R%d doesn't have constant offset. bpf_list_head has to be at the constant offset\n",
9051 			regno);
9052 		return -EINVAL;
9053 	}
9054 
9055 	rec = reg_btf_record(reg);
9056 	list_head_off = reg->off + reg->var_off.value;
9057 	field = btf_record_find(rec, list_head_off, BPF_LIST_HEAD);
9058 	if (!field) {
9059 		verbose(env, "bpf_list_head not found at offset=%u\n", list_head_off);
9060 		return -EINVAL;
9061 	}
9062 
9063 	/* All functions require bpf_list_head to be protected using a bpf_spin_lock */
9064 	if (check_reg_allocation_locked(env, reg)) {
9065 		verbose(env, "bpf_spin_lock at off=%d must be held for bpf_list_head\n",
9066 			rec->spin_lock_off);
9067 		return -EINVAL;
9068 	}
9069 
9070 	if (meta->arg_list_head.field) {
9071 		verbose(env, "verifier internal error: repeating bpf_list_head arg\n");
9072 		return -EFAULT;
9073 	}
9074 	meta->arg_list_head.field = field;
9075 	return 0;
9076 }
9077 
9078 static int process_kf_arg_ptr_to_list_node(struct bpf_verifier_env *env,
9079 					   struct bpf_reg_state *reg, u32 regno,
9080 					   struct bpf_kfunc_call_arg_meta *meta)
9081 {
9082 	const struct btf_type *et, *t;
9083 	struct btf_field *field;
9084 	struct btf_record *rec;
9085 	u32 list_node_off;
9086 
9087 	if (meta->btf != btf_vmlinux ||
9088 	    (meta->func_id != special_kfunc_list[KF_bpf_list_push_front] &&
9089 	     meta->func_id != special_kfunc_list[KF_bpf_list_push_back])) {
9090 		verbose(env, "verifier internal error: bpf_list_node argument for unknown kfunc\n");
9091 		return -EFAULT;
9092 	}
9093 
9094 	if (!tnum_is_const(reg->var_off)) {
9095 		verbose(env,
9096 			"R%d doesn't have constant offset. bpf_list_node has to be at the constant offset\n",
9097 			regno);
9098 		return -EINVAL;
9099 	}
9100 
9101 	rec = reg_btf_record(reg);
9102 	list_node_off = reg->off + reg->var_off.value;
9103 	field = btf_record_find(rec, list_node_off, BPF_LIST_NODE);
9104 	if (!field || field->offset != list_node_off) {
9105 		verbose(env, "bpf_list_node not found at offset=%u\n", list_node_off);
9106 		return -EINVAL;
9107 	}
9108 
9109 	field = meta->arg_list_head.field;
9110 
9111 	et = btf_type_by_id(field->graph_root.btf, field->graph_root.value_btf_id);
9112 	t = btf_type_by_id(reg->btf, reg->btf_id);
9113 	if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, 0, field->graph_root.btf,
9114 				  field->graph_root.value_btf_id, true)) {
9115 		verbose(env, "operation on bpf_list_head expects arg#1 bpf_list_node at offset=%d "
9116 			"in struct %s, but arg is at offset=%d in struct %s\n",
9117 			field->graph_root.node_offset,
9118 			btf_name_by_offset(field->graph_root.btf, et->name_off),
9119 			list_node_off, btf_name_by_offset(reg->btf, t->name_off));
9120 		return -EINVAL;
9121 	}
9122 
9123 	if (list_node_off != field->graph_root.node_offset) {
9124 		verbose(env, "arg#1 offset=%d, but expected bpf_list_node at offset=%d in struct %s\n",
9125 			list_node_off, field->graph_root.node_offset,
9126 			btf_name_by_offset(field->graph_root.btf, et->name_off));
9127 		return -EINVAL;
9128 	}
9129 	/* Set arg#1 for expiration after unlock */
9130 	return ref_set_release_on_unlock(env, reg->ref_obj_id);
9131 }
9132 
9133 static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_arg_meta *meta)
9134 {
9135 	const char *func_name = meta->func_name, *ref_tname;
9136 	const struct btf *btf = meta->btf;
9137 	const struct btf_param *args;
9138 	u32 i, nargs;
9139 	int ret;
9140 
9141 	args = (const struct btf_param *)(meta->func_proto + 1);
9142 	nargs = btf_type_vlen(meta->func_proto);
9143 	if (nargs > MAX_BPF_FUNC_REG_ARGS) {
9144 		verbose(env, "Function %s has %d > %d args\n", func_name, nargs,
9145 			MAX_BPF_FUNC_REG_ARGS);
9146 		return -EINVAL;
9147 	}
9148 
9149 	/* Check that BTF function arguments match actual types that the
9150 	 * verifier sees.
9151 	 */
9152 	for (i = 0; i < nargs; i++) {
9153 		struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[i + 1];
9154 		const struct btf_type *t, *ref_t, *resolve_ret;
9155 		enum bpf_arg_type arg_type = ARG_DONTCARE;
9156 		u32 regno = i + 1, ref_id, type_size;
9157 		bool is_ret_buf_sz = false;
9158 		int kf_arg_type;
9159 
9160 		t = btf_type_skip_modifiers(btf, args[i].type, NULL);
9161 
9162 		if (is_kfunc_arg_ignore(btf, &args[i]))
9163 			continue;
9164 
9165 		if (btf_type_is_scalar(t)) {
9166 			if (reg->type != SCALAR_VALUE) {
9167 				verbose(env, "R%d is not a scalar\n", regno);
9168 				return -EINVAL;
9169 			}
9170 
9171 			if (is_kfunc_arg_constant(meta->btf, &args[i])) {
9172 				if (meta->arg_constant.found) {
9173 					verbose(env, "verifier internal error: only one constant argument permitted\n");
9174 					return -EFAULT;
9175 				}
9176 				if (!tnum_is_const(reg->var_off)) {
9177 					verbose(env, "R%d must be a known constant\n", regno);
9178 					return -EINVAL;
9179 				}
9180 				ret = mark_chain_precision(env, regno);
9181 				if (ret < 0)
9182 					return ret;
9183 				meta->arg_constant.found = true;
9184 				meta->arg_constant.value = reg->var_off.value;
9185 			} else if (is_kfunc_arg_scalar_with_name(btf, &args[i], "rdonly_buf_size")) {
9186 				meta->r0_rdonly = true;
9187 				is_ret_buf_sz = true;
9188 			} else if (is_kfunc_arg_scalar_with_name(btf, &args[i], "rdwr_buf_size")) {
9189 				is_ret_buf_sz = true;
9190 			}
9191 
9192 			if (is_ret_buf_sz) {
9193 				if (meta->r0_size) {
9194 					verbose(env, "2 or more rdonly/rdwr_buf_size parameters for kfunc");
9195 					return -EINVAL;
9196 				}
9197 
9198 				if (!tnum_is_const(reg->var_off)) {
9199 					verbose(env, "R%d is not a const\n", regno);
9200 					return -EINVAL;
9201 				}
9202 
9203 				meta->r0_size = reg->var_off.value;
9204 				ret = mark_chain_precision(env, regno);
9205 				if (ret)
9206 					return ret;
9207 			}
9208 			continue;
9209 		}
9210 
9211 		if (!btf_type_is_ptr(t)) {
9212 			verbose(env, "Unrecognized arg#%d type %s\n", i, btf_type_str(t));
9213 			return -EINVAL;
9214 		}
9215 
9216 		if (is_kfunc_trusted_args(meta) &&
9217 		    (register_is_null(reg) || type_may_be_null(reg->type))) {
9218 			verbose(env, "Possibly NULL pointer passed to trusted arg%d\n", i);
9219 			return -EACCES;
9220 		}
9221 
9222 		if (reg->ref_obj_id) {
9223 			if (is_kfunc_release(meta) && meta->ref_obj_id) {
9224 				verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n",
9225 					regno, reg->ref_obj_id,
9226 					meta->ref_obj_id);
9227 				return -EFAULT;
9228 			}
9229 			meta->ref_obj_id = reg->ref_obj_id;
9230 			if (is_kfunc_release(meta))
9231 				meta->release_regno = regno;
9232 		}
9233 
9234 		ref_t = btf_type_skip_modifiers(btf, t->type, &ref_id);
9235 		ref_tname = btf_name_by_offset(btf, ref_t->name_off);
9236 
9237 		kf_arg_type = get_kfunc_ptr_arg_type(env, meta, t, ref_t, ref_tname, args, i, nargs);
9238 		if (kf_arg_type < 0)
9239 			return kf_arg_type;
9240 
9241 		switch (kf_arg_type) {
9242 		case KF_ARG_PTR_TO_ALLOC_BTF_ID:
9243 		case KF_ARG_PTR_TO_BTF_ID:
9244 			if (!is_kfunc_trusted_args(meta) && !is_kfunc_rcu(meta))
9245 				break;
9246 
9247 			if (!is_trusted_reg(reg)) {
9248 				if (!is_kfunc_rcu(meta)) {
9249 					verbose(env, "R%d must be referenced or trusted\n", regno);
9250 					return -EINVAL;
9251 				}
9252 				if (!is_rcu_reg(reg)) {
9253 					verbose(env, "R%d must be a rcu pointer\n", regno);
9254 					return -EINVAL;
9255 				}
9256 			}
9257 
9258 			fallthrough;
9259 		case KF_ARG_PTR_TO_CTX:
9260 			/* Trusted arguments have the same offset checks as release arguments */
9261 			arg_type |= OBJ_RELEASE;
9262 			break;
9263 		case KF_ARG_PTR_TO_KPTR:
9264 		case KF_ARG_PTR_TO_DYNPTR:
9265 		case KF_ARG_PTR_TO_LIST_HEAD:
9266 		case KF_ARG_PTR_TO_LIST_NODE:
9267 		case KF_ARG_PTR_TO_MEM:
9268 		case KF_ARG_PTR_TO_MEM_SIZE:
9269 			/* Trusted by default */
9270 			break;
9271 		default:
9272 			WARN_ON_ONCE(1);
9273 			return -EFAULT;
9274 		}
9275 
9276 		if (is_kfunc_release(meta) && reg->ref_obj_id)
9277 			arg_type |= OBJ_RELEASE;
9278 		ret = check_func_arg_reg_off(env, reg, regno, arg_type);
9279 		if (ret < 0)
9280 			return ret;
9281 
9282 		switch (kf_arg_type) {
9283 		case KF_ARG_PTR_TO_CTX:
9284 			if (reg->type != PTR_TO_CTX) {
9285 				verbose(env, "arg#%d expected pointer to ctx, but got %s\n", i, btf_type_str(t));
9286 				return -EINVAL;
9287 			}
9288 
9289 			if (meta->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx]) {
9290 				ret = get_kern_ctx_btf_id(&env->log, resolve_prog_type(env->prog));
9291 				if (ret < 0)
9292 					return -EINVAL;
9293 				meta->ret_btf_id  = ret;
9294 			}
9295 			break;
9296 		case KF_ARG_PTR_TO_ALLOC_BTF_ID:
9297 			if (reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) {
9298 				verbose(env, "arg#%d expected pointer to allocated object\n", i);
9299 				return -EINVAL;
9300 			}
9301 			if (!reg->ref_obj_id) {
9302 				verbose(env, "allocated object must be referenced\n");
9303 				return -EINVAL;
9304 			}
9305 			if (meta->btf == btf_vmlinux &&
9306 			    meta->func_id == special_kfunc_list[KF_bpf_obj_drop_impl]) {
9307 				meta->arg_obj_drop.btf = reg->btf;
9308 				meta->arg_obj_drop.btf_id = reg->btf_id;
9309 			}
9310 			break;
9311 		case KF_ARG_PTR_TO_KPTR:
9312 			if (reg->type != PTR_TO_MAP_VALUE) {
9313 				verbose(env, "arg#0 expected pointer to map value\n");
9314 				return -EINVAL;
9315 			}
9316 			ret = process_kf_arg_ptr_to_kptr(env, reg, ref_t, ref_tname, meta, i);
9317 			if (ret < 0)
9318 				return ret;
9319 			break;
9320 		case KF_ARG_PTR_TO_DYNPTR:
9321 			if (reg->type != PTR_TO_STACK &&
9322 			    reg->type != CONST_PTR_TO_DYNPTR) {
9323 				verbose(env, "arg#%d expected pointer to stack or dynptr_ptr\n", i);
9324 				return -EINVAL;
9325 			}
9326 
9327 			ret = process_dynptr_func(env, regno, ARG_PTR_TO_DYNPTR | MEM_RDONLY, NULL);
9328 			if (ret < 0)
9329 				return ret;
9330 			break;
9331 		case KF_ARG_PTR_TO_LIST_HEAD:
9332 			if (reg->type != PTR_TO_MAP_VALUE &&
9333 			    reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) {
9334 				verbose(env, "arg#%d expected pointer to map value or allocated object\n", i);
9335 				return -EINVAL;
9336 			}
9337 			if (reg->type == (PTR_TO_BTF_ID | MEM_ALLOC) && !reg->ref_obj_id) {
9338 				verbose(env, "allocated object must be referenced\n");
9339 				return -EINVAL;
9340 			}
9341 			ret = process_kf_arg_ptr_to_list_head(env, reg, regno, meta);
9342 			if (ret < 0)
9343 				return ret;
9344 			break;
9345 		case KF_ARG_PTR_TO_LIST_NODE:
9346 			if (reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) {
9347 				verbose(env, "arg#%d expected pointer to allocated object\n", i);
9348 				return -EINVAL;
9349 			}
9350 			if (!reg->ref_obj_id) {
9351 				verbose(env, "allocated object must be referenced\n");
9352 				return -EINVAL;
9353 			}
9354 			ret = process_kf_arg_ptr_to_list_node(env, reg, regno, meta);
9355 			if (ret < 0)
9356 				return ret;
9357 			break;
9358 		case KF_ARG_PTR_TO_BTF_ID:
9359 			/* Only base_type is checked, further checks are done here */
9360 			if ((base_type(reg->type) != PTR_TO_BTF_ID ||
9361 			     (bpf_type_has_unsafe_modifiers(reg->type) && !is_rcu_reg(reg))) &&
9362 			    !reg2btf_ids[base_type(reg->type)]) {
9363 				verbose(env, "arg#%d is %s ", i, reg_type_str(env, reg->type));
9364 				verbose(env, "expected %s or socket\n",
9365 					reg_type_str(env, base_type(reg->type) |
9366 							  (type_flag(reg->type) & BPF_REG_TRUSTED_MODIFIERS)));
9367 				return -EINVAL;
9368 			}
9369 			ret = process_kf_arg_ptr_to_btf_id(env, reg, ref_t, ref_tname, ref_id, meta, i);
9370 			if (ret < 0)
9371 				return ret;
9372 			break;
9373 		case KF_ARG_PTR_TO_MEM:
9374 			resolve_ret = btf_resolve_size(btf, ref_t, &type_size);
9375 			if (IS_ERR(resolve_ret)) {
9376 				verbose(env, "arg#%d reference type('%s %s') size cannot be determined: %ld\n",
9377 					i, btf_type_str(ref_t), ref_tname, PTR_ERR(resolve_ret));
9378 				return -EINVAL;
9379 			}
9380 			ret = check_mem_reg(env, reg, regno, type_size);
9381 			if (ret < 0)
9382 				return ret;
9383 			break;
9384 		case KF_ARG_PTR_TO_MEM_SIZE:
9385 			ret = check_kfunc_mem_size_reg(env, &regs[regno + 1], regno + 1);
9386 			if (ret < 0) {
9387 				verbose(env, "arg#%d arg#%d memory, len pair leads to invalid memory access\n", i, i + 1);
9388 				return ret;
9389 			}
9390 			/* Skip next '__sz' argument */
9391 			i++;
9392 			break;
9393 		}
9394 	}
9395 
9396 	if (is_kfunc_release(meta) && !meta->release_regno) {
9397 		verbose(env, "release kernel function %s expects refcounted PTR_TO_BTF_ID\n",
9398 			func_name);
9399 		return -EINVAL;
9400 	}
9401 
9402 	return 0;
9403 }
9404 
9405 static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
9406 			    int *insn_idx_p)
9407 {
9408 	const struct btf_type *t, *func, *func_proto, *ptr_type;
9409 	struct bpf_reg_state *regs = cur_regs(env);
9410 	const char *func_name, *ptr_type_name;
9411 	bool sleepable, rcu_lock, rcu_unlock;
9412 	struct bpf_kfunc_call_arg_meta meta;
9413 	u32 i, nargs, func_id, ptr_type_id;
9414 	int err, insn_idx = *insn_idx_p;
9415 	const struct btf_param *args;
9416 	const struct btf_type *ret_t;
9417 	struct btf *desc_btf;
9418 	u32 *kfunc_flags;
9419 
9420 	/* skip for now, but return error when we find this in fixup_kfunc_call */
9421 	if (!insn->imm)
9422 		return 0;
9423 
9424 	desc_btf = find_kfunc_desc_btf(env, insn->off);
9425 	if (IS_ERR(desc_btf))
9426 		return PTR_ERR(desc_btf);
9427 
9428 	func_id = insn->imm;
9429 	func = btf_type_by_id(desc_btf, func_id);
9430 	func_name = btf_name_by_offset(desc_btf, func->name_off);
9431 	func_proto = btf_type_by_id(desc_btf, func->type);
9432 
9433 	kfunc_flags = btf_kfunc_id_set_contains(desc_btf, resolve_prog_type(env->prog), func_id);
9434 	if (!kfunc_flags) {
9435 		verbose(env, "calling kernel function %s is not allowed\n",
9436 			func_name);
9437 		return -EACCES;
9438 	}
9439 
9440 	/* Prepare kfunc call metadata */
9441 	memset(&meta, 0, sizeof(meta));
9442 	meta.btf = desc_btf;
9443 	meta.func_id = func_id;
9444 	meta.kfunc_flags = *kfunc_flags;
9445 	meta.func_proto = func_proto;
9446 	meta.func_name = func_name;
9447 
9448 	if (is_kfunc_destructive(&meta) && !capable(CAP_SYS_BOOT)) {
9449 		verbose(env, "destructive kfunc calls require CAP_SYS_BOOT capability\n");
9450 		return -EACCES;
9451 	}
9452 
9453 	sleepable = is_kfunc_sleepable(&meta);
9454 	if (sleepable && !env->prog->aux->sleepable) {
9455 		verbose(env, "program must be sleepable to call sleepable kfunc %s\n", func_name);
9456 		return -EACCES;
9457 	}
9458 
9459 	rcu_lock = is_kfunc_bpf_rcu_read_lock(&meta);
9460 	rcu_unlock = is_kfunc_bpf_rcu_read_unlock(&meta);
9461 	if ((rcu_lock || rcu_unlock) && !env->rcu_tag_supported) {
9462 		verbose(env, "no vmlinux btf rcu tag support for kfunc %s\n", func_name);
9463 		return -EACCES;
9464 	}
9465 
9466 	if (env->cur_state->active_rcu_lock) {
9467 		struct bpf_func_state *state;
9468 		struct bpf_reg_state *reg;
9469 
9470 		if (rcu_lock) {
9471 			verbose(env, "nested rcu read lock (kernel function %s)\n", func_name);
9472 			return -EINVAL;
9473 		} else if (rcu_unlock) {
9474 			bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({
9475 				if (reg->type & MEM_RCU) {
9476 					reg->type &= ~(MEM_RCU | PTR_MAYBE_NULL);
9477 					reg->type |= PTR_UNTRUSTED;
9478 				}
9479 			}));
9480 			env->cur_state->active_rcu_lock = false;
9481 		} else if (sleepable) {
9482 			verbose(env, "kernel func %s is sleepable within rcu_read_lock region\n", func_name);
9483 			return -EACCES;
9484 		}
9485 	} else if (rcu_lock) {
9486 		env->cur_state->active_rcu_lock = true;
9487 	} else if (rcu_unlock) {
9488 		verbose(env, "unmatched rcu read unlock (kernel function %s)\n", func_name);
9489 		return -EINVAL;
9490 	}
9491 
9492 	/* Check the arguments */
9493 	err = check_kfunc_args(env, &meta);
9494 	if (err < 0)
9495 		return err;
9496 	/* In case of release function, we get register number of refcounted
9497 	 * PTR_TO_BTF_ID in bpf_kfunc_arg_meta, do the release now.
9498 	 */
9499 	if (meta.release_regno) {
9500 		err = release_reference(env, regs[meta.release_regno].ref_obj_id);
9501 		if (err) {
9502 			verbose(env, "kfunc %s#%d reference has not been acquired before\n",
9503 				func_name, func_id);
9504 			return err;
9505 		}
9506 	}
9507 
9508 	for (i = 0; i < CALLER_SAVED_REGS; i++)
9509 		mark_reg_not_init(env, regs, caller_saved[i]);
9510 
9511 	/* Check return type */
9512 	t = btf_type_skip_modifiers(desc_btf, func_proto->type, NULL);
9513 
9514 	if (is_kfunc_acquire(&meta) && !btf_type_is_struct_ptr(meta.btf, t)) {
9515 		/* Only exception is bpf_obj_new_impl */
9516 		if (meta.btf != btf_vmlinux || meta.func_id != special_kfunc_list[KF_bpf_obj_new_impl]) {
9517 			verbose(env, "acquire kernel function does not return PTR_TO_BTF_ID\n");
9518 			return -EINVAL;
9519 		}
9520 	}
9521 
9522 	if (btf_type_is_scalar(t)) {
9523 		mark_reg_unknown(env, regs, BPF_REG_0);
9524 		mark_btf_func_reg_size(env, BPF_REG_0, t->size);
9525 	} else if (btf_type_is_ptr(t)) {
9526 		ptr_type = btf_type_skip_modifiers(desc_btf, t->type, &ptr_type_id);
9527 
9528 		if (meta.btf == btf_vmlinux && btf_id_set_contains(&special_kfunc_set, meta.func_id)) {
9529 			if (meta.func_id == special_kfunc_list[KF_bpf_obj_new_impl]) {
9530 				struct btf *ret_btf;
9531 				u32 ret_btf_id;
9532 
9533 				if (unlikely(!bpf_global_ma_set))
9534 					return -ENOMEM;
9535 
9536 				if (((u64)(u32)meta.arg_constant.value) != meta.arg_constant.value) {
9537 					verbose(env, "local type ID argument must be in range [0, U32_MAX]\n");
9538 					return -EINVAL;
9539 				}
9540 
9541 				ret_btf = env->prog->aux->btf;
9542 				ret_btf_id = meta.arg_constant.value;
9543 
9544 				/* This may be NULL due to user not supplying a BTF */
9545 				if (!ret_btf) {
9546 					verbose(env, "bpf_obj_new requires prog BTF\n");
9547 					return -EINVAL;
9548 				}
9549 
9550 				ret_t = btf_type_by_id(ret_btf, ret_btf_id);
9551 				if (!ret_t || !__btf_type_is_struct(ret_t)) {
9552 					verbose(env, "bpf_obj_new type ID argument must be of a struct\n");
9553 					return -EINVAL;
9554 				}
9555 
9556 				mark_reg_known_zero(env, regs, BPF_REG_0);
9557 				regs[BPF_REG_0].type = PTR_TO_BTF_ID | MEM_ALLOC;
9558 				regs[BPF_REG_0].btf = ret_btf;
9559 				regs[BPF_REG_0].btf_id = ret_btf_id;
9560 
9561 				env->insn_aux_data[insn_idx].obj_new_size = ret_t->size;
9562 				env->insn_aux_data[insn_idx].kptr_struct_meta =
9563 					btf_find_struct_meta(ret_btf, ret_btf_id);
9564 			} else if (meta.func_id == special_kfunc_list[KF_bpf_obj_drop_impl]) {
9565 				env->insn_aux_data[insn_idx].kptr_struct_meta =
9566 					btf_find_struct_meta(meta.arg_obj_drop.btf,
9567 							     meta.arg_obj_drop.btf_id);
9568 			} else if (meta.func_id == special_kfunc_list[KF_bpf_list_pop_front] ||
9569 				   meta.func_id == special_kfunc_list[KF_bpf_list_pop_back]) {
9570 				struct btf_field *field = meta.arg_list_head.field;
9571 
9572 				mark_reg_known_zero(env, regs, BPF_REG_0);
9573 				regs[BPF_REG_0].type = PTR_TO_BTF_ID | MEM_ALLOC;
9574 				regs[BPF_REG_0].btf = field->graph_root.btf;
9575 				regs[BPF_REG_0].btf_id = field->graph_root.value_btf_id;
9576 				regs[BPF_REG_0].off = field->graph_root.node_offset;
9577 			} else if (meta.func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx]) {
9578 				mark_reg_known_zero(env, regs, BPF_REG_0);
9579 				regs[BPF_REG_0].type = PTR_TO_BTF_ID | PTR_TRUSTED;
9580 				regs[BPF_REG_0].btf = desc_btf;
9581 				regs[BPF_REG_0].btf_id = meta.ret_btf_id;
9582 			} else if (meta.func_id == special_kfunc_list[KF_bpf_rdonly_cast]) {
9583 				ret_t = btf_type_by_id(desc_btf, meta.arg_constant.value);
9584 				if (!ret_t || !btf_type_is_struct(ret_t)) {
9585 					verbose(env,
9586 						"kfunc bpf_rdonly_cast type ID argument must be of a struct\n");
9587 					return -EINVAL;
9588 				}
9589 
9590 				mark_reg_known_zero(env, regs, BPF_REG_0);
9591 				regs[BPF_REG_0].type = PTR_TO_BTF_ID | PTR_UNTRUSTED;
9592 				regs[BPF_REG_0].btf = desc_btf;
9593 				regs[BPF_REG_0].btf_id = meta.arg_constant.value;
9594 			} else {
9595 				verbose(env, "kernel function %s unhandled dynamic return type\n",
9596 					meta.func_name);
9597 				return -EFAULT;
9598 			}
9599 		} else if (!__btf_type_is_struct(ptr_type)) {
9600 			if (!meta.r0_size) {
9601 				ptr_type_name = btf_name_by_offset(desc_btf,
9602 								   ptr_type->name_off);
9603 				verbose(env,
9604 					"kernel function %s returns pointer type %s %s is not supported\n",
9605 					func_name,
9606 					btf_type_str(ptr_type),
9607 					ptr_type_name);
9608 				return -EINVAL;
9609 			}
9610 
9611 			mark_reg_known_zero(env, regs, BPF_REG_0);
9612 			regs[BPF_REG_0].type = PTR_TO_MEM;
9613 			regs[BPF_REG_0].mem_size = meta.r0_size;
9614 
9615 			if (meta.r0_rdonly)
9616 				regs[BPF_REG_0].type |= MEM_RDONLY;
9617 
9618 			/* Ensures we don't access the memory after a release_reference() */
9619 			if (meta.ref_obj_id)
9620 				regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id;
9621 		} else {
9622 			mark_reg_known_zero(env, regs, BPF_REG_0);
9623 			regs[BPF_REG_0].btf = desc_btf;
9624 			regs[BPF_REG_0].type = PTR_TO_BTF_ID;
9625 			regs[BPF_REG_0].btf_id = ptr_type_id;
9626 		}
9627 
9628 		if (is_kfunc_ret_null(&meta)) {
9629 			regs[BPF_REG_0].type |= PTR_MAYBE_NULL;
9630 			/* For mark_ptr_or_null_reg, see 93c230e3f5bd6 */
9631 			regs[BPF_REG_0].id = ++env->id_gen;
9632 		}
9633 		mark_btf_func_reg_size(env, BPF_REG_0, sizeof(void *));
9634 		if (is_kfunc_acquire(&meta)) {
9635 			int id = acquire_reference_state(env, insn_idx);
9636 
9637 			if (id < 0)
9638 				return id;
9639 			if (is_kfunc_ret_null(&meta))
9640 				regs[BPF_REG_0].id = id;
9641 			regs[BPF_REG_0].ref_obj_id = id;
9642 		}
9643 		if (reg_may_point_to_spin_lock(&regs[BPF_REG_0]) && !regs[BPF_REG_0].id)
9644 			regs[BPF_REG_0].id = ++env->id_gen;
9645 	} /* else { add_kfunc_call() ensures it is btf_type_is_void(t) } */
9646 
9647 	nargs = btf_type_vlen(func_proto);
9648 	args = (const struct btf_param *)(func_proto + 1);
9649 	for (i = 0; i < nargs; i++) {
9650 		u32 regno = i + 1;
9651 
9652 		t = btf_type_skip_modifiers(desc_btf, args[i].type, NULL);
9653 		if (btf_type_is_ptr(t))
9654 			mark_btf_func_reg_size(env, regno, sizeof(void *));
9655 		else
9656 			/* scalar. ensured by btf_check_kfunc_arg_match() */
9657 			mark_btf_func_reg_size(env, regno, t->size);
9658 	}
9659 
9660 	return 0;
9661 }
9662 
9663 static bool signed_add_overflows(s64 a, s64 b)
9664 {
9665 	/* Do the add in u64, where overflow is well-defined */
9666 	s64 res = (s64)((u64)a + (u64)b);
9667 
9668 	if (b < 0)
9669 		return res > a;
9670 	return res < a;
9671 }
9672 
9673 static bool signed_add32_overflows(s32 a, s32 b)
9674 {
9675 	/* Do the add in u32, where overflow is well-defined */
9676 	s32 res = (s32)((u32)a + (u32)b);
9677 
9678 	if (b < 0)
9679 		return res > a;
9680 	return res < a;
9681 }
9682 
9683 static bool signed_sub_overflows(s64 a, s64 b)
9684 {
9685 	/* Do the sub in u64, where overflow is well-defined */
9686 	s64 res = (s64)((u64)a - (u64)b);
9687 
9688 	if (b < 0)
9689 		return res < a;
9690 	return res > a;
9691 }
9692 
9693 static bool signed_sub32_overflows(s32 a, s32 b)
9694 {
9695 	/* Do the sub in u32, where overflow is well-defined */
9696 	s32 res = (s32)((u32)a - (u32)b);
9697 
9698 	if (b < 0)
9699 		return res < a;
9700 	return res > a;
9701 }
9702 
9703 static bool check_reg_sane_offset(struct bpf_verifier_env *env,
9704 				  const struct bpf_reg_state *reg,
9705 				  enum bpf_reg_type type)
9706 {
9707 	bool known = tnum_is_const(reg->var_off);
9708 	s64 val = reg->var_off.value;
9709 	s64 smin = reg->smin_value;
9710 
9711 	if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) {
9712 		verbose(env, "math between %s pointer and %lld is not allowed\n",
9713 			reg_type_str(env, type), val);
9714 		return false;
9715 	}
9716 
9717 	if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) {
9718 		verbose(env, "%s pointer offset %d is not allowed\n",
9719 			reg_type_str(env, type), reg->off);
9720 		return false;
9721 	}
9722 
9723 	if (smin == S64_MIN) {
9724 		verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n",
9725 			reg_type_str(env, type));
9726 		return false;
9727 	}
9728 
9729 	if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) {
9730 		verbose(env, "value %lld makes %s pointer be out of bounds\n",
9731 			smin, reg_type_str(env, type));
9732 		return false;
9733 	}
9734 
9735 	return true;
9736 }
9737 
9738 enum {
9739 	REASON_BOUNDS	= -1,
9740 	REASON_TYPE	= -2,
9741 	REASON_PATHS	= -3,
9742 	REASON_LIMIT	= -4,
9743 	REASON_STACK	= -5,
9744 };
9745 
9746 static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
9747 			      u32 *alu_limit, bool mask_to_left)
9748 {
9749 	u32 max = 0, ptr_limit = 0;
9750 
9751 	switch (ptr_reg->type) {
9752 	case PTR_TO_STACK:
9753 		/* Offset 0 is out-of-bounds, but acceptable start for the
9754 		 * left direction, see BPF_REG_FP. Also, unknown scalar
9755 		 * offset where we would need to deal with min/max bounds is
9756 		 * currently prohibited for unprivileged.
9757 		 */
9758 		max = MAX_BPF_STACK + mask_to_left;
9759 		ptr_limit = -(ptr_reg->var_off.value + ptr_reg->off);
9760 		break;
9761 	case PTR_TO_MAP_VALUE:
9762 		max = ptr_reg->map_ptr->value_size;
9763 		ptr_limit = (mask_to_left ?
9764 			     ptr_reg->smin_value :
9765 			     ptr_reg->umax_value) + ptr_reg->off;
9766 		break;
9767 	default:
9768 		return REASON_TYPE;
9769 	}
9770 
9771 	if (ptr_limit >= max)
9772 		return REASON_LIMIT;
9773 	*alu_limit = ptr_limit;
9774 	return 0;
9775 }
9776 
9777 static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env,
9778 				    const struct bpf_insn *insn)
9779 {
9780 	return env->bypass_spec_v1 || BPF_SRC(insn->code) == BPF_K;
9781 }
9782 
9783 static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux,
9784 				       u32 alu_state, u32 alu_limit)
9785 {
9786 	/* If we arrived here from different branches with different
9787 	 * state or limits to sanitize, then this won't work.
9788 	 */
9789 	if (aux->alu_state &&
9790 	    (aux->alu_state != alu_state ||
9791 	     aux->alu_limit != alu_limit))
9792 		return REASON_PATHS;
9793 
9794 	/* Corresponding fixup done in do_misc_fixups(). */
9795 	aux->alu_state = alu_state;
9796 	aux->alu_limit = alu_limit;
9797 	return 0;
9798 }
9799 
9800 static int sanitize_val_alu(struct bpf_verifier_env *env,
9801 			    struct bpf_insn *insn)
9802 {
9803 	struct bpf_insn_aux_data *aux = cur_aux(env);
9804 
9805 	if (can_skip_alu_sanitation(env, insn))
9806 		return 0;
9807 
9808 	return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0);
9809 }
9810 
9811 static bool sanitize_needed(u8 opcode)
9812 {
9813 	return opcode == BPF_ADD || opcode == BPF_SUB;
9814 }
9815 
9816 struct bpf_sanitize_info {
9817 	struct bpf_insn_aux_data aux;
9818 	bool mask_to_left;
9819 };
9820 
9821 static struct bpf_verifier_state *
9822 sanitize_speculative_path(struct bpf_verifier_env *env,
9823 			  const struct bpf_insn *insn,
9824 			  u32 next_idx, u32 curr_idx)
9825 {
9826 	struct bpf_verifier_state *branch;
9827 	struct bpf_reg_state *regs;
9828 
9829 	branch = push_stack(env, next_idx, curr_idx, true);
9830 	if (branch && insn) {
9831 		regs = branch->frame[branch->curframe]->regs;
9832 		if (BPF_SRC(insn->code) == BPF_K) {
9833 			mark_reg_unknown(env, regs, insn->dst_reg);
9834 		} else if (BPF_SRC(insn->code) == BPF_X) {
9835 			mark_reg_unknown(env, regs, insn->dst_reg);
9836 			mark_reg_unknown(env, regs, insn->src_reg);
9837 		}
9838 	}
9839 	return branch;
9840 }
9841 
9842 static int sanitize_ptr_alu(struct bpf_verifier_env *env,
9843 			    struct bpf_insn *insn,
9844 			    const struct bpf_reg_state *ptr_reg,
9845 			    const struct bpf_reg_state *off_reg,
9846 			    struct bpf_reg_state *dst_reg,
9847 			    struct bpf_sanitize_info *info,
9848 			    const bool commit_window)
9849 {
9850 	struct bpf_insn_aux_data *aux = commit_window ? cur_aux(env) : &info->aux;
9851 	struct bpf_verifier_state *vstate = env->cur_state;
9852 	bool off_is_imm = tnum_is_const(off_reg->var_off);
9853 	bool off_is_neg = off_reg->smin_value < 0;
9854 	bool ptr_is_dst_reg = ptr_reg == dst_reg;
9855 	u8 opcode = BPF_OP(insn->code);
9856 	u32 alu_state, alu_limit;
9857 	struct bpf_reg_state tmp;
9858 	bool ret;
9859 	int err;
9860 
9861 	if (can_skip_alu_sanitation(env, insn))
9862 		return 0;
9863 
9864 	/* We already marked aux for masking from non-speculative
9865 	 * paths, thus we got here in the first place. We only care
9866 	 * to explore bad access from here.
9867 	 */
9868 	if (vstate->speculative)
9869 		goto do_sim;
9870 
9871 	if (!commit_window) {
9872 		if (!tnum_is_const(off_reg->var_off) &&
9873 		    (off_reg->smin_value < 0) != (off_reg->smax_value < 0))
9874 			return REASON_BOUNDS;
9875 
9876 		info->mask_to_left = (opcode == BPF_ADD &&  off_is_neg) ||
9877 				     (opcode == BPF_SUB && !off_is_neg);
9878 	}
9879 
9880 	err = retrieve_ptr_limit(ptr_reg, &alu_limit, info->mask_to_left);
9881 	if (err < 0)
9882 		return err;
9883 
9884 	if (commit_window) {
9885 		/* In commit phase we narrow the masking window based on
9886 		 * the observed pointer move after the simulated operation.
9887 		 */
9888 		alu_state = info->aux.alu_state;
9889 		alu_limit = abs(info->aux.alu_limit - alu_limit);
9890 	} else {
9891 		alu_state  = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
9892 		alu_state |= off_is_imm ? BPF_ALU_IMMEDIATE : 0;
9893 		alu_state |= ptr_is_dst_reg ?
9894 			     BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
9895 
9896 		/* Limit pruning on unknown scalars to enable deep search for
9897 		 * potential masking differences from other program paths.
9898 		 */
9899 		if (!off_is_imm)
9900 			env->explore_alu_limits = true;
9901 	}
9902 
9903 	err = update_alu_sanitation_state(aux, alu_state, alu_limit);
9904 	if (err < 0)
9905 		return err;
9906 do_sim:
9907 	/* If we're in commit phase, we're done here given we already
9908 	 * pushed the truncated dst_reg into the speculative verification
9909 	 * stack.
9910 	 *
9911 	 * Also, when register is a known constant, we rewrite register-based
9912 	 * operation to immediate-based, and thus do not need masking (and as
9913 	 * a consequence, do not need to simulate the zero-truncation either).
9914 	 */
9915 	if (commit_window || off_is_imm)
9916 		return 0;
9917 
9918 	/* Simulate and find potential out-of-bounds access under
9919 	 * speculative execution from truncation as a result of
9920 	 * masking when off was not within expected range. If off
9921 	 * sits in dst, then we temporarily need to move ptr there
9922 	 * to simulate dst (== 0) +/-= ptr. Needed, for example,
9923 	 * for cases where we use K-based arithmetic in one direction
9924 	 * and truncated reg-based in the other in order to explore
9925 	 * bad access.
9926 	 */
9927 	if (!ptr_is_dst_reg) {
9928 		tmp = *dst_reg;
9929 		copy_register_state(dst_reg, ptr_reg);
9930 	}
9931 	ret = sanitize_speculative_path(env, NULL, env->insn_idx + 1,
9932 					env->insn_idx);
9933 	if (!ptr_is_dst_reg && ret)
9934 		*dst_reg = tmp;
9935 	return !ret ? REASON_STACK : 0;
9936 }
9937 
9938 static void sanitize_mark_insn_seen(struct bpf_verifier_env *env)
9939 {
9940 	struct bpf_verifier_state *vstate = env->cur_state;
9941 
9942 	/* If we simulate paths under speculation, we don't update the
9943 	 * insn as 'seen' such that when we verify unreachable paths in
9944 	 * the non-speculative domain, sanitize_dead_code() can still
9945 	 * rewrite/sanitize them.
9946 	 */
9947 	if (!vstate->speculative)
9948 		env->insn_aux_data[env->insn_idx].seen = env->pass_cnt;
9949 }
9950 
9951 static int sanitize_err(struct bpf_verifier_env *env,
9952 			const struct bpf_insn *insn, int reason,
9953 			const struct bpf_reg_state *off_reg,
9954 			const struct bpf_reg_state *dst_reg)
9955 {
9956 	static const char *err = "pointer arithmetic with it prohibited for !root";
9957 	const char *op = BPF_OP(insn->code) == BPF_ADD ? "add" : "sub";
9958 	u32 dst = insn->dst_reg, src = insn->src_reg;
9959 
9960 	switch (reason) {
9961 	case REASON_BOUNDS:
9962 		verbose(env, "R%d has unknown scalar with mixed signed bounds, %s\n",
9963 			off_reg == dst_reg ? dst : src, err);
9964 		break;
9965 	case REASON_TYPE:
9966 		verbose(env, "R%d has pointer with unsupported alu operation, %s\n",
9967 			off_reg == dst_reg ? src : dst, err);
9968 		break;
9969 	case REASON_PATHS:
9970 		verbose(env, "R%d tried to %s from different maps, paths or scalars, %s\n",
9971 			dst, op, err);
9972 		break;
9973 	case REASON_LIMIT:
9974 		verbose(env, "R%d tried to %s beyond pointer bounds, %s\n",
9975 			dst, op, err);
9976 		break;
9977 	case REASON_STACK:
9978 		verbose(env, "R%d could not be pushed for speculative verification, %s\n",
9979 			dst, err);
9980 		break;
9981 	default:
9982 		verbose(env, "verifier internal error: unknown reason (%d)\n",
9983 			reason);
9984 		break;
9985 	}
9986 
9987 	return -EACCES;
9988 }
9989 
9990 /* check that stack access falls within stack limits and that 'reg' doesn't
9991  * have a variable offset.
9992  *
9993  * Variable offset is prohibited for unprivileged mode for simplicity since it
9994  * requires corresponding support in Spectre masking for stack ALU.  See also
9995  * retrieve_ptr_limit().
9996  *
9997  *
9998  * 'off' includes 'reg->off'.
9999  */
10000 static int check_stack_access_for_ptr_arithmetic(
10001 				struct bpf_verifier_env *env,
10002 				int regno,
10003 				const struct bpf_reg_state *reg,
10004 				int off)
10005 {
10006 	if (!tnum_is_const(reg->var_off)) {
10007 		char tn_buf[48];
10008 
10009 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
10010 		verbose(env, "R%d variable stack access prohibited for !root, var_off=%s off=%d\n",
10011 			regno, tn_buf, off);
10012 		return -EACCES;
10013 	}
10014 
10015 	if (off >= 0 || off < -MAX_BPF_STACK) {
10016 		verbose(env, "R%d stack pointer arithmetic goes out of range, "
10017 			"prohibited for !root; off=%d\n", regno, off);
10018 		return -EACCES;
10019 	}
10020 
10021 	return 0;
10022 }
10023 
10024 static int sanitize_check_bounds(struct bpf_verifier_env *env,
10025 				 const struct bpf_insn *insn,
10026 				 const struct bpf_reg_state *dst_reg)
10027 {
10028 	u32 dst = insn->dst_reg;
10029 
10030 	/* For unprivileged we require that resulting offset must be in bounds
10031 	 * in order to be able to sanitize access later on.
10032 	 */
10033 	if (env->bypass_spec_v1)
10034 		return 0;
10035 
10036 	switch (dst_reg->type) {
10037 	case PTR_TO_STACK:
10038 		if (check_stack_access_for_ptr_arithmetic(env, dst, dst_reg,
10039 					dst_reg->off + dst_reg->var_off.value))
10040 			return -EACCES;
10041 		break;
10042 	case PTR_TO_MAP_VALUE:
10043 		if (check_map_access(env, dst, dst_reg->off, 1, false, ACCESS_HELPER)) {
10044 			verbose(env, "R%d pointer arithmetic of map value goes out of range, "
10045 				"prohibited for !root\n", dst);
10046 			return -EACCES;
10047 		}
10048 		break;
10049 	default:
10050 		break;
10051 	}
10052 
10053 	return 0;
10054 }
10055 
10056 /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
10057  * Caller should also handle BPF_MOV case separately.
10058  * If we return -EACCES, caller may want to try again treating pointer as a
10059  * scalar.  So we only emit a diagnostic if !env->allow_ptr_leaks.
10060  */
10061 static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
10062 				   struct bpf_insn *insn,
10063 				   const struct bpf_reg_state *ptr_reg,
10064 				   const struct bpf_reg_state *off_reg)
10065 {
10066 	struct bpf_verifier_state *vstate = env->cur_state;
10067 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
10068 	struct bpf_reg_state *regs = state->regs, *dst_reg;
10069 	bool known = tnum_is_const(off_reg->var_off);
10070 	s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value,
10071 	    smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
10072 	u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
10073 	    umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
10074 	struct bpf_sanitize_info info = {};
10075 	u8 opcode = BPF_OP(insn->code);
10076 	u32 dst = insn->dst_reg;
10077 	int ret;
10078 
10079 	dst_reg = &regs[dst];
10080 
10081 	if ((known && (smin_val != smax_val || umin_val != umax_val)) ||
10082 	    smin_val > smax_val || umin_val > umax_val) {
10083 		/* Taint dst register if offset had invalid bounds derived from
10084 		 * e.g. dead branches.
10085 		 */
10086 		__mark_reg_unknown(env, dst_reg);
10087 		return 0;
10088 	}
10089 
10090 	if (BPF_CLASS(insn->code) != BPF_ALU64) {
10091 		/* 32-bit ALU ops on pointers produce (meaningless) scalars */
10092 		if (opcode == BPF_SUB && env->allow_ptr_leaks) {
10093 			__mark_reg_unknown(env, dst_reg);
10094 			return 0;
10095 		}
10096 
10097 		verbose(env,
10098 			"R%d 32-bit pointer arithmetic prohibited\n",
10099 			dst);
10100 		return -EACCES;
10101 	}
10102 
10103 	if (ptr_reg->type & PTR_MAYBE_NULL) {
10104 		verbose(env, "R%d pointer arithmetic on %s prohibited, null-check it first\n",
10105 			dst, reg_type_str(env, ptr_reg->type));
10106 		return -EACCES;
10107 	}
10108 
10109 	switch (base_type(ptr_reg->type)) {
10110 	case CONST_PTR_TO_MAP:
10111 		/* smin_val represents the known value */
10112 		if (known && smin_val == 0 && opcode == BPF_ADD)
10113 			break;
10114 		fallthrough;
10115 	case PTR_TO_PACKET_END:
10116 	case PTR_TO_SOCKET:
10117 	case PTR_TO_SOCK_COMMON:
10118 	case PTR_TO_TCP_SOCK:
10119 	case PTR_TO_XDP_SOCK:
10120 		verbose(env, "R%d pointer arithmetic on %s prohibited\n",
10121 			dst, reg_type_str(env, ptr_reg->type));
10122 		return -EACCES;
10123 	default:
10124 		break;
10125 	}
10126 
10127 	/* In case of 'scalar += pointer', dst_reg inherits pointer type and id.
10128 	 * The id may be overwritten later if we create a new variable offset.
10129 	 */
10130 	dst_reg->type = ptr_reg->type;
10131 	dst_reg->id = ptr_reg->id;
10132 
10133 	if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) ||
10134 	    !check_reg_sane_offset(env, ptr_reg, ptr_reg->type))
10135 		return -EINVAL;
10136 
10137 	/* pointer types do not carry 32-bit bounds at the moment. */
10138 	__mark_reg32_unbounded(dst_reg);
10139 
10140 	if (sanitize_needed(opcode)) {
10141 		ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg,
10142 				       &info, false);
10143 		if (ret < 0)
10144 			return sanitize_err(env, insn, ret, off_reg, dst_reg);
10145 	}
10146 
10147 	switch (opcode) {
10148 	case BPF_ADD:
10149 		/* We can take a fixed offset as long as it doesn't overflow
10150 		 * the s32 'off' field
10151 		 */
10152 		if (known && (ptr_reg->off + smin_val ==
10153 			      (s64)(s32)(ptr_reg->off + smin_val))) {
10154 			/* pointer += K.  Accumulate it into fixed offset */
10155 			dst_reg->smin_value = smin_ptr;
10156 			dst_reg->smax_value = smax_ptr;
10157 			dst_reg->umin_value = umin_ptr;
10158 			dst_reg->umax_value = umax_ptr;
10159 			dst_reg->var_off = ptr_reg->var_off;
10160 			dst_reg->off = ptr_reg->off + smin_val;
10161 			dst_reg->raw = ptr_reg->raw;
10162 			break;
10163 		}
10164 		/* A new variable offset is created.  Note that off_reg->off
10165 		 * == 0, since it's a scalar.
10166 		 * dst_reg gets the pointer type and since some positive
10167 		 * integer value was added to the pointer, give it a new 'id'
10168 		 * if it's a PTR_TO_PACKET.
10169 		 * this creates a new 'base' pointer, off_reg (variable) gets
10170 		 * added into the variable offset, and we copy the fixed offset
10171 		 * from ptr_reg.
10172 		 */
10173 		if (signed_add_overflows(smin_ptr, smin_val) ||
10174 		    signed_add_overflows(smax_ptr, smax_val)) {
10175 			dst_reg->smin_value = S64_MIN;
10176 			dst_reg->smax_value = S64_MAX;
10177 		} else {
10178 			dst_reg->smin_value = smin_ptr + smin_val;
10179 			dst_reg->smax_value = smax_ptr + smax_val;
10180 		}
10181 		if (umin_ptr + umin_val < umin_ptr ||
10182 		    umax_ptr + umax_val < umax_ptr) {
10183 			dst_reg->umin_value = 0;
10184 			dst_reg->umax_value = U64_MAX;
10185 		} else {
10186 			dst_reg->umin_value = umin_ptr + umin_val;
10187 			dst_reg->umax_value = umax_ptr + umax_val;
10188 		}
10189 		dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off);
10190 		dst_reg->off = ptr_reg->off;
10191 		dst_reg->raw = ptr_reg->raw;
10192 		if (reg_is_pkt_pointer(ptr_reg)) {
10193 			dst_reg->id = ++env->id_gen;
10194 			/* something was added to pkt_ptr, set range to zero */
10195 			memset(&dst_reg->raw, 0, sizeof(dst_reg->raw));
10196 		}
10197 		break;
10198 	case BPF_SUB:
10199 		if (dst_reg == off_reg) {
10200 			/* scalar -= pointer.  Creates an unknown scalar */
10201 			verbose(env, "R%d tried to subtract pointer from scalar\n",
10202 				dst);
10203 			return -EACCES;
10204 		}
10205 		/* We don't allow subtraction from FP, because (according to
10206 		 * test_verifier.c test "invalid fp arithmetic", JITs might not
10207 		 * be able to deal with it.
10208 		 */
10209 		if (ptr_reg->type == PTR_TO_STACK) {
10210 			verbose(env, "R%d subtraction from stack pointer prohibited\n",
10211 				dst);
10212 			return -EACCES;
10213 		}
10214 		if (known && (ptr_reg->off - smin_val ==
10215 			      (s64)(s32)(ptr_reg->off - smin_val))) {
10216 			/* pointer -= K.  Subtract it from fixed offset */
10217 			dst_reg->smin_value = smin_ptr;
10218 			dst_reg->smax_value = smax_ptr;
10219 			dst_reg->umin_value = umin_ptr;
10220 			dst_reg->umax_value = umax_ptr;
10221 			dst_reg->var_off = ptr_reg->var_off;
10222 			dst_reg->id = ptr_reg->id;
10223 			dst_reg->off = ptr_reg->off - smin_val;
10224 			dst_reg->raw = ptr_reg->raw;
10225 			break;
10226 		}
10227 		/* A new variable offset is created.  If the subtrahend is known
10228 		 * nonnegative, then any reg->range we had before is still good.
10229 		 */
10230 		if (signed_sub_overflows(smin_ptr, smax_val) ||
10231 		    signed_sub_overflows(smax_ptr, smin_val)) {
10232 			/* Overflow possible, we know nothing */
10233 			dst_reg->smin_value = S64_MIN;
10234 			dst_reg->smax_value = S64_MAX;
10235 		} else {
10236 			dst_reg->smin_value = smin_ptr - smax_val;
10237 			dst_reg->smax_value = smax_ptr - smin_val;
10238 		}
10239 		if (umin_ptr < umax_val) {
10240 			/* Overflow possible, we know nothing */
10241 			dst_reg->umin_value = 0;
10242 			dst_reg->umax_value = U64_MAX;
10243 		} else {
10244 			/* Cannot overflow (as long as bounds are consistent) */
10245 			dst_reg->umin_value = umin_ptr - umax_val;
10246 			dst_reg->umax_value = umax_ptr - umin_val;
10247 		}
10248 		dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off);
10249 		dst_reg->off = ptr_reg->off;
10250 		dst_reg->raw = ptr_reg->raw;
10251 		if (reg_is_pkt_pointer(ptr_reg)) {
10252 			dst_reg->id = ++env->id_gen;
10253 			/* something was added to pkt_ptr, set range to zero */
10254 			if (smin_val < 0)
10255 				memset(&dst_reg->raw, 0, sizeof(dst_reg->raw));
10256 		}
10257 		break;
10258 	case BPF_AND:
10259 	case BPF_OR:
10260 	case BPF_XOR:
10261 		/* bitwise ops on pointers are troublesome, prohibit. */
10262 		verbose(env, "R%d bitwise operator %s on pointer prohibited\n",
10263 			dst, bpf_alu_string[opcode >> 4]);
10264 		return -EACCES;
10265 	default:
10266 		/* other operators (e.g. MUL,LSH) produce non-pointer results */
10267 		verbose(env, "R%d pointer arithmetic with %s operator prohibited\n",
10268 			dst, bpf_alu_string[opcode >> 4]);
10269 		return -EACCES;
10270 	}
10271 
10272 	if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type))
10273 		return -EINVAL;
10274 	reg_bounds_sync(dst_reg);
10275 	if (sanitize_check_bounds(env, insn, dst_reg) < 0)
10276 		return -EACCES;
10277 	if (sanitize_needed(opcode)) {
10278 		ret = sanitize_ptr_alu(env, insn, dst_reg, off_reg, dst_reg,
10279 				       &info, true);
10280 		if (ret < 0)
10281 			return sanitize_err(env, insn, ret, off_reg, dst_reg);
10282 	}
10283 
10284 	return 0;
10285 }
10286 
10287 static void scalar32_min_max_add(struct bpf_reg_state *dst_reg,
10288 				 struct bpf_reg_state *src_reg)
10289 {
10290 	s32 smin_val = src_reg->s32_min_value;
10291 	s32 smax_val = src_reg->s32_max_value;
10292 	u32 umin_val = src_reg->u32_min_value;
10293 	u32 umax_val = src_reg->u32_max_value;
10294 
10295 	if (signed_add32_overflows(dst_reg->s32_min_value, smin_val) ||
10296 	    signed_add32_overflows(dst_reg->s32_max_value, smax_val)) {
10297 		dst_reg->s32_min_value = S32_MIN;
10298 		dst_reg->s32_max_value = S32_MAX;
10299 	} else {
10300 		dst_reg->s32_min_value += smin_val;
10301 		dst_reg->s32_max_value += smax_val;
10302 	}
10303 	if (dst_reg->u32_min_value + umin_val < umin_val ||
10304 	    dst_reg->u32_max_value + umax_val < umax_val) {
10305 		dst_reg->u32_min_value = 0;
10306 		dst_reg->u32_max_value = U32_MAX;
10307 	} else {
10308 		dst_reg->u32_min_value += umin_val;
10309 		dst_reg->u32_max_value += umax_val;
10310 	}
10311 }
10312 
10313 static void scalar_min_max_add(struct bpf_reg_state *dst_reg,
10314 			       struct bpf_reg_state *src_reg)
10315 {
10316 	s64 smin_val = src_reg->smin_value;
10317 	s64 smax_val = src_reg->smax_value;
10318 	u64 umin_val = src_reg->umin_value;
10319 	u64 umax_val = src_reg->umax_value;
10320 
10321 	if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
10322 	    signed_add_overflows(dst_reg->smax_value, smax_val)) {
10323 		dst_reg->smin_value = S64_MIN;
10324 		dst_reg->smax_value = S64_MAX;
10325 	} else {
10326 		dst_reg->smin_value += smin_val;
10327 		dst_reg->smax_value += smax_val;
10328 	}
10329 	if (dst_reg->umin_value + umin_val < umin_val ||
10330 	    dst_reg->umax_value + umax_val < umax_val) {
10331 		dst_reg->umin_value = 0;
10332 		dst_reg->umax_value = U64_MAX;
10333 	} else {
10334 		dst_reg->umin_value += umin_val;
10335 		dst_reg->umax_value += umax_val;
10336 	}
10337 }
10338 
10339 static void scalar32_min_max_sub(struct bpf_reg_state *dst_reg,
10340 				 struct bpf_reg_state *src_reg)
10341 {
10342 	s32 smin_val = src_reg->s32_min_value;
10343 	s32 smax_val = src_reg->s32_max_value;
10344 	u32 umin_val = src_reg->u32_min_value;
10345 	u32 umax_val = src_reg->u32_max_value;
10346 
10347 	if (signed_sub32_overflows(dst_reg->s32_min_value, smax_val) ||
10348 	    signed_sub32_overflows(dst_reg->s32_max_value, smin_val)) {
10349 		/* Overflow possible, we know nothing */
10350 		dst_reg->s32_min_value = S32_MIN;
10351 		dst_reg->s32_max_value = S32_MAX;
10352 	} else {
10353 		dst_reg->s32_min_value -= smax_val;
10354 		dst_reg->s32_max_value -= smin_val;
10355 	}
10356 	if (dst_reg->u32_min_value < umax_val) {
10357 		/* Overflow possible, we know nothing */
10358 		dst_reg->u32_min_value = 0;
10359 		dst_reg->u32_max_value = U32_MAX;
10360 	} else {
10361 		/* Cannot overflow (as long as bounds are consistent) */
10362 		dst_reg->u32_min_value -= umax_val;
10363 		dst_reg->u32_max_value -= umin_val;
10364 	}
10365 }
10366 
10367 static void scalar_min_max_sub(struct bpf_reg_state *dst_reg,
10368 			       struct bpf_reg_state *src_reg)
10369 {
10370 	s64 smin_val = src_reg->smin_value;
10371 	s64 smax_val = src_reg->smax_value;
10372 	u64 umin_val = src_reg->umin_value;
10373 	u64 umax_val = src_reg->umax_value;
10374 
10375 	if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
10376 	    signed_sub_overflows(dst_reg->smax_value, smin_val)) {
10377 		/* Overflow possible, we know nothing */
10378 		dst_reg->smin_value = S64_MIN;
10379 		dst_reg->smax_value = S64_MAX;
10380 	} else {
10381 		dst_reg->smin_value -= smax_val;
10382 		dst_reg->smax_value -= smin_val;
10383 	}
10384 	if (dst_reg->umin_value < umax_val) {
10385 		/* Overflow possible, we know nothing */
10386 		dst_reg->umin_value = 0;
10387 		dst_reg->umax_value = U64_MAX;
10388 	} else {
10389 		/* Cannot overflow (as long as bounds are consistent) */
10390 		dst_reg->umin_value -= umax_val;
10391 		dst_reg->umax_value -= umin_val;
10392 	}
10393 }
10394 
10395 static void scalar32_min_max_mul(struct bpf_reg_state *dst_reg,
10396 				 struct bpf_reg_state *src_reg)
10397 {
10398 	s32 smin_val = src_reg->s32_min_value;
10399 	u32 umin_val = src_reg->u32_min_value;
10400 	u32 umax_val = src_reg->u32_max_value;
10401 
10402 	if (smin_val < 0 || dst_reg->s32_min_value < 0) {
10403 		/* Ain't nobody got time to multiply that sign */
10404 		__mark_reg32_unbounded(dst_reg);
10405 		return;
10406 	}
10407 	/* Both values are positive, so we can work with unsigned and
10408 	 * copy the result to signed (unless it exceeds S32_MAX).
10409 	 */
10410 	if (umax_val > U16_MAX || dst_reg->u32_max_value > U16_MAX) {
10411 		/* Potential overflow, we know nothing */
10412 		__mark_reg32_unbounded(dst_reg);
10413 		return;
10414 	}
10415 	dst_reg->u32_min_value *= umin_val;
10416 	dst_reg->u32_max_value *= umax_val;
10417 	if (dst_reg->u32_max_value > S32_MAX) {
10418 		/* Overflow possible, we know nothing */
10419 		dst_reg->s32_min_value = S32_MIN;
10420 		dst_reg->s32_max_value = S32_MAX;
10421 	} else {
10422 		dst_reg->s32_min_value = dst_reg->u32_min_value;
10423 		dst_reg->s32_max_value = dst_reg->u32_max_value;
10424 	}
10425 }
10426 
10427 static void scalar_min_max_mul(struct bpf_reg_state *dst_reg,
10428 			       struct bpf_reg_state *src_reg)
10429 {
10430 	s64 smin_val = src_reg->smin_value;
10431 	u64 umin_val = src_reg->umin_value;
10432 	u64 umax_val = src_reg->umax_value;
10433 
10434 	if (smin_val < 0 || dst_reg->smin_value < 0) {
10435 		/* Ain't nobody got time to multiply that sign */
10436 		__mark_reg64_unbounded(dst_reg);
10437 		return;
10438 	}
10439 	/* Both values are positive, so we can work with unsigned and
10440 	 * copy the result to signed (unless it exceeds S64_MAX).
10441 	 */
10442 	if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) {
10443 		/* Potential overflow, we know nothing */
10444 		__mark_reg64_unbounded(dst_reg);
10445 		return;
10446 	}
10447 	dst_reg->umin_value *= umin_val;
10448 	dst_reg->umax_value *= umax_val;
10449 	if (dst_reg->umax_value > S64_MAX) {
10450 		/* Overflow possible, we know nothing */
10451 		dst_reg->smin_value = S64_MIN;
10452 		dst_reg->smax_value = S64_MAX;
10453 	} else {
10454 		dst_reg->smin_value = dst_reg->umin_value;
10455 		dst_reg->smax_value = dst_reg->umax_value;
10456 	}
10457 }
10458 
10459 static void scalar32_min_max_and(struct bpf_reg_state *dst_reg,
10460 				 struct bpf_reg_state *src_reg)
10461 {
10462 	bool src_known = tnum_subreg_is_const(src_reg->var_off);
10463 	bool dst_known = tnum_subreg_is_const(dst_reg->var_off);
10464 	struct tnum var32_off = tnum_subreg(dst_reg->var_off);
10465 	s32 smin_val = src_reg->s32_min_value;
10466 	u32 umax_val = src_reg->u32_max_value;
10467 
10468 	if (src_known && dst_known) {
10469 		__mark_reg32_known(dst_reg, var32_off.value);
10470 		return;
10471 	}
10472 
10473 	/* We get our minimum from the var_off, since that's inherently
10474 	 * bitwise.  Our maximum is the minimum of the operands' maxima.
10475 	 */
10476 	dst_reg->u32_min_value = var32_off.value;
10477 	dst_reg->u32_max_value = min(dst_reg->u32_max_value, umax_val);
10478 	if (dst_reg->s32_min_value < 0 || smin_val < 0) {
10479 		/* Lose signed bounds when ANDing negative numbers,
10480 		 * ain't nobody got time for that.
10481 		 */
10482 		dst_reg->s32_min_value = S32_MIN;
10483 		dst_reg->s32_max_value = S32_MAX;
10484 	} else {
10485 		/* ANDing two positives gives a positive, so safe to
10486 		 * cast result into s64.
10487 		 */
10488 		dst_reg->s32_min_value = dst_reg->u32_min_value;
10489 		dst_reg->s32_max_value = dst_reg->u32_max_value;
10490 	}
10491 }
10492 
10493 static void scalar_min_max_and(struct bpf_reg_state *dst_reg,
10494 			       struct bpf_reg_state *src_reg)
10495 {
10496 	bool src_known = tnum_is_const(src_reg->var_off);
10497 	bool dst_known = tnum_is_const(dst_reg->var_off);
10498 	s64 smin_val = src_reg->smin_value;
10499 	u64 umax_val = src_reg->umax_value;
10500 
10501 	if (src_known && dst_known) {
10502 		__mark_reg_known(dst_reg, dst_reg->var_off.value);
10503 		return;
10504 	}
10505 
10506 	/* We get our minimum from the var_off, since that's inherently
10507 	 * bitwise.  Our maximum is the minimum of the operands' maxima.
10508 	 */
10509 	dst_reg->umin_value = dst_reg->var_off.value;
10510 	dst_reg->umax_value = min(dst_reg->umax_value, umax_val);
10511 	if (dst_reg->smin_value < 0 || smin_val < 0) {
10512 		/* Lose signed bounds when ANDing negative numbers,
10513 		 * ain't nobody got time for that.
10514 		 */
10515 		dst_reg->smin_value = S64_MIN;
10516 		dst_reg->smax_value = S64_MAX;
10517 	} else {
10518 		/* ANDing two positives gives a positive, so safe to
10519 		 * cast result into s64.
10520 		 */
10521 		dst_reg->smin_value = dst_reg->umin_value;
10522 		dst_reg->smax_value = dst_reg->umax_value;
10523 	}
10524 	/* We may learn something more from the var_off */
10525 	__update_reg_bounds(dst_reg);
10526 }
10527 
10528 static void scalar32_min_max_or(struct bpf_reg_state *dst_reg,
10529 				struct bpf_reg_state *src_reg)
10530 {
10531 	bool src_known = tnum_subreg_is_const(src_reg->var_off);
10532 	bool dst_known = tnum_subreg_is_const(dst_reg->var_off);
10533 	struct tnum var32_off = tnum_subreg(dst_reg->var_off);
10534 	s32 smin_val = src_reg->s32_min_value;
10535 	u32 umin_val = src_reg->u32_min_value;
10536 
10537 	if (src_known && dst_known) {
10538 		__mark_reg32_known(dst_reg, var32_off.value);
10539 		return;
10540 	}
10541 
10542 	/* We get our maximum from the var_off, and our minimum is the
10543 	 * maximum of the operands' minima
10544 	 */
10545 	dst_reg->u32_min_value = max(dst_reg->u32_min_value, umin_val);
10546 	dst_reg->u32_max_value = var32_off.value | var32_off.mask;
10547 	if (dst_reg->s32_min_value < 0 || smin_val < 0) {
10548 		/* Lose signed bounds when ORing negative numbers,
10549 		 * ain't nobody got time for that.
10550 		 */
10551 		dst_reg->s32_min_value = S32_MIN;
10552 		dst_reg->s32_max_value = S32_MAX;
10553 	} else {
10554 		/* ORing two positives gives a positive, so safe to
10555 		 * cast result into s64.
10556 		 */
10557 		dst_reg->s32_min_value = dst_reg->u32_min_value;
10558 		dst_reg->s32_max_value = dst_reg->u32_max_value;
10559 	}
10560 }
10561 
10562 static void scalar_min_max_or(struct bpf_reg_state *dst_reg,
10563 			      struct bpf_reg_state *src_reg)
10564 {
10565 	bool src_known = tnum_is_const(src_reg->var_off);
10566 	bool dst_known = tnum_is_const(dst_reg->var_off);
10567 	s64 smin_val = src_reg->smin_value;
10568 	u64 umin_val = src_reg->umin_value;
10569 
10570 	if (src_known && dst_known) {
10571 		__mark_reg_known(dst_reg, dst_reg->var_off.value);
10572 		return;
10573 	}
10574 
10575 	/* We get our maximum from the var_off, and our minimum is the
10576 	 * maximum of the operands' minima
10577 	 */
10578 	dst_reg->umin_value = max(dst_reg->umin_value, umin_val);
10579 	dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask;
10580 	if (dst_reg->smin_value < 0 || smin_val < 0) {
10581 		/* Lose signed bounds when ORing negative numbers,
10582 		 * ain't nobody got time for that.
10583 		 */
10584 		dst_reg->smin_value = S64_MIN;
10585 		dst_reg->smax_value = S64_MAX;
10586 	} else {
10587 		/* ORing two positives gives a positive, so safe to
10588 		 * cast result into s64.
10589 		 */
10590 		dst_reg->smin_value = dst_reg->umin_value;
10591 		dst_reg->smax_value = dst_reg->umax_value;
10592 	}
10593 	/* We may learn something more from the var_off */
10594 	__update_reg_bounds(dst_reg);
10595 }
10596 
10597 static void scalar32_min_max_xor(struct bpf_reg_state *dst_reg,
10598 				 struct bpf_reg_state *src_reg)
10599 {
10600 	bool src_known = tnum_subreg_is_const(src_reg->var_off);
10601 	bool dst_known = tnum_subreg_is_const(dst_reg->var_off);
10602 	struct tnum var32_off = tnum_subreg(dst_reg->var_off);
10603 	s32 smin_val = src_reg->s32_min_value;
10604 
10605 	if (src_known && dst_known) {
10606 		__mark_reg32_known(dst_reg, var32_off.value);
10607 		return;
10608 	}
10609 
10610 	/* We get both minimum and maximum from the var32_off. */
10611 	dst_reg->u32_min_value = var32_off.value;
10612 	dst_reg->u32_max_value = var32_off.value | var32_off.mask;
10613 
10614 	if (dst_reg->s32_min_value >= 0 && smin_val >= 0) {
10615 		/* XORing two positive sign numbers gives a positive,
10616 		 * so safe to cast u32 result into s32.
10617 		 */
10618 		dst_reg->s32_min_value = dst_reg->u32_min_value;
10619 		dst_reg->s32_max_value = dst_reg->u32_max_value;
10620 	} else {
10621 		dst_reg->s32_min_value = S32_MIN;
10622 		dst_reg->s32_max_value = S32_MAX;
10623 	}
10624 }
10625 
10626 static void scalar_min_max_xor(struct bpf_reg_state *dst_reg,
10627 			       struct bpf_reg_state *src_reg)
10628 {
10629 	bool src_known = tnum_is_const(src_reg->var_off);
10630 	bool dst_known = tnum_is_const(dst_reg->var_off);
10631 	s64 smin_val = src_reg->smin_value;
10632 
10633 	if (src_known && dst_known) {
10634 		/* dst_reg->var_off.value has been updated earlier */
10635 		__mark_reg_known(dst_reg, dst_reg->var_off.value);
10636 		return;
10637 	}
10638 
10639 	/* We get both minimum and maximum from the var_off. */
10640 	dst_reg->umin_value = dst_reg->var_off.value;
10641 	dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask;
10642 
10643 	if (dst_reg->smin_value >= 0 && smin_val >= 0) {
10644 		/* XORing two positive sign numbers gives a positive,
10645 		 * so safe to cast u64 result into s64.
10646 		 */
10647 		dst_reg->smin_value = dst_reg->umin_value;
10648 		dst_reg->smax_value = dst_reg->umax_value;
10649 	} else {
10650 		dst_reg->smin_value = S64_MIN;
10651 		dst_reg->smax_value = S64_MAX;
10652 	}
10653 
10654 	__update_reg_bounds(dst_reg);
10655 }
10656 
10657 static void __scalar32_min_max_lsh(struct bpf_reg_state *dst_reg,
10658 				   u64 umin_val, u64 umax_val)
10659 {
10660 	/* We lose all sign bit information (except what we can pick
10661 	 * up from var_off)
10662 	 */
10663 	dst_reg->s32_min_value = S32_MIN;
10664 	dst_reg->s32_max_value = S32_MAX;
10665 	/* If we might shift our top bit out, then we know nothing */
10666 	if (umax_val > 31 || dst_reg->u32_max_value > 1ULL << (31 - umax_val)) {
10667 		dst_reg->u32_min_value = 0;
10668 		dst_reg->u32_max_value = U32_MAX;
10669 	} else {
10670 		dst_reg->u32_min_value <<= umin_val;
10671 		dst_reg->u32_max_value <<= umax_val;
10672 	}
10673 }
10674 
10675 static void scalar32_min_max_lsh(struct bpf_reg_state *dst_reg,
10676 				 struct bpf_reg_state *src_reg)
10677 {
10678 	u32 umax_val = src_reg->u32_max_value;
10679 	u32 umin_val = src_reg->u32_min_value;
10680 	/* u32 alu operation will zext upper bits */
10681 	struct tnum subreg = tnum_subreg(dst_reg->var_off);
10682 
10683 	__scalar32_min_max_lsh(dst_reg, umin_val, umax_val);
10684 	dst_reg->var_off = tnum_subreg(tnum_lshift(subreg, umin_val));
10685 	/* Not required but being careful mark reg64 bounds as unknown so
10686 	 * that we are forced to pick them up from tnum and zext later and
10687 	 * if some path skips this step we are still safe.
10688 	 */
10689 	__mark_reg64_unbounded(dst_reg);
10690 	__update_reg32_bounds(dst_reg);
10691 }
10692 
10693 static void __scalar64_min_max_lsh(struct bpf_reg_state *dst_reg,
10694 				   u64 umin_val, u64 umax_val)
10695 {
10696 	/* Special case <<32 because it is a common compiler pattern to sign
10697 	 * extend subreg by doing <<32 s>>32. In this case if 32bit bounds are
10698 	 * positive we know this shift will also be positive so we can track
10699 	 * bounds correctly. Otherwise we lose all sign bit information except
10700 	 * what we can pick up from var_off. Perhaps we can generalize this
10701 	 * later to shifts of any length.
10702 	 */
10703 	if (umin_val == 32 && umax_val == 32 && dst_reg->s32_max_value >= 0)
10704 		dst_reg->smax_value = (s64)dst_reg->s32_max_value << 32;
10705 	else
10706 		dst_reg->smax_value = S64_MAX;
10707 
10708 	if (umin_val == 32 && umax_val == 32 && dst_reg->s32_min_value >= 0)
10709 		dst_reg->smin_value = (s64)dst_reg->s32_min_value << 32;
10710 	else
10711 		dst_reg->smin_value = S64_MIN;
10712 
10713 	/* If we might shift our top bit out, then we know nothing */
10714 	if (dst_reg->umax_value > 1ULL << (63 - umax_val)) {
10715 		dst_reg->umin_value = 0;
10716 		dst_reg->umax_value = U64_MAX;
10717 	} else {
10718 		dst_reg->umin_value <<= umin_val;
10719 		dst_reg->umax_value <<= umax_val;
10720 	}
10721 }
10722 
10723 static void scalar_min_max_lsh(struct bpf_reg_state *dst_reg,
10724 			       struct bpf_reg_state *src_reg)
10725 {
10726 	u64 umax_val = src_reg->umax_value;
10727 	u64 umin_val = src_reg->umin_value;
10728 
10729 	/* scalar64 calc uses 32bit unshifted bounds so must be called first */
10730 	__scalar64_min_max_lsh(dst_reg, umin_val, umax_val);
10731 	__scalar32_min_max_lsh(dst_reg, umin_val, umax_val);
10732 
10733 	dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val);
10734 	/* We may learn something more from the var_off */
10735 	__update_reg_bounds(dst_reg);
10736 }
10737 
10738 static void scalar32_min_max_rsh(struct bpf_reg_state *dst_reg,
10739 				 struct bpf_reg_state *src_reg)
10740 {
10741 	struct tnum subreg = tnum_subreg(dst_reg->var_off);
10742 	u32 umax_val = src_reg->u32_max_value;
10743 	u32 umin_val = src_reg->u32_min_value;
10744 
10745 	/* BPF_RSH is an unsigned shift.  If the value in dst_reg might
10746 	 * be negative, then either:
10747 	 * 1) src_reg might be zero, so the sign bit of the result is
10748 	 *    unknown, so we lose our signed bounds
10749 	 * 2) it's known negative, thus the unsigned bounds capture the
10750 	 *    signed bounds
10751 	 * 3) the signed bounds cross zero, so they tell us nothing
10752 	 *    about the result
10753 	 * If the value in dst_reg is known nonnegative, then again the
10754 	 * unsigned bounds capture the signed bounds.
10755 	 * Thus, in all cases it suffices to blow away our signed bounds
10756 	 * and rely on inferring new ones from the unsigned bounds and
10757 	 * var_off of the result.
10758 	 */
10759 	dst_reg->s32_min_value = S32_MIN;
10760 	dst_reg->s32_max_value = S32_MAX;
10761 
10762 	dst_reg->var_off = tnum_rshift(subreg, umin_val);
10763 	dst_reg->u32_min_value >>= umax_val;
10764 	dst_reg->u32_max_value >>= umin_val;
10765 
10766 	__mark_reg64_unbounded(dst_reg);
10767 	__update_reg32_bounds(dst_reg);
10768 }
10769 
10770 static void scalar_min_max_rsh(struct bpf_reg_state *dst_reg,
10771 			       struct bpf_reg_state *src_reg)
10772 {
10773 	u64 umax_val = src_reg->umax_value;
10774 	u64 umin_val = src_reg->umin_value;
10775 
10776 	/* BPF_RSH is an unsigned shift.  If the value in dst_reg might
10777 	 * be negative, then either:
10778 	 * 1) src_reg might be zero, so the sign bit of the result is
10779 	 *    unknown, so we lose our signed bounds
10780 	 * 2) it's known negative, thus the unsigned bounds capture the
10781 	 *    signed bounds
10782 	 * 3) the signed bounds cross zero, so they tell us nothing
10783 	 *    about the result
10784 	 * If the value in dst_reg is known nonnegative, then again the
10785 	 * unsigned bounds capture the signed bounds.
10786 	 * Thus, in all cases it suffices to blow away our signed bounds
10787 	 * and rely on inferring new ones from the unsigned bounds and
10788 	 * var_off of the result.
10789 	 */
10790 	dst_reg->smin_value = S64_MIN;
10791 	dst_reg->smax_value = S64_MAX;
10792 	dst_reg->var_off = tnum_rshift(dst_reg->var_off, umin_val);
10793 	dst_reg->umin_value >>= umax_val;
10794 	dst_reg->umax_value >>= umin_val;
10795 
10796 	/* Its not easy to operate on alu32 bounds here because it depends
10797 	 * on bits being shifted in. Take easy way out and mark unbounded
10798 	 * so we can recalculate later from tnum.
10799 	 */
10800 	__mark_reg32_unbounded(dst_reg);
10801 	__update_reg_bounds(dst_reg);
10802 }
10803 
10804 static void scalar32_min_max_arsh(struct bpf_reg_state *dst_reg,
10805 				  struct bpf_reg_state *src_reg)
10806 {
10807 	u64 umin_val = src_reg->u32_min_value;
10808 
10809 	/* Upon reaching here, src_known is true and
10810 	 * umax_val is equal to umin_val.
10811 	 */
10812 	dst_reg->s32_min_value = (u32)(((s32)dst_reg->s32_min_value) >> umin_val);
10813 	dst_reg->s32_max_value = (u32)(((s32)dst_reg->s32_max_value) >> umin_val);
10814 
10815 	dst_reg->var_off = tnum_arshift(tnum_subreg(dst_reg->var_off), umin_val, 32);
10816 
10817 	/* blow away the dst_reg umin_value/umax_value and rely on
10818 	 * dst_reg var_off to refine the result.
10819 	 */
10820 	dst_reg->u32_min_value = 0;
10821 	dst_reg->u32_max_value = U32_MAX;
10822 
10823 	__mark_reg64_unbounded(dst_reg);
10824 	__update_reg32_bounds(dst_reg);
10825 }
10826 
10827 static void scalar_min_max_arsh(struct bpf_reg_state *dst_reg,
10828 				struct bpf_reg_state *src_reg)
10829 {
10830 	u64 umin_val = src_reg->umin_value;
10831 
10832 	/* Upon reaching here, src_known is true and umax_val is equal
10833 	 * to umin_val.
10834 	 */
10835 	dst_reg->smin_value >>= umin_val;
10836 	dst_reg->smax_value >>= umin_val;
10837 
10838 	dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val, 64);
10839 
10840 	/* blow away the dst_reg umin_value/umax_value and rely on
10841 	 * dst_reg var_off to refine the result.
10842 	 */
10843 	dst_reg->umin_value = 0;
10844 	dst_reg->umax_value = U64_MAX;
10845 
10846 	/* Its not easy to operate on alu32 bounds here because it depends
10847 	 * on bits being shifted in from upper 32-bits. Take easy way out
10848 	 * and mark unbounded so we can recalculate later from tnum.
10849 	 */
10850 	__mark_reg32_unbounded(dst_reg);
10851 	__update_reg_bounds(dst_reg);
10852 }
10853 
10854 /* WARNING: This function does calculations on 64-bit values, but the actual
10855  * execution may occur on 32-bit values. Therefore, things like bitshifts
10856  * need extra checks in the 32-bit case.
10857  */
10858 static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
10859 				      struct bpf_insn *insn,
10860 				      struct bpf_reg_state *dst_reg,
10861 				      struct bpf_reg_state src_reg)
10862 {
10863 	struct bpf_reg_state *regs = cur_regs(env);
10864 	u8 opcode = BPF_OP(insn->code);
10865 	bool src_known;
10866 	s64 smin_val, smax_val;
10867 	u64 umin_val, umax_val;
10868 	s32 s32_min_val, s32_max_val;
10869 	u32 u32_min_val, u32_max_val;
10870 	u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
10871 	bool alu32 = (BPF_CLASS(insn->code) != BPF_ALU64);
10872 	int ret;
10873 
10874 	smin_val = src_reg.smin_value;
10875 	smax_val = src_reg.smax_value;
10876 	umin_val = src_reg.umin_value;
10877 	umax_val = src_reg.umax_value;
10878 
10879 	s32_min_val = src_reg.s32_min_value;
10880 	s32_max_val = src_reg.s32_max_value;
10881 	u32_min_val = src_reg.u32_min_value;
10882 	u32_max_val = src_reg.u32_max_value;
10883 
10884 	if (alu32) {
10885 		src_known = tnum_subreg_is_const(src_reg.var_off);
10886 		if ((src_known &&
10887 		     (s32_min_val != s32_max_val || u32_min_val != u32_max_val)) ||
10888 		    s32_min_val > s32_max_val || u32_min_val > u32_max_val) {
10889 			/* Taint dst register if offset had invalid bounds
10890 			 * derived from e.g. dead branches.
10891 			 */
10892 			__mark_reg_unknown(env, dst_reg);
10893 			return 0;
10894 		}
10895 	} else {
10896 		src_known = tnum_is_const(src_reg.var_off);
10897 		if ((src_known &&
10898 		     (smin_val != smax_val || umin_val != umax_val)) ||
10899 		    smin_val > smax_val || umin_val > umax_val) {
10900 			/* Taint dst register if offset had invalid bounds
10901 			 * derived from e.g. dead branches.
10902 			 */
10903 			__mark_reg_unknown(env, dst_reg);
10904 			return 0;
10905 		}
10906 	}
10907 
10908 	if (!src_known &&
10909 	    opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) {
10910 		__mark_reg_unknown(env, dst_reg);
10911 		return 0;
10912 	}
10913 
10914 	if (sanitize_needed(opcode)) {
10915 		ret = sanitize_val_alu(env, insn);
10916 		if (ret < 0)
10917 			return sanitize_err(env, insn, ret, NULL, NULL);
10918 	}
10919 
10920 	/* Calculate sign/unsigned bounds and tnum for alu32 and alu64 bit ops.
10921 	 * There are two classes of instructions: The first class we track both
10922 	 * alu32 and alu64 sign/unsigned bounds independently this provides the
10923 	 * greatest amount of precision when alu operations are mixed with jmp32
10924 	 * operations. These operations are BPF_ADD, BPF_SUB, BPF_MUL, BPF_ADD,
10925 	 * and BPF_OR. This is possible because these ops have fairly easy to
10926 	 * understand and calculate behavior in both 32-bit and 64-bit alu ops.
10927 	 * See alu32 verifier tests for examples. The second class of
10928 	 * operations, BPF_LSH, BPF_RSH, and BPF_ARSH, however are not so easy
10929 	 * with regards to tracking sign/unsigned bounds because the bits may
10930 	 * cross subreg boundaries in the alu64 case. When this happens we mark
10931 	 * the reg unbounded in the subreg bound space and use the resulting
10932 	 * tnum to calculate an approximation of the sign/unsigned bounds.
10933 	 */
10934 	switch (opcode) {
10935 	case BPF_ADD:
10936 		scalar32_min_max_add(dst_reg, &src_reg);
10937 		scalar_min_max_add(dst_reg, &src_reg);
10938 		dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);
10939 		break;
10940 	case BPF_SUB:
10941 		scalar32_min_max_sub(dst_reg, &src_reg);
10942 		scalar_min_max_sub(dst_reg, &src_reg);
10943 		dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off);
10944 		break;
10945 	case BPF_MUL:
10946 		dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off);
10947 		scalar32_min_max_mul(dst_reg, &src_reg);
10948 		scalar_min_max_mul(dst_reg, &src_reg);
10949 		break;
10950 	case BPF_AND:
10951 		dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off);
10952 		scalar32_min_max_and(dst_reg, &src_reg);
10953 		scalar_min_max_and(dst_reg, &src_reg);
10954 		break;
10955 	case BPF_OR:
10956 		dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off);
10957 		scalar32_min_max_or(dst_reg, &src_reg);
10958 		scalar_min_max_or(dst_reg, &src_reg);
10959 		break;
10960 	case BPF_XOR:
10961 		dst_reg->var_off = tnum_xor(dst_reg->var_off, src_reg.var_off);
10962 		scalar32_min_max_xor(dst_reg, &src_reg);
10963 		scalar_min_max_xor(dst_reg, &src_reg);
10964 		break;
10965 	case BPF_LSH:
10966 		if (umax_val >= insn_bitness) {
10967 			/* Shifts greater than 31 or 63 are undefined.
10968 			 * This includes shifts by a negative number.
10969 			 */
10970 			mark_reg_unknown(env, regs, insn->dst_reg);
10971 			break;
10972 		}
10973 		if (alu32)
10974 			scalar32_min_max_lsh(dst_reg, &src_reg);
10975 		else
10976 			scalar_min_max_lsh(dst_reg, &src_reg);
10977 		break;
10978 	case BPF_RSH:
10979 		if (umax_val >= insn_bitness) {
10980 			/* Shifts greater than 31 or 63 are undefined.
10981 			 * This includes shifts by a negative number.
10982 			 */
10983 			mark_reg_unknown(env, regs, insn->dst_reg);
10984 			break;
10985 		}
10986 		if (alu32)
10987 			scalar32_min_max_rsh(dst_reg, &src_reg);
10988 		else
10989 			scalar_min_max_rsh(dst_reg, &src_reg);
10990 		break;
10991 	case BPF_ARSH:
10992 		if (umax_val >= insn_bitness) {
10993 			/* Shifts greater than 31 or 63 are undefined.
10994 			 * This includes shifts by a negative number.
10995 			 */
10996 			mark_reg_unknown(env, regs, insn->dst_reg);
10997 			break;
10998 		}
10999 		if (alu32)
11000 			scalar32_min_max_arsh(dst_reg, &src_reg);
11001 		else
11002 			scalar_min_max_arsh(dst_reg, &src_reg);
11003 		break;
11004 	default:
11005 		mark_reg_unknown(env, regs, insn->dst_reg);
11006 		break;
11007 	}
11008 
11009 	/* ALU32 ops are zero extended into 64bit register */
11010 	if (alu32)
11011 		zext_32_to_64(dst_reg);
11012 	reg_bounds_sync(dst_reg);
11013 	return 0;
11014 }
11015 
11016 /* Handles ALU ops other than BPF_END, BPF_NEG and BPF_MOV: computes new min/max
11017  * and var_off.
11018  */
11019 static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
11020 				   struct bpf_insn *insn)
11021 {
11022 	struct bpf_verifier_state *vstate = env->cur_state;
11023 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
11024 	struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg;
11025 	struct bpf_reg_state *ptr_reg = NULL, off_reg = {0};
11026 	u8 opcode = BPF_OP(insn->code);
11027 	int err;
11028 
11029 	dst_reg = &regs[insn->dst_reg];
11030 	src_reg = NULL;
11031 	if (dst_reg->type != SCALAR_VALUE)
11032 		ptr_reg = dst_reg;
11033 	else
11034 		/* Make sure ID is cleared otherwise dst_reg min/max could be
11035 		 * incorrectly propagated into other registers by find_equal_scalars()
11036 		 */
11037 		dst_reg->id = 0;
11038 	if (BPF_SRC(insn->code) == BPF_X) {
11039 		src_reg = &regs[insn->src_reg];
11040 		if (src_reg->type != SCALAR_VALUE) {
11041 			if (dst_reg->type != SCALAR_VALUE) {
11042 				/* Combining two pointers by any ALU op yields
11043 				 * an arbitrary scalar. Disallow all math except
11044 				 * pointer subtraction
11045 				 */
11046 				if (opcode == BPF_SUB && env->allow_ptr_leaks) {
11047 					mark_reg_unknown(env, regs, insn->dst_reg);
11048 					return 0;
11049 				}
11050 				verbose(env, "R%d pointer %s pointer prohibited\n",
11051 					insn->dst_reg,
11052 					bpf_alu_string[opcode >> 4]);
11053 				return -EACCES;
11054 			} else {
11055 				/* scalar += pointer
11056 				 * This is legal, but we have to reverse our
11057 				 * src/dest handling in computing the range
11058 				 */
11059 				err = mark_chain_precision(env, insn->dst_reg);
11060 				if (err)
11061 					return err;
11062 				return adjust_ptr_min_max_vals(env, insn,
11063 							       src_reg, dst_reg);
11064 			}
11065 		} else if (ptr_reg) {
11066 			/* pointer += scalar */
11067 			err = mark_chain_precision(env, insn->src_reg);
11068 			if (err)
11069 				return err;
11070 			return adjust_ptr_min_max_vals(env, insn,
11071 						       dst_reg, src_reg);
11072 		} else if (dst_reg->precise) {
11073 			/* if dst_reg is precise, src_reg should be precise as well */
11074 			err = mark_chain_precision(env, insn->src_reg);
11075 			if (err)
11076 				return err;
11077 		}
11078 	} else {
11079 		/* Pretend the src is a reg with a known value, since we only
11080 		 * need to be able to read from this state.
11081 		 */
11082 		off_reg.type = SCALAR_VALUE;
11083 		__mark_reg_known(&off_reg, insn->imm);
11084 		src_reg = &off_reg;
11085 		if (ptr_reg) /* pointer += K */
11086 			return adjust_ptr_min_max_vals(env, insn,
11087 						       ptr_reg, src_reg);
11088 	}
11089 
11090 	/* Got here implies adding two SCALAR_VALUEs */
11091 	if (WARN_ON_ONCE(ptr_reg)) {
11092 		print_verifier_state(env, state, true);
11093 		verbose(env, "verifier internal error: unexpected ptr_reg\n");
11094 		return -EINVAL;
11095 	}
11096 	if (WARN_ON(!src_reg)) {
11097 		print_verifier_state(env, state, true);
11098 		verbose(env, "verifier internal error: no src_reg\n");
11099 		return -EINVAL;
11100 	}
11101 	return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg);
11102 }
11103 
11104 /* check validity of 32-bit and 64-bit arithmetic operations */
11105 static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
11106 {
11107 	struct bpf_reg_state *regs = cur_regs(env);
11108 	u8 opcode = BPF_OP(insn->code);
11109 	int err;
11110 
11111 	if (opcode == BPF_END || opcode == BPF_NEG) {
11112 		if (opcode == BPF_NEG) {
11113 			if (BPF_SRC(insn->code) != BPF_K ||
11114 			    insn->src_reg != BPF_REG_0 ||
11115 			    insn->off != 0 || insn->imm != 0) {
11116 				verbose(env, "BPF_NEG uses reserved fields\n");
11117 				return -EINVAL;
11118 			}
11119 		} else {
11120 			if (insn->src_reg != BPF_REG_0 || insn->off != 0 ||
11121 			    (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) ||
11122 			    BPF_CLASS(insn->code) == BPF_ALU64) {
11123 				verbose(env, "BPF_END uses reserved fields\n");
11124 				return -EINVAL;
11125 			}
11126 		}
11127 
11128 		/* check src operand */
11129 		err = check_reg_arg(env, insn->dst_reg, SRC_OP);
11130 		if (err)
11131 			return err;
11132 
11133 		if (is_pointer_value(env, insn->dst_reg)) {
11134 			verbose(env, "R%d pointer arithmetic prohibited\n",
11135 				insn->dst_reg);
11136 			return -EACCES;
11137 		}
11138 
11139 		/* check dest operand */
11140 		err = check_reg_arg(env, insn->dst_reg, DST_OP);
11141 		if (err)
11142 			return err;
11143 
11144 	} else if (opcode == BPF_MOV) {
11145 
11146 		if (BPF_SRC(insn->code) == BPF_X) {
11147 			if (insn->imm != 0 || insn->off != 0) {
11148 				verbose(env, "BPF_MOV uses reserved fields\n");
11149 				return -EINVAL;
11150 			}
11151 
11152 			/* check src operand */
11153 			err = check_reg_arg(env, insn->src_reg, SRC_OP);
11154 			if (err)
11155 				return err;
11156 		} else {
11157 			if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
11158 				verbose(env, "BPF_MOV uses reserved fields\n");
11159 				return -EINVAL;
11160 			}
11161 		}
11162 
11163 		/* check dest operand, mark as required later */
11164 		err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
11165 		if (err)
11166 			return err;
11167 
11168 		if (BPF_SRC(insn->code) == BPF_X) {
11169 			struct bpf_reg_state *src_reg = regs + insn->src_reg;
11170 			struct bpf_reg_state *dst_reg = regs + insn->dst_reg;
11171 
11172 			if (BPF_CLASS(insn->code) == BPF_ALU64) {
11173 				/* case: R1 = R2
11174 				 * copy register state to dest reg
11175 				 */
11176 				if (src_reg->type == SCALAR_VALUE && !src_reg->id)
11177 					/* Assign src and dst registers the same ID
11178 					 * that will be used by find_equal_scalars()
11179 					 * to propagate min/max range.
11180 					 */
11181 					src_reg->id = ++env->id_gen;
11182 				copy_register_state(dst_reg, src_reg);
11183 				dst_reg->live |= REG_LIVE_WRITTEN;
11184 				dst_reg->subreg_def = DEF_NOT_SUBREG;
11185 			} else {
11186 				/* R1 = (u32) R2 */
11187 				if (is_pointer_value(env, insn->src_reg)) {
11188 					verbose(env,
11189 						"R%d partial copy of pointer\n",
11190 						insn->src_reg);
11191 					return -EACCES;
11192 				} else if (src_reg->type == SCALAR_VALUE) {
11193 					copy_register_state(dst_reg, src_reg);
11194 					/* Make sure ID is cleared otherwise
11195 					 * dst_reg min/max could be incorrectly
11196 					 * propagated into src_reg by find_equal_scalars()
11197 					 */
11198 					dst_reg->id = 0;
11199 					dst_reg->live |= REG_LIVE_WRITTEN;
11200 					dst_reg->subreg_def = env->insn_idx + 1;
11201 				} else {
11202 					mark_reg_unknown(env, regs,
11203 							 insn->dst_reg);
11204 				}
11205 				zext_32_to_64(dst_reg);
11206 				reg_bounds_sync(dst_reg);
11207 			}
11208 		} else {
11209 			/* case: R = imm
11210 			 * remember the value we stored into this reg
11211 			 */
11212 			/* clear any state __mark_reg_known doesn't set */
11213 			mark_reg_unknown(env, regs, insn->dst_reg);
11214 			regs[insn->dst_reg].type = SCALAR_VALUE;
11215 			if (BPF_CLASS(insn->code) == BPF_ALU64) {
11216 				__mark_reg_known(regs + insn->dst_reg,
11217 						 insn->imm);
11218 			} else {
11219 				__mark_reg_known(regs + insn->dst_reg,
11220 						 (u32)insn->imm);
11221 			}
11222 		}
11223 
11224 	} else if (opcode > BPF_END) {
11225 		verbose(env, "invalid BPF_ALU opcode %x\n", opcode);
11226 		return -EINVAL;
11227 
11228 	} else {	/* all other ALU ops: and, sub, xor, add, ... */
11229 
11230 		if (BPF_SRC(insn->code) == BPF_X) {
11231 			if (insn->imm != 0 || insn->off != 0) {
11232 				verbose(env, "BPF_ALU uses reserved fields\n");
11233 				return -EINVAL;
11234 			}
11235 			/* check src1 operand */
11236 			err = check_reg_arg(env, insn->src_reg, SRC_OP);
11237 			if (err)
11238 				return err;
11239 		} else {
11240 			if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
11241 				verbose(env, "BPF_ALU uses reserved fields\n");
11242 				return -EINVAL;
11243 			}
11244 		}
11245 
11246 		/* check src2 operand */
11247 		err = check_reg_arg(env, insn->dst_reg, SRC_OP);
11248 		if (err)
11249 			return err;
11250 
11251 		if ((opcode == BPF_MOD || opcode == BPF_DIV) &&
11252 		    BPF_SRC(insn->code) == BPF_K && insn->imm == 0) {
11253 			verbose(env, "div by zero\n");
11254 			return -EINVAL;
11255 		}
11256 
11257 		if ((opcode == BPF_LSH || opcode == BPF_RSH ||
11258 		     opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
11259 			int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;
11260 
11261 			if (insn->imm < 0 || insn->imm >= size) {
11262 				verbose(env, "invalid shift %d\n", insn->imm);
11263 				return -EINVAL;
11264 			}
11265 		}
11266 
11267 		/* check dest operand */
11268 		err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
11269 		if (err)
11270 			return err;
11271 
11272 		return adjust_reg_min_max_vals(env, insn);
11273 	}
11274 
11275 	return 0;
11276 }
11277 
11278 static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
11279 				   struct bpf_reg_state *dst_reg,
11280 				   enum bpf_reg_type type,
11281 				   bool range_right_open)
11282 {
11283 	struct bpf_func_state *state;
11284 	struct bpf_reg_state *reg;
11285 	int new_range;
11286 
11287 	if (dst_reg->off < 0 ||
11288 	    (dst_reg->off == 0 && range_right_open))
11289 		/* This doesn't give us any range */
11290 		return;
11291 
11292 	if (dst_reg->umax_value > MAX_PACKET_OFF ||
11293 	    dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF)
11294 		/* Risk of overflow.  For instance, ptr + (1<<63) may be less
11295 		 * than pkt_end, but that's because it's also less than pkt.
11296 		 */
11297 		return;
11298 
11299 	new_range = dst_reg->off;
11300 	if (range_right_open)
11301 		new_range++;
11302 
11303 	/* Examples for register markings:
11304 	 *
11305 	 * pkt_data in dst register:
11306 	 *
11307 	 *   r2 = r3;
11308 	 *   r2 += 8;
11309 	 *   if (r2 > pkt_end) goto <handle exception>
11310 	 *   <access okay>
11311 	 *
11312 	 *   r2 = r3;
11313 	 *   r2 += 8;
11314 	 *   if (r2 < pkt_end) goto <access okay>
11315 	 *   <handle exception>
11316 	 *
11317 	 *   Where:
11318 	 *     r2 == dst_reg, pkt_end == src_reg
11319 	 *     r2=pkt(id=n,off=8,r=0)
11320 	 *     r3=pkt(id=n,off=0,r=0)
11321 	 *
11322 	 * pkt_data in src register:
11323 	 *
11324 	 *   r2 = r3;
11325 	 *   r2 += 8;
11326 	 *   if (pkt_end >= r2) goto <access okay>
11327 	 *   <handle exception>
11328 	 *
11329 	 *   r2 = r3;
11330 	 *   r2 += 8;
11331 	 *   if (pkt_end <= r2) goto <handle exception>
11332 	 *   <access okay>
11333 	 *
11334 	 *   Where:
11335 	 *     pkt_end == dst_reg, r2 == src_reg
11336 	 *     r2=pkt(id=n,off=8,r=0)
11337 	 *     r3=pkt(id=n,off=0,r=0)
11338 	 *
11339 	 * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8)
11340 	 * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8)
11341 	 * and [r3, r3 + 8-1) respectively is safe to access depending on
11342 	 * the check.
11343 	 */
11344 
11345 	/* If our ids match, then we must have the same max_value.  And we
11346 	 * don't care about the other reg's fixed offset, since if it's too big
11347 	 * the range won't allow anything.
11348 	 * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16.
11349 	 */
11350 	bpf_for_each_reg_in_vstate(vstate, state, reg, ({
11351 		if (reg->type == type && reg->id == dst_reg->id)
11352 			/* keep the maximum range already checked */
11353 			reg->range = max(reg->range, new_range);
11354 	}));
11355 }
11356 
11357 static int is_branch32_taken(struct bpf_reg_state *reg, u32 val, u8 opcode)
11358 {
11359 	struct tnum subreg = tnum_subreg(reg->var_off);
11360 	s32 sval = (s32)val;
11361 
11362 	switch (opcode) {
11363 	case BPF_JEQ:
11364 		if (tnum_is_const(subreg))
11365 			return !!tnum_equals_const(subreg, val);
11366 		break;
11367 	case BPF_JNE:
11368 		if (tnum_is_const(subreg))
11369 			return !tnum_equals_const(subreg, val);
11370 		break;
11371 	case BPF_JSET:
11372 		if ((~subreg.mask & subreg.value) & val)
11373 			return 1;
11374 		if (!((subreg.mask | subreg.value) & val))
11375 			return 0;
11376 		break;
11377 	case BPF_JGT:
11378 		if (reg->u32_min_value > val)
11379 			return 1;
11380 		else if (reg->u32_max_value <= val)
11381 			return 0;
11382 		break;
11383 	case BPF_JSGT:
11384 		if (reg->s32_min_value > sval)
11385 			return 1;
11386 		else if (reg->s32_max_value <= sval)
11387 			return 0;
11388 		break;
11389 	case BPF_JLT:
11390 		if (reg->u32_max_value < val)
11391 			return 1;
11392 		else if (reg->u32_min_value >= val)
11393 			return 0;
11394 		break;
11395 	case BPF_JSLT:
11396 		if (reg->s32_max_value < sval)
11397 			return 1;
11398 		else if (reg->s32_min_value >= sval)
11399 			return 0;
11400 		break;
11401 	case BPF_JGE:
11402 		if (reg->u32_min_value >= val)
11403 			return 1;
11404 		else if (reg->u32_max_value < val)
11405 			return 0;
11406 		break;
11407 	case BPF_JSGE:
11408 		if (reg->s32_min_value >= sval)
11409 			return 1;
11410 		else if (reg->s32_max_value < sval)
11411 			return 0;
11412 		break;
11413 	case BPF_JLE:
11414 		if (reg->u32_max_value <= val)
11415 			return 1;
11416 		else if (reg->u32_min_value > val)
11417 			return 0;
11418 		break;
11419 	case BPF_JSLE:
11420 		if (reg->s32_max_value <= sval)
11421 			return 1;
11422 		else if (reg->s32_min_value > sval)
11423 			return 0;
11424 		break;
11425 	}
11426 
11427 	return -1;
11428 }
11429 
11430 
11431 static int is_branch64_taken(struct bpf_reg_state *reg, u64 val, u8 opcode)
11432 {
11433 	s64 sval = (s64)val;
11434 
11435 	switch (opcode) {
11436 	case BPF_JEQ:
11437 		if (tnum_is_const(reg->var_off))
11438 			return !!tnum_equals_const(reg->var_off, val);
11439 		break;
11440 	case BPF_JNE:
11441 		if (tnum_is_const(reg->var_off))
11442 			return !tnum_equals_const(reg->var_off, val);
11443 		break;
11444 	case BPF_JSET:
11445 		if ((~reg->var_off.mask & reg->var_off.value) & val)
11446 			return 1;
11447 		if (!((reg->var_off.mask | reg->var_off.value) & val))
11448 			return 0;
11449 		break;
11450 	case BPF_JGT:
11451 		if (reg->umin_value > val)
11452 			return 1;
11453 		else if (reg->umax_value <= val)
11454 			return 0;
11455 		break;
11456 	case BPF_JSGT:
11457 		if (reg->smin_value > sval)
11458 			return 1;
11459 		else if (reg->smax_value <= sval)
11460 			return 0;
11461 		break;
11462 	case BPF_JLT:
11463 		if (reg->umax_value < val)
11464 			return 1;
11465 		else if (reg->umin_value >= val)
11466 			return 0;
11467 		break;
11468 	case BPF_JSLT:
11469 		if (reg->smax_value < sval)
11470 			return 1;
11471 		else if (reg->smin_value >= sval)
11472 			return 0;
11473 		break;
11474 	case BPF_JGE:
11475 		if (reg->umin_value >= val)
11476 			return 1;
11477 		else if (reg->umax_value < val)
11478 			return 0;
11479 		break;
11480 	case BPF_JSGE:
11481 		if (reg->smin_value >= sval)
11482 			return 1;
11483 		else if (reg->smax_value < sval)
11484 			return 0;
11485 		break;
11486 	case BPF_JLE:
11487 		if (reg->umax_value <= val)
11488 			return 1;
11489 		else if (reg->umin_value > val)
11490 			return 0;
11491 		break;
11492 	case BPF_JSLE:
11493 		if (reg->smax_value <= sval)
11494 			return 1;
11495 		else if (reg->smin_value > sval)
11496 			return 0;
11497 		break;
11498 	}
11499 
11500 	return -1;
11501 }
11502 
11503 /* compute branch direction of the expression "if (reg opcode val) goto target;"
11504  * and return:
11505  *  1 - branch will be taken and "goto target" will be executed
11506  *  0 - branch will not be taken and fall-through to next insn
11507  * -1 - unknown. Example: "if (reg < 5)" is unknown when register value
11508  *      range [0,10]
11509  */
11510 static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode,
11511 			   bool is_jmp32)
11512 {
11513 	if (__is_pointer_value(false, reg)) {
11514 		if (!reg_type_not_null(reg->type))
11515 			return -1;
11516 
11517 		/* If pointer is valid tests against zero will fail so we can
11518 		 * use this to direct branch taken.
11519 		 */
11520 		if (val != 0)
11521 			return -1;
11522 
11523 		switch (opcode) {
11524 		case BPF_JEQ:
11525 			return 0;
11526 		case BPF_JNE:
11527 			return 1;
11528 		default:
11529 			return -1;
11530 		}
11531 	}
11532 
11533 	if (is_jmp32)
11534 		return is_branch32_taken(reg, val, opcode);
11535 	return is_branch64_taken(reg, val, opcode);
11536 }
11537 
11538 static int flip_opcode(u32 opcode)
11539 {
11540 	/* How can we transform "a <op> b" into "b <op> a"? */
11541 	static const u8 opcode_flip[16] = {
11542 		/* these stay the same */
11543 		[BPF_JEQ  >> 4] = BPF_JEQ,
11544 		[BPF_JNE  >> 4] = BPF_JNE,
11545 		[BPF_JSET >> 4] = BPF_JSET,
11546 		/* these swap "lesser" and "greater" (L and G in the opcodes) */
11547 		[BPF_JGE  >> 4] = BPF_JLE,
11548 		[BPF_JGT  >> 4] = BPF_JLT,
11549 		[BPF_JLE  >> 4] = BPF_JGE,
11550 		[BPF_JLT  >> 4] = BPF_JGT,
11551 		[BPF_JSGE >> 4] = BPF_JSLE,
11552 		[BPF_JSGT >> 4] = BPF_JSLT,
11553 		[BPF_JSLE >> 4] = BPF_JSGE,
11554 		[BPF_JSLT >> 4] = BPF_JSGT
11555 	};
11556 	return opcode_flip[opcode >> 4];
11557 }
11558 
11559 static int is_pkt_ptr_branch_taken(struct bpf_reg_state *dst_reg,
11560 				   struct bpf_reg_state *src_reg,
11561 				   u8 opcode)
11562 {
11563 	struct bpf_reg_state *pkt;
11564 
11565 	if (src_reg->type == PTR_TO_PACKET_END) {
11566 		pkt = dst_reg;
11567 	} else if (dst_reg->type == PTR_TO_PACKET_END) {
11568 		pkt = src_reg;
11569 		opcode = flip_opcode(opcode);
11570 	} else {
11571 		return -1;
11572 	}
11573 
11574 	if (pkt->range >= 0)
11575 		return -1;
11576 
11577 	switch (opcode) {
11578 	case BPF_JLE:
11579 		/* pkt <= pkt_end */
11580 		fallthrough;
11581 	case BPF_JGT:
11582 		/* pkt > pkt_end */
11583 		if (pkt->range == BEYOND_PKT_END)
11584 			/* pkt has at last one extra byte beyond pkt_end */
11585 			return opcode == BPF_JGT;
11586 		break;
11587 	case BPF_JLT:
11588 		/* pkt < pkt_end */
11589 		fallthrough;
11590 	case BPF_JGE:
11591 		/* pkt >= pkt_end */
11592 		if (pkt->range == BEYOND_PKT_END || pkt->range == AT_PKT_END)
11593 			return opcode == BPF_JGE;
11594 		break;
11595 	}
11596 	return -1;
11597 }
11598 
11599 /* Adjusts the register min/max values in the case that the dst_reg is the
11600  * variable register that we are working on, and src_reg is a constant or we're
11601  * simply doing a BPF_K check.
11602  * In JEQ/JNE cases we also adjust the var_off values.
11603  */
11604 static void reg_set_min_max(struct bpf_reg_state *true_reg,
11605 			    struct bpf_reg_state *false_reg,
11606 			    u64 val, u32 val32,
11607 			    u8 opcode, bool is_jmp32)
11608 {
11609 	struct tnum false_32off = tnum_subreg(false_reg->var_off);
11610 	struct tnum false_64off = false_reg->var_off;
11611 	struct tnum true_32off = tnum_subreg(true_reg->var_off);
11612 	struct tnum true_64off = true_reg->var_off;
11613 	s64 sval = (s64)val;
11614 	s32 sval32 = (s32)val32;
11615 
11616 	/* If the dst_reg is a pointer, we can't learn anything about its
11617 	 * variable offset from the compare (unless src_reg were a pointer into
11618 	 * the same object, but we don't bother with that.
11619 	 * Since false_reg and true_reg have the same type by construction, we
11620 	 * only need to check one of them for pointerness.
11621 	 */
11622 	if (__is_pointer_value(false, false_reg))
11623 		return;
11624 
11625 	switch (opcode) {
11626 	/* JEQ/JNE comparison doesn't change the register equivalence.
11627 	 *
11628 	 * r1 = r2;
11629 	 * if (r1 == 42) goto label;
11630 	 * ...
11631 	 * label: // here both r1 and r2 are known to be 42.
11632 	 *
11633 	 * Hence when marking register as known preserve it's ID.
11634 	 */
11635 	case BPF_JEQ:
11636 		if (is_jmp32) {
11637 			__mark_reg32_known(true_reg, val32);
11638 			true_32off = tnum_subreg(true_reg->var_off);
11639 		} else {
11640 			___mark_reg_known(true_reg, val);
11641 			true_64off = true_reg->var_off;
11642 		}
11643 		break;
11644 	case BPF_JNE:
11645 		if (is_jmp32) {
11646 			__mark_reg32_known(false_reg, val32);
11647 			false_32off = tnum_subreg(false_reg->var_off);
11648 		} else {
11649 			___mark_reg_known(false_reg, val);
11650 			false_64off = false_reg->var_off;
11651 		}
11652 		break;
11653 	case BPF_JSET:
11654 		if (is_jmp32) {
11655 			false_32off = tnum_and(false_32off, tnum_const(~val32));
11656 			if (is_power_of_2(val32))
11657 				true_32off = tnum_or(true_32off,
11658 						     tnum_const(val32));
11659 		} else {
11660 			false_64off = tnum_and(false_64off, tnum_const(~val));
11661 			if (is_power_of_2(val))
11662 				true_64off = tnum_or(true_64off,
11663 						     tnum_const(val));
11664 		}
11665 		break;
11666 	case BPF_JGE:
11667 	case BPF_JGT:
11668 	{
11669 		if (is_jmp32) {
11670 			u32 false_umax = opcode == BPF_JGT ? val32  : val32 - 1;
11671 			u32 true_umin = opcode == BPF_JGT ? val32 + 1 : val32;
11672 
11673 			false_reg->u32_max_value = min(false_reg->u32_max_value,
11674 						       false_umax);
11675 			true_reg->u32_min_value = max(true_reg->u32_min_value,
11676 						      true_umin);
11677 		} else {
11678 			u64 false_umax = opcode == BPF_JGT ? val    : val - 1;
11679 			u64 true_umin = opcode == BPF_JGT ? val + 1 : val;
11680 
11681 			false_reg->umax_value = min(false_reg->umax_value, false_umax);
11682 			true_reg->umin_value = max(true_reg->umin_value, true_umin);
11683 		}
11684 		break;
11685 	}
11686 	case BPF_JSGE:
11687 	case BPF_JSGT:
11688 	{
11689 		if (is_jmp32) {
11690 			s32 false_smax = opcode == BPF_JSGT ? sval32    : sval32 - 1;
11691 			s32 true_smin = opcode == BPF_JSGT ? sval32 + 1 : sval32;
11692 
11693 			false_reg->s32_max_value = min(false_reg->s32_max_value, false_smax);
11694 			true_reg->s32_min_value = max(true_reg->s32_min_value, true_smin);
11695 		} else {
11696 			s64 false_smax = opcode == BPF_JSGT ? sval    : sval - 1;
11697 			s64 true_smin = opcode == BPF_JSGT ? sval + 1 : sval;
11698 
11699 			false_reg->smax_value = min(false_reg->smax_value, false_smax);
11700 			true_reg->smin_value = max(true_reg->smin_value, true_smin);
11701 		}
11702 		break;
11703 	}
11704 	case BPF_JLE:
11705 	case BPF_JLT:
11706 	{
11707 		if (is_jmp32) {
11708 			u32 false_umin = opcode == BPF_JLT ? val32  : val32 + 1;
11709 			u32 true_umax = opcode == BPF_JLT ? val32 - 1 : val32;
11710 
11711 			false_reg->u32_min_value = max(false_reg->u32_min_value,
11712 						       false_umin);
11713 			true_reg->u32_max_value = min(true_reg->u32_max_value,
11714 						      true_umax);
11715 		} else {
11716 			u64 false_umin = opcode == BPF_JLT ? val    : val + 1;
11717 			u64 true_umax = opcode == BPF_JLT ? val - 1 : val;
11718 
11719 			false_reg->umin_value = max(false_reg->umin_value, false_umin);
11720 			true_reg->umax_value = min(true_reg->umax_value, true_umax);
11721 		}
11722 		break;
11723 	}
11724 	case BPF_JSLE:
11725 	case BPF_JSLT:
11726 	{
11727 		if (is_jmp32) {
11728 			s32 false_smin = opcode == BPF_JSLT ? sval32    : sval32 + 1;
11729 			s32 true_smax = opcode == BPF_JSLT ? sval32 - 1 : sval32;
11730 
11731 			false_reg->s32_min_value = max(false_reg->s32_min_value, false_smin);
11732 			true_reg->s32_max_value = min(true_reg->s32_max_value, true_smax);
11733 		} else {
11734 			s64 false_smin = opcode == BPF_JSLT ? sval    : sval + 1;
11735 			s64 true_smax = opcode == BPF_JSLT ? sval - 1 : sval;
11736 
11737 			false_reg->smin_value = max(false_reg->smin_value, false_smin);
11738 			true_reg->smax_value = min(true_reg->smax_value, true_smax);
11739 		}
11740 		break;
11741 	}
11742 	default:
11743 		return;
11744 	}
11745 
11746 	if (is_jmp32) {
11747 		false_reg->var_off = tnum_or(tnum_clear_subreg(false_64off),
11748 					     tnum_subreg(false_32off));
11749 		true_reg->var_off = tnum_or(tnum_clear_subreg(true_64off),
11750 					    tnum_subreg(true_32off));
11751 		__reg_combine_32_into_64(false_reg);
11752 		__reg_combine_32_into_64(true_reg);
11753 	} else {
11754 		false_reg->var_off = false_64off;
11755 		true_reg->var_off = true_64off;
11756 		__reg_combine_64_into_32(false_reg);
11757 		__reg_combine_64_into_32(true_reg);
11758 	}
11759 }
11760 
11761 /* Same as above, but for the case that dst_reg holds a constant and src_reg is
11762  * the variable reg.
11763  */
11764 static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
11765 				struct bpf_reg_state *false_reg,
11766 				u64 val, u32 val32,
11767 				u8 opcode, bool is_jmp32)
11768 {
11769 	opcode = flip_opcode(opcode);
11770 	/* This uses zero as "not present in table"; luckily the zero opcode,
11771 	 * BPF_JA, can't get here.
11772 	 */
11773 	if (opcode)
11774 		reg_set_min_max(true_reg, false_reg, val, val32, opcode, is_jmp32);
11775 }
11776 
11777 /* Regs are known to be equal, so intersect their min/max/var_off */
11778 static void __reg_combine_min_max(struct bpf_reg_state *src_reg,
11779 				  struct bpf_reg_state *dst_reg)
11780 {
11781 	src_reg->umin_value = dst_reg->umin_value = max(src_reg->umin_value,
11782 							dst_reg->umin_value);
11783 	src_reg->umax_value = dst_reg->umax_value = min(src_reg->umax_value,
11784 							dst_reg->umax_value);
11785 	src_reg->smin_value = dst_reg->smin_value = max(src_reg->smin_value,
11786 							dst_reg->smin_value);
11787 	src_reg->smax_value = dst_reg->smax_value = min(src_reg->smax_value,
11788 							dst_reg->smax_value);
11789 	src_reg->var_off = dst_reg->var_off = tnum_intersect(src_reg->var_off,
11790 							     dst_reg->var_off);
11791 	reg_bounds_sync(src_reg);
11792 	reg_bounds_sync(dst_reg);
11793 }
11794 
11795 static void reg_combine_min_max(struct bpf_reg_state *true_src,
11796 				struct bpf_reg_state *true_dst,
11797 				struct bpf_reg_state *false_src,
11798 				struct bpf_reg_state *false_dst,
11799 				u8 opcode)
11800 {
11801 	switch (opcode) {
11802 	case BPF_JEQ:
11803 		__reg_combine_min_max(true_src, true_dst);
11804 		break;
11805 	case BPF_JNE:
11806 		__reg_combine_min_max(false_src, false_dst);
11807 		break;
11808 	}
11809 }
11810 
11811 static void mark_ptr_or_null_reg(struct bpf_func_state *state,
11812 				 struct bpf_reg_state *reg, u32 id,
11813 				 bool is_null)
11814 {
11815 	if (type_may_be_null(reg->type) && reg->id == id &&
11816 	    (is_rcu_reg(reg) || !WARN_ON_ONCE(!reg->id))) {
11817 		/* Old offset (both fixed and variable parts) should have been
11818 		 * known-zero, because we don't allow pointer arithmetic on
11819 		 * pointers that might be NULL. If we see this happening, don't
11820 		 * convert the register.
11821 		 *
11822 		 * But in some cases, some helpers that return local kptrs
11823 		 * advance offset for the returned pointer. In those cases, it
11824 		 * is fine to expect to see reg->off.
11825 		 */
11826 		if (WARN_ON_ONCE(reg->smin_value || reg->smax_value || !tnum_equals_const(reg->var_off, 0)))
11827 			return;
11828 		if (reg->type != (PTR_TO_BTF_ID | MEM_ALLOC | PTR_MAYBE_NULL) && WARN_ON_ONCE(reg->off))
11829 			return;
11830 		if (is_null) {
11831 			reg->type = SCALAR_VALUE;
11832 			/* We don't need id and ref_obj_id from this point
11833 			 * onwards anymore, thus we should better reset it,
11834 			 * so that state pruning has chances to take effect.
11835 			 */
11836 			reg->id = 0;
11837 			reg->ref_obj_id = 0;
11838 
11839 			return;
11840 		}
11841 
11842 		mark_ptr_not_null_reg(reg);
11843 
11844 		if (!reg_may_point_to_spin_lock(reg)) {
11845 			/* For not-NULL ptr, reg->ref_obj_id will be reset
11846 			 * in release_reference().
11847 			 *
11848 			 * reg->id is still used by spin_lock ptr. Other
11849 			 * than spin_lock ptr type, reg->id can be reset.
11850 			 */
11851 			reg->id = 0;
11852 		}
11853 	}
11854 }
11855 
11856 /* The logic is similar to find_good_pkt_pointers(), both could eventually
11857  * be folded together at some point.
11858  */
11859 static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno,
11860 				  bool is_null)
11861 {
11862 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
11863 	struct bpf_reg_state *regs = state->regs, *reg;
11864 	u32 ref_obj_id = regs[regno].ref_obj_id;
11865 	u32 id = regs[regno].id;
11866 
11867 	if (ref_obj_id && ref_obj_id == id && is_null)
11868 		/* regs[regno] is in the " == NULL" branch.
11869 		 * No one could have freed the reference state before
11870 		 * doing the NULL check.
11871 		 */
11872 		WARN_ON_ONCE(release_reference_state(state, id));
11873 
11874 	bpf_for_each_reg_in_vstate(vstate, state, reg, ({
11875 		mark_ptr_or_null_reg(state, reg, id, is_null);
11876 	}));
11877 }
11878 
11879 static bool try_match_pkt_pointers(const struct bpf_insn *insn,
11880 				   struct bpf_reg_state *dst_reg,
11881 				   struct bpf_reg_state *src_reg,
11882 				   struct bpf_verifier_state *this_branch,
11883 				   struct bpf_verifier_state *other_branch)
11884 {
11885 	if (BPF_SRC(insn->code) != BPF_X)
11886 		return false;
11887 
11888 	/* Pointers are always 64-bit. */
11889 	if (BPF_CLASS(insn->code) == BPF_JMP32)
11890 		return false;
11891 
11892 	switch (BPF_OP(insn->code)) {
11893 	case BPF_JGT:
11894 		if ((dst_reg->type == PTR_TO_PACKET &&
11895 		     src_reg->type == PTR_TO_PACKET_END) ||
11896 		    (dst_reg->type == PTR_TO_PACKET_META &&
11897 		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
11898 			/* pkt_data' > pkt_end, pkt_meta' > pkt_data */
11899 			find_good_pkt_pointers(this_branch, dst_reg,
11900 					       dst_reg->type, false);
11901 			mark_pkt_end(other_branch, insn->dst_reg, true);
11902 		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
11903 			    src_reg->type == PTR_TO_PACKET) ||
11904 			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
11905 			    src_reg->type == PTR_TO_PACKET_META)) {
11906 			/* pkt_end > pkt_data', pkt_data > pkt_meta' */
11907 			find_good_pkt_pointers(other_branch, src_reg,
11908 					       src_reg->type, true);
11909 			mark_pkt_end(this_branch, insn->src_reg, false);
11910 		} else {
11911 			return false;
11912 		}
11913 		break;
11914 	case BPF_JLT:
11915 		if ((dst_reg->type == PTR_TO_PACKET &&
11916 		     src_reg->type == PTR_TO_PACKET_END) ||
11917 		    (dst_reg->type == PTR_TO_PACKET_META &&
11918 		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
11919 			/* pkt_data' < pkt_end, pkt_meta' < pkt_data */
11920 			find_good_pkt_pointers(other_branch, dst_reg,
11921 					       dst_reg->type, true);
11922 			mark_pkt_end(this_branch, insn->dst_reg, false);
11923 		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
11924 			    src_reg->type == PTR_TO_PACKET) ||
11925 			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
11926 			    src_reg->type == PTR_TO_PACKET_META)) {
11927 			/* pkt_end < pkt_data', pkt_data > pkt_meta' */
11928 			find_good_pkt_pointers(this_branch, src_reg,
11929 					       src_reg->type, false);
11930 			mark_pkt_end(other_branch, insn->src_reg, true);
11931 		} else {
11932 			return false;
11933 		}
11934 		break;
11935 	case BPF_JGE:
11936 		if ((dst_reg->type == PTR_TO_PACKET &&
11937 		     src_reg->type == PTR_TO_PACKET_END) ||
11938 		    (dst_reg->type == PTR_TO_PACKET_META &&
11939 		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
11940 			/* pkt_data' >= pkt_end, pkt_meta' >= pkt_data */
11941 			find_good_pkt_pointers(this_branch, dst_reg,
11942 					       dst_reg->type, true);
11943 			mark_pkt_end(other_branch, insn->dst_reg, false);
11944 		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
11945 			    src_reg->type == PTR_TO_PACKET) ||
11946 			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
11947 			    src_reg->type == PTR_TO_PACKET_META)) {
11948 			/* pkt_end >= pkt_data', pkt_data >= pkt_meta' */
11949 			find_good_pkt_pointers(other_branch, src_reg,
11950 					       src_reg->type, false);
11951 			mark_pkt_end(this_branch, insn->src_reg, true);
11952 		} else {
11953 			return false;
11954 		}
11955 		break;
11956 	case BPF_JLE:
11957 		if ((dst_reg->type == PTR_TO_PACKET &&
11958 		     src_reg->type == PTR_TO_PACKET_END) ||
11959 		    (dst_reg->type == PTR_TO_PACKET_META &&
11960 		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
11961 			/* pkt_data' <= pkt_end, pkt_meta' <= pkt_data */
11962 			find_good_pkt_pointers(other_branch, dst_reg,
11963 					       dst_reg->type, false);
11964 			mark_pkt_end(this_branch, insn->dst_reg, true);
11965 		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
11966 			    src_reg->type == PTR_TO_PACKET) ||
11967 			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
11968 			    src_reg->type == PTR_TO_PACKET_META)) {
11969 			/* pkt_end <= pkt_data', pkt_data <= pkt_meta' */
11970 			find_good_pkt_pointers(this_branch, src_reg,
11971 					       src_reg->type, true);
11972 			mark_pkt_end(other_branch, insn->src_reg, false);
11973 		} else {
11974 			return false;
11975 		}
11976 		break;
11977 	default:
11978 		return false;
11979 	}
11980 
11981 	return true;
11982 }
11983 
11984 static void find_equal_scalars(struct bpf_verifier_state *vstate,
11985 			       struct bpf_reg_state *known_reg)
11986 {
11987 	struct bpf_func_state *state;
11988 	struct bpf_reg_state *reg;
11989 
11990 	bpf_for_each_reg_in_vstate(vstate, state, reg, ({
11991 		if (reg->type == SCALAR_VALUE && reg->id == known_reg->id)
11992 			copy_register_state(reg, known_reg);
11993 	}));
11994 }
11995 
11996 static int check_cond_jmp_op(struct bpf_verifier_env *env,
11997 			     struct bpf_insn *insn, int *insn_idx)
11998 {
11999 	struct bpf_verifier_state *this_branch = env->cur_state;
12000 	struct bpf_verifier_state *other_branch;
12001 	struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs;
12002 	struct bpf_reg_state *dst_reg, *other_branch_regs, *src_reg = NULL;
12003 	struct bpf_reg_state *eq_branch_regs;
12004 	u8 opcode = BPF_OP(insn->code);
12005 	bool is_jmp32;
12006 	int pred = -1;
12007 	int err;
12008 
12009 	/* Only conditional jumps are expected to reach here. */
12010 	if (opcode == BPF_JA || opcode > BPF_JSLE) {
12011 		verbose(env, "invalid BPF_JMP/JMP32 opcode %x\n", opcode);
12012 		return -EINVAL;
12013 	}
12014 
12015 	if (BPF_SRC(insn->code) == BPF_X) {
12016 		if (insn->imm != 0) {
12017 			verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
12018 			return -EINVAL;
12019 		}
12020 
12021 		/* check src1 operand */
12022 		err = check_reg_arg(env, insn->src_reg, SRC_OP);
12023 		if (err)
12024 			return err;
12025 
12026 		if (is_pointer_value(env, insn->src_reg)) {
12027 			verbose(env, "R%d pointer comparison prohibited\n",
12028 				insn->src_reg);
12029 			return -EACCES;
12030 		}
12031 		src_reg = &regs[insn->src_reg];
12032 	} else {
12033 		if (insn->src_reg != BPF_REG_0) {
12034 			verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
12035 			return -EINVAL;
12036 		}
12037 	}
12038 
12039 	/* check src2 operand */
12040 	err = check_reg_arg(env, insn->dst_reg, SRC_OP);
12041 	if (err)
12042 		return err;
12043 
12044 	dst_reg = &regs[insn->dst_reg];
12045 	is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
12046 
12047 	if (BPF_SRC(insn->code) == BPF_K) {
12048 		pred = is_branch_taken(dst_reg, insn->imm, opcode, is_jmp32);
12049 	} else if (src_reg->type == SCALAR_VALUE &&
12050 		   is_jmp32 && tnum_is_const(tnum_subreg(src_reg->var_off))) {
12051 		pred = is_branch_taken(dst_reg,
12052 				       tnum_subreg(src_reg->var_off).value,
12053 				       opcode,
12054 				       is_jmp32);
12055 	} else if (src_reg->type == SCALAR_VALUE &&
12056 		   !is_jmp32 && tnum_is_const(src_reg->var_off)) {
12057 		pred = is_branch_taken(dst_reg,
12058 				       src_reg->var_off.value,
12059 				       opcode,
12060 				       is_jmp32);
12061 	} else if (reg_is_pkt_pointer_any(dst_reg) &&
12062 		   reg_is_pkt_pointer_any(src_reg) &&
12063 		   !is_jmp32) {
12064 		pred = is_pkt_ptr_branch_taken(dst_reg, src_reg, opcode);
12065 	}
12066 
12067 	if (pred >= 0) {
12068 		/* If we get here with a dst_reg pointer type it is because
12069 		 * above is_branch_taken() special cased the 0 comparison.
12070 		 */
12071 		if (!__is_pointer_value(false, dst_reg))
12072 			err = mark_chain_precision(env, insn->dst_reg);
12073 		if (BPF_SRC(insn->code) == BPF_X && !err &&
12074 		    !__is_pointer_value(false, src_reg))
12075 			err = mark_chain_precision(env, insn->src_reg);
12076 		if (err)
12077 			return err;
12078 	}
12079 
12080 	if (pred == 1) {
12081 		/* Only follow the goto, ignore fall-through. If needed, push
12082 		 * the fall-through branch for simulation under speculative
12083 		 * execution.
12084 		 */
12085 		if (!env->bypass_spec_v1 &&
12086 		    !sanitize_speculative_path(env, insn, *insn_idx + 1,
12087 					       *insn_idx))
12088 			return -EFAULT;
12089 		*insn_idx += insn->off;
12090 		return 0;
12091 	} else if (pred == 0) {
12092 		/* Only follow the fall-through branch, since that's where the
12093 		 * program will go. If needed, push the goto branch for
12094 		 * simulation under speculative execution.
12095 		 */
12096 		if (!env->bypass_spec_v1 &&
12097 		    !sanitize_speculative_path(env, insn,
12098 					       *insn_idx + insn->off + 1,
12099 					       *insn_idx))
12100 			return -EFAULT;
12101 		return 0;
12102 	}
12103 
12104 	other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx,
12105 				  false);
12106 	if (!other_branch)
12107 		return -EFAULT;
12108 	other_branch_regs = other_branch->frame[other_branch->curframe]->regs;
12109 
12110 	/* detect if we are comparing against a constant value so we can adjust
12111 	 * our min/max values for our dst register.
12112 	 * this is only legit if both are scalars (or pointers to the same
12113 	 * object, I suppose, see the PTR_MAYBE_NULL related if block below),
12114 	 * because otherwise the different base pointers mean the offsets aren't
12115 	 * comparable.
12116 	 */
12117 	if (BPF_SRC(insn->code) == BPF_X) {
12118 		struct bpf_reg_state *src_reg = &regs[insn->src_reg];
12119 
12120 		if (dst_reg->type == SCALAR_VALUE &&
12121 		    src_reg->type == SCALAR_VALUE) {
12122 			if (tnum_is_const(src_reg->var_off) ||
12123 			    (is_jmp32 &&
12124 			     tnum_is_const(tnum_subreg(src_reg->var_off))))
12125 				reg_set_min_max(&other_branch_regs[insn->dst_reg],
12126 						dst_reg,
12127 						src_reg->var_off.value,
12128 						tnum_subreg(src_reg->var_off).value,
12129 						opcode, is_jmp32);
12130 			else if (tnum_is_const(dst_reg->var_off) ||
12131 				 (is_jmp32 &&
12132 				  tnum_is_const(tnum_subreg(dst_reg->var_off))))
12133 				reg_set_min_max_inv(&other_branch_regs[insn->src_reg],
12134 						    src_reg,
12135 						    dst_reg->var_off.value,
12136 						    tnum_subreg(dst_reg->var_off).value,
12137 						    opcode, is_jmp32);
12138 			else if (!is_jmp32 &&
12139 				 (opcode == BPF_JEQ || opcode == BPF_JNE))
12140 				/* Comparing for equality, we can combine knowledge */
12141 				reg_combine_min_max(&other_branch_regs[insn->src_reg],
12142 						    &other_branch_regs[insn->dst_reg],
12143 						    src_reg, dst_reg, opcode);
12144 			if (src_reg->id &&
12145 			    !WARN_ON_ONCE(src_reg->id != other_branch_regs[insn->src_reg].id)) {
12146 				find_equal_scalars(this_branch, src_reg);
12147 				find_equal_scalars(other_branch, &other_branch_regs[insn->src_reg]);
12148 			}
12149 
12150 		}
12151 	} else if (dst_reg->type == SCALAR_VALUE) {
12152 		reg_set_min_max(&other_branch_regs[insn->dst_reg],
12153 					dst_reg, insn->imm, (u32)insn->imm,
12154 					opcode, is_jmp32);
12155 	}
12156 
12157 	if (dst_reg->type == SCALAR_VALUE && dst_reg->id &&
12158 	    !WARN_ON_ONCE(dst_reg->id != other_branch_regs[insn->dst_reg].id)) {
12159 		find_equal_scalars(this_branch, dst_reg);
12160 		find_equal_scalars(other_branch, &other_branch_regs[insn->dst_reg]);
12161 	}
12162 
12163 	/* if one pointer register is compared to another pointer
12164 	 * register check if PTR_MAYBE_NULL could be lifted.
12165 	 * E.g. register A - maybe null
12166 	 *      register B - not null
12167 	 * for JNE A, B, ... - A is not null in the false branch;
12168 	 * for JEQ A, B, ... - A is not null in the true branch.
12169 	 *
12170 	 * Since PTR_TO_BTF_ID points to a kernel struct that does
12171 	 * not need to be null checked by the BPF program, i.e.,
12172 	 * could be null even without PTR_MAYBE_NULL marking, so
12173 	 * only propagate nullness when neither reg is that type.
12174 	 */
12175 	if (!is_jmp32 && BPF_SRC(insn->code) == BPF_X &&
12176 	    __is_pointer_value(false, src_reg) && __is_pointer_value(false, dst_reg) &&
12177 	    type_may_be_null(src_reg->type) != type_may_be_null(dst_reg->type) &&
12178 	    base_type(src_reg->type) != PTR_TO_BTF_ID &&
12179 	    base_type(dst_reg->type) != PTR_TO_BTF_ID) {
12180 		eq_branch_regs = NULL;
12181 		switch (opcode) {
12182 		case BPF_JEQ:
12183 			eq_branch_regs = other_branch_regs;
12184 			break;
12185 		case BPF_JNE:
12186 			eq_branch_regs = regs;
12187 			break;
12188 		default:
12189 			/* do nothing */
12190 			break;
12191 		}
12192 		if (eq_branch_regs) {
12193 			if (type_may_be_null(src_reg->type))
12194 				mark_ptr_not_null_reg(&eq_branch_regs[insn->src_reg]);
12195 			else
12196 				mark_ptr_not_null_reg(&eq_branch_regs[insn->dst_reg]);
12197 		}
12198 	}
12199 
12200 	/* detect if R == 0 where R is returned from bpf_map_lookup_elem().
12201 	 * NOTE: these optimizations below are related with pointer comparison
12202 	 *       which will never be JMP32.
12203 	 */
12204 	if (!is_jmp32 && BPF_SRC(insn->code) == BPF_K &&
12205 	    insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) &&
12206 	    type_may_be_null(dst_reg->type)) {
12207 		/* Mark all identical registers in each branch as either
12208 		 * safe or unknown depending R == 0 or R != 0 conditional.
12209 		 */
12210 		mark_ptr_or_null_regs(this_branch, insn->dst_reg,
12211 				      opcode == BPF_JNE);
12212 		mark_ptr_or_null_regs(other_branch, insn->dst_reg,
12213 				      opcode == BPF_JEQ);
12214 	} else if (!try_match_pkt_pointers(insn, dst_reg, &regs[insn->src_reg],
12215 					   this_branch, other_branch) &&
12216 		   is_pointer_value(env, insn->dst_reg)) {
12217 		verbose(env, "R%d pointer comparison prohibited\n",
12218 			insn->dst_reg);
12219 		return -EACCES;
12220 	}
12221 	if (env->log.level & BPF_LOG_LEVEL)
12222 		print_insn_state(env, this_branch->frame[this_branch->curframe]);
12223 	return 0;
12224 }
12225 
12226 /* verify BPF_LD_IMM64 instruction */
12227 static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
12228 {
12229 	struct bpf_insn_aux_data *aux = cur_aux(env);
12230 	struct bpf_reg_state *regs = cur_regs(env);
12231 	struct bpf_reg_state *dst_reg;
12232 	struct bpf_map *map;
12233 	int err;
12234 
12235 	if (BPF_SIZE(insn->code) != BPF_DW) {
12236 		verbose(env, "invalid BPF_LD_IMM insn\n");
12237 		return -EINVAL;
12238 	}
12239 	if (insn->off != 0) {
12240 		verbose(env, "BPF_LD_IMM64 uses reserved fields\n");
12241 		return -EINVAL;
12242 	}
12243 
12244 	err = check_reg_arg(env, insn->dst_reg, DST_OP);
12245 	if (err)
12246 		return err;
12247 
12248 	dst_reg = &regs[insn->dst_reg];
12249 	if (insn->src_reg == 0) {
12250 		u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
12251 
12252 		dst_reg->type = SCALAR_VALUE;
12253 		__mark_reg_known(&regs[insn->dst_reg], imm);
12254 		return 0;
12255 	}
12256 
12257 	/* All special src_reg cases are listed below. From this point onwards
12258 	 * we either succeed and assign a corresponding dst_reg->type after
12259 	 * zeroing the offset, or fail and reject the program.
12260 	 */
12261 	mark_reg_known_zero(env, regs, insn->dst_reg);
12262 
12263 	if (insn->src_reg == BPF_PSEUDO_BTF_ID) {
12264 		dst_reg->type = aux->btf_var.reg_type;
12265 		switch (base_type(dst_reg->type)) {
12266 		case PTR_TO_MEM:
12267 			dst_reg->mem_size = aux->btf_var.mem_size;
12268 			break;
12269 		case PTR_TO_BTF_ID:
12270 			dst_reg->btf = aux->btf_var.btf;
12271 			dst_reg->btf_id = aux->btf_var.btf_id;
12272 			break;
12273 		default:
12274 			verbose(env, "bpf verifier is misconfigured\n");
12275 			return -EFAULT;
12276 		}
12277 		return 0;
12278 	}
12279 
12280 	if (insn->src_reg == BPF_PSEUDO_FUNC) {
12281 		struct bpf_prog_aux *aux = env->prog->aux;
12282 		u32 subprogno = find_subprog(env,
12283 					     env->insn_idx + insn->imm + 1);
12284 
12285 		if (!aux->func_info) {
12286 			verbose(env, "missing btf func_info\n");
12287 			return -EINVAL;
12288 		}
12289 		if (aux->func_info_aux[subprogno].linkage != BTF_FUNC_STATIC) {
12290 			verbose(env, "callback function not static\n");
12291 			return -EINVAL;
12292 		}
12293 
12294 		dst_reg->type = PTR_TO_FUNC;
12295 		dst_reg->subprogno = subprogno;
12296 		return 0;
12297 	}
12298 
12299 	map = env->used_maps[aux->map_index];
12300 	dst_reg->map_ptr = map;
12301 
12302 	if (insn->src_reg == BPF_PSEUDO_MAP_VALUE ||
12303 	    insn->src_reg == BPF_PSEUDO_MAP_IDX_VALUE) {
12304 		dst_reg->type = PTR_TO_MAP_VALUE;
12305 		dst_reg->off = aux->map_off;
12306 		WARN_ON_ONCE(map->max_entries != 1);
12307 		/* We want reg->id to be same (0) as map_value is not distinct */
12308 	} else if (insn->src_reg == BPF_PSEUDO_MAP_FD ||
12309 		   insn->src_reg == BPF_PSEUDO_MAP_IDX) {
12310 		dst_reg->type = CONST_PTR_TO_MAP;
12311 	} else {
12312 		verbose(env, "bpf verifier is misconfigured\n");
12313 		return -EINVAL;
12314 	}
12315 
12316 	return 0;
12317 }
12318 
12319 static bool may_access_skb(enum bpf_prog_type type)
12320 {
12321 	switch (type) {
12322 	case BPF_PROG_TYPE_SOCKET_FILTER:
12323 	case BPF_PROG_TYPE_SCHED_CLS:
12324 	case BPF_PROG_TYPE_SCHED_ACT:
12325 		return true;
12326 	default:
12327 		return false;
12328 	}
12329 }
12330 
12331 /* verify safety of LD_ABS|LD_IND instructions:
12332  * - they can only appear in the programs where ctx == skb
12333  * - since they are wrappers of function calls, they scratch R1-R5 registers,
12334  *   preserve R6-R9, and store return value into R0
12335  *
12336  * Implicit input:
12337  *   ctx == skb == R6 == CTX
12338  *
12339  * Explicit input:
12340  *   SRC == any register
12341  *   IMM == 32-bit immediate
12342  *
12343  * Output:
12344  *   R0 - 8/16/32-bit skb data converted to cpu endianness
12345  */
12346 static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
12347 {
12348 	struct bpf_reg_state *regs = cur_regs(env);
12349 	static const int ctx_reg = BPF_REG_6;
12350 	u8 mode = BPF_MODE(insn->code);
12351 	int i, err;
12352 
12353 	if (!may_access_skb(resolve_prog_type(env->prog))) {
12354 		verbose(env, "BPF_LD_[ABS|IND] instructions not allowed for this program type\n");
12355 		return -EINVAL;
12356 	}
12357 
12358 	if (!env->ops->gen_ld_abs) {
12359 		verbose(env, "bpf verifier is misconfigured\n");
12360 		return -EINVAL;
12361 	}
12362 
12363 	if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
12364 	    BPF_SIZE(insn->code) == BPF_DW ||
12365 	    (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
12366 		verbose(env, "BPF_LD_[ABS|IND] uses reserved fields\n");
12367 		return -EINVAL;
12368 	}
12369 
12370 	/* check whether implicit source operand (register R6) is readable */
12371 	err = check_reg_arg(env, ctx_reg, SRC_OP);
12372 	if (err)
12373 		return err;
12374 
12375 	/* Disallow usage of BPF_LD_[ABS|IND] with reference tracking, as
12376 	 * gen_ld_abs() may terminate the program at runtime, leading to
12377 	 * reference leak.
12378 	 */
12379 	err = check_reference_leak(env);
12380 	if (err) {
12381 		verbose(env, "BPF_LD_[ABS|IND] cannot be mixed with socket references\n");
12382 		return err;
12383 	}
12384 
12385 	if (env->cur_state->active_lock.ptr) {
12386 		verbose(env, "BPF_LD_[ABS|IND] cannot be used inside bpf_spin_lock-ed region\n");
12387 		return -EINVAL;
12388 	}
12389 
12390 	if (env->cur_state->active_rcu_lock) {
12391 		verbose(env, "BPF_LD_[ABS|IND] cannot be used inside bpf_rcu_read_lock-ed region\n");
12392 		return -EINVAL;
12393 	}
12394 
12395 	if (regs[ctx_reg].type != PTR_TO_CTX) {
12396 		verbose(env,
12397 			"at the time of BPF_LD_ABS|IND R6 != pointer to skb\n");
12398 		return -EINVAL;
12399 	}
12400 
12401 	if (mode == BPF_IND) {
12402 		/* check explicit source operand */
12403 		err = check_reg_arg(env, insn->src_reg, SRC_OP);
12404 		if (err)
12405 			return err;
12406 	}
12407 
12408 	err = check_ptr_off_reg(env, &regs[ctx_reg], ctx_reg);
12409 	if (err < 0)
12410 		return err;
12411 
12412 	/* reset caller saved regs to unreadable */
12413 	for (i = 0; i < CALLER_SAVED_REGS; i++) {
12414 		mark_reg_not_init(env, regs, caller_saved[i]);
12415 		check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
12416 	}
12417 
12418 	/* mark destination R0 register as readable, since it contains
12419 	 * the value fetched from the packet.
12420 	 * Already marked as written above.
12421 	 */
12422 	mark_reg_unknown(env, regs, BPF_REG_0);
12423 	/* ld_abs load up to 32-bit skb data. */
12424 	regs[BPF_REG_0].subreg_def = env->insn_idx + 1;
12425 	return 0;
12426 }
12427 
12428 static int check_return_code(struct bpf_verifier_env *env)
12429 {
12430 	struct tnum enforce_attach_type_range = tnum_unknown;
12431 	const struct bpf_prog *prog = env->prog;
12432 	struct bpf_reg_state *reg;
12433 	struct tnum range = tnum_range(0, 1);
12434 	enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
12435 	int err;
12436 	struct bpf_func_state *frame = env->cur_state->frame[0];
12437 	const bool is_subprog = frame->subprogno;
12438 
12439 	/* LSM and struct_ops func-ptr's return type could be "void" */
12440 	if (!is_subprog) {
12441 		switch (prog_type) {
12442 		case BPF_PROG_TYPE_LSM:
12443 			if (prog->expected_attach_type == BPF_LSM_CGROUP)
12444 				/* See below, can be 0 or 0-1 depending on hook. */
12445 				break;
12446 			fallthrough;
12447 		case BPF_PROG_TYPE_STRUCT_OPS:
12448 			if (!prog->aux->attach_func_proto->type)
12449 				return 0;
12450 			break;
12451 		default:
12452 			break;
12453 		}
12454 	}
12455 
12456 	/* eBPF calling convention is such that R0 is used
12457 	 * to return the value from eBPF program.
12458 	 * Make sure that it's readable at this time
12459 	 * of bpf_exit, which means that program wrote
12460 	 * something into it earlier
12461 	 */
12462 	err = check_reg_arg(env, BPF_REG_0, SRC_OP);
12463 	if (err)
12464 		return err;
12465 
12466 	if (is_pointer_value(env, BPF_REG_0)) {
12467 		verbose(env, "R0 leaks addr as return value\n");
12468 		return -EACCES;
12469 	}
12470 
12471 	reg = cur_regs(env) + BPF_REG_0;
12472 
12473 	if (frame->in_async_callback_fn) {
12474 		/* enforce return zero from async callbacks like timer */
12475 		if (reg->type != SCALAR_VALUE) {
12476 			verbose(env, "In async callback the register R0 is not a known value (%s)\n",
12477 				reg_type_str(env, reg->type));
12478 			return -EINVAL;
12479 		}
12480 
12481 		if (!tnum_in(tnum_const(0), reg->var_off)) {
12482 			verbose_invalid_scalar(env, reg, &range, "async callback", "R0");
12483 			return -EINVAL;
12484 		}
12485 		return 0;
12486 	}
12487 
12488 	if (is_subprog) {
12489 		if (reg->type != SCALAR_VALUE) {
12490 			verbose(env, "At subprogram exit the register R0 is not a scalar value (%s)\n",
12491 				reg_type_str(env, reg->type));
12492 			return -EINVAL;
12493 		}
12494 		return 0;
12495 	}
12496 
12497 	switch (prog_type) {
12498 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
12499 		if (env->prog->expected_attach_type == BPF_CGROUP_UDP4_RECVMSG ||
12500 		    env->prog->expected_attach_type == BPF_CGROUP_UDP6_RECVMSG ||
12501 		    env->prog->expected_attach_type == BPF_CGROUP_INET4_GETPEERNAME ||
12502 		    env->prog->expected_attach_type == BPF_CGROUP_INET6_GETPEERNAME ||
12503 		    env->prog->expected_attach_type == BPF_CGROUP_INET4_GETSOCKNAME ||
12504 		    env->prog->expected_attach_type == BPF_CGROUP_INET6_GETSOCKNAME)
12505 			range = tnum_range(1, 1);
12506 		if (env->prog->expected_attach_type == BPF_CGROUP_INET4_BIND ||
12507 		    env->prog->expected_attach_type == BPF_CGROUP_INET6_BIND)
12508 			range = tnum_range(0, 3);
12509 		break;
12510 	case BPF_PROG_TYPE_CGROUP_SKB:
12511 		if (env->prog->expected_attach_type == BPF_CGROUP_INET_EGRESS) {
12512 			range = tnum_range(0, 3);
12513 			enforce_attach_type_range = tnum_range(2, 3);
12514 		}
12515 		break;
12516 	case BPF_PROG_TYPE_CGROUP_SOCK:
12517 	case BPF_PROG_TYPE_SOCK_OPS:
12518 	case BPF_PROG_TYPE_CGROUP_DEVICE:
12519 	case BPF_PROG_TYPE_CGROUP_SYSCTL:
12520 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
12521 		break;
12522 	case BPF_PROG_TYPE_RAW_TRACEPOINT:
12523 		if (!env->prog->aux->attach_btf_id)
12524 			return 0;
12525 		range = tnum_const(0);
12526 		break;
12527 	case BPF_PROG_TYPE_TRACING:
12528 		switch (env->prog->expected_attach_type) {
12529 		case BPF_TRACE_FENTRY:
12530 		case BPF_TRACE_FEXIT:
12531 			range = tnum_const(0);
12532 			break;
12533 		case BPF_TRACE_RAW_TP:
12534 		case BPF_MODIFY_RETURN:
12535 			return 0;
12536 		case BPF_TRACE_ITER:
12537 			break;
12538 		default:
12539 			return -ENOTSUPP;
12540 		}
12541 		break;
12542 	case BPF_PROG_TYPE_SK_LOOKUP:
12543 		range = tnum_range(SK_DROP, SK_PASS);
12544 		break;
12545 
12546 	case BPF_PROG_TYPE_LSM:
12547 		if (env->prog->expected_attach_type != BPF_LSM_CGROUP) {
12548 			/* Regular BPF_PROG_TYPE_LSM programs can return
12549 			 * any value.
12550 			 */
12551 			return 0;
12552 		}
12553 		if (!env->prog->aux->attach_func_proto->type) {
12554 			/* Make sure programs that attach to void
12555 			 * hooks don't try to modify return value.
12556 			 */
12557 			range = tnum_range(1, 1);
12558 		}
12559 		break;
12560 
12561 	case BPF_PROG_TYPE_EXT:
12562 		/* freplace program can return anything as its return value
12563 		 * depends on the to-be-replaced kernel func or bpf program.
12564 		 */
12565 	default:
12566 		return 0;
12567 	}
12568 
12569 	if (reg->type != SCALAR_VALUE) {
12570 		verbose(env, "At program exit the register R0 is not a known value (%s)\n",
12571 			reg_type_str(env, reg->type));
12572 		return -EINVAL;
12573 	}
12574 
12575 	if (!tnum_in(range, reg->var_off)) {
12576 		verbose_invalid_scalar(env, reg, &range, "program exit", "R0");
12577 		if (prog->expected_attach_type == BPF_LSM_CGROUP &&
12578 		    prog_type == BPF_PROG_TYPE_LSM &&
12579 		    !prog->aux->attach_func_proto->type)
12580 			verbose(env, "Note, BPF_LSM_CGROUP that attach to void LSM hooks can't modify return value!\n");
12581 		return -EINVAL;
12582 	}
12583 
12584 	if (!tnum_is_unknown(enforce_attach_type_range) &&
12585 	    tnum_in(enforce_attach_type_range, reg->var_off))
12586 		env->prog->enforce_expected_attach_type = 1;
12587 	return 0;
12588 }
12589 
12590 /* non-recursive DFS pseudo code
12591  * 1  procedure DFS-iterative(G,v):
12592  * 2      label v as discovered
12593  * 3      let S be a stack
12594  * 4      S.push(v)
12595  * 5      while S is not empty
12596  * 6            t <- S.peek()
12597  * 7            if t is what we're looking for:
12598  * 8                return t
12599  * 9            for all edges e in G.adjacentEdges(t) do
12600  * 10               if edge e is already labelled
12601  * 11                   continue with the next edge
12602  * 12               w <- G.adjacentVertex(t,e)
12603  * 13               if vertex w is not discovered and not explored
12604  * 14                   label e as tree-edge
12605  * 15                   label w as discovered
12606  * 16                   S.push(w)
12607  * 17                   continue at 5
12608  * 18               else if vertex w is discovered
12609  * 19                   label e as back-edge
12610  * 20               else
12611  * 21                   // vertex w is explored
12612  * 22                   label e as forward- or cross-edge
12613  * 23           label t as explored
12614  * 24           S.pop()
12615  *
12616  * convention:
12617  * 0x10 - discovered
12618  * 0x11 - discovered and fall-through edge labelled
12619  * 0x12 - discovered and fall-through and branch edges labelled
12620  * 0x20 - explored
12621  */
12622 
12623 enum {
12624 	DISCOVERED = 0x10,
12625 	EXPLORED = 0x20,
12626 	FALLTHROUGH = 1,
12627 	BRANCH = 2,
12628 };
12629 
12630 static u32 state_htab_size(struct bpf_verifier_env *env)
12631 {
12632 	return env->prog->len;
12633 }
12634 
12635 static struct bpf_verifier_state_list **explored_state(
12636 					struct bpf_verifier_env *env,
12637 					int idx)
12638 {
12639 	struct bpf_verifier_state *cur = env->cur_state;
12640 	struct bpf_func_state *state = cur->frame[cur->curframe];
12641 
12642 	return &env->explored_states[(idx ^ state->callsite) % state_htab_size(env)];
12643 }
12644 
12645 static void mark_prune_point(struct bpf_verifier_env *env, int idx)
12646 {
12647 	env->insn_aux_data[idx].prune_point = true;
12648 }
12649 
12650 static bool is_prune_point(struct bpf_verifier_env *env, int insn_idx)
12651 {
12652 	return env->insn_aux_data[insn_idx].prune_point;
12653 }
12654 
12655 enum {
12656 	DONE_EXPLORING = 0,
12657 	KEEP_EXPLORING = 1,
12658 };
12659 
12660 /* t, w, e - match pseudo-code above:
12661  * t - index of current instruction
12662  * w - next instruction
12663  * e - edge
12664  */
12665 static int push_insn(int t, int w, int e, struct bpf_verifier_env *env,
12666 		     bool loop_ok)
12667 {
12668 	int *insn_stack = env->cfg.insn_stack;
12669 	int *insn_state = env->cfg.insn_state;
12670 
12671 	if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH))
12672 		return DONE_EXPLORING;
12673 
12674 	if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH))
12675 		return DONE_EXPLORING;
12676 
12677 	if (w < 0 || w >= env->prog->len) {
12678 		verbose_linfo(env, t, "%d: ", t);
12679 		verbose(env, "jump out of range from insn %d to %d\n", t, w);
12680 		return -EINVAL;
12681 	}
12682 
12683 	if (e == BRANCH) {
12684 		/* mark branch target for state pruning */
12685 		mark_prune_point(env, w);
12686 		mark_jmp_point(env, w);
12687 	}
12688 
12689 	if (insn_state[w] == 0) {
12690 		/* tree-edge */
12691 		insn_state[t] = DISCOVERED | e;
12692 		insn_state[w] = DISCOVERED;
12693 		if (env->cfg.cur_stack >= env->prog->len)
12694 			return -E2BIG;
12695 		insn_stack[env->cfg.cur_stack++] = w;
12696 		return KEEP_EXPLORING;
12697 	} else if ((insn_state[w] & 0xF0) == DISCOVERED) {
12698 		if (loop_ok && env->bpf_capable)
12699 			return DONE_EXPLORING;
12700 		verbose_linfo(env, t, "%d: ", t);
12701 		verbose_linfo(env, w, "%d: ", w);
12702 		verbose(env, "back-edge from insn %d to %d\n", t, w);
12703 		return -EINVAL;
12704 	} else if (insn_state[w] == EXPLORED) {
12705 		/* forward- or cross-edge */
12706 		insn_state[t] = DISCOVERED | e;
12707 	} else {
12708 		verbose(env, "insn state internal bug\n");
12709 		return -EFAULT;
12710 	}
12711 	return DONE_EXPLORING;
12712 }
12713 
12714 static int visit_func_call_insn(int t, struct bpf_insn *insns,
12715 				struct bpf_verifier_env *env,
12716 				bool visit_callee)
12717 {
12718 	int ret;
12719 
12720 	ret = push_insn(t, t + 1, FALLTHROUGH, env, false);
12721 	if (ret)
12722 		return ret;
12723 
12724 	mark_prune_point(env, t + 1);
12725 	/* when we exit from subprog, we need to record non-linear history */
12726 	mark_jmp_point(env, t + 1);
12727 
12728 	if (visit_callee) {
12729 		mark_prune_point(env, t);
12730 		ret = push_insn(t, t + insns[t].imm + 1, BRANCH, env,
12731 				/* It's ok to allow recursion from CFG point of
12732 				 * view. __check_func_call() will do the actual
12733 				 * check.
12734 				 */
12735 				bpf_pseudo_func(insns + t));
12736 	}
12737 	return ret;
12738 }
12739 
12740 /* Visits the instruction at index t and returns one of the following:
12741  *  < 0 - an error occurred
12742  *  DONE_EXPLORING - the instruction was fully explored
12743  *  KEEP_EXPLORING - there is still work to be done before it is fully explored
12744  */
12745 static int visit_insn(int t, struct bpf_verifier_env *env)
12746 {
12747 	struct bpf_insn *insns = env->prog->insnsi;
12748 	int ret;
12749 
12750 	if (bpf_pseudo_func(insns + t))
12751 		return visit_func_call_insn(t, insns, env, true);
12752 
12753 	/* All non-branch instructions have a single fall-through edge. */
12754 	if (BPF_CLASS(insns[t].code) != BPF_JMP &&
12755 	    BPF_CLASS(insns[t].code) != BPF_JMP32)
12756 		return push_insn(t, t + 1, FALLTHROUGH, env, false);
12757 
12758 	switch (BPF_OP(insns[t].code)) {
12759 	case BPF_EXIT:
12760 		return DONE_EXPLORING;
12761 
12762 	case BPF_CALL:
12763 		if (insns[t].imm == BPF_FUNC_timer_set_callback)
12764 			/* Mark this call insn as a prune point to trigger
12765 			 * is_state_visited() check before call itself is
12766 			 * processed by __check_func_call(). Otherwise new
12767 			 * async state will be pushed for further exploration.
12768 			 */
12769 			mark_prune_point(env, t);
12770 		return visit_func_call_insn(t, insns, env,
12771 					    insns[t].src_reg == BPF_PSEUDO_CALL);
12772 
12773 	case BPF_JA:
12774 		if (BPF_SRC(insns[t].code) != BPF_K)
12775 			return -EINVAL;
12776 
12777 		/* unconditional jump with single edge */
12778 		ret = push_insn(t, t + insns[t].off + 1, FALLTHROUGH, env,
12779 				true);
12780 		if (ret)
12781 			return ret;
12782 
12783 		mark_prune_point(env, t + insns[t].off + 1);
12784 		mark_jmp_point(env, t + insns[t].off + 1);
12785 
12786 		return ret;
12787 
12788 	default:
12789 		/* conditional jump with two edges */
12790 		mark_prune_point(env, t);
12791 
12792 		ret = push_insn(t, t + 1, FALLTHROUGH, env, true);
12793 		if (ret)
12794 			return ret;
12795 
12796 		return push_insn(t, t + insns[t].off + 1, BRANCH, env, true);
12797 	}
12798 }
12799 
12800 /* non-recursive depth-first-search to detect loops in BPF program
12801  * loop == back-edge in directed graph
12802  */
12803 static int check_cfg(struct bpf_verifier_env *env)
12804 {
12805 	int insn_cnt = env->prog->len;
12806 	int *insn_stack, *insn_state;
12807 	int ret = 0;
12808 	int i;
12809 
12810 	insn_state = env->cfg.insn_state = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
12811 	if (!insn_state)
12812 		return -ENOMEM;
12813 
12814 	insn_stack = env->cfg.insn_stack = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
12815 	if (!insn_stack) {
12816 		kvfree(insn_state);
12817 		return -ENOMEM;
12818 	}
12819 
12820 	insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */
12821 	insn_stack[0] = 0; /* 0 is the first instruction */
12822 	env->cfg.cur_stack = 1;
12823 
12824 	while (env->cfg.cur_stack > 0) {
12825 		int t = insn_stack[env->cfg.cur_stack - 1];
12826 
12827 		ret = visit_insn(t, env);
12828 		switch (ret) {
12829 		case DONE_EXPLORING:
12830 			insn_state[t] = EXPLORED;
12831 			env->cfg.cur_stack--;
12832 			break;
12833 		case KEEP_EXPLORING:
12834 			break;
12835 		default:
12836 			if (ret > 0) {
12837 				verbose(env, "visit_insn internal bug\n");
12838 				ret = -EFAULT;
12839 			}
12840 			goto err_free;
12841 		}
12842 	}
12843 
12844 	if (env->cfg.cur_stack < 0) {
12845 		verbose(env, "pop stack internal bug\n");
12846 		ret = -EFAULT;
12847 		goto err_free;
12848 	}
12849 
12850 	for (i = 0; i < insn_cnt; i++) {
12851 		if (insn_state[i] != EXPLORED) {
12852 			verbose(env, "unreachable insn %d\n", i);
12853 			ret = -EINVAL;
12854 			goto err_free;
12855 		}
12856 	}
12857 	ret = 0; /* cfg looks good */
12858 
12859 err_free:
12860 	kvfree(insn_state);
12861 	kvfree(insn_stack);
12862 	env->cfg.insn_state = env->cfg.insn_stack = NULL;
12863 	return ret;
12864 }
12865 
12866 static int check_abnormal_return(struct bpf_verifier_env *env)
12867 {
12868 	int i;
12869 
12870 	for (i = 1; i < env->subprog_cnt; i++) {
12871 		if (env->subprog_info[i].has_ld_abs) {
12872 			verbose(env, "LD_ABS is not allowed in subprogs without BTF\n");
12873 			return -EINVAL;
12874 		}
12875 		if (env->subprog_info[i].has_tail_call) {
12876 			verbose(env, "tail_call is not allowed in subprogs without BTF\n");
12877 			return -EINVAL;
12878 		}
12879 	}
12880 	return 0;
12881 }
12882 
12883 /* The minimum supported BTF func info size */
12884 #define MIN_BPF_FUNCINFO_SIZE	8
12885 #define MAX_FUNCINFO_REC_SIZE	252
12886 
12887 static int check_btf_func(struct bpf_verifier_env *env,
12888 			  const union bpf_attr *attr,
12889 			  bpfptr_t uattr)
12890 {
12891 	const struct btf_type *type, *func_proto, *ret_type;
12892 	u32 i, nfuncs, urec_size, min_size;
12893 	u32 krec_size = sizeof(struct bpf_func_info);
12894 	struct bpf_func_info *krecord;
12895 	struct bpf_func_info_aux *info_aux = NULL;
12896 	struct bpf_prog *prog;
12897 	const struct btf *btf;
12898 	bpfptr_t urecord;
12899 	u32 prev_offset = 0;
12900 	bool scalar_return;
12901 	int ret = -ENOMEM;
12902 
12903 	nfuncs = attr->func_info_cnt;
12904 	if (!nfuncs) {
12905 		if (check_abnormal_return(env))
12906 			return -EINVAL;
12907 		return 0;
12908 	}
12909 
12910 	if (nfuncs != env->subprog_cnt) {
12911 		verbose(env, "number of funcs in func_info doesn't match number of subprogs\n");
12912 		return -EINVAL;
12913 	}
12914 
12915 	urec_size = attr->func_info_rec_size;
12916 	if (urec_size < MIN_BPF_FUNCINFO_SIZE ||
12917 	    urec_size > MAX_FUNCINFO_REC_SIZE ||
12918 	    urec_size % sizeof(u32)) {
12919 		verbose(env, "invalid func info rec size %u\n", urec_size);
12920 		return -EINVAL;
12921 	}
12922 
12923 	prog = env->prog;
12924 	btf = prog->aux->btf;
12925 
12926 	urecord = make_bpfptr(attr->func_info, uattr.is_kernel);
12927 	min_size = min_t(u32, krec_size, urec_size);
12928 
12929 	krecord = kvcalloc(nfuncs, krec_size, GFP_KERNEL | __GFP_NOWARN);
12930 	if (!krecord)
12931 		return -ENOMEM;
12932 	info_aux = kcalloc(nfuncs, sizeof(*info_aux), GFP_KERNEL | __GFP_NOWARN);
12933 	if (!info_aux)
12934 		goto err_free;
12935 
12936 	for (i = 0; i < nfuncs; i++) {
12937 		ret = bpf_check_uarg_tail_zero(urecord, krec_size, urec_size);
12938 		if (ret) {
12939 			if (ret == -E2BIG) {
12940 				verbose(env, "nonzero tailing record in func info");
12941 				/* set the size kernel expects so loader can zero
12942 				 * out the rest of the record.
12943 				 */
12944 				if (copy_to_bpfptr_offset(uattr,
12945 							  offsetof(union bpf_attr, func_info_rec_size),
12946 							  &min_size, sizeof(min_size)))
12947 					ret = -EFAULT;
12948 			}
12949 			goto err_free;
12950 		}
12951 
12952 		if (copy_from_bpfptr(&krecord[i], urecord, min_size)) {
12953 			ret = -EFAULT;
12954 			goto err_free;
12955 		}
12956 
12957 		/* check insn_off */
12958 		ret = -EINVAL;
12959 		if (i == 0) {
12960 			if (krecord[i].insn_off) {
12961 				verbose(env,
12962 					"nonzero insn_off %u for the first func info record",
12963 					krecord[i].insn_off);
12964 				goto err_free;
12965 			}
12966 		} else if (krecord[i].insn_off <= prev_offset) {
12967 			verbose(env,
12968 				"same or smaller insn offset (%u) than previous func info record (%u)",
12969 				krecord[i].insn_off, prev_offset);
12970 			goto err_free;
12971 		}
12972 
12973 		if (env->subprog_info[i].start != krecord[i].insn_off) {
12974 			verbose(env, "func_info BTF section doesn't match subprog layout in BPF program\n");
12975 			goto err_free;
12976 		}
12977 
12978 		/* check type_id */
12979 		type = btf_type_by_id(btf, krecord[i].type_id);
12980 		if (!type || !btf_type_is_func(type)) {
12981 			verbose(env, "invalid type id %d in func info",
12982 				krecord[i].type_id);
12983 			goto err_free;
12984 		}
12985 		info_aux[i].linkage = BTF_INFO_VLEN(type->info);
12986 
12987 		func_proto = btf_type_by_id(btf, type->type);
12988 		if (unlikely(!func_proto || !btf_type_is_func_proto(func_proto)))
12989 			/* btf_func_check() already verified it during BTF load */
12990 			goto err_free;
12991 		ret_type = btf_type_skip_modifiers(btf, func_proto->type, NULL);
12992 		scalar_return =
12993 			btf_type_is_small_int(ret_type) || btf_is_any_enum(ret_type);
12994 		if (i && !scalar_return && env->subprog_info[i].has_ld_abs) {
12995 			verbose(env, "LD_ABS is only allowed in functions that return 'int'.\n");
12996 			goto err_free;
12997 		}
12998 		if (i && !scalar_return && env->subprog_info[i].has_tail_call) {
12999 			verbose(env, "tail_call is only allowed in functions that return 'int'.\n");
13000 			goto err_free;
13001 		}
13002 
13003 		prev_offset = krecord[i].insn_off;
13004 		bpfptr_add(&urecord, urec_size);
13005 	}
13006 
13007 	prog->aux->func_info = krecord;
13008 	prog->aux->func_info_cnt = nfuncs;
13009 	prog->aux->func_info_aux = info_aux;
13010 	return 0;
13011 
13012 err_free:
13013 	kvfree(krecord);
13014 	kfree(info_aux);
13015 	return ret;
13016 }
13017 
13018 static void adjust_btf_func(struct bpf_verifier_env *env)
13019 {
13020 	struct bpf_prog_aux *aux = env->prog->aux;
13021 	int i;
13022 
13023 	if (!aux->func_info)
13024 		return;
13025 
13026 	for (i = 0; i < env->subprog_cnt; i++)
13027 		aux->func_info[i].insn_off = env->subprog_info[i].start;
13028 }
13029 
13030 #define MIN_BPF_LINEINFO_SIZE	offsetofend(struct bpf_line_info, line_col)
13031 #define MAX_LINEINFO_REC_SIZE	MAX_FUNCINFO_REC_SIZE
13032 
13033 static int check_btf_line(struct bpf_verifier_env *env,
13034 			  const union bpf_attr *attr,
13035 			  bpfptr_t uattr)
13036 {
13037 	u32 i, s, nr_linfo, ncopy, expected_size, rec_size, prev_offset = 0;
13038 	struct bpf_subprog_info *sub;
13039 	struct bpf_line_info *linfo;
13040 	struct bpf_prog *prog;
13041 	const struct btf *btf;
13042 	bpfptr_t ulinfo;
13043 	int err;
13044 
13045 	nr_linfo = attr->line_info_cnt;
13046 	if (!nr_linfo)
13047 		return 0;
13048 	if (nr_linfo > INT_MAX / sizeof(struct bpf_line_info))
13049 		return -EINVAL;
13050 
13051 	rec_size = attr->line_info_rec_size;
13052 	if (rec_size < MIN_BPF_LINEINFO_SIZE ||
13053 	    rec_size > MAX_LINEINFO_REC_SIZE ||
13054 	    rec_size & (sizeof(u32) - 1))
13055 		return -EINVAL;
13056 
13057 	/* Need to zero it in case the userspace may
13058 	 * pass in a smaller bpf_line_info object.
13059 	 */
13060 	linfo = kvcalloc(nr_linfo, sizeof(struct bpf_line_info),
13061 			 GFP_KERNEL | __GFP_NOWARN);
13062 	if (!linfo)
13063 		return -ENOMEM;
13064 
13065 	prog = env->prog;
13066 	btf = prog->aux->btf;
13067 
13068 	s = 0;
13069 	sub = env->subprog_info;
13070 	ulinfo = make_bpfptr(attr->line_info, uattr.is_kernel);
13071 	expected_size = sizeof(struct bpf_line_info);
13072 	ncopy = min_t(u32, expected_size, rec_size);
13073 	for (i = 0; i < nr_linfo; i++) {
13074 		err = bpf_check_uarg_tail_zero(ulinfo, expected_size, rec_size);
13075 		if (err) {
13076 			if (err == -E2BIG) {
13077 				verbose(env, "nonzero tailing record in line_info");
13078 				if (copy_to_bpfptr_offset(uattr,
13079 							  offsetof(union bpf_attr, line_info_rec_size),
13080 							  &expected_size, sizeof(expected_size)))
13081 					err = -EFAULT;
13082 			}
13083 			goto err_free;
13084 		}
13085 
13086 		if (copy_from_bpfptr(&linfo[i], ulinfo, ncopy)) {
13087 			err = -EFAULT;
13088 			goto err_free;
13089 		}
13090 
13091 		/*
13092 		 * Check insn_off to ensure
13093 		 * 1) strictly increasing AND
13094 		 * 2) bounded by prog->len
13095 		 *
13096 		 * The linfo[0].insn_off == 0 check logically falls into
13097 		 * the later "missing bpf_line_info for func..." case
13098 		 * because the first linfo[0].insn_off must be the
13099 		 * first sub also and the first sub must have
13100 		 * subprog_info[0].start == 0.
13101 		 */
13102 		if ((i && linfo[i].insn_off <= prev_offset) ||
13103 		    linfo[i].insn_off >= prog->len) {
13104 			verbose(env, "Invalid line_info[%u].insn_off:%u (prev_offset:%u prog->len:%u)\n",
13105 				i, linfo[i].insn_off, prev_offset,
13106 				prog->len);
13107 			err = -EINVAL;
13108 			goto err_free;
13109 		}
13110 
13111 		if (!prog->insnsi[linfo[i].insn_off].code) {
13112 			verbose(env,
13113 				"Invalid insn code at line_info[%u].insn_off\n",
13114 				i);
13115 			err = -EINVAL;
13116 			goto err_free;
13117 		}
13118 
13119 		if (!btf_name_by_offset(btf, linfo[i].line_off) ||
13120 		    !btf_name_by_offset(btf, linfo[i].file_name_off)) {
13121 			verbose(env, "Invalid line_info[%u].line_off or .file_name_off\n", i);
13122 			err = -EINVAL;
13123 			goto err_free;
13124 		}
13125 
13126 		if (s != env->subprog_cnt) {
13127 			if (linfo[i].insn_off == sub[s].start) {
13128 				sub[s].linfo_idx = i;
13129 				s++;
13130 			} else if (sub[s].start < linfo[i].insn_off) {
13131 				verbose(env, "missing bpf_line_info for func#%u\n", s);
13132 				err = -EINVAL;
13133 				goto err_free;
13134 			}
13135 		}
13136 
13137 		prev_offset = linfo[i].insn_off;
13138 		bpfptr_add(&ulinfo, rec_size);
13139 	}
13140 
13141 	if (s != env->subprog_cnt) {
13142 		verbose(env, "missing bpf_line_info for %u funcs starting from func#%u\n",
13143 			env->subprog_cnt - s, s);
13144 		err = -EINVAL;
13145 		goto err_free;
13146 	}
13147 
13148 	prog->aux->linfo = linfo;
13149 	prog->aux->nr_linfo = nr_linfo;
13150 
13151 	return 0;
13152 
13153 err_free:
13154 	kvfree(linfo);
13155 	return err;
13156 }
13157 
13158 #define MIN_CORE_RELO_SIZE	sizeof(struct bpf_core_relo)
13159 #define MAX_CORE_RELO_SIZE	MAX_FUNCINFO_REC_SIZE
13160 
13161 static int check_core_relo(struct bpf_verifier_env *env,
13162 			   const union bpf_attr *attr,
13163 			   bpfptr_t uattr)
13164 {
13165 	u32 i, nr_core_relo, ncopy, expected_size, rec_size;
13166 	struct bpf_core_relo core_relo = {};
13167 	struct bpf_prog *prog = env->prog;
13168 	const struct btf *btf = prog->aux->btf;
13169 	struct bpf_core_ctx ctx = {
13170 		.log = &env->log,
13171 		.btf = btf,
13172 	};
13173 	bpfptr_t u_core_relo;
13174 	int err;
13175 
13176 	nr_core_relo = attr->core_relo_cnt;
13177 	if (!nr_core_relo)
13178 		return 0;
13179 	if (nr_core_relo > INT_MAX / sizeof(struct bpf_core_relo))
13180 		return -EINVAL;
13181 
13182 	rec_size = attr->core_relo_rec_size;
13183 	if (rec_size < MIN_CORE_RELO_SIZE ||
13184 	    rec_size > MAX_CORE_RELO_SIZE ||
13185 	    rec_size % sizeof(u32))
13186 		return -EINVAL;
13187 
13188 	u_core_relo = make_bpfptr(attr->core_relos, uattr.is_kernel);
13189 	expected_size = sizeof(struct bpf_core_relo);
13190 	ncopy = min_t(u32, expected_size, rec_size);
13191 
13192 	/* Unlike func_info and line_info, copy and apply each CO-RE
13193 	 * relocation record one at a time.
13194 	 */
13195 	for (i = 0; i < nr_core_relo; i++) {
13196 		/* future proofing when sizeof(bpf_core_relo) changes */
13197 		err = bpf_check_uarg_tail_zero(u_core_relo, expected_size, rec_size);
13198 		if (err) {
13199 			if (err == -E2BIG) {
13200 				verbose(env, "nonzero tailing record in core_relo");
13201 				if (copy_to_bpfptr_offset(uattr,
13202 							  offsetof(union bpf_attr, core_relo_rec_size),
13203 							  &expected_size, sizeof(expected_size)))
13204 					err = -EFAULT;
13205 			}
13206 			break;
13207 		}
13208 
13209 		if (copy_from_bpfptr(&core_relo, u_core_relo, ncopy)) {
13210 			err = -EFAULT;
13211 			break;
13212 		}
13213 
13214 		if (core_relo.insn_off % 8 || core_relo.insn_off / 8 >= prog->len) {
13215 			verbose(env, "Invalid core_relo[%u].insn_off:%u prog->len:%u\n",
13216 				i, core_relo.insn_off, prog->len);
13217 			err = -EINVAL;
13218 			break;
13219 		}
13220 
13221 		err = bpf_core_apply(&ctx, &core_relo, i,
13222 				     &prog->insnsi[core_relo.insn_off / 8]);
13223 		if (err)
13224 			break;
13225 		bpfptr_add(&u_core_relo, rec_size);
13226 	}
13227 	return err;
13228 }
13229 
13230 static int check_btf_info(struct bpf_verifier_env *env,
13231 			  const union bpf_attr *attr,
13232 			  bpfptr_t uattr)
13233 {
13234 	struct btf *btf;
13235 	int err;
13236 
13237 	if (!attr->func_info_cnt && !attr->line_info_cnt) {
13238 		if (check_abnormal_return(env))
13239 			return -EINVAL;
13240 		return 0;
13241 	}
13242 
13243 	btf = btf_get_by_fd(attr->prog_btf_fd);
13244 	if (IS_ERR(btf))
13245 		return PTR_ERR(btf);
13246 	if (btf_is_kernel(btf)) {
13247 		btf_put(btf);
13248 		return -EACCES;
13249 	}
13250 	env->prog->aux->btf = btf;
13251 
13252 	err = check_btf_func(env, attr, uattr);
13253 	if (err)
13254 		return err;
13255 
13256 	err = check_btf_line(env, attr, uattr);
13257 	if (err)
13258 		return err;
13259 
13260 	err = check_core_relo(env, attr, uattr);
13261 	if (err)
13262 		return err;
13263 
13264 	return 0;
13265 }
13266 
13267 /* check %cur's range satisfies %old's */
13268 static bool range_within(struct bpf_reg_state *old,
13269 			 struct bpf_reg_state *cur)
13270 {
13271 	return old->umin_value <= cur->umin_value &&
13272 	       old->umax_value >= cur->umax_value &&
13273 	       old->smin_value <= cur->smin_value &&
13274 	       old->smax_value >= cur->smax_value &&
13275 	       old->u32_min_value <= cur->u32_min_value &&
13276 	       old->u32_max_value >= cur->u32_max_value &&
13277 	       old->s32_min_value <= cur->s32_min_value &&
13278 	       old->s32_max_value >= cur->s32_max_value;
13279 }
13280 
13281 /* If in the old state two registers had the same id, then they need to have
13282  * the same id in the new state as well.  But that id could be different from
13283  * the old state, so we need to track the mapping from old to new ids.
13284  * Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent
13285  * regs with old id 5 must also have new id 9 for the new state to be safe.  But
13286  * regs with a different old id could still have new id 9, we don't care about
13287  * that.
13288  * So we look through our idmap to see if this old id has been seen before.  If
13289  * so, we require the new id to match; otherwise, we add the id pair to the map.
13290  */
13291 static bool check_ids(u32 old_id, u32 cur_id, struct bpf_id_pair *idmap)
13292 {
13293 	unsigned int i;
13294 
13295 	/* either both IDs should be set or both should be zero */
13296 	if (!!old_id != !!cur_id)
13297 		return false;
13298 
13299 	if (old_id == 0) /* cur_id == 0 as well */
13300 		return true;
13301 
13302 	for (i = 0; i < BPF_ID_MAP_SIZE; i++) {
13303 		if (!idmap[i].old) {
13304 			/* Reached an empty slot; haven't seen this id before */
13305 			idmap[i].old = old_id;
13306 			idmap[i].cur = cur_id;
13307 			return true;
13308 		}
13309 		if (idmap[i].old == old_id)
13310 			return idmap[i].cur == cur_id;
13311 	}
13312 	/* We ran out of idmap slots, which should be impossible */
13313 	WARN_ON_ONCE(1);
13314 	return false;
13315 }
13316 
13317 static void clean_func_state(struct bpf_verifier_env *env,
13318 			     struct bpf_func_state *st)
13319 {
13320 	enum bpf_reg_liveness live;
13321 	int i, j;
13322 
13323 	for (i = 0; i < BPF_REG_FP; i++) {
13324 		live = st->regs[i].live;
13325 		/* liveness must not touch this register anymore */
13326 		st->regs[i].live |= REG_LIVE_DONE;
13327 		if (!(live & REG_LIVE_READ))
13328 			/* since the register is unused, clear its state
13329 			 * to make further comparison simpler
13330 			 */
13331 			__mark_reg_not_init(env, &st->regs[i]);
13332 	}
13333 
13334 	for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) {
13335 		live = st->stack[i].spilled_ptr.live;
13336 		/* liveness must not touch this stack slot anymore */
13337 		st->stack[i].spilled_ptr.live |= REG_LIVE_DONE;
13338 		if (!(live & REG_LIVE_READ)) {
13339 			__mark_reg_not_init(env, &st->stack[i].spilled_ptr);
13340 			for (j = 0; j < BPF_REG_SIZE; j++)
13341 				st->stack[i].slot_type[j] = STACK_INVALID;
13342 		}
13343 	}
13344 }
13345 
13346 static void clean_verifier_state(struct bpf_verifier_env *env,
13347 				 struct bpf_verifier_state *st)
13348 {
13349 	int i;
13350 
13351 	if (st->frame[0]->regs[0].live & REG_LIVE_DONE)
13352 		/* all regs in this state in all frames were already marked */
13353 		return;
13354 
13355 	for (i = 0; i <= st->curframe; i++)
13356 		clean_func_state(env, st->frame[i]);
13357 }
13358 
13359 /* the parentage chains form a tree.
13360  * the verifier states are added to state lists at given insn and
13361  * pushed into state stack for future exploration.
13362  * when the verifier reaches bpf_exit insn some of the verifer states
13363  * stored in the state lists have their final liveness state already,
13364  * but a lot of states will get revised from liveness point of view when
13365  * the verifier explores other branches.
13366  * Example:
13367  * 1: r0 = 1
13368  * 2: if r1 == 100 goto pc+1
13369  * 3: r0 = 2
13370  * 4: exit
13371  * when the verifier reaches exit insn the register r0 in the state list of
13372  * insn 2 will be seen as !REG_LIVE_READ. Then the verifier pops the other_branch
13373  * of insn 2 and goes exploring further. At the insn 4 it will walk the
13374  * parentage chain from insn 4 into insn 2 and will mark r0 as REG_LIVE_READ.
13375  *
13376  * Since the verifier pushes the branch states as it sees them while exploring
13377  * the program the condition of walking the branch instruction for the second
13378  * time means that all states below this branch were already explored and
13379  * their final liveness marks are already propagated.
13380  * Hence when the verifier completes the search of state list in is_state_visited()
13381  * we can call this clean_live_states() function to mark all liveness states
13382  * as REG_LIVE_DONE to indicate that 'parent' pointers of 'struct bpf_reg_state'
13383  * will not be used.
13384  * This function also clears the registers and stack for states that !READ
13385  * to simplify state merging.
13386  *
13387  * Important note here that walking the same branch instruction in the callee
13388  * doesn't meant that the states are DONE. The verifier has to compare
13389  * the callsites
13390  */
13391 static void clean_live_states(struct bpf_verifier_env *env, int insn,
13392 			      struct bpf_verifier_state *cur)
13393 {
13394 	struct bpf_verifier_state_list *sl;
13395 	int i;
13396 
13397 	sl = *explored_state(env, insn);
13398 	while (sl) {
13399 		if (sl->state.branches)
13400 			goto next;
13401 		if (sl->state.insn_idx != insn ||
13402 		    sl->state.curframe != cur->curframe)
13403 			goto next;
13404 		for (i = 0; i <= cur->curframe; i++)
13405 			if (sl->state.frame[i]->callsite != cur->frame[i]->callsite)
13406 				goto next;
13407 		clean_verifier_state(env, &sl->state);
13408 next:
13409 		sl = sl->next;
13410 	}
13411 }
13412 
13413 static bool regs_exact(const struct bpf_reg_state *rold,
13414 		       const struct bpf_reg_state *rcur,
13415 		       struct bpf_id_pair *idmap)
13416 {
13417 	return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 &&
13418 	       check_ids(rold->id, rcur->id, idmap) &&
13419 	       check_ids(rold->ref_obj_id, rcur->ref_obj_id, idmap);
13420 }
13421 
13422 /* Returns true if (rold safe implies rcur safe) */
13423 static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold,
13424 		    struct bpf_reg_state *rcur, struct bpf_id_pair *idmap)
13425 {
13426 	if (!(rold->live & REG_LIVE_READ))
13427 		/* explored state didn't use this */
13428 		return true;
13429 	if (rold->type == NOT_INIT)
13430 		/* explored state can't have used this */
13431 		return true;
13432 	if (rcur->type == NOT_INIT)
13433 		return false;
13434 
13435 	/* Enforce that register types have to match exactly, including their
13436 	 * modifiers (like PTR_MAYBE_NULL, MEM_RDONLY, etc), as a general
13437 	 * rule.
13438 	 *
13439 	 * One can make a point that using a pointer register as unbounded
13440 	 * SCALAR would be technically acceptable, but this could lead to
13441 	 * pointer leaks because scalars are allowed to leak while pointers
13442 	 * are not. We could make this safe in special cases if root is
13443 	 * calling us, but it's probably not worth the hassle.
13444 	 *
13445 	 * Also, register types that are *not* MAYBE_NULL could technically be
13446 	 * safe to use as their MAYBE_NULL variants (e.g., PTR_TO_MAP_VALUE
13447 	 * is safe to be used as PTR_TO_MAP_VALUE_OR_NULL, provided both point
13448 	 * to the same map).
13449 	 * However, if the old MAYBE_NULL register then got NULL checked,
13450 	 * doing so could have affected others with the same id, and we can't
13451 	 * check for that because we lost the id when we converted to
13452 	 * a non-MAYBE_NULL variant.
13453 	 * So, as a general rule we don't allow mixing MAYBE_NULL and
13454 	 * non-MAYBE_NULL registers as well.
13455 	 */
13456 	if (rold->type != rcur->type)
13457 		return false;
13458 
13459 	switch (base_type(rold->type)) {
13460 	case SCALAR_VALUE:
13461 		if (regs_exact(rold, rcur, idmap))
13462 			return true;
13463 		if (env->explore_alu_limits)
13464 			return false;
13465 		if (!rold->precise)
13466 			return true;
13467 		/* new val must satisfy old val knowledge */
13468 		return range_within(rold, rcur) &&
13469 		       tnum_in(rold->var_off, rcur->var_off);
13470 	case PTR_TO_MAP_KEY:
13471 	case PTR_TO_MAP_VALUE:
13472 		/* If the new min/max/var_off satisfy the old ones and
13473 		 * everything else matches, we are OK.
13474 		 */
13475 		return memcmp(rold, rcur, offsetof(struct bpf_reg_state, var_off)) == 0 &&
13476 		       range_within(rold, rcur) &&
13477 		       tnum_in(rold->var_off, rcur->var_off) &&
13478 		       check_ids(rold->id, rcur->id, idmap);
13479 	case PTR_TO_PACKET_META:
13480 	case PTR_TO_PACKET:
13481 		/* We must have at least as much range as the old ptr
13482 		 * did, so that any accesses which were safe before are
13483 		 * still safe.  This is true even if old range < old off,
13484 		 * since someone could have accessed through (ptr - k), or
13485 		 * even done ptr -= k in a register, to get a safe access.
13486 		 */
13487 		if (rold->range > rcur->range)
13488 			return false;
13489 		/* If the offsets don't match, we can't trust our alignment;
13490 		 * nor can we be sure that we won't fall out of range.
13491 		 */
13492 		if (rold->off != rcur->off)
13493 			return false;
13494 		/* id relations must be preserved */
13495 		if (!check_ids(rold->id, rcur->id, idmap))
13496 			return false;
13497 		/* new val must satisfy old val knowledge */
13498 		return range_within(rold, rcur) &&
13499 		       tnum_in(rold->var_off, rcur->var_off);
13500 	case PTR_TO_STACK:
13501 		/* two stack pointers are equal only if they're pointing to
13502 		 * the same stack frame, since fp-8 in foo != fp-8 in bar
13503 		 */
13504 		return regs_exact(rold, rcur, idmap) && rold->frameno == rcur->frameno;
13505 	default:
13506 		return regs_exact(rold, rcur, idmap);
13507 	}
13508 }
13509 
13510 static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old,
13511 		      struct bpf_func_state *cur, struct bpf_id_pair *idmap)
13512 {
13513 	int i, spi;
13514 
13515 	/* walk slots of the explored stack and ignore any additional
13516 	 * slots in the current stack, since explored(safe) state
13517 	 * didn't use them
13518 	 */
13519 	for (i = 0; i < old->allocated_stack; i++) {
13520 		spi = i / BPF_REG_SIZE;
13521 
13522 		if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ)) {
13523 			i += BPF_REG_SIZE - 1;
13524 			/* explored state didn't use this */
13525 			continue;
13526 		}
13527 
13528 		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID)
13529 			continue;
13530 
13531 		/* explored stack has more populated slots than current stack
13532 		 * and these slots were used
13533 		 */
13534 		if (i >= cur->allocated_stack)
13535 			return false;
13536 
13537 		/* if old state was safe with misc data in the stack
13538 		 * it will be safe with zero-initialized stack.
13539 		 * The opposite is not true
13540 		 */
13541 		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC &&
13542 		    cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO)
13543 			continue;
13544 		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] !=
13545 		    cur->stack[spi].slot_type[i % BPF_REG_SIZE])
13546 			/* Ex: old explored (safe) state has STACK_SPILL in
13547 			 * this stack slot, but current has STACK_MISC ->
13548 			 * this verifier states are not equivalent,
13549 			 * return false to continue verification of this path
13550 			 */
13551 			return false;
13552 		if (i % BPF_REG_SIZE != BPF_REG_SIZE - 1)
13553 			continue;
13554 		/* Both old and cur are having same slot_type */
13555 		switch (old->stack[spi].slot_type[BPF_REG_SIZE - 1]) {
13556 		case STACK_SPILL:
13557 			/* when explored and current stack slot are both storing
13558 			 * spilled registers, check that stored pointers types
13559 			 * are the same as well.
13560 			 * Ex: explored safe path could have stored
13561 			 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8}
13562 			 * but current path has stored:
13563 			 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16}
13564 			 * such verifier states are not equivalent.
13565 			 * return false to continue verification of this path
13566 			 */
13567 			if (!regsafe(env, &old->stack[spi].spilled_ptr,
13568 				     &cur->stack[spi].spilled_ptr, idmap))
13569 				return false;
13570 			break;
13571 		case STACK_DYNPTR:
13572 		{
13573 			const struct bpf_reg_state *old_reg, *cur_reg;
13574 
13575 			old_reg = &old->stack[spi].spilled_ptr;
13576 			cur_reg = &cur->stack[spi].spilled_ptr;
13577 			if (old_reg->dynptr.type != cur_reg->dynptr.type ||
13578 			    old_reg->dynptr.first_slot != cur_reg->dynptr.first_slot ||
13579 			    !check_ids(old_reg->ref_obj_id, cur_reg->ref_obj_id, idmap))
13580 				return false;
13581 			break;
13582 		}
13583 		case STACK_MISC:
13584 		case STACK_ZERO:
13585 		case STACK_INVALID:
13586 			continue;
13587 		/* Ensure that new unhandled slot types return false by default */
13588 		default:
13589 			return false;
13590 		}
13591 	}
13592 	return true;
13593 }
13594 
13595 static bool refsafe(struct bpf_func_state *old, struct bpf_func_state *cur,
13596 		    struct bpf_id_pair *idmap)
13597 {
13598 	int i;
13599 
13600 	if (old->acquired_refs != cur->acquired_refs)
13601 		return false;
13602 
13603 	for (i = 0; i < old->acquired_refs; i++) {
13604 		if (!check_ids(old->refs[i].id, cur->refs[i].id, idmap))
13605 			return false;
13606 	}
13607 
13608 	return true;
13609 }
13610 
13611 /* compare two verifier states
13612  *
13613  * all states stored in state_list are known to be valid, since
13614  * verifier reached 'bpf_exit' instruction through them
13615  *
13616  * this function is called when verifier exploring different branches of
13617  * execution popped from the state stack. If it sees an old state that has
13618  * more strict register state and more strict stack state then this execution
13619  * branch doesn't need to be explored further, since verifier already
13620  * concluded that more strict state leads to valid finish.
13621  *
13622  * Therefore two states are equivalent if register state is more conservative
13623  * and explored stack state is more conservative than the current one.
13624  * Example:
13625  *       explored                   current
13626  * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC)
13627  * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC)
13628  *
13629  * In other words if current stack state (one being explored) has more
13630  * valid slots than old one that already passed validation, it means
13631  * the verifier can stop exploring and conclude that current state is valid too
13632  *
13633  * Similarly with registers. If explored state has register type as invalid
13634  * whereas register type in current state is meaningful, it means that
13635  * the current state will reach 'bpf_exit' instruction safely
13636  */
13637 static bool func_states_equal(struct bpf_verifier_env *env, struct bpf_func_state *old,
13638 			      struct bpf_func_state *cur)
13639 {
13640 	int i;
13641 
13642 	for (i = 0; i < MAX_BPF_REG; i++)
13643 		if (!regsafe(env, &old->regs[i], &cur->regs[i],
13644 			     env->idmap_scratch))
13645 			return false;
13646 
13647 	if (!stacksafe(env, old, cur, env->idmap_scratch))
13648 		return false;
13649 
13650 	if (!refsafe(old, cur, env->idmap_scratch))
13651 		return false;
13652 
13653 	return true;
13654 }
13655 
13656 static bool states_equal(struct bpf_verifier_env *env,
13657 			 struct bpf_verifier_state *old,
13658 			 struct bpf_verifier_state *cur)
13659 {
13660 	int i;
13661 
13662 	if (old->curframe != cur->curframe)
13663 		return false;
13664 
13665 	memset(env->idmap_scratch, 0, sizeof(env->idmap_scratch));
13666 
13667 	/* Verification state from speculative execution simulation
13668 	 * must never prune a non-speculative execution one.
13669 	 */
13670 	if (old->speculative && !cur->speculative)
13671 		return false;
13672 
13673 	if (old->active_lock.ptr != cur->active_lock.ptr)
13674 		return false;
13675 
13676 	/* Old and cur active_lock's have to be either both present
13677 	 * or both absent.
13678 	 */
13679 	if (!!old->active_lock.id != !!cur->active_lock.id)
13680 		return false;
13681 
13682 	if (old->active_lock.id &&
13683 	    !check_ids(old->active_lock.id, cur->active_lock.id, env->idmap_scratch))
13684 		return false;
13685 
13686 	if (old->active_rcu_lock != cur->active_rcu_lock)
13687 		return false;
13688 
13689 	/* for states to be equal callsites have to be the same
13690 	 * and all frame states need to be equivalent
13691 	 */
13692 	for (i = 0; i <= old->curframe; i++) {
13693 		if (old->frame[i]->callsite != cur->frame[i]->callsite)
13694 			return false;
13695 		if (!func_states_equal(env, old->frame[i], cur->frame[i]))
13696 			return false;
13697 	}
13698 	return true;
13699 }
13700 
13701 /* Return 0 if no propagation happened. Return negative error code if error
13702  * happened. Otherwise, return the propagated bit.
13703  */
13704 static int propagate_liveness_reg(struct bpf_verifier_env *env,
13705 				  struct bpf_reg_state *reg,
13706 				  struct bpf_reg_state *parent_reg)
13707 {
13708 	u8 parent_flag = parent_reg->live & REG_LIVE_READ;
13709 	u8 flag = reg->live & REG_LIVE_READ;
13710 	int err;
13711 
13712 	/* When comes here, read flags of PARENT_REG or REG could be any of
13713 	 * REG_LIVE_READ64, REG_LIVE_READ32, REG_LIVE_NONE. There is no need
13714 	 * of propagation if PARENT_REG has strongest REG_LIVE_READ64.
13715 	 */
13716 	if (parent_flag == REG_LIVE_READ64 ||
13717 	    /* Or if there is no read flag from REG. */
13718 	    !flag ||
13719 	    /* Or if the read flag from REG is the same as PARENT_REG. */
13720 	    parent_flag == flag)
13721 		return 0;
13722 
13723 	err = mark_reg_read(env, reg, parent_reg, flag);
13724 	if (err)
13725 		return err;
13726 
13727 	return flag;
13728 }
13729 
13730 /* A write screens off any subsequent reads; but write marks come from the
13731  * straight-line code between a state and its parent.  When we arrive at an
13732  * equivalent state (jump target or such) we didn't arrive by the straight-line
13733  * code, so read marks in the state must propagate to the parent regardless
13734  * of the state's write marks. That's what 'parent == state->parent' comparison
13735  * in mark_reg_read() is for.
13736  */
13737 static int propagate_liveness(struct bpf_verifier_env *env,
13738 			      const struct bpf_verifier_state *vstate,
13739 			      struct bpf_verifier_state *vparent)
13740 {
13741 	struct bpf_reg_state *state_reg, *parent_reg;
13742 	struct bpf_func_state *state, *parent;
13743 	int i, frame, err = 0;
13744 
13745 	if (vparent->curframe != vstate->curframe) {
13746 		WARN(1, "propagate_live: parent frame %d current frame %d\n",
13747 		     vparent->curframe, vstate->curframe);
13748 		return -EFAULT;
13749 	}
13750 	/* Propagate read liveness of registers... */
13751 	BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
13752 	for (frame = 0; frame <= vstate->curframe; frame++) {
13753 		parent = vparent->frame[frame];
13754 		state = vstate->frame[frame];
13755 		parent_reg = parent->regs;
13756 		state_reg = state->regs;
13757 		/* We don't need to worry about FP liveness, it's read-only */
13758 		for (i = frame < vstate->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++) {
13759 			err = propagate_liveness_reg(env, &state_reg[i],
13760 						     &parent_reg[i]);
13761 			if (err < 0)
13762 				return err;
13763 			if (err == REG_LIVE_READ64)
13764 				mark_insn_zext(env, &parent_reg[i]);
13765 		}
13766 
13767 		/* Propagate stack slots. */
13768 		for (i = 0; i < state->allocated_stack / BPF_REG_SIZE &&
13769 			    i < parent->allocated_stack / BPF_REG_SIZE; i++) {
13770 			parent_reg = &parent->stack[i].spilled_ptr;
13771 			state_reg = &state->stack[i].spilled_ptr;
13772 			err = propagate_liveness_reg(env, state_reg,
13773 						     parent_reg);
13774 			if (err < 0)
13775 				return err;
13776 		}
13777 	}
13778 	return 0;
13779 }
13780 
13781 /* find precise scalars in the previous equivalent state and
13782  * propagate them into the current state
13783  */
13784 static int propagate_precision(struct bpf_verifier_env *env,
13785 			       const struct bpf_verifier_state *old)
13786 {
13787 	struct bpf_reg_state *state_reg;
13788 	struct bpf_func_state *state;
13789 	int i, err = 0, fr;
13790 
13791 	for (fr = old->curframe; fr >= 0; fr--) {
13792 		state = old->frame[fr];
13793 		state_reg = state->regs;
13794 		for (i = 0; i < BPF_REG_FP; i++, state_reg++) {
13795 			if (state_reg->type != SCALAR_VALUE ||
13796 			    !state_reg->precise)
13797 				continue;
13798 			if (env->log.level & BPF_LOG_LEVEL2)
13799 				verbose(env, "frame %d: propagating r%d\n", i, fr);
13800 			err = mark_chain_precision_frame(env, fr, i);
13801 			if (err < 0)
13802 				return err;
13803 		}
13804 
13805 		for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
13806 			if (!is_spilled_reg(&state->stack[i]))
13807 				continue;
13808 			state_reg = &state->stack[i].spilled_ptr;
13809 			if (state_reg->type != SCALAR_VALUE ||
13810 			    !state_reg->precise)
13811 				continue;
13812 			if (env->log.level & BPF_LOG_LEVEL2)
13813 				verbose(env, "frame %d: propagating fp%d\n",
13814 					(-i - 1) * BPF_REG_SIZE, fr);
13815 			err = mark_chain_precision_stack_frame(env, fr, i);
13816 			if (err < 0)
13817 				return err;
13818 		}
13819 	}
13820 	return 0;
13821 }
13822 
13823 static bool states_maybe_looping(struct bpf_verifier_state *old,
13824 				 struct bpf_verifier_state *cur)
13825 {
13826 	struct bpf_func_state *fold, *fcur;
13827 	int i, fr = cur->curframe;
13828 
13829 	if (old->curframe != fr)
13830 		return false;
13831 
13832 	fold = old->frame[fr];
13833 	fcur = cur->frame[fr];
13834 	for (i = 0; i < MAX_BPF_REG; i++)
13835 		if (memcmp(&fold->regs[i], &fcur->regs[i],
13836 			   offsetof(struct bpf_reg_state, parent)))
13837 			return false;
13838 	return true;
13839 }
13840 
13841 
13842 static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
13843 {
13844 	struct bpf_verifier_state_list *new_sl;
13845 	struct bpf_verifier_state_list *sl, **pprev;
13846 	struct bpf_verifier_state *cur = env->cur_state, *new;
13847 	int i, j, err, states_cnt = 0;
13848 	bool add_new_state = env->test_state_freq ? true : false;
13849 
13850 	/* bpf progs typically have pruning point every 4 instructions
13851 	 * http://vger.kernel.org/bpfconf2019.html#session-1
13852 	 * Do not add new state for future pruning if the verifier hasn't seen
13853 	 * at least 2 jumps and at least 8 instructions.
13854 	 * This heuristics helps decrease 'total_states' and 'peak_states' metric.
13855 	 * In tests that amounts to up to 50% reduction into total verifier
13856 	 * memory consumption and 20% verifier time speedup.
13857 	 */
13858 	if (env->jmps_processed - env->prev_jmps_processed >= 2 &&
13859 	    env->insn_processed - env->prev_insn_processed >= 8)
13860 		add_new_state = true;
13861 
13862 	pprev = explored_state(env, insn_idx);
13863 	sl = *pprev;
13864 
13865 	clean_live_states(env, insn_idx, cur);
13866 
13867 	while (sl) {
13868 		states_cnt++;
13869 		if (sl->state.insn_idx != insn_idx)
13870 			goto next;
13871 
13872 		if (sl->state.branches) {
13873 			struct bpf_func_state *frame = sl->state.frame[sl->state.curframe];
13874 
13875 			if (frame->in_async_callback_fn &&
13876 			    frame->async_entry_cnt != cur->frame[cur->curframe]->async_entry_cnt) {
13877 				/* Different async_entry_cnt means that the verifier is
13878 				 * processing another entry into async callback.
13879 				 * Seeing the same state is not an indication of infinite
13880 				 * loop or infinite recursion.
13881 				 * But finding the same state doesn't mean that it's safe
13882 				 * to stop processing the current state. The previous state
13883 				 * hasn't yet reached bpf_exit, since state.branches > 0.
13884 				 * Checking in_async_callback_fn alone is not enough either.
13885 				 * Since the verifier still needs to catch infinite loops
13886 				 * inside async callbacks.
13887 				 */
13888 			} else if (states_maybe_looping(&sl->state, cur) &&
13889 				   states_equal(env, &sl->state, cur)) {
13890 				verbose_linfo(env, insn_idx, "; ");
13891 				verbose(env, "infinite loop detected at insn %d\n", insn_idx);
13892 				return -EINVAL;
13893 			}
13894 			/* if the verifier is processing a loop, avoid adding new state
13895 			 * too often, since different loop iterations have distinct
13896 			 * states and may not help future pruning.
13897 			 * This threshold shouldn't be too low to make sure that
13898 			 * a loop with large bound will be rejected quickly.
13899 			 * The most abusive loop will be:
13900 			 * r1 += 1
13901 			 * if r1 < 1000000 goto pc-2
13902 			 * 1M insn_procssed limit / 100 == 10k peak states.
13903 			 * This threshold shouldn't be too high either, since states
13904 			 * at the end of the loop are likely to be useful in pruning.
13905 			 */
13906 			if (env->jmps_processed - env->prev_jmps_processed < 20 &&
13907 			    env->insn_processed - env->prev_insn_processed < 100)
13908 				add_new_state = false;
13909 			goto miss;
13910 		}
13911 		if (states_equal(env, &sl->state, cur)) {
13912 			sl->hit_cnt++;
13913 			/* reached equivalent register/stack state,
13914 			 * prune the search.
13915 			 * Registers read by the continuation are read by us.
13916 			 * If we have any write marks in env->cur_state, they
13917 			 * will prevent corresponding reads in the continuation
13918 			 * from reaching our parent (an explored_state).  Our
13919 			 * own state will get the read marks recorded, but
13920 			 * they'll be immediately forgotten as we're pruning
13921 			 * this state and will pop a new one.
13922 			 */
13923 			err = propagate_liveness(env, &sl->state, cur);
13924 
13925 			/* if previous state reached the exit with precision and
13926 			 * current state is equivalent to it (except precsion marks)
13927 			 * the precision needs to be propagated back in
13928 			 * the current state.
13929 			 */
13930 			err = err ? : push_jmp_history(env, cur);
13931 			err = err ? : propagate_precision(env, &sl->state);
13932 			if (err)
13933 				return err;
13934 			return 1;
13935 		}
13936 miss:
13937 		/* when new state is not going to be added do not increase miss count.
13938 		 * Otherwise several loop iterations will remove the state
13939 		 * recorded earlier. The goal of these heuristics is to have
13940 		 * states from some iterations of the loop (some in the beginning
13941 		 * and some at the end) to help pruning.
13942 		 */
13943 		if (add_new_state)
13944 			sl->miss_cnt++;
13945 		/* heuristic to determine whether this state is beneficial
13946 		 * to keep checking from state equivalence point of view.
13947 		 * Higher numbers increase max_states_per_insn and verification time,
13948 		 * but do not meaningfully decrease insn_processed.
13949 		 */
13950 		if (sl->miss_cnt > sl->hit_cnt * 3 + 3) {
13951 			/* the state is unlikely to be useful. Remove it to
13952 			 * speed up verification
13953 			 */
13954 			*pprev = sl->next;
13955 			if (sl->state.frame[0]->regs[0].live & REG_LIVE_DONE) {
13956 				u32 br = sl->state.branches;
13957 
13958 				WARN_ONCE(br,
13959 					  "BUG live_done but branches_to_explore %d\n",
13960 					  br);
13961 				free_verifier_state(&sl->state, false);
13962 				kfree(sl);
13963 				env->peak_states--;
13964 			} else {
13965 				/* cannot free this state, since parentage chain may
13966 				 * walk it later. Add it for free_list instead to
13967 				 * be freed at the end of verification
13968 				 */
13969 				sl->next = env->free_list;
13970 				env->free_list = sl;
13971 			}
13972 			sl = *pprev;
13973 			continue;
13974 		}
13975 next:
13976 		pprev = &sl->next;
13977 		sl = *pprev;
13978 	}
13979 
13980 	if (env->max_states_per_insn < states_cnt)
13981 		env->max_states_per_insn = states_cnt;
13982 
13983 	if (!env->bpf_capable && states_cnt > BPF_COMPLEXITY_LIMIT_STATES)
13984 		return 0;
13985 
13986 	if (!add_new_state)
13987 		return 0;
13988 
13989 	/* There were no equivalent states, remember the current one.
13990 	 * Technically the current state is not proven to be safe yet,
13991 	 * but it will either reach outer most bpf_exit (which means it's safe)
13992 	 * or it will be rejected. When there are no loops the verifier won't be
13993 	 * seeing this tuple (frame[0].callsite, frame[1].callsite, .. insn_idx)
13994 	 * again on the way to bpf_exit.
13995 	 * When looping the sl->state.branches will be > 0 and this state
13996 	 * will not be considered for equivalence until branches == 0.
13997 	 */
13998 	new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL);
13999 	if (!new_sl)
14000 		return -ENOMEM;
14001 	env->total_states++;
14002 	env->peak_states++;
14003 	env->prev_jmps_processed = env->jmps_processed;
14004 	env->prev_insn_processed = env->insn_processed;
14005 
14006 	/* forget precise markings we inherited, see __mark_chain_precision */
14007 	if (env->bpf_capable)
14008 		mark_all_scalars_imprecise(env, cur);
14009 
14010 	/* add new state to the head of linked list */
14011 	new = &new_sl->state;
14012 	err = copy_verifier_state(new, cur);
14013 	if (err) {
14014 		free_verifier_state(new, false);
14015 		kfree(new_sl);
14016 		return err;
14017 	}
14018 	new->insn_idx = insn_idx;
14019 	WARN_ONCE(new->branches != 1,
14020 		  "BUG is_state_visited:branches_to_explore=%d insn %d\n", new->branches, insn_idx);
14021 
14022 	cur->parent = new;
14023 	cur->first_insn_idx = insn_idx;
14024 	clear_jmp_history(cur);
14025 	new_sl->next = *explored_state(env, insn_idx);
14026 	*explored_state(env, insn_idx) = new_sl;
14027 	/* connect new state to parentage chain. Current frame needs all
14028 	 * registers connected. Only r6 - r9 of the callers are alive (pushed
14029 	 * to the stack implicitly by JITs) so in callers' frames connect just
14030 	 * r6 - r9 as an optimization. Callers will have r1 - r5 connected to
14031 	 * the state of the call instruction (with WRITTEN set), and r0 comes
14032 	 * from callee with its full parentage chain, anyway.
14033 	 */
14034 	/* clear write marks in current state: the writes we did are not writes
14035 	 * our child did, so they don't screen off its reads from us.
14036 	 * (There are no read marks in current state, because reads always mark
14037 	 * their parent and current state never has children yet.  Only
14038 	 * explored_states can get read marks.)
14039 	 */
14040 	for (j = 0; j <= cur->curframe; j++) {
14041 		for (i = j < cur->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++)
14042 			cur->frame[j]->regs[i].parent = &new->frame[j]->regs[i];
14043 		for (i = 0; i < BPF_REG_FP; i++)
14044 			cur->frame[j]->regs[i].live = REG_LIVE_NONE;
14045 	}
14046 
14047 	/* all stack frames are accessible from callee, clear them all */
14048 	for (j = 0; j <= cur->curframe; j++) {
14049 		struct bpf_func_state *frame = cur->frame[j];
14050 		struct bpf_func_state *newframe = new->frame[j];
14051 
14052 		for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++) {
14053 			frame->stack[i].spilled_ptr.live = REG_LIVE_NONE;
14054 			frame->stack[i].spilled_ptr.parent =
14055 						&newframe->stack[i].spilled_ptr;
14056 		}
14057 	}
14058 	return 0;
14059 }
14060 
14061 /* Return true if it's OK to have the same insn return a different type. */
14062 static bool reg_type_mismatch_ok(enum bpf_reg_type type)
14063 {
14064 	switch (base_type(type)) {
14065 	case PTR_TO_CTX:
14066 	case PTR_TO_SOCKET:
14067 	case PTR_TO_SOCK_COMMON:
14068 	case PTR_TO_TCP_SOCK:
14069 	case PTR_TO_XDP_SOCK:
14070 	case PTR_TO_BTF_ID:
14071 		return false;
14072 	default:
14073 		return true;
14074 	}
14075 }
14076 
14077 /* If an instruction was previously used with particular pointer types, then we
14078  * need to be careful to avoid cases such as the below, where it may be ok
14079  * for one branch accessing the pointer, but not ok for the other branch:
14080  *
14081  * R1 = sock_ptr
14082  * goto X;
14083  * ...
14084  * R1 = some_other_valid_ptr;
14085  * goto X;
14086  * ...
14087  * R2 = *(u32 *)(R1 + 0);
14088  */
14089 static bool reg_type_mismatch(enum bpf_reg_type src, enum bpf_reg_type prev)
14090 {
14091 	return src != prev && (!reg_type_mismatch_ok(src) ||
14092 			       !reg_type_mismatch_ok(prev));
14093 }
14094 
14095 static int do_check(struct bpf_verifier_env *env)
14096 {
14097 	bool pop_log = !(env->log.level & BPF_LOG_LEVEL2);
14098 	struct bpf_verifier_state *state = env->cur_state;
14099 	struct bpf_insn *insns = env->prog->insnsi;
14100 	struct bpf_reg_state *regs;
14101 	int insn_cnt = env->prog->len;
14102 	bool do_print_state = false;
14103 	int prev_insn_idx = -1;
14104 
14105 	for (;;) {
14106 		struct bpf_insn *insn;
14107 		u8 class;
14108 		int err;
14109 
14110 		env->prev_insn_idx = prev_insn_idx;
14111 		if (env->insn_idx >= insn_cnt) {
14112 			verbose(env, "invalid insn idx %d insn_cnt %d\n",
14113 				env->insn_idx, insn_cnt);
14114 			return -EFAULT;
14115 		}
14116 
14117 		insn = &insns[env->insn_idx];
14118 		class = BPF_CLASS(insn->code);
14119 
14120 		if (++env->insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) {
14121 			verbose(env,
14122 				"BPF program is too large. Processed %d insn\n",
14123 				env->insn_processed);
14124 			return -E2BIG;
14125 		}
14126 
14127 		state->last_insn_idx = env->prev_insn_idx;
14128 
14129 		if (is_prune_point(env, env->insn_idx)) {
14130 			err = is_state_visited(env, env->insn_idx);
14131 			if (err < 0)
14132 				return err;
14133 			if (err == 1) {
14134 				/* found equivalent state, can prune the search */
14135 				if (env->log.level & BPF_LOG_LEVEL) {
14136 					if (do_print_state)
14137 						verbose(env, "\nfrom %d to %d%s: safe\n",
14138 							env->prev_insn_idx, env->insn_idx,
14139 							env->cur_state->speculative ?
14140 							" (speculative execution)" : "");
14141 					else
14142 						verbose(env, "%d: safe\n", env->insn_idx);
14143 				}
14144 				goto process_bpf_exit;
14145 			}
14146 		}
14147 
14148 		if (is_jmp_point(env, env->insn_idx)) {
14149 			err = push_jmp_history(env, state);
14150 			if (err)
14151 				return err;
14152 		}
14153 
14154 		if (signal_pending(current))
14155 			return -EAGAIN;
14156 
14157 		if (need_resched())
14158 			cond_resched();
14159 
14160 		if (env->log.level & BPF_LOG_LEVEL2 && do_print_state) {
14161 			verbose(env, "\nfrom %d to %d%s:",
14162 				env->prev_insn_idx, env->insn_idx,
14163 				env->cur_state->speculative ?
14164 				" (speculative execution)" : "");
14165 			print_verifier_state(env, state->frame[state->curframe], true);
14166 			do_print_state = false;
14167 		}
14168 
14169 		if (env->log.level & BPF_LOG_LEVEL) {
14170 			const struct bpf_insn_cbs cbs = {
14171 				.cb_call	= disasm_kfunc_name,
14172 				.cb_print	= verbose,
14173 				.private_data	= env,
14174 			};
14175 
14176 			if (verifier_state_scratched(env))
14177 				print_insn_state(env, state->frame[state->curframe]);
14178 
14179 			verbose_linfo(env, env->insn_idx, "; ");
14180 			env->prev_log_len = env->log.len_used;
14181 			verbose(env, "%d: ", env->insn_idx);
14182 			print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
14183 			env->prev_insn_print_len = env->log.len_used - env->prev_log_len;
14184 			env->prev_log_len = env->log.len_used;
14185 		}
14186 
14187 		if (bpf_prog_is_offloaded(env->prog->aux)) {
14188 			err = bpf_prog_offload_verify_insn(env, env->insn_idx,
14189 							   env->prev_insn_idx);
14190 			if (err)
14191 				return err;
14192 		}
14193 
14194 		regs = cur_regs(env);
14195 		sanitize_mark_insn_seen(env);
14196 		prev_insn_idx = env->insn_idx;
14197 
14198 		if (class == BPF_ALU || class == BPF_ALU64) {
14199 			err = check_alu_op(env, insn);
14200 			if (err)
14201 				return err;
14202 
14203 		} else if (class == BPF_LDX) {
14204 			enum bpf_reg_type *prev_src_type, src_reg_type;
14205 
14206 			/* check for reserved fields is already done */
14207 
14208 			/* check src operand */
14209 			err = check_reg_arg(env, insn->src_reg, SRC_OP);
14210 			if (err)
14211 				return err;
14212 
14213 			err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
14214 			if (err)
14215 				return err;
14216 
14217 			src_reg_type = regs[insn->src_reg].type;
14218 
14219 			/* check that memory (src_reg + off) is readable,
14220 			 * the state of dst_reg will be updated by this func
14221 			 */
14222 			err = check_mem_access(env, env->insn_idx, insn->src_reg,
14223 					       insn->off, BPF_SIZE(insn->code),
14224 					       BPF_READ, insn->dst_reg, false);
14225 			if (err)
14226 				return err;
14227 
14228 			prev_src_type = &env->insn_aux_data[env->insn_idx].ptr_type;
14229 
14230 			if (*prev_src_type == NOT_INIT) {
14231 				/* saw a valid insn
14232 				 * dst_reg = *(u32 *)(src_reg + off)
14233 				 * save type to validate intersecting paths
14234 				 */
14235 				*prev_src_type = src_reg_type;
14236 
14237 			} else if (reg_type_mismatch(src_reg_type, *prev_src_type)) {
14238 				/* ABuser program is trying to use the same insn
14239 				 * dst_reg = *(u32*) (src_reg + off)
14240 				 * with different pointer types:
14241 				 * src_reg == ctx in one branch and
14242 				 * src_reg == stack|map in some other branch.
14243 				 * Reject it.
14244 				 */
14245 				verbose(env, "same insn cannot be used with different pointers\n");
14246 				return -EINVAL;
14247 			}
14248 
14249 		} else if (class == BPF_STX) {
14250 			enum bpf_reg_type *prev_dst_type, dst_reg_type;
14251 
14252 			if (BPF_MODE(insn->code) == BPF_ATOMIC) {
14253 				err = check_atomic(env, env->insn_idx, insn);
14254 				if (err)
14255 					return err;
14256 				env->insn_idx++;
14257 				continue;
14258 			}
14259 
14260 			if (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0) {
14261 				verbose(env, "BPF_STX uses reserved fields\n");
14262 				return -EINVAL;
14263 			}
14264 
14265 			/* check src1 operand */
14266 			err = check_reg_arg(env, insn->src_reg, SRC_OP);
14267 			if (err)
14268 				return err;
14269 			/* check src2 operand */
14270 			err = check_reg_arg(env, insn->dst_reg, SRC_OP);
14271 			if (err)
14272 				return err;
14273 
14274 			dst_reg_type = regs[insn->dst_reg].type;
14275 
14276 			/* check that memory (dst_reg + off) is writeable */
14277 			err = check_mem_access(env, env->insn_idx, insn->dst_reg,
14278 					       insn->off, BPF_SIZE(insn->code),
14279 					       BPF_WRITE, insn->src_reg, false);
14280 			if (err)
14281 				return err;
14282 
14283 			prev_dst_type = &env->insn_aux_data[env->insn_idx].ptr_type;
14284 
14285 			if (*prev_dst_type == NOT_INIT) {
14286 				*prev_dst_type = dst_reg_type;
14287 			} else if (reg_type_mismatch(dst_reg_type, *prev_dst_type)) {
14288 				verbose(env, "same insn cannot be used with different pointers\n");
14289 				return -EINVAL;
14290 			}
14291 
14292 		} else if (class == BPF_ST) {
14293 			if (BPF_MODE(insn->code) != BPF_MEM ||
14294 			    insn->src_reg != BPF_REG_0) {
14295 				verbose(env, "BPF_ST uses reserved fields\n");
14296 				return -EINVAL;
14297 			}
14298 			/* check src operand */
14299 			err = check_reg_arg(env, insn->dst_reg, SRC_OP);
14300 			if (err)
14301 				return err;
14302 
14303 			if (is_ctx_reg(env, insn->dst_reg)) {
14304 				verbose(env, "BPF_ST stores into R%d %s is not allowed\n",
14305 					insn->dst_reg,
14306 					reg_type_str(env, reg_state(env, insn->dst_reg)->type));
14307 				return -EACCES;
14308 			}
14309 
14310 			/* check that memory (dst_reg + off) is writeable */
14311 			err = check_mem_access(env, env->insn_idx, insn->dst_reg,
14312 					       insn->off, BPF_SIZE(insn->code),
14313 					       BPF_WRITE, -1, false);
14314 			if (err)
14315 				return err;
14316 
14317 		} else if (class == BPF_JMP || class == BPF_JMP32) {
14318 			u8 opcode = BPF_OP(insn->code);
14319 
14320 			env->jmps_processed++;
14321 			if (opcode == BPF_CALL) {
14322 				if (BPF_SRC(insn->code) != BPF_K ||
14323 				    (insn->src_reg != BPF_PSEUDO_KFUNC_CALL
14324 				     && insn->off != 0) ||
14325 				    (insn->src_reg != BPF_REG_0 &&
14326 				     insn->src_reg != BPF_PSEUDO_CALL &&
14327 				     insn->src_reg != BPF_PSEUDO_KFUNC_CALL) ||
14328 				    insn->dst_reg != BPF_REG_0 ||
14329 				    class == BPF_JMP32) {
14330 					verbose(env, "BPF_CALL uses reserved fields\n");
14331 					return -EINVAL;
14332 				}
14333 
14334 				if (env->cur_state->active_lock.ptr) {
14335 					if ((insn->src_reg == BPF_REG_0 && insn->imm != BPF_FUNC_spin_unlock) ||
14336 					    (insn->src_reg == BPF_PSEUDO_CALL) ||
14337 					    (insn->src_reg == BPF_PSEUDO_KFUNC_CALL &&
14338 					     (insn->off != 0 || !is_bpf_list_api_kfunc(insn->imm)))) {
14339 						verbose(env, "function calls are not allowed while holding a lock\n");
14340 						return -EINVAL;
14341 					}
14342 				}
14343 				if (insn->src_reg == BPF_PSEUDO_CALL)
14344 					err = check_func_call(env, insn, &env->insn_idx);
14345 				else if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL)
14346 					err = check_kfunc_call(env, insn, &env->insn_idx);
14347 				else
14348 					err = check_helper_call(env, insn, &env->insn_idx);
14349 				if (err)
14350 					return err;
14351 			} else if (opcode == BPF_JA) {
14352 				if (BPF_SRC(insn->code) != BPF_K ||
14353 				    insn->imm != 0 ||
14354 				    insn->src_reg != BPF_REG_0 ||
14355 				    insn->dst_reg != BPF_REG_0 ||
14356 				    class == BPF_JMP32) {
14357 					verbose(env, "BPF_JA uses reserved fields\n");
14358 					return -EINVAL;
14359 				}
14360 
14361 				env->insn_idx += insn->off + 1;
14362 				continue;
14363 
14364 			} else if (opcode == BPF_EXIT) {
14365 				if (BPF_SRC(insn->code) != BPF_K ||
14366 				    insn->imm != 0 ||
14367 				    insn->src_reg != BPF_REG_0 ||
14368 				    insn->dst_reg != BPF_REG_0 ||
14369 				    class == BPF_JMP32) {
14370 					verbose(env, "BPF_EXIT uses reserved fields\n");
14371 					return -EINVAL;
14372 				}
14373 
14374 				if (env->cur_state->active_lock.ptr) {
14375 					verbose(env, "bpf_spin_unlock is missing\n");
14376 					return -EINVAL;
14377 				}
14378 
14379 				if (env->cur_state->active_rcu_lock) {
14380 					verbose(env, "bpf_rcu_read_unlock is missing\n");
14381 					return -EINVAL;
14382 				}
14383 
14384 				/* We must do check_reference_leak here before
14385 				 * prepare_func_exit to handle the case when
14386 				 * state->curframe > 0, it may be a callback
14387 				 * function, for which reference_state must
14388 				 * match caller reference state when it exits.
14389 				 */
14390 				err = check_reference_leak(env);
14391 				if (err)
14392 					return err;
14393 
14394 				if (state->curframe) {
14395 					/* exit from nested function */
14396 					err = prepare_func_exit(env, &env->insn_idx);
14397 					if (err)
14398 						return err;
14399 					do_print_state = true;
14400 					continue;
14401 				}
14402 
14403 				err = check_return_code(env);
14404 				if (err)
14405 					return err;
14406 process_bpf_exit:
14407 				mark_verifier_state_scratched(env);
14408 				update_branch_counts(env, env->cur_state);
14409 				err = pop_stack(env, &prev_insn_idx,
14410 						&env->insn_idx, pop_log);
14411 				if (err < 0) {
14412 					if (err != -ENOENT)
14413 						return err;
14414 					break;
14415 				} else {
14416 					do_print_state = true;
14417 					continue;
14418 				}
14419 			} else {
14420 				err = check_cond_jmp_op(env, insn, &env->insn_idx);
14421 				if (err)
14422 					return err;
14423 			}
14424 		} else if (class == BPF_LD) {
14425 			u8 mode = BPF_MODE(insn->code);
14426 
14427 			if (mode == BPF_ABS || mode == BPF_IND) {
14428 				err = check_ld_abs(env, insn);
14429 				if (err)
14430 					return err;
14431 
14432 			} else if (mode == BPF_IMM) {
14433 				err = check_ld_imm(env, insn);
14434 				if (err)
14435 					return err;
14436 
14437 				env->insn_idx++;
14438 				sanitize_mark_insn_seen(env);
14439 			} else {
14440 				verbose(env, "invalid BPF_LD mode\n");
14441 				return -EINVAL;
14442 			}
14443 		} else {
14444 			verbose(env, "unknown insn class %d\n", class);
14445 			return -EINVAL;
14446 		}
14447 
14448 		env->insn_idx++;
14449 	}
14450 
14451 	return 0;
14452 }
14453 
14454 static int find_btf_percpu_datasec(struct btf *btf)
14455 {
14456 	const struct btf_type *t;
14457 	const char *tname;
14458 	int i, n;
14459 
14460 	/*
14461 	 * Both vmlinux and module each have their own ".data..percpu"
14462 	 * DATASECs in BTF. So for module's case, we need to skip vmlinux BTF
14463 	 * types to look at only module's own BTF types.
14464 	 */
14465 	n = btf_nr_types(btf);
14466 	if (btf_is_module(btf))
14467 		i = btf_nr_types(btf_vmlinux);
14468 	else
14469 		i = 1;
14470 
14471 	for(; i < n; i++) {
14472 		t = btf_type_by_id(btf, i);
14473 		if (BTF_INFO_KIND(t->info) != BTF_KIND_DATASEC)
14474 			continue;
14475 
14476 		tname = btf_name_by_offset(btf, t->name_off);
14477 		if (!strcmp(tname, ".data..percpu"))
14478 			return i;
14479 	}
14480 
14481 	return -ENOENT;
14482 }
14483 
14484 /* replace pseudo btf_id with kernel symbol address */
14485 static int check_pseudo_btf_id(struct bpf_verifier_env *env,
14486 			       struct bpf_insn *insn,
14487 			       struct bpf_insn_aux_data *aux)
14488 {
14489 	const struct btf_var_secinfo *vsi;
14490 	const struct btf_type *datasec;
14491 	struct btf_mod_pair *btf_mod;
14492 	const struct btf_type *t;
14493 	const char *sym_name;
14494 	bool percpu = false;
14495 	u32 type, id = insn->imm;
14496 	struct btf *btf;
14497 	s32 datasec_id;
14498 	u64 addr;
14499 	int i, btf_fd, err;
14500 
14501 	btf_fd = insn[1].imm;
14502 	if (btf_fd) {
14503 		btf = btf_get_by_fd(btf_fd);
14504 		if (IS_ERR(btf)) {
14505 			verbose(env, "invalid module BTF object FD specified.\n");
14506 			return -EINVAL;
14507 		}
14508 	} else {
14509 		if (!btf_vmlinux) {
14510 			verbose(env, "kernel is missing BTF, make sure CONFIG_DEBUG_INFO_BTF=y is specified in Kconfig.\n");
14511 			return -EINVAL;
14512 		}
14513 		btf = btf_vmlinux;
14514 		btf_get(btf);
14515 	}
14516 
14517 	t = btf_type_by_id(btf, id);
14518 	if (!t) {
14519 		verbose(env, "ldimm64 insn specifies invalid btf_id %d.\n", id);
14520 		err = -ENOENT;
14521 		goto err_put;
14522 	}
14523 
14524 	if (!btf_type_is_var(t)) {
14525 		verbose(env, "pseudo btf_id %d in ldimm64 isn't KIND_VAR.\n", id);
14526 		err = -EINVAL;
14527 		goto err_put;
14528 	}
14529 
14530 	sym_name = btf_name_by_offset(btf, t->name_off);
14531 	addr = kallsyms_lookup_name(sym_name);
14532 	if (!addr) {
14533 		verbose(env, "ldimm64 failed to find the address for kernel symbol '%s'.\n",
14534 			sym_name);
14535 		err = -ENOENT;
14536 		goto err_put;
14537 	}
14538 
14539 	datasec_id = find_btf_percpu_datasec(btf);
14540 	if (datasec_id > 0) {
14541 		datasec = btf_type_by_id(btf, datasec_id);
14542 		for_each_vsi(i, datasec, vsi) {
14543 			if (vsi->type == id) {
14544 				percpu = true;
14545 				break;
14546 			}
14547 		}
14548 	}
14549 
14550 	insn[0].imm = (u32)addr;
14551 	insn[1].imm = addr >> 32;
14552 
14553 	type = t->type;
14554 	t = btf_type_skip_modifiers(btf, type, NULL);
14555 	if (percpu) {
14556 		aux->btf_var.reg_type = PTR_TO_BTF_ID | MEM_PERCPU;
14557 		aux->btf_var.btf = btf;
14558 		aux->btf_var.btf_id = type;
14559 	} else if (!btf_type_is_struct(t)) {
14560 		const struct btf_type *ret;
14561 		const char *tname;
14562 		u32 tsize;
14563 
14564 		/* resolve the type size of ksym. */
14565 		ret = btf_resolve_size(btf, t, &tsize);
14566 		if (IS_ERR(ret)) {
14567 			tname = btf_name_by_offset(btf, t->name_off);
14568 			verbose(env, "ldimm64 unable to resolve the size of type '%s': %ld\n",
14569 				tname, PTR_ERR(ret));
14570 			err = -EINVAL;
14571 			goto err_put;
14572 		}
14573 		aux->btf_var.reg_type = PTR_TO_MEM | MEM_RDONLY;
14574 		aux->btf_var.mem_size = tsize;
14575 	} else {
14576 		aux->btf_var.reg_type = PTR_TO_BTF_ID;
14577 		aux->btf_var.btf = btf;
14578 		aux->btf_var.btf_id = type;
14579 	}
14580 
14581 	/* check whether we recorded this BTF (and maybe module) already */
14582 	for (i = 0; i < env->used_btf_cnt; i++) {
14583 		if (env->used_btfs[i].btf == btf) {
14584 			btf_put(btf);
14585 			return 0;
14586 		}
14587 	}
14588 
14589 	if (env->used_btf_cnt >= MAX_USED_BTFS) {
14590 		err = -E2BIG;
14591 		goto err_put;
14592 	}
14593 
14594 	btf_mod = &env->used_btfs[env->used_btf_cnt];
14595 	btf_mod->btf = btf;
14596 	btf_mod->module = NULL;
14597 
14598 	/* if we reference variables from kernel module, bump its refcount */
14599 	if (btf_is_module(btf)) {
14600 		btf_mod->module = btf_try_get_module(btf);
14601 		if (!btf_mod->module) {
14602 			err = -ENXIO;
14603 			goto err_put;
14604 		}
14605 	}
14606 
14607 	env->used_btf_cnt++;
14608 
14609 	return 0;
14610 err_put:
14611 	btf_put(btf);
14612 	return err;
14613 }
14614 
14615 static bool is_tracing_prog_type(enum bpf_prog_type type)
14616 {
14617 	switch (type) {
14618 	case BPF_PROG_TYPE_KPROBE:
14619 	case BPF_PROG_TYPE_TRACEPOINT:
14620 	case BPF_PROG_TYPE_PERF_EVENT:
14621 	case BPF_PROG_TYPE_RAW_TRACEPOINT:
14622 	case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
14623 		return true;
14624 	default:
14625 		return false;
14626 	}
14627 }
14628 
14629 static int check_map_prog_compatibility(struct bpf_verifier_env *env,
14630 					struct bpf_map *map,
14631 					struct bpf_prog *prog)
14632 
14633 {
14634 	enum bpf_prog_type prog_type = resolve_prog_type(prog);
14635 
14636 	if (btf_record_has_field(map->record, BPF_LIST_HEAD)) {
14637 		if (is_tracing_prog_type(prog_type)) {
14638 			verbose(env, "tracing progs cannot use bpf_list_head yet\n");
14639 			return -EINVAL;
14640 		}
14641 	}
14642 
14643 	if (btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
14644 		if (prog_type == BPF_PROG_TYPE_SOCKET_FILTER) {
14645 			verbose(env, "socket filter progs cannot use bpf_spin_lock yet\n");
14646 			return -EINVAL;
14647 		}
14648 
14649 		if (is_tracing_prog_type(prog_type)) {
14650 			verbose(env, "tracing progs cannot use bpf_spin_lock yet\n");
14651 			return -EINVAL;
14652 		}
14653 
14654 		if (prog->aux->sleepable) {
14655 			verbose(env, "sleepable progs cannot use bpf_spin_lock yet\n");
14656 			return -EINVAL;
14657 		}
14658 	}
14659 
14660 	if (btf_record_has_field(map->record, BPF_TIMER)) {
14661 		if (is_tracing_prog_type(prog_type)) {
14662 			verbose(env, "tracing progs cannot use bpf_timer yet\n");
14663 			return -EINVAL;
14664 		}
14665 	}
14666 
14667 	if ((bpf_prog_is_offloaded(prog->aux) || bpf_map_is_offloaded(map)) &&
14668 	    !bpf_offload_prog_map_match(prog, map)) {
14669 		verbose(env, "offload device mismatch between prog and map\n");
14670 		return -EINVAL;
14671 	}
14672 
14673 	if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
14674 		verbose(env, "bpf_struct_ops map cannot be used in prog\n");
14675 		return -EINVAL;
14676 	}
14677 
14678 	if (prog->aux->sleepable)
14679 		switch (map->map_type) {
14680 		case BPF_MAP_TYPE_HASH:
14681 		case BPF_MAP_TYPE_LRU_HASH:
14682 		case BPF_MAP_TYPE_ARRAY:
14683 		case BPF_MAP_TYPE_PERCPU_HASH:
14684 		case BPF_MAP_TYPE_PERCPU_ARRAY:
14685 		case BPF_MAP_TYPE_LRU_PERCPU_HASH:
14686 		case BPF_MAP_TYPE_ARRAY_OF_MAPS:
14687 		case BPF_MAP_TYPE_HASH_OF_MAPS:
14688 		case BPF_MAP_TYPE_RINGBUF:
14689 		case BPF_MAP_TYPE_USER_RINGBUF:
14690 		case BPF_MAP_TYPE_INODE_STORAGE:
14691 		case BPF_MAP_TYPE_SK_STORAGE:
14692 		case BPF_MAP_TYPE_TASK_STORAGE:
14693 		case BPF_MAP_TYPE_CGRP_STORAGE:
14694 			break;
14695 		default:
14696 			verbose(env,
14697 				"Sleepable programs can only use array, hash, ringbuf and local storage maps\n");
14698 			return -EINVAL;
14699 		}
14700 
14701 	return 0;
14702 }
14703 
14704 static bool bpf_map_is_cgroup_storage(struct bpf_map *map)
14705 {
14706 	return (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE ||
14707 		map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE);
14708 }
14709 
14710 /* find and rewrite pseudo imm in ld_imm64 instructions:
14711  *
14712  * 1. if it accesses map FD, replace it with actual map pointer.
14713  * 2. if it accesses btf_id of a VAR, replace it with pointer to the var.
14714  *
14715  * NOTE: btf_vmlinux is required for converting pseudo btf_id.
14716  */
14717 static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
14718 {
14719 	struct bpf_insn *insn = env->prog->insnsi;
14720 	int insn_cnt = env->prog->len;
14721 	int i, j, err;
14722 
14723 	err = bpf_prog_calc_tag(env->prog);
14724 	if (err)
14725 		return err;
14726 
14727 	for (i = 0; i < insn_cnt; i++, insn++) {
14728 		if (BPF_CLASS(insn->code) == BPF_LDX &&
14729 		    (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) {
14730 			verbose(env, "BPF_LDX uses reserved fields\n");
14731 			return -EINVAL;
14732 		}
14733 
14734 		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) {
14735 			struct bpf_insn_aux_data *aux;
14736 			struct bpf_map *map;
14737 			struct fd f;
14738 			u64 addr;
14739 			u32 fd;
14740 
14741 			if (i == insn_cnt - 1 || insn[1].code != 0 ||
14742 			    insn[1].dst_reg != 0 || insn[1].src_reg != 0 ||
14743 			    insn[1].off != 0) {
14744 				verbose(env, "invalid bpf_ld_imm64 insn\n");
14745 				return -EINVAL;
14746 			}
14747 
14748 			if (insn[0].src_reg == 0)
14749 				/* valid generic load 64-bit imm */
14750 				goto next_insn;
14751 
14752 			if (insn[0].src_reg == BPF_PSEUDO_BTF_ID) {
14753 				aux = &env->insn_aux_data[i];
14754 				err = check_pseudo_btf_id(env, insn, aux);
14755 				if (err)
14756 					return err;
14757 				goto next_insn;
14758 			}
14759 
14760 			if (insn[0].src_reg == BPF_PSEUDO_FUNC) {
14761 				aux = &env->insn_aux_data[i];
14762 				aux->ptr_type = PTR_TO_FUNC;
14763 				goto next_insn;
14764 			}
14765 
14766 			/* In final convert_pseudo_ld_imm64() step, this is
14767 			 * converted into regular 64-bit imm load insn.
14768 			 */
14769 			switch (insn[0].src_reg) {
14770 			case BPF_PSEUDO_MAP_VALUE:
14771 			case BPF_PSEUDO_MAP_IDX_VALUE:
14772 				break;
14773 			case BPF_PSEUDO_MAP_FD:
14774 			case BPF_PSEUDO_MAP_IDX:
14775 				if (insn[1].imm == 0)
14776 					break;
14777 				fallthrough;
14778 			default:
14779 				verbose(env, "unrecognized bpf_ld_imm64 insn\n");
14780 				return -EINVAL;
14781 			}
14782 
14783 			switch (insn[0].src_reg) {
14784 			case BPF_PSEUDO_MAP_IDX_VALUE:
14785 			case BPF_PSEUDO_MAP_IDX:
14786 				if (bpfptr_is_null(env->fd_array)) {
14787 					verbose(env, "fd_idx without fd_array is invalid\n");
14788 					return -EPROTO;
14789 				}
14790 				if (copy_from_bpfptr_offset(&fd, env->fd_array,
14791 							    insn[0].imm * sizeof(fd),
14792 							    sizeof(fd)))
14793 					return -EFAULT;
14794 				break;
14795 			default:
14796 				fd = insn[0].imm;
14797 				break;
14798 			}
14799 
14800 			f = fdget(fd);
14801 			map = __bpf_map_get(f);
14802 			if (IS_ERR(map)) {
14803 				verbose(env, "fd %d is not pointing to valid bpf_map\n",
14804 					insn[0].imm);
14805 				return PTR_ERR(map);
14806 			}
14807 
14808 			err = check_map_prog_compatibility(env, map, env->prog);
14809 			if (err) {
14810 				fdput(f);
14811 				return err;
14812 			}
14813 
14814 			aux = &env->insn_aux_data[i];
14815 			if (insn[0].src_reg == BPF_PSEUDO_MAP_FD ||
14816 			    insn[0].src_reg == BPF_PSEUDO_MAP_IDX) {
14817 				addr = (unsigned long)map;
14818 			} else {
14819 				u32 off = insn[1].imm;
14820 
14821 				if (off >= BPF_MAX_VAR_OFF) {
14822 					verbose(env, "direct value offset of %u is not allowed\n", off);
14823 					fdput(f);
14824 					return -EINVAL;
14825 				}
14826 
14827 				if (!map->ops->map_direct_value_addr) {
14828 					verbose(env, "no direct value access support for this map type\n");
14829 					fdput(f);
14830 					return -EINVAL;
14831 				}
14832 
14833 				err = map->ops->map_direct_value_addr(map, &addr, off);
14834 				if (err) {
14835 					verbose(env, "invalid access to map value pointer, value_size=%u off=%u\n",
14836 						map->value_size, off);
14837 					fdput(f);
14838 					return err;
14839 				}
14840 
14841 				aux->map_off = off;
14842 				addr += off;
14843 			}
14844 
14845 			insn[0].imm = (u32)addr;
14846 			insn[1].imm = addr >> 32;
14847 
14848 			/* check whether we recorded this map already */
14849 			for (j = 0; j < env->used_map_cnt; j++) {
14850 				if (env->used_maps[j] == map) {
14851 					aux->map_index = j;
14852 					fdput(f);
14853 					goto next_insn;
14854 				}
14855 			}
14856 
14857 			if (env->used_map_cnt >= MAX_USED_MAPS) {
14858 				fdput(f);
14859 				return -E2BIG;
14860 			}
14861 
14862 			/* hold the map. If the program is rejected by verifier,
14863 			 * the map will be released by release_maps() or it
14864 			 * will be used by the valid program until it's unloaded
14865 			 * and all maps are released in free_used_maps()
14866 			 */
14867 			bpf_map_inc(map);
14868 
14869 			aux->map_index = env->used_map_cnt;
14870 			env->used_maps[env->used_map_cnt++] = map;
14871 
14872 			if (bpf_map_is_cgroup_storage(map) &&
14873 			    bpf_cgroup_storage_assign(env->prog->aux, map)) {
14874 				verbose(env, "only one cgroup storage of each type is allowed\n");
14875 				fdput(f);
14876 				return -EBUSY;
14877 			}
14878 
14879 			fdput(f);
14880 next_insn:
14881 			insn++;
14882 			i++;
14883 			continue;
14884 		}
14885 
14886 		/* Basic sanity check before we invest more work here. */
14887 		if (!bpf_opcode_in_insntable(insn->code)) {
14888 			verbose(env, "unknown opcode %02x\n", insn->code);
14889 			return -EINVAL;
14890 		}
14891 	}
14892 
14893 	/* now all pseudo BPF_LD_IMM64 instructions load valid
14894 	 * 'struct bpf_map *' into a register instead of user map_fd.
14895 	 * These pointers will be used later by verifier to validate map access.
14896 	 */
14897 	return 0;
14898 }
14899 
14900 /* drop refcnt of maps used by the rejected program */
14901 static void release_maps(struct bpf_verifier_env *env)
14902 {
14903 	__bpf_free_used_maps(env->prog->aux, env->used_maps,
14904 			     env->used_map_cnt);
14905 }
14906 
14907 /* drop refcnt of maps used by the rejected program */
14908 static void release_btfs(struct bpf_verifier_env *env)
14909 {
14910 	__bpf_free_used_btfs(env->prog->aux, env->used_btfs,
14911 			     env->used_btf_cnt);
14912 }
14913 
14914 /* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */
14915 static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env)
14916 {
14917 	struct bpf_insn *insn = env->prog->insnsi;
14918 	int insn_cnt = env->prog->len;
14919 	int i;
14920 
14921 	for (i = 0; i < insn_cnt; i++, insn++) {
14922 		if (insn->code != (BPF_LD | BPF_IMM | BPF_DW))
14923 			continue;
14924 		if (insn->src_reg == BPF_PSEUDO_FUNC)
14925 			continue;
14926 		insn->src_reg = 0;
14927 	}
14928 }
14929 
14930 /* single env->prog->insni[off] instruction was replaced with the range
14931  * insni[off, off + cnt).  Adjust corresponding insn_aux_data by copying
14932  * [0, off) and [off, end) to new locations, so the patched range stays zero
14933  */
14934 static void adjust_insn_aux_data(struct bpf_verifier_env *env,
14935 				 struct bpf_insn_aux_data *new_data,
14936 				 struct bpf_prog *new_prog, u32 off, u32 cnt)
14937 {
14938 	struct bpf_insn_aux_data *old_data = env->insn_aux_data;
14939 	struct bpf_insn *insn = new_prog->insnsi;
14940 	u32 old_seen = old_data[off].seen;
14941 	u32 prog_len;
14942 	int i;
14943 
14944 	/* aux info at OFF always needs adjustment, no matter fast path
14945 	 * (cnt == 1) is taken or not. There is no guarantee INSN at OFF is the
14946 	 * original insn at old prog.
14947 	 */
14948 	old_data[off].zext_dst = insn_has_def32(env, insn + off + cnt - 1);
14949 
14950 	if (cnt == 1)
14951 		return;
14952 	prog_len = new_prog->len;
14953 
14954 	memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
14955 	memcpy(new_data + off + cnt - 1, old_data + off,
14956 	       sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
14957 	for (i = off; i < off + cnt - 1; i++) {
14958 		/* Expand insni[off]'s seen count to the patched range. */
14959 		new_data[i].seen = old_seen;
14960 		new_data[i].zext_dst = insn_has_def32(env, insn + i);
14961 	}
14962 	env->insn_aux_data = new_data;
14963 	vfree(old_data);
14964 }
14965 
14966 static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len)
14967 {
14968 	int i;
14969 
14970 	if (len == 1)
14971 		return;
14972 	/* NOTE: fake 'exit' subprog should be updated as well. */
14973 	for (i = 0; i <= env->subprog_cnt; i++) {
14974 		if (env->subprog_info[i].start <= off)
14975 			continue;
14976 		env->subprog_info[i].start += len - 1;
14977 	}
14978 }
14979 
14980 static void adjust_poke_descs(struct bpf_prog *prog, u32 off, u32 len)
14981 {
14982 	struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab;
14983 	int i, sz = prog->aux->size_poke_tab;
14984 	struct bpf_jit_poke_descriptor *desc;
14985 
14986 	for (i = 0; i < sz; i++) {
14987 		desc = &tab[i];
14988 		if (desc->insn_idx <= off)
14989 			continue;
14990 		desc->insn_idx += len - 1;
14991 	}
14992 }
14993 
14994 static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off,
14995 					    const struct bpf_insn *patch, u32 len)
14996 {
14997 	struct bpf_prog *new_prog;
14998 	struct bpf_insn_aux_data *new_data = NULL;
14999 
15000 	if (len > 1) {
15001 		new_data = vzalloc(array_size(env->prog->len + len - 1,
15002 					      sizeof(struct bpf_insn_aux_data)));
15003 		if (!new_data)
15004 			return NULL;
15005 	}
15006 
15007 	new_prog = bpf_patch_insn_single(env->prog, off, patch, len);
15008 	if (IS_ERR(new_prog)) {
15009 		if (PTR_ERR(new_prog) == -ERANGE)
15010 			verbose(env,
15011 				"insn %d cannot be patched due to 16-bit range\n",
15012 				env->insn_aux_data[off].orig_idx);
15013 		vfree(new_data);
15014 		return NULL;
15015 	}
15016 	adjust_insn_aux_data(env, new_data, new_prog, off, len);
15017 	adjust_subprog_starts(env, off, len);
15018 	adjust_poke_descs(new_prog, off, len);
15019 	return new_prog;
15020 }
15021 
15022 static int adjust_subprog_starts_after_remove(struct bpf_verifier_env *env,
15023 					      u32 off, u32 cnt)
15024 {
15025 	int i, j;
15026 
15027 	/* find first prog starting at or after off (first to remove) */
15028 	for (i = 0; i < env->subprog_cnt; i++)
15029 		if (env->subprog_info[i].start >= off)
15030 			break;
15031 	/* find first prog starting at or after off + cnt (first to stay) */
15032 	for (j = i; j < env->subprog_cnt; j++)
15033 		if (env->subprog_info[j].start >= off + cnt)
15034 			break;
15035 	/* if j doesn't start exactly at off + cnt, we are just removing
15036 	 * the front of previous prog
15037 	 */
15038 	if (env->subprog_info[j].start != off + cnt)
15039 		j--;
15040 
15041 	if (j > i) {
15042 		struct bpf_prog_aux *aux = env->prog->aux;
15043 		int move;
15044 
15045 		/* move fake 'exit' subprog as well */
15046 		move = env->subprog_cnt + 1 - j;
15047 
15048 		memmove(env->subprog_info + i,
15049 			env->subprog_info + j,
15050 			sizeof(*env->subprog_info) * move);
15051 		env->subprog_cnt -= j - i;
15052 
15053 		/* remove func_info */
15054 		if (aux->func_info) {
15055 			move = aux->func_info_cnt - j;
15056 
15057 			memmove(aux->func_info + i,
15058 				aux->func_info + j,
15059 				sizeof(*aux->func_info) * move);
15060 			aux->func_info_cnt -= j - i;
15061 			/* func_info->insn_off is set after all code rewrites,
15062 			 * in adjust_btf_func() - no need to adjust
15063 			 */
15064 		}
15065 	} else {
15066 		/* convert i from "first prog to remove" to "first to adjust" */
15067 		if (env->subprog_info[i].start == off)
15068 			i++;
15069 	}
15070 
15071 	/* update fake 'exit' subprog as well */
15072 	for (; i <= env->subprog_cnt; i++)
15073 		env->subprog_info[i].start -= cnt;
15074 
15075 	return 0;
15076 }
15077 
15078 static int bpf_adj_linfo_after_remove(struct bpf_verifier_env *env, u32 off,
15079 				      u32 cnt)
15080 {
15081 	struct bpf_prog *prog = env->prog;
15082 	u32 i, l_off, l_cnt, nr_linfo;
15083 	struct bpf_line_info *linfo;
15084 
15085 	nr_linfo = prog->aux->nr_linfo;
15086 	if (!nr_linfo)
15087 		return 0;
15088 
15089 	linfo = prog->aux->linfo;
15090 
15091 	/* find first line info to remove, count lines to be removed */
15092 	for (i = 0; i < nr_linfo; i++)
15093 		if (linfo[i].insn_off >= off)
15094 			break;
15095 
15096 	l_off = i;
15097 	l_cnt = 0;
15098 	for (; i < nr_linfo; i++)
15099 		if (linfo[i].insn_off < off + cnt)
15100 			l_cnt++;
15101 		else
15102 			break;
15103 
15104 	/* First live insn doesn't match first live linfo, it needs to "inherit"
15105 	 * last removed linfo.  prog is already modified, so prog->len == off
15106 	 * means no live instructions after (tail of the program was removed).
15107 	 */
15108 	if (prog->len != off && l_cnt &&
15109 	    (i == nr_linfo || linfo[i].insn_off != off + cnt)) {
15110 		l_cnt--;
15111 		linfo[--i].insn_off = off + cnt;
15112 	}
15113 
15114 	/* remove the line info which refer to the removed instructions */
15115 	if (l_cnt) {
15116 		memmove(linfo + l_off, linfo + i,
15117 			sizeof(*linfo) * (nr_linfo - i));
15118 
15119 		prog->aux->nr_linfo -= l_cnt;
15120 		nr_linfo = prog->aux->nr_linfo;
15121 	}
15122 
15123 	/* pull all linfo[i].insn_off >= off + cnt in by cnt */
15124 	for (i = l_off; i < nr_linfo; i++)
15125 		linfo[i].insn_off -= cnt;
15126 
15127 	/* fix up all subprogs (incl. 'exit') which start >= off */
15128 	for (i = 0; i <= env->subprog_cnt; i++)
15129 		if (env->subprog_info[i].linfo_idx > l_off) {
15130 			/* program may have started in the removed region but
15131 			 * may not be fully removed
15132 			 */
15133 			if (env->subprog_info[i].linfo_idx >= l_off + l_cnt)
15134 				env->subprog_info[i].linfo_idx -= l_cnt;
15135 			else
15136 				env->subprog_info[i].linfo_idx = l_off;
15137 		}
15138 
15139 	return 0;
15140 }
15141 
15142 static int verifier_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt)
15143 {
15144 	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
15145 	unsigned int orig_prog_len = env->prog->len;
15146 	int err;
15147 
15148 	if (bpf_prog_is_offloaded(env->prog->aux))
15149 		bpf_prog_offload_remove_insns(env, off, cnt);
15150 
15151 	err = bpf_remove_insns(env->prog, off, cnt);
15152 	if (err)
15153 		return err;
15154 
15155 	err = adjust_subprog_starts_after_remove(env, off, cnt);
15156 	if (err)
15157 		return err;
15158 
15159 	err = bpf_adj_linfo_after_remove(env, off, cnt);
15160 	if (err)
15161 		return err;
15162 
15163 	memmove(aux_data + off,	aux_data + off + cnt,
15164 		sizeof(*aux_data) * (orig_prog_len - off - cnt));
15165 
15166 	return 0;
15167 }
15168 
15169 /* The verifier does more data flow analysis than llvm and will not
15170  * explore branches that are dead at run time. Malicious programs can
15171  * have dead code too. Therefore replace all dead at-run-time code
15172  * with 'ja -1'.
15173  *
15174  * Just nops are not optimal, e.g. if they would sit at the end of the
15175  * program and through another bug we would manage to jump there, then
15176  * we'd execute beyond program memory otherwise. Returning exception
15177  * code also wouldn't work since we can have subprogs where the dead
15178  * code could be located.
15179  */
15180 static void sanitize_dead_code(struct bpf_verifier_env *env)
15181 {
15182 	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
15183 	struct bpf_insn trap = BPF_JMP_IMM(BPF_JA, 0, 0, -1);
15184 	struct bpf_insn *insn = env->prog->insnsi;
15185 	const int insn_cnt = env->prog->len;
15186 	int i;
15187 
15188 	for (i = 0; i < insn_cnt; i++) {
15189 		if (aux_data[i].seen)
15190 			continue;
15191 		memcpy(insn + i, &trap, sizeof(trap));
15192 		aux_data[i].zext_dst = false;
15193 	}
15194 }
15195 
15196 static bool insn_is_cond_jump(u8 code)
15197 {
15198 	u8 op;
15199 
15200 	if (BPF_CLASS(code) == BPF_JMP32)
15201 		return true;
15202 
15203 	if (BPF_CLASS(code) != BPF_JMP)
15204 		return false;
15205 
15206 	op = BPF_OP(code);
15207 	return op != BPF_JA && op != BPF_EXIT && op != BPF_CALL;
15208 }
15209 
15210 static void opt_hard_wire_dead_code_branches(struct bpf_verifier_env *env)
15211 {
15212 	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
15213 	struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
15214 	struct bpf_insn *insn = env->prog->insnsi;
15215 	const int insn_cnt = env->prog->len;
15216 	int i;
15217 
15218 	for (i = 0; i < insn_cnt; i++, insn++) {
15219 		if (!insn_is_cond_jump(insn->code))
15220 			continue;
15221 
15222 		if (!aux_data[i + 1].seen)
15223 			ja.off = insn->off;
15224 		else if (!aux_data[i + 1 + insn->off].seen)
15225 			ja.off = 0;
15226 		else
15227 			continue;
15228 
15229 		if (bpf_prog_is_offloaded(env->prog->aux))
15230 			bpf_prog_offload_replace_insn(env, i, &ja);
15231 
15232 		memcpy(insn, &ja, sizeof(ja));
15233 	}
15234 }
15235 
15236 static int opt_remove_dead_code(struct bpf_verifier_env *env)
15237 {
15238 	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
15239 	int insn_cnt = env->prog->len;
15240 	int i, err;
15241 
15242 	for (i = 0; i < insn_cnt; i++) {
15243 		int j;
15244 
15245 		j = 0;
15246 		while (i + j < insn_cnt && !aux_data[i + j].seen)
15247 			j++;
15248 		if (!j)
15249 			continue;
15250 
15251 		err = verifier_remove_insns(env, i, j);
15252 		if (err)
15253 			return err;
15254 		insn_cnt = env->prog->len;
15255 	}
15256 
15257 	return 0;
15258 }
15259 
15260 static int opt_remove_nops(struct bpf_verifier_env *env)
15261 {
15262 	const struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
15263 	struct bpf_insn *insn = env->prog->insnsi;
15264 	int insn_cnt = env->prog->len;
15265 	int i, err;
15266 
15267 	for (i = 0; i < insn_cnt; i++) {
15268 		if (memcmp(&insn[i], &ja, sizeof(ja)))
15269 			continue;
15270 
15271 		err = verifier_remove_insns(env, i, 1);
15272 		if (err)
15273 			return err;
15274 		insn_cnt--;
15275 		i--;
15276 	}
15277 
15278 	return 0;
15279 }
15280 
15281 static int opt_subreg_zext_lo32_rnd_hi32(struct bpf_verifier_env *env,
15282 					 const union bpf_attr *attr)
15283 {
15284 	struct bpf_insn *patch, zext_patch[2], rnd_hi32_patch[4];
15285 	struct bpf_insn_aux_data *aux = env->insn_aux_data;
15286 	int i, patch_len, delta = 0, len = env->prog->len;
15287 	struct bpf_insn *insns = env->prog->insnsi;
15288 	struct bpf_prog *new_prog;
15289 	bool rnd_hi32;
15290 
15291 	rnd_hi32 = attr->prog_flags & BPF_F_TEST_RND_HI32;
15292 	zext_patch[1] = BPF_ZEXT_REG(0);
15293 	rnd_hi32_patch[1] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, 0);
15294 	rnd_hi32_patch[2] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
15295 	rnd_hi32_patch[3] = BPF_ALU64_REG(BPF_OR, 0, BPF_REG_AX);
15296 	for (i = 0; i < len; i++) {
15297 		int adj_idx = i + delta;
15298 		struct bpf_insn insn;
15299 		int load_reg;
15300 
15301 		insn = insns[adj_idx];
15302 		load_reg = insn_def_regno(&insn);
15303 		if (!aux[adj_idx].zext_dst) {
15304 			u8 code, class;
15305 			u32 imm_rnd;
15306 
15307 			if (!rnd_hi32)
15308 				continue;
15309 
15310 			code = insn.code;
15311 			class = BPF_CLASS(code);
15312 			if (load_reg == -1)
15313 				continue;
15314 
15315 			/* NOTE: arg "reg" (the fourth one) is only used for
15316 			 *       BPF_STX + SRC_OP, so it is safe to pass NULL
15317 			 *       here.
15318 			 */
15319 			if (is_reg64(env, &insn, load_reg, NULL, DST_OP)) {
15320 				if (class == BPF_LD &&
15321 				    BPF_MODE(code) == BPF_IMM)
15322 					i++;
15323 				continue;
15324 			}
15325 
15326 			/* ctx load could be transformed into wider load. */
15327 			if (class == BPF_LDX &&
15328 			    aux[adj_idx].ptr_type == PTR_TO_CTX)
15329 				continue;
15330 
15331 			imm_rnd = get_random_u32();
15332 			rnd_hi32_patch[0] = insn;
15333 			rnd_hi32_patch[1].imm = imm_rnd;
15334 			rnd_hi32_patch[3].dst_reg = load_reg;
15335 			patch = rnd_hi32_patch;
15336 			patch_len = 4;
15337 			goto apply_patch_buffer;
15338 		}
15339 
15340 		/* Add in an zero-extend instruction if a) the JIT has requested
15341 		 * it or b) it's a CMPXCHG.
15342 		 *
15343 		 * The latter is because: BPF_CMPXCHG always loads a value into
15344 		 * R0, therefore always zero-extends. However some archs'
15345 		 * equivalent instruction only does this load when the
15346 		 * comparison is successful. This detail of CMPXCHG is
15347 		 * orthogonal to the general zero-extension behaviour of the
15348 		 * CPU, so it's treated independently of bpf_jit_needs_zext.
15349 		 */
15350 		if (!bpf_jit_needs_zext() && !is_cmpxchg_insn(&insn))
15351 			continue;
15352 
15353 		/* Zero-extension is done by the caller. */
15354 		if (bpf_pseudo_kfunc_call(&insn))
15355 			continue;
15356 
15357 		if (WARN_ON(load_reg == -1)) {
15358 			verbose(env, "verifier bug. zext_dst is set, but no reg is defined\n");
15359 			return -EFAULT;
15360 		}
15361 
15362 		zext_patch[0] = insn;
15363 		zext_patch[1].dst_reg = load_reg;
15364 		zext_patch[1].src_reg = load_reg;
15365 		patch = zext_patch;
15366 		patch_len = 2;
15367 apply_patch_buffer:
15368 		new_prog = bpf_patch_insn_data(env, adj_idx, patch, patch_len);
15369 		if (!new_prog)
15370 			return -ENOMEM;
15371 		env->prog = new_prog;
15372 		insns = new_prog->insnsi;
15373 		aux = env->insn_aux_data;
15374 		delta += patch_len - 1;
15375 	}
15376 
15377 	return 0;
15378 }
15379 
15380 /* convert load instructions that access fields of a context type into a
15381  * sequence of instructions that access fields of the underlying structure:
15382  *     struct __sk_buff    -> struct sk_buff
15383  *     struct bpf_sock_ops -> struct sock
15384  */
15385 static int convert_ctx_accesses(struct bpf_verifier_env *env)
15386 {
15387 	const struct bpf_verifier_ops *ops = env->ops;
15388 	int i, cnt, size, ctx_field_size, delta = 0;
15389 	const int insn_cnt = env->prog->len;
15390 	struct bpf_insn insn_buf[16], *insn;
15391 	u32 target_size, size_default, off;
15392 	struct bpf_prog *new_prog;
15393 	enum bpf_access_type type;
15394 	bool is_narrower_load;
15395 
15396 	if (ops->gen_prologue || env->seen_direct_write) {
15397 		if (!ops->gen_prologue) {
15398 			verbose(env, "bpf verifier is misconfigured\n");
15399 			return -EINVAL;
15400 		}
15401 		cnt = ops->gen_prologue(insn_buf, env->seen_direct_write,
15402 					env->prog);
15403 		if (cnt >= ARRAY_SIZE(insn_buf)) {
15404 			verbose(env, "bpf verifier is misconfigured\n");
15405 			return -EINVAL;
15406 		} else if (cnt) {
15407 			new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt);
15408 			if (!new_prog)
15409 				return -ENOMEM;
15410 
15411 			env->prog = new_prog;
15412 			delta += cnt - 1;
15413 		}
15414 	}
15415 
15416 	if (bpf_prog_is_offloaded(env->prog->aux))
15417 		return 0;
15418 
15419 	insn = env->prog->insnsi + delta;
15420 
15421 	for (i = 0; i < insn_cnt; i++, insn++) {
15422 		bpf_convert_ctx_access_t convert_ctx_access;
15423 		bool ctx_access;
15424 
15425 		if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) ||
15426 		    insn->code == (BPF_LDX | BPF_MEM | BPF_H) ||
15427 		    insn->code == (BPF_LDX | BPF_MEM | BPF_W) ||
15428 		    insn->code == (BPF_LDX | BPF_MEM | BPF_DW)) {
15429 			type = BPF_READ;
15430 			ctx_access = true;
15431 		} else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) ||
15432 			   insn->code == (BPF_STX | BPF_MEM | BPF_H) ||
15433 			   insn->code == (BPF_STX | BPF_MEM | BPF_W) ||
15434 			   insn->code == (BPF_STX | BPF_MEM | BPF_DW) ||
15435 			   insn->code == (BPF_ST | BPF_MEM | BPF_B) ||
15436 			   insn->code == (BPF_ST | BPF_MEM | BPF_H) ||
15437 			   insn->code == (BPF_ST | BPF_MEM | BPF_W) ||
15438 			   insn->code == (BPF_ST | BPF_MEM | BPF_DW)) {
15439 			type = BPF_WRITE;
15440 			ctx_access = BPF_CLASS(insn->code) == BPF_STX;
15441 		} else {
15442 			continue;
15443 		}
15444 
15445 		if (type == BPF_WRITE &&
15446 		    env->insn_aux_data[i + delta].sanitize_stack_spill) {
15447 			struct bpf_insn patch[] = {
15448 				*insn,
15449 				BPF_ST_NOSPEC(),
15450 			};
15451 
15452 			cnt = ARRAY_SIZE(patch);
15453 			new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt);
15454 			if (!new_prog)
15455 				return -ENOMEM;
15456 
15457 			delta    += cnt - 1;
15458 			env->prog = new_prog;
15459 			insn      = new_prog->insnsi + i + delta;
15460 			continue;
15461 		}
15462 
15463 		if (!ctx_access)
15464 			continue;
15465 
15466 		switch ((int)env->insn_aux_data[i + delta].ptr_type) {
15467 		case PTR_TO_CTX:
15468 			if (!ops->convert_ctx_access)
15469 				continue;
15470 			convert_ctx_access = ops->convert_ctx_access;
15471 			break;
15472 		case PTR_TO_SOCKET:
15473 		case PTR_TO_SOCK_COMMON:
15474 			convert_ctx_access = bpf_sock_convert_ctx_access;
15475 			break;
15476 		case PTR_TO_TCP_SOCK:
15477 			convert_ctx_access = bpf_tcp_sock_convert_ctx_access;
15478 			break;
15479 		case PTR_TO_XDP_SOCK:
15480 			convert_ctx_access = bpf_xdp_sock_convert_ctx_access;
15481 			break;
15482 		case PTR_TO_BTF_ID:
15483 		case PTR_TO_BTF_ID | PTR_UNTRUSTED:
15484 		/* PTR_TO_BTF_ID | MEM_ALLOC always has a valid lifetime, unlike
15485 		 * PTR_TO_BTF_ID, and an active ref_obj_id, but the same cannot
15486 		 * be said once it is marked PTR_UNTRUSTED, hence we must handle
15487 		 * any faults for loads into such types. BPF_WRITE is disallowed
15488 		 * for this case.
15489 		 */
15490 		case PTR_TO_BTF_ID | MEM_ALLOC | PTR_UNTRUSTED:
15491 			if (type == BPF_READ) {
15492 				insn->code = BPF_LDX | BPF_PROBE_MEM |
15493 					BPF_SIZE((insn)->code);
15494 				env->prog->aux->num_exentries++;
15495 			}
15496 			continue;
15497 		default:
15498 			continue;
15499 		}
15500 
15501 		ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size;
15502 		size = BPF_LDST_BYTES(insn);
15503 
15504 		/* If the read access is a narrower load of the field,
15505 		 * convert to a 4/8-byte load, to minimum program type specific
15506 		 * convert_ctx_access changes. If conversion is successful,
15507 		 * we will apply proper mask to the result.
15508 		 */
15509 		is_narrower_load = size < ctx_field_size;
15510 		size_default = bpf_ctx_off_adjust_machine(ctx_field_size);
15511 		off = insn->off;
15512 		if (is_narrower_load) {
15513 			u8 size_code;
15514 
15515 			if (type == BPF_WRITE) {
15516 				verbose(env, "bpf verifier narrow ctx access misconfigured\n");
15517 				return -EINVAL;
15518 			}
15519 
15520 			size_code = BPF_H;
15521 			if (ctx_field_size == 4)
15522 				size_code = BPF_W;
15523 			else if (ctx_field_size == 8)
15524 				size_code = BPF_DW;
15525 
15526 			insn->off = off & ~(size_default - 1);
15527 			insn->code = BPF_LDX | BPF_MEM | size_code;
15528 		}
15529 
15530 		target_size = 0;
15531 		cnt = convert_ctx_access(type, insn, insn_buf, env->prog,
15532 					 &target_size);
15533 		if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) ||
15534 		    (ctx_field_size && !target_size)) {
15535 			verbose(env, "bpf verifier is misconfigured\n");
15536 			return -EINVAL;
15537 		}
15538 
15539 		if (is_narrower_load && size < target_size) {
15540 			u8 shift = bpf_ctx_narrow_access_offset(
15541 				off, size, size_default) * 8;
15542 			if (shift && cnt + 1 >= ARRAY_SIZE(insn_buf)) {
15543 				verbose(env, "bpf verifier narrow ctx load misconfigured\n");
15544 				return -EINVAL;
15545 			}
15546 			if (ctx_field_size <= 4) {
15547 				if (shift)
15548 					insn_buf[cnt++] = BPF_ALU32_IMM(BPF_RSH,
15549 									insn->dst_reg,
15550 									shift);
15551 				insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
15552 								(1 << size * 8) - 1);
15553 			} else {
15554 				if (shift)
15555 					insn_buf[cnt++] = BPF_ALU64_IMM(BPF_RSH,
15556 									insn->dst_reg,
15557 									shift);
15558 				insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg,
15559 								(1ULL << size * 8) - 1);
15560 			}
15561 		}
15562 
15563 		new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
15564 		if (!new_prog)
15565 			return -ENOMEM;
15566 
15567 		delta += cnt - 1;
15568 
15569 		/* keep walking new program and skip insns we just inserted */
15570 		env->prog = new_prog;
15571 		insn      = new_prog->insnsi + i + delta;
15572 	}
15573 
15574 	return 0;
15575 }
15576 
15577 static int jit_subprogs(struct bpf_verifier_env *env)
15578 {
15579 	struct bpf_prog *prog = env->prog, **func, *tmp;
15580 	int i, j, subprog_start, subprog_end = 0, len, subprog;
15581 	struct bpf_map *map_ptr;
15582 	struct bpf_insn *insn;
15583 	void *old_bpf_func;
15584 	int err, num_exentries;
15585 
15586 	if (env->subprog_cnt <= 1)
15587 		return 0;
15588 
15589 	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
15590 		if (!bpf_pseudo_func(insn) && !bpf_pseudo_call(insn))
15591 			continue;
15592 
15593 		/* Upon error here we cannot fall back to interpreter but
15594 		 * need a hard reject of the program. Thus -EFAULT is
15595 		 * propagated in any case.
15596 		 */
15597 		subprog = find_subprog(env, i + insn->imm + 1);
15598 		if (subprog < 0) {
15599 			WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
15600 				  i + insn->imm + 1);
15601 			return -EFAULT;
15602 		}
15603 		/* temporarily remember subprog id inside insn instead of
15604 		 * aux_data, since next loop will split up all insns into funcs
15605 		 */
15606 		insn->off = subprog;
15607 		/* remember original imm in case JIT fails and fallback
15608 		 * to interpreter will be needed
15609 		 */
15610 		env->insn_aux_data[i].call_imm = insn->imm;
15611 		/* point imm to __bpf_call_base+1 from JITs point of view */
15612 		insn->imm = 1;
15613 		if (bpf_pseudo_func(insn))
15614 			/* jit (e.g. x86_64) may emit fewer instructions
15615 			 * if it learns a u32 imm is the same as a u64 imm.
15616 			 * Force a non zero here.
15617 			 */
15618 			insn[1].imm = 1;
15619 	}
15620 
15621 	err = bpf_prog_alloc_jited_linfo(prog);
15622 	if (err)
15623 		goto out_undo_insn;
15624 
15625 	err = -ENOMEM;
15626 	func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL);
15627 	if (!func)
15628 		goto out_undo_insn;
15629 
15630 	for (i = 0; i < env->subprog_cnt; i++) {
15631 		subprog_start = subprog_end;
15632 		subprog_end = env->subprog_info[i + 1].start;
15633 
15634 		len = subprog_end - subprog_start;
15635 		/* bpf_prog_run() doesn't call subprogs directly,
15636 		 * hence main prog stats include the runtime of subprogs.
15637 		 * subprogs don't have IDs and not reachable via prog_get_next_id
15638 		 * func[i]->stats will never be accessed and stays NULL
15639 		 */
15640 		func[i] = bpf_prog_alloc_no_stats(bpf_prog_size(len), GFP_USER);
15641 		if (!func[i])
15642 			goto out_free;
15643 		memcpy(func[i]->insnsi, &prog->insnsi[subprog_start],
15644 		       len * sizeof(struct bpf_insn));
15645 		func[i]->type = prog->type;
15646 		func[i]->len = len;
15647 		if (bpf_prog_calc_tag(func[i]))
15648 			goto out_free;
15649 		func[i]->is_func = 1;
15650 		func[i]->aux->func_idx = i;
15651 		/* Below members will be freed only at prog->aux */
15652 		func[i]->aux->btf = prog->aux->btf;
15653 		func[i]->aux->func_info = prog->aux->func_info;
15654 		func[i]->aux->func_info_cnt = prog->aux->func_info_cnt;
15655 		func[i]->aux->poke_tab = prog->aux->poke_tab;
15656 		func[i]->aux->size_poke_tab = prog->aux->size_poke_tab;
15657 
15658 		for (j = 0; j < prog->aux->size_poke_tab; j++) {
15659 			struct bpf_jit_poke_descriptor *poke;
15660 
15661 			poke = &prog->aux->poke_tab[j];
15662 			if (poke->insn_idx < subprog_end &&
15663 			    poke->insn_idx >= subprog_start)
15664 				poke->aux = func[i]->aux;
15665 		}
15666 
15667 		func[i]->aux->name[0] = 'F';
15668 		func[i]->aux->stack_depth = env->subprog_info[i].stack_depth;
15669 		func[i]->jit_requested = 1;
15670 		func[i]->blinding_requested = prog->blinding_requested;
15671 		func[i]->aux->kfunc_tab = prog->aux->kfunc_tab;
15672 		func[i]->aux->kfunc_btf_tab = prog->aux->kfunc_btf_tab;
15673 		func[i]->aux->linfo = prog->aux->linfo;
15674 		func[i]->aux->nr_linfo = prog->aux->nr_linfo;
15675 		func[i]->aux->jited_linfo = prog->aux->jited_linfo;
15676 		func[i]->aux->linfo_idx = env->subprog_info[i].linfo_idx;
15677 		num_exentries = 0;
15678 		insn = func[i]->insnsi;
15679 		for (j = 0; j < func[i]->len; j++, insn++) {
15680 			if (BPF_CLASS(insn->code) == BPF_LDX &&
15681 			    BPF_MODE(insn->code) == BPF_PROBE_MEM)
15682 				num_exentries++;
15683 		}
15684 		func[i]->aux->num_exentries = num_exentries;
15685 		func[i]->aux->tail_call_reachable = env->subprog_info[i].tail_call_reachable;
15686 		func[i] = bpf_int_jit_compile(func[i]);
15687 		if (!func[i]->jited) {
15688 			err = -ENOTSUPP;
15689 			goto out_free;
15690 		}
15691 		cond_resched();
15692 	}
15693 
15694 	/* at this point all bpf functions were successfully JITed
15695 	 * now populate all bpf_calls with correct addresses and
15696 	 * run last pass of JIT
15697 	 */
15698 	for (i = 0; i < env->subprog_cnt; i++) {
15699 		insn = func[i]->insnsi;
15700 		for (j = 0; j < func[i]->len; j++, insn++) {
15701 			if (bpf_pseudo_func(insn)) {
15702 				subprog = insn->off;
15703 				insn[0].imm = (u32)(long)func[subprog]->bpf_func;
15704 				insn[1].imm = ((u64)(long)func[subprog]->bpf_func) >> 32;
15705 				continue;
15706 			}
15707 			if (!bpf_pseudo_call(insn))
15708 				continue;
15709 			subprog = insn->off;
15710 			insn->imm = BPF_CALL_IMM(func[subprog]->bpf_func);
15711 		}
15712 
15713 		/* we use the aux data to keep a list of the start addresses
15714 		 * of the JITed images for each function in the program
15715 		 *
15716 		 * for some architectures, such as powerpc64, the imm field
15717 		 * might not be large enough to hold the offset of the start
15718 		 * address of the callee's JITed image from __bpf_call_base
15719 		 *
15720 		 * in such cases, we can lookup the start address of a callee
15721 		 * by using its subprog id, available from the off field of
15722 		 * the call instruction, as an index for this list
15723 		 */
15724 		func[i]->aux->func = func;
15725 		func[i]->aux->func_cnt = env->subprog_cnt;
15726 	}
15727 	for (i = 0; i < env->subprog_cnt; i++) {
15728 		old_bpf_func = func[i]->bpf_func;
15729 		tmp = bpf_int_jit_compile(func[i]);
15730 		if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) {
15731 			verbose(env, "JIT doesn't support bpf-to-bpf calls\n");
15732 			err = -ENOTSUPP;
15733 			goto out_free;
15734 		}
15735 		cond_resched();
15736 	}
15737 
15738 	/* finally lock prog and jit images for all functions and
15739 	 * populate kallsysm
15740 	 */
15741 	for (i = 0; i < env->subprog_cnt; i++) {
15742 		bpf_prog_lock_ro(func[i]);
15743 		bpf_prog_kallsyms_add(func[i]);
15744 	}
15745 
15746 	/* Last step: make now unused interpreter insns from main
15747 	 * prog consistent for later dump requests, so they can
15748 	 * later look the same as if they were interpreted only.
15749 	 */
15750 	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
15751 		if (bpf_pseudo_func(insn)) {
15752 			insn[0].imm = env->insn_aux_data[i].call_imm;
15753 			insn[1].imm = insn->off;
15754 			insn->off = 0;
15755 			continue;
15756 		}
15757 		if (!bpf_pseudo_call(insn))
15758 			continue;
15759 		insn->off = env->insn_aux_data[i].call_imm;
15760 		subprog = find_subprog(env, i + insn->off + 1);
15761 		insn->imm = subprog;
15762 	}
15763 
15764 	prog->jited = 1;
15765 	prog->bpf_func = func[0]->bpf_func;
15766 	prog->jited_len = func[0]->jited_len;
15767 	prog->aux->func = func;
15768 	prog->aux->func_cnt = env->subprog_cnt;
15769 	bpf_prog_jit_attempt_done(prog);
15770 	return 0;
15771 out_free:
15772 	/* We failed JIT'ing, so at this point we need to unregister poke
15773 	 * descriptors from subprogs, so that kernel is not attempting to
15774 	 * patch it anymore as we're freeing the subprog JIT memory.
15775 	 */
15776 	for (i = 0; i < prog->aux->size_poke_tab; i++) {
15777 		map_ptr = prog->aux->poke_tab[i].tail_call.map;
15778 		map_ptr->ops->map_poke_untrack(map_ptr, prog->aux);
15779 	}
15780 	/* At this point we're guaranteed that poke descriptors are not
15781 	 * live anymore. We can just unlink its descriptor table as it's
15782 	 * released with the main prog.
15783 	 */
15784 	for (i = 0; i < env->subprog_cnt; i++) {
15785 		if (!func[i])
15786 			continue;
15787 		func[i]->aux->poke_tab = NULL;
15788 		bpf_jit_free(func[i]);
15789 	}
15790 	kfree(func);
15791 out_undo_insn:
15792 	/* cleanup main prog to be interpreted */
15793 	prog->jit_requested = 0;
15794 	prog->blinding_requested = 0;
15795 	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
15796 		if (!bpf_pseudo_call(insn))
15797 			continue;
15798 		insn->off = 0;
15799 		insn->imm = env->insn_aux_data[i].call_imm;
15800 	}
15801 	bpf_prog_jit_attempt_done(prog);
15802 	return err;
15803 }
15804 
15805 static int fixup_call_args(struct bpf_verifier_env *env)
15806 {
15807 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
15808 	struct bpf_prog *prog = env->prog;
15809 	struct bpf_insn *insn = prog->insnsi;
15810 	bool has_kfunc_call = bpf_prog_has_kfunc_call(prog);
15811 	int i, depth;
15812 #endif
15813 	int err = 0;
15814 
15815 	if (env->prog->jit_requested &&
15816 	    !bpf_prog_is_offloaded(env->prog->aux)) {
15817 		err = jit_subprogs(env);
15818 		if (err == 0)
15819 			return 0;
15820 		if (err == -EFAULT)
15821 			return err;
15822 	}
15823 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
15824 	if (has_kfunc_call) {
15825 		verbose(env, "calling kernel functions are not allowed in non-JITed programs\n");
15826 		return -EINVAL;
15827 	}
15828 	if (env->subprog_cnt > 1 && env->prog->aux->tail_call_reachable) {
15829 		/* When JIT fails the progs with bpf2bpf calls and tail_calls
15830 		 * have to be rejected, since interpreter doesn't support them yet.
15831 		 */
15832 		verbose(env, "tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls\n");
15833 		return -EINVAL;
15834 	}
15835 	for (i = 0; i < prog->len; i++, insn++) {
15836 		if (bpf_pseudo_func(insn)) {
15837 			/* When JIT fails the progs with callback calls
15838 			 * have to be rejected, since interpreter doesn't support them yet.
15839 			 */
15840 			verbose(env, "callbacks are not allowed in non-JITed programs\n");
15841 			return -EINVAL;
15842 		}
15843 
15844 		if (!bpf_pseudo_call(insn))
15845 			continue;
15846 		depth = get_callee_stack_depth(env, insn, i);
15847 		if (depth < 0)
15848 			return depth;
15849 		bpf_patch_call_args(insn, depth);
15850 	}
15851 	err = 0;
15852 #endif
15853 	return err;
15854 }
15855 
15856 static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
15857 			    struct bpf_insn *insn_buf, int insn_idx, int *cnt)
15858 {
15859 	const struct bpf_kfunc_desc *desc;
15860 	void *xdp_kfunc;
15861 
15862 	if (!insn->imm) {
15863 		verbose(env, "invalid kernel function call not eliminated in verifier pass\n");
15864 		return -EINVAL;
15865 	}
15866 
15867 	*cnt = 0;
15868 
15869 	if (bpf_dev_bound_kfunc_id(insn->imm)) {
15870 		xdp_kfunc = bpf_dev_bound_resolve_kfunc(env->prog, insn->imm);
15871 		if (xdp_kfunc) {
15872 			insn->imm = BPF_CALL_IMM(xdp_kfunc);
15873 			return 0;
15874 		}
15875 
15876 		/* fallback to default kfunc when not supported by netdev */
15877 	}
15878 
15879 	/* insn->imm has the btf func_id. Replace it with
15880 	 * an address (relative to __bpf_call_base).
15881 	 */
15882 	desc = find_kfunc_desc(env->prog, insn->imm, insn->off);
15883 	if (!desc) {
15884 		verbose(env, "verifier internal error: kernel function descriptor not found for func_id %u\n",
15885 			insn->imm);
15886 		return -EFAULT;
15887 	}
15888 
15889 	insn->imm = desc->imm;
15890 	if (insn->off)
15891 		return 0;
15892 	if (desc->func_id == special_kfunc_list[KF_bpf_obj_new_impl]) {
15893 		struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta;
15894 		struct bpf_insn addr[2] = { BPF_LD_IMM64(BPF_REG_2, (long)kptr_struct_meta) };
15895 		u64 obj_new_size = env->insn_aux_data[insn_idx].obj_new_size;
15896 
15897 		insn_buf[0] = BPF_MOV64_IMM(BPF_REG_1, obj_new_size);
15898 		insn_buf[1] = addr[0];
15899 		insn_buf[2] = addr[1];
15900 		insn_buf[3] = *insn;
15901 		*cnt = 4;
15902 	} else if (desc->func_id == special_kfunc_list[KF_bpf_obj_drop_impl]) {
15903 		struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta;
15904 		struct bpf_insn addr[2] = { BPF_LD_IMM64(BPF_REG_2, (long)kptr_struct_meta) };
15905 
15906 		insn_buf[0] = addr[0];
15907 		insn_buf[1] = addr[1];
15908 		insn_buf[2] = *insn;
15909 		*cnt = 3;
15910 	} else if (desc->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx] ||
15911 		   desc->func_id == special_kfunc_list[KF_bpf_rdonly_cast]) {
15912 		insn_buf[0] = BPF_MOV64_REG(BPF_REG_0, BPF_REG_1);
15913 		*cnt = 1;
15914 	}
15915 	return 0;
15916 }
15917 
15918 /* Do various post-verification rewrites in a single program pass.
15919  * These rewrites simplify JIT and interpreter implementations.
15920  */
15921 static int do_misc_fixups(struct bpf_verifier_env *env)
15922 {
15923 	struct bpf_prog *prog = env->prog;
15924 	enum bpf_attach_type eatype = prog->expected_attach_type;
15925 	enum bpf_prog_type prog_type = resolve_prog_type(prog);
15926 	struct bpf_insn *insn = prog->insnsi;
15927 	const struct bpf_func_proto *fn;
15928 	const int insn_cnt = prog->len;
15929 	const struct bpf_map_ops *ops;
15930 	struct bpf_insn_aux_data *aux;
15931 	struct bpf_insn insn_buf[16];
15932 	struct bpf_prog *new_prog;
15933 	struct bpf_map *map_ptr;
15934 	int i, ret, cnt, delta = 0;
15935 
15936 	for (i = 0; i < insn_cnt; i++, insn++) {
15937 		/* Make divide-by-zero exceptions impossible. */
15938 		if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) ||
15939 		    insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
15940 		    insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
15941 		    insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
15942 			bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
15943 			bool isdiv = BPF_OP(insn->code) == BPF_DIV;
15944 			struct bpf_insn *patchlet;
15945 			struct bpf_insn chk_and_div[] = {
15946 				/* [R,W]x div 0 -> 0 */
15947 				BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
15948 					     BPF_JNE | BPF_K, insn->src_reg,
15949 					     0, 2, 0),
15950 				BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg),
15951 				BPF_JMP_IMM(BPF_JA, 0, 0, 1),
15952 				*insn,
15953 			};
15954 			struct bpf_insn chk_and_mod[] = {
15955 				/* [R,W]x mod 0 -> [R,W]x */
15956 				BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
15957 					     BPF_JEQ | BPF_K, insn->src_reg,
15958 					     0, 1 + (is64 ? 0 : 1), 0),
15959 				*insn,
15960 				BPF_JMP_IMM(BPF_JA, 0, 0, 1),
15961 				BPF_MOV32_REG(insn->dst_reg, insn->dst_reg),
15962 			};
15963 
15964 			patchlet = isdiv ? chk_and_div : chk_and_mod;
15965 			cnt = isdiv ? ARRAY_SIZE(chk_and_div) :
15966 				      ARRAY_SIZE(chk_and_mod) - (is64 ? 2 : 0);
15967 
15968 			new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
15969 			if (!new_prog)
15970 				return -ENOMEM;
15971 
15972 			delta    += cnt - 1;
15973 			env->prog = prog = new_prog;
15974 			insn      = new_prog->insnsi + i + delta;
15975 			continue;
15976 		}
15977 
15978 		/* Implement LD_ABS and LD_IND with a rewrite, if supported by the program type. */
15979 		if (BPF_CLASS(insn->code) == BPF_LD &&
15980 		    (BPF_MODE(insn->code) == BPF_ABS ||
15981 		     BPF_MODE(insn->code) == BPF_IND)) {
15982 			cnt = env->ops->gen_ld_abs(insn, insn_buf);
15983 			if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
15984 				verbose(env, "bpf verifier is misconfigured\n");
15985 				return -EINVAL;
15986 			}
15987 
15988 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
15989 			if (!new_prog)
15990 				return -ENOMEM;
15991 
15992 			delta    += cnt - 1;
15993 			env->prog = prog = new_prog;
15994 			insn      = new_prog->insnsi + i + delta;
15995 			continue;
15996 		}
15997 
15998 		/* Rewrite pointer arithmetic to mitigate speculation attacks. */
15999 		if (insn->code == (BPF_ALU64 | BPF_ADD | BPF_X) ||
16000 		    insn->code == (BPF_ALU64 | BPF_SUB | BPF_X)) {
16001 			const u8 code_add = BPF_ALU64 | BPF_ADD | BPF_X;
16002 			const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X;
16003 			struct bpf_insn *patch = &insn_buf[0];
16004 			bool issrc, isneg, isimm;
16005 			u32 off_reg;
16006 
16007 			aux = &env->insn_aux_data[i + delta];
16008 			if (!aux->alu_state ||
16009 			    aux->alu_state == BPF_ALU_NON_POINTER)
16010 				continue;
16011 
16012 			isneg = aux->alu_state & BPF_ALU_NEG_VALUE;
16013 			issrc = (aux->alu_state & BPF_ALU_SANITIZE) ==
16014 				BPF_ALU_SANITIZE_SRC;
16015 			isimm = aux->alu_state & BPF_ALU_IMMEDIATE;
16016 
16017 			off_reg = issrc ? insn->src_reg : insn->dst_reg;
16018 			if (isimm) {
16019 				*patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit);
16020 			} else {
16021 				if (isneg)
16022 					*patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
16023 				*patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit);
16024 				*patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg);
16025 				*patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg);
16026 				*patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);
16027 				*patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63);
16028 				*patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX, off_reg);
16029 			}
16030 			if (!issrc)
16031 				*patch++ = BPF_MOV64_REG(insn->dst_reg, insn->src_reg);
16032 			insn->src_reg = BPF_REG_AX;
16033 			if (isneg)
16034 				insn->code = insn->code == code_add ?
16035 					     code_sub : code_add;
16036 			*patch++ = *insn;
16037 			if (issrc && isneg && !isimm)
16038 				*patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
16039 			cnt = patch - insn_buf;
16040 
16041 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
16042 			if (!new_prog)
16043 				return -ENOMEM;
16044 
16045 			delta    += cnt - 1;
16046 			env->prog = prog = new_prog;
16047 			insn      = new_prog->insnsi + i + delta;
16048 			continue;
16049 		}
16050 
16051 		if (insn->code != (BPF_JMP | BPF_CALL))
16052 			continue;
16053 		if (insn->src_reg == BPF_PSEUDO_CALL)
16054 			continue;
16055 		if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) {
16056 			ret = fixup_kfunc_call(env, insn, insn_buf, i + delta, &cnt);
16057 			if (ret)
16058 				return ret;
16059 			if (cnt == 0)
16060 				continue;
16061 
16062 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
16063 			if (!new_prog)
16064 				return -ENOMEM;
16065 
16066 			delta	 += cnt - 1;
16067 			env->prog = prog = new_prog;
16068 			insn	  = new_prog->insnsi + i + delta;
16069 			continue;
16070 		}
16071 
16072 		if (insn->imm == BPF_FUNC_get_route_realm)
16073 			prog->dst_needed = 1;
16074 		if (insn->imm == BPF_FUNC_get_prandom_u32)
16075 			bpf_user_rnd_init_once();
16076 		if (insn->imm == BPF_FUNC_override_return)
16077 			prog->kprobe_override = 1;
16078 		if (insn->imm == BPF_FUNC_tail_call) {
16079 			/* If we tail call into other programs, we
16080 			 * cannot make any assumptions since they can
16081 			 * be replaced dynamically during runtime in
16082 			 * the program array.
16083 			 */
16084 			prog->cb_access = 1;
16085 			if (!allow_tail_call_in_subprogs(env))
16086 				prog->aux->stack_depth = MAX_BPF_STACK;
16087 			prog->aux->max_pkt_offset = MAX_PACKET_OFF;
16088 
16089 			/* mark bpf_tail_call as different opcode to avoid
16090 			 * conditional branch in the interpreter for every normal
16091 			 * call and to prevent accidental JITing by JIT compiler
16092 			 * that doesn't support bpf_tail_call yet
16093 			 */
16094 			insn->imm = 0;
16095 			insn->code = BPF_JMP | BPF_TAIL_CALL;
16096 
16097 			aux = &env->insn_aux_data[i + delta];
16098 			if (env->bpf_capable && !prog->blinding_requested &&
16099 			    prog->jit_requested &&
16100 			    !bpf_map_key_poisoned(aux) &&
16101 			    !bpf_map_ptr_poisoned(aux) &&
16102 			    !bpf_map_ptr_unpriv(aux)) {
16103 				struct bpf_jit_poke_descriptor desc = {
16104 					.reason = BPF_POKE_REASON_TAIL_CALL,
16105 					.tail_call.map = BPF_MAP_PTR(aux->map_ptr_state),
16106 					.tail_call.key = bpf_map_key_immediate(aux),
16107 					.insn_idx = i + delta,
16108 				};
16109 
16110 				ret = bpf_jit_add_poke_descriptor(prog, &desc);
16111 				if (ret < 0) {
16112 					verbose(env, "adding tail call poke descriptor failed\n");
16113 					return ret;
16114 				}
16115 
16116 				insn->imm = ret + 1;
16117 				continue;
16118 			}
16119 
16120 			if (!bpf_map_ptr_unpriv(aux))
16121 				continue;
16122 
16123 			/* instead of changing every JIT dealing with tail_call
16124 			 * emit two extra insns:
16125 			 * if (index >= max_entries) goto out;
16126 			 * index &= array->index_mask;
16127 			 * to avoid out-of-bounds cpu speculation
16128 			 */
16129 			if (bpf_map_ptr_poisoned(aux)) {
16130 				verbose(env, "tail_call abusing map_ptr\n");
16131 				return -EINVAL;
16132 			}
16133 
16134 			map_ptr = BPF_MAP_PTR(aux->map_ptr_state);
16135 			insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3,
16136 						  map_ptr->max_entries, 2);
16137 			insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3,
16138 						    container_of(map_ptr,
16139 								 struct bpf_array,
16140 								 map)->index_mask);
16141 			insn_buf[2] = *insn;
16142 			cnt = 3;
16143 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
16144 			if (!new_prog)
16145 				return -ENOMEM;
16146 
16147 			delta    += cnt - 1;
16148 			env->prog = prog = new_prog;
16149 			insn      = new_prog->insnsi + i + delta;
16150 			continue;
16151 		}
16152 
16153 		if (insn->imm == BPF_FUNC_timer_set_callback) {
16154 			/* The verifier will process callback_fn as many times as necessary
16155 			 * with different maps and the register states prepared by
16156 			 * set_timer_callback_state will be accurate.
16157 			 *
16158 			 * The following use case is valid:
16159 			 *   map1 is shared by prog1, prog2, prog3.
16160 			 *   prog1 calls bpf_timer_init for some map1 elements
16161 			 *   prog2 calls bpf_timer_set_callback for some map1 elements.
16162 			 *     Those that were not bpf_timer_init-ed will return -EINVAL.
16163 			 *   prog3 calls bpf_timer_start for some map1 elements.
16164 			 *     Those that were not both bpf_timer_init-ed and
16165 			 *     bpf_timer_set_callback-ed will return -EINVAL.
16166 			 */
16167 			struct bpf_insn ld_addrs[2] = {
16168 				BPF_LD_IMM64(BPF_REG_3, (long)prog->aux),
16169 			};
16170 
16171 			insn_buf[0] = ld_addrs[0];
16172 			insn_buf[1] = ld_addrs[1];
16173 			insn_buf[2] = *insn;
16174 			cnt = 3;
16175 
16176 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
16177 			if (!new_prog)
16178 				return -ENOMEM;
16179 
16180 			delta    += cnt - 1;
16181 			env->prog = prog = new_prog;
16182 			insn      = new_prog->insnsi + i + delta;
16183 			goto patch_call_imm;
16184 		}
16185 
16186 		if (is_storage_get_function(insn->imm)) {
16187 			if (!env->prog->aux->sleepable ||
16188 			    env->insn_aux_data[i + delta].storage_get_func_atomic)
16189 				insn_buf[0] = BPF_MOV64_IMM(BPF_REG_5, (__force __s32)GFP_ATOMIC);
16190 			else
16191 				insn_buf[0] = BPF_MOV64_IMM(BPF_REG_5, (__force __s32)GFP_KERNEL);
16192 			insn_buf[1] = *insn;
16193 			cnt = 2;
16194 
16195 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
16196 			if (!new_prog)
16197 				return -ENOMEM;
16198 
16199 			delta += cnt - 1;
16200 			env->prog = prog = new_prog;
16201 			insn = new_prog->insnsi + i + delta;
16202 			goto patch_call_imm;
16203 		}
16204 
16205 		/* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup
16206 		 * and other inlining handlers are currently limited to 64 bit
16207 		 * only.
16208 		 */
16209 		if (prog->jit_requested && BITS_PER_LONG == 64 &&
16210 		    (insn->imm == BPF_FUNC_map_lookup_elem ||
16211 		     insn->imm == BPF_FUNC_map_update_elem ||
16212 		     insn->imm == BPF_FUNC_map_delete_elem ||
16213 		     insn->imm == BPF_FUNC_map_push_elem   ||
16214 		     insn->imm == BPF_FUNC_map_pop_elem    ||
16215 		     insn->imm == BPF_FUNC_map_peek_elem   ||
16216 		     insn->imm == BPF_FUNC_redirect_map    ||
16217 		     insn->imm == BPF_FUNC_for_each_map_elem ||
16218 		     insn->imm == BPF_FUNC_map_lookup_percpu_elem)) {
16219 			aux = &env->insn_aux_data[i + delta];
16220 			if (bpf_map_ptr_poisoned(aux))
16221 				goto patch_call_imm;
16222 
16223 			map_ptr = BPF_MAP_PTR(aux->map_ptr_state);
16224 			ops = map_ptr->ops;
16225 			if (insn->imm == BPF_FUNC_map_lookup_elem &&
16226 			    ops->map_gen_lookup) {
16227 				cnt = ops->map_gen_lookup(map_ptr, insn_buf);
16228 				if (cnt == -EOPNOTSUPP)
16229 					goto patch_map_ops_generic;
16230 				if (cnt <= 0 || cnt >= ARRAY_SIZE(insn_buf)) {
16231 					verbose(env, "bpf verifier is misconfigured\n");
16232 					return -EINVAL;
16233 				}
16234 
16235 				new_prog = bpf_patch_insn_data(env, i + delta,
16236 							       insn_buf, cnt);
16237 				if (!new_prog)
16238 					return -ENOMEM;
16239 
16240 				delta    += cnt - 1;
16241 				env->prog = prog = new_prog;
16242 				insn      = new_prog->insnsi + i + delta;
16243 				continue;
16244 			}
16245 
16246 			BUILD_BUG_ON(!__same_type(ops->map_lookup_elem,
16247 				     (void *(*)(struct bpf_map *map, void *key))NULL));
16248 			BUILD_BUG_ON(!__same_type(ops->map_delete_elem,
16249 				     (int (*)(struct bpf_map *map, void *key))NULL));
16250 			BUILD_BUG_ON(!__same_type(ops->map_update_elem,
16251 				     (int (*)(struct bpf_map *map, void *key, void *value,
16252 					      u64 flags))NULL));
16253 			BUILD_BUG_ON(!__same_type(ops->map_push_elem,
16254 				     (int (*)(struct bpf_map *map, void *value,
16255 					      u64 flags))NULL));
16256 			BUILD_BUG_ON(!__same_type(ops->map_pop_elem,
16257 				     (int (*)(struct bpf_map *map, void *value))NULL));
16258 			BUILD_BUG_ON(!__same_type(ops->map_peek_elem,
16259 				     (int (*)(struct bpf_map *map, void *value))NULL));
16260 			BUILD_BUG_ON(!__same_type(ops->map_redirect,
16261 				     (int (*)(struct bpf_map *map, u64 index, u64 flags))NULL));
16262 			BUILD_BUG_ON(!__same_type(ops->map_for_each_callback,
16263 				     (int (*)(struct bpf_map *map,
16264 					      bpf_callback_t callback_fn,
16265 					      void *callback_ctx,
16266 					      u64 flags))NULL));
16267 			BUILD_BUG_ON(!__same_type(ops->map_lookup_percpu_elem,
16268 				     (void *(*)(struct bpf_map *map, void *key, u32 cpu))NULL));
16269 
16270 patch_map_ops_generic:
16271 			switch (insn->imm) {
16272 			case BPF_FUNC_map_lookup_elem:
16273 				insn->imm = BPF_CALL_IMM(ops->map_lookup_elem);
16274 				continue;
16275 			case BPF_FUNC_map_update_elem:
16276 				insn->imm = BPF_CALL_IMM(ops->map_update_elem);
16277 				continue;
16278 			case BPF_FUNC_map_delete_elem:
16279 				insn->imm = BPF_CALL_IMM(ops->map_delete_elem);
16280 				continue;
16281 			case BPF_FUNC_map_push_elem:
16282 				insn->imm = BPF_CALL_IMM(ops->map_push_elem);
16283 				continue;
16284 			case BPF_FUNC_map_pop_elem:
16285 				insn->imm = BPF_CALL_IMM(ops->map_pop_elem);
16286 				continue;
16287 			case BPF_FUNC_map_peek_elem:
16288 				insn->imm = BPF_CALL_IMM(ops->map_peek_elem);
16289 				continue;
16290 			case BPF_FUNC_redirect_map:
16291 				insn->imm = BPF_CALL_IMM(ops->map_redirect);
16292 				continue;
16293 			case BPF_FUNC_for_each_map_elem:
16294 				insn->imm = BPF_CALL_IMM(ops->map_for_each_callback);
16295 				continue;
16296 			case BPF_FUNC_map_lookup_percpu_elem:
16297 				insn->imm = BPF_CALL_IMM(ops->map_lookup_percpu_elem);
16298 				continue;
16299 			}
16300 
16301 			goto patch_call_imm;
16302 		}
16303 
16304 		/* Implement bpf_jiffies64 inline. */
16305 		if (prog->jit_requested && BITS_PER_LONG == 64 &&
16306 		    insn->imm == BPF_FUNC_jiffies64) {
16307 			struct bpf_insn ld_jiffies_addr[2] = {
16308 				BPF_LD_IMM64(BPF_REG_0,
16309 					     (unsigned long)&jiffies),
16310 			};
16311 
16312 			insn_buf[0] = ld_jiffies_addr[0];
16313 			insn_buf[1] = ld_jiffies_addr[1];
16314 			insn_buf[2] = BPF_LDX_MEM(BPF_DW, BPF_REG_0,
16315 						  BPF_REG_0, 0);
16316 			cnt = 3;
16317 
16318 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf,
16319 						       cnt);
16320 			if (!new_prog)
16321 				return -ENOMEM;
16322 
16323 			delta    += cnt - 1;
16324 			env->prog = prog = new_prog;
16325 			insn      = new_prog->insnsi + i + delta;
16326 			continue;
16327 		}
16328 
16329 		/* Implement bpf_get_func_arg inline. */
16330 		if (prog_type == BPF_PROG_TYPE_TRACING &&
16331 		    insn->imm == BPF_FUNC_get_func_arg) {
16332 			/* Load nr_args from ctx - 8 */
16333 			insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8);
16334 			insn_buf[1] = BPF_JMP32_REG(BPF_JGE, BPF_REG_2, BPF_REG_0, 6);
16335 			insn_buf[2] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 3);
16336 			insn_buf[3] = BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1);
16337 			insn_buf[4] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0);
16338 			insn_buf[5] = BPF_STX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0);
16339 			insn_buf[6] = BPF_MOV64_IMM(BPF_REG_0, 0);
16340 			insn_buf[7] = BPF_JMP_A(1);
16341 			insn_buf[8] = BPF_MOV64_IMM(BPF_REG_0, -EINVAL);
16342 			cnt = 9;
16343 
16344 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
16345 			if (!new_prog)
16346 				return -ENOMEM;
16347 
16348 			delta    += cnt - 1;
16349 			env->prog = prog = new_prog;
16350 			insn      = new_prog->insnsi + i + delta;
16351 			continue;
16352 		}
16353 
16354 		/* Implement bpf_get_func_ret inline. */
16355 		if (prog_type == BPF_PROG_TYPE_TRACING &&
16356 		    insn->imm == BPF_FUNC_get_func_ret) {
16357 			if (eatype == BPF_TRACE_FEXIT ||
16358 			    eatype == BPF_MODIFY_RETURN) {
16359 				/* Load nr_args from ctx - 8 */
16360 				insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8);
16361 				insn_buf[1] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_0, 3);
16362 				insn_buf[2] = BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1);
16363 				insn_buf[3] = BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0);
16364 				insn_buf[4] = BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, 0);
16365 				insn_buf[5] = BPF_MOV64_IMM(BPF_REG_0, 0);
16366 				cnt = 6;
16367 			} else {
16368 				insn_buf[0] = BPF_MOV64_IMM(BPF_REG_0, -EOPNOTSUPP);
16369 				cnt = 1;
16370 			}
16371 
16372 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
16373 			if (!new_prog)
16374 				return -ENOMEM;
16375 
16376 			delta    += cnt - 1;
16377 			env->prog = prog = new_prog;
16378 			insn      = new_prog->insnsi + i + delta;
16379 			continue;
16380 		}
16381 
16382 		/* Implement get_func_arg_cnt inline. */
16383 		if (prog_type == BPF_PROG_TYPE_TRACING &&
16384 		    insn->imm == BPF_FUNC_get_func_arg_cnt) {
16385 			/* Load nr_args from ctx - 8 */
16386 			insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8);
16387 
16388 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 1);
16389 			if (!new_prog)
16390 				return -ENOMEM;
16391 
16392 			env->prog = prog = new_prog;
16393 			insn      = new_prog->insnsi + i + delta;
16394 			continue;
16395 		}
16396 
16397 		/* Implement bpf_get_func_ip inline. */
16398 		if (prog_type == BPF_PROG_TYPE_TRACING &&
16399 		    insn->imm == BPF_FUNC_get_func_ip) {
16400 			/* Load IP address from ctx - 16 */
16401 			insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -16);
16402 
16403 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 1);
16404 			if (!new_prog)
16405 				return -ENOMEM;
16406 
16407 			env->prog = prog = new_prog;
16408 			insn      = new_prog->insnsi + i + delta;
16409 			continue;
16410 		}
16411 
16412 patch_call_imm:
16413 		fn = env->ops->get_func_proto(insn->imm, env->prog);
16414 		/* all functions that have prototype and verifier allowed
16415 		 * programs to call them, must be real in-kernel functions
16416 		 */
16417 		if (!fn->func) {
16418 			verbose(env,
16419 				"kernel subsystem misconfigured func %s#%d\n",
16420 				func_id_name(insn->imm), insn->imm);
16421 			return -EFAULT;
16422 		}
16423 		insn->imm = fn->func - __bpf_call_base;
16424 	}
16425 
16426 	/* Since poke tab is now finalized, publish aux to tracker. */
16427 	for (i = 0; i < prog->aux->size_poke_tab; i++) {
16428 		map_ptr = prog->aux->poke_tab[i].tail_call.map;
16429 		if (!map_ptr->ops->map_poke_track ||
16430 		    !map_ptr->ops->map_poke_untrack ||
16431 		    !map_ptr->ops->map_poke_run) {
16432 			verbose(env, "bpf verifier is misconfigured\n");
16433 			return -EINVAL;
16434 		}
16435 
16436 		ret = map_ptr->ops->map_poke_track(map_ptr, prog->aux);
16437 		if (ret < 0) {
16438 			verbose(env, "tracking tail call prog failed\n");
16439 			return ret;
16440 		}
16441 	}
16442 
16443 	sort_kfunc_descs_by_imm(env->prog);
16444 
16445 	return 0;
16446 }
16447 
16448 static struct bpf_prog *inline_bpf_loop(struct bpf_verifier_env *env,
16449 					int position,
16450 					s32 stack_base,
16451 					u32 callback_subprogno,
16452 					u32 *cnt)
16453 {
16454 	s32 r6_offset = stack_base + 0 * BPF_REG_SIZE;
16455 	s32 r7_offset = stack_base + 1 * BPF_REG_SIZE;
16456 	s32 r8_offset = stack_base + 2 * BPF_REG_SIZE;
16457 	int reg_loop_max = BPF_REG_6;
16458 	int reg_loop_cnt = BPF_REG_7;
16459 	int reg_loop_ctx = BPF_REG_8;
16460 
16461 	struct bpf_prog *new_prog;
16462 	u32 callback_start;
16463 	u32 call_insn_offset;
16464 	s32 callback_offset;
16465 
16466 	/* This represents an inlined version of bpf_iter.c:bpf_loop,
16467 	 * be careful to modify this code in sync.
16468 	 */
16469 	struct bpf_insn insn_buf[] = {
16470 		/* Return error and jump to the end of the patch if
16471 		 * expected number of iterations is too big.
16472 		 */
16473 		BPF_JMP_IMM(BPF_JLE, BPF_REG_1, BPF_MAX_LOOPS, 2),
16474 		BPF_MOV32_IMM(BPF_REG_0, -E2BIG),
16475 		BPF_JMP_IMM(BPF_JA, 0, 0, 16),
16476 		/* spill R6, R7, R8 to use these as loop vars */
16477 		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, r6_offset),
16478 		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_7, r7_offset),
16479 		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_8, r8_offset),
16480 		/* initialize loop vars */
16481 		BPF_MOV64_REG(reg_loop_max, BPF_REG_1),
16482 		BPF_MOV32_IMM(reg_loop_cnt, 0),
16483 		BPF_MOV64_REG(reg_loop_ctx, BPF_REG_3),
16484 		/* loop header,
16485 		 * if reg_loop_cnt >= reg_loop_max skip the loop body
16486 		 */
16487 		BPF_JMP_REG(BPF_JGE, reg_loop_cnt, reg_loop_max, 5),
16488 		/* callback call,
16489 		 * correct callback offset would be set after patching
16490 		 */
16491 		BPF_MOV64_REG(BPF_REG_1, reg_loop_cnt),
16492 		BPF_MOV64_REG(BPF_REG_2, reg_loop_ctx),
16493 		BPF_CALL_REL(0),
16494 		/* increment loop counter */
16495 		BPF_ALU64_IMM(BPF_ADD, reg_loop_cnt, 1),
16496 		/* jump to loop header if callback returned 0 */
16497 		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -6),
16498 		/* return value of bpf_loop,
16499 		 * set R0 to the number of iterations
16500 		 */
16501 		BPF_MOV64_REG(BPF_REG_0, reg_loop_cnt),
16502 		/* restore original values of R6, R7, R8 */
16503 		BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, r6_offset),
16504 		BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_10, r7_offset),
16505 		BPF_LDX_MEM(BPF_DW, BPF_REG_8, BPF_REG_10, r8_offset),
16506 	};
16507 
16508 	*cnt = ARRAY_SIZE(insn_buf);
16509 	new_prog = bpf_patch_insn_data(env, position, insn_buf, *cnt);
16510 	if (!new_prog)
16511 		return new_prog;
16512 
16513 	/* callback start is known only after patching */
16514 	callback_start = env->subprog_info[callback_subprogno].start;
16515 	/* Note: insn_buf[12] is an offset of BPF_CALL_REL instruction */
16516 	call_insn_offset = position + 12;
16517 	callback_offset = callback_start - call_insn_offset - 1;
16518 	new_prog->insnsi[call_insn_offset].imm = callback_offset;
16519 
16520 	return new_prog;
16521 }
16522 
16523 static bool is_bpf_loop_call(struct bpf_insn *insn)
16524 {
16525 	return insn->code == (BPF_JMP | BPF_CALL) &&
16526 		insn->src_reg == 0 &&
16527 		insn->imm == BPF_FUNC_loop;
16528 }
16529 
16530 /* For all sub-programs in the program (including main) check
16531  * insn_aux_data to see if there are bpf_loop calls that require
16532  * inlining. If such calls are found the calls are replaced with a
16533  * sequence of instructions produced by `inline_bpf_loop` function and
16534  * subprog stack_depth is increased by the size of 3 registers.
16535  * This stack space is used to spill values of the R6, R7, R8.  These
16536  * registers are used to store the loop bound, counter and context
16537  * variables.
16538  */
16539 static int optimize_bpf_loop(struct bpf_verifier_env *env)
16540 {
16541 	struct bpf_subprog_info *subprogs = env->subprog_info;
16542 	int i, cur_subprog = 0, cnt, delta = 0;
16543 	struct bpf_insn *insn = env->prog->insnsi;
16544 	int insn_cnt = env->prog->len;
16545 	u16 stack_depth = subprogs[cur_subprog].stack_depth;
16546 	u16 stack_depth_roundup = round_up(stack_depth, 8) - stack_depth;
16547 	u16 stack_depth_extra = 0;
16548 
16549 	for (i = 0; i < insn_cnt; i++, insn++) {
16550 		struct bpf_loop_inline_state *inline_state =
16551 			&env->insn_aux_data[i + delta].loop_inline_state;
16552 
16553 		if (is_bpf_loop_call(insn) && inline_state->fit_for_inline) {
16554 			struct bpf_prog *new_prog;
16555 
16556 			stack_depth_extra = BPF_REG_SIZE * 3 + stack_depth_roundup;
16557 			new_prog = inline_bpf_loop(env,
16558 						   i + delta,
16559 						   -(stack_depth + stack_depth_extra),
16560 						   inline_state->callback_subprogno,
16561 						   &cnt);
16562 			if (!new_prog)
16563 				return -ENOMEM;
16564 
16565 			delta     += cnt - 1;
16566 			env->prog  = new_prog;
16567 			insn       = new_prog->insnsi + i + delta;
16568 		}
16569 
16570 		if (subprogs[cur_subprog + 1].start == i + delta + 1) {
16571 			subprogs[cur_subprog].stack_depth += stack_depth_extra;
16572 			cur_subprog++;
16573 			stack_depth = subprogs[cur_subprog].stack_depth;
16574 			stack_depth_roundup = round_up(stack_depth, 8) - stack_depth;
16575 			stack_depth_extra = 0;
16576 		}
16577 	}
16578 
16579 	env->prog->aux->stack_depth = env->subprog_info[0].stack_depth;
16580 
16581 	return 0;
16582 }
16583 
16584 static void free_states(struct bpf_verifier_env *env)
16585 {
16586 	struct bpf_verifier_state_list *sl, *sln;
16587 	int i;
16588 
16589 	sl = env->free_list;
16590 	while (sl) {
16591 		sln = sl->next;
16592 		free_verifier_state(&sl->state, false);
16593 		kfree(sl);
16594 		sl = sln;
16595 	}
16596 	env->free_list = NULL;
16597 
16598 	if (!env->explored_states)
16599 		return;
16600 
16601 	for (i = 0; i < state_htab_size(env); i++) {
16602 		sl = env->explored_states[i];
16603 
16604 		while (sl) {
16605 			sln = sl->next;
16606 			free_verifier_state(&sl->state, false);
16607 			kfree(sl);
16608 			sl = sln;
16609 		}
16610 		env->explored_states[i] = NULL;
16611 	}
16612 }
16613 
16614 static int do_check_common(struct bpf_verifier_env *env, int subprog)
16615 {
16616 	bool pop_log = !(env->log.level & BPF_LOG_LEVEL2);
16617 	struct bpf_verifier_state *state;
16618 	struct bpf_reg_state *regs;
16619 	int ret, i;
16620 
16621 	env->prev_linfo = NULL;
16622 	env->pass_cnt++;
16623 
16624 	state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL);
16625 	if (!state)
16626 		return -ENOMEM;
16627 	state->curframe = 0;
16628 	state->speculative = false;
16629 	state->branches = 1;
16630 	state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL);
16631 	if (!state->frame[0]) {
16632 		kfree(state);
16633 		return -ENOMEM;
16634 	}
16635 	env->cur_state = state;
16636 	init_func_state(env, state->frame[0],
16637 			BPF_MAIN_FUNC /* callsite */,
16638 			0 /* frameno */,
16639 			subprog);
16640 	state->first_insn_idx = env->subprog_info[subprog].start;
16641 	state->last_insn_idx = -1;
16642 
16643 	regs = state->frame[state->curframe]->regs;
16644 	if (subprog || env->prog->type == BPF_PROG_TYPE_EXT) {
16645 		ret = btf_prepare_func_args(env, subprog, regs);
16646 		if (ret)
16647 			goto out;
16648 		for (i = BPF_REG_1; i <= BPF_REG_5; i++) {
16649 			if (regs[i].type == PTR_TO_CTX)
16650 				mark_reg_known_zero(env, regs, i);
16651 			else if (regs[i].type == SCALAR_VALUE)
16652 				mark_reg_unknown(env, regs, i);
16653 			else if (base_type(regs[i].type) == PTR_TO_MEM) {
16654 				const u32 mem_size = regs[i].mem_size;
16655 
16656 				mark_reg_known_zero(env, regs, i);
16657 				regs[i].mem_size = mem_size;
16658 				regs[i].id = ++env->id_gen;
16659 			}
16660 		}
16661 	} else {
16662 		/* 1st arg to a function */
16663 		regs[BPF_REG_1].type = PTR_TO_CTX;
16664 		mark_reg_known_zero(env, regs, BPF_REG_1);
16665 		ret = btf_check_subprog_arg_match(env, subprog, regs);
16666 		if (ret == -EFAULT)
16667 			/* unlikely verifier bug. abort.
16668 			 * ret == 0 and ret < 0 are sadly acceptable for
16669 			 * main() function due to backward compatibility.
16670 			 * Like socket filter program may be written as:
16671 			 * int bpf_prog(struct pt_regs *ctx)
16672 			 * and never dereference that ctx in the program.
16673 			 * 'struct pt_regs' is a type mismatch for socket
16674 			 * filter that should be using 'struct __sk_buff'.
16675 			 */
16676 			goto out;
16677 	}
16678 
16679 	ret = do_check(env);
16680 out:
16681 	/* check for NULL is necessary, since cur_state can be freed inside
16682 	 * do_check() under memory pressure.
16683 	 */
16684 	if (env->cur_state) {
16685 		free_verifier_state(env->cur_state, true);
16686 		env->cur_state = NULL;
16687 	}
16688 	while (!pop_stack(env, NULL, NULL, false));
16689 	if (!ret && pop_log)
16690 		bpf_vlog_reset(&env->log, 0);
16691 	free_states(env);
16692 	return ret;
16693 }
16694 
16695 /* Verify all global functions in a BPF program one by one based on their BTF.
16696  * All global functions must pass verification. Otherwise the whole program is rejected.
16697  * Consider:
16698  * int bar(int);
16699  * int foo(int f)
16700  * {
16701  *    return bar(f);
16702  * }
16703  * int bar(int b)
16704  * {
16705  *    ...
16706  * }
16707  * foo() will be verified first for R1=any_scalar_value. During verification it
16708  * will be assumed that bar() already verified successfully and call to bar()
16709  * from foo() will be checked for type match only. Later bar() will be verified
16710  * independently to check that it's safe for R1=any_scalar_value.
16711  */
16712 static int do_check_subprogs(struct bpf_verifier_env *env)
16713 {
16714 	struct bpf_prog_aux *aux = env->prog->aux;
16715 	int i, ret;
16716 
16717 	if (!aux->func_info)
16718 		return 0;
16719 
16720 	for (i = 1; i < env->subprog_cnt; i++) {
16721 		if (aux->func_info_aux[i].linkage != BTF_FUNC_GLOBAL)
16722 			continue;
16723 		env->insn_idx = env->subprog_info[i].start;
16724 		WARN_ON_ONCE(env->insn_idx == 0);
16725 		ret = do_check_common(env, i);
16726 		if (ret) {
16727 			return ret;
16728 		} else if (env->log.level & BPF_LOG_LEVEL) {
16729 			verbose(env,
16730 				"Func#%d is safe for any args that match its prototype\n",
16731 				i);
16732 		}
16733 	}
16734 	return 0;
16735 }
16736 
16737 static int do_check_main(struct bpf_verifier_env *env)
16738 {
16739 	int ret;
16740 
16741 	env->insn_idx = 0;
16742 	ret = do_check_common(env, 0);
16743 	if (!ret)
16744 		env->prog->aux->stack_depth = env->subprog_info[0].stack_depth;
16745 	return ret;
16746 }
16747 
16748 
16749 static void print_verification_stats(struct bpf_verifier_env *env)
16750 {
16751 	int i;
16752 
16753 	if (env->log.level & BPF_LOG_STATS) {
16754 		verbose(env, "verification time %lld usec\n",
16755 			div_u64(env->verification_time, 1000));
16756 		verbose(env, "stack depth ");
16757 		for (i = 0; i < env->subprog_cnt; i++) {
16758 			u32 depth = env->subprog_info[i].stack_depth;
16759 
16760 			verbose(env, "%d", depth);
16761 			if (i + 1 < env->subprog_cnt)
16762 				verbose(env, "+");
16763 		}
16764 		verbose(env, "\n");
16765 	}
16766 	verbose(env, "processed %d insns (limit %d) max_states_per_insn %d "
16767 		"total_states %d peak_states %d mark_read %d\n",
16768 		env->insn_processed, BPF_COMPLEXITY_LIMIT_INSNS,
16769 		env->max_states_per_insn, env->total_states,
16770 		env->peak_states, env->longest_mark_read_walk);
16771 }
16772 
16773 static int check_struct_ops_btf_id(struct bpf_verifier_env *env)
16774 {
16775 	const struct btf_type *t, *func_proto;
16776 	const struct bpf_struct_ops *st_ops;
16777 	const struct btf_member *member;
16778 	struct bpf_prog *prog = env->prog;
16779 	u32 btf_id, member_idx;
16780 	const char *mname;
16781 
16782 	if (!prog->gpl_compatible) {
16783 		verbose(env, "struct ops programs must have a GPL compatible license\n");
16784 		return -EINVAL;
16785 	}
16786 
16787 	btf_id = prog->aux->attach_btf_id;
16788 	st_ops = bpf_struct_ops_find(btf_id);
16789 	if (!st_ops) {
16790 		verbose(env, "attach_btf_id %u is not a supported struct\n",
16791 			btf_id);
16792 		return -ENOTSUPP;
16793 	}
16794 
16795 	t = st_ops->type;
16796 	member_idx = prog->expected_attach_type;
16797 	if (member_idx >= btf_type_vlen(t)) {
16798 		verbose(env, "attach to invalid member idx %u of struct %s\n",
16799 			member_idx, st_ops->name);
16800 		return -EINVAL;
16801 	}
16802 
16803 	member = &btf_type_member(t)[member_idx];
16804 	mname = btf_name_by_offset(btf_vmlinux, member->name_off);
16805 	func_proto = btf_type_resolve_func_ptr(btf_vmlinux, member->type,
16806 					       NULL);
16807 	if (!func_proto) {
16808 		verbose(env, "attach to invalid member %s(@idx %u) of struct %s\n",
16809 			mname, member_idx, st_ops->name);
16810 		return -EINVAL;
16811 	}
16812 
16813 	if (st_ops->check_member) {
16814 		int err = st_ops->check_member(t, member, prog);
16815 
16816 		if (err) {
16817 			verbose(env, "attach to unsupported member %s of struct %s\n",
16818 				mname, st_ops->name);
16819 			return err;
16820 		}
16821 	}
16822 
16823 	prog->aux->attach_func_proto = func_proto;
16824 	prog->aux->attach_func_name = mname;
16825 	env->ops = st_ops->verifier_ops;
16826 
16827 	return 0;
16828 }
16829 #define SECURITY_PREFIX "security_"
16830 
16831 static int check_attach_modify_return(unsigned long addr, const char *func_name)
16832 {
16833 	if (within_error_injection_list(addr) ||
16834 	    !strncmp(SECURITY_PREFIX, func_name, sizeof(SECURITY_PREFIX) - 1))
16835 		return 0;
16836 
16837 	return -EINVAL;
16838 }
16839 
16840 /* list of non-sleepable functions that are otherwise on
16841  * ALLOW_ERROR_INJECTION list
16842  */
16843 BTF_SET_START(btf_non_sleepable_error_inject)
16844 /* Three functions below can be called from sleepable and non-sleepable context.
16845  * Assume non-sleepable from bpf safety point of view.
16846  */
16847 BTF_ID(func, __filemap_add_folio)
16848 BTF_ID(func, should_fail_alloc_page)
16849 BTF_ID(func, should_failslab)
16850 BTF_SET_END(btf_non_sleepable_error_inject)
16851 
16852 static int check_non_sleepable_error_inject(u32 btf_id)
16853 {
16854 	return btf_id_set_contains(&btf_non_sleepable_error_inject, btf_id);
16855 }
16856 
16857 int bpf_check_attach_target(struct bpf_verifier_log *log,
16858 			    const struct bpf_prog *prog,
16859 			    const struct bpf_prog *tgt_prog,
16860 			    u32 btf_id,
16861 			    struct bpf_attach_target_info *tgt_info)
16862 {
16863 	bool prog_extension = prog->type == BPF_PROG_TYPE_EXT;
16864 	const char prefix[] = "btf_trace_";
16865 	int ret = 0, subprog = -1, i;
16866 	const struct btf_type *t;
16867 	bool conservative = true;
16868 	const char *tname;
16869 	struct btf *btf;
16870 	long addr = 0;
16871 
16872 	if (!btf_id) {
16873 		bpf_log(log, "Tracing programs must provide btf_id\n");
16874 		return -EINVAL;
16875 	}
16876 	btf = tgt_prog ? tgt_prog->aux->btf : prog->aux->attach_btf;
16877 	if (!btf) {
16878 		bpf_log(log,
16879 			"FENTRY/FEXIT program can only be attached to another program annotated with BTF\n");
16880 		return -EINVAL;
16881 	}
16882 	t = btf_type_by_id(btf, btf_id);
16883 	if (!t) {
16884 		bpf_log(log, "attach_btf_id %u is invalid\n", btf_id);
16885 		return -EINVAL;
16886 	}
16887 	tname = btf_name_by_offset(btf, t->name_off);
16888 	if (!tname) {
16889 		bpf_log(log, "attach_btf_id %u doesn't have a name\n", btf_id);
16890 		return -EINVAL;
16891 	}
16892 	if (tgt_prog) {
16893 		struct bpf_prog_aux *aux = tgt_prog->aux;
16894 
16895 		if (bpf_prog_is_dev_bound(prog->aux) &&
16896 		    !bpf_prog_dev_bound_match(prog, tgt_prog)) {
16897 			bpf_log(log, "Target program bound device mismatch");
16898 			return -EINVAL;
16899 		}
16900 
16901 		for (i = 0; i < aux->func_info_cnt; i++)
16902 			if (aux->func_info[i].type_id == btf_id) {
16903 				subprog = i;
16904 				break;
16905 			}
16906 		if (subprog == -1) {
16907 			bpf_log(log, "Subprog %s doesn't exist\n", tname);
16908 			return -EINVAL;
16909 		}
16910 		conservative = aux->func_info_aux[subprog].unreliable;
16911 		if (prog_extension) {
16912 			if (conservative) {
16913 				bpf_log(log,
16914 					"Cannot replace static functions\n");
16915 				return -EINVAL;
16916 			}
16917 			if (!prog->jit_requested) {
16918 				bpf_log(log,
16919 					"Extension programs should be JITed\n");
16920 				return -EINVAL;
16921 			}
16922 		}
16923 		if (!tgt_prog->jited) {
16924 			bpf_log(log, "Can attach to only JITed progs\n");
16925 			return -EINVAL;
16926 		}
16927 		if (tgt_prog->type == prog->type) {
16928 			/* Cannot fentry/fexit another fentry/fexit program.
16929 			 * Cannot attach program extension to another extension.
16930 			 * It's ok to attach fentry/fexit to extension program.
16931 			 */
16932 			bpf_log(log, "Cannot recursively attach\n");
16933 			return -EINVAL;
16934 		}
16935 		if (tgt_prog->type == BPF_PROG_TYPE_TRACING &&
16936 		    prog_extension &&
16937 		    (tgt_prog->expected_attach_type == BPF_TRACE_FENTRY ||
16938 		     tgt_prog->expected_attach_type == BPF_TRACE_FEXIT)) {
16939 			/* Program extensions can extend all program types
16940 			 * except fentry/fexit. The reason is the following.
16941 			 * The fentry/fexit programs are used for performance
16942 			 * analysis, stats and can be attached to any program
16943 			 * type except themselves. When extension program is
16944 			 * replacing XDP function it is necessary to allow
16945 			 * performance analysis of all functions. Both original
16946 			 * XDP program and its program extension. Hence
16947 			 * attaching fentry/fexit to BPF_PROG_TYPE_EXT is
16948 			 * allowed. If extending of fentry/fexit was allowed it
16949 			 * would be possible to create long call chain
16950 			 * fentry->extension->fentry->extension beyond
16951 			 * reasonable stack size. Hence extending fentry is not
16952 			 * allowed.
16953 			 */
16954 			bpf_log(log, "Cannot extend fentry/fexit\n");
16955 			return -EINVAL;
16956 		}
16957 	} else {
16958 		if (prog_extension) {
16959 			bpf_log(log, "Cannot replace kernel functions\n");
16960 			return -EINVAL;
16961 		}
16962 	}
16963 
16964 	switch (prog->expected_attach_type) {
16965 	case BPF_TRACE_RAW_TP:
16966 		if (tgt_prog) {
16967 			bpf_log(log,
16968 				"Only FENTRY/FEXIT progs are attachable to another BPF prog\n");
16969 			return -EINVAL;
16970 		}
16971 		if (!btf_type_is_typedef(t)) {
16972 			bpf_log(log, "attach_btf_id %u is not a typedef\n",
16973 				btf_id);
16974 			return -EINVAL;
16975 		}
16976 		if (strncmp(prefix, tname, sizeof(prefix) - 1)) {
16977 			bpf_log(log, "attach_btf_id %u points to wrong type name %s\n",
16978 				btf_id, tname);
16979 			return -EINVAL;
16980 		}
16981 		tname += sizeof(prefix) - 1;
16982 		t = btf_type_by_id(btf, t->type);
16983 		if (!btf_type_is_ptr(t))
16984 			/* should never happen in valid vmlinux build */
16985 			return -EINVAL;
16986 		t = btf_type_by_id(btf, t->type);
16987 		if (!btf_type_is_func_proto(t))
16988 			/* should never happen in valid vmlinux build */
16989 			return -EINVAL;
16990 
16991 		break;
16992 	case BPF_TRACE_ITER:
16993 		if (!btf_type_is_func(t)) {
16994 			bpf_log(log, "attach_btf_id %u is not a function\n",
16995 				btf_id);
16996 			return -EINVAL;
16997 		}
16998 		t = btf_type_by_id(btf, t->type);
16999 		if (!btf_type_is_func_proto(t))
17000 			return -EINVAL;
17001 		ret = btf_distill_func_proto(log, btf, t, tname, &tgt_info->fmodel);
17002 		if (ret)
17003 			return ret;
17004 		break;
17005 	default:
17006 		if (!prog_extension)
17007 			return -EINVAL;
17008 		fallthrough;
17009 	case BPF_MODIFY_RETURN:
17010 	case BPF_LSM_MAC:
17011 	case BPF_LSM_CGROUP:
17012 	case BPF_TRACE_FENTRY:
17013 	case BPF_TRACE_FEXIT:
17014 		if (!btf_type_is_func(t)) {
17015 			bpf_log(log, "attach_btf_id %u is not a function\n",
17016 				btf_id);
17017 			return -EINVAL;
17018 		}
17019 		if (prog_extension &&
17020 		    btf_check_type_match(log, prog, btf, t))
17021 			return -EINVAL;
17022 		t = btf_type_by_id(btf, t->type);
17023 		if (!btf_type_is_func_proto(t))
17024 			return -EINVAL;
17025 
17026 		if ((prog->aux->saved_dst_prog_type || prog->aux->saved_dst_attach_type) &&
17027 		    (!tgt_prog || prog->aux->saved_dst_prog_type != tgt_prog->type ||
17028 		     prog->aux->saved_dst_attach_type != tgt_prog->expected_attach_type))
17029 			return -EINVAL;
17030 
17031 		if (tgt_prog && conservative)
17032 			t = NULL;
17033 
17034 		ret = btf_distill_func_proto(log, btf, t, tname, &tgt_info->fmodel);
17035 		if (ret < 0)
17036 			return ret;
17037 
17038 		if (tgt_prog) {
17039 			if (subprog == 0)
17040 				addr = (long) tgt_prog->bpf_func;
17041 			else
17042 				addr = (long) tgt_prog->aux->func[subprog]->bpf_func;
17043 		} else {
17044 			addr = kallsyms_lookup_name(tname);
17045 			if (!addr) {
17046 				bpf_log(log,
17047 					"The address of function %s cannot be found\n",
17048 					tname);
17049 				return -ENOENT;
17050 			}
17051 		}
17052 
17053 		if (prog->aux->sleepable) {
17054 			ret = -EINVAL;
17055 			switch (prog->type) {
17056 			case BPF_PROG_TYPE_TRACING:
17057 
17058 				/* fentry/fexit/fmod_ret progs can be sleepable if they are
17059 				 * attached to ALLOW_ERROR_INJECTION and are not in denylist.
17060 				 */
17061 				if (!check_non_sleepable_error_inject(btf_id) &&
17062 				    within_error_injection_list(addr))
17063 					ret = 0;
17064 				/* fentry/fexit/fmod_ret progs can also be sleepable if they are
17065 				 * in the fmodret id set with the KF_SLEEPABLE flag.
17066 				 */
17067 				else {
17068 					u32 *flags = btf_kfunc_is_modify_return(btf, btf_id);
17069 
17070 					if (flags && (*flags & KF_SLEEPABLE))
17071 						ret = 0;
17072 				}
17073 				break;
17074 			case BPF_PROG_TYPE_LSM:
17075 				/* LSM progs check that they are attached to bpf_lsm_*() funcs.
17076 				 * Only some of them are sleepable.
17077 				 */
17078 				if (bpf_lsm_is_sleepable_hook(btf_id))
17079 					ret = 0;
17080 				break;
17081 			default:
17082 				break;
17083 			}
17084 			if (ret) {
17085 				bpf_log(log, "%s is not sleepable\n", tname);
17086 				return ret;
17087 			}
17088 		} else if (prog->expected_attach_type == BPF_MODIFY_RETURN) {
17089 			if (tgt_prog) {
17090 				bpf_log(log, "can't modify return codes of BPF programs\n");
17091 				return -EINVAL;
17092 			}
17093 			ret = -EINVAL;
17094 			if (btf_kfunc_is_modify_return(btf, btf_id) ||
17095 			    !check_attach_modify_return(addr, tname))
17096 				ret = 0;
17097 			if (ret) {
17098 				bpf_log(log, "%s() is not modifiable\n", tname);
17099 				return ret;
17100 			}
17101 		}
17102 
17103 		break;
17104 	}
17105 	tgt_info->tgt_addr = addr;
17106 	tgt_info->tgt_name = tname;
17107 	tgt_info->tgt_type = t;
17108 	return 0;
17109 }
17110 
17111 BTF_SET_START(btf_id_deny)
17112 BTF_ID_UNUSED
17113 #ifdef CONFIG_SMP
17114 BTF_ID(func, migrate_disable)
17115 BTF_ID(func, migrate_enable)
17116 #endif
17117 #if !defined CONFIG_PREEMPT_RCU && !defined CONFIG_TINY_RCU
17118 BTF_ID(func, rcu_read_unlock_strict)
17119 #endif
17120 BTF_SET_END(btf_id_deny)
17121 
17122 static bool can_be_sleepable(struct bpf_prog *prog)
17123 {
17124 	if (prog->type == BPF_PROG_TYPE_TRACING) {
17125 		switch (prog->expected_attach_type) {
17126 		case BPF_TRACE_FENTRY:
17127 		case BPF_TRACE_FEXIT:
17128 		case BPF_MODIFY_RETURN:
17129 		case BPF_TRACE_ITER:
17130 			return true;
17131 		default:
17132 			return false;
17133 		}
17134 	}
17135 	return prog->type == BPF_PROG_TYPE_LSM ||
17136 	       prog->type == BPF_PROG_TYPE_KPROBE /* only for uprobes */ ||
17137 	       prog->type == BPF_PROG_TYPE_STRUCT_OPS;
17138 }
17139 
17140 static int check_attach_btf_id(struct bpf_verifier_env *env)
17141 {
17142 	struct bpf_prog *prog = env->prog;
17143 	struct bpf_prog *tgt_prog = prog->aux->dst_prog;
17144 	struct bpf_attach_target_info tgt_info = {};
17145 	u32 btf_id = prog->aux->attach_btf_id;
17146 	struct bpf_trampoline *tr;
17147 	int ret;
17148 	u64 key;
17149 
17150 	if (prog->type == BPF_PROG_TYPE_SYSCALL) {
17151 		if (prog->aux->sleepable)
17152 			/* attach_btf_id checked to be zero already */
17153 			return 0;
17154 		verbose(env, "Syscall programs can only be sleepable\n");
17155 		return -EINVAL;
17156 	}
17157 
17158 	if (prog->aux->sleepable && !can_be_sleepable(prog)) {
17159 		verbose(env, "Only fentry/fexit/fmod_ret, lsm, iter, uprobe, and struct_ops programs can be sleepable\n");
17160 		return -EINVAL;
17161 	}
17162 
17163 	if (prog->type == BPF_PROG_TYPE_STRUCT_OPS)
17164 		return check_struct_ops_btf_id(env);
17165 
17166 	if (prog->type != BPF_PROG_TYPE_TRACING &&
17167 	    prog->type != BPF_PROG_TYPE_LSM &&
17168 	    prog->type != BPF_PROG_TYPE_EXT)
17169 		return 0;
17170 
17171 	ret = bpf_check_attach_target(&env->log, prog, tgt_prog, btf_id, &tgt_info);
17172 	if (ret)
17173 		return ret;
17174 
17175 	if (tgt_prog && prog->type == BPF_PROG_TYPE_EXT) {
17176 		/* to make freplace equivalent to their targets, they need to
17177 		 * inherit env->ops and expected_attach_type for the rest of the
17178 		 * verification
17179 		 */
17180 		env->ops = bpf_verifier_ops[tgt_prog->type];
17181 		prog->expected_attach_type = tgt_prog->expected_attach_type;
17182 	}
17183 
17184 	/* store info about the attachment target that will be used later */
17185 	prog->aux->attach_func_proto = tgt_info.tgt_type;
17186 	prog->aux->attach_func_name = tgt_info.tgt_name;
17187 
17188 	if (tgt_prog) {
17189 		prog->aux->saved_dst_prog_type = tgt_prog->type;
17190 		prog->aux->saved_dst_attach_type = tgt_prog->expected_attach_type;
17191 	}
17192 
17193 	if (prog->expected_attach_type == BPF_TRACE_RAW_TP) {
17194 		prog->aux->attach_btf_trace = true;
17195 		return 0;
17196 	} else if (prog->expected_attach_type == BPF_TRACE_ITER) {
17197 		if (!bpf_iter_prog_supported(prog))
17198 			return -EINVAL;
17199 		return 0;
17200 	}
17201 
17202 	if (prog->type == BPF_PROG_TYPE_LSM) {
17203 		ret = bpf_lsm_verify_prog(&env->log, prog);
17204 		if (ret < 0)
17205 			return ret;
17206 	} else if (prog->type == BPF_PROG_TYPE_TRACING &&
17207 		   btf_id_set_contains(&btf_id_deny, btf_id)) {
17208 		return -EINVAL;
17209 	}
17210 
17211 	key = bpf_trampoline_compute_key(tgt_prog, prog->aux->attach_btf, btf_id);
17212 	tr = bpf_trampoline_get(key, &tgt_info);
17213 	if (!tr)
17214 		return -ENOMEM;
17215 
17216 	prog->aux->dst_trampoline = tr;
17217 	return 0;
17218 }
17219 
17220 struct btf *bpf_get_btf_vmlinux(void)
17221 {
17222 	if (!btf_vmlinux && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) {
17223 		mutex_lock(&bpf_verifier_lock);
17224 		if (!btf_vmlinux)
17225 			btf_vmlinux = btf_parse_vmlinux();
17226 		mutex_unlock(&bpf_verifier_lock);
17227 	}
17228 	return btf_vmlinux;
17229 }
17230 
17231 int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr)
17232 {
17233 	u64 start_time = ktime_get_ns();
17234 	struct bpf_verifier_env *env;
17235 	struct bpf_verifier_log *log;
17236 	int i, len, ret = -EINVAL;
17237 	bool is_priv;
17238 
17239 	/* no program is valid */
17240 	if (ARRAY_SIZE(bpf_verifier_ops) == 0)
17241 		return -EINVAL;
17242 
17243 	/* 'struct bpf_verifier_env' can be global, but since it's not small,
17244 	 * allocate/free it every time bpf_check() is called
17245 	 */
17246 	env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
17247 	if (!env)
17248 		return -ENOMEM;
17249 	log = &env->log;
17250 
17251 	len = (*prog)->len;
17252 	env->insn_aux_data =
17253 		vzalloc(array_size(sizeof(struct bpf_insn_aux_data), len));
17254 	ret = -ENOMEM;
17255 	if (!env->insn_aux_data)
17256 		goto err_free_env;
17257 	for (i = 0; i < len; i++)
17258 		env->insn_aux_data[i].orig_idx = i;
17259 	env->prog = *prog;
17260 	env->ops = bpf_verifier_ops[env->prog->type];
17261 	env->fd_array = make_bpfptr(attr->fd_array, uattr.is_kernel);
17262 	is_priv = bpf_capable();
17263 
17264 	bpf_get_btf_vmlinux();
17265 
17266 	/* grab the mutex to protect few globals used by verifier */
17267 	if (!is_priv)
17268 		mutex_lock(&bpf_verifier_lock);
17269 
17270 	if (attr->log_level || attr->log_buf || attr->log_size) {
17271 		/* user requested verbose verifier output
17272 		 * and supplied buffer to store the verification trace
17273 		 */
17274 		log->level = attr->log_level;
17275 		log->ubuf = (char __user *) (unsigned long) attr->log_buf;
17276 		log->len_total = attr->log_size;
17277 
17278 		/* log attributes have to be sane */
17279 		if (!bpf_verifier_log_attr_valid(log)) {
17280 			ret = -EINVAL;
17281 			goto err_unlock;
17282 		}
17283 	}
17284 
17285 	mark_verifier_state_clean(env);
17286 
17287 	if (IS_ERR(btf_vmlinux)) {
17288 		/* Either gcc or pahole or kernel are broken. */
17289 		verbose(env, "in-kernel BTF is malformed\n");
17290 		ret = PTR_ERR(btf_vmlinux);
17291 		goto skip_full_check;
17292 	}
17293 
17294 	env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT);
17295 	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
17296 		env->strict_alignment = true;
17297 	if (attr->prog_flags & BPF_F_ANY_ALIGNMENT)
17298 		env->strict_alignment = false;
17299 
17300 	env->allow_ptr_leaks = bpf_allow_ptr_leaks();
17301 	env->allow_uninit_stack = bpf_allow_uninit_stack();
17302 	env->bypass_spec_v1 = bpf_bypass_spec_v1();
17303 	env->bypass_spec_v4 = bpf_bypass_spec_v4();
17304 	env->bpf_capable = bpf_capable();
17305 	env->rcu_tag_supported = btf_vmlinux &&
17306 		btf_find_by_name_kind(btf_vmlinux, "rcu", BTF_KIND_TYPE_TAG) > 0;
17307 
17308 	if (is_priv)
17309 		env->test_state_freq = attr->prog_flags & BPF_F_TEST_STATE_FREQ;
17310 
17311 	env->explored_states = kvcalloc(state_htab_size(env),
17312 				       sizeof(struct bpf_verifier_state_list *),
17313 				       GFP_USER);
17314 	ret = -ENOMEM;
17315 	if (!env->explored_states)
17316 		goto skip_full_check;
17317 
17318 	ret = add_subprog_and_kfunc(env);
17319 	if (ret < 0)
17320 		goto skip_full_check;
17321 
17322 	ret = check_subprogs(env);
17323 	if (ret < 0)
17324 		goto skip_full_check;
17325 
17326 	ret = check_btf_info(env, attr, uattr);
17327 	if (ret < 0)
17328 		goto skip_full_check;
17329 
17330 	ret = check_attach_btf_id(env);
17331 	if (ret)
17332 		goto skip_full_check;
17333 
17334 	ret = resolve_pseudo_ldimm64(env);
17335 	if (ret < 0)
17336 		goto skip_full_check;
17337 
17338 	if (bpf_prog_is_offloaded(env->prog->aux)) {
17339 		ret = bpf_prog_offload_verifier_prep(env->prog);
17340 		if (ret)
17341 			goto skip_full_check;
17342 	}
17343 
17344 	ret = check_cfg(env);
17345 	if (ret < 0)
17346 		goto skip_full_check;
17347 
17348 	ret = do_check_subprogs(env);
17349 	ret = ret ?: do_check_main(env);
17350 
17351 	if (ret == 0 && bpf_prog_is_offloaded(env->prog->aux))
17352 		ret = bpf_prog_offload_finalize(env);
17353 
17354 skip_full_check:
17355 	kvfree(env->explored_states);
17356 
17357 	if (ret == 0)
17358 		ret = check_max_stack_depth(env);
17359 
17360 	/* instruction rewrites happen after this point */
17361 	if (ret == 0)
17362 		ret = optimize_bpf_loop(env);
17363 
17364 	if (is_priv) {
17365 		if (ret == 0)
17366 			opt_hard_wire_dead_code_branches(env);
17367 		if (ret == 0)
17368 			ret = opt_remove_dead_code(env);
17369 		if (ret == 0)
17370 			ret = opt_remove_nops(env);
17371 	} else {
17372 		if (ret == 0)
17373 			sanitize_dead_code(env);
17374 	}
17375 
17376 	if (ret == 0)
17377 		/* program is valid, convert *(u32*)(ctx + off) accesses */
17378 		ret = convert_ctx_accesses(env);
17379 
17380 	if (ret == 0)
17381 		ret = do_misc_fixups(env);
17382 
17383 	/* do 32-bit optimization after insn patching has done so those patched
17384 	 * insns could be handled correctly.
17385 	 */
17386 	if (ret == 0 && !bpf_prog_is_offloaded(env->prog->aux)) {
17387 		ret = opt_subreg_zext_lo32_rnd_hi32(env, attr);
17388 		env->prog->aux->verifier_zext = bpf_jit_needs_zext() ? !ret
17389 								     : false;
17390 	}
17391 
17392 	if (ret == 0)
17393 		ret = fixup_call_args(env);
17394 
17395 	env->verification_time = ktime_get_ns() - start_time;
17396 	print_verification_stats(env);
17397 	env->prog->aux->verified_insns = env->insn_processed;
17398 
17399 	if (log->level && bpf_verifier_log_full(log))
17400 		ret = -ENOSPC;
17401 	if (log->level && !log->ubuf) {
17402 		ret = -EFAULT;
17403 		goto err_release_maps;
17404 	}
17405 
17406 	if (ret)
17407 		goto err_release_maps;
17408 
17409 	if (env->used_map_cnt) {
17410 		/* if program passed verifier, update used_maps in bpf_prog_info */
17411 		env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt,
17412 							  sizeof(env->used_maps[0]),
17413 							  GFP_KERNEL);
17414 
17415 		if (!env->prog->aux->used_maps) {
17416 			ret = -ENOMEM;
17417 			goto err_release_maps;
17418 		}
17419 
17420 		memcpy(env->prog->aux->used_maps, env->used_maps,
17421 		       sizeof(env->used_maps[0]) * env->used_map_cnt);
17422 		env->prog->aux->used_map_cnt = env->used_map_cnt;
17423 	}
17424 	if (env->used_btf_cnt) {
17425 		/* if program passed verifier, update used_btfs in bpf_prog_aux */
17426 		env->prog->aux->used_btfs = kmalloc_array(env->used_btf_cnt,
17427 							  sizeof(env->used_btfs[0]),
17428 							  GFP_KERNEL);
17429 		if (!env->prog->aux->used_btfs) {
17430 			ret = -ENOMEM;
17431 			goto err_release_maps;
17432 		}
17433 
17434 		memcpy(env->prog->aux->used_btfs, env->used_btfs,
17435 		       sizeof(env->used_btfs[0]) * env->used_btf_cnt);
17436 		env->prog->aux->used_btf_cnt = env->used_btf_cnt;
17437 	}
17438 	if (env->used_map_cnt || env->used_btf_cnt) {
17439 		/* program is valid. Convert pseudo bpf_ld_imm64 into generic
17440 		 * bpf_ld_imm64 instructions
17441 		 */
17442 		convert_pseudo_ld_imm64(env);
17443 	}
17444 
17445 	adjust_btf_func(env);
17446 
17447 err_release_maps:
17448 	if (!env->prog->aux->used_maps)
17449 		/* if we didn't copy map pointers into bpf_prog_info, release
17450 		 * them now. Otherwise free_used_maps() will release them.
17451 		 */
17452 		release_maps(env);
17453 	if (!env->prog->aux->used_btfs)
17454 		release_btfs(env);
17455 
17456 	/* extension progs temporarily inherit the attach_type of their targets
17457 	   for verification purposes, so set it back to zero before returning
17458 	 */
17459 	if (env->prog->type == BPF_PROG_TYPE_EXT)
17460 		env->prog->expected_attach_type = 0;
17461 
17462 	*prog = env->prog;
17463 err_unlock:
17464 	if (!is_priv)
17465 		mutex_unlock(&bpf_verifier_lock);
17466 	vfree(env->insn_aux_data);
17467 err_free_env:
17468 	kfree(env);
17469 	return ret;
17470 }
17471