xref: /openbmc/linux/kernel/bpf/verifier.c (revision 8a5aaf97)
1 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
2  * Copyright (c) 2016 Facebook
3  * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of version 2 of the GNU General Public
7  * License as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful, but
10  * WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12  * General Public License for more details.
13  */
14 #include <uapi/linux/btf.h>
15 #include <linux/kernel.h>
16 #include <linux/types.h>
17 #include <linux/slab.h>
18 #include <linux/bpf.h>
19 #include <linux/btf.h>
20 #include <linux/bpf_verifier.h>
21 #include <linux/filter.h>
22 #include <net/netlink.h>
23 #include <linux/file.h>
24 #include <linux/vmalloc.h>
25 #include <linux/stringify.h>
26 #include <linux/bsearch.h>
27 #include <linux/sort.h>
28 #include <linux/perf_event.h>
29 #include <linux/ctype.h>
30 
31 #include "disasm.h"
32 
33 static const struct bpf_verifier_ops * const bpf_verifier_ops[] = {
34 #define BPF_PROG_TYPE(_id, _name) \
35 	[_id] = & _name ## _verifier_ops,
36 #define BPF_MAP_TYPE(_id, _ops)
37 #include <linux/bpf_types.h>
38 #undef BPF_PROG_TYPE
39 #undef BPF_MAP_TYPE
40 };
41 
42 /* bpf_check() is a static code analyzer that walks eBPF program
43  * instruction by instruction and updates register/stack state.
44  * All paths of conditional branches are analyzed until 'bpf_exit' insn.
45  *
46  * The first pass is depth-first-search to check that the program is a DAG.
47  * It rejects the following programs:
48  * - larger than BPF_MAXINSNS insns
49  * - if loop is present (detected via back-edge)
50  * - unreachable insns exist (shouldn't be a forest. program = one function)
51  * - out of bounds or malformed jumps
52  * The second pass descends all possible paths from the 1st insn.
53  * Since it's analyzing all paths through the program, the length of the
54  * analysis is limited to 64k insns, which may be hit even if the total number
55  * of insns is less than 4K but there are too many branches that change stack/regs.
56  * Number of 'branches to be analyzed' is limited to 1k
57  *
58  * On entry to each instruction, each register has a type, and the instruction
59  * changes the types of the registers depending on instruction semantics.
60  * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is
61  * copied to R1.
62  *
63  * All registers are 64-bit.
64  * R0 - return register
65  * R1-R5 argument passing registers
66  * R6-R9 callee saved registers
67  * R10 - frame pointer read-only
68  *
69  * At the start of BPF program the register R1 contains a pointer to bpf_context
70  * and has type PTR_TO_CTX.
71  *
72  * Verifier tracks arithmetic operations on pointers in case:
73  *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
74  *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
75  * 1st insn copies R10 (which has FRAME_PTR) type into R1
76  * and 2nd arithmetic instruction is pattern matched to recognize
77  * that it wants to construct a pointer to some element within stack.
78  * So after 2nd insn, the register R1 has type PTR_TO_STACK
79  * (and -20 constant is saved for further stack bounds checking).
80  * Meaning that this reg is a pointer to stack plus known immediate constant.
81  *
82  * Most of the time the registers have SCALAR_VALUE type, which
83  * means the register has some value, but it's not a valid pointer.
84  * (like pointer plus pointer becomes SCALAR_VALUE type)
85  *
86  * When verifier sees load or store instructions the type of base register
87  * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK, PTR_TO_SOCKET. These are
88  * four pointer types recognized by check_mem_access() function.
89  *
90  * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value'
91  * and the range of [ptr, ptr + map's value_size) is accessible.
92  *
93  * registers used to pass values to function calls are checked against
94  * function argument constraints.
95  *
96  * ARG_PTR_TO_MAP_KEY is one of such argument constraints.
97  * It means that the register type passed to this function must be
98  * PTR_TO_STACK and it will be used inside the function as
99  * 'pointer to map element key'
100  *
101  * For example the argument constraints for bpf_map_lookup_elem():
102  *   .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
103  *   .arg1_type = ARG_CONST_MAP_PTR,
104  *   .arg2_type = ARG_PTR_TO_MAP_KEY,
105  *
106  * ret_type says that this function returns 'pointer to map elem value or null'.
107  * The function expects the 1st argument to be a const pointer to 'struct bpf_map' and
108  * 2nd argument should be a pointer to stack, which will be used inside
109  * the helper function as a pointer to map element key.
110  *
111  * On the kernel side the helper function looks like:
112  * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
113  * {
114  *    struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
115  *    void *key = (void *) (unsigned long) r2;
116  *    void *value;
117  *
118  *    here kernel can access 'key' and 'map' pointers safely, knowing that
119  *    [key, key + map->key_size) bytes are valid and were initialized on
120  *    the stack of eBPF program.
121  * }
122  *
123  * Corresponding eBPF program may look like:
124  *    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),  // after this insn R2 type is FRAME_PTR
125  *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
126  *    BPF_LD_MAP_FD(BPF_REG_1, map_fd),      // after this insn R1 type is CONST_PTR_TO_MAP
127  *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
128  * here verifier looks at prototype of map_lookup_elem() and sees:
129  * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok,
130  * Now verifier knows that this map has key of R1->map_ptr->key_size bytes
131  *
132  * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far,
133  * Now verifier checks that [R2, R2 + map's key_size) are within stack limits
134  * and were initialized prior to this call.
135  * If it's ok, then verifier allows this BPF_CALL insn and looks at
136  * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets
137  * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function
138  * returns either a pointer to map value or NULL.
139  *
140  * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
141  * insn, the register holding that pointer in the true branch changes state to
142  * PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false
143  * branch. See check_cond_jmp_op().
144  *
145  * After the call R0 is set to return type of the function and registers R1-R5
146  * are set to NOT_INIT to indicate that they are no longer readable.
147  *
148  * The following reference types represent a potential reference to a kernel
149  * resource which, after first being allocated, must be checked and freed by
150  * the BPF program:
151  * - PTR_TO_SOCKET_OR_NULL, PTR_TO_SOCKET
152  *
153  * When the verifier sees a helper call return a reference type, it allocates a
154  * pointer id for the reference and stores it in the current function state.
155  * Similar to the way that PTR_TO_MAP_VALUE_OR_NULL is converted into
156  * PTR_TO_MAP_VALUE, PTR_TO_SOCKET_OR_NULL becomes PTR_TO_SOCKET when the type
157  * passes through a NULL-check conditional. For the branch wherein the state is
158  * changed to CONST_IMM, the verifier releases the reference.
159  *
160  * For each helper function that allocates a reference, such as
161  * bpf_sk_lookup_tcp(), there is a corresponding release function, such as
162  * bpf_sk_release(). When a reference type passes into the release function,
163  * the verifier also releases the reference. If any unchecked or unreleased
164  * reference remains at the end of the program, the verifier rejects it.
165  */
166 
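/* Illustrative only (a hedged sketch, not code the verifier uses): a complete
 * insn sequence for the walk-through above, including the NULL check that
 * converts R0 from PTR_TO_MAP_VALUE_OR_NULL to PTR_TO_MAP_VALUE.  'map_fd' is
 * a hypothetical placeholder, and the map is assumed to have 4-byte keys and
 * at least 8-byte values:
 *
 *    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),   // R2 type is FRAME_PTR
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),  // R2 type is PTR_TO_STACK
 *    BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),     // initialize the 4-byte key slot
 *    BPF_LD_MAP_FD(BPF_REG_1, map_fd),       // R1 type is CONST_PTR_TO_MAP
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 *    BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),  // R0 may be NULL, check it
 *    BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),   // safe: R0 is PTR_TO_MAP_VALUE
 *    BPF_MOV64_IMM(BPF_REG_0, 0),
 *    BPF_EXIT_INSN(),
 */
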
167 /* verifier_state + insn_idx are pushed to stack when branch is encountered */
168 struct bpf_verifier_stack_elem {
169 	/* verifier state is 'st'
170 	 * before processing instruction 'insn_idx'
171 	 * and after processing instruction 'prev_insn_idx'
172 	 */
173 	struct bpf_verifier_state st;
174 	int insn_idx;
175 	int prev_insn_idx;
176 	struct bpf_verifier_stack_elem *next;
177 };
178 
179 #define BPF_COMPLEXITY_LIMIT_INSNS	131072
180 #define BPF_COMPLEXITY_LIMIT_STACK	1024
181 #define BPF_COMPLEXITY_LIMIT_STATES	64
182 
183 #define BPF_MAP_PTR_UNPRIV	1UL
184 #define BPF_MAP_PTR_POISON	((void *)((0xeB9FUL << 1) +	\
185 					  POISON_POINTER_DELTA))
186 #define BPF_MAP_PTR(X)		((struct bpf_map *)((X) & ~BPF_MAP_PTR_UNPRIV))
187 
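/* aux->map_state is a tagged pointer: since struct bpf_map pointers are at
 * least word aligned, bit 0 is free to carry the 'unpriv' flag (the
 * BUILD_BUG_ON in bpf_map_ptr_store() checks the poison value doesn't
 * collide with it).  A sketch of the encoding, with hypothetical values:
 *
 *    map_state = (unsigned long)map | BPF_MAP_PTR_UNPRIV
 *    BPF_MAP_PTR(map_state)          -> map (flag masked off)
 *    map_state & BPF_MAP_PTR_UNPRIV  -> 1 (lookup seen on an unpriv path)
 */
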
188 static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux)
189 {
190 	return BPF_MAP_PTR(aux->map_state) == BPF_MAP_PTR_POISON;
191 }
192 
193 static bool bpf_map_ptr_unpriv(const struct bpf_insn_aux_data *aux)
194 {
195 	return aux->map_state & BPF_MAP_PTR_UNPRIV;
196 }
197 
198 static void bpf_map_ptr_store(struct bpf_insn_aux_data *aux,
199 			      const struct bpf_map *map, bool unpriv)
200 {
201 	BUILD_BUG_ON((unsigned long)BPF_MAP_PTR_POISON & BPF_MAP_PTR_UNPRIV);
202 	unpriv |= bpf_map_ptr_unpriv(aux);
203 	aux->map_state = (unsigned long)map |
204 			 (unpriv ? BPF_MAP_PTR_UNPRIV : 0UL);
205 }
206 
207 struct bpf_call_arg_meta {
208 	struct bpf_map *map_ptr;
209 	bool raw_mode;
210 	bool pkt_access;
211 	int regno;
212 	int access_size;
213 	s64 msize_smax_value;
214 	u64 msize_umax_value;
215 	int ptr_id;
216 };
217 
218 static DEFINE_MUTEX(bpf_verifier_lock);
219 
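/* Return the bpf_line_info covering insn_off.  prog->aux->linfo is sorted by
 * insn_off, so this scans for the last entry whose insn_off is <= the
 * requested offset, or returns NULL when there is no line info at all.
 */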
220 static const struct bpf_line_info *
221 find_linfo(const struct bpf_verifier_env *env, u32 insn_off)
222 {
223 	const struct bpf_line_info *linfo;
224 	const struct bpf_prog *prog;
225 	u32 i, nr_linfo;
226 
227 	prog = env->prog;
228 	nr_linfo = prog->aux->nr_linfo;
229 
230 	if (!nr_linfo || insn_off >= prog->len)
231 		return NULL;
232 
233 	linfo = prog->aux->linfo;
234 	for (i = 1; i < nr_linfo; i++)
235 		if (insn_off < linfo[i].insn_off)
236 			break;
237 
238 	return &linfo[i - 1];
239 }
240 
241 void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt,
242 		       va_list args)
243 {
244 	unsigned int n;
245 
246 	n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args);
247 
248 	WARN_ONCE(n >= BPF_VERIFIER_TMP_LOG_SIZE - 1,
249 		  "verifier log line truncated - local buffer too short\n");
250 
251 	n = min(log->len_total - log->len_used - 1, n);
252 	log->kbuf[n] = '\0';
253 
254 	if (!copy_to_user(log->ubuf + log->len_used, log->kbuf, n + 1))
255 		log->len_used += n;
256 	else
257 		log->ubuf = NULL;
258 }
259 
260 /* log_level controls verbosity level of eBPF verifier.
261  * bpf_verifier_log_write() is used to dump the verification trace to the log,
262  * so the user can figure out what's wrong with the program
263  */
264 __printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
265 					   const char *fmt, ...)
266 {
267 	va_list args;
268 
269 	if (!bpf_verifier_log_needed(&env->log))
270 		return;
271 
272 	va_start(args, fmt);
273 	bpf_verifier_vlog(&env->log, fmt, args);
274 	va_end(args);
275 }
276 EXPORT_SYMBOL_GPL(bpf_verifier_log_write);
277 
278 __printf(2, 3) static void verbose(void *private_data, const char *fmt, ...)
279 {
280 	struct bpf_verifier_env *env = private_data;
281 	va_list args;
282 
283 	if (!bpf_verifier_log_needed(&env->log))
284 		return;
285 
286 	va_start(args, fmt);
287 	bpf_verifier_vlog(&env->log, fmt, args);
288 	va_end(args);
289 }
290 
291 static const char *ltrim(const char *s)
292 {
293 	while (isspace(*s))
294 		s++;
295 
296 	return s;
297 }
298 
299 __printf(3, 4) static void verbose_linfo(struct bpf_verifier_env *env,
300 					 u32 insn_off,
301 					 const char *prefix_fmt, ...)
302 {
303 	const struct bpf_line_info *linfo;
304 
305 	if (!bpf_verifier_log_needed(&env->log))
306 		return;
307 
308 	linfo = find_linfo(env, insn_off);
309 	if (!linfo || linfo == env->prev_linfo)
310 		return;
311 
312 	if (prefix_fmt) {
313 		va_list args;
314 
315 		va_start(args, prefix_fmt);
316 		bpf_verifier_vlog(&env->log, prefix_fmt, args);
317 		va_end(args);
318 	}
319 
320 	verbose(env, "%s\n",
321 		ltrim(btf_name_by_offset(env->prog->aux->btf,
322 					 linfo->line_off)));
323 
324 	env->prev_linfo = linfo;
325 }
326 
327 static bool type_is_pkt_pointer(enum bpf_reg_type type)
328 {
329 	return type == PTR_TO_PACKET ||
330 	       type == PTR_TO_PACKET_META;
331 }
332 
333 static bool reg_type_may_be_null(enum bpf_reg_type type)
334 {
335 	return type == PTR_TO_MAP_VALUE_OR_NULL ||
336 	       type == PTR_TO_SOCKET_OR_NULL;
337 }
338 
339 static bool type_is_refcounted(enum bpf_reg_type type)
340 {
341 	return type == PTR_TO_SOCKET;
342 }
343 
344 static bool type_is_refcounted_or_null(enum bpf_reg_type type)
345 {
346 	return type == PTR_TO_SOCKET || type == PTR_TO_SOCKET_OR_NULL;
347 }
348 
349 static bool reg_is_refcounted(const struct bpf_reg_state *reg)
350 {
351 	return type_is_refcounted(reg->type);
352 }
353 
354 static bool reg_is_refcounted_or_null(const struct bpf_reg_state *reg)
355 {
356 	return type_is_refcounted_or_null(reg->type);
357 }
358 
359 static bool arg_type_is_refcounted(enum bpf_arg_type type)
360 {
361 	return type == ARG_PTR_TO_SOCKET;
362 }
363 
364 /* Determine whether the function releases some resources allocated by another
365  * function call. The first reference type argument will be assumed to be
366  * released by release_reference().
367  */
368 static bool is_release_function(enum bpf_func_id func_id)
369 {
370 	return func_id == BPF_FUNC_sk_release;
371 }
372 
373 /* string representation of 'enum bpf_reg_type' */
374 static const char * const reg_type_str[] = {
375 	[NOT_INIT]		= "?",
376 	[SCALAR_VALUE]		= "inv",
377 	[PTR_TO_CTX]		= "ctx",
378 	[CONST_PTR_TO_MAP]	= "map_ptr",
379 	[PTR_TO_MAP_VALUE]	= "map_value",
380 	[PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null",
381 	[PTR_TO_STACK]		= "fp",
382 	[PTR_TO_PACKET]		= "pkt",
383 	[PTR_TO_PACKET_META]	= "pkt_meta",
384 	[PTR_TO_PACKET_END]	= "pkt_end",
385 	[PTR_TO_FLOW_KEYS]	= "flow_keys",
386 	[PTR_TO_SOCKET]		= "sock",
387 	[PTR_TO_SOCKET_OR_NULL] = "sock_or_null",
388 };
389 
390 static char slot_type_char[] = {
391 	[STACK_INVALID]	= '?',
392 	[STACK_SPILL]	= 'r',
393 	[STACK_MISC]	= 'm',
394 	[STACK_ZERO]	= '0',
395 };
396 
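/* Print the liveness suffix used in verifier state dumps.  For example, a
 * register written in the current state shows up as "R1_w=ctx", one whose
 * value was read by a descendant state as "R6_r=...", and 'D' (REG_LIVE_DONE)
 * marks liveness propagation as finished for that register or slot.
 */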
397 static void print_liveness(struct bpf_verifier_env *env,
398 			   enum bpf_reg_liveness live)
399 {
400 	if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN | REG_LIVE_DONE))
401 		verbose(env, "_");
402 	if (live & REG_LIVE_READ)
403 		verbose(env, "r");
404 	if (live & REG_LIVE_WRITTEN)
405 		verbose(env, "w");
406 	if (live & REG_LIVE_DONE)
407 		verbose(env, "D");
408 }
409 
410 static struct bpf_func_state *func(struct bpf_verifier_env *env,
411 				   const struct bpf_reg_state *reg)
412 {
413 	struct bpf_verifier_state *cur = env->cur_state;
414 
415 	return cur->frame[reg->frameno];
416 }
417 
418 static void print_verifier_state(struct bpf_verifier_env *env,
419 				 const struct bpf_func_state *state)
420 {
421 	const struct bpf_reg_state *reg;
422 	enum bpf_reg_type t;
423 	int i;
424 
425 	if (state->frameno)
426 		verbose(env, " frame%d:", state->frameno);
427 	for (i = 0; i < MAX_BPF_REG; i++) {
428 		reg = &state->regs[i];
429 		t = reg->type;
430 		if (t == NOT_INIT)
431 			continue;
432 		verbose(env, " R%d", i);
433 		print_liveness(env, reg->live);
434 		verbose(env, "=%s", reg_type_str[t]);
435 		if ((t == SCALAR_VALUE || t == PTR_TO_STACK) &&
436 		    tnum_is_const(reg->var_off)) {
437 			/* reg->off should be 0 for SCALAR_VALUE */
438 			verbose(env, "%lld", reg->var_off.value + reg->off);
439 			if (t == PTR_TO_STACK)
440 				verbose(env, ",call_%d", func(env, reg)->callsite);
441 		} else {
442 			verbose(env, "(id=%d", reg->id);
443 			if (t != SCALAR_VALUE)
444 				verbose(env, ",off=%d", reg->off);
445 			if (type_is_pkt_pointer(t))
446 				verbose(env, ",r=%d", reg->range);
447 			else if (t == CONST_PTR_TO_MAP ||
448 				 t == PTR_TO_MAP_VALUE ||
449 				 t == PTR_TO_MAP_VALUE_OR_NULL)
450 				verbose(env, ",ks=%d,vs=%d",
451 					reg->map_ptr->key_size,
452 					reg->map_ptr->value_size);
453 			if (tnum_is_const(reg->var_off)) {
454 				/* Typically an immediate SCALAR_VALUE, but
455 				 * could be a pointer whose offset is too big
456 				 * for reg->off
457 				 */
458 				verbose(env, ",imm=%llx", reg->var_off.value);
459 			} else {
460 				if (reg->smin_value != reg->umin_value &&
461 				    reg->smin_value != S64_MIN)
462 					verbose(env, ",smin_value=%lld",
463 						(long long)reg->smin_value);
464 				if (reg->smax_value != reg->umax_value &&
465 				    reg->smax_value != S64_MAX)
466 					verbose(env, ",smax_value=%lld",
467 						(long long)reg->smax_value);
468 				if (reg->umin_value != 0)
469 					verbose(env, ",umin_value=%llu",
470 						(unsigned long long)reg->umin_value);
471 				if (reg->umax_value != U64_MAX)
472 					verbose(env, ",umax_value=%llu",
473 						(unsigned long long)reg->umax_value);
474 				if (!tnum_is_unknown(reg->var_off)) {
475 					char tn_buf[48];
476 
477 					tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
478 					verbose(env, ",var_off=%s", tn_buf);
479 				}
480 			}
481 			verbose(env, ")");
482 		}
483 	}
484 	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
485 		char types_buf[BPF_REG_SIZE + 1];
486 		bool valid = false;
487 		int j;
488 
489 		for (j = 0; j < BPF_REG_SIZE; j++) {
490 			if (state->stack[i].slot_type[j] != STACK_INVALID)
491 				valid = true;
492 			types_buf[j] = slot_type_char[
493 					state->stack[i].slot_type[j]];
494 		}
495 		types_buf[BPF_REG_SIZE] = 0;
496 		if (!valid)
497 			continue;
498 		verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
499 		print_liveness(env, state->stack[i].spilled_ptr.live);
500 		if (state->stack[i].slot_type[0] == STACK_SPILL)
501 			verbose(env, "=%s",
502 				reg_type_str[state->stack[i].spilled_ptr.type]);
503 		else
504 			verbose(env, "=%s", types_buf);
505 	}
506 	if (state->acquired_refs && state->refs[0].id) {
507 		verbose(env, " refs=%d", state->refs[0].id);
508 		for (i = 1; i < state->acquired_refs; i++)
509 			if (state->refs[i].id)
510 				verbose(env, ",%d", state->refs[i].id);
511 	}
512 	verbose(env, "\n");
513 }
514 
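/* A sample of what print_verifier_state() above emits (values are
 * illustrative): at the start of the main function the dump looks roughly
 * like
 *
 *    R1=ctx(id=0,off=0,imm=0) R10=fp0,call_-1
 *
 * i.e. R1 is PTR_TO_CTX at offset 0 and R10 is the frame pointer of the
 * main function (callsite -1).
 */
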
515 #define COPY_STATE_FN(NAME, COUNT, FIELD, SIZE)				\
516 static int copy_##NAME##_state(struct bpf_func_state *dst,		\
517 			       const struct bpf_func_state *src)	\
518 {									\
519 	if (!src->FIELD)						\
520 		return 0;						\
521 	if (WARN_ON_ONCE(dst->COUNT < src->COUNT)) {			\
522 		/* internal bug, make state invalid to reject the program */ \
523 		memset(dst, 0, sizeof(*dst));				\
524 		return -EFAULT;						\
525 	}								\
526 	memcpy(dst->FIELD, src->FIELD,					\
527 	       sizeof(*src->FIELD) * (src->COUNT / SIZE));		\
528 	return 0;							\
529 }
530 /* copy_reference_state() */
531 COPY_STATE_FN(reference, acquired_refs, refs, 1)
532 /* copy_stack_state() */
533 COPY_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE)
534 #undef COPY_STATE_FN
535 
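/* For reference, the copy_reference_state() instance generated above expands
 * roughly to (modulo the WARN_ON_ONCE bookkeeping):
 *
 *    if (!src->refs)
 *        return 0;
 *    memcpy(dst->refs, src->refs,
 *           sizeof(*src->refs) * (src->acquired_refs / 1));
 *    return 0;
 */
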
536 #define REALLOC_STATE_FN(NAME, COUNT, FIELD, SIZE)			\
537 static int realloc_##NAME##_state(struct bpf_func_state *state, int size, \
538 				  bool copy_old)			\
539 {									\
540 	u32 old_size = state->COUNT;					\
541 	struct bpf_##NAME##_state *new_##FIELD;				\
542 	int slot = size / SIZE;						\
543 									\
544 	if (size <= old_size || !size) {				\
545 		if (copy_old)						\
546 			return 0;					\
547 		state->COUNT = slot * SIZE;				\
548 		if (!size && old_size) {				\
549 			kfree(state->FIELD);				\
550 			state->FIELD = NULL;				\
551 		}							\
552 		return 0;						\
553 	}								\
554 	new_##FIELD = kmalloc_array(slot, sizeof(struct bpf_##NAME##_state), \
555 				    GFP_KERNEL);			\
556 	if (!new_##FIELD)						\
557 		return -ENOMEM;						\
558 	if (copy_old) {							\
559 		if (state->FIELD)					\
560 			memcpy(new_##FIELD, state->FIELD,		\
561 			       sizeof(*new_##FIELD) * (old_size / SIZE)); \
562 		memset(new_##FIELD + old_size / SIZE, 0,		\
563 		       sizeof(*new_##FIELD) * (size - old_size) / SIZE); \
564 	}								\
565 	state->COUNT = slot * SIZE;					\
566 	kfree(state->FIELD);						\
567 	state->FIELD = new_##FIELD;					\
568 	return 0;							\
569 }
570 /* realloc_reference_state() */
571 REALLOC_STATE_FN(reference, acquired_refs, refs, 1)
572 /* realloc_stack_state() */
573 REALLOC_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE)
574 #undef REALLOC_STATE_FN
575 
576 /* do_check() starts with zero-sized stack in struct bpf_verifier_state to
577  * make it consume a minimal amount of memory. check_stack_write() accesses
578  * from the program call into realloc_func_state() to grow the stack size.
579  * Note there is a non-zero 'parent' pointer inside bpf_verifier_state
580  * which realloc_stack_state() copies over. It points to previous
581  * bpf_verifier_state which is never reallocated.
582  */
583 static int realloc_func_state(struct bpf_func_state *state, int stack_size,
584 			      int refs_size, bool copy_old)
585 {
586 	int err = realloc_reference_state(state, refs_size, copy_old);
587 	if (err)
588 		return err;
589 	return realloc_stack_state(state, stack_size, copy_old);
590 }
591 
592 /* Acquire a pointer id from the env and update the state->refs to include
593  * this new pointer reference.
594  * On success, returns a valid pointer id to associate with the register.
595  * On failure, returns a negative errno.
596  */
597 static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx)
598 {
599 	struct bpf_func_state *state = cur_func(env);
600 	int new_ofs = state->acquired_refs;
601 	int id, err;
602 
603 	err = realloc_reference_state(state, state->acquired_refs + 1, true);
604 	if (err)
605 		return err;
606 	id = ++env->id_gen;
607 	state->refs[new_ofs].id = id;
608 	state->refs[new_ofs].insn_idx = insn_idx;
609 
610 	return id;
611 }
612 
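/* Sketch of how the acquire/release sides pair up elsewhere in the verifier
 * (the function names are real, the flow is simplified): when a helper such
 * as bpf_sk_lookup_tcp() returns a PTR_TO_SOCKET_OR_NULL, the call handler
 * does roughly
 *
 *    id = acquire_reference_state(env, insn_idx);
 *    regs[BPF_REG_0].id = id;
 *
 * and when that pointer is later passed to bpf_sk_release() (or proven NULL),
 * release_reference_state(env, id) drops it again.  A program that reaches
 * the end with state->acquired_refs still non-zero is rejected.
 */
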
613 /* release function corresponding to acquire_reference_state(). Idempotent. */
614 static int __release_reference_state(struct bpf_func_state *state, int ptr_id)
615 {
616 	int i, last_idx;
617 
618 	if (!ptr_id)
619 		return -EFAULT;
620 
621 	last_idx = state->acquired_refs - 1;
622 	for (i = 0; i < state->acquired_refs; i++) {
623 		if (state->refs[i].id == ptr_id) {
624 			if (last_idx && i != last_idx)
625 				memcpy(&state->refs[i], &state->refs[last_idx],
626 				       sizeof(*state->refs));
627 			memset(&state->refs[last_idx], 0, sizeof(*state->refs));
628 			state->acquired_refs--;
629 			return 0;
630 		}
631 	}
632 	return -EFAULT;
633 }
634 
635 /* variation on the above for cases where we expect that there must be an
636  * outstanding reference for the specified ptr_id.
637  */
638 static int release_reference_state(struct bpf_verifier_env *env, int ptr_id)
639 {
640 	struct bpf_func_state *state = cur_func(env);
641 	int err;
642 
643 	err = __release_reference_state(state, ptr_id);
644 	if (WARN_ON_ONCE(err != 0))
645 		verbose(env, "verifier internal error: can't release reference\n");
646 	return err;
647 }
648 
649 static int transfer_reference_state(struct bpf_func_state *dst,
650 				    struct bpf_func_state *src)
651 {
652 	int err = realloc_reference_state(dst, src->acquired_refs, false);
653 	if (err)
654 		return err;
655 	err = copy_reference_state(dst, src);
656 	if (err)
657 		return err;
658 	return 0;
659 }
660 
661 static void free_func_state(struct bpf_func_state *state)
662 {
663 	if (!state)
664 		return;
665 	kfree(state->refs);
666 	kfree(state->stack);
667 	kfree(state);
668 }
669 
670 static void free_verifier_state(struct bpf_verifier_state *state,
671 				bool free_self)
672 {
673 	int i;
674 
675 	for (i = 0; i <= state->curframe; i++) {
676 		free_func_state(state->frame[i]);
677 		state->frame[i] = NULL;
678 	}
679 	if (free_self)
680 		kfree(state);
681 }
682 
683 /* copy verifier state from src to dst growing dst stack space
684  * when necessary to accommodate larger src stack
685  */
686 static int copy_func_state(struct bpf_func_state *dst,
687 			   const struct bpf_func_state *src)
688 {
689 	int err;
690 
691 	err = realloc_func_state(dst, src->allocated_stack, src->acquired_refs,
692 				 false);
693 	if (err)
694 		return err;
695 	memcpy(dst, src, offsetof(struct bpf_func_state, acquired_refs));
696 	err = copy_reference_state(dst, src);
697 	if (err)
698 		return err;
699 	return copy_stack_state(dst, src);
700 }
701 
702 static int copy_verifier_state(struct bpf_verifier_state *dst_state,
703 			       const struct bpf_verifier_state *src)
704 {
705 	struct bpf_func_state *dst;
706 	int i, err;
707 
708 	/* if dst has more stack frames than src, free them */
709 	for (i = src->curframe + 1; i <= dst_state->curframe; i++) {
710 		free_func_state(dst_state->frame[i]);
711 		dst_state->frame[i] = NULL;
712 	}
713 	dst_state->speculative = src->speculative;
714 	dst_state->curframe = src->curframe;
715 	for (i = 0; i <= src->curframe; i++) {
716 		dst = dst_state->frame[i];
717 		if (!dst) {
718 			dst = kzalloc(sizeof(*dst), GFP_KERNEL);
719 			if (!dst)
720 				return -ENOMEM;
721 			dst_state->frame[i] = dst;
722 		}
723 		err = copy_func_state(dst, src->frame[i]);
724 		if (err)
725 			return err;
726 	}
727 	return 0;
728 }
729 
730 static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx,
731 		     int *insn_idx)
732 {
733 	struct bpf_verifier_state *cur = env->cur_state;
734 	struct bpf_verifier_stack_elem *elem, *head = env->head;
735 	int err;
736 
737 	if (env->head == NULL)
738 		return -ENOENT;
739 
740 	if (cur) {
741 		err = copy_verifier_state(cur, &head->st);
742 		if (err)
743 			return err;
744 	}
745 	if (insn_idx)
746 		*insn_idx = head->insn_idx;
747 	if (prev_insn_idx)
748 		*prev_insn_idx = head->prev_insn_idx;
749 	elem = head->next;
750 	free_verifier_state(&head->st, false);
751 	kfree(head);
752 	env->head = elem;
753 	env->stack_size--;
754 	return 0;
755 }
756 
757 static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
758 					     int insn_idx, int prev_insn_idx,
759 					     bool speculative)
760 {
761 	struct bpf_verifier_state *cur = env->cur_state;
762 	struct bpf_verifier_stack_elem *elem;
763 	int err;
764 
765 	elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL);
766 	if (!elem)
767 		goto err;
768 
769 	elem->insn_idx = insn_idx;
770 	elem->prev_insn_idx = prev_insn_idx;
771 	elem->next = env->head;
772 	env->head = elem;
773 	env->stack_size++;
774 	err = copy_verifier_state(&elem->st, cur);
775 	if (err)
776 		goto err;
777 	elem->st.speculative |= speculative;
778 	if (env->stack_size > BPF_COMPLEXITY_LIMIT_STACK) {
779 		verbose(env, "BPF program is too complex\n");
780 		goto err;
781 	}
782 	return &elem->st;
783 err:
784 	free_verifier_state(env->cur_state, true);
785 	env->cur_state = NULL;
786 	/* pop all elements and return */
787 	while (!pop_stack(env, NULL, NULL));
788 	return NULL;
789 }
790 
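/* How the stack above drives path exploration (a simplified sketch mirroring
 * the header comment): when do_check() reaches a conditional jump it verifies
 * one side immediately and queues the other:
 *
 *    other_branch = push_stack(env, insn_idx + insn->off + 1, insn_idx, false);
 *    // ... keep walking the fall-through path ...
 *
 * and when the current path hits BPF_EXIT it resumes a queued one:
 *
 *    err = pop_stack(env, &prev_insn_idx, &insn_idx);
 *    // -ENOENT means no branches are left: all paths have been analyzed
 */
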
791 #define CALLER_SAVED_REGS 6
792 static const int caller_saved[CALLER_SAVED_REGS] = {
793 	BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
794 };
795 
796 static void __mark_reg_not_init(struct bpf_reg_state *reg);
797 
798 /* Mark the unknown part of a register (variable offset or scalar value) as
799  * known to have the value @imm.
800  */
801 static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm)
802 {
803 	/* Clear id, off, and union(map_ptr, range) */
804 	memset(((u8 *)reg) + sizeof(reg->type), 0,
805 	       offsetof(struct bpf_reg_state, var_off) - sizeof(reg->type));
806 	reg->var_off = tnum_const(imm);
807 	reg->smin_value = (s64)imm;
808 	reg->smax_value = (s64)imm;
809 	reg->umin_value = imm;
810 	reg->umax_value = imm;
811 }
812 
813 /* Mark the 'variable offset' part of a register as zero.  This should be
814  * used only on registers holding a pointer type.
815  */
816 static void __mark_reg_known_zero(struct bpf_reg_state *reg)
817 {
818 	__mark_reg_known(reg, 0);
819 }
820 
821 static void __mark_reg_const_zero(struct bpf_reg_state *reg)
822 {
823 	__mark_reg_known(reg, 0);
824 	reg->type = SCALAR_VALUE;
825 }
826 
827 static void mark_reg_known_zero(struct bpf_verifier_env *env,
828 				struct bpf_reg_state *regs, u32 regno)
829 {
830 	if (WARN_ON(regno >= MAX_BPF_REG)) {
831 		verbose(env, "mark_reg_known_zero(regs, %u)\n", regno);
832 		/* Something bad happened, let's kill all regs */
833 		for (regno = 0; regno < MAX_BPF_REG; regno++)
834 			__mark_reg_not_init(regs + regno);
835 		return;
836 	}
837 	__mark_reg_known_zero(regs + regno);
838 }
839 
840 static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg)
841 {
842 	return type_is_pkt_pointer(reg->type);
843 }
844 
845 static bool reg_is_pkt_pointer_any(const struct bpf_reg_state *reg)
846 {
847 	return reg_is_pkt_pointer(reg) ||
848 	       reg->type == PTR_TO_PACKET_END;
849 }
850 
851 /* Unmodified PTR_TO_PACKET[_META,_END] register from ctx access. */
852 static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg,
853 				    enum bpf_reg_type which)
854 {
855 	/* The register can already have a range from prior markings.
856 	 * This is fine as long as it hasn't been advanced from its
857 	 * origin.
858 	 */
859 	return reg->type == which &&
860 	       reg->id == 0 &&
861 	       reg->off == 0 &&
862 	       tnum_equals_const(reg->var_off, 0);
863 }
864 
865 /* Attempts to improve min/max values based on var_off information */
866 static void __update_reg_bounds(struct bpf_reg_state *reg)
867 {
868 	/* min signed is max(sign bit) | min(other bits) */
869 	reg->smin_value = max_t(s64, reg->smin_value,
870 				reg->var_off.value | (reg->var_off.mask & S64_MIN));
871 	/* max signed is min(sign bit) | max(other bits) */
872 	reg->smax_value = min_t(s64, reg->smax_value,
873 				reg->var_off.value | (reg->var_off.mask & S64_MAX));
874 	reg->umin_value = max(reg->umin_value, reg->var_off.value);
875 	reg->umax_value = min(reg->umax_value,
876 			      reg->var_off.value | reg->var_off.mask);
877 }
878 
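/* Worked example for __update_reg_bounds(): suppose var_off tracks "0b01xx",
 * i.e. var_off.value = 0x4 (known-one bits) and var_off.mask = 0x3 (unknown
 * bits).  The possible values are 4..7, so umin_value is raised to
 * var_off.value = 4 and umax_value is lowered to var_off.value | var_off.mask
 * = 7; the signed bounds tighten the same way here since the sign bit is
 * known to be zero.
 */
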
879 /* Uses signed min/max values to inform unsigned, and vice-versa */
880 static void __reg_deduce_bounds(struct bpf_reg_state *reg)
881 {
882 	/* Learn sign from signed bounds.
883 	 * If we cannot cross the sign boundary, then signed and unsigned bounds
884 	 * are the same, so combine.  This works even in the negative case, e.g.
885 	 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
886 	 */
887 	if (reg->smin_value >= 0 || reg->smax_value < 0) {
888 		reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
889 							  reg->umin_value);
890 		reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
891 							  reg->umax_value);
892 		return;
893 	}
894 	/* Learn sign from unsigned bounds.  Signed bounds cross the sign
895 	 * boundary, so we must be careful.
896 	 */
897 	if ((s64)reg->umax_value >= 0) {
898 		/* Positive.  We can't learn anything from the smin, but smax
899 		 * is positive, hence safe.
900 		 */
901 		reg->smin_value = reg->umin_value;
902 		reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
903 							  reg->umax_value);
904 	} else if ((s64)reg->umin_value < 0) {
905 		/* Negative.  We can't learn anything from the smax, but smin
906 		 * is negative, hence safe.
907 		 */
908 		reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
909 							  reg->umin_value);
910 		reg->smax_value = reg->umax_value;
911 	}
912 }
913 
914 /* Attempts to improve var_off based on unsigned min/max information */
915 static void __reg_bound_offset(struct bpf_reg_state *reg)
916 {
917 	reg->var_off = tnum_intersect(reg->var_off,
918 				      tnum_range(reg->umin_value,
919 						 reg->umax_value));
920 }
921 
922 /* Reset the min/max bounds of a register */
923 static void __mark_reg_unbounded(struct bpf_reg_state *reg)
924 {
925 	reg->smin_value = S64_MIN;
926 	reg->smax_value = S64_MAX;
927 	reg->umin_value = 0;
928 	reg->umax_value = U64_MAX;
929 }
930 
931 /* Mark a register as having a completely unknown (scalar) value. */
932 static void __mark_reg_unknown(struct bpf_reg_state *reg)
933 {
934 	/*
935 	 * Clear type, id, off, and union(map_ptr, range) and
936 	 * padding between 'type' and union
937 	 */
938 	memset(reg, 0, offsetof(struct bpf_reg_state, var_off));
939 	reg->type = SCALAR_VALUE;
940 	reg->var_off = tnum_unknown;
941 	reg->frameno = 0;
942 	__mark_reg_unbounded(reg);
943 }
944 
945 static void mark_reg_unknown(struct bpf_verifier_env *env,
946 			     struct bpf_reg_state *regs, u32 regno)
947 {
948 	if (WARN_ON(regno >= MAX_BPF_REG)) {
949 		verbose(env, "mark_reg_unknown(regs, %u)\n", regno);
950 		/* Something bad happened, let's kill all regs except FP */
951 		for (regno = 0; regno < BPF_REG_FP; regno++)
952 			__mark_reg_not_init(regs + regno);
953 		return;
954 	}
955 	__mark_reg_unknown(regs + regno);
956 }
957 
958 static void __mark_reg_not_init(struct bpf_reg_state *reg)
959 {
960 	__mark_reg_unknown(reg);
961 	reg->type = NOT_INIT;
962 }
963 
964 static void mark_reg_not_init(struct bpf_verifier_env *env,
965 			      struct bpf_reg_state *regs, u32 regno)
966 {
967 	if (WARN_ON(regno >= MAX_BPF_REG)) {
968 		verbose(env, "mark_reg_not_init(regs, %u)\n", regno);
969 		/* Something bad happened, let's kill all regs except FP */
970 		for (regno = 0; regno < BPF_REG_FP; regno++)
971 			__mark_reg_not_init(regs + regno);
972 		return;
973 	}
974 	__mark_reg_not_init(regs + regno);
975 }
976 
977 static void init_reg_state(struct bpf_verifier_env *env,
978 			   struct bpf_func_state *state)
979 {
980 	struct bpf_reg_state *regs = state->regs;
981 	int i;
982 
983 	for (i = 0; i < MAX_BPF_REG; i++) {
984 		mark_reg_not_init(env, regs, i);
985 		regs[i].live = REG_LIVE_NONE;
986 		regs[i].parent = NULL;
987 	}
988 
989 	/* frame pointer */
990 	regs[BPF_REG_FP].type = PTR_TO_STACK;
991 	mark_reg_known_zero(env, regs, BPF_REG_FP);
992 	regs[BPF_REG_FP].frameno = state->frameno;
993 
994 	/* 1st arg to a function */
995 	regs[BPF_REG_1].type = PTR_TO_CTX;
996 	mark_reg_known_zero(env, regs, BPF_REG_1);
997 }
998 
999 #define BPF_MAIN_FUNC (-1)
1000 static void init_func_state(struct bpf_verifier_env *env,
1001 			    struct bpf_func_state *state,
1002 			    int callsite, int frameno, int subprogno)
1003 {
1004 	state->callsite = callsite;
1005 	state->frameno = frameno;
1006 	state->subprogno = subprogno;
1007 	init_reg_state(env, state);
1008 }
1009 
1010 enum reg_arg_type {
1011 	SRC_OP,		/* register is used as source operand */
1012 	DST_OP,		/* register is used as destination operand */
1013 	DST_OP_NO_MARK	/* same as above, check only, don't mark */
1014 };
1015 
1016 static int cmp_subprogs(const void *a, const void *b)
1017 {
1018 	return ((struct bpf_subprog_info *)a)->start -
1019 	       ((struct bpf_subprog_info *)b)->start;
1020 }
1021 
1022 static int find_subprog(struct bpf_verifier_env *env, int off)
1023 {
1024 	struct bpf_subprog_info *p;
1025 
1026 	p = bsearch(&off, env->subprog_info, env->subprog_cnt,
1027 		    sizeof(env->subprog_info[0]), cmp_subprogs);
1028 	if (!p)
1029 		return -ENOENT;
1030 	return p - env->subprog_info;
1031 
1032 }
1033 
1034 static int add_subprog(struct bpf_verifier_env *env, int off)
1035 {
1036 	int insn_cnt = env->prog->len;
1037 	int ret;
1038 
1039 	if (off >= insn_cnt || off < 0) {
1040 		verbose(env, "call to invalid destination\n");
1041 		return -EINVAL;
1042 	}
1043 	ret = find_subprog(env, off);
1044 	if (ret >= 0)
1045 		return 0;
1046 	if (env->subprog_cnt >= BPF_MAX_SUBPROGS) {
1047 		verbose(env, "too many subprograms\n");
1048 		return -E2BIG;
1049 	}
1050 	env->subprog_info[env->subprog_cnt++].start = off;
1051 	sort(env->subprog_info, env->subprog_cnt,
1052 	     sizeof(env->subprog_info[0]), cmp_subprogs, NULL);
1053 	return 0;
1054 }
1055 
1056 static int check_subprogs(struct bpf_verifier_env *env)
1057 {
1058 	int i, ret, subprog_start, subprog_end, off, cur_subprog = 0;
1059 	struct bpf_subprog_info *subprog = env->subprog_info;
1060 	struct bpf_insn *insn = env->prog->insnsi;
1061 	int insn_cnt = env->prog->len;
1062 
1063 	/* Add entry function. */
1064 	ret = add_subprog(env, 0);
1065 	if (ret < 0)
1066 		return ret;
1067 
1068 	/* determine subprog starts. The end is one before the next starts */
1069 	for (i = 0; i < insn_cnt; i++) {
1070 		if (insn[i].code != (BPF_JMP | BPF_CALL))
1071 			continue;
1072 		if (insn[i].src_reg != BPF_PSEUDO_CALL)
1073 			continue;
1074 		if (!env->allow_ptr_leaks) {
1075 			verbose(env, "function calls to other bpf functions are allowed for root only\n");
1076 			return -EPERM;
1077 		}
1078 		ret = add_subprog(env, i + insn[i].imm + 1);
1079 		if (ret < 0)
1080 			return ret;
1081 	}
1082 
1083 	/* Add a fake 'exit' subprog which could simplify subprog iteration
1084 	 * logic. 'subprog_cnt' should not be increased.
1085 	 */
1086 	subprog[env->subprog_cnt].start = insn_cnt;
1087 
1088 	if (env->log.level > 1)
1089 		for (i = 0; i < env->subprog_cnt; i++)
1090 			verbose(env, "func#%d @%d\n", i, subprog[i].start);
1091 
1092 	/* now check that all jumps are within the same subprog */
1093 	subprog_start = subprog[cur_subprog].start;
1094 	subprog_end = subprog[cur_subprog + 1].start;
1095 	for (i = 0; i < insn_cnt; i++) {
1096 		u8 code = insn[i].code;
1097 
1098 		if (BPF_CLASS(code) != BPF_JMP)
1099 			goto next;
1100 		if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL)
1101 			goto next;
1102 		off = i + insn[i].off + 1;
1103 		if (off < subprog_start || off >= subprog_end) {
1104 			verbose(env, "jump out of range from insn %d to %d\n", i, off);
1105 			return -EINVAL;
1106 		}
1107 next:
1108 		if (i == subprog_end - 1) {
1109 			/* to avoid fall-through from one subprog into another
1110 			 * the last insn of the subprog should be either exit
1111 			 * or unconditional jump back
1112 			 */
1113 			if (code != (BPF_JMP | BPF_EXIT) &&
1114 			    code != (BPF_JMP | BPF_JA)) {
1115 				verbose(env, "last insn is not an exit or jmp\n");
1116 				return -EINVAL;
1117 			}
1118 			subprog_start = subprog_end;
1119 			cur_subprog++;
1120 			if (cur_subprog < env->subprog_cnt)
1121 				subprog_end = subprog[cur_subprog + 1].start;
1122 		}
1123 	}
1124 	return 0;
1125 }
1126 
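/* Example of the layout check_subprogs() builds (hypothetical 10-insn
 * program with one BPF_PSEUDO_CALL at insn 3 targeting insn 6):
 *
 *    subprog_info[0].start = 0    // entry function, insns 0..5
 *    subprog_info[1].start = 6    // callee, insns 6..9
 *    subprog_info[2].start = 10   // fake 'exit' subprog marking the end
 *
 * with subprog_cnt == 2; every jump must stay within its own
 * [start, next start) range and each subprog must end in BPF_EXIT or an
 * unconditional BPF_JA.
 */
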
1127 /* Parentage chain of this register (or stack slot) should take care of all
1128  * issues like callee-saved registers, stack slot allocation time, etc.
1129  */
1130 static int mark_reg_read(struct bpf_verifier_env *env,
1131 			 const struct bpf_reg_state *state,
1132 			 struct bpf_reg_state *parent)
1133 {
1134 	bool writes = parent == state->parent; /* Observe write marks */
1135 
1136 	while (parent) {
1137 		/* if read wasn't screened by an earlier write ... */
1138 		if (writes && state->live & REG_LIVE_WRITTEN)
1139 			break;
1140 		if (parent->live & REG_LIVE_DONE) {
1141 			verbose(env, "verifier BUG type %s var_off %lld off %d\n",
1142 				reg_type_str[parent->type],
1143 				parent->var_off.value, parent->off);
1144 			return -EFAULT;
1145 		}
1146 		/* ... then we depend on parent's value */
1147 		parent->live |= REG_LIVE_READ;
1148 		state = parent;
1149 		parent = state->parent;
1150 		writes = true;
1151 	}
1152 	return 0;
1153 }
1154 
1155 static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
1156 			 enum reg_arg_type t)
1157 {
1158 	struct bpf_verifier_state *vstate = env->cur_state;
1159 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
1160 	struct bpf_reg_state *regs = state->regs;
1161 
1162 	if (regno >= MAX_BPF_REG) {
1163 		verbose(env, "R%d is invalid\n", regno);
1164 		return -EINVAL;
1165 	}
1166 
1167 	if (t == SRC_OP) {
1168 		/* check whether register used as source operand can be read */
1169 		if (regs[regno].type == NOT_INIT) {
1170 			verbose(env, "R%d !read_ok\n", regno);
1171 			return -EACCES;
1172 		}
1173 		/* We don't need to worry about FP liveness because it's read-only */
1174 		if (regno != BPF_REG_FP)
1175 			return mark_reg_read(env, &regs[regno],
1176 					     regs[regno].parent);
1177 	} else {
1178 		/* check whether register used as dest operand can be written to */
1179 		if (regno == BPF_REG_FP) {
1180 			verbose(env, "frame pointer is read only\n");
1181 			return -EACCES;
1182 		}
1183 		regs[regno].live |= REG_LIVE_WRITTEN;
1184 		if (t == DST_OP)
1185 			mark_reg_unknown(env, regs, regno);
1186 	}
1187 	return 0;
1188 }
1189 
1190 static bool is_spillable_regtype(enum bpf_reg_type type)
1191 {
1192 	switch (type) {
1193 	case PTR_TO_MAP_VALUE:
1194 	case PTR_TO_MAP_VALUE_OR_NULL:
1195 	case PTR_TO_STACK:
1196 	case PTR_TO_CTX:
1197 	case PTR_TO_PACKET:
1198 	case PTR_TO_PACKET_META:
1199 	case PTR_TO_PACKET_END:
1200 	case PTR_TO_FLOW_KEYS:
1201 	case CONST_PTR_TO_MAP:
1202 	case PTR_TO_SOCKET:
1203 	case PTR_TO_SOCKET_OR_NULL:
1204 		return true;
1205 	default:
1206 		return false;
1207 	}
1208 }
1209 
1210 /* Does this register contain a constant zero? */
1211 static bool register_is_null(struct bpf_reg_state *reg)
1212 {
1213 	return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0);
1214 }
1215 
1216 /* check_stack_read/write functions track spill/fill of registers;
1217  * stack boundary and alignment are checked in check_mem_access()
1218  */
1219 static int check_stack_write(struct bpf_verifier_env *env,
1220 			     struct bpf_func_state *state, /* func where register points to */
1221 			     int off, int size, int value_regno, int insn_idx)
1222 {
1223 	struct bpf_func_state *cur; /* state of the current function */
1224 	int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err;
1225 	enum bpf_reg_type type;
1226 
1227 	err = realloc_func_state(state, round_up(slot + 1, BPF_REG_SIZE),
1228 				 state->acquired_refs, true);
1229 	if (err)
1230 		return err;
1231 	/* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
1232 	 * so it's aligned access and [off, off + size) are within stack limits
1233 	 */
1234 	if (!env->allow_ptr_leaks &&
1235 	    state->stack[spi].slot_type[0] == STACK_SPILL &&
1236 	    size != BPF_REG_SIZE) {
1237 		verbose(env, "attempt to corrupt spilled pointer on stack\n");
1238 		return -EACCES;
1239 	}
1240 
1241 	cur = env->cur_state->frame[env->cur_state->curframe];
1242 	if (value_regno >= 0 &&
1243 	    is_spillable_regtype((type = cur->regs[value_regno].type))) {
1244 
1245 		/* register containing pointer is being spilled into stack */
1246 		if (size != BPF_REG_SIZE) {
1247 			verbose(env, "invalid size of register spill\n");
1248 			return -EACCES;
1249 		}
1250 
1251 		if (state != cur && type == PTR_TO_STACK) {
1252 			verbose(env, "cannot spill pointers to stack into stack frame of the caller\n");
1253 			return -EINVAL;
1254 		}
1255 
1256 		/* save register state */
1257 		state->stack[spi].spilled_ptr = cur->regs[value_regno];
1258 		state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
1259 
1260 		for (i = 0; i < BPF_REG_SIZE; i++) {
1261 			if (state->stack[spi].slot_type[i] == STACK_MISC &&
1262 			    !env->allow_ptr_leaks) {
1263 				int *poff = &env->insn_aux_data[insn_idx].sanitize_stack_off;
1264 				int soff = (-spi - 1) * BPF_REG_SIZE;
1265 
1266 				/* detected reuse of integer stack slot with a pointer
1267 			 * which means either llvm is reusing the stack slot or
1268 				 * an attacker is trying to exploit CVE-2018-3639
1269 				 * (speculative store bypass)
1270 				 * Have to sanitize that slot with preemptive
1271 				 * store of zero.
1272 				 */
1273 				if (*poff && *poff != soff) {
1274 					/* disallow programs where single insn stores
1275 					 * into two different stack slots, since verifier
1276 					 * cannot sanitize them
1277 					 */
1278 					verbose(env,
1279 						"insn %d cannot access two stack slots fp%d and fp%d",
1280 						"insn %d cannot access two stack slots fp%d and fp%d\n",
1281 					return -EINVAL;
1282 				}
1283 				*poff = soff;
1284 			}
1285 			state->stack[spi].slot_type[i] = STACK_SPILL;
1286 		}
1287 	} else {
1288 		u8 type = STACK_MISC;
1289 
1290 		/* regular write of data into stack destroys any spilled ptr */
1291 		state->stack[spi].spilled_ptr.type = NOT_INIT;
1292 		/* Mark slots as STACK_MISC if they belonged to spilled ptr. */
1293 		if (state->stack[spi].slot_type[0] == STACK_SPILL)
1294 			for (i = 0; i < BPF_REG_SIZE; i++)
1295 				state->stack[spi].slot_type[i] = STACK_MISC;
1296 
1297 		/* only mark the slot as written if all 8 bytes were written
1298 		 * otherwise read propagation may incorrectly stop too soon
1299 		 * when stack slots are partially written.
1300 		 * This heuristic means that read propagation will be
1301 		 * conservative, since it will add reg_live_read marks
1302 		 * to stack slots all the way to the first state when a program
1303 		 * writes+reads less than 8 bytes
1304 		 */
1305 		if (size == BPF_REG_SIZE)
1306 			state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
1307 
1308 		/* when we zero initialize stack slots mark them as such */
1309 		if (value_regno >= 0 &&
1310 		    register_is_null(&cur->regs[value_regno]))
1311 			type = STACK_ZERO;
1312 
1313 		/* Mark slots affected by this stack write. */
1314 		for (i = 0; i < size; i++)
1315 			state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] =
1316 				type;
1317 	}
1318 	return 0;
1319 }
1320 
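/* Example of the bookkeeping above (illustrative offsets): spilling R1, a
 * PTR_TO_CTX, with "*(u64 *)(r10 - 8) = r1" stores the full register state
 * into state->stack[0].spilled_ptr and marks all eight slot_type bytes of
 * fp-8 as STACK_SPILL.  A later "*(u32 *)(r10 - 8) = 0" from unprivileged
 * code is rejected as an attempt to corrupt the spilled pointer, while a
 * full 8-byte scalar store simply demotes the slots to STACK_MISC (or
 * STACK_ZERO when the written register is known to be zero).
 */
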
1321 static int check_stack_read(struct bpf_verifier_env *env,
1322 			    struct bpf_func_state *reg_state /* func where register points to */,
1323 			    int off, int size, int value_regno)
1324 {
1325 	struct bpf_verifier_state *vstate = env->cur_state;
1326 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
1327 	int i, slot = -off - 1, spi = slot / BPF_REG_SIZE;
1328 	u8 *stype;
1329 
1330 	if (reg_state->allocated_stack <= slot) {
1331 		verbose(env, "invalid read from stack off %d+0 size %d\n",
1332 			off, size);
1333 		return -EACCES;
1334 	}
1335 	stype = reg_state->stack[spi].slot_type;
1336 
1337 	if (stype[0] == STACK_SPILL) {
1338 		if (size != BPF_REG_SIZE) {
1339 			verbose(env, "invalid size of register spill\n");
1340 			return -EACCES;
1341 		}
1342 		for (i = 1; i < BPF_REG_SIZE; i++) {
1343 			if (stype[(slot - i) % BPF_REG_SIZE] != STACK_SPILL) {
1344 				verbose(env, "corrupted spill memory\n");
1345 				return -EACCES;
1346 			}
1347 		}
1348 
1349 		if (value_regno >= 0) {
1350 			/* restore register state from stack */
1351 			state->regs[value_regno] = reg_state->stack[spi].spilled_ptr;
1352 			/* mark reg as written since spilled pointer state likely
1353 			 * has its liveness marks cleared by is_state_visited()
1354 			 * which resets stack/reg liveness for state transitions
1355 			 */
1356 			state->regs[value_regno].live |= REG_LIVE_WRITTEN;
1357 		}
1358 		mark_reg_read(env, &reg_state->stack[spi].spilled_ptr,
1359 			      reg_state->stack[spi].spilled_ptr.parent);
1360 		return 0;
1361 	} else {
1362 		int zeros = 0;
1363 
1364 		for (i = 0; i < size; i++) {
1365 			if (stype[(slot - i) % BPF_REG_SIZE] == STACK_MISC)
1366 				continue;
1367 			if (stype[(slot - i) % BPF_REG_SIZE] == STACK_ZERO) {
1368 				zeros++;
1369 				continue;
1370 			}
1371 			verbose(env, "invalid read from stack off %d+%d size %d\n",
1372 				off, i, size);
1373 			return -EACCES;
1374 		}
1375 		mark_reg_read(env, &reg_state->stack[spi].spilled_ptr,
1376 			      reg_state->stack[spi].spilled_ptr.parent);
1377 		if (value_regno >= 0) {
1378 			if (zeros == size) {
1379 				/* any size read into register is zero extended,
1380 				 * so the whole register == const_zero
1381 				 */
1382 				__mark_reg_const_zero(&state->regs[value_regno]);
1383 			} else {
1384 				/* have read misc data from the stack */
1385 				mark_reg_unknown(env, state->regs, value_regno);
1386 			}
1387 			state->regs[value_regno].live |= REG_LIVE_WRITTEN;
1388 		}
1389 		return 0;
1390 	}
1391 }
1392 
1393 static int check_stack_access(struct bpf_verifier_env *env,
1394 			      const struct bpf_reg_state *reg,
1395 			      int off, int size)
1396 {
1397 	/* Stack accesses must be at a fixed offset, so that we
1398 	 * can determine what type of data was returned. See
1399 	 * check_stack_read().
1400 	 */
1401 	if (!tnum_is_const(reg->var_off)) {
1402 		char tn_buf[48];
1403 
1404 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
1405 		verbose(env, "variable stack access var_off=%s off=%d size=%d\n",
1406 			tn_buf, off, size);
1407 		return -EACCES;
1408 	}
1409 
1410 	if (off >= 0 || off < -MAX_BPF_STACK) {
1411 		verbose(env, "invalid stack off=%d size=%d\n", off, size);
1412 		return -EACCES;
1413 	}
1414 
1415 	return 0;
1416 }
1417 
1418 /* check read/write into map element returned by bpf_map_lookup_elem() */
1419 static int __check_map_access(struct bpf_verifier_env *env, u32 regno, int off,
1420 			      int size, bool zero_size_allowed)
1421 {
1422 	struct bpf_reg_state *regs = cur_regs(env);
1423 	struct bpf_map *map = regs[regno].map_ptr;
1424 
1425 	if (off < 0 || size < 0 || (size == 0 && !zero_size_allowed) ||
1426 	    off + size > map->value_size) {
1427 		verbose(env, "invalid access to map value, value_size=%d off=%d size=%d\n",
1428 			map->value_size, off, size);
1429 		return -EACCES;
1430 	}
1431 	return 0;
1432 }
1433 
1434 /* check read/write into a map element with possible variable offset */
1435 static int check_map_access(struct bpf_verifier_env *env, u32 regno,
1436 			    int off, int size, bool zero_size_allowed)
1437 {
1438 	struct bpf_verifier_state *vstate = env->cur_state;
1439 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
1440 	struct bpf_reg_state *reg = &state->regs[regno];
1441 	int err;
1442 
1443 	/* We may have adjusted the register to this map value, so we
1444 	 * need to try adding each of min_value and max_value to off
1445 	 * to make sure our theoretical access will be safe.
1446 	 */
1447 	if (env->log.level)
1448 		print_verifier_state(env, state);
1449 
1450 	/* The minimum value is only important with signed
1451 	 * comparisons where we can't assume the floor of a
1452 	 * value is 0.  If we are using signed variables for our
1453 	 * indexes we need to make sure that whatever we use
1454 	 * will have a set floor within our range.
1455 	 */
1456 	if (reg->smin_value < 0 &&
1457 	    (reg->smin_value == S64_MIN ||
1458 	     (off + reg->smin_value != (s64)(s32)(off + reg->smin_value)) ||
1459 	      reg->smin_value + off < 0)) {
1460 		verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
1461 			regno);
1462 		return -EACCES;
1463 	}
1464 	err = __check_map_access(env, regno, reg->smin_value + off, size,
1465 				 zero_size_allowed);
1466 	if (err) {
1467 		verbose(env, "R%d min value is outside of the array range\n",
1468 			regno);
1469 		return err;
1470 	}
1471 
1472 	/* If we haven't set a max value then we need to bail since we can't be
1473 	 * sure we won't do bad things.
1474 	 * If reg->umax_value + off could overflow, treat that as unbounded too.
1475 	 */
1476 	if (reg->umax_value >= BPF_MAX_VAR_OFF) {
1477 		verbose(env, "R%d unbounded memory access, make sure to bounds check any array access into a map\n",
1478 			regno);
1479 		return -EACCES;
1480 	}
1481 	err = __check_map_access(env, regno, reg->umax_value + off, size,
1482 				 zero_size_allowed);
1483 	if (err)
1484 		verbose(env, "R%d max value is outside of the array range\n",
1485 			regno);
1486 	return err;
1487 }
1488 
1489 #define MAX_PACKET_OFF 0xffff
1490 
1491 static bool may_access_direct_pkt_data(struct bpf_verifier_env *env,
1492 				       const struct bpf_call_arg_meta *meta,
1493 				       enum bpf_access_type t)
1494 {
1495 	switch (env->prog->type) {
1496 	/* Program types with only direct read access go here! */
1497 	case BPF_PROG_TYPE_LWT_IN:
1498 	case BPF_PROG_TYPE_LWT_OUT:
1499 	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
1500 	case BPF_PROG_TYPE_SK_REUSEPORT:
1501 	case BPF_PROG_TYPE_FLOW_DISSECTOR:
1502 	case BPF_PROG_TYPE_CGROUP_SKB:
1503 		if (t == BPF_WRITE)
1504 			return false;
1505 		/* fallthrough */
1506 
1507 	/* Program types with direct read + write access go here! */
1508 	case BPF_PROG_TYPE_SCHED_CLS:
1509 	case BPF_PROG_TYPE_SCHED_ACT:
1510 	case BPF_PROG_TYPE_XDP:
1511 	case BPF_PROG_TYPE_LWT_XMIT:
1512 	case BPF_PROG_TYPE_SK_SKB:
1513 	case BPF_PROG_TYPE_SK_MSG:
1514 		if (meta)
1515 			return meta->pkt_access;
1516 
1517 		env->seen_direct_write = true;
1518 		return true;
1519 	default:
1520 		return false;
1521 	}
1522 }
1523 
1524 static int __check_packet_access(struct bpf_verifier_env *env, u32 regno,
1525 				 int off, int size, bool zero_size_allowed)
1526 {
1527 	struct bpf_reg_state *regs = cur_regs(env);
1528 	struct bpf_reg_state *reg = &regs[regno];
1529 
1530 	if (off < 0 || size < 0 || (size == 0 && !zero_size_allowed) ||
1531 	    (u64)off + size > reg->range) {
1532 		verbose(env, "invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n",
1533 			off, size, regno, reg->id, reg->off, reg->range);
1534 		return -EACCES;
1535 	}
1536 	return 0;
1537 }
1538 
1539 static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
1540 			       int size, bool zero_size_allowed)
1541 {
1542 	struct bpf_reg_state *regs = cur_regs(env);
1543 	struct bpf_reg_state *reg = &regs[regno];
1544 	int err;
1545 
1546 	/* We may have added a variable offset to the packet pointer; but any
1547 	 * reg->range we have comes after that.  We are only checking the fixed
1548 	 * offset.
1549 	 */
1550 
1551 	/* We don't allow negative numbers, because we aren't tracking enough
1552 	 * detail to prove they're safe.
1553 	 */
1554 	if (reg->smin_value < 0) {
1555 		verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
1556 			regno);
1557 		return -EACCES;
1558 	}
1559 	err = __check_packet_access(env, regno, off, size, zero_size_allowed);
1560 	if (err) {
1561 		verbose(env, "R%d offset is outside of the packet\n", regno);
1562 		return err;
1563 	}
1564 
1565 	/* __check_packet_access has made sure "off + size - 1" is within u16.
1566 	 * reg->umax_value can't be bigger than MAX_PACKET_OFF which is 0xffff,
1567 	 * otherwise find_good_pkt_pointers would have refused to set range info
1568 	 * and __check_packet_access would have rejected this pkt access.
1569 	 * Therefore, "off + reg->umax_value + size - 1" won't overflow u32.
1570 	 */
1571 	env->prog->aux->max_pkt_offset =
1572 		max_t(u32, env->prog->aux->max_pkt_offset,
1573 		      off + reg->umax_value + size - 1);
1574 
1575 	return err;
1576 }
1577 
1578 /* check access to 'struct bpf_context' fields.  Supports fixed offsets only */
1579 static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size,
1580 			    enum bpf_access_type t, enum bpf_reg_type *reg_type)
1581 {
1582 	struct bpf_insn_access_aux info = {
1583 		.reg_type = *reg_type,
1584 	};
1585 
1586 	if (env->ops->is_valid_access &&
1587 	    env->ops->is_valid_access(off, size, t, env->prog, &info)) {
1588 		/* A non-zero info.ctx_field_size indicates that this field is a
1589 		 * candidate for later verifier transformation to load the whole
1590 		 * field and then apply a mask when accessed with a narrower
1591 		 * access than actual ctx access size. A zero info.ctx_field_size
1592 		 * will only allow for whole field access and rejects any other
1593 		 * type of narrower access.
1594 		 */
1595 		*reg_type = info.reg_type;
1596 
1597 		env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
1598 		/* remember the offset of last byte accessed in ctx */
1599 		if (env->prog->aux->max_ctx_offset < off + size)
1600 			env->prog->aux->max_ctx_offset = off + size;
1601 		return 0;
1602 	}
1603 
1604 	verbose(env, "invalid bpf_context access off=%d size=%d\n", off, size);
1605 	return -EACCES;
1606 }
1607 
1608 static int check_flow_keys_access(struct bpf_verifier_env *env, int off,
1609 				  int size)
1610 {
1611 	if (size < 0 || off < 0 ||
1612 	    (u64)off + size > sizeof(struct bpf_flow_keys)) {
1613 		verbose(env, "invalid access to flow keys off=%d size=%d\n",
1614 			off, size);
1615 		return -EACCES;
1616 	}
1617 	return 0;
1618 }
1619 
1620 static int check_sock_access(struct bpf_verifier_env *env, u32 regno, int off,
1621 			     int size, enum bpf_access_type t)
1622 {
1623 	struct bpf_reg_state *regs = cur_regs(env);
1624 	struct bpf_reg_state *reg = &regs[regno];
1625 	struct bpf_insn_access_aux info;
1626 
1627 	if (reg->smin_value < 0) {
1628 		verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
1629 			regno);
1630 		return -EACCES;
1631 	}
1632 
1633 	if (!bpf_sock_is_valid_access(off, size, t, &info)) {
1634 		verbose(env, "invalid bpf_sock access off=%d size=%d\n",
1635 			off, size);
1636 		return -EACCES;
1637 	}
1638 
1639 	return 0;
1640 }
1641 
1642 static bool __is_pointer_value(bool allow_ptr_leaks,
1643 			       const struct bpf_reg_state *reg)
1644 {
1645 	if (allow_ptr_leaks)
1646 		return false;
1647 
1648 	return reg->type != SCALAR_VALUE;
1649 }
1650 
1651 static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno)
1652 {
1653 	return cur_regs(env) + regno;
1654 }
1655 
1656 static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
1657 {
1658 	return __is_pointer_value(env->allow_ptr_leaks, reg_state(env, regno));
1659 }
1660 
1661 static bool is_ctx_reg(struct bpf_verifier_env *env, int regno)
1662 {
1663 	const struct bpf_reg_state *reg = reg_state(env, regno);
1664 
1665 	return reg->type == PTR_TO_CTX ||
1666 	       reg->type == PTR_TO_SOCKET;
1667 }
1668 
1669 static bool is_pkt_reg(struct bpf_verifier_env *env, int regno)
1670 {
1671 	const struct bpf_reg_state *reg = reg_state(env, regno);
1672 
1673 	return type_is_pkt_pointer(reg->type);
1674 }
1675 
1676 static bool is_flow_key_reg(struct bpf_verifier_env *env, int regno)
1677 {
1678 	const struct bpf_reg_state *reg = reg_state(env, regno);
1679 
1680 	/* Kept separate from is_ctx_reg() since we still want to allow BPF_ST here. */
1681 	return reg->type == PTR_TO_FLOW_KEYS;
1682 }
1683 
1684 static int check_pkt_ptr_alignment(struct bpf_verifier_env *env,
1685 				   const struct bpf_reg_state *reg,
1686 				   int off, int size, bool strict)
1687 {
1688 	struct tnum reg_off;
1689 	int ip_align;
1690 
1691 	/* Byte size accesses are always allowed. */
1692 	if (!strict || size == 1)
1693 		return 0;
1694 
1695 	/* For platforms that do not have a Kconfig enabling
1696 	 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the value of
1697 	 * NET_IP_ALIGN is universally set to '2'.  And on platforms
1698 	 * that do set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, we get
1699 	 * to this code only in strict mode where we want to emulate
1700 	 * the NET_IP_ALIGN==2 checking.  Therefore use an
1701 	 * unconditional IP align value of '2'.
1702 	 */
1703 	ip_align = 2;
1704 
1705 	reg_off = tnum_add(reg->var_off, tnum_const(ip_align + reg->off + off));
1706 	if (!tnum_is_aligned(reg_off, size)) {
1707 		char tn_buf[48];
1708 
1709 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
1710 		verbose(env,
1711 			"misaligned packet access off %d+%s+%d+%d size %d\n",
1712 			ip_align, tn_buf, reg->off, off, size);
1713 		return -EACCES;
1714 	}
1715 
1716 	return 0;
1717 }
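
/* Example of the check above: a 4-byte load at the start of the IPv4 header
 * of an Ethernet frame sits at packet offset 14, and 2 + 14 == 16 is 4-byte
 * aligned, so it passes; the same load at offset 15 (2 + 15 == 17) would be
 * rejected as misaligned in strict mode.
 */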
1718 
1719 static int check_generic_ptr_alignment(struct bpf_verifier_env *env,
1720 				       const struct bpf_reg_state *reg,
1721 				       const char *pointer_desc,
1722 				       int off, int size, bool strict)
1723 {
1724 	struct tnum reg_off;
1725 
1726 	/* Byte size accesses are always allowed. */
1727 	if (!strict || size == 1)
1728 		return 0;
1729 
1730 	reg_off = tnum_add(reg->var_off, tnum_const(reg->off + off));
1731 	if (!tnum_is_aligned(reg_off, size)) {
1732 		char tn_buf[48];
1733 
1734 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
1735 		verbose(env, "misaligned %saccess off %s+%d+%d size %d\n",
1736 			pointer_desc, tn_buf, reg->off, off, size);
1737 		return -EACCES;
1738 	}
1739 
1740 	return 0;
1741 }
1742 
1743 static int check_ptr_alignment(struct bpf_verifier_env *env,
1744 			       const struct bpf_reg_state *reg, int off,
1745 			       int size, bool strict_alignment_once)
1746 {
1747 	bool strict = env->strict_alignment || strict_alignment_once;
1748 	const char *pointer_desc = "";
1749 
1750 	switch (reg->type) {
1751 	case PTR_TO_PACKET:
1752 	case PTR_TO_PACKET_META:
1753 		/* Special case, because of NET_IP_ALIGN. Given metadata sits
1754 		 * right in front, treat it the very same way.
1755 		 */
1756 		return check_pkt_ptr_alignment(env, reg, off, size, strict);
1757 	case PTR_TO_FLOW_KEYS:
1758 		pointer_desc = "flow keys ";
1759 		break;
1760 	case PTR_TO_MAP_VALUE:
1761 		pointer_desc = "value ";
1762 		break;
1763 	case PTR_TO_CTX:
1764 		pointer_desc = "context ";
1765 		break;
1766 	case PTR_TO_STACK:
1767 		pointer_desc = "stack ";
1768 		/* The stack spill tracking logic in check_stack_write()
1769 		 * and check_stack_read() relies on stack accesses being
1770 		 * aligned.
1771 		 */
1772 		strict = true;
1773 		break;
1774 	case PTR_TO_SOCKET:
1775 		pointer_desc = "sock ";
1776 		break;
1777 	default:
1778 		break;
1779 	}
1780 	return check_generic_ptr_alignment(env, reg, pointer_desc, off, size,
1781 					   strict);
1782 }
1783 
1784 static int update_stack_depth(struct bpf_verifier_env *env,
1785 			      const struct bpf_func_state *func,
1786 			      int off)
1787 {
1788 	u16 stack = env->subprog_info[func->subprogno].stack_depth;
1789 
1790 	if (stack >= -off)
1791 		return 0;
1792 
1793 	/* update known max for given subprogram */
1794 	env->subprog_info[func->subprogno].stack_depth = -off;
1795 	return 0;
1796 }
1797 
1798 /* Starting from the main bpf function, walk all instructions of the function
1799  * and recursively walk all callees that the given function can call.
1800  * Ignore jump and exit insns.
1801  * Since recursion is prevented by check_cfg(), this algorithm
1802  * only needs a local stack of MAX_CALL_FRAMES to remember callsites.
1803  */
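/* Illustrative walk (MAX_BPF_STACK == 512): if main() uses 100 bytes of
 * stack and calls f1() using 200 bytes, which in turn calls f2() using 250
 * bytes, the combined depth is round_up(100, 32) + round_up(200, 32) +
 * round_up(250, 32) == 128 + 224 + 256 == 608 > 512, so the program is
 * rejected.
 */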
1804 static int check_max_stack_depth(struct bpf_verifier_env *env)
1805 {
1806 	int depth = 0, frame = 0, idx = 0, i = 0, subprog_end;
1807 	struct bpf_subprog_info *subprog = env->subprog_info;
1808 	struct bpf_insn *insn = env->prog->insnsi;
1809 	int ret_insn[MAX_CALL_FRAMES];
1810 	int ret_prog[MAX_CALL_FRAMES];
1811 
1812 process_func:
1813 	/* round up to 32 bytes, since this is the granularity
1814 	 * of the interpreter stack size
1815 	 */
1816 	depth += round_up(max_t(u32, subprog[idx].stack_depth, 1), 32);
1817 	if (depth > MAX_BPF_STACK) {
1818 		verbose(env, "combined stack size of %d calls is %d. Too large\n",
1819 			frame + 1, depth);
1820 		return -EACCES;
1821 	}
1822 continue_func:
1823 	subprog_end = subprog[idx + 1].start;
1824 	for (; i < subprog_end; i++) {
1825 		if (insn[i].code != (BPF_JMP | BPF_CALL))
1826 			continue;
1827 		if (insn[i].src_reg != BPF_PSEUDO_CALL)
1828 			continue;
1829 		/* remember insn and function to return to */
1830 		ret_insn[frame] = i + 1;
1831 		ret_prog[frame] = idx;
1832 
1833 		/* find the callee */
1834 		i = i + insn[i].imm + 1;
1835 		idx = find_subprog(env, i);
1836 		if (idx < 0) {
1837 			WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
1838 				  i);
1839 			return -EFAULT;
1840 		}
1841 		frame++;
1842 		if (frame >= MAX_CALL_FRAMES) {
1843 			WARN_ONCE(1, "verifier bug. Call stack is too deep\n");
1844 			return -EFAULT;
1845 		}
1846 		goto process_func;
1847 	}
1848 	/* reaching the end of the for() loop means the last insn of the
1849 	 * 'subprog' was reached. It doesn't matter whether it was JA or EXIT.
1850 	 */
1851 	if (frame == 0)
1852 		return 0;
1853 	depth -= round_up(max_t(u32, subprog[idx].stack_depth, 1), 32);
1854 	frame--;
1855 	i = ret_insn[frame];
1856 	idx = ret_prog[frame];
1857 	goto continue_func;
1858 }
1859 
1860 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
1861 static int get_callee_stack_depth(struct bpf_verifier_env *env,
1862 				  const struct bpf_insn *insn, int idx)
1863 {
1864 	int start = idx + insn->imm + 1, subprog;
1865 
1866 	subprog = find_subprog(env, start);
1867 	if (subprog < 0) {
1868 		WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
1869 			  start);
1870 		return -EFAULT;
1871 	}
1872 	return env->subprog_info[subprog].stack_depth;
1873 }
1874 #endif
1875 
1876 static int check_ctx_reg(struct bpf_verifier_env *env,
1877 			 const struct bpf_reg_state *reg, int regno)
1878 {
1879 	/* Access to ctx or passing it to a helper is only allowed in
1880 	 * its original, unmodified form.
1881 	 */
1882 
1883 	if (reg->off) {
1884 		verbose(env, "dereference of modified ctx ptr R%d off=%d disallowed\n",
1885 			regno, reg->off);
1886 		return -EACCES;
1887 	}
1888 
1889 	if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
1890 		char tn_buf[48];
1891 
1892 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
1893 		verbose(env, "variable ctx access var_off=%s disallowed\n", tn_buf);
1894 		return -EACCES;
1895 	}
1896 
1897 	return 0;
1898 }
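
/* Sketch of a sequence check_ctx_reg() rejects (register choice is
 * illustrative):
 *   BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),         // r6 = ctx
 *   BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),        // reg->off becomes 8
 *   BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, 0),
 * which fails with "dereference of modified ctx ptr R6 off=8 disallowed".
 */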
1899 
1900 /* truncate register to smaller size (in bytes)
1901  * must be called with size < BPF_REG_SIZE
1902  */
1903 static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
1904 {
1905 	u64 mask;
1906 
1907 	/* clear high bits in bit representation */
1908 	reg->var_off = tnum_cast(reg->var_off, size);
1909 
1910 	/* fix arithmetic bounds */
1911 	mask = ((u64)1 << (size * 8)) - 1;
1912 	if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) {
1913 		reg->umin_value &= mask;
1914 		reg->umax_value &= mask;
1915 	} else {
1916 		reg->umin_value = 0;
1917 		reg->umax_value = mask;
1918 	}
1919 	reg->smin_value = reg->umin_value;
1920 	reg->smax_value = reg->umax_value;
1921 }
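
/* Worked example: coercing a register with umin_value == 0x120 and
 * umax_value == 0x13f to size == 1 keeps the bounds' high bits equal
 * (both 0x100), so the new range is [0x20, 0x3f]; had the high bits
 * differed, the bounds would collapse to the full [0, 0xff] range.
 */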
1922 
1923 /* check whether memory at (regno + off) is accessible for t = (read | write)
1924  * if t==write, value_regno is a register whose value is stored into memory
1925  * if t==read, value_regno is a register which will receive the value from memory
1926  * if t==write && value_regno==-1, some unknown value is stored into memory
1927  * if t==read && value_regno==-1, don't care what we read from memory
1928  */
1929 static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno,
1930 			    int off, int bpf_size, enum bpf_access_type t,
1931 			    int value_regno, bool strict_alignment_once)
1932 {
1933 	struct bpf_reg_state *regs = cur_regs(env);
1934 	struct bpf_reg_state *reg = regs + regno;
1935 	struct bpf_func_state *state;
1936 	int size, err = 0;
1937 
1938 	size = bpf_size_to_bytes(bpf_size);
1939 	if (size < 0)
1940 		return size;
1941 
1942 	/* alignment checks will add in reg->off themselves */
1943 	err = check_ptr_alignment(env, reg, off, size, strict_alignment_once);
1944 	if (err)
1945 		return err;
1946 
1947 	/* for access checks, reg->off is just part of off */
1948 	off += reg->off;
1949 
1950 	if (reg->type == PTR_TO_MAP_VALUE) {
1951 		if (t == BPF_WRITE && value_regno >= 0 &&
1952 		    is_pointer_value(env, value_regno)) {
1953 			verbose(env, "R%d leaks addr into map\n", value_regno);
1954 			return -EACCES;
1955 		}
1956 
1957 		err = check_map_access(env, regno, off, size, false);
1958 		if (!err && t == BPF_READ && value_regno >= 0)
1959 			mark_reg_unknown(env, regs, value_regno);
1960 
1961 	} else if (reg->type == PTR_TO_CTX) {
1962 		enum bpf_reg_type reg_type = SCALAR_VALUE;
1963 
1964 		if (t == BPF_WRITE && value_regno >= 0 &&
1965 		    is_pointer_value(env, value_regno)) {
1966 			verbose(env, "R%d leaks addr into ctx\n", value_regno);
1967 			return -EACCES;
1968 		}
1969 
1970 		err = check_ctx_reg(env, reg, regno);
1971 		if (err < 0)
1972 			return err;
1973 
1974 		err = check_ctx_access(env, insn_idx, off, size, t, &reg_type);
1975 		if (!err && t == BPF_READ && value_regno >= 0) {
1976 			/* ctx access returns either a scalar, or a
1977 			 * PTR_TO_PACKET[_META,_END]. In the latter
1978 			 * case, we know the offset is zero.
1979 			 */
1980 			if (reg_type == SCALAR_VALUE)
1981 				mark_reg_unknown(env, regs, value_regno);
1982 			else
1983 				mark_reg_known_zero(env, regs,
1984 						    value_regno);
1985 			regs[value_regno].type = reg_type;
1986 		}
1987 
1988 	} else if (reg->type == PTR_TO_STACK) {
1989 		off += reg->var_off.value;
1990 		err = check_stack_access(env, reg, off, size);
1991 		if (err)
1992 			return err;
1993 
1994 		state = func(env, reg);
1995 		err = update_stack_depth(env, state, off);
1996 		if (err)
1997 			return err;
1998 
1999 		if (t == BPF_WRITE)
2000 			err = check_stack_write(env, state, off, size,
2001 						value_regno, insn_idx);
2002 		else
2003 			err = check_stack_read(env, state, off, size,
2004 					       value_regno);
2005 	} else if (reg_is_pkt_pointer(reg)) {
2006 		if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) {
2007 			verbose(env, "cannot write into packet\n");
2008 			return -EACCES;
2009 		}
2010 		if (t == BPF_WRITE && value_regno >= 0 &&
2011 		    is_pointer_value(env, value_regno)) {
2012 			verbose(env, "R%d leaks addr into packet\n",
2013 				value_regno);
2014 			return -EACCES;
2015 		}
2016 		err = check_packet_access(env, regno, off, size, false);
2017 		if (!err && t == BPF_READ && value_regno >= 0)
2018 			mark_reg_unknown(env, regs, value_regno);
2019 	} else if (reg->type == PTR_TO_FLOW_KEYS) {
2020 		if (t == BPF_WRITE && value_regno >= 0 &&
2021 		    is_pointer_value(env, value_regno)) {
2022 			verbose(env, "R%d leaks addr into flow keys\n",
2023 				value_regno);
2024 			return -EACCES;
2025 		}
2026 
2027 		err = check_flow_keys_access(env, off, size);
2028 		if (!err && t == BPF_READ && value_regno >= 0)
2029 			mark_reg_unknown(env, regs, value_regno);
2030 	} else if (reg->type == PTR_TO_SOCKET) {
2031 		if (t == BPF_WRITE) {
2032 			verbose(env, "cannot write into socket\n");
2033 			return -EACCES;
2034 		}
2035 		err = check_sock_access(env, regno, off, size, t);
2036 		if (!err && value_regno >= 0)
2037 			mark_reg_unknown(env, regs, value_regno);
2038 	} else {
2039 		verbose(env, "R%d invalid mem access '%s'\n", regno,
2040 			reg_type_str[reg->type]);
2041 		return -EACCES;
2042 	}
2043 
2044 	if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ &&
2045 	    regs[value_regno].type == SCALAR_VALUE) {
2046 		/* b/h/w load zero-extends, mark upper bits as known 0 */
2047 		coerce_reg_to_size(&regs[value_regno], size);
2048 	}
2049 	return err;
2050 }
2051 
2052 static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn)
2053 {
2054 	int err;
2055 
2056 	if ((BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) ||
2057 	    insn->imm != 0) {
2058 		verbose(env, "BPF_XADD uses reserved fields\n");
2059 		return -EINVAL;
2060 	}
2061 
2062 	/* check src1 operand */
2063 	err = check_reg_arg(env, insn->src_reg, SRC_OP);
2064 	if (err)
2065 		return err;
2066 
2067 	/* check src2 operand */
2068 	err = check_reg_arg(env, insn->dst_reg, SRC_OP);
2069 	if (err)
2070 		return err;
2071 
2072 	if (is_pointer_value(env, insn->src_reg)) {
2073 		verbose(env, "R%d leaks addr into mem\n", insn->src_reg);
2074 		return -EACCES;
2075 	}
2076 
2077 	if (is_ctx_reg(env, insn->dst_reg) ||
2078 	    is_pkt_reg(env, insn->dst_reg) ||
2079 	    is_flow_key_reg(env, insn->dst_reg)) {
2080 		verbose(env, "BPF_XADD stores into R%d %s is not allowed\n",
2081 			insn->dst_reg,
2082 			reg_type_str[reg_state(env, insn->dst_reg)->type]);
2083 		return -EACCES;
2084 	}
2085 
2086 	/* check whether atomic_add can read the memory */
2087 	err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
2088 			       BPF_SIZE(insn->code), BPF_READ, -1, true);
2089 	if (err)
2090 		return err;
2091 
2092 	/* check whether atomic_add can write into the same memory */
2093 	return check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
2094 				BPF_SIZE(insn->code), BPF_WRITE, -1, true);
2095 }
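
/* A minimal accepted BPF_XADD sequence (sketch; register and offset choice
 * is illustrative). The stack slot must be initialized first, since
 * check_mem_access() is run for both the read and the write half:
 *   BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),           // init fp[-8]
 *   BPF_MOV64_IMM(BPF_REG_1, 1),
 *   BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_1, -8), // fp[-8] += r1
 */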
2096 
2097 /* when register 'regno' is passed into a function that will read 'access_size'
2098  * bytes from that pointer, make sure that it's within the stack boundary
2099  * and all elements of the stack are initialized.
2100  * Unlike most pointer bounds-checking functions, this one doesn't take an
2101  * 'off' argument, so it has to add in reg->off itself.
2102  */
2103 static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
2104 				int access_size, bool zero_size_allowed,
2105 				struct bpf_call_arg_meta *meta)
2106 {
2107 	struct bpf_reg_state *reg = reg_state(env, regno);
2108 	struct bpf_func_state *state = func(env, reg);
2109 	int off, i, slot, spi;
2110 
2111 	if (reg->type != PTR_TO_STACK) {
2112 		/* Allow zero-byte read from NULL, regardless of pointer type */
2113 		if (zero_size_allowed && access_size == 0 &&
2114 		    register_is_null(reg))
2115 			return 0;
2116 
2117 		verbose(env, "R%d type=%s expected=%s\n", regno,
2118 			reg_type_str[reg->type],
2119 			reg_type_str[PTR_TO_STACK]);
2120 		return -EACCES;
2121 	}
2122 
2123 	/* Only allow fixed-offset stack reads */
2124 	if (!tnum_is_const(reg->var_off)) {
2125 		char tn_buf[48];
2126 
2127 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
2128 		verbose(env, "invalid variable stack read R%d var_off=%s\n",
2129 			regno, tn_buf);
2130 		return -EACCES;
2131 	}
2132 	off = reg->off + reg->var_off.value;
2133 	if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 ||
2134 	    access_size < 0 || (access_size == 0 && !zero_size_allowed)) {
2135 		verbose(env, "invalid stack type R%d off=%d access_size=%d\n",
2136 			regno, off, access_size);
2137 		return -EACCES;
2138 	}
2139 
2140 	if (meta && meta->raw_mode) {
2141 		meta->access_size = access_size;
2142 		meta->regno = regno;
2143 		return 0;
2144 	}
2145 
2146 	for (i = 0; i < access_size; i++) {
2147 		u8 *stype;
2148 
2149 		slot = -(off + i) - 1;
2150 		spi = slot / BPF_REG_SIZE;
2151 		if (state->allocated_stack <= slot)
2152 			goto err;
2153 		stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
2154 		if (*stype == STACK_MISC)
2155 			goto mark;
2156 		if (*stype == STACK_ZERO) {
2157 			/* helper can write anything into the stack */
2158 			*stype = STACK_MISC;
2159 			goto mark;
2160 		}
2161 err:
2162 		verbose(env, "invalid indirect read from stack off %d+%d size %d\n",
2163 			off, i, access_size);
2164 		return -EACCES;
2165 mark:
2166 		/* reading any byte out of 8-byte 'spill_slot' will cause
2167 		 * the whole slot to be marked as 'read'
2168 		 */
2169 		mark_reg_read(env, &state->stack[spi].spilled_ptr,
2170 			      state->stack[spi].spilled_ptr.parent);
2171 	}
2172 	return update_stack_depth(env, state, off);
2173 }
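
/* Example rejection from the loop above (sketch): passing fp[-16] to a
 * non-raw-mode helper with access_size == 16 fails with "invalid indirect
 * read from stack" unless all sixteen bytes were previously written, e.g.
 * by two BPF_ST_MEM(BPF_DW, BPF_REG_10, ...) stores.
 */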
2174 
2175 static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
2176 				   int access_size, bool zero_size_allowed,
2177 				   struct bpf_call_arg_meta *meta)
2178 {
2179 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
2180 
2181 	switch (reg->type) {
2182 	case PTR_TO_PACKET:
2183 	case PTR_TO_PACKET_META:
2184 		return check_packet_access(env, regno, reg->off, access_size,
2185 					   zero_size_allowed);
2186 	case PTR_TO_MAP_VALUE:
2187 		return check_map_access(env, regno, reg->off, access_size,
2188 					zero_size_allowed);
2189 	default: /* scalar_value|ptr_to_stack or invalid ptr */
2190 		return check_stack_boundary(env, regno, access_size,
2191 					    zero_size_allowed, meta);
2192 	}
2193 }
2194 
2195 static bool arg_type_is_mem_ptr(enum bpf_arg_type type)
2196 {
2197 	return type == ARG_PTR_TO_MEM ||
2198 	       type == ARG_PTR_TO_MEM_OR_NULL ||
2199 	       type == ARG_PTR_TO_UNINIT_MEM;
2200 }
2201 
2202 static bool arg_type_is_mem_size(enum bpf_arg_type type)
2203 {
2204 	return type == ARG_CONST_SIZE ||
2205 	       type == ARG_CONST_SIZE_OR_ZERO;
2206 }
2207 
2208 static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
2209 			  enum bpf_arg_type arg_type,
2210 			  struct bpf_call_arg_meta *meta)
2211 {
2212 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
2213 	enum bpf_reg_type expected_type, type = reg->type;
2214 	int err = 0;
2215 
2216 	if (arg_type == ARG_DONTCARE)
2217 		return 0;
2218 
2219 	err = check_reg_arg(env, regno, SRC_OP);
2220 	if (err)
2221 		return err;
2222 
2223 	if (arg_type == ARG_ANYTHING) {
2224 		if (is_pointer_value(env, regno)) {
2225 			verbose(env, "R%d leaks addr into helper function\n",
2226 				regno);
2227 			return -EACCES;
2228 		}
2229 		return 0;
2230 	}
2231 
2232 	if (type_is_pkt_pointer(type) &&
2233 	    !may_access_direct_pkt_data(env, meta, BPF_READ)) {
2234 		verbose(env, "helper access to the packet is not allowed\n");
2235 		return -EACCES;
2236 	}
2237 
2238 	if (arg_type == ARG_PTR_TO_MAP_KEY ||
2239 	    arg_type == ARG_PTR_TO_MAP_VALUE ||
2240 	    arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE) {
2241 		expected_type = PTR_TO_STACK;
2242 		if (!type_is_pkt_pointer(type) && type != PTR_TO_MAP_VALUE &&
2243 		    type != expected_type)
2244 			goto err_type;
2245 	} else if (arg_type == ARG_CONST_SIZE ||
2246 		   arg_type == ARG_CONST_SIZE_OR_ZERO) {
2247 		expected_type = SCALAR_VALUE;
2248 		if (type != expected_type)
2249 			goto err_type;
2250 	} else if (arg_type == ARG_CONST_MAP_PTR) {
2251 		expected_type = CONST_PTR_TO_MAP;
2252 		if (type != expected_type)
2253 			goto err_type;
2254 	} else if (arg_type == ARG_PTR_TO_CTX) {
2255 		expected_type = PTR_TO_CTX;
2256 		if (type != expected_type)
2257 			goto err_type;
2258 		err = check_ctx_reg(env, reg, regno);
2259 		if (err < 0)
2260 			return err;
2261 	} else if (arg_type == ARG_PTR_TO_SOCKET) {
2262 		expected_type = PTR_TO_SOCKET;
2263 		if (type != expected_type)
2264 			goto err_type;
2265 		if (meta->ptr_id || !reg->id) {
2266 			verbose(env, "verifier internal error: mismatched references meta=%d, reg=%d\n",
2267 				meta->ptr_id, reg->id);
2268 			return -EFAULT;
2269 		}
2270 		meta->ptr_id = reg->id;
2271 	} else if (arg_type_is_mem_ptr(arg_type)) {
2272 		expected_type = PTR_TO_STACK;
2273 		/* One exception here. In case the function allows NULL to be
2274 		 * passed in as the argument, it's a SCALAR_VALUE type. The final
2275 		 * test happens during stack boundary checking.
2276 		 */
2277 		if (register_is_null(reg) &&
2278 		    arg_type == ARG_PTR_TO_MEM_OR_NULL)
2279 			/* final test in check_stack_boundary() */;
2280 		else if (!type_is_pkt_pointer(type) &&
2281 			 type != PTR_TO_MAP_VALUE &&
2282 			 type != expected_type)
2283 			goto err_type;
2284 		meta->raw_mode = arg_type == ARG_PTR_TO_UNINIT_MEM;
2285 	} else {
2286 		verbose(env, "unsupported arg_type %d\n", arg_type);
2287 		return -EFAULT;
2288 	}
2289 
2290 	if (arg_type == ARG_CONST_MAP_PTR) {
2291 		/* bpf_map_xxx(map_ptr) call: remember that map_ptr */
2292 		meta->map_ptr = reg->map_ptr;
2293 	} else if (arg_type == ARG_PTR_TO_MAP_KEY) {
2294 		/* bpf_map_xxx(..., map_ptr, ..., key) call:
2295 		 * check that [key, key + map->key_size) are within
2296 		 * stack limits and initialized
2297 		 */
2298 		if (!meta->map_ptr) {
2299 			/* in the function declaration map_ptr must come before
2300 			 * map_key, so that it's verified and known before
2301 			 * we have to check map_key here. Otherwise it means
2302 			 * the kernel subsystem misconfigured the verifier.
2303 			 */
2304 			verbose(env, "invalid map_ptr to access map->key\n");
2305 			return -EACCES;
2306 		}
2307 		err = check_helper_mem_access(env, regno,
2308 					      meta->map_ptr->key_size, false,
2309 					      NULL);
2310 	} else if (arg_type == ARG_PTR_TO_MAP_VALUE ||
2311 		   arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE) {
2312 		/* bpf_map_xxx(..., map_ptr, ..., value) call:
2313 		 * check [value, value + map->value_size) validity
2314 		 */
2315 		if (!meta->map_ptr) {
2316 			/* kernel subsystem misconfigured verifier */
2317 			verbose(env, "invalid map_ptr to access map->value\n");
2318 			return -EACCES;
2319 		}
2320 		meta->raw_mode = (arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE);
2321 		err = check_helper_mem_access(env, regno,
2322 					      meta->map_ptr->value_size, false,
2323 					      meta);
2324 	} else if (arg_type_is_mem_size(arg_type)) {
2325 		bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO);
2326 
2327 		/* remember the mem_size which may be used later
2328 		 * to refine return values.
2329 		 */
2330 		meta->msize_smax_value = reg->smax_value;
2331 		meta->msize_umax_value = reg->umax_value;
2332 
2333 		/* The register is SCALAR_VALUE; the access check
2334 		 * happens using its boundaries.
2335 		 */
2336 		if (!tnum_is_const(reg->var_off))
2337 			/* For unprivileged variable accesses, disable raw
2338 			 * mode so that the program is required to
2339 			 * initialize all the memory that the helper could
2340 			 * just partially fill up.
2341 			 */
2342 			meta = NULL;
2343 
2344 		if (reg->smin_value < 0) {
2345 			verbose(env, "R%d min value is negative, either use unsigned or 'var &= const'\n",
2346 				regno);
2347 			return -EACCES;
2348 		}
2349 
2350 		if (reg->umin_value == 0) {
2351 			err = check_helper_mem_access(env, regno - 1, 0,
2352 						      zero_size_allowed,
2353 						      meta);
2354 			if (err)
2355 				return err;
2356 		}
2357 
2358 		if (reg->umax_value >= BPF_MAX_VAR_SIZ) {
2359 			verbose(env, "R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n",
2360 				regno);
2361 			return -EACCES;
2362 		}
2363 		err = check_helper_mem_access(env, regno - 1,
2364 					      reg->umax_value,
2365 					      zero_size_allowed, meta);
2366 	}
2367 
2368 	return err;
2369 err_type:
2370 	verbose(env, "R%d type=%s expected=%s\n", regno,
2371 		reg_type_str[type], reg_type_str[expected_type]);
2372 	return -EACCES;
2373 }
2374 
2375 static int check_map_func_compatibility(struct bpf_verifier_env *env,
2376 					struct bpf_map *map, int func_id)
2377 {
2378 	if (!map)
2379 		return 0;
2380 
2381 	/* We need a two way check, first is from map perspective ... */
2382 	switch (map->map_type) {
2383 	case BPF_MAP_TYPE_PROG_ARRAY:
2384 		if (func_id != BPF_FUNC_tail_call)
2385 			goto error;
2386 		break;
2387 	case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
2388 		if (func_id != BPF_FUNC_perf_event_read &&
2389 		    func_id != BPF_FUNC_perf_event_output &&
2390 		    func_id != BPF_FUNC_perf_event_read_value)
2391 			goto error;
2392 		break;
2393 	case BPF_MAP_TYPE_STACK_TRACE:
2394 		if (func_id != BPF_FUNC_get_stackid)
2395 			goto error;
2396 		break;
2397 	case BPF_MAP_TYPE_CGROUP_ARRAY:
2398 		if (func_id != BPF_FUNC_skb_under_cgroup &&
2399 		    func_id != BPF_FUNC_current_task_under_cgroup)
2400 			goto error;
2401 		break;
2402 	case BPF_MAP_TYPE_CGROUP_STORAGE:
2403 	case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
2404 		if (func_id != BPF_FUNC_get_local_storage)
2405 			goto error;
2406 		break;
2407 	/* devmap returns a pointer to a live net_device ifindex that we cannot
2408 	 * allow to be modified from the bpf side. So do not allow lookup of
2409 	 * elements for now.
2410 	 */
2411 	case BPF_MAP_TYPE_DEVMAP:
2412 		if (func_id != BPF_FUNC_redirect_map)
2413 			goto error;
2414 		break;
2415 	/* Restrict bpf side of cpumap and xskmap, open when use-cases
2416 	 * appear.
2417 	 */
2418 	case BPF_MAP_TYPE_CPUMAP:
2419 	case BPF_MAP_TYPE_XSKMAP:
2420 		if (func_id != BPF_FUNC_redirect_map)
2421 			goto error;
2422 		break;
2423 	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
2424 	case BPF_MAP_TYPE_HASH_OF_MAPS:
2425 		if (func_id != BPF_FUNC_map_lookup_elem)
2426 			goto error;
2427 		break;
2428 	case BPF_MAP_TYPE_SOCKMAP:
2429 		if (func_id != BPF_FUNC_sk_redirect_map &&
2430 		    func_id != BPF_FUNC_sock_map_update &&
2431 		    func_id != BPF_FUNC_map_delete_elem &&
2432 		    func_id != BPF_FUNC_msg_redirect_map)
2433 			goto error;
2434 		break;
2435 	case BPF_MAP_TYPE_SOCKHASH:
2436 		if (func_id != BPF_FUNC_sk_redirect_hash &&
2437 		    func_id != BPF_FUNC_sock_hash_update &&
2438 		    func_id != BPF_FUNC_map_delete_elem &&
2439 		    func_id != BPF_FUNC_msg_redirect_hash)
2440 			goto error;
2441 		break;
2442 	case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
2443 		if (func_id != BPF_FUNC_sk_select_reuseport)
2444 			goto error;
2445 		break;
2446 	case BPF_MAP_TYPE_QUEUE:
2447 	case BPF_MAP_TYPE_STACK:
2448 		if (func_id != BPF_FUNC_map_peek_elem &&
2449 		    func_id != BPF_FUNC_map_pop_elem &&
2450 		    func_id != BPF_FUNC_map_push_elem)
2451 			goto error;
2452 		break;
2453 	default:
2454 		break;
2455 	}
2456 
2457 	/* ... and second from the function itself. */
2458 	switch (func_id) {
2459 	case BPF_FUNC_tail_call:
2460 		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
2461 			goto error;
2462 		if (env->subprog_cnt > 1) {
2463 			verbose(env, "tail_calls are not allowed in programs with bpf-to-bpf calls\n");
2464 			return -EINVAL;
2465 		}
2466 		break;
2467 	case BPF_FUNC_perf_event_read:
2468 	case BPF_FUNC_perf_event_output:
2469 	case BPF_FUNC_perf_event_read_value:
2470 		if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
2471 			goto error;
2472 		break;
2473 	case BPF_FUNC_get_stackid:
2474 		if (map->map_type != BPF_MAP_TYPE_STACK_TRACE)
2475 			goto error;
2476 		break;
2477 	case BPF_FUNC_current_task_under_cgroup:
2478 	case BPF_FUNC_skb_under_cgroup:
2479 		if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY)
2480 			goto error;
2481 		break;
2482 	case BPF_FUNC_redirect_map:
2483 		if (map->map_type != BPF_MAP_TYPE_DEVMAP &&
2484 		    map->map_type != BPF_MAP_TYPE_CPUMAP &&
2485 		    map->map_type != BPF_MAP_TYPE_XSKMAP)
2486 			goto error;
2487 		break;
2488 	case BPF_FUNC_sk_redirect_map:
2489 	case BPF_FUNC_msg_redirect_map:
2490 	case BPF_FUNC_sock_map_update:
2491 		if (map->map_type != BPF_MAP_TYPE_SOCKMAP)
2492 			goto error;
2493 		break;
2494 	case BPF_FUNC_sk_redirect_hash:
2495 	case BPF_FUNC_msg_redirect_hash:
2496 	case BPF_FUNC_sock_hash_update:
2497 		if (map->map_type != BPF_MAP_TYPE_SOCKHASH)
2498 			goto error;
2499 		break;
2500 	case BPF_FUNC_get_local_storage:
2501 		if (map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
2502 		    map->map_type != BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
2503 			goto error;
2504 		break;
2505 	case BPF_FUNC_sk_select_reuseport:
2506 		if (map->map_type != BPF_MAP_TYPE_REUSEPORT_SOCKARRAY)
2507 			goto error;
2508 		break;
2509 	case BPF_FUNC_map_peek_elem:
2510 	case BPF_FUNC_map_pop_elem:
2511 	case BPF_FUNC_map_push_elem:
2512 		if (map->map_type != BPF_MAP_TYPE_QUEUE &&
2513 		    map->map_type != BPF_MAP_TYPE_STACK)
2514 			goto error;
2515 		break;
2516 	default:
2517 		break;
2518 	}
2519 
2520 	return 0;
2521 error:
2522 	verbose(env, "cannot pass map_type %d into func %s#%d\n",
2523 		map->map_type, func_id_name(func_id), func_id);
2524 	return -EINVAL;
2525 }
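
/* Example of the two-way check (values are illustrative): calling
 * bpf_tail_call() with a BPF_MAP_TYPE_HASH map trips the second switch
 * above and yields "cannot pass map_type 1 into func bpf_tail_call#12".
 */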
2526 
2527 static bool check_raw_mode_ok(const struct bpf_func_proto *fn)
2528 {
2529 	int count = 0;
2530 
2531 	if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM)
2532 		count++;
2533 	if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM)
2534 		count++;
2535 	if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM)
2536 		count++;
2537 	if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM)
2538 		count++;
2539 	if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM)
2540 		count++;
2541 
2542 	/* We only support one arg being in raw mode at the moment,
2543 	 * which is sufficient for the helper functions we have
2544 	 * right now.
2545 	 */
2546 	return count <= 1;
2547 }
2548 
2549 static bool check_args_pair_invalid(enum bpf_arg_type arg_curr,
2550 				    enum bpf_arg_type arg_next)
2551 {
2552 	return (arg_type_is_mem_ptr(arg_curr) &&
2553 	        !arg_type_is_mem_size(arg_next)) ||
2554 	       (!arg_type_is_mem_ptr(arg_curr) &&
2555 		arg_type_is_mem_size(arg_next));
2556 }
2557 
2558 static bool check_arg_pair_ok(const struct bpf_func_proto *fn)
2559 {
2560 	/* bpf_xxx(..., buf, len) call will access 'len'
2561 	 * bytes from memory 'buf'. Both arg types need
2562 	 * to be paired, so make sure there's no buggy
2563 	 * helper function specification.
2564 	 */
2565 	if (arg_type_is_mem_size(fn->arg1_type) ||
2566 	    arg_type_is_mem_ptr(fn->arg5_type)  ||
2567 	    check_args_pair_invalid(fn->arg1_type, fn->arg2_type) ||
2568 	    check_args_pair_invalid(fn->arg2_type, fn->arg3_type) ||
2569 	    check_args_pair_invalid(fn->arg3_type, fn->arg4_type) ||
2570 	    check_args_pair_invalid(fn->arg4_type, fn->arg5_type))
2571 		return false;
2572 
2573 	return true;
2574 }
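
/* For instance, bpf_perf_event_output() pairs ARG_PTR_TO_MEM (arg4, the
 * data buffer) with ARG_CONST_SIZE_OR_ZERO (arg5, its length), satisfying
 * the pairing rule above.
 */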
2575 
2576 static bool check_refcount_ok(const struct bpf_func_proto *fn)
2577 {
2578 	int count = 0;
2579 
2580 	if (arg_type_is_refcounted(fn->arg1_type))
2581 		count++;
2582 	if (arg_type_is_refcounted(fn->arg2_type))
2583 		count++;
2584 	if (arg_type_is_refcounted(fn->arg3_type))
2585 		count++;
2586 	if (arg_type_is_refcounted(fn->arg4_type))
2587 		count++;
2588 	if (arg_type_is_refcounted(fn->arg5_type))
2589 		count++;
2590 
2591 	/* We only support one refcounted arg at the moment,
2592 	 * which is sufficient for the helper functions we have right now.
2593 	 */
2594 	return count <= 1;
2595 }
2596 
2597 static int check_func_proto(const struct bpf_func_proto *fn)
2598 {
2599 	return check_raw_mode_ok(fn) &&
2600 	       check_arg_pair_ok(fn) &&
2601 	       check_refcount_ok(fn) ? 0 : -EINVAL;
2602 }
2603 
2604 /* Packet data might have moved, any old PTR_TO_PACKET[_META,_END]
2605  * are now invalid, so turn them into unknown SCALAR_VALUE.
2606  */
2607 static void __clear_all_pkt_pointers(struct bpf_verifier_env *env,
2608 				     struct bpf_func_state *state)
2609 {
2610 	struct bpf_reg_state *regs = state->regs, *reg;
2611 	int i;
2612 
2613 	for (i = 0; i < MAX_BPF_REG; i++)
2614 		if (reg_is_pkt_pointer_any(&regs[i]))
2615 			mark_reg_unknown(env, regs, i);
2616 
2617 	bpf_for_each_spilled_reg(i, state, reg) {
2618 		if (!reg)
2619 			continue;
2620 		if (reg_is_pkt_pointer_any(reg))
2621 			__mark_reg_unknown(reg);
2622 	}
2623 }
2624 
2625 static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
2626 {
2627 	struct bpf_verifier_state *vstate = env->cur_state;
2628 	int i;
2629 
2630 	for (i = 0; i <= vstate->curframe; i++)
2631 		__clear_all_pkt_pointers(env, vstate->frame[i]);
2632 }
2633 
2634 static void release_reg_references(struct bpf_verifier_env *env,
2635 				   struct bpf_func_state *state, int id)
2636 {
2637 	struct bpf_reg_state *regs = state->regs, *reg;
2638 	int i;
2639 
2640 	for (i = 0; i < MAX_BPF_REG; i++)
2641 		if (regs[i].id == id)
2642 			mark_reg_unknown(env, regs, i);
2643 
2644 	bpf_for_each_spilled_reg(i, state, reg) {
2645 		if (!reg)
2646 			continue;
2647 		if (reg_is_refcounted(reg) && reg->id == id)
2648 			__mark_reg_unknown(reg);
2649 	}
2650 }
2651 
2652 /* The pointer with the specified id has released its reference to kernel
2653  * resources. Identify all copies of the same pointer and clear the reference.
2654  */
2655 static int release_reference(struct bpf_verifier_env *env,
2656 			     struct bpf_call_arg_meta *meta)
2657 {
2658 	struct bpf_verifier_state *vstate = env->cur_state;
2659 	int i;
2660 
2661 	for (i = 0; i <= vstate->curframe; i++)
2662 		release_reg_references(env, vstate->frame[i], meta->ptr_id);
2663 
2664 	return release_reference_state(env, meta->ptr_id);
2665 }
2666 
2667 static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
2668 			   int *insn_idx)
2669 {
2670 	struct bpf_verifier_state *state = env->cur_state;
2671 	struct bpf_func_state *caller, *callee;
2672 	int i, err, subprog, target_insn;
2673 
2674 	if (state->curframe + 1 >= MAX_CALL_FRAMES) {
2675 		verbose(env, "the call stack of %d frames is too deep\n",
2676 			state->curframe + 2);
2677 		return -E2BIG;
2678 	}
2679 
2680 	target_insn = *insn_idx + insn->imm;
2681 	subprog = find_subprog(env, target_insn + 1);
2682 	if (subprog < 0) {
2683 		verbose(env, "verifier bug. No program starts at insn %d\n",
2684 			target_insn + 1);
2685 		return -EFAULT;
2686 	}
2687 
2688 	caller = state->frame[state->curframe];
2689 	if (state->frame[state->curframe + 1]) {
2690 		verbose(env, "verifier bug. Frame %d already allocated\n",
2691 			state->curframe + 1);
2692 		return -EFAULT;
2693 	}
2694 
2695 	callee = kzalloc(sizeof(*callee), GFP_KERNEL);
2696 	if (!callee)
2697 		return -ENOMEM;
2698 	state->frame[state->curframe + 1] = callee;
2699 
2700 	/* callee cannot access r0, r6 - r9 for reading and has to write
2701 	 * into its own stack before reading from it.
2702 	 * callee can read/write into caller's stack
2703 	 */
2704 	init_func_state(env, callee,
2705 			/* remember the callsite, it will be used by bpf_exit */
2706 			*insn_idx /* callsite */,
2707 			state->curframe + 1 /* frameno within this callchain */,
2708 			subprog /* subprog number within this prog */);
2709 
2710 	/* Transfer references to the callee */
2711 	err = transfer_reference_state(callee, caller);
2712 	if (err)
2713 		return err;
2714 
2715 	/* copy r1 - r5 args that callee can access.  The copy includes parent
2716 	 * pointers, which connects us up to the liveness chain
2717 	 */
2718 	for (i = BPF_REG_1; i <= BPF_REG_5; i++)
2719 		callee->regs[i] = caller->regs[i];
2720 
2721 	/* after the call registers r0 - r5 were scratched */
2722 	for (i = 0; i < CALLER_SAVED_REGS; i++) {
2723 		mark_reg_not_init(env, caller->regs, caller_saved[i]);
2724 		check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
2725 	}
2726 
2727 	/* only increment it after check_reg_arg() finished */
2728 	state->curframe++;
2729 
2730 	/* and go analyze first insn of the callee */
2731 	*insn_idx = target_insn;
2732 
2733 	if (env->log.level) {
2734 		verbose(env, "caller:\n");
2735 		print_verifier_state(env, caller);
2736 		verbose(env, "callee:\n");
2737 		print_verifier_state(env, callee);
2738 	}
2739 	return 0;
2740 }
2741 
2742 static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
2743 {
2744 	struct bpf_verifier_state *state = env->cur_state;
2745 	struct bpf_func_state *caller, *callee;
2746 	struct bpf_reg_state *r0;
2747 	int err;
2748 
2749 	callee = state->frame[state->curframe];
2750 	r0 = &callee->regs[BPF_REG_0];
2751 	if (r0->type == PTR_TO_STACK) {
2752 		/* technically it's ok to return caller's stack pointer
2753 		 * (or caller's caller's pointer) back to the caller,
2754 		 * since these pointers are valid. Only the current stack
2755 		 * pointer will be invalid as soon as the function exits,
2756 		 * but let's be conservative.
2757 		 */
2758 		verbose(env, "cannot return stack pointer to the caller\n");
2759 		return -EINVAL;
2760 	}
2761 
2762 	state->curframe--;
2763 	caller = state->frame[state->curframe];
2764 	/* return to the caller whatever r0 had in the callee */
2765 	caller->regs[BPF_REG_0] = *r0;
2766 
2767 	/* Transfer references to the caller */
2768 	err = transfer_reference_state(caller, callee);
2769 	if (err)
2770 		return err;
2771 
2772 	*insn_idx = callee->callsite + 1;
2773 	if (env->log.level) {
2774 		verbose(env, "returning from callee:\n");
2775 		print_verifier_state(env, callee);
2776 		verbose(env, "to caller at %d:\n", *insn_idx);
2777 		print_verifier_state(env, caller);
2778 	}
2779 	/* clear everything in the callee */
2780 	free_func_state(callee);
2781 	state->frame[state->curframe + 1] = NULL;
2782 	return 0;
2783 }
2784 
2785 static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type,
2786 				   int func_id,
2787 				   struct bpf_call_arg_meta *meta)
2788 {
2789 	struct bpf_reg_state *ret_reg = &regs[BPF_REG_0];
2790 
2791 	if (ret_type != RET_INTEGER ||
2792 	    (func_id != BPF_FUNC_get_stack &&
2793 	     func_id != BPF_FUNC_probe_read_str))
2794 		return;
2795 
2796 	ret_reg->smax_value = meta->msize_smax_value;
2797 	ret_reg->umax_value = meta->msize_umax_value;
2798 	__reg_deduce_bounds(ret_reg);
2799 	__reg_bound_offset(ret_reg);
2800 }
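
/* Worked example (sketch): after r0 = bpf_get_stack(ctx, buf, 256, 0), the
 * size argument recorded msize_smax_value/msize_umax_value == 256, so R0's
 * upper bounds are clamped to 256 here; only R0's lower bound (the negative
 * error case) still needs an explicit check before using it as an offset.
 */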
2801 
2802 static int
2803 record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
2804 		int func_id, int insn_idx)
2805 {
2806 	struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
2807 
2808 	if (func_id != BPF_FUNC_tail_call &&
2809 	    func_id != BPF_FUNC_map_lookup_elem &&
2810 	    func_id != BPF_FUNC_map_update_elem &&
2811 	    func_id != BPF_FUNC_map_delete_elem &&
2812 	    func_id != BPF_FUNC_map_push_elem &&
2813 	    func_id != BPF_FUNC_map_pop_elem &&
2814 	    func_id != BPF_FUNC_map_peek_elem)
2815 		return 0;
2816 
2817 	if (meta->map_ptr == NULL) {
2818 		verbose(env, "kernel subsystem misconfigured verifier\n");
2819 		return -EINVAL;
2820 	}
2821 
2822 	if (!BPF_MAP_PTR(aux->map_state))
2823 		bpf_map_ptr_store(aux, meta->map_ptr,
2824 				  meta->map_ptr->unpriv_array);
2825 	else if (BPF_MAP_PTR(aux->map_state) != meta->map_ptr)
2826 		bpf_map_ptr_store(aux, BPF_MAP_PTR_POISON,
2827 				  meta->map_ptr->unpriv_array);
2828 	return 0;
2829 }
2830 
2831 static int check_reference_leak(struct bpf_verifier_env *env)
2832 {
2833 	struct bpf_func_state *state = cur_func(env);
2834 	int i;
2835 
2836 	for (i = 0; i < state->acquired_refs; i++) {
2837 		verbose(env, "Unreleased reference id=%d alloc_insn=%d\n",
2838 			state->refs[i].id, state->refs[i].insn_idx);
2839 	}
2840 	return state->acquired_refs ? -EINVAL : 0;
2841 }
2842 
2843 static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
2844 {
2845 	const struct bpf_func_proto *fn = NULL;
2846 	struct bpf_reg_state *regs;
2847 	struct bpf_call_arg_meta meta;
2848 	bool changes_data;
2849 	int i, err;
2850 
2851 	/* find function prototype */
2852 	if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) {
2853 		verbose(env, "invalid func %s#%d\n", func_id_name(func_id),
2854 			func_id);
2855 		return -EINVAL;
2856 	}
2857 
2858 	if (env->ops->get_func_proto)
2859 		fn = env->ops->get_func_proto(func_id, env->prog);
2860 	if (!fn) {
2861 		verbose(env, "unknown func %s#%d\n", func_id_name(func_id),
2862 			func_id);
2863 		return -EINVAL;
2864 	}
2865 
2866 	/* eBPF programs must be GPL compatible to use GPL-ed functions */
2867 	if (!env->prog->gpl_compatible && fn->gpl_only) {
2868 		verbose(env, "cannot call GPL-restricted function from non-GPL compatible program\n");
2869 		return -EINVAL;
2870 	}
2871 
2872 	/* With LD_ABS/IND some JITs save/restore skb from r1. */
2873 	changes_data = bpf_helper_changes_pkt_data(fn->func);
2874 	if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) {
2875 		verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n",
2876 			func_id_name(func_id), func_id);
2877 		return -EINVAL;
2878 	}
2879 
2880 	memset(&meta, 0, sizeof(meta));
2881 	meta.pkt_access = fn->pkt_access;
2882 
2883 	err = check_func_proto(fn);
2884 	if (err) {
2885 		verbose(env, "kernel subsystem misconfigured func %s#%d\n",
2886 			func_id_name(func_id), func_id);
2887 		return err;
2888 	}
2889 
2890 	/* check args */
2891 	err = check_func_arg(env, BPF_REG_1, fn->arg1_type, &meta);
2892 	if (err)
2893 		return err;
2894 	err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta);
2895 	if (err)
2896 		return err;
2897 	err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta);
2898 	if (err)
2899 		return err;
2900 	err = check_func_arg(env, BPF_REG_4, fn->arg4_type, &meta);
2901 	if (err)
2902 		return err;
2903 	err = check_func_arg(env, BPF_REG_5, fn->arg5_type, &meta);
2904 	if (err)
2905 		return err;
2906 
2907 	err = record_func_map(env, &meta, func_id, insn_idx);
2908 	if (err)
2909 		return err;
2910 
2911 	/* Mark slots with STACK_MISC in case of raw mode, stack offset
2912 	 * is inferred from register state.
2913 	 */
2914 	for (i = 0; i < meta.access_size; i++) {
2915 		err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B,
2916 				       BPF_WRITE, -1, false);
2917 		if (err)
2918 			return err;
2919 	}
2920 
2921 	if (func_id == BPF_FUNC_tail_call) {
2922 		err = check_reference_leak(env);
2923 		if (err) {
2924 			verbose(env, "tail_call would lead to reference leak\n");
2925 			return err;
2926 		}
2927 	} else if (is_release_function(func_id)) {
2928 		err = release_reference(env, &meta);
2929 		if (err)
2930 			return err;
2931 	}
2932 
2933 	regs = cur_regs(env);
2934 
2935 	/* check that the flags argument in get_local_storage(map, flags) is 0;
2936 	 * this is required because get_local_storage() can't return an error.
2937 	 */
2938 	if (func_id == BPF_FUNC_get_local_storage &&
2939 	    !register_is_null(&regs[BPF_REG_2])) {
2940 		verbose(env, "get_local_storage() doesn't support non-zero flags\n");
2941 		return -EINVAL;
2942 	}
2943 
2944 	/* reset caller saved regs */
2945 	for (i = 0; i < CALLER_SAVED_REGS; i++) {
2946 		mark_reg_not_init(env, regs, caller_saved[i]);
2947 		check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
2948 	}
2949 
2950 	/* update return register (already marked as written above) */
2951 	if (fn->ret_type == RET_INTEGER) {
2952 		/* sets type to SCALAR_VALUE */
2953 		mark_reg_unknown(env, regs, BPF_REG_0);
2954 	} else if (fn->ret_type == RET_VOID) {
2955 		regs[BPF_REG_0].type = NOT_INIT;
2956 	} else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL ||
2957 		   fn->ret_type == RET_PTR_TO_MAP_VALUE) {
2958 		/* There is no offset yet applied, variable or fixed */
2959 		mark_reg_known_zero(env, regs, BPF_REG_0);
2960 		/* remember map_ptr, so that check_map_access()
2961 		 * can check 'value_size' boundary of memory access
2962 		 * to map element returned from bpf_map_lookup_elem()
2963 		 */
2964 		if (meta.map_ptr == NULL) {
2965 			verbose(env,
2966 				"kernel subsystem misconfigured verifier\n");
2967 			return -EINVAL;
2968 		}
2969 		regs[BPF_REG_0].map_ptr = meta.map_ptr;
2970 		if (fn->ret_type == RET_PTR_TO_MAP_VALUE) {
2971 			regs[BPF_REG_0].type = PTR_TO_MAP_VALUE;
2972 		} else {
2973 			regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
2974 			regs[BPF_REG_0].id = ++env->id_gen;
2975 		}
2976 	} else if (fn->ret_type == RET_PTR_TO_SOCKET_OR_NULL) {
2977 		int id = acquire_reference_state(env, insn_idx);

2978 		if (id < 0)
2979 			return id;
2980 		mark_reg_known_zero(env, regs, BPF_REG_0);
2981 		regs[BPF_REG_0].type = PTR_TO_SOCKET_OR_NULL;
2982 		regs[BPF_REG_0].id = id;
2983 	} else {
2984 		verbose(env, "unknown return type %d of func %s#%d\n",
2985 			fn->ret_type, func_id_name(func_id), func_id);
2986 		return -EINVAL;
2987 	}
2988 
2989 	do_refine_retval_range(regs, fn->ret_type, func_id, &meta);
2990 
2991 	err = check_map_func_compatibility(env, meta.map_ptr, func_id);
2992 	if (err)
2993 		return err;
2994 
2995 	if (func_id == BPF_FUNC_get_stack && !env->prog->has_callchain_buf) {
2996 		const char *err_str;
2997 
2998 #ifdef CONFIG_PERF_EVENTS
2999 		err = get_callchain_buffers(sysctl_perf_event_max_stack);
3000 		err_str = "cannot get callchain buffer for func %s#%d\n";
3001 #else
3002 		err = -ENOTSUPP;
3003 		err_str = "func %s#%d not supported without CONFIG_PERF_EVENTS\n";
3004 #endif
3005 		if (err) {
3006 			verbose(env, err_str, func_id_name(func_id), func_id);
3007 			return err;
3008 		}
3009 
3010 		env->prog->has_callchain_buf = true;
3011 	}
3012 
3013 	if (changes_data)
3014 		clear_all_pkt_pointers(env);
3015 	return 0;
3016 }
3017 
3018 static bool signed_add_overflows(s64 a, s64 b)
3019 {
3020 	/* Do the add in u64, where overflow is well-defined */
3021 	s64 res = (s64)((u64)a + (u64)b);
3022 
3023 	if (b < 0)
3024 		return res > a;
3025 	return res < a;
3026 }
3027 
3028 static bool signed_sub_overflows(s64 a, s64 b)
3029 {
3030 	/* Do the sub in u64, where overflow is well-defined */
3031 	s64 res = (s64)((u64)a - (u64)b);
3032 
3033 	if (b < 0)
3034 		return res < a;
3035 	return res > a;
3036 }
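
/* Worked example for the helpers above: signed_add_overflows(S64_MAX, 1)
 * computes res == S64_MIN in well-defined u64 arithmetic; since b > 0 and
 * res < a, the overflow is reported. The subtraction helper mirrors this
 * with the sense of the comparison flipped.
 */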
3037 
3038 static bool check_reg_sane_offset(struct bpf_verifier_env *env,
3039 				  const struct bpf_reg_state *reg,
3040 				  enum bpf_reg_type type)
3041 {
3042 	bool known = tnum_is_const(reg->var_off);
3043 	s64 val = reg->var_off.value;
3044 	s64 smin = reg->smin_value;
3045 
3046 	if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) {
3047 		verbose(env, "math between %s pointer and %lld is not allowed\n",
3048 			reg_type_str[type], val);
3049 		return false;
3050 	}
3051 
3052 	if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) {
3053 		verbose(env, "%s pointer offset %d is not allowed\n",
3054 			reg_type_str[type], reg->off);
3055 		return false;
3056 	}
3057 
3058 	if (smin == S64_MIN) {
3059 		verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n",
3060 			reg_type_str[type]);
3061 		return false;
3062 	}
3063 
3064 	if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) {
3065 		verbose(env, "value %lld makes %s pointer be out of bounds\n",
3066 			smin, reg_type_str[type]);
3067 		return false;
3068 	}
3069 
3070 	return true;
3071 }
3072 
3073 static struct bpf_insn_aux_data *cur_aux(struct bpf_verifier_env *env)
3074 {
3075 	return &env->insn_aux_data[env->insn_idx];
3076 }
3077 
3078 static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
3079 			      u32 *ptr_limit, u8 opcode, bool off_is_neg)
3080 {
3081 	bool mask_to_left = (opcode == BPF_ADD &&  off_is_neg) ||
3082 			    (opcode == BPF_SUB && !off_is_neg);
3083 	u32 off;
3084 
3085 	switch (ptr_reg->type) {
3086 	case PTR_TO_STACK:
3087 		off = ptr_reg->off + ptr_reg->var_off.value;
3088 		if (mask_to_left)
3089 			*ptr_limit = MAX_BPF_STACK + off;
3090 		else
3091 			*ptr_limit = -off;
3092 		return 0;
3093 	case PTR_TO_MAP_VALUE:
3094 		if (mask_to_left) {
3095 			*ptr_limit = ptr_reg->umax_value + ptr_reg->off;
3096 		} else {
3097 			off = ptr_reg->smin_value + ptr_reg->off;
3098 			*ptr_limit = ptr_reg->map_ptr->value_size - off;
3099 		}
3100 		return 0;
3101 	default:
3102 		return -EINVAL;
3103 	}
3104 }
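
/* Example limits (assuming MAX_BPF_STACK == 512): for a PTR_TO_STACK
 * pointer at constant offset -64, a BPF_ADD of a positive value may move
 * at most -(-64) == 64 bytes before reaching the frame pointer, while
 * masking to the left is limited to 512 + (-64) == 448 bytes.
 */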
3105 
3106 static int sanitize_ptr_alu(struct bpf_verifier_env *env,
3107 			    struct bpf_insn *insn,
3108 			    const struct bpf_reg_state *ptr_reg,
3109 			    struct bpf_reg_state *dst_reg,
3110 			    bool off_is_neg)
3111 {
3112 	struct bpf_verifier_state *vstate = env->cur_state;
3113 	struct bpf_insn_aux_data *aux = cur_aux(env);
3114 	bool ptr_is_dst_reg = ptr_reg == dst_reg;
3115 	u8 opcode = BPF_OP(insn->code);
3116 	u32 alu_state, alu_limit;
3117 	struct bpf_reg_state tmp;
3118 	bool ret;
3119 
3120 	if (env->allow_ptr_leaks || BPF_SRC(insn->code) == BPF_K)
3121 		return 0;
3122 
3123 	/* We already marked aux for masking from non-speculative
3124 	 * paths, thus we got here in the first place. We only care
3125 	 * to explore bad access from here.
3126 	 */
3127 	if (vstate->speculative)
3128 		goto do_sim;
3129 
3130 	alu_state  = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
3131 	alu_state |= ptr_is_dst_reg ?
3132 		     BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
3133 
3134 	if (retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg))
3135 		return 0;
3136 
3137 	/* If we arrived here from different branches with different
3138 	 * limits to sanitize, then this won't work.
3139 	 */
3140 	if (aux->alu_state &&
3141 	    (aux->alu_state != alu_state ||
3142 	     aux->alu_limit != alu_limit))
3143 		return -EACCES;
3144 
3145 	/* Corresponding fixup done in fixup_bpf_calls(). */
3146 	aux->alu_state = alu_state;
3147 	aux->alu_limit = alu_limit;
3148 
3149 do_sim:
3150 	/* Simulate and find potential out-of-bounds access under
3151 	 * speculative execution from truncation as a result of
3152 	 * masking when off was not within expected range. If off
3153 	 * sits in dst, then we temporarily need to move ptr there
3154 	 * to simulate dst (== 0) +/-= ptr. Needed, for example,
3155 	 * for cases where we use K-based arithmetic in one direction
3156 	 * and truncated reg-based in the other in order to explore
3157 	 * bad access.
3158 	 */
3159 	if (!ptr_is_dst_reg) {
3160 		tmp = *dst_reg;
3161 		*dst_reg = *ptr_reg;
3162 	}
3163 	ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true);
3164 	if (!ptr_is_dst_reg)
3165 		*dst_reg = tmp;
3166 	return !ret ? -EFAULT : 0;
3167 }
3168 
3169 /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
3170  * Caller should also handle BPF_MOV case separately.
3171  * If we return -EACCES, caller may want to try again treating pointer as a
3172  * scalar.  So we only emit a diagnostic if !env->allow_ptr_leaks.
3173  */
3174 static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
3175 				   struct bpf_insn *insn,
3176 				   const struct bpf_reg_state *ptr_reg,
3177 				   const struct bpf_reg_state *off_reg)
3178 {
3179 	struct bpf_verifier_state *vstate = env->cur_state;
3180 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
3181 	struct bpf_reg_state *regs = state->regs, *dst_reg;
3182 	bool known = tnum_is_const(off_reg->var_off);
3183 	s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value,
3184 	    smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
3185 	u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
3186 	    umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
3187 	u32 dst = insn->dst_reg, src = insn->src_reg;
3188 	u8 opcode = BPF_OP(insn->code);
3189 	int ret;
3190 
3191 	dst_reg = &regs[dst];
3192 
3193 	if ((known && (smin_val != smax_val || umin_val != umax_val)) ||
3194 	    smin_val > smax_val || umin_val > umax_val) {
3195 		/* Taint dst register if offset had invalid bounds derived from
3196 		 * e.g. dead branches.
3197 		 */
3198 		__mark_reg_unknown(dst_reg);
3199 		return 0;
3200 	}
3201 
3202 	if (BPF_CLASS(insn->code) != BPF_ALU64) {
3203 		/* 32-bit ALU ops on pointers produce (meaningless) scalars */
3204 		verbose(env,
3205 			"R%d 32-bit pointer arithmetic prohibited\n",
3206 			dst);
3207 		return -EACCES;
3208 	}
3209 
3210 	switch (ptr_reg->type) {
3211 	case PTR_TO_MAP_VALUE_OR_NULL:
3212 		verbose(env, "R%d pointer arithmetic on %s prohibited, null-check it first\n",
3213 			dst, reg_type_str[ptr_reg->type]);
3214 		return -EACCES;
3215 	case CONST_PTR_TO_MAP:
3216 	case PTR_TO_PACKET_END:
3217 	case PTR_TO_SOCKET:
3218 	case PTR_TO_SOCKET_OR_NULL:
3219 		verbose(env, "R%d pointer arithmetic on %s prohibited\n",
3220 			dst, reg_type_str[ptr_reg->type]);
3221 		return -EACCES;
3222 	case PTR_TO_MAP_VALUE:
3223 		if (!env->allow_ptr_leaks && !known && (smin_val < 0) != (smax_val < 0)) {
3224 			verbose(env, "R%d has unknown scalar with mixed signed bounds, pointer arithmetic with it prohibited for !root\n",
3225 				off_reg == dst_reg ? dst : src);
3226 			return -EACCES;
3227 		}
3228 		/* fall-through */
3229 	default:
3230 		break;
3231 	}
3232 
3233 	/* In case of 'scalar += pointer', dst_reg inherits pointer type and id.
3234 	 * The id may be overwritten later if we create a new variable offset.
3235 	 */
3236 	dst_reg->type = ptr_reg->type;
3237 	dst_reg->id = ptr_reg->id;
3238 
3239 	if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) ||
3240 	    !check_reg_sane_offset(env, ptr_reg, ptr_reg->type))
3241 		return -EINVAL;
3242 
3243 	switch (opcode) {
3244 	case BPF_ADD:
3245 		ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0);
3246 		if (ret < 0) {
3247 			verbose(env, "R%d tried to add from different maps or paths\n", dst);
3248 			return ret;
3249 		}
3250 		/* We can take a fixed offset as long as it doesn't overflow
3251 		 * the s32 'off' field
3252 		 */
3253 		if (known && (ptr_reg->off + smin_val ==
3254 			      (s64)(s32)(ptr_reg->off + smin_val))) {
3255 			/* pointer += K.  Accumulate it into fixed offset */
3256 			dst_reg->smin_value = smin_ptr;
3257 			dst_reg->smax_value = smax_ptr;
3258 			dst_reg->umin_value = umin_ptr;
3259 			dst_reg->umax_value = umax_ptr;
3260 			dst_reg->var_off = ptr_reg->var_off;
3261 			dst_reg->off = ptr_reg->off + smin_val;
3262 			dst_reg->raw = ptr_reg->raw;
3263 			break;
3264 		}
3265 		/* A new variable offset is created.  Note that off_reg->off
3266 		 * == 0, since it's a scalar.
3267 		 * dst_reg gets the pointer type and since some positive
3268 		 * integer value was added to the pointer, give it a new 'id'
3269 		 * if it's a PTR_TO_PACKET.
3270 		 * this creates a new 'base' pointer, off_reg (variable) gets
3271 		 * added into the variable offset, and we copy the fixed offset
3272 		 * from ptr_reg.
3273 		 */
3274 		if (signed_add_overflows(smin_ptr, smin_val) ||
3275 		    signed_add_overflows(smax_ptr, smax_val)) {
3276 			dst_reg->smin_value = S64_MIN;
3277 			dst_reg->smax_value = S64_MAX;
3278 		} else {
3279 			dst_reg->smin_value = smin_ptr + smin_val;
3280 			dst_reg->smax_value = smax_ptr + smax_val;
3281 		}
3282 		if (umin_ptr + umin_val < umin_ptr ||
3283 		    umax_ptr + umax_val < umax_ptr) {
3284 			dst_reg->umin_value = 0;
3285 			dst_reg->umax_value = U64_MAX;
3286 		} else {
3287 			dst_reg->umin_value = umin_ptr + umin_val;
3288 			dst_reg->umax_value = umax_ptr + umax_val;
3289 		}
3290 		dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off);
3291 		dst_reg->off = ptr_reg->off;
3292 		dst_reg->raw = ptr_reg->raw;
3293 		if (reg_is_pkt_pointer(ptr_reg)) {
3294 			dst_reg->id = ++env->id_gen;
3295 			/* something was added to pkt_ptr, set range to zero */
3296 			dst_reg->raw = 0;
3297 		}
3298 		break;
3299 	case BPF_SUB:
3300 		ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0);
3301 		if (ret < 0) {
3302 			verbose(env, "R%d tried to sub from different maps or paths\n", dst);
3303 			return ret;
3304 		}
3305 		if (dst_reg == off_reg) {
3306 			/* scalar -= pointer.  Creates an unknown scalar */
3307 			verbose(env, "R%d tried to subtract pointer from scalar\n",
3308 				dst);
3309 			return -EACCES;
3310 		}
		/* We don't allow subtraction from FP, because (according to
		 * the test_verifier.c test "invalid fp arithmetic") JITs might
		 * not be able to deal with it.
3314 		 */
3315 		if (ptr_reg->type == PTR_TO_STACK) {
3316 			verbose(env, "R%d subtraction from stack pointer prohibited\n",
3317 				dst);
3318 			return -EACCES;
3319 		}
3320 		if (known && (ptr_reg->off - smin_val ==
3321 			      (s64)(s32)(ptr_reg->off - smin_val))) {
3322 			/* pointer -= K.  Subtract it from fixed offset */
3323 			dst_reg->smin_value = smin_ptr;
3324 			dst_reg->smax_value = smax_ptr;
3325 			dst_reg->umin_value = umin_ptr;
3326 			dst_reg->umax_value = umax_ptr;
3327 			dst_reg->var_off = ptr_reg->var_off;
3328 			dst_reg->id = ptr_reg->id;
3329 			dst_reg->off = ptr_reg->off - smin_val;
3330 			dst_reg->raw = ptr_reg->raw;
3331 			break;
3332 		}
3333 		/* A new variable offset is created.  If the subtrahend is known
3334 		 * nonnegative, then any reg->range we had before is still good.
3335 		 */
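		/* Sketch of the unsigned check below: if umin_ptr >= umax_val,
		 * even the largest subtrahend cannot wrap the smallest pointer
		 * value, e.g. umin_ptr = 100 and umax_val = 40 give a result
		 * no smaller than 60, so no underflow is possible.
		 */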
3336 		if (signed_sub_overflows(smin_ptr, smax_val) ||
3337 		    signed_sub_overflows(smax_ptr, smin_val)) {
3338 			/* Overflow possible, we know nothing */
3339 			dst_reg->smin_value = S64_MIN;
3340 			dst_reg->smax_value = S64_MAX;
3341 		} else {
3342 			dst_reg->smin_value = smin_ptr - smax_val;
3343 			dst_reg->smax_value = smax_ptr - smin_val;
3344 		}
3345 		if (umin_ptr < umax_val) {
3346 			/* Overflow possible, we know nothing */
3347 			dst_reg->umin_value = 0;
3348 			dst_reg->umax_value = U64_MAX;
3349 		} else {
3350 			/* Cannot overflow (as long as bounds are consistent) */
3351 			dst_reg->umin_value = umin_ptr - umax_val;
3352 			dst_reg->umax_value = umax_ptr - umin_val;
3353 		}
3354 		dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off);
3355 		dst_reg->off = ptr_reg->off;
3356 		dst_reg->raw = ptr_reg->raw;
3357 		if (reg_is_pkt_pointer(ptr_reg)) {
3358 			dst_reg->id = ++env->id_gen;
			/* a potentially negative value was subtracted from
			 * pkt_ptr, i.e. something may effectively have been
			 * added to it, so reset the checked range to zero
			 */
3360 			if (smin_val < 0)
3361 				dst_reg->raw = 0;
3362 		}
3363 		break;
3364 	case BPF_AND:
3365 	case BPF_OR:
3366 	case BPF_XOR:
3367 		/* bitwise ops on pointers are troublesome, prohibit. */
3368 		verbose(env, "R%d bitwise operator %s on pointer prohibited\n",
3369 			dst, bpf_alu_string[opcode >> 4]);
3370 		return -EACCES;
3371 	default:
3372 		/* other operators (e.g. MUL,LSH) produce non-pointer results */
3373 		verbose(env, "R%d pointer arithmetic with %s operator prohibited\n",
3374 			dst, bpf_alu_string[opcode >> 4]);
3375 		return -EACCES;
3376 	}
3377 
3378 	if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type))
3379 		return -EINVAL;
3380 
3381 	__update_reg_bounds(dst_reg);
3382 	__reg_deduce_bounds(dst_reg);
3383 	__reg_bound_offset(dst_reg);
3384 
3385 	/* For unprivileged we require that resulting offset must be in bounds
3386 	 * in order to be able to sanitize access later on.
3387 	 */
3388 	if (!env->allow_ptr_leaks) {
3389 		if (dst_reg->type == PTR_TO_MAP_VALUE &&
3390 		    check_map_access(env, dst, dst_reg->off, 1, false)) {
3391 			verbose(env, "R%d pointer arithmetic of map value goes out of range, "
3392 				"prohibited for !root\n", dst);
3393 			return -EACCES;
3394 		} else if (dst_reg->type == PTR_TO_STACK &&
3395 			   check_stack_access(env, dst_reg, dst_reg->off +
3396 					      dst_reg->var_off.value, 1)) {
3397 			verbose(env, "R%d stack pointer arithmetic goes out of range, "
3398 				"prohibited for !root\n", dst);
3399 			return -EACCES;
3400 		}
3401 	}
3402 
3403 	return 0;
3404 }
3405 
3406 /* WARNING: This function does calculations on 64-bit values, but the actual
3407  * execution may occur on 32-bit values. Therefore, things like bitshifts
3408  * need extra checks in the 32-bit case.
3409  */
3410 static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
3411 				      struct bpf_insn *insn,
3412 				      struct bpf_reg_state *dst_reg,
3413 				      struct bpf_reg_state src_reg)
3414 {
3415 	struct bpf_reg_state *regs = cur_regs(env);
3416 	u8 opcode = BPF_OP(insn->code);
3417 	bool src_known, dst_known;
3418 	s64 smin_val, smax_val;
3419 	u64 umin_val, umax_val;
3420 	u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
3421 
3422 	if (insn_bitness == 32) {
3423 		/* Relevant for 32-bit RSH: Information can propagate towards
3424 		 * LSB, so it isn't sufficient to only truncate the output to
3425 		 * 32 bits.
3426 		 */
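	/* Worked example (illustrative): let dst_reg hold the known
	 * constant 0x100000000 and the insn be a 32-bit BPF_RSH by 1.
	 * Real execution truncates first: (u32)0x100000000 == 0 and
	 * 0 >> 1 == 0.  Shifting the 64-bit value and truncating only
	 * the output would wrongly yield 0x80000000.
	 */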
3427 		coerce_reg_to_size(dst_reg, 4);
3428 		coerce_reg_to_size(&src_reg, 4);
3429 	}
3430 
3431 	smin_val = src_reg.smin_value;
3432 	smax_val = src_reg.smax_value;
3433 	umin_val = src_reg.umin_value;
3434 	umax_val = src_reg.umax_value;
3435 	src_known = tnum_is_const(src_reg.var_off);
3436 	dst_known = tnum_is_const(dst_reg->var_off);
3437 
3438 	if ((src_known && (smin_val != smax_val || umin_val != umax_val)) ||
3439 	    smin_val > smax_val || umin_val > umax_val) {
3440 		/* Taint dst register if offset had invalid bounds derived from
3441 		 * e.g. dead branches.
3442 		 */
3443 		__mark_reg_unknown(dst_reg);
3444 		return 0;
3445 	}
3446 
3447 	if (!src_known &&
3448 	    opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) {
3449 		__mark_reg_unknown(dst_reg);
3450 		return 0;
3451 	}
3452 
3453 	switch (opcode) {
3454 	case BPF_ADD:
3455 		if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
3456 		    signed_add_overflows(dst_reg->smax_value, smax_val)) {
3457 			dst_reg->smin_value = S64_MIN;
3458 			dst_reg->smax_value = S64_MAX;
3459 		} else {
3460 			dst_reg->smin_value += smin_val;
3461 			dst_reg->smax_value += smax_val;
3462 		}
3463 		if (dst_reg->umin_value + umin_val < umin_val ||
3464 		    dst_reg->umax_value + umax_val < umax_val) {
3465 			dst_reg->umin_value = 0;
3466 			dst_reg->umax_value = U64_MAX;
3467 		} else {
3468 			dst_reg->umin_value += umin_val;
3469 			dst_reg->umax_value += umax_val;
3470 		}
3471 		dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);
3472 		break;
3473 	case BPF_SUB:
3474 		if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
3475 		    signed_sub_overflows(dst_reg->smax_value, smin_val)) {
3476 			/* Overflow possible, we know nothing */
3477 			dst_reg->smin_value = S64_MIN;
3478 			dst_reg->smax_value = S64_MAX;
3479 		} else {
3480 			dst_reg->smin_value -= smax_val;
3481 			dst_reg->smax_value -= smin_val;
3482 		}
3483 		if (dst_reg->umin_value < umax_val) {
3484 			/* Overflow possible, we know nothing */
3485 			dst_reg->umin_value = 0;
3486 			dst_reg->umax_value = U64_MAX;
3487 		} else {
3488 			/* Cannot overflow (as long as bounds are consistent) */
3489 			dst_reg->umin_value -= umax_val;
3490 			dst_reg->umax_value -= umin_val;
3491 		}
3492 		dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off);
3493 		break;
3494 	case BPF_MUL:
3495 		dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off);
3496 		if (smin_val < 0 || dst_reg->smin_value < 0) {
3497 			/* Ain't nobody got time to multiply that sign */
3498 			__mark_reg_unbounded(dst_reg);
3499 			__update_reg_bounds(dst_reg);
3500 			break;
3501 		}
3502 		/* Both values are positive, so we can work with unsigned and
3503 		 * copy the result to signed (unless it exceeds S64_MAX).
3504 		 */
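		/* Arithmetic note (illustrative): with both operands at most
		 * U32_MAX the product is at most (2^32 - 1)^2 =
		 * 2^64 - 2^33 + 1, which fits in a u64, so the unsigned
		 * multiplications below cannot wrap.
		 */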
3505 		if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) {
3506 			/* Potential overflow, we know nothing */
3507 			__mark_reg_unbounded(dst_reg);
3508 			/* (except what we can learn from the var_off) */
3509 			__update_reg_bounds(dst_reg);
3510 			break;
3511 		}
3512 		dst_reg->umin_value *= umin_val;
3513 		dst_reg->umax_value *= umax_val;
3514 		if (dst_reg->umax_value > S64_MAX) {
3515 			/* Overflow possible, we know nothing */
3516 			dst_reg->smin_value = S64_MIN;
3517 			dst_reg->smax_value = S64_MAX;
3518 		} else {
3519 			dst_reg->smin_value = dst_reg->umin_value;
3520 			dst_reg->smax_value = dst_reg->umax_value;
3521 		}
3522 		break;
3523 	case BPF_AND:
3524 		if (src_known && dst_known) {
3525 			__mark_reg_known(dst_reg, dst_reg->var_off.value &
3526 						  src_reg.var_off.value);
3527 			break;
3528 		}
3529 		/* We get our minimum from the var_off, since that's inherently
3530 		 * bitwise.  Our maximum is the minimum of the operands' maxima.
3531 		 */
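		/* e.g. (illustrative): dst in [0, 15] with all four low bits
		 * unknown, src known to be 0x0c: tnum_and() leaves only bits
		 * 2 and 3 possibly set, umin becomes the known-set bits (0
		 * here) and umax = min(15, 0x0c) = 12.
		 */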
3532 		dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off);
3533 		dst_reg->umin_value = dst_reg->var_off.value;
3534 		dst_reg->umax_value = min(dst_reg->umax_value, umax_val);
3535 		if (dst_reg->smin_value < 0 || smin_val < 0) {
3536 			/* Lose signed bounds when ANDing negative numbers,
3537 			 * ain't nobody got time for that.
3538 			 */
3539 			dst_reg->smin_value = S64_MIN;
3540 			dst_reg->smax_value = S64_MAX;
3541 		} else {
3542 			/* ANDing two positives gives a positive, so safe to
3543 			 * cast result into s64.
3544 			 */
3545 			dst_reg->smin_value = dst_reg->umin_value;
3546 			dst_reg->smax_value = dst_reg->umax_value;
3547 		}
3548 		/* We may learn something more from the var_off */
3549 		__update_reg_bounds(dst_reg);
3550 		break;
3551 	case BPF_OR:
3552 		if (src_known && dst_known) {
3553 			__mark_reg_known(dst_reg, dst_reg->var_off.value |
3554 						  src_reg.var_off.value);
3555 			break;
3556 		}
3557 		/* We get our maximum from the var_off, and our minimum is the
		 * maximum of the operands' minima.
3559 		 */
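		/* e.g. (illustrative): dst in [0, 15] with the low four bits
		 * unknown, src known to be 0x10: tnum_or() yields bit 4 known
		 * set, so umin = max(0, 16) = 16 and umax = 0x10 | 0xf = 31.
		 */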
3560 		dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off);
3561 		dst_reg->umin_value = max(dst_reg->umin_value, umin_val);
3562 		dst_reg->umax_value = dst_reg->var_off.value |
3563 				      dst_reg->var_off.mask;
3564 		if (dst_reg->smin_value < 0 || smin_val < 0) {
3565 			/* Lose signed bounds when ORing negative numbers,
3566 			 * ain't nobody got time for that.
3567 			 */
3568 			dst_reg->smin_value = S64_MIN;
3569 			dst_reg->smax_value = S64_MAX;
3570 		} else {
3571 			/* ORing two positives gives a positive, so safe to
3572 			 * cast result into s64.
3573 			 */
3574 			dst_reg->smin_value = dst_reg->umin_value;
3575 			dst_reg->smax_value = dst_reg->umax_value;
3576 		}
3577 		/* We may learn something more from the var_off */
3578 		__update_reg_bounds(dst_reg);
3579 		break;
3580 	case BPF_LSH:
3581 		if (umax_val >= insn_bitness) {
3582 			/* Shifts greater than 31 or 63 are undefined.
3583 			 * This includes shifts by a negative number.
3584 			 */
3585 			mark_reg_unknown(env, regs, insn->dst_reg);
3586 			break;
3587 		}
3588 		/* We lose all sign bit information (except what we can pick
3589 		 * up from var_off)
3590 		 */
3591 		dst_reg->smin_value = S64_MIN;
3592 		dst_reg->smax_value = S64_MAX;
3593 		/* If we might shift our top bit out, then we know nothing */
3594 		if (dst_reg->umax_value > 1ULL << (63 - umax_val)) {
3595 			dst_reg->umin_value = 0;
3596 			dst_reg->umax_value = U64_MAX;
3597 		} else {
3598 			dst_reg->umin_value <<= umin_val;
3599 			dst_reg->umax_value <<= umax_val;
3600 		}
3601 		dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val);
3602 		/* We may learn something more from the var_off */
3603 		__update_reg_bounds(dst_reg);
3604 		break;
3605 	case BPF_RSH:
3606 		if (umax_val >= insn_bitness) {
3607 			/* Shifts greater than 31 or 63 are undefined.
3608 			 * This includes shifts by a negative number.
3609 			 */
3610 			mark_reg_unknown(env, regs, insn->dst_reg);
3611 			break;
3612 		}
3613 		/* BPF_RSH is an unsigned shift.  If the value in dst_reg might
3614 		 * be negative, then either:
3615 		 * 1) src_reg might be zero, so the sign bit of the result is
3616 		 *    unknown, so we lose our signed bounds
3617 		 * 2) it's known negative, thus the unsigned bounds capture the
3618 		 *    signed bounds
3619 		 * 3) the signed bounds cross zero, so they tell us nothing
3620 		 *    about the result
3621 		 * If the value in dst_reg is known nonnegative, then again the
		 * unsigned bounds capture the signed bounds.
3623 		 * Thus, in all cases it suffices to blow away our signed bounds
3624 		 * and rely on inferring new ones from the unsigned bounds and
3625 		 * var_off of the result.
3626 		 */
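		/* e.g. (illustrative): dst in [16, 64] shifted right by an
		 * amount in [1, 2]: the new minimum uses the largest shift,
		 * 16 >> 2 = 4, and the new maximum the smallest, 64 >> 1 = 32.
		 */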
3627 		dst_reg->smin_value = S64_MIN;
3628 		dst_reg->smax_value = S64_MAX;
3629 		dst_reg->var_off = tnum_rshift(dst_reg->var_off, umin_val);
3630 		dst_reg->umin_value >>= umax_val;
3631 		dst_reg->umax_value >>= umin_val;
3632 		/* We may learn something more from the var_off */
3633 		__update_reg_bounds(dst_reg);
3634 		break;
3635 	case BPF_ARSH:
3636 		if (umax_val >= insn_bitness) {
3637 			/* Shifts greater than 31 or 63 are undefined.
3638 			 * This includes shifts by a negative number.
3639 			 */
3640 			mark_reg_unknown(env, regs, insn->dst_reg);
3641 			break;
3642 		}
3643 
3644 		/* Upon reaching here, src_known is true and
3645 		 * umax_val is equal to umin_val.
3646 		 */
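		/* e.g. (illustrative): dst in [-8, -4] arithmetically shifted
		 * right by a known 1 gives [-4, -2], since the sign bit is
		 * replicated into the vacated positions.
		 */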
3647 		dst_reg->smin_value >>= umin_val;
3648 		dst_reg->smax_value >>= umin_val;
3649 		dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val);
3650 
3651 		/* blow away the dst_reg umin_value/umax_value and rely on
3652 		 * dst_reg var_off to refine the result.
3653 		 */
3654 		dst_reg->umin_value = 0;
3655 		dst_reg->umax_value = U64_MAX;
3656 		__update_reg_bounds(dst_reg);
3657 		break;
3658 	default:
3659 		mark_reg_unknown(env, regs, insn->dst_reg);
3660 		break;
3661 	}
3662 
3663 	if (BPF_CLASS(insn->code) != BPF_ALU64) {
3664 		/* 32-bit ALU ops are (32,32)->32 */
3665 		coerce_reg_to_size(dst_reg, 4);
3666 	}
3667 
3668 	__reg_deduce_bounds(dst_reg);
3669 	__reg_bound_offset(dst_reg);
3670 	return 0;
3671 }
3672 
3673 /* Handles ALU ops other than BPF_END, BPF_NEG and BPF_MOV: computes new min/max
3674  * and var_off.
3675  */
3676 static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
3677 				   struct bpf_insn *insn)
3678 {
3679 	struct bpf_verifier_state *vstate = env->cur_state;
3680 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
3681 	struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg;
3682 	struct bpf_reg_state *ptr_reg = NULL, off_reg = {0};
3683 	u8 opcode = BPF_OP(insn->code);
3684 
3685 	dst_reg = &regs[insn->dst_reg];
3686 	src_reg = NULL;
3687 	if (dst_reg->type != SCALAR_VALUE)
3688 		ptr_reg = dst_reg;
3689 	if (BPF_SRC(insn->code) == BPF_X) {
3690 		src_reg = &regs[insn->src_reg];
3691 		if (src_reg->type != SCALAR_VALUE) {
3692 			if (dst_reg->type != SCALAR_VALUE) {
3693 				/* Combining two pointers by any ALU op yields
3694 				 * an arbitrary scalar. Disallow all math except
				 * pointer subtraction.
3696 				 */
3697 				if (opcode == BPF_SUB && env->allow_ptr_leaks) {
3698 					mark_reg_unknown(env, regs, insn->dst_reg);
3699 					return 0;
3700 				}
3701 				verbose(env, "R%d pointer %s pointer prohibited\n",
3702 					insn->dst_reg,
3703 					bpf_alu_string[opcode >> 4]);
3704 				return -EACCES;
3705 			} else {
3706 				/* scalar += pointer
3707 				 * This is legal, but we have to reverse our
3708 				 * src/dest handling in computing the range
3709 				 */
3710 				return adjust_ptr_min_max_vals(env, insn,
3711 							       src_reg, dst_reg);
3712 			}
3713 		} else if (ptr_reg) {
3714 			/* pointer += scalar */
3715 			return adjust_ptr_min_max_vals(env, insn,
3716 						       dst_reg, src_reg);
3717 		}
3718 	} else {
3719 		/* Pretend the src is a reg with a known value, since we only
3720 		 * need to be able to read from this state.
3721 		 */
3722 		off_reg.type = SCALAR_VALUE;
3723 		__mark_reg_known(&off_reg, insn->imm);
3724 		src_reg = &off_reg;
3725 		if (ptr_reg) /* pointer += K */
3726 			return adjust_ptr_min_max_vals(env, insn,
3727 						       ptr_reg, src_reg);
3728 	}
3729 
	/* Getting here implies an ALU op on two SCALAR_VALUEs */
3731 	if (WARN_ON_ONCE(ptr_reg)) {
3732 		print_verifier_state(env, state);
3733 		verbose(env, "verifier internal error: unexpected ptr_reg\n");
3734 		return -EINVAL;
3735 	}
3736 	if (WARN_ON(!src_reg)) {
3737 		print_verifier_state(env, state);
3738 		verbose(env, "verifier internal error: no src_reg\n");
3739 		return -EINVAL;
3740 	}
3741 	return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg);
3742 }
3743 
3744 /* check validity of 32-bit and 64-bit arithmetic operations */
3745 static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
3746 {
3747 	struct bpf_reg_state *regs = cur_regs(env);
3748 	u8 opcode = BPF_OP(insn->code);
3749 	int err;
3750 
3751 	if (opcode == BPF_END || opcode == BPF_NEG) {
3752 		if (opcode == BPF_NEG) {
3753 			if (BPF_SRC(insn->code) != 0 ||
3754 			    insn->src_reg != BPF_REG_0 ||
3755 			    insn->off != 0 || insn->imm != 0) {
3756 				verbose(env, "BPF_NEG uses reserved fields\n");
3757 				return -EINVAL;
3758 			}
3759 		} else {
3760 			if (insn->src_reg != BPF_REG_0 || insn->off != 0 ||
3761 			    (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) ||
3762 			    BPF_CLASS(insn->code) == BPF_ALU64) {
3763 				verbose(env, "BPF_END uses reserved fields\n");
3764 				return -EINVAL;
3765 			}
3766 		}
3767 
3768 		/* check src operand */
3769 		err = check_reg_arg(env, insn->dst_reg, SRC_OP);
3770 		if (err)
3771 			return err;
3772 
3773 		if (is_pointer_value(env, insn->dst_reg)) {
3774 			verbose(env, "R%d pointer arithmetic prohibited\n",
3775 				insn->dst_reg);
3776 			return -EACCES;
3777 		}
3778 
3779 		/* check dest operand */
3780 		err = check_reg_arg(env, insn->dst_reg, DST_OP);
3781 		if (err)
3782 			return err;
3783 
3784 	} else if (opcode == BPF_MOV) {
3785 
3786 		if (BPF_SRC(insn->code) == BPF_X) {
3787 			if (insn->imm != 0 || insn->off != 0) {
3788 				verbose(env, "BPF_MOV uses reserved fields\n");
3789 				return -EINVAL;
3790 			}
3791 
3792 			/* check src operand */
3793 			err = check_reg_arg(env, insn->src_reg, SRC_OP);
3794 			if (err)
3795 				return err;
3796 		} else {
3797 			if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
3798 				verbose(env, "BPF_MOV uses reserved fields\n");
3799 				return -EINVAL;
3800 			}
3801 		}
3802 
3803 		/* check dest operand, mark as required later */
3804 		err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
3805 		if (err)
3806 			return err;
3807 
3808 		if (BPF_SRC(insn->code) == BPF_X) {
3809 			struct bpf_reg_state *src_reg = regs + insn->src_reg;
3810 			struct bpf_reg_state *dst_reg = regs + insn->dst_reg;
3811 
3812 			if (BPF_CLASS(insn->code) == BPF_ALU64) {
3813 				/* case: R1 = R2
3814 				 * copy register state to dest reg
3815 				 */
3816 				*dst_reg = *src_reg;
3817 				dst_reg->live |= REG_LIVE_WRITTEN;
3818 			} else {
3819 				/* R1 = (u32) R2 */
3820 				if (is_pointer_value(env, insn->src_reg)) {
3821 					verbose(env,
3822 						"R%d partial copy of pointer\n",
3823 						insn->src_reg);
3824 					return -EACCES;
3825 				} else if (src_reg->type == SCALAR_VALUE) {
3826 					*dst_reg = *src_reg;
3827 					dst_reg->live |= REG_LIVE_WRITTEN;
3828 				} else {
3829 					mark_reg_unknown(env, regs,
3830 							 insn->dst_reg);
3831 				}
3832 				coerce_reg_to_size(dst_reg, 4);
3833 			}
3834 		} else {
3835 			/* case: R = imm
3836 			 * remember the value we stored into this reg
3837 			 */
3838 			/* clear any state __mark_reg_known doesn't set */
3839 			mark_reg_unknown(env, regs, insn->dst_reg);
3840 			regs[insn->dst_reg].type = SCALAR_VALUE;
3841 			if (BPF_CLASS(insn->code) == BPF_ALU64) {
3842 				__mark_reg_known(regs + insn->dst_reg,
3843 						 insn->imm);
3844 			} else {
3845 				__mark_reg_known(regs + insn->dst_reg,
3846 						 (u32)insn->imm);
3847 			}
3848 		}
3849 
3850 	} else if (opcode > BPF_END) {
3851 		verbose(env, "invalid BPF_ALU opcode %x\n", opcode);
3852 		return -EINVAL;
3853 
3854 	} else {	/* all other ALU ops: and, sub, xor, add, ... */
3855 
3856 		if (BPF_SRC(insn->code) == BPF_X) {
3857 			if (insn->imm != 0 || insn->off != 0) {
3858 				verbose(env, "BPF_ALU uses reserved fields\n");
3859 				return -EINVAL;
3860 			}
3861 			/* check src1 operand */
3862 			err = check_reg_arg(env, insn->src_reg, SRC_OP);
3863 			if (err)
3864 				return err;
3865 		} else {
3866 			if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
3867 				verbose(env, "BPF_ALU uses reserved fields\n");
3868 				return -EINVAL;
3869 			}
3870 		}
3871 
3872 		/* check src2 operand */
3873 		err = check_reg_arg(env, insn->dst_reg, SRC_OP);
3874 		if (err)
3875 			return err;
3876 
3877 		if ((opcode == BPF_MOD || opcode == BPF_DIV) &&
3878 		    BPF_SRC(insn->code) == BPF_K && insn->imm == 0) {
3879 			verbose(env, "div by zero\n");
3880 			return -EINVAL;
3881 		}
3882 
3883 		if ((opcode == BPF_LSH || opcode == BPF_RSH ||
3884 		     opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
3885 			int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;
3886 
3887 			if (insn->imm < 0 || insn->imm >= size) {
3888 				verbose(env, "invalid shift %d\n", insn->imm);
3889 				return -EINVAL;
3890 			}
3891 		}
3892 
3893 		/* check dest operand */
3894 		err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
3895 		if (err)
3896 			return err;
3897 
3898 		return adjust_reg_min_max_vals(env, insn);
3899 	}
3900 
3901 	return 0;
3902 }
3903 
3904 static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
3905 				   struct bpf_reg_state *dst_reg,
3906 				   enum bpf_reg_type type,
3907 				   bool range_right_open)
3908 {
3909 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
3910 	struct bpf_reg_state *regs = state->regs, *reg;
3911 	u16 new_range;
3912 	int i, j;
3913 
3914 	if (dst_reg->off < 0 ||
3915 	    (dst_reg->off == 0 && range_right_open))
3916 		/* This doesn't give us any range */
3917 		return;
3918 
3919 	if (dst_reg->umax_value > MAX_PACKET_OFF ||
3920 	    dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF)
3921 		/* Risk of overflow.  For instance, ptr + (1<<63) may be less
3922 		 * than pkt_end, but that's because it's also less than pkt.
3923 		 */
3924 		return;
3925 
3926 	new_range = dst_reg->off;
3927 	if (range_right_open)
3928 		new_range--;
3929 
3930 	/* Examples for register markings:
3931 	 *
3932 	 * pkt_data in dst register:
3933 	 *
3934 	 *   r2 = r3;
3935 	 *   r2 += 8;
3936 	 *   if (r2 > pkt_end) goto <handle exception>
3937 	 *   <access okay>
3938 	 *
3939 	 *   r2 = r3;
3940 	 *   r2 += 8;
3941 	 *   if (r2 < pkt_end) goto <access okay>
3942 	 *   <handle exception>
3943 	 *
3944 	 *   Where:
3945 	 *     r2 == dst_reg, pkt_end == src_reg
3946 	 *     r2=pkt(id=n,off=8,r=0)
3947 	 *     r3=pkt(id=n,off=0,r=0)
3948 	 *
3949 	 * pkt_data in src register:
3950 	 *
3951 	 *   r2 = r3;
3952 	 *   r2 += 8;
3953 	 *   if (pkt_end >= r2) goto <access okay>
3954 	 *   <handle exception>
3955 	 *
3956 	 *   r2 = r3;
3957 	 *   r2 += 8;
3958 	 *   if (pkt_end <= r2) goto <handle exception>
3959 	 *   <access okay>
3960 	 *
3961 	 *   Where:
3962 	 *     pkt_end == dst_reg, r2 == src_reg
3963 	 *     r2=pkt(id=n,off=8,r=0)
3964 	 *     r3=pkt(id=n,off=0,r=0)
3965 	 *
3966 	 * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8)
3967 	 * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8)
3968 	 * and [r3, r3 + 8-1) respectively is safe to access depending on
3969 	 * the check.
3970 	 */
3971 
3972 	/* If our ids match, then we must have the same max_value.  And we
3973 	 * don't care about the other reg's fixed offset, since if it's too big
3974 	 * the range won't allow anything.
3975 	 * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16.
3976 	 */
3977 	for (i = 0; i < MAX_BPF_REG; i++)
3978 		if (regs[i].type == type && regs[i].id == dst_reg->id)
3979 			/* keep the maximum range already checked */
3980 			regs[i].range = max(regs[i].range, new_range);
3981 
3982 	for (j = 0; j <= vstate->curframe; j++) {
3983 		state = vstate->frame[j];
3984 		bpf_for_each_spilled_reg(i, state, reg) {
3985 			if (!reg)
3986 				continue;
3987 			if (reg->type == type && reg->id == dst_reg->id)
3988 				reg->range = max(reg->range, new_range);
3989 		}
3990 	}
3991 }
3992 
3993 /* compute branch direction of the expression "if (reg opcode val) goto target;"
3994  * and return:
3995  *  1 - branch will be taken and "goto target" will be executed
3996  *  0 - branch will not be taken and fall-through to next insn
 * -1 - unknown. Example: "if (reg < 5)" is unknown when the register's value range is [0,10]
3998  */
3999 static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode)
4000 {
4001 	if (__is_pointer_value(false, reg))
4002 		return -1;
4003 
4004 	switch (opcode) {
4005 	case BPF_JEQ:
4006 		if (tnum_is_const(reg->var_off))
4007 			return !!tnum_equals_const(reg->var_off, val);
4008 		break;
4009 	case BPF_JNE:
4010 		if (tnum_is_const(reg->var_off))
4011 			return !tnum_equals_const(reg->var_off, val);
4012 		break;
4013 	case BPF_JSET:
4014 		if ((~reg->var_off.mask & reg->var_off.value) & val)
4015 			return 1;
4016 		if (!((reg->var_off.mask | reg->var_off.value) & val))
4017 			return 0;
4018 		break;
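	/* Example for the BPF_JSET case above (illustrative): for var_off
	 * {value = 0x1, mask = 0x6} the known-set bits are 0x1 and the
	 * possibly-set bits are 0x7, so "if (reg & 0x1)" is always taken,
	 * "if (reg & 0x8)" never is, and "if (reg & 0x4)" stays unknown.
	 */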
4019 	case BPF_JGT:
4020 		if (reg->umin_value > val)
4021 			return 1;
4022 		else if (reg->umax_value <= val)
4023 			return 0;
4024 		break;
4025 	case BPF_JSGT:
4026 		if (reg->smin_value > (s64)val)
4027 			return 1;
4028 		else if (reg->smax_value < (s64)val)
4029 			return 0;
4030 		break;
4031 	case BPF_JLT:
4032 		if (reg->umax_value < val)
4033 			return 1;
4034 		else if (reg->umin_value >= val)
4035 			return 0;
4036 		break;
4037 	case BPF_JSLT:
4038 		if (reg->smax_value < (s64)val)
4039 			return 1;
4040 		else if (reg->smin_value >= (s64)val)
4041 			return 0;
4042 		break;
4043 	case BPF_JGE:
4044 		if (reg->umin_value >= val)
4045 			return 1;
4046 		else if (reg->umax_value < val)
4047 			return 0;
4048 		break;
4049 	case BPF_JSGE:
4050 		if (reg->smin_value >= (s64)val)
4051 			return 1;
4052 		else if (reg->smax_value < (s64)val)
4053 			return 0;
4054 		break;
4055 	case BPF_JLE:
4056 		if (reg->umax_value <= val)
4057 			return 1;
4058 		else if (reg->umin_value > val)
4059 			return 0;
4060 		break;
4061 	case BPF_JSLE:
4062 		if (reg->smax_value <= (s64)val)
4063 			return 1;
4064 		else if (reg->smin_value > (s64)val)
4065 			return 0;
4066 		break;
4067 	}
4068 
4069 	return -1;
4070 }
4071 
4072 /* Adjusts the register min/max values in the case that the dst_reg is the
4073  * variable register that we are working on, and src_reg is a constant or we're
4074  * simply doing a BPF_K check.
4075  * In JEQ/JNE cases we also adjust the var_off values.
4076  */
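/* For example (illustrative): for "if (r1 > 5)" with r1 a scalar, the
 * BPF_JGT case below teaches the true branch r1.umin_value >= 6 (val + 1)
 * and the false branch r1.umax_value <= 5 (val).
 */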
4077 static void reg_set_min_max(struct bpf_reg_state *true_reg,
4078 			    struct bpf_reg_state *false_reg, u64 val,
4079 			    u8 opcode)
4080 {
4081 	/* If the dst_reg is a pointer, we can't learn anything about its
4082 	 * variable offset from the compare (unless src_reg were a pointer into
	 * the same object, but we don't bother with that).
4084 	 * Since false_reg and true_reg have the same type by construction, we
4085 	 * only need to check one of them for pointerness.
4086 	 */
4087 	if (__is_pointer_value(false, false_reg))
4088 		return;
4089 
4090 	switch (opcode) {
4091 	case BPF_JEQ:
4092 		/* If this is false then we know nothing Jon Snow, but if it is
4093 		 * true then we know for sure.
4094 		 */
4095 		__mark_reg_known(true_reg, val);
4096 		break;
4097 	case BPF_JNE:
4098 		/* If this is true we know nothing Jon Snow, but if it is false
		 * we know the value for sure.
4100 		 */
4101 		__mark_reg_known(false_reg, val);
4102 		break;
4103 	case BPF_JSET:
4104 		false_reg->var_off = tnum_and(false_reg->var_off,
4105 					      tnum_const(~val));
4106 		if (is_power_of_2(val))
4107 			true_reg->var_off = tnum_or(true_reg->var_off,
4108 						    tnum_const(val));
4109 		break;
4110 	case BPF_JGT:
4111 		false_reg->umax_value = min(false_reg->umax_value, val);
4112 		true_reg->umin_value = max(true_reg->umin_value, val + 1);
4113 		break;
4114 	case BPF_JSGT:
4115 		false_reg->smax_value = min_t(s64, false_reg->smax_value, val);
4116 		true_reg->smin_value = max_t(s64, true_reg->smin_value, val + 1);
4117 		break;
4118 	case BPF_JLT:
4119 		false_reg->umin_value = max(false_reg->umin_value, val);
4120 		true_reg->umax_value = min(true_reg->umax_value, val - 1);
4121 		break;
4122 	case BPF_JSLT:
4123 		false_reg->smin_value = max_t(s64, false_reg->smin_value, val);
4124 		true_reg->smax_value = min_t(s64, true_reg->smax_value, val - 1);
4125 		break;
4126 	case BPF_JGE:
4127 		false_reg->umax_value = min(false_reg->umax_value, val - 1);
4128 		true_reg->umin_value = max(true_reg->umin_value, val);
4129 		break;
4130 	case BPF_JSGE:
4131 		false_reg->smax_value = min_t(s64, false_reg->smax_value, val - 1);
4132 		true_reg->smin_value = max_t(s64, true_reg->smin_value, val);
4133 		break;
4134 	case BPF_JLE:
4135 		false_reg->umin_value = max(false_reg->umin_value, val + 1);
4136 		true_reg->umax_value = min(true_reg->umax_value, val);
4137 		break;
4138 	case BPF_JSLE:
4139 		false_reg->smin_value = max_t(s64, false_reg->smin_value, val + 1);
4140 		true_reg->smax_value = min_t(s64, true_reg->smax_value, val);
4141 		break;
4142 	default:
4143 		break;
4144 	}
4145 
4146 	__reg_deduce_bounds(false_reg);
4147 	__reg_deduce_bounds(true_reg);
4148 	/* We might have learned some bits from the bounds. */
4149 	__reg_bound_offset(false_reg);
4150 	__reg_bound_offset(true_reg);
4151 	/* Intersecting with the old var_off might have improved our bounds
4152 	 * slightly.  e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
4153 	 * then new var_off is (0; 0x7f...fc) which improves our umax.
4154 	 */
4155 	__update_reg_bounds(false_reg);
4156 	__update_reg_bounds(true_reg);
4157 }
4158 
4159 /* Same as above, but for the case that dst_reg holds a constant and src_reg is
4160  * the variable reg.
4161  */
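/* For example (illustrative): "if (5 > r1)" reaches here with val == 5;
 * the BPF_JGT case below teaches the true branch r1.umax_value <= 4
 * (val - 1) and the false branch r1.umin_value >= 5 (val).
 */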
4162 static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
4163 				struct bpf_reg_state *false_reg, u64 val,
4164 				u8 opcode)
4165 {
4166 	if (__is_pointer_value(false, false_reg))
4167 		return;
4168 
4169 	switch (opcode) {
4170 	case BPF_JEQ:
4171 		/* If this is false then we know nothing Jon Snow, but if it is
4172 		 * true then we know for sure.
4173 		 */
4174 		__mark_reg_known(true_reg, val);
4175 		break;
4176 	case BPF_JNE:
4177 		/* If this is true we know nothing Jon Snow, but if it is false
		 * we know the value for sure.
4179 		 */
4180 		__mark_reg_known(false_reg, val);
4181 		break;
4182 	case BPF_JSET:
4183 		false_reg->var_off = tnum_and(false_reg->var_off,
4184 					      tnum_const(~val));
4185 		if (is_power_of_2(val))
4186 			true_reg->var_off = tnum_or(true_reg->var_off,
4187 						    tnum_const(val));
4188 		break;
4189 	case BPF_JGT:
4190 		true_reg->umax_value = min(true_reg->umax_value, val - 1);
4191 		false_reg->umin_value = max(false_reg->umin_value, val);
4192 		break;
4193 	case BPF_JSGT:
4194 		true_reg->smax_value = min_t(s64, true_reg->smax_value, val - 1);
4195 		false_reg->smin_value = max_t(s64, false_reg->smin_value, val);
4196 		break;
4197 	case BPF_JLT:
4198 		true_reg->umin_value = max(true_reg->umin_value, val + 1);
4199 		false_reg->umax_value = min(false_reg->umax_value, val);
4200 		break;
4201 	case BPF_JSLT:
4202 		true_reg->smin_value = max_t(s64, true_reg->smin_value, val + 1);
4203 		false_reg->smax_value = min_t(s64, false_reg->smax_value, val);
4204 		break;
4205 	case BPF_JGE:
4206 		true_reg->umax_value = min(true_reg->umax_value, val);
4207 		false_reg->umin_value = max(false_reg->umin_value, val + 1);
4208 		break;
4209 	case BPF_JSGE:
4210 		true_reg->smax_value = min_t(s64, true_reg->smax_value, val);
4211 		false_reg->smin_value = max_t(s64, false_reg->smin_value, val + 1);
4212 		break;
4213 	case BPF_JLE:
4214 		true_reg->umin_value = max(true_reg->umin_value, val);
4215 		false_reg->umax_value = min(false_reg->umax_value, val - 1);
4216 		break;
4217 	case BPF_JSLE:
4218 		true_reg->smin_value = max_t(s64, true_reg->smin_value, val);
4219 		false_reg->smax_value = min_t(s64, false_reg->smax_value, val - 1);
4220 		break;
4221 	default:
4222 		break;
4223 	}
4224 
4225 	__reg_deduce_bounds(false_reg);
4226 	__reg_deduce_bounds(true_reg);
4227 	/* We might have learned some bits from the bounds. */
4228 	__reg_bound_offset(false_reg);
4229 	__reg_bound_offset(true_reg);
4230 	/* Intersecting with the old var_off might have improved our bounds
4231 	 * slightly.  e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
4232 	 * then new var_off is (0; 0x7f...fc) which improves our umax.
4233 	 */
4234 	__update_reg_bounds(false_reg);
4235 	__update_reg_bounds(true_reg);
4236 }
4237 
4238 /* Regs are known to be equal, so intersect their min/max/var_off */
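/* e.g. (illustrative): if src is known to lie in [0, 100] and dst's
 * var_off proves a multiple of 4, then after a taken JEQ both registers
 * are known to hold a multiple of 4 no larger than 100.
 */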
4239 static void __reg_combine_min_max(struct bpf_reg_state *src_reg,
4240 				  struct bpf_reg_state *dst_reg)
4241 {
4242 	src_reg->umin_value = dst_reg->umin_value = max(src_reg->umin_value,
4243 							dst_reg->umin_value);
4244 	src_reg->umax_value = dst_reg->umax_value = min(src_reg->umax_value,
4245 							dst_reg->umax_value);
4246 	src_reg->smin_value = dst_reg->smin_value = max(src_reg->smin_value,
4247 							dst_reg->smin_value);
4248 	src_reg->smax_value = dst_reg->smax_value = min(src_reg->smax_value,
4249 							dst_reg->smax_value);
4250 	src_reg->var_off = dst_reg->var_off = tnum_intersect(src_reg->var_off,
4251 							     dst_reg->var_off);
4252 	/* We might have learned new bounds from the var_off. */
4253 	__update_reg_bounds(src_reg);
4254 	__update_reg_bounds(dst_reg);
4255 	/* We might have learned something about the sign bit. */
4256 	__reg_deduce_bounds(src_reg);
4257 	__reg_deduce_bounds(dst_reg);
4258 	/* We might have learned some bits from the bounds. */
4259 	__reg_bound_offset(src_reg);
4260 	__reg_bound_offset(dst_reg);
4261 	/* Intersecting with the old var_off might have improved our bounds
4262 	 * slightly.  e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
4263 	 * then new var_off is (0; 0x7f...fc) which improves our umax.
4264 	 */
4265 	__update_reg_bounds(src_reg);
4266 	__update_reg_bounds(dst_reg);
4267 }
4268 
4269 static void reg_combine_min_max(struct bpf_reg_state *true_src,
4270 				struct bpf_reg_state *true_dst,
4271 				struct bpf_reg_state *false_src,
4272 				struct bpf_reg_state *false_dst,
4273 				u8 opcode)
4274 {
4275 	switch (opcode) {
4276 	case BPF_JEQ:
4277 		__reg_combine_min_max(true_src, true_dst);
4278 		break;
4279 	case BPF_JNE:
4280 		__reg_combine_min_max(false_src, false_dst);
4281 		break;
4282 	}
4283 }
4284 
4285 static void mark_ptr_or_null_reg(struct bpf_func_state *state,
4286 				 struct bpf_reg_state *reg, u32 id,
4287 				 bool is_null)
4288 {
4289 	if (reg_type_may_be_null(reg->type) && reg->id == id) {
4290 		/* Old offset (both fixed and variable parts) should
4291 		 * have been known-zero, because we don't allow pointer
4292 		 * arithmetic on pointers that might be NULL.
4293 		 */
4294 		if (WARN_ON_ONCE(reg->smin_value || reg->smax_value ||
4295 				 !tnum_equals_const(reg->var_off, 0) ||
4296 				 reg->off)) {
4297 			__mark_reg_known_zero(reg);
4298 			reg->off = 0;
4299 		}
4300 		if (is_null) {
4301 			reg->type = SCALAR_VALUE;
4302 		} else if (reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
4303 			if (reg->map_ptr->inner_map_meta) {
4304 				reg->type = CONST_PTR_TO_MAP;
4305 				reg->map_ptr = reg->map_ptr->inner_map_meta;
4306 			} else {
4307 				reg->type = PTR_TO_MAP_VALUE;
4308 			}
4309 		} else if (reg->type == PTR_TO_SOCKET_OR_NULL) {
4310 			reg->type = PTR_TO_SOCKET;
4311 		}
4312 		if (is_null || !reg_is_refcounted(reg)) {
4313 			/* We don't need id from this point onwards anymore,
4314 			 * thus we should better reset it, so that state
4315 			 * pruning has chances to take effect.
4316 			 */
4317 			reg->id = 0;
4318 		}
4319 	}
4320 }
4321 
4322 /* The logic is similar to find_good_pkt_pointers(), both could eventually
4323  * be folded together at some point.
4324  */
4325 static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno,
4326 				  bool is_null)
4327 {
4328 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
4329 	struct bpf_reg_state *reg, *regs = state->regs;
4330 	u32 id = regs[regno].id;
4331 	int i, j;
4332 
4333 	if (reg_is_refcounted_or_null(&regs[regno]) && is_null)
4334 		__release_reference_state(state, id);
4335 
4336 	for (i = 0; i < MAX_BPF_REG; i++)
4337 		mark_ptr_or_null_reg(state, &regs[i], id, is_null);
4338 
4339 	for (j = 0; j <= vstate->curframe; j++) {
4340 		state = vstate->frame[j];
4341 		bpf_for_each_spilled_reg(i, state, reg) {
4342 			if (!reg)
4343 				continue;
4344 			mark_ptr_or_null_reg(state, reg, id, is_null);
4345 		}
4346 	}
4347 }
4348 
4349 static bool try_match_pkt_pointers(const struct bpf_insn *insn,
4350 				   struct bpf_reg_state *dst_reg,
4351 				   struct bpf_reg_state *src_reg,
4352 				   struct bpf_verifier_state *this_branch,
4353 				   struct bpf_verifier_state *other_branch)
4354 {
4355 	if (BPF_SRC(insn->code) != BPF_X)
4356 		return false;
4357 
4358 	switch (BPF_OP(insn->code)) {
4359 	case BPF_JGT:
4360 		if ((dst_reg->type == PTR_TO_PACKET &&
4361 		     src_reg->type == PTR_TO_PACKET_END) ||
4362 		    (dst_reg->type == PTR_TO_PACKET_META &&
4363 		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
4364 			/* pkt_data' > pkt_end, pkt_meta' > pkt_data */
4365 			find_good_pkt_pointers(this_branch, dst_reg,
4366 					       dst_reg->type, false);
4367 		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
4368 			    src_reg->type == PTR_TO_PACKET) ||
4369 			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
4370 			    src_reg->type == PTR_TO_PACKET_META)) {
4371 			/* pkt_end > pkt_data', pkt_data > pkt_meta' */
4372 			find_good_pkt_pointers(other_branch, src_reg,
4373 					       src_reg->type, true);
4374 		} else {
4375 			return false;
4376 		}
4377 		break;
4378 	case BPF_JLT:
4379 		if ((dst_reg->type == PTR_TO_PACKET &&
4380 		     src_reg->type == PTR_TO_PACKET_END) ||
4381 		    (dst_reg->type == PTR_TO_PACKET_META &&
4382 		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
4383 			/* pkt_data' < pkt_end, pkt_meta' < pkt_data */
4384 			find_good_pkt_pointers(other_branch, dst_reg,
4385 					       dst_reg->type, true);
4386 		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
4387 			    src_reg->type == PTR_TO_PACKET) ||
4388 			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
4389 			    src_reg->type == PTR_TO_PACKET_META)) {
			/* pkt_end < pkt_data', pkt_data < pkt_meta' */
4391 			find_good_pkt_pointers(this_branch, src_reg,
4392 					       src_reg->type, false);
4393 		} else {
4394 			return false;
4395 		}
4396 		break;
4397 	case BPF_JGE:
4398 		if ((dst_reg->type == PTR_TO_PACKET &&
4399 		     src_reg->type == PTR_TO_PACKET_END) ||
4400 		    (dst_reg->type == PTR_TO_PACKET_META &&
4401 		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
4402 			/* pkt_data' >= pkt_end, pkt_meta' >= pkt_data */
4403 			find_good_pkt_pointers(this_branch, dst_reg,
4404 					       dst_reg->type, true);
4405 		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
4406 			    src_reg->type == PTR_TO_PACKET) ||
4407 			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
4408 			    src_reg->type == PTR_TO_PACKET_META)) {
4409 			/* pkt_end >= pkt_data', pkt_data >= pkt_meta' */
4410 			find_good_pkt_pointers(other_branch, src_reg,
4411 					       src_reg->type, false);
4412 		} else {
4413 			return false;
4414 		}
4415 		break;
4416 	case BPF_JLE:
4417 		if ((dst_reg->type == PTR_TO_PACKET &&
4418 		     src_reg->type == PTR_TO_PACKET_END) ||
4419 		    (dst_reg->type == PTR_TO_PACKET_META &&
4420 		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
4421 			/* pkt_data' <= pkt_end, pkt_meta' <= pkt_data */
4422 			find_good_pkt_pointers(other_branch, dst_reg,
4423 					       dst_reg->type, false);
4424 		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
4425 			    src_reg->type == PTR_TO_PACKET) ||
4426 			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
4427 			    src_reg->type == PTR_TO_PACKET_META)) {
4428 			/* pkt_end <= pkt_data', pkt_data <= pkt_meta' */
4429 			find_good_pkt_pointers(this_branch, src_reg,
4430 					       src_reg->type, true);
4431 		} else {
4432 			return false;
4433 		}
4434 		break;
4435 	default:
4436 		return false;
4437 	}
4438 
4439 	return true;
4440 }
4441 
4442 static int check_cond_jmp_op(struct bpf_verifier_env *env,
4443 			     struct bpf_insn *insn, int *insn_idx)
4444 {
4445 	struct bpf_verifier_state *this_branch = env->cur_state;
4446 	struct bpf_verifier_state *other_branch;
4447 	struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs;
4448 	struct bpf_reg_state *dst_reg, *other_branch_regs;
4449 	u8 opcode = BPF_OP(insn->code);
4450 	int err;
4451 
4452 	if (opcode > BPF_JSLE) {
4453 		verbose(env, "invalid BPF_JMP opcode %x\n", opcode);
4454 		return -EINVAL;
4455 	}
4456 
4457 	if (BPF_SRC(insn->code) == BPF_X) {
4458 		if (insn->imm != 0) {
4459 			verbose(env, "BPF_JMP uses reserved fields\n");
4460 			return -EINVAL;
4461 		}
4462 
4463 		/* check src1 operand */
4464 		err = check_reg_arg(env, insn->src_reg, SRC_OP);
4465 		if (err)
4466 			return err;
4467 
4468 		if (is_pointer_value(env, insn->src_reg)) {
4469 			verbose(env, "R%d pointer comparison prohibited\n",
4470 				insn->src_reg);
4471 			return -EACCES;
4472 		}
4473 	} else {
4474 		if (insn->src_reg != BPF_REG_0) {
4475 			verbose(env, "BPF_JMP uses reserved fields\n");
4476 			return -EINVAL;
4477 		}
4478 	}
4479 
4480 	/* check src2 operand */
4481 	err = check_reg_arg(env, insn->dst_reg, SRC_OP);
4482 	if (err)
4483 		return err;
4484 
4485 	dst_reg = &regs[insn->dst_reg];
4486 
4487 	if (BPF_SRC(insn->code) == BPF_K) {
4488 		int pred = is_branch_taken(dst_reg, insn->imm, opcode);
4489 
4490 		if (pred == 1) {
			/* only follow the goto, ignore fall-through */
4492 			*insn_idx += insn->off;
4493 			return 0;
4494 		} else if (pred == 0) {
4495 			/* only follow fall-through branch, since
4496 			 * that's where the program will go
4497 			 */
4498 			return 0;
4499 		}
4500 	}
4501 
4502 	other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx,
4503 				  false);
4504 	if (!other_branch)
4505 		return -EFAULT;
4506 	other_branch_regs = other_branch->frame[other_branch->curframe]->regs;
4507 
4508 	/* detect if we are comparing against a constant value so we can adjust
4509 	 * our min/max values for our dst register.
	 * This is only legit if both are scalars (or pointers to the same
4511 	 * object, I suppose, but we don't support that right now), because
4512 	 * otherwise the different base pointers mean the offsets aren't
4513 	 * comparable.
4514 	 */
4515 	if (BPF_SRC(insn->code) == BPF_X) {
4516 		if (dst_reg->type == SCALAR_VALUE &&
4517 		    regs[insn->src_reg].type == SCALAR_VALUE) {
4518 			if (tnum_is_const(regs[insn->src_reg].var_off))
4519 				reg_set_min_max(&other_branch_regs[insn->dst_reg],
4520 						dst_reg, regs[insn->src_reg].var_off.value,
4521 						opcode);
4522 			else if (tnum_is_const(dst_reg->var_off))
4523 				reg_set_min_max_inv(&other_branch_regs[insn->src_reg],
4524 						    &regs[insn->src_reg],
4525 						    dst_reg->var_off.value, opcode);
4526 			else if (opcode == BPF_JEQ || opcode == BPF_JNE)
4527 				/* Comparing for equality, we can combine knowledge */
4528 				reg_combine_min_max(&other_branch_regs[insn->src_reg],
4529 						    &other_branch_regs[insn->dst_reg],
4530 						    &regs[insn->src_reg],
4531 						    &regs[insn->dst_reg], opcode);
4532 		}
4533 	} else if (dst_reg->type == SCALAR_VALUE) {
4534 		reg_set_min_max(&other_branch_regs[insn->dst_reg],
4535 					dst_reg, insn->imm, opcode);
4536 	}
4537 
4538 	/* detect if R == 0 where R is returned from bpf_map_lookup_elem() */
4539 	if (BPF_SRC(insn->code) == BPF_K &&
4540 	    insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) &&
4541 	    reg_type_may_be_null(dst_reg->type)) {
		/* Mark all identical registers in each branch as either
		 * safe or unknown, depending on whether the R == 0 or R != 0
		 * condition holds.
		 */
4545 		mark_ptr_or_null_regs(this_branch, insn->dst_reg,
4546 				      opcode == BPF_JNE);
4547 		mark_ptr_or_null_regs(other_branch, insn->dst_reg,
4548 				      opcode == BPF_JEQ);
4549 	} else if (!try_match_pkt_pointers(insn, dst_reg, &regs[insn->src_reg],
4550 					   this_branch, other_branch) &&
4551 		   is_pointer_value(env, insn->dst_reg)) {
4552 		verbose(env, "R%d pointer comparison prohibited\n",
4553 			insn->dst_reg);
4554 		return -EACCES;
4555 	}
4556 	if (env->log.level)
4557 		print_verifier_state(env, this_branch->frame[this_branch->curframe]);
4558 	return 0;
4559 }
4560 
4561 /* return the map pointer stored inside BPF_LD_IMM64 instruction */
4562 static struct bpf_map *ld_imm64_to_map_ptr(struct bpf_insn *insn)
4563 {
4564 	u64 imm64 = ((u64) (u32) insn[0].imm) | ((u64) (u32) insn[1].imm) << 32;
4565 
4566 	return (struct bpf_map *) (unsigned long) imm64;
4567 }
4568 
4569 /* verify BPF_LD_IMM64 instruction */
4570 static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
4571 {
4572 	struct bpf_reg_state *regs = cur_regs(env);
4573 	int err;
4574 
4575 	if (BPF_SIZE(insn->code) != BPF_DW) {
4576 		verbose(env, "invalid BPF_LD_IMM insn\n");
4577 		return -EINVAL;
4578 	}
4579 	if (insn->off != 0) {
4580 		verbose(env, "BPF_LD_IMM64 uses reserved fields\n");
4581 		return -EINVAL;
4582 	}
4583 
4584 	err = check_reg_arg(env, insn->dst_reg, DST_OP);
4585 	if (err)
4586 		return err;
4587 
4588 	if (insn->src_reg == 0) {
4589 		u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
4590 
4591 		regs[insn->dst_reg].type = SCALAR_VALUE;
4592 		__mark_reg_known(&regs[insn->dst_reg], imm);
4593 		return 0;
4594 	}
4595 
4596 	/* replace_map_fd_with_map_ptr() should have caught bad ld_imm64 */
4597 	BUG_ON(insn->src_reg != BPF_PSEUDO_MAP_FD);
4598 
4599 	regs[insn->dst_reg].type = CONST_PTR_TO_MAP;
4600 	regs[insn->dst_reg].map_ptr = ld_imm64_to_map_ptr(insn);
4601 	return 0;
4602 }
4603 
4604 static bool may_access_skb(enum bpf_prog_type type)
4605 {
4606 	switch (type) {
4607 	case BPF_PROG_TYPE_SOCKET_FILTER:
4608 	case BPF_PROG_TYPE_SCHED_CLS:
4609 	case BPF_PROG_TYPE_SCHED_ACT:
4610 		return true;
4611 	default:
4612 		return false;
4613 	}
4614 }
4615 
4616 /* verify safety of LD_ABS|LD_IND instructions:
 * - they can only appear in programs where ctx == skb
4618  * - since they are wrappers of function calls, they scratch R1-R5 registers,
4619  *   preserve R6-R9, and store return value into R0
4620  *
4621  * Implicit input:
4622  *   ctx == skb == R6 == CTX
4623  *
4624  * Explicit input:
4625  *   SRC == any register
4626  *   IMM == 32-bit immediate
4627  *
4628  * Output:
4629  *   R0 - 8/16/32-bit skb data converted to cpu endianness
4630  */
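/* For example (illustrative), a socket filter may use
 *   BPF_LD_ABS(BPF_H, 12)
 * to fetch the Ethernet protocol field:
 *   R0 = ntohs(*(u16 *)(skb->data + 12))
 */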
4631 static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
4632 {
4633 	struct bpf_reg_state *regs = cur_regs(env);
4634 	u8 mode = BPF_MODE(insn->code);
4635 	int i, err;
4636 
4637 	if (!may_access_skb(env->prog->type)) {
4638 		verbose(env, "BPF_LD_[ABS|IND] instructions not allowed for this program type\n");
4639 		return -EINVAL;
4640 	}
4641 
4642 	if (!env->ops->gen_ld_abs) {
4643 		verbose(env, "bpf verifier is misconfigured\n");
4644 		return -EINVAL;
4645 	}
4646 
4647 	if (env->subprog_cnt > 1) {
		/* When a program has LD_ABS insns, JITs and the interpreter
		 * assume that r1 == ctx == skb, which is not the case for
		 * callees, since they can have arbitrary arguments. It's
		 * problematic for the main prog as well, since JITs would
		 * need to analyze all functions in order to make proper
		 * register save/restore decisions in the main prog. Hence
		 * disallow LD_ABS together with calls.
4654 		 */
4655 		verbose(env, "BPF_LD_[ABS|IND] instructions cannot be mixed with bpf-to-bpf calls\n");
4656 		return -EINVAL;
4657 	}
4658 
4659 	if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
4660 	    BPF_SIZE(insn->code) == BPF_DW ||
4661 	    (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
4662 		verbose(env, "BPF_LD_[ABS|IND] uses reserved fields\n");
4663 		return -EINVAL;
4664 	}
4665 
4666 	/* check whether implicit source operand (register R6) is readable */
4667 	err = check_reg_arg(env, BPF_REG_6, SRC_OP);
4668 	if (err)
4669 		return err;
4670 
4671 	/* Disallow usage of BPF_LD_[ABS|IND] with reference tracking, as
4672 	 * gen_ld_abs() may terminate the program at runtime, leading to
4673 	 * reference leak.
4674 	 */
4675 	err = check_reference_leak(env);
4676 	if (err) {
4677 		verbose(env, "BPF_LD_[ABS|IND] cannot be mixed with socket references\n");
4678 		return err;
4679 	}
4680 
4681 	if (regs[BPF_REG_6].type != PTR_TO_CTX) {
4682 		verbose(env,
4683 			"at the time of BPF_LD_ABS|IND R6 != pointer to skb\n");
4684 		return -EINVAL;
4685 	}
4686 
4687 	if (mode == BPF_IND) {
4688 		/* check explicit source operand */
4689 		err = check_reg_arg(env, insn->src_reg, SRC_OP);
4690 		if (err)
4691 			return err;
4692 	}
4693 
4694 	/* reset caller saved regs to unreadable */
4695 	for (i = 0; i < CALLER_SAVED_REGS; i++) {
4696 		mark_reg_not_init(env, regs, caller_saved[i]);
4697 		check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
4698 	}
4699 
4700 	/* mark destination R0 register as readable, since it contains
4701 	 * the value fetched from the packet.
4702 	 * Already marked as written above.
4703 	 */
4704 	mark_reg_unknown(env, regs, BPF_REG_0);
4705 	return 0;
4706 }
4707 
4708 static int check_return_code(struct bpf_verifier_env *env)
4709 {
4710 	struct bpf_reg_state *reg;
4711 	struct tnum range = tnum_range(0, 1);
4712 
4713 	switch (env->prog->type) {
4714 	case BPF_PROG_TYPE_CGROUP_SKB:
4715 	case BPF_PROG_TYPE_CGROUP_SOCK:
4716 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
4717 	case BPF_PROG_TYPE_SOCK_OPS:
4718 	case BPF_PROG_TYPE_CGROUP_DEVICE:
4719 		break;
4720 	default:
4721 		return 0;
4722 	}
4723 
4724 	reg = cur_regs(env) + BPF_REG_0;
4725 	if (reg->type != SCALAR_VALUE) {
4726 		verbose(env, "At program exit the register R0 is not a known value (%s)\n",
4727 			reg_type_str[reg->type]);
4728 		return -EINVAL;
4729 	}
4730 
4731 	if (!tnum_in(range, reg->var_off)) {
4732 		verbose(env, "At program exit the register R0 ");
4733 		if (!tnum_is_unknown(reg->var_off)) {
4734 			char tn_buf[48];
4735 
4736 			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
4737 			verbose(env, "has value %s", tn_buf);
4738 		} else {
4739 			verbose(env, "has unknown scalar value");
4740 		}
4741 		verbose(env, " should have been 0 or 1\n");
4742 		return -EINVAL;
4743 	}
4744 	return 0;
4745 }
4746 
4747 /* non-recursive DFS pseudo code
4748  * 1  procedure DFS-iterative(G,v):
4749  * 2      label v as discovered
4750  * 3      let S be a stack
4751  * 4      S.push(v)
4752  * 5      while S is not empty
4753  * 6            t <- S.pop()
4754  * 7            if t is what we're looking for:
4755  * 8                return t
4756  * 9            for all edges e in G.adjacentEdges(t) do
4757  * 10               if edge e is already labelled
4758  * 11                   continue with the next edge
4759  * 12               w <- G.adjacentVertex(t,e)
4760  * 13               if vertex w is not discovered and not explored
4761  * 14                   label e as tree-edge
4762  * 15                   label w as discovered
4763  * 16                   S.push(w)
4764  * 17                   continue at 5
4765  * 18               else if vertex w is discovered
4766  * 19                   label e as back-edge
4767  * 20               else
4768  * 21                   // vertex w is explored
4769  * 22                   label e as forward- or cross-edge
4770  * 23           label t as explored
4771  * 24           S.pop()
4772  *
4773  * convention:
4774  * 0x10 - discovered
4775  * 0x11 - discovered and fall-through edge labelled
4776  * 0x12 - discovered and fall-through and branch edges labelled
4777  * 0x20 - explored
4778  */
4779 
4780 enum {
4781 	DISCOVERED = 0x10,
4782 	EXPLORED = 0x20,
4783 	FALLTHROUGH = 1,
4784 	BRANCH = 2,
4785 };
4786 
4787 #define STATE_LIST_MARK ((struct bpf_verifier_state_list *) -1L)
4788 
4789 static int *insn_stack;	/* stack of insns to process */
4790 static int cur_stack;	/* current stack index */
4791 static int *insn_state;
4792 
4793 /* t, w, e - match pseudo-code above:
4794  * t - index of current instruction
4795  * w - next instruction
4796  * e - edge
4797  */
4798 static int push_insn(int t, int w, int e, struct bpf_verifier_env *env)
4799 {
4800 	if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH))
4801 		return 0;
4802 
4803 	if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH))
4804 		return 0;
4805 
4806 	if (w < 0 || w >= env->prog->len) {
4807 		verbose_linfo(env, t, "%d: ", t);
4808 		verbose(env, "jump out of range from insn %d to %d\n", t, w);
4809 		return -EINVAL;
4810 	}
4811 
4812 	if (e == BRANCH)
4813 		/* mark branch target for state pruning */
4814 		env->explored_states[w] = STATE_LIST_MARK;
4815 
4816 	if (insn_state[w] == 0) {
4817 		/* tree-edge */
4818 		insn_state[t] = DISCOVERED | e;
4819 		insn_state[w] = DISCOVERED;
4820 		if (cur_stack >= env->prog->len)
4821 			return -E2BIG;
4822 		insn_stack[cur_stack++] = w;
4823 		return 1;
4824 	} else if ((insn_state[w] & 0xF0) == DISCOVERED) {
4825 		verbose_linfo(env, t, "%d: ", t);
4826 		verbose_linfo(env, w, "%d: ", w);
4827 		verbose(env, "back-edge from insn %d to %d\n", t, w);
4828 		return -EINVAL;
4829 	} else if (insn_state[w] == EXPLORED) {
4830 		/* forward- or cross-edge */
4831 		insn_state[t] = DISCOVERED | e;
4832 	} else {
4833 		verbose(env, "insn state internal bug\n");
4834 		return -EFAULT;
4835 	}
4836 	return 0;
4837 }
4838 
4839 /* non-recursive depth-first-search to detect loops in BPF program
4840  * loop == back-edge in directed graph
4841  */
4842 static int check_cfg(struct bpf_verifier_env *env)
4843 {
4844 	struct bpf_insn *insns = env->prog->insnsi;
4845 	int insn_cnt = env->prog->len;
4846 	int ret = 0;
4847 	int i, t;
4848 
4849 	insn_state = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
4850 	if (!insn_state)
4851 		return -ENOMEM;
4852 
4853 	insn_stack = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
4854 	if (!insn_stack) {
4855 		kfree(insn_state);
4856 		return -ENOMEM;
4857 	}
4858 
4859 	insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */
4860 	insn_stack[0] = 0; /* 0 is the first instruction */
4861 	cur_stack = 1;
4862 
4863 peek_stack:
4864 	if (cur_stack == 0)
4865 		goto check_state;
4866 	t = insn_stack[cur_stack - 1];
4867 
4868 	if (BPF_CLASS(insns[t].code) == BPF_JMP) {
4869 		u8 opcode = BPF_OP(insns[t].code);
4870 
4871 		if (opcode == BPF_EXIT) {
4872 			goto mark_explored;
4873 		} else if (opcode == BPF_CALL) {
4874 			ret = push_insn(t, t + 1, FALLTHROUGH, env);
4875 			if (ret == 1)
4876 				goto peek_stack;
4877 			else if (ret < 0)
4878 				goto err_free;
4879 			if (t + 1 < insn_cnt)
4880 				env->explored_states[t + 1] = STATE_LIST_MARK;
4881 			if (insns[t].src_reg == BPF_PSEUDO_CALL) {
4882 				env->explored_states[t] = STATE_LIST_MARK;
4883 				ret = push_insn(t, t + insns[t].imm + 1, BRANCH, env);
4884 				if (ret == 1)
4885 					goto peek_stack;
4886 				else if (ret < 0)
4887 					goto err_free;
4888 			}
4889 		} else if (opcode == BPF_JA) {
4890 			if (BPF_SRC(insns[t].code) != BPF_K) {
4891 				ret = -EINVAL;
4892 				goto err_free;
4893 			}
4894 			/* unconditional jump with single edge */
4895 			ret = push_insn(t, t + insns[t].off + 1,
4896 					FALLTHROUGH, env);
4897 			if (ret == 1)
4898 				goto peek_stack;
4899 			else if (ret < 0)
4900 				goto err_free;
4901 			/* tell verifier to check for equivalent states
4902 			 * after every call and jump
4903 			 */
4904 			if (t + 1 < insn_cnt)
4905 				env->explored_states[t + 1] = STATE_LIST_MARK;
4906 		} else {
4907 			/* conditional jump with two edges */
4908 			env->explored_states[t] = STATE_LIST_MARK;
4909 			ret = push_insn(t, t + 1, FALLTHROUGH, env);
4910 			if (ret == 1)
4911 				goto peek_stack;
4912 			else if (ret < 0)
4913 				goto err_free;
4914 
4915 			ret = push_insn(t, t + insns[t].off + 1, BRANCH, env);
4916 			if (ret == 1)
4917 				goto peek_stack;
4918 			else if (ret < 0)
4919 				goto err_free;
4920 		}
4921 	} else {
4922 		/* all other non-branch instructions with single
4923 		 * fall-through edge
4924 		 */
4925 		ret = push_insn(t, t + 1, FALLTHROUGH, env);
4926 		if (ret == 1)
4927 			goto peek_stack;
4928 		else if (ret < 0)
4929 			goto err_free;
4930 	}
4931 
4932 mark_explored:
4933 	insn_state[t] = EXPLORED;
4934 	if (cur_stack-- <= 0) {
4935 		verbose(env, "pop stack internal bug\n");
4936 		ret = -EFAULT;
4937 		goto err_free;
4938 	}
4939 	goto peek_stack;
4940 
4941 check_state:
4942 	for (i = 0; i < insn_cnt; i++) {
4943 		if (insn_state[i] != EXPLORED) {
4944 			verbose(env, "unreachable insn %d\n", i);
4945 			ret = -EINVAL;
4946 			goto err_free;
4947 		}
4948 	}
4949 	ret = 0; /* cfg looks good */
4950 
4951 err_free:
4952 	kfree(insn_state);
4953 	kfree(insn_stack);
4954 	return ret;
4955 }
4956 
4957 /* The minimum supported BTF func info size */
4958 #define MIN_BPF_FUNCINFO_SIZE	8
4959 #define MAX_FUNCINFO_REC_SIZE	252
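/* Example (editor's note): a loader built against a newer UAPI may pass
 * func_info records larger than the kernel's struct bpf_func_info; any
 * record size in [8, 252] that is a multiple of 4 is accepted below, and
 * bpf_check_uarg_tail_zero() insists that the tail bytes the kernel
 * doesn't know about are all zero.
 */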
4960 
4961 static int check_btf_func(struct bpf_verifier_env *env,
4962 			  const union bpf_attr *attr,
4963 			  union bpf_attr __user *uattr)
4964 {
4965 	u32 i, nfuncs, urec_size, min_size, prev_offset;
4966 	u32 krec_size = sizeof(struct bpf_func_info);
4967 	struct bpf_func_info *krecord;
4968 	const struct btf_type *type;
4969 	struct bpf_prog *prog;
4970 	const struct btf *btf;
4971 	void __user *urecord;
4972 	int ret = 0;
4973 
4974 	nfuncs = attr->func_info_cnt;
4975 	if (!nfuncs)
4976 		return 0;
4977 
4978 	if (nfuncs != env->subprog_cnt) {
4979 		verbose(env, "number of funcs in func_info doesn't match number of subprogs\n");
4980 		return -EINVAL;
4981 	}
4982 
4983 	urec_size = attr->func_info_rec_size;
4984 	if (urec_size < MIN_BPF_FUNCINFO_SIZE ||
4985 	    urec_size > MAX_FUNCINFO_REC_SIZE ||
4986 	    urec_size % sizeof(u32)) {
4987 		verbose(env, "invalid func info rec size %u\n", urec_size);
4988 		return -EINVAL;
4989 	}
4990 
4991 	prog = env->prog;
4992 	btf = prog->aux->btf;
4993 
4994 	urecord = u64_to_user_ptr(attr->func_info);
4995 	min_size = min_t(u32, krec_size, urec_size);
4996 
4997 	krecord = kvcalloc(nfuncs, krec_size, GFP_KERNEL | __GFP_NOWARN);
4998 	if (!krecord)
4999 		return -ENOMEM;
5000 
5001 	for (i = 0; i < nfuncs; i++) {
5002 		ret = bpf_check_uarg_tail_zero(urecord, krec_size, urec_size);
5003 		if (ret) {
5004 			if (ret == -E2BIG) {
5005 				verbose(env, "nonzero trailing record in func info");
5006 				/* set the size kernel expects so loader can zero
5007 				 * out the rest of the record.
5008 				 */
5009 				if (put_user(min_size, &uattr->func_info_rec_size))
5010 					ret = -EFAULT;
5011 			}
5012 			goto err_free;
5013 		}
5014 
5015 		if (copy_from_user(&krecord[i], urecord, min_size)) {
5016 			ret = -EFAULT;
5017 			goto err_free;
5018 		}
5019 
5020 		/* check insn_off */
5021 		if (i == 0) {
5022 			if (krecord[i].insn_off) {
5023 				verbose(env,
5024 					"nonzero insn_off %u for the first func info record",
5025 					krecord[i].insn_off);
5026 				ret = -EINVAL;
5027 				goto err_free;
5028 			}
5029 		} else if (krecord[i].insn_off <= prev_offset) {
5030 			verbose(env,
5031 				"same or smaller insn offset (%u) than previous func info record (%u)",
5032 				krecord[i].insn_off, prev_offset);
5033 			ret = -EINVAL;
5034 			goto err_free;
5035 		}
5036 
5037 		if (env->subprog_info[i].start != krecord[i].insn_off) {
5038 			verbose(env, "func_info BTF section doesn't match subprog layout in BPF program\n");
5039 			ret = -EINVAL;
5040 			goto err_free;
5041 		}
5042 
5043 		/* check type_id */
5044 		type = btf_type_by_id(btf, krecord[i].type_id);
5045 		if (!type || BTF_INFO_KIND(type->info) != BTF_KIND_FUNC) {
5046 			verbose(env, "invalid type id %d in func info",
5047 				krecord[i].type_id);
5048 			ret = -EINVAL;
5049 			goto err_free;
5050 		}
5051 
5052 		prev_offset = krecord[i].insn_off;
5053 		urecord += urec_size;
5054 	}
5055 
5056 	prog->aux->func_info = krecord;
5057 	prog->aux->func_info_cnt = nfuncs;
5058 	return 0;
5059 
5060 err_free:
5061 	kvfree(krecord);
5062 	return ret;
5063 }
5064 
5065 static void adjust_btf_func(struct bpf_verifier_env *env)
5066 {
5067 	int i;
5068 
5069 	if (!env->prog->aux->func_info)
5070 		return;
5071 
5072 	for (i = 0; i < env->subprog_cnt; i++)
5073 		env->prog->aux->func_info[i].insn_off = env->subprog_info[i].start;
5074 }
5075 
5076 #define MIN_BPF_LINEINFO_SIZE	(offsetof(struct bpf_line_info, line_col) + \
5077 		sizeof(((struct bpf_line_info *)(0))->line_col))
5078 #define MAX_LINEINFO_REC_SIZE	MAX_FUNCINFO_REC_SIZE
5079 
5080 static int check_btf_line(struct bpf_verifier_env *env,
5081 			  const union bpf_attr *attr,
5082 			  union bpf_attr __user *uattr)
5083 {
5084 	u32 i, s, nr_linfo, ncopy, expected_size, rec_size, prev_offset = 0;
5085 	struct bpf_subprog_info *sub;
5086 	struct bpf_line_info *linfo;
5087 	struct bpf_prog *prog;
5088 	const struct btf *btf;
5089 	void __user *ulinfo;
5090 	int err;
5091 
5092 	nr_linfo = attr->line_info_cnt;
5093 	if (!nr_linfo)
5094 		return 0;
5095 
5096 	rec_size = attr->line_info_rec_size;
5097 	if (rec_size < MIN_BPF_LINEINFO_SIZE ||
5098 	    rec_size > MAX_LINEINFO_REC_SIZE ||
5099 	    rec_size & (sizeof(u32) - 1))
5100 		return -EINVAL;
5101 
5102 	/* Need to zero it in case userspace passes in
5103 	 * a smaller bpf_line_info object.
5104 	 */
5105 	linfo = kvcalloc(nr_linfo, sizeof(struct bpf_line_info),
5106 			 GFP_KERNEL | __GFP_NOWARN);
5107 	if (!linfo)
5108 		return -ENOMEM;
5109 
5110 	prog = env->prog;
5111 	btf = prog->aux->btf;
5112 
5113 	s = 0;
5114 	sub = env->subprog_info;
5115 	ulinfo = u64_to_user_ptr(attr->line_info);
5116 	expected_size = sizeof(struct bpf_line_info);
5117 	ncopy = min_t(u32, expected_size, rec_size);
5118 	for (i = 0; i < nr_linfo; i++) {
5119 		err = bpf_check_uarg_tail_zero(ulinfo, expected_size, rec_size);
5120 		if (err) {
5121 			if (err == -E2BIG) {
5122 				verbose(env, "nonzero trailing record in line_info");
5123 				if (put_user(expected_size,
5124 					     &uattr->line_info_rec_size))
5125 					err = -EFAULT;
5126 			}
5127 			goto err_free;
5128 		}
5129 
5130 		if (copy_from_user(&linfo[i], ulinfo, ncopy)) {
5131 			err = -EFAULT;
5132 			goto err_free;
5133 		}
5134 
5135 		/*
5136 		 * Check insn_off to ensure
5137 		 * 1) strictly increasing AND
5138 		 * 2) bounded by prog->len
5139 		 *
5140 		 * The linfo[0].insn_off == 0 check logically falls into
5141 		 * the later "missing bpf_line_info for func..." case
5142 		 * because the first linfo[0].insn_off must be the
5143 		 * first sub also and the first sub must have
5144 		 * subprog_info[0].start == 0.
5145 		 */
5146 		if ((i && linfo[i].insn_off <= prev_offset) ||
5147 		    linfo[i].insn_off >= prog->len) {
5148 			verbose(env, "Invalid line_info[%u].insn_off:%u (prev_offset:%u prog->len:%u)\n",
5149 				i, linfo[i].insn_off, prev_offset,
5150 				prog->len);
5151 			err = -EINVAL;
5152 			goto err_free;
5153 		}
5154 
5155 		if (!prog->insnsi[linfo[i].insn_off].code) {
5156 			verbose(env,
5157 				"Invalid insn code at line_info[%u].insn_off\n",
5158 				i);
5159 			err = -EINVAL;
5160 			goto err_free;
5161 		}
5162 
5163 		if (!btf_name_by_offset(btf, linfo[i].line_off) ||
5164 		    !btf_name_by_offset(btf, linfo[i].file_name_off)) {
5165 			verbose(env, "Invalid line_info[%u].line_off or .file_name_off\n", i);
5166 			err = -EINVAL;
5167 			goto err_free;
5168 		}
5169 
5170 		if (s != env->subprog_cnt) {
5171 			if (linfo[i].insn_off == sub[s].start) {
5172 				sub[s].linfo_idx = i;
5173 				s++;
5174 			} else if (sub[s].start < linfo[i].insn_off) {
5175 				verbose(env, "missing bpf_line_info for func#%u\n", s);
5176 				err = -EINVAL;
5177 				goto err_free;
5178 			}
5179 		}
5180 
5181 		prev_offset = linfo[i].insn_off;
5182 		ulinfo += rec_size;
5183 	}
5184 
5185 	if (s != env->subprog_cnt) {
5186 		verbose(env, "missing bpf_line_info for %u funcs starting from func#%u\n",
5187 			env->subprog_cnt - s, s);
5188 		err = -EINVAL;
5189 		goto err_free;
5190 	}
5191 
5192 	prog->aux->linfo = linfo;
5193 	prog->aux->nr_linfo = nr_linfo;
5194 
5195 	return 0;
5196 
5197 err_free:
5198 	kvfree(linfo);
5199 	return err;
5200 }
5201 
5202 static int check_btf_info(struct bpf_verifier_env *env,
5203 			  const union bpf_attr *attr,
5204 			  union bpf_attr __user *uattr)
5205 {
5206 	struct btf *btf;
5207 	int err;
5208 
5209 	if (!attr->func_info_cnt && !attr->line_info_cnt)
5210 		return 0;
5211 
5212 	btf = btf_get_by_fd(attr->prog_btf_fd);
5213 	if (IS_ERR(btf))
5214 		return PTR_ERR(btf);
5215 	env->prog->aux->btf = btf;
5216 
5217 	err = check_btf_func(env, attr, uattr);
5218 	if (err)
5219 		return err;
5220 
5221 	err = check_btf_line(env, attr, uattr);
5222 	if (err)
5223 		return err;
5224 
5225 	return 0;
5226 }
5227 
5228 /* check %cur's range satisfies %old's */
5229 static bool range_within(struct bpf_reg_state *old,
5230 			 struct bpf_reg_state *cur)
5231 {
5232 	return old->umin_value <= cur->umin_value &&
5233 	       old->umax_value >= cur->umax_value &&
5234 	       old->smin_value <= cur->smin_value &&
5235 	       old->smax_value >= cur->smax_value;
5236 }
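/* Example (editor's illustration): an old state with umin=0, umax=100
 * (and matching signed bounds) is satisfied by a current umin=10,
 * umax=50, but not by a current umax=200, since the current range must
 * sit entirely within the old one.
 */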
5237 
5238 /* Maximum number of register states that can exist at once */
5239 #define ID_MAP_SIZE	(MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE)
5240 struct idpair {
5241 	u32 old;
5242 	u32 cur;
5243 };
5244 
5245 /* If in the old state two registers had the same id, then they need to have
5246  * the same id in the new state as well.  But that id could be different from
5247  * the old state, so we need to track the mapping from old to new ids.
5248  * Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent
5249  * regs with old id 5 must also have new id 9 for the new state to be safe.  But
5250  * regs with a different old id could still have new id 9, we don't care about
5251  * that.
5252  * So we look through our idmap to see if this old id has been seen before.  If
5253  * so, we require the new id to match; otherwise, we add the id pair to the map.
5254  */
5255 static bool check_ids(u32 old_id, u32 cur_id, struct idpair *idmap)
5256 {
5257 	unsigned int i;
5258 
5259 	for (i = 0; i < ID_MAP_SIZE; i++) {
5260 		if (!idmap[i].old) {
5261 			/* Reached an empty slot; haven't seen this id before */
5262 			idmap[i].old = old_id;
5263 			idmap[i].cur = cur_id;
5264 			return true;
5265 		}
5266 		if (idmap[i].old == old_id)
5267 			return idmap[i].cur == cur_id;
5268 	}
5269 	/* We ran out of idmap slots, which should be impossible */
5270 	WARN_ON_ONCE(1);
5271 	return false;
5272 }
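/* Example (editor's illustration): starting from an empty idmap,
 * check_ids(5, 9, idmap) records the pair {5, 9} and returns true;
 * a later check_ids(5, 7, idmap) returns false since old id 5 is
 * already bound to cur id 9, while check_ids(6, 9, idmap) succeeds
 * because nothing constrains old id 6.
 */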
5273 
5274 static void clean_func_state(struct bpf_verifier_env *env,
5275 			     struct bpf_func_state *st)
5276 {
5277 	enum bpf_reg_liveness live;
5278 	int i, j;
5279 
5280 	for (i = 0; i < BPF_REG_FP; i++) {
5281 		live = st->regs[i].live;
5282 		/* liveness must not touch this register anymore */
5283 		st->regs[i].live |= REG_LIVE_DONE;
5284 		if (!(live & REG_LIVE_READ))
5285 			/* since the register is unused, clear its state
5286 			 * to make further comparison simpler
5287 			 */
5288 			__mark_reg_not_init(&st->regs[i]);
5289 	}
5290 
5291 	for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) {
5292 		live = st->stack[i].spilled_ptr.live;
5293 		/* liveness must not touch this stack slot anymore */
5294 		st->stack[i].spilled_ptr.live |= REG_LIVE_DONE;
5295 		if (!(live & REG_LIVE_READ)) {
5296 			__mark_reg_not_init(&st->stack[i].spilled_ptr);
5297 			for (j = 0; j < BPF_REG_SIZE; j++)
5298 				st->stack[i].slot_type[j] = STACK_INVALID;
5299 		}
5300 	}
5301 }
5302 
5303 static void clean_verifier_state(struct bpf_verifier_env *env,
5304 				 struct bpf_verifier_state *st)
5305 {
5306 	int i;
5307 
5308 	if (st->frame[0]->regs[0].live & REG_LIVE_DONE)
5309 		/* all regs in this state in all frames were already marked */
5310 		return;
5311 
5312 	for (i = 0; i <= st->curframe; i++)
5313 		clean_func_state(env, st->frame[i]);
5314 }
5315 
5316 /* the parentage chains form a tree.
5317  * the verifier states are added to state lists at given insn and
5318  * pushed into state stack for future exploration.
5319  * when the verifier reaches bpf_exit insn some of the verifier states
5320  * stored in the state lists have their final liveness state already,
5321  * but a lot of states will get revised from liveness point of view when
5322  * the verifier explores other branches.
5323  * Example:
5324  * 1: r0 = 1
5325  * 2: if r1 == 100 goto pc+1
5326  * 3: r0 = 2
5327  * 4: exit
5328  * when the verifier reaches exit insn the register r0 in the state list of
5329  * insn 2 will be seen as !REG_LIVE_READ. Then the verifier pops the other_branch
5330  * of insn 2 and goes exploring further. At the insn 4 it will walk the
5331  * parentage chain from insn 4 into insn 2 and will mark r0 as REG_LIVE_READ.
5332  *
5333  * Since the verifier pushes the branch states as it sees them while exploring
5334  * the program, walking the same branch instruction for the second time means
5335  * that all states below this branch were already explored and their final
5336  * liveness marks are already propagated.
5337  * Hence when the verifier completes the search of the state list in is_state_visited()
5338  * we can call this clean_live_states() function to mark all liveness states
5339  * as REG_LIVE_DONE to indicate that 'parent' pointers of 'struct bpf_reg_state'
5340  * will not be used.
5341  * This function also clears the registers and stack slots that were not
5342  * REG_LIVE_READ to simplify state merging.
5343  *
5344  * An important note here is that walking the same branch instruction in the
5345  * callee doesn't mean that the states are DONE. The verifier has to compare
5346  * the callsites as well.
5347  */
5348 static void clean_live_states(struct bpf_verifier_env *env, int insn,
5349 			      struct bpf_verifier_state *cur)
5350 {
5351 	struct bpf_verifier_state_list *sl;
5352 	int i;
5353 
5354 	sl = env->explored_states[insn];
5355 	if (!sl)
5356 		return;
5357 
5358 	while (sl != STATE_LIST_MARK) {
5359 		if (sl->state.curframe != cur->curframe)
5360 			goto next;
5361 		for (i = 0; i <= cur->curframe; i++)
5362 			if (sl->state.frame[i]->callsite != cur->frame[i]->callsite)
5363 				goto next;
5364 		clean_verifier_state(env, &sl->state);
5365 next:
5366 		sl = sl->next;
5367 	}
5368 }
5369 
5370 /* Returns true if (rold safe implies rcur safe) */
5371 static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
5372 		    struct idpair *idmap)
5373 {
5374 	bool equal;
5375 
5376 	if (!(rold->live & REG_LIVE_READ))
5377 		/* explored state didn't use this */
5378 		return true;
5379 
5380 	equal = memcmp(rold, rcur, offsetof(struct bpf_reg_state, parent)) == 0;
5381 
5382 	if (rold->type == PTR_TO_STACK)
5383 		/* two stack pointers are equal only if they're pointing to
5384 		 * the same stack frame, since fp-8 in foo != fp-8 in bar
5385 		 */
5386 		return equal && rold->frameno == rcur->frameno;
5387 
5388 	if (equal)
5389 		return true;
5390 
5391 	if (rold->type == NOT_INIT)
5392 		/* explored state can't have used this */
5393 		return true;
5394 	if (rcur->type == NOT_INIT)
5395 		return false;
5396 	switch (rold->type) {
5397 	case SCALAR_VALUE:
5398 		if (rcur->type == SCALAR_VALUE) {
5399 			/* new val must satisfy old val knowledge */
5400 			return range_within(rold, rcur) &&
5401 			       tnum_in(rold->var_off, rcur->var_off);
5402 		} else {
5403 			/* We're trying to use a pointer in place of a scalar.
5404 			 * Even if the scalar was unbounded, this could lead to
5405 			 * pointer leaks because scalars are allowed to leak
5406 			 * while pointers are not. We could make this safe in
5407 			 * special cases if root is calling us, but it's
5408 			 * probably not worth the hassle.
5409 			 */
5410 			return false;
5411 		}
5412 	case PTR_TO_MAP_VALUE:
5413 		/* If the new min/max/var_off satisfy the old ones and
5414 		 * everything else matches, we are OK.
5415 		 * We don't care about the 'id' value, because nothing
5416 		 * uses it for PTR_TO_MAP_VALUE (only for ..._OR_NULL)
5417 		 */
5418 		return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 &&
5419 		       range_within(rold, rcur) &&
5420 		       tnum_in(rold->var_off, rcur->var_off);
5421 	case PTR_TO_MAP_VALUE_OR_NULL:
5422 		/* a PTR_TO_MAP_VALUE could be safe to use as a
5423 		 * PTR_TO_MAP_VALUE_OR_NULL into the same map.
5424 		 * However, if the old PTR_TO_MAP_VALUE_OR_NULL then got NULL-
5425 		 * checked, doing so could have affected others with the same
5426 		 * id, and we can't check for that because we lost the id when
5427 		 * we converted to a PTR_TO_MAP_VALUE.
5428 		 */
5429 		if (rcur->type != PTR_TO_MAP_VALUE_OR_NULL)
5430 			return false;
5431 		if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)))
5432 			return false;
5433 		/* Check our ids match any regs they're supposed to */
5434 		return check_ids(rold->id, rcur->id, idmap);
5435 	case PTR_TO_PACKET_META:
5436 	case PTR_TO_PACKET:
5437 		if (rcur->type != rold->type)
5438 			return false;
5439 		/* We must have at least as much range as the old ptr
5440 		 * did, so that any accesses which were safe before are
5441 		 * still safe.  This is true even if old range < old off,
5442 		 * since someone could have accessed through (ptr - k), or
5443 		 * even done ptr -= k in a register, to get a safe access.
5444 		 */
5445 		if (rold->range > rcur->range)
5446 			return false;
5447 		/* If the offsets don't match, we can't trust our alignment;
5448 		 * nor can we be sure that we won't fall out of range.
5449 		 */
5450 		if (rold->off != rcur->off)
5451 			return false;
5452 		/* id relations must be preserved */
5453 		if (rold->id && !check_ids(rold->id, rcur->id, idmap))
5454 			return false;
5455 		/* new val must satisfy old val knowledge */
5456 		return range_within(rold, rcur) &&
5457 		       tnum_in(rold->var_off, rcur->var_off);
5458 	case PTR_TO_CTX:
5459 	case CONST_PTR_TO_MAP:
5460 	case PTR_TO_PACKET_END:
5461 	case PTR_TO_FLOW_KEYS:
5462 	case PTR_TO_SOCKET:
5463 	case PTR_TO_SOCKET_OR_NULL:
5464 		/* Only valid matches are exact, which memcmp() above
5465 		 * would have accepted
5466 		 */
5467 	default:
5468 		/* Don't know what's going on, just say it's not safe */
5469 		return false;
5470 	}
5471 
5472 	/* Shouldn't get here; if we do, say it's not safe */
5473 	WARN_ON_ONCE(1);
5474 	return false;
5475 }
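/* Example (editor's note): if an old (explored) packet pointer had
 * range 16, accesses like *(u32 *)(pkt + 12) were proven in bounds.
 * A current pointer with range 8 cannot replay that proof, so regsafe()
 * above returns false for rold->range == 16 vs rcur->range == 8.
 */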
5476 
5477 static bool stacksafe(struct bpf_func_state *old,
5478 		      struct bpf_func_state *cur,
5479 		      struct idpair *idmap)
5480 {
5481 	int i, spi;
5482 
5483 	/* walk slots of the explored stack and ignore any additional
5484 	 * slots in the current stack, since explored(safe) state
5485 	 * didn't use them
5486 	 */
5487 	for (i = 0; i < old->allocated_stack; i++) {
5488 		spi = i / BPF_REG_SIZE;
5489 
5490 		if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ)) {
5491 			i += BPF_REG_SIZE - 1;
5492 			/* explored state didn't use this */
5493 			continue;
5494 		}
5495 
5496 		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID)
5497 			continue;
5498 
5499 		/* explored stack has more populated slots than current stack
5500 		 * and these slots were used
5501 		 */
5502 		if (i >= cur->allocated_stack)
5503 			return false;
5504 
5505 		/* if old state was safe with misc data in the stack
5506 		 * it will be safe with zero-initialized stack.
5507 		 * The opposite is not true
5508 		 */
5509 		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC &&
5510 		    cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO)
5511 			continue;
5512 		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] !=
5513 		    cur->stack[spi].slot_type[i % BPF_REG_SIZE])
5514 			/* Ex: old explored (safe) state has STACK_SPILL in
5515 			 * this stack slot, but current has STACK_MISC ->
5516 			 * these verifier states are not equivalent,
5517 			 * return false to continue verification of this path
5518 			 */
5519 			return false;
5520 		if (i % BPF_REG_SIZE)
5521 			continue;
5522 		if (old->stack[spi].slot_type[0] != STACK_SPILL)
5523 			continue;
5524 		if (!regsafe(&old->stack[spi].spilled_ptr,
5525 			     &cur->stack[spi].spilled_ptr,
5526 			     idmap))
5527 			/* when explored and current stack slots are both storing
5528 			 * spilled registers, check that the stored pointer types
5529 			 * are the same as well.
5530 			 * Ex: explored safe path could have stored
5531 			 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8}
5532 			 * but current path has stored:
5533 			 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16}
5534 			 * such verifier states are not equivalent.
5535 			 * return false to continue verification of this path
5536 			 */
5537 			return false;
5538 	}
5539 	return true;
5540 }
5541 
5542 static bool refsafe(struct bpf_func_state *old, struct bpf_func_state *cur)
5543 {
5544 	if (old->acquired_refs != cur->acquired_refs)
5545 		return false;
5546 	return !memcmp(old->refs, cur->refs,
5547 		       sizeof(*old->refs) * old->acquired_refs);
5548 }
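/* Example (editor's note): if the old state holds one acquired
 * reference with id 3 and the current state holds one with id 3
 * acquired at the same insn, the memcmp() above matches; a differing
 * count, id or acquire site makes the states non-equal.
 */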
5549 
5550 /* compare two verifier states
5551  *
5552  * all states stored in state_list are known to be valid, since
5553  * verifier reached 'bpf_exit' instruction through them
5554  *
5555  * this function is called when the verifier explores different branches of
5556  * execution popped from the state stack. If it sees an old state that has a
5557  * more strict register state and more strict stack state, then this execution
5558  * branch doesn't need to be explored further, since the verifier already
5559  * concluded that the more strict state leads to a valid finish.
5560  *
5561  * Therefore two states are equivalent if register state is more conservative
5562  * and explored stack state is more conservative than the current one.
5563  * Example:
5564  *       explored                   current
5565  * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC)
5566  * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC)
5567  *
5568  * In other words, if the current stack state (the one being explored) has
5569  * more valid slots than the old one that already passed validation, it means
5570  * the verifier can stop exploring and conclude that the current state is valid too
5571  *
5572  * Similarly with registers. If explored state has register type as invalid
5573  * whereas register type in current state is meaningful, it means that
5574  * the current state will reach 'bpf_exit' instruction safely
5575  */
5576 static bool func_states_equal(struct bpf_func_state *old,
5577 			      struct bpf_func_state *cur)
5578 {
5579 	struct idpair *idmap;
5580 	bool ret = false;
5581 	int i;
5582 
5583 	idmap = kcalloc(ID_MAP_SIZE, sizeof(struct idpair), GFP_KERNEL);
5584 	/* If we failed to allocate the idmap, just say it's not safe */
5585 	if (!idmap)
5586 		return false;
5587 
5588 	for (i = 0; i < MAX_BPF_REG; i++) {
5589 		if (!regsafe(&old->regs[i], &cur->regs[i], idmap))
5590 			goto out_free;
5591 	}
5592 
5593 	if (!stacksafe(old, cur, idmap))
5594 		goto out_free;
5595 
5596 	if (!refsafe(old, cur))
5597 		goto out_free;
5598 	ret = true;
5599 out_free:
5600 	kfree(idmap);
5601 	return ret;
5602 }
5603 
5604 static bool states_equal(struct bpf_verifier_env *env,
5605 			 struct bpf_verifier_state *old,
5606 			 struct bpf_verifier_state *cur)
5607 {
5608 	int i;
5609 
5610 	if (old->curframe != cur->curframe)
5611 		return false;
5612 
5613 	/* Verification state from speculative execution simulation
5614 	 * must never prune a non-speculative execution one.
5615 	 */
5616 	if (old->speculative && !cur->speculative)
5617 		return false;
5618 
5619 	/* for states to be equal callsites have to be the same
5620 	 * and all frame states need to be equivalent
5621 	 */
5622 	for (i = 0; i <= old->curframe; i++) {
5623 		if (old->frame[i]->callsite != cur->frame[i]->callsite)
5624 			return false;
5625 		if (!func_states_equal(old->frame[i], cur->frame[i]))
5626 			return false;
5627 	}
5628 	return true;
5629 }
5630 
5631 /* A write screens off any subsequent reads; but write marks come from the
5632  * straight-line code between a state and its parent.  When we arrive at an
5633  * equivalent state (jump target or such) we didn't arrive by the straight-line
5634  * code, so read marks in the state must propagate to the parent regardless
5635  * of the state's write marks. That's what 'parent == state->parent' comparison
5636  * in mark_reg_read() is for.
5637  */
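/* Example (editor's sketch): in the straight-line code
 *   1: r6 = 1
 *   2: r0 = r6
 * the write at insn 1 screens off the read at insn 2, so that read
 * must not be propagated past insn 1 to the parent state, whereas a
 * read of a register never written on this path does reach the parent.
 */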
5638 static int propagate_liveness(struct bpf_verifier_env *env,
5639 			      const struct bpf_verifier_state *vstate,
5640 			      struct bpf_verifier_state *vparent)
5641 {
5642 	int i, frame, err = 0;
5643 	struct bpf_func_state *state, *parent;
5644 
5645 	if (vparent->curframe != vstate->curframe) {
5646 		WARN(1, "propagate_live: parent frame %d current frame %d\n",
5647 		     vparent->curframe, vstate->curframe);
5648 		return -EFAULT;
5649 	}
5650 	/* Propagate read liveness of registers... */
5651 	BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
5652 	/* We don't need to worry about FP liveness because it's read-only */
5653 	for (i = 0; i < BPF_REG_FP; i++) {
5654 		if (vparent->frame[vparent->curframe]->regs[i].live & REG_LIVE_READ)
5655 			continue;
5656 		if (vstate->frame[vstate->curframe]->regs[i].live & REG_LIVE_READ) {
5657 			err = mark_reg_read(env, &vstate->frame[vstate->curframe]->regs[i],
5658 					    &vparent->frame[vstate->curframe]->regs[i]);
5659 			if (err)
5660 				return err;
5661 		}
5662 	}
5663 
5664 	/* ... and stack slots */
5665 	for (frame = 0; frame <= vstate->curframe; frame++) {
5666 		state = vstate->frame[frame];
5667 		parent = vparent->frame[frame];
5668 		for (i = 0; i < state->allocated_stack / BPF_REG_SIZE &&
5669 			    i < parent->allocated_stack / BPF_REG_SIZE; i++) {
5670 			if (parent->stack[i].spilled_ptr.live & REG_LIVE_READ)
5671 				continue;
5672 			if (state->stack[i].spilled_ptr.live & REG_LIVE_READ)
5673 				mark_reg_read(env, &state->stack[i].spilled_ptr,
5674 					      &parent->stack[i].spilled_ptr);
5675 		}
5676 	}
5677 	return err;
5678 }
5679 
5680 static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
5681 {
5682 	struct bpf_verifier_state_list *new_sl;
5683 	struct bpf_verifier_state_list *sl;
5684 	struct bpf_verifier_state *cur = env->cur_state, *new;
5685 	int i, j, err, states_cnt = 0;
5686 
5687 	sl = env->explored_states[insn_idx];
5688 	if (!sl)
5689 		/* this 'insn_idx' instruction wasn't marked, so we will not
5690 		 * be doing state search here
5691 		 */
5692 		return 0;
5693 
5694 	clean_live_states(env, insn_idx, cur);
5695 
5696 	while (sl != STATE_LIST_MARK) {
5697 		if (states_equal(env, &sl->state, cur)) {
5698 			/* reached equivalent register/stack state,
5699 			 * prune the search.
5700 			 * Registers read by the continuation are read by us.
5701 			 * If we have any write marks in env->cur_state, they
5702 			 * will prevent corresponding reads in the continuation
5703 			 * from reaching our parent (an explored_state).  Our
5704 			 * own state will get the read marks recorded, but
5705 			 * they'll be immediately forgotten as we're pruning
5706 			 * this state and will pop a new one.
5707 			 */
5708 			err = propagate_liveness(env, &sl->state, cur);
5709 			if (err)
5710 				return err;
5711 			return 1;
5712 		}
5713 		sl = sl->next;
5714 		states_cnt++;
5715 	}
5716 
5717 	if (!env->allow_ptr_leaks && states_cnt > BPF_COMPLEXITY_LIMIT_STATES)
5718 		return 0;
5719 
5720 	/* there were no equivalent states, remember current one.
5721 	 * technically the current state is not proven to be safe yet,
5722 	 * but it will either reach the outermost bpf_exit (which means it's safe)
5723 	 * or it will be rejected. Since there are no loops, we won't be
5724 	 * seeing this tuple (frame[0].callsite, frame[1].callsite, .. insn_idx)
5725 	 * again on the way to bpf_exit
5726 	 */
5727 	new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL);
5728 	if (!new_sl)
5729 		return -ENOMEM;
5730 
5731 	/* add new state to the head of linked list */
5732 	new = &new_sl->state;
5733 	err = copy_verifier_state(new, cur);
5734 	if (err) {
5735 		free_verifier_state(new, false);
5736 		kfree(new_sl);
5737 		return err;
5738 	}
5739 	new_sl->next = env->explored_states[insn_idx];
5740 	env->explored_states[insn_idx] = new_sl;
5741 	/* connect new state to parentage chain. Current frame needs all
5742 	 * registers connected. Only r6 - r9 of the callers are alive (pushed
5743 	 * to the stack implicitly by JITs) so in callers' frames connect just
5744 	 * r6 - r9 as an optimization. Callers will have r1 - r5 connected to
5745 	 * the state of the call instruction (with WRITTEN set), and r0 comes
5746 	 * from callee with its full parentage chain, anyway.
5747 	 */
5748 	for (j = 0; j <= cur->curframe; j++)
5749 		for (i = j < cur->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++)
5750 			cur->frame[j]->regs[i].parent = &new->frame[j]->regs[i];
5751 	/* clear write marks in current state: the writes we did are not writes
5752 	 * our child did, so they don't screen off its reads from us.
5753 	 * (There are no read marks in current state, because reads always mark
5754 	 * their parent and current state never has children yet.  Only
5755 	 * explored_states can get read marks.)
5756 	 */
5757 	for (i = 0; i < BPF_REG_FP; i++)
5758 		cur->frame[cur->curframe]->regs[i].live = REG_LIVE_NONE;
5759 
5760 	/* all stack frames are accessible from callee, clear them all */
5761 	for (j = 0; j <= cur->curframe; j++) {
5762 		struct bpf_func_state *frame = cur->frame[j];
5763 		struct bpf_func_state *newframe = new->frame[j];
5764 
5765 		for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++) {
5766 			frame->stack[i].spilled_ptr.live = REG_LIVE_NONE;
5767 			frame->stack[i].spilled_ptr.parent =
5768 						&newframe->stack[i].spilled_ptr;
5769 		}
5770 	}
5771 	return 0;
5772 }
5773 
5774 /* Return true if it's OK to have the same insn return a different type. */
5775 static bool reg_type_mismatch_ok(enum bpf_reg_type type)
5776 {
5777 	switch (type) {
5778 	case PTR_TO_CTX:
5779 	case PTR_TO_SOCKET:
5780 	case PTR_TO_SOCKET_OR_NULL:
5781 		return false;
5782 	default:
5783 		return true;
5784 	}
5785 }
5786 
5787 /* If an instruction was previously used with particular pointer types, then we
5788  * need to be careful to avoid cases such as the below, where it may be ok
5789  * for one branch accessing the pointer, but not ok for the other branch:
5790  *
5791  * R1 = sock_ptr
5792  * goto X;
5793  * ...
5794  * R1 = some_other_valid_ptr;
5795  * goto X;
5796  * ...
5797  * R2 = *(u32 *)(R1 + 0);
5798  */
5799 static bool reg_type_mismatch(enum bpf_reg_type src, enum bpf_reg_type prev)
5800 {
5801 	return src != prev && (!reg_type_mismatch_ok(src) ||
5802 			       !reg_type_mismatch_ok(prev));
5803 }
5804 
5805 static int do_check(struct bpf_verifier_env *env)
5806 {
5807 	struct bpf_verifier_state *state;
5808 	struct bpf_insn *insns = env->prog->insnsi;
5809 	struct bpf_reg_state *regs;
5810 	int insn_cnt = env->prog->len, i;
5811 	int insn_processed = 0;
5812 	bool do_print_state = false;
5813 
5814 	env->prev_linfo = NULL;
5815 
5816 	state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL);
5817 	if (!state)
5818 		return -ENOMEM;
5819 	state->curframe = 0;
5820 	state->speculative = false;
5821 	state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL);
5822 	if (!state->frame[0]) {
5823 		kfree(state);
5824 		return -ENOMEM;
5825 	}
5826 	env->cur_state = state;
5827 	init_func_state(env, state->frame[0],
5828 			BPF_MAIN_FUNC /* callsite */,
5829 			0 /* frameno */,
5830 			0 /* subprogno, zero == main subprog */);
5831 
5832 	for (;;) {
5833 		struct bpf_insn *insn;
5834 		u8 class;
5835 		int err;
5836 
5837 		if (env->insn_idx >= insn_cnt) {
5838 			verbose(env, "invalid insn idx %d insn_cnt %d\n",
5839 				env->insn_idx, insn_cnt);
5840 			return -EFAULT;
5841 		}
5842 
5843 		insn = &insns[env->insn_idx];
5844 		class = BPF_CLASS(insn->code);
5845 
5846 		if (++insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) {
5847 			verbose(env,
5848 				"BPF program is too large. Processed %d insn\n",
5849 				insn_processed);
5850 			return -E2BIG;
5851 		}
5852 
5853 		err = is_state_visited(env, env->insn_idx);
5854 		if (err < 0)
5855 			return err;
5856 		if (err == 1) {
5857 			/* found equivalent state, can prune the search */
5858 			if (env->log.level) {
5859 				if (do_print_state)
5860 					verbose(env, "\nfrom %d to %d%s: safe\n",
5861 						env->prev_insn_idx, env->insn_idx,
5862 						env->cur_state->speculative ?
5863 						" (speculative execution)" : "");
5864 				else
5865 					verbose(env, "%d: safe\n", env->insn_idx);
5866 			}
5867 			goto process_bpf_exit;
5868 		}
5869 
5870 		if (signal_pending(current))
5871 			return -EAGAIN;
5872 
5873 		if (need_resched())
5874 			cond_resched();
5875 
5876 		if (env->log.level > 1 || (env->log.level && do_print_state)) {
5877 			if (env->log.level > 1)
5878 				verbose(env, "%d:", env->insn_idx);
5879 			else
5880 				verbose(env, "\nfrom %d to %d%s:",
5881 					env->prev_insn_idx, env->insn_idx,
5882 					env->cur_state->speculative ?
5883 					" (speculative execution)" : "");
5884 			print_verifier_state(env, state->frame[state->curframe]);
5885 			do_print_state = false;
5886 		}
5887 
5888 		if (env->log.level) {
5889 			const struct bpf_insn_cbs cbs = {
5890 				.cb_print	= verbose,
5891 				.private_data	= env,
5892 			};
5893 
5894 			verbose_linfo(env, env->insn_idx, "; ");
5895 			verbose(env, "%d: ", env->insn_idx);
5896 			print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
5897 		}
5898 
5899 		if (bpf_prog_is_dev_bound(env->prog->aux)) {
5900 			err = bpf_prog_offload_verify_insn(env, env->insn_idx,
5901 							   env->prev_insn_idx);
5902 			if (err)
5903 				return err;
5904 		}
5905 
5906 		regs = cur_regs(env);
5907 		env->insn_aux_data[env->insn_idx].seen = true;
5908 
5909 		if (class == BPF_ALU || class == BPF_ALU64) {
5910 			err = check_alu_op(env, insn);
5911 			if (err)
5912 				return err;
5913 
5914 		} else if (class == BPF_LDX) {
5915 			enum bpf_reg_type *prev_src_type, src_reg_type;
5916 
5917 			/* check for reserved fields is already done */
5918 
5919 			/* check src operand */
5920 			err = check_reg_arg(env, insn->src_reg, SRC_OP);
5921 			if (err)
5922 				return err;
5923 
5924 			err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
5925 			if (err)
5926 				return err;
5927 
5928 			src_reg_type = regs[insn->src_reg].type;
5929 
5930 			/* check that memory (src_reg + off) is readable,
5931 			 * the state of dst_reg will be updated by this func
5932 			 */
5933 			err = check_mem_access(env, env->insn_idx, insn->src_reg,
5934 					       insn->off, BPF_SIZE(insn->code),
5935 					       BPF_READ, insn->dst_reg, false);
5936 			if (err)
5937 				return err;
5938 
5939 			prev_src_type = &env->insn_aux_data[env->insn_idx].ptr_type;
5940 
5941 			if (*prev_src_type == NOT_INIT) {
5942 				/* saw a valid insn
5943 				 * dst_reg = *(u32 *)(src_reg + off)
5944 				 * save type to validate intersecting paths
5945 				 */
5946 				*prev_src_type = src_reg_type;
5947 
5948 			} else if (reg_type_mismatch(src_reg_type, *prev_src_type)) {
5949 				/* A user program is trying to use the same insn
5950 				 * dst_reg = *(u32*) (src_reg + off)
5951 				 * with different pointer types:
5952 				 * src_reg == ctx in one branch and
5953 				 * src_reg == stack|map in some other branch.
5954 				 * Reject it.
5955 				 */
5956 				verbose(env, "same insn cannot be used with different pointers\n");
5957 				return -EINVAL;
5958 			}
5959 
5960 		} else if (class == BPF_STX) {
5961 			enum bpf_reg_type *prev_dst_type, dst_reg_type;
5962 
5963 			if (BPF_MODE(insn->code) == BPF_XADD) {
5964 				err = check_xadd(env, env->insn_idx, insn);
5965 				if (err)
5966 					return err;
5967 				env->insn_idx++;
5968 				continue;
5969 			}
5970 
5971 			/* check src1 operand */
5972 			err = check_reg_arg(env, insn->src_reg, SRC_OP);
5973 			if (err)
5974 				return err;
5975 			/* check src2 operand */
5976 			err = check_reg_arg(env, insn->dst_reg, SRC_OP);
5977 			if (err)
5978 				return err;
5979 
5980 			dst_reg_type = regs[insn->dst_reg].type;
5981 
5982 			/* check that memory (dst_reg + off) is writeable */
5983 			err = check_mem_access(env, env->insn_idx, insn->dst_reg,
5984 					       insn->off, BPF_SIZE(insn->code),
5985 					       BPF_WRITE, insn->src_reg, false);
5986 			if (err)
5987 				return err;
5988 
5989 			prev_dst_type = &env->insn_aux_data[env->insn_idx].ptr_type;
5990 
5991 			if (*prev_dst_type == NOT_INIT) {
5992 				*prev_dst_type = dst_reg_type;
5993 			} else if (reg_type_mismatch(dst_reg_type, *prev_dst_type)) {
5994 				verbose(env, "same insn cannot be used with different pointers\n");
5995 				return -EINVAL;
5996 			}
5997 
5998 		} else if (class == BPF_ST) {
5999 			if (BPF_MODE(insn->code) != BPF_MEM ||
6000 			    insn->src_reg != BPF_REG_0) {
6001 				verbose(env, "BPF_ST uses reserved fields\n");
6002 				return -EINVAL;
6003 			}
6004 			/* check src operand */
6005 			err = check_reg_arg(env, insn->dst_reg, SRC_OP);
6006 			if (err)
6007 				return err;
6008 
6009 			if (is_ctx_reg(env, insn->dst_reg)) {
6010 				verbose(env, "BPF_ST stores into R%d %s is not allowed\n",
6011 					insn->dst_reg,
6012 					reg_type_str[reg_state(env, insn->dst_reg)->type]);
6013 				return -EACCES;
6014 			}
6015 
6016 			/* check that memory (dst_reg + off) is writeable */
6017 			err = check_mem_access(env, env->insn_idx, insn->dst_reg,
6018 					       insn->off, BPF_SIZE(insn->code),
6019 					       BPF_WRITE, -1, false);
6020 			if (err)
6021 				return err;
6022 
6023 		} else if (class == BPF_JMP) {
6024 			u8 opcode = BPF_OP(insn->code);
6025 
6026 			if (opcode == BPF_CALL) {
6027 				if (BPF_SRC(insn->code) != BPF_K ||
6028 				    insn->off != 0 ||
6029 				    (insn->src_reg != BPF_REG_0 &&
6030 				     insn->src_reg != BPF_PSEUDO_CALL) ||
6031 				    insn->dst_reg != BPF_REG_0) {
6032 					verbose(env, "BPF_CALL uses reserved fields\n");
6033 					return -EINVAL;
6034 				}
6035 
6036 				if (insn->src_reg == BPF_PSEUDO_CALL)
6037 					err = check_func_call(env, insn, &env->insn_idx);
6038 				else
6039 					err = check_helper_call(env, insn->imm, env->insn_idx);
6040 				if (err)
6041 					return err;
6042 
6043 			} else if (opcode == BPF_JA) {
6044 				if (BPF_SRC(insn->code) != BPF_K ||
6045 				    insn->imm != 0 ||
6046 				    insn->src_reg != BPF_REG_0 ||
6047 				    insn->dst_reg != BPF_REG_0) {
6048 					verbose(env, "BPF_JA uses reserved fields\n");
6049 					return -EINVAL;
6050 				}
6051 
6052 				env->insn_idx += insn->off + 1;
6053 				continue;
6054 
6055 			} else if (opcode == BPF_EXIT) {
6056 				if (BPF_SRC(insn->code) != BPF_K ||
6057 				    insn->imm != 0 ||
6058 				    insn->src_reg != BPF_REG_0 ||
6059 				    insn->dst_reg != BPF_REG_0) {
6060 					verbose(env, "BPF_EXIT uses reserved fields\n");
6061 					return -EINVAL;
6062 				}
6063 
6064 				if (state->curframe) {
6065 					/* exit from nested function */
6066 					env->prev_insn_idx = env->insn_idx;
6067 					err = prepare_func_exit(env, &env->insn_idx);
6068 					if (err)
6069 						return err;
6070 					do_print_state = true;
6071 					continue;
6072 				}
6073 
6074 				err = check_reference_leak(env);
6075 				if (err)
6076 					return err;
6077 
6078 				/* eBPF calling convention is such that R0 is used
6079 				 * to return the value from eBPF program.
6080 				 * Make sure that it's readable at this time
6081 				 * of bpf_exit, which means that program wrote
6082 				 * something into it earlier
6083 				 */
6084 				err = check_reg_arg(env, BPF_REG_0, SRC_OP);
6085 				if (err)
6086 					return err;
6087 
6088 				if (is_pointer_value(env, BPF_REG_0)) {
6089 					verbose(env, "R0 leaks addr as return value\n");
6090 					return -EACCES;
6091 				}
6092 
6093 				err = check_return_code(env);
6094 				if (err)
6095 					return err;
6096 process_bpf_exit:
6097 				err = pop_stack(env, &env->prev_insn_idx,
6098 						&env->insn_idx);
6099 				if (err < 0) {
6100 					if (err != -ENOENT)
6101 						return err;
6102 					break;
6103 				} else {
6104 					do_print_state = true;
6105 					continue;
6106 				}
6107 			} else {
6108 				err = check_cond_jmp_op(env, insn, &env->insn_idx);
6109 				if (err)
6110 					return err;
6111 			}
6112 		} else if (class == BPF_LD) {
6113 			u8 mode = BPF_MODE(insn->code);
6114 
6115 			if (mode == BPF_ABS || mode == BPF_IND) {
6116 				err = check_ld_abs(env, insn);
6117 				if (err)
6118 					return err;
6119 
6120 			} else if (mode == BPF_IMM) {
6121 				err = check_ld_imm(env, insn);
6122 				if (err)
6123 					return err;
6124 
6125 				env->insn_idx++;
6126 				env->insn_aux_data[env->insn_idx].seen = true;
6127 			} else {
6128 				verbose(env, "invalid BPF_LD mode\n");
6129 				return -EINVAL;
6130 			}
6131 		} else {
6132 			verbose(env, "unknown insn class %d\n", class);
6133 			return -EINVAL;
6134 		}
6135 
6136 		env->insn_idx++;
6137 	}
6138 
6139 	verbose(env, "processed %d insns (limit %d), stack depth ",
6140 		insn_processed, BPF_COMPLEXITY_LIMIT_INSNS);
6141 	for (i = 0; i < env->subprog_cnt; i++) {
6142 		u32 depth = env->subprog_info[i].stack_depth;
6143 
6144 		verbose(env, "%d", depth);
6145 		if (i + 1 < env->subprog_cnt)
6146 			verbose(env, "+");
6147 	}
6148 	verbose(env, "\n");
6149 	env->prog->aux->stack_depth = env->subprog_info[0].stack_depth;
6150 	return 0;
6151 }
6152 
6153 static int check_map_prealloc(struct bpf_map *map)
6154 {
6155 	return (map->map_type != BPF_MAP_TYPE_HASH &&
6156 		map->map_type != BPF_MAP_TYPE_PERCPU_HASH &&
6157 		map->map_type != BPF_MAP_TYPE_HASH_OF_MAPS) ||
6158 		!(map->map_flags & BPF_F_NO_PREALLOC);
6159 }
6160 
6161 static int check_map_prog_compatibility(struct bpf_verifier_env *env,
6162 					struct bpf_map *map,
6163 					struct bpf_prog *prog)
6164 
6165 {
6166 	/* Make sure that BPF_PROG_TYPE_PERF_EVENT programs only use
6167 	 * preallocated hash maps, since doing memory allocation
6168 	 * in overflow_handler can crash depending on where the NMI got
6169 	 * triggered.
6170 	 */
6171 	if (prog->type == BPF_PROG_TYPE_PERF_EVENT) {
6172 		if (!check_map_prealloc(map)) {
6173 			verbose(env, "perf_event programs can only use preallocated hash map\n");
6174 			return -EINVAL;
6175 		}
6176 		if (map->inner_map_meta &&
6177 		    !check_map_prealloc(map->inner_map_meta)) {
6178 			verbose(env, "perf_event programs can only use preallocated inner hash map\n");
6179 			return -EINVAL;
6180 		}
6181 	}
6182 
6183 	if ((bpf_prog_is_dev_bound(prog->aux) || bpf_map_is_dev_bound(map)) &&
6184 	    !bpf_offload_prog_map_match(prog, map)) {
6185 		verbose(env, "offload device mismatch between prog and map\n");
6186 		return -EINVAL;
6187 	}
6188 
6189 	return 0;
6190 }
6191 
6192 static bool bpf_map_is_cgroup_storage(struct bpf_map *map)
6193 {
6194 	return (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE ||
6195 		map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE);
6196 }
6197 
6198 /* look for pseudo eBPF instructions that access map FDs and
6199  * replace them with actual map pointers
6200  */
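/* Example (editor's sketch, assuming the usual loader convention):
 * a loader emits the two-insn wide
 *   BPF_LD_IMM64_RAW(BPF_REG_1, BPF_PSEUDO_MAP_FD, map_fd)
 * where src_reg == BPF_PSEUDO_MAP_FD marks imm as a map fd; the pass
 * below resolves the fd and stores the kernel map pointer into the
 * imm pair of the two instructions.
 */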
6201 static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env)
6202 {
6203 	struct bpf_insn *insn = env->prog->insnsi;
6204 	int insn_cnt = env->prog->len;
6205 	int i, j, err;
6206 
6207 	err = bpf_prog_calc_tag(env->prog);
6208 	if (err)
6209 		return err;
6210 
6211 	for (i = 0; i < insn_cnt; i++, insn++) {
6212 		if (BPF_CLASS(insn->code) == BPF_LDX &&
6213 		    (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) {
6214 			verbose(env, "BPF_LDX uses reserved fields\n");
6215 			return -EINVAL;
6216 		}
6217 
6218 		if (BPF_CLASS(insn->code) == BPF_STX &&
6219 		    ((BPF_MODE(insn->code) != BPF_MEM &&
6220 		      BPF_MODE(insn->code) != BPF_XADD) || insn->imm != 0)) {
6221 			verbose(env, "BPF_STX uses reserved fields\n");
6222 			return -EINVAL;
6223 		}
6224 
6225 		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) {
6226 			struct bpf_map *map;
6227 			struct fd f;
6228 
6229 			if (i == insn_cnt - 1 || insn[1].code != 0 ||
6230 			    insn[1].dst_reg != 0 || insn[1].src_reg != 0 ||
6231 			    insn[1].off != 0) {
6232 				verbose(env, "invalid bpf_ld_imm64 insn\n");
6233 				return -EINVAL;
6234 			}
6235 
6236 			if (insn->src_reg == 0)
6237 				/* valid generic load 64-bit imm */
6238 				goto next_insn;
6239 
6240 			if (insn->src_reg != BPF_PSEUDO_MAP_FD) {
6241 				verbose(env,
6242 					"unrecognized bpf_ld_imm64 insn\n");
6243 				return -EINVAL;
6244 			}
6245 
6246 			f = fdget(insn->imm);
6247 			map = __bpf_map_get(f);
6248 			if (IS_ERR(map)) {
6249 				verbose(env, "fd %d is not pointing to valid bpf_map\n",
6250 					insn->imm);
6251 				return PTR_ERR(map);
6252 			}
6253 
6254 			err = check_map_prog_compatibility(env, map, env->prog);
6255 			if (err) {
6256 				fdput(f);
6257 				return err;
6258 			}
6259 
6260 			/* store map pointer inside BPF_LD_IMM64 instruction */
6261 			insn[0].imm = (u32) (unsigned long) map;
6262 			insn[1].imm = ((u64) (unsigned long) map) >> 32;
6263 
6264 			/* check whether we recorded this map already */
6265 			for (j = 0; j < env->used_map_cnt; j++)
6266 				if (env->used_maps[j] == map) {
6267 					fdput(f);
6268 					goto next_insn;
6269 				}
6270 
6271 			if (env->used_map_cnt >= MAX_USED_MAPS) {
6272 				fdput(f);
6273 				return -E2BIG;
6274 			}
6275 
6276 			/* hold the map. If the program is rejected by verifier,
6277 			 * the map will be released by release_maps() or it
6278 			 * will be used by the valid program until it's unloaded
6279 			 * and all maps are released in free_used_maps()
6280 			 */
6281 			map = bpf_map_inc(map, false);
6282 			if (IS_ERR(map)) {
6283 				fdput(f);
6284 				return PTR_ERR(map);
6285 			}
6286 			env->used_maps[env->used_map_cnt++] = map;
6287 
6288 			if (bpf_map_is_cgroup_storage(map) &&
6289 			    bpf_cgroup_storage_assign(env->prog, map)) {
6290 				verbose(env, "only one cgroup storage of each type is allowed\n");
6291 				fdput(f);
6292 				return -EBUSY;
6293 			}
6294 
6295 			fdput(f);
6296 next_insn:
6297 			insn++;
6298 			i++;
6299 			continue;
6300 		}
6301 
6302 		/* Basic sanity check before we invest more work here. */
6303 		if (!bpf_opcode_in_insntable(insn->code)) {
6304 			verbose(env, "unknown opcode %02x\n", insn->code);
6305 			return -EINVAL;
6306 		}
6307 	}
6308 
6309 	/* now all pseudo BPF_LD_IMM64 instructions load valid
6310 	 * 'struct bpf_map *' into a register instead of user map_fd.
6311 	 * These pointers will be used later by verifier to validate map access.
6312 	 */
6313 	return 0;
6314 }
6315 
6316 /* drop refcnt of maps used by the rejected program */
6317 static void release_maps(struct bpf_verifier_env *env)
6318 {
6319 	enum bpf_cgroup_storage_type stype;
6320 	int i;
6321 
6322 	for_each_cgroup_storage_type(stype) {
6323 		if (!env->prog->aux->cgroup_storage[stype])
6324 			continue;
6325 		bpf_cgroup_storage_release(env->prog,
6326 			env->prog->aux->cgroup_storage[stype]);
6327 	}
6328 
6329 	for (i = 0; i < env->used_map_cnt; i++)
6330 		bpf_map_put(env->used_maps[i]);
6331 }
6332 
6333 /* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */
6334 static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env)
6335 {
6336 	struct bpf_insn *insn = env->prog->insnsi;
6337 	int insn_cnt = env->prog->len;
6338 	int i;
6339 
6340 	for (i = 0; i < insn_cnt; i++, insn++)
6341 		if (insn->code == (BPF_LD | BPF_IMM | BPF_DW))
6342 			insn->src_reg = 0;
6343 }
6344 
6345 /* single env->prog->insni[off] instruction was replaced with the range
6346  * insni[off, off + cnt).  Adjust corresponding insn_aux_data by copying
6347  * [0, off) and [off, end) to new locations, so the patched range stays zero
6348  */
6349 static int adjust_insn_aux_data(struct bpf_verifier_env *env, u32 prog_len,
6350 				u32 off, u32 cnt)
6351 {
6352 	struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data;
6353 	int i;
6354 
6355 	if (cnt == 1)
6356 		return 0;
6357 	new_data = vzalloc(array_size(prog_len,
6358 				      sizeof(struct bpf_insn_aux_data)));
6359 	if (!new_data)
6360 		return -ENOMEM;
6361 	memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
6362 	memcpy(new_data + off + cnt - 1, old_data + off,
6363 	       sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
6364 	for (i = off; i < off + cnt - 1; i++)
6365 		new_data[i].seen = true;
6366 	env->insn_aux_data = new_data;
6367 	vfree(old_data);
6368 	return 0;
6369 }
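/* Example (editor's illustration): patching insn 3 of a 10-insn prog
 * with a 4-insn sequence gives prog_len 13; aux data [0, 3) is kept,
 * old [3, 10) shifts to [6, 13) so the original insn's aux lands on
 * the last patched slot, and the new slots 3..5 are marked seen.
 */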
6370 
6371 static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len)
6372 {
6373 	int i;
6374 
6375 	if (len == 1)
6376 		return;
6377 	/* NOTE: fake 'exit' subprog should be updated as well. */
6378 	for (i = 0; i <= env->subprog_cnt; i++) {
6379 		if (env->subprog_info[i].start <= off)
6380 			continue;
6381 		env->subprog_info[i].start += len - 1;
6382 	}
6383 }
6384 
6385 static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off,
6386 					    const struct bpf_insn *patch, u32 len)
6387 {
6388 	struct bpf_prog *new_prog;
6389 
6390 	new_prog = bpf_patch_insn_single(env->prog, off, patch, len);
6391 	if (!new_prog)
6392 		return NULL;
6393 	if (adjust_insn_aux_data(env, new_prog->len, off, len))
6394 		return NULL;
6395 	adjust_subprog_starts(env, off, len);
6396 	return new_prog;
6397 }
6398 
6399 /* The verifier does more data flow analysis than llvm and will not
6400  * explore branches that are dead at run time. Malicious programs can
6401  * have dead code too. Therefore replace all dead at-run-time code
6402  * with 'ja -1'.
6403  *
6404  * Just nops would not be optimal, e.g. if they sat at the end of the
6405  * program and through another bug we managed to jump there, we would
6406  * execute beyond program memory. Returning exception
6407  * code also wouldn't work since we can have subprogs where the dead
6408  * code could be located.
6409  */
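/* Example (editor's note): BPF_JMP_IMM(BPF_JA, 0, 0, -1) advances the
 * pc by off + 1 == 0, i.e. the trap insn jumps to itself, so even if
 * buggy code ever reached a dead region it would spin in place instead
 * of running off the end of the program.
 */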
6410 static void sanitize_dead_code(struct bpf_verifier_env *env)
6411 {
6412 	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
6413 	struct bpf_insn trap = BPF_JMP_IMM(BPF_JA, 0, 0, -1);
6414 	struct bpf_insn *insn = env->prog->insnsi;
6415 	const int insn_cnt = env->prog->len;
6416 	int i;
6417 
6418 	for (i = 0; i < insn_cnt; i++) {
6419 		if (aux_data[i].seen)
6420 			continue;
6421 		memcpy(insn + i, &trap, sizeof(trap));
6422 	}
6423 }
6424 
6425 /* convert load instructions that access fields of a context type into a
6426  * sequence of instructions that access fields of the underlying structure:
6427  *     struct __sk_buff    -> struct sk_buff
6428  *     struct bpf_sock_ops -> struct sock
6429  */
6430 static int convert_ctx_accesses(struct bpf_verifier_env *env)
6431 {
6432 	const struct bpf_verifier_ops *ops = env->ops;
6433 	int i, cnt, size, ctx_field_size, delta = 0;
6434 	const int insn_cnt = env->prog->len;
6435 	struct bpf_insn insn_buf[16], *insn;
6436 	u32 target_size, size_default, off;
6437 	struct bpf_prog *new_prog;
6438 	enum bpf_access_type type;
6439 	bool is_narrower_load;
6440 
6441 	if (ops->gen_prologue || env->seen_direct_write) {
6442 		if (!ops->gen_prologue) {
6443 			verbose(env, "bpf verifier is misconfigured\n");
6444 			return -EINVAL;
6445 		}
6446 		cnt = ops->gen_prologue(insn_buf, env->seen_direct_write,
6447 					env->prog);
6448 		if (cnt >= ARRAY_SIZE(insn_buf)) {
6449 			verbose(env, "bpf verifier is misconfigured\n");
6450 			return -EINVAL;
6451 		} else if (cnt) {
6452 			new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt);
6453 			if (!new_prog)
6454 				return -ENOMEM;
6455 
6456 			env->prog = new_prog;
6457 			delta += cnt - 1;
6458 		}
6459 	}
6460 
6461 	if (bpf_prog_is_dev_bound(env->prog->aux))
6462 		return 0;
6463 
6464 	insn = env->prog->insnsi + delta;
6465 
6466 	for (i = 0; i < insn_cnt; i++, insn++) {
6467 		bpf_convert_ctx_access_t convert_ctx_access;
6468 
6469 		if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) ||
6470 		    insn->code == (BPF_LDX | BPF_MEM | BPF_H) ||
6471 		    insn->code == (BPF_LDX | BPF_MEM | BPF_W) ||
6472 		    insn->code == (BPF_LDX | BPF_MEM | BPF_DW))
6473 			type = BPF_READ;
6474 		else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) ||
6475 			 insn->code == (BPF_STX | BPF_MEM | BPF_H) ||
6476 			 insn->code == (BPF_STX | BPF_MEM | BPF_W) ||
6477 			 insn->code == (BPF_STX | BPF_MEM | BPF_DW))
6478 			type = BPF_WRITE;
6479 		else
6480 			continue;
6481 
6482 		if (type == BPF_WRITE &&
6483 		    env->insn_aux_data[i + delta].sanitize_stack_off) {
6484 			struct bpf_insn patch[] = {
6485 				/* Sanitize suspicious stack slot with zero.
6486 				 * There are no memory dependencies for this store,
6487 				 * since it's only using frame pointer and immediate
6488 				 * constant of zero
6489 				 */
6490 				BPF_ST_MEM(BPF_DW, BPF_REG_FP,
6491 					   env->insn_aux_data[i + delta].sanitize_stack_off,
6492 					   0),
6493 				/* the original STX instruction will immediately
6494 				 * overwrite the same stack slot with appropriate value
6495 				 */
6496 				*insn,
6497 			};
6498 
6499 			cnt = ARRAY_SIZE(patch);
6500 			new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt);
6501 			if (!new_prog)
6502 				return -ENOMEM;
6503 
6504 			delta    += cnt - 1;
6505 			env->prog = new_prog;
6506 			insn      = new_prog->insnsi + i + delta;
6507 			continue;
6508 		}
6509 
6510 		switch (env->insn_aux_data[i + delta].ptr_type) {
6511 		case PTR_TO_CTX:
6512 			if (!ops->convert_ctx_access)
6513 				continue;
6514 			convert_ctx_access = ops->convert_ctx_access;
6515 			break;
6516 		case PTR_TO_SOCKET:
6517 			convert_ctx_access = bpf_sock_convert_ctx_access;
6518 			break;
6519 		default:
6520 			continue;
6521 		}
6522 
6523 		ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size;
6524 		size = BPF_LDST_BYTES(insn);
6525 
6526 		/* If the read access is a narrower load of the field,
6527 		 * convert to a 4/8-byte load, to minimize program type specific
6528 		 * convert_ctx_access changes. If conversion is successful,
6529 		 * we will apply proper mask to the result.
6530 		 */
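		/* Example (editor's illustration): a 1-byte read at off 14
		 * of a 4-byte field starting at off 12 becomes a 4-byte read
		 * at off 12 (off & ~3); the code further below then patches
		 * in ">> 16" and "& 0xff" to extract the requested byte.
		 */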
6531 		is_narrower_load = size < ctx_field_size;
6532 		size_default = bpf_ctx_off_adjust_machine(ctx_field_size);
6533 		off = insn->off;
6534 		if (is_narrower_load) {
6535 			u8 size_code;
6536 
6537 			if (type == BPF_WRITE) {
6538 				verbose(env, "bpf verifier narrow ctx access misconfigured\n");
6539 				return -EINVAL;
6540 			}
6541 
6542 			size_code = BPF_H;
6543 			if (ctx_field_size == 4)
6544 				size_code = BPF_W;
6545 			else if (ctx_field_size == 8)
6546 				size_code = BPF_DW;
6547 
6548 			insn->off = off & ~(size_default - 1);
6549 			insn->code = BPF_LDX | BPF_MEM | size_code;
6550 		}
6551 
6552 		target_size = 0;
6553 		cnt = convert_ctx_access(type, insn, insn_buf, env->prog,
6554 					 &target_size);
6555 		if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) ||
6556 		    (ctx_field_size && !target_size)) {
6557 			verbose(env, "bpf verifier is misconfigured\n");
6558 			return -EINVAL;
6559 		}
6560 
6561 		if (is_narrower_load && size < target_size) {
6562 			u8 shift = (off & (size_default - 1)) * 8;
6563 
6564 			if (ctx_field_size <= 4) {
6565 				if (shift)
6566 					insn_buf[cnt++] = BPF_ALU32_IMM(BPF_RSH,
6567 									insn->dst_reg,
6568 									shift);
6569 				insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
6570 								(1 << size * 8) - 1);
6571 			} else {
6572 				if (shift)
6573 					insn_buf[cnt++] = BPF_ALU64_IMM(BPF_RSH,
6574 									insn->dst_reg,
6575 									shift);
6576 				insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg,
6577 								(1 << size * 8) - 1);
6578 			}
6579 		}
6580 
6581 		new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
6582 		if (!new_prog)
6583 			return -ENOMEM;
6584 
6585 		delta += cnt - 1;
6586 
6587 		/* keep walking new program and skip insns we just inserted */
6588 		env->prog = new_prog;
6589 		insn      = new_prog->insnsi + i + delta;
6590 	}
6591 
6592 	return 0;
6593 }
6594 
6595 static int jit_subprogs(struct bpf_verifier_env *env)
6596 {
6597 	struct bpf_prog *prog = env->prog, **func, *tmp;
6598 	int i, j, subprog_start, subprog_end = 0, len, subprog;
6599 	struct bpf_insn *insn;
6600 	void *old_bpf_func;
6601 	int err;
6602 
6603 	if (env->subprog_cnt <= 1)
6604 		return 0;
6605 
6606 	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
6607 		if (insn->code != (BPF_JMP | BPF_CALL) ||
6608 		    insn->src_reg != BPF_PSEUDO_CALL)
6609 			continue;
6610 		/* Upon error here we cannot fall back to interpreter but
6611 		 * need a hard reject of the program. Thus -EFAULT is
6612 		 * propagated in any case.
6613 		 */
6614 		subprog = find_subprog(env, i + insn->imm + 1);
6615 		if (subprog < 0) {
6616 			WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
6617 				  i + insn->imm + 1);
6618 			return -EFAULT;
6619 		}
6620 		/* temporarily remember subprog id inside insn instead of
6621 		 * aux_data, since next loop will split up all insns into funcs
6622 		 */
6623 		insn->off = subprog;
6624 		/* remember the original imm in case the JIT fails and a
6625 		 * fallback to the interpreter is needed
6626 		 */
6627 		env->insn_aux_data[i].call_imm = insn->imm;
6628 		/* point imm to __bpf_call_base+1 from the JIT's point of view */
6629 		insn->imm = 1;
6630 	}
6631 
6632 	err = bpf_prog_alloc_jited_linfo(prog);
6633 	if (err)
6634 		goto out_undo_insn;
6635 
6636 	err = -ENOMEM;
6637 	func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL);
6638 	if (!func)
6639 		goto out_undo_insn;
6640 
6641 	for (i = 0; i < env->subprog_cnt; i++) {
6642 		subprog_start = subprog_end;
6643 		subprog_end = env->subprog_info[i + 1].start;
6644 
6645 		len = subprog_end - subprog_start;
6646 		func[i] = bpf_prog_alloc(bpf_prog_size(len), GFP_USER);
6647 		if (!func[i])
6648 			goto out_free;
6649 		memcpy(func[i]->insnsi, &prog->insnsi[subprog_start],
6650 		       len * sizeof(struct bpf_insn));
6651 		func[i]->type = prog->type;
6652 		func[i]->len = len;
6653 		if (bpf_prog_calc_tag(func[i]))
6654 			goto out_free;
6655 		func[i]->is_func = 1;
6656 		func[i]->aux->func_idx = i;
6657 		/* the btf and func_info will be freed only at prog->aux */
6658 		func[i]->aux->btf = prog->aux->btf;
6659 		func[i]->aux->func_info = prog->aux->func_info;
6660 
6661 		/* Use bpf_prog_F_tag to indicate functions in stack traces.
6662 		 * Long term we would need debug info to populate the names
6663 		 */
6664 		func[i]->aux->name[0] = 'F';
6665 		func[i]->aux->stack_depth = env->subprog_info[i].stack_depth;
6666 		func[i]->jit_requested = 1;
6667 		func[i]->aux->linfo = prog->aux->linfo;
6668 		func[i]->aux->nr_linfo = prog->aux->nr_linfo;
6669 		func[i]->aux->jited_linfo = prog->aux->jited_linfo;
6670 		func[i]->aux->linfo_idx = env->subprog_info[i].linfo_idx;
6671 		func[i] = bpf_int_jit_compile(func[i]);
6672 		if (!func[i]->jited) {
6673 			err = -ENOTSUPP;
6674 			goto out_free;
6675 		}
6676 		cond_resched();
6677 	}
6678 	/* at this point all bpf functions were successfully JITed;
6679 	 * now populate all bpf_calls with the correct addresses and
6680 	 * run the last pass of the JIT
6681 	 */
6682 	for (i = 0; i < env->subprog_cnt; i++) {
6683 		insn = func[i]->insnsi;
6684 		for (j = 0; j < func[i]->len; j++, insn++) {
6685 			if (insn->code != (BPF_JMP | BPF_CALL) ||
6686 			    insn->src_reg != BPF_PSEUDO_CALL)
6687 				continue;
6688 			subprog = insn->off;
6689 			insn->imm = (u64 (*)(u64, u64, u64, u64, u64))
6690 				func[subprog]->bpf_func -
6691 				__bpf_call_base;
6692 		}
6693 
6694 		/* we use the aux data to keep a list of the start addresses
6695 		 * of the JITed images for each function in the program
6696 		 *
6697 		 * for some architectures, such as powerpc64, the imm field
6698 		 * might not be large enough to hold the offset of the start
6699 		 * address of the callee's JITed image from __bpf_call_base
6700 		 *
6701 	 * in such cases, we can look up the start address of a callee
6702 		 * by using its subprog id, available from the off field of
6703 		 * the call instruction, as an index for this list
6704 		 */
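		/* Editor's note: with this list a JIT can resolve a callee's
		 * image start as aux->func[insn->off]->bpf_func instead of
		 * relying on the imm offset alone.
		 */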
6705 		func[i]->aux->func = func;
6706 		func[i]->aux->func_cnt = env->subprog_cnt;
6707 	}
6708 	for (i = 0; i < env->subprog_cnt; i++) {
6709 		old_bpf_func = func[i]->bpf_func;
6710 		tmp = bpf_int_jit_compile(func[i]);
6711 		if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) {
6712 			verbose(env, "JIT doesn't support bpf-to-bpf calls\n");
6713 			err = -ENOTSUPP;
6714 			goto out_free;
6715 		}
6716 		cond_resched();
6717 	}
6718 
6719 	/* finally lock the prog and JIT images for all functions and
6720 	 * populate kallsyms
6721 	 */
6722 	for (i = 0; i < env->subprog_cnt; i++) {
6723 		bpf_prog_lock_ro(func[i]);
6724 		bpf_prog_kallsyms_add(func[i]);
6725 	}
6726 
6727 	/* Last step: make the now unused interpreter insns from the main
6728 	 * prog consistent for later dump requests, so they look the same
6729 	 * as if they had been interpreted only.
6730 	 */
6731 	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
6732 		if (insn->code != (BPF_JMP | BPF_CALL) ||
6733 		    insn->src_reg != BPF_PSEUDO_CALL)
6734 			continue;
6735 		insn->off = env->insn_aux_data[i].call_imm;
6736 		subprog = find_subprog(env, i + insn->off + 1);
6737 		insn->imm = subprog;
6738 	}
6739 
6740 	prog->jited = 1;
6741 	prog->bpf_func = func[0]->bpf_func;
6742 	prog->aux->func = func;
6743 	prog->aux->func_cnt = env->subprog_cnt;
6744 	bpf_prog_free_unused_jited_linfo(prog);
6745 	return 0;
6746 out_free:
6747 	for (i = 0; i < env->subprog_cnt; i++)
6748 		if (func[i])
6749 			bpf_jit_free(func[i]);
6750 	kfree(func);
6751 out_undo_insn:
6752 	/* clean up the main prog to be interpreted */
6753 	prog->jit_requested = 0;
6754 	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
6755 		if (insn->code != (BPF_JMP | BPF_CALL) ||
6756 		    insn->src_reg != BPF_PSEUDO_CALL)
6757 			continue;
6758 		insn->off = 0;
6759 		insn->imm = env->insn_aux_data[i].call_imm;
6760 	}
6761 	bpf_prog_free_jited_linfo(prog);
6762 	return err;
6763 }
6764 
6765 static int fixup_call_args(struct bpf_verifier_env *env)
6766 {
6767 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
6768 	struct bpf_prog *prog = env->prog;
6769 	struct bpf_insn *insn = prog->insnsi;
6770 	int i, depth;
6771 #endif
6772 	int err = 0;
6773 
6774 	if (env->prog->jit_requested &&
6775 	    !bpf_prog_is_dev_bound(env->prog->aux)) {
6776 		err = jit_subprogs(env);
6777 		if (err == 0)
6778 			return 0;
6779 		if (err == -EFAULT)
6780 			return err;
6781 	}
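	/* Editor's note: reaching the block below means JITing was skipped
	 * or failed non-fatally; bpf_patch_call_args() rewrites each pseudo
	 * call so the interpreter can dispatch it with the callee's stack
	 * depth.
	 */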
6782 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
6783 	for (i = 0; i < prog->len; i++, insn++) {
6784 		if (insn->code != (BPF_JMP | BPF_CALL) ||
6785 		    insn->src_reg != BPF_PSEUDO_CALL)
6786 			continue;
6787 		depth = get_callee_stack_depth(env, insn, i);
6788 		if (depth < 0)
6789 			return depth;
6790 		bpf_patch_call_args(insn, depth);
6791 	}
6792 	err = 0;
6793 #endif
6794 	return err;
6795 }
6796 
6797 /* fixup the insn->imm field of bpf_call instructions
6798  * and inline eligible helpers as an explicit sequence of BPF instructions
6799  *
6800  * this function is called after the eBPF program has passed verification
6801  */
6802 static int fixup_bpf_calls(struct bpf_verifier_env *env)
6803 {
6804 	struct bpf_prog *prog = env->prog;
6805 	struct bpf_insn *insn = prog->insnsi;
6806 	const struct bpf_func_proto *fn;
6807 	const int insn_cnt = prog->len;
6808 	const struct bpf_map_ops *ops;
6809 	struct bpf_insn_aux_data *aux;
6810 	struct bpf_insn insn_buf[16];
6811 	struct bpf_prog *new_prog;
6812 	struct bpf_map *map_ptr;
6813 	int i, cnt, delta = 0;
6814 
6815 	for (i = 0; i < insn_cnt; i++, insn++) {
6816 		if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) ||
6817 		    insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
6818 		    insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
6819 		    insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
6820 			bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
6821 			struct bpf_insn mask_and_div[] = {
6822 				BPF_MOV32_REG(insn->src_reg, insn->src_reg),
6823 				/* Rx div 0 -> 0 */
6824 				BPF_JMP_IMM(BPF_JNE, insn->src_reg, 0, 2),
6825 				BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg),
6826 				BPF_JMP_IMM(BPF_JA, 0, 0, 1),
6827 				*insn,
6828 			};
6829 			struct bpf_insn mask_and_mod[] = {
6830 				BPF_MOV32_REG(insn->src_reg, insn->src_reg),
6831 				/* Rx mod 0 -> Rx */
6832 				BPF_JMP_IMM(BPF_JEQ, insn->src_reg, 0, 1),
6833 				*insn,
6834 			};
6835 			struct bpf_insn *patchlet;
6836 
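			/* Editor's note: for the 64-bit variants the leading
			 * 32-bit move (which truncates the divisor before the
			 * zero test) is unnecessary, so the patchlet starts
			 * one insn later and cnt shrinks accordingly.
			 */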
6837 			if (insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
6838 			    insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
6839 				patchlet = mask_and_div + (is64 ? 1 : 0);
6840 				cnt = ARRAY_SIZE(mask_and_div) - (is64 ? 1 : 0);
6841 			} else {
6842 				patchlet = mask_and_mod + (is64 ? 1 : 0);
6843 				cnt = ARRAY_SIZE(mask_and_mod) - (is64 ? 1 : 0);
6844 			}
6845 
6846 			new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
6847 			if (!new_prog)
6848 				return -ENOMEM;
6849 
6850 			delta    += cnt - 1;
6851 			env->prog = prog = new_prog;
6852 			insn      = new_prog->insnsi + i + delta;
6853 			continue;
6854 		}
6855 
6856 		if (BPF_CLASS(insn->code) == BPF_LD &&
6857 		    (BPF_MODE(insn->code) == BPF_ABS ||
6858 		     BPF_MODE(insn->code) == BPF_IND)) {
6859 			cnt = env->ops->gen_ld_abs(insn, insn_buf);
6860 			if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
6861 				verbose(env, "bpf verifier is misconfigured\n");
6862 				return -EINVAL;
6863 			}
6864 
6865 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
6866 			if (!new_prog)
6867 				return -ENOMEM;
6868 
6869 			delta    += cnt - 1;
6870 			env->prog = prog = new_prog;
6871 			insn      = new_prog->insnsi + i + delta;
6872 			continue;
6873 		}
6874 
6875 		if (insn->code == (BPF_ALU64 | BPF_ADD | BPF_X) ||
6876 		    insn->code == (BPF_ALU64 | BPF_SUB | BPF_X)) {
6877 			const u8 code_add = BPF_ALU64 | BPF_ADD | BPF_X;
6878 			const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X;
6879 			struct bpf_insn insn_buf[16];
6880 			struct bpf_insn *patch = &insn_buf[0];
6881 			bool issrc, isneg;
6882 			u32 off_reg;
6883 
6884 			aux = &env->insn_aux_data[i + delta];
6885 			if (!aux->alu_state)
6886 				continue;
6887 
6888 			isneg = aux->alu_state & BPF_ALU_NEG_VALUE;
6889 			issrc = (aux->alu_state & BPF_ALU_SANITIZE) ==
6890 				BPF_ALU_SANITIZE_SRC;
6891 
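			/* Editor's summary of the patch below: BPF_REG_AX is
			 * turned into a branch-free mask that is all ones when
			 * off_reg lies within [0, alu_limit - 1] and all
			 * zeroes otherwise. SUB/OR set the sign bit iff the
			 * bound is exceeded (or off_reg is negative); NEG plus
			 * ARSH 63 then smear the inverted sign bit across all
			 * 64 bits, and the AND clamps an out-of-range offset
			 * to 0 even under speculative execution.
			 */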
6892 			off_reg = issrc ? insn->src_reg : insn->dst_reg;
6893 			if (isneg)
6894 				*patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
6895 			*patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit - 1);
6896 			*patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg);
6897 			*patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg);
6898 			*patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);
6899 			*patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63);
6900 			if (issrc) {
6901 				*patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX,
6902 							 off_reg);
6903 				insn->src_reg = BPF_REG_AX;
6904 			} else {
6905 				*patch++ = BPF_ALU64_REG(BPF_AND, off_reg,
6906 							 BPF_REG_AX);
6907 			}
6908 			if (isneg)
6909 				insn->code = insn->code == code_add ?
6910 					     code_sub : code_add;
6911 			*patch++ = *insn;
6912 			if (issrc && isneg)
6913 				*patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
6914 			cnt = patch - insn_buf;
6915 
6916 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
6917 			if (!new_prog)
6918 				return -ENOMEM;
6919 
6920 			delta    += cnt - 1;
6921 			env->prog = prog = new_prog;
6922 			insn      = new_prog->insnsi + i + delta;
6923 			continue;
6924 		}
6925 
6926 		if (insn->code != (BPF_JMP | BPF_CALL))
6927 			continue;
6928 		if (insn->src_reg == BPF_PSEUDO_CALL)
6929 			continue;
6930 
6931 		if (insn->imm == BPF_FUNC_get_route_realm)
6932 			prog->dst_needed = 1;
6933 		if (insn->imm == BPF_FUNC_get_prandom_u32)
6934 			bpf_user_rnd_init_once();
6935 		if (insn->imm == BPF_FUNC_override_return)
6936 			prog->kprobe_override = 1;
6937 		if (insn->imm == BPF_FUNC_tail_call) {
6938 			/* If we tail call into other programs, we
6939 			 * cannot make any assumptions since they can
6940 			 * be replaced dynamically during runtime in
6941 			 * the program array.
6942 			 */
6943 			prog->cb_access = 1;
6944 			env->prog->aux->stack_depth = MAX_BPF_STACK;
6945 			env->prog->aux->max_pkt_offset = MAX_PACKET_OFF;
6946 
6947 			/* mark bpf_tail_call as a different opcode to avoid a
6948 			 * conditional branch in the interpreter for every normal
6949 			 * call and to prevent accidental JITing by a JIT compiler
6950 			 * that doesn't support bpf_tail_call yet
6951 			 */
6952 			insn->imm = 0;
6953 			insn->code = BPF_JMP | BPF_TAIL_CALL;
6954 
6955 			aux = &env->insn_aux_data[i + delta];
6956 			if (!bpf_map_ptr_unpriv(aux))
6957 				continue;
6958 
6959 			/* instead of changing every JIT dealing with tail_call
6960 			 * emit two extra insns:
6961 			 * if (index >= max_entries) goto out;
6962 			 * index &= array->index_mask;
6963 			 * to avoid out-of-bounds cpu speculation
6964 			 */
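			/* Editor's note: index_mask is max_entries rounded up
			 * to a power of two, minus one, and the array
			 * allocation is rounded up to match, so the AND keeps
			 * any speculated index inside the allocation even if
			 * the JGE check is bypassed.
			 */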
6965 			if (bpf_map_ptr_poisoned(aux)) {
6966 				verbose(env, "tail_call abusing map_ptr\n");
6967 				return -EINVAL;
6968 			}
6969 
6970 			map_ptr = BPF_MAP_PTR(aux->map_state);
6971 			insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3,
6972 						  map_ptr->max_entries, 2);
6973 			insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3,
6974 						    container_of(map_ptr,
6975 								 struct bpf_array,
6976 								 map)->index_mask);
6977 			insn_buf[2] = *insn;
6978 			cnt = 3;
6979 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
6980 			if (!new_prog)
6981 				return -ENOMEM;
6982 
6983 			delta    += cnt - 1;
6984 			env->prog = prog = new_prog;
6985 			insn      = new_prog->insnsi + i + delta;
6986 			continue;
6987 		}
6988 
6989 		/* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup
6990 		 * and other inlining handlers are currently limited to 64 bit
6991 		 * only.
6992 		 */
6993 		if (prog->jit_requested && BITS_PER_LONG == 64 &&
6994 		    (insn->imm == BPF_FUNC_map_lookup_elem ||
6995 		     insn->imm == BPF_FUNC_map_update_elem ||
6996 		     insn->imm == BPF_FUNC_map_delete_elem ||
6997 		     insn->imm == BPF_FUNC_map_push_elem   ||
6998 		     insn->imm == BPF_FUNC_map_pop_elem    ||
6999 		     insn->imm == BPF_FUNC_map_peek_elem)) {
7000 			aux = &env->insn_aux_data[i + delta];
7001 			if (bpf_map_ptr_poisoned(aux))
7002 				goto patch_call_imm;
7003 
7004 			map_ptr = BPF_MAP_PTR(aux->map_state);
7005 			ops = map_ptr->ops;
7006 			if (insn->imm == BPF_FUNC_map_lookup_elem &&
7007 			    ops->map_gen_lookup) {
7008 				cnt = ops->map_gen_lookup(map_ptr, insn_buf);
7009 				if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
7010 					verbose(env, "bpf verifier is misconfigured\n");
7011 					return -EINVAL;
7012 				}
7013 
7014 				new_prog = bpf_patch_insn_data(env, i + delta,
7015 							       insn_buf, cnt);
7016 				if (!new_prog)
7017 					return -ENOMEM;
7018 
7019 				delta    += cnt - 1;
7020 				env->prog = prog = new_prog;
7021 				insn      = new_prog->insnsi + i + delta;
7022 				continue;
7023 			}
7024 
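			/* compile-time check that the map ops callbacks have
			 * exactly the signatures the direct calls below assume
			 * (editor's note)
			 */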
7025 			BUILD_BUG_ON(!__same_type(ops->map_lookup_elem,
7026 				     (void *(*)(struct bpf_map *map, void *key))NULL));
7027 			BUILD_BUG_ON(!__same_type(ops->map_delete_elem,
7028 				     (int (*)(struct bpf_map *map, void *key))NULL));
7029 			BUILD_BUG_ON(!__same_type(ops->map_update_elem,
7030 				     (int (*)(struct bpf_map *map, void *key, void *value,
7031 					      u64 flags))NULL));
7032 			BUILD_BUG_ON(!__same_type(ops->map_push_elem,
7033 				     (int (*)(struct bpf_map *map, void *value,
7034 					      u64 flags))NULL));
7035 			BUILD_BUG_ON(!__same_type(ops->map_pop_elem,
7036 				     (int (*)(struct bpf_map *map, void *value))NULL));
7037 			BUILD_BUG_ON(!__same_type(ops->map_peek_elem,
7038 				     (int (*)(struct bpf_map *map, void *value))NULL));
7039 
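			/* rewrite the helper call into a direct call to this
			 * map's ops function, skipping the generic helper
			 * dispatch; imm becomes the target's offset from
			 * __bpf_call_base (editor's note)
			 */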
7040 			switch (insn->imm) {
7041 			case BPF_FUNC_map_lookup_elem:
7042 				insn->imm = BPF_CAST_CALL(ops->map_lookup_elem) -
7043 					    __bpf_call_base;
7044 				continue;
7045 			case BPF_FUNC_map_update_elem:
7046 				insn->imm = BPF_CAST_CALL(ops->map_update_elem) -
7047 					    __bpf_call_base;
7048 				continue;
7049 			case BPF_FUNC_map_delete_elem:
7050 				insn->imm = BPF_CAST_CALL(ops->map_delete_elem) -
7051 					    __bpf_call_base;
7052 				continue;
7053 			case BPF_FUNC_map_push_elem:
7054 				insn->imm = BPF_CAST_CALL(ops->map_push_elem) -
7055 					    __bpf_call_base;
7056 				continue;
7057 			case BPF_FUNC_map_pop_elem:
7058 				insn->imm = BPF_CAST_CALL(ops->map_pop_elem) -
7059 					    __bpf_call_base;
7060 				continue;
7061 			case BPF_FUNC_map_peek_elem:
7062 				insn->imm = BPF_CAST_CALL(ops->map_peek_elem) -
7063 					    __bpf_call_base;
7064 				continue;
7065 			}
7066 
7067 			goto patch_call_imm;
7068 		}
7069 
7070 patch_call_imm:
7071 		fn = env->ops->get_func_proto(insn->imm, env->prog);
7072 		/* all functions that have a prototype and that the verifier
7073 		 * allowed programs to call must be real in-kernel functions
7074 		 */
7075 		if (!fn->func) {
7076 			verbose(env,
7077 				"kernel subsystem misconfigured func %s#%d\n",
7078 				func_id_name(insn->imm), insn->imm);
7079 			return -EFAULT;
7080 		}
7081 		insn->imm = fn->func - __bpf_call_base;
7082 	}
7083 
7084 	return 0;
7085 }
7086 
7087 static void free_states(struct bpf_verifier_env *env)
7088 {
7089 	struct bpf_verifier_state_list *sl, *sln;
7090 	int i;
7091 
7092 	if (!env->explored_states)
7093 		return;
7094 
7095 	for (i = 0; i < env->prog->len; i++) {
7096 		sl = env->explored_states[i];
7097 
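		/* a non-NULL list is terminated by the STATE_LIST_MARK
		 * sentinel rather than by NULL (editor's note)
		 */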
7098 		if (sl)
7099 			while (sl != STATE_LIST_MARK) {
7100 				sln = sl->next;
7101 				free_verifier_state(&sl->state, false);
7102 				kfree(sl);
7103 				sl = sln;
7104 			}
7105 	}
7106 
7107 	kfree(env->explored_states);
7108 }
7109 
7110 int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
7111 	      union bpf_attr __user *uattr)
7112 {
7113 	struct bpf_verifier_env *env;
7114 	struct bpf_verifier_log *log;
7115 	int ret = -EINVAL;
7116 
7117 	/* if no program types are registered, no program is valid */
7118 	if (ARRAY_SIZE(bpf_verifier_ops) == 0)
7119 		return -EINVAL;
7120 
7121 	/* 'struct bpf_verifier_env' can be global, but since it's not small,
7122 	 * allocate/free it every time bpf_check() is called
7123 	 */
7124 	env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
7125 	if (!env)
7126 		return -ENOMEM;
7127 	log = &env->log;
7128 
7129 	env->insn_aux_data =
7130 		vzalloc(array_size(sizeof(struct bpf_insn_aux_data),
7131 				   (*prog)->len));
7132 	ret = -ENOMEM;
7133 	if (!env->insn_aux_data)
7134 		goto err_free_env;
7135 	env->prog = *prog;
7136 	env->ops = bpf_verifier_ops[env->prog->type];
7137 
7138 	/* grab the mutex to protect a few globals used by the verifier */
7139 	mutex_lock(&bpf_verifier_lock);
7140 
7141 	if (attr->log_level || attr->log_buf || attr->log_size) {
7142 		/* user requested verbose verifier output
7143 		 * and supplied a buffer to store the verification trace
7144 		 */
7145 		log->level = attr->log_level;
7146 		log->ubuf = (char __user *) (unsigned long) attr->log_buf;
7147 		log->len_total = attr->log_size;
7148 
7149 		ret = -EINVAL;
7150 		/* log attributes have to be sane */
7151 		if (log->len_total < 128 || log->len_total > UINT_MAX >> 8 ||
7152 		    !log->level || !log->ubuf)
7153 			goto err_unlock;
7154 	}
7155 
7156 	env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT);
7157 	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
7158 		env->strict_alignment = true;
7159 	if (attr->prog_flags & BPF_F_ANY_ALIGNMENT)
7160 		env->strict_alignment = false;
7161 
7162 	ret = replace_map_fd_with_map_ptr(env);
7163 	if (ret < 0)
7164 		goto skip_full_check;
7165 
7166 	if (bpf_prog_is_dev_bound(env->prog->aux)) {
7167 		ret = bpf_prog_offload_verifier_prep(env->prog);
7168 		if (ret)
7169 			goto skip_full_check;
7170 	}
7171 
7172 	env->explored_states = kcalloc(env->prog->len,
7173 				       sizeof(struct bpf_verifier_state_list *),
7174 				       GFP_USER);
7175 	ret = -ENOMEM;
7176 	if (!env->explored_states)
7177 		goto skip_full_check;
7178 
7179 	env->allow_ptr_leaks = capable(CAP_SYS_ADMIN);
7180 
7181 	ret = check_subprogs(env);
7182 	if (ret < 0)
7183 		goto skip_full_check;
7184 
7185 	ret = check_btf_info(env, attr, uattr);
7186 	if (ret < 0)
7187 		goto skip_full_check;
7188 
7189 	ret = check_cfg(env);
7190 	if (ret < 0)
7191 		goto skip_full_check;
7192 
7193 	ret = do_check(env);
7194 	if (env->cur_state) {
7195 		free_verifier_state(env->cur_state, true);
7196 		env->cur_state = NULL;
7197 	}
7198 
7199 	if (ret == 0 && bpf_prog_is_dev_bound(env->prog->aux))
7200 		ret = bpf_prog_offload_finalize(env);
7201 
7202 skip_full_check:
7203 	while (!pop_stack(env, NULL, NULL));
7204 	free_states(env);
7205 
7206 	if (ret == 0)
7207 		ret = check_max_stack_depth(env);
7208 
7209 	/* instruction rewrites happen after this point */
7210 	if (ret == 0)
7211 		sanitize_dead_code(env);
7212 
7213 	if (ret == 0)
7214 		/* program is valid, convert *(u32*)(ctx + off) accesses */
7215 		ret = convert_ctx_accesses(env);
7216 
7217 	if (ret == 0)
7218 		ret = fixup_bpf_calls(env);
7219 
7220 	if (ret == 0)
7221 		ret = fixup_call_args(env);
7222 
7223 	if (log->level && bpf_verifier_log_full(log))
7224 		ret = -ENOSPC;
7225 	if (log->level && !log->ubuf) {
7226 		ret = -EFAULT;
7227 		goto err_release_maps;
7228 	}
7229 
7230 	if (ret == 0 && env->used_map_cnt) {
7231 		/* if the program passed the verifier, update used_maps in bpf_prog_info */
7232 		env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt,
7233 							  sizeof(env->used_maps[0]),
7234 							  GFP_KERNEL);
7235 
7236 		if (!env->prog->aux->used_maps) {
7237 			ret = -ENOMEM;
7238 			goto err_release_maps;
7239 		}
7240 
7241 		memcpy(env->prog->aux->used_maps, env->used_maps,
7242 		       sizeof(env->used_maps[0]) * env->used_map_cnt);
7243 		env->prog->aux->used_map_cnt = env->used_map_cnt;
7244 
7245 		/* program is valid. Convert pseudo bpf_ld_imm64 into generic
7246 		 * bpf_ld_imm64 instructions
7247 		 */
7248 		convert_pseudo_ld_imm64(env);
7249 	}
7250 
7251 	if (ret == 0)
7252 		adjust_btf_func(env);
7253 
7254 err_release_maps:
7255 	if (!env->prog->aux->used_maps)
7256 		/* if we didn't copy map pointers into bpf_prog_info, release
7257 		 * them now. Otherwise free_used_maps() will release them.
7258 		 */
7259 		release_maps(env);
7260 	*prog = env->prog;
7261 err_unlock:
7262 	mutex_unlock(&bpf_verifier_lock);
7263 	vfree(env->insn_aux_data);
7264 err_free_env:
7265 	kfree(env);
7266 	return ret;
7267 }
7268