xref: /openbmc/linux/kernel/bpf/verifier.c (revision a72594ca)
1 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
2  * Copyright (c) 2016 Facebook
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of version 2 of the GNU General Public
6  * License as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11  * General Public License for more details.
12  */
13 #include <linux/kernel.h>
14 #include <linux/types.h>
15 #include <linux/slab.h>
16 #include <linux/bpf.h>
17 #include <linux/bpf_verifier.h>
18 #include <linux/filter.h>
19 #include <net/netlink.h>
20 #include <linux/file.h>
21 #include <linux/vmalloc.h>
22 #include <linux/stringify.h>
23 
24 #include "disasm.h"
25 
26 static const struct bpf_verifier_ops * const bpf_verifier_ops[] = {
27 #define BPF_PROG_TYPE(_id, _name) \
28 	[_id] = & _name ## _verifier_ops,
29 #define BPF_MAP_TYPE(_id, _ops)
30 #include <linux/bpf_types.h>
31 #undef BPF_PROG_TYPE
32 #undef BPF_MAP_TYPE
33 };
34 
35 /* bpf_check() is a static code analyzer that walks eBPF program
36  * instruction by instruction and updates register/stack state.
37  * All paths of conditional branches are analyzed until 'bpf_exit' insn.
38  *
39  * The first pass is depth-first-search to check that the program is a DAG.
40  * It rejects the following programs:
41  * - larger than BPF_MAXINSNS insns
42  * - if loop is present (detected via back-edge)
43  * - unreachable insns exist (shouldn't be a forest. program = one function)
44  * - out of bounds or malformed jumps
45  * The second pass is all possible path descent from the 1st insn.
46  * Since it's analyzing all pathes through the program, the length of the
47  * analysis is limited to 64k insn, which may be hit even if total number of
48  * insn is less then 4K, but there are too many branches that change stack/regs.
49  * Number of 'branches to be analyzed' is limited to 1k
50  *
51  * On entry to each instruction, each register has a type, and the instruction
52  * changes the types of the registers depending on instruction semantics.
53  * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is
54  * copied to R1.
55  *
56  * All registers are 64-bit.
57  * R0 - return register
58  * R1-R5 argument passing registers
59  * R6-R9 callee saved registers
60  * R10 - frame pointer read-only
61  *
62  * At the start of BPF program the register R1 contains a pointer to bpf_context
63  * and has type PTR_TO_CTX.
64  *
65  * Verifier tracks arithmetic operations on pointers in case:
66  *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
67  *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
68  * 1st insn copies R10 (which has FRAME_PTR) type into R1
69  * and 2nd arithmetic instruction is pattern matched to recognize
70  * that it wants to construct a pointer to some element within stack.
71  * So after 2nd insn, the register R1 has type PTR_TO_STACK
72  * (and -20 constant is saved for further stack bounds checking).
73  * Meaning that this reg is a pointer to stack plus known immediate constant.
74  *
75  * Most of the time the registers have SCALAR_VALUE type, which
76  * means the register has some value, but it's not a valid pointer.
77  * (like pointer plus pointer becomes SCALAR_VALUE type)
78  *
79  * When verifier sees load or store instructions the type of base register
80  * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK. These are three pointer
81  * types recognized by check_mem_access() function.
82  *
83  * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value'
84  * and the range of [ptr, ptr + map's value_size) is accessible.
85  *
86  * registers used to pass values to function calls are checked against
87  * function argument constraints.
88  *
89  * ARG_PTR_TO_MAP_KEY is one of such argument constraints.
90  * It means that the register type passed to this function must be
91  * PTR_TO_STACK and it will be used inside the function as
92  * 'pointer to map element key'
93  *
94  * For example the argument constraints for bpf_map_lookup_elem():
95  *   .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
96  *   .arg1_type = ARG_CONST_MAP_PTR,
97  *   .arg2_type = ARG_PTR_TO_MAP_KEY,
98  *
99  * ret_type says that this function returns 'pointer to map elem value or null'
100  * function expects 1st argument to be a const pointer to 'struct bpf_map' and
101  * 2nd argument should be a pointer to stack, which will be used inside
102  * the helper function as a pointer to map element key.
103  *
104  * On the kernel side the helper function looks like:
105  * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
106  * {
107  *    struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
108  *    void *key = (void *) (unsigned long) r2;
109  *    void *value;
110  *
111  *    here kernel can access 'key' and 'map' pointers safely, knowing that
112  *    [key, key + map->key_size) bytes are valid and were initialized on
113  *    the stack of eBPF program.
114  * }
115  *
116  * Corresponding eBPF program may look like:
117  *    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),  // after this insn R2 type is FRAME_PTR
118  *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
119  *    BPF_LD_MAP_FD(BPF_REG_1, map_fd),      // after this insn R1 type is CONST_PTR_TO_MAP
120  *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
121  * here verifier looks at prototype of map_lookup_elem() and sees:
122  * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok,
123  * Now verifier knows that this map has key of R1->map_ptr->key_size bytes
124  *
125  * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far,
126  * Now verifier checks that [R2, R2 + map's key_size) are within stack limits
127  * and were initialized prior to this call.
128  * If it's ok, then verifier allows this BPF_CALL insn and looks at
129  * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets
130  * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function
131  * returns ether pointer to map value or NULL.
132  *
133  * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
134  * insn, the register holding that pointer in the true branch changes state to
135  * PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false
136  * branch. See check_cond_jmp_op().
137  *
138  * After the call R0 is set to return type of the function and registers R1-R5
139  * are set to NOT_INIT to indicate that they are no longer readable.
140  */
141 
142 /* verifier_state + insn_idx are pushed to stack when branch is encountered */
143 struct bpf_verifier_stack_elem {
144 	/* verifer state is 'st'
145 	 * before processing instruction 'insn_idx'
146 	 * and after processing instruction 'prev_insn_idx'
147 	 */
148 	struct bpf_verifier_state st;
149 	int insn_idx;
150 	int prev_insn_idx;
151 	struct bpf_verifier_stack_elem *next;
152 };
153 
154 #define BPF_COMPLEXITY_LIMIT_INSNS	131072
155 #define BPF_COMPLEXITY_LIMIT_STACK	1024
156 
157 #define BPF_MAP_PTR_POISON ((void *)0xeB9F + POISON_POINTER_DELTA)
158 
159 struct bpf_call_arg_meta {
160 	struct bpf_map *map_ptr;
161 	bool raw_mode;
162 	bool pkt_access;
163 	int regno;
164 	int access_size;
165 };
166 
167 static DEFINE_MUTEX(bpf_verifier_lock);
168 
169 /* log_level controls verbosity level of eBPF verifier.
170  * verbose() is used to dump the verification trace to the log, so the user
171  * can figure out what's wrong with the program
172  */
173 static __printf(2, 3) void verbose(struct bpf_verifier_env *env,
174 				   const char *fmt, ...)
175 {
176 	struct bpf_verifer_log *log = &env->log;
177 	unsigned int n;
178 	va_list args;
179 
180 	if (!log->level || !log->ubuf || bpf_verifier_log_full(log))
181 		return;
182 
183 	va_start(args, fmt);
184 	n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args);
185 	va_end(args);
186 
187 	WARN_ONCE(n >= BPF_VERIFIER_TMP_LOG_SIZE - 1,
188 		  "verifier log line truncated - local buffer too short\n");
189 
190 	n = min(log->len_total - log->len_used - 1, n);
191 	log->kbuf[n] = '\0';
192 
193 	if (!copy_to_user(log->ubuf + log->len_used, log->kbuf, n + 1))
194 		log->len_used += n;
195 	else
196 		log->ubuf = NULL;
197 }
198 
199 static bool type_is_pkt_pointer(enum bpf_reg_type type)
200 {
201 	return type == PTR_TO_PACKET ||
202 	       type == PTR_TO_PACKET_META;
203 }
204 
205 /* string representation of 'enum bpf_reg_type' */
206 static const char * const reg_type_str[] = {
207 	[NOT_INIT]		= "?",
208 	[SCALAR_VALUE]		= "inv",
209 	[PTR_TO_CTX]		= "ctx",
210 	[CONST_PTR_TO_MAP]	= "map_ptr",
211 	[PTR_TO_MAP_VALUE]	= "map_value",
212 	[PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null",
213 	[PTR_TO_STACK]		= "fp",
214 	[PTR_TO_PACKET]		= "pkt",
215 	[PTR_TO_PACKET_META]	= "pkt_meta",
216 	[PTR_TO_PACKET_END]	= "pkt_end",
217 };
218 
219 static void print_verifier_state(struct bpf_verifier_env *env,
220 				 struct bpf_verifier_state *state)
221 {
222 	struct bpf_reg_state *reg;
223 	enum bpf_reg_type t;
224 	int i;
225 
226 	for (i = 0; i < MAX_BPF_REG; i++) {
227 		reg = &state->regs[i];
228 		t = reg->type;
229 		if (t == NOT_INIT)
230 			continue;
231 		verbose(env, " R%d=%s", i, reg_type_str[t]);
232 		if ((t == SCALAR_VALUE || t == PTR_TO_STACK) &&
233 		    tnum_is_const(reg->var_off)) {
234 			/* reg->off should be 0 for SCALAR_VALUE */
235 			verbose(env, "%lld", reg->var_off.value + reg->off);
236 		} else {
237 			verbose(env, "(id=%d", reg->id);
238 			if (t != SCALAR_VALUE)
239 				verbose(env, ",off=%d", reg->off);
240 			if (type_is_pkt_pointer(t))
241 				verbose(env, ",r=%d", reg->range);
242 			else if (t == CONST_PTR_TO_MAP ||
243 				 t == PTR_TO_MAP_VALUE ||
244 				 t == PTR_TO_MAP_VALUE_OR_NULL)
245 				verbose(env, ",ks=%d,vs=%d",
246 					reg->map_ptr->key_size,
247 					reg->map_ptr->value_size);
248 			if (tnum_is_const(reg->var_off)) {
249 				/* Typically an immediate SCALAR_VALUE, but
250 				 * could be a pointer whose offset is too big
251 				 * for reg->off
252 				 */
253 				verbose(env, ",imm=%llx", reg->var_off.value);
254 			} else {
255 				if (reg->smin_value != reg->umin_value &&
256 				    reg->smin_value != S64_MIN)
257 					verbose(env, ",smin_value=%lld",
258 						(long long)reg->smin_value);
259 				if (reg->smax_value != reg->umax_value &&
260 				    reg->smax_value != S64_MAX)
261 					verbose(env, ",smax_value=%lld",
262 						(long long)reg->smax_value);
263 				if (reg->umin_value != 0)
264 					verbose(env, ",umin_value=%llu",
265 						(unsigned long long)reg->umin_value);
266 				if (reg->umax_value != U64_MAX)
267 					verbose(env, ",umax_value=%llu",
268 						(unsigned long long)reg->umax_value);
269 				if (!tnum_is_unknown(reg->var_off)) {
270 					char tn_buf[48];
271 
272 					tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
273 					verbose(env, ",var_off=%s", tn_buf);
274 				}
275 			}
276 			verbose(env, ")");
277 		}
278 	}
279 	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
280 		if (state->stack[i].slot_type[0] == STACK_SPILL)
281 			verbose(env, " fp%d=%s",
282 				-MAX_BPF_STACK + i * BPF_REG_SIZE,
283 				reg_type_str[state->stack[i].spilled_ptr.type]);
284 	}
285 	verbose(env, "\n");
286 }
287 
288 static int copy_stack_state(struct bpf_verifier_state *dst,
289 			    const struct bpf_verifier_state *src)
290 {
291 	if (!src->stack)
292 		return 0;
293 	if (WARN_ON_ONCE(dst->allocated_stack < src->allocated_stack)) {
294 		/* internal bug, make state invalid to reject the program */
295 		memset(dst, 0, sizeof(*dst));
296 		return -EFAULT;
297 	}
298 	memcpy(dst->stack, src->stack,
299 	       sizeof(*src->stack) * (src->allocated_stack / BPF_REG_SIZE));
300 	return 0;
301 }
302 
303 /* do_check() starts with zero-sized stack in struct bpf_verifier_state to
304  * make it consume minimal amount of memory. check_stack_write() access from
305  * the program calls into realloc_verifier_state() to grow the stack size.
306  * Note there is a non-zero 'parent' pointer inside bpf_verifier_state
307  * which this function copies over. It points to previous bpf_verifier_state
308  * which is never reallocated
309  */
310 static int realloc_verifier_state(struct bpf_verifier_state *state, int size,
311 				  bool copy_old)
312 {
313 	u32 old_size = state->allocated_stack;
314 	struct bpf_stack_state *new_stack;
315 	int slot = size / BPF_REG_SIZE;
316 
317 	if (size <= old_size || !size) {
318 		if (copy_old)
319 			return 0;
320 		state->allocated_stack = slot * BPF_REG_SIZE;
321 		if (!size && old_size) {
322 			kfree(state->stack);
323 			state->stack = NULL;
324 		}
325 		return 0;
326 	}
327 	new_stack = kmalloc_array(slot, sizeof(struct bpf_stack_state),
328 				  GFP_KERNEL);
329 	if (!new_stack)
330 		return -ENOMEM;
331 	if (copy_old) {
332 		if (state->stack)
333 			memcpy(new_stack, state->stack,
334 			       sizeof(*new_stack) * (old_size / BPF_REG_SIZE));
335 		memset(new_stack + old_size / BPF_REG_SIZE, 0,
336 		       sizeof(*new_stack) * (size - old_size) / BPF_REG_SIZE);
337 	}
338 	state->allocated_stack = slot * BPF_REG_SIZE;
339 	kfree(state->stack);
340 	state->stack = new_stack;
341 	return 0;
342 }
343 
344 static void free_verifier_state(struct bpf_verifier_state *state,
345 				bool free_self)
346 {
347 	kfree(state->stack);
348 	if (free_self)
349 		kfree(state);
350 }
351 
352 /* copy verifier state from src to dst growing dst stack space
353  * when necessary to accommodate larger src stack
354  */
355 static int copy_verifier_state(struct bpf_verifier_state *dst,
356 			       const struct bpf_verifier_state *src)
357 {
358 	int err;
359 
360 	err = realloc_verifier_state(dst, src->allocated_stack, false);
361 	if (err)
362 		return err;
363 	memcpy(dst, src, offsetof(struct bpf_verifier_state, allocated_stack));
364 	return copy_stack_state(dst, src);
365 }
366 
367 static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx,
368 		     int *insn_idx)
369 {
370 	struct bpf_verifier_state *cur = env->cur_state;
371 	struct bpf_verifier_stack_elem *elem, *head = env->head;
372 	int err;
373 
374 	if (env->head == NULL)
375 		return -ENOENT;
376 
377 	if (cur) {
378 		err = copy_verifier_state(cur, &head->st);
379 		if (err)
380 			return err;
381 	}
382 	if (insn_idx)
383 		*insn_idx = head->insn_idx;
384 	if (prev_insn_idx)
385 		*prev_insn_idx = head->prev_insn_idx;
386 	elem = head->next;
387 	free_verifier_state(&head->st, false);
388 	kfree(head);
389 	env->head = elem;
390 	env->stack_size--;
391 	return 0;
392 }
393 
394 static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
395 					     int insn_idx, int prev_insn_idx)
396 {
397 	struct bpf_verifier_state *cur = env->cur_state;
398 	struct bpf_verifier_stack_elem *elem;
399 	int err;
400 
401 	elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL);
402 	if (!elem)
403 		goto err;
404 
405 	elem->insn_idx = insn_idx;
406 	elem->prev_insn_idx = prev_insn_idx;
407 	elem->next = env->head;
408 	env->head = elem;
409 	env->stack_size++;
410 	err = copy_verifier_state(&elem->st, cur);
411 	if (err)
412 		goto err;
413 	if (env->stack_size > BPF_COMPLEXITY_LIMIT_STACK) {
414 		verbose(env, "BPF program is too complex\n");
415 		goto err;
416 	}
417 	return &elem->st;
418 err:
419 	/* pop all elements and return */
420 	while (!pop_stack(env, NULL, NULL));
421 	return NULL;
422 }
423 
424 #define CALLER_SAVED_REGS 6
425 static const int caller_saved[CALLER_SAVED_REGS] = {
426 	BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
427 };
428 
429 static void __mark_reg_not_init(struct bpf_reg_state *reg);
430 
431 /* Mark the unknown part of a register (variable offset or scalar value) as
432  * known to have the value @imm.
433  */
434 static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm)
435 {
436 	reg->id = 0;
437 	reg->var_off = tnum_const(imm);
438 	reg->smin_value = (s64)imm;
439 	reg->smax_value = (s64)imm;
440 	reg->umin_value = imm;
441 	reg->umax_value = imm;
442 }
443 
444 /* Mark the 'variable offset' part of a register as zero.  This should be
445  * used only on registers holding a pointer type.
446  */
447 static void __mark_reg_known_zero(struct bpf_reg_state *reg)
448 {
449 	__mark_reg_known(reg, 0);
450 }
451 
452 static void mark_reg_known_zero(struct bpf_verifier_env *env,
453 				struct bpf_reg_state *regs, u32 regno)
454 {
455 	if (WARN_ON(regno >= MAX_BPF_REG)) {
456 		verbose(env, "mark_reg_known_zero(regs, %u)\n", regno);
457 		/* Something bad happened, let's kill all regs */
458 		for (regno = 0; regno < MAX_BPF_REG; regno++)
459 			__mark_reg_not_init(regs + regno);
460 		return;
461 	}
462 	__mark_reg_known_zero(regs + regno);
463 }
464 
465 static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg)
466 {
467 	return type_is_pkt_pointer(reg->type);
468 }
469 
470 static bool reg_is_pkt_pointer_any(const struct bpf_reg_state *reg)
471 {
472 	return reg_is_pkt_pointer(reg) ||
473 	       reg->type == PTR_TO_PACKET_END;
474 }
475 
476 /* Unmodified PTR_TO_PACKET[_META,_END] register from ctx access. */
477 static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg,
478 				    enum bpf_reg_type which)
479 {
480 	/* The register can already have a range from prior markings.
481 	 * This is fine as long as it hasn't been advanced from its
482 	 * origin.
483 	 */
484 	return reg->type == which &&
485 	       reg->id == 0 &&
486 	       reg->off == 0 &&
487 	       tnum_equals_const(reg->var_off, 0);
488 }
489 
490 /* Attempts to improve min/max values based on var_off information */
491 static void __update_reg_bounds(struct bpf_reg_state *reg)
492 {
493 	/* min signed is max(sign bit) | min(other bits) */
494 	reg->smin_value = max_t(s64, reg->smin_value,
495 				reg->var_off.value | (reg->var_off.mask & S64_MIN));
496 	/* max signed is min(sign bit) | max(other bits) */
497 	reg->smax_value = min_t(s64, reg->smax_value,
498 				reg->var_off.value | (reg->var_off.mask & S64_MAX));
499 	reg->umin_value = max(reg->umin_value, reg->var_off.value);
500 	reg->umax_value = min(reg->umax_value,
501 			      reg->var_off.value | reg->var_off.mask);
502 }
503 
504 /* Uses signed min/max values to inform unsigned, and vice-versa */
505 static void __reg_deduce_bounds(struct bpf_reg_state *reg)
506 {
507 	/* Learn sign from signed bounds.
508 	 * If we cannot cross the sign boundary, then signed and unsigned bounds
509 	 * are the same, so combine.  This works even in the negative case, e.g.
510 	 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
511 	 */
512 	if (reg->smin_value >= 0 || reg->smax_value < 0) {
513 		reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
514 							  reg->umin_value);
515 		reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
516 							  reg->umax_value);
517 		return;
518 	}
519 	/* Learn sign from unsigned bounds.  Signed bounds cross the sign
520 	 * boundary, so we must be careful.
521 	 */
522 	if ((s64)reg->umax_value >= 0) {
523 		/* Positive.  We can't learn anything from the smin, but smax
524 		 * is positive, hence safe.
525 		 */
526 		reg->smin_value = reg->umin_value;
527 		reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
528 							  reg->umax_value);
529 	} else if ((s64)reg->umin_value < 0) {
530 		/* Negative.  We can't learn anything from the smax, but smin
531 		 * is negative, hence safe.
532 		 */
533 		reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
534 							  reg->umin_value);
535 		reg->smax_value = reg->umax_value;
536 	}
537 }
538 
539 /* Attempts to improve var_off based on unsigned min/max information */
540 static void __reg_bound_offset(struct bpf_reg_state *reg)
541 {
542 	reg->var_off = tnum_intersect(reg->var_off,
543 				      tnum_range(reg->umin_value,
544 						 reg->umax_value));
545 }
546 
547 /* Reset the min/max bounds of a register */
548 static void __mark_reg_unbounded(struct bpf_reg_state *reg)
549 {
550 	reg->smin_value = S64_MIN;
551 	reg->smax_value = S64_MAX;
552 	reg->umin_value = 0;
553 	reg->umax_value = U64_MAX;
554 }
555 
556 /* Mark a register as having a completely unknown (scalar) value. */
557 static void __mark_reg_unknown(struct bpf_reg_state *reg)
558 {
559 	reg->type = SCALAR_VALUE;
560 	reg->id = 0;
561 	reg->off = 0;
562 	reg->var_off = tnum_unknown;
563 	__mark_reg_unbounded(reg);
564 }
565 
566 static void mark_reg_unknown(struct bpf_verifier_env *env,
567 			     struct bpf_reg_state *regs, u32 regno)
568 {
569 	if (WARN_ON(regno >= MAX_BPF_REG)) {
570 		verbose(env, "mark_reg_unknown(regs, %u)\n", regno);
571 		/* Something bad happened, let's kill all regs */
572 		for (regno = 0; regno < MAX_BPF_REG; regno++)
573 			__mark_reg_not_init(regs + regno);
574 		return;
575 	}
576 	__mark_reg_unknown(regs + regno);
577 }
578 
579 static void __mark_reg_not_init(struct bpf_reg_state *reg)
580 {
581 	__mark_reg_unknown(reg);
582 	reg->type = NOT_INIT;
583 }
584 
585 static void mark_reg_not_init(struct bpf_verifier_env *env,
586 			      struct bpf_reg_state *regs, u32 regno)
587 {
588 	if (WARN_ON(regno >= MAX_BPF_REG)) {
589 		verbose(env, "mark_reg_not_init(regs, %u)\n", regno);
590 		/* Something bad happened, let's kill all regs */
591 		for (regno = 0; regno < MAX_BPF_REG; regno++)
592 			__mark_reg_not_init(regs + regno);
593 		return;
594 	}
595 	__mark_reg_not_init(regs + regno);
596 }
597 
598 static void init_reg_state(struct bpf_verifier_env *env,
599 			   struct bpf_reg_state *regs)
600 {
601 	int i;
602 
603 	for (i = 0; i < MAX_BPF_REG; i++) {
604 		mark_reg_not_init(env, regs, i);
605 		regs[i].live = REG_LIVE_NONE;
606 	}
607 
608 	/* frame pointer */
609 	regs[BPF_REG_FP].type = PTR_TO_STACK;
610 	mark_reg_known_zero(env, regs, BPF_REG_FP);
611 
612 	/* 1st arg to a function */
613 	regs[BPF_REG_1].type = PTR_TO_CTX;
614 	mark_reg_known_zero(env, regs, BPF_REG_1);
615 }
616 
617 enum reg_arg_type {
618 	SRC_OP,		/* register is used as source operand */
619 	DST_OP,		/* register is used as destination operand */
620 	DST_OP_NO_MARK	/* same as above, check only, don't mark */
621 };
622 
623 static void mark_reg_read(const struct bpf_verifier_state *state, u32 regno)
624 {
625 	struct bpf_verifier_state *parent = state->parent;
626 
627 	if (regno == BPF_REG_FP)
628 		/* We don't need to worry about FP liveness because it's read-only */
629 		return;
630 
631 	while (parent) {
632 		/* if read wasn't screened by an earlier write ... */
633 		if (state->regs[regno].live & REG_LIVE_WRITTEN)
634 			break;
635 		/* ... then we depend on parent's value */
636 		parent->regs[regno].live |= REG_LIVE_READ;
637 		state = parent;
638 		parent = state->parent;
639 	}
640 }
641 
642 static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
643 			 enum reg_arg_type t)
644 {
645 	struct bpf_reg_state *regs = env->cur_state->regs;
646 
647 	if (regno >= MAX_BPF_REG) {
648 		verbose(env, "R%d is invalid\n", regno);
649 		return -EINVAL;
650 	}
651 
652 	if (t == SRC_OP) {
653 		/* check whether register used as source operand can be read */
654 		if (regs[regno].type == NOT_INIT) {
655 			verbose(env, "R%d !read_ok\n", regno);
656 			return -EACCES;
657 		}
658 		mark_reg_read(env->cur_state, regno);
659 	} else {
660 		/* check whether register used as dest operand can be written to */
661 		if (regno == BPF_REG_FP) {
662 			verbose(env, "frame pointer is read only\n");
663 			return -EACCES;
664 		}
665 		regs[regno].live |= REG_LIVE_WRITTEN;
666 		if (t == DST_OP)
667 			mark_reg_unknown(env, regs, regno);
668 	}
669 	return 0;
670 }
671 
672 static bool is_spillable_regtype(enum bpf_reg_type type)
673 {
674 	switch (type) {
675 	case PTR_TO_MAP_VALUE:
676 	case PTR_TO_MAP_VALUE_OR_NULL:
677 	case PTR_TO_STACK:
678 	case PTR_TO_CTX:
679 	case PTR_TO_PACKET:
680 	case PTR_TO_PACKET_META:
681 	case PTR_TO_PACKET_END:
682 	case CONST_PTR_TO_MAP:
683 		return true;
684 	default:
685 		return false;
686 	}
687 }
688 
689 /* check_stack_read/write functions track spill/fill of registers,
690  * stack boundary and alignment are checked in check_mem_access()
691  */
692 static int check_stack_write(struct bpf_verifier_env *env,
693 			     struct bpf_verifier_state *state, int off,
694 			     int size, int value_regno)
695 {
696 	int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err;
697 
698 	err = realloc_verifier_state(state, round_up(slot + 1, BPF_REG_SIZE),
699 				     true);
700 	if (err)
701 		return err;
702 	/* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
703 	 * so it's aligned access and [off, off + size) are within stack limits
704 	 */
705 	if (!env->allow_ptr_leaks &&
706 	    state->stack[spi].slot_type[0] == STACK_SPILL &&
707 	    size != BPF_REG_SIZE) {
708 		verbose(env, "attempt to corrupt spilled pointer on stack\n");
709 		return -EACCES;
710 	}
711 
712 	if (value_regno >= 0 &&
713 	    is_spillable_regtype(state->regs[value_regno].type)) {
714 
715 		/* register containing pointer is being spilled into stack */
716 		if (size != BPF_REG_SIZE) {
717 			verbose(env, "invalid size of register spill\n");
718 			return -EACCES;
719 		}
720 
721 		/* save register state */
722 		state->stack[spi].spilled_ptr = state->regs[value_regno];
723 		state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
724 
725 		for (i = 0; i < BPF_REG_SIZE; i++)
726 			state->stack[spi].slot_type[i] = STACK_SPILL;
727 	} else {
728 		/* regular write of data into stack */
729 		state->stack[spi].spilled_ptr = (struct bpf_reg_state) {};
730 
731 		for (i = 0; i < size; i++)
732 			state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] =
733 				STACK_MISC;
734 	}
735 	return 0;
736 }
737 
738 static void mark_stack_slot_read(const struct bpf_verifier_state *state, int slot)
739 {
740 	struct bpf_verifier_state *parent = state->parent;
741 
742 	while (parent) {
743 		/* if read wasn't screened by an earlier write ... */
744 		if (state->stack[slot].spilled_ptr.live & REG_LIVE_WRITTEN)
745 			break;
746 		/* ... then we depend on parent's value */
747 		parent->stack[slot].spilled_ptr.live |= REG_LIVE_READ;
748 		state = parent;
749 		parent = state->parent;
750 	}
751 }
752 
753 static int check_stack_read(struct bpf_verifier_env *env,
754 			    struct bpf_verifier_state *state, int off, int size,
755 			    int value_regno)
756 {
757 	int i, slot = -off - 1, spi = slot / BPF_REG_SIZE;
758 	u8 *stype;
759 
760 	if (state->allocated_stack <= slot) {
761 		verbose(env, "invalid read from stack off %d+0 size %d\n",
762 			off, size);
763 		return -EACCES;
764 	}
765 	stype = state->stack[spi].slot_type;
766 
767 	if (stype[0] == STACK_SPILL) {
768 		if (size != BPF_REG_SIZE) {
769 			verbose(env, "invalid size of register spill\n");
770 			return -EACCES;
771 		}
772 		for (i = 1; i < BPF_REG_SIZE; i++) {
773 			if (stype[(slot - i) % BPF_REG_SIZE] != STACK_SPILL) {
774 				verbose(env, "corrupted spill memory\n");
775 				return -EACCES;
776 			}
777 		}
778 
779 		if (value_regno >= 0) {
780 			/* restore register state from stack */
781 			state->regs[value_regno] = state->stack[spi].spilled_ptr;
782 			mark_stack_slot_read(state, spi);
783 		}
784 		return 0;
785 	} else {
786 		for (i = 0; i < size; i++) {
787 			if (stype[(slot - i) % BPF_REG_SIZE] != STACK_MISC) {
788 				verbose(env, "invalid read from stack off %d+%d size %d\n",
789 					off, i, size);
790 				return -EACCES;
791 			}
792 		}
793 		if (value_regno >= 0)
794 			/* have read misc data from the stack */
795 			mark_reg_unknown(env, state->regs, value_regno);
796 		return 0;
797 	}
798 }
799 
800 /* check read/write into map element returned by bpf_map_lookup_elem() */
801 static int __check_map_access(struct bpf_verifier_env *env, u32 regno, int off,
802 			      int size, bool zero_size_allowed)
803 {
804 	struct bpf_reg_state *regs = cur_regs(env);
805 	struct bpf_map *map = regs[regno].map_ptr;
806 
807 	if (off < 0 || size < 0 || (size == 0 && !zero_size_allowed) ||
808 	    off + size > map->value_size) {
809 		verbose(env, "invalid access to map value, value_size=%d off=%d size=%d\n",
810 			map->value_size, off, size);
811 		return -EACCES;
812 	}
813 	return 0;
814 }
815 
816 /* check read/write into a map element with possible variable offset */
817 static int check_map_access(struct bpf_verifier_env *env, u32 regno,
818 			    int off, int size, bool zero_size_allowed)
819 {
820 	struct bpf_verifier_state *state = env->cur_state;
821 	struct bpf_reg_state *reg = &state->regs[regno];
822 	int err;
823 
824 	/* We may have adjusted the register to this map value, so we
825 	 * need to try adding each of min_value and max_value to off
826 	 * to make sure our theoretical access will be safe.
827 	 */
828 	if (env->log.level)
829 		print_verifier_state(env, state);
830 	/* The minimum value is only important with signed
831 	 * comparisons where we can't assume the floor of a
832 	 * value is 0.  If we are using signed variables for our
833 	 * index'es we need to make sure that whatever we use
834 	 * will have a set floor within our range.
835 	 */
836 	if (reg->smin_value < 0) {
837 		verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
838 			regno);
839 		return -EACCES;
840 	}
841 	err = __check_map_access(env, regno, reg->smin_value + off, size,
842 				 zero_size_allowed);
843 	if (err) {
844 		verbose(env, "R%d min value is outside of the array range\n",
845 			regno);
846 		return err;
847 	}
848 
849 	/* If we haven't set a max value then we need to bail since we can't be
850 	 * sure we won't do bad things.
851 	 * If reg->umax_value + off could overflow, treat that as unbounded too.
852 	 */
853 	if (reg->umax_value >= BPF_MAX_VAR_OFF) {
854 		verbose(env, "R%d unbounded memory access, make sure to bounds check any array access into a map\n",
855 			regno);
856 		return -EACCES;
857 	}
858 	err = __check_map_access(env, regno, reg->umax_value + off, size,
859 				 zero_size_allowed);
860 	if (err)
861 		verbose(env, "R%d max value is outside of the array range\n",
862 			regno);
863 	return err;
864 }
865 
866 #define MAX_PACKET_OFF 0xffff
867 
868 static bool may_access_direct_pkt_data(struct bpf_verifier_env *env,
869 				       const struct bpf_call_arg_meta *meta,
870 				       enum bpf_access_type t)
871 {
872 	switch (env->prog->type) {
873 	case BPF_PROG_TYPE_LWT_IN:
874 	case BPF_PROG_TYPE_LWT_OUT:
875 		/* dst_input() and dst_output() can't write for now */
876 		if (t == BPF_WRITE)
877 			return false;
878 		/* fallthrough */
879 	case BPF_PROG_TYPE_SCHED_CLS:
880 	case BPF_PROG_TYPE_SCHED_ACT:
881 	case BPF_PROG_TYPE_XDP:
882 	case BPF_PROG_TYPE_LWT_XMIT:
883 	case BPF_PROG_TYPE_SK_SKB:
884 		if (meta)
885 			return meta->pkt_access;
886 
887 		env->seen_direct_write = true;
888 		return true;
889 	default:
890 		return false;
891 	}
892 }
893 
894 static int __check_packet_access(struct bpf_verifier_env *env, u32 regno,
895 				 int off, int size, bool zero_size_allowed)
896 {
897 	struct bpf_reg_state *regs = cur_regs(env);
898 	struct bpf_reg_state *reg = &regs[regno];
899 
900 	if (off < 0 || size < 0 || (size == 0 && !zero_size_allowed) ||
901 	    (u64)off + size > reg->range) {
902 		verbose(env, "invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n",
903 			off, size, regno, reg->id, reg->off, reg->range);
904 		return -EACCES;
905 	}
906 	return 0;
907 }
908 
909 static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
910 			       int size, bool zero_size_allowed)
911 {
912 	struct bpf_reg_state *regs = cur_regs(env);
913 	struct bpf_reg_state *reg = &regs[regno];
914 	int err;
915 
916 	/* We may have added a variable offset to the packet pointer; but any
917 	 * reg->range we have comes after that.  We are only checking the fixed
918 	 * offset.
919 	 */
920 
921 	/* We don't allow negative numbers, because we aren't tracking enough
922 	 * detail to prove they're safe.
923 	 */
924 	if (reg->smin_value < 0) {
925 		verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
926 			regno);
927 		return -EACCES;
928 	}
929 	err = __check_packet_access(env, regno, off, size, zero_size_allowed);
930 	if (err) {
931 		verbose(env, "R%d offset is outside of the packet\n", regno);
932 		return err;
933 	}
934 	return err;
935 }
936 
937 /* check access to 'struct bpf_context' fields.  Supports fixed offsets only */
938 static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size,
939 			    enum bpf_access_type t, enum bpf_reg_type *reg_type)
940 {
941 	struct bpf_insn_access_aux info = {
942 		.reg_type = *reg_type,
943 	};
944 
945 	if (env->ops->is_valid_access &&
946 	    env->ops->is_valid_access(off, size, t, &info)) {
947 		/* A non zero info.ctx_field_size indicates that this field is a
948 		 * candidate for later verifier transformation to load the whole
949 		 * field and then apply a mask when accessed with a narrower
950 		 * access than actual ctx access size. A zero info.ctx_field_size
951 		 * will only allow for whole field access and rejects any other
952 		 * type of narrower access.
953 		 */
954 		*reg_type = info.reg_type;
955 
956 		env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
957 		/* remember the offset of last byte accessed in ctx */
958 		if (env->prog->aux->max_ctx_offset < off + size)
959 			env->prog->aux->max_ctx_offset = off + size;
960 		return 0;
961 	}
962 
963 	verbose(env, "invalid bpf_context access off=%d size=%d\n", off, size);
964 	return -EACCES;
965 }
966 
967 static bool __is_pointer_value(bool allow_ptr_leaks,
968 			       const struct bpf_reg_state *reg)
969 {
970 	if (allow_ptr_leaks)
971 		return false;
972 
973 	return reg->type != SCALAR_VALUE;
974 }
975 
976 static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
977 {
978 	return __is_pointer_value(env->allow_ptr_leaks, cur_regs(env) + regno);
979 }
980 
981 static int check_pkt_ptr_alignment(struct bpf_verifier_env *env,
982 				   const struct bpf_reg_state *reg,
983 				   int off, int size, bool strict)
984 {
985 	struct tnum reg_off;
986 	int ip_align;
987 
988 	/* Byte size accesses are always allowed. */
989 	if (!strict || size == 1)
990 		return 0;
991 
992 	/* For platforms that do not have a Kconfig enabling
993 	 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the value of
994 	 * NET_IP_ALIGN is universally set to '2'.  And on platforms
995 	 * that do set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, we get
996 	 * to this code only in strict mode where we want to emulate
997 	 * the NET_IP_ALIGN==2 checking.  Therefore use an
998 	 * unconditional IP align value of '2'.
999 	 */
1000 	ip_align = 2;
1001 
1002 	reg_off = tnum_add(reg->var_off, tnum_const(ip_align + reg->off + off));
1003 	if (!tnum_is_aligned(reg_off, size)) {
1004 		char tn_buf[48];
1005 
1006 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
1007 		verbose(env,
1008 			"misaligned packet access off %d+%s+%d+%d size %d\n",
1009 			ip_align, tn_buf, reg->off, off, size);
1010 		return -EACCES;
1011 	}
1012 
1013 	return 0;
1014 }
1015 
1016 static int check_generic_ptr_alignment(struct bpf_verifier_env *env,
1017 				       const struct bpf_reg_state *reg,
1018 				       const char *pointer_desc,
1019 				       int off, int size, bool strict)
1020 {
1021 	struct tnum reg_off;
1022 
1023 	/* Byte size accesses are always allowed. */
1024 	if (!strict || size == 1)
1025 		return 0;
1026 
1027 	reg_off = tnum_add(reg->var_off, tnum_const(reg->off + off));
1028 	if (!tnum_is_aligned(reg_off, size)) {
1029 		char tn_buf[48];
1030 
1031 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
1032 		verbose(env, "misaligned %saccess off %s+%d+%d size %d\n",
1033 			pointer_desc, tn_buf, reg->off, off, size);
1034 		return -EACCES;
1035 	}
1036 
1037 	return 0;
1038 }
1039 
1040 static int check_ptr_alignment(struct bpf_verifier_env *env,
1041 			       const struct bpf_reg_state *reg,
1042 			       int off, int size)
1043 {
1044 	bool strict = env->strict_alignment;
1045 	const char *pointer_desc = "";
1046 
1047 	switch (reg->type) {
1048 	case PTR_TO_PACKET:
1049 	case PTR_TO_PACKET_META:
1050 		/* Special case, because of NET_IP_ALIGN. Given metadata sits
1051 		 * right in front, treat it the very same way.
1052 		 */
1053 		return check_pkt_ptr_alignment(env, reg, off, size, strict);
1054 	case PTR_TO_MAP_VALUE:
1055 		pointer_desc = "value ";
1056 		break;
1057 	case PTR_TO_CTX:
1058 		pointer_desc = "context ";
1059 		break;
1060 	case PTR_TO_STACK:
1061 		pointer_desc = "stack ";
1062 		/* The stack spill tracking logic in check_stack_write()
1063 		 * and check_stack_read() relies on stack accesses being
1064 		 * aligned.
1065 		 */
1066 		strict = true;
1067 		break;
1068 	default:
1069 		break;
1070 	}
1071 	return check_generic_ptr_alignment(env, reg, pointer_desc, off, size,
1072 					   strict);
1073 }
1074 
1075 /* truncate register to smaller size (in bytes)
1076  * must be called with size < BPF_REG_SIZE
1077  */
1078 static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
1079 {
1080 	u64 mask;
1081 
1082 	/* clear high bits in bit representation */
1083 	reg->var_off = tnum_cast(reg->var_off, size);
1084 
1085 	/* fix arithmetic bounds */
1086 	mask = ((u64)1 << (size * 8)) - 1;
1087 	if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) {
1088 		reg->umin_value &= mask;
1089 		reg->umax_value &= mask;
1090 	} else {
1091 		reg->umin_value = 0;
1092 		reg->umax_value = mask;
1093 	}
1094 	reg->smin_value = reg->umin_value;
1095 	reg->smax_value = reg->umax_value;
1096 }
1097 
1098 /* check whether memory at (regno + off) is accessible for t = (read | write)
1099  * if t==write, value_regno is a register which value is stored into memory
1100  * if t==read, value_regno is a register which will receive the value from memory
1101  * if t==write && value_regno==-1, some unknown value is stored into memory
1102  * if t==read && value_regno==-1, don't care what we read from memory
1103  */
1104 static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno, int off,
1105 			    int bpf_size, enum bpf_access_type t,
1106 			    int value_regno)
1107 {
1108 	struct bpf_verifier_state *state = env->cur_state;
1109 	struct bpf_reg_state *regs = cur_regs(env);
1110 	struct bpf_reg_state *reg = regs + regno;
1111 	int size, err = 0;
1112 
1113 	size = bpf_size_to_bytes(bpf_size);
1114 	if (size < 0)
1115 		return size;
1116 
1117 	/* alignment checks will add in reg->off themselves */
1118 	err = check_ptr_alignment(env, reg, off, size);
1119 	if (err)
1120 		return err;
1121 
1122 	/* for access checks, reg->off is just part of off */
1123 	off += reg->off;
1124 
1125 	if (reg->type == PTR_TO_MAP_VALUE) {
1126 		if (t == BPF_WRITE && value_regno >= 0 &&
1127 		    is_pointer_value(env, value_regno)) {
1128 			verbose(env, "R%d leaks addr into map\n", value_regno);
1129 			return -EACCES;
1130 		}
1131 
1132 		err = check_map_access(env, regno, off, size, false);
1133 		if (!err && t == BPF_READ && value_regno >= 0)
1134 			mark_reg_unknown(env, regs, value_regno);
1135 
1136 	} else if (reg->type == PTR_TO_CTX) {
1137 		enum bpf_reg_type reg_type = SCALAR_VALUE;
1138 
1139 		if (t == BPF_WRITE && value_regno >= 0 &&
1140 		    is_pointer_value(env, value_regno)) {
1141 			verbose(env, "R%d leaks addr into ctx\n", value_regno);
1142 			return -EACCES;
1143 		}
1144 		/* ctx accesses must be at a fixed offset, so that we can
1145 		 * determine what type of data were returned.
1146 		 */
1147 		if (reg->off) {
1148 			verbose(env,
1149 				"dereference of modified ctx ptr R%d off=%d+%d, ctx+const is allowed, ctx+const+const is not\n",
1150 				regno, reg->off, off - reg->off);
1151 			return -EACCES;
1152 		}
1153 		if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
1154 			char tn_buf[48];
1155 
1156 			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
1157 			verbose(env,
1158 				"variable ctx access var_off=%s off=%d size=%d",
1159 				tn_buf, off, size);
1160 			return -EACCES;
1161 		}
1162 		err = check_ctx_access(env, insn_idx, off, size, t, &reg_type);
1163 		if (!err && t == BPF_READ && value_regno >= 0) {
1164 			/* ctx access returns either a scalar, or a
1165 			 * PTR_TO_PACKET[_META,_END]. In the latter
1166 			 * case, we know the offset is zero.
1167 			 */
1168 			if (reg_type == SCALAR_VALUE)
1169 				mark_reg_unknown(env, regs, value_regno);
1170 			else
1171 				mark_reg_known_zero(env, regs,
1172 						    value_regno);
1173 			regs[value_regno].id = 0;
1174 			regs[value_regno].off = 0;
1175 			regs[value_regno].range = 0;
1176 			regs[value_regno].type = reg_type;
1177 		}
1178 
1179 	} else if (reg->type == PTR_TO_STACK) {
1180 		/* stack accesses must be at a fixed offset, so that we can
1181 		 * determine what type of data were returned.
1182 		 * See check_stack_read().
1183 		 */
1184 		if (!tnum_is_const(reg->var_off)) {
1185 			char tn_buf[48];
1186 
1187 			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
1188 			verbose(env, "variable stack access var_off=%s off=%d size=%d",
1189 				tn_buf, off, size);
1190 			return -EACCES;
1191 		}
1192 		off += reg->var_off.value;
1193 		if (off >= 0 || off < -MAX_BPF_STACK) {
1194 			verbose(env, "invalid stack off=%d size=%d\n", off,
1195 				size);
1196 			return -EACCES;
1197 		}
1198 
1199 		if (env->prog->aux->stack_depth < -off)
1200 			env->prog->aux->stack_depth = -off;
1201 
1202 		if (t == BPF_WRITE)
1203 			err = check_stack_write(env, state, off, size,
1204 						value_regno);
1205 		else
1206 			err = check_stack_read(env, state, off, size,
1207 					       value_regno);
1208 	} else if (reg_is_pkt_pointer(reg)) {
1209 		if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) {
1210 			verbose(env, "cannot write into packet\n");
1211 			return -EACCES;
1212 		}
1213 		if (t == BPF_WRITE && value_regno >= 0 &&
1214 		    is_pointer_value(env, value_regno)) {
1215 			verbose(env, "R%d leaks addr into packet\n",
1216 				value_regno);
1217 			return -EACCES;
1218 		}
1219 		err = check_packet_access(env, regno, off, size, false);
1220 		if (!err && t == BPF_READ && value_regno >= 0)
1221 			mark_reg_unknown(env, regs, value_regno);
1222 	} else {
1223 		verbose(env, "R%d invalid mem access '%s'\n", regno,
1224 			reg_type_str[reg->type]);
1225 		return -EACCES;
1226 	}
1227 
1228 	if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ &&
1229 	    regs[value_regno].type == SCALAR_VALUE) {
1230 		/* b/h/w load zero-extends, mark upper bits as known 0 */
1231 		coerce_reg_to_size(&regs[value_regno], size);
1232 	}
1233 	return err;
1234 }
1235 
1236 static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn)
1237 {
1238 	int err;
1239 
1240 	if ((BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) ||
1241 	    insn->imm != 0) {
1242 		verbose(env, "BPF_XADD uses reserved fields\n");
1243 		return -EINVAL;
1244 	}
1245 
1246 	/* check src1 operand */
1247 	err = check_reg_arg(env, insn->src_reg, SRC_OP);
1248 	if (err)
1249 		return err;
1250 
1251 	/* check src2 operand */
1252 	err = check_reg_arg(env, insn->dst_reg, SRC_OP);
1253 	if (err)
1254 		return err;
1255 
1256 	if (is_pointer_value(env, insn->src_reg)) {
1257 		verbose(env, "R%d leaks addr into mem\n", insn->src_reg);
1258 		return -EACCES;
1259 	}
1260 
1261 	/* check whether atomic_add can read the memory */
1262 	err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
1263 			       BPF_SIZE(insn->code), BPF_READ, -1);
1264 	if (err)
1265 		return err;
1266 
1267 	/* check whether atomic_add can write into the same memory */
1268 	return check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
1269 				BPF_SIZE(insn->code), BPF_WRITE, -1);
1270 }
1271 
1272 /* Does this register contain a constant zero? */
1273 static bool register_is_null(struct bpf_reg_state reg)
1274 {
1275 	return reg.type == SCALAR_VALUE && tnum_equals_const(reg.var_off, 0);
1276 }
1277 
1278 /* when register 'regno' is passed into function that will read 'access_size'
1279  * bytes from that pointer, make sure that it's within stack boundary
1280  * and all elements of stack are initialized.
1281  * Unlike most pointer bounds-checking functions, this one doesn't take an
1282  * 'off' argument, so it has to add in reg->off itself.
1283  */
1284 static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
1285 				int access_size, bool zero_size_allowed,
1286 				struct bpf_call_arg_meta *meta)
1287 {
1288 	struct bpf_verifier_state *state = env->cur_state;
1289 	struct bpf_reg_state *regs = state->regs;
1290 	int off, i, slot, spi;
1291 
1292 	if (regs[regno].type != PTR_TO_STACK) {
1293 		/* Allow zero-byte read from NULL, regardless of pointer type */
1294 		if (zero_size_allowed && access_size == 0 &&
1295 		    register_is_null(regs[regno]))
1296 			return 0;
1297 
1298 		verbose(env, "R%d type=%s expected=%s\n", regno,
1299 			reg_type_str[regs[regno].type],
1300 			reg_type_str[PTR_TO_STACK]);
1301 		return -EACCES;
1302 	}
1303 
1304 	/* Only allow fixed-offset stack reads */
1305 	if (!tnum_is_const(regs[regno].var_off)) {
1306 		char tn_buf[48];
1307 
1308 		tnum_strn(tn_buf, sizeof(tn_buf), regs[regno].var_off);
1309 		verbose(env, "invalid variable stack read R%d var_off=%s\n",
1310 			regno, tn_buf);
1311 		return -EACCES;
1312 	}
1313 	off = regs[regno].off + regs[regno].var_off.value;
1314 	if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 ||
1315 	    access_size < 0 || (access_size == 0 && !zero_size_allowed)) {
1316 		verbose(env, "invalid stack type R%d off=%d access_size=%d\n",
1317 			regno, off, access_size);
1318 		return -EACCES;
1319 	}
1320 
1321 	if (env->prog->aux->stack_depth < -off)
1322 		env->prog->aux->stack_depth = -off;
1323 
1324 	if (meta && meta->raw_mode) {
1325 		meta->access_size = access_size;
1326 		meta->regno = regno;
1327 		return 0;
1328 	}
1329 
1330 	for (i = 0; i < access_size; i++) {
1331 		slot = -(off + i) - 1;
1332 		spi = slot / BPF_REG_SIZE;
1333 		if (state->allocated_stack <= slot ||
1334 		    state->stack[spi].slot_type[slot % BPF_REG_SIZE] !=
1335 			STACK_MISC) {
1336 			verbose(env, "invalid indirect read from stack off %d+%d size %d\n",
1337 				off, i, access_size);
1338 			return -EACCES;
1339 		}
1340 	}
1341 	return 0;
1342 }
1343 
1344 static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
1345 				   int access_size, bool zero_size_allowed,
1346 				   struct bpf_call_arg_meta *meta)
1347 {
1348 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
1349 
1350 	switch (reg->type) {
1351 	case PTR_TO_PACKET:
1352 	case PTR_TO_PACKET_META:
1353 		return check_packet_access(env, regno, reg->off, access_size,
1354 					   zero_size_allowed);
1355 	case PTR_TO_MAP_VALUE:
1356 		return check_map_access(env, regno, reg->off, access_size,
1357 					zero_size_allowed);
1358 	default: /* scalar_value|ptr_to_stack or invalid ptr */
1359 		return check_stack_boundary(env, regno, access_size,
1360 					    zero_size_allowed, meta);
1361 	}
1362 }
1363 
1364 static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
1365 			  enum bpf_arg_type arg_type,
1366 			  struct bpf_call_arg_meta *meta)
1367 {
1368 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
1369 	enum bpf_reg_type expected_type, type = reg->type;
1370 	int err = 0;
1371 
1372 	if (arg_type == ARG_DONTCARE)
1373 		return 0;
1374 
1375 	err = check_reg_arg(env, regno, SRC_OP);
1376 	if (err)
1377 		return err;
1378 
1379 	if (arg_type == ARG_ANYTHING) {
1380 		if (is_pointer_value(env, regno)) {
1381 			verbose(env, "R%d leaks addr into helper function\n",
1382 				regno);
1383 			return -EACCES;
1384 		}
1385 		return 0;
1386 	}
1387 
1388 	if (type_is_pkt_pointer(type) &&
1389 	    !may_access_direct_pkt_data(env, meta, BPF_READ)) {
1390 		verbose(env, "helper access to the packet is not allowed\n");
1391 		return -EACCES;
1392 	}
1393 
1394 	if (arg_type == ARG_PTR_TO_MAP_KEY ||
1395 	    arg_type == ARG_PTR_TO_MAP_VALUE) {
1396 		expected_type = PTR_TO_STACK;
1397 		if (!type_is_pkt_pointer(type) &&
1398 		    type != expected_type)
1399 			goto err_type;
1400 	} else if (arg_type == ARG_CONST_SIZE ||
1401 		   arg_type == ARG_CONST_SIZE_OR_ZERO) {
1402 		expected_type = SCALAR_VALUE;
1403 		if (type != expected_type)
1404 			goto err_type;
1405 	} else if (arg_type == ARG_CONST_MAP_PTR) {
1406 		expected_type = CONST_PTR_TO_MAP;
1407 		if (type != expected_type)
1408 			goto err_type;
1409 	} else if (arg_type == ARG_PTR_TO_CTX) {
1410 		expected_type = PTR_TO_CTX;
1411 		if (type != expected_type)
1412 			goto err_type;
1413 	} else if (arg_type == ARG_PTR_TO_MEM ||
1414 		   arg_type == ARG_PTR_TO_MEM_OR_NULL ||
1415 		   arg_type == ARG_PTR_TO_UNINIT_MEM) {
1416 		expected_type = PTR_TO_STACK;
1417 		/* One exception here. In case function allows for NULL to be
1418 		 * passed in as argument, it's a SCALAR_VALUE type. Final test
1419 		 * happens during stack boundary checking.
1420 		 */
1421 		if (register_is_null(*reg) &&
1422 		    arg_type == ARG_PTR_TO_MEM_OR_NULL)
1423 			/* final test in check_stack_boundary() */;
1424 		else if (!type_is_pkt_pointer(type) &&
1425 			 type != PTR_TO_MAP_VALUE &&
1426 			 type != expected_type)
1427 			goto err_type;
1428 		meta->raw_mode = arg_type == ARG_PTR_TO_UNINIT_MEM;
1429 	} else {
1430 		verbose(env, "unsupported arg_type %d\n", arg_type);
1431 		return -EFAULT;
1432 	}
1433 
1434 	if (arg_type == ARG_CONST_MAP_PTR) {
1435 		/* bpf_map_xxx(map_ptr) call: remember that map_ptr */
1436 		meta->map_ptr = reg->map_ptr;
1437 	} else if (arg_type == ARG_PTR_TO_MAP_KEY) {
1438 		/* bpf_map_xxx(..., map_ptr, ..., key) call:
1439 		 * check that [key, key + map->key_size) are within
1440 		 * stack limits and initialized
1441 		 */
1442 		if (!meta->map_ptr) {
1443 			/* in function declaration map_ptr must come before
1444 			 * map_key, so that it's verified and known before
1445 			 * we have to check map_key here. Otherwise it means
1446 			 * that kernel subsystem misconfigured verifier
1447 			 */
1448 			verbose(env, "invalid map_ptr to access map->key\n");
1449 			return -EACCES;
1450 		}
1451 		if (type_is_pkt_pointer(type))
1452 			err = check_packet_access(env, regno, reg->off,
1453 						  meta->map_ptr->key_size,
1454 						  false);
1455 		else
1456 			err = check_stack_boundary(env, regno,
1457 						   meta->map_ptr->key_size,
1458 						   false, NULL);
1459 	} else if (arg_type == ARG_PTR_TO_MAP_VALUE) {
1460 		/* bpf_map_xxx(..., map_ptr, ..., value) call:
1461 		 * check [value, value + map->value_size) validity
1462 		 */
1463 		if (!meta->map_ptr) {
1464 			/* kernel subsystem misconfigured verifier */
1465 			verbose(env, "invalid map_ptr to access map->value\n");
1466 			return -EACCES;
1467 		}
1468 		if (type_is_pkt_pointer(type))
1469 			err = check_packet_access(env, regno, reg->off,
1470 						  meta->map_ptr->value_size,
1471 						  false);
1472 		else
1473 			err = check_stack_boundary(env, regno,
1474 						   meta->map_ptr->value_size,
1475 						   false, NULL);
1476 	} else if (arg_type == ARG_CONST_SIZE ||
1477 		   arg_type == ARG_CONST_SIZE_OR_ZERO) {
1478 		bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO);
1479 
1480 		/* bpf_xxx(..., buf, len) call will access 'len' bytes
1481 		 * from stack pointer 'buf'. Check it
1482 		 * note: regno == len, regno - 1 == buf
1483 		 */
1484 		if (regno == 0) {
1485 			/* kernel subsystem misconfigured verifier */
1486 			verbose(env,
1487 				"ARG_CONST_SIZE cannot be first argument\n");
1488 			return -EACCES;
1489 		}
1490 
1491 		/* The register is SCALAR_VALUE; the access check
1492 		 * happens using its boundaries.
1493 		 */
1494 
1495 		if (!tnum_is_const(reg->var_off))
1496 			/* For unprivileged variable accesses, disable raw
1497 			 * mode so that the program is required to
1498 			 * initialize all the memory that the helper could
1499 			 * just partially fill up.
1500 			 */
1501 			meta = NULL;
1502 
1503 		if (reg->smin_value < 0) {
1504 			verbose(env, "R%d min value is negative, either use unsigned or 'var &= const'\n",
1505 				regno);
1506 			return -EACCES;
1507 		}
1508 
1509 		if (reg->umin_value == 0) {
1510 			err = check_helper_mem_access(env, regno - 1, 0,
1511 						      zero_size_allowed,
1512 						      meta);
1513 			if (err)
1514 				return err;
1515 		}
1516 
1517 		if (reg->umax_value >= BPF_MAX_VAR_SIZ) {
1518 			verbose(env, "R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n",
1519 				regno);
1520 			return -EACCES;
1521 		}
1522 		err = check_helper_mem_access(env, regno - 1,
1523 					      reg->umax_value,
1524 					      zero_size_allowed, meta);
1525 	}
1526 
1527 	return err;
1528 err_type:
1529 	verbose(env, "R%d type=%s expected=%s\n", regno,
1530 		reg_type_str[type], reg_type_str[expected_type]);
1531 	return -EACCES;
1532 }
1533 
1534 static int check_map_func_compatibility(struct bpf_verifier_env *env,
1535 					struct bpf_map *map, int func_id)
1536 {
1537 	if (!map)
1538 		return 0;
1539 
1540 	/* We need a two way check, first is from map perspective ... */
1541 	switch (map->map_type) {
1542 	case BPF_MAP_TYPE_PROG_ARRAY:
1543 		if (func_id != BPF_FUNC_tail_call)
1544 			goto error;
1545 		break;
1546 	case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
1547 		if (func_id != BPF_FUNC_perf_event_read &&
1548 		    func_id != BPF_FUNC_perf_event_output &&
1549 		    func_id != BPF_FUNC_perf_event_read_value)
1550 			goto error;
1551 		break;
1552 	case BPF_MAP_TYPE_STACK_TRACE:
1553 		if (func_id != BPF_FUNC_get_stackid)
1554 			goto error;
1555 		break;
1556 	case BPF_MAP_TYPE_CGROUP_ARRAY:
1557 		if (func_id != BPF_FUNC_skb_under_cgroup &&
1558 		    func_id != BPF_FUNC_current_task_under_cgroup)
1559 			goto error;
1560 		break;
1561 	/* devmap returns a pointer to a live net_device ifindex that we cannot
1562 	 * allow to be modified from bpf side. So do not allow lookup elements
1563 	 * for now.
1564 	 */
1565 	case BPF_MAP_TYPE_DEVMAP:
1566 		if (func_id != BPF_FUNC_redirect_map)
1567 			goto error;
1568 		break;
1569 	/* Restrict bpf side of cpumap, open when use-cases appear */
1570 	case BPF_MAP_TYPE_CPUMAP:
1571 		if (func_id != BPF_FUNC_redirect_map)
1572 			goto error;
1573 		break;
1574 	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
1575 	case BPF_MAP_TYPE_HASH_OF_MAPS:
1576 		if (func_id != BPF_FUNC_map_lookup_elem)
1577 			goto error;
1578 		break;
1579 	case BPF_MAP_TYPE_SOCKMAP:
1580 		if (func_id != BPF_FUNC_sk_redirect_map &&
1581 		    func_id != BPF_FUNC_sock_map_update &&
1582 		    func_id != BPF_FUNC_map_delete_elem)
1583 			goto error;
1584 		break;
1585 	default:
1586 		break;
1587 	}
1588 
1589 	/* ... and second from the function itself. */
1590 	switch (func_id) {
1591 	case BPF_FUNC_tail_call:
1592 		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
1593 			goto error;
1594 		break;
1595 	case BPF_FUNC_perf_event_read:
1596 	case BPF_FUNC_perf_event_output:
1597 	case BPF_FUNC_perf_event_read_value:
1598 		if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
1599 			goto error;
1600 		break;
1601 	case BPF_FUNC_get_stackid:
1602 		if (map->map_type != BPF_MAP_TYPE_STACK_TRACE)
1603 			goto error;
1604 		break;
1605 	case BPF_FUNC_current_task_under_cgroup:
1606 	case BPF_FUNC_skb_under_cgroup:
1607 		if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY)
1608 			goto error;
1609 		break;
1610 	case BPF_FUNC_redirect_map:
1611 		if (map->map_type != BPF_MAP_TYPE_DEVMAP &&
1612 		    map->map_type != BPF_MAP_TYPE_CPUMAP)
1613 			goto error;
1614 		break;
1615 	case BPF_FUNC_sk_redirect_map:
1616 		if (map->map_type != BPF_MAP_TYPE_SOCKMAP)
1617 			goto error;
1618 		break;
1619 	case BPF_FUNC_sock_map_update:
1620 		if (map->map_type != BPF_MAP_TYPE_SOCKMAP)
1621 			goto error;
1622 		break;
1623 	default:
1624 		break;
1625 	}
1626 
1627 	return 0;
1628 error:
1629 	verbose(env, "cannot pass map_type %d into func %s#%d\n",
1630 		map->map_type, func_id_name(func_id), func_id);
1631 	return -EINVAL;
1632 }
1633 
1634 static int check_raw_mode(const struct bpf_func_proto *fn)
1635 {
1636 	int count = 0;
1637 
1638 	if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM)
1639 		count++;
1640 	if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM)
1641 		count++;
1642 	if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM)
1643 		count++;
1644 	if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM)
1645 		count++;
1646 	if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM)
1647 		count++;
1648 
1649 	return count > 1 ? -EINVAL : 0;
1650 }
1651 
1652 /* Packet data might have moved, any old PTR_TO_PACKET[_META,_END]
1653  * are now invalid, so turn them into unknown SCALAR_VALUE.
1654  */
1655 static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
1656 {
1657 	struct bpf_verifier_state *state = env->cur_state;
1658 	struct bpf_reg_state *regs = state->regs, *reg;
1659 	int i;
1660 
1661 	for (i = 0; i < MAX_BPF_REG; i++)
1662 		if (reg_is_pkt_pointer_any(&regs[i]))
1663 			mark_reg_unknown(env, regs, i);
1664 
1665 	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
1666 		if (state->stack[i].slot_type[0] != STACK_SPILL)
1667 			continue;
1668 		reg = &state->stack[i].spilled_ptr;
1669 		if (reg_is_pkt_pointer_any(reg))
1670 			__mark_reg_unknown(reg);
1671 	}
1672 }
1673 
1674 static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
1675 {
1676 	const struct bpf_func_proto *fn = NULL;
1677 	struct bpf_reg_state *regs;
1678 	struct bpf_call_arg_meta meta;
1679 	bool changes_data;
1680 	int i, err;
1681 
1682 	/* find function prototype */
1683 	if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) {
1684 		verbose(env, "invalid func %s#%d\n", func_id_name(func_id),
1685 			func_id);
1686 		return -EINVAL;
1687 	}
1688 
1689 	if (env->ops->get_func_proto)
1690 		fn = env->ops->get_func_proto(func_id);
1691 
1692 	if (!fn) {
1693 		verbose(env, "unknown func %s#%d\n", func_id_name(func_id),
1694 			func_id);
1695 		return -EINVAL;
1696 	}
1697 
1698 	/* eBPF programs must be GPL compatible to use GPL-ed functions */
1699 	if (!env->prog->gpl_compatible && fn->gpl_only) {
1700 		verbose(env, "cannot call GPL only function from proprietary program\n");
1701 		return -EINVAL;
1702 	}
1703 
1704 	/* With LD_ABS/IND some JITs save/restore skb from r1. */
1705 	changes_data = bpf_helper_changes_pkt_data(fn->func);
1706 	if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) {
1707 		verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n",
1708 			func_id_name(func_id), func_id);
1709 		return -EINVAL;
1710 	}
1711 
1712 	memset(&meta, 0, sizeof(meta));
1713 	meta.pkt_access = fn->pkt_access;
1714 
1715 	/* We only support one arg being in raw mode at the moment, which
1716 	 * is sufficient for the helper functions we have right now.
1717 	 */
1718 	err = check_raw_mode(fn);
1719 	if (err) {
1720 		verbose(env, "kernel subsystem misconfigured func %s#%d\n",
1721 			func_id_name(func_id), func_id);
1722 		return err;
1723 	}
1724 
1725 	/* check args */
1726 	err = check_func_arg(env, BPF_REG_1, fn->arg1_type, &meta);
1727 	if (err)
1728 		return err;
1729 	err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta);
1730 	if (err)
1731 		return err;
1732 	if (func_id == BPF_FUNC_tail_call) {
1733 		if (meta.map_ptr == NULL) {
1734 			verbose(env, "verifier bug\n");
1735 			return -EINVAL;
1736 		}
1737 		env->insn_aux_data[insn_idx].map_ptr = meta.map_ptr;
1738 	}
1739 	err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta);
1740 	if (err)
1741 		return err;
1742 	err = check_func_arg(env, BPF_REG_4, fn->arg4_type, &meta);
1743 	if (err)
1744 		return err;
1745 	err = check_func_arg(env, BPF_REG_5, fn->arg5_type, &meta);
1746 	if (err)
1747 		return err;
1748 
1749 	/* Mark slots with STACK_MISC in case of raw mode, stack offset
1750 	 * is inferred from register state.
1751 	 */
1752 	for (i = 0; i < meta.access_size; i++) {
1753 		err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B, BPF_WRITE, -1);
1754 		if (err)
1755 			return err;
1756 	}
1757 
1758 	regs = cur_regs(env);
1759 	/* reset caller saved regs */
1760 	for (i = 0; i < CALLER_SAVED_REGS; i++) {
1761 		mark_reg_not_init(env, regs, caller_saved[i]);
1762 		check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
1763 	}
1764 
1765 	/* update return register (already marked as written above) */
1766 	if (fn->ret_type == RET_INTEGER) {
1767 		/* sets type to SCALAR_VALUE */
1768 		mark_reg_unknown(env, regs, BPF_REG_0);
1769 	} else if (fn->ret_type == RET_VOID) {
1770 		regs[BPF_REG_0].type = NOT_INIT;
1771 	} else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL) {
1772 		struct bpf_insn_aux_data *insn_aux;
1773 
1774 		regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
1775 		/* There is no offset yet applied, variable or fixed */
1776 		mark_reg_known_zero(env, regs, BPF_REG_0);
1777 		regs[BPF_REG_0].off = 0;
1778 		/* remember map_ptr, so that check_map_access()
1779 		 * can check 'value_size' boundary of memory access
1780 		 * to map element returned from bpf_map_lookup_elem()
1781 		 */
1782 		if (meta.map_ptr == NULL) {
1783 			verbose(env,
1784 				"kernel subsystem misconfigured verifier\n");
1785 			return -EINVAL;
1786 		}
1787 		regs[BPF_REG_0].map_ptr = meta.map_ptr;
1788 		regs[BPF_REG_0].id = ++env->id_gen;
1789 		insn_aux = &env->insn_aux_data[insn_idx];
1790 		if (!insn_aux->map_ptr)
1791 			insn_aux->map_ptr = meta.map_ptr;
1792 		else if (insn_aux->map_ptr != meta.map_ptr)
1793 			insn_aux->map_ptr = BPF_MAP_PTR_POISON;
1794 	} else {
1795 		verbose(env, "unknown return type %d of func %s#%d\n",
1796 			fn->ret_type, func_id_name(func_id), func_id);
1797 		return -EINVAL;
1798 	}
1799 
1800 	err = check_map_func_compatibility(env, meta.map_ptr, func_id);
1801 	if (err)
1802 		return err;
1803 
1804 	if (changes_data)
1805 		clear_all_pkt_pointers(env);
1806 	return 0;
1807 }
1808 
1809 static bool signed_add_overflows(s64 a, s64 b)
1810 {
1811 	/* Do the add in u64, where overflow is well-defined */
1812 	s64 res = (s64)((u64)a + (u64)b);
1813 
1814 	if (b < 0)
1815 		return res > a;
1816 	return res < a;
1817 }
1818 
1819 static bool signed_sub_overflows(s64 a, s64 b)
1820 {
1821 	/* Do the sub in u64, where overflow is well-defined */
1822 	s64 res = (s64)((u64)a - (u64)b);
1823 
1824 	if (b < 0)
1825 		return res < a;
1826 	return res > a;
1827 }
1828 
1829 static bool check_reg_sane_offset(struct bpf_verifier_env *env,
1830 				  const struct bpf_reg_state *reg,
1831 				  enum bpf_reg_type type)
1832 {
1833 	bool known = tnum_is_const(reg->var_off);
1834 	s64 val = reg->var_off.value;
1835 	s64 smin = reg->smin_value;
1836 
1837 	if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) {
1838 		verbose(env, "math between %s pointer and %lld is not allowed\n",
1839 			reg_type_str[type], val);
1840 		return false;
1841 	}
1842 
1843 	if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) {
1844 		verbose(env, "%s pointer offset %d is not allowed\n",
1845 			reg_type_str[type], reg->off);
1846 		return false;
1847 	}
1848 
1849 	if (smin == S64_MIN) {
1850 		verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n",
1851 			reg_type_str[type]);
1852 		return false;
1853 	}
1854 
1855 	if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) {
1856 		verbose(env, "value %lld makes %s pointer be out of bounds\n",
1857 			smin, reg_type_str[type]);
1858 		return false;
1859 	}
1860 
1861 	return true;
1862 }
1863 
1864 /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
1865  * Caller should also handle BPF_MOV case separately.
1866  * If we return -EACCES, caller may want to try again treating pointer as a
1867  * scalar.  So we only emit a diagnostic if !env->allow_ptr_leaks.
1868  */
1869 static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
1870 				   struct bpf_insn *insn,
1871 				   const struct bpf_reg_state *ptr_reg,
1872 				   const struct bpf_reg_state *off_reg)
1873 {
1874 	struct bpf_reg_state *regs = cur_regs(env), *dst_reg;
1875 	bool known = tnum_is_const(off_reg->var_off);
1876 	s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value,
1877 	    smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
1878 	u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
1879 	    umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
1880 	u8 opcode = BPF_OP(insn->code);
1881 	u32 dst = insn->dst_reg;
1882 
1883 	dst_reg = &regs[dst];
1884 
1885 	if (WARN_ON_ONCE(known && (smin_val != smax_val))) {
1886 		print_verifier_state(env, env->cur_state);
1887 		verbose(env,
1888 			"verifier internal error: known but bad sbounds\n");
1889 		return -EINVAL;
1890 	}
1891 	if (WARN_ON_ONCE(known && (umin_val != umax_val))) {
1892 		print_verifier_state(env, env->cur_state);
1893 		verbose(env,
1894 			"verifier internal error: known but bad ubounds\n");
1895 		return -EINVAL;
1896 	}
1897 
1898 	if (BPF_CLASS(insn->code) != BPF_ALU64) {
1899 		/* 32-bit ALU ops on pointers produce (meaningless) scalars */
1900 		verbose(env,
1901 			"R%d 32-bit pointer arithmetic prohibited\n",
1902 			dst);
1903 		return -EACCES;
1904 	}
1905 
1906 	if (ptr_reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
1907 		verbose(env, "R%d pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL prohibited, null-check it first\n",
1908 			dst);
1909 		return -EACCES;
1910 	}
1911 	if (ptr_reg->type == CONST_PTR_TO_MAP) {
1912 		verbose(env, "R%d pointer arithmetic on CONST_PTR_TO_MAP prohibited\n",
1913 			dst);
1914 		return -EACCES;
1915 	}
1916 	if (ptr_reg->type == PTR_TO_PACKET_END) {
1917 		verbose(env, "R%d pointer arithmetic on PTR_TO_PACKET_END prohibited\n",
1918 			dst);
1919 		return -EACCES;
1920 	}
1921 
1922 	/* In case of 'scalar += pointer', dst_reg inherits pointer type and id.
1923 	 * The id may be overwritten later if we create a new variable offset.
1924 	 */
1925 	dst_reg->type = ptr_reg->type;
1926 	dst_reg->id = ptr_reg->id;
1927 
1928 	if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) ||
1929 	    !check_reg_sane_offset(env, ptr_reg, ptr_reg->type))
1930 		return -EINVAL;
1931 
1932 	switch (opcode) {
1933 	case BPF_ADD:
1934 		/* We can take a fixed offset as long as it doesn't overflow
1935 		 * the s32 'off' field
1936 		 */
1937 		if (known && (ptr_reg->off + smin_val ==
1938 			      (s64)(s32)(ptr_reg->off + smin_val))) {
1939 			/* pointer += K.  Accumulate it into fixed offset */
1940 			dst_reg->smin_value = smin_ptr;
1941 			dst_reg->smax_value = smax_ptr;
1942 			dst_reg->umin_value = umin_ptr;
1943 			dst_reg->umax_value = umax_ptr;
1944 			dst_reg->var_off = ptr_reg->var_off;
1945 			dst_reg->off = ptr_reg->off + smin_val;
1946 			dst_reg->range = ptr_reg->range;
1947 			break;
1948 		}
1949 		/* A new variable offset is created.  Note that off_reg->off
1950 		 * == 0, since it's a scalar.
1951 		 * dst_reg gets the pointer type and since some positive
1952 		 * integer value was added to the pointer, give it a new 'id'
1953 		 * if it's a PTR_TO_PACKET.
1954 		 * this creates a new 'base' pointer, off_reg (variable) gets
1955 		 * added into the variable offset, and we copy the fixed offset
1956 		 * from ptr_reg.
1957 		 */
1958 		if (signed_add_overflows(smin_ptr, smin_val) ||
1959 		    signed_add_overflows(smax_ptr, smax_val)) {
1960 			dst_reg->smin_value = S64_MIN;
1961 			dst_reg->smax_value = S64_MAX;
1962 		} else {
1963 			dst_reg->smin_value = smin_ptr + smin_val;
1964 			dst_reg->smax_value = smax_ptr + smax_val;
1965 		}
1966 		if (umin_ptr + umin_val < umin_ptr ||
1967 		    umax_ptr + umax_val < umax_ptr) {
1968 			dst_reg->umin_value = 0;
1969 			dst_reg->umax_value = U64_MAX;
1970 		} else {
1971 			dst_reg->umin_value = umin_ptr + umin_val;
1972 			dst_reg->umax_value = umax_ptr + umax_val;
1973 		}
1974 		dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off);
1975 		dst_reg->off = ptr_reg->off;
1976 		if (reg_is_pkt_pointer(ptr_reg)) {
1977 			dst_reg->id = ++env->id_gen;
1978 			/* something was added to pkt_ptr, set range to zero */
1979 			dst_reg->range = 0;
1980 		}
1981 		break;
1982 	case BPF_SUB:
1983 		if (dst_reg == off_reg) {
1984 			/* scalar -= pointer.  Creates an unknown scalar */
1985 			verbose(env, "R%d tried to subtract pointer from scalar\n",
1986 				dst);
1987 			return -EACCES;
1988 		}
1989 		/* We don't allow subtraction from FP, because (according to
1990 		 * test_verifier.c test "invalid fp arithmetic", JITs might not
1991 		 * be able to deal with it.
1992 		 */
1993 		if (ptr_reg->type == PTR_TO_STACK) {
1994 			verbose(env, "R%d subtraction from stack pointer prohibited\n",
1995 				dst);
1996 			return -EACCES;
1997 		}
1998 		if (known && (ptr_reg->off - smin_val ==
1999 			      (s64)(s32)(ptr_reg->off - smin_val))) {
2000 			/* pointer -= K.  Subtract it from fixed offset */
2001 			dst_reg->smin_value = smin_ptr;
2002 			dst_reg->smax_value = smax_ptr;
2003 			dst_reg->umin_value = umin_ptr;
2004 			dst_reg->umax_value = umax_ptr;
2005 			dst_reg->var_off = ptr_reg->var_off;
2006 			dst_reg->id = ptr_reg->id;
2007 			dst_reg->off = ptr_reg->off - smin_val;
2008 			dst_reg->range = ptr_reg->range;
2009 			break;
2010 		}
2011 		/* A new variable offset is created.  If the subtrahend is known
2012 		 * nonnegative, then any reg->range we had before is still good.
2013 		 */
2014 		if (signed_sub_overflows(smin_ptr, smax_val) ||
2015 		    signed_sub_overflows(smax_ptr, smin_val)) {
2016 			/* Overflow possible, we know nothing */
2017 			dst_reg->smin_value = S64_MIN;
2018 			dst_reg->smax_value = S64_MAX;
2019 		} else {
2020 			dst_reg->smin_value = smin_ptr - smax_val;
2021 			dst_reg->smax_value = smax_ptr - smin_val;
2022 		}
2023 		if (umin_ptr < umax_val) {
2024 			/* Overflow possible, we know nothing */
2025 			dst_reg->umin_value = 0;
2026 			dst_reg->umax_value = U64_MAX;
2027 		} else {
2028 			/* Cannot overflow (as long as bounds are consistent) */
2029 			dst_reg->umin_value = umin_ptr - umax_val;
2030 			dst_reg->umax_value = umax_ptr - umin_val;
2031 		}
2032 		dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off);
2033 		dst_reg->off = ptr_reg->off;
2034 		if (reg_is_pkt_pointer(ptr_reg)) {
2035 			dst_reg->id = ++env->id_gen;
2036 			/* something was added to pkt_ptr, set range to zero */
2037 			if (smin_val < 0)
2038 				dst_reg->range = 0;
2039 		}
2040 		break;
2041 	case BPF_AND:
2042 	case BPF_OR:
2043 	case BPF_XOR:
2044 		/* bitwise ops on pointers are troublesome, prohibit. */
2045 		verbose(env, "R%d bitwise operator %s on pointer prohibited\n",
2046 			dst, bpf_alu_string[opcode >> 4]);
2047 		return -EACCES;
2048 	default:
2049 		/* other operators (e.g. MUL,LSH) produce non-pointer results */
2050 		verbose(env, "R%d pointer arithmetic with %s operator prohibited\n",
2051 			dst, bpf_alu_string[opcode >> 4]);
2052 		return -EACCES;
2053 	}
2054 
2055 	if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type))
2056 		return -EINVAL;
2057 
2058 	__update_reg_bounds(dst_reg);
2059 	__reg_deduce_bounds(dst_reg);
2060 	__reg_bound_offset(dst_reg);
2061 	return 0;
2062 }
2063 
2064 /* WARNING: This function does calculations on 64-bit values, but the actual
2065  * execution may occur on 32-bit values. Therefore, things like bitshifts
2066  * need extra checks in the 32-bit case.
2067  */
2068 static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
2069 				      struct bpf_insn *insn,
2070 				      struct bpf_reg_state *dst_reg,
2071 				      struct bpf_reg_state src_reg)
2072 {
2073 	struct bpf_reg_state *regs = cur_regs(env);
2074 	u8 opcode = BPF_OP(insn->code);
2075 	bool src_known, dst_known;
2076 	s64 smin_val, smax_val;
2077 	u64 umin_val, umax_val;
2078 	u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
2079 
2080 	smin_val = src_reg.smin_value;
2081 	smax_val = src_reg.smax_value;
2082 	umin_val = src_reg.umin_value;
2083 	umax_val = src_reg.umax_value;
2084 	src_known = tnum_is_const(src_reg.var_off);
2085 	dst_known = tnum_is_const(dst_reg->var_off);
2086 
2087 	if (!src_known &&
2088 	    opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) {
2089 		__mark_reg_unknown(dst_reg);
2090 		return 0;
2091 	}
2092 
2093 	switch (opcode) {
2094 	case BPF_ADD:
2095 		if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
2096 		    signed_add_overflows(dst_reg->smax_value, smax_val)) {
2097 			dst_reg->smin_value = S64_MIN;
2098 			dst_reg->smax_value = S64_MAX;
2099 		} else {
2100 			dst_reg->smin_value += smin_val;
2101 			dst_reg->smax_value += smax_val;
2102 		}
2103 		if (dst_reg->umin_value + umin_val < umin_val ||
2104 		    dst_reg->umax_value + umax_val < umax_val) {
2105 			dst_reg->umin_value = 0;
2106 			dst_reg->umax_value = U64_MAX;
2107 		} else {
2108 			dst_reg->umin_value += umin_val;
2109 			dst_reg->umax_value += umax_val;
2110 		}
2111 		dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);
2112 		break;
2113 	case BPF_SUB:
2114 		if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
2115 		    signed_sub_overflows(dst_reg->smax_value, smin_val)) {
2116 			/* Overflow possible, we know nothing */
2117 			dst_reg->smin_value = S64_MIN;
2118 			dst_reg->smax_value = S64_MAX;
2119 		} else {
2120 			dst_reg->smin_value -= smax_val;
2121 			dst_reg->smax_value -= smin_val;
2122 		}
2123 		if (dst_reg->umin_value < umax_val) {
2124 			/* Overflow possible, we know nothing */
2125 			dst_reg->umin_value = 0;
2126 			dst_reg->umax_value = U64_MAX;
2127 		} else {
2128 			/* Cannot overflow (as long as bounds are consistent) */
2129 			dst_reg->umin_value -= umax_val;
2130 			dst_reg->umax_value -= umin_val;
2131 		}
2132 		dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off);
2133 		break;
2134 	case BPF_MUL:
2135 		dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off);
2136 		if (smin_val < 0 || dst_reg->smin_value < 0) {
2137 			/* Ain't nobody got time to multiply that sign */
2138 			__mark_reg_unbounded(dst_reg);
2139 			__update_reg_bounds(dst_reg);
2140 			break;
2141 		}
2142 		/* Both values are positive, so we can work with unsigned and
2143 		 * copy the result to signed (unless it exceeds S64_MAX).
2144 		 */
2145 		if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) {
2146 			/* Potential overflow, we know nothing */
2147 			__mark_reg_unbounded(dst_reg);
2148 			/* (except what we can learn from the var_off) */
2149 			__update_reg_bounds(dst_reg);
2150 			break;
2151 		}
2152 		dst_reg->umin_value *= umin_val;
2153 		dst_reg->umax_value *= umax_val;
2154 		if (dst_reg->umax_value > S64_MAX) {
2155 			/* Overflow possible, we know nothing */
2156 			dst_reg->smin_value = S64_MIN;
2157 			dst_reg->smax_value = S64_MAX;
2158 		} else {
2159 			dst_reg->smin_value = dst_reg->umin_value;
2160 			dst_reg->smax_value = dst_reg->umax_value;
2161 		}
2162 		break;
2163 	case BPF_AND:
2164 		if (src_known && dst_known) {
2165 			__mark_reg_known(dst_reg, dst_reg->var_off.value &
2166 						  src_reg.var_off.value);
2167 			break;
2168 		}
2169 		/* We get our minimum from the var_off, since that's inherently
2170 		 * bitwise.  Our maximum is the minimum of the operands' maxima.
2171 		 */
2172 		dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off);
2173 		dst_reg->umin_value = dst_reg->var_off.value;
2174 		dst_reg->umax_value = min(dst_reg->umax_value, umax_val);
2175 		if (dst_reg->smin_value < 0 || smin_val < 0) {
2176 			/* Lose signed bounds when ANDing negative numbers,
2177 			 * ain't nobody got time for that.
2178 			 */
2179 			dst_reg->smin_value = S64_MIN;
2180 			dst_reg->smax_value = S64_MAX;
2181 		} else {
2182 			/* ANDing two positives gives a positive, so safe to
2183 			 * cast result into s64.
2184 			 */
2185 			dst_reg->smin_value = dst_reg->umin_value;
2186 			dst_reg->smax_value = dst_reg->umax_value;
2187 		}
2188 		/* We may learn something more from the var_off */
2189 		__update_reg_bounds(dst_reg);
2190 		break;
2191 	case BPF_OR:
2192 		if (src_known && dst_known) {
2193 			__mark_reg_known(dst_reg, dst_reg->var_off.value |
2194 						  src_reg.var_off.value);
2195 			break;
2196 		}
2197 		/* We get our maximum from the var_off, and our minimum is the
2198 		 * maximum of the operands' minima
2199 		 */
2200 		dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off);
2201 		dst_reg->umin_value = max(dst_reg->umin_value, umin_val);
2202 		dst_reg->umax_value = dst_reg->var_off.value |
2203 				      dst_reg->var_off.mask;
2204 		if (dst_reg->smin_value < 0 || smin_val < 0) {
2205 			/* Lose signed bounds when ORing negative numbers,
2206 			 * ain't nobody got time for that.
2207 			 */
2208 			dst_reg->smin_value = S64_MIN;
2209 			dst_reg->smax_value = S64_MAX;
2210 		} else {
2211 			/* ORing two positives gives a positive, so safe to
2212 			 * cast result into s64.
2213 			 */
2214 			dst_reg->smin_value = dst_reg->umin_value;
2215 			dst_reg->smax_value = dst_reg->umax_value;
2216 		}
2217 		/* We may learn something more from the var_off */
2218 		__update_reg_bounds(dst_reg);
2219 		break;
2220 	case BPF_LSH:
2221 		if (umax_val >= insn_bitness) {
2222 			/* Shifts greater than 31 or 63 are undefined.
2223 			 * This includes shifts by a negative number.
2224 			 */
2225 			mark_reg_unknown(env, regs, insn->dst_reg);
2226 			break;
2227 		}
2228 		/* We lose all sign bit information (except what we can pick
2229 		 * up from var_off)
2230 		 */
2231 		dst_reg->smin_value = S64_MIN;
2232 		dst_reg->smax_value = S64_MAX;
2233 		/* If we might shift our top bit out, then we know nothing */
2234 		if (dst_reg->umax_value > 1ULL << (63 - umax_val)) {
2235 			dst_reg->umin_value = 0;
2236 			dst_reg->umax_value = U64_MAX;
2237 		} else {
2238 			dst_reg->umin_value <<= umin_val;
2239 			dst_reg->umax_value <<= umax_val;
2240 		}
2241 		if (src_known)
2242 			dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val);
2243 		else
2244 			dst_reg->var_off = tnum_lshift(tnum_unknown, umin_val);
2245 		/* We may learn something more from the var_off */
2246 		__update_reg_bounds(dst_reg);
2247 		break;
2248 	case BPF_RSH:
2249 		if (umax_val >= insn_bitness) {
2250 			/* Shifts greater than 31 or 63 are undefined.
2251 			 * This includes shifts by a negative number.
2252 			 */
2253 			mark_reg_unknown(env, regs, insn->dst_reg);
2254 			break;
2255 		}
2256 		/* BPF_RSH is an unsigned shift.  If the value in dst_reg might
2257 		 * be negative, then either:
2258 		 * 1) src_reg might be zero, so the sign bit of the result is
2259 		 *    unknown, so we lose our signed bounds
2260 		 * 2) it's known negative, thus the unsigned bounds capture the
2261 		 *    signed bounds
2262 		 * 3) the signed bounds cross zero, so they tell us nothing
2263 		 *    about the result
2264 		 * If the value in dst_reg is known nonnegative, then again the
2265 		 * unsigned bounts capture the signed bounds.
2266 		 * Thus, in all cases it suffices to blow away our signed bounds
2267 		 * and rely on inferring new ones from the unsigned bounds and
2268 		 * var_off of the result.
2269 		 */
2270 		dst_reg->smin_value = S64_MIN;
2271 		dst_reg->smax_value = S64_MAX;
2272 		if (src_known)
2273 			dst_reg->var_off = tnum_rshift(dst_reg->var_off,
2274 						       umin_val);
2275 		else
2276 			dst_reg->var_off = tnum_rshift(tnum_unknown, umin_val);
2277 		dst_reg->umin_value >>= umax_val;
2278 		dst_reg->umax_value >>= umin_val;
2279 		/* We may learn something more from the var_off */
2280 		__update_reg_bounds(dst_reg);
2281 		break;
2282 	default:
2283 		mark_reg_unknown(env, regs, insn->dst_reg);
2284 		break;
2285 	}
2286 
2287 	if (BPF_CLASS(insn->code) != BPF_ALU64) {
2288 		/* 32-bit ALU ops are (32,32)->32 */
2289 		coerce_reg_to_size(dst_reg, 4);
2290 		coerce_reg_to_size(&src_reg, 4);
2291 	}
2292 
2293 	__reg_deduce_bounds(dst_reg);
2294 	__reg_bound_offset(dst_reg);
2295 	return 0;
2296 }
2297 
2298 /* Handles ALU ops other than BPF_END, BPF_NEG and BPF_MOV: computes new min/max
2299  * and var_off.
2300  */
2301 static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
2302 				   struct bpf_insn *insn)
2303 {
2304 	struct bpf_reg_state *regs = cur_regs(env), *dst_reg, *src_reg;
2305 	struct bpf_reg_state *ptr_reg = NULL, off_reg = {0};
2306 	u8 opcode = BPF_OP(insn->code);
2307 
2308 	dst_reg = &regs[insn->dst_reg];
2309 	src_reg = NULL;
2310 	if (dst_reg->type != SCALAR_VALUE)
2311 		ptr_reg = dst_reg;
2312 	if (BPF_SRC(insn->code) == BPF_X) {
2313 		src_reg = &regs[insn->src_reg];
2314 		if (src_reg->type != SCALAR_VALUE) {
2315 			if (dst_reg->type != SCALAR_VALUE) {
2316 				/* Combining two pointers by any ALU op yields
2317 				 * an arbitrary scalar. Disallow all math except
2318 				 * pointer subtraction
2319 				 */
2320 				if (opcode == BPF_SUB){
2321 					mark_reg_unknown(env, regs, insn->dst_reg);
2322 					return 0;
2323 				}
2324 				verbose(env, "R%d pointer %s pointer prohibited\n",
2325 					insn->dst_reg,
2326 					bpf_alu_string[opcode >> 4]);
2327 				return -EACCES;
2328 			} else {
2329 				/* scalar += pointer
2330 				 * This is legal, but we have to reverse our
2331 				 * src/dest handling in computing the range
2332 				 */
2333 				return adjust_ptr_min_max_vals(env, insn,
2334 							       src_reg, dst_reg);
2335 			}
2336 		} else if (ptr_reg) {
2337 			/* pointer += scalar */
2338 			return adjust_ptr_min_max_vals(env, insn,
2339 						       dst_reg, src_reg);
2340 		}
2341 	} else {
2342 		/* Pretend the src is a reg with a known value, since we only
2343 		 * need to be able to read from this state.
2344 		 */
2345 		off_reg.type = SCALAR_VALUE;
2346 		__mark_reg_known(&off_reg, insn->imm);
2347 		src_reg = &off_reg;
2348 		if (ptr_reg) /* pointer += K */
2349 			return adjust_ptr_min_max_vals(env, insn,
2350 						       ptr_reg, src_reg);
2351 	}
2352 
2353 	/* Got here implies adding two SCALAR_VALUEs */
2354 	if (WARN_ON_ONCE(ptr_reg)) {
2355 		print_verifier_state(env, env->cur_state);
2356 		verbose(env, "verifier internal error: unexpected ptr_reg\n");
2357 		return -EINVAL;
2358 	}
2359 	if (WARN_ON(!src_reg)) {
2360 		print_verifier_state(env, env->cur_state);
2361 		verbose(env, "verifier internal error: no src_reg\n");
2362 		return -EINVAL;
2363 	}
2364 	return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg);
2365 }
2366 
2367 /* check validity of 32-bit and 64-bit arithmetic operations */
2368 static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
2369 {
2370 	struct bpf_reg_state *regs = cur_regs(env);
2371 	u8 opcode = BPF_OP(insn->code);
2372 	int err;
2373 
2374 	if (opcode == BPF_END || opcode == BPF_NEG) {
2375 		if (opcode == BPF_NEG) {
2376 			if (BPF_SRC(insn->code) != 0 ||
2377 			    insn->src_reg != BPF_REG_0 ||
2378 			    insn->off != 0 || insn->imm != 0) {
2379 				verbose(env, "BPF_NEG uses reserved fields\n");
2380 				return -EINVAL;
2381 			}
2382 		} else {
2383 			if (insn->src_reg != BPF_REG_0 || insn->off != 0 ||
2384 			    (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) ||
2385 			    BPF_CLASS(insn->code) == BPF_ALU64) {
2386 				verbose(env, "BPF_END uses reserved fields\n");
2387 				return -EINVAL;
2388 			}
2389 		}
2390 
2391 		/* check src operand */
2392 		err = check_reg_arg(env, insn->dst_reg, SRC_OP);
2393 		if (err)
2394 			return err;
2395 
2396 		if (is_pointer_value(env, insn->dst_reg)) {
2397 			verbose(env, "R%d pointer arithmetic prohibited\n",
2398 				insn->dst_reg);
2399 			return -EACCES;
2400 		}
2401 
2402 		/* check dest operand */
2403 		err = check_reg_arg(env, insn->dst_reg, DST_OP);
2404 		if (err)
2405 			return err;
2406 
2407 	} else if (opcode == BPF_MOV) {
2408 
2409 		if (BPF_SRC(insn->code) == BPF_X) {
2410 			if (insn->imm != 0 || insn->off != 0) {
2411 				verbose(env, "BPF_MOV uses reserved fields\n");
2412 				return -EINVAL;
2413 			}
2414 
2415 			/* check src operand */
2416 			err = check_reg_arg(env, insn->src_reg, SRC_OP);
2417 			if (err)
2418 				return err;
2419 		} else {
2420 			if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
2421 				verbose(env, "BPF_MOV uses reserved fields\n");
2422 				return -EINVAL;
2423 			}
2424 		}
2425 
2426 		/* check dest operand */
2427 		err = check_reg_arg(env, insn->dst_reg, DST_OP);
2428 		if (err)
2429 			return err;
2430 
2431 		if (BPF_SRC(insn->code) == BPF_X) {
2432 			if (BPF_CLASS(insn->code) == BPF_ALU64) {
2433 				/* case: R1 = R2
2434 				 * copy register state to dest reg
2435 				 */
2436 				regs[insn->dst_reg] = regs[insn->src_reg];
2437 				regs[insn->dst_reg].live |= REG_LIVE_WRITTEN;
2438 			} else {
2439 				/* R1 = (u32) R2 */
2440 				if (is_pointer_value(env, insn->src_reg)) {
2441 					verbose(env,
2442 						"R%d partial copy of pointer\n",
2443 						insn->src_reg);
2444 					return -EACCES;
2445 				}
2446 				mark_reg_unknown(env, regs, insn->dst_reg);
2447 				coerce_reg_to_size(&regs[insn->dst_reg], 4);
2448 			}
2449 		} else {
2450 			/* case: R = imm
2451 			 * remember the value we stored into this reg
2452 			 */
2453 			regs[insn->dst_reg].type = SCALAR_VALUE;
2454 			if (BPF_CLASS(insn->code) == BPF_ALU64) {
2455 				__mark_reg_known(regs + insn->dst_reg,
2456 						 insn->imm);
2457 			} else {
2458 				__mark_reg_known(regs + insn->dst_reg,
2459 						 (u32)insn->imm);
2460 			}
2461 		}
2462 
2463 	} else if (opcode > BPF_END) {
2464 		verbose(env, "invalid BPF_ALU opcode %x\n", opcode);
2465 		return -EINVAL;
2466 
2467 	} else {	/* all other ALU ops: and, sub, xor, add, ... */
2468 
2469 		if (BPF_SRC(insn->code) == BPF_X) {
2470 			if (insn->imm != 0 || insn->off != 0) {
2471 				verbose(env, "BPF_ALU uses reserved fields\n");
2472 				return -EINVAL;
2473 			}
2474 			/* check src1 operand */
2475 			err = check_reg_arg(env, insn->src_reg, SRC_OP);
2476 			if (err)
2477 				return err;
2478 		} else {
2479 			if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
2480 				verbose(env, "BPF_ALU uses reserved fields\n");
2481 				return -EINVAL;
2482 			}
2483 		}
2484 
2485 		/* check src2 operand */
2486 		err = check_reg_arg(env, insn->dst_reg, SRC_OP);
2487 		if (err)
2488 			return err;
2489 
2490 		if ((opcode == BPF_MOD || opcode == BPF_DIV) &&
2491 		    BPF_SRC(insn->code) == BPF_K && insn->imm == 0) {
2492 			verbose(env, "div by zero\n");
2493 			return -EINVAL;
2494 		}
2495 
2496 		if ((opcode == BPF_LSH || opcode == BPF_RSH ||
2497 		     opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
2498 			int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;
2499 
2500 			if (insn->imm < 0 || insn->imm >= size) {
2501 				verbose(env, "invalid shift %d\n", insn->imm);
2502 				return -EINVAL;
2503 			}
2504 		}
2505 
2506 		/* check dest operand */
2507 		err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
2508 		if (err)
2509 			return err;
2510 
2511 		return adjust_reg_min_max_vals(env, insn);
2512 	}
2513 
2514 	return 0;
2515 }
2516 
2517 static void find_good_pkt_pointers(struct bpf_verifier_state *state,
2518 				   struct bpf_reg_state *dst_reg,
2519 				   enum bpf_reg_type type,
2520 				   bool range_right_open)
2521 {
2522 	struct bpf_reg_state *regs = state->regs, *reg;
2523 	u16 new_range;
2524 	int i;
2525 
2526 	if (dst_reg->off < 0 ||
2527 	    (dst_reg->off == 0 && range_right_open))
2528 		/* This doesn't give us any range */
2529 		return;
2530 
2531 	if (dst_reg->umax_value > MAX_PACKET_OFF ||
2532 	    dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF)
2533 		/* Risk of overflow.  For instance, ptr + (1<<63) may be less
2534 		 * than pkt_end, but that's because it's also less than pkt.
2535 		 */
2536 		return;
2537 
2538 	new_range = dst_reg->off;
2539 	if (range_right_open)
2540 		new_range--;
2541 
2542 	/* Examples for register markings:
2543 	 *
2544 	 * pkt_data in dst register:
2545 	 *
2546 	 *   r2 = r3;
2547 	 *   r2 += 8;
2548 	 *   if (r2 > pkt_end) goto <handle exception>
2549 	 *   <access okay>
2550 	 *
2551 	 *   r2 = r3;
2552 	 *   r2 += 8;
2553 	 *   if (r2 < pkt_end) goto <access okay>
2554 	 *   <handle exception>
2555 	 *
2556 	 *   Where:
2557 	 *     r2 == dst_reg, pkt_end == src_reg
2558 	 *     r2=pkt(id=n,off=8,r=0)
2559 	 *     r3=pkt(id=n,off=0,r=0)
2560 	 *
2561 	 * pkt_data in src register:
2562 	 *
2563 	 *   r2 = r3;
2564 	 *   r2 += 8;
2565 	 *   if (pkt_end >= r2) goto <access okay>
2566 	 *   <handle exception>
2567 	 *
2568 	 *   r2 = r3;
2569 	 *   r2 += 8;
2570 	 *   if (pkt_end <= r2) goto <handle exception>
2571 	 *   <access okay>
2572 	 *
2573 	 *   Where:
2574 	 *     pkt_end == dst_reg, r2 == src_reg
2575 	 *     r2=pkt(id=n,off=8,r=0)
2576 	 *     r3=pkt(id=n,off=0,r=0)
2577 	 *
2578 	 * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8)
2579 	 * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8)
2580 	 * and [r3, r3 + 8-1) respectively is safe to access depending on
2581 	 * the check.
2582 	 */
2583 
2584 	/* If our ids match, then we must have the same max_value.  And we
2585 	 * don't care about the other reg's fixed offset, since if it's too big
2586 	 * the range won't allow anything.
2587 	 * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16.
2588 	 */
2589 	for (i = 0; i < MAX_BPF_REG; i++)
2590 		if (regs[i].type == type && regs[i].id == dst_reg->id)
2591 			/* keep the maximum range already checked */
2592 			regs[i].range = max(regs[i].range, new_range);
2593 
2594 	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
2595 		if (state->stack[i].slot_type[0] != STACK_SPILL)
2596 			continue;
2597 		reg = &state->stack[i].spilled_ptr;
2598 		if (reg->type == type && reg->id == dst_reg->id)
2599 			reg->range = max(reg->range, new_range);
2600 	}
2601 }
2602 
2603 /* Adjusts the register min/max values in the case that the dst_reg is the
2604  * variable register that we are working on, and src_reg is a constant or we're
2605  * simply doing a BPF_K check.
2606  * In JEQ/JNE cases we also adjust the var_off values.
2607  */
2608 static void reg_set_min_max(struct bpf_reg_state *true_reg,
2609 			    struct bpf_reg_state *false_reg, u64 val,
2610 			    u8 opcode)
2611 {
2612 	/* If the dst_reg is a pointer, we can't learn anything about its
2613 	 * variable offset from the compare (unless src_reg were a pointer into
2614 	 * the same object, but we don't bother with that.
2615 	 * Since false_reg and true_reg have the same type by construction, we
2616 	 * only need to check one of them for pointerness.
2617 	 */
2618 	if (__is_pointer_value(false, false_reg))
2619 		return;
2620 
2621 	switch (opcode) {
2622 	case BPF_JEQ:
2623 		/* If this is false then we know nothing Jon Snow, but if it is
2624 		 * true then we know for sure.
2625 		 */
2626 		__mark_reg_known(true_reg, val);
2627 		break;
2628 	case BPF_JNE:
2629 		/* If this is true we know nothing Jon Snow, but if it is false
2630 		 * we know the value for sure;
2631 		 */
2632 		__mark_reg_known(false_reg, val);
2633 		break;
2634 	case BPF_JGT:
2635 		false_reg->umax_value = min(false_reg->umax_value, val);
2636 		true_reg->umin_value = max(true_reg->umin_value, val + 1);
2637 		break;
2638 	case BPF_JSGT:
2639 		false_reg->smax_value = min_t(s64, false_reg->smax_value, val);
2640 		true_reg->smin_value = max_t(s64, true_reg->smin_value, val + 1);
2641 		break;
2642 	case BPF_JLT:
2643 		false_reg->umin_value = max(false_reg->umin_value, val);
2644 		true_reg->umax_value = min(true_reg->umax_value, val - 1);
2645 		break;
2646 	case BPF_JSLT:
2647 		false_reg->smin_value = max_t(s64, false_reg->smin_value, val);
2648 		true_reg->smax_value = min_t(s64, true_reg->smax_value, val - 1);
2649 		break;
2650 	case BPF_JGE:
2651 		false_reg->umax_value = min(false_reg->umax_value, val - 1);
2652 		true_reg->umin_value = max(true_reg->umin_value, val);
2653 		break;
2654 	case BPF_JSGE:
2655 		false_reg->smax_value = min_t(s64, false_reg->smax_value, val - 1);
2656 		true_reg->smin_value = max_t(s64, true_reg->smin_value, val);
2657 		break;
2658 	case BPF_JLE:
2659 		false_reg->umin_value = max(false_reg->umin_value, val + 1);
2660 		true_reg->umax_value = min(true_reg->umax_value, val);
2661 		break;
2662 	case BPF_JSLE:
2663 		false_reg->smin_value = max_t(s64, false_reg->smin_value, val + 1);
2664 		true_reg->smax_value = min_t(s64, true_reg->smax_value, val);
2665 		break;
2666 	default:
2667 		break;
2668 	}
2669 
2670 	__reg_deduce_bounds(false_reg);
2671 	__reg_deduce_bounds(true_reg);
2672 	/* We might have learned some bits from the bounds. */
2673 	__reg_bound_offset(false_reg);
2674 	__reg_bound_offset(true_reg);
2675 	/* Intersecting with the old var_off might have improved our bounds
2676 	 * slightly.  e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
2677 	 * then new var_off is (0; 0x7f...fc) which improves our umax.
2678 	 */
2679 	__update_reg_bounds(false_reg);
2680 	__update_reg_bounds(true_reg);
2681 }
2682 
2683 /* Same as above, but for the case that dst_reg holds a constant and src_reg is
2684  * the variable reg.
2685  */
2686 static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
2687 				struct bpf_reg_state *false_reg, u64 val,
2688 				u8 opcode)
2689 {
2690 	if (__is_pointer_value(false, false_reg))
2691 		return;
2692 
2693 	switch (opcode) {
2694 	case BPF_JEQ:
2695 		/* If this is false then we know nothing Jon Snow, but if it is
2696 		 * true then we know for sure.
2697 		 */
2698 		__mark_reg_known(true_reg, val);
2699 		break;
2700 	case BPF_JNE:
2701 		/* If this is true we know nothing Jon Snow, but if it is false
2702 		 * we know the value for sure;
2703 		 */
2704 		__mark_reg_known(false_reg, val);
2705 		break;
2706 	case BPF_JGT:
2707 		true_reg->umax_value = min(true_reg->umax_value, val - 1);
2708 		false_reg->umin_value = max(false_reg->umin_value, val);
2709 		break;
2710 	case BPF_JSGT:
2711 		true_reg->smax_value = min_t(s64, true_reg->smax_value, val - 1);
2712 		false_reg->smin_value = max_t(s64, false_reg->smin_value, val);
2713 		break;
2714 	case BPF_JLT:
2715 		true_reg->umin_value = max(true_reg->umin_value, val + 1);
2716 		false_reg->umax_value = min(false_reg->umax_value, val);
2717 		break;
2718 	case BPF_JSLT:
2719 		true_reg->smin_value = max_t(s64, true_reg->smin_value, val + 1);
2720 		false_reg->smax_value = min_t(s64, false_reg->smax_value, val);
2721 		break;
2722 	case BPF_JGE:
2723 		true_reg->umax_value = min(true_reg->umax_value, val);
2724 		false_reg->umin_value = max(false_reg->umin_value, val + 1);
2725 		break;
2726 	case BPF_JSGE:
2727 		true_reg->smax_value = min_t(s64, true_reg->smax_value, val);
2728 		false_reg->smin_value = max_t(s64, false_reg->smin_value, val + 1);
2729 		break;
2730 	case BPF_JLE:
2731 		true_reg->umin_value = max(true_reg->umin_value, val);
2732 		false_reg->umax_value = min(false_reg->umax_value, val - 1);
2733 		break;
2734 	case BPF_JSLE:
2735 		true_reg->smin_value = max_t(s64, true_reg->smin_value, val);
2736 		false_reg->smax_value = min_t(s64, false_reg->smax_value, val - 1);
2737 		break;
2738 	default:
2739 		break;
2740 	}
2741 
2742 	__reg_deduce_bounds(false_reg);
2743 	__reg_deduce_bounds(true_reg);
2744 	/* We might have learned some bits from the bounds. */
2745 	__reg_bound_offset(false_reg);
2746 	__reg_bound_offset(true_reg);
2747 	/* Intersecting with the old var_off might have improved our bounds
2748 	 * slightly.  e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
2749 	 * then new var_off is (0; 0x7f...fc) which improves our umax.
2750 	 */
2751 	__update_reg_bounds(false_reg);
2752 	__update_reg_bounds(true_reg);
2753 }
2754 
2755 /* Regs are known to be equal, so intersect their min/max/var_off */
2756 static void __reg_combine_min_max(struct bpf_reg_state *src_reg,
2757 				  struct bpf_reg_state *dst_reg)
2758 {
2759 	src_reg->umin_value = dst_reg->umin_value = max(src_reg->umin_value,
2760 							dst_reg->umin_value);
2761 	src_reg->umax_value = dst_reg->umax_value = min(src_reg->umax_value,
2762 							dst_reg->umax_value);
2763 	src_reg->smin_value = dst_reg->smin_value = max(src_reg->smin_value,
2764 							dst_reg->smin_value);
2765 	src_reg->smax_value = dst_reg->smax_value = min(src_reg->smax_value,
2766 							dst_reg->smax_value);
2767 	src_reg->var_off = dst_reg->var_off = tnum_intersect(src_reg->var_off,
2768 							     dst_reg->var_off);
2769 	/* We might have learned new bounds from the var_off. */
2770 	__update_reg_bounds(src_reg);
2771 	__update_reg_bounds(dst_reg);
2772 	/* We might have learned something about the sign bit. */
2773 	__reg_deduce_bounds(src_reg);
2774 	__reg_deduce_bounds(dst_reg);
2775 	/* We might have learned some bits from the bounds. */
2776 	__reg_bound_offset(src_reg);
2777 	__reg_bound_offset(dst_reg);
2778 	/* Intersecting with the old var_off might have improved our bounds
2779 	 * slightly.  e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
2780 	 * then new var_off is (0; 0x7f...fc) which improves our umax.
2781 	 */
2782 	__update_reg_bounds(src_reg);
2783 	__update_reg_bounds(dst_reg);
2784 }
2785 
2786 static void reg_combine_min_max(struct bpf_reg_state *true_src,
2787 				struct bpf_reg_state *true_dst,
2788 				struct bpf_reg_state *false_src,
2789 				struct bpf_reg_state *false_dst,
2790 				u8 opcode)
2791 {
2792 	switch (opcode) {
2793 	case BPF_JEQ:
2794 		__reg_combine_min_max(true_src, true_dst);
2795 		break;
2796 	case BPF_JNE:
2797 		__reg_combine_min_max(false_src, false_dst);
2798 		break;
2799 	}
2800 }
2801 
2802 static void mark_map_reg(struct bpf_reg_state *regs, u32 regno, u32 id,
2803 			 bool is_null)
2804 {
2805 	struct bpf_reg_state *reg = &regs[regno];
2806 
2807 	if (reg->type == PTR_TO_MAP_VALUE_OR_NULL && reg->id == id) {
2808 		/* Old offset (both fixed and variable parts) should
2809 		 * have been known-zero, because we don't allow pointer
2810 		 * arithmetic on pointers that might be NULL.
2811 		 */
2812 		if (WARN_ON_ONCE(reg->smin_value || reg->smax_value ||
2813 				 !tnum_equals_const(reg->var_off, 0) ||
2814 				 reg->off)) {
2815 			__mark_reg_known_zero(reg);
2816 			reg->off = 0;
2817 		}
2818 		if (is_null) {
2819 			reg->type = SCALAR_VALUE;
2820 		} else if (reg->map_ptr->inner_map_meta) {
2821 			reg->type = CONST_PTR_TO_MAP;
2822 			reg->map_ptr = reg->map_ptr->inner_map_meta;
2823 		} else {
2824 			reg->type = PTR_TO_MAP_VALUE;
2825 		}
2826 		/* We don't need id from this point onwards anymore, thus we
2827 		 * should better reset it, so that state pruning has chances
2828 		 * to take effect.
2829 		 */
2830 		reg->id = 0;
2831 	}
2832 }
2833 
2834 /* The logic is similar to find_good_pkt_pointers(), both could eventually
2835  * be folded together at some point.
2836  */
2837 static void mark_map_regs(struct bpf_verifier_state *state, u32 regno,
2838 			  bool is_null)
2839 {
2840 	struct bpf_reg_state *regs = state->regs;
2841 	u32 id = regs[regno].id;
2842 	int i;
2843 
2844 	for (i = 0; i < MAX_BPF_REG; i++)
2845 		mark_map_reg(regs, i, id, is_null);
2846 
2847 	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
2848 		if (state->stack[i].slot_type[0] != STACK_SPILL)
2849 			continue;
2850 		mark_map_reg(&state->stack[i].spilled_ptr, 0, id, is_null);
2851 	}
2852 }
2853 
2854 static bool try_match_pkt_pointers(const struct bpf_insn *insn,
2855 				   struct bpf_reg_state *dst_reg,
2856 				   struct bpf_reg_state *src_reg,
2857 				   struct bpf_verifier_state *this_branch,
2858 				   struct bpf_verifier_state *other_branch)
2859 {
2860 	if (BPF_SRC(insn->code) != BPF_X)
2861 		return false;
2862 
2863 	switch (BPF_OP(insn->code)) {
2864 	case BPF_JGT:
2865 		if ((dst_reg->type == PTR_TO_PACKET &&
2866 		     src_reg->type == PTR_TO_PACKET_END) ||
2867 		    (dst_reg->type == PTR_TO_PACKET_META &&
2868 		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
2869 			/* pkt_data' > pkt_end, pkt_meta' > pkt_data */
2870 			find_good_pkt_pointers(this_branch, dst_reg,
2871 					       dst_reg->type, false);
2872 		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
2873 			    src_reg->type == PTR_TO_PACKET) ||
2874 			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
2875 			    src_reg->type == PTR_TO_PACKET_META)) {
2876 			/* pkt_end > pkt_data', pkt_data > pkt_meta' */
2877 			find_good_pkt_pointers(other_branch, src_reg,
2878 					       src_reg->type, true);
2879 		} else {
2880 			return false;
2881 		}
2882 		break;
2883 	case BPF_JLT:
2884 		if ((dst_reg->type == PTR_TO_PACKET &&
2885 		     src_reg->type == PTR_TO_PACKET_END) ||
2886 		    (dst_reg->type == PTR_TO_PACKET_META &&
2887 		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
2888 			/* pkt_data' < pkt_end, pkt_meta' < pkt_data */
2889 			find_good_pkt_pointers(other_branch, dst_reg,
2890 					       dst_reg->type, true);
2891 		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
2892 			    src_reg->type == PTR_TO_PACKET) ||
2893 			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
2894 			    src_reg->type == PTR_TO_PACKET_META)) {
2895 			/* pkt_end < pkt_data', pkt_data > pkt_meta' */
2896 			find_good_pkt_pointers(this_branch, src_reg,
2897 					       src_reg->type, false);
2898 		} else {
2899 			return false;
2900 		}
2901 		break;
2902 	case BPF_JGE:
2903 		if ((dst_reg->type == PTR_TO_PACKET &&
2904 		     src_reg->type == PTR_TO_PACKET_END) ||
2905 		    (dst_reg->type == PTR_TO_PACKET_META &&
2906 		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
2907 			/* pkt_data' >= pkt_end, pkt_meta' >= pkt_data */
2908 			find_good_pkt_pointers(this_branch, dst_reg,
2909 					       dst_reg->type, true);
2910 		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
2911 			    src_reg->type == PTR_TO_PACKET) ||
2912 			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
2913 			    src_reg->type == PTR_TO_PACKET_META)) {
2914 			/* pkt_end >= pkt_data', pkt_data >= pkt_meta' */
2915 			find_good_pkt_pointers(other_branch, src_reg,
2916 					       src_reg->type, false);
2917 		} else {
2918 			return false;
2919 		}
2920 		break;
2921 	case BPF_JLE:
2922 		if ((dst_reg->type == PTR_TO_PACKET &&
2923 		     src_reg->type == PTR_TO_PACKET_END) ||
2924 		    (dst_reg->type == PTR_TO_PACKET_META &&
2925 		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
2926 			/* pkt_data' <= pkt_end, pkt_meta' <= pkt_data */
2927 			find_good_pkt_pointers(other_branch, dst_reg,
2928 					       dst_reg->type, false);
2929 		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
2930 			    src_reg->type == PTR_TO_PACKET) ||
2931 			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
2932 			    src_reg->type == PTR_TO_PACKET_META)) {
2933 			/* pkt_end <= pkt_data', pkt_data <= pkt_meta' */
2934 			find_good_pkt_pointers(this_branch, src_reg,
2935 					       src_reg->type, true);
2936 		} else {
2937 			return false;
2938 		}
2939 		break;
2940 	default:
2941 		return false;
2942 	}
2943 
2944 	return true;
2945 }
2946 
2947 static int check_cond_jmp_op(struct bpf_verifier_env *env,
2948 			     struct bpf_insn *insn, int *insn_idx)
2949 {
2950 	struct bpf_verifier_state *other_branch, *this_branch = env->cur_state;
2951 	struct bpf_reg_state *regs = this_branch->regs, *dst_reg;
2952 	u8 opcode = BPF_OP(insn->code);
2953 	int err;
2954 
2955 	if (opcode > BPF_JSLE) {
2956 		verbose(env, "invalid BPF_JMP opcode %x\n", opcode);
2957 		return -EINVAL;
2958 	}
2959 
2960 	if (BPF_SRC(insn->code) == BPF_X) {
2961 		if (insn->imm != 0) {
2962 			verbose(env, "BPF_JMP uses reserved fields\n");
2963 			return -EINVAL;
2964 		}
2965 
2966 		/* check src1 operand */
2967 		err = check_reg_arg(env, insn->src_reg, SRC_OP);
2968 		if (err)
2969 			return err;
2970 
2971 		if (is_pointer_value(env, insn->src_reg)) {
2972 			verbose(env, "R%d pointer comparison prohibited\n",
2973 				insn->src_reg);
2974 			return -EACCES;
2975 		}
2976 	} else {
2977 		if (insn->src_reg != BPF_REG_0) {
2978 			verbose(env, "BPF_JMP uses reserved fields\n");
2979 			return -EINVAL;
2980 		}
2981 	}
2982 
2983 	/* check src2 operand */
2984 	err = check_reg_arg(env, insn->dst_reg, SRC_OP);
2985 	if (err)
2986 		return err;
2987 
2988 	dst_reg = &regs[insn->dst_reg];
2989 
2990 	/* detect if R == 0 where R was initialized to zero earlier */
2991 	if (BPF_SRC(insn->code) == BPF_K &&
2992 	    (opcode == BPF_JEQ || opcode == BPF_JNE) &&
2993 	    dst_reg->type == SCALAR_VALUE &&
2994 	    tnum_equals_const(dst_reg->var_off, insn->imm)) {
2995 		if (opcode == BPF_JEQ) {
2996 			/* if (imm == imm) goto pc+off;
2997 			 * only follow the goto, ignore fall-through
2998 			 */
2999 			*insn_idx += insn->off;
3000 			return 0;
3001 		} else {
3002 			/* if (imm != imm) goto pc+off;
3003 			 * only follow fall-through branch, since
3004 			 * that's where the program will go
3005 			 */
3006 			return 0;
3007 		}
3008 	}
3009 
3010 	other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx);
3011 	if (!other_branch)
3012 		return -EFAULT;
3013 
3014 	/* detect if we are comparing against a constant value so we can adjust
3015 	 * our min/max values for our dst register.
3016 	 * this is only legit if both are scalars (or pointers to the same
3017 	 * object, I suppose, but we don't support that right now), because
3018 	 * otherwise the different base pointers mean the offsets aren't
3019 	 * comparable.
3020 	 */
3021 	if (BPF_SRC(insn->code) == BPF_X) {
3022 		if (dst_reg->type == SCALAR_VALUE &&
3023 		    regs[insn->src_reg].type == SCALAR_VALUE) {
3024 			if (tnum_is_const(regs[insn->src_reg].var_off))
3025 				reg_set_min_max(&other_branch->regs[insn->dst_reg],
3026 						dst_reg, regs[insn->src_reg].var_off.value,
3027 						opcode);
3028 			else if (tnum_is_const(dst_reg->var_off))
3029 				reg_set_min_max_inv(&other_branch->regs[insn->src_reg],
3030 						    &regs[insn->src_reg],
3031 						    dst_reg->var_off.value, opcode);
3032 			else if (opcode == BPF_JEQ || opcode == BPF_JNE)
3033 				/* Comparing for equality, we can combine knowledge */
3034 				reg_combine_min_max(&other_branch->regs[insn->src_reg],
3035 						    &other_branch->regs[insn->dst_reg],
3036 						    &regs[insn->src_reg],
3037 						    &regs[insn->dst_reg], opcode);
3038 		}
3039 	} else if (dst_reg->type == SCALAR_VALUE) {
3040 		reg_set_min_max(&other_branch->regs[insn->dst_reg],
3041 					dst_reg, insn->imm, opcode);
3042 	}
3043 
3044 	/* detect if R == 0 where R is returned from bpf_map_lookup_elem() */
3045 	if (BPF_SRC(insn->code) == BPF_K &&
3046 	    insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) &&
3047 	    dst_reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
3048 		/* Mark all identical map registers in each branch as either
3049 		 * safe or unknown depending R == 0 or R != 0 conditional.
3050 		 */
3051 		mark_map_regs(this_branch, insn->dst_reg, opcode == BPF_JNE);
3052 		mark_map_regs(other_branch, insn->dst_reg, opcode == BPF_JEQ);
3053 	} else if (!try_match_pkt_pointers(insn, dst_reg, &regs[insn->src_reg],
3054 					   this_branch, other_branch) &&
3055 		   is_pointer_value(env, insn->dst_reg)) {
3056 		verbose(env, "R%d pointer comparison prohibited\n",
3057 			insn->dst_reg);
3058 		return -EACCES;
3059 	}
3060 	if (env->log.level)
3061 		print_verifier_state(env, this_branch);
3062 	return 0;
3063 }
3064 
3065 /* return the map pointer stored inside BPF_LD_IMM64 instruction */
3066 static struct bpf_map *ld_imm64_to_map_ptr(struct bpf_insn *insn)
3067 {
3068 	u64 imm64 = ((u64) (u32) insn[0].imm) | ((u64) (u32) insn[1].imm) << 32;
3069 
3070 	return (struct bpf_map *) (unsigned long) imm64;
3071 }
3072 
3073 /* verify BPF_LD_IMM64 instruction */
3074 static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
3075 {
3076 	struct bpf_reg_state *regs = cur_regs(env);
3077 	int err;
3078 
3079 	if (BPF_SIZE(insn->code) != BPF_DW) {
3080 		verbose(env, "invalid BPF_LD_IMM insn\n");
3081 		return -EINVAL;
3082 	}
3083 	if (insn->off != 0) {
3084 		verbose(env, "BPF_LD_IMM64 uses reserved fields\n");
3085 		return -EINVAL;
3086 	}
3087 
3088 	err = check_reg_arg(env, insn->dst_reg, DST_OP);
3089 	if (err)
3090 		return err;
3091 
3092 	if (insn->src_reg == 0) {
3093 		u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
3094 
3095 		regs[insn->dst_reg].type = SCALAR_VALUE;
3096 		__mark_reg_known(&regs[insn->dst_reg], imm);
3097 		return 0;
3098 	}
3099 
3100 	/* replace_map_fd_with_map_ptr() should have caught bad ld_imm64 */
3101 	BUG_ON(insn->src_reg != BPF_PSEUDO_MAP_FD);
3102 
3103 	regs[insn->dst_reg].type = CONST_PTR_TO_MAP;
3104 	regs[insn->dst_reg].map_ptr = ld_imm64_to_map_ptr(insn);
3105 	return 0;
3106 }
3107 
3108 static bool may_access_skb(enum bpf_prog_type type)
3109 {
3110 	switch (type) {
3111 	case BPF_PROG_TYPE_SOCKET_FILTER:
3112 	case BPF_PROG_TYPE_SCHED_CLS:
3113 	case BPF_PROG_TYPE_SCHED_ACT:
3114 		return true;
3115 	default:
3116 		return false;
3117 	}
3118 }
3119 
3120 /* verify safety of LD_ABS|LD_IND instructions:
3121  * - they can only appear in the programs where ctx == skb
3122  * - since they are wrappers of function calls, they scratch R1-R5 registers,
3123  *   preserve R6-R9, and store return value into R0
3124  *
3125  * Implicit input:
3126  *   ctx == skb == R6 == CTX
3127  *
3128  * Explicit input:
3129  *   SRC == any register
3130  *   IMM == 32-bit immediate
3131  *
3132  * Output:
3133  *   R0 - 8/16/32-bit skb data converted to cpu endianness
3134  */
3135 static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
3136 {
3137 	struct bpf_reg_state *regs = cur_regs(env);
3138 	u8 mode = BPF_MODE(insn->code);
3139 	int i, err;
3140 
3141 	if (!may_access_skb(env->prog->type)) {
3142 		verbose(env, "BPF_LD_[ABS|IND] instructions not allowed for this program type\n");
3143 		return -EINVAL;
3144 	}
3145 
3146 	if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
3147 	    BPF_SIZE(insn->code) == BPF_DW ||
3148 	    (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
3149 		verbose(env, "BPF_LD_[ABS|IND] uses reserved fields\n");
3150 		return -EINVAL;
3151 	}
3152 
3153 	/* check whether implicit source operand (register R6) is readable */
3154 	err = check_reg_arg(env, BPF_REG_6, SRC_OP);
3155 	if (err)
3156 		return err;
3157 
3158 	if (regs[BPF_REG_6].type != PTR_TO_CTX) {
3159 		verbose(env,
3160 			"at the time of BPF_LD_ABS|IND R6 != pointer to skb\n");
3161 		return -EINVAL;
3162 	}
3163 
3164 	if (mode == BPF_IND) {
3165 		/* check explicit source operand */
3166 		err = check_reg_arg(env, insn->src_reg, SRC_OP);
3167 		if (err)
3168 			return err;
3169 	}
3170 
3171 	/* reset caller saved regs to unreadable */
3172 	for (i = 0; i < CALLER_SAVED_REGS; i++) {
3173 		mark_reg_not_init(env, regs, caller_saved[i]);
3174 		check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
3175 	}
3176 
3177 	/* mark destination R0 register as readable, since it contains
3178 	 * the value fetched from the packet.
3179 	 * Already marked as written above.
3180 	 */
3181 	mark_reg_unknown(env, regs, BPF_REG_0);
3182 	return 0;
3183 }
3184 
3185 static int check_return_code(struct bpf_verifier_env *env)
3186 {
3187 	struct bpf_reg_state *reg;
3188 	struct tnum range = tnum_range(0, 1);
3189 
3190 	switch (env->prog->type) {
3191 	case BPF_PROG_TYPE_CGROUP_SKB:
3192 	case BPF_PROG_TYPE_CGROUP_SOCK:
3193 	case BPF_PROG_TYPE_SOCK_OPS:
3194 	case BPF_PROG_TYPE_CGROUP_DEVICE:
3195 		break;
3196 	default:
3197 		return 0;
3198 	}
3199 
3200 	reg = cur_regs(env) + BPF_REG_0;
3201 	if (reg->type != SCALAR_VALUE) {
3202 		verbose(env, "At program exit the register R0 is not a known value (%s)\n",
3203 			reg_type_str[reg->type]);
3204 		return -EINVAL;
3205 	}
3206 
3207 	if (!tnum_in(range, reg->var_off)) {
3208 		verbose(env, "At program exit the register R0 ");
3209 		if (!tnum_is_unknown(reg->var_off)) {
3210 			char tn_buf[48];
3211 
3212 			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3213 			verbose(env, "has value %s", tn_buf);
3214 		} else {
3215 			verbose(env, "has unknown scalar value");
3216 		}
3217 		verbose(env, " should have been 0 or 1\n");
3218 		return -EINVAL;
3219 	}
3220 	return 0;
3221 }
3222 
3223 /* non-recursive DFS pseudo code
3224  * 1  procedure DFS-iterative(G,v):
3225  * 2      label v as discovered
3226  * 3      let S be a stack
3227  * 4      S.push(v)
3228  * 5      while S is not empty
3229  * 6            t <- S.pop()
3230  * 7            if t is what we're looking for:
3231  * 8                return t
3232  * 9            for all edges e in G.adjacentEdges(t) do
3233  * 10               if edge e is already labelled
3234  * 11                   continue with the next edge
3235  * 12               w <- G.adjacentVertex(t,e)
3236  * 13               if vertex w is not discovered and not explored
3237  * 14                   label e as tree-edge
3238  * 15                   label w as discovered
3239  * 16                   S.push(w)
3240  * 17                   continue at 5
3241  * 18               else if vertex w is discovered
3242  * 19                   label e as back-edge
3243  * 20               else
3244  * 21                   // vertex w is explored
3245  * 22                   label e as forward- or cross-edge
3246  * 23           label t as explored
3247  * 24           S.pop()
3248  *
3249  * convention:
3250  * 0x10 - discovered
3251  * 0x11 - discovered and fall-through edge labelled
3252  * 0x12 - discovered and fall-through and branch edges labelled
3253  * 0x20 - explored
3254  */
3255 
3256 enum {
3257 	DISCOVERED = 0x10,
3258 	EXPLORED = 0x20,
3259 	FALLTHROUGH = 1,
3260 	BRANCH = 2,
3261 };
3262 
3263 #define STATE_LIST_MARK ((struct bpf_verifier_state_list *) -1L)
3264 
3265 static int *insn_stack;	/* stack of insns to process */
3266 static int cur_stack;	/* current stack index */
3267 static int *insn_state;
3268 
3269 /* t, w, e - match pseudo-code above:
3270  * t - index of current instruction
3271  * w - next instruction
3272  * e - edge
3273  */
3274 static int push_insn(int t, int w, int e, struct bpf_verifier_env *env)
3275 {
3276 	if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH))
3277 		return 0;
3278 
3279 	if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH))
3280 		return 0;
3281 
3282 	if (w < 0 || w >= env->prog->len) {
3283 		verbose(env, "jump out of range from insn %d to %d\n", t, w);
3284 		return -EINVAL;
3285 	}
3286 
3287 	if (e == BRANCH)
3288 		/* mark branch target for state pruning */
3289 		env->explored_states[w] = STATE_LIST_MARK;
3290 
3291 	if (insn_state[w] == 0) {
3292 		/* tree-edge */
3293 		insn_state[t] = DISCOVERED | e;
3294 		insn_state[w] = DISCOVERED;
3295 		if (cur_stack >= env->prog->len)
3296 			return -E2BIG;
3297 		insn_stack[cur_stack++] = w;
3298 		return 1;
3299 	} else if ((insn_state[w] & 0xF0) == DISCOVERED) {
3300 		verbose(env, "back-edge from insn %d to %d\n", t, w);
3301 		return -EINVAL;
3302 	} else if (insn_state[w] == EXPLORED) {
3303 		/* forward- or cross-edge */
3304 		insn_state[t] = DISCOVERED | e;
3305 	} else {
3306 		verbose(env, "insn state internal bug\n");
3307 		return -EFAULT;
3308 	}
3309 	return 0;
3310 }
3311 
3312 /* non-recursive depth-first-search to detect loops in BPF program
3313  * loop == back-edge in directed graph
3314  */
3315 static int check_cfg(struct bpf_verifier_env *env)
3316 {
3317 	struct bpf_insn *insns = env->prog->insnsi;
3318 	int insn_cnt = env->prog->len;
3319 	int ret = 0;
3320 	int i, t;
3321 
3322 	insn_state = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
3323 	if (!insn_state)
3324 		return -ENOMEM;
3325 
3326 	insn_stack = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
3327 	if (!insn_stack) {
3328 		kfree(insn_state);
3329 		return -ENOMEM;
3330 	}
3331 
3332 	insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */
3333 	insn_stack[0] = 0; /* 0 is the first instruction */
3334 	cur_stack = 1;
3335 
3336 peek_stack:
3337 	if (cur_stack == 0)
3338 		goto check_state;
3339 	t = insn_stack[cur_stack - 1];
3340 
3341 	if (BPF_CLASS(insns[t].code) == BPF_JMP) {
3342 		u8 opcode = BPF_OP(insns[t].code);
3343 
3344 		if (opcode == BPF_EXIT) {
3345 			goto mark_explored;
3346 		} else if (opcode == BPF_CALL) {
3347 			ret = push_insn(t, t + 1, FALLTHROUGH, env);
3348 			if (ret == 1)
3349 				goto peek_stack;
3350 			else if (ret < 0)
3351 				goto err_free;
3352 			if (t + 1 < insn_cnt)
3353 				env->explored_states[t + 1] = STATE_LIST_MARK;
3354 		} else if (opcode == BPF_JA) {
3355 			if (BPF_SRC(insns[t].code) != BPF_K) {
3356 				ret = -EINVAL;
3357 				goto err_free;
3358 			}
3359 			/* unconditional jump with single edge */
3360 			ret = push_insn(t, t + insns[t].off + 1,
3361 					FALLTHROUGH, env);
3362 			if (ret == 1)
3363 				goto peek_stack;
3364 			else if (ret < 0)
3365 				goto err_free;
3366 			/* tell verifier to check for equivalent states
3367 			 * after every call and jump
3368 			 */
3369 			if (t + 1 < insn_cnt)
3370 				env->explored_states[t + 1] = STATE_LIST_MARK;
3371 		} else {
3372 			/* conditional jump with two edges */
3373 			env->explored_states[t] = STATE_LIST_MARK;
3374 			ret = push_insn(t, t + 1, FALLTHROUGH, env);
3375 			if (ret == 1)
3376 				goto peek_stack;
3377 			else if (ret < 0)
3378 				goto err_free;
3379 
3380 			ret = push_insn(t, t + insns[t].off + 1, BRANCH, env);
3381 			if (ret == 1)
3382 				goto peek_stack;
3383 			else if (ret < 0)
3384 				goto err_free;
3385 		}
3386 	} else {
3387 		/* all other non-branch instructions with single
3388 		 * fall-through edge
3389 		 */
3390 		ret = push_insn(t, t + 1, FALLTHROUGH, env);
3391 		if (ret == 1)
3392 			goto peek_stack;
3393 		else if (ret < 0)
3394 			goto err_free;
3395 	}
3396 
3397 mark_explored:
3398 	insn_state[t] = EXPLORED;
3399 	if (cur_stack-- <= 0) {
3400 		verbose(env, "pop stack internal bug\n");
3401 		ret = -EFAULT;
3402 		goto err_free;
3403 	}
3404 	goto peek_stack;
3405 
3406 check_state:
3407 	for (i = 0; i < insn_cnt; i++) {
3408 		if (insn_state[i] != EXPLORED) {
3409 			verbose(env, "unreachable insn %d\n", i);
3410 			ret = -EINVAL;
3411 			goto err_free;
3412 		}
3413 	}
3414 	ret = 0; /* cfg looks good */
3415 
3416 err_free:
3417 	kfree(insn_state);
3418 	kfree(insn_stack);
3419 	return ret;
3420 }
3421 
3422 /* check %cur's range satisfies %old's */
3423 static bool range_within(struct bpf_reg_state *old,
3424 			 struct bpf_reg_state *cur)
3425 {
3426 	return old->umin_value <= cur->umin_value &&
3427 	       old->umax_value >= cur->umax_value &&
3428 	       old->smin_value <= cur->smin_value &&
3429 	       old->smax_value >= cur->smax_value;
3430 }
3431 
3432 /* Maximum number of register states that can exist at once */
3433 #define ID_MAP_SIZE	(MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE)
3434 struct idpair {
3435 	u32 old;
3436 	u32 cur;
3437 };
3438 
3439 /* If in the old state two registers had the same id, then they need to have
3440  * the same id in the new state as well.  But that id could be different from
3441  * the old state, so we need to track the mapping from old to new ids.
3442  * Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent
3443  * regs with old id 5 must also have new id 9 for the new state to be safe.  But
3444  * regs with a different old id could still have new id 9, we don't care about
3445  * that.
3446  * So we look through our idmap to see if this old id has been seen before.  If
3447  * so, we require the new id to match; otherwise, we add the id pair to the map.
3448  */
3449 static bool check_ids(u32 old_id, u32 cur_id, struct idpair *idmap)
3450 {
3451 	unsigned int i;
3452 
3453 	for (i = 0; i < ID_MAP_SIZE; i++) {
3454 		if (!idmap[i].old) {
3455 			/* Reached an empty slot; haven't seen this id before */
3456 			idmap[i].old = old_id;
3457 			idmap[i].cur = cur_id;
3458 			return true;
3459 		}
3460 		if (idmap[i].old == old_id)
3461 			return idmap[i].cur == cur_id;
3462 	}
3463 	/* We ran out of idmap slots, which should be impossible */
3464 	WARN_ON_ONCE(1);
3465 	return false;
3466 }
3467 
3468 /* Returns true if (rold safe implies rcur safe) */
3469 static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
3470 		    struct idpair *idmap)
3471 {
3472 	if (!(rold->live & REG_LIVE_READ))
3473 		/* explored state didn't use this */
3474 		return true;
3475 
3476 	if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, live)) == 0)
3477 		return true;
3478 
3479 	if (rold->type == NOT_INIT)
3480 		/* explored state can't have used this */
3481 		return true;
3482 	if (rcur->type == NOT_INIT)
3483 		return false;
3484 	switch (rold->type) {
3485 	case SCALAR_VALUE:
3486 		if (rcur->type == SCALAR_VALUE) {
3487 			/* new val must satisfy old val knowledge */
3488 			return range_within(rold, rcur) &&
3489 			       tnum_in(rold->var_off, rcur->var_off);
3490 		} else {
3491 			/* We're trying to use a pointer in place of a scalar.
3492 			 * Even if the scalar was unbounded, this could lead to
3493 			 * pointer leaks because scalars are allowed to leak
3494 			 * while pointers are not. We could make this safe in
3495 			 * special cases if root is calling us, but it's
3496 			 * probably not worth the hassle.
3497 			 */
3498 			return false;
3499 		}
3500 	case PTR_TO_MAP_VALUE:
3501 		/* If the new min/max/var_off satisfy the old ones and
3502 		 * everything else matches, we are OK.
3503 		 * We don't care about the 'id' value, because nothing
3504 		 * uses it for PTR_TO_MAP_VALUE (only for ..._OR_NULL)
3505 		 */
3506 		return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 &&
3507 		       range_within(rold, rcur) &&
3508 		       tnum_in(rold->var_off, rcur->var_off);
3509 	case PTR_TO_MAP_VALUE_OR_NULL:
3510 		/* a PTR_TO_MAP_VALUE could be safe to use as a
3511 		 * PTR_TO_MAP_VALUE_OR_NULL into the same map.
3512 		 * However, if the old PTR_TO_MAP_VALUE_OR_NULL then got NULL-
3513 		 * checked, doing so could have affected others with the same
3514 		 * id, and we can't check for that because we lost the id when
3515 		 * we converted to a PTR_TO_MAP_VALUE.
3516 		 */
3517 		if (rcur->type != PTR_TO_MAP_VALUE_OR_NULL)
3518 			return false;
3519 		if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)))
3520 			return false;
3521 		/* Check our ids match any regs they're supposed to */
3522 		return check_ids(rold->id, rcur->id, idmap);
3523 	case PTR_TO_PACKET_META:
3524 	case PTR_TO_PACKET:
3525 		if (rcur->type != rold->type)
3526 			return false;
3527 		/* We must have at least as much range as the old ptr
3528 		 * did, so that any accesses which were safe before are
3529 		 * still safe.  This is true even if old range < old off,
3530 		 * since someone could have accessed through (ptr - k), or
3531 		 * even done ptr -= k in a register, to get a safe access.
3532 		 */
3533 		if (rold->range > rcur->range)
3534 			return false;
3535 		/* If the offsets don't match, we can't trust our alignment;
3536 		 * nor can we be sure that we won't fall out of range.
3537 		 */
3538 		if (rold->off != rcur->off)
3539 			return false;
3540 		/* id relations must be preserved */
3541 		if (rold->id && !check_ids(rold->id, rcur->id, idmap))
3542 			return false;
3543 		/* new val must satisfy old val knowledge */
3544 		return range_within(rold, rcur) &&
3545 		       tnum_in(rold->var_off, rcur->var_off);
3546 	case PTR_TO_CTX:
3547 	case CONST_PTR_TO_MAP:
3548 	case PTR_TO_STACK:
3549 	case PTR_TO_PACKET_END:
3550 		/* Only valid matches are exact, which memcmp() above
3551 		 * would have accepted
3552 		 */
3553 	default:
3554 		/* Don't know what's going on, just say it's not safe */
3555 		return false;
3556 	}
3557 
3558 	/* Shouldn't get here; if we do, say it's not safe */
3559 	WARN_ON_ONCE(1);
3560 	return false;
3561 }
3562 
3563 static bool stacksafe(struct bpf_verifier_state *old,
3564 		      struct bpf_verifier_state *cur,
3565 		      struct idpair *idmap)
3566 {
3567 	int i, spi;
3568 
3569 	/* if explored stack has more populated slots than current stack
3570 	 * such stacks are not equivalent
3571 	 */
3572 	if (old->allocated_stack > cur->allocated_stack)
3573 		return false;
3574 
3575 	/* walk slots of the explored stack and ignore any additional
3576 	 * slots in the current stack, since explored(safe) state
3577 	 * didn't use them
3578 	 */
3579 	for (i = 0; i < old->allocated_stack; i++) {
3580 		spi = i / BPF_REG_SIZE;
3581 
3582 		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID)
3583 			continue;
3584 		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] !=
3585 		    cur->stack[spi].slot_type[i % BPF_REG_SIZE])
3586 			/* Ex: old explored (safe) state has STACK_SPILL in
3587 			 * this stack slot, but current has has STACK_MISC ->
3588 			 * this verifier states are not equivalent,
3589 			 * return false to continue verification of this path
3590 			 */
3591 			return false;
3592 		if (i % BPF_REG_SIZE)
3593 			continue;
3594 		if (old->stack[spi].slot_type[0] != STACK_SPILL)
3595 			continue;
3596 		if (!regsafe(&old->stack[spi].spilled_ptr,
3597 			     &cur->stack[spi].spilled_ptr,
3598 			     idmap))
3599 			/* when explored and current stack slot are both storing
3600 			 * spilled registers, check that stored pointers types
3601 			 * are the same as well.
3602 			 * Ex: explored safe path could have stored
3603 			 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8}
3604 			 * but current path has stored:
3605 			 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16}
3606 			 * such verifier states are not equivalent.
3607 			 * return false to continue verification of this path
3608 			 */
3609 			return false;
3610 	}
3611 	return true;
3612 }
3613 
3614 /* compare two verifier states
3615  *
3616  * all states stored in state_list are known to be valid, since
3617  * verifier reached 'bpf_exit' instruction through them
3618  *
3619  * this function is called when verifier exploring different branches of
3620  * execution popped from the state stack. If it sees an old state that has
3621  * more strict register state and more strict stack state then this execution
3622  * branch doesn't need to be explored further, since verifier already
3623  * concluded that more strict state leads to valid finish.
3624  *
3625  * Therefore two states are equivalent if register state is more conservative
3626  * and explored stack state is more conservative than the current one.
3627  * Example:
3628  *       explored                   current
3629  * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC)
3630  * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC)
3631  *
3632  * In other words if current stack state (one being explored) has more
3633  * valid slots than old one that already passed validation, it means
3634  * the verifier can stop exploring and conclude that current state is valid too
3635  *
3636  * Similarly with registers. If explored state has register type as invalid
3637  * whereas register type in current state is meaningful, it means that
3638  * the current state will reach 'bpf_exit' instruction safely
3639  */
3640 static bool states_equal(struct bpf_verifier_env *env,
3641 			 struct bpf_verifier_state *old,
3642 			 struct bpf_verifier_state *cur)
3643 {
3644 	struct idpair *idmap;
3645 	bool ret = false;
3646 	int i;
3647 
3648 	idmap = kcalloc(ID_MAP_SIZE, sizeof(struct idpair), GFP_KERNEL);
3649 	/* If we failed to allocate the idmap, just say it's not safe */
3650 	if (!idmap)
3651 		return false;
3652 
3653 	for (i = 0; i < MAX_BPF_REG; i++) {
3654 		if (!regsafe(&old->regs[i], &cur->regs[i], idmap))
3655 			goto out_free;
3656 	}
3657 
3658 	if (!stacksafe(old, cur, idmap))
3659 		goto out_free;
3660 	ret = true;
3661 out_free:
3662 	kfree(idmap);
3663 	return ret;
3664 }
3665 
3666 /* A write screens off any subsequent reads; but write marks come from the
3667  * straight-line code between a state and its parent.  When we arrive at a
3668  * jump target (in the first iteration of the propagate_liveness() loop),
3669  * we didn't arrive by the straight-line code, so read marks in state must
3670  * propagate to parent regardless of state's write marks.
3671  */
3672 static bool do_propagate_liveness(const struct bpf_verifier_state *state,
3673 				  struct bpf_verifier_state *parent)
3674 {
3675 	bool writes = parent == state->parent; /* Observe write marks */
3676 	bool touched = false; /* any changes made? */
3677 	int i;
3678 
3679 	if (!parent)
3680 		return touched;
3681 	/* Propagate read liveness of registers... */
3682 	BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
3683 	/* We don't need to worry about FP liveness because it's read-only */
3684 	for (i = 0; i < BPF_REG_FP; i++) {
3685 		if (parent->regs[i].live & REG_LIVE_READ)
3686 			continue;
3687 		if (writes && (state->regs[i].live & REG_LIVE_WRITTEN))
3688 			continue;
3689 		if (state->regs[i].live & REG_LIVE_READ) {
3690 			parent->regs[i].live |= REG_LIVE_READ;
3691 			touched = true;
3692 		}
3693 	}
3694 	/* ... and stack slots */
3695 	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE &&
3696 		    i < parent->allocated_stack / BPF_REG_SIZE; i++) {
3697 		if (parent->stack[i].slot_type[0] != STACK_SPILL)
3698 			continue;
3699 		if (state->stack[i].slot_type[0] != STACK_SPILL)
3700 			continue;
3701 		if (parent->stack[i].spilled_ptr.live & REG_LIVE_READ)
3702 			continue;
3703 		if (writes &&
3704 		    (state->stack[i].spilled_ptr.live & REG_LIVE_WRITTEN))
3705 			continue;
3706 		if (state->stack[i].spilled_ptr.live & REG_LIVE_READ) {
3707 			parent->stack[i].spilled_ptr.live |= REG_LIVE_READ;
3708 			touched = true;
3709 		}
3710 	}
3711 	return touched;
3712 }
3713 
3714 /* "parent" is "a state from which we reach the current state", but initially
3715  * it is not the state->parent (i.e. "the state whose straight-line code leads
3716  * to the current state"), instead it is the state that happened to arrive at
3717  * a (prunable) equivalent of the current state.  See comment above
3718  * do_propagate_liveness() for consequences of this.
3719  * This function is just a more efficient way of calling mark_reg_read() or
3720  * mark_stack_slot_read() on each reg in "parent" that is read in "state",
3721  * though it requires that parent != state->parent in the call arguments.
3722  */
3723 static void propagate_liveness(const struct bpf_verifier_state *state,
3724 			       struct bpf_verifier_state *parent)
3725 {
3726 	while (do_propagate_liveness(state, parent)) {
3727 		/* Something changed, so we need to feed those changes onward */
3728 		state = parent;
3729 		parent = state->parent;
3730 	}
3731 }
3732 
3733 static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
3734 {
3735 	struct bpf_verifier_state_list *new_sl;
3736 	struct bpf_verifier_state_list *sl;
3737 	struct bpf_verifier_state *cur = env->cur_state;
3738 	int i, err;
3739 
3740 	sl = env->explored_states[insn_idx];
3741 	if (!sl)
3742 		/* this 'insn_idx' instruction wasn't marked, so we will not
3743 		 * be doing state search here
3744 		 */
3745 		return 0;
3746 
3747 	while (sl != STATE_LIST_MARK) {
3748 		if (states_equal(env, &sl->state, cur)) {
3749 			/* reached equivalent register/stack state,
3750 			 * prune the search.
3751 			 * Registers read by the continuation are read by us.
3752 			 * If we have any write marks in env->cur_state, they
3753 			 * will prevent corresponding reads in the continuation
3754 			 * from reaching our parent (an explored_state).  Our
3755 			 * own state will get the read marks recorded, but
3756 			 * they'll be immediately forgotten as we're pruning
3757 			 * this state and will pop a new one.
3758 			 */
3759 			propagate_liveness(&sl->state, cur);
3760 			return 1;
3761 		}
3762 		sl = sl->next;
3763 	}
3764 
3765 	/* there were no equivalent states, remember current one.
3766 	 * technically the current state is not proven to be safe yet,
3767 	 * but it will either reach bpf_exit (which means it's safe) or
3768 	 * it will be rejected. Since there are no loops, we won't be
3769 	 * seeing this 'insn_idx' instruction again on the way to bpf_exit
3770 	 */
3771 	new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL);
3772 	if (!new_sl)
3773 		return -ENOMEM;
3774 
3775 	/* add new state to the head of linked list */
3776 	err = copy_verifier_state(&new_sl->state, cur);
3777 	if (err) {
3778 		free_verifier_state(&new_sl->state, false);
3779 		kfree(new_sl);
3780 		return err;
3781 	}
3782 	new_sl->next = env->explored_states[insn_idx];
3783 	env->explored_states[insn_idx] = new_sl;
3784 	/* connect new state to parentage chain */
3785 	cur->parent = &new_sl->state;
3786 	/* clear write marks in current state: the writes we did are not writes
3787 	 * our child did, so they don't screen off its reads from us.
3788 	 * (There are no read marks in current state, because reads always mark
3789 	 * their parent and current state never has children yet.  Only
3790 	 * explored_states can get read marks.)
3791 	 */
3792 	for (i = 0; i < BPF_REG_FP; i++)
3793 		cur->regs[i].live = REG_LIVE_NONE;
3794 	for (i = 0; i < cur->allocated_stack / BPF_REG_SIZE; i++)
3795 		if (cur->stack[i].slot_type[0] == STACK_SPILL)
3796 			cur->stack[i].spilled_ptr.live = REG_LIVE_NONE;
3797 	return 0;
3798 }
3799 
3800 static int ext_analyzer_insn_hook(struct bpf_verifier_env *env,
3801 				  int insn_idx, int prev_insn_idx)
3802 {
3803 	if (env->dev_ops && env->dev_ops->insn_hook)
3804 		return env->dev_ops->insn_hook(env, insn_idx, prev_insn_idx);
3805 
3806 	return 0;
3807 }
3808 
3809 static int do_check(struct bpf_verifier_env *env)
3810 {
3811 	struct bpf_verifier_state *state;
3812 	struct bpf_insn *insns = env->prog->insnsi;
3813 	struct bpf_reg_state *regs;
3814 	int insn_cnt = env->prog->len;
3815 	int insn_idx, prev_insn_idx = 0;
3816 	int insn_processed = 0;
3817 	bool do_print_state = false;
3818 
3819 	state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL);
3820 	if (!state)
3821 		return -ENOMEM;
3822 	env->cur_state = state;
3823 	init_reg_state(env, state->regs);
3824 	state->parent = NULL;
3825 	insn_idx = 0;
3826 	for (;;) {
3827 		struct bpf_insn *insn;
3828 		u8 class;
3829 		int err;
3830 
3831 		if (insn_idx >= insn_cnt) {
3832 			verbose(env, "invalid insn idx %d insn_cnt %d\n",
3833 				insn_idx, insn_cnt);
3834 			return -EFAULT;
3835 		}
3836 
3837 		insn = &insns[insn_idx];
3838 		class = BPF_CLASS(insn->code);
3839 
3840 		if (++insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) {
3841 			verbose(env,
3842 				"BPF program is too large. Processed %d insn\n",
3843 				insn_processed);
3844 			return -E2BIG;
3845 		}
3846 
3847 		err = is_state_visited(env, insn_idx);
3848 		if (err < 0)
3849 			return err;
3850 		if (err == 1) {
3851 			/* found equivalent state, can prune the search */
3852 			if (env->log.level) {
3853 				if (do_print_state)
3854 					verbose(env, "\nfrom %d to %d: safe\n",
3855 						prev_insn_idx, insn_idx);
3856 				else
3857 					verbose(env, "%d: safe\n", insn_idx);
3858 			}
3859 			goto process_bpf_exit;
3860 		}
3861 
3862 		if (need_resched())
3863 			cond_resched();
3864 
3865 		if (env->log.level > 1 || (env->log.level && do_print_state)) {
3866 			if (env->log.level > 1)
3867 				verbose(env, "%d:", insn_idx);
3868 			else
3869 				verbose(env, "\nfrom %d to %d:",
3870 					prev_insn_idx, insn_idx);
3871 			print_verifier_state(env, state);
3872 			do_print_state = false;
3873 		}
3874 
3875 		if (env->log.level) {
3876 			verbose(env, "%d: ", insn_idx);
3877 			print_bpf_insn(verbose, env, insn,
3878 				       env->allow_ptr_leaks);
3879 		}
3880 
3881 		err = ext_analyzer_insn_hook(env, insn_idx, prev_insn_idx);
3882 		if (err)
3883 			return err;
3884 
3885 		regs = cur_regs(env);
3886 		env->insn_aux_data[insn_idx].seen = true;
3887 		if (class == BPF_ALU || class == BPF_ALU64) {
3888 			err = check_alu_op(env, insn);
3889 			if (err)
3890 				return err;
3891 
3892 		} else if (class == BPF_LDX) {
3893 			enum bpf_reg_type *prev_src_type, src_reg_type;
3894 
3895 			/* check for reserved fields is already done */
3896 
3897 			/* check src operand */
3898 			err = check_reg_arg(env, insn->src_reg, SRC_OP);
3899 			if (err)
3900 				return err;
3901 
3902 			err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
3903 			if (err)
3904 				return err;
3905 
3906 			src_reg_type = regs[insn->src_reg].type;
3907 
3908 			/* check that memory (src_reg + off) is readable,
3909 			 * the state of dst_reg will be updated by this func
3910 			 */
3911 			err = check_mem_access(env, insn_idx, insn->src_reg, insn->off,
3912 					       BPF_SIZE(insn->code), BPF_READ,
3913 					       insn->dst_reg);
3914 			if (err)
3915 				return err;
3916 
3917 			prev_src_type = &env->insn_aux_data[insn_idx].ptr_type;
3918 
3919 			if (*prev_src_type == NOT_INIT) {
3920 				/* saw a valid insn
3921 				 * dst_reg = *(u32 *)(src_reg + off)
3922 				 * save type to validate intersecting paths
3923 				 */
3924 				*prev_src_type = src_reg_type;
3925 
3926 			} else if (src_reg_type != *prev_src_type &&
3927 				   (src_reg_type == PTR_TO_CTX ||
3928 				    *prev_src_type == PTR_TO_CTX)) {
3929 				/* ABuser program is trying to use the same insn
3930 				 * dst_reg = *(u32*) (src_reg + off)
3931 				 * with different pointer types:
3932 				 * src_reg == ctx in one branch and
3933 				 * src_reg == stack|map in some other branch.
3934 				 * Reject it.
3935 				 */
3936 				verbose(env, "same insn cannot be used with different pointers\n");
3937 				return -EINVAL;
3938 			}
3939 
3940 		} else if (class == BPF_STX) {
3941 			enum bpf_reg_type *prev_dst_type, dst_reg_type;
3942 
3943 			if (BPF_MODE(insn->code) == BPF_XADD) {
3944 				err = check_xadd(env, insn_idx, insn);
3945 				if (err)
3946 					return err;
3947 				insn_idx++;
3948 				continue;
3949 			}
3950 
3951 			/* check src1 operand */
3952 			err = check_reg_arg(env, insn->src_reg, SRC_OP);
3953 			if (err)
3954 				return err;
3955 			/* check src2 operand */
3956 			err = check_reg_arg(env, insn->dst_reg, SRC_OP);
3957 			if (err)
3958 				return err;
3959 
3960 			dst_reg_type = regs[insn->dst_reg].type;
3961 
3962 			/* check that memory (dst_reg + off) is writeable */
3963 			err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
3964 					       BPF_SIZE(insn->code), BPF_WRITE,
3965 					       insn->src_reg);
3966 			if (err)
3967 				return err;
3968 
3969 			prev_dst_type = &env->insn_aux_data[insn_idx].ptr_type;
3970 
3971 			if (*prev_dst_type == NOT_INIT) {
3972 				*prev_dst_type = dst_reg_type;
3973 			} else if (dst_reg_type != *prev_dst_type &&
3974 				   (dst_reg_type == PTR_TO_CTX ||
3975 				    *prev_dst_type == PTR_TO_CTX)) {
3976 				verbose(env, "same insn cannot be used with different pointers\n");
3977 				return -EINVAL;
3978 			}
3979 
3980 		} else if (class == BPF_ST) {
3981 			if (BPF_MODE(insn->code) != BPF_MEM ||
3982 			    insn->src_reg != BPF_REG_0) {
3983 				verbose(env, "BPF_ST uses reserved fields\n");
3984 				return -EINVAL;
3985 			}
3986 			/* check src operand */
3987 			err = check_reg_arg(env, insn->dst_reg, SRC_OP);
3988 			if (err)
3989 				return err;
3990 
3991 			/* check that memory (dst_reg + off) is writeable */
3992 			err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
3993 					       BPF_SIZE(insn->code), BPF_WRITE,
3994 					       -1);
3995 			if (err)
3996 				return err;
3997 
3998 		} else if (class == BPF_JMP) {
3999 			u8 opcode = BPF_OP(insn->code);
4000 
4001 			if (opcode == BPF_CALL) {
4002 				if (BPF_SRC(insn->code) != BPF_K ||
4003 				    insn->off != 0 ||
4004 				    insn->src_reg != BPF_REG_0 ||
4005 				    insn->dst_reg != BPF_REG_0) {
4006 					verbose(env, "BPF_CALL uses reserved fields\n");
4007 					return -EINVAL;
4008 				}
4009 
4010 				err = check_call(env, insn->imm, insn_idx);
4011 				if (err)
4012 					return err;
4013 
4014 			} else if (opcode == BPF_JA) {
4015 				if (BPF_SRC(insn->code) != BPF_K ||
4016 				    insn->imm != 0 ||
4017 				    insn->src_reg != BPF_REG_0 ||
4018 				    insn->dst_reg != BPF_REG_0) {
4019 					verbose(env, "BPF_JA uses reserved fields\n");
4020 					return -EINVAL;
4021 				}
4022 
4023 				insn_idx += insn->off + 1;
4024 				continue;
4025 
4026 			} else if (opcode == BPF_EXIT) {
4027 				if (BPF_SRC(insn->code) != BPF_K ||
4028 				    insn->imm != 0 ||
4029 				    insn->src_reg != BPF_REG_0 ||
4030 				    insn->dst_reg != BPF_REG_0) {
4031 					verbose(env, "BPF_EXIT uses reserved fields\n");
4032 					return -EINVAL;
4033 				}
4034 
4035 				/* eBPF calling convetion is such that R0 is used
4036 				 * to return the value from eBPF program.
4037 				 * Make sure that it's readable at this time
4038 				 * of bpf_exit, which means that program wrote
4039 				 * something into it earlier
4040 				 */
4041 				err = check_reg_arg(env, BPF_REG_0, SRC_OP);
4042 				if (err)
4043 					return err;
4044 
4045 				if (is_pointer_value(env, BPF_REG_0)) {
4046 					verbose(env, "R0 leaks addr as return value\n");
4047 					return -EACCES;
4048 				}
4049 
4050 				err = check_return_code(env);
4051 				if (err)
4052 					return err;
4053 process_bpf_exit:
4054 				err = pop_stack(env, &prev_insn_idx, &insn_idx);
4055 				if (err < 0) {
4056 					if (err != -ENOENT)
4057 						return err;
4058 					break;
4059 				} else {
4060 					do_print_state = true;
4061 					continue;
4062 				}
4063 			} else {
4064 				err = check_cond_jmp_op(env, insn, &insn_idx);
4065 				if (err)
4066 					return err;
4067 			}
4068 		} else if (class == BPF_LD) {
4069 			u8 mode = BPF_MODE(insn->code);
4070 
4071 			if (mode == BPF_ABS || mode == BPF_IND) {
4072 				err = check_ld_abs(env, insn);
4073 				if (err)
4074 					return err;
4075 
4076 			} else if (mode == BPF_IMM) {
4077 				err = check_ld_imm(env, insn);
4078 				if (err)
4079 					return err;
4080 
4081 				insn_idx++;
4082 				env->insn_aux_data[insn_idx].seen = true;
4083 			} else {
4084 				verbose(env, "invalid BPF_LD mode\n");
4085 				return -EINVAL;
4086 			}
4087 		} else {
4088 			verbose(env, "unknown insn class %d\n", class);
4089 			return -EINVAL;
4090 		}
4091 
4092 		insn_idx++;
4093 	}
4094 
4095 	verbose(env, "processed %d insns, stack depth %d\n", insn_processed,
4096 		env->prog->aux->stack_depth);
4097 	return 0;
4098 }
4099 
4100 static int check_map_prealloc(struct bpf_map *map)
4101 {
4102 	return (map->map_type != BPF_MAP_TYPE_HASH &&
4103 		map->map_type != BPF_MAP_TYPE_PERCPU_HASH &&
4104 		map->map_type != BPF_MAP_TYPE_HASH_OF_MAPS) ||
4105 		!(map->map_flags & BPF_F_NO_PREALLOC);
4106 }
4107 
4108 static int check_map_prog_compatibility(struct bpf_verifier_env *env,
4109 					struct bpf_map *map,
4110 					struct bpf_prog *prog)
4111 
4112 {
4113 	/* Make sure that BPF_PROG_TYPE_PERF_EVENT programs only use
4114 	 * preallocated hash maps, since doing memory allocation
4115 	 * in overflow_handler can crash depending on where nmi got
4116 	 * triggered.
4117 	 */
4118 	if (prog->type == BPF_PROG_TYPE_PERF_EVENT) {
4119 		if (!check_map_prealloc(map)) {
4120 			verbose(env, "perf_event programs can only use preallocated hash map\n");
4121 			return -EINVAL;
4122 		}
4123 		if (map->inner_map_meta &&
4124 		    !check_map_prealloc(map->inner_map_meta)) {
4125 			verbose(env, "perf_event programs can only use preallocated inner hash map\n");
4126 			return -EINVAL;
4127 		}
4128 	}
4129 	return 0;
4130 }
4131 
4132 /* look for pseudo eBPF instructions that access map FDs and
4133  * replace them with actual map pointers
4134  */
4135 static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env)
4136 {
4137 	struct bpf_insn *insn = env->prog->insnsi;
4138 	int insn_cnt = env->prog->len;
4139 	int i, j, err;
4140 
4141 	err = bpf_prog_calc_tag(env->prog);
4142 	if (err)
4143 		return err;
4144 
4145 	for (i = 0; i < insn_cnt; i++, insn++) {
4146 		if (BPF_CLASS(insn->code) == BPF_LDX &&
4147 		    (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) {
4148 			verbose(env, "BPF_LDX uses reserved fields\n");
4149 			return -EINVAL;
4150 		}
4151 
4152 		if (BPF_CLASS(insn->code) == BPF_STX &&
4153 		    ((BPF_MODE(insn->code) != BPF_MEM &&
4154 		      BPF_MODE(insn->code) != BPF_XADD) || insn->imm != 0)) {
4155 			verbose(env, "BPF_STX uses reserved fields\n");
4156 			return -EINVAL;
4157 		}
4158 
4159 		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) {
4160 			struct bpf_map *map;
4161 			struct fd f;
4162 
4163 			if (i == insn_cnt - 1 || insn[1].code != 0 ||
4164 			    insn[1].dst_reg != 0 || insn[1].src_reg != 0 ||
4165 			    insn[1].off != 0) {
4166 				verbose(env, "invalid bpf_ld_imm64 insn\n");
4167 				return -EINVAL;
4168 			}
4169 
4170 			if (insn->src_reg == 0)
4171 				/* valid generic load 64-bit imm */
4172 				goto next_insn;
4173 
4174 			if (insn->src_reg != BPF_PSEUDO_MAP_FD) {
4175 				verbose(env,
4176 					"unrecognized bpf_ld_imm64 insn\n");
4177 				return -EINVAL;
4178 			}
4179 
4180 			f = fdget(insn->imm);
4181 			map = __bpf_map_get(f);
4182 			if (IS_ERR(map)) {
4183 				verbose(env, "fd %d is not pointing to valid bpf_map\n",
4184 					insn->imm);
4185 				return PTR_ERR(map);
4186 			}
4187 
4188 			err = check_map_prog_compatibility(env, map, env->prog);
4189 			if (err) {
4190 				fdput(f);
4191 				return err;
4192 			}
4193 
4194 			/* store map pointer inside BPF_LD_IMM64 instruction */
4195 			insn[0].imm = (u32) (unsigned long) map;
4196 			insn[1].imm = ((u64) (unsigned long) map) >> 32;
4197 
4198 			/* check whether we recorded this map already */
4199 			for (j = 0; j < env->used_map_cnt; j++)
4200 				if (env->used_maps[j] == map) {
4201 					fdput(f);
4202 					goto next_insn;
4203 				}
4204 
4205 			if (env->used_map_cnt >= MAX_USED_MAPS) {
4206 				fdput(f);
4207 				return -E2BIG;
4208 			}
4209 
4210 			/* hold the map. If the program is rejected by verifier,
4211 			 * the map will be released by release_maps() or it
4212 			 * will be used by the valid program until it's unloaded
4213 			 * and all maps are released in free_bpf_prog_info()
4214 			 */
4215 			map = bpf_map_inc(map, false);
4216 			if (IS_ERR(map)) {
4217 				fdput(f);
4218 				return PTR_ERR(map);
4219 			}
4220 			env->used_maps[env->used_map_cnt++] = map;
4221 
4222 			fdput(f);
4223 next_insn:
4224 			insn++;
4225 			i++;
4226 		}
4227 	}
4228 
4229 	/* now all pseudo BPF_LD_IMM64 instructions load valid
4230 	 * 'struct bpf_map *' into a register instead of user map_fd.
4231 	 * These pointers will be used later by verifier to validate map access.
4232 	 */
4233 	return 0;
4234 }
4235 
4236 /* drop refcnt of maps used by the rejected program */
4237 static void release_maps(struct bpf_verifier_env *env)
4238 {
4239 	int i;
4240 
4241 	for (i = 0; i < env->used_map_cnt; i++)
4242 		bpf_map_put(env->used_maps[i]);
4243 }
4244 
4245 /* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */
4246 static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env)
4247 {
4248 	struct bpf_insn *insn = env->prog->insnsi;
4249 	int insn_cnt = env->prog->len;
4250 	int i;
4251 
4252 	for (i = 0; i < insn_cnt; i++, insn++)
4253 		if (insn->code == (BPF_LD | BPF_IMM | BPF_DW))
4254 			insn->src_reg = 0;
4255 }
4256 
4257 /* single env->prog->insni[off] instruction was replaced with the range
4258  * insni[off, off + cnt).  Adjust corresponding insn_aux_data by copying
4259  * [0, off) and [off, end) to new locations, so the patched range stays zero
4260  */
4261 static int adjust_insn_aux_data(struct bpf_verifier_env *env, u32 prog_len,
4262 				u32 off, u32 cnt)
4263 {
4264 	struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data;
4265 	int i;
4266 
4267 	if (cnt == 1)
4268 		return 0;
4269 	new_data = vzalloc(sizeof(struct bpf_insn_aux_data) * prog_len);
4270 	if (!new_data)
4271 		return -ENOMEM;
4272 	memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
4273 	memcpy(new_data + off + cnt - 1, old_data + off,
4274 	       sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
4275 	for (i = off; i < off + cnt - 1; i++)
4276 		new_data[i].seen = true;
4277 	env->insn_aux_data = new_data;
4278 	vfree(old_data);
4279 	return 0;
4280 }
4281 
4282 static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off,
4283 					    const struct bpf_insn *patch, u32 len)
4284 {
4285 	struct bpf_prog *new_prog;
4286 
4287 	new_prog = bpf_patch_insn_single(env->prog, off, patch, len);
4288 	if (!new_prog)
4289 		return NULL;
4290 	if (adjust_insn_aux_data(env, new_prog->len, off, len))
4291 		return NULL;
4292 	return new_prog;
4293 }
4294 
4295 /* The verifier does more data flow analysis than llvm and will not explore
4296  * branches that are dead at run time. Malicious programs can have dead code
4297  * too. Therefore replace all dead at-run-time code with nops.
4298  */
4299 static void sanitize_dead_code(struct bpf_verifier_env *env)
4300 {
4301 	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
4302 	struct bpf_insn nop = BPF_MOV64_REG(BPF_REG_0, BPF_REG_0);
4303 	struct bpf_insn *insn = env->prog->insnsi;
4304 	const int insn_cnt = env->prog->len;
4305 	int i;
4306 
4307 	for (i = 0; i < insn_cnt; i++) {
4308 		if (aux_data[i].seen)
4309 			continue;
4310 		memcpy(insn + i, &nop, sizeof(nop));
4311 	}
4312 }
4313 
4314 /* convert load instructions that access fields of 'struct __sk_buff'
4315  * into sequence of instructions that access fields of 'struct sk_buff'
4316  */
4317 static int convert_ctx_accesses(struct bpf_verifier_env *env)
4318 {
4319 	const struct bpf_verifier_ops *ops = env->ops;
4320 	int i, cnt, size, ctx_field_size, delta = 0;
4321 	const int insn_cnt = env->prog->len;
4322 	struct bpf_insn insn_buf[16], *insn;
4323 	struct bpf_prog *new_prog;
4324 	enum bpf_access_type type;
4325 	bool is_narrower_load;
4326 	u32 target_size;
4327 
4328 	if (ops->gen_prologue) {
4329 		cnt = ops->gen_prologue(insn_buf, env->seen_direct_write,
4330 					env->prog);
4331 		if (cnt >= ARRAY_SIZE(insn_buf)) {
4332 			verbose(env, "bpf verifier is misconfigured\n");
4333 			return -EINVAL;
4334 		} else if (cnt) {
4335 			new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt);
4336 			if (!new_prog)
4337 				return -ENOMEM;
4338 
4339 			env->prog = new_prog;
4340 			delta += cnt - 1;
4341 		}
4342 	}
4343 
4344 	if (!ops->convert_ctx_access)
4345 		return 0;
4346 
4347 	insn = env->prog->insnsi + delta;
4348 
4349 	for (i = 0; i < insn_cnt; i++, insn++) {
4350 		if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) ||
4351 		    insn->code == (BPF_LDX | BPF_MEM | BPF_H) ||
4352 		    insn->code == (BPF_LDX | BPF_MEM | BPF_W) ||
4353 		    insn->code == (BPF_LDX | BPF_MEM | BPF_DW))
4354 			type = BPF_READ;
4355 		else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) ||
4356 			 insn->code == (BPF_STX | BPF_MEM | BPF_H) ||
4357 			 insn->code == (BPF_STX | BPF_MEM | BPF_W) ||
4358 			 insn->code == (BPF_STX | BPF_MEM | BPF_DW))
4359 			type = BPF_WRITE;
4360 		else
4361 			continue;
4362 
4363 		if (env->insn_aux_data[i + delta].ptr_type != PTR_TO_CTX)
4364 			continue;
4365 
4366 		ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size;
4367 		size = BPF_LDST_BYTES(insn);
4368 
4369 		/* If the read access is a narrower load of the field,
4370 		 * convert to a 4/8-byte load, to minimum program type specific
4371 		 * convert_ctx_access changes. If conversion is successful,
4372 		 * we will apply proper mask to the result.
4373 		 */
4374 		is_narrower_load = size < ctx_field_size;
4375 		if (is_narrower_load) {
4376 			u32 off = insn->off;
4377 			u8 size_code;
4378 
4379 			if (type == BPF_WRITE) {
4380 				verbose(env, "bpf verifier narrow ctx access misconfigured\n");
4381 				return -EINVAL;
4382 			}
4383 
4384 			size_code = BPF_H;
4385 			if (ctx_field_size == 4)
4386 				size_code = BPF_W;
4387 			else if (ctx_field_size == 8)
4388 				size_code = BPF_DW;
4389 
4390 			insn->off = off & ~(ctx_field_size - 1);
4391 			insn->code = BPF_LDX | BPF_MEM | size_code;
4392 		}
4393 
4394 		target_size = 0;
4395 		cnt = ops->convert_ctx_access(type, insn, insn_buf, env->prog,
4396 					      &target_size);
4397 		if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) ||
4398 		    (ctx_field_size && !target_size)) {
4399 			verbose(env, "bpf verifier is misconfigured\n");
4400 			return -EINVAL;
4401 		}
4402 
4403 		if (is_narrower_load && size < target_size) {
4404 			if (ctx_field_size <= 4)
4405 				insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
4406 								(1 << size * 8) - 1);
4407 			else
4408 				insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg,
4409 								(1 << size * 8) - 1);
4410 		}
4411 
4412 		new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
4413 		if (!new_prog)
4414 			return -ENOMEM;
4415 
4416 		delta += cnt - 1;
4417 
4418 		/* keep walking new program and skip insns we just inserted */
4419 		env->prog = new_prog;
4420 		insn      = new_prog->insnsi + i + delta;
4421 	}
4422 
4423 	return 0;
4424 }
4425 
4426 /* fixup insn->imm field of bpf_call instructions
4427  * and inline eligible helpers as explicit sequence of BPF instructions
4428  *
4429  * this function is called after eBPF program passed verification
4430  */
4431 static int fixup_bpf_calls(struct bpf_verifier_env *env)
4432 {
4433 	struct bpf_prog *prog = env->prog;
4434 	struct bpf_insn *insn = prog->insnsi;
4435 	const struct bpf_func_proto *fn;
4436 	const int insn_cnt = prog->len;
4437 	struct bpf_insn insn_buf[16];
4438 	struct bpf_prog *new_prog;
4439 	struct bpf_map *map_ptr;
4440 	int i, cnt, delta = 0;
4441 
4442 	for (i = 0; i < insn_cnt; i++, insn++) {
4443 		if (insn->code != (BPF_JMP | BPF_CALL))
4444 			continue;
4445 
4446 		if (insn->imm == BPF_FUNC_get_route_realm)
4447 			prog->dst_needed = 1;
4448 		if (insn->imm == BPF_FUNC_get_prandom_u32)
4449 			bpf_user_rnd_init_once();
4450 		if (insn->imm == BPF_FUNC_tail_call) {
4451 			/* If we tail call into other programs, we
4452 			 * cannot make any assumptions since they can
4453 			 * be replaced dynamically during runtime in
4454 			 * the program array.
4455 			 */
4456 			prog->cb_access = 1;
4457 			env->prog->aux->stack_depth = MAX_BPF_STACK;
4458 
4459 			/* mark bpf_tail_call as different opcode to avoid
4460 			 * conditional branch in the interpeter for every normal
4461 			 * call and to prevent accidental JITing by JIT compiler
4462 			 * that doesn't support bpf_tail_call yet
4463 			 */
4464 			insn->imm = 0;
4465 			insn->code = BPF_JMP | BPF_TAIL_CALL;
4466 
4467 			/* instead of changing every JIT dealing with tail_call
4468 			 * emit two extra insns:
4469 			 * if (index >= max_entries) goto out;
4470 			 * index &= array->index_mask;
4471 			 * to avoid out-of-bounds cpu speculation
4472 			 */
4473 			map_ptr = env->insn_aux_data[i + delta].map_ptr;
4474 			if (map_ptr == BPF_MAP_PTR_POISON) {
4475 				verbose(env, "tail_call obusing map_ptr\n");
4476 				return -EINVAL;
4477 			}
4478 			if (!map_ptr->unpriv_array)
4479 				continue;
4480 			insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3,
4481 						  map_ptr->max_entries, 2);
4482 			insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3,
4483 						    container_of(map_ptr,
4484 								 struct bpf_array,
4485 								 map)->index_mask);
4486 			insn_buf[2] = *insn;
4487 			cnt = 3;
4488 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
4489 			if (!new_prog)
4490 				return -ENOMEM;
4491 
4492 			delta    += cnt - 1;
4493 			env->prog = prog = new_prog;
4494 			insn      = new_prog->insnsi + i + delta;
4495 			continue;
4496 		}
4497 
4498 		/* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup
4499 		 * handlers are currently limited to 64 bit only.
4500 		 */
4501 		if (ebpf_jit_enabled() && BITS_PER_LONG == 64 &&
4502 		    insn->imm == BPF_FUNC_map_lookup_elem) {
4503 			map_ptr = env->insn_aux_data[i + delta].map_ptr;
4504 			if (map_ptr == BPF_MAP_PTR_POISON ||
4505 			    !map_ptr->ops->map_gen_lookup)
4506 				goto patch_call_imm;
4507 
4508 			cnt = map_ptr->ops->map_gen_lookup(map_ptr, insn_buf);
4509 			if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
4510 				verbose(env, "bpf verifier is misconfigured\n");
4511 				return -EINVAL;
4512 			}
4513 
4514 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf,
4515 						       cnt);
4516 			if (!new_prog)
4517 				return -ENOMEM;
4518 
4519 			delta += cnt - 1;
4520 
4521 			/* keep walking new program and skip insns we just inserted */
4522 			env->prog = prog = new_prog;
4523 			insn      = new_prog->insnsi + i + delta;
4524 			continue;
4525 		}
4526 
4527 		if (insn->imm == BPF_FUNC_redirect_map) {
4528 			/* Note, we cannot use prog directly as imm as subsequent
4529 			 * rewrites would still change the prog pointer. The only
4530 			 * stable address we can use is aux, which also works with
4531 			 * prog clones during blinding.
4532 			 */
4533 			u64 addr = (unsigned long)prog->aux;
4534 			struct bpf_insn r4_ld[] = {
4535 				BPF_LD_IMM64(BPF_REG_4, addr),
4536 				*insn,
4537 			};
4538 			cnt = ARRAY_SIZE(r4_ld);
4539 
4540 			new_prog = bpf_patch_insn_data(env, i + delta, r4_ld, cnt);
4541 			if (!new_prog)
4542 				return -ENOMEM;
4543 
4544 			delta    += cnt - 1;
4545 			env->prog = prog = new_prog;
4546 			insn      = new_prog->insnsi + i + delta;
4547 		}
4548 patch_call_imm:
4549 		fn = env->ops->get_func_proto(insn->imm);
4550 		/* all functions that have prototype and verifier allowed
4551 		 * programs to call them, must be real in-kernel functions
4552 		 */
4553 		if (!fn->func) {
4554 			verbose(env,
4555 				"kernel subsystem misconfigured func %s#%d\n",
4556 				func_id_name(insn->imm), insn->imm);
4557 			return -EFAULT;
4558 		}
4559 		insn->imm = fn->func - __bpf_call_base;
4560 	}
4561 
4562 	return 0;
4563 }
4564 
4565 static void free_states(struct bpf_verifier_env *env)
4566 {
4567 	struct bpf_verifier_state_list *sl, *sln;
4568 	int i;
4569 
4570 	if (!env->explored_states)
4571 		return;
4572 
4573 	for (i = 0; i < env->prog->len; i++) {
4574 		sl = env->explored_states[i];
4575 
4576 		if (sl)
4577 			while (sl != STATE_LIST_MARK) {
4578 				sln = sl->next;
4579 				free_verifier_state(&sl->state, false);
4580 				kfree(sl);
4581 				sl = sln;
4582 			}
4583 	}
4584 
4585 	kfree(env->explored_states);
4586 }
4587 
4588 int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
4589 {
4590 	struct bpf_verifier_env *env;
4591 	struct bpf_verifer_log *log;
4592 	int ret = -EINVAL;
4593 
4594 	/* no program is valid */
4595 	if (ARRAY_SIZE(bpf_verifier_ops) == 0)
4596 		return -EINVAL;
4597 
4598 	/* 'struct bpf_verifier_env' can be global, but since it's not small,
4599 	 * allocate/free it every time bpf_check() is called
4600 	 */
4601 	env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
4602 	if (!env)
4603 		return -ENOMEM;
4604 	log = &env->log;
4605 
4606 	env->insn_aux_data = vzalloc(sizeof(struct bpf_insn_aux_data) *
4607 				     (*prog)->len);
4608 	ret = -ENOMEM;
4609 	if (!env->insn_aux_data)
4610 		goto err_free_env;
4611 	env->prog = *prog;
4612 	env->ops = bpf_verifier_ops[env->prog->type];
4613 
4614 	/* grab the mutex to protect few globals used by verifier */
4615 	mutex_lock(&bpf_verifier_lock);
4616 
4617 	if (attr->log_level || attr->log_buf || attr->log_size) {
4618 		/* user requested verbose verifier output
4619 		 * and supplied buffer to store the verification trace
4620 		 */
4621 		log->level = attr->log_level;
4622 		log->ubuf = (char __user *) (unsigned long) attr->log_buf;
4623 		log->len_total = attr->log_size;
4624 
4625 		ret = -EINVAL;
4626 		/* log attributes have to be sane */
4627 		if (log->len_total < 128 || log->len_total > UINT_MAX >> 8 ||
4628 		    !log->level || !log->ubuf)
4629 			goto err_unlock;
4630 	}
4631 
4632 	env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT);
4633 	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
4634 		env->strict_alignment = true;
4635 
4636 	if (env->prog->aux->offload) {
4637 		ret = bpf_prog_offload_verifier_prep(env);
4638 		if (ret)
4639 			goto err_unlock;
4640 	}
4641 
4642 	ret = replace_map_fd_with_map_ptr(env);
4643 	if (ret < 0)
4644 		goto skip_full_check;
4645 
4646 	env->explored_states = kcalloc(env->prog->len,
4647 				       sizeof(struct bpf_verifier_state_list *),
4648 				       GFP_USER);
4649 	ret = -ENOMEM;
4650 	if (!env->explored_states)
4651 		goto skip_full_check;
4652 
4653 	ret = check_cfg(env);
4654 	if (ret < 0)
4655 		goto skip_full_check;
4656 
4657 	env->allow_ptr_leaks = capable(CAP_SYS_ADMIN);
4658 
4659 	ret = do_check(env);
4660 	if (env->cur_state) {
4661 		free_verifier_state(env->cur_state, true);
4662 		env->cur_state = NULL;
4663 	}
4664 
4665 skip_full_check:
4666 	while (!pop_stack(env, NULL, NULL));
4667 	free_states(env);
4668 
4669 	if (ret == 0)
4670 		sanitize_dead_code(env);
4671 
4672 	if (ret == 0)
4673 		/* program is valid, convert *(u32*)(ctx + off) accesses */
4674 		ret = convert_ctx_accesses(env);
4675 
4676 	if (ret == 0)
4677 		ret = fixup_bpf_calls(env);
4678 
4679 	if (log->level && bpf_verifier_log_full(log))
4680 		ret = -ENOSPC;
4681 	if (log->level && !log->ubuf) {
4682 		ret = -EFAULT;
4683 		goto err_release_maps;
4684 	}
4685 
4686 	if (ret == 0 && env->used_map_cnt) {
4687 		/* if program passed verifier, update used_maps in bpf_prog_info */
4688 		env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt,
4689 							  sizeof(env->used_maps[0]),
4690 							  GFP_KERNEL);
4691 
4692 		if (!env->prog->aux->used_maps) {
4693 			ret = -ENOMEM;
4694 			goto err_release_maps;
4695 		}
4696 
4697 		memcpy(env->prog->aux->used_maps, env->used_maps,
4698 		       sizeof(env->used_maps[0]) * env->used_map_cnt);
4699 		env->prog->aux->used_map_cnt = env->used_map_cnt;
4700 
4701 		/* program is valid. Convert pseudo bpf_ld_imm64 into generic
4702 		 * bpf_ld_imm64 instructions
4703 		 */
4704 		convert_pseudo_ld_imm64(env);
4705 	}
4706 
4707 err_release_maps:
4708 	if (!env->prog->aux->used_maps)
4709 		/* if we didn't copy map pointers into bpf_prog_info, release
4710 		 * them now. Otherwise free_bpf_prog_info() will release them.
4711 		 */
4712 		release_maps(env);
4713 	*prog = env->prog;
4714 err_unlock:
4715 	mutex_unlock(&bpf_verifier_lock);
4716 	vfree(env->insn_aux_data);
4717 err_free_env:
4718 	kfree(env);
4719 	return ret;
4720 }
4721