xref: /openbmc/linux/kernel/bpf/verifier.c (revision 2eb3ed33e55d003d721d4d1a5e72fe323c12b4c0)
1 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
2  * Copyright (c) 2016 Facebook
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of version 2 of the GNU General Public
6  * License as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11  * General Public License for more details.
12  */
13 #include <linux/kernel.h>
14 #include <linux/types.h>
15 #include <linux/slab.h>
16 #include <linux/bpf.h>
17 #include <linux/bpf_verifier.h>
18 #include <linux/filter.h>
19 #include <net/netlink.h>
20 #include <linux/file.h>
21 #include <linux/vmalloc.h>
22 #include <linux/stringify.h>
23 
24 #include "disasm.h"
25 
26 static const struct bpf_verifier_ops * const bpf_verifier_ops[] = {
27 #define BPF_PROG_TYPE(_id, _name) \
28 	[_id] = & _name ## _verifier_ops,
29 #define BPF_MAP_TYPE(_id, _ops)
30 #include <linux/bpf_types.h>
31 #undef BPF_PROG_TYPE
32 #undef BPF_MAP_TYPE
33 };
34 
35 /* bpf_check() is a static code analyzer that walks eBPF program
36  * instruction by instruction and updates register/stack state.
37  * All paths of conditional branches are analyzed until 'bpf_exit' insn.
38  *
39  * The first pass is depth-first-search to check that the program is a DAG.
40  * It rejects the following programs:
41  * - programs larger than BPF_MAXINSNS insns
42  * - programs containing a loop (detected via a back-edge)
43  * - programs with unreachable insns (the CFG must not be a forest: a program is one function)
44  * - programs with out-of-bounds or malformed jumps
45  * The second pass descends all possible paths from the 1st insn.
46  * Since it analyzes every path through the program, the length of the
47  * analysis is limited to 128k insns, which may be hit even if the total
48  * number of insns is less than 4K but there are too many branches that change stack/regs.
49  * The number of 'branches to be analyzed' is limited to 1k.
50  *
51  * On entry to each instruction, each register has a type, and the instruction
52  * changes the types of the registers depending on instruction semantics.
53  * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is
54  * copied to R1.
55  *
56  * All registers are 64-bit.
57  * R0 - return register
58  * R1-R5 argument passing registers
59  * R6-R9 callee saved registers
60  * R10 - frame pointer read-only
61  *
62  * At the start of BPF program the register R1 contains a pointer to bpf_context
63  * and has type PTR_TO_CTX.
64  *
65  * Verifier tracks arithmetic operations on pointers. For example, given:
66  *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
67  *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
68  * 1st insn copies R10 (which has type PTR_TO_STACK) into R1
69  * and 2nd arithmetic instruction is pattern matched to recognize
70  * that it wants to construct a pointer to some element within stack.
71  * So after 2nd insn, the register R1 has type PTR_TO_STACK
72  * (and -20 constant is saved for further stack bounds checking).
73  * Meaning that this reg is a pointer to stack plus known immediate constant.
74  *
75  * Most of the time the registers have SCALAR_VALUE type, which
76  * means the register has some value, but it's not a valid pointer.
77  * (like pointer plus pointer becomes SCALAR_VALUE type)
78  *
79  * When verifier sees load or store instructions the type of base register
80  * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK or a packet pointer.
81  * These are the pointer types recognized by the check_mem_access() function.
82  *
83  * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value'
84  * and the range of [ptr, ptr + map's value_size) is accessible.
85  *
86  * Registers used to pass values to function calls are checked against
87  * function argument constraints.
88  *
89  * ARG_PTR_TO_MAP_KEY is one such argument constraint.
90  * It means that the register type passed to this function must be
91  * PTR_TO_STACK and it will be used inside the function as
92  * 'pointer to map element key'.
93  *
94  * For example the argument constraints for bpf_map_lookup_elem():
95  *   .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
96  *   .arg1_type = ARG_CONST_MAP_PTR,
97  *   .arg2_type = ARG_PTR_TO_MAP_KEY,
98  *
99  * ret_type says that this function returns 'pointer to map elem value or null'.
100  * The function expects the 1st argument to be a const pointer to 'struct bpf_map'
101  * and the 2nd argument to be a pointer to the stack, which will be used inside
102  * the helper function as a pointer to the map element key.
103  *
104  * On the kernel side the helper function looks like:
105  * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
106  * {
107  *    struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
108  *    void *key = (void *) (unsigned long) r2;
109  *    void *value;
110  *
111  *    here kernel can access 'key' and 'map' pointers safely, knowing that
112  *    [key, key + map->key_size) bytes are valid and were initialized on
113  *    the stack of eBPF program.
114  * }
115  *
116  * Corresponding eBPF program may look like:
117  *    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),  // after this insn R2 type is PTR_TO_STACK (off=0)
118  *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK (off=-4)
119  *    BPF_LD_MAP_FD(BPF_REG_1, map_fd),      // after this insn R1 type is CONST_PTR_TO_MAP
120  *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
121  * here the verifier looks at the prototype of map_lookup_elem() and sees:
122  * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok.
123  * Now the verifier knows that this map has a key of R1->map_ptr->key_size bytes.
124  *
125  * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far.
126  * Now the verifier checks that [R2, R2 + map's key_size) are within stack limits
127  * and were initialized prior to this call.
128  * If it's ok, then the verifier allows this BPF_CALL insn and looks at
129  * .ret_type, which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets
130  * R0->type = PTR_TO_MAP_VALUE_OR_NULL, which means the bpf_map_lookup_elem()
131  * function returns either a pointer to the map value or NULL.
132  *
133  * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
134  * insn, the register holding that pointer in the true branch changes state to
135  * PTR_TO_MAP_VALUE and the same register changes state to a known-zero
136  * SCALAR_VALUE in the false branch. See check_cond_jmp_op().
137  *
138  * After the call R0 is set to return type of the function and registers R1-R5
139  * are set to NOT_INIT to indicate that they are no longer readable.
140  */
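
/* A concrete, purely illustrative example tying the above together. It
 * assumes a map with 4-byte keys and 8-byte values; map_fd stands in for
 * a real map file descriptor:
 *
 *    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),   // R2 type is PTR_TO_STACK
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),  // R2 points to fp-4
 *    BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),     // initialize the key slot
 *    BPF_LD_MAP_FD(BPF_REG_1, map_fd),       // R1 type is CONST_PTR_TO_MAP
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 *                                    // R0 type is PTR_TO_MAP_VALUE_OR_NULL
 *    BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),  // NULL check
 *    BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),   // safe: R0 is PTR_TO_MAP_VALUE
 *    BPF_MOV64_IMM(BPF_REG_0, 0),
 *    BPF_EXIT_INSN(),
 *
 * Without the BPF_JEQ null check the store through R0 is rejected with
 * "R0 invalid mem access 'map_value_or_null'".
 */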
141 
142 /* verifier_state + insn_idx are pushed to stack when branch is encountered */
143 struct bpf_verifier_stack_elem {
144 	/* verifier state is 'st'
145 	 * before processing instruction 'insn_idx'
146 	 * and after processing instruction 'prev_insn_idx'
147 	 */
148 	struct bpf_verifier_state st;
149 	int insn_idx;
150 	int prev_insn_idx;
151 	struct bpf_verifier_stack_elem *next;
152 };
153 
154 #define BPF_COMPLEXITY_LIMIT_INSNS	131072
155 #define BPF_COMPLEXITY_LIMIT_STACK	1024
156 
157 #define BPF_MAP_PTR_POISON ((void *)0xeB9F + POISON_POINTER_DELTA)
158 
159 struct bpf_call_arg_meta {
160 	struct bpf_map *map_ptr;
161 	bool raw_mode;
162 	bool pkt_access;
163 	int regno;
164 	int access_size;
165 };
166 
167 static DEFINE_MUTEX(bpf_verifier_lock);
168 
169 /* log_level controls verbosity level of eBPF verifier.
170  * verbose() is used to dump the verification trace to the log, so the user
171  * can figure out what's wrong with the program
172  */
173 static __printf(2, 3) void verbose(struct bpf_verifier_env *env,
174 				   const char *fmt, ...)
175 {
176 	struct bpf_verifer_log *log = &env->log;
177 	unsigned int n;
178 	va_list args;
179 
180 	if (!log->level || !log->ubuf || bpf_verifier_log_full(log))
181 		return;
182 
183 	va_start(args, fmt);
184 	n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args);
185 	va_end(args);
186 
187 	WARN_ONCE(n >= BPF_VERIFIER_TMP_LOG_SIZE - 1,
188 		  "verifier log line truncated - local buffer too short\n");
189 
190 	n = min(log->len_total - log->len_used - 1, n);
191 	log->kbuf[n] = '\0';
192 
193 	if (!copy_to_user(log->ubuf + log->len_used, log->kbuf, n + 1))
194 		log->len_used += n;
195 	else
196 		log->ubuf = NULL;
197 }
198 
199 static bool type_is_pkt_pointer(enum bpf_reg_type type)
200 {
201 	return type == PTR_TO_PACKET ||
202 	       type == PTR_TO_PACKET_META;
203 }
204 
205 /* string representation of 'enum bpf_reg_type' */
206 static const char * const reg_type_str[] = {
207 	[NOT_INIT]		= "?",
208 	[SCALAR_VALUE]		= "inv",
209 	[PTR_TO_CTX]		= "ctx",
210 	[CONST_PTR_TO_MAP]	= "map_ptr",
211 	[PTR_TO_MAP_VALUE]	= "map_value",
212 	[PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null",
213 	[PTR_TO_STACK]		= "fp",
214 	[PTR_TO_PACKET]		= "pkt",
215 	[PTR_TO_PACKET_META]	= "pkt_meta",
216 	[PTR_TO_PACKET_END]	= "pkt_end",
217 };
218 
219 static void print_verifier_state(struct bpf_verifier_env *env,
220 				 struct bpf_verifier_state *state)
221 {
222 	struct bpf_reg_state *reg;
223 	enum bpf_reg_type t;
224 	int i;
225 
226 	for (i = 0; i < MAX_BPF_REG; i++) {
227 		reg = &state->regs[i];
228 		t = reg->type;
229 		if (t == NOT_INIT)
230 			continue;
231 		verbose(env, " R%d=%s", i, reg_type_str[t]);
232 		if ((t == SCALAR_VALUE || t == PTR_TO_STACK) &&
233 		    tnum_is_const(reg->var_off)) {
234 			/* reg->off should be 0 for SCALAR_VALUE */
235 			verbose(env, "%lld", reg->var_off.value + reg->off);
236 		} else {
237 			verbose(env, "(id=%d", reg->id);
238 			if (t != SCALAR_VALUE)
239 				verbose(env, ",off=%d", reg->off);
240 			if (type_is_pkt_pointer(t))
241 				verbose(env, ",r=%d", reg->range);
242 			else if (t == CONST_PTR_TO_MAP ||
243 				 t == PTR_TO_MAP_VALUE ||
244 				 t == PTR_TO_MAP_VALUE_OR_NULL)
245 				verbose(env, ",ks=%d,vs=%d",
246 					reg->map_ptr->key_size,
247 					reg->map_ptr->value_size);
248 			if (tnum_is_const(reg->var_off)) {
249 				/* Typically an immediate SCALAR_VALUE, but
250 				 * could be a pointer whose offset is too big
251 				 * for reg->off
252 				 */
253 				verbose(env, ",imm=%llx", reg->var_off.value);
254 			} else {
255 				if (reg->smin_value != reg->umin_value &&
256 				    reg->smin_value != S64_MIN)
257 					verbose(env, ",smin_value=%lld",
258 						(long long)reg->smin_value);
259 				if (reg->smax_value != reg->umax_value &&
260 				    reg->smax_value != S64_MAX)
261 					verbose(env, ",smax_value=%lld",
262 						(long long)reg->smax_value);
263 				if (reg->umin_value != 0)
264 					verbose(env, ",umin_value=%llu",
265 						(unsigned long long)reg->umin_value);
266 				if (reg->umax_value != U64_MAX)
267 					verbose(env, ",umax_value=%llu",
268 						(unsigned long long)reg->umax_value);
269 				if (!tnum_is_unknown(reg->var_off)) {
270 					char tn_buf[48];
271 
272 					tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
273 					verbose(env, ",var_off=%s", tn_buf);
274 				}
275 			}
276 			verbose(env, ")");
277 		}
278 	}
279 	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
280 		if (state->stack[i].slot_type[0] == STACK_SPILL)
281 			verbose(env, " fp%d=%s",
282 				-MAX_BPF_STACK + i * BPF_REG_SIZE,
283 				reg_type_str[state->stack[i].spilled_ptr.type]);
284 	}
285 	verbose(env, "\n");
286 }
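
/* As an illustration of the format above: at program entry, with only R1
 * (ctx) and R10 (frame pointer) initialized, print_verifier_state() would
 * emit a line like
 *   R1=ctx(id=0,off=0,imm=0) R10=fp0
 * and a register known to hold the constant 42 would show up as R0=inv42.
 */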
287 
288 static int copy_stack_state(struct bpf_verifier_state *dst,
289 			    const struct bpf_verifier_state *src)
290 {
291 	if (!src->stack)
292 		return 0;
293 	if (WARN_ON_ONCE(dst->allocated_stack < src->allocated_stack)) {
294 		/* internal bug, make state invalid to reject the program */
295 		memset(dst, 0, sizeof(*dst));
296 		return -EFAULT;
297 	}
298 	memcpy(dst->stack, src->stack,
299 	       sizeof(*src->stack) * (src->allocated_stack / BPF_REG_SIZE));
300 	return 0;
301 }
302 
303 /* do_check() starts with a zero-sized stack in struct bpf_verifier_state to
304  * make it consume a minimal amount of memory. A stack access by the program
305  * makes check_stack_write() call into realloc_verifier_state() to grow the
306  * stack size. Note there is a non-zero 'parent' pointer inside
307  * bpf_verifier_state which this function copies over; it points to the
308  * previous bpf_verifier_state, which is never reallocated.
309  */
310 static int realloc_verifier_state(struct bpf_verifier_state *state, int size,
311 				  bool copy_old)
312 {
313 	u32 old_size = state->allocated_stack;
314 	struct bpf_stack_state *new_stack;
315 	int slot = size / BPF_REG_SIZE;
316 
317 	if (size <= old_size || !size) {
318 		if (copy_old)
319 			return 0;
320 		state->allocated_stack = slot * BPF_REG_SIZE;
321 		if (!size && old_size) {
322 			kfree(state->stack);
323 			state->stack = NULL;
324 		}
325 		return 0;
326 	}
327 	new_stack = kmalloc_array(slot, sizeof(struct bpf_stack_state),
328 				  GFP_KERNEL);
329 	if (!new_stack)
330 		return -ENOMEM;
331 	if (copy_old) {
332 		if (state->stack)
333 			memcpy(new_stack, state->stack,
334 			       sizeof(*new_stack) * (old_size / BPF_REG_SIZE));
335 		memset(new_stack + old_size / BPF_REG_SIZE, 0,
336 		       sizeof(*new_stack) * (size - old_size) / BPF_REG_SIZE);
337 	}
338 	state->allocated_stack = slot * BPF_REG_SIZE;
339 	kfree(state->stack);
340 	state->stack = new_stack;
341 	return 0;
342 }
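
/* For example, the first 4-byte write to fp-12 reaches this function as
 * realloc_verifier_state(state, round_up(12, BPF_REG_SIZE), true) from
 * check_stack_write(), growing allocated_stack from 0 to 16 bytes
 * (two 8-byte slots).
 */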
343 
344 static void free_verifier_state(struct bpf_verifier_state *state,
345 				bool free_self)
346 {
347 	kfree(state->stack);
348 	if (free_self)
349 		kfree(state);
350 }
351 
352 /* copy verifier state from src to dst growing dst stack space
353  * when necessary to accommodate larger src stack
354  */
355 static int copy_verifier_state(struct bpf_verifier_state *dst,
356 			       const struct bpf_verifier_state *src)
357 {
358 	int err;
359 
360 	err = realloc_verifier_state(dst, src->allocated_stack, false);
361 	if (err)
362 		return err;
363 	memcpy(dst, src, offsetof(struct bpf_verifier_state, allocated_stack));
364 	return copy_stack_state(dst, src);
365 }
366 
367 static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx,
368 		     int *insn_idx)
369 {
370 	struct bpf_verifier_state *cur = env->cur_state;
371 	struct bpf_verifier_stack_elem *elem, *head = env->head;
372 	int err;
373 
374 	if (env->head == NULL)
375 		return -ENOENT;
376 
377 	if (cur) {
378 		err = copy_verifier_state(cur, &head->st);
379 		if (err)
380 			return err;
381 	}
382 	if (insn_idx)
383 		*insn_idx = head->insn_idx;
384 	if (prev_insn_idx)
385 		*prev_insn_idx = head->prev_insn_idx;
386 	elem = head->next;
387 	free_verifier_state(&head->st, false);
388 	kfree(head);
389 	env->head = elem;
390 	env->stack_size--;
391 	return 0;
392 }
393 
394 static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
395 					     int insn_idx, int prev_insn_idx)
396 {
397 	struct bpf_verifier_state *cur = env->cur_state;
398 	struct bpf_verifier_stack_elem *elem;
399 	int err;
400 
401 	elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL);
402 	if (!elem)
403 		goto err;
404 
405 	elem->insn_idx = insn_idx;
406 	elem->prev_insn_idx = prev_insn_idx;
407 	elem->next = env->head;
408 	env->head = elem;
409 	env->stack_size++;
410 	err = copy_verifier_state(&elem->st, cur);
411 	if (err)
412 		goto err;
413 	if (env->stack_size > BPF_COMPLEXITY_LIMIT_STACK) {
414 		verbose(env, "BPF program is too complex\n");
415 		goto err;
416 	}
417 	return &elem->st;
418 err:
419 	/* pop all elements and return */
420 	while (!pop_stack(env, NULL, NULL));
421 	return NULL;
422 }
423 
424 #define CALLER_SAVED_REGS 6
425 static const int caller_saved[CALLER_SAVED_REGS] = {
426 	BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
427 };
428 
429 static void __mark_reg_not_init(struct bpf_reg_state *reg);
430 
431 /* Mark the unknown part of a register (variable offset or scalar value) as
432  * known to have the value @imm.
433  */
434 static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm)
435 {
436 	reg->id = 0;
437 	reg->var_off = tnum_const(imm);
438 	reg->smin_value = (s64)imm;
439 	reg->smax_value = (s64)imm;
440 	reg->umin_value = imm;
441 	reg->umax_value = imm;
442 }
443 
444 /* Mark the 'variable offset' part of a register as zero.  This should be
445  * used only on registers holding a pointer type.
446  */
447 static void __mark_reg_known_zero(struct bpf_reg_state *reg)
448 {
449 	__mark_reg_known(reg, 0);
450 }
451 
452 static void mark_reg_known_zero(struct bpf_verifier_env *env,
453 				struct bpf_reg_state *regs, u32 regno)
454 {
455 	if (WARN_ON(regno >= MAX_BPF_REG)) {
456 		verbose(env, "mark_reg_known_zero(regs, %u)\n", regno);
457 		/* Something bad happened, let's kill all regs */
458 		for (regno = 0; regno < MAX_BPF_REG; regno++)
459 			__mark_reg_not_init(regs + regno);
460 		return;
461 	}
462 	__mark_reg_known_zero(regs + regno);
463 }
464 
465 static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg)
466 {
467 	return type_is_pkt_pointer(reg->type);
468 }
469 
470 static bool reg_is_pkt_pointer_any(const struct bpf_reg_state *reg)
471 {
472 	return reg_is_pkt_pointer(reg) ||
473 	       reg->type == PTR_TO_PACKET_END;
474 }
475 
476 /* Unmodified PTR_TO_PACKET[_META,_END] register from ctx access. */
477 static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg,
478 				    enum bpf_reg_type which)
479 {
480 	/* The register can already have a range from prior markings.
481 	 * This is fine as long as it hasn't been advanced from its
482 	 * origin.
483 	 */
484 	return reg->type == which &&
485 	       reg->id == 0 &&
486 	       reg->off == 0 &&
487 	       tnum_equals_const(reg->var_off, 0);
488 }
489 
490 /* Attempts to improve min/max values based on var_off information */
491 static void __update_reg_bounds(struct bpf_reg_state *reg)
492 {
493 	/* min signed is max(sign bit) | min(other bits) */
494 	reg->smin_value = max_t(s64, reg->smin_value,
495 				reg->var_off.value | (reg->var_off.mask & S64_MIN));
496 	/* max signed is min(sign bit) | max(other bits) */
497 	reg->smax_value = min_t(s64, reg->smax_value,
498 				reg->var_off.value | (reg->var_off.mask & S64_MAX));
499 	reg->umin_value = max(reg->umin_value, reg->var_off.value);
500 	reg->umax_value = min(reg->umax_value,
501 			      reg->var_off.value | reg->var_off.mask);
502 }
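
/* A worked example of the above: for var_off {value=0, mask=3} (only bits
 * 0 and 1 unknown), umin_value is raised to at least 0 and umax_value is
 * capped at value | mask = 3, so a prior range of [0, 100] tightens
 * to [0, 3].
 */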
503 
504 /* Uses signed min/max values to inform unsigned, and vice-versa */
505 static void __reg_deduce_bounds(struct bpf_reg_state *reg)
506 {
507 	/* Learn sign from signed bounds.
508 	 * If we cannot cross the sign boundary, then signed and unsigned bounds
509 	 * are the same, so combine.  This works even in the negative case, e.g.
510 	 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
511 	 */
512 	if (reg->smin_value >= 0 || reg->smax_value < 0) {
513 		reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
514 							  reg->umin_value);
515 		reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
516 							  reg->umax_value);
517 		return;
518 	}
519 	/* Learn sign from unsigned bounds.  Signed bounds cross the sign
520 	 * boundary, so we must be careful.
521 	 */
522 	if ((s64)reg->umax_value >= 0) {
523 		/* Positive.  We can't learn anything from the smin, but smax
524 		 * is positive, hence safe.
525 		 */
526 		reg->smin_value = reg->umin_value;
527 		reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
528 							  reg->umax_value);
529 	} else if ((s64)reg->umin_value < 0) {
530 		/* Negative.  We can't learn anything from the smax, but smin
531 		 * is negative, hence safe.
532 		 */
533 		reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
534 							  reg->umin_value);
535 		reg->smax_value = reg->umax_value;
536 	}
537 }
538 
539 /* Attempts to improve var_off based on unsigned min/max information */
540 static void __reg_bound_offset(struct bpf_reg_state *reg)
541 {
542 	reg->var_off = tnum_intersect(reg->var_off,
543 				      tnum_range(reg->umin_value,
544 						 reg->umax_value));
545 }
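
/* e.g. known bounds [16, 31] intersect var_off with tnum_range(16, 31) =
 * {value=16, mask=15}, proving that bit 4 is set and bits 5..63 are clear.
 */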
546 
547 /* Reset the min/max bounds of a register */
548 static void __mark_reg_unbounded(struct bpf_reg_state *reg)
549 {
550 	reg->smin_value = S64_MIN;
551 	reg->smax_value = S64_MAX;
552 	reg->umin_value = 0;
553 	reg->umax_value = U64_MAX;
554 }
555 
556 /* Mark a register as having a completely unknown (scalar) value. */
557 static void __mark_reg_unknown(struct bpf_reg_state *reg)
558 {
559 	reg->type = SCALAR_VALUE;
560 	reg->id = 0;
561 	reg->off = 0;
562 	reg->var_off = tnum_unknown;
563 	__mark_reg_unbounded(reg);
564 }
565 
566 static void mark_reg_unknown(struct bpf_verifier_env *env,
567 			     struct bpf_reg_state *regs, u32 regno)
568 {
569 	if (WARN_ON(regno >= MAX_BPF_REG)) {
570 		verbose(env, "mark_reg_unknown(regs, %u)\n", regno);
571 		/* Something bad happened, let's kill all regs */
572 		for (regno = 0; regno < MAX_BPF_REG; regno++)
573 			__mark_reg_not_init(regs + regno);
574 		return;
575 	}
576 	__mark_reg_unknown(regs + regno);
577 }
578 
579 static void __mark_reg_not_init(struct bpf_reg_state *reg)
580 {
581 	__mark_reg_unknown(reg);
582 	reg->type = NOT_INIT;
583 }
584 
585 static void mark_reg_not_init(struct bpf_verifier_env *env,
586 			      struct bpf_reg_state *regs, u32 regno)
587 {
588 	if (WARN_ON(regno >= MAX_BPF_REG)) {
589 		verbose(env, "mark_reg_not_init(regs, %u)\n", regno);
590 		/* Something bad happened, let's kill all regs */
591 		for (regno = 0; regno < MAX_BPF_REG; regno++)
592 			__mark_reg_not_init(regs + regno);
593 		return;
594 	}
595 	__mark_reg_not_init(regs + regno);
596 }
597 
598 static void init_reg_state(struct bpf_verifier_env *env,
599 			   struct bpf_reg_state *regs)
600 {
601 	int i;
602 
603 	for (i = 0; i < MAX_BPF_REG; i++) {
604 		mark_reg_not_init(env, regs, i);
605 		regs[i].live = REG_LIVE_NONE;
606 	}
607 
608 	/* frame pointer */
609 	regs[BPF_REG_FP].type = PTR_TO_STACK;
610 	mark_reg_known_zero(env, regs, BPF_REG_FP);
611 
612 	/* 1st arg to a function */
613 	regs[BPF_REG_1].type = PTR_TO_CTX;
614 	mark_reg_known_zero(env, regs, BPF_REG_1);
615 }
616 
617 enum reg_arg_type {
618 	SRC_OP,		/* register is used as source operand */
619 	DST_OP,		/* register is used as destination operand */
620 	DST_OP_NO_MARK	/* same as above, check only, don't mark */
621 };
622 
623 static void mark_reg_read(const struct bpf_verifier_state *state, u32 regno)
624 {
625 	struct bpf_verifier_state *parent = state->parent;
626 
627 	if (regno == BPF_REG_FP)
628 		/* We don't need to worry about FP liveness because it's read-only */
629 		return;
630 
631 	while (parent) {
632 		/* if read wasn't screened by an earlier write ... */
633 		if (state->regs[regno].live & REG_LIVE_WRITTEN)
634 			break;
635 		/* ... then we depend on parent's value */
636 		parent->regs[regno].live |= REG_LIVE_READ;
637 		state = parent;
638 		parent = state->parent;
639 	}
640 }
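
/* To illustrate the walk above: a read of R6 in a state that did not itself
 * write R6 marks R6 as REG_LIVE_READ in each ancestor, up to and including
 * the first ancestor whose R6 carries REG_LIVE_WRITTEN; states above that
 * one cannot have influenced the value that was read.
 */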
641 
642 static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
643 			 enum reg_arg_type t)
644 {
645 	struct bpf_reg_state *regs = env->cur_state->regs;
646 
647 	if (regno >= MAX_BPF_REG) {
648 		verbose(env, "R%d is invalid\n", regno);
649 		return -EINVAL;
650 	}
651 
652 	if (t == SRC_OP) {
653 		/* check whether register used as source operand can be read */
654 		if (regs[regno].type == NOT_INIT) {
655 			verbose(env, "R%d !read_ok\n", regno);
656 			return -EACCES;
657 		}
658 		mark_reg_read(env->cur_state, regno);
659 	} else {
660 		/* check whether register used as dest operand can be written to */
661 		if (regno == BPF_REG_FP) {
662 			verbose(env, "frame pointer is read only\n");
663 			return -EACCES;
664 		}
665 		regs[regno].live |= REG_LIVE_WRITTEN;
666 		if (t == DST_OP)
667 			mark_reg_unknown(env, regs, regno);
668 	}
669 	return 0;
670 }
671 
672 static bool is_spillable_regtype(enum bpf_reg_type type)
673 {
674 	switch (type) {
675 	case PTR_TO_MAP_VALUE:
676 	case PTR_TO_MAP_VALUE_OR_NULL:
677 	case PTR_TO_STACK:
678 	case PTR_TO_CTX:
679 	case PTR_TO_PACKET:
680 	case PTR_TO_PACKET_META:
681 	case PTR_TO_PACKET_END:
682 	case CONST_PTR_TO_MAP:
683 		return true;
684 	default:
685 		return false;
686 	}
687 }
688 
689 /* check_stack_read/write functions track spill/fill of registers;
690  * stack boundary and alignment are checked in check_mem_access()
691  */
692 static int check_stack_write(struct bpf_verifier_env *env,
693 			     struct bpf_verifier_state *state, int off,
694 			     int size, int value_regno)
695 {
696 	int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err;
697 
698 	err = realloc_verifier_state(state, round_up(slot + 1, BPF_REG_SIZE),
699 				     true);
700 	if (err)
701 		return err;
702 	/* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
703 	 * so it's aligned access and [off, off + size) are within stack limits
704 	 */
705 	if (!env->allow_ptr_leaks &&
706 	    state->stack[spi].slot_type[0] == STACK_SPILL &&
707 	    size != BPF_REG_SIZE) {
708 		verbose(env, "attempt to corrupt spilled pointer on stack\n");
709 		return -EACCES;
710 	}
711 
712 	if (value_regno >= 0 &&
713 	    is_spillable_regtype(state->regs[value_regno].type)) {
714 
715 		/* register containing pointer is being spilled into stack */
716 		if (size != BPF_REG_SIZE) {
717 			verbose(env, "invalid size of register spill\n");
718 			return -EACCES;
719 		}
720 
721 		/* save register state */
722 		state->stack[spi].spilled_ptr = state->regs[value_regno];
723 		state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
724 
725 		for (i = 0; i < BPF_REG_SIZE; i++)
726 			state->stack[spi].slot_type[i] = STACK_SPILL;
727 	} else {
728 		/* regular write of data into stack */
729 		state->stack[spi].spilled_ptr = (struct bpf_reg_state) {};
730 
731 		for (i = 0; i < size; i++)
732 			state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] =
733 				STACK_MISC;
734 	}
735 	return 0;
736 }
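
/* For example, spilling a pointer with *(u64 *)(r10 - 8) = r6 while R6 is
 * PTR_TO_CTX saves R6's complete state in stack[0].spilled_ptr and marks
 * all eight slot_type bytes as STACK_SPILL; a 4-byte store of a scalar to
 * the same slot would instead mark the touched bytes as STACK_MISC.
 */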
737 
738 static void mark_stack_slot_read(const struct bpf_verifier_state *state, int slot)
739 {
740 	struct bpf_verifier_state *parent = state->parent;
741 
742 	while (parent) {
743 		/* if read wasn't screened by an earlier write ... */
744 		if (state->stack[slot].spilled_ptr.live & REG_LIVE_WRITTEN)
745 			break;
746 		/* ... then we depend on parent's value */
747 		parent->stack[slot].spilled_ptr.live |= REG_LIVE_READ;
748 		state = parent;
749 		parent = state->parent;
750 	}
751 }
752 
753 static int check_stack_read(struct bpf_verifier_env *env,
754 			    struct bpf_verifier_state *state, int off, int size,
755 			    int value_regno)
756 {
757 	int i, slot = -off - 1, spi = slot / BPF_REG_SIZE;
758 	u8 *stype;
759 
760 	if (state->allocated_stack <= slot) {
761 		verbose(env, "invalid read from stack off %d+0 size %d\n",
762 			off, size);
763 		return -EACCES;
764 	}
765 	stype = state->stack[spi].slot_type;
766 
767 	if (stype[0] == STACK_SPILL) {
768 		if (size != BPF_REG_SIZE) {
769 			verbose(env, "invalid size of register spill\n");
770 			return -EACCES;
771 		}
772 		for (i = 1; i < BPF_REG_SIZE; i++) {
773 			if (stype[(slot - i) % BPF_REG_SIZE] != STACK_SPILL) {
774 				verbose(env, "corrupted spill memory\n");
775 				return -EACCES;
776 			}
777 		}
778 
779 		if (value_regno >= 0) {
780 			/* restore register state from stack */
781 			state->regs[value_regno] = state->stack[spi].spilled_ptr;
782 			mark_stack_slot_read(state, spi);
783 		}
784 		return 0;
785 	} else {
786 		for (i = 0; i < size; i++) {
787 			if (stype[(slot - i) % BPF_REG_SIZE] != STACK_MISC) {
788 				verbose(env, "invalid read from stack off %d+%d size %d\n",
789 					off, i, size);
790 				return -EACCES;
791 			}
792 		}
793 		if (value_regno >= 0)
794 			/* have read misc data from the stack */
795 			mark_reg_unknown(env, state->regs, value_regno);
796 		return 0;
797 	}
798 }
799 
800 /* check read/write into map element returned by bpf_map_lookup_elem() */
801 static int __check_map_access(struct bpf_verifier_env *env, u32 regno, int off,
802 			    int size)
803 {
804 	struct bpf_reg_state *regs = cur_regs(env);
805 	struct bpf_map *map = regs[regno].map_ptr;
806 
807 	if (off < 0 || size <= 0 || off + size > map->value_size) {
808 		verbose(env, "invalid access to map value, value_size=%d off=%d size=%d\n",
809 			map->value_size, off, size);
810 		return -EACCES;
811 	}
812 	return 0;
813 }
814 
815 /* check read/write into a map element with possible variable offset */
816 static int check_map_access(struct bpf_verifier_env *env, u32 regno,
817 			    int off, int size)
818 {
819 	struct bpf_verifier_state *state = env->cur_state;
820 	struct bpf_reg_state *reg = &state->regs[regno];
821 	int err;
822 
823 	/* We may have adjusted the register to this map value, so we
824 	 * need to try adding each of min_value and max_value to off
825 	 * to make sure our theoretical access will be safe.
826 	 */
827 	if (env->log.level)
828 		print_verifier_state(env, state);
829 	/* The minimum value is only important with signed
830 	 * comparisons where we can't assume the floor of a
831 	 * value is 0.  If we are using signed variables for our
832 	 * indices, we need to make sure that whatever we use
833 	 * will have a set floor within our range.
834 	 */
835 	if (reg->smin_value < 0) {
836 		verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
837 			regno);
838 		return -EACCES;
839 	}
840 	err = __check_map_access(env, regno, reg->smin_value + off, size);
841 	if (err) {
842 		verbose(env, "R%d min value is outside of the array range\n",
843 			regno);
844 		return err;
845 	}
846 
847 	/* If we haven't set a max value then we need to bail since we can't be
848 	 * sure we won't do bad things.
849 	 * If reg->umax_value + off could overflow, treat that as unbounded too.
850 	 */
851 	if (reg->umax_value >= BPF_MAX_VAR_OFF) {
852 		verbose(env, "R%d unbounded memory access, make sure to bounds check any array access into a map\n",
853 			regno);
854 		return -EACCES;
855 	}
856 	err = __check_map_access(env, regno, reg->umax_value + off, size);
857 	if (err)
858 		verbose(env, "R%d max value is outside of the array range\n",
859 			regno);
860 	return err;
861 }
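
/* Example: with value_size == 8 and a value pointer whose variable part is
 * bounded to [0, 4], a 4-byte access at off 0 passes both probes above
 * (0 + 4 and 4 + 4 fit in 8 bytes), while bounds of [0, 5] would fail the
 * umax_value probe since 5 + 4 > 8.
 */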
862 
863 #define MAX_PACKET_OFF 0xffff
864 
865 static bool may_access_direct_pkt_data(struct bpf_verifier_env *env,
866 				       const struct bpf_call_arg_meta *meta,
867 				       enum bpf_access_type t)
868 {
869 	switch (env->prog->type) {
870 	case BPF_PROG_TYPE_LWT_IN:
871 	case BPF_PROG_TYPE_LWT_OUT:
872 		/* dst_input() and dst_output() can't write for now */
873 		if (t == BPF_WRITE)
874 			return false;
875 		/* fallthrough */
876 	case BPF_PROG_TYPE_SCHED_CLS:
877 	case BPF_PROG_TYPE_SCHED_ACT:
878 	case BPF_PROG_TYPE_XDP:
879 	case BPF_PROG_TYPE_LWT_XMIT:
880 	case BPF_PROG_TYPE_SK_SKB:
881 		if (meta)
882 			return meta->pkt_access;
883 
884 		env->seen_direct_write = true;
885 		return true;
886 	default:
887 		return false;
888 	}
889 }
890 
891 static int __check_packet_access(struct bpf_verifier_env *env, u32 regno,
892 				 int off, int size)
893 {
894 	struct bpf_reg_state *regs = cur_regs(env);
895 	struct bpf_reg_state *reg = &regs[regno];
896 
897 	if (off < 0 || size <= 0 || (u64)off + size > reg->range) {
898 		verbose(env, "invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n",
899 			off, size, regno, reg->id, reg->off, reg->range);
900 		return -EACCES;
901 	}
902 	return 0;
903 }
904 
905 static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
906 			       int size)
907 {
908 	struct bpf_reg_state *regs = cur_regs(env);
909 	struct bpf_reg_state *reg = &regs[regno];
910 	int err;
911 
912 	/* We may have added a variable offset to the packet pointer; but any
913 	 * reg->range we have comes after that.  We are only checking the fixed
914 	 * offset.
915 	 */
916 
917 	/* We don't allow negative numbers, because we aren't tracking enough
918 	 * detail to prove they're safe.
919 	 */
920 	if (reg->smin_value < 0) {
921 		verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
922 			regno);
923 		return -EACCES;
924 	}
925 	err = __check_packet_access(env, regno, off, size);
926 	if (err) {
927 		verbose(env, "R%d offset is outside of the packet\n", regno);
928 		return err;
929 	}
930 	return err;
931 }
932 
933 /* check access to 'struct bpf_context' fields.  Supports fixed offsets only */
934 static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size,
935 			    enum bpf_access_type t, enum bpf_reg_type *reg_type)
936 {
937 	struct bpf_insn_access_aux info = {
938 		.reg_type = *reg_type,
939 	};
940 
941 	if (env->ops->is_valid_access &&
942 	    env->ops->is_valid_access(off, size, t, &info)) {
943 		/* A non-zero info.ctx_field_size indicates that this field is a
944 		 * candidate for later verifier transformation to load the whole
945 		 * field and then apply a mask when accessed with a narrower
946 		 * access than actual ctx access size. A zero info.ctx_field_size
947 		 * will only allow for whole field access and rejects any other
948 		 * type of narrower access.
949 		 */
950 		*reg_type = info.reg_type;
951 
952 		env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
953 		/* remember the offset of last byte accessed in ctx */
954 		if (env->prog->aux->max_ctx_offset < off + size)
955 			env->prog->aux->max_ctx_offset = off + size;
956 		return 0;
957 	}
958 
959 	verbose(env, "invalid bpf_context access off=%d size=%d\n", off, size);
960 	return -EACCES;
961 }
962 
963 static bool __is_pointer_value(bool allow_ptr_leaks,
964 			       const struct bpf_reg_state *reg)
965 {
966 	if (allow_ptr_leaks)
967 		return false;
968 
969 	return reg->type != SCALAR_VALUE;
970 }
971 
972 static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
973 {
974 	return __is_pointer_value(env->allow_ptr_leaks, cur_regs(env) + regno);
975 }
976 
977 static int check_pkt_ptr_alignment(struct bpf_verifier_env *env,
978 				   const struct bpf_reg_state *reg,
979 				   int off, int size, bool strict)
980 {
981 	struct tnum reg_off;
982 	int ip_align;
983 
984 	/* Byte size accesses are always allowed. */
985 	if (!strict || size == 1)
986 		return 0;
987 
988 	/* For platforms that do not have a Kconfig enabling
989 	 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the value of
990 	 * NET_IP_ALIGN is universally set to '2'.  And on platforms
991 	 * that do set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, we get
992 	 * to this code only in strict mode where we want to emulate
993 	 * the NET_IP_ALIGN==2 checking.  Therefore use an
994 	 * unconditional IP align value of '2'.
995 	 */
996 	ip_align = 2;
997 
998 	reg_off = tnum_add(reg->var_off, tnum_const(ip_align + reg->off + off));
999 	if (!tnum_is_aligned(reg_off, size)) {
1000 		char tn_buf[48];
1001 
1002 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
1003 		verbose(env,
1004 			"misaligned packet access off %d+%s+%d+%d size %d\n",
1005 			ip_align, tn_buf, reg->off, off, size);
1006 		return -EACCES;
1007 	}
1008 
1009 	return 0;
1010 }
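
/* e.g. in strict mode a 4-byte load at pkt+14 (just past the Ethernet
 * header) checks tnum_is_aligned(2 + 14, 4) and passes, while the same
 * load at pkt+15 yields 2 + 15 = 17 and is rejected as misaligned.
 */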
1011 
1012 static int check_generic_ptr_alignment(struct bpf_verifier_env *env,
1013 				       const struct bpf_reg_state *reg,
1014 				       const char *pointer_desc,
1015 				       int off, int size, bool strict)
1016 {
1017 	struct tnum reg_off;
1018 
1019 	/* Byte size accesses are always allowed. */
1020 	if (!strict || size == 1)
1021 		return 0;
1022 
1023 	reg_off = tnum_add(reg->var_off, tnum_const(reg->off + off));
1024 	if (!tnum_is_aligned(reg_off, size)) {
1025 		char tn_buf[48];
1026 
1027 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
1028 		verbose(env, "misaligned %saccess off %s+%d+%d size %d\n",
1029 			pointer_desc, tn_buf, reg->off, off, size);
1030 		return -EACCES;
1031 	}
1032 
1033 	return 0;
1034 }
1035 
1036 static int check_ptr_alignment(struct bpf_verifier_env *env,
1037 			       const struct bpf_reg_state *reg,
1038 			       int off, int size)
1039 {
1040 	bool strict = env->strict_alignment;
1041 	const char *pointer_desc = "";
1042 
1043 	switch (reg->type) {
1044 	case PTR_TO_PACKET:
1045 	case PTR_TO_PACKET_META:
1046 		/* Special case, because of NET_IP_ALIGN. Given metadata sits
1047 		 * right in front, treat it the very same way.
1048 		 */
1049 		return check_pkt_ptr_alignment(env, reg, off, size, strict);
1050 	case PTR_TO_MAP_VALUE:
1051 		pointer_desc = "value ";
1052 		break;
1053 	case PTR_TO_CTX:
1054 		pointer_desc = "context ";
1055 		break;
1056 	case PTR_TO_STACK:
1057 		pointer_desc = "stack ";
1058 		break;
1059 	default:
1060 		break;
1061 	}
1062 	return check_generic_ptr_alignment(env, reg, pointer_desc, off, size,
1063 					   strict);
1064 }
1065 
1066 /* check whether memory at (regno + off) is accessible for t = (read | write)
1067  * if t==write, value_regno is a register whose value is stored into memory
1068  * if t==read, value_regno is a register which will receive the value from memory
1069  * if t==write && value_regno==-1, some unknown value is stored into memory
1070  * if t==read && value_regno==-1, don't care what we read from memory
1071  */
1072 static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno, int off,
1073 			    int bpf_size, enum bpf_access_type t,
1074 			    int value_regno)
1075 {
1076 	struct bpf_verifier_state *state = env->cur_state;
1077 	struct bpf_reg_state *regs = cur_regs(env);
1078 	struct bpf_reg_state *reg = regs + regno;
1079 	int size, err = 0;
1080 
1081 	size = bpf_size_to_bytes(bpf_size);
1082 	if (size < 0)
1083 		return size;
1084 
1085 	/* alignment checks will add in reg->off themselves */
1086 	err = check_ptr_alignment(env, reg, off, size);
1087 	if (err)
1088 		return err;
1089 
1090 	/* for access checks, reg->off is just part of off */
1091 	off += reg->off;
1092 
1093 	if (reg->type == PTR_TO_MAP_VALUE) {
1094 		if (t == BPF_WRITE && value_regno >= 0 &&
1095 		    is_pointer_value(env, value_regno)) {
1096 			verbose(env, "R%d leaks addr into map\n", value_regno);
1097 			return -EACCES;
1098 		}
1099 
1100 		err = check_map_access(env, regno, off, size);
1101 		if (!err && t == BPF_READ && value_regno >= 0)
1102 			mark_reg_unknown(env, regs, value_regno);
1103 
1104 	} else if (reg->type == PTR_TO_CTX) {
1105 		enum bpf_reg_type reg_type = SCALAR_VALUE;
1106 
1107 		if (t == BPF_WRITE && value_regno >= 0 &&
1108 		    is_pointer_value(env, value_regno)) {
1109 			verbose(env, "R%d leaks addr into ctx\n", value_regno);
1110 			return -EACCES;
1111 		}
1112 		/* ctx accesses must be at a fixed offset, so that we can
1113 		 * determine what type of data were returned.
1114 		 */
1115 		if (reg->off) {
1116 			verbose(env,
1117 				"dereference of modified ctx ptr R%d off=%d+%d, ctx+const is allowed, ctx+const+const is not\n",
1118 				regno, reg->off, off - reg->off);
1119 			return -EACCES;
1120 		}
1121 		if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
1122 			char tn_buf[48];
1123 
1124 			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
1125 			verbose(env,
1126 				"variable ctx access var_off=%s off=%d size=%d",
1127 				tn_buf, off, size);
1128 			return -EACCES;
1129 		}
1130 		err = check_ctx_access(env, insn_idx, off, size, t, &reg_type);
1131 		if (!err && t == BPF_READ && value_regno >= 0) {
1132 			/* ctx access returns either a scalar, or a
1133 			 * PTR_TO_PACKET[_META,_END]. In the latter
1134 			 * case, we know the offset is zero.
1135 			 */
1136 			if (reg_type == SCALAR_VALUE)
1137 				mark_reg_unknown(env, regs, value_regno);
1138 			else
1139 				mark_reg_known_zero(env, regs,
1140 						    value_regno);
1141 			regs[value_regno].id = 0;
1142 			regs[value_regno].off = 0;
1143 			regs[value_regno].range = 0;
1144 			regs[value_regno].type = reg_type;
1145 		}
1146 
1147 	} else if (reg->type == PTR_TO_STACK) {
1148 		/* stack accesses must be at a fixed offset, so that we can
1149 		 * determine what type of data were returned.
1150 		 * See check_stack_read().
1151 		 */
1152 		if (!tnum_is_const(reg->var_off)) {
1153 			char tn_buf[48];
1154 
1155 			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
1156 			verbose(env, "variable stack access var_off=%s off=%d size=%d",
1157 				tn_buf, off, size);
1158 			return -EACCES;
1159 		}
1160 		off += reg->var_off.value;
1161 		if (off >= 0 || off < -MAX_BPF_STACK) {
1162 			verbose(env, "invalid stack off=%d size=%d\n", off,
1163 				size);
1164 			return -EACCES;
1165 		}
1166 
1167 		if (env->prog->aux->stack_depth < -off)
1168 			env->prog->aux->stack_depth = -off;
1169 
1170 		if (t == BPF_WRITE)
1171 			err = check_stack_write(env, state, off, size,
1172 						value_regno);
1173 		else
1174 			err = check_stack_read(env, state, off, size,
1175 					       value_regno);
1176 	} else if (reg_is_pkt_pointer(reg)) {
1177 		if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) {
1178 			verbose(env, "cannot write into packet\n");
1179 			return -EACCES;
1180 		}
1181 		if (t == BPF_WRITE && value_regno >= 0 &&
1182 		    is_pointer_value(env, value_regno)) {
1183 			verbose(env, "R%d leaks addr into packet\n",
1184 				value_regno);
1185 			return -EACCES;
1186 		}
1187 		err = check_packet_access(env, regno, off, size);
1188 		if (!err && t == BPF_READ && value_regno >= 0)
1189 			mark_reg_unknown(env, regs, value_regno);
1190 	} else {
1191 		verbose(env, "R%d invalid mem access '%s'\n", regno,
1192 			reg_type_str[reg->type]);
1193 		return -EACCES;
1194 	}
1195 
1196 	if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ &&
1197 	    regs[value_regno].type == SCALAR_VALUE) {
1198 		/* b/h/w load zero-extends, mark upper bits as known 0 */
1199 		regs[value_regno].var_off =
1200 			tnum_cast(regs[value_regno].var_off, size);
1201 		__update_reg_bounds(&regs[value_regno]);
1202 	}
1203 	return err;
1204 }
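
/* As an example of the zero-extension handling above: after
 * r1 = *(u8 *)(r2 + 0), R1's var_off is cast down to one byte, so bits
 * 8..63 become known zero and __update_reg_bounds() caps umax_value
 * at 255.
 */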
1205 
1206 static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn)
1207 {
1208 	int err;
1209 
1210 	if ((BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) ||
1211 	    insn->imm != 0) {
1212 		verbose(env, "BPF_XADD uses reserved fields\n");
1213 		return -EINVAL;
1214 	}
1215 
1216 	/* check src1 operand */
1217 	err = check_reg_arg(env, insn->src_reg, SRC_OP);
1218 	if (err)
1219 		return err;
1220 
1221 	/* check src2 operand */
1222 	err = check_reg_arg(env, insn->dst_reg, SRC_OP);
1223 	if (err)
1224 		return err;
1225 
1226 	if (is_pointer_value(env, insn->src_reg)) {
1227 		verbose(env, "R%d leaks addr into mem\n", insn->src_reg);
1228 		return -EACCES;
1229 	}
1230 
1231 	/* check whether atomic_add can read the memory */
1232 	err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
1233 			       BPF_SIZE(insn->code), BPF_READ, -1);
1234 	if (err)
1235 		return err;
1236 
1237 	/* check whether atomic_add can write into the same memory */
1238 	return check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
1239 				BPF_SIZE(insn->code), BPF_WRITE, -1);
1240 }
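
/* Example: BPF_STX_XADD(BPF_DW, BPF_REG_2, BPF_REG_1, 0) performs
 * lock *(u64 *)(r2 + 0) += r1; the destination memory is therefore probed
 * for both BPF_READ and BPF_WRITE above, and a pointer in r1 is refused.
 */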
1241 
1242 /* Does this register contain a constant zero? */
1243 static bool register_is_null(struct bpf_reg_state reg)
1244 {
1245 	return reg.type == SCALAR_VALUE && tnum_equals_const(reg.var_off, 0);
1246 }
1247 
1248 /* when register 'regno' is passed into function that will read 'access_size'
1249  * bytes from that pointer, make sure that it's within stack boundary
1250  * and all elements of stack are initialized.
1251  * Unlike most pointer bounds-checking functions, this one doesn't take an
1252  * 'off' argument, so it has to add in reg->off itself.
1253  */
1254 static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
1255 				int access_size, bool zero_size_allowed,
1256 				struct bpf_call_arg_meta *meta)
1257 {
1258 	struct bpf_verifier_state *state = env->cur_state;
1259 	struct bpf_reg_state *regs = state->regs;
1260 	int off, i, slot, spi;
1261 
1262 	if (regs[regno].type != PTR_TO_STACK) {
1263 		/* Allow zero-byte read from NULL, regardless of pointer type */
1264 		if (zero_size_allowed && access_size == 0 &&
1265 		    register_is_null(regs[regno]))
1266 			return 0;
1267 
1268 		verbose(env, "R%d type=%s expected=%s\n", regno,
1269 			reg_type_str[regs[regno].type],
1270 			reg_type_str[PTR_TO_STACK]);
1271 		return -EACCES;
1272 	}
1273 
1274 	/* Only allow fixed-offset stack reads */
1275 	if (!tnum_is_const(regs[regno].var_off)) {
1276 		char tn_buf[48];
1277 
1278 		tnum_strn(tn_buf, sizeof(tn_buf), regs[regno].var_off);
1279 		verbose(env, "invalid variable stack read R%d var_off=%s\n",
1280 			regno, tn_buf);
		return -EACCES;
1281 	}
1282 	off = regs[regno].off + regs[regno].var_off.value;
1283 	if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 ||
1284 	    access_size <= 0) {
1285 		verbose(env, "invalid stack type R%d off=%d access_size=%d\n",
1286 			regno, off, access_size);
1287 		return -EACCES;
1288 	}
1289 
1290 	if (env->prog->aux->stack_depth < -off)
1291 		env->prog->aux->stack_depth = -off;
1292 
1293 	if (meta && meta->raw_mode) {
1294 		meta->access_size = access_size;
1295 		meta->regno = regno;
1296 		return 0;
1297 	}
1298 
1299 	for (i = 0; i < access_size; i++) {
1300 		slot = -(off + i) - 1;
1301 		spi = slot / BPF_REG_SIZE;
1302 		if (state->allocated_stack <= slot ||
1303 		    state->stack[spi].slot_type[slot % BPF_REG_SIZE] !=
1304 			STACK_MISC) {
1305 			verbose(env, "invalid indirect read from stack off %d+%d size %d\n",
1306 				off, i, access_size);
1307 			return -EACCES;
1308 		}
1309 	}
1310 	return 0;
1311 }
1312 
1313 static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
1314 				   int access_size, bool zero_size_allowed,
1315 				   struct bpf_call_arg_meta *meta)
1316 {
1317 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
1318 
1319 	switch (reg->type) {
1320 	case PTR_TO_PACKET:
1321 	case PTR_TO_PACKET_META:
1322 		return check_packet_access(env, regno, reg->off, access_size);
1323 	case PTR_TO_MAP_VALUE:
1324 		return check_map_access(env, regno, reg->off, access_size);
1325 	default: /* scalar_value|ptr_to_stack or invalid ptr */
1326 		return check_stack_boundary(env, regno, access_size,
1327 					    zero_size_allowed, meta);
1328 	}
1329 }
1330 
1331 static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
1332 			  enum bpf_arg_type arg_type,
1333 			  struct bpf_call_arg_meta *meta)
1334 {
1335 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
1336 	enum bpf_reg_type expected_type, type = reg->type;
1337 	int err = 0;
1338 
1339 	if (arg_type == ARG_DONTCARE)
1340 		return 0;
1341 
1342 	err = check_reg_arg(env, regno, SRC_OP);
1343 	if (err)
1344 		return err;
1345 
1346 	if (arg_type == ARG_ANYTHING) {
1347 		if (is_pointer_value(env, regno)) {
1348 			verbose(env, "R%d leaks addr into helper function\n",
1349 				regno);
1350 			return -EACCES;
1351 		}
1352 		return 0;
1353 	}
1354 
1355 	if (type_is_pkt_pointer(type) &&
1356 	    !may_access_direct_pkt_data(env, meta, BPF_READ)) {
1357 		verbose(env, "helper access to the packet is not allowed\n");
1358 		return -EACCES;
1359 	}
1360 
1361 	if (arg_type == ARG_PTR_TO_MAP_KEY ||
1362 	    arg_type == ARG_PTR_TO_MAP_VALUE) {
1363 		expected_type = PTR_TO_STACK;
1364 		if (!type_is_pkt_pointer(type) &&
1365 		    type != expected_type)
1366 			goto err_type;
1367 	} else if (arg_type == ARG_CONST_SIZE ||
1368 		   arg_type == ARG_CONST_SIZE_OR_ZERO) {
1369 		expected_type = SCALAR_VALUE;
1370 		if (type != expected_type)
1371 			goto err_type;
1372 	} else if (arg_type == ARG_CONST_MAP_PTR) {
1373 		expected_type = CONST_PTR_TO_MAP;
1374 		if (type != expected_type)
1375 			goto err_type;
1376 	} else if (arg_type == ARG_PTR_TO_CTX) {
1377 		expected_type = PTR_TO_CTX;
1378 		if (type != expected_type)
1379 			goto err_type;
1380 	} else if (arg_type == ARG_PTR_TO_MEM ||
1381 		   arg_type == ARG_PTR_TO_UNINIT_MEM) {
1382 		expected_type = PTR_TO_STACK;
1383 		/* One exception here. In case function allows for NULL to be
1384 		 * passed in as argument, it's a SCALAR_VALUE type. Final test
1385 		 * happens during stack boundary checking.
1386 		 */
1387 		if (register_is_null(*reg))
1388 			/* final test in check_stack_boundary() */;
1389 		else if (!type_is_pkt_pointer(type) &&
1390 			 type != PTR_TO_MAP_VALUE &&
1391 			 type != expected_type)
1392 			goto err_type;
1393 		meta->raw_mode = arg_type == ARG_PTR_TO_UNINIT_MEM;
1394 	} else {
1395 		verbose(env, "unsupported arg_type %d\n", arg_type);
1396 		return -EFAULT;
1397 	}
1398 
1399 	if (arg_type == ARG_CONST_MAP_PTR) {
1400 		/* bpf_map_xxx(map_ptr) call: remember that map_ptr */
1401 		meta->map_ptr = reg->map_ptr;
1402 	} else if (arg_type == ARG_PTR_TO_MAP_KEY) {
1403 		/* bpf_map_xxx(..., map_ptr, ..., key) call:
1404 		 * check that [key, key + map->key_size) are within
1405 		 * stack limits and initialized
1406 		 */
1407 		if (!meta->map_ptr) {
1408 			/* in function declaration map_ptr must come before
1409 			 * map_key, so that it's verified and known before
1410 			 * we have to check map_key here. Otherwise it means
1411 			 * that kernel subsystem misconfigured verifier
1412 			 */
1413 			verbose(env, "invalid map_ptr to access map->key\n");
1414 			return -EACCES;
1415 		}
1416 		if (type_is_pkt_pointer(type))
1417 			err = check_packet_access(env, regno, reg->off,
1418 						  meta->map_ptr->key_size);
1419 		else
1420 			err = check_stack_boundary(env, regno,
1421 						   meta->map_ptr->key_size,
1422 						   false, NULL);
1423 	} else if (arg_type == ARG_PTR_TO_MAP_VALUE) {
1424 		/* bpf_map_xxx(..., map_ptr, ..., value) call:
1425 		 * check [value, value + map->value_size) validity
1426 		 */
1427 		if (!meta->map_ptr) {
1428 			/* kernel subsystem misconfigured verifier */
1429 			verbose(env, "invalid map_ptr to access map->value\n");
1430 			return -EACCES;
1431 		}
1432 		if (type_is_pkt_pointer(type))
1433 			err = check_packet_access(env, regno, reg->off,
1434 						  meta->map_ptr->value_size);
1435 		else
1436 			err = check_stack_boundary(env, regno,
1437 						   meta->map_ptr->value_size,
1438 						   false, NULL);
1439 	} else if (arg_type == ARG_CONST_SIZE ||
1440 		   arg_type == ARG_CONST_SIZE_OR_ZERO) {
1441 		bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO);
1442 
1443 		/* bpf_xxx(..., buf, len) call will access 'len' bytes
1444 		 * from stack pointer 'buf'. Check it.
1445 		 * Note: regno == len, regno - 1 == buf
1446 		 */
1447 		if (regno == 0) {
1448 			/* kernel subsystem misconfigured verifier */
1449 			verbose(env,
1450 				"ARG_CONST_SIZE cannot be first argument\n");
1451 			return -EACCES;
1452 		}
1453 
1454 		/* The register is SCALAR_VALUE; the access check
1455 		 * happens using its boundaries.
1456 		 */
1457 
1458 		if (!tnum_is_const(reg->var_off))
1459 			/* For unprivileged variable accesses, disable raw
1460 			 * mode so that the program is required to
1461 			 * initialize all the memory that the helper could
1462 			 * just partially fill up.
1463 			 */
1464 			meta = NULL;
1465 
1466 		if (reg->smin_value < 0) {
1467 			verbose(env, "R%d min value is negative, either use unsigned or 'var &= const'\n",
1468 				regno);
1469 			return -EACCES;
1470 		}
1471 
1472 		if (reg->umin_value == 0) {
1473 			err = check_helper_mem_access(env, regno - 1, 0,
1474 						      zero_size_allowed,
1475 						      meta);
1476 			if (err)
1477 				return err;
1478 		}
1479 
1480 		if (reg->umax_value >= BPF_MAX_VAR_SIZ) {
1481 			verbose(env, "R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n",
1482 				regno);
1483 			return -EACCES;
1484 		}
1485 		err = check_helper_mem_access(env, regno - 1,
1486 					      reg->umax_value,
1487 					      zero_size_allowed, meta);
1488 	}
1489 
1490 	return err;
1491 err_type:
1492 	verbose(env, "R%d type=%s expected=%s\n", regno,
1493 		reg_type_str[type], reg_type_str[expected_type]);
1494 	return -EACCES;
1495 }
1496 
1497 static int check_map_func_compatibility(struct bpf_verifier_env *env,
1498 					struct bpf_map *map, int func_id)
1499 {
1500 	if (!map)
1501 		return 0;
1502 
1503 	/* We need a two way check, first is from map perspective ... */
1504 	switch (map->map_type) {
1505 	case BPF_MAP_TYPE_PROG_ARRAY:
1506 		if (func_id != BPF_FUNC_tail_call)
1507 			goto error;
1508 		break;
1509 	case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
1510 		if (func_id != BPF_FUNC_perf_event_read &&
1511 		    func_id != BPF_FUNC_perf_event_output &&
1512 		    func_id != BPF_FUNC_perf_event_read_value)
1513 			goto error;
1514 		break;
1515 	case BPF_MAP_TYPE_STACK_TRACE:
1516 		if (func_id != BPF_FUNC_get_stackid)
1517 			goto error;
1518 		break;
1519 	case BPF_MAP_TYPE_CGROUP_ARRAY:
1520 		if (func_id != BPF_FUNC_skb_under_cgroup &&
1521 		    func_id != BPF_FUNC_current_task_under_cgroup)
1522 			goto error;
1523 		break;
1524 	/* devmap returns a pointer to a live net_device ifindex that we cannot
1525 	 * allow to be modified from the bpf side. So do not allow lookup of
1526 	 * elements for now.
1527 	 */
1528 	case BPF_MAP_TYPE_DEVMAP:
1529 		if (func_id != BPF_FUNC_redirect_map)
1530 			goto error;
1531 		break;
1532 	/* Restrict bpf side of cpumap, open when use-cases appear */
1533 	case BPF_MAP_TYPE_CPUMAP:
1534 		if (func_id != BPF_FUNC_redirect_map)
1535 			goto error;
1536 		break;
1537 	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
1538 	case BPF_MAP_TYPE_HASH_OF_MAPS:
1539 		if (func_id != BPF_FUNC_map_lookup_elem)
1540 			goto error;
1541 		break;
1542 	case BPF_MAP_TYPE_SOCKMAP:
1543 		if (func_id != BPF_FUNC_sk_redirect_map &&
1544 		    func_id != BPF_FUNC_sock_map_update &&
1545 		    func_id != BPF_FUNC_map_delete_elem)
1546 			goto error;
1547 		break;
1548 	default:
1549 		break;
1550 	}
1551 
1552 	/* ... and second from the function itself. */
1553 	switch (func_id) {
1554 	case BPF_FUNC_tail_call:
1555 		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
1556 			goto error;
1557 		break;
1558 	case BPF_FUNC_perf_event_read:
1559 	case BPF_FUNC_perf_event_output:
1560 	case BPF_FUNC_perf_event_read_value:
1561 		if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
1562 			goto error;
1563 		break;
1564 	case BPF_FUNC_get_stackid:
1565 		if (map->map_type != BPF_MAP_TYPE_STACK_TRACE)
1566 			goto error;
1567 		break;
1568 	case BPF_FUNC_current_task_under_cgroup:
1569 	case BPF_FUNC_skb_under_cgroup:
1570 		if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY)
1571 			goto error;
1572 		break;
1573 	case BPF_FUNC_redirect_map:
1574 		if (map->map_type != BPF_MAP_TYPE_DEVMAP &&
1575 		    map->map_type != BPF_MAP_TYPE_CPUMAP)
1576 			goto error;
1577 		break;
1578 	case BPF_FUNC_sk_redirect_map:
1579 		if (map->map_type != BPF_MAP_TYPE_SOCKMAP)
1580 			goto error;
1581 		break;
1582 	case BPF_FUNC_sock_map_update:
1583 		if (map->map_type != BPF_MAP_TYPE_SOCKMAP)
1584 			goto error;
1585 		break;
1586 	default:
1587 		break;
1588 	}
1589 
1590 	return 0;
1591 error:
1592 	verbose(env, "cannot pass map_type %d into func %s#%d\n",
1593 		map->map_type, func_id_name(func_id), func_id);
1594 	return -EINVAL;
1595 }
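
/* Example of the two-way check above: bpf_tail_call() with a
 * BPF_MAP_TYPE_HASH map fails the second (func_id) switch, while
 * bpf_map_lookup_elem() on a BPF_MAP_TYPE_PROG_ARRAY fails the first
 * (map_type) switch; both paths report the same "cannot pass map_type"
 * error.
 */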
1596 
1597 static int check_raw_mode(const struct bpf_func_proto *fn)
1598 {
1599 	int count = 0;
1600 
1601 	if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM)
1602 		count++;
1603 	if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM)
1604 		count++;
1605 	if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM)
1606 		count++;
1607 	if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM)
1608 		count++;
1609 	if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM)
1610 		count++;
1611 
1612 	return count > 1 ? -EINVAL : 0;
1613 }
1614 
1615 /* Packet data might have moved, any old PTR_TO_PACKET[_META,_END]
1616  * are now invalid, so turn them into unknown SCALAR_VALUE.
1617  */
1618 static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
1619 {
1620 	struct bpf_verifier_state *state = env->cur_state;
1621 	struct bpf_reg_state *regs = state->regs, *reg;
1622 	int i;
1623 
1624 	for (i = 0; i < MAX_BPF_REG; i++)
1625 		if (reg_is_pkt_pointer_any(&regs[i]))
1626 			mark_reg_unknown(env, regs, i);
1627 
1628 	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
1629 		if (state->stack[i].slot_type[0] != STACK_SPILL)
1630 			continue;
1631 		reg = &state->stack[i].spilled_ptr;
1632 		if (reg_is_pkt_pointer_any(reg))
1633 			__mark_reg_unknown(reg);
1634 	}
1635 }
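
/* For instance, after a helper like bpf_skb_store_bytes() (for which
 * bpf_helper_changes_pkt_data() returns true), a pkt pointer kept in
 * R6-R9 or spilled to the stack becomes an unknown SCALAR_VALUE and the
 * program has to repeat its data/data_end bounds check before the next
 * packet access.
 */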
1636 
1637 static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
1638 {
1639 	const struct bpf_func_proto *fn = NULL;
1640 	struct bpf_reg_state *regs;
1641 	struct bpf_call_arg_meta meta;
1642 	bool changes_data;
1643 	int i, err;
1644 
1645 	/* find function prototype */
1646 	if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) {
1647 		verbose(env, "invalid func %s#%d\n", func_id_name(func_id),
1648 			func_id);
1649 		return -EINVAL;
1650 	}
1651 
1652 	if (env->ops->get_func_proto)
1653 		fn = env->ops->get_func_proto(func_id);
1654 
1655 	if (!fn) {
1656 		verbose(env, "unknown func %s#%d\n", func_id_name(func_id),
1657 			func_id);
1658 		return -EINVAL;
1659 	}
1660 
1661 	/* eBPF programs must be GPL compatible to use GPL-ed functions */
1662 	if (!env->prog->gpl_compatible && fn->gpl_only) {
1663 		verbose(env, "cannot call GPL only function from proprietary program\n");
1664 		return -EINVAL;
1665 	}
1666 
1667 	changes_data = bpf_helper_changes_pkt_data(fn->func);
1668 
1669 	memset(&meta, 0, sizeof(meta));
1670 	meta.pkt_access = fn->pkt_access;
1671 
1672 	/* We only support one arg being in raw mode at the moment, which
1673 	 * is sufficient for the helper functions we have right now.
1674 	 */
1675 	err = check_raw_mode(fn);
1676 	if (err) {
1677 		verbose(env, "kernel subsystem misconfigured func %s#%d\n",
1678 			func_id_name(func_id), func_id);
1679 		return err;
1680 	}
1681 
1682 	/* check args */
1683 	err = check_func_arg(env, BPF_REG_1, fn->arg1_type, &meta);
1684 	if (err)
1685 		return err;
1686 	err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta);
1687 	if (err)
1688 		return err;
1689 	err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta);
1690 	if (err)
1691 		return err;
1692 	err = check_func_arg(env, BPF_REG_4, fn->arg4_type, &meta);
1693 	if (err)
1694 		return err;
1695 	err = check_func_arg(env, BPF_REG_5, fn->arg5_type, &meta);
1696 	if (err)
1697 		return err;
1698 
1699 	/* Mark slots with STACK_MISC in case of raw mode; the stack offset
1700 	 * is inferred from register state.
1701 	 */
1702 	for (i = 0; i < meta.access_size; i++) {
1703 		err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B, BPF_WRITE, -1);
1704 		if (err)
1705 			return err;
1706 	}
1707 
1708 	regs = cur_regs(env);
1709 	/* reset caller saved regs */
1710 	for (i = 0; i < CALLER_SAVED_REGS; i++) {
1711 		mark_reg_not_init(env, regs, caller_saved[i]);
1712 		check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
1713 	}
1714 
1715 	/* update return register (already marked as written above) */
1716 	if (fn->ret_type == RET_INTEGER) {
1717 		/* sets type to SCALAR_VALUE */
1718 		mark_reg_unknown(env, regs, BPF_REG_0);
1719 	} else if (fn->ret_type == RET_VOID) {
1720 		regs[BPF_REG_0].type = NOT_INIT;
1721 	} else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL) {
1722 		struct bpf_insn_aux_data *insn_aux;
1723 
1724 		regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
1725 		/* There is no offset yet applied, variable or fixed */
1726 		mark_reg_known_zero(env, regs, BPF_REG_0);
1727 		regs[BPF_REG_0].off = 0;
1728 		/* remember map_ptr, so that check_map_access()
1729 		 * can check 'value_size' boundary of memory access
1730 		 * to map element returned from bpf_map_lookup_elem()
1731 		 */
1732 		if (meta.map_ptr == NULL) {
1733 			verbose(env,
1734 				"kernel subsystem misconfigured verifier\n");
1735 			return -EINVAL;
1736 		}
1737 		regs[BPF_REG_0].map_ptr = meta.map_ptr;
1738 		regs[BPF_REG_0].id = ++env->id_gen;
1739 		insn_aux = &env->insn_aux_data[insn_idx];
1740 		if (!insn_aux->map_ptr)
1741 			insn_aux->map_ptr = meta.map_ptr;
1742 		else if (insn_aux->map_ptr != meta.map_ptr)
1743 			insn_aux->map_ptr = BPF_MAP_PTR_POISON;
1744 	} else {
1745 		verbose(env, "unknown return type %d of func %s#%d\n",
1746 			fn->ret_type, func_id_name(func_id), func_id);
1747 		return -EINVAL;
1748 	}
1749 
1750 	err = check_map_func_compatibility(env, meta.map_ptr, func_id);
1751 	if (err)
1752 		return err;
1753 
1754 	if (changes_data)
1755 		clear_all_pkt_pointers(env);
1756 	return 0;
1757 }
1758 
1759 static void coerce_reg_to_32(struct bpf_reg_state *reg)
1760 {
1761 	/* clear high 32 bits */
1762 	reg->var_off = tnum_cast(reg->var_off, 4);
1763 	/* Update bounds */
1764 	__update_reg_bounds(reg);
1765 }
1766 
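/* Example (illustrative): for a fully unknown scalar, tnum_cast(.., 4)
 * marks the upper 32 bits as known zero, so __update_reg_bounds()
 * shrinks the unsigned range from [0, U64_MAX] to [0, U32_MAX].
 */
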
1767 static bool signed_add_overflows(s64 a, s64 b)
1768 {
1769 	/* Do the add in u64, where overflow is well-defined */
1770 	s64 res = (s64)((u64)a + (u64)b);
1771 
1772 	if (b < 0)
1773 		return res > a;
1774 	return res < a;
1775 }
1776 
1777 static bool signed_sub_overflows(s64 a, s64 b)
1778 {
1779 	/* Do the sub in u64, where overflow is well-defined */
1780 	s64 res = (s64)((u64)a - (u64)b);
1781 
1782 	if (b < 0)
1783 		return res < a;
1784 	return res > a;
1785 }
1786 
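/* Worked boundary cases for the two predicates above (illustrative):
 *   signed_add_overflows(S64_MAX, 1)  -> true  (wraps to S64_MIN)
 *   signed_add_overflows(S64_MIN, -1) -> true  (wraps to S64_MAX)
 *   signed_add_overflows(-1, 1)       -> false (result 0 fits)
 *   signed_sub_overflows(S64_MIN, 1)  -> true  (wraps to S64_MAX)
 *   signed_sub_overflows(S64_MAX, -1) -> true  (wraps to S64_MIN)
 */
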
1787 /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
1788  * Caller should also handle BPF_MOV case separately.
1789  * If we return -EACCES, caller may want to try again treating pointer as a
1790  * scalar.  So we only emit a diagnostic if !env->allow_ptr_leaks.
1791  */
1792 static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
1793 				   struct bpf_insn *insn,
1794 				   const struct bpf_reg_state *ptr_reg,
1795 				   const struct bpf_reg_state *off_reg)
1796 {
1797 	struct bpf_reg_state *regs = cur_regs(env), *dst_reg;
1798 	bool known = tnum_is_const(off_reg->var_off);
1799 	s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value,
1800 	    smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
1801 	u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
1802 	    umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
1803 	u8 opcode = BPF_OP(insn->code);
1804 	u32 dst = insn->dst_reg;
1805 
1806 	dst_reg = &regs[dst];
1807 
1808 	if (WARN_ON_ONCE(known && (smin_val != smax_val))) {
1809 		print_verifier_state(env, env->cur_state);
1810 		verbose(env,
1811 			"verifier internal error: known but bad sbounds\n");
1812 		return -EINVAL;
1813 	}
1814 	if (WARN_ON_ONCE(known && (umin_val != umax_val))) {
1815 		print_verifier_state(env, env->cur_state);
1816 		verbose(env,
1817 			"verifier internal error: known but bad ubounds\n");
1818 		return -EINVAL;
1819 	}
1820 
1821 	if (BPF_CLASS(insn->code) != BPF_ALU64) {
1822 		/* 32-bit ALU ops on pointers produce (meaningless) scalars */
1823 		if (!env->allow_ptr_leaks)
1824 			verbose(env,
1825 				"R%d 32-bit pointer arithmetic prohibited\n",
1826 				dst);
1827 		return -EACCES;
1828 	}
1829 
1830 	if (ptr_reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
1831 		if (!env->allow_ptr_leaks)
1832 			verbose(env, "R%d pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL prohibited, null-check it first\n",
1833 				dst);
1834 		return -EACCES;
1835 	}
1836 	if (ptr_reg->type == CONST_PTR_TO_MAP) {
1837 		if (!env->allow_ptr_leaks)
1838 			verbose(env, "R%d pointer arithmetic on CONST_PTR_TO_MAP prohibited\n",
1839 				dst);
1840 		return -EACCES;
1841 	}
1842 	if (ptr_reg->type == PTR_TO_PACKET_END) {
1843 		if (!env->allow_ptr_leaks)
1844 			verbose(env, "R%d pointer arithmetic on PTR_TO_PACKET_END prohibited\n",
1845 				dst);
1846 		return -EACCES;
1847 	}
1848 
1849 	/* In case of 'scalar += pointer', dst_reg inherits pointer type and id.
1850 	 * The id may be overwritten later if we create a new variable offset.
1851 	 */
1852 	dst_reg->type = ptr_reg->type;
1853 	dst_reg->id = ptr_reg->id;
1854 
1855 	switch (opcode) {
1856 	case BPF_ADD:
1857 		/* We can take a fixed offset as long as it doesn't overflow
1858 		 * the s32 'off' field
1859 		 */
1860 		if (known && (ptr_reg->off + smin_val ==
1861 			      (s64)(s32)(ptr_reg->off + smin_val))) {
1862 			/* pointer += K.  Accumulate it into fixed offset */
1863 			dst_reg->smin_value = smin_ptr;
1864 			dst_reg->smax_value = smax_ptr;
1865 			dst_reg->umin_value = umin_ptr;
1866 			dst_reg->umax_value = umax_ptr;
1867 			dst_reg->var_off = ptr_reg->var_off;
1868 			dst_reg->off = ptr_reg->off + smin_val;
1869 			dst_reg->range = ptr_reg->range;
1870 			break;
1871 		}
1872 		/* A new variable offset is created.  Note that off_reg->off
1873 		 * == 0, since it's a scalar.
1874 		 * dst_reg gets the pointer type and since an unknown scalar
1875 		 * was added to the pointer, give it a new 'id' if it's a
1876 		 * PTR_TO_PACKET.
1877 		 * This creates a new 'base' pointer: off_reg (the variable
1878 		 * part) gets added into the variable offset, and we copy the
1879 		 * fixed offset from ptr_reg.
1880 		 */
1881 		if (signed_add_overflows(smin_ptr, smin_val) ||
1882 		    signed_add_overflows(smax_ptr, smax_val)) {
1883 			dst_reg->smin_value = S64_MIN;
1884 			dst_reg->smax_value = S64_MAX;
1885 		} else {
1886 			dst_reg->smin_value = smin_ptr + smin_val;
1887 			dst_reg->smax_value = smax_ptr + smax_val;
1888 		}
1889 		if (umin_ptr + umin_val < umin_ptr ||
1890 		    umax_ptr + umax_val < umax_ptr) {
1891 			dst_reg->umin_value = 0;
1892 			dst_reg->umax_value = U64_MAX;
1893 		} else {
1894 			dst_reg->umin_value = umin_ptr + umin_val;
1895 			dst_reg->umax_value = umax_ptr + umax_val;
1896 		}
1897 		dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off);
1898 		dst_reg->off = ptr_reg->off;
1899 		if (reg_is_pkt_pointer(ptr_reg)) {
1900 			dst_reg->id = ++env->id_gen;
1901 			/* something was added to pkt_ptr, set range to zero */
1902 			dst_reg->range = 0;
1903 		}
1904 		break;
1905 	case BPF_SUB:
1906 		if (dst_reg == off_reg) {
1907 			/* scalar -= pointer.  Creates an unknown scalar */
1908 			if (!env->allow_ptr_leaks)
1909 				verbose(env, "R%d tried to subtract pointer from scalar\n",
1910 					dst);
1911 			return -EACCES;
1912 		}
1913 		/* We don't allow subtraction from FP, because (according to
1914 		 * test_verifier.c test "invalid fp arithmetic") JITs might not
1915 		 * be able to deal with it.
1916 		 */
1917 		if (ptr_reg->type == PTR_TO_STACK) {
1918 			if (!env->allow_ptr_leaks)
1919 				verbose(env, "R%d subtraction from stack pointer prohibited\n",
1920 					dst);
1921 			return -EACCES;
1922 		}
1923 		if (known && (ptr_reg->off - smin_val ==
1924 			      (s64)(s32)(ptr_reg->off - smin_val))) {
1925 			/* pointer -= K.  Subtract it from fixed offset */
1926 			dst_reg->smin_value = smin_ptr;
1927 			dst_reg->smax_value = smax_ptr;
1928 			dst_reg->umin_value = umin_ptr;
1929 			dst_reg->umax_value = umax_ptr;
1930 			dst_reg->var_off = ptr_reg->var_off;
1931 			dst_reg->id = ptr_reg->id;
1932 			dst_reg->off = ptr_reg->off - smin_val;
1933 			dst_reg->range = ptr_reg->range;
1934 			break;
1935 		}
1936 		/* A new variable offset is created.  If the subtrahend is known
1937 		 * nonnegative, then any reg->range we had before is still good.
1938 		 */
1939 		if (signed_sub_overflows(smin_ptr, smax_val) ||
1940 		    signed_sub_overflows(smax_ptr, smin_val)) {
1941 			/* Overflow possible, we know nothing */
1942 			dst_reg->smin_value = S64_MIN;
1943 			dst_reg->smax_value = S64_MAX;
1944 		} else {
1945 			dst_reg->smin_value = smin_ptr - smax_val;
1946 			dst_reg->smax_value = smax_ptr - smin_val;
1947 		}
1948 		if (umin_ptr < umax_val) {
1949 			/* Overflow possible, we know nothing */
1950 			dst_reg->umin_value = 0;
1951 			dst_reg->umax_value = U64_MAX;
1952 		} else {
1953 			/* Cannot overflow (as long as bounds are consistent) */
1954 			dst_reg->umin_value = umin_ptr - umax_val;
1955 			dst_reg->umax_value = umax_ptr - umin_val;
1956 		}
1957 		dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off);
1958 		dst_reg->off = ptr_reg->off;
1959 		if (reg_is_pkt_pointer(ptr_reg)) {
1960 			dst_reg->id = ++env->id_gen;
1961 			/* a possibly-negative subtrahend invalidates the old range */
1962 			if (smin_val < 0)
1963 				dst_reg->range = 0;
1964 		}
1965 		break;
1966 	case BPF_AND:
1967 	case BPF_OR:
1968 	case BPF_XOR:
1969 		/* bitwise ops on pointers are troublesome, prohibit for now.
1970 		 * (However, in principle we could allow some cases, e.g.
1971 		 * ptr &= ~3 which would reduce min_value by 3.)
1972 		 */
1973 		if (!env->allow_ptr_leaks)
1974 			verbose(env, "R%d bitwise operator %s on pointer prohibited\n",
1975 				dst, bpf_alu_string[opcode >> 4]);
1976 		return -EACCES;
1977 	default:
1978 		/* other operators (e.g. MUL,LSH) produce non-pointer results */
1979 		if (!env->allow_ptr_leaks)
1980 			verbose(env, "R%d pointer arithmetic with %s operator prohibited\n",
1981 				dst, bpf_alu_string[opcode >> 4]);
1982 		return -EACCES;
1983 	}
1984 
1985 	__update_reg_bounds(dst_reg);
1986 	__reg_deduce_bounds(dst_reg);
1987 	__reg_bound_offset(dst_reg);
1988 	return 0;
1989 }
1990 
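/* Illustrative walk-through of the BPF_ADD fast path above: given R1
 * of type PTR_TO_STACK with off == -20 and the constant add
 *	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
 * the offset is known and fits in s32, so it is folded into the fixed
 * part (dst_reg->off becomes -16) while var_off and the min/max
 * bounds are copied from the pointer unchanged.
 */
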
1991 static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
1992 				      struct bpf_insn *insn,
1993 				      struct bpf_reg_state *dst_reg,
1994 				      struct bpf_reg_state src_reg)
1995 {
1996 	struct bpf_reg_state *regs = cur_regs(env);
1997 	u8 opcode = BPF_OP(insn->code);
1998 	bool src_known, dst_known;
1999 	s64 smin_val, smax_val;
2000 	u64 umin_val, umax_val;
2001 
2002 	if (BPF_CLASS(insn->code) != BPF_ALU64) {
2003 		/* 32-bit ALU ops are (32,32)->64 */
2004 		coerce_reg_to_32(dst_reg);
2005 		coerce_reg_to_32(&src_reg);
2006 	}
2007 	smin_val = src_reg.smin_value;
2008 	smax_val = src_reg.smax_value;
2009 	umin_val = src_reg.umin_value;
2010 	umax_val = src_reg.umax_value;
2011 	src_known = tnum_is_const(src_reg.var_off);
2012 	dst_known = tnum_is_const(dst_reg->var_off);
2013 
2014 	switch (opcode) {
2015 	case BPF_ADD:
2016 		if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
2017 		    signed_add_overflows(dst_reg->smax_value, smax_val)) {
2018 			dst_reg->smin_value = S64_MIN;
2019 			dst_reg->smax_value = S64_MAX;
2020 		} else {
2021 			dst_reg->smin_value += smin_val;
2022 			dst_reg->smax_value += smax_val;
2023 		}
2024 		if (dst_reg->umin_value + umin_val < umin_val ||
2025 		    dst_reg->umax_value + umax_val < umax_val) {
2026 			dst_reg->umin_value = 0;
2027 			dst_reg->umax_value = U64_MAX;
2028 		} else {
2029 			dst_reg->umin_value += umin_val;
2030 			dst_reg->umax_value += umax_val;
2031 		}
2032 		dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);
2033 		break;
2034 	case BPF_SUB:
2035 		if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
2036 		    signed_sub_overflows(dst_reg->smax_value, smin_val)) {
2037 			/* Overflow possible, we know nothing */
2038 			dst_reg->smin_value = S64_MIN;
2039 			dst_reg->smax_value = S64_MAX;
2040 		} else {
2041 			dst_reg->smin_value -= smax_val;
2042 			dst_reg->smax_value -= smin_val;
2043 		}
2044 		if (dst_reg->umin_value < umax_val) {
2045 			/* Overflow possible, we know nothing */
2046 			dst_reg->umin_value = 0;
2047 			dst_reg->umax_value = U64_MAX;
2048 		} else {
2049 			/* Cannot overflow (as long as bounds are consistent) */
2050 			dst_reg->umin_value -= umax_val;
2051 			dst_reg->umax_value -= umin_val;
2052 		}
2053 		dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off);
2054 		break;
2055 	case BPF_MUL:
2056 		dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off);
2057 		if (smin_val < 0 || dst_reg->smin_value < 0) {
2058 			/* Ain't nobody got time to multiply that sign */
2059 			__mark_reg_unbounded(dst_reg);
2060 			__update_reg_bounds(dst_reg);
2061 			break;
2062 		}
2063 		/* Both values are positive, so we can work with unsigned and
2064 		 * copy the result to signed (unless it exceeds S64_MAX).
2065 		 */
2066 		if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) {
2067 			/* Potential overflow, we know nothing */
2068 			__mark_reg_unbounded(dst_reg);
2069 			/* (except what we can learn from the var_off) */
2070 			__update_reg_bounds(dst_reg);
2071 			break;
2072 		}
2073 		dst_reg->umin_value *= umin_val;
2074 		dst_reg->umax_value *= umax_val;
2075 		if (dst_reg->umax_value > S64_MAX) {
2076 			/* Overflow possible, we know nothing */
2077 			dst_reg->smin_value = S64_MIN;
2078 			dst_reg->smax_value = S64_MAX;
2079 		} else {
2080 			dst_reg->smin_value = dst_reg->umin_value;
2081 			dst_reg->smax_value = dst_reg->umax_value;
2082 		}
2083 		break;
2084 	case BPF_AND:
2085 		if (src_known && dst_known) {
2086 			__mark_reg_known(dst_reg, dst_reg->var_off.value &
2087 						  src_reg.var_off.value);
2088 			break;
2089 		}
2090 		/* We get our minimum from the var_off, since that's inherently
2091 		 * bitwise.  Our maximum is the minimum of the operands' maxima.
2092 		 */
2093 		dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off);
2094 		dst_reg->umin_value = dst_reg->var_off.value;
2095 		dst_reg->umax_value = min(dst_reg->umax_value, umax_val);
2096 		if (dst_reg->smin_value < 0 || smin_val < 0) {
2097 			/* Lose signed bounds when ANDing negative numbers,
2098 			 * ain't nobody got time for that.
2099 			 */
2100 			dst_reg->smin_value = S64_MIN;
2101 			dst_reg->smax_value = S64_MAX;
2102 		} else {
2103 			/* ANDing two positives gives a positive, so safe to
2104 			 * cast result into s64.
2105 			 */
2106 			dst_reg->smin_value = dst_reg->umin_value;
2107 			dst_reg->smax_value = dst_reg->umax_value;
2108 		}
2109 		/* We may learn something more from the var_off */
2110 		__update_reg_bounds(dst_reg);
2111 		break;
2112 	case BPF_OR:
2113 		if (src_known && dst_known) {
2114 			__mark_reg_known(dst_reg, dst_reg->var_off.value |
2115 						  src_reg.var_off.value);
2116 			break;
2117 		}
2118 		/* We get our maximum from the var_off, and our minimum is the
2119 		 * maximum of the operands' minima
2120 		 */
2121 		dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off);
2122 		dst_reg->umin_value = max(dst_reg->umin_value, umin_val);
2123 		dst_reg->umax_value = dst_reg->var_off.value |
2124 				      dst_reg->var_off.mask;
2125 		if (dst_reg->smin_value < 0 || smin_val < 0) {
2126 			/* Lose signed bounds when ORing negative numbers,
2127 			 * ain't nobody got time for that.
2128 			 */
2129 			dst_reg->smin_value = S64_MIN;
2130 			dst_reg->smax_value = S64_MAX;
2131 		} else {
2132 			/* ORing two positives gives a positive, so safe to
2133 			 * cast result into s64.
2134 			 */
2135 			dst_reg->smin_value = dst_reg->umin_value;
2136 			dst_reg->smax_value = dst_reg->umax_value;
2137 		}
2138 		/* We may learn something more from the var_off */
2139 		__update_reg_bounds(dst_reg);
2140 		break;
2141 	case BPF_LSH:
2142 		if (umax_val > 63) {
2143 			/* Shifts greater than 63 are undefined.  This includes
2144 			 * shifts by a negative number.
2145 			 */
2146 			mark_reg_unknown(env, regs, insn->dst_reg);
2147 			break;
2148 		}
2149 		/* We lose all sign bit information (except what we can pick
2150 		 * up from var_off)
2151 		 */
2152 		dst_reg->smin_value = S64_MIN;
2153 		dst_reg->smax_value = S64_MAX;
2154 		/* If we might shift our top bit out, then we know nothing */
2155 		if (dst_reg->umax_value > 1ULL << (63 - umax_val)) {
2156 			dst_reg->umin_value = 0;
2157 			dst_reg->umax_value = U64_MAX;
2158 		} else {
2159 			dst_reg->umin_value <<= umin_val;
2160 			dst_reg->umax_value <<= umax_val;
2161 		}
2162 		if (src_known)
2163 			dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val);
2164 		else
2165 			dst_reg->var_off = tnum_lshift(tnum_unknown, umin_val);
2166 		/* We may learn something more from the var_off */
2167 		__update_reg_bounds(dst_reg);
2168 		break;
2169 	case BPF_RSH:
2170 		if (umax_val > 63) {
2171 			/* Shifts greater than 63 are undefined.  This includes
2172 			 * shifts by a negative number.
2173 			 */
2174 			mark_reg_unknown(env, regs, insn->dst_reg);
2175 			break;
2176 		}
2177 		/* BPF_RSH is an unsigned shift, so make the appropriate casts */
2178 		if (dst_reg->smin_value < 0) {
2179 			if (umin_val) {
2180 				/* Sign bit will be cleared */
2181 				dst_reg->smin_value = 0;
2182 			} else {
2183 				/* Lost sign bit information */
2184 				dst_reg->smin_value = S64_MIN;
2185 				dst_reg->smax_value = S64_MAX;
2186 			}
2187 		} else {
2188 			dst_reg->smin_value =
2189 				(u64)(dst_reg->smin_value) >> umax_val;
2190 		}
2191 		if (src_known)
2192 			dst_reg->var_off = tnum_rshift(dst_reg->var_off,
2193 						       umin_val);
2194 		else
2195 			dst_reg->var_off = tnum_rshift(tnum_unknown, umin_val);
2196 		dst_reg->umin_value >>= umax_val;
2197 		dst_reg->umax_value >>= umin_val;
2198 		/* We may learn something more from the var_off */
2199 		__update_reg_bounds(dst_reg);
2200 		break;
2201 	default:
2202 		mark_reg_unknown(env, regs, insn->dst_reg);
2203 		break;
2204 	}
2205 
2206 	__reg_deduce_bounds(dst_reg);
2207 	__reg_bound_offset(dst_reg);
2208 	return 0;
2209 }
2210 
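/* Illustrative: ANDing a fully unknown scalar with the constant 0xff
 * takes the BPF_AND case above: tnum_and() yields var_off with
 * value == 0 and mask == 0xff, so umin_value becomes 0, umax_value
 * becomes 0xff, and __update_reg_bounds() recovers the signed range
 * [0, 0xff] from the var_off even though the signed bounds were first
 * conservatively widened.
 */
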
2211 /* Handles ALU ops other than BPF_END, BPF_NEG and BPF_MOV: computes new min/max
2212  * and var_off.
2213  */
2214 static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
2215 				   struct bpf_insn *insn)
2216 {
2217 	struct bpf_reg_state *regs = cur_regs(env), *dst_reg, *src_reg;
2218 	struct bpf_reg_state *ptr_reg = NULL, off_reg = {0};
2219 	u8 opcode = BPF_OP(insn->code);
2220 	int rc;
2221 
2222 	dst_reg = &regs[insn->dst_reg];
2223 	src_reg = NULL;
2224 	if (dst_reg->type != SCALAR_VALUE)
2225 		ptr_reg = dst_reg;
2226 	if (BPF_SRC(insn->code) == BPF_X) {
2227 		src_reg = &regs[insn->src_reg];
2228 		if (src_reg->type != SCALAR_VALUE) {
2229 			if (dst_reg->type != SCALAR_VALUE) {
2230 				/* Combining two pointers by any ALU op yields
2231 				 * an arbitrary scalar.
2232 				 */
2233 				if (!env->allow_ptr_leaks) {
2234 					verbose(env, "R%d pointer %s pointer prohibited\n",
2235 						insn->dst_reg,
2236 						bpf_alu_string[opcode >> 4]);
2237 					return -EACCES;
2238 				}
2239 				mark_reg_unknown(env, regs, insn->dst_reg);
2240 				return 0;
2241 			} else {
2242 				/* scalar += pointer
2243 				 * This is legal, but we have to reverse our
2244 				 * src/dest handling in computing the range
2245 				 */
2246 				rc = adjust_ptr_min_max_vals(env, insn,
2247 							     src_reg, dst_reg);
2248 				if (rc == -EACCES && env->allow_ptr_leaks) {
2249 					/* scalar += unknown scalar */
2250 					__mark_reg_unknown(&off_reg);
2251 					return adjust_scalar_min_max_vals(
2252 							env, insn,
2253 							dst_reg, off_reg);
2254 				}
2255 				return rc;
2256 			}
2257 		} else if (ptr_reg) {
2258 			/* pointer += scalar */
2259 			rc = adjust_ptr_min_max_vals(env, insn,
2260 						     dst_reg, src_reg);
2261 			if (rc == -EACCES && env->allow_ptr_leaks) {
2262 				/* unknown scalar += scalar */
2263 				__mark_reg_unknown(dst_reg);
2264 				return adjust_scalar_min_max_vals(
2265 						env, insn, dst_reg, *src_reg);
2266 			}
2267 			return rc;
2268 		}
2269 	} else {
2270 		/* Pretend the src is a reg with a known value, since we only
2271 		 * need to be able to read from this state.
2272 		 */
2273 		off_reg.type = SCALAR_VALUE;
2274 		__mark_reg_known(&off_reg, insn->imm);
2275 		src_reg = &off_reg;
2276 		if (ptr_reg) { /* pointer += K */
2277 			rc = adjust_ptr_min_max_vals(env, insn,
2278 						     ptr_reg, src_reg);
2279 			if (rc == -EACCES && env->allow_ptr_leaks) {
2280 				/* unknown scalar += K */
2281 				__mark_reg_unknown(dst_reg);
2282 				return adjust_scalar_min_max_vals(
2283 						env, insn, dst_reg, off_reg);
2284 			}
2285 			return rc;
2286 		}
2287 	}
2288 
2289 	/* Got here implies adding two SCALAR_VALUEs */
2290 	if (WARN_ON_ONCE(ptr_reg)) {
2291 		print_verifier_state(env, env->cur_state);
2292 		verbose(env, "verifier internal error: unexpected ptr_reg\n");
2293 		return -EINVAL;
2294 	}
2295 	if (WARN_ON(!src_reg)) {
2296 		print_verifier_state(env, env->cur_state);
2297 		verbose(env, "verifier internal error: no src_reg\n");
2298 		return -EINVAL;
2299 	}
2300 	return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg);
2301 }
2302 
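/* Illustrative: for 'r0 += r1' where r0 holds the scalar constant 5
 * and r1 is a PTR_TO_MAP_VALUE with off == 0, the 'scalar += pointer'
 * branch above swaps the operands when calling
 * adjust_ptr_min_max_vals(), so r0 ends up as a PTR_TO_MAP_VALUE with
 * fixed offset 5.
 */
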
2303 /* check validity of 32-bit and 64-bit arithmetic operations */
2304 static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
2305 {
2306 	struct bpf_reg_state *regs = cur_regs(env);
2307 	u8 opcode = BPF_OP(insn->code);
2308 	int err;
2309 
2310 	if (opcode == BPF_END || opcode == BPF_NEG) {
2311 		if (opcode == BPF_NEG) {
2312 			if (BPF_SRC(insn->code) != 0 ||
2313 			    insn->src_reg != BPF_REG_0 ||
2314 			    insn->off != 0 || insn->imm != 0) {
2315 				verbose(env, "BPF_NEG uses reserved fields\n");
2316 				return -EINVAL;
2317 			}
2318 		} else {
2319 			if (insn->src_reg != BPF_REG_0 || insn->off != 0 ||
2320 			    (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) ||
2321 			    BPF_CLASS(insn->code) == BPF_ALU64) {
2322 				verbose(env, "BPF_END uses reserved fields\n");
2323 				return -EINVAL;
2324 			}
2325 		}
2326 
2327 		/* check src operand */
2328 		err = check_reg_arg(env, insn->dst_reg, SRC_OP);
2329 		if (err)
2330 			return err;
2331 
2332 		if (is_pointer_value(env, insn->dst_reg)) {
2333 			verbose(env, "R%d pointer arithmetic prohibited\n",
2334 				insn->dst_reg);
2335 			return -EACCES;
2336 		}
2337 
2338 		/* check dest operand */
2339 		err = check_reg_arg(env, insn->dst_reg, DST_OP);
2340 		if (err)
2341 			return err;
2342 
2343 	} else if (opcode == BPF_MOV) {
2344 
2345 		if (BPF_SRC(insn->code) == BPF_X) {
2346 			if (insn->imm != 0 || insn->off != 0) {
2347 				verbose(env, "BPF_MOV uses reserved fields\n");
2348 				return -EINVAL;
2349 			}
2350 
2351 			/* check src operand */
2352 			err = check_reg_arg(env, insn->src_reg, SRC_OP);
2353 			if (err)
2354 				return err;
2355 		} else {
2356 			if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
2357 				verbose(env, "BPF_MOV uses reserved fields\n");
2358 				return -EINVAL;
2359 			}
2360 		}
2361 
2362 		/* check dest operand */
2363 		err = check_reg_arg(env, insn->dst_reg, DST_OP);
2364 		if (err)
2365 			return err;
2366 
2367 		if (BPF_SRC(insn->code) == BPF_X) {
2368 			if (BPF_CLASS(insn->code) == BPF_ALU64) {
2369 				/* case: R1 = R2
2370 				 * copy register state to dest reg
2371 				 */
2372 				regs[insn->dst_reg] = regs[insn->src_reg];
2373 				regs[insn->dst_reg].live |= REG_LIVE_WRITTEN;
2374 			} else {
2375 				/* R1 = (u32) R2 */
2376 				if (is_pointer_value(env, insn->src_reg)) {
2377 					verbose(env,
2378 						"R%d partial copy of pointer\n",
2379 						insn->src_reg);
2380 					return -EACCES;
2381 				}
2382 				mark_reg_unknown(env, regs, insn->dst_reg);
2383 				/* high 32 bits are known zero. */
2384 				regs[insn->dst_reg].var_off = tnum_cast(
2385 						regs[insn->dst_reg].var_off, 4);
2386 				__update_reg_bounds(&regs[insn->dst_reg]);
2387 			}
2388 		} else {
2389 			/* case: R = imm
2390 			 * remember the value we stored into this reg
2391 			 */
2392 			regs[insn->dst_reg].type = SCALAR_VALUE;
2393 			__mark_reg_known(regs + insn->dst_reg, insn->imm);
2394 		}
2395 
2396 	} else if (opcode > BPF_END) {
2397 		verbose(env, "invalid BPF_ALU opcode %x\n", opcode);
2398 		return -EINVAL;
2399 
2400 	} else {	/* all other ALU ops: and, sub, xor, add, ... */
2401 
2402 		if (BPF_SRC(insn->code) == BPF_X) {
2403 			if (insn->imm != 0 || insn->off != 0) {
2404 				verbose(env, "BPF_ALU uses reserved fields\n");
2405 				return -EINVAL;
2406 			}
2407 			/* check src1 operand */
2408 			err = check_reg_arg(env, insn->src_reg, SRC_OP);
2409 			if (err)
2410 				return err;
2411 		} else {
2412 			if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
2413 				verbose(env, "BPF_ALU uses reserved fields\n");
2414 				return -EINVAL;
2415 			}
2416 		}
2417 
2418 		/* check src2 operand */
2419 		err = check_reg_arg(env, insn->dst_reg, SRC_OP);
2420 		if (err)
2421 			return err;
2422 
2423 		if ((opcode == BPF_MOD || opcode == BPF_DIV) &&
2424 		    BPF_SRC(insn->code) == BPF_K && insn->imm == 0) {
2425 			verbose(env, "div by zero\n");
2426 			return -EINVAL;
2427 		}
2428 
2429 		if ((opcode == BPF_LSH || opcode == BPF_RSH ||
2430 		     opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
2431 			int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;
2432 
2433 			if (insn->imm < 0 || insn->imm >= size) {
2434 				verbose(env, "invalid shift %d\n", insn->imm);
2435 				return -EINVAL;
2436 			}
2437 		}
2438 
2439 		/* check dest operand */
2440 		err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
2441 		if (err)
2442 			return err;
2443 
2444 		return adjust_reg_min_max_vals(env, insn);
2445 	}
2446 
2447 	return 0;
2448 }
2449 
2450 static void find_good_pkt_pointers(struct bpf_verifier_state *state,
2451 				   struct bpf_reg_state *dst_reg,
2452 				   enum bpf_reg_type type,
2453 				   bool range_right_open)
2454 {
2455 	struct bpf_reg_state *regs = state->regs, *reg;
2456 	u16 new_range;
2457 	int i;
2458 
2459 	if (dst_reg->off < 0 ||
2460 	    (dst_reg->off == 0 && range_right_open))
2461 		/* This doesn't give us any range */
2462 		return;
2463 
2464 	if (dst_reg->umax_value > MAX_PACKET_OFF ||
2465 	    dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF)
2466 		/* Risk of overflow.  For instance, ptr + (1<<63) may be less
2467 		 * than pkt_end, but that's because it's also less than pkt.
2468 		 */
2469 		return;
2470 
2471 	new_range = dst_reg->off;
2472 	if (range_right_open)
2473 		new_range--;
2474 
2475 	/* Examples for register markings:
2476 	 *
2477 	 * pkt_data in dst register:
2478 	 *
2479 	 *   r2 = r3;
2480 	 *   r2 += 8;
2481 	 *   if (r2 > pkt_end) goto <handle exception>
2482 	 *   <access okay>
2483 	 *
2484 	 *   r2 = r3;
2485 	 *   r2 += 8;
2486 	 *   if (r2 < pkt_end) goto <access okay>
2487 	 *   <handle exception>
2488 	 *
2489 	 *   Where:
2490 	 *     r2 == dst_reg, pkt_end == src_reg
2491 	 *     r2=pkt(id=n,off=8,r=0)
2492 	 *     r3=pkt(id=n,off=0,r=0)
2493 	 *
2494 	 * pkt_data in src register:
2495 	 *
2496 	 *   r2 = r3;
2497 	 *   r2 += 8;
2498 	 *   if (pkt_end >= r2) goto <access okay>
2499 	 *   <handle exception>
2500 	 *
2501 	 *   r2 = r3;
2502 	 *   r2 += 8;
2503 	 *   if (pkt_end <= r2) goto <handle exception>
2504 	 *   <access okay>
2505 	 *
2506 	 *   Where:
2507 	 *     pkt_end == dst_reg, r2 == src_reg
2508 	 *     r2=pkt(id=n,off=8,r=0)
2509 	 *     r3=pkt(id=n,off=0,r=0)
2510 	 *
2511 	 * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8)
2512 	 * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8)
2513 	 * and [r3, r3 + 8-1) respectively is safe to access depending on
2514 	 * the check.
2515 	 */
2516 
2517 	/* If our ids match, then we must have the same max_value.  And we
2518 	 * don't care about the other reg's fixed offset, since if it's too big
2519 	 * the range won't allow anything.
2520 	 * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16.
2521 	 */
2522 	for (i = 0; i < MAX_BPF_REG; i++)
2523 		if (regs[i].type == type && regs[i].id == dst_reg->id)
2524 			/* keep the maximum range already checked */
2525 			regs[i].range = max(regs[i].range, new_range);
2526 
2527 	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
2528 		if (state->stack[i].slot_type[0] != STACK_SPILL)
2529 			continue;
2530 		reg = &state->stack[i].spilled_ptr;
2531 		if (reg->type == type && reg->id == dst_reg->id)
2532 			reg->range = max(reg->range, new_range);
2533 	}
2534 }
2535 
2536 /* Adjusts the register min/max values in the case that the dst_reg is the
2537  * variable register that we are working on, and src_reg is a constant or we're
2538  * simply doing a BPF_K check.
2539  * In JEQ/JNE cases we also adjust the var_off values.
2540  */
2541 static void reg_set_min_max(struct bpf_reg_state *true_reg,
2542 			    struct bpf_reg_state *false_reg, u64 val,
2543 			    u8 opcode)
2544 {
2545 	/* If the dst_reg is a pointer, we can't learn anything about its
2546 	 * variable offset from the compare (unless src_reg were a pointer into
2547 	 * the same object, but we don't bother with that).
2548 	 * Since false_reg and true_reg have the same type by construction, we
2549 	 * only need to check one of them for pointerness.
2550 	 */
2551 	if (__is_pointer_value(false, false_reg))
2552 		return;
2553 
2554 	switch (opcode) {
2555 	case BPF_JEQ:
2556 		/* If this is false then we know nothing Jon Snow, but if it is
2557 		 * true then we know for sure.
2558 		 */
2559 		__mark_reg_known(true_reg, val);
2560 		break;
2561 	case BPF_JNE:
2562 		/* If this is true we know nothing Jon Snow, but if it is false
2563 		 * we know the value for sure.
2564 		 */
2565 		__mark_reg_known(false_reg, val);
2566 		break;
2567 	case BPF_JGT:
2568 		false_reg->umax_value = min(false_reg->umax_value, val);
2569 		true_reg->umin_value = max(true_reg->umin_value, val + 1);
2570 		break;
2571 	case BPF_JSGT:
2572 		false_reg->smax_value = min_t(s64, false_reg->smax_value, val);
2573 		true_reg->smin_value = max_t(s64, true_reg->smin_value, val + 1);
2574 		break;
2575 	case BPF_JLT:
2576 		false_reg->umin_value = max(false_reg->umin_value, val);
2577 		true_reg->umax_value = min(true_reg->umax_value, val - 1);
2578 		break;
2579 	case BPF_JSLT:
2580 		false_reg->smin_value = max_t(s64, false_reg->smin_value, val);
2581 		true_reg->smax_value = min_t(s64, true_reg->smax_value, val - 1);
2582 		break;
2583 	case BPF_JGE:
2584 		false_reg->umax_value = min(false_reg->umax_value, val - 1);
2585 		true_reg->umin_value = max(true_reg->umin_value, val);
2586 		break;
2587 	case BPF_JSGE:
2588 		false_reg->smax_value = min_t(s64, false_reg->smax_value, val - 1);
2589 		true_reg->smin_value = max_t(s64, true_reg->smin_value, val);
2590 		break;
2591 	case BPF_JLE:
2592 		false_reg->umin_value = max(false_reg->umin_value, val + 1);
2593 		true_reg->umax_value = min(true_reg->umax_value, val);
2594 		break;
2595 	case BPF_JSLE:
2596 		false_reg->smin_value = max_t(s64, false_reg->smin_value, val + 1);
2597 		true_reg->smax_value = min_t(s64, true_reg->smax_value, val);
2598 		break;
2599 	default:
2600 		break;
2601 	}
2602 
2603 	__reg_deduce_bounds(false_reg);
2604 	__reg_deduce_bounds(true_reg);
2605 	/* We might have learned some bits from the bounds. */
2606 	__reg_bound_offset(false_reg);
2607 	__reg_bound_offset(true_reg);
2608 	/* Intersecting with the old var_off might have improved our bounds
2609 	 * slightly.  e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
2610 	 * then new var_off is (0; 0x7f...fc) which improves our umax.
2611 	 */
2612 	__update_reg_bounds(false_reg);
2613 	__update_reg_bounds(true_reg);
2614 }
2615 
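/* Worked example for the function above (illustrative): with r1 known
 * to lie in [0, 100] and the branch 'if r1 > 10 goto ...' (BPF_JGT
 * against constant 10), the BPF_JGT case gives the true branch
 * umin_value = 11 and the false branch umax_value = 10, so both
 * successor states carry tighter bounds than the parent state.
 */
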
2616 /* Same as above, but for the case that dst_reg holds a constant and src_reg is
2617  * the variable reg.
2618  */
2619 static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
2620 				struct bpf_reg_state *false_reg, u64 val,
2621 				u8 opcode)
2622 {
2623 	if (__is_pointer_value(false, false_reg))
2624 		return;
2625 
2626 	switch (opcode) {
2627 	case BPF_JEQ:
2628 		/* If this is false then we know nothing Jon Snow, but if it is
2629 		 * true then we know for sure.
2630 		 */
2631 		__mark_reg_known(true_reg, val);
2632 		break;
2633 	case BPF_JNE:
2634 		/* If this is true we know nothing Jon Snow, but if it is false
2635 		 * we know the value for sure.
2636 		 */
2637 		__mark_reg_known(false_reg, val);
2638 		break;
2639 	case BPF_JGT:
2640 		true_reg->umax_value = min(true_reg->umax_value, val - 1);
2641 		false_reg->umin_value = max(false_reg->umin_value, val);
2642 		break;
2643 	case BPF_JSGT:
2644 		true_reg->smax_value = min_t(s64, true_reg->smax_value, val - 1);
2645 		false_reg->smin_value = max_t(s64, false_reg->smin_value, val);
2646 		break;
2647 	case BPF_JLT:
2648 		true_reg->umin_value = max(true_reg->umin_value, val + 1);
2649 		false_reg->umax_value = min(false_reg->umax_value, val);
2650 		break;
2651 	case BPF_JSLT:
2652 		true_reg->smin_value = max_t(s64, true_reg->smin_value, val + 1);
2653 		false_reg->smax_value = min_t(s64, false_reg->smax_value, val);
2654 		break;
2655 	case BPF_JGE:
2656 		true_reg->umax_value = min(true_reg->umax_value, val);
2657 		false_reg->umin_value = max(false_reg->umin_value, val + 1);
2658 		break;
2659 	case BPF_JSGE:
2660 		true_reg->smax_value = min_t(s64, true_reg->smax_value, val);
2661 		false_reg->smin_value = max_t(s64, false_reg->smin_value, val + 1);
2662 		break;
2663 	case BPF_JLE:
2664 		true_reg->umin_value = max(true_reg->umin_value, val);
2665 		false_reg->umax_value = min(false_reg->umax_value, val - 1);
2666 		break;
2667 	case BPF_JSLE:
2668 		true_reg->smin_value = max_t(s64, true_reg->smin_value, val);
2669 		false_reg->smax_value = min_t(s64, false_reg->smax_value, val - 1);
2670 		break;
2671 	default:
2672 		break;
2673 	}
2674 
2675 	__reg_deduce_bounds(false_reg);
2676 	__reg_deduce_bounds(true_reg);
2677 	/* We might have learned some bits from the bounds. */
2678 	__reg_bound_offset(false_reg);
2679 	__reg_bound_offset(true_reg);
2680 	/* Intersecting with the old var_off might have improved our bounds
2681 	 * slightly.  e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
2682 	 * then new var_off is (0; 0x7f...fc) which improves our umax.
2683 	 */
2684 	__update_reg_bounds(false_reg);
2685 	__update_reg_bounds(true_reg);
2686 }
2687 
2688 /* Regs are known to be equal, so intersect their min/max/var_off */
2689 static void __reg_combine_min_max(struct bpf_reg_state *src_reg,
2690 				  struct bpf_reg_state *dst_reg)
2691 {
2692 	src_reg->umin_value = dst_reg->umin_value = max(src_reg->umin_value,
2693 							dst_reg->umin_value);
2694 	src_reg->umax_value = dst_reg->umax_value = min(src_reg->umax_value,
2695 							dst_reg->umax_value);
2696 	src_reg->smin_value = dst_reg->smin_value = max(src_reg->smin_value,
2697 							dst_reg->smin_value);
2698 	src_reg->smax_value = dst_reg->smax_value = min(src_reg->smax_value,
2699 							dst_reg->smax_value);
2700 	src_reg->var_off = dst_reg->var_off = tnum_intersect(src_reg->var_off,
2701 							     dst_reg->var_off);
2702 	/* We might have learned new bounds from the var_off. */
2703 	__update_reg_bounds(src_reg);
2704 	__update_reg_bounds(dst_reg);
2705 	/* We might have learned something about the sign bit. */
2706 	__reg_deduce_bounds(src_reg);
2707 	__reg_deduce_bounds(dst_reg);
2708 	/* We might have learned some bits from the bounds. */
2709 	__reg_bound_offset(src_reg);
2710 	__reg_bound_offset(dst_reg);
2711 	/* Intersecting with the old var_off might have improved our bounds
2712 	 * slightly.  e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
2713 	 * then new var_off is (0; 0x7f...fc) which improves our umax.
2714 	 */
2715 	__update_reg_bounds(src_reg);
2716 	__update_reg_bounds(dst_reg);
2717 }
2718 
2719 static void reg_combine_min_max(struct bpf_reg_state *true_src,
2720 				struct bpf_reg_state *true_dst,
2721 				struct bpf_reg_state *false_src,
2722 				struct bpf_reg_state *false_dst,
2723 				u8 opcode)
2724 {
2725 	switch (opcode) {
2726 	case BPF_JEQ:
2727 		__reg_combine_min_max(true_src, true_dst);
2728 		break;
2729 	case BPF_JNE:
2730 		__reg_combine_min_max(false_src, false_dst);
2731 		break;
2732 	}
2733 }
2734 
2735 static void mark_map_reg(struct bpf_reg_state *regs, u32 regno, u32 id,
2736 			 bool is_null)
2737 {
2738 	struct bpf_reg_state *reg = &regs[regno];
2739 
2740 	if (reg->type == PTR_TO_MAP_VALUE_OR_NULL && reg->id == id) {
2741 		/* Old offset (both fixed and variable parts) should
2742 		 * have been known-zero, because we don't allow pointer
2743 		 * arithmetic on pointers that might be NULL.
2744 		 */
2745 		if (WARN_ON_ONCE(reg->smin_value || reg->smax_value ||
2746 				 !tnum_equals_const(reg->var_off, 0) ||
2747 				 reg->off)) {
2748 			__mark_reg_known_zero(reg);
2749 			reg->off = 0;
2750 		}
2751 		if (is_null) {
2752 			reg->type = SCALAR_VALUE;
2753 		} else if (reg->map_ptr->inner_map_meta) {
2754 			reg->type = CONST_PTR_TO_MAP;
2755 			reg->map_ptr = reg->map_ptr->inner_map_meta;
2756 		} else {
2757 			reg->type = PTR_TO_MAP_VALUE;
2758 		}
2759 		/* We don't need the id from this point onwards, so reset it
2760 		 * to give state pruning a better chance
2761 		 * to take effect.
2762 		 */
2763 		reg->id = 0;
2764 	}
2765 }
2766 
2767 /* The logic is similar to find_good_pkt_pointers(), both could eventually
2768  * be folded together at some point.
2769  */
2770 static void mark_map_regs(struct bpf_verifier_state *state, u32 regno,
2771 			  bool is_null)
2772 {
2773 	struct bpf_reg_state *regs = state->regs;
2774 	u32 id = regs[regno].id;
2775 	int i;
2776 
2777 	for (i = 0; i < MAX_BPF_REG; i++)
2778 		mark_map_reg(regs, i, id, is_null);
2779 
2780 	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
2781 		if (state->stack[i].slot_type[0] != STACK_SPILL)
2782 			continue;
2783 		mark_map_reg(&state->stack[i].spilled_ptr, 0, id, is_null);
2784 	}
2785 }
2786 
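/* Illustrative: after r0 = bpf_map_lookup_elem(...), r0 has type
 * PTR_TO_MAP_VALUE_OR_NULL with a fresh id.  On 'if r0 == 0', the
 * branch where the test is true marks every register sharing that id
 * as SCALAR_VALUE (the pointer is NULL there), while the other branch
 * upgrades them to PTR_TO_MAP_VALUE, which may then be dereferenced.
 */
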
2787 static bool try_match_pkt_pointers(const struct bpf_insn *insn,
2788 				   struct bpf_reg_state *dst_reg,
2789 				   struct bpf_reg_state *src_reg,
2790 				   struct bpf_verifier_state *this_branch,
2791 				   struct bpf_verifier_state *other_branch)
2792 {
2793 	if (BPF_SRC(insn->code) != BPF_X)
2794 		return false;
2795 
2796 	switch (BPF_OP(insn->code)) {
2797 	case BPF_JGT:
2798 		if ((dst_reg->type == PTR_TO_PACKET &&
2799 		     src_reg->type == PTR_TO_PACKET_END) ||
2800 		    (dst_reg->type == PTR_TO_PACKET_META &&
2801 		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
2802 			/* pkt_data' > pkt_end, pkt_meta' > pkt_data */
2803 			find_good_pkt_pointers(this_branch, dst_reg,
2804 					       dst_reg->type, false);
2805 		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
2806 			    src_reg->type == PTR_TO_PACKET) ||
2807 			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
2808 			    src_reg->type == PTR_TO_PACKET_META)) {
2809 			/* pkt_end > pkt_data', pkt_data > pkt_meta' */
2810 			find_good_pkt_pointers(other_branch, src_reg,
2811 					       src_reg->type, true);
2812 		} else {
2813 			return false;
2814 		}
2815 		break;
2816 	case BPF_JLT:
2817 		if ((dst_reg->type == PTR_TO_PACKET &&
2818 		     src_reg->type == PTR_TO_PACKET_END) ||
2819 		    (dst_reg->type == PTR_TO_PACKET_META &&
2820 		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
2821 			/* pkt_data' < pkt_end, pkt_meta' < pkt_data */
2822 			find_good_pkt_pointers(other_branch, dst_reg,
2823 					       dst_reg->type, true);
2824 		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
2825 			    src_reg->type == PTR_TO_PACKET) ||
2826 			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
2827 			    src_reg->type == PTR_TO_PACKET_META)) {
2828 			/* pkt_end < pkt_data', pkt_data < pkt_meta' */
2829 			find_good_pkt_pointers(this_branch, src_reg,
2830 					       src_reg->type, false);
2831 		} else {
2832 			return false;
2833 		}
2834 		break;
2835 	case BPF_JGE:
2836 		if ((dst_reg->type == PTR_TO_PACKET &&
2837 		     src_reg->type == PTR_TO_PACKET_END) ||
2838 		    (dst_reg->type == PTR_TO_PACKET_META &&
2839 		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
2840 			/* pkt_data' >= pkt_end, pkt_meta' >= pkt_data */
2841 			find_good_pkt_pointers(this_branch, dst_reg,
2842 					       dst_reg->type, true);
2843 		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
2844 			    src_reg->type == PTR_TO_PACKET) ||
2845 			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
2846 			    src_reg->type == PTR_TO_PACKET_META)) {
2847 			/* pkt_end >= pkt_data', pkt_data >= pkt_meta' */
2848 			find_good_pkt_pointers(other_branch, src_reg,
2849 					       src_reg->type, false);
2850 		} else {
2851 			return false;
2852 		}
2853 		break;
2854 	case BPF_JLE:
2855 		if ((dst_reg->type == PTR_TO_PACKET &&
2856 		     src_reg->type == PTR_TO_PACKET_END) ||
2857 		    (dst_reg->type == PTR_TO_PACKET_META &&
2858 		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
2859 			/* pkt_data' <= pkt_end, pkt_meta' <= pkt_data */
2860 			find_good_pkt_pointers(other_branch, dst_reg,
2861 					       dst_reg->type, false);
2862 		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
2863 			    src_reg->type == PTR_TO_PACKET) ||
2864 			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
2865 			    src_reg->type == PTR_TO_PACKET_META)) {
2866 			/* pkt_end <= pkt_data', pkt_data <= pkt_meta' */
2867 			find_good_pkt_pointers(this_branch, src_reg,
2868 					       src_reg->type, true);
2869 		} else {
2870 			return false;
2871 		}
2872 		break;
2873 	default:
2874 		return false;
2875 	}
2876 
2877 	return true;
2878 }
2879 
2880 static int check_cond_jmp_op(struct bpf_verifier_env *env,
2881 			     struct bpf_insn *insn, int *insn_idx)
2882 {
2883 	struct bpf_verifier_state *other_branch, *this_branch = env->cur_state;
2884 	struct bpf_reg_state *regs = this_branch->regs, *dst_reg;
2885 	u8 opcode = BPF_OP(insn->code);
2886 	int err;
2887 
2888 	if (opcode > BPF_JSLE) {
2889 		verbose(env, "invalid BPF_JMP opcode %x\n", opcode);
2890 		return -EINVAL;
2891 	}
2892 
2893 	if (BPF_SRC(insn->code) == BPF_X) {
2894 		if (insn->imm != 0) {
2895 			verbose(env, "BPF_JMP uses reserved fields\n");
2896 			return -EINVAL;
2897 		}
2898 
2899 		/* check src1 operand */
2900 		err = check_reg_arg(env, insn->src_reg, SRC_OP);
2901 		if (err)
2902 			return err;
2903 
2904 		if (is_pointer_value(env, insn->src_reg)) {
2905 			verbose(env, "R%d pointer comparison prohibited\n",
2906 				insn->src_reg);
2907 			return -EACCES;
2908 		}
2909 	} else {
2910 		if (insn->src_reg != BPF_REG_0) {
2911 			verbose(env, "BPF_JMP uses reserved fields\n");
2912 			return -EINVAL;
2913 		}
2914 	}
2915 
2916 	/* check src2 operand */
2917 	err = check_reg_arg(env, insn->dst_reg, SRC_OP);
2918 	if (err)
2919 		return err;
2920 
2921 	dst_reg = &regs[insn->dst_reg];
2922 
2923 	/* detect if R == imm where R holds a known constant */
2924 	if (BPF_SRC(insn->code) == BPF_K &&
2925 	    (opcode == BPF_JEQ || opcode == BPF_JNE) &&
2926 	    dst_reg->type == SCALAR_VALUE &&
2927 	    tnum_equals_const(dst_reg->var_off, insn->imm)) {
2928 		if (opcode == BPF_JEQ) {
2929 			/* if (imm == imm) goto pc+off;
2930 			 * only follow the goto, ignore fall-through
2931 			 */
2932 			*insn_idx += insn->off;
2933 			return 0;
2934 		} else {
2935 			/* if (imm != imm) goto pc+off;
2936 			 * only follow fall-through branch, since
2937 			 * that's where the program will go
2938 			 */
2939 			return 0;
2940 		}
2941 	}
2942 
2943 	other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx);
2944 	if (!other_branch)
2945 		return -EFAULT;
2946 
2947 	/* detect if we are comparing against a constant value so we can adjust
2948 	 * our min/max values for our dst register.
2949 	 * this is only legit if both are scalars (or pointers to the same
2950 	 * object, I suppose, but we don't support that right now), because
2951 	 * otherwise the different base pointers mean the offsets aren't
2952 	 * comparable.
2953 	 */
2954 	if (BPF_SRC(insn->code) == BPF_X) {
2955 		if (dst_reg->type == SCALAR_VALUE &&
2956 		    regs[insn->src_reg].type == SCALAR_VALUE) {
2957 			if (tnum_is_const(regs[insn->src_reg].var_off))
2958 				reg_set_min_max(&other_branch->regs[insn->dst_reg],
2959 						dst_reg, regs[insn->src_reg].var_off.value,
2960 						opcode);
2961 			else if (tnum_is_const(dst_reg->var_off))
2962 				reg_set_min_max_inv(&other_branch->regs[insn->src_reg],
2963 						    &regs[insn->src_reg],
2964 						    dst_reg->var_off.value, opcode);
2965 			else if (opcode == BPF_JEQ || opcode == BPF_JNE)
2966 				/* Comparing for equality, we can combine knowledge */
2967 				reg_combine_min_max(&other_branch->regs[insn->src_reg],
2968 						    &other_branch->regs[insn->dst_reg],
2969 						    &regs[insn->src_reg],
2970 						    &regs[insn->dst_reg], opcode);
2971 		}
2972 	} else if (dst_reg->type == SCALAR_VALUE) {
2973 		reg_set_min_max(&other_branch->regs[insn->dst_reg],
2974 					dst_reg, insn->imm, opcode);
2975 	}
2976 
2977 	/* detect if R == 0 where R is returned from bpf_map_lookup_elem() */
2978 	if (BPF_SRC(insn->code) == BPF_K &&
2979 	    insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) &&
2980 	    dst_reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
2981 		/* Mark all identical map registers in each branch as either
2982 		 * safe or unknown, depending on the R == 0 or R != 0 conditional.
2983 		 */
2984 		mark_map_regs(this_branch, insn->dst_reg, opcode == BPF_JNE);
2985 		mark_map_regs(other_branch, insn->dst_reg, opcode == BPF_JEQ);
2986 	} else if (!try_match_pkt_pointers(insn, dst_reg, &regs[insn->src_reg],
2987 					   this_branch, other_branch) &&
2988 		   is_pointer_value(env, insn->dst_reg)) {
2989 		verbose(env, "R%d pointer comparison prohibited\n",
2990 			insn->dst_reg);
2991 		return -EACCES;
2992 	}
2993 	if (env->log.level)
2994 		print_verifier_state(env, this_branch);
2995 	return 0;
2996 }
2997 
2998 /* return the map pointer stored inside BPF_LD_IMM64 instruction */
2999 static struct bpf_map *ld_imm64_to_map_ptr(struct bpf_insn *insn)
3000 {
3001 	u64 imm64 = ((u64) (u32) insn[0].imm) | ((u64) (u32) insn[1].imm) << 32;
3002 
3003 	return (struct bpf_map *) (unsigned long) imm64;
3004 }
3005 
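/* Illustrative: BPF_LD_IMM64 spans two insns; insn[0].imm carries the
 * low 32 bits and insn[1].imm the high 32 bits, e.g.
 *   insn[0].imm = 0xdeadbeef, insn[1].imm = 0x00000001
 * reconstructs imm64 = 0x1deadbeef above.
 */
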
3006 /* verify BPF_LD_IMM64 instruction */
3007 static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
3008 {
3009 	struct bpf_reg_state *regs = cur_regs(env);
3010 	int err;
3011 
3012 	if (BPF_SIZE(insn->code) != BPF_DW) {
3013 		verbose(env, "invalid BPF_LD_IMM insn\n");
3014 		return -EINVAL;
3015 	}
3016 	if (insn->off != 0) {
3017 		verbose(env, "BPF_LD_IMM64 uses reserved fields\n");
3018 		return -EINVAL;
3019 	}
3020 
3021 	err = check_reg_arg(env, insn->dst_reg, DST_OP);
3022 	if (err)
3023 		return err;
3024 
3025 	if (insn->src_reg == 0) {
3026 		u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
3027 
3028 		regs[insn->dst_reg].type = SCALAR_VALUE;
3029 		__mark_reg_known(&regs[insn->dst_reg], imm);
3030 		return 0;
3031 	}
3032 
3033 	/* replace_map_fd_with_map_ptr() should have caught bad ld_imm64 */
3034 	BUG_ON(insn->src_reg != BPF_PSEUDO_MAP_FD);
3035 
3036 	regs[insn->dst_reg].type = CONST_PTR_TO_MAP;
3037 	regs[insn->dst_reg].map_ptr = ld_imm64_to_map_ptr(insn);
3038 	return 0;
3039 }
3040 
3041 static bool may_access_skb(enum bpf_prog_type type)
3042 {
3043 	switch (type) {
3044 	case BPF_PROG_TYPE_SOCKET_FILTER:
3045 	case BPF_PROG_TYPE_SCHED_CLS:
3046 	case BPF_PROG_TYPE_SCHED_ACT:
3047 		return true;
3048 	default:
3049 		return false;
3050 	}
3051 }
3052 
3053 /* verify safety of LD_ABS|LD_IND instructions:
3054  * - they can only appear in the programs where ctx == skb
3055  * - since they are wrappers of function calls, they scratch R1-R5 registers,
3056  *   preserve R6-R9, and store return value into R0
3057  *
3058  * Implicit input:
3059  *   ctx == skb == R6 == CTX
3060  *
3061  * Explicit input:
3062  *   SRC == any register
3063  *   IMM == 32-bit immediate
3064  *
3065  * Output:
3066  *   R0 - 8/16/32-bit skb data converted to cpu endianness
3067  */
3068 static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
3069 {
3070 	struct bpf_reg_state *regs = cur_regs(env);
3071 	u8 mode = BPF_MODE(insn->code);
3072 	int i, err;
3073 
3074 	if (!may_access_skb(env->prog->type)) {
3075 		verbose(env, "BPF_LD_[ABS|IND] instructions not allowed for this program type\n");
3076 		return -EINVAL;
3077 	}
3078 
3079 	if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
3080 	    BPF_SIZE(insn->code) == BPF_DW ||
3081 	    (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
3082 		verbose(env, "BPF_LD_[ABS|IND] uses reserved fields\n");
3083 		return -EINVAL;
3084 	}
3085 
3086 	/* check whether implicit source operand (register R6) is readable */
3087 	err = check_reg_arg(env, BPF_REG_6, SRC_OP);
3088 	if (err)
3089 		return err;
3090 
3091 	if (regs[BPF_REG_6].type != PTR_TO_CTX) {
3092 		verbose(env,
3093 			"at the time of BPF_LD_ABS|IND R6 != pointer to skb\n");
3094 		return -EINVAL;
3095 	}
3096 
3097 	if (mode == BPF_IND) {
3098 		/* check explicit source operand */
3099 		err = check_reg_arg(env, insn->src_reg, SRC_OP);
3100 		if (err)
3101 			return err;
3102 	}
3103 
3104 	/* reset caller saved regs to unreadable */
3105 	for (i = 0; i < CALLER_SAVED_REGS; i++) {
3106 		mark_reg_not_init(env, regs, caller_saved[i]);
3107 		check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
3108 	}
3109 
3110 	/* mark destination R0 register as readable, since it contains
3111 	 * the value fetched from the packet.
3112 	 * Already marked as written above.
3113 	 */
3114 	mark_reg_unknown(env, regs, BPF_REG_0);
3115 	return 0;
3116 }
3117 
3118 static int check_return_code(struct bpf_verifier_env *env)
3119 {
3120 	struct bpf_reg_state *reg;
3121 	struct tnum range = tnum_range(0, 1);
3122 
3123 	switch (env->prog->type) {
3124 	case BPF_PROG_TYPE_CGROUP_SKB:
3125 	case BPF_PROG_TYPE_CGROUP_SOCK:
3126 	case BPF_PROG_TYPE_SOCK_OPS:
3127 	case BPF_PROG_TYPE_CGROUP_DEVICE:
3128 		break;
3129 	default:
3130 		return 0;
3131 	}
3132 
3133 	reg = cur_regs(env) + BPF_REG_0;
3134 	if (reg->type != SCALAR_VALUE) {
3135 		verbose(env, "At program exit the register R0 is not a known value (%s)\n",
3136 			reg_type_str[reg->type]);
3137 		return -EINVAL;
3138 	}
3139 
3140 	if (!tnum_in(range, reg->var_off)) {
3141 		verbose(env, "At program exit the register R0 ");
3142 		if (!tnum_is_unknown(reg->var_off)) {
3143 			char tn_buf[48];
3144 
3145 			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3146 			verbose(env, "has value %s", tn_buf);
3147 		} else {
3148 			verbose(env, "has unknown scalar value");
3149 		}
3150 		verbose(env, " should have been 0 or 1\n");
3151 		return -EINVAL;
3152 	}
3153 	return 0;
3154 }
3155 
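/* Illustrative: a program of one of the types checked above that ends
 * with
 *	BPF_MOV64_IMM(BPF_REG_0, 1),
 *	BPF_EXIT_INSN(),
 * passes, since R0 is then the known constant 1 and tnum_range(0, 1)
 * contains it; a known return value of e.g. 2 would be rejected.
 */
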
3156 /* non-recursive DFS pseudo code
3157  * 1  procedure DFS-iterative(G,v):
3158  * 2      label v as discovered
3159  * 3      let S be a stack
3160  * 4      S.push(v)
3161  * 5      while S is not empty
3162  * 6            t <- S.pop()
3163  * 7            if t is what we're looking for:
3164  * 8                return t
3165  * 9            for all edges e in G.adjacentEdges(t) do
3166  * 10               if edge e is already labelled
3167  * 11                   continue with the next edge
3168  * 12               w <- G.adjacentVertex(t,e)
3169  * 13               if vertex w is not discovered and not explored
3170  * 14                   label e as tree-edge
3171  * 15                   label w as discovered
3172  * 16                   S.push(w)
3173  * 17                   continue at 5
3174  * 18               else if vertex w is discovered
3175  * 19                   label e as back-edge
3176  * 20               else
3177  * 21                   // vertex w is explored
3178  * 22                   label e as forward- or cross-edge
3179  * 23           label t as explored
3180  * 24           S.pop()
3181  *
3182  * convention:
3183  * 0x10 - discovered
3184  * 0x11 - discovered and fall-through edge labelled
3185  * 0x12 - discovered and fall-through and branch edges labelled
3186  * 0x20 - explored
3187  */
3188 
3189 enum {
3190 	DISCOVERED = 0x10,
3191 	EXPLORED = 0x20,
3192 	FALLTHROUGH = 1,
3193 	BRANCH = 2,
3194 };
3195 
3196 #define STATE_LIST_MARK ((struct bpf_verifier_state_list *) -1L)
3197 
3198 static int *insn_stack;	/* stack of insns to process */
3199 static int cur_stack;	/* current stack index */
3200 static int *insn_state;
3201 
3202 /* t, w, e - match pseudo-code above:
3203  * t - index of current instruction
3204  * w - next instruction
3205  * e - edge
3206  */
3207 static int push_insn(int t, int w, int e, struct bpf_verifier_env *env)
3208 {
3209 	if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH))
3210 		return 0;
3211 
3212 	if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH))
3213 		return 0;
3214 
3215 	if (w < 0 || w >= env->prog->len) {
3216 		verbose(env, "jump out of range from insn %d to %d\n", t, w);
3217 		return -EINVAL;
3218 	}
3219 
3220 	if (e == BRANCH)
3221 		/* mark branch target for state pruning */
3222 		env->explored_states[w] = STATE_LIST_MARK;
3223 
3224 	if (insn_state[w] == 0) {
3225 		/* tree-edge */
3226 		insn_state[t] = DISCOVERED | e;
3227 		insn_state[w] = DISCOVERED;
3228 		if (cur_stack >= env->prog->len)
3229 			return -E2BIG;
3230 		insn_stack[cur_stack++] = w;
3231 		return 1;
3232 	} else if ((insn_state[w] & 0xF0) == DISCOVERED) {
3233 		verbose(env, "back-edge from insn %d to %d\n", t, w);
3234 		return -EINVAL;
3235 	} else if (insn_state[w] == EXPLORED) {
3236 		/* forward- or cross-edge */
3237 		insn_state[t] = DISCOVERED | e;
3238 	} else {
3239 		verbose(env, "insn state internal bug\n");
3240 		return -EFAULT;
3241 	}
3242 	return 0;
3243 }
3244 
3245 /* non-recursive depth-first-search to detect loops in BPF program
3246  * loop == back-edge in directed graph
3247  */
3248 static int check_cfg(struct bpf_verifier_env *env)
3249 {
3250 	struct bpf_insn *insns = env->prog->insnsi;
3251 	int insn_cnt = env->prog->len;
3252 	int ret = 0;
3253 	int i, t;
3254 
3255 	insn_state = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
3256 	if (!insn_state)
3257 		return -ENOMEM;
3258 
3259 	insn_stack = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
3260 	if (!insn_stack) {
3261 		kfree(insn_state);
3262 		return -ENOMEM;
3263 	}
3264 
3265 	insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */
3266 	insn_stack[0] = 0; /* 0 is the first instruction */
3267 	cur_stack = 1;
3268 
3269 peek_stack:
3270 	if (cur_stack == 0)
3271 		goto check_state;
3272 	t = insn_stack[cur_stack - 1];
3273 
3274 	if (BPF_CLASS(insns[t].code) == BPF_JMP) {
3275 		u8 opcode = BPF_OP(insns[t].code);
3276 
3277 		if (opcode == BPF_EXIT) {
3278 			goto mark_explored;
3279 		} else if (opcode == BPF_CALL) {
3280 			ret = push_insn(t, t + 1, FALLTHROUGH, env);
3281 			if (ret == 1)
3282 				goto peek_stack;
3283 			else if (ret < 0)
3284 				goto err_free;
3285 			if (t + 1 < insn_cnt)
3286 				env->explored_states[t + 1] = STATE_LIST_MARK;
3287 		} else if (opcode == BPF_JA) {
3288 			if (BPF_SRC(insns[t].code) != BPF_K) {
3289 				ret = -EINVAL;
3290 				goto err_free;
3291 			}
3292 			/* unconditional jump with single edge */
3293 			ret = push_insn(t, t + insns[t].off + 1,
3294 					FALLTHROUGH, env);
3295 			if (ret == 1)
3296 				goto peek_stack;
3297 			else if (ret < 0)
3298 				goto err_free;
3299 			/* tell verifier to check for equivalent states
3300 			 * after every call and jump
3301 			 */
3302 			if (t + 1 < insn_cnt)
3303 				env->explored_states[t + 1] = STATE_LIST_MARK;
3304 		} else {
3305 			/* conditional jump with two edges */
3306 			env->explored_states[t] = STATE_LIST_MARK;
3307 			ret = push_insn(t, t + 1, FALLTHROUGH, env);
3308 			if (ret == 1)
3309 				goto peek_stack;
3310 			else if (ret < 0)
3311 				goto err_free;
3312 
3313 			ret = push_insn(t, t + insns[t].off + 1, BRANCH, env);
3314 			if (ret == 1)
3315 				goto peek_stack;
3316 			else if (ret < 0)
3317 				goto err_free;
3318 		}
3319 	} else {
3320 		/* all other non-branch instructions with single
3321 		 * fall-through edge
3322 		 */
3323 		ret = push_insn(t, t + 1, FALLTHROUGH, env);
3324 		if (ret == 1)
3325 			goto peek_stack;
3326 		else if (ret < 0)
3327 			goto err_free;
3328 	}
3329 
3330 mark_explored:
3331 	insn_state[t] = EXPLORED;
3332 	if (cur_stack-- <= 0) {
3333 		verbose(env, "pop stack internal bug\n");
3334 		ret = -EFAULT;
3335 		goto err_free;
3336 	}
3337 	goto peek_stack;
3338 
3339 check_state:
3340 	for (i = 0; i < insn_cnt; i++) {
3341 		if (insn_state[i] != EXPLORED) {
3342 			verbose(env, "unreachable insn %d\n", i);
3343 			ret = -EINVAL;
3344 			goto err_free;
3345 		}
3346 	}
3347 	ret = 0; /* cfg looks good */
3348 
3349 err_free:
3350 	kfree(insn_state);
3351 	kfree(insn_stack);
3352 	return ret;
3353 }
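
/* For instance (illustrative only), a program whose last insns can never
 * execute:
 *    BPF_MOV64_IMM(BPF_REG_0, 0),
 *    BPF_EXIT_INSN(),
 *    BPF_MOV64_IMM(BPF_REG_0, 1),    <- never discovered by the DFS
 *    BPF_EXIT_INSN(),
 * leaves insns 2 and 3 in state 0 (not EXPLORED), so the check_state loop
 * above reports "unreachable insn 2".
 */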
3354 
3355 /* check %cur's range satisfies %old's */
3356 static bool range_within(struct bpf_reg_state *old,
3357 			 struct bpf_reg_state *cur)
3358 {
3359 	return old->umin_value <= cur->umin_value &&
3360 	       old->umax_value >= cur->umax_value &&
3361 	       old->smin_value <= cur->smin_value &&
3362 	       old->smax_value >= cur->smax_value;
3363 }
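
/* Worked example (editor's illustration): if the old (explored) register
 * had umin=0, umax=10, smin=0, smax=10, then a current register with
 * umin=2, umax=5, smin=2, smax=5 is within range and may be pruned,
 * while one with umax=20 is not, since an access proven safe for values
 * up to 10 says nothing about 20.
 */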
3364 
3365 /* Maximum number of register states that can exist at once */
3366 #define ID_MAP_SIZE	(MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE)
3367 struct idpair {
3368 	u32 old;
3369 	u32 cur;
3370 };
3371 
3372 /* If in the old state two registers had the same id, then they need to have
3373  * the same id in the new state as well.  But that id could be different from
3374  * the old state, so we need to track the mapping from old to new ids.
3375  * Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent
3376  * regs with old id 5 must also have new id 9 for the new state to be safe.  But
3377  * regs with a different old id could still have new id 9, we don't care about
3378  * that.
3379  * So we look through our idmap to see if this old id has been seen before.  If
3380  * so, we require the new id to match; otherwise, we add the id pair to the map.
3381  */
3382 static bool check_ids(u32 old_id, u32 cur_id, struct idpair *idmap)
3383 {
3384 	unsigned int i;
3385 
3386 	for (i = 0; i < ID_MAP_SIZE; i++) {
3387 		if (!idmap[i].old) {
3388 			/* Reached an empty slot; haven't seen this id before */
3389 			idmap[i].old = old_id;
3390 			idmap[i].cur = cur_id;
3391 			return true;
3392 		}
3393 		if (idmap[i].old == old_id)
3394 			return idmap[i].cur == cur_id;
3395 	}
3396 	/* We ran out of idmap slots, which should be impossible */
3397 	WARN_ON_ONCE(1);
3398 	return false;
3399 }
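
/* Concretely (a sketch): if in the explored state r1.id == r2.id == 5,
 * e.g. because r2 was copied from r1 after a single map_lookup_elem()
 * call, then a current state where r1.id == 9 is only accepted if
 * r2.id == 9 as well; a NULL check on r1 must be known to also cover r2,
 * exactly as it did in the old state.
 */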
3400 
3401 /* Returns true if (rold safe implies rcur safe) */
3402 static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
3403 		    struct idpair *idmap)
3404 {
3405 	if (!(rold->live & REG_LIVE_READ))
3406 		/* explored state didn't use this */
3407 		return true;
3408 
3409 	if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, live)) == 0)
3410 		return true;
3411 
3412 	if (rold->type == NOT_INIT)
3413 		/* explored state can't have used this */
3414 		return true;
3415 	if (rcur->type == NOT_INIT)
3416 		return false;
3417 	switch (rold->type) {
3418 	case SCALAR_VALUE:
3419 		if (rcur->type == SCALAR_VALUE) {
3420 			/* new val must satisfy old val knowledge */
3421 			return range_within(rold, rcur) &&
3422 			       tnum_in(rold->var_off, rcur->var_off);
3423 		} else {
3424 			/* if we knew anything about the old value, the states
3425 			 * aren't equal, because we can't know anything about
3426 			 * the scalar value of the pointer in the new value.
3427 			 */
3428 			return rold->umin_value == 0 &&
3429 			       rold->umax_value == U64_MAX &&
3430 			       rold->smin_value == S64_MIN &&
3431 			       rold->smax_value == S64_MAX &&
3432 			       tnum_is_unknown(rold->var_off);
3433 		}
3434 	case PTR_TO_MAP_VALUE:
3435 		/* If the new min/max/var_off satisfy the old ones and
3436 		 * everything else matches, we are OK.
3437 		 * We don't care about the 'id' value, because nothing
3438 		 * uses it for PTR_TO_MAP_VALUE (only for ..._OR_NULL)
3439 		 */
3440 		return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 &&
3441 		       range_within(rold, rcur) &&
3442 		       tnum_in(rold->var_off, rcur->var_off);
3443 	case PTR_TO_MAP_VALUE_OR_NULL:
3444 		/* a PTR_TO_MAP_VALUE could be safe to use as a
3445 		 * PTR_TO_MAP_VALUE_OR_NULL into the same map.
3446 		 * However, if the old PTR_TO_MAP_VALUE_OR_NULL then got NULL-
3447 		 * checked, doing so could have affected others with the same
3448 		 * id, and we can't check for that because we lost the id when
3449 		 * we converted to a PTR_TO_MAP_VALUE.
3450 		 */
3451 		if (rcur->type != PTR_TO_MAP_VALUE_OR_NULL)
3452 			return false;
3453 		if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)))
3454 			return false;
3455 		/* Check our ids match any regs they're supposed to */
3456 		return check_ids(rold->id, rcur->id, idmap);
3457 	case PTR_TO_PACKET_META:
3458 	case PTR_TO_PACKET:
3459 		if (rcur->type != rold->type)
3460 			return false;
3461 		/* We must have at least as much range as the old ptr
3462 		 * did, so that any accesses which were safe before are
3463 		 * still safe.  This is true even if old range < old off,
3464 		 * since someone could have accessed through (ptr - k), or
3465 		 * even done ptr -= k in a register, to get a safe access.
3466 		 */
3467 		if (rold->range > rcur->range)
3468 			return false;
3469 		/* If the offsets don't match, we can't trust our alignment;
3470 		 * nor can we be sure that we won't fall out of range.
3471 		 */
3472 		if (rold->off != rcur->off)
3473 			return false;
3474 		/* id relations must be preserved */
3475 		if (rold->id && !check_ids(rold->id, rcur->id, idmap))
3476 			return false;
3477 		/* new val must satisfy old val knowledge */
3478 		return range_within(rold, rcur) &&
3479 		       tnum_in(rold->var_off, rcur->var_off);
3480 	case PTR_TO_CTX:
3481 	case CONST_PTR_TO_MAP:
3482 	case PTR_TO_STACK:
3483 	case PTR_TO_PACKET_END:
3484 		/* Only valid matches are exact, which memcmp() above
3485 		 * would have accepted
3486 		 */
3487 	default:
3488 		/* Don't know what's going on, just say it's not safe */
3489 		return false;
3490 	}
3491 
3492 	/* Shouldn't get here; if we do, say it's not safe */
3493 	WARN_ON_ONCE(1);
3494 	return false;
3495 }
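
/* Example of the SCALAR_VALUE case above (illustrative): if the explored
 * state proved safe with a completely unknown scalar in r3
 * (var_off = unknown tnum, full u64/s64 range), then a current state
 * where r3 is the constant 5 is trivially within that range and is safe;
 * the reverse (old r3 == 5, current r3 unknown) is rejected.
 */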
3496 
3497 static bool stacksafe(struct bpf_verifier_state *old,
3498 		      struct bpf_verifier_state *cur,
3499 		      struct idpair *idmap)
3500 {
3501 	int i, spi;
3502 
3503 	/* if the explored stack has more populated slots than the current
3504 	 * stack, such stacks are not equivalent
3505 	 */
3506 	if (old->allocated_stack > cur->allocated_stack)
3507 		return false;
3508 
3509 	/* walk slots of the explored stack and ignore any additional
3510 	 * slots in the current stack, since explored(safe) state
3511 	 * didn't use them
3512 	 */
3513 	for (i = 0; i < old->allocated_stack; i++) {
3514 		spi = i / BPF_REG_SIZE;
3515 
3516 		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID)
3517 			continue;
3518 		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] !=
3519 		    cur->stack[spi].slot_type[i % BPF_REG_SIZE])
3520 			/* Ex: old explored (safe) state has STACK_SPILL in
3521 			 * this stack slot, but current has STACK_MISC ->
3522 			 * these verifier states are not equivalent,
3523 			 * return false to continue verification of this path
3524 			 */
3525 			return false;
3526 		if (i % BPF_REG_SIZE)
3527 			continue;
3528 		if (old->stack[spi].slot_type[0] != STACK_SPILL)
3529 			continue;
3530 		if (!regsafe(&old->stack[spi].spilled_ptr,
3531 			     &cur->stack[spi].spilled_ptr,
3532 			     idmap))
3533 			/* when explored and current stack slot are both storing
3534 			 * spilled registers, check that the stored pointers' types
3535 			 * are the same as well.
3536 			 * Ex: explored safe path could have stored
3537 			 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8}
3538 			 * but current path has stored:
3539 			 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16}
3540 			 * such verifier states are not equivalent.
3541 			 * return false to continue verification of this path
3542 			 */
3543 			return false;
3544 	}
3545 	return true;
3546 }
3547 
3548 /* compare two verifier states
3549  *
3550  * all states stored in state_list are known to be valid, since
3551  * verifier reached 'bpf_exit' instruction through them
3552  *
3553  * this function is called when the verifier explores different branches of
3554  * execution popped from the state stack. If it sees an old state that has
3555  * a more strict register state and a more strict stack state, then this
3556  * execution branch doesn't need to be explored further, since the verifier
3557  * already concluded that the more strict state leads to a valid finish.
3558  *
3559  * Therefore two states are equivalent if register state is more conservative
3560  * and explored stack state is more conservative than the current one.
3561  * Example:
3562  *       explored                   current
3563  * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC)
3564  * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC)
3565  *
3566  * In other words, if the current stack state (the one being explored) has
3567  * more valid slots than the old one that already passed validation, the
3568  * verifier can stop exploring and conclude that the current state is valid too
3569  *
3570  * Similarly with registers. If the explored state has a register type marked
3571  * invalid whereas the register type in the current state is meaningful, it
3572  * means that the current state will reach the 'bpf_exit' instruction safely
3573  */
3574 static bool states_equal(struct bpf_verifier_env *env,
3575 			 struct bpf_verifier_state *old,
3576 			 struct bpf_verifier_state *cur)
3577 {
3578 	struct idpair *idmap;
3579 	bool ret = false;
3580 	int i;
3581 
3582 	idmap = kcalloc(ID_MAP_SIZE, sizeof(struct idpair), GFP_KERNEL);
3583 	/* If we failed to allocate the idmap, just say it's not safe */
3584 	if (!idmap)
3585 		return false;
3586 
3587 	for (i = 0; i < MAX_BPF_REG; i++) {
3588 		if (!regsafe(&old->regs[i], &cur->regs[i], idmap))
3589 			goto out_free;
3590 	}
3591 
3592 	if (!stacksafe(old, cur, idmap))
3593 		goto out_free;
3594 	ret = true;
3595 out_free:
3596 	kfree(idmap);
3597 	return ret;
3598 }
3599 
3600 /* A write screens off any subsequent reads; but write marks come from the
3601  * straight-line code between a state and its parent.  When we arrive at a
3602  * jump target (in the first iteration of the propagate_liveness() loop),
3603  * we didn't arrive by the straight-line code, so read marks in state must
3604  * propagate to parent regardless of state's write marks.
3605  */
3606 static bool do_propagate_liveness(const struct bpf_verifier_state *state,
3607 				  struct bpf_verifier_state *parent)
3608 {
3609 	bool writes = parent == state->parent; /* Observe write marks */
3610 	bool touched = false; /* any changes made? */
3611 	int i;
3612 
3613 	if (!parent)
3614 		return touched;
3615 	/* Propagate read liveness of registers... */
3616 	BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
3617 	/* We don't need to worry about FP liveness because it's read-only */
3618 	for (i = 0; i < BPF_REG_FP; i++) {
3619 		if (parent->regs[i].live & REG_LIVE_READ)
3620 			continue;
3621 		if (writes && (state->regs[i].live & REG_LIVE_WRITTEN))
3622 			continue;
3623 		if (state->regs[i].live & REG_LIVE_READ) {
3624 			parent->regs[i].live |= REG_LIVE_READ;
3625 			touched = true;
3626 		}
3627 	}
3628 	/* ... and stack slots */
3629 	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE &&
3630 		    i < parent->allocated_stack / BPF_REG_SIZE; i++) {
3631 		if (parent->stack[i].slot_type[0] != STACK_SPILL)
3632 			continue;
3633 		if (state->stack[i].slot_type[0] != STACK_SPILL)
3634 			continue;
3635 		if (parent->stack[i].spilled_ptr.live & REG_LIVE_READ)
3636 			continue;
3637 		if (writes &&
3638 		    (state->stack[i].spilled_ptr.live & REG_LIVE_WRITTEN))
3639 			continue;
3640 		if (state->stack[i].spilled_ptr.live & REG_LIVE_READ) {
3641 			parent->stack[i].spilled_ptr.live |= REG_LIVE_READ;
3642 			touched = true;
3643 		}
3644 	}
3645 	return touched;
3646 }
3647 
3648 /* "parent" is "a state from which we reach the current state", but initially
3649  * it is not the state->parent (i.e. "the state whose straight-line code leads
3650  * to the current state"), instead it is the state that happened to arrive at
3651  * a (prunable) equivalent of the current state.  See comment above
3652  * do_propagate_liveness() for consequences of this.
3653  * This function is just a more efficient way of calling mark_reg_read() or
3654  * mark_stack_slot_read() on each reg in "parent" that is read in "state",
3655  * though it requires that parent != state->parent in the call arguments.
3656  */
3657 static void propagate_liveness(const struct bpf_verifier_state *state,
3658 			       struct bpf_verifier_state *parent)
3659 {
3660 	while (do_propagate_liveness(state, parent)) {
3661 		/* Something changed, so we need to feed those changes onward */
3662 		state = parent;
3663 		parent = state->parent;
3664 	}
3665 }
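
/* Screening-off example (editor's sketch): in the straight-line code
 *    BPF_MOV64_IMM(BPF_REG_6, 0),          <- writes r6, REG_LIVE_WRITTEN
 *    BPF_MOV64_REG(BPF_REG_0, BPF_REG_6),  <- reads r6, REG_LIVE_READ
 * the read of r6 is satisfied by the state's own write, so the write mark
 * stops the read from propagating to the parent state; the parent's r6
 * contents were never observed and need not match for pruning.
 */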
3666 
3667 static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
3668 {
3669 	struct bpf_verifier_state_list *new_sl;
3670 	struct bpf_verifier_state_list *sl;
3671 	struct bpf_verifier_state *cur = env->cur_state;
3672 	int i, err;
3673 
3674 	sl = env->explored_states[insn_idx];
3675 	if (!sl)
3676 		/* this 'insn_idx' instruction wasn't marked, so we will not
3677 		 * be doing state search here
3678 		 */
3679 		return 0;
3680 
3681 	while (sl != STATE_LIST_MARK) {
3682 		if (states_equal(env, &sl->state, cur)) {
3683 			/* reached equivalent register/stack state,
3684 			 * prune the search.
3685 			 * Registers read by the continuation are read by us.
3686 			 * If we have any write marks in env->cur_state, they
3687 			 * will prevent corresponding reads in the continuation
3688 			 * from reaching our parent (an explored_state).  Our
3689 			 * own state will get the read marks recorded, but
3690 			 * they'll be immediately forgotten as we're pruning
3691 			 * this state and will pop a new one.
3692 			 */
3693 			propagate_liveness(&sl->state, cur);
3694 			return 1;
3695 		}
3696 		sl = sl->next;
3697 	}
3698 
3699 	/* there were no equivalent states, remember current one.
3700 	 * technically the current state is not proven to be safe yet,
3701 	 * but it will either reach bpf_exit (which means it's safe) or
3702 	 * it will be rejected. Since there are no loops, we won't be
3703 	 * seeing this 'insn_idx' instruction again on the way to bpf_exit
3704 	 */
3705 	new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL);
3706 	if (!new_sl)
3707 		return -ENOMEM;
3708 
3709 	/* add new state to the head of linked list */
3710 	err = copy_verifier_state(&new_sl->state, cur);
3711 	if (err) {
3712 		free_verifier_state(&new_sl->state, false);
3713 		kfree(new_sl);
3714 		return err;
3715 	}
3716 	new_sl->next = env->explored_states[insn_idx];
3717 	env->explored_states[insn_idx] = new_sl;
3718 	/* connect new state to parentage chain */
3719 	cur->parent = &new_sl->state;
3720 	/* clear write marks in current state: the writes we did are not writes
3721 	 * our child did, so they don't screen off its reads from us.
3722 	 * (There are no read marks in current state, because reads always mark
3723 	 * their parent and current state never has children yet.  Only
3724 	 * explored_states can get read marks.)
3725 	 */
3726 	for (i = 0; i < BPF_REG_FP; i++)
3727 		cur->regs[i].live = REG_LIVE_NONE;
3728 	for (i = 0; i < cur->allocated_stack / BPF_REG_SIZE; i++)
3729 		if (cur->stack[i].slot_type[0] == STACK_SPILL)
3730 			cur->stack[i].spilled_ptr.live = REG_LIVE_NONE;
3731 	return 0;
3732 }
3733 
3734 static int ext_analyzer_insn_hook(struct bpf_verifier_env *env,
3735 				  int insn_idx, int prev_insn_idx)
3736 {
3737 	if (env->dev_ops && env->dev_ops->insn_hook)
3738 		return env->dev_ops->insn_hook(env, insn_idx, prev_insn_idx);
3739 
3740 	return 0;
3741 }
3742 
3743 static int do_check(struct bpf_verifier_env *env)
3744 {
3745 	struct bpf_verifier_state *state;
3746 	struct bpf_insn *insns = env->prog->insnsi;
3747 	struct bpf_reg_state *regs;
3748 	int insn_cnt = env->prog->len;
3749 	int insn_idx, prev_insn_idx = 0;
3750 	int insn_processed = 0;
3751 	bool do_print_state = false;
3752 
3753 	state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL);
3754 	if (!state)
3755 		return -ENOMEM;
3756 	env->cur_state = state;
3757 	init_reg_state(env, state->regs);
3758 	state->parent = NULL;
3759 	insn_idx = 0;
3760 	for (;;) {
3761 		struct bpf_insn *insn;
3762 		u8 class;
3763 		int err;
3764 
3765 		if (insn_idx >= insn_cnt) {
3766 			verbose(env, "invalid insn idx %d insn_cnt %d\n",
3767 				insn_idx, insn_cnt);
3768 			return -EFAULT;
3769 		}
3770 
3771 		insn = &insns[insn_idx];
3772 		class = BPF_CLASS(insn->code);
3773 
3774 		if (++insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) {
3775 			verbose(env,
3776 				"BPF program is too large. Processed %d insn\n",
3777 				insn_processed);
3778 			return -E2BIG;
3779 		}
3780 
3781 		err = is_state_visited(env, insn_idx);
3782 		if (err < 0)
3783 			return err;
3784 		if (err == 1) {
3785 			/* found equivalent state, can prune the search */
3786 			if (env->log.level) {
3787 				if (do_print_state)
3788 					verbose(env, "\nfrom %d to %d: safe\n",
3789 						prev_insn_idx, insn_idx);
3790 				else
3791 					verbose(env, "%d: safe\n", insn_idx);
3792 			}
3793 			goto process_bpf_exit;
3794 		}
3795 
3796 		if (need_resched())
3797 			cond_resched();
3798 
3799 		if (env->log.level > 1 || (env->log.level && do_print_state)) {
3800 			if (env->log.level > 1)
3801 				verbose(env, "%d:", insn_idx);
3802 			else
3803 				verbose(env, "\nfrom %d to %d:",
3804 					prev_insn_idx, insn_idx);
3805 			print_verifier_state(env, state);
3806 			do_print_state = false;
3807 		}
3808 
3809 		if (env->log.level) {
3810 			verbose(env, "%d: ", insn_idx);
3811 			print_bpf_insn(verbose, env, insn,
3812 				       env->allow_ptr_leaks);
3813 		}
3814 
3815 		err = ext_analyzer_insn_hook(env, insn_idx, prev_insn_idx);
3816 		if (err)
3817 			return err;
3818 
3819 		regs = cur_regs(env);
3820 		if (class == BPF_ALU || class == BPF_ALU64) {
3821 			err = check_alu_op(env, insn);
3822 			if (err)
3823 				return err;
3824 
3825 		} else if (class == BPF_LDX) {
3826 			enum bpf_reg_type *prev_src_type, src_reg_type;
3827 
3828 			/* check for reserved fields is already done */
3829 
3830 			/* check src operand */
3831 			err = check_reg_arg(env, insn->src_reg, SRC_OP);
3832 			if (err)
3833 				return err;
3834 
3835 			err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
3836 			if (err)
3837 				return err;
3838 
3839 			src_reg_type = regs[insn->src_reg].type;
3840 
3841 			/* check that memory (src_reg + off) is readable,
3842 			 * the state of dst_reg will be updated by this func
3843 			 */
3844 			err = check_mem_access(env, insn_idx, insn->src_reg, insn->off,
3845 					       BPF_SIZE(insn->code), BPF_READ,
3846 					       insn->dst_reg);
3847 			if (err)
3848 				return err;
3849 
3850 			prev_src_type = &env->insn_aux_data[insn_idx].ptr_type;
3851 
3852 			if (*prev_src_type == NOT_INIT) {
3853 				/* saw a valid insn
3854 				 * dst_reg = *(u32 *)(src_reg + off)
3855 				 * save type to validate intersecting paths
3856 				 */
3857 				*prev_src_type = src_reg_type;
3858 
3859 			} else if (src_reg_type != *prev_src_type &&
3860 				   (src_reg_type == PTR_TO_CTX ||
3861 				    *prev_src_type == PTR_TO_CTX)) {
3862 				/* A buggy or malicious program is trying to use the same insn
3863 				 * dst_reg = *(u32*) (src_reg + off)
3864 				 * with different pointer types:
3865 				 * src_reg == ctx in one branch and
3866 				 * src_reg == stack|map in some other branch.
3867 				 * Reject it.
3868 				 */
3869 				verbose(env, "same insn cannot be used with different pointers\n");
3870 				return -EINVAL;
3871 			}
3872 
3873 		} else if (class == BPF_STX) {
3874 			enum bpf_reg_type *prev_dst_type, dst_reg_type;
3875 
3876 			if (BPF_MODE(insn->code) == BPF_XADD) {
3877 				err = check_xadd(env, insn_idx, insn);
3878 				if (err)
3879 					return err;
3880 				insn_idx++;
3881 				continue;
3882 			}
3883 
3884 			/* check src1 operand */
3885 			err = check_reg_arg(env, insn->src_reg, SRC_OP);
3886 			if (err)
3887 				return err;
3888 			/* check src2 operand */
3889 			err = check_reg_arg(env, insn->dst_reg, SRC_OP);
3890 			if (err)
3891 				return err;
3892 
3893 			dst_reg_type = regs[insn->dst_reg].type;
3894 
3895 			/* check that memory (dst_reg + off) is writeable */
3896 			err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
3897 					       BPF_SIZE(insn->code), BPF_WRITE,
3898 					       insn->src_reg);
3899 			if (err)
3900 				return err;
3901 
3902 			prev_dst_type = &env->insn_aux_data[insn_idx].ptr_type;
3903 
3904 			if (*prev_dst_type == NOT_INIT) {
3905 				*prev_dst_type = dst_reg_type;
3906 			} else if (dst_reg_type != *prev_dst_type &&
3907 				   (dst_reg_type == PTR_TO_CTX ||
3908 				    *prev_dst_type == PTR_TO_CTX)) {
3909 				verbose(env, "same insn cannot be used with different pointers\n");
3910 				return -EINVAL;
3911 			}
3912 
3913 		} else if (class == BPF_ST) {
3914 			if (BPF_MODE(insn->code) != BPF_MEM ||
3915 			    insn->src_reg != BPF_REG_0) {
3916 				verbose(env, "BPF_ST uses reserved fields\n");
3917 				return -EINVAL;
3918 			}
3919 			/* check src operand */
3920 			err = check_reg_arg(env, insn->dst_reg, SRC_OP);
3921 			if (err)
3922 				return err;
3923 
3924 			/* check that memory (dst_reg + off) is writeable */
3925 			err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
3926 					       BPF_SIZE(insn->code), BPF_WRITE,
3927 					       -1);
3928 			if (err)
3929 				return err;
3930 
3931 		} else if (class == BPF_JMP) {
3932 			u8 opcode = BPF_OP(insn->code);
3933 
3934 			if (opcode == BPF_CALL) {
3935 				if (BPF_SRC(insn->code) != BPF_K ||
3936 				    insn->off != 0 ||
3937 				    insn->src_reg != BPF_REG_0 ||
3938 				    insn->dst_reg != BPF_REG_0) {
3939 					verbose(env, "BPF_CALL uses reserved fields\n");
3940 					return -EINVAL;
3941 				}
3942 
3943 				err = check_call(env, insn->imm, insn_idx);
3944 				if (err)
3945 					return err;
3946 
3947 			} else if (opcode == BPF_JA) {
3948 				if (BPF_SRC(insn->code) != BPF_K ||
3949 				    insn->imm != 0 ||
3950 				    insn->src_reg != BPF_REG_0 ||
3951 				    insn->dst_reg != BPF_REG_0) {
3952 					verbose(env, "BPF_JA uses reserved fields\n");
3953 					return -EINVAL;
3954 				}
3955 
3956 				insn_idx += insn->off + 1;
3957 				continue;
3958 
3959 			} else if (opcode == BPF_EXIT) {
3960 				if (BPF_SRC(insn->code) != BPF_K ||
3961 				    insn->imm != 0 ||
3962 				    insn->src_reg != BPF_REG_0 ||
3963 				    insn->dst_reg != BPF_REG_0) {
3964 					verbose(env, "BPF_EXIT uses reserved fields\n");
3965 					return -EINVAL;
3966 				}
3967 
3968 				/* The eBPF calling convention is such that R0 is used
3969 				 * to return the value from the eBPF program.
3970 				 * Make sure that it's readable at the time of
3971 				 * bpf_exit, which means that the program wrote
3972 				 * something into it earlier
3973 				 */
3974 				err = check_reg_arg(env, BPF_REG_0, SRC_OP);
3975 				if (err)
3976 					return err;
3977 
3978 				if (is_pointer_value(env, BPF_REG_0)) {
3979 					verbose(env, "R0 leaks addr as return value\n");
3980 					return -EACCES;
3981 				}
3982 
3983 				err = check_return_code(env);
3984 				if (err)
3985 					return err;
3986 process_bpf_exit:
3987 				err = pop_stack(env, &prev_insn_idx, &insn_idx);
3988 				if (err < 0) {
3989 					if (err != -ENOENT)
3990 						return err;
3991 					break;
3992 				} else {
3993 					do_print_state = true;
3994 					continue;
3995 				}
3996 			} else {
3997 				err = check_cond_jmp_op(env, insn, &insn_idx);
3998 				if (err)
3999 					return err;
4000 			}
4001 		} else if (class == BPF_LD) {
4002 			u8 mode = BPF_MODE(insn->code);
4003 
4004 			if (mode == BPF_ABS || mode == BPF_IND) {
4005 				err = check_ld_abs(env, insn);
4006 				if (err)
4007 					return err;
4008 
4009 			} else if (mode == BPF_IMM) {
4010 				err = check_ld_imm(env, insn);
4011 				if (err)
4012 					return err;
4013 
4014 				insn_idx++;
4015 			} else {
4016 				verbose(env, "invalid BPF_LD mode\n");
4017 				return -EINVAL;
4018 			}
4019 		} else {
4020 			verbose(env, "unknown insn class %d\n", class);
4021 			return -EINVAL;
4022 		}
4023 
4024 		insn_idx++;
4025 	}
4026 
4027 	verbose(env, "processed %d insns, stack depth %d\n", insn_processed,
4028 		env->prog->aux->stack_depth);
4029 	return 0;
4030 }
4031 
4032 static int check_map_prealloc(struct bpf_map *map)
4033 {
4034 	return (map->map_type != BPF_MAP_TYPE_HASH &&
4035 		map->map_type != BPF_MAP_TYPE_PERCPU_HASH &&
4036 		map->map_type != BPF_MAP_TYPE_HASH_OF_MAPS) ||
4037 		!(map->map_flags & BPF_F_NO_PREALLOC);
4038 }
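
/* E.g. (illustration): a BPF_MAP_TYPE_HASH created with map_flags == 0 is
 * preallocated and passes this check; the same map created with
 * BPF_F_NO_PREALLOC allocates elements at update time and is refused for
 * perf_event programs by the caller below.
 */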
4039 
4040 static int check_map_prog_compatibility(struct bpf_verifier_env *env,
4041 					struct bpf_map *map,
4042 					struct bpf_prog *prog)
4043 
4044 {
4045 	/* Make sure that BPF_PROG_TYPE_PERF_EVENT programs only use
4046 	 * preallocated hash maps, since doing memory allocation
4047 	 * in overflow_handler can crash depending on where the NMI got
4048 	 * triggered.
4049 	 */
4050 	if (prog->type == BPF_PROG_TYPE_PERF_EVENT) {
4051 		if (!check_map_prealloc(map)) {
4052 			verbose(env, "perf_event programs can only use preallocated hash map\n");
4053 			return -EINVAL;
4054 		}
4055 		if (map->inner_map_meta &&
4056 		    !check_map_prealloc(map->inner_map_meta)) {
4057 			verbose(env, "perf_event programs can only use preallocated inner hash map\n");
4058 			return -EINVAL;
4059 		}
4060 	}
4061 	return 0;
4062 }
4063 
4064 /* look for pseudo eBPF instructions that access map FDs and
4065  * replace them with actual map pointers
4066  */
4067 static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env)
4068 {
4069 	struct bpf_insn *insn = env->prog->insnsi;
4070 	int insn_cnt = env->prog->len;
4071 	int i, j, err;
4072 
4073 	err = bpf_prog_calc_tag(env->prog);
4074 	if (err)
4075 		return err;
4076 
4077 	for (i = 0; i < insn_cnt; i++, insn++) {
4078 		if (BPF_CLASS(insn->code) == BPF_LDX &&
4079 		    (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) {
4080 			verbose(env, "BPF_LDX uses reserved fields\n");
4081 			return -EINVAL;
4082 		}
4083 
4084 		if (BPF_CLASS(insn->code) == BPF_STX &&
4085 		    ((BPF_MODE(insn->code) != BPF_MEM &&
4086 		      BPF_MODE(insn->code) != BPF_XADD) || insn->imm != 0)) {
4087 			verbose(env, "BPF_STX uses reserved fields\n");
4088 			return -EINVAL;
4089 		}
4090 
4091 		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) {
4092 			struct bpf_map *map;
4093 			struct fd f;
4094 
4095 			if (i == insn_cnt - 1 || insn[1].code != 0 ||
4096 			    insn[1].dst_reg != 0 || insn[1].src_reg != 0 ||
4097 			    insn[1].off != 0) {
4098 				verbose(env, "invalid bpf_ld_imm64 insn\n");
4099 				return -EINVAL;
4100 			}
4101 
4102 			if (insn->src_reg == 0)
4103 				/* valid generic load 64-bit imm */
4104 				goto next_insn;
4105 
4106 			if (insn->src_reg != BPF_PSEUDO_MAP_FD) {
4107 				verbose(env,
4108 					"unrecognized bpf_ld_imm64 insn\n");
4109 				return -EINVAL;
4110 			}
4111 
4112 			f = fdget(insn->imm);
4113 			map = __bpf_map_get(f);
4114 			if (IS_ERR(map)) {
4115 				verbose(env, "fd %d is not pointing to valid bpf_map\n",
4116 					insn->imm);
4117 				return PTR_ERR(map);
4118 			}
4119 
4120 			err = check_map_prog_compatibility(env, map, env->prog);
4121 			if (err) {
4122 				fdput(f);
4123 				return err;
4124 			}
4125 
4126 			/* store map pointer inside BPF_LD_IMM64 instruction */
4127 			insn[0].imm = (u32) (unsigned long) map;
4128 			insn[1].imm = ((u64) (unsigned long) map) >> 32;
4129 
4130 			/* check whether we recorded this map already */
4131 			for (j = 0; j < env->used_map_cnt; j++)
4132 				if (env->used_maps[j] == map) {
4133 					fdput(f);
4134 					goto next_insn;
4135 				}
4136 
4137 			if (env->used_map_cnt >= MAX_USED_MAPS) {
4138 				fdput(f);
4139 				return -E2BIG;
4140 			}
4141 
4142 			/* hold the map. If the program is rejected by verifier,
4143 			 * the map will be released by release_maps() or it
4144 			 * will be used by the valid program until it's unloaded
4145 			 * and all maps are released in free_bpf_prog_info()
4146 			 */
4147 			map = bpf_map_inc(map, false);
4148 			if (IS_ERR(map)) {
4149 				fdput(f);
4150 				return PTR_ERR(map);
4151 			}
4152 			env->used_maps[env->used_map_cnt++] = map;
4153 
4154 			fdput(f);
4155 next_insn:
4156 			insn++;
4157 			i++;
4158 		}
4159 	}
4160 
4161 	/* now all pseudo BPF_LD_IMM64 instructions load valid
4162 	 * 'struct bpf_map *' into a register instead of user map_fd.
4163 	 * These pointers will be used later by the verifier to validate map access.
4164 	 */
4165 	return 0;
4166 }
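
/* The pseudo instruction handled above is the pair emitted by loaders as
 * something like (sketch; BPF_LD_MAP_FD is the helper used in samples):
 *    BPF_RAW_INSN(BPF_LD | BPF_DW | BPF_IMM, BPF_REG_1,
 *                 BPF_PSEUDO_MAP_FD, 0, map_fd),
 *    BPF_RAW_INSN(0, 0, 0, 0, 0),
 * i.e. src_reg == BPF_PSEUDO_MAP_FD marks the 32-bit imm as a map fd, and
 * the two imm fields together are rewritten to hold the 64-bit
 * 'struct bpf_map *'.
 */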
4167 
4168 /* drop refcnt of maps used by the rejected program */
4169 static void release_maps(struct bpf_verifier_env *env)
4170 {
4171 	int i;
4172 
4173 	for (i = 0; i < env->used_map_cnt; i++)
4174 		bpf_map_put(env->used_maps[i]);
4175 }
4176 
4177 /* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */
4178 static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env)
4179 {
4180 	struct bpf_insn *insn = env->prog->insnsi;
4181 	int insn_cnt = env->prog->len;
4182 	int i;
4183 
4184 	for (i = 0; i < insn_cnt; i++, insn++)
4185 		if (insn->code == (BPF_LD | BPF_IMM | BPF_DW))
4186 			insn->src_reg = 0;
4187 }
4188 
4189 /* single env->prog->insnsi[off] instruction was replaced with the range
4190  * insnsi[off, off + cnt).  Adjust corresponding insn_aux_data by copying
4191  * [0, off) and [off, end) to new locations, so the patched range stays zero
4192  */
4193 static int adjust_insn_aux_data(struct bpf_verifier_env *env, u32 prog_len,
4194 				u32 off, u32 cnt)
4195 {
4196 	struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data;
4197 
4198 	if (cnt == 1)
4199 		return 0;
4200 	new_data = vzalloc(sizeof(struct bpf_insn_aux_data) * prog_len);
4201 	if (!new_data)
4202 		return -ENOMEM;
4203 	memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
4204 	memcpy(new_data + off + cnt - 1, old_data + off,
4205 	       sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
4206 	env->insn_aux_data = new_data;
4207 	vfree(old_data);
4208 	return 0;
4209 }
4210 
4211 static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off,
4212 					    const struct bpf_insn *patch, u32 len)
4213 {
4214 	struct bpf_prog *new_prog;
4215 
4216 	new_prog = bpf_patch_insn_single(env->prog, off, patch, len);
4217 	if (!new_prog)
4218 		return NULL;
4219 	if (adjust_insn_aux_data(env, new_prog->len, off, len))
4220 		return NULL;
4221 	return new_prog;
4222 }
4223 
4224 /* convert load instructions that access fields of 'struct __sk_buff'
4225  * into sequence of instructions that access fields of 'struct sk_buff'
4226  */
4227 static int convert_ctx_accesses(struct bpf_verifier_env *env)
4228 {
4229 	const struct bpf_verifier_ops *ops = env->ops;
4230 	int i, cnt, size, ctx_field_size, delta = 0;
4231 	const int insn_cnt = env->prog->len;
4232 	struct bpf_insn insn_buf[16], *insn;
4233 	struct bpf_prog *new_prog;
4234 	enum bpf_access_type type;
4235 	bool is_narrower_load;
4236 	u32 target_size;
4237 
4238 	if (ops->gen_prologue) {
4239 		cnt = ops->gen_prologue(insn_buf, env->seen_direct_write,
4240 					env->prog);
4241 		if (cnt >= ARRAY_SIZE(insn_buf)) {
4242 			verbose(env, "bpf verifier is misconfigured\n");
4243 			return -EINVAL;
4244 		} else if (cnt) {
4245 			new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt);
4246 			if (!new_prog)
4247 				return -ENOMEM;
4248 
4249 			env->prog = new_prog;
4250 			delta += cnt - 1;
4251 		}
4252 	}
4253 
4254 	if (!ops->convert_ctx_access)
4255 		return 0;
4256 
4257 	insn = env->prog->insnsi + delta;
4258 
4259 	for (i = 0; i < insn_cnt; i++, insn++) {
4260 		if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) ||
4261 		    insn->code == (BPF_LDX | BPF_MEM | BPF_H) ||
4262 		    insn->code == (BPF_LDX | BPF_MEM | BPF_W) ||
4263 		    insn->code == (BPF_LDX | BPF_MEM | BPF_DW))
4264 			type = BPF_READ;
4265 		else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) ||
4266 			 insn->code == (BPF_STX | BPF_MEM | BPF_H) ||
4267 			 insn->code == (BPF_STX | BPF_MEM | BPF_W) ||
4268 			 insn->code == (BPF_STX | BPF_MEM | BPF_DW))
4269 			type = BPF_WRITE;
4270 		else
4271 			continue;
4272 
4273 		if (env->insn_aux_data[i + delta].ptr_type != PTR_TO_CTX)
4274 			continue;
4275 
4276 		ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size;
4277 		size = BPF_LDST_BYTES(insn);
4278 
4279 		/* If the read access is a narrower load of the field,
4280 		 * convert to a 4/8-byte load, to minimize the program-type-specific
4281 		 * convert_ctx_access changes. If the conversion is successful,
4282 		 * we will apply the proper mask to the result.
4283 		 */
4284 		is_narrower_load = size < ctx_field_size;
4285 		if (is_narrower_load) {
4286 			u32 off = insn->off;
4287 			u8 size_code;
4288 
4289 			if (type == BPF_WRITE) {
4290 				verbose(env, "bpf verifier narrow ctx access misconfigured\n");
4291 				return -EINVAL;
4292 			}
4293 
4294 			size_code = BPF_H;
4295 			if (ctx_field_size == 4)
4296 				size_code = BPF_W;
4297 			else if (ctx_field_size == 8)
4298 				size_code = BPF_DW;
4299 
4300 			insn->off = off & ~(ctx_field_size - 1);
4301 			insn->code = BPF_LDX | BPF_MEM | size_code;
4302 		}
4303 
4304 		target_size = 0;
4305 		cnt = ops->convert_ctx_access(type, insn, insn_buf, env->prog,
4306 					      &target_size);
4307 		if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) ||
4308 		    (ctx_field_size && !target_size)) {
4309 			verbose(env, "bpf verifier is misconfigured\n");
4310 			return -EINVAL;
4311 		}
4312 
4313 		if (is_narrower_load && size < target_size) {
4314 			if (ctx_field_size <= 4)
4315 				insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
4316 								(1 << size * 8) - 1);
4317 			else
4318 				insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg,
4319 								(1 << size * 8) - 1);
4320 		}
4321 
4322 		new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
4323 		if (!new_prog)
4324 			return -ENOMEM;
4325 
4326 		delta += cnt - 1;
4327 
4328 		/* keep walking new program and skip insns we just inserted */
4329 		env->prog = new_prog;
4330 		insn      = new_prog->insnsi + i + delta;
4331 	}
4332 
4333 	return 0;
4334 }
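
/* Narrow-load illustration (editor's sketch): for a 1-byte read of a
 * 4-byte context field, e.g.
 *    BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, off),
 * the code above rewrites the insn to BPF_LDX | BPF_MEM | BPF_W at the
 * field-aligned offset and, since the converted access is wider than
 * requested, appends
 *    BPF_ALU32_IMM(BPF_AND, BPF_REG_0, (1 << 8) - 1)
 * so that only the requested byte reaches the program.
 */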
4335 
4336 /* fixup insn->imm field of bpf_call instructions
4337  * and inline eligible helpers as explicit sequence of BPF instructions
4338  *
4339  * this function is called after eBPF program passed verification
4340  */
4341 static int fixup_bpf_calls(struct bpf_verifier_env *env)
4342 {
4343 	struct bpf_prog *prog = env->prog;
4344 	struct bpf_insn *insn = prog->insnsi;
4345 	const struct bpf_func_proto *fn;
4346 	const int insn_cnt = prog->len;
4347 	struct bpf_insn insn_buf[16];
4348 	struct bpf_prog *new_prog;
4349 	struct bpf_map *map_ptr;
4350 	int i, cnt, delta = 0;
4351 
4352 	for (i = 0; i < insn_cnt; i++, insn++) {
4353 		if (insn->code != (BPF_JMP | BPF_CALL))
4354 			continue;
4355 
4356 		if (insn->imm == BPF_FUNC_get_route_realm)
4357 			prog->dst_needed = 1;
4358 		if (insn->imm == BPF_FUNC_get_prandom_u32)
4359 			bpf_user_rnd_init_once();
4360 		if (insn->imm == BPF_FUNC_tail_call) {
4361 			/* If we tail call into other programs, we
4362 			 * cannot make any assumptions since they can
4363 			 * be replaced dynamically during runtime in
4364 			 * the program array.
4365 			 */
4366 			prog->cb_access = 1;
4367 			env->prog->aux->stack_depth = MAX_BPF_STACK;
4368 
4369 			/* mark bpf_tail_call as different opcode to avoid
4370 			 * conditional branch in the interpreter for every normal
4371 			 * call and to prevent accidental JITing by JIT compiler
4372 			 * that doesn't support bpf_tail_call yet
4373 			 */
4374 			insn->imm = 0;
4375 			insn->code = BPF_JMP | BPF_TAIL_CALL;
4376 			continue;
4377 		}
4378 
4379 		/* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup
4380 		 * handlers are currently limited to 64 bit only.
4381 		 */
4382 		if (ebpf_jit_enabled() && BITS_PER_LONG == 64 &&
4383 		    insn->imm == BPF_FUNC_map_lookup_elem) {
4384 			map_ptr = env->insn_aux_data[i + delta].map_ptr;
4385 			if (map_ptr == BPF_MAP_PTR_POISON ||
4386 			    !map_ptr->ops->map_gen_lookup)
4387 				goto patch_call_imm;
4388 
4389 			cnt = map_ptr->ops->map_gen_lookup(map_ptr, insn_buf);
4390 			if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
4391 				verbose(env, "bpf verifier is misconfigured\n");
4392 				return -EINVAL;
4393 			}
4394 
4395 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf,
4396 						       cnt);
4397 			if (!new_prog)
4398 				return -ENOMEM;
4399 
4400 			delta += cnt - 1;
4401 
4402 			/* keep walking new program and skip insns we just inserted */
4403 			env->prog = prog = new_prog;
4404 			insn      = new_prog->insnsi + i + delta;
4405 			continue;
4406 		}
4407 
4408 		if (insn->imm == BPF_FUNC_redirect_map) {
4409 			/* Note, we cannot use prog directly as imm as subsequent
4410 			 * rewrites would still change the prog pointer. The only
4411 			 * stable address we can use is aux, which also works with
4412 			 * prog clones during blinding.
4413 			 */
4414 			u64 addr = (unsigned long)prog->aux;
4415 			struct bpf_insn r4_ld[] = {
4416 				BPF_LD_IMM64(BPF_REG_4, addr),
4417 				*insn,
4418 			};
4419 			cnt = ARRAY_SIZE(r4_ld);
4420 
4421 			new_prog = bpf_patch_insn_data(env, i + delta, r4_ld, cnt);
4422 			if (!new_prog)
4423 				return -ENOMEM;
4424 
4425 			delta    += cnt - 1;
4426 			env->prog = prog = new_prog;
4427 			insn      = new_prog->insnsi + i + delta;
4428 		}
4429 patch_call_imm:
4430 		fn = env->ops->get_func_proto(insn->imm);
4431 		/* all functions that have a prototype and that the verifier
4432 		 * allowed programs to call must be real in-kernel functions
4433 		 */
4434 		if (!fn->func) {
4435 			verbose(env,
4436 				"kernel subsystem misconfigured func %s#%d\n",
4437 				func_id_name(insn->imm), insn->imm);
4438 			return -EFAULT;
4439 		}
4440 		insn->imm = fn->func - __bpf_call_base;
4441 	}
4442 
4443 	return 0;
4444 }
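
/* After this pass, a helper call that was not inlined carries in
 * insn->imm the offset of the in-kernel function from __bpf_call_base;
 * the interpreter can then dispatch it without any per-call lookup table,
 * roughly (illustrative):
 *    ret = ((u64 (*)(u64, u64, u64, u64, u64))
 *           (__bpf_call_base + insn->imm))(r1, r2, r3, r4, r5);
 */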
4445 
4446 static void free_states(struct bpf_verifier_env *env)
4447 {
4448 	struct bpf_verifier_state_list *sl, *sln;
4449 	int i;
4450 
4451 	if (!env->explored_states)
4452 		return;
4453 
4454 	for (i = 0; i < env->prog->len; i++) {
4455 		sl = env->explored_states[i];
4456 
4457 		if (sl)
4458 			while (sl != STATE_LIST_MARK) {
4459 				sln = sl->next;
4460 				free_verifier_state(&sl->state, false);
4461 				kfree(sl);
4462 				sl = sln;
4463 			}
4464 	}
4465 
4466 	kfree(env->explored_states);
4467 }
4468 
4469 int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
4470 {
4471 	struct bpf_verifier_env *env;
4472 	struct bpf_verifer_log *log;
4473 	int ret = -EINVAL;
4474 
4475 	/* no program is valid */
4476 	if (ARRAY_SIZE(bpf_verifier_ops) == 0)
4477 		return -EINVAL;
4478 
4479 	/* 'struct bpf_verifier_env' can be global, but since it's not small,
4480 	 * allocate/free it every time bpf_check() is called
4481 	 */
4482 	env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
4483 	if (!env)
4484 		return -ENOMEM;
4485 	log = &env->log;
4486 
4487 	env->insn_aux_data = vzalloc(sizeof(struct bpf_insn_aux_data) *
4488 				     (*prog)->len);
4489 	ret = -ENOMEM;
4490 	if (!env->insn_aux_data)
4491 		goto err_free_env;
4492 	env->prog = *prog;
4493 	env->ops = bpf_verifier_ops[env->prog->type];
4494 
4495 	/* grab the mutex to protect a few globals used by the verifier */
4496 	mutex_lock(&bpf_verifier_lock);
4497 
4498 	if (attr->log_level || attr->log_buf || attr->log_size) {
4499 		/* user requested verbose verifier output
4500 		 * and supplied buffer to store the verification trace
4501 		 */
4502 		log->level = attr->log_level;
4503 		log->ubuf = (char __user *) (unsigned long) attr->log_buf;
4504 		log->len_total = attr->log_size;
4505 
4506 		ret = -EINVAL;
4507 		/* log attributes have to be sane */
4508 		if (log->len_total < 128 || log->len_total > UINT_MAX >> 8 ||
4509 		    !log->level || !log->ubuf)
4510 			goto err_unlock;
4511 	}
4512 
4513 	env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT);
4514 	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
4515 		env->strict_alignment = true;
4516 
4517 	if (env->prog->aux->offload) {
4518 		ret = bpf_prog_offload_verifier_prep(env);
4519 		if (ret)
4520 			goto err_unlock;
4521 	}
4522 
4523 	ret = replace_map_fd_with_map_ptr(env);
4524 	if (ret < 0)
4525 		goto skip_full_check;
4526 
4527 	env->explored_states = kcalloc(env->prog->len,
4528 				       sizeof(struct bpf_verifier_state_list *),
4529 				       GFP_USER);
4530 	ret = -ENOMEM;
4531 	if (!env->explored_states)
4532 		goto skip_full_check;
4533 
4534 	ret = check_cfg(env);
4535 	if (ret < 0)
4536 		goto skip_full_check;
4537 
4538 	env->allow_ptr_leaks = capable(CAP_SYS_ADMIN);
4539 
4540 	ret = do_check(env);
4541 	if (env->cur_state) {
4542 		free_verifier_state(env->cur_state, true);
4543 		env->cur_state = NULL;
4544 	}
4545 
4546 skip_full_check:
4547 	while (!pop_stack(env, NULL, NULL));
4548 	free_states(env);
4549 
4550 	if (ret == 0)
4551 		/* program is valid, convert *(u32*)(ctx + off) accesses */
4552 		ret = convert_ctx_accesses(env);
4553 
4554 	if (ret == 0)
4555 		ret = fixup_bpf_calls(env);
4556 
4557 	if (log->level && bpf_verifier_log_full(log))
4558 		ret = -ENOSPC;
4559 	if (log->level && !log->ubuf) {
4560 		ret = -EFAULT;
4561 		goto err_release_maps;
4562 	}
4563 
4564 	if (ret == 0 && env->used_map_cnt) {
4565 		/* if program passed verifier, update used_maps in bpf_prog_info */
4566 		env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt,
4567 							  sizeof(env->used_maps[0]),
4568 							  GFP_KERNEL);
4569 
4570 		if (!env->prog->aux->used_maps) {
4571 			ret = -ENOMEM;
4572 			goto err_release_maps;
4573 		}
4574 
4575 		memcpy(env->prog->aux->used_maps, env->used_maps,
4576 		       sizeof(env->used_maps[0]) * env->used_map_cnt);
4577 		env->prog->aux->used_map_cnt = env->used_map_cnt;
4578 
4579 		/* program is valid. Convert pseudo bpf_ld_imm64 into generic
4580 		 * bpf_ld_imm64 instructions
4581 		 */
4582 		convert_pseudo_ld_imm64(env);
4583 	}
4584 
4585 err_release_maps:
4586 	if (!env->prog->aux->used_maps)
4587 		/* if we didn't copy map pointers into bpf_prog_info, release
4588 		 * them now. Otherwise free_bpf_prog_info() will release them.
4589 		 */
4590 		release_maps(env);
4591 	*prog = env->prog;
4592 err_unlock:
4593 	mutex_unlock(&bpf_verifier_lock);
4594 	vfree(env->insn_aux_data);
4595 err_free_env:
4596 	kfree(env);
4597 	return ret;
4598 }
4599