xref: /openbmc/linux/kernel/bpf/verifier.c (revision 8b030a57)
1 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
2  * Copyright (c) 2016 Facebook
3  * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of version 2 of the GNU General Public
7  * License as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful, but
10  * WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12  * General Public License for more details.
13  */
14 #include <uapi/linux/btf.h>
15 #include <linux/kernel.h>
16 #include <linux/types.h>
17 #include <linux/slab.h>
18 #include <linux/bpf.h>
19 #include <linux/btf.h>
20 #include <linux/bpf_verifier.h>
21 #include <linux/filter.h>
22 #include <net/netlink.h>
23 #include <linux/file.h>
24 #include <linux/vmalloc.h>
25 #include <linux/stringify.h>
26 #include <linux/bsearch.h>
27 #include <linux/sort.h>
28 #include <linux/perf_event.h>
29 #include <linux/ctype.h>
30 
31 #include "disasm.h"
32 
33 static const struct bpf_verifier_ops * const bpf_verifier_ops[] = {
34 #define BPF_PROG_TYPE(_id, _name) \
35 	[_id] = & _name ## _verifier_ops,
36 #define BPF_MAP_TYPE(_id, _ops)
37 #include <linux/bpf_types.h>
38 #undef BPF_PROG_TYPE
39 #undef BPF_MAP_TYPE
40 };
41 
42 /* bpf_check() is a static code analyzer that walks eBPF program
43  * instruction by instruction and updates register/stack state.
44  * All paths of conditional branches are analyzed until 'bpf_exit' insn.
45  *
46  * The first pass is depth-first-search to check that the program is a DAG.
47  * It rejects the following programs:
48  * - larger than BPF_MAXINSNS insns
49  * - a loop is present (detected via a back-edge)
50  * - unreachable insns exist (shouldn't be a forest; a program = one function)
51  * - out of bounds or malformed jumps
52  * The second pass descends all possible paths from the 1st insn.
53  * Since it analyzes all paths through the program, the length of the analysis
54  * is limited to 128k insns (BPF_COMPLEXITY_LIMIT_INSNS), which may be hit even
55  * if the total number of insns is less than 4K but there are too many branches that change stack/regs.
56  * Number of 'branches to be analyzed' is limited to 1k
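 *
 * For example (an illustrative sketch, not taken from a real program), the
 * following sequence is rejected by the first pass because the unconditional
 * jump back to itself forms a back-edge, i.e. a loop:
 *    BPF_JMP_IMM(BPF_JA, 0, 0, -1),
 *    BPF_EXIT_INSN(),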
57  *
58  * On entry to each instruction, each register has a type, and the instruction
59  * changes the types of the registers depending on instruction semantics.
60  * If the instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then the type of
61  * R5 is copied to R1.
62  *
63  * All registers are 64-bit.
64  * R0    - return register
65  * R1-R5 - argument passing registers
66  * R6-R9 - callee saved registers
67  * R10   - frame pointer, read-only
68  *
69  * At the start of BPF program the register R1 contains a pointer to bpf_context
70  * and has type PTR_TO_CTX.
71  *
72  * Verifier tracks arithmetic operations on pointers, e.g.:
73  *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
74  *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
75  * 1st insn copies R10 (which has FRAME_PTR type) into R1
76  * and 2nd arithmetic instruction is pattern matched to recognize
77  * that it wants to construct a pointer to some element within the stack.
78  * So after 2nd insn, the register R1 has type PTR_TO_STACK
79  * (and -20 constant is saved for further stack bounds checking).
80  * Meaning that this reg is a pointer to stack plus known immediate constant.
81  *
82  * Most of the time the registers have SCALAR_VALUE type, which
83  * means the register has some value, but it's not a valid pointer.
84  * (like pointer plus pointer becomes SCALAR_VALUE type)
85  *
86  * When the verifier sees load or store instructions, the type of the base
87  * register can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK, PTR_TO_SOCKET.
88  * These are the four pointer types recognized by the check_mem_access() function.
89  *
90  * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value'
91  * and the range of [ptr, ptr + map's value_size) is accessible.
92  *
93  * registers used to pass values to function calls are checked against
94  * function argument constraints.
95  *
96  * ARG_PTR_TO_MAP_KEY is one such argument constraint.
97  * It means that the register type passed to this function must be
98  * PTR_TO_STACK and it will be used inside the function as
99  * 'pointer to map element key'.
100  *
101  * For example the argument constraints for bpf_map_lookup_elem():
102  *   .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
103  *   .arg1_type = ARG_CONST_MAP_PTR,
104  *   .arg2_type = ARG_PTR_TO_MAP_KEY,
105  *
106  * ret_type says that this function returns 'pointer to map elem value or null'.
107  * The function expects the 1st argument to be a const pointer to 'struct bpf_map'
108  * and the 2nd argument to be a pointer to the stack, which will be used inside
109  * the helper function as a pointer to the map element key.
110  *
111  * On the kernel side the helper function looks like:
112  * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
113  * {
114  *    struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
115  *    void *key = (void *) (unsigned long) r2;
116  *    void *value;
117  *
118  *    here kernel can access 'key' and 'map' pointers safely, knowing that
119  *    [key, key + map->key_size) bytes are valid and were initialized on
120  *    the stack of eBPF program.
121  * }
122  *
123  * Corresponding eBPF program may look like:
124  *    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),  // after this insn R2 type is FRAME_PTR
125  *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
126  *    BPF_LD_MAP_FD(BPF_REG_1, map_fd),      // after this insn R1 type is CONST_PTR_TO_MAP
127  *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
128  * here verifier looks at prototype of map_lookup_elem() and sees:
129  * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok,
130  * Now verifier knows that this map has key of R1->map_ptr->key_size bytes
131  *
132  * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far,
133  * Now verifier checks that [R2, R2 + map's key_size) are within stack limits
134  * and were initialized prior to this call.
135  * If it's ok, then verifier allows this BPF_CALL insn and looks at
136  * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets
137  * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function
138  * returns either a pointer to the map value or NULL.
139  *
140  * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
141  * insn, the register holding that pointer in the true branch changes state to
142  * PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false
143  * branch. See check_cond_jmp_op().
144  *
145  * After the call R0 is set to the return type of the function and registers R1-R5
146  * are set to NOT_INIT to indicate that they are no longer readable.
147  *
148  * The following reference types represent a potential reference to a kernel
149  * resource which, after first being allocated, must be checked and freed by
150  * the BPF program:
151  * - PTR_TO_SOCKET_OR_NULL, PTR_TO_SOCKET
152  *
153  * When the verifier sees a helper call return a reference type, it allocates a
154  * pointer id for the reference and stores it in the current function state.
155  * Similar to the way that PTR_TO_MAP_VALUE_OR_NULL is converted into
156  * PTR_TO_MAP_VALUE, PTR_TO_SOCKET_OR_NULL becomes PTR_TO_SOCKET when the type
157  * passes through a NULL-check conditional. For the branch wherein the state is
158  * changed to CONST_IMM, the verifier releases the reference.
159  *
160  * For each helper function that allocates a reference, such as
161  * bpf_sk_lookup_tcp(), there is a corresponding release function, such as
162  * bpf_sk_release(). When a reference type passes into the release function,
163  * the verifier also releases the reference. If any unchecked or unreleased
164  * reference remains at the end of the program, the verifier rejects it.
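 *
 * For example, a minimal sketch (illustrative only, argument setup for the
 * lookup helper omitted) of the expected acquire/check/release pattern:
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_sk_lookup_tcp),
 *                                       // R0 type is PTR_TO_SOCKET_OR_NULL
 *    BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), // NULL check; in the fall-through
 *                                       // branch R0 type becomes PTR_TO_SOCKET
 *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_sk_release),
 *                                       // the reference is released
 *    BPF_MOV64_IMM(BPF_REG_0, 0),
 *    BPF_EXIT_INSN(),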
165  */
166 
167 /* verifier_state + insn_idx are pushed to stack when branch is encountered */
168 struct bpf_verifier_stack_elem {
169 	/* verifier state is 'st'
170 	 * before processing instruction 'insn_idx'
171 	 * and after processing instruction 'prev_insn_idx'
172 	 */
173 	struct bpf_verifier_state st;
174 	int insn_idx;
175 	int prev_insn_idx;
176 	struct bpf_verifier_stack_elem *next;
177 };
178 
179 #define BPF_COMPLEXITY_LIMIT_INSNS	131072
180 #define BPF_COMPLEXITY_LIMIT_STACK	1024
181 #define BPF_COMPLEXITY_LIMIT_STATES	64
182 
183 #define BPF_MAP_PTR_UNPRIV	1UL
184 #define BPF_MAP_PTR_POISON	((void *)((0xeB9FUL << 1) +	\
185 					  POISON_POINTER_DELTA))
186 #define BPF_MAP_PTR(X)		((struct bpf_map *)((X) & ~BPF_MAP_PTR_UNPRIV))
187 
188 static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux)
189 {
190 	return BPF_MAP_PTR(aux->map_state) == BPF_MAP_PTR_POISON;
191 }
192 
193 static bool bpf_map_ptr_unpriv(const struct bpf_insn_aux_data *aux)
194 {
195 	return aux->map_state & BPF_MAP_PTR_UNPRIV;
196 }
197 
198 static void bpf_map_ptr_store(struct bpf_insn_aux_data *aux,
199 			      const struct bpf_map *map, bool unpriv)
200 {
201 	BUILD_BUG_ON((unsigned long)BPF_MAP_PTR_POISON & BPF_MAP_PTR_UNPRIV);
202 	unpriv |= bpf_map_ptr_unpriv(aux);
203 	aux->map_state = (unsigned long)map |
204 			 (unpriv ? BPF_MAP_PTR_UNPRIV : 0UL);
205 }
206 
207 struct bpf_call_arg_meta {
208 	struct bpf_map *map_ptr;
209 	bool raw_mode;
210 	bool pkt_access;
211 	int regno;
212 	int access_size;
213 	s64 msize_smax_value;
214 	u64 msize_umax_value;
215 	int ptr_id;
216 };
217 
218 static DEFINE_MUTEX(bpf_verifier_lock);
219 
220 static const struct bpf_line_info *
221 find_linfo(const struct bpf_verifier_env *env, u32 insn_off)
222 {
223 	const struct bpf_line_info *linfo;
224 	const struct bpf_prog *prog;
225 	u32 i, nr_linfo;
226 
227 	prog = env->prog;
228 	nr_linfo = prog->aux->nr_linfo;
229 
230 	if (!nr_linfo || insn_off >= prog->len)
231 		return NULL;
232 
233 	linfo = prog->aux->linfo;
234 	for (i = 1; i < nr_linfo; i++)
235 		if (insn_off < linfo[i].insn_off)
236 			break;
237 
238 	return &linfo[i - 1];
239 }
240 
241 void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt,
242 		       va_list args)
243 {
244 	unsigned int n;
245 
246 	n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args);
247 
248 	WARN_ONCE(n >= BPF_VERIFIER_TMP_LOG_SIZE - 1,
249 		  "verifier log line truncated - local buffer too short\n");
250 
251 	n = min(log->len_total - log->len_used - 1, n);
252 	log->kbuf[n] = '\0';
253 
254 	if (!copy_to_user(log->ubuf + log->len_used, log->kbuf, n + 1))
255 		log->len_used += n;
256 	else
257 		log->ubuf = NULL;
258 }
259 
260 /* log_level controls verbosity level of eBPF verifier.
261  * bpf_verifier_log_write() is used to dump the verification trace to the log,
262  * so the user can figure out what's wrong with the program
263  */
264 __printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
265 					   const char *fmt, ...)
266 {
267 	va_list args;
268 
269 	if (!bpf_verifier_log_needed(&env->log))
270 		return;
271 
272 	va_start(args, fmt);
273 	bpf_verifier_vlog(&env->log, fmt, args);
274 	va_end(args);
275 }
276 EXPORT_SYMBOL_GPL(bpf_verifier_log_write);
277 
278 __printf(2, 3) static void verbose(void *private_data, const char *fmt, ...)
279 {
280 	struct bpf_verifier_env *env = private_data;
281 	va_list args;
282 
283 	if (!bpf_verifier_log_needed(&env->log))
284 		return;
285 
286 	va_start(args, fmt);
287 	bpf_verifier_vlog(&env->log, fmt, args);
288 	va_end(args);
289 }
290 
291 static const char *ltrim(const char *s)
292 {
293 	while (isspace(*s))
294 		s++;
295 
296 	return s;
297 }
298 
299 __printf(3, 4) static void verbose_linfo(struct bpf_verifier_env *env,
300 					 u32 insn_off,
301 					 const char *prefix_fmt, ...)
302 {
303 	const struct bpf_line_info *linfo;
304 
305 	if (!bpf_verifier_log_needed(&env->log))
306 		return;
307 
308 	linfo = find_linfo(env, insn_off);
309 	if (!linfo || linfo == env->prev_linfo)
310 		return;
311 
312 	if (prefix_fmt) {
313 		va_list args;
314 
315 		va_start(args, prefix_fmt);
316 		bpf_verifier_vlog(&env->log, prefix_fmt, args);
317 		va_end(args);
318 	}
319 
320 	verbose(env, "%s\n",
321 		ltrim(btf_name_by_offset(env->prog->aux->btf,
322 					 linfo->line_off)));
323 
324 	env->prev_linfo = linfo;
325 }
326 
327 static bool type_is_pkt_pointer(enum bpf_reg_type type)
328 {
329 	return type == PTR_TO_PACKET ||
330 	       type == PTR_TO_PACKET_META;
331 }
332 
333 static bool reg_type_may_be_null(enum bpf_reg_type type)
334 {
335 	return type == PTR_TO_MAP_VALUE_OR_NULL ||
336 	       type == PTR_TO_SOCKET_OR_NULL;
337 }
338 
339 static bool type_is_refcounted(enum bpf_reg_type type)
340 {
341 	return type == PTR_TO_SOCKET;
342 }
343 
344 static bool type_is_refcounted_or_null(enum bpf_reg_type type)
345 {
346 	return type == PTR_TO_SOCKET || type == PTR_TO_SOCKET_OR_NULL;
347 }
348 
349 static bool reg_is_refcounted(const struct bpf_reg_state *reg)
350 {
351 	return type_is_refcounted(reg->type);
352 }
353 
354 static bool reg_is_refcounted_or_null(const struct bpf_reg_state *reg)
355 {
356 	return type_is_refcounted_or_null(reg->type);
357 }
358 
359 static bool arg_type_is_refcounted(enum bpf_arg_type type)
360 {
361 	return type == ARG_PTR_TO_SOCKET;
362 }
363 
364 /* Determine whether the function releases some resources allocated by another
365  * function call. The first reference type argument will be assumed to be
366  * released by release_reference().
367  */
368 static bool is_release_function(enum bpf_func_id func_id)
369 {
370 	return func_id == BPF_FUNC_sk_release;
371 }
372 
373 /* string representation of 'enum bpf_reg_type' */
374 static const char * const reg_type_str[] = {
375 	[NOT_INIT]		= "?",
376 	[SCALAR_VALUE]		= "inv",
377 	[PTR_TO_CTX]		= "ctx",
378 	[CONST_PTR_TO_MAP]	= "map_ptr",
379 	[PTR_TO_MAP_VALUE]	= "map_value",
380 	[PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null",
381 	[PTR_TO_STACK]		= "fp",
382 	[PTR_TO_PACKET]		= "pkt",
383 	[PTR_TO_PACKET_META]	= "pkt_meta",
384 	[PTR_TO_PACKET_END]	= "pkt_end",
385 	[PTR_TO_FLOW_KEYS]	= "flow_keys",
386 	[PTR_TO_SOCKET]		= "sock",
387 	[PTR_TO_SOCKET_OR_NULL] = "sock_or_null",
388 };
389 
390 static char slot_type_char[] = {
391 	[STACK_INVALID]	= '?',
392 	[STACK_SPILL]	= 'r',
393 	[STACK_MISC]	= 'm',
394 	[STACK_ZERO]	= '0',
395 };
396 
397 static void print_liveness(struct bpf_verifier_env *env,
398 			   enum bpf_reg_liveness live)
399 {
400 	if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN | REG_LIVE_DONE))
401 		verbose(env, "_");
402 	if (live & REG_LIVE_READ)
403 		verbose(env, "r");
404 	if (live & REG_LIVE_WRITTEN)
405 		verbose(env, "w");
406 	if (live & REG_LIVE_DONE)
407 		verbose(env, "D");
408 }
409 
410 static struct bpf_func_state *func(struct bpf_verifier_env *env,
411 				   const struct bpf_reg_state *reg)
412 {
413 	struct bpf_verifier_state *cur = env->cur_state;
414 
415 	return cur->frame[reg->frameno];
416 }
417 
418 static void print_verifier_state(struct bpf_verifier_env *env,
419 				 const struct bpf_func_state *state)
420 {
421 	const struct bpf_reg_state *reg;
422 	enum bpf_reg_type t;
423 	int i;
424 
425 	if (state->frameno)
426 		verbose(env, " frame%d:", state->frameno);
427 	for (i = 0; i < MAX_BPF_REG; i++) {
428 		reg = &state->regs[i];
429 		t = reg->type;
430 		if (t == NOT_INIT)
431 			continue;
432 		verbose(env, " R%d", i);
433 		print_liveness(env, reg->live);
434 		verbose(env, "=%s", reg_type_str[t]);
435 		if ((t == SCALAR_VALUE || t == PTR_TO_STACK) &&
436 		    tnum_is_const(reg->var_off)) {
437 			/* reg->off should be 0 for SCALAR_VALUE */
438 			verbose(env, "%lld", reg->var_off.value + reg->off);
439 			if (t == PTR_TO_STACK)
440 				verbose(env, ",call_%d", func(env, reg)->callsite);
441 		} else {
442 			verbose(env, "(id=%d", reg->id);
443 			if (t != SCALAR_VALUE)
444 				verbose(env, ",off=%d", reg->off);
445 			if (type_is_pkt_pointer(t))
446 				verbose(env, ",r=%d", reg->range);
447 			else if (t == CONST_PTR_TO_MAP ||
448 				 t == PTR_TO_MAP_VALUE ||
449 				 t == PTR_TO_MAP_VALUE_OR_NULL)
450 				verbose(env, ",ks=%d,vs=%d",
451 					reg->map_ptr->key_size,
452 					reg->map_ptr->value_size);
453 			if (tnum_is_const(reg->var_off)) {
454 				/* Typically an immediate SCALAR_VALUE, but
455 				 * could be a pointer whose offset is too big
456 				 * for reg->off
457 				 */
458 				verbose(env, ",imm=%llx", reg->var_off.value);
459 			} else {
460 				if (reg->smin_value != reg->umin_value &&
461 				    reg->smin_value != S64_MIN)
462 					verbose(env, ",smin_value=%lld",
463 						(long long)reg->smin_value);
464 				if (reg->smax_value != reg->umax_value &&
465 				    reg->smax_value != S64_MAX)
466 					verbose(env, ",smax_value=%lld",
467 						(long long)reg->smax_value);
468 				if (reg->umin_value != 0)
469 					verbose(env, ",umin_value=%llu",
470 						(unsigned long long)reg->umin_value);
471 				if (reg->umax_value != U64_MAX)
472 					verbose(env, ",umax_value=%llu",
473 						(unsigned long long)reg->umax_value);
474 				if (!tnum_is_unknown(reg->var_off)) {
475 					char tn_buf[48];
476 
477 					tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
478 					verbose(env, ",var_off=%s", tn_buf);
479 				}
480 			}
481 			verbose(env, ")");
482 		}
483 	}
484 	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
485 		char types_buf[BPF_REG_SIZE + 1];
486 		bool valid = false;
487 		int j;
488 
489 		for (j = 0; j < BPF_REG_SIZE; j++) {
490 			if (state->stack[i].slot_type[j] != STACK_INVALID)
491 				valid = true;
492 			types_buf[j] = slot_type_char[
493 					state->stack[i].slot_type[j]];
494 		}
495 		types_buf[BPF_REG_SIZE] = 0;
496 		if (!valid)
497 			continue;
498 		verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
499 		print_liveness(env, state->stack[i].spilled_ptr.live);
500 		if (state->stack[i].slot_type[0] == STACK_SPILL)
501 			verbose(env, "=%s",
502 				reg_type_str[state->stack[i].spilled_ptr.type]);
503 		else
504 			verbose(env, "=%s", types_buf);
505 	}
506 	if (state->acquired_refs && state->refs[0].id) {
507 		verbose(env, " refs=%d", state->refs[0].id);
508 		for (i = 1; i < state->acquired_refs; i++)
509 			if (state->refs[i].id)
510 				verbose(env, ",%d", state->refs[i].id);
511 	}
512 	verbose(env, "\n");
513 }
514 
515 #define COPY_STATE_FN(NAME, COUNT, FIELD, SIZE)				\
516 static int copy_##NAME##_state(struct bpf_func_state *dst,		\
517 			       const struct bpf_func_state *src)	\
518 {									\
519 	if (!src->FIELD)						\
520 		return 0;						\
521 	if (WARN_ON_ONCE(dst->COUNT < src->COUNT)) {			\
522 		/* internal bug, make state invalid to reject the program */ \
523 		memset(dst, 0, sizeof(*dst));				\
524 		return -EFAULT;						\
525 	}								\
526 	memcpy(dst->FIELD, src->FIELD,					\
527 	       sizeof(*src->FIELD) * (src->COUNT / SIZE));		\
528 	return 0;							\
529 }
530 /* copy_reference_state() */
531 COPY_STATE_FN(reference, acquired_refs, refs, 1)
532 /* copy_stack_state() */
533 COPY_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE)
534 #undef COPY_STATE_FN
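
/* For reference, COPY_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE)
 * above expands (roughly) to:
 *
 *	static int copy_stack_state(struct bpf_func_state *dst,
 *				    const struct bpf_func_state *src)
 *	{
 *		if (!src->stack)
 *			return 0;
 *		if (WARN_ON_ONCE(dst->allocated_stack < src->allocated_stack)) {
 *			memset(dst, 0, sizeof(*dst));
 *			return -EFAULT;
 *		}
 *		memcpy(dst->stack, src->stack,
 *		       sizeof(*src->stack) * (src->allocated_stack / BPF_REG_SIZE));
 *		return 0;
 *	}
 */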
535 
536 #define REALLOC_STATE_FN(NAME, COUNT, FIELD, SIZE)			\
537 static int realloc_##NAME##_state(struct bpf_func_state *state, int size, \
538 				  bool copy_old)			\
539 {									\
540 	u32 old_size = state->COUNT;					\
541 	struct bpf_##NAME##_state *new_##FIELD;				\
542 	int slot = size / SIZE;						\
543 									\
544 	if (size <= old_size || !size) {				\
545 		if (copy_old)						\
546 			return 0;					\
547 		state->COUNT = slot * SIZE;				\
548 		if (!size && old_size) {				\
549 			kfree(state->FIELD);				\
550 			state->FIELD = NULL;				\
551 		}							\
552 		return 0;						\
553 	}								\
554 	new_##FIELD = kmalloc_array(slot, sizeof(struct bpf_##NAME##_state), \
555 				    GFP_KERNEL);			\
556 	if (!new_##FIELD)						\
557 		return -ENOMEM;						\
558 	if (copy_old) {							\
559 		if (state->FIELD)					\
560 			memcpy(new_##FIELD, state->FIELD,		\
561 			       sizeof(*new_##FIELD) * (old_size / SIZE)); \
562 		memset(new_##FIELD + old_size / SIZE, 0,		\
563 		       sizeof(*new_##FIELD) * (size - old_size) / SIZE); \
564 	}								\
565 	state->COUNT = slot * SIZE;					\
566 	kfree(state->FIELD);						\
567 	state->FIELD = new_##FIELD;					\
568 	return 0;							\
569 }
570 /* realloc_reference_state() */
571 REALLOC_STATE_FN(reference, acquired_refs, refs, 1)
572 /* realloc_stack_state() */
573 REALLOC_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE)
574 #undef REALLOC_STATE_FN
575 
576 /* do_check() starts with a zero-sized stack in struct bpf_verifier_state to
577  * make it consume a minimal amount of memory. check_stack_write() accesses
578  * from the program call into realloc_func_state() to grow the stack size.
579  * Note there is a non-zero 'parent' pointer inside bpf_verifier_state
580  * which realloc_stack_state() copies over. It points to the previous
581  * bpf_verifier_state, which is never reallocated.
582  */
583 static int realloc_func_state(struct bpf_func_state *state, int stack_size,
584 			      int refs_size, bool copy_old)
585 {
586 	int err = realloc_reference_state(state, refs_size, copy_old);
587 	if (err)
588 		return err;
589 	return realloc_stack_state(state, stack_size, copy_old);
590 }
591 
592 /* Acquire a pointer id from the env and update the state->refs to include
593  * this new pointer reference.
594  * On success, returns a valid pointer id to associate with the register.
595  * On failure, returns a negative errno.
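 *
 * Illustrative caller pattern (a sketch only, not the exact code used
 * elsewhere in this file):
 *
 *	id = acquire_reference_state(env, insn_idx);
 *	if (id < 0)
 *		return id;
 *	regs[BPF_REG_0].id = id;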
596  */
597 static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx)
598 {
599 	struct bpf_func_state *state = cur_func(env);
600 	int new_ofs = state->acquired_refs;
601 	int id, err;
602 
603 	err = realloc_reference_state(state, state->acquired_refs + 1, true);
604 	if (err)
605 		return err;
606 	id = ++env->id_gen;
607 	state->refs[new_ofs].id = id;
608 	state->refs[new_ofs].insn_idx = insn_idx;
609 
610 	return id;
611 }
612 
613 /* release function corresponding to acquire_reference_state(). Idempotent. */
614 static int __release_reference_state(struct bpf_func_state *state, int ptr_id)
615 {
616 	int i, last_idx;
617 
618 	if (!ptr_id)
619 		return -EFAULT;
620 
621 	last_idx = state->acquired_refs - 1;
622 	for (i = 0; i < state->acquired_refs; i++) {
623 		if (state->refs[i].id == ptr_id) {
624 			if (last_idx && i != last_idx)
625 				memcpy(&state->refs[i], &state->refs[last_idx],
626 				       sizeof(*state->refs));
627 			memset(&state->refs[last_idx], 0, sizeof(*state->refs));
628 			state->acquired_refs--;
629 			return 0;
630 		}
631 	}
632 	return -EFAULT;
633 }
634 
635 /* variation on the above for cases where we expect that there must be an
636  * outstanding reference for the specified ptr_id.
637  */
638 static int release_reference_state(struct bpf_verifier_env *env, int ptr_id)
639 {
640 	struct bpf_func_state *state = cur_func(env);
641 	int err;
642 
643 	err = __release_reference_state(state, ptr_id);
644 	if (WARN_ON_ONCE(err != 0))
645 		verbose(env, "verifier internal error: can't release reference\n");
646 	return err;
647 }
648 
649 static int transfer_reference_state(struct bpf_func_state *dst,
650 				    struct bpf_func_state *src)
651 {
652 	int err = realloc_reference_state(dst, src->acquired_refs, false);
653 	if (err)
654 		return err;
655 	err = copy_reference_state(dst, src);
656 	if (err)
657 		return err;
658 	return 0;
659 }
660 
661 static void free_func_state(struct bpf_func_state *state)
662 {
663 	if (!state)
664 		return;
665 	kfree(state->refs);
666 	kfree(state->stack);
667 	kfree(state);
668 }
669 
670 static void free_verifier_state(struct bpf_verifier_state *state,
671 				bool free_self)
672 {
673 	int i;
674 
675 	for (i = 0; i <= state->curframe; i++) {
676 		free_func_state(state->frame[i]);
677 		state->frame[i] = NULL;
678 	}
679 	if (free_self)
680 		kfree(state);
681 }
682 
683 /* copy verifier state from src to dst growing dst stack space
684  * when necessary to accommodate larger src stack
685  */
686 static int copy_func_state(struct bpf_func_state *dst,
687 			   const struct bpf_func_state *src)
688 {
689 	int err;
690 
691 	err = realloc_func_state(dst, src->allocated_stack, src->acquired_refs,
692 				 false);
693 	if (err)
694 		return err;
695 	memcpy(dst, src, offsetof(struct bpf_func_state, acquired_refs));
696 	err = copy_reference_state(dst, src);
697 	if (err)
698 		return err;
699 	return copy_stack_state(dst, src);
700 }
701 
702 static int copy_verifier_state(struct bpf_verifier_state *dst_state,
703 			       const struct bpf_verifier_state *src)
704 {
705 	struct bpf_func_state *dst;
706 	int i, err;
707 
708 	/* if dst has more stack frames than src, free them */
709 	for (i = src->curframe + 1; i <= dst_state->curframe; i++) {
710 		free_func_state(dst_state->frame[i]);
711 		dst_state->frame[i] = NULL;
712 	}
713 	dst_state->speculative = src->speculative;
714 	dst_state->curframe = src->curframe;
715 	for (i = 0; i <= src->curframe; i++) {
716 		dst = dst_state->frame[i];
717 		if (!dst) {
718 			dst = kzalloc(sizeof(*dst), GFP_KERNEL);
719 			if (!dst)
720 				return -ENOMEM;
721 			dst_state->frame[i] = dst;
722 		}
723 		err = copy_func_state(dst, src->frame[i]);
724 		if (err)
725 			return err;
726 	}
727 	return 0;
728 }
729 
730 static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx,
731 		     int *insn_idx)
732 {
733 	struct bpf_verifier_state *cur = env->cur_state;
734 	struct bpf_verifier_stack_elem *elem, *head = env->head;
735 	int err;
736 
737 	if (env->head == NULL)
738 		return -ENOENT;
739 
740 	if (cur) {
741 		err = copy_verifier_state(cur, &head->st);
742 		if (err)
743 			return err;
744 	}
745 	if (insn_idx)
746 		*insn_idx = head->insn_idx;
747 	if (prev_insn_idx)
748 		*prev_insn_idx = head->prev_insn_idx;
749 	elem = head->next;
750 	free_verifier_state(&head->st, false);
751 	kfree(head);
752 	env->head = elem;
753 	env->stack_size--;
754 	return 0;
755 }
756 
757 static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
758 					     int insn_idx, int prev_insn_idx,
759 					     bool speculative)
760 {
761 	struct bpf_verifier_state *cur = env->cur_state;
762 	struct bpf_verifier_stack_elem *elem;
763 	int err;
764 
765 	elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL);
766 	if (!elem)
767 		goto err;
768 
769 	elem->insn_idx = insn_idx;
770 	elem->prev_insn_idx = prev_insn_idx;
771 	elem->next = env->head;
772 	env->head = elem;
773 	env->stack_size++;
774 	err = copy_verifier_state(&elem->st, cur);
775 	if (err)
776 		goto err;
777 	elem->st.speculative |= speculative;
778 	if (env->stack_size > BPF_COMPLEXITY_LIMIT_STACK) {
779 		verbose(env, "BPF program is too complex\n");
780 		goto err;
781 	}
782 	return &elem->st;
783 err:
784 	free_verifier_state(env->cur_state, true);
785 	env->cur_state = NULL;
786 	/* pop all elements and return */
787 	while (!pop_stack(env, NULL, NULL));
788 	return NULL;
789 }
790 
791 #define CALLER_SAVED_REGS 6
792 static const int caller_saved[CALLER_SAVED_REGS] = {
793 	BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
794 };
795 
796 static void __mark_reg_not_init(struct bpf_reg_state *reg);
797 
798 /* Mark the unknown part of a register (variable offset or scalar value) as
799  * known to have the value @imm.
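 *
 * E.g. __mark_reg_known(reg, 5) leaves var_off = tnum_const(5) (all bits
 * known) and smin/smax/umin/umax all equal to 5.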
800  */
801 static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm)
802 {
803 	/* Clear id, off, and union(map_ptr, range) */
804 	memset(((u8 *)reg) + sizeof(reg->type), 0,
805 	       offsetof(struct bpf_reg_state, var_off) - sizeof(reg->type));
806 	reg->var_off = tnum_const(imm);
807 	reg->smin_value = (s64)imm;
808 	reg->smax_value = (s64)imm;
809 	reg->umin_value = imm;
810 	reg->umax_value = imm;
811 }
812 
813 /* Mark the 'variable offset' part of a register as zero.  This should be
814  * used only on registers holding a pointer type.
815  */
816 static void __mark_reg_known_zero(struct bpf_reg_state *reg)
817 {
818 	__mark_reg_known(reg, 0);
819 }
820 
821 static void __mark_reg_const_zero(struct bpf_reg_state *reg)
822 {
823 	__mark_reg_known(reg, 0);
824 	reg->type = SCALAR_VALUE;
825 }
826 
827 static void mark_reg_known_zero(struct bpf_verifier_env *env,
828 				struct bpf_reg_state *regs, u32 regno)
829 {
830 	if (WARN_ON(regno >= MAX_BPF_REG)) {
831 		verbose(env, "mark_reg_known_zero(regs, %u)\n", regno);
832 		/* Something bad happened, let's kill all regs */
833 		for (regno = 0; regno < MAX_BPF_REG; regno++)
834 			__mark_reg_not_init(regs + regno);
835 		return;
836 	}
837 	__mark_reg_known_zero(regs + regno);
838 }
839 
840 static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg)
841 {
842 	return type_is_pkt_pointer(reg->type);
843 }
844 
845 static bool reg_is_pkt_pointer_any(const struct bpf_reg_state *reg)
846 {
847 	return reg_is_pkt_pointer(reg) ||
848 	       reg->type == PTR_TO_PACKET_END;
849 }
850 
851 /* Unmodified PTR_TO_PACKET[_META,_END] register from ctx access. */
852 static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg,
853 				    enum bpf_reg_type which)
854 {
855 	/* The register can already have a range from prior markings.
856 	 * This is fine as long as it hasn't been advanced from its
857 	 * origin.
858 	 */
859 	return reg->type == which &&
860 	       reg->id == 0 &&
861 	       reg->off == 0 &&
862 	       tnum_equals_const(reg->var_off, 0);
863 }
864 
865 /* Attempts to improve min/max values based on var_off information */
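/* Worked example (illustrative): for var_off = (value = 0x0, mask = 0x3),
 * i.e. only the two low bits are unknown, the rules below tighten a
 * previously unbounded register to
 *   umin_value = 0, umax_value = 3, smin_value = 0, smax_value = 3.
 */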
866 static void __update_reg_bounds(struct bpf_reg_state *reg)
867 {
868 	/* min signed is max(sign bit) | min(other bits) */
869 	reg->smin_value = max_t(s64, reg->smin_value,
870 				reg->var_off.value | (reg->var_off.mask & S64_MIN));
871 	/* max signed is min(sign bit) | max(other bits) */
872 	reg->smax_value = min_t(s64, reg->smax_value,
873 				reg->var_off.value | (reg->var_off.mask & S64_MAX));
874 	reg->umin_value = max(reg->umin_value, reg->var_off.value);
875 	reg->umax_value = min(reg->umax_value,
876 			      reg->var_off.value | reg->var_off.mask);
877 }
878 
879 /* Uses signed min/max values to inform unsigned, and vice-versa */
880 static void __reg_deduce_bounds(struct bpf_reg_state *reg)
881 {
882 	/* Learn sign from signed bounds.
883 	 * If we cannot cross the sign boundary, then signed and unsigned bounds
884 	 * are the same, so combine.  This works even in the negative case, e.g.
885 	 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
886 	 */
887 	if (reg->smin_value >= 0 || reg->smax_value < 0) {
888 		reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
889 							  reg->umin_value);
890 		reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
891 							  reg->umax_value);
892 		return;
893 	}
894 	/* Learn sign from unsigned bounds.  Signed bounds cross the sign
895 	 * boundary, so we must be careful.
896 	 */
897 	if ((s64)reg->umax_value >= 0) {
898 		/* Positive.  We can't learn anything from the smin, but smax
899 		 * is positive, hence safe.
900 		 */
901 		reg->smin_value = reg->umin_value;
902 		reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
903 							  reg->umax_value);
904 	} else if ((s64)reg->umin_value < 0) {
905 		/* Negative.  We can't learn anything from the smax, but smin
906 		 * is negative, hence safe.
907 		 */
908 		reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
909 							  reg->umin_value);
910 		reg->smax_value = reg->umax_value;
911 	}
912 }
913 
914 /* Attempts to improve var_off based on unsigned min/max information */
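/* E.g. (illustrative) with umin_value = 4 and umax_value = 7, tnum_range()
 * yields (value = 0x4, mask = 0x3), so intersecting with var_off makes bit 2
 * a known '1' while the two low bits stay unknown.
 */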
915 static void __reg_bound_offset(struct bpf_reg_state *reg)
916 {
917 	reg->var_off = tnum_intersect(reg->var_off,
918 				      tnum_range(reg->umin_value,
919 						 reg->umax_value));
920 }
921 
922 /* Reset the min/max bounds of a register */
923 static void __mark_reg_unbounded(struct bpf_reg_state *reg)
924 {
925 	reg->smin_value = S64_MIN;
926 	reg->smax_value = S64_MAX;
927 	reg->umin_value = 0;
928 	reg->umax_value = U64_MAX;
929 }
930 
931 /* Mark a register as having a completely unknown (scalar) value. */
932 static void __mark_reg_unknown(struct bpf_reg_state *reg)
933 {
934 	/*
935 	 * Clear type, id, off, and union(map_ptr, range) and
936 	 * padding between 'type' and union
937 	 */
938 	memset(reg, 0, offsetof(struct bpf_reg_state, var_off));
939 	reg->type = SCALAR_VALUE;
940 	reg->var_off = tnum_unknown;
941 	reg->frameno = 0;
942 	__mark_reg_unbounded(reg);
943 }
944 
945 static void mark_reg_unknown(struct bpf_verifier_env *env,
946 			     struct bpf_reg_state *regs, u32 regno)
947 {
948 	if (WARN_ON(regno >= MAX_BPF_REG)) {
949 		verbose(env, "mark_reg_unknown(regs, %u)\n", regno);
950 		/* Something bad happened, let's kill all regs except FP */
951 		for (regno = 0; regno < BPF_REG_FP; regno++)
952 			__mark_reg_not_init(regs + regno);
953 		return;
954 	}
955 	__mark_reg_unknown(regs + regno);
956 }
957 
958 static void __mark_reg_not_init(struct bpf_reg_state *reg)
959 {
960 	__mark_reg_unknown(reg);
961 	reg->type = NOT_INIT;
962 }
963 
964 static void mark_reg_not_init(struct bpf_verifier_env *env,
965 			      struct bpf_reg_state *regs, u32 regno)
966 {
967 	if (WARN_ON(regno >= MAX_BPF_REG)) {
968 		verbose(env, "mark_reg_not_init(regs, %u)\n", regno);
969 		/* Something bad happened, let's kill all regs except FP */
970 		for (regno = 0; regno < BPF_REG_FP; regno++)
971 			__mark_reg_not_init(regs + regno);
972 		return;
973 	}
974 	__mark_reg_not_init(regs + regno);
975 }
976 
977 static void init_reg_state(struct bpf_verifier_env *env,
978 			   struct bpf_func_state *state)
979 {
980 	struct bpf_reg_state *regs = state->regs;
981 	int i;
982 
983 	for (i = 0; i < MAX_BPF_REG; i++) {
984 		mark_reg_not_init(env, regs, i);
985 		regs[i].live = REG_LIVE_NONE;
986 		regs[i].parent = NULL;
987 	}
988 
989 	/* frame pointer */
990 	regs[BPF_REG_FP].type = PTR_TO_STACK;
991 	mark_reg_known_zero(env, regs, BPF_REG_FP);
992 	regs[BPF_REG_FP].frameno = state->frameno;
993 
994 	/* 1st arg to a function */
995 	regs[BPF_REG_1].type = PTR_TO_CTX;
996 	mark_reg_known_zero(env, regs, BPF_REG_1);
997 }
998 
999 #define BPF_MAIN_FUNC (-1)
1000 static void init_func_state(struct bpf_verifier_env *env,
1001 			    struct bpf_func_state *state,
1002 			    int callsite, int frameno, int subprogno)
1003 {
1004 	state->callsite = callsite;
1005 	state->frameno = frameno;
1006 	state->subprogno = subprogno;
1007 	init_reg_state(env, state);
1008 }
1009 
1010 enum reg_arg_type {
1011 	SRC_OP,		/* register is used as source operand */
1012 	DST_OP,		/* register is used as destination operand */
1013 	DST_OP_NO_MARK	/* same as above, check only, don't mark */
1014 };
1015 
1016 static int cmp_subprogs(const void *a, const void *b)
1017 {
1018 	return ((struct bpf_subprog_info *)a)->start -
1019 	       ((struct bpf_subprog_info *)b)->start;
1020 }
1021 
1022 static int find_subprog(struct bpf_verifier_env *env, int off)
1023 {
1024 	struct bpf_subprog_info *p;
1025 
1026 	p = bsearch(&off, env->subprog_info, env->subprog_cnt,
1027 		    sizeof(env->subprog_info[0]), cmp_subprogs);
1028 	if (!p)
1029 		return -ENOENT;
1030 	return p - env->subprog_info;
1031 
1032 }
1033 
1034 static int add_subprog(struct bpf_verifier_env *env, int off)
1035 {
1036 	int insn_cnt = env->prog->len;
1037 	int ret;
1038 
1039 	if (off >= insn_cnt || off < 0) {
1040 		verbose(env, "call to invalid destination\n");
1041 		return -EINVAL;
1042 	}
1043 	ret = find_subprog(env, off);
1044 	if (ret >= 0)
1045 		return 0;
1046 	if (env->subprog_cnt >= BPF_MAX_SUBPROGS) {
1047 		verbose(env, "too many subprograms\n");
1048 		return -E2BIG;
1049 	}
1050 	env->subprog_info[env->subprog_cnt++].start = off;
1051 	sort(env->subprog_info, env->subprog_cnt,
1052 	     sizeof(env->subprog_info[0]), cmp_subprogs, NULL);
1053 	return 0;
1054 }
1055 
1056 static int check_subprogs(struct bpf_verifier_env *env)
1057 {
1058 	int i, ret, subprog_start, subprog_end, off, cur_subprog = 0;
1059 	struct bpf_subprog_info *subprog = env->subprog_info;
1060 	struct bpf_insn *insn = env->prog->insnsi;
1061 	int insn_cnt = env->prog->len;
1062 
1063 	/* Add entry function. */
1064 	ret = add_subprog(env, 0);
1065 	if (ret < 0)
1066 		return ret;
1067 
1068 	/* determine subprog starts. The end is one before the next starts */
1069 	for (i = 0; i < insn_cnt; i++) {
1070 		if (insn[i].code != (BPF_JMP | BPF_CALL))
1071 			continue;
1072 		if (insn[i].src_reg != BPF_PSEUDO_CALL)
1073 			continue;
1074 		if (!env->allow_ptr_leaks) {
1075 			verbose(env, "function calls to other bpf functions are allowed for root only\n");
1076 			return -EPERM;
1077 		}
1078 		ret = add_subprog(env, i + insn[i].imm + 1);
1079 		if (ret < 0)
1080 			return ret;
1081 	}
1082 
1083 	/* Add a fake 'exit' subprog which could simplify subprog iteration
1084 	 * logic. 'subprog_cnt' should not be increased.
1085 	 */
1086 	subprog[env->subprog_cnt].start = insn_cnt;
1087 
1088 	if (env->log.level > 1)
1089 		for (i = 0; i < env->subprog_cnt; i++)
1090 			verbose(env, "func#%d @%d\n", i, subprog[i].start);
1091 
1092 	/* now check that all jumps are within the same subprog */
1093 	subprog_start = subprog[cur_subprog].start;
1094 	subprog_end = subprog[cur_subprog + 1].start;
1095 	for (i = 0; i < insn_cnt; i++) {
1096 		u8 code = insn[i].code;
1097 
1098 		if (BPF_CLASS(code) != BPF_JMP)
1099 			goto next;
1100 		if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL)
1101 			goto next;
1102 		off = i + insn[i].off + 1;
1103 		if (off < subprog_start || off >= subprog_end) {
1104 			verbose(env, "jump out of range from insn %d to %d\n", i, off);
1105 			return -EINVAL;
1106 		}
1107 next:
1108 		if (i == subprog_end - 1) {
1109 			/* to avoid fall-through from one subprog into another
1110 			 * the last insn of the subprog should be either exit
1111 			 * or unconditional jump back
1112 			 */
1113 			if (code != (BPF_JMP | BPF_EXIT) &&
1114 			    code != (BPF_JMP | BPF_JA)) {
1115 				verbose(env, "last insn is not an exit or jmp\n");
1116 				return -EINVAL;
1117 			}
1118 			subprog_start = subprog_end;
1119 			cur_subprog++;
1120 			if (cur_subprog < env->subprog_cnt)
1121 				subprog_end = subprog[cur_subprog + 1].start;
1122 		}
1123 	}
1124 	return 0;
1125 }
1126 
1127 /* Parentage chain of this register (or stack slot) should take care of all
1128  * issues like callee-saved registers, stack slot allocation time, etc.
1129  */
1130 static int mark_reg_read(struct bpf_verifier_env *env,
1131 			 const struct bpf_reg_state *state,
1132 			 struct bpf_reg_state *parent)
1133 {
1134 	bool writes = parent == state->parent; /* Observe write marks */
1135 
1136 	while (parent) {
1137 		/* if read wasn't screened by an earlier write ... */
1138 		if (writes && state->live & REG_LIVE_WRITTEN)
1139 			break;
1140 		if (parent->live & REG_LIVE_DONE) {
1141 			verbose(env, "verifier BUG type %s var_off %lld off %d\n",
1142 				reg_type_str[parent->type],
1143 				parent->var_off.value, parent->off);
1144 			return -EFAULT;
1145 		}
1146 		/* ... then we depend on parent's value */
1147 		parent->live |= REG_LIVE_READ;
1148 		state = parent;
1149 		parent = state->parent;
1150 		writes = true;
1151 	}
1152 	return 0;
1153 }
1154 
1155 static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
1156 			 enum reg_arg_type t)
1157 {
1158 	struct bpf_verifier_state *vstate = env->cur_state;
1159 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
1160 	struct bpf_reg_state *regs = state->regs;
1161 
1162 	if (regno >= MAX_BPF_REG) {
1163 		verbose(env, "R%d is invalid\n", regno);
1164 		return -EINVAL;
1165 	}
1166 
1167 	if (t == SRC_OP) {
1168 		/* check whether register used as source operand can be read */
1169 		if (regs[regno].type == NOT_INIT) {
1170 			verbose(env, "R%d !read_ok\n", regno);
1171 			return -EACCES;
1172 		}
1173 		/* We don't need to worry about FP liveness because it's read-only */
1174 		if (regno != BPF_REG_FP)
1175 			return mark_reg_read(env, &regs[regno],
1176 					     regs[regno].parent);
1177 	} else {
1178 		/* check whether register used as dest operand can be written to */
1179 		if (regno == BPF_REG_FP) {
1180 			verbose(env, "frame pointer is read only\n");
1181 			return -EACCES;
1182 		}
1183 		regs[regno].live |= REG_LIVE_WRITTEN;
1184 		if (t == DST_OP)
1185 			mark_reg_unknown(env, regs, regno);
1186 	}
1187 	return 0;
1188 }
1189 
1190 static bool is_spillable_regtype(enum bpf_reg_type type)
1191 {
1192 	switch (type) {
1193 	case PTR_TO_MAP_VALUE:
1194 	case PTR_TO_MAP_VALUE_OR_NULL:
1195 	case PTR_TO_STACK:
1196 	case PTR_TO_CTX:
1197 	case PTR_TO_PACKET:
1198 	case PTR_TO_PACKET_META:
1199 	case PTR_TO_PACKET_END:
1200 	case PTR_TO_FLOW_KEYS:
1201 	case CONST_PTR_TO_MAP:
1202 	case PTR_TO_SOCKET:
1203 	case PTR_TO_SOCKET_OR_NULL:
1204 		return true;
1205 	default:
1206 		return false;
1207 	}
1208 }
1209 
1210 /* Does this register contain a constant zero? */
1211 static bool register_is_null(struct bpf_reg_state *reg)
1212 {
1213 	return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0);
1214 }
1215 
1216 /* check_stack_read/write functions track spill/fill of registers;
1217  * stack boundary and alignment are checked in check_mem_access()
1218  */
1219 static int check_stack_write(struct bpf_verifier_env *env,
1220 			     struct bpf_func_state *state, /* func where register points to */
1221 			     int off, int size, int value_regno, int insn_idx)
1222 {
1223 	struct bpf_func_state *cur; /* state of the current function */
1224 	int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err;
1225 	enum bpf_reg_type type;
1226 
1227 	err = realloc_func_state(state, round_up(slot + 1, BPF_REG_SIZE),
1228 				 state->acquired_refs, true);
1229 	if (err)
1230 		return err;
1231 	/* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
1232 	 * so it's aligned access and [off, off + size) are within stack limits
1233 	 */
1234 	if (!env->allow_ptr_leaks &&
1235 	    state->stack[spi].slot_type[0] == STACK_SPILL &&
1236 	    size != BPF_REG_SIZE) {
1237 		verbose(env, "attempt to corrupt spilled pointer on stack\n");
1238 		return -EACCES;
1239 	}
1240 
1241 	cur = env->cur_state->frame[env->cur_state->curframe];
1242 	if (value_regno >= 0 &&
1243 	    is_spillable_regtype((type = cur->regs[value_regno].type))) {
1244 
1245 		/* register containing pointer is being spilled into stack */
1246 		if (size != BPF_REG_SIZE) {
1247 			verbose(env, "invalid size of register spill\n");
1248 			return -EACCES;
1249 		}
1250 
1251 		if (state != cur && type == PTR_TO_STACK) {
1252 			verbose(env, "cannot spill pointers to stack into stack frame of the caller\n");
1253 			return -EINVAL;
1254 		}
1255 
1256 		/* save register state */
1257 		state->stack[spi].spilled_ptr = cur->regs[value_regno];
1258 		state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
1259 
1260 		for (i = 0; i < BPF_REG_SIZE; i++) {
1261 			if (state->stack[spi].slot_type[i] == STACK_MISC &&
1262 			    !env->allow_ptr_leaks) {
1263 				int *poff = &env->insn_aux_data[insn_idx].sanitize_stack_off;
1264 				int soff = (-spi - 1) * BPF_REG_SIZE;
1265 
1266 				/* detected reuse of an integer stack slot with a pointer,
1267 				 * which means either llvm is reusing the stack slot or
1268 				 * an attacker is trying to exploit CVE-2018-3639
1269 				 * (speculative store bypass).
1270 				 * Have to sanitize that slot with a preemptive
1271 				 * store of zero.
1272 				 */
1273 				if (*poff && *poff != soff) {
1274 					/* disallow programs where single insn stores
1275 					 * into two different stack slots, since verifier
1276 					 * cannot sanitize them
1277 					 */
1278 					verbose(env,
1279 						"insn %d cannot access two stack slots fp%d and fp%d",
1280 						insn_idx, *poff, soff);
1281 					return -EINVAL;
1282 				}
1283 				*poff = soff;
1284 			}
1285 			state->stack[spi].slot_type[i] = STACK_SPILL;
1286 		}
1287 	} else {
1288 		u8 type = STACK_MISC;
1289 
1290 		/* regular write of data into stack destroys any spilled ptr */
1291 		state->stack[spi].spilled_ptr.type = NOT_INIT;
1292 		/* Mark slots as STACK_MISC if they belonged to spilled ptr. */
1293 		if (state->stack[spi].slot_type[0] == STACK_SPILL)
1294 			for (i = 0; i < BPF_REG_SIZE; i++)
1295 				state->stack[spi].slot_type[i] = STACK_MISC;
1296 
1297 		/* only mark the slot as written if all 8 bytes were written,
1298 		 * otherwise read propagation may incorrectly stop too soon
1299 		 * when stack slots are partially written.
1300 		 * This heuristic means that read propagation will be
1301 		 * conservative, since it will add reg_live_read marks
1302 		 * to stack slots all the way to the first state when a program
1303 		 * writes+reads less than 8 bytes.
1304 		 */
1305 		if (size == BPF_REG_SIZE)
1306 			state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
1307 
1308 		/* when we zero initialize stack slots mark them as such */
1309 		if (value_regno >= 0 &&
1310 		    register_is_null(&cur->regs[value_regno]))
1311 			type = STACK_ZERO;
1312 
1313 		/* Mark slots affected by this stack write. */
1314 		for (i = 0; i < size; i++)
1315 			state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] =
1316 				type;
1317 	}
1318 	return 0;
1319 }
1320 
1321 static int check_stack_read(struct bpf_verifier_env *env,
1322 			    struct bpf_func_state *reg_state /* func where register points to */,
1323 			    int off, int size, int value_regno)
1324 {
1325 	struct bpf_verifier_state *vstate = env->cur_state;
1326 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
1327 	int i, slot = -off - 1, spi = slot / BPF_REG_SIZE;
1328 	u8 *stype;
1329 
1330 	if (reg_state->allocated_stack <= slot) {
1331 		verbose(env, "invalid read from stack off %d+0 size %d\n",
1332 			off, size);
1333 		return -EACCES;
1334 	}
1335 	stype = reg_state->stack[spi].slot_type;
1336 
1337 	if (stype[0] == STACK_SPILL) {
1338 		if (size != BPF_REG_SIZE) {
1339 			verbose(env, "invalid size of register spill\n");
1340 			return -EACCES;
1341 		}
1342 		for (i = 1; i < BPF_REG_SIZE; i++) {
1343 			if (stype[(slot - i) % BPF_REG_SIZE] != STACK_SPILL) {
1344 				verbose(env, "corrupted spill memory\n");
1345 				return -EACCES;
1346 			}
1347 		}
1348 
1349 		if (value_regno >= 0) {
1350 			/* restore register state from stack */
1351 			state->regs[value_regno] = reg_state->stack[spi].spilled_ptr;
1352 			/* mark reg as written since spilled pointer state likely
1353 			 * has its liveness marks cleared by is_state_visited()
1354 			 * which resets stack/reg liveness for state transitions
1355 			 */
1356 			state->regs[value_regno].live |= REG_LIVE_WRITTEN;
1357 		}
1358 		mark_reg_read(env, &reg_state->stack[spi].spilled_ptr,
1359 			      reg_state->stack[spi].spilled_ptr.parent);
1360 		return 0;
1361 	} else {
1362 		int zeros = 0;
1363 
1364 		for (i = 0; i < size; i++) {
1365 			if (stype[(slot - i) % BPF_REG_SIZE] == STACK_MISC)
1366 				continue;
1367 			if (stype[(slot - i) % BPF_REG_SIZE] == STACK_ZERO) {
1368 				zeros++;
1369 				continue;
1370 			}
1371 			verbose(env, "invalid read from stack off %d+%d size %d\n",
1372 				off, i, size);
1373 			return -EACCES;
1374 		}
1375 		mark_reg_read(env, &reg_state->stack[spi].spilled_ptr,
1376 			      reg_state->stack[spi].spilled_ptr.parent);
1377 		if (value_regno >= 0) {
1378 			if (zeros == size) {
1379 				/* any size read into register is zero extended,
1380 				 * so the whole register == const_zero
1381 				 */
1382 				__mark_reg_const_zero(&state->regs[value_regno]);
1383 			} else {
1384 				/* have read misc data from the stack */
1385 				mark_reg_unknown(env, state->regs, value_regno);
1386 			}
1387 			state->regs[value_regno].live |= REG_LIVE_WRITTEN;
1388 		}
1389 		return 0;
1390 	}
1391 }
1392 
1393 static int check_stack_access(struct bpf_verifier_env *env,
1394 			      const struct bpf_reg_state *reg,
1395 			      int off, int size)
1396 {
1397 	/* Stack accesses must be at a fixed offset, so that we
1398 	 * can determine what type of data is being read. See
1399 	 * check_stack_read().
1400 	 */
1401 	if (!tnum_is_const(reg->var_off)) {
1402 		char tn_buf[48];
1403 
1404 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
1405 		verbose(env, "variable stack access var_off=%s off=%d size=%d",
1406 			tn_buf, off, size);
1407 		return -EACCES;
1408 	}
1409 
1410 	if (off >= 0 || off < -MAX_BPF_STACK) {
1411 		verbose(env, "invalid stack off=%d size=%d\n", off, size);
1412 		return -EACCES;
1413 	}
1414 
1415 	return 0;
1416 }
1417 
1418 /* check read/write into map element returned by bpf_map_lookup_elem() */
1419 static int __check_map_access(struct bpf_verifier_env *env, u32 regno, int off,
1420 			      int size, bool zero_size_allowed)
1421 {
1422 	struct bpf_reg_state *regs = cur_regs(env);
1423 	struct bpf_map *map = regs[regno].map_ptr;
1424 
1425 	if (off < 0 || size < 0 || (size == 0 && !zero_size_allowed) ||
1426 	    off + size > map->value_size) {
1427 		verbose(env, "invalid access to map value, value_size=%d off=%d size=%d\n",
1428 			map->value_size, off, size);
1429 		return -EACCES;
1430 	}
1431 	return 0;
1432 }
1433 
1434 /* check read/write into a map element with possible variable offset */
1435 static int check_map_access(struct bpf_verifier_env *env, u32 regno,
1436 			    int off, int size, bool zero_size_allowed)
1437 {
1438 	struct bpf_verifier_state *vstate = env->cur_state;
1439 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
1440 	struct bpf_reg_state *reg = &state->regs[regno];
1441 	int err;
1442 
1443 	/* We may have adjusted the register to this map value, so we
1444 	 * need to try adding each of min_value and max_value to off
1445 	 * to make sure our theoretical access will be safe.
1446 	 */
1447 	if (env->log.level)
1448 		print_verifier_state(env, state);
1449 
1450 	/* The minimum value is only important with signed
1451 	 * comparisons where we can't assume the floor of a
1452 	 * value is 0.  If we are using signed variables for our
1453 	 * indexes, we need to make sure that whatever we use
1454 	 * will have a set floor within our range.
1455 	 */
1456 	if (reg->smin_value < 0 &&
1457 	    (reg->smin_value == S64_MIN ||
1458 	     (off + reg->smin_value != (s64)(s32)(off + reg->smin_value)) ||
1459 	      reg->smin_value + off < 0)) {
1460 		verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
1461 			regno);
1462 		return -EACCES;
1463 	}
1464 	err = __check_map_access(env, regno, reg->smin_value + off, size,
1465 				 zero_size_allowed);
1466 	if (err) {
1467 		verbose(env, "R%d min value is outside of the array range\n",
1468 			regno);
1469 		return err;
1470 	}
1471 
1472 	/* If we haven't set a max value then we need to bail since we can't be
1473 	 * sure we won't do bad things.
1474 	 * If reg->umax_value + off could overflow, treat that as unbounded too.
1475 	 */
1476 	if (reg->umax_value >= BPF_MAX_VAR_OFF) {
1477 		verbose(env, "R%d unbounded memory access, make sure to bounds check any array access into a map\n",
1478 			regno);
1479 		return -EACCES;
1480 	}
1481 	err = __check_map_access(env, regno, reg->umax_value + off, size,
1482 				 zero_size_allowed);
1483 	if (err)
1484 		verbose(env, "R%d max value is outside of the array range\n",
1485 			regno);
1486 	return err;
1487 }
1488 
1489 #define MAX_PACKET_OFF 0xffff
1490 
1491 static bool may_access_direct_pkt_data(struct bpf_verifier_env *env,
1492 				       const struct bpf_call_arg_meta *meta,
1493 				       enum bpf_access_type t)
1494 {
1495 	switch (env->prog->type) {
1496 	/* Program types with only direct read access go here! */
1497 	case BPF_PROG_TYPE_LWT_IN:
1498 	case BPF_PROG_TYPE_LWT_OUT:
1499 	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
1500 	case BPF_PROG_TYPE_SK_REUSEPORT:
1501 	case BPF_PROG_TYPE_FLOW_DISSECTOR:
1502 	case BPF_PROG_TYPE_CGROUP_SKB:
1503 		if (t == BPF_WRITE)
1504 			return false;
1505 		/* fallthrough */
1506 
1507 	/* Program types with direct read + write access go here! */
1508 	case BPF_PROG_TYPE_SCHED_CLS:
1509 	case BPF_PROG_TYPE_SCHED_ACT:
1510 	case BPF_PROG_TYPE_XDP:
1511 	case BPF_PROG_TYPE_LWT_XMIT:
1512 	case BPF_PROG_TYPE_SK_SKB:
1513 	case BPF_PROG_TYPE_SK_MSG:
1514 		if (meta)
1515 			return meta->pkt_access;
1516 
1517 		env->seen_direct_write = true;
1518 		return true;
1519 	default:
1520 		return false;
1521 	}
1522 }
1523 
1524 static int __check_packet_access(struct bpf_verifier_env *env, u32 regno,
1525 				 int off, int size, bool zero_size_allowed)
1526 {
1527 	struct bpf_reg_state *regs = cur_regs(env);
1528 	struct bpf_reg_state *reg = &regs[regno];
1529 
1530 	if (off < 0 || size < 0 || (size == 0 && !zero_size_allowed) ||
1531 	    (u64)off + size > reg->range) {
1532 		verbose(env, "invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n",
1533 			off, size, regno, reg->id, reg->off, reg->range);
1534 		return -EACCES;
1535 	}
1536 	return 0;
1537 }
1538 
1539 static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
1540 			       int size, bool zero_size_allowed)
1541 {
1542 	struct bpf_reg_state *regs = cur_regs(env);
1543 	struct bpf_reg_state *reg = &regs[regno];
1544 	int err;
1545 
1546 	/* We may have added a variable offset to the packet pointer; but any
1547 	 * reg->range we have comes after that.  We are only checking the fixed
1548 	 * offset.
1549 	 */
1550 
1551 	/* We don't allow negative numbers, because we aren't tracking enough
1552 	 * detail to prove they're safe.
1553 	 */
1554 	if (reg->smin_value < 0) {
1555 		verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
1556 			regno);
1557 		return -EACCES;
1558 	}
1559 	err = __check_packet_access(env, regno, off, size, zero_size_allowed);
1560 	if (err) {
1561 		verbose(env, "R%d offset is outside of the packet\n", regno);
1562 		return err;
1563 	}
1564 
1565 	/* __check_packet_access has made sure "off + size - 1" is within u16.
1566 	 * reg->umax_value can't be bigger than MAX_PACKET_OFF which is 0xffff,
1567 	 * otherwise find_good_pkt_pointers would have refused to set range info
1568 	 * and __check_packet_access would have rejected this pkt access.
1569 	 * Therefore, "off + reg->umax_value + size - 1" won't overflow u32.
1570 	 */
1571 	env->prog->aux->max_pkt_offset =
1572 		max_t(u32, env->prog->aux->max_pkt_offset,
1573 		      off + reg->umax_value + size - 1);
1574 
1575 	return err;
1576 }
1577 
1578 /* check access to 'struct bpf_context' fields.  Supports fixed offsets only */
1579 static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size,
1580 			    enum bpf_access_type t, enum bpf_reg_type *reg_type)
1581 {
1582 	struct bpf_insn_access_aux info = {
1583 		.reg_type = *reg_type,
1584 	};
1585 
1586 	if (env->ops->is_valid_access &&
1587 	    env->ops->is_valid_access(off, size, t, env->prog, &info)) {
1588 		/* A non zero info.ctx_field_size indicates that this field is a
1589 		 * candidate for later verifier transformation to load the whole
1590 		 * field and then apply a mask when accessed with a narrower
1591 		 * access than actual ctx access size. A zero info.ctx_field_size
1592 		 * will only allow for whole field access and rejects any other
1593 		 * type of narrower access.
1594 		 */
1595 		*reg_type = info.reg_type;
1596 
1597 		env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
1598 		/* remember the offset of last byte accessed in ctx */
1599 		if (env->prog->aux->max_ctx_offset < off + size)
1600 			env->prog->aux->max_ctx_offset = off + size;
1601 		return 0;
1602 	}
1603 
1604 	verbose(env, "invalid bpf_context access off=%d size=%d\n", off, size);
1605 	return -EACCES;
1606 }
1607 
1608 static int check_flow_keys_access(struct bpf_verifier_env *env, int off,
1609 				  int size)
1610 {
1611 	if (size < 0 || off < 0 ||
1612 	    (u64)off + size > sizeof(struct bpf_flow_keys)) {
1613 		verbose(env, "invalid access to flow keys off=%d size=%d\n",
1614 			off, size);
1615 		return -EACCES;
1616 	}
1617 	return 0;
1618 }
1619 
1620 static int check_sock_access(struct bpf_verifier_env *env, int insn_idx,
1621 			     u32 regno, int off, int size,
1622 			     enum bpf_access_type t)
1623 {
1624 	struct bpf_reg_state *regs = cur_regs(env);
1625 	struct bpf_reg_state *reg = &regs[regno];
1626 	struct bpf_insn_access_aux info = {};
1627 
1628 	if (reg->smin_value < 0) {
1629 		verbose(env, "R%d min value is negative, either use unsigned index or add an if (index >= 0) check.\n",
1630 			regno);
1631 		return -EACCES;
1632 	}
1633 
1634 	if (!bpf_sock_is_valid_access(off, size, t, &info)) {
1635 		verbose(env, "invalid bpf_sock access off=%d size=%d\n",
1636 			off, size);
1637 		return -EACCES;
1638 	}
1639 
1640 	env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
1641 
1642 	return 0;
1643 }
1644 
1645 static bool __is_pointer_value(bool allow_ptr_leaks,
1646 			       const struct bpf_reg_state *reg)
1647 {
1648 	if (allow_ptr_leaks)
1649 		return false;
1650 
1651 	return reg->type != SCALAR_VALUE;
1652 }
1653 
1654 static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno)
1655 {
1656 	return cur_regs(env) + regno;
1657 }
1658 
1659 static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
1660 {
1661 	return __is_pointer_value(env->allow_ptr_leaks, reg_state(env, regno));
1662 }
1663 
1664 static bool is_ctx_reg(struct bpf_verifier_env *env, int regno)
1665 {
1666 	const struct bpf_reg_state *reg = reg_state(env, regno);
1667 
1668 	return reg->type == PTR_TO_CTX ||
1669 	       reg->type == PTR_TO_SOCKET;
1670 }
1671 
1672 static bool is_pkt_reg(struct bpf_verifier_env *env, int regno)
1673 {
1674 	const struct bpf_reg_state *reg = reg_state(env, regno);
1675 
1676 	return type_is_pkt_pointer(reg->type);
1677 }
1678 
1679 static bool is_flow_key_reg(struct bpf_verifier_env *env, int regno)
1680 {
1681 	const struct bpf_reg_state *reg = reg_state(env, regno);
1682 
1683 	/* Separate to is_ctx_reg() since we still want to allow BPF_ST here. */
1684 	return reg->type == PTR_TO_FLOW_KEYS;
1685 }
1686 
1687 static int check_pkt_ptr_alignment(struct bpf_verifier_env *env,
1688 				   const struct bpf_reg_state *reg,
1689 				   int off, int size, bool strict)
1690 {
1691 	struct tnum reg_off;
1692 	int ip_align;
1693 
1694 	/* Byte size accesses are always allowed. */
1695 	if (!strict || size == 1)
1696 		return 0;
1697 
1698 	/* For platforms that do not have a Kconfig enabling
1699 	 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the value of
1700 	 * NET_IP_ALIGN is universally set to '2'.  And on platforms
1701 	 * that do set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, we get
1702 	 * to this code only in strict mode where we want to emulate
1703 	 * the NET_IP_ALIGN==2 checking.  Therefore use an
1704 	 * unconditional IP align value of '2'.
1705 	 */
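	/* Worked example (illustrative values): with reg->off == 0, a known
	 * var_off of 0 and off == 0, a 4-byte load checks 2 + 0 + 0 == 2,
	 * which is not 4-byte aligned and is rejected; the same load at
	 * off == 2 checks 2 + 0 + 2 == 4 and passes.
	 */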
1706 	ip_align = 2;
1707 
1708 	reg_off = tnum_add(reg->var_off, tnum_const(ip_align + reg->off + off));
1709 	if (!tnum_is_aligned(reg_off, size)) {
1710 		char tn_buf[48];
1711 
1712 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
1713 		verbose(env,
1714 			"misaligned packet access off %d+%s+%d+%d size %d\n",
1715 			ip_align, tn_buf, reg->off, off, size);
1716 		return -EACCES;
1717 	}
1718 
1719 	return 0;
1720 }
1721 
1722 static int check_generic_ptr_alignment(struct bpf_verifier_env *env,
1723 				       const struct bpf_reg_state *reg,
1724 				       const char *pointer_desc,
1725 				       int off, int size, bool strict)
1726 {
1727 	struct tnum reg_off;
1728 
1729 	/* Byte size accesses are always allowed. */
1730 	if (!strict || size == 1)
1731 		return 0;
1732 
1733 	reg_off = tnum_add(reg->var_off, tnum_const(reg->off + off));
1734 	if (!tnum_is_aligned(reg_off, size)) {
1735 		char tn_buf[48];
1736 
1737 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
1738 		verbose(env, "misaligned %saccess off %s+%d+%d size %d\n",
1739 			pointer_desc, tn_buf, reg->off, off, size);
1740 		return -EACCES;
1741 	}
1742 
1743 	return 0;
1744 }
1745 
1746 static int check_ptr_alignment(struct bpf_verifier_env *env,
1747 			       const struct bpf_reg_state *reg, int off,
1748 			       int size, bool strict_alignment_once)
1749 {
1750 	bool strict = env->strict_alignment || strict_alignment_once;
1751 	const char *pointer_desc = "";
1752 
1753 	switch (reg->type) {
1754 	case PTR_TO_PACKET:
1755 	case PTR_TO_PACKET_META:
1756 		/* Special case, because of NET_IP_ALIGN. Given metadata sits
1757 		 * right in front, treat it the very same way.
1758 		 */
1759 		return check_pkt_ptr_alignment(env, reg, off, size, strict);
1760 	case PTR_TO_FLOW_KEYS:
1761 		pointer_desc = "flow keys ";
1762 		break;
1763 	case PTR_TO_MAP_VALUE:
1764 		pointer_desc = "value ";
1765 		break;
1766 	case PTR_TO_CTX:
1767 		pointer_desc = "context ";
1768 		break;
1769 	case PTR_TO_STACK:
1770 		pointer_desc = "stack ";
1771 		/* The stack spill tracking logic in check_stack_write()
1772 		 * and check_stack_read() relies on stack accesses being
1773 		 * aligned.
1774 		 */
1775 		strict = true;
1776 		break;
1777 	case PTR_TO_SOCKET:
1778 		pointer_desc = "sock ";
1779 		break;
1780 	default:
1781 		break;
1782 	}
1783 	return check_generic_ptr_alignment(env, reg, pointer_desc, off, size,
1784 					   strict);
1785 }
1786 
1787 static int update_stack_depth(struct bpf_verifier_env *env,
1788 			      const struct bpf_func_state *func,
1789 			      int off)
1790 {
1791 	u16 stack = env->subprog_info[func->subprogno].stack_depth;
1792 
1793 	if (stack >= -off)
1794 		return 0;
1795 
1796 	/* update known max for given subprogram */
1797 	env->subprog_info[func->subprogno].stack_depth = -off;
1798 	return 0;
1799 }
1800 
1801 /* Starting from the main bpf function, walk all instructions of the function
1802  * and recursively walk all callees that the given function can call.
1803  * Ignore jump and exit insns.
1804  * Since recursion is prevented by check_cfg(), this algorithm
1805  * only needs a local stack of MAX_CALL_FRAMES to remember callsites.
1806  */
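/* Worked example (hypothetical depths): if the main function uses 96 bytes
 * of stack and calls a subprog that uses 64, the combined depth checked
 * below is 96 + 64 = 160 (each value already rounded up to the 32-byte
 * interpreter granularity), which is fine; a call chain whose rounded
 * depths sum to more than MAX_BPF_STACK (512) is rejected.
 */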
1807 static int check_max_stack_depth(struct bpf_verifier_env *env)
1808 {
1809 	int depth = 0, frame = 0, idx = 0, i = 0, subprog_end;
1810 	struct bpf_subprog_info *subprog = env->subprog_info;
1811 	struct bpf_insn *insn = env->prog->insnsi;
1812 	int ret_insn[MAX_CALL_FRAMES];
1813 	int ret_prog[MAX_CALL_FRAMES];
1814 
1815 process_func:
1816 	/* round up to 32 bytes, since this is the granularity
1817 	 * of the interpreter stack size
1818 	 */
1819 	depth += round_up(max_t(u32, subprog[idx].stack_depth, 1), 32);
1820 	if (depth > MAX_BPF_STACK) {
1821 		verbose(env, "combined stack size of %d calls is %d. Too large\n",
1822 			frame + 1, depth);
1823 		return -EACCES;
1824 	}
1825 continue_func:
1826 	subprog_end = subprog[idx + 1].start;
1827 	for (; i < subprog_end; i++) {
1828 		if (insn[i].code != (BPF_JMP | BPF_CALL))
1829 			continue;
1830 		if (insn[i].src_reg != BPF_PSEUDO_CALL)
1831 			continue;
1832 		/* remember insn and function to return to */
1833 		ret_insn[frame] = i + 1;
1834 		ret_prog[frame] = idx;
1835 
1836 		/* find the callee */
1837 		i = i + insn[i].imm + 1;
1838 		idx = find_subprog(env, i);
1839 		if (idx < 0) {
1840 			WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
1841 				  i);
1842 			return -EFAULT;
1843 		}
1844 		frame++;
1845 		if (frame >= MAX_CALL_FRAMES) {
1846 			WARN_ONCE(1, "verifier bug. Call stack is too deep\n");
1847 			return -EFAULT;
1848 		}
1849 		goto process_func;
1850 	}
1851 	/* end of for() loop means the last insn of the 'subprog'
1852 	 * was reached. Doesn't matter whether it was JA or EXIT
1853 	 */
1854 	if (frame == 0)
1855 		return 0;
1856 	depth -= round_up(max_t(u32, subprog[idx].stack_depth, 1), 32);
1857 	frame--;
1858 	i = ret_insn[frame];
1859 	idx = ret_prog[frame];
1860 	goto continue_func;
1861 }
1862 
1863 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
1864 static int get_callee_stack_depth(struct bpf_verifier_env *env,
1865 				  const struct bpf_insn *insn, int idx)
1866 {
1867 	int start = idx + insn->imm + 1, subprog;
1868 
1869 	subprog = find_subprog(env, start);
1870 	if (subprog < 0) {
1871 		WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
1872 			  start);
1873 		return -EFAULT;
1874 	}
1875 	return env->subprog_info[subprog].stack_depth;
1876 }
1877 #endif
1878 
1879 static int check_ctx_reg(struct bpf_verifier_env *env,
1880 			 const struct bpf_reg_state *reg, int regno)
1881 {
1882 	/* Access to ctx or passing it to a helper is only allowed in
1883 	 * its original, unmodified form.
1884 	 */
1885 
1886 	if (reg->off) {
1887 		verbose(env, "dereference of modified ctx ptr R%d off=%d disallowed\n",
1888 			regno, reg->off);
1889 		return -EACCES;
1890 	}
1891 
1892 	if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
1893 		char tn_buf[48];
1894 
1895 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
1896 		verbose(env, "variable ctx access var_off=%s disallowed\n", tn_buf);
1897 		return -EACCES;
1898 	}
1899 
1900 	return 0;
1901 }
1902 
1903 /* truncate register to smaller size (in bytes)
1904  * must be called with size < BPF_REG_SIZE
1905  */
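/* Illustrative example (hypothetical register state): truncating a fully
 * unknown 64-bit scalar to size == 2 leaves var_off with the upper 48 bits
 * known to be zero and sets both the unsigned and signed bounds to
 * [0, 0xffff], matching the zero-extension done by a 2-byte load.
 */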
1906 static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
1907 {
1908 	u64 mask;
1909 
1910 	/* clear high bits in bit representation */
1911 	reg->var_off = tnum_cast(reg->var_off, size);
1912 
1913 	/* fix arithmetic bounds */
1914 	mask = ((u64)1 << (size * 8)) - 1;
1915 	if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) {
1916 		reg->umin_value &= mask;
1917 		reg->umax_value &= mask;
1918 	} else {
1919 		reg->umin_value = 0;
1920 		reg->umax_value = mask;
1921 	}
1922 	reg->smin_value = reg->umin_value;
1923 	reg->smax_value = reg->umax_value;
1924 }
1925 
1926 /* check whether memory at (regno + off) is accessible for t = (read | write)
1927  * if t==write, value_regno is a register whose value is stored into memory
1928  * if t==read, value_regno is a register which will receive the value from memory
1929  * if t==write && value_regno==-1, some unknown value is stored into memory
1930  * if t==read && value_regno==-1, don't care what we read from memory
1931  */
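/* Hypothetical example of how this is reached (assuming the usual mapping
 * done by the main verification loop): BPF_LDX_MEM(BPF_W, BPF_REG_2,
 * BPF_REG_1, 8) is checked as a 4-byte BPF_READ at off 8 through regno 1
 * with value_regno == 2, while BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8)
 * is an 8-byte BPF_WRITE through regno 10 with value_regno == 0.
 */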
1932 static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno,
1933 			    int off, int bpf_size, enum bpf_access_type t,
1934 			    int value_regno, bool strict_alignment_once)
1935 {
1936 	struct bpf_reg_state *regs = cur_regs(env);
1937 	struct bpf_reg_state *reg = regs + regno;
1938 	struct bpf_func_state *state;
1939 	int size, err = 0;
1940 
1941 	size = bpf_size_to_bytes(bpf_size);
1942 	if (size < 0)
1943 		return size;
1944 
1945 	/* alignment checks will add in reg->off themselves */
1946 	err = check_ptr_alignment(env, reg, off, size, strict_alignment_once);
1947 	if (err)
1948 		return err;
1949 
1950 	/* for access checks, reg->off is just part of off */
1951 	off += reg->off;
1952 
1953 	if (reg->type == PTR_TO_MAP_VALUE) {
1954 		if (t == BPF_WRITE && value_regno >= 0 &&
1955 		    is_pointer_value(env, value_regno)) {
1956 			verbose(env, "R%d leaks addr into map\n", value_regno);
1957 			return -EACCES;
1958 		}
1959 
1960 		err = check_map_access(env, regno, off, size, false);
1961 		if (!err && t == BPF_READ && value_regno >= 0)
1962 			mark_reg_unknown(env, regs, value_regno);
1963 
1964 	} else if (reg->type == PTR_TO_CTX) {
1965 		enum bpf_reg_type reg_type = SCALAR_VALUE;
1966 
1967 		if (t == BPF_WRITE && value_regno >= 0 &&
1968 		    is_pointer_value(env, value_regno)) {
1969 			verbose(env, "R%d leaks addr into ctx\n", value_regno);
1970 			return -EACCES;
1971 		}
1972 
1973 		err = check_ctx_reg(env, reg, regno);
1974 		if (err < 0)
1975 			return err;
1976 
1977 		err = check_ctx_access(env, insn_idx, off, size, t, &reg_type);
1978 		if (!err && t == BPF_READ && value_regno >= 0) {
1979 			/* ctx access returns either a scalar, or a
1980 			 * PTR_TO_PACKET[_META,_END]. In the latter
1981 			 * case, we know the offset is zero.
1982 			 */
1983 			if (reg_type == SCALAR_VALUE)
1984 				mark_reg_unknown(env, regs, value_regno);
1985 			else
1986 				mark_reg_known_zero(env, regs,
1987 						    value_regno);
1988 			regs[value_regno].type = reg_type;
1989 		}
1990 
1991 	} else if (reg->type == PTR_TO_STACK) {
1992 		off += reg->var_off.value;
1993 		err = check_stack_access(env, reg, off, size);
1994 		if (err)
1995 			return err;
1996 
1997 		state = func(env, reg);
1998 		err = update_stack_depth(env, state, off);
1999 		if (err)
2000 			return err;
2001 
2002 		if (t == BPF_WRITE)
2003 			err = check_stack_write(env, state, off, size,
2004 						value_regno, insn_idx);
2005 		else
2006 			err = check_stack_read(env, state, off, size,
2007 					       value_regno);
2008 	} else if (reg_is_pkt_pointer(reg)) {
2009 		if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) {
2010 			verbose(env, "cannot write into packet\n");
2011 			return -EACCES;
2012 		}
2013 		if (t == BPF_WRITE && value_regno >= 0 &&
2014 		    is_pointer_value(env, value_regno)) {
2015 			verbose(env, "R%d leaks addr into packet\n",
2016 				value_regno);
2017 			return -EACCES;
2018 		}
2019 		err = check_packet_access(env, regno, off, size, false);
2020 		if (!err && t == BPF_READ && value_regno >= 0)
2021 			mark_reg_unknown(env, regs, value_regno);
2022 	} else if (reg->type == PTR_TO_FLOW_KEYS) {
2023 		if (t == BPF_WRITE && value_regno >= 0 &&
2024 		    is_pointer_value(env, value_regno)) {
2025 			verbose(env, "R%d leaks addr into flow keys\n",
2026 				value_regno);
2027 			return -EACCES;
2028 		}
2029 
2030 		err = check_flow_keys_access(env, off, size);
2031 		if (!err && t == BPF_READ && value_regno >= 0)
2032 			mark_reg_unknown(env, regs, value_regno);
2033 	} else if (reg->type == PTR_TO_SOCKET) {
2034 		if (t == BPF_WRITE) {
2035 			verbose(env, "cannot write into socket\n");
2036 			return -EACCES;
2037 		}
2038 		err = check_sock_access(env, insn_idx, regno, off, size, t);
2039 		if (!err && value_regno >= 0)
2040 			mark_reg_unknown(env, regs, value_regno);
2041 	} else {
2042 		verbose(env, "R%d invalid mem access '%s'\n", regno,
2043 			reg_type_str[reg->type]);
2044 		return -EACCES;
2045 	}
2046 
2047 	if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ &&
2048 	    regs[value_regno].type == SCALAR_VALUE) {
2049 		/* b/h/w load zero-extends, mark upper bits as known 0 */
2050 		coerce_reg_to_size(&regs[value_regno], size);
2051 	}
2052 	return err;
2053 }
2054 
2055 static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn)
2056 {
2057 	int err;
2058 
2059 	if ((BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) ||
2060 	    insn->imm != 0) {
2061 		verbose(env, "BPF_XADD uses reserved fields\n");
2062 		return -EINVAL;
2063 	}
2064 
2065 	/* check src1 operand */
2066 	err = check_reg_arg(env, insn->src_reg, SRC_OP);
2067 	if (err)
2068 		return err;
2069 
2070 	/* check src2 operand */
2071 	err = check_reg_arg(env, insn->dst_reg, SRC_OP);
2072 	if (err)
2073 		return err;
2074 
2075 	if (is_pointer_value(env, insn->src_reg)) {
2076 		verbose(env, "R%d leaks addr into mem\n", insn->src_reg);
2077 		return -EACCES;
2078 	}
2079 
2080 	if (is_ctx_reg(env, insn->dst_reg) ||
2081 	    is_pkt_reg(env, insn->dst_reg) ||
2082 	    is_flow_key_reg(env, insn->dst_reg)) {
2083 		verbose(env, "BPF_XADD stores into R%d %s is not allowed\n",
2084 			insn->dst_reg,
2085 			reg_type_str[reg_state(env, insn->dst_reg)->type]);
2086 		return -EACCES;
2087 	}
2088 
2089 	/* check whether atomic_add can read the memory */
2090 	err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
2091 			       BPF_SIZE(insn->code), BPF_READ, -1, true);
2092 	if (err)
2093 		return err;
2094 
2095 	/* check whether atomic_add can write into the same memory */
2096 	return check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
2097 				BPF_SIZE(insn->code), BPF_WRITE, -1, true);
2098 }
2099 
2100 /* when register 'regno' is passed into a function that will read 'access_size'
2101  * bytes from that pointer, make sure that it's within the stack boundary
2102  * and all elements of the stack are initialized.
2103  * Unlike most pointer bounds-checking functions, this one doesn't take an
2104  * 'off' argument, so it has to add in reg->off itself.
2105  */
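/* Illustrative example (hypothetical stack layout): a helper taking a
 * PTR_TO_STACK argument at fp-16 with access_size == 8 and raw_mode off
 * requires every byte in [fp-16, fp-9] to already be STACK_MISC or
 * STACK_ZERO; otherwise the "invalid indirect read from stack" error below
 * is emitted.
 */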
2106 static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
2107 				int access_size, bool zero_size_allowed,
2108 				struct bpf_call_arg_meta *meta)
2109 {
2110 	struct bpf_reg_state *reg = reg_state(env, regno);
2111 	struct bpf_func_state *state = func(env, reg);
2112 	int off, i, slot, spi;
2113 
2114 	if (reg->type != PTR_TO_STACK) {
2115 		/* Allow zero-byte read from NULL, regardless of pointer type */
2116 		if (zero_size_allowed && access_size == 0 &&
2117 		    register_is_null(reg))
2118 			return 0;
2119 
2120 		verbose(env, "R%d type=%s expected=%s\n", regno,
2121 			reg_type_str[reg->type],
2122 			reg_type_str[PTR_TO_STACK]);
2123 		return -EACCES;
2124 	}
2125 
2126 	/* Only allow fixed-offset stack reads */
2127 	if (!tnum_is_const(reg->var_off)) {
2128 		char tn_buf[48];
2129 
2130 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
2131 		verbose(env, "invalid variable stack read R%d var_off=%s\n",
2132 			regno, tn_buf);
2133 		return -EACCES;
2134 	}
2135 	off = reg->off + reg->var_off.value;
2136 	if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 ||
2137 	    access_size < 0 || (access_size == 0 && !zero_size_allowed)) {
2138 		verbose(env, "invalid stack type R%d off=%d access_size=%d\n",
2139 			regno, off, access_size);
2140 		return -EACCES;
2141 	}
2142 
2143 	if (meta && meta->raw_mode) {
2144 		meta->access_size = access_size;
2145 		meta->regno = regno;
2146 		return 0;
2147 	}
2148 
2149 	for (i = 0; i < access_size; i++) {
2150 		u8 *stype;
2151 
2152 		slot = -(off + i) - 1;
2153 		spi = slot / BPF_REG_SIZE;
2154 		if (state->allocated_stack <= slot)
2155 			goto err;
2156 		stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
2157 		if (*stype == STACK_MISC)
2158 			goto mark;
2159 		if (*stype == STACK_ZERO) {
2160 			/* helper can write anything into the stack */
2161 			*stype = STACK_MISC;
2162 			goto mark;
2163 		}
2164 err:
2165 		verbose(env, "invalid indirect read from stack off %d+%d size %d\n",
2166 			off, i, access_size);
2167 		return -EACCES;
2168 mark:
2169 		/* reading any byte out of 8-byte 'spill_slot' will cause
2170 		 * the whole slot to be marked as 'read'
2171 		 */
2172 		mark_reg_read(env, &state->stack[spi].spilled_ptr,
2173 			      state->stack[spi].spilled_ptr.parent);
2174 	}
2175 	return update_stack_depth(env, state, off);
2176 }
2177 
2178 static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
2179 				   int access_size, bool zero_size_allowed,
2180 				   struct bpf_call_arg_meta *meta)
2181 {
2182 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
2183 
2184 	switch (reg->type) {
2185 	case PTR_TO_PACKET:
2186 	case PTR_TO_PACKET_META:
2187 		return check_packet_access(env, regno, reg->off, access_size,
2188 					   zero_size_allowed);
2189 	case PTR_TO_MAP_VALUE:
2190 		return check_map_access(env, regno, reg->off, access_size,
2191 					zero_size_allowed);
2192 	default: /* scalar_value|ptr_to_stack or invalid ptr */
2193 		return check_stack_boundary(env, regno, access_size,
2194 					    zero_size_allowed, meta);
2195 	}
2196 }
2197 
2198 static bool arg_type_is_mem_ptr(enum bpf_arg_type type)
2199 {
2200 	return type == ARG_PTR_TO_MEM ||
2201 	       type == ARG_PTR_TO_MEM_OR_NULL ||
2202 	       type == ARG_PTR_TO_UNINIT_MEM;
2203 }
2204 
2205 static bool arg_type_is_mem_size(enum bpf_arg_type type)
2206 {
2207 	return type == ARG_CONST_SIZE ||
2208 	       type == ARG_CONST_SIZE_OR_ZERO;
2209 }
2210 
2211 static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
2212 			  enum bpf_arg_type arg_type,
2213 			  struct bpf_call_arg_meta *meta)
2214 {
2215 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
2216 	enum bpf_reg_type expected_type, type = reg->type;
2217 	int err = 0;
2218 
2219 	if (arg_type == ARG_DONTCARE)
2220 		return 0;
2221 
2222 	err = check_reg_arg(env, regno, SRC_OP);
2223 	if (err)
2224 		return err;
2225 
2226 	if (arg_type == ARG_ANYTHING) {
2227 		if (is_pointer_value(env, regno)) {
2228 			verbose(env, "R%d leaks addr into helper function\n",
2229 				regno);
2230 			return -EACCES;
2231 		}
2232 		return 0;
2233 	}
2234 
2235 	if (type_is_pkt_pointer(type) &&
2236 	    !may_access_direct_pkt_data(env, meta, BPF_READ)) {
2237 		verbose(env, "helper access to the packet is not allowed\n");
2238 		return -EACCES;
2239 	}
2240 
2241 	if (arg_type == ARG_PTR_TO_MAP_KEY ||
2242 	    arg_type == ARG_PTR_TO_MAP_VALUE ||
2243 	    arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE) {
2244 		expected_type = PTR_TO_STACK;
2245 		if (!type_is_pkt_pointer(type) && type != PTR_TO_MAP_VALUE &&
2246 		    type != expected_type)
2247 			goto err_type;
2248 	} else if (arg_type == ARG_CONST_SIZE ||
2249 		   arg_type == ARG_CONST_SIZE_OR_ZERO) {
2250 		expected_type = SCALAR_VALUE;
2251 		if (type != expected_type)
2252 			goto err_type;
2253 	} else if (arg_type == ARG_CONST_MAP_PTR) {
2254 		expected_type = CONST_PTR_TO_MAP;
2255 		if (type != expected_type)
2256 			goto err_type;
2257 	} else if (arg_type == ARG_PTR_TO_CTX) {
2258 		expected_type = PTR_TO_CTX;
2259 		if (type != expected_type)
2260 			goto err_type;
2261 		err = check_ctx_reg(env, reg, regno);
2262 		if (err < 0)
2263 			return err;
2264 	} else if (arg_type == ARG_PTR_TO_SOCKET) {
2265 		expected_type = PTR_TO_SOCKET;
2266 		if (type != expected_type)
2267 			goto err_type;
2268 		if (meta->ptr_id || !reg->id) {
2269 			verbose(env, "verifier internal error: mismatched references meta=%d, reg=%d\n",
2270 				meta->ptr_id, reg->id);
2271 			return -EFAULT;
2272 		}
2273 		meta->ptr_id = reg->id;
2274 	} else if (arg_type_is_mem_ptr(arg_type)) {
2275 		expected_type = PTR_TO_STACK;
2276 		/* One exception here. If the function allows NULL to be
2277 		 * passed in as an argument, it's a SCALAR_VALUE type. The final
2278 		 * test happens during stack boundary checking.
2279 		 */
2280 		if (register_is_null(reg) &&
2281 		    arg_type == ARG_PTR_TO_MEM_OR_NULL)
2282 			/* final test in check_stack_boundary() */;
2283 		else if (!type_is_pkt_pointer(type) &&
2284 			 type != PTR_TO_MAP_VALUE &&
2285 			 type != expected_type)
2286 			goto err_type;
2287 		meta->raw_mode = arg_type == ARG_PTR_TO_UNINIT_MEM;
2288 	} else {
2289 		verbose(env, "unsupported arg_type %d\n", arg_type);
2290 		return -EFAULT;
2291 	}
2292 
2293 	if (arg_type == ARG_CONST_MAP_PTR) {
2294 		/* bpf_map_xxx(map_ptr) call: remember that map_ptr */
2295 		meta->map_ptr = reg->map_ptr;
2296 	} else if (arg_type == ARG_PTR_TO_MAP_KEY) {
2297 		/* bpf_map_xxx(..., map_ptr, ..., key) call:
2298 		 * check that [key, key + map->key_size) are within
2299 		 * stack limits and initialized
2300 		 */
2301 		if (!meta->map_ptr) {
2302 			/* in the function declaration map_ptr must come before
2303 			 * map_key, so that it's verified and known before
2304 			 * we have to check map_key here. Otherwise it means
2305 			 * that the kernel subsystem misconfigured the verifier
2306 			 */
2307 			verbose(env, "invalid map_ptr to access map->key\n");
2308 			return -EACCES;
2309 		}
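		/* For example, bpf_map_lookup_elem() declares ARG_CONST_MAP_PTR
		 * as arg1 and ARG_PTR_TO_MAP_KEY as arg2, so by the time the
		 * key is checked here, meta->map_ptr is already known and
		 * map->key_size bytes are validated on the stack.
		 */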
2310 		err = check_helper_mem_access(env, regno,
2311 					      meta->map_ptr->key_size, false,
2312 					      NULL);
2313 	} else if (arg_type == ARG_PTR_TO_MAP_VALUE ||
2314 		   arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE) {
2315 		/* bpf_map_xxx(..., map_ptr, ..., value) call:
2316 		 * check [value, value + map->value_size) validity
2317 		 */
2318 		if (!meta->map_ptr) {
2319 			/* kernel subsystem misconfigured verifier */
2320 			verbose(env, "invalid map_ptr to access map->value\n");
2321 			return -EACCES;
2322 		}
2323 		meta->raw_mode = (arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE);
2324 		err = check_helper_mem_access(env, regno,
2325 					      meta->map_ptr->value_size, false,
2326 					      meta);
2327 	} else if (arg_type_is_mem_size(arg_type)) {
2328 		bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO);
2329 
2330 		/* remember the mem_size which may be used later
2331 		 * to refine return values.
2332 		 */
2333 		meta->msize_smax_value = reg->smax_value;
2334 		meta->msize_umax_value = reg->umax_value;
2335 
2336 		/* The register is SCALAR_VALUE; the access check
2337 		 * happens using its boundaries.
2338 		 */
2339 		if (!tnum_is_const(reg->var_off))
2340 			/* For unprivileged variable accesses, disable raw
2341 			 * mode so that the program is required to
2342 			 * initialize all the memory that the helper could
2343 			 * just partially fill up.
2344 			 */
2345 			meta = NULL;
2346 
2347 		if (reg->smin_value < 0) {
2348 			verbose(env, "R%d min value is negative, either use unsigned or 'var &= const'\n",
2349 				regno);
2350 			return -EACCES;
2351 		}
2352 
2353 		if (reg->umin_value == 0) {
2354 			err = check_helper_mem_access(env, regno - 1, 0,
2355 						      zero_size_allowed,
2356 						      meta);
2357 			if (err)
2358 				return err;
2359 		}
2360 
2361 		if (reg->umax_value >= BPF_MAX_VAR_SIZ) {
2362 			verbose(env, "R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n",
2363 				regno);
2364 			return -EACCES;
2365 		}
2366 		err = check_helper_mem_access(env, regno - 1,
2367 					      reg->umax_value,
2368 					      zero_size_allowed, meta);
2369 	}
2370 
2371 	return err;
2372 err_type:
2373 	verbose(env, "R%d type=%s expected=%s\n", regno,
2374 		reg_type_str[type], reg_type_str[expected_type]);
2375 	return -EACCES;
2376 }
2377 
2378 static int check_map_func_compatibility(struct bpf_verifier_env *env,
2379 					struct bpf_map *map, int func_id)
2380 {
2381 	if (!map)
2382 		return 0;
2383 
2384 	/* We need a two way check, first is from map perspective ... */
2385 	switch (map->map_type) {
2386 	case BPF_MAP_TYPE_PROG_ARRAY:
2387 		if (func_id != BPF_FUNC_tail_call)
2388 			goto error;
2389 		break;
2390 	case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
2391 		if (func_id != BPF_FUNC_perf_event_read &&
2392 		    func_id != BPF_FUNC_perf_event_output &&
2393 		    func_id != BPF_FUNC_perf_event_read_value)
2394 			goto error;
2395 		break;
2396 	case BPF_MAP_TYPE_STACK_TRACE:
2397 		if (func_id != BPF_FUNC_get_stackid)
2398 			goto error;
2399 		break;
2400 	case BPF_MAP_TYPE_CGROUP_ARRAY:
2401 		if (func_id != BPF_FUNC_skb_under_cgroup &&
2402 		    func_id != BPF_FUNC_current_task_under_cgroup)
2403 			goto error;
2404 		break;
2405 	case BPF_MAP_TYPE_CGROUP_STORAGE:
2406 	case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
2407 		if (func_id != BPF_FUNC_get_local_storage)
2408 			goto error;
2409 		break;
2410 	/* devmap returns a pointer to a live net_device ifindex that we cannot
2411 	 * allow to be modified from the bpf side. So do not allow lookup of
2412 	 * elements for now.
2413 	 */
2414 	case BPF_MAP_TYPE_DEVMAP:
2415 		if (func_id != BPF_FUNC_redirect_map)
2416 			goto error;
2417 		break;
2418 	/* Restrict bpf side of cpumap and xskmap, open when use-cases
2419 	 * appear.
2420 	 */
2421 	case BPF_MAP_TYPE_CPUMAP:
2422 	case BPF_MAP_TYPE_XSKMAP:
2423 		if (func_id != BPF_FUNC_redirect_map)
2424 			goto error;
2425 		break;
2426 	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
2427 	case BPF_MAP_TYPE_HASH_OF_MAPS:
2428 		if (func_id != BPF_FUNC_map_lookup_elem)
2429 			goto error;
2430 		break;
2431 	case BPF_MAP_TYPE_SOCKMAP:
2432 		if (func_id != BPF_FUNC_sk_redirect_map &&
2433 		    func_id != BPF_FUNC_sock_map_update &&
2434 		    func_id != BPF_FUNC_map_delete_elem &&
2435 		    func_id != BPF_FUNC_msg_redirect_map)
2436 			goto error;
2437 		break;
2438 	case BPF_MAP_TYPE_SOCKHASH:
2439 		if (func_id != BPF_FUNC_sk_redirect_hash &&
2440 		    func_id != BPF_FUNC_sock_hash_update &&
2441 		    func_id != BPF_FUNC_map_delete_elem &&
2442 		    func_id != BPF_FUNC_msg_redirect_hash)
2443 			goto error;
2444 		break;
2445 	case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
2446 		if (func_id != BPF_FUNC_sk_select_reuseport)
2447 			goto error;
2448 		break;
2449 	case BPF_MAP_TYPE_QUEUE:
2450 	case BPF_MAP_TYPE_STACK:
2451 		if (func_id != BPF_FUNC_map_peek_elem &&
2452 		    func_id != BPF_FUNC_map_pop_elem &&
2453 		    func_id != BPF_FUNC_map_push_elem)
2454 			goto error;
2455 		break;
2456 	default:
2457 		break;
2458 	}
2459 
2460 	/* ... and second from the function itself. */
2461 	switch (func_id) {
2462 	case BPF_FUNC_tail_call:
2463 		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
2464 			goto error;
2465 		if (env->subprog_cnt > 1) {
2466 			verbose(env, "tail_calls are not allowed in programs with bpf-to-bpf calls\n");
2467 			return -EINVAL;
2468 		}
2469 		break;
2470 	case BPF_FUNC_perf_event_read:
2471 	case BPF_FUNC_perf_event_output:
2472 	case BPF_FUNC_perf_event_read_value:
2473 		if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
2474 			goto error;
2475 		break;
2476 	case BPF_FUNC_get_stackid:
2477 		if (map->map_type != BPF_MAP_TYPE_STACK_TRACE)
2478 			goto error;
2479 		break;
2480 	case BPF_FUNC_current_task_under_cgroup:
2481 	case BPF_FUNC_skb_under_cgroup:
2482 		if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY)
2483 			goto error;
2484 		break;
2485 	case BPF_FUNC_redirect_map:
2486 		if (map->map_type != BPF_MAP_TYPE_DEVMAP &&
2487 		    map->map_type != BPF_MAP_TYPE_CPUMAP &&
2488 		    map->map_type != BPF_MAP_TYPE_XSKMAP)
2489 			goto error;
2490 		break;
2491 	case BPF_FUNC_sk_redirect_map:
2492 	case BPF_FUNC_msg_redirect_map:
2493 	case BPF_FUNC_sock_map_update:
2494 		if (map->map_type != BPF_MAP_TYPE_SOCKMAP)
2495 			goto error;
2496 		break;
2497 	case BPF_FUNC_sk_redirect_hash:
2498 	case BPF_FUNC_msg_redirect_hash:
2499 	case BPF_FUNC_sock_hash_update:
2500 		if (map->map_type != BPF_MAP_TYPE_SOCKHASH)
2501 			goto error;
2502 		break;
2503 	case BPF_FUNC_get_local_storage:
2504 		if (map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
2505 		    map->map_type != BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
2506 			goto error;
2507 		break;
2508 	case BPF_FUNC_sk_select_reuseport:
2509 		if (map->map_type != BPF_MAP_TYPE_REUSEPORT_SOCKARRAY)
2510 			goto error;
2511 		break;
2512 	case BPF_FUNC_map_peek_elem:
2513 	case BPF_FUNC_map_pop_elem:
2514 	case BPF_FUNC_map_push_elem:
2515 		if (map->map_type != BPF_MAP_TYPE_QUEUE &&
2516 		    map->map_type != BPF_MAP_TYPE_STACK)
2517 			goto error;
2518 		break;
2519 	default:
2520 		break;
2521 	}
2522 
2523 	return 0;
2524 error:
2525 	verbose(env, "cannot pass map_type %d into func %s#%d\n",
2526 		map->map_type, func_id_name(func_id), func_id);
2527 	return -EINVAL;
2528 }
2529 
2530 static bool check_raw_mode_ok(const struct bpf_func_proto *fn)
2531 {
2532 	int count = 0;
2533 
2534 	if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM)
2535 		count++;
2536 	if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM)
2537 		count++;
2538 	if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM)
2539 		count++;
2540 	if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM)
2541 		count++;
2542 	if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM)
2543 		count++;
2544 
2545 	/* We only support one arg being in raw mode at the moment,
2546 	 * which is sufficient for the helper functions we have
2547 	 * right now.
2548 	 */
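	/* For instance, bpf_probe_read() declares its destination buffer as
	 * ARG_PTR_TO_UNINIT_MEM: the helper fills the whole buffer itself, so
	 * the pointed-to stack bytes do not need to be initialized beforehand.
	 */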
2549 	return count <= 1;
2550 }
2551 
2552 static bool check_args_pair_invalid(enum bpf_arg_type arg_curr,
2553 				    enum bpf_arg_type arg_next)
2554 {
2555 	return (arg_type_is_mem_ptr(arg_curr) &&
2556 	        !arg_type_is_mem_size(arg_next)) ||
2557 	       (!arg_type_is_mem_ptr(arg_curr) &&
2558 		arg_type_is_mem_size(arg_next));
2559 }
2560 
2561 static bool check_arg_pair_ok(const struct bpf_func_proto *fn)
2562 {
2563 	/* bpf_xxx(..., buf, len) call will access 'len'
2564 	 * bytes from memory 'buf'. Both arg types need
2565 	 * to be paired, so make sure there's no buggy
2566 	 * helper function specification.
2567 	 */
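	/* Illustrative pairing (one in-tree example): bpf_perf_event_output()
	 * passes a buffer as ARG_PTR_TO_MEM in arg4 immediately followed by
	 * its length as ARG_CONST_SIZE_OR_ZERO in arg5; a prototype with a
	 * mem pointer that is not directly followed by a size argument (or
	 * vice versa) is rejected below.
	 */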
2568 	if (arg_type_is_mem_size(fn->arg1_type) ||
2569 	    arg_type_is_mem_ptr(fn->arg5_type)  ||
2570 	    check_args_pair_invalid(fn->arg1_type, fn->arg2_type) ||
2571 	    check_args_pair_invalid(fn->arg2_type, fn->arg3_type) ||
2572 	    check_args_pair_invalid(fn->arg3_type, fn->arg4_type) ||
2573 	    check_args_pair_invalid(fn->arg4_type, fn->arg5_type))
2574 		return false;
2575 
2576 	return true;
2577 }
2578 
2579 static bool check_refcount_ok(const struct bpf_func_proto *fn)
2580 {
2581 	int count = 0;
2582 
2583 	if (arg_type_is_refcounted(fn->arg1_type))
2584 		count++;
2585 	if (arg_type_is_refcounted(fn->arg2_type))
2586 		count++;
2587 	if (arg_type_is_refcounted(fn->arg3_type))
2588 		count++;
2589 	if (arg_type_is_refcounted(fn->arg4_type))
2590 		count++;
2591 	if (arg_type_is_refcounted(fn->arg5_type))
2592 		count++;
2593 
2594 	/* We only support one arg being refcounted at the moment,
2595 	 * which is sufficient for the helper functions we have right now.
2596 	 */
2597 	return count <= 1;
2598 }
2599 
2600 static int check_func_proto(const struct bpf_func_proto *fn)
2601 {
2602 	return check_raw_mode_ok(fn) &&
2603 	       check_arg_pair_ok(fn) &&
2604 	       check_refcount_ok(fn) ? 0 : -EINVAL;
2605 }
2606 
2607 /* Packet data might have moved; any old PTR_TO_PACKET[_META,_END]
2608  * are now invalid, so turn them into unknown SCALAR_VALUE.
2609  */
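/* For example, helpers such as bpf_skb_store_bytes() or bpf_skb_pull_data()
 * may reallocate or move the packet data; after such a call every
 * PTR_TO_PACKET[_META,_END] register, including spilled copies, becomes an
 * unknown scalar and the packet pointers have to be re-derived from the ctx.
 */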
2610 static void __clear_all_pkt_pointers(struct bpf_verifier_env *env,
2611 				     struct bpf_func_state *state)
2612 {
2613 	struct bpf_reg_state *regs = state->regs, *reg;
2614 	int i;
2615 
2616 	for (i = 0; i < MAX_BPF_REG; i++)
2617 		if (reg_is_pkt_pointer_any(&regs[i]))
2618 			mark_reg_unknown(env, regs, i);
2619 
2620 	bpf_for_each_spilled_reg(i, state, reg) {
2621 		if (!reg)
2622 			continue;
2623 		if (reg_is_pkt_pointer_any(reg))
2624 			__mark_reg_unknown(reg);
2625 	}
2626 }
2627 
2628 static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
2629 {
2630 	struct bpf_verifier_state *vstate = env->cur_state;
2631 	int i;
2632 
2633 	for (i = 0; i <= vstate->curframe; i++)
2634 		__clear_all_pkt_pointers(env, vstate->frame[i]);
2635 }
2636 
2637 static void release_reg_references(struct bpf_verifier_env *env,
2638 				   struct bpf_func_state *state, int id)
2639 {
2640 	struct bpf_reg_state *regs = state->regs, *reg;
2641 	int i;
2642 
2643 	for (i = 0; i < MAX_BPF_REG; i++)
2644 		if (regs[i].id == id)
2645 			mark_reg_unknown(env, regs, i);
2646 
2647 	bpf_for_each_spilled_reg(i, state, reg) {
2648 		if (!reg)
2649 			continue;
2650 		if (reg_is_refcounted(reg) && reg->id == id)
2651 			__mark_reg_unknown(reg);
2652 	}
2653 }
2654 
2655 /* The pointer with the specified id has released its reference to kernel
2656  * resources. Identify all copies of the same pointer and clear the reference.
2657  */
2658 static int release_reference(struct bpf_verifier_env *env,
2659 			     struct bpf_call_arg_meta *meta)
2660 {
2661 	struct bpf_verifier_state *vstate = env->cur_state;
2662 	int i;
2663 
2664 	for (i = 0; i <= vstate->curframe; i++)
2665 		release_reg_references(env, vstate->frame[i], meta->ptr_id);
2666 
2667 	return release_reference_state(env, meta->ptr_id);
2668 }
2669 
2670 static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
2671 			   int *insn_idx)
2672 {
2673 	struct bpf_verifier_state *state = env->cur_state;
2674 	struct bpf_func_state *caller, *callee;
2675 	int i, err, subprog, target_insn;
2676 
2677 	if (state->curframe + 1 >= MAX_CALL_FRAMES) {
2678 		verbose(env, "the call stack of %d frames is too deep\n",
2679 			state->curframe + 2);
2680 		return -E2BIG;
2681 	}
2682 
2683 	target_insn = *insn_idx + insn->imm;
2684 	subprog = find_subprog(env, target_insn + 1);
2685 	if (subprog < 0) {
2686 		verbose(env, "verifier bug. No program starts at insn %d\n",
2687 			target_insn + 1);
2688 		return -EFAULT;
2689 	}
2690 
2691 	caller = state->frame[state->curframe];
2692 	if (state->frame[state->curframe + 1]) {
2693 		verbose(env, "verifier bug. Frame %d already allocated\n",
2694 			state->curframe + 1);
2695 		return -EFAULT;
2696 	}
2697 
2698 	callee = kzalloc(sizeof(*callee), GFP_KERNEL);
2699 	if (!callee)
2700 		return -ENOMEM;
2701 	state->frame[state->curframe + 1] = callee;
2702 
2703 	/* callee cannot access r0, r6 - r9 for reading and has to write
2704 	 * into its own stack before reading from it.
2705 	 * callee can read/write into caller's stack
2706 	 */
2707 	init_func_state(env, callee,
2708 			/* remember the callsite, it will be used by bpf_exit */
2709 			*insn_idx /* callsite */,
2710 			state->curframe + 1 /* frameno within this callchain */,
2711 			subprog /* subprog number within this prog */);
2712 
2713 	/* Transfer references to the callee */
2714 	err = transfer_reference_state(callee, caller);
2715 	if (err)
2716 		return err;
2717 
2718 	/* copy r1 - r5 args that callee can access.  The copy includes parent
2719 	 * pointers, which connect us up to the liveness chain
2720 	 */
2721 	for (i = BPF_REG_1; i <= BPF_REG_5; i++)
2722 		callee->regs[i] = caller->regs[i];
2723 
2724 	/* after the call registers r0 - r5 were scratched */
2725 	for (i = 0; i < CALLER_SAVED_REGS; i++) {
2726 		mark_reg_not_init(env, caller->regs, caller_saved[i]);
2727 		check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
2728 	}
2729 
2730 	/* only increment it after check_reg_arg() finished */
2731 	state->curframe++;
2732 
2733 	/* and go analyze first insn of the callee */
2734 	*insn_idx = target_insn;
2735 
2736 	if (env->log.level) {
2737 		verbose(env, "caller:\n");
2738 		print_verifier_state(env, caller);
2739 		verbose(env, "callee:\n");
2740 		print_verifier_state(env, callee);
2741 	}
2742 	return 0;
2743 }
2744 
2745 static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
2746 {
2747 	struct bpf_verifier_state *state = env->cur_state;
2748 	struct bpf_func_state *caller, *callee;
2749 	struct bpf_reg_state *r0;
2750 	int err;
2751 
2752 	callee = state->frame[state->curframe];
2753 	r0 = &callee->regs[BPF_REG_0];
2754 	if (r0->type == PTR_TO_STACK) {
2755 		/* technically it's ok to return the caller's stack pointer
2756 		 * (or the caller's caller's pointer) back to the caller,
2757 		 * since these pointers are valid. Only the current stack
2758 		 * pointer will be invalid as soon as the function exits,
2759 		 * but let's be conservative
2760 		 */
2761 		verbose(env, "cannot return stack pointer to the caller\n");
2762 		return -EINVAL;
2763 	}
2764 
2765 	state->curframe--;
2766 	caller = state->frame[state->curframe];
2767 	/* return to the caller whatever r0 had in the callee */
2768 	caller->regs[BPF_REG_0] = *r0;
2769 
2770 	/* Transfer references to the caller */
2771 	err = transfer_reference_state(caller, callee);
2772 	if (err)
2773 		return err;
2774 
2775 	*insn_idx = callee->callsite + 1;
2776 	if (env->log.level) {
2777 		verbose(env, "returning from callee:\n");
2778 		print_verifier_state(env, callee);
2779 		verbose(env, "to caller at %d:\n", *insn_idx);
2780 		print_verifier_state(env, caller);
2781 	}
2782 	/* clear everything in the callee */
2783 	free_func_state(callee);
2784 	state->frame[state->curframe + 1] = NULL;
2785 	return 0;
2786 }
2787 
2788 static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type,
2789 				   int func_id,
2790 				   struct bpf_call_arg_meta *meta)
2791 {
2792 	struct bpf_reg_state *ret_reg = &regs[BPF_REG_0];
2793 
2794 	if (ret_type != RET_INTEGER ||
2795 	    (func_id != BPF_FUNC_get_stack &&
2796 	     func_id != BPF_FUNC_probe_read_str))
2797 		return;
2798 
2799 	ret_reg->smax_value = meta->msize_smax_value;
2800 	ret_reg->umax_value = meta->msize_umax_value;
2801 	__reg_deduce_bounds(ret_reg);
2802 	__reg_bound_offset(ret_reg);
2803 }
2804 
2805 static int
2806 record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
2807 		int func_id, int insn_idx)
2808 {
2809 	struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
2810 
2811 	if (func_id != BPF_FUNC_tail_call &&
2812 	    func_id != BPF_FUNC_map_lookup_elem &&
2813 	    func_id != BPF_FUNC_map_update_elem &&
2814 	    func_id != BPF_FUNC_map_delete_elem &&
2815 	    func_id != BPF_FUNC_map_push_elem &&
2816 	    func_id != BPF_FUNC_map_pop_elem &&
2817 	    func_id != BPF_FUNC_map_peek_elem)
2818 		return 0;
2819 
2820 	if (meta->map_ptr == NULL) {
2821 		verbose(env, "kernel subsystem misconfigured verifier\n");
2822 		return -EINVAL;
2823 	}
2824 
2825 	if (!BPF_MAP_PTR(aux->map_state))
2826 		bpf_map_ptr_store(aux, meta->map_ptr,
2827 				  meta->map_ptr->unpriv_array);
2828 	else if (BPF_MAP_PTR(aux->map_state) != meta->map_ptr)
2829 		bpf_map_ptr_store(aux, BPF_MAP_PTR_POISON,
2830 				  meta->map_ptr->unpriv_array);
2831 	return 0;
2832 }
2833 
2834 static int check_reference_leak(struct bpf_verifier_env *env)
2835 {
2836 	struct bpf_func_state *state = cur_func(env);
2837 	int i;
2838 
2839 	for (i = 0; i < state->acquired_refs; i++) {
2840 		verbose(env, "Unreleased reference id=%d alloc_insn=%d\n",
2841 			state->refs[i].id, state->refs[i].insn_idx);
2842 	}
2843 	return state->acquired_refs ? -EINVAL : 0;
2844 }
2845 
2846 static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
2847 {
2848 	const struct bpf_func_proto *fn = NULL;
2849 	struct bpf_reg_state *regs;
2850 	struct bpf_call_arg_meta meta;
2851 	bool changes_data;
2852 	int i, err;
2853 
2854 	/* find function prototype */
2855 	if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) {
2856 		verbose(env, "invalid func %s#%d\n", func_id_name(func_id),
2857 			func_id);
2858 		return -EINVAL;
2859 	}
2860 
2861 	if (env->ops->get_func_proto)
2862 		fn = env->ops->get_func_proto(func_id, env->prog);
2863 	if (!fn) {
2864 		verbose(env, "unknown func %s#%d\n", func_id_name(func_id),
2865 			func_id);
2866 		return -EINVAL;
2867 	}
2868 
2869 	/* eBPF programs must be GPL compatible to use GPL-ed functions */
2870 	if (!env->prog->gpl_compatible && fn->gpl_only) {
2871 		verbose(env, "cannot call GPL-restricted function from non-GPL compatible program\n");
2872 		return -EINVAL;
2873 	}
2874 
2875 	/* With LD_ABS/IND some JITs save/restore skb from r1. */
2876 	changes_data = bpf_helper_changes_pkt_data(fn->func);
2877 	if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) {
2878 		verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n",
2879 			func_id_name(func_id), func_id);
2880 		return -EINVAL;
2881 	}
2882 
2883 	memset(&meta, 0, sizeof(meta));
2884 	meta.pkt_access = fn->pkt_access;
2885 
2886 	err = check_func_proto(fn);
2887 	if (err) {
2888 		verbose(env, "kernel subsystem misconfigured func %s#%d\n",
2889 			func_id_name(func_id), func_id);
2890 		return err;
2891 	}
2892 
2893 	/* check args */
2894 	err = check_func_arg(env, BPF_REG_1, fn->arg1_type, &meta);
2895 	if (err)
2896 		return err;
2897 	err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta);
2898 	if (err)
2899 		return err;
2900 	err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta);
2901 	if (err)
2902 		return err;
2903 	err = check_func_arg(env, BPF_REG_4, fn->arg4_type, &meta);
2904 	if (err)
2905 		return err;
2906 	err = check_func_arg(env, BPF_REG_5, fn->arg5_type, &meta);
2907 	if (err)
2908 		return err;
2909 
2910 	err = record_func_map(env, &meta, func_id, insn_idx);
2911 	if (err)
2912 		return err;
2913 
2914 	/* Mark slots with STACK_MISC in case of raw mode; the stack offset
2915 	 * is inferred from register state.
2916 	 */
2917 	for (i = 0; i < meta.access_size; i++) {
2918 		err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B,
2919 				       BPF_WRITE, -1, false);
2920 		if (err)
2921 			return err;
2922 	}
2923 
2924 	if (func_id == BPF_FUNC_tail_call) {
2925 		err = check_reference_leak(env);
2926 		if (err) {
2927 			verbose(env, "tail_call would lead to reference leak\n");
2928 			return err;
2929 		}
2930 	} else if (is_release_function(func_id)) {
2931 		err = release_reference(env, &meta);
2932 		if (err)
2933 			return err;
2934 	}
2935 
2936 	regs = cur_regs(env);
2937 
2938 	/* check that the flags argument in get_local_storage(map, flags) is 0;
2939 	 * this is required because get_local_storage() can't return an error.
2940 	 */
2941 	if (func_id == BPF_FUNC_get_local_storage &&
2942 	    !register_is_null(&regs[BPF_REG_2])) {
2943 		verbose(env, "get_local_storage() doesn't support non-zero flags\n");
2944 		return -EINVAL;
2945 	}
2946 
2947 	/* reset caller saved regs */
2948 	for (i = 0; i < CALLER_SAVED_REGS; i++) {
2949 		mark_reg_not_init(env, regs, caller_saved[i]);
2950 		check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
2951 	}
2952 
2953 	/* update return register (already marked as written above) */
2954 	if (fn->ret_type == RET_INTEGER) {
2955 		/* sets type to SCALAR_VALUE */
2956 		mark_reg_unknown(env, regs, BPF_REG_0);
2957 	} else if (fn->ret_type == RET_VOID) {
2958 		regs[BPF_REG_0].type = NOT_INIT;
2959 	} else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL ||
2960 		   fn->ret_type == RET_PTR_TO_MAP_VALUE) {
2961 		/* There is no offset yet applied, variable or fixed */
2962 		mark_reg_known_zero(env, regs, BPF_REG_0);
2963 		/* remember map_ptr, so that check_map_access()
2964 		 * can check 'value_size' boundary of memory access
2965 		 * to map element returned from bpf_map_lookup_elem()
2966 		 */
2967 		if (meta.map_ptr == NULL) {
2968 			verbose(env,
2969 				"kernel subsystem misconfigured verifier\n");
2970 			return -EINVAL;
2971 		}
2972 		regs[BPF_REG_0].map_ptr = meta.map_ptr;
2973 		if (fn->ret_type == RET_PTR_TO_MAP_VALUE) {
2974 			regs[BPF_REG_0].type = PTR_TO_MAP_VALUE;
2975 		} else {
2976 			regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
2977 			regs[BPF_REG_0].id = ++env->id_gen;
2978 		}
2979 	} else if (fn->ret_type == RET_PTR_TO_SOCKET_OR_NULL) {
2980 		int id = acquire_reference_state(env, insn_idx);
2981 		if (id < 0)
2982 			return id;
2983 		mark_reg_known_zero(env, regs, BPF_REG_0);
2984 		regs[BPF_REG_0].type = PTR_TO_SOCKET_OR_NULL;
2985 		regs[BPF_REG_0].id = id;
2986 	} else {
2987 		verbose(env, "unknown return type %d of func %s#%d\n",
2988 			fn->ret_type, func_id_name(func_id), func_id);
2989 		return -EINVAL;
2990 	}
2991 
2992 	do_refine_retval_range(regs, fn->ret_type, func_id, &meta);
2993 
2994 	err = check_map_func_compatibility(env, meta.map_ptr, func_id);
2995 	if (err)
2996 		return err;
2997 
2998 	if (func_id == BPF_FUNC_get_stack && !env->prog->has_callchain_buf) {
2999 		const char *err_str;
3000 
3001 #ifdef CONFIG_PERF_EVENTS
3002 		err = get_callchain_buffers(sysctl_perf_event_max_stack);
3003 		err_str = "cannot get callchain buffer for func %s#%d\n";
3004 #else
3005 		err = -ENOTSUPP;
3006 		err_str = "func %s#%d not supported without CONFIG_PERF_EVENTS\n";
3007 #endif
3008 		if (err) {
3009 			verbose(env, err_str, func_id_name(func_id), func_id);
3010 			return err;
3011 		}
3012 
3013 		env->prog->has_callchain_buf = true;
3014 	}
3015 
3016 	if (changes_data)
3017 		clear_all_pkt_pointers(env);
3018 	return 0;
3019 }
3020 
3021 static bool signed_add_overflows(s64 a, s64 b)
3022 {
3023 	/* Do the add in u64, where overflow is well-defined */
3024 	s64 res = (s64)((u64)a + (u64)b);
3025 
3026 	if (b < 0)
3027 		return res > a;
3028 	return res < a;
3029 }
3030 
3031 static bool signed_sub_overflows(s64 a, s64 b)
3032 {
3033 	/* Do the sub in u64, where overflow is well-defined */
3034 	s64 res = (s64)((u64)a - (u64)b);
3035 
3036 	if (b < 0)
3037 		return res < a;
3038 	return res > a;
3039 }
3040 
3041 static bool check_reg_sane_offset(struct bpf_verifier_env *env,
3042 				  const struct bpf_reg_state *reg,
3043 				  enum bpf_reg_type type)
3044 {
3045 	bool known = tnum_is_const(reg->var_off);
3046 	s64 val = reg->var_off.value;
3047 	s64 smin = reg->smin_value;
3048 
3049 	if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) {
3050 		verbose(env, "math between %s pointer and %lld is not allowed\n",
3051 			reg_type_str[type], val);
3052 		return false;
3053 	}
3054 
3055 	if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) {
3056 		verbose(env, "%s pointer offset %d is not allowed\n",
3057 			reg_type_str[type], reg->off);
3058 		return false;
3059 	}
3060 
3061 	if (smin == S64_MIN) {
3062 		verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n",
3063 			reg_type_str[type]);
3064 		return false;
3065 	}
3066 
3067 	if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) {
3068 		verbose(env, "value %lld makes %s pointer be out of bounds\n",
3069 			smin, reg_type_str[type]);
3070 		return false;
3071 	}
3072 
3073 	return true;
3074 }
3075 
3076 static struct bpf_insn_aux_data *cur_aux(struct bpf_verifier_env *env)
3077 {
3078 	return &env->insn_aux_data[env->insn_idx];
3079 }
3080 
3081 static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
3082 			      u32 *ptr_limit, u8 opcode, bool off_is_neg)
3083 {
3084 	bool mask_to_left = (opcode == BPF_ADD &&  off_is_neg) ||
3085 			    (opcode == BPF_SUB && !off_is_neg);
3086 	u32 off;
3087 
3088 	switch (ptr_reg->type) {
3089 	case PTR_TO_STACK:
3090 		off = ptr_reg->off + ptr_reg->var_off.value;
3091 		if (mask_to_left)
3092 			*ptr_limit = MAX_BPF_STACK + off;
3093 		else
3094 			*ptr_limit = -off;
3095 		return 0;
3096 	case PTR_TO_MAP_VALUE:
3097 		if (mask_to_left) {
3098 			*ptr_limit = ptr_reg->umax_value + ptr_reg->off;
3099 		} else {
3100 			off = ptr_reg->smin_value + ptr_reg->off;
3101 			*ptr_limit = ptr_reg->map_ptr->value_size - off;
3102 		}
3103 		return 0;
3104 	default:
3105 		return -EINVAL;
3106 	}
3107 }
3108 
3109 static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env,
3110 				    const struct bpf_insn *insn)
3111 {
3112 	return env->allow_ptr_leaks || BPF_SRC(insn->code) == BPF_K;
3113 }
3114 
3115 static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux,
3116 				       u32 alu_state, u32 alu_limit)
3117 {
3118 	/* If we arrived here from different branches with different
3119 	 * state or limits to sanitize, then this won't work.
3120 	 */
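	/* Illustrative conflict (hypothetical program): if two paths reach the
	 * same 'ptr += scalar' insn, one with a PTR_TO_STACK at fp-8 and the
	 * other at fp-64, the computed alu_limit differs between the paths and
	 * a single patched constant cannot cover both, hence -EACCES.
	 */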
3121 	if (aux->alu_state &&
3122 	    (aux->alu_state != alu_state ||
3123 	     aux->alu_limit != alu_limit))
3124 		return -EACCES;
3125 
3126 	/* Corresponding fixup done in fixup_bpf_calls(). */
3127 	aux->alu_state = alu_state;
3128 	aux->alu_limit = alu_limit;
3129 	return 0;
3130 }
3131 
3132 static int sanitize_val_alu(struct bpf_verifier_env *env,
3133 			    struct bpf_insn *insn)
3134 {
3135 	struct bpf_insn_aux_data *aux = cur_aux(env);
3136 
3137 	if (can_skip_alu_sanitation(env, insn))
3138 		return 0;
3139 
3140 	return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0);
3141 }
3142 
3143 static int sanitize_ptr_alu(struct bpf_verifier_env *env,
3144 			    struct bpf_insn *insn,
3145 			    const struct bpf_reg_state *ptr_reg,
3146 			    struct bpf_reg_state *dst_reg,
3147 			    bool off_is_neg)
3148 {
3149 	struct bpf_verifier_state *vstate = env->cur_state;
3150 	struct bpf_insn_aux_data *aux = cur_aux(env);
3151 	bool ptr_is_dst_reg = ptr_reg == dst_reg;
3152 	u8 opcode = BPF_OP(insn->code);
3153 	u32 alu_state, alu_limit;
3154 	struct bpf_reg_state tmp;
3155 	bool ret;
3156 
3157 	if (can_skip_alu_sanitation(env, insn))
3158 		return 0;
3159 
3160 	/* We already marked aux for masking from non-speculative
3161 	 * paths, thus we got here in the first place. We only care
3162 	 * to explore bad access from here.
3163 	 */
3164 	if (vstate->speculative)
3165 		goto do_sim;
3166 
3167 	alu_state  = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
3168 	alu_state |= ptr_is_dst_reg ?
3169 		     BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
3170 
3171 	if (retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg))
3172 		return 0;
3173 	if (update_alu_sanitation_state(aux, alu_state, alu_limit))
3174 		return -EACCES;
3175 do_sim:
3176 	/* Simulate and find potential out-of-bounds access under
3177 	 * speculative execution from truncation as a result of
3178 	 * masking when off was not within expected range. If off
3179 	 * sits in dst, then we temporarily need to move ptr there
3180 	 * to simulate dst (== 0) +/-= ptr. Needed, for example,
3181 	 * for cases where we use K-based arithmetic in one direction
3182 	 * and truncated reg-based in the other in order to explore
3183 	 * bad access.
3184 	 */
3185 	if (!ptr_is_dst_reg) {
3186 		tmp = *dst_reg;
3187 		*dst_reg = *ptr_reg;
3188 	}
3189 	ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true);
3190 	if (!ptr_is_dst_reg)
3191 		*dst_reg = tmp;
3192 	return !ret ? -EFAULT : 0;
3193 }
3194 
3195 /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
3196  * Caller should also handle BPF_MOV case separately.
3197  * If we return -EACCES, caller may want to try again treating pointer as a
3198  * scalar.  So we only emit a diagnostic if !env->allow_ptr_leaks.
3199  */
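/* Illustrative (hypothetical) sequence, assuming r0 is a PTR_TO_MAP_VALUE
 * obtained from a map lookup that has already been NULL-checked and r2 is a
 * scalar:
 *    BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 15),
 *    BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
 * After the BPF_ADD, r0 keeps the PTR_TO_MAP_VALUE type with a variable
 * offset bounded to [0, 15]; a later check_map_access() verifies any access
 * through r0 against the map's value_size using these bounds.
 */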
3200 static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
3201 				   struct bpf_insn *insn,
3202 				   const struct bpf_reg_state *ptr_reg,
3203 				   const struct bpf_reg_state *off_reg)
3204 {
3205 	struct bpf_verifier_state *vstate = env->cur_state;
3206 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
3207 	struct bpf_reg_state *regs = state->regs, *dst_reg;
3208 	bool known = tnum_is_const(off_reg->var_off);
3209 	s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value,
3210 	    smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
3211 	u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
3212 	    umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
3213 	u32 dst = insn->dst_reg, src = insn->src_reg;
3214 	u8 opcode = BPF_OP(insn->code);
3215 	int ret;
3216 
3217 	dst_reg = &regs[dst];
3218 
3219 	if ((known && (smin_val != smax_val || umin_val != umax_val)) ||
3220 	    smin_val > smax_val || umin_val > umax_val) {
3221 		/* Taint dst register if offset had invalid bounds derived from
3222 		 * e.g. dead branches.
3223 		 */
3224 		__mark_reg_unknown(dst_reg);
3225 		return 0;
3226 	}
3227 
3228 	if (BPF_CLASS(insn->code) != BPF_ALU64) {
3229 		/* 32-bit ALU ops on pointers produce (meaningless) scalars */
3230 		verbose(env,
3231 			"R%d 32-bit pointer arithmetic prohibited\n",
3232 			dst);
3233 		return -EACCES;
3234 	}
3235 
3236 	switch (ptr_reg->type) {
3237 	case PTR_TO_MAP_VALUE_OR_NULL:
3238 		verbose(env, "R%d pointer arithmetic on %s prohibited, null-check it first\n",
3239 			dst, reg_type_str[ptr_reg->type]);
3240 		return -EACCES;
3241 	case CONST_PTR_TO_MAP:
3242 	case PTR_TO_PACKET_END:
3243 	case PTR_TO_SOCKET:
3244 	case PTR_TO_SOCKET_OR_NULL:
3245 		verbose(env, "R%d pointer arithmetic on %s prohibited\n",
3246 			dst, reg_type_str[ptr_reg->type]);
3247 		return -EACCES;
3248 	case PTR_TO_MAP_VALUE:
3249 		if (!env->allow_ptr_leaks && !known && (smin_val < 0) != (smax_val < 0)) {
3250 			verbose(env, "R%d has unknown scalar with mixed signed bounds, pointer arithmetic with it prohibited for !root\n",
3251 				off_reg == dst_reg ? dst : src);
3252 			return -EACCES;
3253 		}
3254 		/* fall-through */
3255 	default:
3256 		break;
3257 	}
3258 
3259 	/* In case of 'scalar += pointer', dst_reg inherits pointer type and id.
3260 	 * The id may be overwritten later if we create a new variable offset.
3261 	 */
3262 	dst_reg->type = ptr_reg->type;
3263 	dst_reg->id = ptr_reg->id;
3264 
3265 	if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) ||
3266 	    !check_reg_sane_offset(env, ptr_reg, ptr_reg->type))
3267 		return -EINVAL;
3268 
3269 	switch (opcode) {
3270 	case BPF_ADD:
3271 		ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0);
3272 		if (ret < 0) {
3273 			verbose(env, "R%d tried to add from different maps or paths\n", dst);
3274 			return ret;
3275 		}
3276 		/* We can take a fixed offset as long as it doesn't overflow
3277 		 * the s32 'off' field
3278 		 */
3279 		if (known && (ptr_reg->off + smin_val ==
3280 			      (s64)(s32)(ptr_reg->off + smin_val))) {
3281 			/* pointer += K.  Accumulate it into fixed offset */
3282 			dst_reg->smin_value = smin_ptr;
3283 			dst_reg->smax_value = smax_ptr;
3284 			dst_reg->umin_value = umin_ptr;
3285 			dst_reg->umax_value = umax_ptr;
3286 			dst_reg->var_off = ptr_reg->var_off;
3287 			dst_reg->off = ptr_reg->off + smin_val;
3288 			dst_reg->raw = ptr_reg->raw;
3289 			break;
3290 		}
3291 		/* A new variable offset is created.  Note that off_reg->off
3292 		 * == 0, since it's a scalar.
3293 		 * dst_reg gets the pointer type and since some positive
3294 		 * integer value was added to the pointer, give it a new 'id'
3295 		 * if it's a PTR_TO_PACKET.
3296 		 * This creates a new 'base' pointer, off_reg (variable) gets
3297 		 * added into the variable offset, and we copy the fixed offset
3298 		 * from ptr_reg.
3299 		 */
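		/* Illustrative numbers (hypothetical): a pointer with off == 8
		 * plus a scalar bounded to [0, 31] keeps off == 8, takes the
		 * scalar's var_off as its variable part, and has its min/max
		 * bounds increased by the scalar's min/max (here 0 and 31).
		 */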
3300 		if (signed_add_overflows(smin_ptr, smin_val) ||
3301 		    signed_add_overflows(smax_ptr, smax_val)) {
3302 			dst_reg->smin_value = S64_MIN;
3303 			dst_reg->smax_value = S64_MAX;
3304 		} else {
3305 			dst_reg->smin_value = smin_ptr + smin_val;
3306 			dst_reg->smax_value = smax_ptr + smax_val;
3307 		}
3308 		if (umin_ptr + umin_val < umin_ptr ||
3309 		    umax_ptr + umax_val < umax_ptr) {
3310 			dst_reg->umin_value = 0;
3311 			dst_reg->umax_value = U64_MAX;
3312 		} else {
3313 			dst_reg->umin_value = umin_ptr + umin_val;
3314 			dst_reg->umax_value = umax_ptr + umax_val;
3315 		}
3316 		dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off);
3317 		dst_reg->off = ptr_reg->off;
3318 		dst_reg->raw = ptr_reg->raw;
3319 		if (reg_is_pkt_pointer(ptr_reg)) {
3320 			dst_reg->id = ++env->id_gen;
3321 			/* something was added to pkt_ptr, set range to zero */
3322 			dst_reg->raw = 0;
3323 		}
3324 		break;
3325 	case BPF_SUB:
3326 		ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0);
3327 		if (ret < 0) {
3328 			verbose(env, "R%d tried to sub from different maps or paths\n", dst);
3329 			return ret;
3330 		}
3331 		if (dst_reg == off_reg) {
3332 			/* scalar -= pointer.  Creates an unknown scalar */
3333 			verbose(env, "R%d tried to subtract pointer from scalar\n",
3334 				dst);
3335 			return -EACCES;
3336 		}
3337 		/* We don't allow subtraction from FP, because (according to
3338 		 * test_verifier.c test "invalid fp arithmetic") JITs might not
3339 		 * be able to deal with it.
3340 		 */
3341 		if (ptr_reg->type == PTR_TO_STACK) {
3342 			verbose(env, "R%d subtraction from stack pointer prohibited\n",
3343 				dst);
3344 			return -EACCES;
3345 		}
3346 		if (known && (ptr_reg->off - smin_val ==
3347 			      (s64)(s32)(ptr_reg->off - smin_val))) {
3348 			/* pointer -= K.  Subtract it from fixed offset */
3349 			dst_reg->smin_value = smin_ptr;
3350 			dst_reg->smax_value = smax_ptr;
3351 			dst_reg->umin_value = umin_ptr;
3352 			dst_reg->umax_value = umax_ptr;
3353 			dst_reg->var_off = ptr_reg->var_off;
3354 			dst_reg->id = ptr_reg->id;
3355 			dst_reg->off = ptr_reg->off - smin_val;
3356 			dst_reg->raw = ptr_reg->raw;
3357 			break;
3358 		}
3359 		/* A new variable offset is created.  If the subtrahend is known
3360 		 * nonnegative, then any reg->range we had before is still good.
3361 		 */
3362 		if (signed_sub_overflows(smin_ptr, smax_val) ||
3363 		    signed_sub_overflows(smax_ptr, smin_val)) {
3364 			/* Overflow possible, we know nothing */
3365 			dst_reg->smin_value = S64_MIN;
3366 			dst_reg->smax_value = S64_MAX;
3367 		} else {
3368 			dst_reg->smin_value = smin_ptr - smax_val;
3369 			dst_reg->smax_value = smax_ptr - smin_val;
3370 		}
3371 		if (umin_ptr < umax_val) {
3372 			/* Overflow possible, we know nothing */
3373 			dst_reg->umin_value = 0;
3374 			dst_reg->umax_value = U64_MAX;
3375 		} else {
3376 			/* Cannot overflow (as long as bounds are consistent) */
3377 			dst_reg->umin_value = umin_ptr - umax_val;
3378 			dst_reg->umax_value = umax_ptr - umin_val;
3379 		}
3380 		dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off);
3381 		dst_reg->off = ptr_reg->off;
3382 		dst_reg->raw = ptr_reg->raw;
3383 		if (reg_is_pkt_pointer(ptr_reg)) {
3384 			dst_reg->id = ++env->id_gen;
3385 			/* something was added to pkt_ptr, set range to zero */
3386 			if (smin_val < 0)
3387 				dst_reg->raw = 0;
3388 		}
3389 		break;
3390 	case BPF_AND:
3391 	case BPF_OR:
3392 	case BPF_XOR:
3393 		/* bitwise ops on pointers are troublesome, prohibit. */
3394 		verbose(env, "R%d bitwise operator %s on pointer prohibited\n",
3395 			dst, bpf_alu_string[opcode >> 4]);
3396 		return -EACCES;
3397 	default:
3398 		/* other operators (e.g. MUL, LSH) produce non-pointer results */
3399 		verbose(env, "R%d pointer arithmetic with %s operator prohibited\n",
3400 			dst, bpf_alu_string[opcode >> 4]);
3401 		return -EACCES;
3402 	}
3403 
3404 	if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type))
3405 		return -EINVAL;
3406 
3407 	__update_reg_bounds(dst_reg);
3408 	__reg_deduce_bounds(dst_reg);
3409 	__reg_bound_offset(dst_reg);
3410 
3411 	/* For unprivileged we require that resulting offset must be in bounds
3412 	 * in order to be able to sanitize access later on.
3413 	 */
3414 	if (!env->allow_ptr_leaks) {
3415 		if (dst_reg->type == PTR_TO_MAP_VALUE &&
3416 		    check_map_access(env, dst, dst_reg->off, 1, false)) {
3417 			verbose(env, "R%d pointer arithmetic of map value goes out of range, "
3418 				"prohibited for !root\n", dst);
3419 			return -EACCES;
3420 		} else if (dst_reg->type == PTR_TO_STACK &&
3421 			   check_stack_access(env, dst_reg, dst_reg->off +
3422 					      dst_reg->var_off.value, 1)) {
3423 			verbose(env, "R%d stack pointer arithmetic goes out of range, "
3424 				"prohibited for !root\n", dst);
3425 			return -EACCES;
3426 		}
3427 	}
3428 
3429 	return 0;
3430 }
3431 
3432 /* WARNING: This function does calculations on 64-bit values, but the actual
3433  * execution may occur on 32-bit values. Therefore, things like bitshifts
3434  * need extra checks in the 32-bit case.
3435  */
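/* An illustrative sketch of that hazard (example values only): take a register
 * whose tracked 64-bit value is known to be 0x100000000 (bit 32 set, low word
 * zero) and a 32-bit BPF_RSH by 1. Real 32-bit execution computes 0 >> 1 == 0,
 * but shifting the tracked 64-bit value would yield 0x80000000, i.e. the
 * verifier would wrongly believe bit 31 of the result is set. That is why,
 * for 32-bit ops, both operands are coerced to 32 bits below before the
 * operation is modelled, rather than only truncating the output.
 */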
3436 static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
3437 				      struct bpf_insn *insn,
3438 				      struct bpf_reg_state *dst_reg,
3439 				      struct bpf_reg_state src_reg)
3440 {
3441 	struct bpf_reg_state *regs = cur_regs(env);
3442 	u8 opcode = BPF_OP(insn->code);
3443 	bool src_known, dst_known;
3444 	s64 smin_val, smax_val;
3445 	u64 umin_val, umax_val;
3446 	u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
3447 	u32 dst = insn->dst_reg;
3448 	int ret;
3449 
3450 	if (insn_bitness == 32) {
3451 		/* Relevant for 32-bit RSH: Information can propagate towards
3452 		 * LSB, so it isn't sufficient to only truncate the output to
3453 		 * 32 bits.
3454 		 */
3455 		coerce_reg_to_size(dst_reg, 4);
3456 		coerce_reg_to_size(&src_reg, 4);
3457 	}
3458 
3459 	smin_val = src_reg.smin_value;
3460 	smax_val = src_reg.smax_value;
3461 	umin_val = src_reg.umin_value;
3462 	umax_val = src_reg.umax_value;
3463 	src_known = tnum_is_const(src_reg.var_off);
3464 	dst_known = tnum_is_const(dst_reg->var_off);
3465 
3466 	if ((src_known && (smin_val != smax_val || umin_val != umax_val)) ||
3467 	    smin_val > smax_val || umin_val > umax_val) {
3468 		/* Taint dst register if offset had invalid bounds derived from
3469 		 * e.g. dead branches.
3470 		 */
3471 		__mark_reg_unknown(dst_reg);
3472 		return 0;
3473 	}
3474 
3475 	if (!src_known &&
3476 	    opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) {
3477 		__mark_reg_unknown(dst_reg);
3478 		return 0;
3479 	}
3480 
3481 	switch (opcode) {
3482 	case BPF_ADD:
3483 		ret = sanitize_val_alu(env, insn);
3484 		if (ret < 0) {
3485 			verbose(env, "R%d tried to add from different pointers or scalars\n", dst);
3486 			return ret;
3487 		}
3488 		if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
3489 		    signed_add_overflows(dst_reg->smax_value, smax_val)) {
3490 			dst_reg->smin_value = S64_MIN;
3491 			dst_reg->smax_value = S64_MAX;
3492 		} else {
3493 			dst_reg->smin_value += smin_val;
3494 			dst_reg->smax_value += smax_val;
3495 		}
3496 		if (dst_reg->umin_value + umin_val < umin_val ||
3497 		    dst_reg->umax_value + umax_val < umax_val) {
3498 			dst_reg->umin_value = 0;
3499 			dst_reg->umax_value = U64_MAX;
3500 		} else {
3501 			dst_reg->umin_value += umin_val;
3502 			dst_reg->umax_value += umax_val;
3503 		}
3504 		dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);
3505 		break;
3506 	case BPF_SUB:
3507 		ret = sanitize_val_alu(env, insn);
3508 		if (ret < 0) {
3509 			verbose(env, "R%d tried to sub from different pointers or scalars\n", dst);
3510 			return ret;
3511 		}
3512 		if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
3513 		    signed_sub_overflows(dst_reg->smax_value, smin_val)) {
3514 			/* Overflow possible, we know nothing */
3515 			dst_reg->smin_value = S64_MIN;
3516 			dst_reg->smax_value = S64_MAX;
3517 		} else {
3518 			dst_reg->smin_value -= smax_val;
3519 			dst_reg->smax_value -= smin_val;
3520 		}
3521 		if (dst_reg->umin_value < umax_val) {
3522 			/* Overflow possible, we know nothing */
3523 			dst_reg->umin_value = 0;
3524 			dst_reg->umax_value = U64_MAX;
3525 		} else {
3526 			/* Cannot overflow (as long as bounds are consistent) */
3527 			dst_reg->umin_value -= umax_val;
3528 			dst_reg->umax_value -= umin_val;
3529 		}
3530 		dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off);
3531 		break;
3532 	case BPF_MUL:
3533 		dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off);
3534 		if (smin_val < 0 || dst_reg->smin_value < 0) {
3535 			/* Ain't nobody got time to multiply that sign */
3536 			__mark_reg_unbounded(dst_reg);
3537 			__update_reg_bounds(dst_reg);
3538 			break;
3539 		}
3540 		/* Both values are positive, so we can work with unsigned and
3541 		 * copy the result to signed (unless it exceeds S64_MAX).
3542 		 */
3543 		if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) {
3544 			/* Potential overflow, we know nothing */
3545 			__mark_reg_unbounded(dst_reg);
3546 			/* (except what we can learn from the var_off) */
3547 			__update_reg_bounds(dst_reg);
3548 			break;
3549 		}
3550 		dst_reg->umin_value *= umin_val;
3551 		dst_reg->umax_value *= umax_val;
3552 		if (dst_reg->umax_value > S64_MAX) {
3553 			/* Overflow possible, we know nothing */
3554 			dst_reg->smin_value = S64_MIN;
3555 			dst_reg->smax_value = S64_MAX;
3556 		} else {
3557 			dst_reg->smin_value = dst_reg->umin_value;
3558 			dst_reg->smax_value = dst_reg->umax_value;
3559 		}
3560 		break;
3561 	case BPF_AND:
3562 		if (src_known && dst_known) {
3563 			__mark_reg_known(dst_reg, dst_reg->var_off.value &
3564 						  src_reg.var_off.value);
3565 			break;
3566 		}
3567 		/* We get our minimum from the var_off, since that's inherently
3568 		 * bitwise.  Our maximum is the minimum of the operands' maxima.
3569 		 */
3570 		dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off);
3571 		dst_reg->umin_value = dst_reg->var_off.value;
3572 		dst_reg->umax_value = min(dst_reg->umax_value, umax_val);
3573 		if (dst_reg->smin_value < 0 || smin_val < 0) {
3574 			/* Lose signed bounds when ANDing negative numbers,
3575 			 * ain't nobody got time for that.
3576 			 */
3577 			dst_reg->smin_value = S64_MIN;
3578 			dst_reg->smax_value = S64_MAX;
3579 		} else {
3580 			/* ANDing two positives gives a positive, so safe to
3581 			 * cast result into s64.
3582 			 */
3583 			dst_reg->smin_value = dst_reg->umin_value;
3584 			dst_reg->smax_value = dst_reg->umax_value;
3585 		}
3586 		/* We may learn something more from the var_off */
3587 		__update_reg_bounds(dst_reg);
3588 		break;
3589 	case BPF_OR:
3590 		if (src_known && dst_known) {
3591 			__mark_reg_known(dst_reg, dst_reg->var_off.value |
3592 						  src_reg.var_off.value);
3593 			break;
3594 		}
3595 		/* We get our maximum from the var_off, and our minimum is the
3596 		 * maximum of the operands' minima
3597 		 */
3598 		dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off);
3599 		dst_reg->umin_value = max(dst_reg->umin_value, umin_val);
3600 		dst_reg->umax_value = dst_reg->var_off.value |
3601 				      dst_reg->var_off.mask;
3602 		if (dst_reg->smin_value < 0 || smin_val < 0) {
3603 			/* Lose signed bounds when ORing negative numbers,
3604 			 * ain't nobody got time for that.
3605 			 */
3606 			dst_reg->smin_value = S64_MIN;
3607 			dst_reg->smax_value = S64_MAX;
3608 		} else {
3609 			/* ORing two positives gives a positive, so safe to
3610 			 * cast result into s64.
3611 			 */
3612 			dst_reg->smin_value = dst_reg->umin_value;
3613 			dst_reg->smax_value = dst_reg->umax_value;
3614 		}
3615 		/* We may learn something more from the var_off */
3616 		__update_reg_bounds(dst_reg);
3617 		break;
3618 	case BPF_LSH:
3619 		if (umax_val >= insn_bitness) {
3620 			/* Shifts greater than 31 or 63 are undefined.
3621 			 * This includes shifts by a negative number.
3622 			 */
3623 			mark_reg_unknown(env, regs, insn->dst_reg);
3624 			break;
3625 		}
3626 		/* We lose all sign bit information (except what we can pick
3627 		 * up from var_off)
3628 		 */
3629 		dst_reg->smin_value = S64_MIN;
3630 		dst_reg->smax_value = S64_MAX;
3631 		/* If we might shift our top bit out, then we know nothing */
3632 		if (dst_reg->umax_value > 1ULL << (63 - umax_val)) {
3633 			dst_reg->umin_value = 0;
3634 			dst_reg->umax_value = U64_MAX;
3635 		} else {
3636 			dst_reg->umin_value <<= umin_val;
3637 			dst_reg->umax_value <<= umax_val;
3638 		}
3639 		dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val);
3640 		/* We may learn something more from the var_off */
3641 		__update_reg_bounds(dst_reg);
3642 		break;
3643 	case BPF_RSH:
3644 		if (umax_val >= insn_bitness) {
3645 			/* Shifts greater than 31 or 63 are undefined.
3646 			 * This includes shifts by a negative number.
3647 			 */
3648 			mark_reg_unknown(env, regs, insn->dst_reg);
3649 			break;
3650 		}
3651 		/* BPF_RSH is an unsigned shift.  If the value in dst_reg might
3652 		 * be negative, then either:
3653 		 * 1) src_reg might be zero, so the sign bit of the result is
3654 		 *    unknown, so we lose our signed bounds
3655 		 * 2) it's known negative, thus the unsigned bounds capture the
3656 		 *    signed bounds
3657 		 * 3) the signed bounds cross zero, so they tell us nothing
3658 		 *    about the result
3659 		 * If the value in dst_reg is known nonnegative, then again the
3660 		 * unsigned bounds capture the signed bounds.
3661 		 * Thus, in all cases it suffices to blow away our signed bounds
3662 		 * and rely on inferring new ones from the unsigned bounds and
3663 		 * var_off of the result.
3664 		 */
3665 		dst_reg->smin_value = S64_MIN;
3666 		dst_reg->smax_value = S64_MAX;
3667 		dst_reg->var_off = tnum_rshift(dst_reg->var_off, umin_val);
3668 		dst_reg->umin_value >>= umax_val;
3669 		dst_reg->umax_value >>= umin_val;
3670 		/* We may learn something more from the var_off */
3671 		__update_reg_bounds(dst_reg);
3672 		break;
3673 	case BPF_ARSH:
3674 		if (umax_val >= insn_bitness) {
3675 			/* Shifts greater than 31 or 63 are undefined.
3676 			 * This includes shifts by a negative number.
3677 			 */
3678 			mark_reg_unknown(env, regs, insn->dst_reg);
3679 			break;
3680 		}
3681 
3682 		/* Upon reaching here, src_known is true and
3683 		 * umax_val is equal to umin_val.
3684 		 */
3685 		dst_reg->smin_value >>= umin_val;
3686 		dst_reg->smax_value >>= umin_val;
3687 		dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val);
3688 
3689 		/* blow away the dst_reg umin_value/umax_value and rely on
3690 		 * dst_reg var_off to refine the result.
3691 		 */
3692 		dst_reg->umin_value = 0;
3693 		dst_reg->umax_value = U64_MAX;
3694 		__update_reg_bounds(dst_reg);
3695 		break;
3696 	default:
3697 		mark_reg_unknown(env, regs, insn->dst_reg);
3698 		break;
3699 	}
3700 
3701 	if (BPF_CLASS(insn->code) != BPF_ALU64) {
3702 		/* 32-bit ALU ops are (32,32)->32 */
3703 		coerce_reg_to_size(dst_reg, 4);
3704 	}
3705 
3706 	__reg_deduce_bounds(dst_reg);
3707 	__reg_bound_offset(dst_reg);
3708 	return 0;
3709 }
3710 
3711 /* Handles ALU ops other than BPF_END, BPF_NEG and BPF_MOV: computes new min/max
3712  * and var_off.
3713  */
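/* A rough sketch of the dispatch below (register numbers are arbitrary
 * examples):
 *
 *   r1 = r10        // r1 now has pointer type (PTR_TO_STACK)
 *   r1 += r2        // pointer += scalar: adjust_ptr_min_max_vals(),
 *                   //   ptr_reg = r1, off_reg = r2
 *   r3 += r1        // scalar += pointer: same helper with src/dst handling
 *                   //   reversed, ptr_reg = r1, off_reg = r3
 *   r2 += r4        // scalar += scalar: adjust_scalar_min_max_vals()
 *   r1 *= r2        // pointer  * scalar: rejected in adjust_ptr_min_max_vals()
 */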
3714 static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
3715 				   struct bpf_insn *insn)
3716 {
3717 	struct bpf_verifier_state *vstate = env->cur_state;
3718 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
3719 	struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg;
3720 	struct bpf_reg_state *ptr_reg = NULL, off_reg = {0};
3721 	u8 opcode = BPF_OP(insn->code);
3722 
3723 	dst_reg = &regs[insn->dst_reg];
3724 	src_reg = NULL;
3725 	if (dst_reg->type != SCALAR_VALUE)
3726 		ptr_reg = dst_reg;
3727 	if (BPF_SRC(insn->code) == BPF_X) {
3728 		src_reg = &regs[insn->src_reg];
3729 		if (src_reg->type != SCALAR_VALUE) {
3730 			if (dst_reg->type != SCALAR_VALUE) {
3731 				/* Combining two pointers by any ALU op yields
3732 				 * an arbitrary scalar. Disallow all math except
3733 				 * pointer subtraction
3734 				 */
3735 				if (opcode == BPF_SUB && env->allow_ptr_leaks) {
3736 					mark_reg_unknown(env, regs, insn->dst_reg);
3737 					return 0;
3738 				}
3739 				verbose(env, "R%d pointer %s pointer prohibited\n",
3740 					insn->dst_reg,
3741 					bpf_alu_string[opcode >> 4]);
3742 				return -EACCES;
3743 			} else {
3744 				/* scalar += pointer
3745 				 * This is legal, but we have to reverse our
3746 				 * src/dest handling in computing the range
3747 				 */
3748 				return adjust_ptr_min_max_vals(env, insn,
3749 							       src_reg, dst_reg);
3750 			}
3751 		} else if (ptr_reg) {
3752 			/* pointer += scalar */
3753 			return adjust_ptr_min_max_vals(env, insn,
3754 						       dst_reg, src_reg);
3755 		}
3756 	} else {
3757 		/* Pretend the src is a reg with a known value, since we only
3758 		 * need to be able to read from this state.
3759 		 */
3760 		off_reg.type = SCALAR_VALUE;
3761 		__mark_reg_known(&off_reg, insn->imm);
3762 		src_reg = &off_reg;
3763 		if (ptr_reg) /* pointer += K */
3764 			return adjust_ptr_min_max_vals(env, insn,
3765 						       ptr_reg, src_reg);
3766 	}
3767 
3768 	/* Got here implies adding two SCALAR_VALUEs */
3769 	if (WARN_ON_ONCE(ptr_reg)) {
3770 		print_verifier_state(env, state);
3771 		verbose(env, "verifier internal error: unexpected ptr_reg\n");
3772 		return -EINVAL;
3773 	}
3774 	if (WARN_ON(!src_reg)) {
3775 		print_verifier_state(env, state);
3776 		verbose(env, "verifier internal error: no src_reg\n");
3777 		return -EINVAL;
3778 	}
3779 	return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg);
3780 }
3781 
3782 /* check validity of 32-bit and 64-bit arithmetic operations */
3783 static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
3784 {
3785 	struct bpf_reg_state *regs = cur_regs(env);
3786 	u8 opcode = BPF_OP(insn->code);
3787 	int err;
3788 
3789 	if (opcode == BPF_END || opcode == BPF_NEG) {
3790 		if (opcode == BPF_NEG) {
3791 			if (BPF_SRC(insn->code) != 0 ||
3792 			    insn->src_reg != BPF_REG_0 ||
3793 			    insn->off != 0 || insn->imm != 0) {
3794 				verbose(env, "BPF_NEG uses reserved fields\n");
3795 				return -EINVAL;
3796 			}
3797 		} else {
3798 			if (insn->src_reg != BPF_REG_0 || insn->off != 0 ||
3799 			    (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) ||
3800 			    BPF_CLASS(insn->code) == BPF_ALU64) {
3801 				verbose(env, "BPF_END uses reserved fields\n");
3802 				return -EINVAL;
3803 			}
3804 		}
3805 
3806 		/* check src operand */
3807 		err = check_reg_arg(env, insn->dst_reg, SRC_OP);
3808 		if (err)
3809 			return err;
3810 
3811 		if (is_pointer_value(env, insn->dst_reg)) {
3812 			verbose(env, "R%d pointer arithmetic prohibited\n",
3813 				insn->dst_reg);
3814 			return -EACCES;
3815 		}
3816 
3817 		/* check dest operand */
3818 		err = check_reg_arg(env, insn->dst_reg, DST_OP);
3819 		if (err)
3820 			return err;
3821 
3822 	} else if (opcode == BPF_MOV) {
3823 
3824 		if (BPF_SRC(insn->code) == BPF_X) {
3825 			if (insn->imm != 0 || insn->off != 0) {
3826 				verbose(env, "BPF_MOV uses reserved fields\n");
3827 				return -EINVAL;
3828 			}
3829 
3830 			/* check src operand */
3831 			err = check_reg_arg(env, insn->src_reg, SRC_OP);
3832 			if (err)
3833 				return err;
3834 		} else {
3835 			if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
3836 				verbose(env, "BPF_MOV uses reserved fields\n");
3837 				return -EINVAL;
3838 			}
3839 		}
3840 
3841 		/* check dest operand, mark as required later */
3842 		err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
3843 		if (err)
3844 			return err;
3845 
3846 		if (BPF_SRC(insn->code) == BPF_X) {
3847 			struct bpf_reg_state *src_reg = regs + insn->src_reg;
3848 			struct bpf_reg_state *dst_reg = regs + insn->dst_reg;
3849 
3850 			if (BPF_CLASS(insn->code) == BPF_ALU64) {
3851 				/* case: R1 = R2
3852 				 * copy register state to dest reg
3853 				 */
3854 				*dst_reg = *src_reg;
3855 				dst_reg->live |= REG_LIVE_WRITTEN;
3856 			} else {
3857 				/* R1 = (u32) R2 */
3858 				if (is_pointer_value(env, insn->src_reg)) {
3859 					verbose(env,
3860 						"R%d partial copy of pointer\n",
3861 						insn->src_reg);
3862 					return -EACCES;
3863 				} else if (src_reg->type == SCALAR_VALUE) {
3864 					*dst_reg = *src_reg;
3865 					dst_reg->live |= REG_LIVE_WRITTEN;
3866 				} else {
3867 					mark_reg_unknown(env, regs,
3868 							 insn->dst_reg);
3869 				}
3870 				coerce_reg_to_size(dst_reg, 4);
3871 			}
3872 		} else {
3873 			/* case: R = imm
3874 			 * remember the value we stored into this reg
3875 			 */
3876 			/* clear any state __mark_reg_known doesn't set */
3877 			mark_reg_unknown(env, regs, insn->dst_reg);
3878 			regs[insn->dst_reg].type = SCALAR_VALUE;
3879 			if (BPF_CLASS(insn->code) == BPF_ALU64) {
3880 				__mark_reg_known(regs + insn->dst_reg,
3881 						 insn->imm);
3882 			} else {
3883 				__mark_reg_known(regs + insn->dst_reg,
3884 						 (u32)insn->imm);
3885 			}
3886 		}
3887 
3888 	} else if (opcode > BPF_END) {
3889 		verbose(env, "invalid BPF_ALU opcode %x\n", opcode);
3890 		return -EINVAL;
3891 
3892 	} else {	/* all other ALU ops: and, sub, xor, add, ... */
3893 
3894 		if (BPF_SRC(insn->code) == BPF_X) {
3895 			if (insn->imm != 0 || insn->off != 0) {
3896 				verbose(env, "BPF_ALU uses reserved fields\n");
3897 				return -EINVAL;
3898 			}
3899 			/* check src1 operand */
3900 			err = check_reg_arg(env, insn->src_reg, SRC_OP);
3901 			if (err)
3902 				return err;
3903 		} else {
3904 			if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
3905 				verbose(env, "BPF_ALU uses reserved fields\n");
3906 				return -EINVAL;
3907 			}
3908 		}
3909 
3910 		/* check src2 operand */
3911 		err = check_reg_arg(env, insn->dst_reg, SRC_OP);
3912 		if (err)
3913 			return err;
3914 
3915 		if ((opcode == BPF_MOD || opcode == BPF_DIV) &&
3916 		    BPF_SRC(insn->code) == BPF_K && insn->imm == 0) {
3917 			verbose(env, "div by zero\n");
3918 			return -EINVAL;
3919 		}
3920 
3921 		if ((opcode == BPF_LSH || opcode == BPF_RSH ||
3922 		     opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
3923 			int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;
3924 
3925 			if (insn->imm < 0 || insn->imm >= size) {
3926 				verbose(env, "invalid shift %d\n", insn->imm);
3927 				return -EINVAL;
3928 			}
3929 		}
3930 
3931 		/* check dest operand */
3932 		err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
3933 		if (err)
3934 			return err;
3935 
3936 		return adjust_reg_min_max_vals(env, insn);
3937 	}
3938 
3939 	return 0;
3940 }
3941 
3942 static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
3943 				   struct bpf_reg_state *dst_reg,
3944 				   enum bpf_reg_type type,
3945 				   bool range_right_open)
3946 {
3947 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
3948 	struct bpf_reg_state *regs = state->regs, *reg;
3949 	u16 new_range;
3950 	int i, j;
3951 
3952 	if (dst_reg->off < 0 ||
3953 	    (dst_reg->off == 0 && range_right_open))
3954 		/* This doesn't give us any range */
3955 		return;
3956 
3957 	if (dst_reg->umax_value > MAX_PACKET_OFF ||
3958 	    dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF)
3959 		/* Risk of overflow.  For instance, ptr + (1<<63) may be less
3960 		 * than pkt_end, but that's because it's also less than pkt.
3961 		 */
3962 		return;
3963 
3964 	new_range = dst_reg->off;
3965 	if (range_right_open)
3966 		new_range--;
3967 
3968 	/* Examples for register markings:
3969 	 *
3970 	 * pkt_data in dst register:
3971 	 *
3972 	 *   r2 = r3;
3973 	 *   r2 += 8;
3974 	 *   if (r2 > pkt_end) goto <handle exception>
3975 	 *   <access okay>
3976 	 *
3977 	 *   r2 = r3;
3978 	 *   r2 += 8;
3979 	 *   if (r2 < pkt_end) goto <access okay>
3980 	 *   <handle exception>
3981 	 *
3982 	 *   Where:
3983 	 *     r2 == dst_reg, pkt_end == src_reg
3984 	 *     r2=pkt(id=n,off=8,r=0)
3985 	 *     r3=pkt(id=n,off=0,r=0)
3986 	 *
3987 	 * pkt_data in src register:
3988 	 *
3989 	 *   r2 = r3;
3990 	 *   r2 += 8;
3991 	 *   if (pkt_end >= r2) goto <access okay>
3992 	 *   <handle exception>
3993 	 *
3994 	 *   r2 = r3;
3995 	 *   r2 += 8;
3996 	 *   if (pkt_end <= r2) goto <handle exception>
3997 	 *   <access okay>
3998 	 *
3999 	 *   Where:
4000 	 *     pkt_end == dst_reg, r2 == src_reg
4001 	 *     r2=pkt(id=n,off=8,r=0)
4002 	 *     r3=pkt(id=n,off=0,r=0)
4003 	 *
4004 	 * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8)
4005 	 * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8)
4006 	 * and [r3, r3 + 8-1) respectively is safe to access depending on
4007 	 * the check.
4008 	 */
4009 
4010 	/* If our ids match, then we must have the same max_value.  And we
4011 	 * don't care about the other reg's fixed offset, since if it's too big
4012 	 * the range won't allow anything.
4013 	 * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16.
4014 	 */
4015 	for (i = 0; i < MAX_BPF_REG; i++)
4016 		if (regs[i].type == type && regs[i].id == dst_reg->id)
4017 			/* keep the maximum range already checked */
4018 			regs[i].range = max(regs[i].range, new_range);
4019 
4020 	for (j = 0; j <= vstate->curframe; j++) {
4021 		state = vstate->frame[j];
4022 		bpf_for_each_spilled_reg(i, state, reg) {
4023 			if (!reg)
4024 				continue;
4025 			if (reg->type == type && reg->id == dst_reg->id)
4026 				reg->range = max(reg->range, new_range);
4027 		}
4028 	}
4029 }
4030 
4031 /* compute branch direction of the expression "if (reg opcode val) goto target;"
4032  * and return:
4033  *  1 - branch will be taken and "goto target" will be executed
4034  *  0 - branch will not be taken and fall-through to next insn
4035  * -1 - unknown. Example: "if (reg < 5)" is unknown when the register's value range is [0,10]
4036  */
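/* A few worked examples (illustrative only), assuming the register's unsigned
 * range is known to be [0, 10]:
 *   BPF_JLT, val = 16 ->  1  (umax 10 < 16, branch always taken)
 *   BPF_JGT, val = 10 ->  0  (umax 10 <= 10, branch never taken)
 *   BPF_JLT, val =  5 -> -1  (the range straddles 5, direction unknown)
 */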
4037 static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode)
4038 {
4039 	if (__is_pointer_value(false, reg))
4040 		return -1;
4041 
4042 	switch (opcode) {
4043 	case BPF_JEQ:
4044 		if (tnum_is_const(reg->var_off))
4045 			return !!tnum_equals_const(reg->var_off, val);
4046 		break;
4047 	case BPF_JNE:
4048 		if (tnum_is_const(reg->var_off))
4049 			return !tnum_equals_const(reg->var_off, val);
4050 		break;
4051 	case BPF_JSET:
4052 		if ((~reg->var_off.mask & reg->var_off.value) & val)
4053 			return 1;
4054 		if (!((reg->var_off.mask | reg->var_off.value) & val))
4055 			return 0;
4056 		break;
4057 	case BPF_JGT:
4058 		if (reg->umin_value > val)
4059 			return 1;
4060 		else if (reg->umax_value <= val)
4061 			return 0;
4062 		break;
4063 	case BPF_JSGT:
4064 		if (reg->smin_value > (s64)val)
4065 			return 1;
4066 		else if (reg->smax_value < (s64)val)
4067 			return 0;
4068 		break;
4069 	case BPF_JLT:
4070 		if (reg->umax_value < val)
4071 			return 1;
4072 		else if (reg->umin_value >= val)
4073 			return 0;
4074 		break;
4075 	case BPF_JSLT:
4076 		if (reg->smax_value < (s64)val)
4077 			return 1;
4078 		else if (reg->smin_value >= (s64)val)
4079 			return 0;
4080 		break;
4081 	case BPF_JGE:
4082 		if (reg->umin_value >= val)
4083 			return 1;
4084 		else if (reg->umax_value < val)
4085 			return 0;
4086 		break;
4087 	case BPF_JSGE:
4088 		if (reg->smin_value >= (s64)val)
4089 			return 1;
4090 		else if (reg->smax_value < (s64)val)
4091 			return 0;
4092 		break;
4093 	case BPF_JLE:
4094 		if (reg->umax_value <= val)
4095 			return 1;
4096 		else if (reg->umin_value > val)
4097 			return 0;
4098 		break;
4099 	case BPF_JSLE:
4100 		if (reg->smax_value <= (s64)val)
4101 			return 1;
4102 		else if (reg->smin_value > (s64)val)
4103 			return 0;
4104 		break;
4105 	}
4106 
4107 	return -1;
4108 }
4109 
4110 /* Adjusts the register min/max values in the case that the dst_reg is the
4111  * variable register that we are working on, and src_reg is a constant or we're
4112  * simply doing a BPF_K check.
4113  * In JEQ/JNE cases we also adjust the var_off values.
4114  */
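/* For example (a sketch with arbitrary numbers): for "if r1 > 8 goto ..."
 * with r1 currently known to lie in [0, 10], the BPF_JGT case below gives the
 * branch-taken copy umin = max(0, 8 + 1) = 9, i.e. [9, 10], and the
 * fall-through copy umax = min(10, 8) = 8, i.e. [0, 8].
 */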
4115 static void reg_set_min_max(struct bpf_reg_state *true_reg,
4116 			    struct bpf_reg_state *false_reg, u64 val,
4117 			    u8 opcode)
4118 {
4119 	/* If the dst_reg is a pointer, we can't learn anything about its
4120 	 * variable offset from the compare (unless src_reg were a pointer into
4121 	 * the same object, but we don't bother with that).
4122 	 * Since false_reg and true_reg have the same type by construction, we
4123 	 * only need to check one of them for pointerness.
4124 	 */
4125 	if (__is_pointer_value(false, false_reg))
4126 		return;
4127 
4128 	switch (opcode) {
4129 	case BPF_JEQ:
4130 		/* If this is false then we know nothing Jon Snow, but if it is
4131 		 * true then we know for sure.
4132 		 */
4133 		__mark_reg_known(true_reg, val);
4134 		break;
4135 	case BPF_JNE:
4136 		/* If this is true we know nothing Jon Snow, but if it is false
4137 		 * we know the value for sure;
4138 		 */
4139 		__mark_reg_known(false_reg, val);
4140 		break;
4141 	case BPF_JSET:
4142 		false_reg->var_off = tnum_and(false_reg->var_off,
4143 					      tnum_const(~val));
4144 		if (is_power_of_2(val))
4145 			true_reg->var_off = tnum_or(true_reg->var_off,
4146 						    tnum_const(val));
4147 		break;
4148 	case BPF_JGT:
4149 		false_reg->umax_value = min(false_reg->umax_value, val);
4150 		true_reg->umin_value = max(true_reg->umin_value, val + 1);
4151 		break;
4152 	case BPF_JSGT:
4153 		false_reg->smax_value = min_t(s64, false_reg->smax_value, val);
4154 		true_reg->smin_value = max_t(s64, true_reg->smin_value, val + 1);
4155 		break;
4156 	case BPF_JLT:
4157 		false_reg->umin_value = max(false_reg->umin_value, val);
4158 		true_reg->umax_value = min(true_reg->umax_value, val - 1);
4159 		break;
4160 	case BPF_JSLT:
4161 		false_reg->smin_value = max_t(s64, false_reg->smin_value, val);
4162 		true_reg->smax_value = min_t(s64, true_reg->smax_value, val - 1);
4163 		break;
4164 	case BPF_JGE:
4165 		false_reg->umax_value = min(false_reg->umax_value, val - 1);
4166 		true_reg->umin_value = max(true_reg->umin_value, val);
4167 		break;
4168 	case BPF_JSGE:
4169 		false_reg->smax_value = min_t(s64, false_reg->smax_value, val - 1);
4170 		true_reg->smin_value = max_t(s64, true_reg->smin_value, val);
4171 		break;
4172 	case BPF_JLE:
4173 		false_reg->umin_value = max(false_reg->umin_value, val + 1);
4174 		true_reg->umax_value = min(true_reg->umax_value, val);
4175 		break;
4176 	case BPF_JSLE:
4177 		false_reg->smin_value = max_t(s64, false_reg->smin_value, val + 1);
4178 		true_reg->smax_value = min_t(s64, true_reg->smax_value, val);
4179 		break;
4180 	default:
4181 		break;
4182 	}
4183 
4184 	__reg_deduce_bounds(false_reg);
4185 	__reg_deduce_bounds(true_reg);
4186 	/* We might have learned some bits from the bounds. */
4187 	__reg_bound_offset(false_reg);
4188 	__reg_bound_offset(true_reg);
4189 	/* Intersecting with the old var_off might have improved our bounds
4190 	 * slightly.  e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
4191 	 * then new var_off is (0; 0x7f...fc) which improves our umax.
4192 	 */
4193 	__update_reg_bounds(false_reg);
4194 	__update_reg_bounds(true_reg);
4195 }
4196 
4197 /* Same as above, but for the case that dst_reg holds a constant and src_reg is
4198  * the variable reg.
4199  */
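/* E.g. (sketch) "if 8 > r1 goto ..." with r1 in [0, 10]: the constant now
 * sits in dst_reg, so the BPF_JGT case below clamps the branch-taken copy of
 * r1 to umax = min(10, 8 - 1) = 7 and the fall-through copy to
 * umin = max(0, 8) = 8.
 */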
4200 static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
4201 				struct bpf_reg_state *false_reg, u64 val,
4202 				u8 opcode)
4203 {
4204 	if (__is_pointer_value(false, false_reg))
4205 		return;
4206 
4207 	switch (opcode) {
4208 	case BPF_JEQ:
4209 		/* If this is false then we know nothing Jon Snow, but if it is
4210 		 * true then we know for sure.
4211 		 */
4212 		__mark_reg_known(true_reg, val);
4213 		break;
4214 	case BPF_JNE:
4215 		/* If this is true we know nothing Jon Snow, but if it is false
4216 		 * we know the value for sure;
4217 		 */
4218 		__mark_reg_known(false_reg, val);
4219 		break;
4220 	case BPF_JSET:
4221 		false_reg->var_off = tnum_and(false_reg->var_off,
4222 					      tnum_const(~val));
4223 		if (is_power_of_2(val))
4224 			true_reg->var_off = tnum_or(true_reg->var_off,
4225 						    tnum_const(val));
4226 		break;
4227 	case BPF_JGT:
4228 		true_reg->umax_value = min(true_reg->umax_value, val - 1);
4229 		false_reg->umin_value = max(false_reg->umin_value, val);
4230 		break;
4231 	case BPF_JSGT:
4232 		true_reg->smax_value = min_t(s64, true_reg->smax_value, val - 1);
4233 		false_reg->smin_value = max_t(s64, false_reg->smin_value, val);
4234 		break;
4235 	case BPF_JLT:
4236 		true_reg->umin_value = max(true_reg->umin_value, val + 1);
4237 		false_reg->umax_value = min(false_reg->umax_value, val);
4238 		break;
4239 	case BPF_JSLT:
4240 		true_reg->smin_value = max_t(s64, true_reg->smin_value, val + 1);
4241 		false_reg->smax_value = min_t(s64, false_reg->smax_value, val);
4242 		break;
4243 	case BPF_JGE:
4244 		true_reg->umax_value = min(true_reg->umax_value, val);
4245 		false_reg->umin_value = max(false_reg->umin_value, val + 1);
4246 		break;
4247 	case BPF_JSGE:
4248 		true_reg->smax_value = min_t(s64, true_reg->smax_value, val);
4249 		false_reg->smin_value = max_t(s64, false_reg->smin_value, val + 1);
4250 		break;
4251 	case BPF_JLE:
4252 		true_reg->umin_value = max(true_reg->umin_value, val);
4253 		false_reg->umax_value = min(false_reg->umax_value, val - 1);
4254 		break;
4255 	case BPF_JSLE:
4256 		true_reg->smin_value = max_t(s64, true_reg->smin_value, val);
4257 		false_reg->smax_value = min_t(s64, false_reg->smax_value, val - 1);
4258 		break;
4259 	default:
4260 		break;
4261 	}
4262 
4263 	__reg_deduce_bounds(false_reg);
4264 	__reg_deduce_bounds(true_reg);
4265 	/* We might have learned some bits from the bounds. */
4266 	__reg_bound_offset(false_reg);
4267 	__reg_bound_offset(true_reg);
4268 	/* Intersecting with the old var_off might have improved our bounds
4269 	 * slightly.  e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
4270 	 * then new var_off is (0; 0x7f...fc) which improves our umax.
4271 	 */
4272 	__update_reg_bounds(false_reg);
4273 	__update_reg_bounds(true_reg);
4274 }
4275 
4276 /* Regs are known to be equal, so intersect their min/max/var_off */
4277 static void __reg_combine_min_max(struct bpf_reg_state *src_reg,
4278 				  struct bpf_reg_state *dst_reg)
4279 {
4280 	src_reg->umin_value = dst_reg->umin_value = max(src_reg->umin_value,
4281 							dst_reg->umin_value);
4282 	src_reg->umax_value = dst_reg->umax_value = min(src_reg->umax_value,
4283 							dst_reg->umax_value);
4284 	src_reg->smin_value = dst_reg->smin_value = max(src_reg->smin_value,
4285 							dst_reg->smin_value);
4286 	src_reg->smax_value = dst_reg->smax_value = min(src_reg->smax_value,
4287 							dst_reg->smax_value);
4288 	src_reg->var_off = dst_reg->var_off = tnum_intersect(src_reg->var_off,
4289 							     dst_reg->var_off);
4290 	/* We might have learned new bounds from the var_off. */
4291 	__update_reg_bounds(src_reg);
4292 	__update_reg_bounds(dst_reg);
4293 	/* We might have learned something about the sign bit. */
4294 	__reg_deduce_bounds(src_reg);
4295 	__reg_deduce_bounds(dst_reg);
4296 	/* We might have learned some bits from the bounds. */
4297 	__reg_bound_offset(src_reg);
4298 	__reg_bound_offset(dst_reg);
4299 	/* Intersecting with the old var_off might have improved our bounds
4300 	 * slightly.  e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
4301 	 * then new var_off is (0; 0x7f...fc) which improves our umax.
4302 	 */
4303 	__update_reg_bounds(src_reg);
4304 	__update_reg_bounds(dst_reg);
4305 }
4306 
4307 static void reg_combine_min_max(struct bpf_reg_state *true_src,
4308 				struct bpf_reg_state *true_dst,
4309 				struct bpf_reg_state *false_src,
4310 				struct bpf_reg_state *false_dst,
4311 				u8 opcode)
4312 {
4313 	switch (opcode) {
4314 	case BPF_JEQ:
4315 		__reg_combine_min_max(true_src, true_dst);
4316 		break;
4317 	case BPF_JNE:
4318 		__reg_combine_min_max(false_src, false_dst);
4319 		break;
4320 	}
4321 }
4322 
4323 static void mark_ptr_or_null_reg(struct bpf_func_state *state,
4324 				 struct bpf_reg_state *reg, u32 id,
4325 				 bool is_null)
4326 {
4327 	if (reg_type_may_be_null(reg->type) && reg->id == id) {
4328 		/* Old offset (both fixed and variable parts) should
4329 		 * have been known-zero, because we don't allow pointer
4330 		 * arithmetic on pointers that might be NULL.
4331 		 */
4332 		if (WARN_ON_ONCE(reg->smin_value || reg->smax_value ||
4333 				 !tnum_equals_const(reg->var_off, 0) ||
4334 				 reg->off)) {
4335 			__mark_reg_known_zero(reg);
4336 			reg->off = 0;
4337 		}
4338 		if (is_null) {
4339 			reg->type = SCALAR_VALUE;
4340 		} else if (reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
4341 			if (reg->map_ptr->inner_map_meta) {
4342 				reg->type = CONST_PTR_TO_MAP;
4343 				reg->map_ptr = reg->map_ptr->inner_map_meta;
4344 			} else {
4345 				reg->type = PTR_TO_MAP_VALUE;
4346 			}
4347 		} else if (reg->type == PTR_TO_SOCKET_OR_NULL) {
4348 			reg->type = PTR_TO_SOCKET;
4349 		}
4350 		if (is_null || !reg_is_refcounted(reg)) {
4351 			/* We don't need id from this point onwards anymore,
4352 			 * thus we should better reset it, so that state
4353 			 * pruning has chances to take effect.
4354 			 */
4355 			reg->id = 0;
4356 		}
4357 	}
4358 }
4359 
4360 /* The logic is similar to find_good_pkt_pointers(), both could eventually
4361  * be folded together at some point.
4362  */
4363 static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno,
4364 				  bool is_null)
4365 {
4366 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
4367 	struct bpf_reg_state *reg, *regs = state->regs;
4368 	u32 id = regs[regno].id;
4369 	int i, j;
4370 
4371 	if (reg_is_refcounted_or_null(&regs[regno]) && is_null)
4372 		__release_reference_state(state, id);
4373 
4374 	for (i = 0; i < MAX_BPF_REG; i++)
4375 		mark_ptr_or_null_reg(state, &regs[i], id, is_null);
4376 
4377 	for (j = 0; j <= vstate->curframe; j++) {
4378 		state = vstate->frame[j];
4379 		bpf_for_each_spilled_reg(i, state, reg) {
4380 			if (!reg)
4381 				continue;
4382 			mark_ptr_or_null_reg(state, reg, id, is_null);
4383 		}
4384 	}
4385 }
4386 
4387 static bool try_match_pkt_pointers(const struct bpf_insn *insn,
4388 				   struct bpf_reg_state *dst_reg,
4389 				   struct bpf_reg_state *src_reg,
4390 				   struct bpf_verifier_state *this_branch,
4391 				   struct bpf_verifier_state *other_branch)
4392 {
4393 	if (BPF_SRC(insn->code) != BPF_X)
4394 		return false;
4395 
4396 	switch (BPF_OP(insn->code)) {
4397 	case BPF_JGT:
4398 		if ((dst_reg->type == PTR_TO_PACKET &&
4399 		     src_reg->type == PTR_TO_PACKET_END) ||
4400 		    (dst_reg->type == PTR_TO_PACKET_META &&
4401 		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
4402 			/* pkt_data' > pkt_end, pkt_meta' > pkt_data */
4403 			find_good_pkt_pointers(this_branch, dst_reg,
4404 					       dst_reg->type, false);
4405 		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
4406 			    src_reg->type == PTR_TO_PACKET) ||
4407 			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
4408 			    src_reg->type == PTR_TO_PACKET_META)) {
4409 			/* pkt_end > pkt_data', pkt_data > pkt_meta' */
4410 			find_good_pkt_pointers(other_branch, src_reg,
4411 					       src_reg->type, true);
4412 		} else {
4413 			return false;
4414 		}
4415 		break;
4416 	case BPF_JLT:
4417 		if ((dst_reg->type == PTR_TO_PACKET &&
4418 		     src_reg->type == PTR_TO_PACKET_END) ||
4419 		    (dst_reg->type == PTR_TO_PACKET_META &&
4420 		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
4421 			/* pkt_data' < pkt_end, pkt_meta' < pkt_data */
4422 			find_good_pkt_pointers(other_branch, dst_reg,
4423 					       dst_reg->type, true);
4424 		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
4425 			    src_reg->type == PTR_TO_PACKET) ||
4426 			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
4427 			    src_reg->type == PTR_TO_PACKET_META)) {
4428 			/* pkt_end < pkt_data', pkt_data < pkt_meta' */
4429 			find_good_pkt_pointers(this_branch, src_reg,
4430 					       src_reg->type, false);
4431 		} else {
4432 			return false;
4433 		}
4434 		break;
4435 	case BPF_JGE:
4436 		if ((dst_reg->type == PTR_TO_PACKET &&
4437 		     src_reg->type == PTR_TO_PACKET_END) ||
4438 		    (dst_reg->type == PTR_TO_PACKET_META &&
4439 		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
4440 			/* pkt_data' >= pkt_end, pkt_meta' >= pkt_data */
4441 			find_good_pkt_pointers(this_branch, dst_reg,
4442 					       dst_reg->type, true);
4443 		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
4444 			    src_reg->type == PTR_TO_PACKET) ||
4445 			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
4446 			    src_reg->type == PTR_TO_PACKET_META)) {
4447 			/* pkt_end >= pkt_data', pkt_data >= pkt_meta' */
4448 			find_good_pkt_pointers(other_branch, src_reg,
4449 					       src_reg->type, false);
4450 		} else {
4451 			return false;
4452 		}
4453 		break;
4454 	case BPF_JLE:
4455 		if ((dst_reg->type == PTR_TO_PACKET &&
4456 		     src_reg->type == PTR_TO_PACKET_END) ||
4457 		    (dst_reg->type == PTR_TO_PACKET_META &&
4458 		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
4459 			/* pkt_data' <= pkt_end, pkt_meta' <= pkt_data */
4460 			find_good_pkt_pointers(other_branch, dst_reg,
4461 					       dst_reg->type, false);
4462 		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
4463 			    src_reg->type == PTR_TO_PACKET) ||
4464 			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
4465 			    src_reg->type == PTR_TO_PACKET_META)) {
4466 			/* pkt_end <= pkt_data', pkt_data <= pkt_meta' */
4467 			find_good_pkt_pointers(this_branch, src_reg,
4468 					       src_reg->type, true);
4469 		} else {
4470 			return false;
4471 		}
4472 		break;
4473 	default:
4474 		return false;
4475 	}
4476 
4477 	return true;
4478 }
4479 
4480 static int check_cond_jmp_op(struct bpf_verifier_env *env,
4481 			     struct bpf_insn *insn, int *insn_idx)
4482 {
4483 	struct bpf_verifier_state *this_branch = env->cur_state;
4484 	struct bpf_verifier_state *other_branch;
4485 	struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs;
4486 	struct bpf_reg_state *dst_reg, *other_branch_regs;
4487 	u8 opcode = BPF_OP(insn->code);
4488 	int err;
4489 
4490 	if (opcode > BPF_JSLE) {
4491 		verbose(env, "invalid BPF_JMP opcode %x\n", opcode);
4492 		return -EINVAL;
4493 	}
4494 
4495 	if (BPF_SRC(insn->code) == BPF_X) {
4496 		if (insn->imm != 0) {
4497 			verbose(env, "BPF_JMP uses reserved fields\n");
4498 			return -EINVAL;
4499 		}
4500 
4501 		/* check src1 operand */
4502 		err = check_reg_arg(env, insn->src_reg, SRC_OP);
4503 		if (err)
4504 			return err;
4505 
4506 		if (is_pointer_value(env, insn->src_reg)) {
4507 			verbose(env, "R%d pointer comparison prohibited\n",
4508 				insn->src_reg);
4509 			return -EACCES;
4510 		}
4511 	} else {
4512 		if (insn->src_reg != BPF_REG_0) {
4513 			verbose(env, "BPF_JMP uses reserved fields\n");
4514 			return -EINVAL;
4515 		}
4516 	}
4517 
4518 	/* check src2 operand */
4519 	err = check_reg_arg(env, insn->dst_reg, SRC_OP);
4520 	if (err)
4521 		return err;
4522 
4523 	dst_reg = &regs[insn->dst_reg];
4524 
4525 	if (BPF_SRC(insn->code) == BPF_K) {
4526 		int pred = is_branch_taken(dst_reg, insn->imm, opcode);
4527 
4528 		if (pred == 1) {
4529 			 /* only follow the goto, ignore fall-through */
4530 			*insn_idx += insn->off;
4531 			return 0;
4532 		} else if (pred == 0) {
4533 			/* only follow fall-through branch, since
4534 			 * that's where the program will go
4535 			 */
4536 			return 0;
4537 		}
4538 	}
4539 
4540 	other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx,
4541 				  false);
4542 	if (!other_branch)
4543 		return -EFAULT;
4544 	other_branch_regs = other_branch->frame[other_branch->curframe]->regs;
4545 
4546 	/* detect if we are comparing against a constant value so we can adjust
4547 	 * our min/max values for our dst register.
4548 	 * this is only legit if both are scalars (or pointers to the same
4549 	 * object, I suppose, but we don't support that right now), because
4550 	 * otherwise the different base pointers mean the offsets aren't
4551 	 * comparable.
4552 	 */
4553 	if (BPF_SRC(insn->code) == BPF_X) {
4554 		if (dst_reg->type == SCALAR_VALUE &&
4555 		    regs[insn->src_reg].type == SCALAR_VALUE) {
4556 			if (tnum_is_const(regs[insn->src_reg].var_off))
4557 				reg_set_min_max(&other_branch_regs[insn->dst_reg],
4558 						dst_reg, regs[insn->src_reg].var_off.value,
4559 						opcode);
4560 			else if (tnum_is_const(dst_reg->var_off))
4561 				reg_set_min_max_inv(&other_branch_regs[insn->src_reg],
4562 						    &regs[insn->src_reg],
4563 						    dst_reg->var_off.value, opcode);
4564 			else if (opcode == BPF_JEQ || opcode == BPF_JNE)
4565 				/* Comparing for equality, we can combine knowledge */
4566 				reg_combine_min_max(&other_branch_regs[insn->src_reg],
4567 						    &other_branch_regs[insn->dst_reg],
4568 						    &regs[insn->src_reg],
4569 						    &regs[insn->dst_reg], opcode);
4570 		}
4571 	} else if (dst_reg->type == SCALAR_VALUE) {
4572 		reg_set_min_max(&other_branch_regs[insn->dst_reg],
4573 					dst_reg, insn->imm, opcode);
4574 	}
4575 
4576 	/* detect if R == 0 where R is returned from bpf_map_lookup_elem() */
4577 	if (BPF_SRC(insn->code) == BPF_K &&
4578 	    insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) &&
4579 	    reg_type_may_be_null(dst_reg->type)) {
4580 		/* Mark all identical registers in each branch as either
4581 		 * safe or unknown depending on the R == 0 or R != 0 condition.
4582 		 */
4583 		mark_ptr_or_null_regs(this_branch, insn->dst_reg,
4584 				      opcode == BPF_JNE);
4585 		mark_ptr_or_null_regs(other_branch, insn->dst_reg,
4586 				      opcode == BPF_JEQ);
4587 	} else if (!try_match_pkt_pointers(insn, dst_reg, &regs[insn->src_reg],
4588 					   this_branch, other_branch) &&
4589 		   is_pointer_value(env, insn->dst_reg)) {
4590 		verbose(env, "R%d pointer comparison prohibited\n",
4591 			insn->dst_reg);
4592 		return -EACCES;
4593 	}
4594 	if (env->log.level)
4595 		print_verifier_state(env, this_branch->frame[this_branch->curframe]);
4596 	return 0;
4597 }
4598 
4599 /* return the map pointer stored inside BPF_LD_IMM64 instruction */
4600 static struct bpf_map *ld_imm64_to_map_ptr(struct bpf_insn *insn)
4601 {
4602 	u64 imm64 = ((u64) (u32) insn[0].imm) | ((u64) (u32) insn[1].imm) << 32;
4603 
4604 	return (struct bpf_map *) (unsigned long) imm64;
4605 }
4606 
4607 /* verify BPF_LD_IMM64 instruction */
4608 static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
4609 {
4610 	struct bpf_reg_state *regs = cur_regs(env);
4611 	int err;
4612 
4613 	if (BPF_SIZE(insn->code) != BPF_DW) {
4614 		verbose(env, "invalid BPF_LD_IMM insn\n");
4615 		return -EINVAL;
4616 	}
4617 	if (insn->off != 0) {
4618 		verbose(env, "BPF_LD_IMM64 uses reserved fields\n");
4619 		return -EINVAL;
4620 	}
4621 
4622 	err = check_reg_arg(env, insn->dst_reg, DST_OP);
4623 	if (err)
4624 		return err;
4625 
4626 	if (insn->src_reg == 0) {
4627 		u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
4628 
4629 		regs[insn->dst_reg].type = SCALAR_VALUE;
4630 		__mark_reg_known(&regs[insn->dst_reg], imm);
4631 		return 0;
4632 	}
4633 
4634 	/* replace_map_fd_with_map_ptr() should have caught bad ld_imm64 */
4635 	BUG_ON(insn->src_reg != BPF_PSEUDO_MAP_FD);
4636 
4637 	regs[insn->dst_reg].type = CONST_PTR_TO_MAP;
4638 	regs[insn->dst_reg].map_ptr = ld_imm64_to_map_ptr(insn);
4639 	return 0;
4640 }
4641 
4642 static bool may_access_skb(enum bpf_prog_type type)
4643 {
4644 	switch (type) {
4645 	case BPF_PROG_TYPE_SOCKET_FILTER:
4646 	case BPF_PROG_TYPE_SCHED_CLS:
4647 	case BPF_PROG_TYPE_SCHED_ACT:
4648 		return true;
4649 	default:
4650 		return false;
4651 	}
4652 }
4653 
4654 /* verify safety of LD_ABS|LD_IND instructions:
4655  * - they can only appear in the programs where ctx == skb
4656  * - since they are wrappers of function calls, they scratch R1-R5 registers,
4657  *   preserve R6-R9, and store return value into R0
4658  *
4659  * Implicit input:
4660  *   ctx == skb == R6 == CTX
4661  *
4662  * Explicit input:
4663  *   SRC == any register
4664  *   IMM == 32-bit immediate
4665  *
4666  * Output:
4667  *   R0 - 8/16/32-bit skb data converted to cpu endianness
4668  */
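/* For instance, in a classic socket filter BPF_LD_ABS(BPF_H, 12) loads the
 * 16-bit EtherType at fixed skb offset 12 into R0, while
 * BPF_LD_IND(BPF_B, BPF_REG_7, 0) loads one byte at the variable offset held
 * in R7 plus the immediate. (Illustrative only; the offsets and the source
 * register are arbitrary example choices.)
 */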
4669 static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
4670 {
4671 	struct bpf_reg_state *regs = cur_regs(env);
4672 	u8 mode = BPF_MODE(insn->code);
4673 	int i, err;
4674 
4675 	if (!may_access_skb(env->prog->type)) {
4676 		verbose(env, "BPF_LD_[ABS|IND] instructions not allowed for this program type\n");
4677 		return -EINVAL;
4678 	}
4679 
4680 	if (!env->ops->gen_ld_abs) {
4681 		verbose(env, "bpf verifier is misconfigured\n");
4682 		return -EINVAL;
4683 	}
4684 
4685 	if (env->subprog_cnt > 1) {
4686 		/* When a program has an LD_ABS insn, JITs and the interpreter
4687 		 * assume that r1 == ctx == skb, which is not the case for
4688 		 * callees that can have arbitrary arguments. It's problematic
4689 		 * for the main prog as well, since JITs would need to analyze
4690 		 * all functions in order to make proper register save/restore
4691 		 * decisions in the main prog. Hence disallow LD_ABS with calls.
4692 		 */
4693 		verbose(env, "BPF_LD_[ABS|IND] instructions cannot be mixed with bpf-to-bpf calls\n");
4694 		return -EINVAL;
4695 	}
4696 
4697 	if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
4698 	    BPF_SIZE(insn->code) == BPF_DW ||
4699 	    (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
4700 		verbose(env, "BPF_LD_[ABS|IND] uses reserved fields\n");
4701 		return -EINVAL;
4702 	}
4703 
4704 	/* check whether implicit source operand (register R6) is readable */
4705 	err = check_reg_arg(env, BPF_REG_6, SRC_OP);
4706 	if (err)
4707 		return err;
4708 
4709 	/* Disallow usage of BPF_LD_[ABS|IND] with reference tracking, as
4710 	 * gen_ld_abs() may terminate the program at runtime, leading to
4711 	 * reference leak.
4712 	 */
4713 	err = check_reference_leak(env);
4714 	if (err) {
4715 		verbose(env, "BPF_LD_[ABS|IND] cannot be mixed with socket references\n");
4716 		return err;
4717 	}
4718 
4719 	if (regs[BPF_REG_6].type != PTR_TO_CTX) {
4720 		verbose(env,
4721 			"at the time of BPF_LD_ABS|IND R6 != pointer to skb\n");
4722 		return -EINVAL;
4723 	}
4724 
4725 	if (mode == BPF_IND) {
4726 		/* check explicit source operand */
4727 		err = check_reg_arg(env, insn->src_reg, SRC_OP);
4728 		if (err)
4729 			return err;
4730 	}
4731 
4732 	/* reset caller saved regs to unreadable */
4733 	for (i = 0; i < CALLER_SAVED_REGS; i++) {
4734 		mark_reg_not_init(env, regs, caller_saved[i]);
4735 		check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
4736 	}
4737 
4738 	/* mark destination R0 register as readable, since it contains
4739 	 * the value fetched from the packet.
4740 	 * Already marked as written above.
4741 	 */
4742 	mark_reg_unknown(env, regs, BPF_REG_0);
4743 	return 0;
4744 }
4745 
4746 static int check_return_code(struct bpf_verifier_env *env)
4747 {
4748 	struct bpf_reg_state *reg;
4749 	struct tnum range = tnum_range(0, 1);
4750 
4751 	switch (env->prog->type) {
4752 	case BPF_PROG_TYPE_CGROUP_SKB:
4753 	case BPF_PROG_TYPE_CGROUP_SOCK:
4754 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
4755 	case BPF_PROG_TYPE_SOCK_OPS:
4756 	case BPF_PROG_TYPE_CGROUP_DEVICE:
4757 		break;
4758 	default:
4759 		return 0;
4760 	}
4761 
4762 	reg = cur_regs(env) + BPF_REG_0;
4763 	if (reg->type != SCALAR_VALUE) {
4764 		verbose(env, "At program exit the register R0 is not a known value (%s)\n",
4765 			reg_type_str[reg->type]);
4766 		return -EINVAL;
4767 	}
4768 
4769 	if (!tnum_in(range, reg->var_off)) {
4770 		verbose(env, "At program exit the register R0 ");
4771 		if (!tnum_is_unknown(reg->var_off)) {
4772 			char tn_buf[48];
4773 
4774 			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
4775 			verbose(env, "has value %s", tn_buf);
4776 		} else {
4777 			verbose(env, "has unknown scalar value");
4778 		}
4779 		verbose(env, " should have been 0 or 1\n");
4780 		return -EINVAL;
4781 	}
4782 	return 0;
4783 }
4784 
4785 /* non-recursive DFS pseudo code
4786  * 1  procedure DFS-iterative(G,v):
4787  * 2      label v as discovered
4788  * 3      let S be a stack
4789  * 4      S.push(v)
4790  * 5      while S is not empty
4791  * 6            t <- S.pop()
4792  * 7            if t is what we're looking for:
4793  * 8                return t
4794  * 9            for all edges e in G.adjacentEdges(t) do
4795  * 10               if edge e is already labelled
4796  * 11                   continue with the next edge
4797  * 12               w <- G.adjacentVertex(t,e)
4798  * 13               if vertex w is not discovered and not explored
4799  * 14                   label e as tree-edge
4800  * 15                   label w as discovered
4801  * 16                   S.push(w)
4802  * 17                   continue at 5
4803  * 18               else if vertex w is discovered
4804  * 19                   label e as back-edge
4805  * 20               else
4806  * 21                   // vertex w is explored
4807  * 22                   label e as forward- or cross-edge
4808  * 23           label t as explored
4809  * 24           S.pop()
4810  *
4811  * convention:
4812  * 0x10 - discovered
4813  * 0x11 - discovered and fall-through edge labelled
4814  * 0x12 - discovered and fall-through and branch edges labelled
4815  * 0x20 - explored
4816  */
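/* As a small worked example of the back-edge detection, take the two-insn
 * program
 *   0: r1 += 1
 *   1: goto 0        (BPF_JA with off = -2)
 * insn 0 is marked DISCOVERED and pushed, the fall-through edge 0 -> 1 becomes
 * a tree-edge, and following the edge 1 -> 0 then finds insn 0 still
 * DISCOVERED, so that edge is labelled a back-edge and the program is
 * rejected as containing a loop.
 */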
4817 
4818 enum {
4819 	DISCOVERED = 0x10,
4820 	EXPLORED = 0x20,
4821 	FALLTHROUGH = 1,
4822 	BRANCH = 2,
4823 };
4824 
4825 #define STATE_LIST_MARK ((struct bpf_verifier_state_list *) -1L)
4826 
4827 static int *insn_stack;	/* stack of insns to process */
4828 static int cur_stack;	/* current stack index */
4829 static int *insn_state;
4830 
4831 /* t, w, e - match pseudo-code above:
4832  * t - index of current instruction
4833  * w - next instruction
4834  * e - edge
4835  */
4836 static int push_insn(int t, int w, int e, struct bpf_verifier_env *env)
4837 {
4838 	if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH))
4839 		return 0;
4840 
4841 	if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH))
4842 		return 0;
4843 
4844 	if (w < 0 || w >= env->prog->len) {
4845 		verbose_linfo(env, t, "%d: ", t);
4846 		verbose(env, "jump out of range from insn %d to %d\n", t, w);
4847 		return -EINVAL;
4848 	}
4849 
4850 	if (e == BRANCH)
4851 		/* mark branch target for state pruning */
4852 		env->explored_states[w] = STATE_LIST_MARK;
4853 
4854 	if (insn_state[w] == 0) {
4855 		/* tree-edge */
4856 		insn_state[t] = DISCOVERED | e;
4857 		insn_state[w] = DISCOVERED;
4858 		if (cur_stack >= env->prog->len)
4859 			return -E2BIG;
4860 		insn_stack[cur_stack++] = w;
4861 		return 1;
4862 	} else if ((insn_state[w] & 0xF0) == DISCOVERED) {
4863 		verbose_linfo(env, t, "%d: ", t);
4864 		verbose_linfo(env, w, "%d: ", w);
4865 		verbose(env, "back-edge from insn %d to %d\n", t, w);
4866 		return -EINVAL;
4867 	} else if (insn_state[w] == EXPLORED) {
4868 		/* forward- or cross-edge */
4869 		insn_state[t] = DISCOVERED | e;
4870 	} else {
4871 		verbose(env, "insn state internal bug\n");
4872 		return -EFAULT;
4873 	}
4874 	return 0;
4875 }
4876 
4877 /* non-recursive depth-first-search to detect loops in BPF program
4878  * loop == back-edge in directed graph
4879  */
4880 static int check_cfg(struct bpf_verifier_env *env)
4881 {
4882 	struct bpf_insn *insns = env->prog->insnsi;
4883 	int insn_cnt = env->prog->len;
4884 	int ret = 0;
4885 	int i, t;
4886 
4887 	insn_state = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
4888 	if (!insn_state)
4889 		return -ENOMEM;
4890 
4891 	insn_stack = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
4892 	if (!insn_stack) {
4893 		kfree(insn_state);
4894 		return -ENOMEM;
4895 	}
4896 
4897 	insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */
4898 	insn_stack[0] = 0; /* 0 is the first instruction */
4899 	cur_stack = 1;
4900 
4901 peek_stack:
4902 	if (cur_stack == 0)
4903 		goto check_state;
4904 	t = insn_stack[cur_stack - 1];
4905 
4906 	if (BPF_CLASS(insns[t].code) == BPF_JMP) {
4907 		u8 opcode = BPF_OP(insns[t].code);
4908 
4909 		if (opcode == BPF_EXIT) {
4910 			goto mark_explored;
4911 		} else if (opcode == BPF_CALL) {
4912 			ret = push_insn(t, t + 1, FALLTHROUGH, env);
4913 			if (ret == 1)
4914 				goto peek_stack;
4915 			else if (ret < 0)
4916 				goto err_free;
4917 			if (t + 1 < insn_cnt)
4918 				env->explored_states[t + 1] = STATE_LIST_MARK;
4919 			if (insns[t].src_reg == BPF_PSEUDO_CALL) {
4920 				env->explored_states[t] = STATE_LIST_MARK;
4921 				ret = push_insn(t, t + insns[t].imm + 1, BRANCH, env);
4922 				if (ret == 1)
4923 					goto peek_stack;
4924 				else if (ret < 0)
4925 					goto err_free;
4926 			}
4927 		} else if (opcode == BPF_JA) {
4928 			if (BPF_SRC(insns[t].code) != BPF_K) {
4929 				ret = -EINVAL;
4930 				goto err_free;
4931 			}
4932 			/* unconditional jump with single edge */
4933 			ret = push_insn(t, t + insns[t].off + 1,
4934 					FALLTHROUGH, env);
4935 			if (ret == 1)
4936 				goto peek_stack;
4937 			else if (ret < 0)
4938 				goto err_free;
4939 			/* tell verifier to check for equivalent states
4940 			 * after every call and jump
4941 			 */
4942 			if (t + 1 < insn_cnt)
4943 				env->explored_states[t + 1] = STATE_LIST_MARK;
4944 		} else {
4945 			/* conditional jump with two edges */
4946 			env->explored_states[t] = STATE_LIST_MARK;
4947 			ret = push_insn(t, t + 1, FALLTHROUGH, env);
4948 			if (ret == 1)
4949 				goto peek_stack;
4950 			else if (ret < 0)
4951 				goto err_free;
4952 
4953 			ret = push_insn(t, t + insns[t].off + 1, BRANCH, env);
4954 			if (ret == 1)
4955 				goto peek_stack;
4956 			else if (ret < 0)
4957 				goto err_free;
4958 		}
4959 	} else {
4960 		/* all other non-branch instructions with single
4961 		 * fall-through edge
4962 		 */
4963 		ret = push_insn(t, t + 1, FALLTHROUGH, env);
4964 		if (ret == 1)
4965 			goto peek_stack;
4966 		else if (ret < 0)
4967 			goto err_free;
4968 	}
4969 
4970 mark_explored:
4971 	insn_state[t] = EXPLORED;
4972 	if (cur_stack-- <= 0) {
4973 		verbose(env, "pop stack internal bug\n");
4974 		ret = -EFAULT;
4975 		goto err_free;
4976 	}
4977 	goto peek_stack;
4978 
4979 check_state:
4980 	for (i = 0; i < insn_cnt; i++) {
4981 		if (insn_state[i] != EXPLORED) {
4982 			verbose(env, "unreachable insn %d\n", i);
4983 			ret = -EINVAL;
4984 			goto err_free;
4985 		}
4986 	}
4987 	ret = 0; /* cfg looks good */
4988 
4989 err_free:
4990 	kfree(insn_state);
4991 	kfree(insn_stack);
4992 	return ret;
4993 }
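/* As an illustration (a hypothetical program, not from the original source):
 * 0: r0 = 0
 * 1: r0 += 1
 * 2: if r0 != 10 goto pc-2
 * 3: exit
 * While processing the branch edge of insn 2, push_insn() finds insn 1 still
 * in the DISCOVERED state, reports "back-edge from insn 2 to 1" and the
 * program is rejected before any per-path state exploration happens.
 */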
4994 
4995 /* The minimum supported BTF func info size */
4996 #define MIN_BPF_FUNCINFO_SIZE	8
4997 #define MAX_FUNCINFO_REC_SIZE	252
4998 
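/* A sketch of the layout check_btf_func() below expects from the loader
 * (the insn offsets and BTF type ids here are hypothetical): for a program
 * with the main function at insn 0 and one subprog starting at insn 12,
 * func_info must carry exactly env->subprog_cnt records with strictly
 * increasing insn_off values matching the subprog starts, e.g.
 *
 *	struct bpf_func_info func_info[] = {
 *		{ .insn_off = 0,  .type_id = 4 },	// BTF_KIND_FUNC of main
 *		{ .insn_off = 12, .type_id = 7 },	// BTF_KIND_FUNC of the subprog
 *	};
 *
 * attr->func_info_rec_size may differ from sizeof(struct bpf_func_info), but
 * any extra tail bytes must be zero (see bpf_check_uarg_tail_zero() below).
 */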
4999 static int check_btf_func(struct bpf_verifier_env *env,
5000 			  const union bpf_attr *attr,
5001 			  union bpf_attr __user *uattr)
5002 {
5003 	u32 i, nfuncs, urec_size, min_size, prev_offset;
5004 	u32 krec_size = sizeof(struct bpf_func_info);
5005 	struct bpf_func_info *krecord;
5006 	const struct btf_type *type;
5007 	struct bpf_prog *prog;
5008 	const struct btf *btf;
5009 	void __user *urecord;
5010 	int ret = 0;
5011 
5012 	nfuncs = attr->func_info_cnt;
5013 	if (!nfuncs)
5014 		return 0;
5015 
5016 	if (nfuncs != env->subprog_cnt) {
5017 		verbose(env, "number of funcs in func_info doesn't match number of subprogs\n");
5018 		return -EINVAL;
5019 	}
5020 
5021 	urec_size = attr->func_info_rec_size;
5022 	if (urec_size < MIN_BPF_FUNCINFO_SIZE ||
5023 	    urec_size > MAX_FUNCINFO_REC_SIZE ||
5024 	    urec_size % sizeof(u32)) {
5025 		verbose(env, "invalid func info rec size %u\n", urec_size);
5026 		return -EINVAL;
5027 	}
5028 
5029 	prog = env->prog;
5030 	btf = prog->aux->btf;
5031 
5032 	urecord = u64_to_user_ptr(attr->func_info);
5033 	min_size = min_t(u32, krec_size, urec_size);
5034 
5035 	krecord = kvcalloc(nfuncs, krec_size, GFP_KERNEL | __GFP_NOWARN);
5036 	if (!krecord)
5037 		return -ENOMEM;
5038 
5039 	for (i = 0; i < nfuncs; i++) {
5040 		ret = bpf_check_uarg_tail_zero(urecord, krec_size, urec_size);
5041 		if (ret) {
5042 			if (ret == -E2BIG) {
5043 				verbose(env, "nonzero trailing record in func info");
5044 				/* set the size kernel expects so loader can zero
5045 				 * out the rest of the record.
5046 				 */
5047 				if (put_user(min_size, &uattr->func_info_rec_size))
5048 					ret = -EFAULT;
5049 			}
5050 			goto err_free;
5051 		}
5052 
5053 		if (copy_from_user(&krecord[i], urecord, min_size)) {
5054 			ret = -EFAULT;
5055 			goto err_free;
5056 		}
5057 
5058 		/* check insn_off */
5059 		if (i == 0) {
5060 			if (krecord[i].insn_off) {
5061 				verbose(env,
5062 					"nonzero insn_off %u for the first func info record",
5063 					krecord[i].insn_off);
5064 				ret = -EINVAL;
5065 				goto err_free;
5066 			}
5067 		} else if (krecord[i].insn_off <= prev_offset) {
5068 			verbose(env,
5069 				"same or smaller insn offset (%u) than previous func info record (%u)",
5070 				krecord[i].insn_off, prev_offset);
5071 			ret = -EINVAL;
5072 			goto err_free;
5073 		}
5074 
5075 		if (env->subprog_info[i].start != krecord[i].insn_off) {
5076 			verbose(env, "func_info BTF section doesn't match subprog layout in BPF program\n");
5077 			ret = -EINVAL;
5078 			goto err_free;
5079 		}
5080 
5081 		/* check type_id */
5082 		type = btf_type_by_id(btf, krecord[i].type_id);
5083 		if (!type || BTF_INFO_KIND(type->info) != BTF_KIND_FUNC) {
5084 			verbose(env, "invalid type id %d in func info",
5085 				krecord[i].type_id);
5086 			ret = -EINVAL;
5087 			goto err_free;
5088 		}
5089 
5090 		prev_offset = krecord[i].insn_off;
5091 		urecord += urec_size;
5092 	}
5093 
5094 	prog->aux->func_info = krecord;
5095 	prog->aux->func_info_cnt = nfuncs;
5096 	return 0;
5097 
5098 err_free:
5099 	kvfree(krecord);
5100 	return ret;
5101 }
5102 
5103 static void adjust_btf_func(struct bpf_verifier_env *env)
5104 {
5105 	int i;
5106 
5107 	if (!env->prog->aux->func_info)
5108 		return;
5109 
5110 	for (i = 0; i < env->subprog_cnt; i++)
5111 		env->prog->aux->func_info[i].insn_off = env->subprog_info[i].start;
5112 }
5113 
5114 #define MIN_BPF_LINEINFO_SIZE	(offsetof(struct bpf_line_info, line_col) + \
5115 		sizeof(((struct bpf_line_info *)(0))->line_col))
5116 #define MAX_LINEINFO_REC_SIZE	MAX_FUNCINFO_REC_SIZE
5117 
5118 static int check_btf_line(struct bpf_verifier_env *env,
5119 			  const union bpf_attr *attr,
5120 			  union bpf_attr __user *uattr)
5121 {
5122 	u32 i, s, nr_linfo, ncopy, expected_size, rec_size, prev_offset = 0;
5123 	struct bpf_subprog_info *sub;
5124 	struct bpf_line_info *linfo;
5125 	struct bpf_prog *prog;
5126 	const struct btf *btf;
5127 	void __user *ulinfo;
5128 	int err;
5129 
5130 	nr_linfo = attr->line_info_cnt;
5131 	if (!nr_linfo)
5132 		return 0;
5133 
5134 	rec_size = attr->line_info_rec_size;
5135 	if (rec_size < MIN_BPF_LINEINFO_SIZE ||
5136 	    rec_size > MAX_LINEINFO_REC_SIZE ||
5137 	    rec_size & (sizeof(u32) - 1))
5138 		return -EINVAL;
5139 
5140 	/* Need to zero it in case userspace
5141 	 * passes in a smaller bpf_line_info object.
5142 	 */
5143 	linfo = kvcalloc(nr_linfo, sizeof(struct bpf_line_info),
5144 			 GFP_KERNEL | __GFP_NOWARN);
5145 	if (!linfo)
5146 		return -ENOMEM;
5147 
5148 	prog = env->prog;
5149 	btf = prog->aux->btf;
5150 
5151 	s = 0;
5152 	sub = env->subprog_info;
5153 	ulinfo = u64_to_user_ptr(attr->line_info);
5154 	expected_size = sizeof(struct bpf_line_info);
5155 	ncopy = min_t(u32, expected_size, rec_size);
5156 	for (i = 0; i < nr_linfo; i++) {
5157 		err = bpf_check_uarg_tail_zero(ulinfo, expected_size, rec_size);
5158 		if (err) {
5159 			if (err == -E2BIG) {
5160 				verbose(env, "nonzero trailing record in line_info");
5161 				if (put_user(expected_size,
5162 					     &uattr->line_info_rec_size))
5163 					err = -EFAULT;
5164 			}
5165 			goto err_free;
5166 		}
5167 
5168 		if (copy_from_user(&linfo[i], ulinfo, ncopy)) {
5169 			err = -EFAULT;
5170 			goto err_free;
5171 		}
5172 
5173 		/*
5174 		 * Check insn_off to ensure
5175 		 * 1) strictly increasing AND
5176 		 * 2) bounded by prog->len
5177 		 *
5178 		 * The linfo[0].insn_off == 0 check logically falls into
5179 		 * the later "missing bpf_line_info for func..." case
5180 		 * because the first linfo[0].insn_off must match the
5181 		 * start of the first subprog, and the first subprog must
5182 		 * have subprog_info[0].start == 0.
5183 		 */
5184 		if ((i && linfo[i].insn_off <= prev_offset) ||
5185 		    linfo[i].insn_off >= prog->len) {
5186 			verbose(env, "Invalid line_info[%u].insn_off:%u (prev_offset:%u prog->len:%u)\n",
5187 				i, linfo[i].insn_off, prev_offset,
5188 				prog->len);
5189 			err = -EINVAL;
5190 			goto err_free;
5191 		}
5192 
5193 		if (!prog->insnsi[linfo[i].insn_off].code) {
5194 			verbose(env,
5195 				"Invalid insn code at line_info[%u].insn_off\n",
5196 				i);
5197 			err = -EINVAL;
5198 			goto err_free;
5199 		}
5200 
5201 		if (!btf_name_by_offset(btf, linfo[i].line_off) ||
5202 		    !btf_name_by_offset(btf, linfo[i].file_name_off)) {
5203 			verbose(env, "Invalid line_info[%u].line_off or .file_name_off\n", i);
5204 			err = -EINVAL;
5205 			goto err_free;
5206 		}
5207 
5208 		if (s != env->subprog_cnt) {
5209 			if (linfo[i].insn_off == sub[s].start) {
5210 				sub[s].linfo_idx = i;
5211 				s++;
5212 			} else if (sub[s].start < linfo[i].insn_off) {
5213 				verbose(env, "missing bpf_line_info for func#%u\n", s);
5214 				err = -EINVAL;
5215 				goto err_free;
5216 			}
5217 		}
5218 
5219 		prev_offset = linfo[i].insn_off;
5220 		ulinfo += rec_size;
5221 	}
5222 
5223 	if (s != env->subprog_cnt) {
5224 		verbose(env, "missing bpf_line_info for %u funcs starting from func#%u\n",
5225 			env->subprog_cnt - s, s);
5226 		err = -EINVAL;
5227 		goto err_free;
5228 	}
5229 
5230 	prog->aux->linfo = linfo;
5231 	prog->aux->nr_linfo = nr_linfo;
5232 
5233 	return 0;
5234 
5235 err_free:
5236 	kvfree(linfo);
5237 	return err;
5238 }
5239 
5240 static int check_btf_info(struct bpf_verifier_env *env,
5241 			  const union bpf_attr *attr,
5242 			  union bpf_attr __user *uattr)
5243 {
5244 	struct btf *btf;
5245 	int err;
5246 
5247 	if (!attr->func_info_cnt && !attr->line_info_cnt)
5248 		return 0;
5249 
5250 	btf = btf_get_by_fd(attr->prog_btf_fd);
5251 	if (IS_ERR(btf))
5252 		return PTR_ERR(btf);
5253 	env->prog->aux->btf = btf;
5254 
5255 	err = check_btf_func(env, attr, uattr);
5256 	if (err)
5257 		return err;
5258 
5259 	err = check_btf_line(env, attr, uattr);
5260 	if (err)
5261 		return err;
5262 
5263 	return 0;
5264 }
5265 
5266 /* check %cur's range satisfies %old's */
5267 static bool range_within(struct bpf_reg_state *old,
5268 			 struct bpf_reg_state *cur)
5269 {
5270 	return old->umin_value <= cur->umin_value &&
5271 	       old->umax_value >= cur->umax_value &&
5272 	       old->smin_value <= cur->smin_value &&
5273 	       old->smax_value >= cur->smax_value;
5274 }
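/* For instance (made-up bounds): an old, already-verified register with
 * [umin=2, umax=10, smin=2, smax=10] accepts a current register with
 * [umin=4, umax=8, smin=4, smax=8], since every value the current register
 * can hold was already proven safe; a current register with umax=16 would be
 * rejected because it allows values the old state never covered.
 */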
5275 
5276 /* Maximum number of register states that can exist at once */
5277 #define ID_MAP_SIZE	(MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE)
5278 struct idpair {
5279 	u32 old;
5280 	u32 cur;
5281 };
5282 
5283 /* If in the old state two registers had the same id, then they need to have
5284  * the same id in the new state as well.  But that id could be different from
5285  * the old state, so we need to track the mapping from old to new ids.
5286  * Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent
5287  * regs with old id 5 must also have new id 9 for the new state to be safe.  But
5288  * regs with a different old id could still have new id 9, we don't care about
5289  * that.
5290  * So we look through our idmap to see if this old id has been seen before.  If
5291  * so, we require the new id to match; otherwise, we add the id pair to the map.
5292  */
5293 static bool check_ids(u32 old_id, u32 cur_id, struct idpair *idmap)
5294 {
5295 	unsigned int i;
5296 
5297 	for (i = 0; i < ID_MAP_SIZE; i++) {
5298 		if (!idmap[i].old) {
5299 			/* Reached an empty slot; haven't seen this id before */
5300 			idmap[i].old = old_id;
5301 			idmap[i].cur = cur_id;
5302 			return true;
5303 		}
5304 		if (idmap[i].old == old_id)
5305 			return idmap[i].cur == cur_id;
5306 	}
5307 	/* We ran out of idmap slots, which should be impossible */
5308 	WARN_ON_ONCE(1);
5309 	return false;
5310 }
5311 
5312 static void clean_func_state(struct bpf_verifier_env *env,
5313 			     struct bpf_func_state *st)
5314 {
5315 	enum bpf_reg_liveness live;
5316 	int i, j;
5317 
5318 	for (i = 0; i < BPF_REG_FP; i++) {
5319 		live = st->regs[i].live;
5320 		/* liveness must not touch this register anymore */
5321 		st->regs[i].live |= REG_LIVE_DONE;
5322 		if (!(live & REG_LIVE_READ))
5323 			/* since the register is unused, clear its state
5324 			 * to make further comparison simpler
5325 			 */
5326 			__mark_reg_not_init(&st->regs[i]);
5327 	}
5328 
5329 	for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) {
5330 		live = st->stack[i].spilled_ptr.live;
5331 		/* liveness must not touch this stack slot anymore */
5332 		st->stack[i].spilled_ptr.live |= REG_LIVE_DONE;
5333 		if (!(live & REG_LIVE_READ)) {
5334 			__mark_reg_not_init(&st->stack[i].spilled_ptr);
5335 			for (j = 0; j < BPF_REG_SIZE; j++)
5336 				st->stack[i].slot_type[j] = STACK_INVALID;
5337 		}
5338 	}
5339 }
5340 
5341 static void clean_verifier_state(struct bpf_verifier_env *env,
5342 				 struct bpf_verifier_state *st)
5343 {
5344 	int i;
5345 
5346 	if (st->frame[0]->regs[0].live & REG_LIVE_DONE)
5347 		/* all regs in this state in all frames were already marked */
5348 		return;
5349 
5350 	for (i = 0; i <= st->curframe; i++)
5351 		clean_func_state(env, st->frame[i]);
5352 }
5353 
5354 /* the parentage chains form a tree.
5355  * the verifier states are added to state lists at given insn and
5356  * pushed into state stack for future exploration.
5357  * when the verifier reaches bpf_exit insn some of the verifier states
5358  * stored in the state lists have their final liveness state already,
5359  * but a lot of states will get revised from liveness point of view when
5360  * the verifier explores other branches.
5361  * Example:
5362  * 1: r0 = 1
5363  * 2: if r1 == 100 goto pc+1
5364  * 3: r0 = 2
5365  * 4: exit
5366  * when the verifier reaches exit insn the register r0 in the state list of
5367  * insn 2 will be seen as !REG_LIVE_READ. Then the verifier pops the other_branch
5368  * of insn 2 and goes exploring further. At the insn 4 it will walk the
5369  * parentage chain from insn 4 into insn 2 and will mark r0 as REG_LIVE_READ.
5370  *
5371  * Since the verifier pushes the branch states as it sees them while exploring
5372  * the program, walking the branch instruction for the second
5373  * time means that all states below this branch were already explored and
5374  * their final liveness marks are already propagated.
5375  * Hence when the verifier completes the search of the state list in is_state_visited()
5376  * we can call this clean_live_states() function to mark all liveness states
5377  * as REG_LIVE_DONE to indicate that 'parent' pointers of 'struct bpf_reg_state'
5378  * will not be used.
5379  * This function also clears the registers and stack slots that were not read
5380  * (!REG_LIVE_READ) to simplify state merging.
5381  *
5382  * An important note here is that walking the same branch instruction in the callee
5383  * doesn't mean that the states are DONE. The verifier has to compare
5384  * the callsites as well.
5385  */
5386 static void clean_live_states(struct bpf_verifier_env *env, int insn,
5387 			      struct bpf_verifier_state *cur)
5388 {
5389 	struct bpf_verifier_state_list *sl;
5390 	int i;
5391 
5392 	sl = env->explored_states[insn];
5393 	if (!sl)
5394 		return;
5395 
5396 	while (sl != STATE_LIST_MARK) {
5397 		if (sl->state.curframe != cur->curframe)
5398 			goto next;
5399 		for (i = 0; i <= cur->curframe; i++)
5400 			if (sl->state.frame[i]->callsite != cur->frame[i]->callsite)
5401 				goto next;
5402 		clean_verifier_state(env, &sl->state);
5403 next:
5404 		sl = sl->next;
5405 	}
5406 }
5407 
5408 /* Returns true if (rold safe implies rcur safe) */
5409 static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
5410 		    struct idpair *idmap)
5411 {
5412 	bool equal;
5413 
5414 	if (!(rold->live & REG_LIVE_READ))
5415 		/* explored state didn't use this */
5416 		return true;
5417 
5418 	equal = memcmp(rold, rcur, offsetof(struct bpf_reg_state, parent)) == 0;
5419 
5420 	if (rold->type == PTR_TO_STACK)
5421 		/* two stack pointers are equal only if they're pointing to
5422 		 * the same stack frame, since fp-8 in foo != fp-8 in bar
5423 		 */
5424 		return equal && rold->frameno == rcur->frameno;
5425 
5426 	if (equal)
5427 		return true;
5428 
5429 	if (rold->type == NOT_INIT)
5430 		/* explored state can't have used this */
5431 		return true;
5432 	if (rcur->type == NOT_INIT)
5433 		return false;
5434 	switch (rold->type) {
5435 	case SCALAR_VALUE:
5436 		if (rcur->type == SCALAR_VALUE) {
5437 			/* new val must satisfy old val knowledge */
5438 			return range_within(rold, rcur) &&
5439 			       tnum_in(rold->var_off, rcur->var_off);
5440 		} else {
5441 			/* We're trying to use a pointer in place of a scalar.
5442 			 * Even if the scalar was unbounded, this could lead to
5443 			 * pointer leaks because scalars are allowed to leak
5444 			 * while pointers are not. We could make this safe in
5445 			 * special cases if root is calling us, but it's
5446 			 * probably not worth the hassle.
5447 			 */
5448 			return false;
5449 		}
5450 	case PTR_TO_MAP_VALUE:
5451 		/* If the new min/max/var_off satisfy the old ones and
5452 		 * everything else matches, we are OK.
5453 		 * We don't care about the 'id' value, because nothing
5454 		 * uses it for PTR_TO_MAP_VALUE (only for ..._OR_NULL)
5455 		 */
5456 		return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 &&
5457 		       range_within(rold, rcur) &&
5458 		       tnum_in(rold->var_off, rcur->var_off);
5459 	case PTR_TO_MAP_VALUE_OR_NULL:
5460 		/* a PTR_TO_MAP_VALUE could be safe to use as a
5461 		 * PTR_TO_MAP_VALUE_OR_NULL into the same map.
5462 		 * However, if the old PTR_TO_MAP_VALUE_OR_NULL then got NULL-
5463 		 * checked, doing so could have affected others with the same
5464 		 * id, and we can't check for that because we lost the id when
5465 		 * we converted to a PTR_TO_MAP_VALUE.
5466 		 */
5467 		if (rcur->type != PTR_TO_MAP_VALUE_OR_NULL)
5468 			return false;
5469 		if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)))
5470 			return false;
5471 		/* Check our ids match any regs they're supposed to */
5472 		return check_ids(rold->id, rcur->id, idmap);
5473 	case PTR_TO_PACKET_META:
5474 	case PTR_TO_PACKET:
5475 		if (rcur->type != rold->type)
5476 			return false;
5477 		/* We must have at least as much range as the old ptr
5478 		 * did, so that any accesses which were safe before are
5479 		 * still safe.  This is true even if old range < old off,
5480 		 * since someone could have accessed through (ptr - k), or
5481 		 * even done ptr -= k in a register, to get a safe access.
5482 		 */
5483 		if (rold->range > rcur->range)
5484 			return false;
5485 		/* If the offsets don't match, we can't trust our alignment;
5486 		 * nor can we be sure that we won't fall out of range.
5487 		 */
5488 		if (rold->off != rcur->off)
5489 			return false;
5490 		/* id relations must be preserved */
5491 		if (rold->id && !check_ids(rold->id, rcur->id, idmap))
5492 			return false;
5493 		/* new val must satisfy old val knowledge */
5494 		return range_within(rold, rcur) &&
5495 		       tnum_in(rold->var_off, rcur->var_off);
5496 	case PTR_TO_CTX:
5497 	case CONST_PTR_TO_MAP:
5498 	case PTR_TO_PACKET_END:
5499 	case PTR_TO_FLOW_KEYS:
5500 	case PTR_TO_SOCKET:
5501 	case PTR_TO_SOCKET_OR_NULL:
5502 		/* Only valid matches are exact, which memcmp() above
5503 		 * would have accepted
5504 		 */
5505 	default:
5506 		/* Don't know what's going on, just say it's not safe */
5507 		return false;
5508 	}
5509 
5510 	/* Shouldn't get here; if we do, say it's not safe */
5511 	WARN_ON_ONCE(1);
5512 	return false;
5513 }
5514 
5515 static bool stacksafe(struct bpf_func_state *old,
5516 		      struct bpf_func_state *cur,
5517 		      struct idpair *idmap)
5518 {
5519 	int i, spi;
5520 
5521 	/* walk slots of the explored stack and ignore any additional
5522 	 * slots in the current stack, since explored(safe) state
5523 	 * didn't use them
5524 	 */
5525 	for (i = 0; i < old->allocated_stack; i++) {
5526 		spi = i / BPF_REG_SIZE;
5527 
5528 		if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ)) {
5529 			i += BPF_REG_SIZE - 1;
5530 			/* explored state didn't use this */
5531 			continue;
5532 		}
5533 
5534 		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID)
5535 			continue;
5536 
5537 		/* explored stack has more populated slots than current stack
5538 		 * and these slots were used
5539 		 */
5540 		if (i >= cur->allocated_stack)
5541 			return false;
5542 
5543 		/* if old state was safe with misc data in the stack
5544 		 * it will be safe with zero-initialized stack.
5545 		 * The opposite is not true
5546 		 */
5547 		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC &&
5548 		    cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO)
5549 			continue;
5550 		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] !=
5551 		    cur->stack[spi].slot_type[i % BPF_REG_SIZE])
5552 			/* Ex: old explored (safe) state has STACK_SPILL in
5553 			 * this stack slot, but current has STACK_MISC ->
5554 			 * these verifier states are not equivalent,
5555 			 * return false to continue verification of this path
5556 			 */
5557 			return false;
5558 		if (i % BPF_REG_SIZE)
5559 			continue;
5560 		if (old->stack[spi].slot_type[0] != STACK_SPILL)
5561 			continue;
5562 		if (!regsafe(&old->stack[spi].spilled_ptr,
5563 			     &cur->stack[spi].spilled_ptr,
5564 			     idmap))
5565 			/* when explored and current stack slot are both storing
5566 			 * spilled registers, check that the stored pointer types
5567 			 * are the same as well.
5568 			 * Ex: explored safe path could have stored
5569 			 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8}
5570 			 * but current path has stored:
5571 			 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16}
5572 			 * such verifier states are not equivalent.
5573 			 * return false to continue verification of this path
5574 			 */
5575 			return false;
5576 	}
5577 	return true;
5578 }
5579 
5580 static bool refsafe(struct bpf_func_state *old, struct bpf_func_state *cur)
5581 {
5582 	if (old->acquired_refs != cur->acquired_refs)
5583 		return false;
5584 	return !memcmp(old->refs, cur->refs,
5585 		       sizeof(*old->refs) * old->acquired_refs);
5586 }
5587 
5588 /* compare two verifier states
5589  *
5590  * all states stored in state_list are known to be valid, since
5591  * verifier reached 'bpf_exit' instruction through them
5592  *
5593  * this function is called when the verifier explores different branches of
5594  * execution popped from the state stack. If it sees an old state that has
5595  * more strict register state and more strict stack state, then this execution
5596  * branch doesn't need to be explored further, since verifier already
5597  * concluded that more strict state leads to valid finish.
5598  *
5599  * Therefore two states are equivalent if register state is more conservative
5600  * and explored stack state is more conservative than the current one.
5601  * Example:
5602  *       explored                   current
5603  * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC)
5604  * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC)
5605  *
5606  * In other words if current stack state (one being explored) has more
5607  * valid slots than old one that already passed validation, it means
5608  * the verifier can stop exploring and conclude that current state is valid too
5609  *
5610  * Similarly with registers. If explored state has register type as invalid
5611  * whereas register type in current state is meaningful, it means that
5612  * the current state will reach 'bpf_exit' instruction safely
5613  */
5614 static bool func_states_equal(struct bpf_func_state *old,
5615 			      struct bpf_func_state *cur)
5616 {
5617 	struct idpair *idmap;
5618 	bool ret = false;
5619 	int i;
5620 
5621 	idmap = kcalloc(ID_MAP_SIZE, sizeof(struct idpair), GFP_KERNEL);
5622 	/* If we failed to allocate the idmap, just say it's not safe */
5623 	if (!idmap)
5624 		return false;
5625 
5626 	for (i = 0; i < MAX_BPF_REG; i++) {
5627 		if (!regsafe(&old->regs[i], &cur->regs[i], idmap))
5628 			goto out_free;
5629 	}
5630 
5631 	if (!stacksafe(old, cur, idmap))
5632 		goto out_free;
5633 
5634 	if (!refsafe(old, cur))
5635 		goto out_free;
5636 	ret = true;
5637 out_free:
5638 	kfree(idmap);
5639 	return ret;
5640 }
5641 
5642 static bool states_equal(struct bpf_verifier_env *env,
5643 			 struct bpf_verifier_state *old,
5644 			 struct bpf_verifier_state *cur)
5645 {
5646 	int i;
5647 
5648 	if (old->curframe != cur->curframe)
5649 		return false;
5650 
5651 	/* Verification state from speculative execution simulation
5652 	 * must never prune a non-speculative execution one.
5653 	 */
5654 	if (old->speculative && !cur->speculative)
5655 		return false;
5656 
5657 	/* for states to be equal callsites have to be the same
5658 	 * and all frame states need to be equivalent
5659 	 */
5660 	for (i = 0; i <= old->curframe; i++) {
5661 		if (old->frame[i]->callsite != cur->frame[i]->callsite)
5662 			return false;
5663 		if (!func_states_equal(old->frame[i], cur->frame[i]))
5664 			return false;
5665 	}
5666 	return true;
5667 }
5668 
5669 /* A write screens off any subsequent reads; but write marks come from the
5670  * straight-line code between a state and its parent.  When we arrive at an
5671  * equivalent state (jump target or such) we didn't arrive by the straight-line
5672  * code, so read marks in the state must propagate to the parent regardless
5673  * of the state's write marks. That's what 'parent == state->parent' comparison
5674  * in mark_reg_read() is for.
5675  */
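/* A small illustration (a sketch):
 *
 * 6: if r6 > 5 goto pc+2	// explored state here has a READ mark on r6
 *
 * When another path reaches an equivalent state at insn 6 and is pruned, that
 * read of r6 did not happen on the new path's straight-line code, so
 * propagate_liveness() below pushes it into the new path's parent states
 * regardless of any write marks recorded in the explored state.
 */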
5676 static int propagate_liveness(struct bpf_verifier_env *env,
5677 			      const struct bpf_verifier_state *vstate,
5678 			      struct bpf_verifier_state *vparent)
5679 {
5680 	int i, frame, err = 0;
5681 	struct bpf_func_state *state, *parent;
5682 
5683 	if (vparent->curframe != vstate->curframe) {
5684 		WARN(1, "propagate_live: parent frame %d current frame %d\n",
5685 		     vparent->curframe, vstate->curframe);
5686 		return -EFAULT;
5687 	}
5688 	/* Propagate read liveness of registers... */
5689 	BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
5690 	/* We don't need to worry about FP liveness because it's read-only */
5691 	for (i = 0; i < BPF_REG_FP; i++) {
5692 		if (vparent->frame[vparent->curframe]->regs[i].live & REG_LIVE_READ)
5693 			continue;
5694 		if (vstate->frame[vstate->curframe]->regs[i].live & REG_LIVE_READ) {
5695 			err = mark_reg_read(env, &vstate->frame[vstate->curframe]->regs[i],
5696 					    &vparent->frame[vstate->curframe]->regs[i]);
5697 			if (err)
5698 				return err;
5699 		}
5700 	}
5701 
5702 	/* ... and stack slots */
5703 	for (frame = 0; frame <= vstate->curframe; frame++) {
5704 		state = vstate->frame[frame];
5705 		parent = vparent->frame[frame];
5706 		for (i = 0; i < state->allocated_stack / BPF_REG_SIZE &&
5707 			    i < parent->allocated_stack / BPF_REG_SIZE; i++) {
5708 			if (parent->stack[i].spilled_ptr.live & REG_LIVE_READ)
5709 				continue;
5710 			if (state->stack[i].spilled_ptr.live & REG_LIVE_READ)
5711 				mark_reg_read(env, &state->stack[i].spilled_ptr,
5712 					      &parent->stack[i].spilled_ptr);
5713 		}
5714 	}
5715 	return err;
5716 }
5717 
5718 static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
5719 {
5720 	struct bpf_verifier_state_list *new_sl;
5721 	struct bpf_verifier_state_list *sl;
5722 	struct bpf_verifier_state *cur = env->cur_state, *new;
5723 	int i, j, err, states_cnt = 0;
5724 
5725 	sl = env->explored_states[insn_idx];
5726 	if (!sl)
5727 		/* this 'insn_idx' instruction wasn't marked, so we will not
5728 		 * be doing state search here
5729 		 */
5730 		return 0;
5731 
5732 	clean_live_states(env, insn_idx, cur);
5733 
5734 	while (sl != STATE_LIST_MARK) {
5735 		if (states_equal(env, &sl->state, cur)) {
5736 			/* reached equivalent register/stack state,
5737 			 * prune the search.
5738 			 * Registers read by the continuation are read by us.
5739 			 * If we have any write marks in env->cur_state, they
5740 			 * will prevent corresponding reads in the continuation
5741 			 * from reaching our parent (an explored_state).  Our
5742 			 * own state will get the read marks recorded, but
5743 			 * they'll be immediately forgotten as we're pruning
5744 			 * this state and will pop a new one.
5745 			 */
5746 			err = propagate_liveness(env, &sl->state, cur);
5747 			if (err)
5748 				return err;
5749 			return 1;
5750 		}
5751 		sl = sl->next;
5752 		states_cnt++;
5753 	}
5754 
5755 	if (!env->allow_ptr_leaks && states_cnt > BPF_COMPLEXITY_LIMIT_STATES)
5756 		return 0;
5757 
5758 	/* there were no equivalent states, remember current one.
5759 	 * technically the current state is not proven to be safe yet,
5760 	 * but it will either reach the outermost bpf_exit (which means it's safe)
5761 	 * or it will be rejected. Since there are no loops, we won't be
5762 	 * seeing this tuple (frame[0].callsite, frame[1].callsite, .. insn_idx)
5763 	 * again on the way to bpf_exit
5764 	 */
5765 	new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL);
5766 	if (!new_sl)
5767 		return -ENOMEM;
5768 
5769 	/* add new state to the head of linked list */
5770 	new = &new_sl->state;
5771 	err = copy_verifier_state(new, cur);
5772 	if (err) {
5773 		free_verifier_state(new, false);
5774 		kfree(new_sl);
5775 		return err;
5776 	}
5777 	new_sl->next = env->explored_states[insn_idx];
5778 	env->explored_states[insn_idx] = new_sl;
5779 	/* connect new state to parentage chain. Current frame needs all
5780 	 * registers connected. Only r6 - r9 of the callers are alive (pushed
5781 	 * to the stack implicitly by JITs) so in callers' frames connect just
5782 	 * r6 - r9 as an optimization. Callers will have r1 - r5 connected to
5783 	 * the state of the call instruction (with WRITTEN set), and r0 comes
5784 	 * from callee with its full parentage chain, anyway.
5785 	 */
5786 	for (j = 0; j <= cur->curframe; j++)
5787 		for (i = j < cur->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++)
5788 			cur->frame[j]->regs[i].parent = &new->frame[j]->regs[i];
5789 	/* clear write marks in current state: the writes we did are not writes
5790 	 * our child did, so they don't screen off its reads from us.
5791 	 * (There are no read marks in current state, because reads always mark
5792 	 * their parent and current state never has children yet.  Only
5793 	 * explored_states can get read marks.)
5794 	 */
5795 	for (i = 0; i < BPF_REG_FP; i++)
5796 		cur->frame[cur->curframe]->regs[i].live = REG_LIVE_NONE;
5797 
5798 	/* all stack frames are accessible from callee, clear them all */
5799 	for (j = 0; j <= cur->curframe; j++) {
5800 		struct bpf_func_state *frame = cur->frame[j];
5801 		struct bpf_func_state *newframe = new->frame[j];
5802 
5803 		for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++) {
5804 			frame->stack[i].spilled_ptr.live = REG_LIVE_NONE;
5805 			frame->stack[i].spilled_ptr.parent =
5806 						&newframe->stack[i].spilled_ptr;
5807 		}
5808 	}
5809 	return 0;
5810 }
5811 
5812 /* Return true if it's OK to have the same insn return a different type. */
5813 static bool reg_type_mismatch_ok(enum bpf_reg_type type)
5814 {
5815 	switch (type) {
5816 	case PTR_TO_CTX:
5817 	case PTR_TO_SOCKET:
5818 	case PTR_TO_SOCKET_OR_NULL:
5819 		return false;
5820 	default:
5821 		return true;
5822 	}
5823 }
5824 
5825 /* If an instruction was previously used with particular pointer types, then we
5826  * need to be careful to avoid cases such as the below, where it may be ok
5827  * for one branch to access the pointer, but not ok for the other branch:
5828  *
5829  * R1 = sock_ptr
5830  * goto X;
5831  * ...
5832  * R1 = some_other_valid_ptr;
5833  * goto X;
5834  * ...
5835  * R2 = *(u32 *)(R1 + 0);
5836  */
5837 static bool reg_type_mismatch(enum bpf_reg_type src, enum bpf_reg_type prev)
5838 {
5839 	return src != prev && (!reg_type_mismatch_ok(src) ||
5840 			       !reg_type_mismatch_ok(prev));
5841 }
5842 
5843 static int do_check(struct bpf_verifier_env *env)
5844 {
5845 	struct bpf_verifier_state *state;
5846 	struct bpf_insn *insns = env->prog->insnsi;
5847 	struct bpf_reg_state *regs;
5848 	int insn_cnt = env->prog->len, i;
5849 	int insn_processed = 0;
5850 	bool do_print_state = false;
5851 
5852 	env->prev_linfo = NULL;
5853 
5854 	state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL);
5855 	if (!state)
5856 		return -ENOMEM;
5857 	state->curframe = 0;
5858 	state->speculative = false;
5859 	state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL);
5860 	if (!state->frame[0]) {
5861 		kfree(state);
5862 		return -ENOMEM;
5863 	}
5864 	env->cur_state = state;
5865 	init_func_state(env, state->frame[0],
5866 			BPF_MAIN_FUNC /* callsite */,
5867 			0 /* frameno */,
5868 			0 /* subprogno, zero == main subprog */);
5869 
5870 	for (;;) {
5871 		struct bpf_insn *insn;
5872 		u8 class;
5873 		int err;
5874 
5875 		if (env->insn_idx >= insn_cnt) {
5876 			verbose(env, "invalid insn idx %d insn_cnt %d\n",
5877 				env->insn_idx, insn_cnt);
5878 			return -EFAULT;
5879 		}
5880 
5881 		insn = &insns[env->insn_idx];
5882 		class = BPF_CLASS(insn->code);
5883 
5884 		if (++insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) {
5885 			verbose(env,
5886 				"BPF program is too large. Processed %d insn\n",
5887 				insn_processed);
5888 			return -E2BIG;
5889 		}
5890 
5891 		err = is_state_visited(env, env->insn_idx);
5892 		if (err < 0)
5893 			return err;
5894 		if (err == 1) {
5895 			/* found equivalent state, can prune the search */
5896 			if (env->log.level) {
5897 				if (do_print_state)
5898 					verbose(env, "\nfrom %d to %d%s: safe\n",
5899 						env->prev_insn_idx, env->insn_idx,
5900 						env->cur_state->speculative ?
5901 						" (speculative execution)" : "");
5902 				else
5903 					verbose(env, "%d: safe\n", env->insn_idx);
5904 			}
5905 			goto process_bpf_exit;
5906 		}
5907 
5908 		if (signal_pending(current))
5909 			return -EAGAIN;
5910 
5911 		if (need_resched())
5912 			cond_resched();
5913 
5914 		if (env->log.level > 1 || (env->log.level && do_print_state)) {
5915 			if (env->log.level > 1)
5916 				verbose(env, "%d:", env->insn_idx);
5917 			else
5918 				verbose(env, "\nfrom %d to %d%s:",
5919 					env->prev_insn_idx, env->insn_idx,
5920 					env->cur_state->speculative ?
5921 					" (speculative execution)" : "");
5922 			print_verifier_state(env, state->frame[state->curframe]);
5923 			do_print_state = false;
5924 		}
5925 
5926 		if (env->log.level) {
5927 			const struct bpf_insn_cbs cbs = {
5928 				.cb_print	= verbose,
5929 				.private_data	= env,
5930 			};
5931 
5932 			verbose_linfo(env, env->insn_idx, "; ");
5933 			verbose(env, "%d: ", env->insn_idx);
5934 			print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
5935 		}
5936 
5937 		if (bpf_prog_is_dev_bound(env->prog->aux)) {
5938 			err = bpf_prog_offload_verify_insn(env, env->insn_idx,
5939 							   env->prev_insn_idx);
5940 			if (err)
5941 				return err;
5942 		}
5943 
5944 		regs = cur_regs(env);
5945 		env->insn_aux_data[env->insn_idx].seen = true;
5946 
5947 		if (class == BPF_ALU || class == BPF_ALU64) {
5948 			err = check_alu_op(env, insn);
5949 			if (err)
5950 				return err;
5951 
5952 		} else if (class == BPF_LDX) {
5953 			enum bpf_reg_type *prev_src_type, src_reg_type;
5954 
5955 			/* check for reserved fields is already done */
5956 
5957 			/* check src operand */
5958 			err = check_reg_arg(env, insn->src_reg, SRC_OP);
5959 			if (err)
5960 				return err;
5961 
5962 			err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
5963 			if (err)
5964 				return err;
5965 
5966 			src_reg_type = regs[insn->src_reg].type;
5967 
5968 			/* check that memory (src_reg + off) is readable,
5969 			 * the state of dst_reg will be updated by this func
5970 			 */
5971 			err = check_mem_access(env, env->insn_idx, insn->src_reg,
5972 					       insn->off, BPF_SIZE(insn->code),
5973 					       BPF_READ, insn->dst_reg, false);
5974 			if (err)
5975 				return err;
5976 
5977 			prev_src_type = &env->insn_aux_data[env->insn_idx].ptr_type;
5978 
5979 			if (*prev_src_type == NOT_INIT) {
5980 				/* saw a valid insn
5981 				 * dst_reg = *(u32 *)(src_reg + off)
5982 				 * save type to validate intersecting paths
5983 				 */
5984 				*prev_src_type = src_reg_type;
5985 
5986 			} else if (reg_type_mismatch(src_reg_type, *prev_src_type)) {
5987 				/* A misbehaving program is trying to use the same insn
5988 				 * dst_reg = *(u32*) (src_reg + off)
5989 				 * with different pointer types:
5990 				 * src_reg == ctx in one branch and
5991 				 * src_reg == stack|map in some other branch.
5992 				 * Reject it.
5993 				 */
5994 				verbose(env, "same insn cannot be used with different pointers\n");
5995 				return -EINVAL;
5996 			}
5997 
5998 		} else if (class == BPF_STX) {
5999 			enum bpf_reg_type *prev_dst_type, dst_reg_type;
6000 
6001 			if (BPF_MODE(insn->code) == BPF_XADD) {
6002 				err = check_xadd(env, env->insn_idx, insn);
6003 				if (err)
6004 					return err;
6005 				env->insn_idx++;
6006 				continue;
6007 			}
6008 
6009 			/* check src1 operand */
6010 			err = check_reg_arg(env, insn->src_reg, SRC_OP);
6011 			if (err)
6012 				return err;
6013 			/* check src2 operand */
6014 			err = check_reg_arg(env, insn->dst_reg, SRC_OP);
6015 			if (err)
6016 				return err;
6017 
6018 			dst_reg_type = regs[insn->dst_reg].type;
6019 
6020 			/* check that memory (dst_reg + off) is writeable */
6021 			err = check_mem_access(env, env->insn_idx, insn->dst_reg,
6022 					       insn->off, BPF_SIZE(insn->code),
6023 					       BPF_WRITE, insn->src_reg, false);
6024 			if (err)
6025 				return err;
6026 
6027 			prev_dst_type = &env->insn_aux_data[env->insn_idx].ptr_type;
6028 
6029 			if (*prev_dst_type == NOT_INIT) {
6030 				*prev_dst_type = dst_reg_type;
6031 			} else if (reg_type_mismatch(dst_reg_type, *prev_dst_type)) {
6032 				verbose(env, "same insn cannot be used with different pointers\n");
6033 				return -EINVAL;
6034 			}
6035 
6036 		} else if (class == BPF_ST) {
6037 			if (BPF_MODE(insn->code) != BPF_MEM ||
6038 			    insn->src_reg != BPF_REG_0) {
6039 				verbose(env, "BPF_ST uses reserved fields\n");
6040 				return -EINVAL;
6041 			}
6042 			/* check src operand */
6043 			err = check_reg_arg(env, insn->dst_reg, SRC_OP);
6044 			if (err)
6045 				return err;
6046 
6047 			if (is_ctx_reg(env, insn->dst_reg)) {
6048 				verbose(env, "BPF_ST stores into R%d %s is not allowed\n",
6049 					insn->dst_reg,
6050 					reg_type_str[reg_state(env, insn->dst_reg)->type]);
6051 				return -EACCES;
6052 			}
6053 
6054 			/* check that memory (dst_reg + off) is writeable */
6055 			err = check_mem_access(env, env->insn_idx, insn->dst_reg,
6056 					       insn->off, BPF_SIZE(insn->code),
6057 					       BPF_WRITE, -1, false);
6058 			if (err)
6059 				return err;
6060 
6061 		} else if (class == BPF_JMP) {
6062 			u8 opcode = BPF_OP(insn->code);
6063 
6064 			if (opcode == BPF_CALL) {
6065 				if (BPF_SRC(insn->code) != BPF_K ||
6066 				    insn->off != 0 ||
6067 				    (insn->src_reg != BPF_REG_0 &&
6068 				     insn->src_reg != BPF_PSEUDO_CALL) ||
6069 				    insn->dst_reg != BPF_REG_0) {
6070 					verbose(env, "BPF_CALL uses reserved fields\n");
6071 					return -EINVAL;
6072 				}
6073 
6074 				if (insn->src_reg == BPF_PSEUDO_CALL)
6075 					err = check_func_call(env, insn, &env->insn_idx);
6076 				else
6077 					err = check_helper_call(env, insn->imm, env->insn_idx);
6078 				if (err)
6079 					return err;
6080 
6081 			} else if (opcode == BPF_JA) {
6082 				if (BPF_SRC(insn->code) != BPF_K ||
6083 				    insn->imm != 0 ||
6084 				    insn->src_reg != BPF_REG_0 ||
6085 				    insn->dst_reg != BPF_REG_0) {
6086 					verbose(env, "BPF_JA uses reserved fields\n");
6087 					return -EINVAL;
6088 				}
6089 
6090 				env->insn_idx += insn->off + 1;
6091 				continue;
6092 
6093 			} else if (opcode == BPF_EXIT) {
6094 				if (BPF_SRC(insn->code) != BPF_K ||
6095 				    insn->imm != 0 ||
6096 				    insn->src_reg != BPF_REG_0 ||
6097 				    insn->dst_reg != BPF_REG_0) {
6098 					verbose(env, "BPF_EXIT uses reserved fields\n");
6099 					return -EINVAL;
6100 				}
6101 
6102 				if (state->curframe) {
6103 					/* exit from nested function */
6104 					env->prev_insn_idx = env->insn_idx;
6105 					err = prepare_func_exit(env, &env->insn_idx);
6106 					if (err)
6107 						return err;
6108 					do_print_state = true;
6109 					continue;
6110 				}
6111 
6112 				err = check_reference_leak(env);
6113 				if (err)
6114 					return err;
6115 
6116 				/* eBPF calling convention is such that R0 is used
6117 				 * to return the value from eBPF program.
6118 				 * Make sure that it's readable at this time
6119 				 * of bpf_exit, which means that program wrote
6120 				 * something into it earlier
6121 				 */
6122 				err = check_reg_arg(env, BPF_REG_0, SRC_OP);
6123 				if (err)
6124 					return err;
6125 
6126 				if (is_pointer_value(env, BPF_REG_0)) {
6127 					verbose(env, "R0 leaks addr as return value\n");
6128 					return -EACCES;
6129 				}
6130 
6131 				err = check_return_code(env);
6132 				if (err)
6133 					return err;
6134 process_bpf_exit:
6135 				err = pop_stack(env, &env->prev_insn_idx,
6136 						&env->insn_idx);
6137 				if (err < 0) {
6138 					if (err != -ENOENT)
6139 						return err;
6140 					break;
6141 				} else {
6142 					do_print_state = true;
6143 					continue;
6144 				}
6145 			} else {
6146 				err = check_cond_jmp_op(env, insn, &env->insn_idx);
6147 				if (err)
6148 					return err;
6149 			}
6150 		} else if (class == BPF_LD) {
6151 			u8 mode = BPF_MODE(insn->code);
6152 
6153 			if (mode == BPF_ABS || mode == BPF_IND) {
6154 				err = check_ld_abs(env, insn);
6155 				if (err)
6156 					return err;
6157 
6158 			} else if (mode == BPF_IMM) {
6159 				err = check_ld_imm(env, insn);
6160 				if (err)
6161 					return err;
6162 
6163 				env->insn_idx++;
6164 				env->insn_aux_data[env->insn_idx].seen = true;
6165 			} else {
6166 				verbose(env, "invalid BPF_LD mode\n");
6167 				return -EINVAL;
6168 			}
6169 		} else {
6170 			verbose(env, "unknown insn class %d\n", class);
6171 			return -EINVAL;
6172 		}
6173 
6174 		env->insn_idx++;
6175 	}
6176 
6177 	verbose(env, "processed %d insns (limit %d), stack depth ",
6178 		insn_processed, BPF_COMPLEXITY_LIMIT_INSNS);
6179 	for (i = 0; i < env->subprog_cnt; i++) {
6180 		u32 depth = env->subprog_info[i].stack_depth;
6181 
6182 		verbose(env, "%d", depth);
6183 		if (i + 1 < env->subprog_cnt)
6184 			verbose(env, "+");
6185 	}
6186 	verbose(env, "\n");
6187 	env->prog->aux->stack_depth = env->subprog_info[0].stack_depth;
6188 	return 0;
6189 }
6190 
6191 static int check_map_prealloc(struct bpf_map *map)
6192 {
6193 	return (map->map_type != BPF_MAP_TYPE_HASH &&
6194 		map->map_type != BPF_MAP_TYPE_PERCPU_HASH &&
6195 		map->map_type != BPF_MAP_TYPE_HASH_OF_MAPS) ||
6196 		!(map->map_flags & BPF_F_NO_PREALLOC);
6197 }
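/* From the loader's side this means a hash map used by a perf_event program
 * must be created without BPF_F_NO_PREALLOC.  A sketch, assuming the classic
 * libbpf bpf_create_map() helper:
 *
 *	int map_fd = bpf_create_map(BPF_MAP_TYPE_HASH,
 *				    sizeof(__u32), sizeof(__u64),
 *				    1024, 0);
 *
 * Passing BPF_F_NO_PREALLOC in the final map_flags argument would make
 * check_map_prealloc() above return false and the load would be rejected by
 * check_map_prog_compatibility() below.
 */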
6198 
6199 static int check_map_prog_compatibility(struct bpf_verifier_env *env,
6200 					struct bpf_map *map,
6201 					struct bpf_prog *prog)
6202 
6203 {
6204 	/* Make sure that BPF_PROG_TYPE_PERF_EVENT programs only use
6205 	 * preallocated hash maps, since doing memory allocation
6206 	 * in overflow_handler can crash depending on where the NMI got
6207 	 * triggered.
6208 	 */
6209 	if (prog->type == BPF_PROG_TYPE_PERF_EVENT) {
6210 		if (!check_map_prealloc(map)) {
6211 			verbose(env, "perf_event programs can only use preallocated hash map\n");
6212 			return -EINVAL;
6213 		}
6214 		if (map->inner_map_meta &&
6215 		    !check_map_prealloc(map->inner_map_meta)) {
6216 			verbose(env, "perf_event programs can only use preallocated inner hash map\n");
6217 			return -EINVAL;
6218 		}
6219 	}
6220 
6221 	if ((bpf_prog_is_dev_bound(prog->aux) || bpf_map_is_dev_bound(map)) &&
6222 	    !bpf_offload_prog_map_match(prog, map)) {
6223 		verbose(env, "offload device mismatch between prog and map\n");
6224 		return -EINVAL;
6225 	}
6226 
6227 	return 0;
6228 }
6229 
6230 static bool bpf_map_is_cgroup_storage(struct bpf_map *map)
6231 {
6232 	return (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE ||
6233 		map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE);
6234 }
6235 
6236 /* look for pseudo eBPF instructions that access map FDs and
6237  * replace them with actual map pointers
6238  */
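/* For example (a sketch, using the BPF_LD_MAP_FD() convenience macro carried
 * by the kernel's BPF samples and selftests), a loader emits
 *
 *	BPF_LD_MAP_FD(BPF_REG_1, map_fd)
 *
 * which is a two-insn BPF_LD | BPF_IMM | BPF_DW with src_reg set to
 * BPF_PSEUDO_MAP_FD and the map file descriptor in imm.  The pass below
 * resolves that fd and stores the low/high 32 bits of the resulting
 * 'struct bpf_map *' into insn[0].imm and insn[1].imm.
 */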
6239 static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env)
6240 {
6241 	struct bpf_insn *insn = env->prog->insnsi;
6242 	int insn_cnt = env->prog->len;
6243 	int i, j, err;
6244 
6245 	err = bpf_prog_calc_tag(env->prog);
6246 	if (err)
6247 		return err;
6248 
6249 	for (i = 0; i < insn_cnt; i++, insn++) {
6250 		if (BPF_CLASS(insn->code) == BPF_LDX &&
6251 		    (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) {
6252 			verbose(env, "BPF_LDX uses reserved fields\n");
6253 			return -EINVAL;
6254 		}
6255 
6256 		if (BPF_CLASS(insn->code) == BPF_STX &&
6257 		    ((BPF_MODE(insn->code) != BPF_MEM &&
6258 		      BPF_MODE(insn->code) != BPF_XADD) || insn->imm != 0)) {
6259 			verbose(env, "BPF_STX uses reserved fields\n");
6260 			return -EINVAL;
6261 		}
6262 
6263 		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) {
6264 			struct bpf_map *map;
6265 			struct fd f;
6266 
6267 			if (i == insn_cnt - 1 || insn[1].code != 0 ||
6268 			    insn[1].dst_reg != 0 || insn[1].src_reg != 0 ||
6269 			    insn[1].off != 0) {
6270 				verbose(env, "invalid bpf_ld_imm64 insn\n");
6271 				return -EINVAL;
6272 			}
6273 
6274 			if (insn->src_reg == 0)
6275 				/* valid generic load 64-bit imm */
6276 				goto next_insn;
6277 
6278 			if (insn->src_reg != BPF_PSEUDO_MAP_FD) {
6279 				verbose(env,
6280 					"unrecognized bpf_ld_imm64 insn\n");
6281 				return -EINVAL;
6282 			}
6283 
6284 			f = fdget(insn->imm);
6285 			map = __bpf_map_get(f);
6286 			if (IS_ERR(map)) {
6287 				verbose(env, "fd %d is not pointing to valid bpf_map\n",
6288 					insn->imm);
6289 				return PTR_ERR(map);
6290 			}
6291 
6292 			err = check_map_prog_compatibility(env, map, env->prog);
6293 			if (err) {
6294 				fdput(f);
6295 				return err;
6296 			}
6297 
6298 			/* store map pointer inside BPF_LD_IMM64 instruction */
6299 			insn[0].imm = (u32) (unsigned long) map;
6300 			insn[1].imm = ((u64) (unsigned long) map) >> 32;
6301 
6302 			/* check whether we recorded this map already */
6303 			for (j = 0; j < env->used_map_cnt; j++)
6304 				if (env->used_maps[j] == map) {
6305 					fdput(f);
6306 					goto next_insn;
6307 				}
6308 
6309 			if (env->used_map_cnt >= MAX_USED_MAPS) {
6310 				fdput(f);
6311 				return -E2BIG;
6312 			}
6313 
6314 			/* hold the map. If the program is rejected by verifier,
6315 			 * the map will be released by release_maps() or it
6316 			 * will be used by the valid program until it's unloaded
6317 			 * and all maps are released in free_used_maps()
6318 			 */
6319 			map = bpf_map_inc(map, false);
6320 			if (IS_ERR(map)) {
6321 				fdput(f);
6322 				return PTR_ERR(map);
6323 			}
6324 			env->used_maps[env->used_map_cnt++] = map;
6325 
6326 			if (bpf_map_is_cgroup_storage(map) &&
6327 			    bpf_cgroup_storage_assign(env->prog, map)) {
6328 				verbose(env, "only one cgroup storage of each type is allowed\n");
6329 				fdput(f);
6330 				return -EBUSY;
6331 			}
6332 
6333 			fdput(f);
6334 next_insn:
6335 			insn++;
6336 			i++;
6337 			continue;
6338 		}
6339 
6340 		/* Basic sanity check before we invest more work here. */
6341 		if (!bpf_opcode_in_insntable(insn->code)) {
6342 			verbose(env, "unknown opcode %02x\n", insn->code);
6343 			return -EINVAL;
6344 		}
6345 	}
6346 
6347 	/* now all pseudo BPF_LD_IMM64 instructions load valid
6348 	 * 'struct bpf_map *' into a register instead of user map_fd.
6349 	 * These pointers will be used later by verifier to validate map access.
6350 	 */
6351 	return 0;
6352 }
6353 
6354 /* drop refcnt of maps used by the rejected program */
6355 static void release_maps(struct bpf_verifier_env *env)
6356 {
6357 	enum bpf_cgroup_storage_type stype;
6358 	int i;
6359 
6360 	for_each_cgroup_storage_type(stype) {
6361 		if (!env->prog->aux->cgroup_storage[stype])
6362 			continue;
6363 		bpf_cgroup_storage_release(env->prog,
6364 			env->prog->aux->cgroup_storage[stype]);
6365 	}
6366 
6367 	for (i = 0; i < env->used_map_cnt; i++)
6368 		bpf_map_put(env->used_maps[i]);
6369 }
6370 
6371 /* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */
6372 static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env)
6373 {
6374 	struct bpf_insn *insn = env->prog->insnsi;
6375 	int insn_cnt = env->prog->len;
6376 	int i;
6377 
6378 	for (i = 0; i < insn_cnt; i++, insn++)
6379 		if (insn->code == (BPF_LD | BPF_IMM | BPF_DW))
6380 			insn->src_reg = 0;
6381 }
6382 
6383 /* single env->prog->insnsi[off] instruction was replaced with the range
6384  * insnsi[off, off + cnt).  Adjust corresponding insn_aux_data by copying
6385  * [0, off) and [off, end) to new locations, so the patched range stays zero
6386  */
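/* E.g. (a sketch): patching insn 10 into a 3-insn sequence copies aux data for
 * insns [0, 10) unchanged, moves the aux data of the old insns starting at 10
 * up so that it now starts at index 12, and leaves the two freshly inserted
 * slots at 10 and 11 zeroed apart from their 'seen' flag.
 */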
6387 static int adjust_insn_aux_data(struct bpf_verifier_env *env, u32 prog_len,
6388 				u32 off, u32 cnt)
6389 {
6390 	struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data;
6391 	int i;
6392 
6393 	if (cnt == 1)
6394 		return 0;
6395 	new_data = vzalloc(array_size(prog_len,
6396 				      sizeof(struct bpf_insn_aux_data)));
6397 	if (!new_data)
6398 		return -ENOMEM;
6399 	memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
6400 	memcpy(new_data + off + cnt - 1, old_data + off,
6401 	       sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
6402 	for (i = off; i < off + cnt - 1; i++)
6403 		new_data[i].seen = true;
6404 	env->insn_aux_data = new_data;
6405 	vfree(old_data);
6406 	return 0;
6407 }
6408 
6409 static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len)
6410 {
6411 	int i;
6412 
6413 	if (len == 1)
6414 		return;
6415 	/* NOTE: fake 'exit' subprog should be updated as well. */
6416 	for (i = 0; i <= env->subprog_cnt; i++) {
6417 		if (env->subprog_info[i].start <= off)
6418 			continue;
6419 		env->subprog_info[i].start += len - 1;
6420 	}
6421 }
6422 
6423 static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off,
6424 					    const struct bpf_insn *patch, u32 len)
6425 {
6426 	struct bpf_prog *new_prog;
6427 
6428 	new_prog = bpf_patch_insn_single(env->prog, off, patch, len);
6429 	if (!new_prog)
6430 		return NULL;
6431 	if (adjust_insn_aux_data(env, new_prog->len, off, len))
6432 		return NULL;
6433 	adjust_subprog_starts(env, off, len);
6434 	return new_prog;
6435 }
6436 
6437 /* The verifier does more data flow analysis than llvm and will not
6438  * explore branches that are dead at run time. Malicious programs can
6439  * have dead code too. Therefore replace all dead at-run-time code
6440  * with 'ja -1'.
6441  *
6442  * Plain nops are not optimal: e.g. if they sat at the end of the
6443  * program and through another bug we managed to jump there, we would
6444  * execute beyond program memory. Returning an exception
6445  * code also wouldn't work since we can have subprogs where the dead
6446  * code could be located.
6447  */
6448 static void sanitize_dead_code(struct bpf_verifier_env *env)
6449 {
6450 	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
6451 	struct bpf_insn trap = BPF_JMP_IMM(BPF_JA, 0, 0, -1);
6452 	struct bpf_insn *insn = env->prog->insnsi;
6453 	const int insn_cnt = env->prog->len;
6454 	int i;
6455 
6456 	for (i = 0; i < insn_cnt; i++) {
6457 		if (aux_data[i].seen)
6458 			continue;
6459 		memcpy(insn + i, &trap, sizeof(trap));
6460 	}
6461 }
6462 
6463 /* convert load instructions that access fields of a context type into a
6464  * sequence of instructions that access fields of the underlying structure:
6465  *     struct __sk_buff    -> struct sk_buff
6466  *     struct bpf_sock_ops -> struct sock
6467  */
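/* As a concrete illustration (a sketch; the exact rewrite depends on the
 * program type's convert_ctx_access callback): a socket filter doing
 *
 *	r0 = *(u32 *)(r1 + offsetof(struct __sk_buff, mark))
 *
 * ends up loading sk_buff->mark at its real offset inside 'struct sk_buff'
 * rather than at the offset of the UAPI 'struct __sk_buff' view.
 */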
6468 static int convert_ctx_accesses(struct bpf_verifier_env *env)
6469 {
6470 	const struct bpf_verifier_ops *ops = env->ops;
6471 	int i, cnt, size, ctx_field_size, delta = 0;
6472 	const int insn_cnt = env->prog->len;
6473 	struct bpf_insn insn_buf[16], *insn;
6474 	u32 target_size, size_default, off;
6475 	struct bpf_prog *new_prog;
6476 	enum bpf_access_type type;
6477 	bool is_narrower_load;
6478 
6479 	if (ops->gen_prologue || env->seen_direct_write) {
6480 		if (!ops->gen_prologue) {
6481 			verbose(env, "bpf verifier is misconfigured\n");
6482 			return -EINVAL;
6483 		}
6484 		cnt = ops->gen_prologue(insn_buf, env->seen_direct_write,
6485 					env->prog);
6486 		if (cnt >= ARRAY_SIZE(insn_buf)) {
6487 			verbose(env, "bpf verifier is misconfigured\n");
6488 			return -EINVAL;
6489 		} else if (cnt) {
6490 			new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt);
6491 			if (!new_prog)
6492 				return -ENOMEM;
6493 
6494 			env->prog = new_prog;
6495 			delta += cnt - 1;
6496 		}
6497 	}
6498 
6499 	if (bpf_prog_is_dev_bound(env->prog->aux))
6500 		return 0;
6501 
6502 	insn = env->prog->insnsi + delta;
6503 
6504 	for (i = 0; i < insn_cnt; i++, insn++) {
6505 		bpf_convert_ctx_access_t convert_ctx_access;
6506 
6507 		if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) ||
6508 		    insn->code == (BPF_LDX | BPF_MEM | BPF_H) ||
6509 		    insn->code == (BPF_LDX | BPF_MEM | BPF_W) ||
6510 		    insn->code == (BPF_LDX | BPF_MEM | BPF_DW))
6511 			type = BPF_READ;
6512 		else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) ||
6513 			 insn->code == (BPF_STX | BPF_MEM | BPF_H) ||
6514 			 insn->code == (BPF_STX | BPF_MEM | BPF_W) ||
6515 			 insn->code == (BPF_STX | BPF_MEM | BPF_DW))
6516 			type = BPF_WRITE;
6517 		else
6518 			continue;
6519 
6520 		if (type == BPF_WRITE &&
6521 		    env->insn_aux_data[i + delta].sanitize_stack_off) {
6522 			struct bpf_insn patch[] = {
6523 				/* Sanitize suspicious stack slot with zero.
6524 				 * There are no memory dependencies for this store,
6525 				 * since it's only using frame pointer and immediate
6526 				 * constant of zero
6527 				 */
6528 				BPF_ST_MEM(BPF_DW, BPF_REG_FP,
6529 					   env->insn_aux_data[i + delta].sanitize_stack_off,
6530 					   0),
6531 				/* the original STX instruction will immediately
6532 				 * overwrite the same stack slot with appropriate value
6533 				 */
6534 				*insn,
6535 			};
6536 
6537 			cnt = ARRAY_SIZE(patch);
6538 			new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt);
6539 			if (!new_prog)
6540 				return -ENOMEM;
6541 
6542 			delta    += cnt - 1;
6543 			env->prog = new_prog;
6544 			insn      = new_prog->insnsi + i + delta;
6545 			continue;
6546 		}
6547 
6548 		switch (env->insn_aux_data[i + delta].ptr_type) {
6549 		case PTR_TO_CTX:
6550 			if (!ops->convert_ctx_access)
6551 				continue;
6552 			convert_ctx_access = ops->convert_ctx_access;
6553 			break;
6554 		case PTR_TO_SOCKET:
6555 			convert_ctx_access = bpf_sock_convert_ctx_access;
6556 			break;
6557 		default:
6558 			continue;
6559 		}
6560 
6561 		ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size;
6562 		size = BPF_LDST_BYTES(insn);
6563 
6564 		/* If the read access is a narrower load of the field,
6565 		 * convert to a 4/8-byte load, to minimize program type specific
6566 		 * convert_ctx_access changes. If conversion is successful,
6567 		 * we will apply proper mask to the result.
6568 		 */
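		/* E.g. (a sketch): a 1-byte read of the second byte of a
		 * 4-byte context field becomes a 4-byte read of the whole
		 * field, and the code further down appends a ">> 8" and an
		 * "& 0xff" so the program still only sees the byte it asked
		 * for.
		 */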
6569 		is_narrower_load = size < ctx_field_size;
6570 		size_default = bpf_ctx_off_adjust_machine(ctx_field_size);
6571 		off = insn->off;
6572 		if (is_narrower_load) {
6573 			u8 size_code;
6574 
6575 			if (type == BPF_WRITE) {
6576 				verbose(env, "bpf verifier narrow ctx access misconfigured\n");
6577 				return -EINVAL;
6578 			}
6579 
6580 			size_code = BPF_H;
6581 			if (ctx_field_size == 4)
6582 				size_code = BPF_W;
6583 			else if (ctx_field_size == 8)
6584 				size_code = BPF_DW;
6585 
6586 			insn->off = off & ~(size_default - 1);
6587 			insn->code = BPF_LDX | BPF_MEM | size_code;
6588 		}
6589 
6590 		target_size = 0;
6591 		cnt = convert_ctx_access(type, insn, insn_buf, env->prog,
6592 					 &target_size);
6593 		if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) ||
6594 		    (ctx_field_size && !target_size)) {
6595 			verbose(env, "bpf verifier is misconfigured\n");
6596 			return -EINVAL;
6597 		}
6598 
6599 		if (is_narrower_load && size < target_size) {
6600 			u8 shift = (off & (size_default - 1)) * 8;
6601 
6602 			if (ctx_field_size <= 4) {
6603 				if (shift)
6604 					insn_buf[cnt++] = BPF_ALU32_IMM(BPF_RSH,
6605 									insn->dst_reg,
6606 									shift);
6607 				insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
6608 								(1 << size * 8) - 1);
6609 			} else {
6610 				if (shift)
6611 					insn_buf[cnt++] = BPF_ALU64_IMM(BPF_RSH,
6612 									insn->dst_reg,
6613 									shift);
6614 				insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg,
6615 								(1 << size * 8) - 1);
6616 			}
6617 		}
6618 
6619 		new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
6620 		if (!new_prog)
6621 			return -ENOMEM;
6622 
6623 		delta += cnt - 1;
6624 
6625 		/* keep walking new program and skip insns we just inserted */
6626 		env->prog = new_prog;
6627 		insn      = new_prog->insnsi + i + delta;
6628 	}
6629 
6630 	return 0;
6631 }
6632 
6633 static int jit_subprogs(struct bpf_verifier_env *env)
6634 {
6635 	struct bpf_prog *prog = env->prog, **func, *tmp;
6636 	int i, j, subprog_start, subprog_end = 0, len, subprog;
6637 	struct bpf_insn *insn;
6638 	void *old_bpf_func;
6639 	int err;
6640 
6641 	if (env->subprog_cnt <= 1)
6642 		return 0;
6643 
6644 	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
6645 		if (insn->code != (BPF_JMP | BPF_CALL) ||
6646 		    insn->src_reg != BPF_PSEUDO_CALL)
6647 			continue;
6648 		/* Upon error here we cannot fall back to interpreter but
6649 		 * need a hard reject of the program. Thus -EFAULT is
6650 		 * propagated in any case.
6651 		 */
6652 		subprog = find_subprog(env, i + insn->imm + 1);
6653 		if (subprog < 0) {
6654 			WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
6655 				  i + insn->imm + 1);
6656 			return -EFAULT;
6657 		}
6658 		/* temporarily remember subprog id inside insn instead of
6659 		 * aux_data, since next loop will split up all insns into funcs
6660 		 */
6661 		insn->off = subprog;
6662 		/* remember the original imm in case the JIT fails and a
6663 		 * fallback to the interpreter is needed
6664 		 */
6665 		env->insn_aux_data[i].call_imm = insn->imm;
6666 		/* point imm to __bpf_call_base+1 from JITs point of view */
6667 		insn->imm = 1;
6668 	}
6669 
6670 	err = bpf_prog_alloc_jited_linfo(prog);
6671 	if (err)
6672 		goto out_undo_insn;
6673 
6674 	err = -ENOMEM;
6675 	func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL);
6676 	if (!func)
6677 		goto out_undo_insn;
6678 
6679 	for (i = 0; i < env->subprog_cnt; i++) {
6680 		subprog_start = subprog_end;
6681 		subprog_end = env->subprog_info[i + 1].start;
6682 
6683 		len = subprog_end - subprog_start;
6684 		func[i] = bpf_prog_alloc(bpf_prog_size(len), GFP_USER);
6685 		if (!func[i])
6686 			goto out_free;
6687 		memcpy(func[i]->insnsi, &prog->insnsi[subprog_start],
6688 		       len * sizeof(struct bpf_insn));
6689 		func[i]->type = prog->type;
6690 		func[i]->len = len;
6691 		if (bpf_prog_calc_tag(func[i]))
6692 			goto out_free;
6693 		func[i]->is_func = 1;
6694 		func[i]->aux->func_idx = i;
6695 		/* the btf and func_info will be freed only at prog->aux */
6696 		func[i]->aux->btf = prog->aux->btf;
6697 		func[i]->aux->func_info = prog->aux->func_info;
6698 
6699 		/* Use bpf_prog_F_tag to indicate functions in stack traces.
6700 		 * Long term we would need debug info to populate the names
6701 		 */
6702 		func[i]->aux->name[0] = 'F';
6703 		func[i]->aux->stack_depth = env->subprog_info[i].stack_depth;
6704 		func[i]->jit_requested = 1;
6705 		func[i]->aux->linfo = prog->aux->linfo;
6706 		func[i]->aux->nr_linfo = prog->aux->nr_linfo;
6707 		func[i]->aux->jited_linfo = prog->aux->jited_linfo;
6708 		func[i]->aux->linfo_idx = env->subprog_info[i].linfo_idx;
6709 		func[i] = bpf_int_jit_compile(func[i]);
6710 		if (!func[i]->jited) {
6711 			err = -ENOTSUPP;
6712 			goto out_free;
6713 		}
6714 		cond_resched();
6715 	}
6716 	/* at this point all bpf functions were successfully JITed;
6717 	 * now populate all bpf_calls with correct addresses and
6718 	 * run the last pass of the JIT
6719 	 */
6720 	for (i = 0; i < env->subprog_cnt; i++) {
6721 		insn = func[i]->insnsi;
6722 		for (j = 0; j < func[i]->len; j++, insn++) {
6723 			if (insn->code != (BPF_JMP | BPF_CALL) ||
6724 			    insn->src_reg != BPF_PSEUDO_CALL)
6725 				continue;
6726 			subprog = insn->off;
6727 			insn->imm = (u64 (*)(u64, u64, u64, u64, u64))
6728 				func[subprog]->bpf_func -
6729 				__bpf_call_base;
6730 		}
6731 
6732 		/* we use the aux data to keep a list of the start addresses
6733 		 * of the JITed images for each function in the program
6734 		 *
6735 		 * for some architectures, such as powerpc64, the imm field
6736 		 * might not be large enough to hold the offset of the start
6737 		 * address of the callee's JITed image from __bpf_call_base
6738 		 *
6739 		 * in such cases, we can lookup the start address of a callee
6740 		 * by using its subprog id, available from the off field of
6741 		 * the call instruction, as an index for this list
6742 		 */
6743 		func[i]->aux->func = func;
6744 		func[i]->aux->func_cnt = env->subprog_cnt;
6745 	}
6746 	for (i = 0; i < env->subprog_cnt; i++) {
6747 		old_bpf_func = func[i]->bpf_func;
6748 		tmp = bpf_int_jit_compile(func[i]);
6749 		if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) {
6750 			verbose(env, "JIT doesn't support bpf-to-bpf calls\n");
6751 			err = -ENOTSUPP;
6752 			goto out_free;
6753 		}
6754 		cond_resched();
6755 	}
6756 
6757 	/* finally lock prog and jit images for all functions and
6758 	 * populate kallsysm
6759 	 * populate kallsyms
6760 	for (i = 0; i < env->subprog_cnt; i++) {
6761 		bpf_prog_lock_ro(func[i]);
6762 		bpf_prog_kallsyms_add(func[i]);
6763 	}
6764 
6765 	/* Last step: make the now unused interpreter insns from the main
6766 	 * prog consistent for later dump requests, so they look the
6767 	 * same as if they had been interpreted only.
6768 	 */
6769 	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
6770 		if (insn->code != (BPF_JMP | BPF_CALL) ||
6771 		    insn->src_reg != BPF_PSEUDO_CALL)
6772 			continue;
6773 		insn->off = env->insn_aux_data[i].call_imm;
6774 		subprog = find_subprog(env, i + insn->off + 1);
6775 		insn->imm = subprog;
6776 	}
6777 
6778 	prog->jited = 1;
6779 	prog->bpf_func = func[0]->bpf_func;
6780 	prog->aux->func = func;
6781 	prog->aux->func_cnt = env->subprog_cnt;
6782 	bpf_prog_free_unused_jited_linfo(prog);
6783 	return 0;
6784 out_free:
6785 	for (i = 0; i < env->subprog_cnt; i++)
6786 		if (func[i])
6787 			bpf_jit_free(func[i]);
6788 	kfree(func);
6789 out_undo_insn:
6790 	/* cleanup main prog to be interpreted */
6791 	prog->jit_requested = 0;
6792 	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
6793 		if (insn->code != (BPF_JMP | BPF_CALL) ||
6794 		    insn->src_reg != BPF_PSEUDO_CALL)
6795 			continue;
6796 		insn->off = 0;
6797 		insn->imm = env->insn_aux_data[i].call_imm;
6798 	}
6799 	bpf_prog_free_jited_linfo(prog);
6800 	return err;
6801 }
6802 
6803 static int fixup_call_args(struct bpf_verifier_env *env)
6804 {
6805 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
6806 	struct bpf_prog *prog = env->prog;
6807 	struct bpf_insn *insn = prog->insnsi;
6808 	int i, depth;
6809 #endif
6810 	int err = 0;
6811 
6812 	if (env->prog->jit_requested &&
6813 	    !bpf_prog_is_dev_bound(env->prog->aux)) {
6814 		err = jit_subprogs(env);
6815 		if (err == 0)
6816 			return 0;
6817 		if (err == -EFAULT)
6818 			return err;
6819 	}
6820 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
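	/* JITing of subprogs failed or was not requested; the interpreter
	 * will handle bpf-to-bpf calls instead, which requires patching each
	 * pseudo call with its callee's stack depth via bpf_patch_call_args().
	 */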
6821 	for (i = 0; i < prog->len; i++, insn++) {
6822 		if (insn->code != (BPF_JMP | BPF_CALL) ||
6823 		    insn->src_reg != BPF_PSEUDO_CALL)
6824 			continue;
6825 		depth = get_callee_stack_depth(env, insn, i);
6826 		if (depth < 0)
6827 			return depth;
6828 		bpf_patch_call_args(insn, depth);
6829 	}
6830 	err = 0;
6831 #endif
6832 	return err;
6833 }
6834 
6835 /* fixup insn->imm field of bpf_call instructions
6836  * and inline eligible helpers as an explicit sequence of BPF instructions
6837  *
6838  * this function is called after the eBPF program has passed verification
6839  */
6840 static int fixup_bpf_calls(struct bpf_verifier_env *env)
6841 {
6842 	struct bpf_prog *prog = env->prog;
6843 	struct bpf_insn *insn = prog->insnsi;
6844 	const struct bpf_func_proto *fn;
6845 	const int insn_cnt = prog->len;
6846 	const struct bpf_map_ops *ops;
6847 	struct bpf_insn_aux_data *aux;
6848 	struct bpf_insn insn_buf[16];
6849 	struct bpf_prog *new_prog;
6850 	struct bpf_map *map_ptr;
6851 	int i, cnt, delta = 0;
6852 
6853 	for (i = 0; i < insn_cnt; i++, insn++) {
6854 		if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) ||
6855 		    insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
6856 		    insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
6857 		    insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
6858 			bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
6859 			struct bpf_insn mask_and_div[] = {
6860 				BPF_MOV32_REG(insn->src_reg, insn->src_reg),
6861 				/* Rx div 0 -> 0 */
6862 				BPF_JMP_IMM(BPF_JNE, insn->src_reg, 0, 2),
6863 				BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg),
6864 				BPF_JMP_IMM(BPF_JA, 0, 0, 1),
6865 				*insn,
6866 			};
6867 			struct bpf_insn mask_and_mod[] = {
6868 				BPF_MOV32_REG(insn->src_reg, insn->src_reg),
6869 				/* Rx mod 0 -> Rx */
6870 				BPF_JMP_IMM(BPF_JEQ, insn->src_reg, 0, 1),
6871 				*insn,
6872 			};
6873 			struct bpf_insn *patchlet;
6874 
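			/* For 32-bit div/mod the leading MOV32 truncates the
			 * divisor to 32 bits before the zero test; 64-bit ops
			 * (is64) don't need it, so the patchlet below skips
			 * that first instruction.
			 */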
6875 			if (insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
6876 			    insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
6877 				patchlet = mask_and_div + (is64 ? 1 : 0);
6878 				cnt = ARRAY_SIZE(mask_and_div) - (is64 ? 1 : 0);
6879 			} else {
6880 				patchlet = mask_and_mod + (is64 ? 1 : 0);
6881 				cnt = ARRAY_SIZE(mask_and_mod) - (is64 ? 1 : 0);
6882 			}
6883 
6884 			new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
6885 			if (!new_prog)
6886 				return -ENOMEM;
6887 
6888 			delta    += cnt - 1;
6889 			env->prog = prog = new_prog;
6890 			insn      = new_prog->insnsi + i + delta;
6891 			continue;
6892 		}
6893 
6894 		if (BPF_CLASS(insn->code) == BPF_LD &&
6895 		    (BPF_MODE(insn->code) == BPF_ABS ||
6896 		     BPF_MODE(insn->code) == BPF_IND)) {
6897 			cnt = env->ops->gen_ld_abs(insn, insn_buf);
6898 			if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
6899 				verbose(env, "bpf verifier is misconfigured\n");
6900 				return -EINVAL;
6901 			}
6902 
6903 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
6904 			if (!new_prog)
6905 				return -ENOMEM;
6906 
6907 			delta    += cnt - 1;
6908 			env->prog = prog = new_prog;
6909 			insn      = new_prog->insnsi + i + delta;
6910 			continue;
6911 		}
6912 
6913 		if (insn->code == (BPF_ALU64 | BPF_ADD | BPF_X) ||
6914 		    insn->code == (BPF_ALU64 | BPF_SUB | BPF_X)) {
6915 			const u8 code_add = BPF_ALU64 | BPF_ADD | BPF_X;
6916 			const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X;
6917 			struct bpf_insn insn_buf[16];
6918 			struct bpf_insn *patch = &insn_buf[0];
6919 			bool issrc, isneg;
6920 			u32 off_reg;
6921 
6922 			aux = &env->insn_aux_data[i + delta];
6923 			if (!aux->alu_state)
6924 				continue;
6925 
6926 			isneg = aux->alu_state & BPF_ALU_NEG_VALUE;
6927 			issrc = (aux->alu_state & BPF_ALU_SANITIZE) ==
6928 				BPF_ALU_SANITIZE_SRC;
6929 
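			/* The sequence below computes a mask in BPF_REG_AX that
			 * is all-ones only when off_reg lies within
			 * [0, alu_limit - 1]; ANDing with it clamps any
			 * out-of-range offset to 0, even under misspeculation.
			 */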
6930 			off_reg = issrc ? insn->src_reg : insn->dst_reg;
6931 			if (isneg)
6932 				*patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
6933 			*patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit - 1);
6934 			*patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg);
6935 			*patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg);
6936 			*patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);
6937 			*patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63);
6938 			if (issrc) {
6939 				*patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX,
6940 							 off_reg);
6941 				insn->src_reg = BPF_REG_AX;
6942 			} else {
6943 				*patch++ = BPF_ALU64_REG(BPF_AND, off_reg,
6944 							 BPF_REG_AX);
6945 			}
6946 			if (isneg)
6947 				insn->code = insn->code == code_add ?
6948 					     code_sub : code_add;
6949 			*patch++ = *insn;
6950 			if (issrc && isneg)
6951 				*patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
6952 			cnt = patch - insn_buf;
6953 
6954 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
6955 			if (!new_prog)
6956 				return -ENOMEM;
6957 
6958 			delta    += cnt - 1;
6959 			env->prog = prog = new_prog;
6960 			insn      = new_prog->insnsi + i + delta;
6961 			continue;
6962 		}
6963 
6964 		if (insn->code != (BPF_JMP | BPF_CALL))
6965 			continue;
6966 		if (insn->src_reg == BPF_PSEUDO_CALL)
6967 			continue;
6968 
6969 		if (insn->imm == BPF_FUNC_get_route_realm)
6970 			prog->dst_needed = 1;
6971 		if (insn->imm == BPF_FUNC_get_prandom_u32)
6972 			bpf_user_rnd_init_once();
6973 		if (insn->imm == BPF_FUNC_override_return)
6974 			prog->kprobe_override = 1;
6975 		if (insn->imm == BPF_FUNC_tail_call) {
6976 			/* If we tail call into other programs, we
6977 			 * cannot make any assumptions since they can
6978 			 * be replaced dynamically during runtime in
6979 			 * the program array.
6980 			 */
6981 			prog->cb_access = 1;
6982 			env->prog->aux->stack_depth = MAX_BPF_STACK;
6983 			env->prog->aux->max_pkt_offset = MAX_PACKET_OFF;
6984 
6985 			/* mark bpf_tail_call as different opcode to avoid
6986 			 * conditional branch in the interpreter for every normal
6987 			 * call and to prevent accidental JITing by a JIT compiler
6988 			 * that doesn't support bpf_tail_call yet
6989 			 */
6990 			insn->imm = 0;
6991 			insn->code = BPF_JMP | BPF_TAIL_CALL;
6992 
6993 			aux = &env->insn_aux_data[i + delta];
6994 			if (!bpf_map_ptr_unpriv(aux))
6995 				continue;
6996 
6997 			/* instead of changing every JIT dealing with tail_call
6998 			 * emit two extra insns:
6999 			 * if (index >= max_entries) goto out;
7000 			 * index &= array->index_mask;
7001 			 * to avoid out-of-bounds cpu speculation
7002 			 */
7003 			if (bpf_map_ptr_poisoned(aux)) {
7004 				verbose(env, "tail_call abusing map_ptr\n");
7005 				return -EINVAL;
7006 			}
7007 
7008 			map_ptr = BPF_MAP_PTR(aux->map_state);
7009 			insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3,
7010 						  map_ptr->max_entries, 2);
7011 			insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3,
7012 						    container_of(map_ptr,
7013 								 struct bpf_array,
7014 								 map)->index_mask);
7015 			insn_buf[2] = *insn;
7016 			cnt = 3;
7017 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
7018 			if (!new_prog)
7019 				return -ENOMEM;
7020 
7021 			delta    += cnt - 1;
7022 			env->prog = prog = new_prog;
7023 			insn      = new_prog->insnsi + i + delta;
7024 			continue;
7025 		}
7026 
7027 		/* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup
7028 		 * and other inlining handlers are currently limited to 64 bit
7029 		 * only.
7030 		 */
7031 		if (prog->jit_requested && BITS_PER_LONG == 64 &&
7032 		    (insn->imm == BPF_FUNC_map_lookup_elem ||
7033 		     insn->imm == BPF_FUNC_map_update_elem ||
7034 		     insn->imm == BPF_FUNC_map_delete_elem ||
7035 		     insn->imm == BPF_FUNC_map_push_elem   ||
7036 		     insn->imm == BPF_FUNC_map_pop_elem    ||
7037 		     insn->imm == BPF_FUNC_map_peek_elem)) {
7038 			aux = &env->insn_aux_data[i + delta];
7039 			if (bpf_map_ptr_poisoned(aux))
7040 				goto patch_call_imm;
7041 
7042 			map_ptr = BPF_MAP_PTR(aux->map_state);
7043 			ops = map_ptr->ops;
7044 			if (insn->imm == BPF_FUNC_map_lookup_elem &&
7045 			    ops->map_gen_lookup) {
7046 				cnt = ops->map_gen_lookup(map_ptr, insn_buf);
7047 				if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
7048 					verbose(env, "bpf verifier is misconfigured\n");
7049 					return -EINVAL;
7050 				}
7051 
7052 				new_prog = bpf_patch_insn_data(env, i + delta,
7053 							       insn_buf, cnt);
7054 				if (!new_prog)
7055 					return -ENOMEM;
7056 
7057 				delta    += cnt - 1;
7058 				env->prog = prog = new_prog;
7059 				insn      = new_prog->insnsi + i + delta;
7060 				continue;
7061 			}
7062 
7063 			BUILD_BUG_ON(!__same_type(ops->map_lookup_elem,
7064 				     (void *(*)(struct bpf_map *map, void *key))NULL));
7065 			BUILD_BUG_ON(!__same_type(ops->map_delete_elem,
7066 				     (int (*)(struct bpf_map *map, void *key))NULL));
7067 			BUILD_BUG_ON(!__same_type(ops->map_update_elem,
7068 				     (int (*)(struct bpf_map *map, void *key, void *value,
7069 					      u64 flags))NULL));
7070 			BUILD_BUG_ON(!__same_type(ops->map_push_elem,
7071 				     (int (*)(struct bpf_map *map, void *value,
7072 					      u64 flags))NULL));
7073 			BUILD_BUG_ON(!__same_type(ops->map_pop_elem,
7074 				     (int (*)(struct bpf_map *map, void *value))NULL));
7075 			BUILD_BUG_ON(!__same_type(ops->map_peek_elem,
7076 				     (int (*)(struct bpf_map *map, void *value))NULL));
7077 
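			/* Patch the helper call into a direct call to the map's
			 * own ops function: BPF_CAST_CALL gives its address,
			 * stored in insn->imm as an offset from __bpf_call_base,
			 * so the call bypasses the generic helper wrapper.
			 */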
7078 			switch (insn->imm) {
7079 			case BPF_FUNC_map_lookup_elem:
7080 				insn->imm = BPF_CAST_CALL(ops->map_lookup_elem) -
7081 					    __bpf_call_base;
7082 				continue;
7083 			case BPF_FUNC_map_update_elem:
7084 				insn->imm = BPF_CAST_CALL(ops->map_update_elem) -
7085 					    __bpf_call_base;
7086 				continue;
7087 			case BPF_FUNC_map_delete_elem:
7088 				insn->imm = BPF_CAST_CALL(ops->map_delete_elem) -
7089 					    __bpf_call_base;
7090 				continue;
7091 			case BPF_FUNC_map_push_elem:
7092 				insn->imm = BPF_CAST_CALL(ops->map_push_elem) -
7093 					    __bpf_call_base;
7094 				continue;
7095 			case BPF_FUNC_map_pop_elem:
7096 				insn->imm = BPF_CAST_CALL(ops->map_pop_elem) -
7097 					    __bpf_call_base;
7098 				continue;
7099 			case BPF_FUNC_map_peek_elem:
7100 				insn->imm = BPF_CAST_CALL(ops->map_peek_elem) -
7101 					    __bpf_call_base;
7102 				continue;
7103 			}
7104 
7105 			goto patch_call_imm;
7106 		}
7107 
7108 patch_call_imm:
7109 		fn = env->ops->get_func_proto(insn->imm, env->prog);
7110 		/* all functions that have a prototype and that the verifier
7111 		 * allowed programs to call must be real in-kernel functions
7112 		 */
7113 		if (!fn->func) {
7114 			verbose(env,
7115 				"kernel subsystem misconfigured func %s#%d\n",
7116 				func_id_name(insn->imm), insn->imm);
7117 			return -EFAULT;
7118 		}
7119 		insn->imm = fn->func - __bpf_call_base;
7120 	}
7121 
7122 	return 0;
7123 }
7124 
7125 static void free_states(struct bpf_verifier_env *env)
7126 {
7127 	struct bpf_verifier_state_list *sl, *sln;
7128 	int i;
7129 
7130 	if (!env->explored_states)
7131 		return;
7132 
7133 	for (i = 0; i < env->prog->len; i++) {
7134 		sl = env->explored_states[i];
7135 
7136 		if (sl)
7137 			while (sl != STATE_LIST_MARK) {
7138 				sln = sl->next;
7139 				free_verifier_state(&sl->state, false);
7140 				kfree(sl);
7141 				sl = sln;
7142 			}
7143 	}
7144 
7145 	kfree(env->explored_states);
7146 }
7147 
7148 int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
7149 	      union bpf_attr __user *uattr)
7150 {
7151 	struct bpf_verifier_env *env;
7152 	struct bpf_verifier_log *log;
7153 	int ret = -EINVAL;
7154 
7155 	/* no program is valid */
7156 	if (ARRAY_SIZE(bpf_verifier_ops) == 0)
7157 		return -EINVAL;
7158 
7159 	/* 'struct bpf_verifier_env' can be global, but since it's not small,
7160 	 * allocate/free it every time bpf_check() is called
7161 	 */
7162 	env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
7163 	if (!env)
7164 		return -ENOMEM;
7165 	log = &env->log;
7166 
7167 	env->insn_aux_data =
7168 		vzalloc(array_size(sizeof(struct bpf_insn_aux_data),
7169 				   (*prog)->len));
7170 	ret = -ENOMEM;
7171 	if (!env->insn_aux_data)
7172 		goto err_free_env;
7173 	env->prog = *prog;
7174 	env->ops = bpf_verifier_ops[env->prog->type];
7175 
7176 	/* grab the mutex to protect the few globals used by the verifier */
7177 	mutex_lock(&bpf_verifier_lock);
7178 
7179 	if (attr->log_level || attr->log_buf || attr->log_size) {
7180 		/* user requested verbose verifier output
7181 		 * and supplied buffer to store the verification trace
7182 		 */
7183 		log->level = attr->log_level;
7184 		log->ubuf = (char __user *) (unsigned long) attr->log_buf;
7185 		log->len_total = attr->log_size;
7186 
7187 		ret = -EINVAL;
7188 		/* log attributes have to be sane */
7189 		if (log->len_total < 128 || log->len_total > UINT_MAX >> 8 ||
7190 		    !log->level || !log->ubuf)
7191 			goto err_unlock;
7192 	}
7193 
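	/* Alignment policy: BPF_F_STRICT_ALIGNMENT requests strict checks,
	 * architectures without efficient unaligned access force them on,
	 * and BPF_F_ANY_ALIGNMENT (applied last) turns them back off.
	 */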
7194 	env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT);
7195 	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
7196 		env->strict_alignment = true;
7197 	if (attr->prog_flags & BPF_F_ANY_ALIGNMENT)
7198 		env->strict_alignment = false;
7199 
7200 	ret = replace_map_fd_with_map_ptr(env);
7201 	if (ret < 0)
7202 		goto skip_full_check;
7203 
7204 	if (bpf_prog_is_dev_bound(env->prog->aux)) {
7205 		ret = bpf_prog_offload_verifier_prep(env->prog);
7206 		if (ret)
7207 			goto skip_full_check;
7208 	}
7209 
7210 	env->explored_states = kcalloc(env->prog->len,
7211 				       sizeof(struct bpf_verifier_state_list *),
7212 				       GFP_USER);
7213 	ret = -ENOMEM;
7214 	if (!env->explored_states)
7215 		goto skip_full_check;
7216 
7217 	env->allow_ptr_leaks = capable(CAP_SYS_ADMIN);
7218 
7219 	ret = check_subprogs(env);
7220 	if (ret < 0)
7221 		goto skip_full_check;
7222 
7223 	ret = check_btf_info(env, attr, uattr);
7224 	if (ret < 0)
7225 		goto skip_full_check;
7226 
7227 	ret = check_cfg(env);
7228 	if (ret < 0)
7229 		goto skip_full_check;
7230 
7231 	ret = do_check(env);
7232 	if (env->cur_state) {
7233 		free_verifier_state(env->cur_state, true);
7234 		env->cur_state = NULL;
7235 	}
7236 
7237 	if (ret == 0 && bpf_prog_is_dev_bound(env->prog->aux))
7238 		ret = bpf_prog_offload_finalize(env);
7239 
7240 skip_full_check:
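	/* drain any branch states still queued from the (possibly aborted) walk */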
7241 	while (!pop_stack(env, NULL, NULL));
7242 	free_states(env);
7243 
7244 	if (ret == 0)
7245 		ret = check_max_stack_depth(env);
7246 
7247 	/* instruction rewrites happen after this point */
7248 	if (ret == 0)
7249 		sanitize_dead_code(env);
7250 
7251 	if (ret == 0)
7252 		/* program is valid, convert *(u32*)(ctx + off) accesses */
7253 		ret = convert_ctx_accesses(env);
7254 
7255 	if (ret == 0)
7256 		ret = fixup_bpf_calls(env);
7257 
7258 	if (ret == 0)
7259 		ret = fixup_call_args(env);
7260 
7261 	if (log->level && bpf_verifier_log_full(log))
7262 		ret = -ENOSPC;
7263 	if (log->level && !log->ubuf) {
7264 		ret = -EFAULT;
7265 		goto err_release_maps;
7266 	}
7267 
7268 	if (ret == 0 && env->used_map_cnt) {
7269 		/* if program passed verifier, update used_maps in bpf_prog_info */
7270 		env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt,
7271 							  sizeof(env->used_maps[0]),
7272 							  GFP_KERNEL);
7273 
7274 		if (!env->prog->aux->used_maps) {
7275 			ret = -ENOMEM;
7276 			goto err_release_maps;
7277 		}
7278 
7279 		memcpy(env->prog->aux->used_maps, env->used_maps,
7280 		       sizeof(env->used_maps[0]) * env->used_map_cnt);
7281 		env->prog->aux->used_map_cnt = env->used_map_cnt;
7282 
7283 		/* program is valid. Convert pseudo bpf_ld_imm64 into generic
7284 		 * bpf_ld_imm64 instructions
7285 		 */
7286 		convert_pseudo_ld_imm64(env);
7287 	}
7288 
7289 	if (ret == 0)
7290 		adjust_btf_func(env);
7291 
7292 err_release_maps:
7293 	if (!env->prog->aux->used_maps)
7294 		/* if we didn't copy map pointers into bpf_prog_info, release
7295 		 * them now. Otherwise free_used_maps() will release them.
7296 		 */
7297 		release_maps(env);
7298 	*prog = env->prog;
7299 err_unlock:
7300 	mutex_unlock(&bpf_verifier_lock);
7301 	vfree(env->insn_aux_data);
7302 err_free_env:
7303 	kfree(env);
7304 	return ret;
7305 }
7306