xref: /openbmc/linux/kernel/bpf/verifier.c (revision 711aab1d)
1 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
2  * Copyright (c) 2016 Facebook
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of version 2 of the GNU General Public
6  * License as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11  * General Public License for more details.
12  */
13 #include <linux/kernel.h>
14 #include <linux/types.h>
15 #include <linux/slab.h>
16 #include <linux/bpf.h>
17 #include <linux/bpf_verifier.h>
18 #include <linux/filter.h>
19 #include <net/netlink.h>
20 #include <linux/file.h>
21 #include <linux/vmalloc.h>
22 #include <linux/stringify.h>
23 
24 /* bpf_check() is a static code analyzer that walks eBPF program
25  * instruction by instruction and updates register/stack state.
26  * All paths of conditional branches are analyzed until 'bpf_exit' insn.
27  *
28  * The first pass is depth-first-search to check that the program is a DAG.
29  * It rejects the following programs:
30  * - larger than BPF_MAXINSNS insns
31  * - if loop is present (detected via back-edge)
32  * - unreachable insns exist (shouldn't be a forest. program = one function)
33  * - out of bounds or malformed jumps
34  * The second pass is all possible path descent from the 1st insn.
35  * Since it's analyzing all pathes through the program, the length of the
36  * analysis is limited to 64k insn, which may be hit even if total number of
37  * insn is less then 4K, but there are too many branches that change stack/regs.
38  * Number of 'branches to be analyzed' is limited to 1k
39  *
40  * On entry to each instruction, each register has a type, and the instruction
41  * changes the types of the registers depending on instruction semantics.
42  * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is
43  * copied to R1.
44  *
45  * All registers are 64-bit.
46  * R0 - return register
47  * R1-R5 argument passing registers
48  * R6-R9 callee saved registers
49  * R10 - frame pointer read-only
50  *
51  * At the start of BPF program the register R1 contains a pointer to bpf_context
52  * and has type PTR_TO_CTX.
53  *
54  * Verifier tracks arithmetic operations on pointers in case:
55  *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
56  *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
57  * 1st insn copies R10 (which has FRAME_PTR) type into R1
58  * and 2nd arithmetic instruction is pattern matched to recognize
59  * that it wants to construct a pointer to some element within stack.
60  * So after 2nd insn, the register R1 has type PTR_TO_STACK
61  * (and -20 constant is saved for further stack bounds checking).
62  * Meaning that this reg is a pointer to stack plus known immediate constant.
63  *
64  * Most of the time the registers have SCALAR_VALUE type, which
65  * means the register has some value, but it's not a valid pointer.
66  * (like pointer plus pointer becomes SCALAR_VALUE type)
67  *
68  * When verifier sees load or store instructions the type of base register
69  * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK. These are three pointer
70  * types recognized by check_mem_access() function.
71  *
72  * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value'
73  * and the range of [ptr, ptr + map's value_size) is accessible.
74  *
75  * registers used to pass values to function calls are checked against
76  * function argument constraints.
77  *
78  * ARG_PTR_TO_MAP_KEY is one of such argument constraints.
79  * It means that the register type passed to this function must be
80  * PTR_TO_STACK and it will be used inside the function as
81  * 'pointer to map element key'
82  *
83  * For example the argument constraints for bpf_map_lookup_elem():
84  *   .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
85  *   .arg1_type = ARG_CONST_MAP_PTR,
86  *   .arg2_type = ARG_PTR_TO_MAP_KEY,
87  *
88  * ret_type says that this function returns 'pointer to map elem value or null'
89  * function expects 1st argument to be a const pointer to 'struct bpf_map' and
90  * 2nd argument should be a pointer to stack, which will be used inside
91  * the helper function as a pointer to map element key.
92  *
93  * On the kernel side the helper function looks like:
94  * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
95  * {
96  *    struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
97  *    void *key = (void *) (unsigned long) r2;
98  *    void *value;
99  *
100  *    here kernel can access 'key' and 'map' pointers safely, knowing that
101  *    [key, key + map->key_size) bytes are valid and were initialized on
102  *    the stack of eBPF program.
103  * }
104  *
105  * Corresponding eBPF program may look like:
106  *    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),  // after this insn R2 type is FRAME_PTR
107  *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
108  *    BPF_LD_MAP_FD(BPF_REG_1, map_fd),      // after this insn R1 type is CONST_PTR_TO_MAP
109  *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
110  * here verifier looks at prototype of map_lookup_elem() and sees:
111  * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok,
112  * Now verifier knows that this map has key of R1->map_ptr->key_size bytes
113  *
114  * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far,
115  * Now verifier checks that [R2, R2 + map's key_size) are within stack limits
116  * and were initialized prior to this call.
117  * If it's ok, then verifier allows this BPF_CALL insn and looks at
118  * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets
119  * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function
120  * returns ether pointer to map value or NULL.
121  *
122  * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
123  * insn, the register holding that pointer in the true branch changes state to
124  * PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false
125  * branch. See check_cond_jmp_op().
126  *
127  * After the call R0 is set to return type of the function and registers R1-R5
128  * are set to NOT_INIT to indicate that they are no longer readable.
129  */
130 
131 /* verifier_state + insn_idx are pushed to stack when branch is encountered */
132 struct bpf_verifier_stack_elem {
133 	/* verifer state is 'st'
134 	 * before processing instruction 'insn_idx'
135 	 * and after processing instruction 'prev_insn_idx'
136 	 */
137 	struct bpf_verifier_state st;
138 	int insn_idx;
139 	int prev_insn_idx;
140 	struct bpf_verifier_stack_elem *next;
141 };
142 
143 #define BPF_COMPLEXITY_LIMIT_INSNS	131072
144 #define BPF_COMPLEXITY_LIMIT_STACK	1024
145 
146 #define BPF_MAP_PTR_POISON ((void *)0xeB9F + POISON_POINTER_DELTA)
147 
148 struct bpf_call_arg_meta {
149 	struct bpf_map *map_ptr;
150 	bool raw_mode;
151 	bool pkt_access;
152 	int regno;
153 	int access_size;
154 };
155 
156 /* verbose verifier prints what it's seeing
157  * bpf_check() is called under lock, so no race to access these global vars
158  */
159 static u32 log_level, log_size, log_len;
160 static char *log_buf;
161 
162 static DEFINE_MUTEX(bpf_verifier_lock);
163 
164 /* log_level controls verbosity level of eBPF verifier.
165  * verbose() is used to dump the verification trace to the log, so the user
166  * can figure out what's wrong with the program
167  */
168 static __printf(1, 2) void verbose(const char *fmt, ...)
169 {
170 	va_list args;
171 
172 	if (log_level == 0 || log_len >= log_size - 1)
173 		return;
174 
175 	va_start(args, fmt);
176 	log_len += vscnprintf(log_buf + log_len, log_size - log_len, fmt, args);
177 	va_end(args);
178 }
179 
180 /* string representation of 'enum bpf_reg_type' */
181 static const char * const reg_type_str[] = {
182 	[NOT_INIT]		= "?",
183 	[SCALAR_VALUE]		= "inv",
184 	[PTR_TO_CTX]		= "ctx",
185 	[CONST_PTR_TO_MAP]	= "map_ptr",
186 	[PTR_TO_MAP_VALUE]	= "map_value",
187 	[PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null",
188 	[PTR_TO_STACK]		= "fp",
189 	[PTR_TO_PACKET]		= "pkt",
190 	[PTR_TO_PACKET_END]	= "pkt_end",
191 };
192 
193 #define __BPF_FUNC_STR_FN(x) [BPF_FUNC_ ## x] = __stringify(bpf_ ## x)
194 static const char * const func_id_str[] = {
195 	__BPF_FUNC_MAPPER(__BPF_FUNC_STR_FN)
196 };
197 #undef __BPF_FUNC_STR_FN
198 
199 static const char *func_id_name(int id)
200 {
201 	BUILD_BUG_ON(ARRAY_SIZE(func_id_str) != __BPF_FUNC_MAX_ID);
202 
203 	if (id >= 0 && id < __BPF_FUNC_MAX_ID && func_id_str[id])
204 		return func_id_str[id];
205 	else
206 		return "unknown";
207 }
208 
209 static void print_verifier_state(struct bpf_verifier_state *state)
210 {
211 	struct bpf_reg_state *reg;
212 	enum bpf_reg_type t;
213 	int i;
214 
215 	for (i = 0; i < MAX_BPF_REG; i++) {
216 		reg = &state->regs[i];
217 		t = reg->type;
218 		if (t == NOT_INIT)
219 			continue;
220 		verbose(" R%d=%s", i, reg_type_str[t]);
221 		if ((t == SCALAR_VALUE || t == PTR_TO_STACK) &&
222 		    tnum_is_const(reg->var_off)) {
223 			/* reg->off should be 0 for SCALAR_VALUE */
224 			verbose("%lld", reg->var_off.value + reg->off);
225 		} else {
226 			verbose("(id=%d", reg->id);
227 			if (t != SCALAR_VALUE)
228 				verbose(",off=%d", reg->off);
229 			if (t == PTR_TO_PACKET)
230 				verbose(",r=%d", reg->range);
231 			else if (t == CONST_PTR_TO_MAP ||
232 				 t == PTR_TO_MAP_VALUE ||
233 				 t == PTR_TO_MAP_VALUE_OR_NULL)
234 				verbose(",ks=%d,vs=%d",
235 					reg->map_ptr->key_size,
236 					reg->map_ptr->value_size);
237 			if (tnum_is_const(reg->var_off)) {
238 				/* Typically an immediate SCALAR_VALUE, but
239 				 * could be a pointer whose offset is too big
240 				 * for reg->off
241 				 */
242 				verbose(",imm=%llx", reg->var_off.value);
243 			} else {
244 				if (reg->smin_value != reg->umin_value &&
245 				    reg->smin_value != S64_MIN)
246 					verbose(",smin_value=%lld",
247 						(long long)reg->smin_value);
248 				if (reg->smax_value != reg->umax_value &&
249 				    reg->smax_value != S64_MAX)
250 					verbose(",smax_value=%lld",
251 						(long long)reg->smax_value);
252 				if (reg->umin_value != 0)
253 					verbose(",umin_value=%llu",
254 						(unsigned long long)reg->umin_value);
255 				if (reg->umax_value != U64_MAX)
256 					verbose(",umax_value=%llu",
257 						(unsigned long long)reg->umax_value);
258 				if (!tnum_is_unknown(reg->var_off)) {
259 					char tn_buf[48];
260 
261 					tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
262 					verbose(",var_off=%s", tn_buf);
263 				}
264 			}
265 			verbose(")");
266 		}
267 	}
268 	for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
269 		if (state->stack_slot_type[i] == STACK_SPILL)
270 			verbose(" fp%d=%s", -MAX_BPF_STACK + i,
271 				reg_type_str[state->spilled_regs[i / BPF_REG_SIZE].type]);
272 	}
273 	verbose("\n");
274 }
275 
276 static const char *const bpf_class_string[] = {
277 	[BPF_LD]    = "ld",
278 	[BPF_LDX]   = "ldx",
279 	[BPF_ST]    = "st",
280 	[BPF_STX]   = "stx",
281 	[BPF_ALU]   = "alu",
282 	[BPF_JMP]   = "jmp",
283 	[BPF_RET]   = "BUG",
284 	[BPF_ALU64] = "alu64",
285 };
286 
287 static const char *const bpf_alu_string[16] = {
288 	[BPF_ADD >> 4]  = "+=",
289 	[BPF_SUB >> 4]  = "-=",
290 	[BPF_MUL >> 4]  = "*=",
291 	[BPF_DIV >> 4]  = "/=",
292 	[BPF_OR  >> 4]  = "|=",
293 	[BPF_AND >> 4]  = "&=",
294 	[BPF_LSH >> 4]  = "<<=",
295 	[BPF_RSH >> 4]  = ">>=",
296 	[BPF_NEG >> 4]  = "neg",
297 	[BPF_MOD >> 4]  = "%=",
298 	[BPF_XOR >> 4]  = "^=",
299 	[BPF_MOV >> 4]  = "=",
300 	[BPF_ARSH >> 4] = "s>>=",
301 	[BPF_END >> 4]  = "endian",
302 };
303 
304 static const char *const bpf_ldst_string[] = {
305 	[BPF_W >> 3]  = "u32",
306 	[BPF_H >> 3]  = "u16",
307 	[BPF_B >> 3]  = "u8",
308 	[BPF_DW >> 3] = "u64",
309 };
310 
311 static const char *const bpf_jmp_string[16] = {
312 	[BPF_JA >> 4]   = "jmp",
313 	[BPF_JEQ >> 4]  = "==",
314 	[BPF_JGT >> 4]  = ">",
315 	[BPF_JLT >> 4]  = "<",
316 	[BPF_JGE >> 4]  = ">=",
317 	[BPF_JLE >> 4]  = "<=",
318 	[BPF_JSET >> 4] = "&",
319 	[BPF_JNE >> 4]  = "!=",
320 	[BPF_JSGT >> 4] = "s>",
321 	[BPF_JSLT >> 4] = "s<",
322 	[BPF_JSGE >> 4] = "s>=",
323 	[BPF_JSLE >> 4] = "s<=",
324 	[BPF_CALL >> 4] = "call",
325 	[BPF_EXIT >> 4] = "exit",
326 };
327 
328 static void print_bpf_insn(const struct bpf_verifier_env *env,
329 			   const struct bpf_insn *insn)
330 {
331 	u8 class = BPF_CLASS(insn->code);
332 
333 	if (class == BPF_ALU || class == BPF_ALU64) {
334 		if (BPF_SRC(insn->code) == BPF_X)
335 			verbose("(%02x) %sr%d %s %sr%d\n",
336 				insn->code, class == BPF_ALU ? "(u32) " : "",
337 				insn->dst_reg,
338 				bpf_alu_string[BPF_OP(insn->code) >> 4],
339 				class == BPF_ALU ? "(u32) " : "",
340 				insn->src_reg);
341 		else
342 			verbose("(%02x) %sr%d %s %s%d\n",
343 				insn->code, class == BPF_ALU ? "(u32) " : "",
344 				insn->dst_reg,
345 				bpf_alu_string[BPF_OP(insn->code) >> 4],
346 				class == BPF_ALU ? "(u32) " : "",
347 				insn->imm);
348 	} else if (class == BPF_STX) {
349 		if (BPF_MODE(insn->code) == BPF_MEM)
350 			verbose("(%02x) *(%s *)(r%d %+d) = r%d\n",
351 				insn->code,
352 				bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
353 				insn->dst_reg,
354 				insn->off, insn->src_reg);
355 		else if (BPF_MODE(insn->code) == BPF_XADD)
356 			verbose("(%02x) lock *(%s *)(r%d %+d) += r%d\n",
357 				insn->code,
358 				bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
359 				insn->dst_reg, insn->off,
360 				insn->src_reg);
361 		else
362 			verbose("BUG_%02x\n", insn->code);
363 	} else if (class == BPF_ST) {
364 		if (BPF_MODE(insn->code) != BPF_MEM) {
365 			verbose("BUG_st_%02x\n", insn->code);
366 			return;
367 		}
368 		verbose("(%02x) *(%s *)(r%d %+d) = %d\n",
369 			insn->code,
370 			bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
371 			insn->dst_reg,
372 			insn->off, insn->imm);
373 	} else if (class == BPF_LDX) {
374 		if (BPF_MODE(insn->code) != BPF_MEM) {
375 			verbose("BUG_ldx_%02x\n", insn->code);
376 			return;
377 		}
378 		verbose("(%02x) r%d = *(%s *)(r%d %+d)\n",
379 			insn->code, insn->dst_reg,
380 			bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
381 			insn->src_reg, insn->off);
382 	} else if (class == BPF_LD) {
383 		if (BPF_MODE(insn->code) == BPF_ABS) {
384 			verbose("(%02x) r0 = *(%s *)skb[%d]\n",
385 				insn->code,
386 				bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
387 				insn->imm);
388 		} else if (BPF_MODE(insn->code) == BPF_IND) {
389 			verbose("(%02x) r0 = *(%s *)skb[r%d + %d]\n",
390 				insn->code,
391 				bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
392 				insn->src_reg, insn->imm);
393 		} else if (BPF_MODE(insn->code) == BPF_IMM &&
394 			   BPF_SIZE(insn->code) == BPF_DW) {
395 			/* At this point, we already made sure that the second
396 			 * part of the ldimm64 insn is accessible.
397 			 */
398 			u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
399 			bool map_ptr = insn->src_reg == BPF_PSEUDO_MAP_FD;
400 
401 			if (map_ptr && !env->allow_ptr_leaks)
402 				imm = 0;
403 
404 			verbose("(%02x) r%d = 0x%llx\n", insn->code,
405 				insn->dst_reg, (unsigned long long)imm);
406 		} else {
407 			verbose("BUG_ld_%02x\n", insn->code);
408 			return;
409 		}
410 	} else if (class == BPF_JMP) {
411 		u8 opcode = BPF_OP(insn->code);
412 
413 		if (opcode == BPF_CALL) {
414 			verbose("(%02x) call %s#%d\n", insn->code,
415 				func_id_name(insn->imm), insn->imm);
416 		} else if (insn->code == (BPF_JMP | BPF_JA)) {
417 			verbose("(%02x) goto pc%+d\n",
418 				insn->code, insn->off);
419 		} else if (insn->code == (BPF_JMP | BPF_EXIT)) {
420 			verbose("(%02x) exit\n", insn->code);
421 		} else if (BPF_SRC(insn->code) == BPF_X) {
422 			verbose("(%02x) if r%d %s r%d goto pc%+d\n",
423 				insn->code, insn->dst_reg,
424 				bpf_jmp_string[BPF_OP(insn->code) >> 4],
425 				insn->src_reg, insn->off);
426 		} else {
427 			verbose("(%02x) if r%d %s 0x%x goto pc%+d\n",
428 				insn->code, insn->dst_reg,
429 				bpf_jmp_string[BPF_OP(insn->code) >> 4],
430 				insn->imm, insn->off);
431 		}
432 	} else {
433 		verbose("(%02x) %s\n", insn->code, bpf_class_string[class]);
434 	}
435 }
436 
437 static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx)
438 {
439 	struct bpf_verifier_stack_elem *elem;
440 	int insn_idx;
441 
442 	if (env->head == NULL)
443 		return -1;
444 
445 	memcpy(&env->cur_state, &env->head->st, sizeof(env->cur_state));
446 	insn_idx = env->head->insn_idx;
447 	if (prev_insn_idx)
448 		*prev_insn_idx = env->head->prev_insn_idx;
449 	elem = env->head->next;
450 	kfree(env->head);
451 	env->head = elem;
452 	env->stack_size--;
453 	return insn_idx;
454 }
455 
456 static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
457 					     int insn_idx, int prev_insn_idx)
458 {
459 	struct bpf_verifier_stack_elem *elem;
460 
461 	elem = kmalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL);
462 	if (!elem)
463 		goto err;
464 
465 	memcpy(&elem->st, &env->cur_state, sizeof(env->cur_state));
466 	elem->insn_idx = insn_idx;
467 	elem->prev_insn_idx = prev_insn_idx;
468 	elem->next = env->head;
469 	env->head = elem;
470 	env->stack_size++;
471 	if (env->stack_size > BPF_COMPLEXITY_LIMIT_STACK) {
472 		verbose("BPF program is too complex\n");
473 		goto err;
474 	}
475 	return &elem->st;
476 err:
477 	/* pop all elements and return */
478 	while (pop_stack(env, NULL) >= 0);
479 	return NULL;
480 }
481 
482 #define CALLER_SAVED_REGS 6
483 static const int caller_saved[CALLER_SAVED_REGS] = {
484 	BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
485 };
486 
487 static void __mark_reg_not_init(struct bpf_reg_state *reg);
488 
489 /* Mark the unknown part of a register (variable offset or scalar value) as
490  * known to have the value @imm.
491  */
492 static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm)
493 {
494 	reg->id = 0;
495 	reg->var_off = tnum_const(imm);
496 	reg->smin_value = (s64)imm;
497 	reg->smax_value = (s64)imm;
498 	reg->umin_value = imm;
499 	reg->umax_value = imm;
500 }
501 
502 /* Mark the 'variable offset' part of a register as zero.  This should be
503  * used only on registers holding a pointer type.
504  */
505 static void __mark_reg_known_zero(struct bpf_reg_state *reg)
506 {
507 	__mark_reg_known(reg, 0);
508 }
509 
510 static void mark_reg_known_zero(struct bpf_reg_state *regs, u32 regno)
511 {
512 	if (WARN_ON(regno >= MAX_BPF_REG)) {
513 		verbose("mark_reg_known_zero(regs, %u)\n", regno);
514 		/* Something bad happened, let's kill all regs */
515 		for (regno = 0; regno < MAX_BPF_REG; regno++)
516 			__mark_reg_not_init(regs + regno);
517 		return;
518 	}
519 	__mark_reg_known_zero(regs + regno);
520 }
521 
522 /* Attempts to improve min/max values based on var_off information */
523 static void __update_reg_bounds(struct bpf_reg_state *reg)
524 {
525 	/* min signed is max(sign bit) | min(other bits) */
526 	reg->smin_value = max_t(s64, reg->smin_value,
527 				reg->var_off.value | (reg->var_off.mask & S64_MIN));
528 	/* max signed is min(sign bit) | max(other bits) */
529 	reg->smax_value = min_t(s64, reg->smax_value,
530 				reg->var_off.value | (reg->var_off.mask & S64_MAX));
531 	reg->umin_value = max(reg->umin_value, reg->var_off.value);
532 	reg->umax_value = min(reg->umax_value,
533 			      reg->var_off.value | reg->var_off.mask);
534 }
535 
536 /* Uses signed min/max values to inform unsigned, and vice-versa */
537 static void __reg_deduce_bounds(struct bpf_reg_state *reg)
538 {
539 	/* Learn sign from signed bounds.
540 	 * If we cannot cross the sign boundary, then signed and unsigned bounds
541 	 * are the same, so combine.  This works even in the negative case, e.g.
542 	 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
543 	 */
544 	if (reg->smin_value >= 0 || reg->smax_value < 0) {
545 		reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
546 							  reg->umin_value);
547 		reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
548 							  reg->umax_value);
549 		return;
550 	}
551 	/* Learn sign from unsigned bounds.  Signed bounds cross the sign
552 	 * boundary, so we must be careful.
553 	 */
554 	if ((s64)reg->umax_value >= 0) {
555 		/* Positive.  We can't learn anything from the smin, but smax
556 		 * is positive, hence safe.
557 		 */
558 		reg->smin_value = reg->umin_value;
559 		reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
560 							  reg->umax_value);
561 	} else if ((s64)reg->umin_value < 0) {
562 		/* Negative.  We can't learn anything from the smax, but smin
563 		 * is negative, hence safe.
564 		 */
565 		reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
566 							  reg->umin_value);
567 		reg->smax_value = reg->umax_value;
568 	}
569 }
570 
571 /* Attempts to improve var_off based on unsigned min/max information */
572 static void __reg_bound_offset(struct bpf_reg_state *reg)
573 {
574 	reg->var_off = tnum_intersect(reg->var_off,
575 				      tnum_range(reg->umin_value,
576 						 reg->umax_value));
577 }
578 
579 /* Reset the min/max bounds of a register */
580 static void __mark_reg_unbounded(struct bpf_reg_state *reg)
581 {
582 	reg->smin_value = S64_MIN;
583 	reg->smax_value = S64_MAX;
584 	reg->umin_value = 0;
585 	reg->umax_value = U64_MAX;
586 }
587 
588 /* Mark a register as having a completely unknown (scalar) value. */
589 static void __mark_reg_unknown(struct bpf_reg_state *reg)
590 {
591 	reg->type = SCALAR_VALUE;
592 	reg->id = 0;
593 	reg->off = 0;
594 	reg->var_off = tnum_unknown;
595 	__mark_reg_unbounded(reg);
596 }
597 
598 static void mark_reg_unknown(struct bpf_reg_state *regs, u32 regno)
599 {
600 	if (WARN_ON(regno >= MAX_BPF_REG)) {
601 		verbose("mark_reg_unknown(regs, %u)\n", regno);
602 		/* Something bad happened, let's kill all regs */
603 		for (regno = 0; regno < MAX_BPF_REG; regno++)
604 			__mark_reg_not_init(regs + regno);
605 		return;
606 	}
607 	__mark_reg_unknown(regs + regno);
608 }
609 
610 static void __mark_reg_not_init(struct bpf_reg_state *reg)
611 {
612 	__mark_reg_unknown(reg);
613 	reg->type = NOT_INIT;
614 }
615 
616 static void mark_reg_not_init(struct bpf_reg_state *regs, u32 regno)
617 {
618 	if (WARN_ON(regno >= MAX_BPF_REG)) {
619 		verbose("mark_reg_not_init(regs, %u)\n", regno);
620 		/* Something bad happened, let's kill all regs */
621 		for (regno = 0; regno < MAX_BPF_REG; regno++)
622 			__mark_reg_not_init(regs + regno);
623 		return;
624 	}
625 	__mark_reg_not_init(regs + regno);
626 }
627 
628 static void init_reg_state(struct bpf_reg_state *regs)
629 {
630 	int i;
631 
632 	for (i = 0; i < MAX_BPF_REG; i++) {
633 		mark_reg_not_init(regs, i);
634 		regs[i].live = REG_LIVE_NONE;
635 	}
636 
637 	/* frame pointer */
638 	regs[BPF_REG_FP].type = PTR_TO_STACK;
639 	mark_reg_known_zero(regs, BPF_REG_FP);
640 
641 	/* 1st arg to a function */
642 	regs[BPF_REG_1].type = PTR_TO_CTX;
643 	mark_reg_known_zero(regs, BPF_REG_1);
644 }
645 
646 enum reg_arg_type {
647 	SRC_OP,		/* register is used as source operand */
648 	DST_OP,		/* register is used as destination operand */
649 	DST_OP_NO_MARK	/* same as above, check only, don't mark */
650 };
651 
652 static void mark_reg_read(const struct bpf_verifier_state *state, u32 regno)
653 {
654 	struct bpf_verifier_state *parent = state->parent;
655 
656 	while (parent) {
657 		/* if read wasn't screened by an earlier write ... */
658 		if (state->regs[regno].live & REG_LIVE_WRITTEN)
659 			break;
660 		/* ... then we depend on parent's value */
661 		parent->regs[regno].live |= REG_LIVE_READ;
662 		state = parent;
663 		parent = state->parent;
664 	}
665 }
666 
667 static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
668 			 enum reg_arg_type t)
669 {
670 	struct bpf_reg_state *regs = env->cur_state.regs;
671 
672 	if (regno >= MAX_BPF_REG) {
673 		verbose("R%d is invalid\n", regno);
674 		return -EINVAL;
675 	}
676 
677 	if (t == SRC_OP) {
678 		/* check whether register used as source operand can be read */
679 		if (regs[regno].type == NOT_INIT) {
680 			verbose("R%d !read_ok\n", regno);
681 			return -EACCES;
682 		}
683 		mark_reg_read(&env->cur_state, regno);
684 	} else {
685 		/* check whether register used as dest operand can be written to */
686 		if (regno == BPF_REG_FP) {
687 			verbose("frame pointer is read only\n");
688 			return -EACCES;
689 		}
690 		regs[regno].live |= REG_LIVE_WRITTEN;
691 		if (t == DST_OP)
692 			mark_reg_unknown(regs, regno);
693 	}
694 	return 0;
695 }
696 
697 static bool is_spillable_regtype(enum bpf_reg_type type)
698 {
699 	switch (type) {
700 	case PTR_TO_MAP_VALUE:
701 	case PTR_TO_MAP_VALUE_OR_NULL:
702 	case PTR_TO_STACK:
703 	case PTR_TO_CTX:
704 	case PTR_TO_PACKET:
705 	case PTR_TO_PACKET_END:
706 	case CONST_PTR_TO_MAP:
707 		return true;
708 	default:
709 		return false;
710 	}
711 }
712 
713 /* check_stack_read/write functions track spill/fill of registers,
714  * stack boundary and alignment are checked in check_mem_access()
715  */
716 static int check_stack_write(struct bpf_verifier_state *state, int off,
717 			     int size, int value_regno)
718 {
719 	int i, spi = (MAX_BPF_STACK + off) / BPF_REG_SIZE;
720 	/* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
721 	 * so it's aligned access and [off, off + size) are within stack limits
722 	 */
723 
724 	if (value_regno >= 0 &&
725 	    is_spillable_regtype(state->regs[value_regno].type)) {
726 
727 		/* register containing pointer is being spilled into stack */
728 		if (size != BPF_REG_SIZE) {
729 			verbose("invalid size of register spill\n");
730 			return -EACCES;
731 		}
732 
733 		/* save register state */
734 		state->spilled_regs[spi] = state->regs[value_regno];
735 		state->spilled_regs[spi].live |= REG_LIVE_WRITTEN;
736 
737 		for (i = 0; i < BPF_REG_SIZE; i++)
738 			state->stack_slot_type[MAX_BPF_STACK + off + i] = STACK_SPILL;
739 	} else {
740 		/* regular write of data into stack */
741 		state->spilled_regs[spi] = (struct bpf_reg_state) {};
742 
743 		for (i = 0; i < size; i++)
744 			state->stack_slot_type[MAX_BPF_STACK + off + i] = STACK_MISC;
745 	}
746 	return 0;
747 }
748 
749 static void mark_stack_slot_read(const struct bpf_verifier_state *state, int slot)
750 {
751 	struct bpf_verifier_state *parent = state->parent;
752 
753 	while (parent) {
754 		/* if read wasn't screened by an earlier write ... */
755 		if (state->spilled_regs[slot].live & REG_LIVE_WRITTEN)
756 			break;
757 		/* ... then we depend on parent's value */
758 		parent->spilled_regs[slot].live |= REG_LIVE_READ;
759 		state = parent;
760 		parent = state->parent;
761 	}
762 }
763 
764 static int check_stack_read(struct bpf_verifier_state *state, int off, int size,
765 			    int value_regno)
766 {
767 	u8 *slot_type;
768 	int i, spi;
769 
770 	slot_type = &state->stack_slot_type[MAX_BPF_STACK + off];
771 
772 	if (slot_type[0] == STACK_SPILL) {
773 		if (size != BPF_REG_SIZE) {
774 			verbose("invalid size of register spill\n");
775 			return -EACCES;
776 		}
777 		for (i = 1; i < BPF_REG_SIZE; i++) {
778 			if (slot_type[i] != STACK_SPILL) {
779 				verbose("corrupted spill memory\n");
780 				return -EACCES;
781 			}
782 		}
783 
784 		spi = (MAX_BPF_STACK + off) / BPF_REG_SIZE;
785 
786 		if (value_regno >= 0) {
787 			/* restore register state from stack */
788 			state->regs[value_regno] = state->spilled_regs[spi];
789 			mark_stack_slot_read(state, spi);
790 		}
791 		return 0;
792 	} else {
793 		for (i = 0; i < size; i++) {
794 			if (slot_type[i] != STACK_MISC) {
795 				verbose("invalid read from stack off %d+%d size %d\n",
796 					off, i, size);
797 				return -EACCES;
798 			}
799 		}
800 		if (value_regno >= 0)
801 			/* have read misc data from the stack */
802 			mark_reg_unknown(state->regs, value_regno);
803 		return 0;
804 	}
805 }
806 
807 /* check read/write into map element returned by bpf_map_lookup_elem() */
808 static int __check_map_access(struct bpf_verifier_env *env, u32 regno, int off,
809 			    int size)
810 {
811 	struct bpf_map *map = env->cur_state.regs[regno].map_ptr;
812 
813 	if (off < 0 || size <= 0 || off + size > map->value_size) {
814 		verbose("invalid access to map value, value_size=%d off=%d size=%d\n",
815 			map->value_size, off, size);
816 		return -EACCES;
817 	}
818 	return 0;
819 }
820 
821 /* check read/write into a map element with possible variable offset */
822 static int check_map_access(struct bpf_verifier_env *env, u32 regno,
823 				int off, int size)
824 {
825 	struct bpf_verifier_state *state = &env->cur_state;
826 	struct bpf_reg_state *reg = &state->regs[regno];
827 	int err;
828 
829 	/* We may have adjusted the register to this map value, so we
830 	 * need to try adding each of min_value and max_value to off
831 	 * to make sure our theoretical access will be safe.
832 	 */
833 	if (log_level)
834 		print_verifier_state(state);
835 	/* The minimum value is only important with signed
836 	 * comparisons where we can't assume the floor of a
837 	 * value is 0.  If we are using signed variables for our
838 	 * index'es we need to make sure that whatever we use
839 	 * will have a set floor within our range.
840 	 */
841 	if (reg->smin_value < 0) {
842 		verbose("R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
843 			regno);
844 		return -EACCES;
845 	}
846 	err = __check_map_access(env, regno, reg->smin_value + off, size);
847 	if (err) {
848 		verbose("R%d min value is outside of the array range\n", regno);
849 		return err;
850 	}
851 
852 	/* If we haven't set a max value then we need to bail since we can't be
853 	 * sure we won't do bad things.
854 	 * If reg->umax_value + off could overflow, treat that as unbounded too.
855 	 */
856 	if (reg->umax_value >= BPF_MAX_VAR_OFF) {
857 		verbose("R%d unbounded memory access, make sure to bounds check any array access into a map\n",
858 			regno);
859 		return -EACCES;
860 	}
861 	err = __check_map_access(env, regno, reg->umax_value + off, size);
862 	if (err)
863 		verbose("R%d max value is outside of the array range\n", regno);
864 	return err;
865 }
866 
867 #define MAX_PACKET_OFF 0xffff
868 
869 static bool may_access_direct_pkt_data(struct bpf_verifier_env *env,
870 				       const struct bpf_call_arg_meta *meta,
871 				       enum bpf_access_type t)
872 {
873 	switch (env->prog->type) {
874 	case BPF_PROG_TYPE_LWT_IN:
875 	case BPF_PROG_TYPE_LWT_OUT:
876 		/* dst_input() and dst_output() can't write for now */
877 		if (t == BPF_WRITE)
878 			return false;
879 		/* fallthrough */
880 	case BPF_PROG_TYPE_SCHED_CLS:
881 	case BPF_PROG_TYPE_SCHED_ACT:
882 	case BPF_PROG_TYPE_XDP:
883 	case BPF_PROG_TYPE_LWT_XMIT:
884 	case BPF_PROG_TYPE_SK_SKB:
885 		if (meta)
886 			return meta->pkt_access;
887 
888 		env->seen_direct_write = true;
889 		return true;
890 	default:
891 		return false;
892 	}
893 }
894 
895 static int __check_packet_access(struct bpf_verifier_env *env, u32 regno,
896 				 int off, int size)
897 {
898 	struct bpf_reg_state *regs = env->cur_state.regs;
899 	struct bpf_reg_state *reg = &regs[regno];
900 
901 	if (off < 0 || size <= 0 || (u64)off + size > reg->range) {
902 		verbose("invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n",
903 			off, size, regno, reg->id, reg->off, reg->range);
904 		return -EACCES;
905 	}
906 	return 0;
907 }
908 
909 static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
910 			       int size)
911 {
912 	struct bpf_reg_state *regs = env->cur_state.regs;
913 	struct bpf_reg_state *reg = &regs[regno];
914 	int err;
915 
916 	/* We may have added a variable offset to the packet pointer; but any
917 	 * reg->range we have comes after that.  We are only checking the fixed
918 	 * offset.
919 	 */
920 
921 	/* We don't allow negative numbers, because we aren't tracking enough
922 	 * detail to prove they're safe.
923 	 */
924 	if (reg->smin_value < 0) {
925 		verbose("R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
926 			regno);
927 		return -EACCES;
928 	}
929 	err = __check_packet_access(env, regno, off, size);
930 	if (err) {
931 		verbose("R%d offset is outside of the packet\n", regno);
932 		return err;
933 	}
934 	return err;
935 }
936 
937 /* check access to 'struct bpf_context' fields.  Supports fixed offsets only */
938 static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size,
939 			    enum bpf_access_type t, enum bpf_reg_type *reg_type)
940 {
941 	struct bpf_insn_access_aux info = {
942 		.reg_type = *reg_type,
943 	};
944 
945 	/* for analyzer ctx accesses are already validated and converted */
946 	if (env->analyzer_ops)
947 		return 0;
948 
949 	if (env->prog->aux->ops->is_valid_access &&
950 	    env->prog->aux->ops->is_valid_access(off, size, t, &info)) {
951 		/* A non zero info.ctx_field_size indicates that this field is a
952 		 * candidate for later verifier transformation to load the whole
953 		 * field and then apply a mask when accessed with a narrower
954 		 * access than actual ctx access size. A zero info.ctx_field_size
955 		 * will only allow for whole field access and rejects any other
956 		 * type of narrower access.
957 		 */
958 		env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
959 		*reg_type = info.reg_type;
960 
961 		/* remember the offset of last byte accessed in ctx */
962 		if (env->prog->aux->max_ctx_offset < off + size)
963 			env->prog->aux->max_ctx_offset = off + size;
964 		return 0;
965 	}
966 
967 	verbose("invalid bpf_context access off=%d size=%d\n", off, size);
968 	return -EACCES;
969 }
970 
971 static bool __is_pointer_value(bool allow_ptr_leaks,
972 			       const struct bpf_reg_state *reg)
973 {
974 	if (allow_ptr_leaks)
975 		return false;
976 
977 	return reg->type != SCALAR_VALUE;
978 }
979 
980 static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
981 {
982 	return __is_pointer_value(env->allow_ptr_leaks, &env->cur_state.regs[regno]);
983 }
984 
985 static int check_pkt_ptr_alignment(const struct bpf_reg_state *reg,
986 				   int off, int size, bool strict)
987 {
988 	struct tnum reg_off;
989 	int ip_align;
990 
991 	/* Byte size accesses are always allowed. */
992 	if (!strict || size == 1)
993 		return 0;
994 
995 	/* For platforms that do not have a Kconfig enabling
996 	 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the value of
997 	 * NET_IP_ALIGN is universally set to '2'.  And on platforms
998 	 * that do set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, we get
999 	 * to this code only in strict mode where we want to emulate
1000 	 * the NET_IP_ALIGN==2 checking.  Therefore use an
1001 	 * unconditional IP align value of '2'.
1002 	 */
1003 	ip_align = 2;
1004 
1005 	reg_off = tnum_add(reg->var_off, tnum_const(ip_align + reg->off + off));
1006 	if (!tnum_is_aligned(reg_off, size)) {
1007 		char tn_buf[48];
1008 
1009 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
1010 		verbose("misaligned packet access off %d+%s+%d+%d size %d\n",
1011 			ip_align, tn_buf, reg->off, off, size);
1012 		return -EACCES;
1013 	}
1014 
1015 	return 0;
1016 }
1017 
1018 static int check_generic_ptr_alignment(const struct bpf_reg_state *reg,
1019 				       const char *pointer_desc,
1020 				       int off, int size, bool strict)
1021 {
1022 	struct tnum reg_off;
1023 
1024 	/* Byte size accesses are always allowed. */
1025 	if (!strict || size == 1)
1026 		return 0;
1027 
1028 	reg_off = tnum_add(reg->var_off, tnum_const(reg->off + off));
1029 	if (!tnum_is_aligned(reg_off, size)) {
1030 		char tn_buf[48];
1031 
1032 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
1033 		verbose("misaligned %saccess off %s+%d+%d size %d\n",
1034 			pointer_desc, tn_buf, reg->off, off, size);
1035 		return -EACCES;
1036 	}
1037 
1038 	return 0;
1039 }
1040 
1041 static int check_ptr_alignment(struct bpf_verifier_env *env,
1042 			       const struct bpf_reg_state *reg,
1043 			       int off, int size)
1044 {
1045 	bool strict = env->strict_alignment;
1046 	const char *pointer_desc = "";
1047 
1048 	switch (reg->type) {
1049 	case PTR_TO_PACKET:
1050 		/* special case, because of NET_IP_ALIGN */
1051 		return check_pkt_ptr_alignment(reg, off, size, strict);
1052 	case PTR_TO_MAP_VALUE:
1053 		pointer_desc = "value ";
1054 		break;
1055 	case PTR_TO_CTX:
1056 		pointer_desc = "context ";
1057 		break;
1058 	case PTR_TO_STACK:
1059 		pointer_desc = "stack ";
1060 		break;
1061 	default:
1062 		break;
1063 	}
1064 	return check_generic_ptr_alignment(reg, pointer_desc, off, size, strict);
1065 }
1066 
1067 /* check whether memory at (regno + off) is accessible for t = (read | write)
1068  * if t==write, value_regno is a register which value is stored into memory
1069  * if t==read, value_regno is a register which will receive the value from memory
1070  * if t==write && value_regno==-1, some unknown value is stored into memory
1071  * if t==read && value_regno==-1, don't care what we read from memory
1072  */
1073 static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno, int off,
1074 			    int bpf_size, enum bpf_access_type t,
1075 			    int value_regno)
1076 {
1077 	struct bpf_verifier_state *state = &env->cur_state;
1078 	struct bpf_reg_state *reg = &state->regs[regno];
1079 	int size, err = 0;
1080 
1081 	size = bpf_size_to_bytes(bpf_size);
1082 	if (size < 0)
1083 		return size;
1084 
1085 	/* alignment checks will add in reg->off themselves */
1086 	err = check_ptr_alignment(env, reg, off, size);
1087 	if (err)
1088 		return err;
1089 
1090 	/* for access checks, reg->off is just part of off */
1091 	off += reg->off;
1092 
1093 	if (reg->type == PTR_TO_MAP_VALUE) {
1094 		if (t == BPF_WRITE && value_regno >= 0 &&
1095 		    is_pointer_value(env, value_regno)) {
1096 			verbose("R%d leaks addr into map\n", value_regno);
1097 			return -EACCES;
1098 		}
1099 
1100 		err = check_map_access(env, regno, off, size);
1101 		if (!err && t == BPF_READ && value_regno >= 0)
1102 			mark_reg_unknown(state->regs, value_regno);
1103 
1104 	} else if (reg->type == PTR_TO_CTX) {
1105 		enum bpf_reg_type reg_type = SCALAR_VALUE;
1106 
1107 		if (t == BPF_WRITE && value_regno >= 0 &&
1108 		    is_pointer_value(env, value_regno)) {
1109 			verbose("R%d leaks addr into ctx\n", value_regno);
1110 			return -EACCES;
1111 		}
1112 		/* ctx accesses must be at a fixed offset, so that we can
1113 		 * determine what type of data were returned.
1114 		 */
1115 		if (!tnum_is_const(reg->var_off)) {
1116 			char tn_buf[48];
1117 
1118 			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
1119 			verbose("variable ctx access var_off=%s off=%d size=%d",
1120 				tn_buf, off, size);
1121 			return -EACCES;
1122 		}
1123 		off += reg->var_off.value;
1124 		err = check_ctx_access(env, insn_idx, off, size, t, &reg_type);
1125 		if (!err && t == BPF_READ && value_regno >= 0) {
1126 			/* ctx access returns either a scalar, or a
1127 			 * PTR_TO_PACKET[_END].  In the latter case, we know
1128 			 * the offset is zero.
1129 			 */
1130 			if (reg_type == SCALAR_VALUE)
1131 				mark_reg_unknown(state->regs, value_regno);
1132 			else
1133 				mark_reg_known_zero(state->regs, value_regno);
1134 			state->regs[value_regno].id = 0;
1135 			state->regs[value_regno].off = 0;
1136 			state->regs[value_regno].range = 0;
1137 			state->regs[value_regno].type = reg_type;
1138 		}
1139 
1140 	} else if (reg->type == PTR_TO_STACK) {
1141 		/* stack accesses must be at a fixed offset, so that we can
1142 		 * determine what type of data were returned.
1143 		 * See check_stack_read().
1144 		 */
1145 		if (!tnum_is_const(reg->var_off)) {
1146 			char tn_buf[48];
1147 
1148 			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
1149 			verbose("variable stack access var_off=%s off=%d size=%d",
1150 				tn_buf, off, size);
1151 			return -EACCES;
1152 		}
1153 		off += reg->var_off.value;
1154 		if (off >= 0 || off < -MAX_BPF_STACK) {
1155 			verbose("invalid stack off=%d size=%d\n", off, size);
1156 			return -EACCES;
1157 		}
1158 
1159 		if (env->prog->aux->stack_depth < -off)
1160 			env->prog->aux->stack_depth = -off;
1161 
1162 		if (t == BPF_WRITE) {
1163 			if (!env->allow_ptr_leaks &&
1164 			    state->stack_slot_type[MAX_BPF_STACK + off] == STACK_SPILL &&
1165 			    size != BPF_REG_SIZE) {
1166 				verbose("attempt to corrupt spilled pointer on stack\n");
1167 				return -EACCES;
1168 			}
1169 			err = check_stack_write(state, off, size, value_regno);
1170 		} else {
1171 			err = check_stack_read(state, off, size, value_regno);
1172 		}
1173 	} else if (reg->type == PTR_TO_PACKET) {
1174 		if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) {
1175 			verbose("cannot write into packet\n");
1176 			return -EACCES;
1177 		}
1178 		if (t == BPF_WRITE && value_regno >= 0 &&
1179 		    is_pointer_value(env, value_regno)) {
1180 			verbose("R%d leaks addr into packet\n", value_regno);
1181 			return -EACCES;
1182 		}
1183 		err = check_packet_access(env, regno, off, size);
1184 		if (!err && t == BPF_READ && value_regno >= 0)
1185 			mark_reg_unknown(state->regs, value_regno);
1186 	} else {
1187 		verbose("R%d invalid mem access '%s'\n",
1188 			regno, reg_type_str[reg->type]);
1189 		return -EACCES;
1190 	}
1191 
1192 	if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ &&
1193 	    state->regs[value_regno].type == SCALAR_VALUE) {
1194 		/* b/h/w load zero-extends, mark upper bits as known 0 */
1195 		state->regs[value_regno].var_off = tnum_cast(
1196 					state->regs[value_regno].var_off, size);
1197 		__update_reg_bounds(&state->regs[value_regno]);
1198 	}
1199 	return err;
1200 }
1201 
1202 static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn)
1203 {
1204 	int err;
1205 
1206 	if ((BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) ||
1207 	    insn->imm != 0) {
1208 		verbose("BPF_XADD uses reserved fields\n");
1209 		return -EINVAL;
1210 	}
1211 
1212 	/* check src1 operand */
1213 	err = check_reg_arg(env, insn->src_reg, SRC_OP);
1214 	if (err)
1215 		return err;
1216 
1217 	/* check src2 operand */
1218 	err = check_reg_arg(env, insn->dst_reg, SRC_OP);
1219 	if (err)
1220 		return err;
1221 
1222 	if (is_pointer_value(env, insn->src_reg)) {
1223 		verbose("R%d leaks addr into mem\n", insn->src_reg);
1224 		return -EACCES;
1225 	}
1226 
1227 	/* check whether atomic_add can read the memory */
1228 	err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
1229 			       BPF_SIZE(insn->code), BPF_READ, -1);
1230 	if (err)
1231 		return err;
1232 
1233 	/* check whether atomic_add can write into the same memory */
1234 	return check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
1235 				BPF_SIZE(insn->code), BPF_WRITE, -1);
1236 }
1237 
1238 /* Does this register contain a constant zero? */
1239 static bool register_is_null(struct bpf_reg_state reg)
1240 {
1241 	return reg.type == SCALAR_VALUE && tnum_equals_const(reg.var_off, 0);
1242 }
1243 
1244 /* when register 'regno' is passed into function that will read 'access_size'
1245  * bytes from that pointer, make sure that it's within stack boundary
1246  * and all elements of stack are initialized.
1247  * Unlike most pointer bounds-checking functions, this one doesn't take an
1248  * 'off' argument, so it has to add in reg->off itself.
1249  */
1250 static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
1251 				int access_size, bool zero_size_allowed,
1252 				struct bpf_call_arg_meta *meta)
1253 {
1254 	struct bpf_verifier_state *state = &env->cur_state;
1255 	struct bpf_reg_state *regs = state->regs;
1256 	int off, i;
1257 
1258 	if (regs[regno].type != PTR_TO_STACK) {
1259 		/* Allow zero-byte read from NULL, regardless of pointer type */
1260 		if (zero_size_allowed && access_size == 0 &&
1261 		    register_is_null(regs[regno]))
1262 			return 0;
1263 
1264 		verbose("R%d type=%s expected=%s\n", regno,
1265 			reg_type_str[regs[regno].type],
1266 			reg_type_str[PTR_TO_STACK]);
1267 		return -EACCES;
1268 	}
1269 
1270 	/* Only allow fixed-offset stack reads */
1271 	if (!tnum_is_const(regs[regno].var_off)) {
1272 		char tn_buf[48];
1273 
1274 		tnum_strn(tn_buf, sizeof(tn_buf), regs[regno].var_off);
1275 		verbose("invalid variable stack read R%d var_off=%s\n",
1276 			regno, tn_buf);
1277 	}
1278 	off = regs[regno].off + regs[regno].var_off.value;
1279 	if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 ||
1280 	    access_size <= 0) {
1281 		verbose("invalid stack type R%d off=%d access_size=%d\n",
1282 			regno, off, access_size);
1283 		return -EACCES;
1284 	}
1285 
1286 	if (env->prog->aux->stack_depth < -off)
1287 		env->prog->aux->stack_depth = -off;
1288 
1289 	if (meta && meta->raw_mode) {
1290 		meta->access_size = access_size;
1291 		meta->regno = regno;
1292 		return 0;
1293 	}
1294 
1295 	for (i = 0; i < access_size; i++) {
1296 		if (state->stack_slot_type[MAX_BPF_STACK + off + i] != STACK_MISC) {
1297 			verbose("invalid indirect read from stack off %d+%d size %d\n",
1298 				off, i, access_size);
1299 			return -EACCES;
1300 		}
1301 	}
1302 	return 0;
1303 }
1304 
1305 static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
1306 				   int access_size, bool zero_size_allowed,
1307 				   struct bpf_call_arg_meta *meta)
1308 {
1309 	struct bpf_reg_state *regs = env->cur_state.regs, *reg = &regs[regno];
1310 
1311 	switch (reg->type) {
1312 	case PTR_TO_PACKET:
1313 		return check_packet_access(env, regno, reg->off, access_size);
1314 	case PTR_TO_MAP_VALUE:
1315 		return check_map_access(env, regno, reg->off, access_size);
1316 	default: /* scalar_value|ptr_to_stack or invalid ptr */
1317 		return check_stack_boundary(env, regno, access_size,
1318 					    zero_size_allowed, meta);
1319 	}
1320 }
1321 
1322 static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
1323 			  enum bpf_arg_type arg_type,
1324 			  struct bpf_call_arg_meta *meta)
1325 {
1326 	struct bpf_reg_state *regs = env->cur_state.regs, *reg = &regs[regno];
1327 	enum bpf_reg_type expected_type, type = reg->type;
1328 	int err = 0;
1329 
1330 	if (arg_type == ARG_DONTCARE)
1331 		return 0;
1332 
1333 	err = check_reg_arg(env, regno, SRC_OP);
1334 	if (err)
1335 		return err;
1336 
1337 	if (arg_type == ARG_ANYTHING) {
1338 		if (is_pointer_value(env, regno)) {
1339 			verbose("R%d leaks addr into helper function\n", regno);
1340 			return -EACCES;
1341 		}
1342 		return 0;
1343 	}
1344 
1345 	if (type == PTR_TO_PACKET &&
1346 	    !may_access_direct_pkt_data(env, meta, BPF_READ)) {
1347 		verbose("helper access to the packet is not allowed\n");
1348 		return -EACCES;
1349 	}
1350 
1351 	if (arg_type == ARG_PTR_TO_MAP_KEY ||
1352 	    arg_type == ARG_PTR_TO_MAP_VALUE) {
1353 		expected_type = PTR_TO_STACK;
1354 		if (type != PTR_TO_PACKET && type != expected_type)
1355 			goto err_type;
1356 	} else if (arg_type == ARG_CONST_SIZE ||
1357 		   arg_type == ARG_CONST_SIZE_OR_ZERO) {
1358 		expected_type = SCALAR_VALUE;
1359 		if (type != expected_type)
1360 			goto err_type;
1361 	} else if (arg_type == ARG_CONST_MAP_PTR) {
1362 		expected_type = CONST_PTR_TO_MAP;
1363 		if (type != expected_type)
1364 			goto err_type;
1365 	} else if (arg_type == ARG_PTR_TO_CTX) {
1366 		expected_type = PTR_TO_CTX;
1367 		if (type != expected_type)
1368 			goto err_type;
1369 	} else if (arg_type == ARG_PTR_TO_MEM ||
1370 		   arg_type == ARG_PTR_TO_UNINIT_MEM) {
1371 		expected_type = PTR_TO_STACK;
1372 		/* One exception here. In case function allows for NULL to be
1373 		 * passed in as argument, it's a SCALAR_VALUE type. Final test
1374 		 * happens during stack boundary checking.
1375 		 */
1376 		if (register_is_null(*reg))
1377 			/* final test in check_stack_boundary() */;
1378 		else if (type != PTR_TO_PACKET && type != PTR_TO_MAP_VALUE &&
1379 			 type != expected_type)
1380 			goto err_type;
1381 		meta->raw_mode = arg_type == ARG_PTR_TO_UNINIT_MEM;
1382 	} else {
1383 		verbose("unsupported arg_type %d\n", arg_type);
1384 		return -EFAULT;
1385 	}
1386 
1387 	if (arg_type == ARG_CONST_MAP_PTR) {
1388 		/* bpf_map_xxx(map_ptr) call: remember that map_ptr */
1389 		meta->map_ptr = reg->map_ptr;
1390 	} else if (arg_type == ARG_PTR_TO_MAP_KEY) {
1391 		/* bpf_map_xxx(..., map_ptr, ..., key) call:
1392 		 * check that [key, key + map->key_size) are within
1393 		 * stack limits and initialized
1394 		 */
1395 		if (!meta->map_ptr) {
1396 			/* in function declaration map_ptr must come before
1397 			 * map_key, so that it's verified and known before
1398 			 * we have to check map_key here. Otherwise it means
1399 			 * that kernel subsystem misconfigured verifier
1400 			 */
1401 			verbose("invalid map_ptr to access map->key\n");
1402 			return -EACCES;
1403 		}
1404 		if (type == PTR_TO_PACKET)
1405 			err = check_packet_access(env, regno, reg->off,
1406 						  meta->map_ptr->key_size);
1407 		else
1408 			err = check_stack_boundary(env, regno,
1409 						   meta->map_ptr->key_size,
1410 						   false, NULL);
1411 	} else if (arg_type == ARG_PTR_TO_MAP_VALUE) {
1412 		/* bpf_map_xxx(..., map_ptr, ..., value) call:
1413 		 * check [value, value + map->value_size) validity
1414 		 */
1415 		if (!meta->map_ptr) {
1416 			/* kernel subsystem misconfigured verifier */
1417 			verbose("invalid map_ptr to access map->value\n");
1418 			return -EACCES;
1419 		}
1420 		if (type == PTR_TO_PACKET)
1421 			err = check_packet_access(env, regno, reg->off,
1422 						  meta->map_ptr->value_size);
1423 		else
1424 			err = check_stack_boundary(env, regno,
1425 						   meta->map_ptr->value_size,
1426 						   false, NULL);
1427 	} else if (arg_type == ARG_CONST_SIZE ||
1428 		   arg_type == ARG_CONST_SIZE_OR_ZERO) {
1429 		bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO);
1430 
1431 		/* bpf_xxx(..., buf, len) call will access 'len' bytes
1432 		 * from stack pointer 'buf'. Check it
1433 		 * note: regno == len, regno - 1 == buf
1434 		 */
1435 		if (regno == 0) {
1436 			/* kernel subsystem misconfigured verifier */
1437 			verbose("ARG_CONST_SIZE cannot be first argument\n");
1438 			return -EACCES;
1439 		}
1440 
1441 		/* The register is SCALAR_VALUE; the access check
1442 		 * happens using its boundaries.
1443 		 */
1444 
1445 		if (!tnum_is_const(reg->var_off))
1446 			/* For unprivileged variable accesses, disable raw
1447 			 * mode so that the program is required to
1448 			 * initialize all the memory that the helper could
1449 			 * just partially fill up.
1450 			 */
1451 			meta = NULL;
1452 
1453 		if (reg->smin_value < 0) {
1454 			verbose("R%d min value is negative, either use unsigned or 'var &= const'\n",
1455 				regno);
1456 			return -EACCES;
1457 		}
1458 
1459 		if (reg->umin_value == 0) {
1460 			err = check_helper_mem_access(env, regno - 1, 0,
1461 						      zero_size_allowed,
1462 						      meta);
1463 			if (err)
1464 				return err;
1465 		}
1466 
1467 		if (reg->umax_value >= BPF_MAX_VAR_SIZ) {
1468 			verbose("R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n",
1469 				regno);
1470 			return -EACCES;
1471 		}
1472 		err = check_helper_mem_access(env, regno - 1,
1473 					      reg->umax_value,
1474 					      zero_size_allowed, meta);
1475 	}
1476 
1477 	return err;
1478 err_type:
1479 	verbose("R%d type=%s expected=%s\n", regno,
1480 		reg_type_str[type], reg_type_str[expected_type]);
1481 	return -EACCES;
1482 }
1483 
1484 static int check_map_func_compatibility(struct bpf_map *map, int func_id)
1485 {
1486 	if (!map)
1487 		return 0;
1488 
1489 	/* We need a two way check, first is from map perspective ... */
1490 	switch (map->map_type) {
1491 	case BPF_MAP_TYPE_PROG_ARRAY:
1492 		if (func_id != BPF_FUNC_tail_call)
1493 			goto error;
1494 		break;
1495 	case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
1496 		if (func_id != BPF_FUNC_perf_event_read &&
1497 		    func_id != BPF_FUNC_perf_event_output)
1498 			goto error;
1499 		break;
1500 	case BPF_MAP_TYPE_STACK_TRACE:
1501 		if (func_id != BPF_FUNC_get_stackid)
1502 			goto error;
1503 		break;
1504 	case BPF_MAP_TYPE_CGROUP_ARRAY:
1505 		if (func_id != BPF_FUNC_skb_under_cgroup &&
1506 		    func_id != BPF_FUNC_current_task_under_cgroup)
1507 			goto error;
1508 		break;
1509 	/* devmap returns a pointer to a live net_device ifindex that we cannot
1510 	 * allow to be modified from bpf side. So do not allow lookup elements
1511 	 * for now.
1512 	 */
1513 	case BPF_MAP_TYPE_DEVMAP:
1514 		if (func_id != BPF_FUNC_redirect_map)
1515 			goto error;
1516 		break;
1517 	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
1518 	case BPF_MAP_TYPE_HASH_OF_MAPS:
1519 		if (func_id != BPF_FUNC_map_lookup_elem)
1520 			goto error;
1521 		break;
1522 	case BPF_MAP_TYPE_SOCKMAP:
1523 		if (func_id != BPF_FUNC_sk_redirect_map &&
1524 		    func_id != BPF_FUNC_sock_map_update &&
1525 		    func_id != BPF_FUNC_map_delete_elem)
1526 			goto error;
1527 		break;
1528 	default:
1529 		break;
1530 	}
1531 
1532 	/* ... and second from the function itself. */
1533 	switch (func_id) {
1534 	case BPF_FUNC_tail_call:
1535 		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
1536 			goto error;
1537 		break;
1538 	case BPF_FUNC_perf_event_read:
1539 	case BPF_FUNC_perf_event_output:
1540 		if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
1541 			goto error;
1542 		break;
1543 	case BPF_FUNC_get_stackid:
1544 		if (map->map_type != BPF_MAP_TYPE_STACK_TRACE)
1545 			goto error;
1546 		break;
1547 	case BPF_FUNC_current_task_under_cgroup:
1548 	case BPF_FUNC_skb_under_cgroup:
1549 		if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY)
1550 			goto error;
1551 		break;
1552 	case BPF_FUNC_redirect_map:
1553 		if (map->map_type != BPF_MAP_TYPE_DEVMAP)
1554 			goto error;
1555 		break;
1556 	case BPF_FUNC_sk_redirect_map:
1557 		if (map->map_type != BPF_MAP_TYPE_SOCKMAP)
1558 			goto error;
1559 		break;
1560 	case BPF_FUNC_sock_map_update:
1561 		if (map->map_type != BPF_MAP_TYPE_SOCKMAP)
1562 			goto error;
1563 		break;
1564 	default:
1565 		break;
1566 	}
1567 
1568 	return 0;
1569 error:
1570 	verbose("cannot pass map_type %d into func %s#%d\n",
1571 		map->map_type, func_id_name(func_id), func_id);
1572 	return -EINVAL;
1573 }
1574 
1575 static int check_raw_mode(const struct bpf_func_proto *fn)
1576 {
1577 	int count = 0;
1578 
1579 	if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM)
1580 		count++;
1581 	if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM)
1582 		count++;
1583 	if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM)
1584 		count++;
1585 	if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM)
1586 		count++;
1587 	if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM)
1588 		count++;
1589 
1590 	return count > 1 ? -EINVAL : 0;
1591 }
1592 
1593 /* Packet data might have moved, any old PTR_TO_PACKET[_END] are now invalid,
1594  * so turn them into unknown SCALAR_VALUE.
1595  */
1596 static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
1597 {
1598 	struct bpf_verifier_state *state = &env->cur_state;
1599 	struct bpf_reg_state *regs = state->regs, *reg;
1600 	int i;
1601 
1602 	for (i = 0; i < MAX_BPF_REG; i++)
1603 		if (regs[i].type == PTR_TO_PACKET ||
1604 		    regs[i].type == PTR_TO_PACKET_END)
1605 			mark_reg_unknown(regs, i);
1606 
1607 	for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
1608 		if (state->stack_slot_type[i] != STACK_SPILL)
1609 			continue;
1610 		reg = &state->spilled_regs[i / BPF_REG_SIZE];
1611 		if (reg->type != PTR_TO_PACKET &&
1612 		    reg->type != PTR_TO_PACKET_END)
1613 			continue;
1614 		__mark_reg_unknown(reg);
1615 	}
1616 }
1617 
1618 static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
1619 {
1620 	struct bpf_verifier_state *state = &env->cur_state;
1621 	const struct bpf_func_proto *fn = NULL;
1622 	struct bpf_reg_state *regs = state->regs;
1623 	struct bpf_call_arg_meta meta;
1624 	bool changes_data;
1625 	int i, err;
1626 
1627 	/* find function prototype */
1628 	if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) {
1629 		verbose("invalid func %s#%d\n", func_id_name(func_id), func_id);
1630 		return -EINVAL;
1631 	}
1632 
1633 	if (env->prog->aux->ops->get_func_proto)
1634 		fn = env->prog->aux->ops->get_func_proto(func_id);
1635 
1636 	if (!fn) {
1637 		verbose("unknown func %s#%d\n", func_id_name(func_id), func_id);
1638 		return -EINVAL;
1639 	}
1640 
1641 	/* eBPF programs must be GPL compatible to use GPL-ed functions */
1642 	if (!env->prog->gpl_compatible && fn->gpl_only) {
1643 		verbose("cannot call GPL only function from proprietary program\n");
1644 		return -EINVAL;
1645 	}
1646 
1647 	changes_data = bpf_helper_changes_pkt_data(fn->func);
1648 
1649 	memset(&meta, 0, sizeof(meta));
1650 	meta.pkt_access = fn->pkt_access;
1651 
1652 	/* We only support one arg being in raw mode at the moment, which
1653 	 * is sufficient for the helper functions we have right now.
1654 	 */
1655 	err = check_raw_mode(fn);
1656 	if (err) {
1657 		verbose("kernel subsystem misconfigured func %s#%d\n",
1658 			func_id_name(func_id), func_id);
1659 		return err;
1660 	}
1661 
1662 	/* check args */
1663 	err = check_func_arg(env, BPF_REG_1, fn->arg1_type, &meta);
1664 	if (err)
1665 		return err;
1666 	err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta);
1667 	if (err)
1668 		return err;
1669 	err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta);
1670 	if (err)
1671 		return err;
1672 	err = check_func_arg(env, BPF_REG_4, fn->arg4_type, &meta);
1673 	if (err)
1674 		return err;
1675 	err = check_func_arg(env, BPF_REG_5, fn->arg5_type, &meta);
1676 	if (err)
1677 		return err;
1678 
1679 	/* Mark slots with STACK_MISC in case of raw mode, stack offset
1680 	 * is inferred from register state.
1681 	 */
1682 	for (i = 0; i < meta.access_size; i++) {
1683 		err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B, BPF_WRITE, -1);
1684 		if (err)
1685 			return err;
1686 	}
1687 
1688 	/* reset caller saved regs */
1689 	for (i = 0; i < CALLER_SAVED_REGS; i++) {
1690 		mark_reg_not_init(regs, caller_saved[i]);
1691 		check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
1692 	}
1693 
1694 	/* update return register (already marked as written above) */
1695 	if (fn->ret_type == RET_INTEGER) {
1696 		/* sets type to SCALAR_VALUE */
1697 		mark_reg_unknown(regs, BPF_REG_0);
1698 	} else if (fn->ret_type == RET_VOID) {
1699 		regs[BPF_REG_0].type = NOT_INIT;
1700 	} else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL) {
1701 		struct bpf_insn_aux_data *insn_aux;
1702 
1703 		regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
1704 		/* There is no offset yet applied, variable or fixed */
1705 		mark_reg_known_zero(regs, BPF_REG_0);
1706 		regs[BPF_REG_0].off = 0;
1707 		/* remember map_ptr, so that check_map_access()
1708 		 * can check 'value_size' boundary of memory access
1709 		 * to map element returned from bpf_map_lookup_elem()
1710 		 */
1711 		if (meta.map_ptr == NULL) {
1712 			verbose("kernel subsystem misconfigured verifier\n");
1713 			return -EINVAL;
1714 		}
1715 		regs[BPF_REG_0].map_ptr = meta.map_ptr;
1716 		regs[BPF_REG_0].id = ++env->id_gen;
1717 		insn_aux = &env->insn_aux_data[insn_idx];
1718 		if (!insn_aux->map_ptr)
1719 			insn_aux->map_ptr = meta.map_ptr;
1720 		else if (insn_aux->map_ptr != meta.map_ptr)
1721 			insn_aux->map_ptr = BPF_MAP_PTR_POISON;
1722 	} else {
1723 		verbose("unknown return type %d of func %s#%d\n",
1724 			fn->ret_type, func_id_name(func_id), func_id);
1725 		return -EINVAL;
1726 	}
1727 
1728 	err = check_map_func_compatibility(meta.map_ptr, func_id);
1729 	if (err)
1730 		return err;
1731 
1732 	if (changes_data)
1733 		clear_all_pkt_pointers(env);
1734 	return 0;
1735 }
1736 
1737 static void coerce_reg_to_32(struct bpf_reg_state *reg)
1738 {
1739 	/* clear high 32 bits */
1740 	reg->var_off = tnum_cast(reg->var_off, 4);
1741 	/* Update bounds */
1742 	__update_reg_bounds(reg);
1743 }
1744 
1745 static bool signed_add_overflows(s64 a, s64 b)
1746 {
1747 	/* Do the add in u64, where overflow is well-defined */
1748 	s64 res = (s64)((u64)a + (u64)b);
1749 
1750 	if (b < 0)
1751 		return res > a;
1752 	return res < a;
1753 }
1754 
1755 static bool signed_sub_overflows(s64 a, s64 b)
1756 {
1757 	/* Do the sub in u64, where overflow is well-defined */
1758 	s64 res = (s64)((u64)a - (u64)b);
1759 
1760 	if (b < 0)
1761 		return res < a;
1762 	return res > a;
1763 }
1764 
1765 /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
1766  * Caller should also handle BPF_MOV case separately.
1767  * If we return -EACCES, caller may want to try again treating pointer as a
1768  * scalar.  So we only emit a diagnostic if !env->allow_ptr_leaks.
1769  */
1770 static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
1771 				   struct bpf_insn *insn,
1772 				   const struct bpf_reg_state *ptr_reg,
1773 				   const struct bpf_reg_state *off_reg)
1774 {
1775 	struct bpf_reg_state *regs = env->cur_state.regs, *dst_reg;
1776 	bool known = tnum_is_const(off_reg->var_off);
1777 	s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value,
1778 	    smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
1779 	u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
1780 	    umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
1781 	u8 opcode = BPF_OP(insn->code);
1782 	u32 dst = insn->dst_reg;
1783 
1784 	dst_reg = &regs[dst];
1785 
1786 	if (WARN_ON_ONCE(known && (smin_val != smax_val))) {
1787 		print_verifier_state(&env->cur_state);
1788 		verbose("verifier internal error: known but bad sbounds\n");
1789 		return -EINVAL;
1790 	}
1791 	if (WARN_ON_ONCE(known && (umin_val != umax_val))) {
1792 		print_verifier_state(&env->cur_state);
1793 		verbose("verifier internal error: known but bad ubounds\n");
1794 		return -EINVAL;
1795 	}
1796 
1797 	if (BPF_CLASS(insn->code) != BPF_ALU64) {
1798 		/* 32-bit ALU ops on pointers produce (meaningless) scalars */
1799 		if (!env->allow_ptr_leaks)
1800 			verbose("R%d 32-bit pointer arithmetic prohibited\n",
1801 				dst);
1802 		return -EACCES;
1803 	}
1804 
1805 	if (ptr_reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
1806 		if (!env->allow_ptr_leaks)
1807 			verbose("R%d pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL prohibited, null-check it first\n",
1808 				dst);
1809 		return -EACCES;
1810 	}
1811 	if (ptr_reg->type == CONST_PTR_TO_MAP) {
1812 		if (!env->allow_ptr_leaks)
1813 			verbose("R%d pointer arithmetic on CONST_PTR_TO_MAP prohibited\n",
1814 				dst);
1815 		return -EACCES;
1816 	}
1817 	if (ptr_reg->type == PTR_TO_PACKET_END) {
1818 		if (!env->allow_ptr_leaks)
1819 			verbose("R%d pointer arithmetic on PTR_TO_PACKET_END prohibited\n",
1820 				dst);
1821 		return -EACCES;
1822 	}
1823 
1824 	/* In case of 'scalar += pointer', dst_reg inherits pointer type and id.
1825 	 * The id may be overwritten later if we create a new variable offset.
1826 	 */
1827 	dst_reg->type = ptr_reg->type;
1828 	dst_reg->id = ptr_reg->id;
1829 
1830 	switch (opcode) {
1831 	case BPF_ADD:
1832 		/* We can take a fixed offset as long as it doesn't overflow
1833 		 * the s32 'off' field
1834 		 */
1835 		if (known && (ptr_reg->off + smin_val ==
1836 			      (s64)(s32)(ptr_reg->off + smin_val))) {
1837 			/* pointer += K.  Accumulate it into fixed offset */
1838 			dst_reg->smin_value = smin_ptr;
1839 			dst_reg->smax_value = smax_ptr;
1840 			dst_reg->umin_value = umin_ptr;
1841 			dst_reg->umax_value = umax_ptr;
1842 			dst_reg->var_off = ptr_reg->var_off;
1843 			dst_reg->off = ptr_reg->off + smin_val;
1844 			dst_reg->range = ptr_reg->range;
1845 			break;
1846 		}
1847 		/* A new variable offset is created.  Note that off_reg->off
1848 		 * == 0, since it's a scalar.
1849 		 * dst_reg gets the pointer type and since some positive
1850 		 * integer value was added to the pointer, give it a new 'id'
1851 		 * if it's a PTR_TO_PACKET.
1852 		 * this creates a new 'base' pointer, off_reg (variable) gets
1853 		 * added into the variable offset, and we copy the fixed offset
1854 		 * from ptr_reg.
1855 		 */
1856 		if (signed_add_overflows(smin_ptr, smin_val) ||
1857 		    signed_add_overflows(smax_ptr, smax_val)) {
1858 			dst_reg->smin_value = S64_MIN;
1859 			dst_reg->smax_value = S64_MAX;
1860 		} else {
1861 			dst_reg->smin_value = smin_ptr + smin_val;
1862 			dst_reg->smax_value = smax_ptr + smax_val;
1863 		}
1864 		if (umin_ptr + umin_val < umin_ptr ||
1865 		    umax_ptr + umax_val < umax_ptr) {
1866 			dst_reg->umin_value = 0;
1867 			dst_reg->umax_value = U64_MAX;
1868 		} else {
1869 			dst_reg->umin_value = umin_ptr + umin_val;
1870 			dst_reg->umax_value = umax_ptr + umax_val;
1871 		}
1872 		dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off);
1873 		dst_reg->off = ptr_reg->off;
1874 		if (ptr_reg->type == PTR_TO_PACKET) {
1875 			dst_reg->id = ++env->id_gen;
1876 			/* something was added to pkt_ptr, set range to zero */
1877 			dst_reg->range = 0;
1878 		}
1879 		break;
1880 	case BPF_SUB:
1881 		if (dst_reg == off_reg) {
1882 			/* scalar -= pointer.  Creates an unknown scalar */
1883 			if (!env->allow_ptr_leaks)
1884 				verbose("R%d tried to subtract pointer from scalar\n",
1885 					dst);
1886 			return -EACCES;
1887 		}
1888 		/* We don't allow subtraction from FP, because (according to
1889 		 * test_verifier.c test "invalid fp arithmetic", JITs might not
1890 		 * be able to deal with it.
1891 		 */
1892 		if (ptr_reg->type == PTR_TO_STACK) {
1893 			if (!env->allow_ptr_leaks)
1894 				verbose("R%d subtraction from stack pointer prohibited\n",
1895 					dst);
1896 			return -EACCES;
1897 		}
1898 		if (known && (ptr_reg->off - smin_val ==
1899 			      (s64)(s32)(ptr_reg->off - smin_val))) {
1900 			/* pointer -= K.  Subtract it from fixed offset */
1901 			dst_reg->smin_value = smin_ptr;
1902 			dst_reg->smax_value = smax_ptr;
1903 			dst_reg->umin_value = umin_ptr;
1904 			dst_reg->umax_value = umax_ptr;
1905 			dst_reg->var_off = ptr_reg->var_off;
1906 			dst_reg->id = ptr_reg->id;
1907 			dst_reg->off = ptr_reg->off - smin_val;
1908 			dst_reg->range = ptr_reg->range;
1909 			break;
1910 		}
1911 		/* A new variable offset is created.  If the subtrahend is known
1912 		 * nonnegative, then any reg->range we had before is still good.
1913 		 */
1914 		if (signed_sub_overflows(smin_ptr, smax_val) ||
1915 		    signed_sub_overflows(smax_ptr, smin_val)) {
1916 			/* Overflow possible, we know nothing */
1917 			dst_reg->smin_value = S64_MIN;
1918 			dst_reg->smax_value = S64_MAX;
1919 		} else {
1920 			dst_reg->smin_value = smin_ptr - smax_val;
1921 			dst_reg->smax_value = smax_ptr - smin_val;
1922 		}
1923 		if (umin_ptr < umax_val) {
1924 			/* Overflow possible, we know nothing */
1925 			dst_reg->umin_value = 0;
1926 			dst_reg->umax_value = U64_MAX;
1927 		} else {
1928 			/* Cannot overflow (as long as bounds are consistent) */
1929 			dst_reg->umin_value = umin_ptr - umax_val;
1930 			dst_reg->umax_value = umax_ptr - umin_val;
1931 		}
1932 		dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off);
1933 		dst_reg->off = ptr_reg->off;
1934 		if (ptr_reg->type == PTR_TO_PACKET) {
1935 			dst_reg->id = ++env->id_gen;
1936 			/* something was added to pkt_ptr, set range to zero */
1937 			if (smin_val < 0)
1938 				dst_reg->range = 0;
1939 		}
1940 		break;
1941 	case BPF_AND:
1942 	case BPF_OR:
1943 	case BPF_XOR:
1944 		/* bitwise ops on pointers are troublesome, prohibit for now.
1945 		 * (However, in principle we could allow some cases, e.g.
1946 		 * ptr &= ~3 which would reduce min_value by 3.)
1947 		 */
1948 		if (!env->allow_ptr_leaks)
1949 			verbose("R%d bitwise operator %s on pointer prohibited\n",
1950 				dst, bpf_alu_string[opcode >> 4]);
1951 		return -EACCES;
1952 	default:
1953 		/* other operators (e.g. MUL,LSH) produce non-pointer results */
1954 		if (!env->allow_ptr_leaks)
1955 			verbose("R%d pointer arithmetic with %s operator prohibited\n",
1956 				dst, bpf_alu_string[opcode >> 4]);
1957 		return -EACCES;
1958 	}
1959 
1960 	__update_reg_bounds(dst_reg);
1961 	__reg_deduce_bounds(dst_reg);
1962 	__reg_bound_offset(dst_reg);
1963 	return 0;
1964 }
1965 
1966 static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
1967 				      struct bpf_insn *insn,
1968 				      struct bpf_reg_state *dst_reg,
1969 				      struct bpf_reg_state src_reg)
1970 {
1971 	struct bpf_reg_state *regs = env->cur_state.regs;
1972 	u8 opcode = BPF_OP(insn->code);
1973 	bool src_known, dst_known;
1974 	s64 smin_val, smax_val;
1975 	u64 umin_val, umax_val;
1976 
1977 	if (BPF_CLASS(insn->code) != BPF_ALU64) {
1978 		/* 32-bit ALU ops are (32,32)->64 */
1979 		coerce_reg_to_32(dst_reg);
1980 		coerce_reg_to_32(&src_reg);
1981 	}
1982 	smin_val = src_reg.smin_value;
1983 	smax_val = src_reg.smax_value;
1984 	umin_val = src_reg.umin_value;
1985 	umax_val = src_reg.umax_value;
1986 	src_known = tnum_is_const(src_reg.var_off);
1987 	dst_known = tnum_is_const(dst_reg->var_off);
1988 
1989 	switch (opcode) {
1990 	case BPF_ADD:
1991 		if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
1992 		    signed_add_overflows(dst_reg->smax_value, smax_val)) {
1993 			dst_reg->smin_value = S64_MIN;
1994 			dst_reg->smax_value = S64_MAX;
1995 		} else {
1996 			dst_reg->smin_value += smin_val;
1997 			dst_reg->smax_value += smax_val;
1998 		}
1999 		if (dst_reg->umin_value + umin_val < umin_val ||
2000 		    dst_reg->umax_value + umax_val < umax_val) {
2001 			dst_reg->umin_value = 0;
2002 			dst_reg->umax_value = U64_MAX;
2003 		} else {
2004 			dst_reg->umin_value += umin_val;
2005 			dst_reg->umax_value += umax_val;
2006 		}
2007 		dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);
2008 		break;
2009 	case BPF_SUB:
2010 		if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
2011 		    signed_sub_overflows(dst_reg->smax_value, smin_val)) {
2012 			/* Overflow possible, we know nothing */
2013 			dst_reg->smin_value = S64_MIN;
2014 			dst_reg->smax_value = S64_MAX;
2015 		} else {
2016 			dst_reg->smin_value -= smax_val;
2017 			dst_reg->smax_value -= smin_val;
2018 		}
2019 		if (dst_reg->umin_value < umax_val) {
2020 			/* Overflow possible, we know nothing */
2021 			dst_reg->umin_value = 0;
2022 			dst_reg->umax_value = U64_MAX;
2023 		} else {
2024 			/* Cannot overflow (as long as bounds are consistent) */
2025 			dst_reg->umin_value -= umax_val;
2026 			dst_reg->umax_value -= umin_val;
2027 		}
2028 		dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off);
2029 		break;
2030 	case BPF_MUL:
2031 		dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off);
2032 		if (smin_val < 0 || dst_reg->smin_value < 0) {
2033 			/* Ain't nobody got time to multiply that sign */
2034 			__mark_reg_unbounded(dst_reg);
2035 			__update_reg_bounds(dst_reg);
2036 			break;
2037 		}
2038 		/* Both values are positive, so we can work with unsigned and
2039 		 * copy the result to signed (unless it exceeds S64_MAX).
2040 		 */
2041 		if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) {
2042 			/* Potential overflow, we know nothing */
2043 			__mark_reg_unbounded(dst_reg);
2044 			/* (except what we can learn from the var_off) */
2045 			__update_reg_bounds(dst_reg);
2046 			break;
2047 		}
2048 		dst_reg->umin_value *= umin_val;
2049 		dst_reg->umax_value *= umax_val;
2050 		if (dst_reg->umax_value > S64_MAX) {
2051 			/* Overflow possible, we know nothing */
2052 			dst_reg->smin_value = S64_MIN;
2053 			dst_reg->smax_value = S64_MAX;
2054 		} else {
2055 			dst_reg->smin_value = dst_reg->umin_value;
2056 			dst_reg->smax_value = dst_reg->umax_value;
2057 		}
2058 		break;
2059 	case BPF_AND:
2060 		if (src_known && dst_known) {
2061 			__mark_reg_known(dst_reg, dst_reg->var_off.value &
2062 						  src_reg.var_off.value);
2063 			break;
2064 		}
2065 		/* We get our minimum from the var_off, since that's inherently
2066 		 * bitwise.  Our maximum is the minimum of the operands' maxima.
2067 		 */
2068 		dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off);
2069 		dst_reg->umin_value = dst_reg->var_off.value;
2070 		dst_reg->umax_value = min(dst_reg->umax_value, umax_val);
2071 		if (dst_reg->smin_value < 0 || smin_val < 0) {
2072 			/* Lose signed bounds when ANDing negative numbers,
2073 			 * ain't nobody got time for that.
2074 			 */
2075 			dst_reg->smin_value = S64_MIN;
2076 			dst_reg->smax_value = S64_MAX;
2077 		} else {
2078 			/* ANDing two positives gives a positive, so safe to
2079 			 * cast result into s64.
2080 			 */
2081 			dst_reg->smin_value = dst_reg->umin_value;
2082 			dst_reg->smax_value = dst_reg->umax_value;
2083 		}
2084 		/* We may learn something more from the var_off */
2085 		__update_reg_bounds(dst_reg);
2086 		break;
2087 	case BPF_OR:
2088 		if (src_known && dst_known) {
2089 			__mark_reg_known(dst_reg, dst_reg->var_off.value |
2090 						  src_reg.var_off.value);
2091 			break;
2092 		}
2093 		/* We get our maximum from the var_off, and our minimum is the
2094 		 * maximum of the operands' minima
2095 		 */
2096 		dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off);
2097 		dst_reg->umin_value = max(dst_reg->umin_value, umin_val);
2098 		dst_reg->umax_value = dst_reg->var_off.value |
2099 				      dst_reg->var_off.mask;
2100 		if (dst_reg->smin_value < 0 || smin_val < 0) {
2101 			/* Lose signed bounds when ORing negative numbers,
2102 			 * ain't nobody got time for that.
2103 			 */
2104 			dst_reg->smin_value = S64_MIN;
2105 			dst_reg->smax_value = S64_MAX;
2106 		} else {
2107 			/* ORing two positives gives a positive, so safe to
2108 			 * cast result into s64.
2109 			 */
2110 			dst_reg->smin_value = dst_reg->umin_value;
2111 			dst_reg->smax_value = dst_reg->umax_value;
2112 		}
2113 		/* We may learn something more from the var_off */
2114 		__update_reg_bounds(dst_reg);
2115 		break;
2116 	case BPF_LSH:
2117 		if (umax_val > 63) {
2118 			/* Shifts greater than 63 are undefined.  This includes
2119 			 * shifts by a negative number.
2120 			 */
2121 			mark_reg_unknown(regs, insn->dst_reg);
2122 			break;
2123 		}
2124 		/* We lose all sign bit information (except what we can pick
2125 		 * up from var_off)
2126 		 */
2127 		dst_reg->smin_value = S64_MIN;
2128 		dst_reg->smax_value = S64_MAX;
2129 		/* If we might shift our top bit out, then we know nothing */
2130 		if (dst_reg->umax_value > 1ULL << (63 - umax_val)) {
2131 			dst_reg->umin_value = 0;
2132 			dst_reg->umax_value = U64_MAX;
2133 		} else {
2134 			dst_reg->umin_value <<= umin_val;
2135 			dst_reg->umax_value <<= umax_val;
2136 		}
2137 		if (src_known)
2138 			dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val);
2139 		else
2140 			dst_reg->var_off = tnum_lshift(tnum_unknown, umin_val);
2141 		/* We may learn something more from the var_off */
2142 		__update_reg_bounds(dst_reg);
2143 		break;
2144 	case BPF_RSH:
2145 		if (umax_val > 63) {
2146 			/* Shifts greater than 63 are undefined.  This includes
2147 			 * shifts by a negative number.
2148 			 */
2149 			mark_reg_unknown(regs, insn->dst_reg);
2150 			break;
2151 		}
2152 		/* BPF_RSH is an unsigned shift, so make the appropriate casts */
2153 		if (dst_reg->smin_value < 0) {
2154 			if (umin_val) {
2155 				/* Sign bit will be cleared */
2156 				dst_reg->smin_value = 0;
2157 			} else {
2158 				/* Lost sign bit information */
2159 				dst_reg->smin_value = S64_MIN;
2160 				dst_reg->smax_value = S64_MAX;
2161 			}
2162 		} else {
2163 			dst_reg->smin_value =
2164 				(u64)(dst_reg->smin_value) >> umax_val;
2165 		}
2166 		if (src_known)
2167 			dst_reg->var_off = tnum_rshift(dst_reg->var_off,
2168 						       umin_val);
2169 		else
2170 			dst_reg->var_off = tnum_rshift(tnum_unknown, umin_val);
2171 		dst_reg->umin_value >>= umax_val;
2172 		dst_reg->umax_value >>= umin_val;
2173 		/* We may learn something more from the var_off */
2174 		__update_reg_bounds(dst_reg);
2175 		break;
2176 	default:
2177 		mark_reg_unknown(regs, insn->dst_reg);
2178 		break;
2179 	}
2180 
2181 	__reg_deduce_bounds(dst_reg);
2182 	__reg_bound_offset(dst_reg);
2183 	return 0;
2184 }
2185 
2186 /* Handles ALU ops other than BPF_END, BPF_NEG and BPF_MOV: computes new min/max
2187  * and var_off.
2188  */
2189 static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
2190 				   struct bpf_insn *insn)
2191 {
2192 	struct bpf_reg_state *regs = env->cur_state.regs, *dst_reg, *src_reg;
2193 	struct bpf_reg_state *ptr_reg = NULL, off_reg = {0};
2194 	u8 opcode = BPF_OP(insn->code);
2195 	int rc;
2196 
2197 	dst_reg = &regs[insn->dst_reg];
2198 	src_reg = NULL;
2199 	if (dst_reg->type != SCALAR_VALUE)
2200 		ptr_reg = dst_reg;
2201 	if (BPF_SRC(insn->code) == BPF_X) {
2202 		src_reg = &regs[insn->src_reg];
2203 		if (src_reg->type != SCALAR_VALUE) {
2204 			if (dst_reg->type != SCALAR_VALUE) {
2205 				/* Combining two pointers by any ALU op yields
2206 				 * an arbitrary scalar.
2207 				 */
2208 				if (!env->allow_ptr_leaks) {
2209 					verbose("R%d pointer %s pointer prohibited\n",
2210 						insn->dst_reg,
2211 						bpf_alu_string[opcode >> 4]);
2212 					return -EACCES;
2213 				}
2214 				mark_reg_unknown(regs, insn->dst_reg);
2215 				return 0;
2216 			} else {
2217 				/* scalar += pointer
2218 				 * This is legal, but we have to reverse our
2219 				 * src/dest handling in computing the range
2220 				 */
2221 				rc = adjust_ptr_min_max_vals(env, insn,
2222 							     src_reg, dst_reg);
2223 				if (rc == -EACCES && env->allow_ptr_leaks) {
2224 					/* scalar += unknown scalar */
2225 					__mark_reg_unknown(&off_reg);
2226 					return adjust_scalar_min_max_vals(
2227 							env, insn,
2228 							dst_reg, off_reg);
2229 				}
2230 				return rc;
2231 			}
2232 		} else if (ptr_reg) {
2233 			/* pointer += scalar */
2234 			rc = adjust_ptr_min_max_vals(env, insn,
2235 						     dst_reg, src_reg);
2236 			if (rc == -EACCES && env->allow_ptr_leaks) {
2237 				/* unknown scalar += scalar */
2238 				__mark_reg_unknown(dst_reg);
2239 				return adjust_scalar_min_max_vals(
2240 						env, insn, dst_reg, *src_reg);
2241 			}
2242 			return rc;
2243 		}
2244 	} else {
2245 		/* Pretend the src is a reg with a known value, since we only
2246 		 * need to be able to read from this state.
2247 		 */
2248 		off_reg.type = SCALAR_VALUE;
2249 		__mark_reg_known(&off_reg, insn->imm);
2250 		src_reg = &off_reg;
2251 		if (ptr_reg) { /* pointer += K */
2252 			rc = adjust_ptr_min_max_vals(env, insn,
2253 						     ptr_reg, src_reg);
2254 			if (rc == -EACCES && env->allow_ptr_leaks) {
2255 				/* unknown scalar += K */
2256 				__mark_reg_unknown(dst_reg);
2257 				return adjust_scalar_min_max_vals(
2258 						env, insn, dst_reg, off_reg);
2259 			}
2260 			return rc;
2261 		}
2262 	}
2263 
2264 	/* Got here implies adding two SCALAR_VALUEs */
2265 	if (WARN_ON_ONCE(ptr_reg)) {
2266 		print_verifier_state(&env->cur_state);
2267 		verbose("verifier internal error: unexpected ptr_reg\n");
2268 		return -EINVAL;
2269 	}
2270 	if (WARN_ON(!src_reg)) {
2271 		print_verifier_state(&env->cur_state);
2272 		verbose("verifier internal error: no src_reg\n");
2273 		return -EINVAL;
2274 	}
2275 	return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg);
2276 }
2277 
2278 /* check validity of 32-bit and 64-bit arithmetic operations */
2279 static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
2280 {
2281 	struct bpf_reg_state *regs = env->cur_state.regs;
2282 	u8 opcode = BPF_OP(insn->code);
2283 	int err;
2284 
2285 	if (opcode == BPF_END || opcode == BPF_NEG) {
2286 		if (opcode == BPF_NEG) {
2287 			if (BPF_SRC(insn->code) != 0 ||
2288 			    insn->src_reg != BPF_REG_0 ||
2289 			    insn->off != 0 || insn->imm != 0) {
2290 				verbose("BPF_NEG uses reserved fields\n");
2291 				return -EINVAL;
2292 			}
2293 		} else {
2294 			if (insn->src_reg != BPF_REG_0 || insn->off != 0 ||
2295 			    (insn->imm != 16 && insn->imm != 32 && insn->imm != 64)) {
2296 				verbose("BPF_END uses reserved fields\n");
2297 				return -EINVAL;
2298 			}
2299 		}
2300 
2301 		/* check src operand */
2302 		err = check_reg_arg(env, insn->dst_reg, SRC_OP);
2303 		if (err)
2304 			return err;
2305 
2306 		if (is_pointer_value(env, insn->dst_reg)) {
2307 			verbose("R%d pointer arithmetic prohibited\n",
2308 				insn->dst_reg);
2309 			return -EACCES;
2310 		}
2311 
2312 		/* check dest operand */
2313 		err = check_reg_arg(env, insn->dst_reg, DST_OP);
2314 		if (err)
2315 			return err;
2316 
2317 	} else if (opcode == BPF_MOV) {
2318 
2319 		if (BPF_SRC(insn->code) == BPF_X) {
2320 			if (insn->imm != 0 || insn->off != 0) {
2321 				verbose("BPF_MOV uses reserved fields\n");
2322 				return -EINVAL;
2323 			}
2324 
2325 			/* check src operand */
2326 			err = check_reg_arg(env, insn->src_reg, SRC_OP);
2327 			if (err)
2328 				return err;
2329 		} else {
2330 			if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
2331 				verbose("BPF_MOV uses reserved fields\n");
2332 				return -EINVAL;
2333 			}
2334 		}
2335 
2336 		/* check dest operand */
2337 		err = check_reg_arg(env, insn->dst_reg, DST_OP);
2338 		if (err)
2339 			return err;
2340 
2341 		if (BPF_SRC(insn->code) == BPF_X) {
2342 			if (BPF_CLASS(insn->code) == BPF_ALU64) {
2343 				/* case: R1 = R2
2344 				 * copy register state to dest reg
2345 				 */
2346 				regs[insn->dst_reg] = regs[insn->src_reg];
2347 			} else {
2348 				/* R1 = (u32) R2 */
2349 				if (is_pointer_value(env, insn->src_reg)) {
2350 					verbose("R%d partial copy of pointer\n",
2351 						insn->src_reg);
2352 					return -EACCES;
2353 				}
2354 				mark_reg_unknown(regs, insn->dst_reg);
2355 				/* high 32 bits are known zero. */
2356 				regs[insn->dst_reg].var_off = tnum_cast(
2357 						regs[insn->dst_reg].var_off, 4);
2358 				__update_reg_bounds(&regs[insn->dst_reg]);
2359 			}
2360 		} else {
2361 			/* case: R = imm
2362 			 * remember the value we stored into this reg
2363 			 */
2364 			regs[insn->dst_reg].type = SCALAR_VALUE;
2365 			__mark_reg_known(regs + insn->dst_reg, insn->imm);
2366 		}
2367 
2368 	} else if (opcode > BPF_END) {
2369 		verbose("invalid BPF_ALU opcode %x\n", opcode);
2370 		return -EINVAL;
2371 
2372 	} else {	/* all other ALU ops: and, sub, xor, add, ... */
2373 
2374 		if (BPF_SRC(insn->code) == BPF_X) {
2375 			if (insn->imm != 0 || insn->off != 0) {
2376 				verbose("BPF_ALU uses reserved fields\n");
2377 				return -EINVAL;
2378 			}
2379 			/* check src1 operand */
2380 			err = check_reg_arg(env, insn->src_reg, SRC_OP);
2381 			if (err)
2382 				return err;
2383 		} else {
2384 			if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
2385 				verbose("BPF_ALU uses reserved fields\n");
2386 				return -EINVAL;
2387 			}
2388 		}
2389 
2390 		/* check src2 operand */
2391 		err = check_reg_arg(env, insn->dst_reg, SRC_OP);
2392 		if (err)
2393 			return err;
2394 
2395 		if ((opcode == BPF_MOD || opcode == BPF_DIV) &&
2396 		    BPF_SRC(insn->code) == BPF_K && insn->imm == 0) {
2397 			verbose("div by zero\n");
2398 			return -EINVAL;
2399 		}
2400 
2401 		if ((opcode == BPF_LSH || opcode == BPF_RSH ||
2402 		     opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
2403 			int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;
2404 
2405 			if (insn->imm < 0 || insn->imm >= size) {
2406 				verbose("invalid shift %d\n", insn->imm);
2407 				return -EINVAL;
2408 			}
2409 		}
2410 
2411 		/* check dest operand */
2412 		err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
2413 		if (err)
2414 			return err;
2415 
2416 		return adjust_reg_min_max_vals(env, insn);
2417 	}
2418 
2419 	return 0;
2420 }
2421 
2422 static void find_good_pkt_pointers(struct bpf_verifier_state *state,
2423 				   struct bpf_reg_state *dst_reg)
2424 {
2425 	struct bpf_reg_state *regs = state->regs, *reg;
2426 	int i;
2427 
2428 	if (dst_reg->off < 0)
2429 		/* This doesn't give us any range */
2430 		return;
2431 
2432 	if (dst_reg->umax_value > MAX_PACKET_OFF ||
2433 	    dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF)
2434 		/* Risk of overflow.  For instance, ptr + (1<<63) may be less
2435 		 * than pkt_end, but that's because it's also less than pkt.
2436 		 */
2437 		return;
2438 
2439 	/* LLVM can generate four kind of checks:
2440 	 *
2441 	 * Type 1/2:
2442 	 *
2443 	 *   r2 = r3;
2444 	 *   r2 += 8;
2445 	 *   if (r2 > pkt_end) goto <handle exception>
2446 	 *   <access okay>
2447 	 *
2448 	 *   r2 = r3;
2449 	 *   r2 += 8;
2450 	 *   if (r2 < pkt_end) goto <access okay>
2451 	 *   <handle exception>
2452 	 *
2453 	 *   Where:
2454 	 *     r2 == dst_reg, pkt_end == src_reg
2455 	 *     r2=pkt(id=n,off=8,r=0)
2456 	 *     r3=pkt(id=n,off=0,r=0)
2457 	 *
2458 	 * Type 3/4:
2459 	 *
2460 	 *   r2 = r3;
2461 	 *   r2 += 8;
2462 	 *   if (pkt_end >= r2) goto <access okay>
2463 	 *   <handle exception>
2464 	 *
2465 	 *   r2 = r3;
2466 	 *   r2 += 8;
2467 	 *   if (pkt_end <= r2) goto <handle exception>
2468 	 *   <access okay>
2469 	 *
2470 	 *   Where:
2471 	 *     pkt_end == dst_reg, r2 == src_reg
2472 	 *     r2=pkt(id=n,off=8,r=0)
2473 	 *     r3=pkt(id=n,off=0,r=0)
2474 	 *
2475 	 * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8)
2476 	 * so that range of bytes [r3, r3 + 8) is safe to access.
2477 	 */
2478 
2479 	/* If our ids match, then we must have the same max_value.  And we
2480 	 * don't care about the other reg's fixed offset, since if it's too big
2481 	 * the range won't allow anything.
2482 	 * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16.
2483 	 */
2484 	for (i = 0; i < MAX_BPF_REG; i++)
2485 		if (regs[i].type == PTR_TO_PACKET && regs[i].id == dst_reg->id)
2486 			/* keep the maximum range already checked */
2487 			regs[i].range = max_t(u16, regs[i].range, dst_reg->off);
2488 
2489 	for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
2490 		if (state->stack_slot_type[i] != STACK_SPILL)
2491 			continue;
2492 		reg = &state->spilled_regs[i / BPF_REG_SIZE];
2493 		if (reg->type == PTR_TO_PACKET && reg->id == dst_reg->id)
2494 			reg->range = max_t(u16, reg->range, dst_reg->off);
2495 	}
2496 }
2497 
2498 /* Adjusts the register min/max values in the case that the dst_reg is the
2499  * variable register that we are working on, and src_reg is a constant or we're
2500  * simply doing a BPF_K check.
2501  * In JEQ/JNE cases we also adjust the var_off values.
2502  */
2503 static void reg_set_min_max(struct bpf_reg_state *true_reg,
2504 			    struct bpf_reg_state *false_reg, u64 val,
2505 			    u8 opcode)
2506 {
2507 	/* If the dst_reg is a pointer, we can't learn anything about its
2508 	 * variable offset from the compare (unless src_reg were a pointer into
2509 	 * the same object, but we don't bother with that.
2510 	 * Since false_reg and true_reg have the same type by construction, we
2511 	 * only need to check one of them for pointerness.
2512 	 */
2513 	if (__is_pointer_value(false, false_reg))
2514 		return;
2515 
2516 	switch (opcode) {
2517 	case BPF_JEQ:
2518 		/* If this is false then we know nothing Jon Snow, but if it is
2519 		 * true then we know for sure.
2520 		 */
2521 		__mark_reg_known(true_reg, val);
2522 		break;
2523 	case BPF_JNE:
2524 		/* If this is true we know nothing Jon Snow, but if it is false
2525 		 * we know the value for sure;
2526 		 */
2527 		__mark_reg_known(false_reg, val);
2528 		break;
2529 	case BPF_JGT:
2530 		false_reg->umax_value = min(false_reg->umax_value, val);
2531 		true_reg->umin_value = max(true_reg->umin_value, val + 1);
2532 		break;
2533 	case BPF_JSGT:
2534 		false_reg->smax_value = min_t(s64, false_reg->smax_value, val);
2535 		true_reg->smin_value = max_t(s64, true_reg->smin_value, val + 1);
2536 		break;
2537 	case BPF_JLT:
2538 		false_reg->umin_value = max(false_reg->umin_value, val);
2539 		true_reg->umax_value = min(true_reg->umax_value, val - 1);
2540 		break;
2541 	case BPF_JSLT:
2542 		false_reg->smin_value = max_t(s64, false_reg->smin_value, val);
2543 		true_reg->smax_value = min_t(s64, true_reg->smax_value, val - 1);
2544 		break;
2545 	case BPF_JGE:
2546 		false_reg->umax_value = min(false_reg->umax_value, val - 1);
2547 		true_reg->umin_value = max(true_reg->umin_value, val);
2548 		break;
2549 	case BPF_JSGE:
2550 		false_reg->smax_value = min_t(s64, false_reg->smax_value, val - 1);
2551 		true_reg->smin_value = max_t(s64, true_reg->smin_value, val);
2552 		break;
2553 	case BPF_JLE:
2554 		false_reg->umin_value = max(false_reg->umin_value, val + 1);
2555 		true_reg->umax_value = min(true_reg->umax_value, val);
2556 		break;
2557 	case BPF_JSLE:
2558 		false_reg->smin_value = max_t(s64, false_reg->smin_value, val + 1);
2559 		true_reg->smax_value = min_t(s64, true_reg->smax_value, val);
2560 		break;
2561 	default:
2562 		break;
2563 	}
2564 
2565 	__reg_deduce_bounds(false_reg);
2566 	__reg_deduce_bounds(true_reg);
2567 	/* We might have learned some bits from the bounds. */
2568 	__reg_bound_offset(false_reg);
2569 	__reg_bound_offset(true_reg);
2570 	/* Intersecting with the old var_off might have improved our bounds
2571 	 * slightly.  e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
2572 	 * then new var_off is (0; 0x7f...fc) which improves our umax.
2573 	 */
2574 	__update_reg_bounds(false_reg);
2575 	__update_reg_bounds(true_reg);
2576 }
2577 
2578 /* Same as above, but for the case that dst_reg holds a constant and src_reg is
2579  * the variable reg.
2580  */
2581 static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
2582 				struct bpf_reg_state *false_reg, u64 val,
2583 				u8 opcode)
2584 {
2585 	if (__is_pointer_value(false, false_reg))
2586 		return;
2587 
2588 	switch (opcode) {
2589 	case BPF_JEQ:
2590 		/* If this is false then we know nothing Jon Snow, but if it is
2591 		 * true then we know for sure.
2592 		 */
2593 		__mark_reg_known(true_reg, val);
2594 		break;
2595 	case BPF_JNE:
2596 		/* If this is true we know nothing Jon Snow, but if it is false
2597 		 * we know the value for sure;
2598 		 */
2599 		__mark_reg_known(false_reg, val);
2600 		break;
2601 	case BPF_JGT:
2602 		true_reg->umax_value = min(true_reg->umax_value, val - 1);
2603 		false_reg->umin_value = max(false_reg->umin_value, val);
2604 		break;
2605 	case BPF_JSGT:
2606 		true_reg->smax_value = min_t(s64, true_reg->smax_value, val - 1);
2607 		false_reg->smin_value = max_t(s64, false_reg->smin_value, val);
2608 		break;
2609 	case BPF_JLT:
2610 		true_reg->umin_value = max(true_reg->umin_value, val + 1);
2611 		false_reg->umax_value = min(false_reg->umax_value, val);
2612 		break;
2613 	case BPF_JSLT:
2614 		true_reg->smin_value = max_t(s64, true_reg->smin_value, val + 1);
2615 		false_reg->smax_value = min_t(s64, false_reg->smax_value, val);
2616 		break;
2617 	case BPF_JGE:
2618 		true_reg->umax_value = min(true_reg->umax_value, val);
2619 		false_reg->umin_value = max(false_reg->umin_value, val + 1);
2620 		break;
2621 	case BPF_JSGE:
2622 		true_reg->smax_value = min_t(s64, true_reg->smax_value, val);
2623 		false_reg->smin_value = max_t(s64, false_reg->smin_value, val + 1);
2624 		break;
2625 	case BPF_JLE:
2626 		true_reg->umin_value = max(true_reg->umin_value, val);
2627 		false_reg->umax_value = min(false_reg->umax_value, val - 1);
2628 		break;
2629 	case BPF_JSLE:
2630 		true_reg->smin_value = max_t(s64, true_reg->smin_value, val);
2631 		false_reg->smax_value = min_t(s64, false_reg->smax_value, val - 1);
2632 		break;
2633 	default:
2634 		break;
2635 	}
2636 
2637 	__reg_deduce_bounds(false_reg);
2638 	__reg_deduce_bounds(true_reg);
2639 	/* We might have learned some bits from the bounds. */
2640 	__reg_bound_offset(false_reg);
2641 	__reg_bound_offset(true_reg);
2642 	/* Intersecting with the old var_off might have improved our bounds
2643 	 * slightly.  e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
2644 	 * then new var_off is (0; 0x7f...fc) which improves our umax.
2645 	 */
2646 	__update_reg_bounds(false_reg);
2647 	__update_reg_bounds(true_reg);
2648 }
2649 
2650 /* Regs are known to be equal, so intersect their min/max/var_off */
2651 static void __reg_combine_min_max(struct bpf_reg_state *src_reg,
2652 				  struct bpf_reg_state *dst_reg)
2653 {
2654 	src_reg->umin_value = dst_reg->umin_value = max(src_reg->umin_value,
2655 							dst_reg->umin_value);
2656 	src_reg->umax_value = dst_reg->umax_value = min(src_reg->umax_value,
2657 							dst_reg->umax_value);
2658 	src_reg->smin_value = dst_reg->smin_value = max(src_reg->smin_value,
2659 							dst_reg->smin_value);
2660 	src_reg->smax_value = dst_reg->smax_value = min(src_reg->smax_value,
2661 							dst_reg->smax_value);
2662 	src_reg->var_off = dst_reg->var_off = tnum_intersect(src_reg->var_off,
2663 							     dst_reg->var_off);
2664 	/* We might have learned new bounds from the var_off. */
2665 	__update_reg_bounds(src_reg);
2666 	__update_reg_bounds(dst_reg);
2667 	/* We might have learned something about the sign bit. */
2668 	__reg_deduce_bounds(src_reg);
2669 	__reg_deduce_bounds(dst_reg);
2670 	/* We might have learned some bits from the bounds. */
2671 	__reg_bound_offset(src_reg);
2672 	__reg_bound_offset(dst_reg);
2673 	/* Intersecting with the old var_off might have improved our bounds
2674 	 * slightly.  e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
2675 	 * then new var_off is (0; 0x7f...fc) which improves our umax.
2676 	 */
2677 	__update_reg_bounds(src_reg);
2678 	__update_reg_bounds(dst_reg);
2679 }
2680 
2681 static void reg_combine_min_max(struct bpf_reg_state *true_src,
2682 				struct bpf_reg_state *true_dst,
2683 				struct bpf_reg_state *false_src,
2684 				struct bpf_reg_state *false_dst,
2685 				u8 opcode)
2686 {
2687 	switch (opcode) {
2688 	case BPF_JEQ:
2689 		__reg_combine_min_max(true_src, true_dst);
2690 		break;
2691 	case BPF_JNE:
2692 		__reg_combine_min_max(false_src, false_dst);
2693 		break;
2694 	}
2695 }
2696 
2697 static void mark_map_reg(struct bpf_reg_state *regs, u32 regno, u32 id,
2698 			 bool is_null)
2699 {
2700 	struct bpf_reg_state *reg = &regs[regno];
2701 
2702 	if (reg->type == PTR_TO_MAP_VALUE_OR_NULL && reg->id == id) {
2703 		/* Old offset (both fixed and variable parts) should
2704 		 * have been known-zero, because we don't allow pointer
2705 		 * arithmetic on pointers that might be NULL.
2706 		 */
2707 		if (WARN_ON_ONCE(reg->smin_value || reg->smax_value ||
2708 				 !tnum_equals_const(reg->var_off, 0) ||
2709 				 reg->off)) {
2710 			__mark_reg_known_zero(reg);
2711 			reg->off = 0;
2712 		}
2713 		if (is_null) {
2714 			reg->type = SCALAR_VALUE;
2715 		} else if (reg->map_ptr->inner_map_meta) {
2716 			reg->type = CONST_PTR_TO_MAP;
2717 			reg->map_ptr = reg->map_ptr->inner_map_meta;
2718 		} else {
2719 			reg->type = PTR_TO_MAP_VALUE;
2720 		}
2721 		/* We don't need id from this point onwards anymore, thus we
2722 		 * should better reset it, so that state pruning has chances
2723 		 * to take effect.
2724 		 */
2725 		reg->id = 0;
2726 	}
2727 }
2728 
2729 /* The logic is similar to find_good_pkt_pointers(), both could eventually
2730  * be folded together at some point.
2731  */
2732 static void mark_map_regs(struct bpf_verifier_state *state, u32 regno,
2733 			  bool is_null)
2734 {
2735 	struct bpf_reg_state *regs = state->regs;
2736 	u32 id = regs[regno].id;
2737 	int i;
2738 
2739 	for (i = 0; i < MAX_BPF_REG; i++)
2740 		mark_map_reg(regs, i, id, is_null);
2741 
2742 	for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
2743 		if (state->stack_slot_type[i] != STACK_SPILL)
2744 			continue;
2745 		mark_map_reg(state->spilled_regs, i / BPF_REG_SIZE, id, is_null);
2746 	}
2747 }
2748 
2749 static int check_cond_jmp_op(struct bpf_verifier_env *env,
2750 			     struct bpf_insn *insn, int *insn_idx)
2751 {
2752 	struct bpf_verifier_state *other_branch, *this_branch = &env->cur_state;
2753 	struct bpf_reg_state *regs = this_branch->regs, *dst_reg;
2754 	u8 opcode = BPF_OP(insn->code);
2755 	int err;
2756 
2757 	if (opcode > BPF_JSLE) {
2758 		verbose("invalid BPF_JMP opcode %x\n", opcode);
2759 		return -EINVAL;
2760 	}
2761 
2762 	if (BPF_SRC(insn->code) == BPF_X) {
2763 		if (insn->imm != 0) {
2764 			verbose("BPF_JMP uses reserved fields\n");
2765 			return -EINVAL;
2766 		}
2767 
2768 		/* check src1 operand */
2769 		err = check_reg_arg(env, insn->src_reg, SRC_OP);
2770 		if (err)
2771 			return err;
2772 
2773 		if (is_pointer_value(env, insn->src_reg)) {
2774 			verbose("R%d pointer comparison prohibited\n",
2775 				insn->src_reg);
2776 			return -EACCES;
2777 		}
2778 	} else {
2779 		if (insn->src_reg != BPF_REG_0) {
2780 			verbose("BPF_JMP uses reserved fields\n");
2781 			return -EINVAL;
2782 		}
2783 	}
2784 
2785 	/* check src2 operand */
2786 	err = check_reg_arg(env, insn->dst_reg, SRC_OP);
2787 	if (err)
2788 		return err;
2789 
2790 	dst_reg = &regs[insn->dst_reg];
2791 
2792 	/* detect if R == 0 where R was initialized to zero earlier */
2793 	if (BPF_SRC(insn->code) == BPF_K &&
2794 	    (opcode == BPF_JEQ || opcode == BPF_JNE) &&
2795 	    dst_reg->type == SCALAR_VALUE &&
2796 	    tnum_equals_const(dst_reg->var_off, insn->imm)) {
2797 		if (opcode == BPF_JEQ) {
2798 			/* if (imm == imm) goto pc+off;
2799 			 * only follow the goto, ignore fall-through
2800 			 */
2801 			*insn_idx += insn->off;
2802 			return 0;
2803 		} else {
2804 			/* if (imm != imm) goto pc+off;
2805 			 * only follow fall-through branch, since
2806 			 * that's where the program will go
2807 			 */
2808 			return 0;
2809 		}
2810 	}
2811 
2812 	other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx);
2813 	if (!other_branch)
2814 		return -EFAULT;
2815 
2816 	/* detect if we are comparing against a constant value so we can adjust
2817 	 * our min/max values for our dst register.
2818 	 * this is only legit if both are scalars (or pointers to the same
2819 	 * object, I suppose, but we don't support that right now), because
2820 	 * otherwise the different base pointers mean the offsets aren't
2821 	 * comparable.
2822 	 */
2823 	if (BPF_SRC(insn->code) == BPF_X) {
2824 		if (dst_reg->type == SCALAR_VALUE &&
2825 		    regs[insn->src_reg].type == SCALAR_VALUE) {
2826 			if (tnum_is_const(regs[insn->src_reg].var_off))
2827 				reg_set_min_max(&other_branch->regs[insn->dst_reg],
2828 						dst_reg, regs[insn->src_reg].var_off.value,
2829 						opcode);
2830 			else if (tnum_is_const(dst_reg->var_off))
2831 				reg_set_min_max_inv(&other_branch->regs[insn->src_reg],
2832 						    &regs[insn->src_reg],
2833 						    dst_reg->var_off.value, opcode);
2834 			else if (opcode == BPF_JEQ || opcode == BPF_JNE)
2835 				/* Comparing for equality, we can combine knowledge */
2836 				reg_combine_min_max(&other_branch->regs[insn->src_reg],
2837 						    &other_branch->regs[insn->dst_reg],
2838 						    &regs[insn->src_reg],
2839 						    &regs[insn->dst_reg], opcode);
2840 		}
2841 	} else if (dst_reg->type == SCALAR_VALUE) {
2842 		reg_set_min_max(&other_branch->regs[insn->dst_reg],
2843 					dst_reg, insn->imm, opcode);
2844 	}
2845 
2846 	/* detect if R == 0 where R is returned from bpf_map_lookup_elem() */
2847 	if (BPF_SRC(insn->code) == BPF_K &&
2848 	    insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) &&
2849 	    dst_reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
2850 		/* Mark all identical map registers in each branch as either
2851 		 * safe or unknown depending R == 0 or R != 0 conditional.
2852 		 */
2853 		mark_map_regs(this_branch, insn->dst_reg, opcode == BPF_JNE);
2854 		mark_map_regs(other_branch, insn->dst_reg, opcode == BPF_JEQ);
2855 	} else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGT &&
2856 		   dst_reg->type == PTR_TO_PACKET &&
2857 		   regs[insn->src_reg].type == PTR_TO_PACKET_END) {
2858 		find_good_pkt_pointers(this_branch, dst_reg);
2859 	} else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JLT &&
2860 		   dst_reg->type == PTR_TO_PACKET &&
2861 		   regs[insn->src_reg].type == PTR_TO_PACKET_END) {
2862 		find_good_pkt_pointers(other_branch, dst_reg);
2863 	} else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGE &&
2864 		   dst_reg->type == PTR_TO_PACKET_END &&
2865 		   regs[insn->src_reg].type == PTR_TO_PACKET) {
2866 		find_good_pkt_pointers(other_branch, &regs[insn->src_reg]);
2867 	} else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JLE &&
2868 		   dst_reg->type == PTR_TO_PACKET_END &&
2869 		   regs[insn->src_reg].type == PTR_TO_PACKET) {
2870 		find_good_pkt_pointers(this_branch, &regs[insn->src_reg]);
2871 	} else if (is_pointer_value(env, insn->dst_reg)) {
2872 		verbose("R%d pointer comparison prohibited\n", insn->dst_reg);
2873 		return -EACCES;
2874 	}
2875 	if (log_level)
2876 		print_verifier_state(this_branch);
2877 	return 0;
2878 }
2879 
2880 /* return the map pointer stored inside BPF_LD_IMM64 instruction */
2881 static struct bpf_map *ld_imm64_to_map_ptr(struct bpf_insn *insn)
2882 {
2883 	u64 imm64 = ((u64) (u32) insn[0].imm) | ((u64) (u32) insn[1].imm) << 32;
2884 
2885 	return (struct bpf_map *) (unsigned long) imm64;
2886 }
2887 
2888 /* verify BPF_LD_IMM64 instruction */
2889 static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
2890 {
2891 	struct bpf_reg_state *regs = env->cur_state.regs;
2892 	int err;
2893 
2894 	if (BPF_SIZE(insn->code) != BPF_DW) {
2895 		verbose("invalid BPF_LD_IMM insn\n");
2896 		return -EINVAL;
2897 	}
2898 	if (insn->off != 0) {
2899 		verbose("BPF_LD_IMM64 uses reserved fields\n");
2900 		return -EINVAL;
2901 	}
2902 
2903 	err = check_reg_arg(env, insn->dst_reg, DST_OP);
2904 	if (err)
2905 		return err;
2906 
2907 	if (insn->src_reg == 0) {
2908 		u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
2909 
2910 		regs[insn->dst_reg].type = SCALAR_VALUE;
2911 		__mark_reg_known(&regs[insn->dst_reg], imm);
2912 		return 0;
2913 	}
2914 
2915 	/* replace_map_fd_with_map_ptr() should have caught bad ld_imm64 */
2916 	BUG_ON(insn->src_reg != BPF_PSEUDO_MAP_FD);
2917 
2918 	regs[insn->dst_reg].type = CONST_PTR_TO_MAP;
2919 	regs[insn->dst_reg].map_ptr = ld_imm64_to_map_ptr(insn);
2920 	return 0;
2921 }
2922 
2923 static bool may_access_skb(enum bpf_prog_type type)
2924 {
2925 	switch (type) {
2926 	case BPF_PROG_TYPE_SOCKET_FILTER:
2927 	case BPF_PROG_TYPE_SCHED_CLS:
2928 	case BPF_PROG_TYPE_SCHED_ACT:
2929 		return true;
2930 	default:
2931 		return false;
2932 	}
2933 }
2934 
2935 /* verify safety of LD_ABS|LD_IND instructions:
2936  * - they can only appear in the programs where ctx == skb
2937  * - since they are wrappers of function calls, they scratch R1-R5 registers,
2938  *   preserve R6-R9, and store return value into R0
2939  *
2940  * Implicit input:
2941  *   ctx == skb == R6 == CTX
2942  *
2943  * Explicit input:
2944  *   SRC == any register
2945  *   IMM == 32-bit immediate
2946  *
2947  * Output:
2948  *   R0 - 8/16/32-bit skb data converted to cpu endianness
2949  */
2950 static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
2951 {
2952 	struct bpf_reg_state *regs = env->cur_state.regs;
2953 	u8 mode = BPF_MODE(insn->code);
2954 	int i, err;
2955 
2956 	if (!may_access_skb(env->prog->type)) {
2957 		verbose("BPF_LD_[ABS|IND] instructions not allowed for this program type\n");
2958 		return -EINVAL;
2959 	}
2960 
2961 	if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
2962 	    BPF_SIZE(insn->code) == BPF_DW ||
2963 	    (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
2964 		verbose("BPF_LD_[ABS|IND] uses reserved fields\n");
2965 		return -EINVAL;
2966 	}
2967 
2968 	/* check whether implicit source operand (register R6) is readable */
2969 	err = check_reg_arg(env, BPF_REG_6, SRC_OP);
2970 	if (err)
2971 		return err;
2972 
2973 	if (regs[BPF_REG_6].type != PTR_TO_CTX) {
2974 		verbose("at the time of BPF_LD_ABS|IND R6 != pointer to skb\n");
2975 		return -EINVAL;
2976 	}
2977 
2978 	if (mode == BPF_IND) {
2979 		/* check explicit source operand */
2980 		err = check_reg_arg(env, insn->src_reg, SRC_OP);
2981 		if (err)
2982 			return err;
2983 	}
2984 
2985 	/* reset caller saved regs to unreadable */
2986 	for (i = 0; i < CALLER_SAVED_REGS; i++) {
2987 		mark_reg_not_init(regs, caller_saved[i]);
2988 		check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
2989 	}
2990 
2991 	/* mark destination R0 register as readable, since it contains
2992 	 * the value fetched from the packet.
2993 	 * Already marked as written above.
2994 	 */
2995 	mark_reg_unknown(regs, BPF_REG_0);
2996 	return 0;
2997 }
2998 
2999 /* non-recursive DFS pseudo code
3000  * 1  procedure DFS-iterative(G,v):
3001  * 2      label v as discovered
3002  * 3      let S be a stack
3003  * 4      S.push(v)
3004  * 5      while S is not empty
3005  * 6            t <- S.pop()
3006  * 7            if t is what we're looking for:
3007  * 8                return t
3008  * 9            for all edges e in G.adjacentEdges(t) do
3009  * 10               if edge e is already labelled
3010  * 11                   continue with the next edge
3011  * 12               w <- G.adjacentVertex(t,e)
3012  * 13               if vertex w is not discovered and not explored
3013  * 14                   label e as tree-edge
3014  * 15                   label w as discovered
3015  * 16                   S.push(w)
3016  * 17                   continue at 5
3017  * 18               else if vertex w is discovered
3018  * 19                   label e as back-edge
3019  * 20               else
3020  * 21                   // vertex w is explored
3021  * 22                   label e as forward- or cross-edge
3022  * 23           label t as explored
3023  * 24           S.pop()
3024  *
3025  * convention:
3026  * 0x10 - discovered
3027  * 0x11 - discovered and fall-through edge labelled
3028  * 0x12 - discovered and fall-through and branch edges labelled
3029  * 0x20 - explored
3030  */
3031 
3032 enum {
3033 	DISCOVERED = 0x10,
3034 	EXPLORED = 0x20,
3035 	FALLTHROUGH = 1,
3036 	BRANCH = 2,
3037 };
3038 
3039 #define STATE_LIST_MARK ((struct bpf_verifier_state_list *) -1L)
3040 
3041 static int *insn_stack;	/* stack of insns to process */
3042 static int cur_stack;	/* current stack index */
3043 static int *insn_state;
3044 
3045 /* t, w, e - match pseudo-code above:
3046  * t - index of current instruction
3047  * w - next instruction
3048  * e - edge
3049  */
3050 static int push_insn(int t, int w, int e, struct bpf_verifier_env *env)
3051 {
3052 	if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH))
3053 		return 0;
3054 
3055 	if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH))
3056 		return 0;
3057 
3058 	if (w < 0 || w >= env->prog->len) {
3059 		verbose("jump out of range from insn %d to %d\n", t, w);
3060 		return -EINVAL;
3061 	}
3062 
3063 	if (e == BRANCH)
3064 		/* mark branch target for state pruning */
3065 		env->explored_states[w] = STATE_LIST_MARK;
3066 
3067 	if (insn_state[w] == 0) {
3068 		/* tree-edge */
3069 		insn_state[t] = DISCOVERED | e;
3070 		insn_state[w] = DISCOVERED;
3071 		if (cur_stack >= env->prog->len)
3072 			return -E2BIG;
3073 		insn_stack[cur_stack++] = w;
3074 		return 1;
3075 	} else if ((insn_state[w] & 0xF0) == DISCOVERED) {
3076 		verbose("back-edge from insn %d to %d\n", t, w);
3077 		return -EINVAL;
3078 	} else if (insn_state[w] == EXPLORED) {
3079 		/* forward- or cross-edge */
3080 		insn_state[t] = DISCOVERED | e;
3081 	} else {
3082 		verbose("insn state internal bug\n");
3083 		return -EFAULT;
3084 	}
3085 	return 0;
3086 }
3087 
3088 /* non-recursive depth-first-search to detect loops in BPF program
3089  * loop == back-edge in directed graph
3090  */
3091 static int check_cfg(struct bpf_verifier_env *env)
3092 {
3093 	struct bpf_insn *insns = env->prog->insnsi;
3094 	int insn_cnt = env->prog->len;
3095 	int ret = 0;
3096 	int i, t;
3097 
3098 	insn_state = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
3099 	if (!insn_state)
3100 		return -ENOMEM;
3101 
3102 	insn_stack = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
3103 	if (!insn_stack) {
3104 		kfree(insn_state);
3105 		return -ENOMEM;
3106 	}
3107 
3108 	insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */
3109 	insn_stack[0] = 0; /* 0 is the first instruction */
3110 	cur_stack = 1;
3111 
3112 peek_stack:
3113 	if (cur_stack == 0)
3114 		goto check_state;
3115 	t = insn_stack[cur_stack - 1];
3116 
3117 	if (BPF_CLASS(insns[t].code) == BPF_JMP) {
3118 		u8 opcode = BPF_OP(insns[t].code);
3119 
3120 		if (opcode == BPF_EXIT) {
3121 			goto mark_explored;
3122 		} else if (opcode == BPF_CALL) {
3123 			ret = push_insn(t, t + 1, FALLTHROUGH, env);
3124 			if (ret == 1)
3125 				goto peek_stack;
3126 			else if (ret < 0)
3127 				goto err_free;
3128 			if (t + 1 < insn_cnt)
3129 				env->explored_states[t + 1] = STATE_LIST_MARK;
3130 		} else if (opcode == BPF_JA) {
3131 			if (BPF_SRC(insns[t].code) != BPF_K) {
3132 				ret = -EINVAL;
3133 				goto err_free;
3134 			}
3135 			/* unconditional jump with single edge */
3136 			ret = push_insn(t, t + insns[t].off + 1,
3137 					FALLTHROUGH, env);
3138 			if (ret == 1)
3139 				goto peek_stack;
3140 			else if (ret < 0)
3141 				goto err_free;
3142 			/* tell verifier to check for equivalent states
3143 			 * after every call and jump
3144 			 */
3145 			if (t + 1 < insn_cnt)
3146 				env->explored_states[t + 1] = STATE_LIST_MARK;
3147 		} else {
3148 			/* conditional jump with two edges */
3149 			env->explored_states[t] = STATE_LIST_MARK;
3150 			ret = push_insn(t, t + 1, FALLTHROUGH, env);
3151 			if (ret == 1)
3152 				goto peek_stack;
3153 			else if (ret < 0)
3154 				goto err_free;
3155 
3156 			ret = push_insn(t, t + insns[t].off + 1, BRANCH, env);
3157 			if (ret == 1)
3158 				goto peek_stack;
3159 			else if (ret < 0)
3160 				goto err_free;
3161 		}
3162 	} else {
3163 		/* all other non-branch instructions with single
3164 		 * fall-through edge
3165 		 */
3166 		ret = push_insn(t, t + 1, FALLTHROUGH, env);
3167 		if (ret == 1)
3168 			goto peek_stack;
3169 		else if (ret < 0)
3170 			goto err_free;
3171 	}
3172 
3173 mark_explored:
3174 	insn_state[t] = EXPLORED;
3175 	if (cur_stack-- <= 0) {
3176 		verbose("pop stack internal bug\n");
3177 		ret = -EFAULT;
3178 		goto err_free;
3179 	}
3180 	goto peek_stack;
3181 
3182 check_state:
3183 	for (i = 0; i < insn_cnt; i++) {
3184 		if (insn_state[i] != EXPLORED) {
3185 			verbose("unreachable insn %d\n", i);
3186 			ret = -EINVAL;
3187 			goto err_free;
3188 		}
3189 	}
3190 	ret = 0; /* cfg looks good */
3191 
3192 err_free:
3193 	kfree(insn_state);
3194 	kfree(insn_stack);
3195 	return ret;
3196 }
3197 
3198 /* check %cur's range satisfies %old's */
3199 static bool range_within(struct bpf_reg_state *old,
3200 			 struct bpf_reg_state *cur)
3201 {
3202 	return old->umin_value <= cur->umin_value &&
3203 	       old->umax_value >= cur->umax_value &&
3204 	       old->smin_value <= cur->smin_value &&
3205 	       old->smax_value >= cur->smax_value;
3206 }
3207 
3208 /* Maximum number of register states that can exist at once */
3209 #define ID_MAP_SIZE	(MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE)
3210 struct idpair {
3211 	u32 old;
3212 	u32 cur;
3213 };
3214 
3215 /* If in the old state two registers had the same id, then they need to have
3216  * the same id in the new state as well.  But that id could be different from
3217  * the old state, so we need to track the mapping from old to new ids.
3218  * Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent
3219  * regs with old id 5 must also have new id 9 for the new state to be safe.  But
3220  * regs with a different old id could still have new id 9, we don't care about
3221  * that.
3222  * So we look through our idmap to see if this old id has been seen before.  If
3223  * so, we require the new id to match; otherwise, we add the id pair to the map.
3224  */
3225 static bool check_ids(u32 old_id, u32 cur_id, struct idpair *idmap)
3226 {
3227 	unsigned int i;
3228 
3229 	for (i = 0; i < ID_MAP_SIZE; i++) {
3230 		if (!idmap[i].old) {
3231 			/* Reached an empty slot; haven't seen this id before */
3232 			idmap[i].old = old_id;
3233 			idmap[i].cur = cur_id;
3234 			return true;
3235 		}
3236 		if (idmap[i].old == old_id)
3237 			return idmap[i].cur == cur_id;
3238 	}
3239 	/* We ran out of idmap slots, which should be impossible */
3240 	WARN_ON_ONCE(1);
3241 	return false;
3242 }
3243 
3244 /* Returns true if (rold safe implies rcur safe) */
3245 static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
3246 		    struct idpair *idmap)
3247 {
3248 	if (!(rold->live & REG_LIVE_READ))
3249 		/* explored state didn't use this */
3250 		return true;
3251 
3252 	if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, live)) == 0)
3253 		return true;
3254 
3255 	if (rold->type == NOT_INIT)
3256 		/* explored state can't have used this */
3257 		return true;
3258 	if (rcur->type == NOT_INIT)
3259 		return false;
3260 	switch (rold->type) {
3261 	case SCALAR_VALUE:
3262 		if (rcur->type == SCALAR_VALUE) {
3263 			/* new val must satisfy old val knowledge */
3264 			return range_within(rold, rcur) &&
3265 			       tnum_in(rold->var_off, rcur->var_off);
3266 		} else {
3267 			/* if we knew anything about the old value, we're not
3268 			 * equal, because we can't know anything about the
3269 			 * scalar value of the pointer in the new value.
3270 			 */
3271 			return rold->umin_value == 0 &&
3272 			       rold->umax_value == U64_MAX &&
3273 			       rold->smin_value == S64_MIN &&
3274 			       rold->smax_value == S64_MAX &&
3275 			       tnum_is_unknown(rold->var_off);
3276 		}
3277 	case PTR_TO_MAP_VALUE:
3278 		/* If the new min/max/var_off satisfy the old ones and
3279 		 * everything else matches, we are OK.
3280 		 * We don't care about the 'id' value, because nothing
3281 		 * uses it for PTR_TO_MAP_VALUE (only for ..._OR_NULL)
3282 		 */
3283 		return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 &&
3284 		       range_within(rold, rcur) &&
3285 		       tnum_in(rold->var_off, rcur->var_off);
3286 	case PTR_TO_MAP_VALUE_OR_NULL:
3287 		/* a PTR_TO_MAP_VALUE could be safe to use as a
3288 		 * PTR_TO_MAP_VALUE_OR_NULL into the same map.
3289 		 * However, if the old PTR_TO_MAP_VALUE_OR_NULL then got NULL-
3290 		 * checked, doing so could have affected others with the same
3291 		 * id, and we can't check for that because we lost the id when
3292 		 * we converted to a PTR_TO_MAP_VALUE.
3293 		 */
3294 		if (rcur->type != PTR_TO_MAP_VALUE_OR_NULL)
3295 			return false;
3296 		if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)))
3297 			return false;
3298 		/* Check our ids match any regs they're supposed to */
3299 		return check_ids(rold->id, rcur->id, idmap);
3300 	case PTR_TO_PACKET:
3301 		if (rcur->type != PTR_TO_PACKET)
3302 			return false;
3303 		/* We must have at least as much range as the old ptr
3304 		 * did, so that any accesses which were safe before are
3305 		 * still safe.  This is true even if old range < old off,
3306 		 * since someone could have accessed through (ptr - k), or
3307 		 * even done ptr -= k in a register, to get a safe access.
3308 		 */
3309 		if (rold->range > rcur->range)
3310 			return false;
3311 		/* If the offsets don't match, we can't trust our alignment;
3312 		 * nor can we be sure that we won't fall out of range.
3313 		 */
3314 		if (rold->off != rcur->off)
3315 			return false;
3316 		/* id relations must be preserved */
3317 		if (rold->id && !check_ids(rold->id, rcur->id, idmap))
3318 			return false;
3319 		/* new val must satisfy old val knowledge */
3320 		return range_within(rold, rcur) &&
3321 		       tnum_in(rold->var_off, rcur->var_off);
3322 	case PTR_TO_CTX:
3323 	case CONST_PTR_TO_MAP:
3324 	case PTR_TO_STACK:
3325 	case PTR_TO_PACKET_END:
3326 		/* Only valid matches are exact, which memcmp() above
3327 		 * would have accepted
3328 		 */
3329 	default:
3330 		/* Don't know what's going on, just say it's not safe */
3331 		return false;
3332 	}
3333 
3334 	/* Shouldn't get here; if we do, say it's not safe */
3335 	WARN_ON_ONCE(1);
3336 	return false;
3337 }
3338 
3339 /* compare two verifier states
3340  *
3341  * all states stored in state_list are known to be valid, since
3342  * verifier reached 'bpf_exit' instruction through them
3343  *
3344  * this function is called when verifier exploring different branches of
3345  * execution popped from the state stack. If it sees an old state that has
3346  * more strict register state and more strict stack state then this execution
3347  * branch doesn't need to be explored further, since verifier already
3348  * concluded that more strict state leads to valid finish.
3349  *
3350  * Therefore two states are equivalent if register state is more conservative
3351  * and explored stack state is more conservative than the current one.
3352  * Example:
3353  *       explored                   current
3354  * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC)
3355  * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC)
3356  *
3357  * In other words if current stack state (one being explored) has more
3358  * valid slots than old one that already passed validation, it means
3359  * the verifier can stop exploring and conclude that current state is valid too
3360  *
3361  * Similarly with registers. If explored state has register type as invalid
3362  * whereas register type in current state is meaningful, it means that
3363  * the current state will reach 'bpf_exit' instruction safely
3364  */
3365 static bool states_equal(struct bpf_verifier_env *env,
3366 			 struct bpf_verifier_state *old,
3367 			 struct bpf_verifier_state *cur)
3368 {
3369 	struct idpair *idmap;
3370 	bool ret = false;
3371 	int i;
3372 
3373 	idmap = kcalloc(ID_MAP_SIZE, sizeof(struct idpair), GFP_KERNEL);
3374 	/* If we failed to allocate the idmap, just say it's not safe */
3375 	if (!idmap)
3376 		return false;
3377 
3378 	for (i = 0; i < MAX_BPF_REG; i++) {
3379 		if (!regsafe(&old->regs[i], &cur->regs[i], idmap))
3380 			goto out_free;
3381 	}
3382 
3383 	for (i = 0; i < MAX_BPF_STACK; i++) {
3384 		if (old->stack_slot_type[i] == STACK_INVALID)
3385 			continue;
3386 		if (old->stack_slot_type[i] != cur->stack_slot_type[i])
3387 			/* Ex: old explored (safe) state has STACK_SPILL in
3388 			 * this stack slot, but current has has STACK_MISC ->
3389 			 * this verifier states are not equivalent,
3390 			 * return false to continue verification of this path
3391 			 */
3392 			goto out_free;
3393 		if (i % BPF_REG_SIZE)
3394 			continue;
3395 		if (old->stack_slot_type[i] != STACK_SPILL)
3396 			continue;
3397 		if (!regsafe(&old->spilled_regs[i / BPF_REG_SIZE],
3398 			     &cur->spilled_regs[i / BPF_REG_SIZE],
3399 			     idmap))
3400 			/* when explored and current stack slot are both storing
3401 			 * spilled registers, check that stored pointers types
3402 			 * are the same as well.
3403 			 * Ex: explored safe path could have stored
3404 			 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8}
3405 			 * but current path has stored:
3406 			 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16}
3407 			 * such verifier states are not equivalent.
3408 			 * return false to continue verification of this path
3409 			 */
3410 			goto out_free;
3411 		else
3412 			continue;
3413 	}
3414 	ret = true;
3415 out_free:
3416 	kfree(idmap);
3417 	return ret;
3418 }
3419 
3420 /* A write screens off any subsequent reads; but write marks come from the
3421  * straight-line code between a state and its parent.  When we arrive at a
3422  * jump target (in the first iteration of the propagate_liveness() loop),
3423  * we didn't arrive by the straight-line code, so read marks in state must
3424  * propagate to parent regardless of state's write marks.
3425  */
3426 static bool do_propagate_liveness(const struct bpf_verifier_state *state,
3427 				  struct bpf_verifier_state *parent)
3428 {
3429 	bool writes = parent == state->parent; /* Observe write marks */
3430 	bool touched = false; /* any changes made? */
3431 	int i;
3432 
3433 	if (!parent)
3434 		return touched;
3435 	/* Propagate read liveness of registers... */
3436 	BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
3437 	/* We don't need to worry about FP liveness because it's read-only */
3438 	for (i = 0; i < BPF_REG_FP; i++) {
3439 		if (parent->regs[i].live & REG_LIVE_READ)
3440 			continue;
3441 		if (writes && (state->regs[i].live & REG_LIVE_WRITTEN))
3442 			continue;
3443 		if (state->regs[i].live & REG_LIVE_READ) {
3444 			parent->regs[i].live |= REG_LIVE_READ;
3445 			touched = true;
3446 		}
3447 	}
3448 	/* ... and stack slots */
3449 	for (i = 0; i < MAX_BPF_STACK / BPF_REG_SIZE; i++) {
3450 		if (parent->stack_slot_type[i * BPF_REG_SIZE] != STACK_SPILL)
3451 			continue;
3452 		if (state->stack_slot_type[i * BPF_REG_SIZE] != STACK_SPILL)
3453 			continue;
3454 		if (parent->spilled_regs[i].live & REG_LIVE_READ)
3455 			continue;
3456 		if (writes && (state->spilled_regs[i].live & REG_LIVE_WRITTEN))
3457 			continue;
3458 		if (state->spilled_regs[i].live & REG_LIVE_READ) {
3459 			parent->spilled_regs[i].live |= REG_LIVE_READ;
3460 			touched = true;
3461 		}
3462 	}
3463 	return touched;
3464 }
3465 
3466 /* "parent" is "a state from which we reach the current state", but initially
3467  * it is not the state->parent (i.e. "the state whose straight-line code leads
3468  * to the current state"), instead it is the state that happened to arrive at
3469  * a (prunable) equivalent of the current state.  See comment above
3470  * do_propagate_liveness() for consequences of this.
3471  * This function is just a more efficient way of calling mark_reg_read() or
3472  * mark_stack_slot_read() on each reg in "parent" that is read in "state",
3473  * though it requires that parent != state->parent in the call arguments.
3474  */
3475 static void propagate_liveness(const struct bpf_verifier_state *state,
3476 			       struct bpf_verifier_state *parent)
3477 {
3478 	while (do_propagate_liveness(state, parent)) {
3479 		/* Something changed, so we need to feed those changes onward */
3480 		state = parent;
3481 		parent = state->parent;
3482 	}
3483 }
3484 
3485 static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
3486 {
3487 	struct bpf_verifier_state_list *new_sl;
3488 	struct bpf_verifier_state_list *sl;
3489 	int i;
3490 
3491 	sl = env->explored_states[insn_idx];
3492 	if (!sl)
3493 		/* this 'insn_idx' instruction wasn't marked, so we will not
3494 		 * be doing state search here
3495 		 */
3496 		return 0;
3497 
3498 	while (sl != STATE_LIST_MARK) {
3499 		if (states_equal(env, &sl->state, &env->cur_state)) {
3500 			/* reached equivalent register/stack state,
3501 			 * prune the search.
3502 			 * Registers read by the continuation are read by us.
3503 			 * If we have any write marks in env->cur_state, they
3504 			 * will prevent corresponding reads in the continuation
3505 			 * from reaching our parent (an explored_state).  Our
3506 			 * own state will get the read marks recorded, but
3507 			 * they'll be immediately forgotten as we're pruning
3508 			 * this state and will pop a new one.
3509 			 */
3510 			propagate_liveness(&sl->state, &env->cur_state);
3511 			return 1;
3512 		}
3513 		sl = sl->next;
3514 	}
3515 
3516 	/* there were no equivalent states, remember current one.
3517 	 * technically the current state is not proven to be safe yet,
3518 	 * but it will either reach bpf_exit (which means it's safe) or
3519 	 * it will be rejected. Since there are no loops, we won't be
3520 	 * seeing this 'insn_idx' instruction again on the way to bpf_exit
3521 	 */
3522 	new_sl = kmalloc(sizeof(struct bpf_verifier_state_list), GFP_USER);
3523 	if (!new_sl)
3524 		return -ENOMEM;
3525 
3526 	/* add new state to the head of linked list */
3527 	memcpy(&new_sl->state, &env->cur_state, sizeof(env->cur_state));
3528 	new_sl->next = env->explored_states[insn_idx];
3529 	env->explored_states[insn_idx] = new_sl;
3530 	/* connect new state to parentage chain */
3531 	env->cur_state.parent = &new_sl->state;
3532 	/* clear write marks in current state: the writes we did are not writes
3533 	 * our child did, so they don't screen off its reads from us.
3534 	 * (There are no read marks in current state, because reads always mark
3535 	 * their parent and current state never has children yet.  Only
3536 	 * explored_states can get read marks.)
3537 	 */
3538 	for (i = 0; i < BPF_REG_FP; i++)
3539 		env->cur_state.regs[i].live = REG_LIVE_NONE;
3540 	for (i = 0; i < MAX_BPF_STACK / BPF_REG_SIZE; i++)
3541 		if (env->cur_state.stack_slot_type[i * BPF_REG_SIZE] == STACK_SPILL)
3542 			env->cur_state.spilled_regs[i].live = REG_LIVE_NONE;
3543 	return 0;
3544 }
3545 
3546 static int ext_analyzer_insn_hook(struct bpf_verifier_env *env,
3547 				  int insn_idx, int prev_insn_idx)
3548 {
3549 	if (!env->analyzer_ops || !env->analyzer_ops->insn_hook)
3550 		return 0;
3551 
3552 	return env->analyzer_ops->insn_hook(env, insn_idx, prev_insn_idx);
3553 }
3554 
3555 static int do_check(struct bpf_verifier_env *env)
3556 {
3557 	struct bpf_verifier_state *state = &env->cur_state;
3558 	struct bpf_insn *insns = env->prog->insnsi;
3559 	struct bpf_reg_state *regs = state->regs;
3560 	int insn_cnt = env->prog->len;
3561 	int insn_idx, prev_insn_idx = 0;
3562 	int insn_processed = 0;
3563 	bool do_print_state = false;
3564 
3565 	init_reg_state(regs);
3566 	state->parent = NULL;
3567 	insn_idx = 0;
3568 	for (;;) {
3569 		struct bpf_insn *insn;
3570 		u8 class;
3571 		int err;
3572 
3573 		if (insn_idx >= insn_cnt) {
3574 			verbose("invalid insn idx %d insn_cnt %d\n",
3575 				insn_idx, insn_cnt);
3576 			return -EFAULT;
3577 		}
3578 
3579 		insn = &insns[insn_idx];
3580 		class = BPF_CLASS(insn->code);
3581 
3582 		if (++insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) {
3583 			verbose("BPF program is too large. Processed %d insn\n",
3584 				insn_processed);
3585 			return -E2BIG;
3586 		}
3587 
3588 		err = is_state_visited(env, insn_idx);
3589 		if (err < 0)
3590 			return err;
3591 		if (err == 1) {
3592 			/* found equivalent state, can prune the search */
3593 			if (log_level) {
3594 				if (do_print_state)
3595 					verbose("\nfrom %d to %d: safe\n",
3596 						prev_insn_idx, insn_idx);
3597 				else
3598 					verbose("%d: safe\n", insn_idx);
3599 			}
3600 			goto process_bpf_exit;
3601 		}
3602 
3603 		if (need_resched())
3604 			cond_resched();
3605 
3606 		if (log_level > 1 || (log_level && do_print_state)) {
3607 			if (log_level > 1)
3608 				verbose("%d:", insn_idx);
3609 			else
3610 				verbose("\nfrom %d to %d:",
3611 					prev_insn_idx, insn_idx);
3612 			print_verifier_state(&env->cur_state);
3613 			do_print_state = false;
3614 		}
3615 
3616 		if (log_level) {
3617 			verbose("%d: ", insn_idx);
3618 			print_bpf_insn(env, insn);
3619 		}
3620 
3621 		err = ext_analyzer_insn_hook(env, insn_idx, prev_insn_idx);
3622 		if (err)
3623 			return err;
3624 
3625 		if (class == BPF_ALU || class == BPF_ALU64) {
3626 			err = check_alu_op(env, insn);
3627 			if (err)
3628 				return err;
3629 
3630 		} else if (class == BPF_LDX) {
3631 			enum bpf_reg_type *prev_src_type, src_reg_type;
3632 
3633 			/* check for reserved fields is already done */
3634 
3635 			/* check src operand */
3636 			err = check_reg_arg(env, insn->src_reg, SRC_OP);
3637 			if (err)
3638 				return err;
3639 
3640 			err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
3641 			if (err)
3642 				return err;
3643 
3644 			src_reg_type = regs[insn->src_reg].type;
3645 
3646 			/* check that memory (src_reg + off) is readable,
3647 			 * the state of dst_reg will be updated by this func
3648 			 */
3649 			err = check_mem_access(env, insn_idx, insn->src_reg, insn->off,
3650 					       BPF_SIZE(insn->code), BPF_READ,
3651 					       insn->dst_reg);
3652 			if (err)
3653 				return err;
3654 
3655 			prev_src_type = &env->insn_aux_data[insn_idx].ptr_type;
3656 
3657 			if (*prev_src_type == NOT_INIT) {
3658 				/* saw a valid insn
3659 				 * dst_reg = *(u32 *)(src_reg + off)
3660 				 * save type to validate intersecting paths
3661 				 */
3662 				*prev_src_type = src_reg_type;
3663 
3664 			} else if (src_reg_type != *prev_src_type &&
3665 				   (src_reg_type == PTR_TO_CTX ||
3666 				    *prev_src_type == PTR_TO_CTX)) {
3667 				/* ABuser program is trying to use the same insn
3668 				 * dst_reg = *(u32*) (src_reg + off)
3669 				 * with different pointer types:
3670 				 * src_reg == ctx in one branch and
3671 				 * src_reg == stack|map in some other branch.
3672 				 * Reject it.
3673 				 */
3674 				verbose("same insn cannot be used with different pointers\n");
3675 				return -EINVAL;
3676 			}
3677 
3678 		} else if (class == BPF_STX) {
3679 			enum bpf_reg_type *prev_dst_type, dst_reg_type;
3680 
3681 			if (BPF_MODE(insn->code) == BPF_XADD) {
3682 				err = check_xadd(env, insn_idx, insn);
3683 				if (err)
3684 					return err;
3685 				insn_idx++;
3686 				continue;
3687 			}
3688 
3689 			/* check src1 operand */
3690 			err = check_reg_arg(env, insn->src_reg, SRC_OP);
3691 			if (err)
3692 				return err;
3693 			/* check src2 operand */
3694 			err = check_reg_arg(env, insn->dst_reg, SRC_OP);
3695 			if (err)
3696 				return err;
3697 
3698 			dst_reg_type = regs[insn->dst_reg].type;
3699 
3700 			/* check that memory (dst_reg + off) is writeable */
3701 			err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
3702 					       BPF_SIZE(insn->code), BPF_WRITE,
3703 					       insn->src_reg);
3704 			if (err)
3705 				return err;
3706 
3707 			prev_dst_type = &env->insn_aux_data[insn_idx].ptr_type;
3708 
3709 			if (*prev_dst_type == NOT_INIT) {
3710 				*prev_dst_type = dst_reg_type;
3711 			} else if (dst_reg_type != *prev_dst_type &&
3712 				   (dst_reg_type == PTR_TO_CTX ||
3713 				    *prev_dst_type == PTR_TO_CTX)) {
3714 				verbose("same insn cannot be used with different pointers\n");
3715 				return -EINVAL;
3716 			}
3717 
3718 		} else if (class == BPF_ST) {
3719 			if (BPF_MODE(insn->code) != BPF_MEM ||
3720 			    insn->src_reg != BPF_REG_0) {
3721 				verbose("BPF_ST uses reserved fields\n");
3722 				return -EINVAL;
3723 			}
3724 			/* check src operand */
3725 			err = check_reg_arg(env, insn->dst_reg, SRC_OP);
3726 			if (err)
3727 				return err;
3728 
3729 			/* check that memory (dst_reg + off) is writeable */
3730 			err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
3731 					       BPF_SIZE(insn->code), BPF_WRITE,
3732 					       -1);
3733 			if (err)
3734 				return err;
3735 
3736 		} else if (class == BPF_JMP) {
3737 			u8 opcode = BPF_OP(insn->code);
3738 
3739 			if (opcode == BPF_CALL) {
3740 				if (BPF_SRC(insn->code) != BPF_K ||
3741 				    insn->off != 0 ||
3742 				    insn->src_reg != BPF_REG_0 ||
3743 				    insn->dst_reg != BPF_REG_0) {
3744 					verbose("BPF_CALL uses reserved fields\n");
3745 					return -EINVAL;
3746 				}
3747 
3748 				err = check_call(env, insn->imm, insn_idx);
3749 				if (err)
3750 					return err;
3751 
3752 			} else if (opcode == BPF_JA) {
3753 				if (BPF_SRC(insn->code) != BPF_K ||
3754 				    insn->imm != 0 ||
3755 				    insn->src_reg != BPF_REG_0 ||
3756 				    insn->dst_reg != BPF_REG_0) {
3757 					verbose("BPF_JA uses reserved fields\n");
3758 					return -EINVAL;
3759 				}
3760 
3761 				insn_idx += insn->off + 1;
3762 				continue;
3763 
3764 			} else if (opcode == BPF_EXIT) {
3765 				if (BPF_SRC(insn->code) != BPF_K ||
3766 				    insn->imm != 0 ||
3767 				    insn->src_reg != BPF_REG_0 ||
3768 				    insn->dst_reg != BPF_REG_0) {
3769 					verbose("BPF_EXIT uses reserved fields\n");
3770 					return -EINVAL;
3771 				}
3772 
3773 				/* eBPF calling convetion is such that R0 is used
3774 				 * to return the value from eBPF program.
3775 				 * Make sure that it's readable at this time
3776 				 * of bpf_exit, which means that program wrote
3777 				 * something into it earlier
3778 				 */
3779 				err = check_reg_arg(env, BPF_REG_0, SRC_OP);
3780 				if (err)
3781 					return err;
3782 
3783 				if (is_pointer_value(env, BPF_REG_0)) {
3784 					verbose("R0 leaks addr as return value\n");
3785 					return -EACCES;
3786 				}
3787 
3788 process_bpf_exit:
3789 				insn_idx = pop_stack(env, &prev_insn_idx);
3790 				if (insn_idx < 0) {
3791 					break;
3792 				} else {
3793 					do_print_state = true;
3794 					continue;
3795 				}
3796 			} else {
3797 				err = check_cond_jmp_op(env, insn, &insn_idx);
3798 				if (err)
3799 					return err;
3800 			}
3801 		} else if (class == BPF_LD) {
3802 			u8 mode = BPF_MODE(insn->code);
3803 
3804 			if (mode == BPF_ABS || mode == BPF_IND) {
3805 				err = check_ld_abs(env, insn);
3806 				if (err)
3807 					return err;
3808 
3809 			} else if (mode == BPF_IMM) {
3810 				err = check_ld_imm(env, insn);
3811 				if (err)
3812 					return err;
3813 
3814 				insn_idx++;
3815 			} else {
3816 				verbose("invalid BPF_LD mode\n");
3817 				return -EINVAL;
3818 			}
3819 		} else {
3820 			verbose("unknown insn class %d\n", class);
3821 			return -EINVAL;
3822 		}
3823 
3824 		insn_idx++;
3825 	}
3826 
3827 	verbose("processed %d insns, stack depth %d\n",
3828 		insn_processed, env->prog->aux->stack_depth);
3829 	return 0;
3830 }
3831 
3832 static int check_map_prealloc(struct bpf_map *map)
3833 {
3834 	return (map->map_type != BPF_MAP_TYPE_HASH &&
3835 		map->map_type != BPF_MAP_TYPE_PERCPU_HASH &&
3836 		map->map_type != BPF_MAP_TYPE_HASH_OF_MAPS) ||
3837 		!(map->map_flags & BPF_F_NO_PREALLOC);
3838 }
3839 
3840 static int check_map_prog_compatibility(struct bpf_map *map,
3841 					struct bpf_prog *prog)
3842 
3843 {
3844 	/* Make sure that BPF_PROG_TYPE_PERF_EVENT programs only use
3845 	 * preallocated hash maps, since doing memory allocation
3846 	 * in overflow_handler can crash depending on where nmi got
3847 	 * triggered.
3848 	 */
3849 	if (prog->type == BPF_PROG_TYPE_PERF_EVENT) {
3850 		if (!check_map_prealloc(map)) {
3851 			verbose("perf_event programs can only use preallocated hash map\n");
3852 			return -EINVAL;
3853 		}
3854 		if (map->inner_map_meta &&
3855 		    !check_map_prealloc(map->inner_map_meta)) {
3856 			verbose("perf_event programs can only use preallocated inner hash map\n");
3857 			return -EINVAL;
3858 		}
3859 	}
3860 	return 0;
3861 }
3862 
3863 /* look for pseudo eBPF instructions that access map FDs and
3864  * replace them with actual map pointers
3865  */
3866 static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env)
3867 {
3868 	struct bpf_insn *insn = env->prog->insnsi;
3869 	int insn_cnt = env->prog->len;
3870 	int i, j, err;
3871 
3872 	err = bpf_prog_calc_tag(env->prog);
3873 	if (err)
3874 		return err;
3875 
3876 	for (i = 0; i < insn_cnt; i++, insn++) {
3877 		if (BPF_CLASS(insn->code) == BPF_LDX &&
3878 		    (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) {
3879 			verbose("BPF_LDX uses reserved fields\n");
3880 			return -EINVAL;
3881 		}
3882 
3883 		if (BPF_CLASS(insn->code) == BPF_STX &&
3884 		    ((BPF_MODE(insn->code) != BPF_MEM &&
3885 		      BPF_MODE(insn->code) != BPF_XADD) || insn->imm != 0)) {
3886 			verbose("BPF_STX uses reserved fields\n");
3887 			return -EINVAL;
3888 		}
3889 
3890 		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) {
3891 			struct bpf_map *map;
3892 			struct fd f;
3893 
3894 			if (i == insn_cnt - 1 || insn[1].code != 0 ||
3895 			    insn[1].dst_reg != 0 || insn[1].src_reg != 0 ||
3896 			    insn[1].off != 0) {
3897 				verbose("invalid bpf_ld_imm64 insn\n");
3898 				return -EINVAL;
3899 			}
3900 
3901 			if (insn->src_reg == 0)
3902 				/* valid generic load 64-bit imm */
3903 				goto next_insn;
3904 
3905 			if (insn->src_reg != BPF_PSEUDO_MAP_FD) {
3906 				verbose("unrecognized bpf_ld_imm64 insn\n");
3907 				return -EINVAL;
3908 			}
3909 
3910 			f = fdget(insn->imm);
3911 			map = __bpf_map_get(f);
3912 			if (IS_ERR(map)) {
3913 				verbose("fd %d is not pointing to valid bpf_map\n",
3914 					insn->imm);
3915 				return PTR_ERR(map);
3916 			}
3917 
3918 			err = check_map_prog_compatibility(map, env->prog);
3919 			if (err) {
3920 				fdput(f);
3921 				return err;
3922 			}
3923 
3924 			/* store map pointer inside BPF_LD_IMM64 instruction */
3925 			insn[0].imm = (u32) (unsigned long) map;
3926 			insn[1].imm = ((u64) (unsigned long) map) >> 32;
3927 
3928 			/* check whether we recorded this map already */
3929 			for (j = 0; j < env->used_map_cnt; j++)
3930 				if (env->used_maps[j] == map) {
3931 					fdput(f);
3932 					goto next_insn;
3933 				}
3934 
3935 			if (env->used_map_cnt >= MAX_USED_MAPS) {
3936 				fdput(f);
3937 				return -E2BIG;
3938 			}
3939 
3940 			/* hold the map. If the program is rejected by verifier,
3941 			 * the map will be released by release_maps() or it
3942 			 * will be used by the valid program until it's unloaded
3943 			 * and all maps are released in free_bpf_prog_info()
3944 			 */
3945 			map = bpf_map_inc(map, false);
3946 			if (IS_ERR(map)) {
3947 				fdput(f);
3948 				return PTR_ERR(map);
3949 			}
3950 			env->used_maps[env->used_map_cnt++] = map;
3951 
3952 			fdput(f);
3953 next_insn:
3954 			insn++;
3955 			i++;
3956 		}
3957 	}
3958 
3959 	/* now all pseudo BPF_LD_IMM64 instructions load valid
3960 	 * 'struct bpf_map *' into a register instead of user map_fd.
3961 	 * These pointers will be used later by verifier to validate map access.
3962 	 */
3963 	return 0;
3964 }
3965 
3966 /* drop refcnt of maps used by the rejected program */
3967 static void release_maps(struct bpf_verifier_env *env)
3968 {
3969 	int i;
3970 
3971 	for (i = 0; i < env->used_map_cnt; i++)
3972 		bpf_map_put(env->used_maps[i]);
3973 }
3974 
3975 /* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */
3976 static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env)
3977 {
3978 	struct bpf_insn *insn = env->prog->insnsi;
3979 	int insn_cnt = env->prog->len;
3980 	int i;
3981 
3982 	for (i = 0; i < insn_cnt; i++, insn++)
3983 		if (insn->code == (BPF_LD | BPF_IMM | BPF_DW))
3984 			insn->src_reg = 0;
3985 }
3986 
3987 /* single env->prog->insni[off] instruction was replaced with the range
3988  * insni[off, off + cnt).  Adjust corresponding insn_aux_data by copying
3989  * [0, off) and [off, end) to new locations, so the patched range stays zero
3990  */
3991 static int adjust_insn_aux_data(struct bpf_verifier_env *env, u32 prog_len,
3992 				u32 off, u32 cnt)
3993 {
3994 	struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data;
3995 
3996 	if (cnt == 1)
3997 		return 0;
3998 	new_data = vzalloc(sizeof(struct bpf_insn_aux_data) * prog_len);
3999 	if (!new_data)
4000 		return -ENOMEM;
4001 	memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
4002 	memcpy(new_data + off + cnt - 1, old_data + off,
4003 	       sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
4004 	env->insn_aux_data = new_data;
4005 	vfree(old_data);
4006 	return 0;
4007 }
4008 
4009 static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off,
4010 					    const struct bpf_insn *patch, u32 len)
4011 {
4012 	struct bpf_prog *new_prog;
4013 
4014 	new_prog = bpf_patch_insn_single(env->prog, off, patch, len);
4015 	if (!new_prog)
4016 		return NULL;
4017 	if (adjust_insn_aux_data(env, new_prog->len, off, len))
4018 		return NULL;
4019 	return new_prog;
4020 }
4021 
4022 /* convert load instructions that access fields of 'struct __sk_buff'
4023  * into sequence of instructions that access fields of 'struct sk_buff'
4024  */
4025 static int convert_ctx_accesses(struct bpf_verifier_env *env)
4026 {
4027 	const struct bpf_verifier_ops *ops = env->prog->aux->ops;
4028 	int i, cnt, size, ctx_field_size, delta = 0;
4029 	const int insn_cnt = env->prog->len;
4030 	struct bpf_insn insn_buf[16], *insn;
4031 	struct bpf_prog *new_prog;
4032 	enum bpf_access_type type;
4033 	bool is_narrower_load;
4034 	u32 target_size;
4035 
4036 	if (ops->gen_prologue) {
4037 		cnt = ops->gen_prologue(insn_buf, env->seen_direct_write,
4038 					env->prog);
4039 		if (cnt >= ARRAY_SIZE(insn_buf)) {
4040 			verbose("bpf verifier is misconfigured\n");
4041 			return -EINVAL;
4042 		} else if (cnt) {
4043 			new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt);
4044 			if (!new_prog)
4045 				return -ENOMEM;
4046 
4047 			env->prog = new_prog;
4048 			delta += cnt - 1;
4049 		}
4050 	}
4051 
4052 	if (!ops->convert_ctx_access)
4053 		return 0;
4054 
4055 	insn = env->prog->insnsi + delta;
4056 
4057 	for (i = 0; i < insn_cnt; i++, insn++) {
4058 		if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) ||
4059 		    insn->code == (BPF_LDX | BPF_MEM | BPF_H) ||
4060 		    insn->code == (BPF_LDX | BPF_MEM | BPF_W) ||
4061 		    insn->code == (BPF_LDX | BPF_MEM | BPF_DW))
4062 			type = BPF_READ;
4063 		else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) ||
4064 			 insn->code == (BPF_STX | BPF_MEM | BPF_H) ||
4065 			 insn->code == (BPF_STX | BPF_MEM | BPF_W) ||
4066 			 insn->code == (BPF_STX | BPF_MEM | BPF_DW))
4067 			type = BPF_WRITE;
4068 		else
4069 			continue;
4070 
4071 		if (env->insn_aux_data[i + delta].ptr_type != PTR_TO_CTX)
4072 			continue;
4073 
4074 		ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size;
4075 		size = BPF_LDST_BYTES(insn);
4076 
4077 		/* If the read access is a narrower load of the field,
4078 		 * convert to a 4/8-byte load, to minimum program type specific
4079 		 * convert_ctx_access changes. If conversion is successful,
4080 		 * we will apply proper mask to the result.
4081 		 */
4082 		is_narrower_load = size < ctx_field_size;
4083 		if (is_narrower_load) {
4084 			u32 off = insn->off;
4085 			u8 size_code;
4086 
4087 			if (type == BPF_WRITE) {
4088 				verbose("bpf verifier narrow ctx access misconfigured\n");
4089 				return -EINVAL;
4090 			}
4091 
4092 			size_code = BPF_H;
4093 			if (ctx_field_size == 4)
4094 				size_code = BPF_W;
4095 			else if (ctx_field_size == 8)
4096 				size_code = BPF_DW;
4097 
4098 			insn->off = off & ~(ctx_field_size - 1);
4099 			insn->code = BPF_LDX | BPF_MEM | size_code;
4100 		}
4101 
4102 		target_size = 0;
4103 		cnt = ops->convert_ctx_access(type, insn, insn_buf, env->prog,
4104 					      &target_size);
4105 		if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) ||
4106 		    (ctx_field_size && !target_size)) {
4107 			verbose("bpf verifier is misconfigured\n");
4108 			return -EINVAL;
4109 		}
4110 
4111 		if (is_narrower_load && size < target_size) {
4112 			if (ctx_field_size <= 4)
4113 				insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
4114 								(1 << size * 8) - 1);
4115 			else
4116 				insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg,
4117 								(1 << size * 8) - 1);
4118 		}
4119 
4120 		new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
4121 		if (!new_prog)
4122 			return -ENOMEM;
4123 
4124 		delta += cnt - 1;
4125 
4126 		/* keep walking new program and skip insns we just inserted */
4127 		env->prog = new_prog;
4128 		insn      = new_prog->insnsi + i + delta;
4129 	}
4130 
4131 	return 0;
4132 }
4133 
4134 /* fixup insn->imm field of bpf_call instructions
4135  * and inline eligible helpers as explicit sequence of BPF instructions
4136  *
4137  * this function is called after eBPF program passed verification
4138  */
4139 static int fixup_bpf_calls(struct bpf_verifier_env *env)
4140 {
4141 	struct bpf_prog *prog = env->prog;
4142 	struct bpf_insn *insn = prog->insnsi;
4143 	const struct bpf_func_proto *fn;
4144 	const int insn_cnt = prog->len;
4145 	struct bpf_insn insn_buf[16];
4146 	struct bpf_prog *new_prog;
4147 	struct bpf_map *map_ptr;
4148 	int i, cnt, delta = 0;
4149 
4150 	for (i = 0; i < insn_cnt; i++, insn++) {
4151 		if (insn->code != (BPF_JMP | BPF_CALL))
4152 			continue;
4153 
4154 		if (insn->imm == BPF_FUNC_get_route_realm)
4155 			prog->dst_needed = 1;
4156 		if (insn->imm == BPF_FUNC_get_prandom_u32)
4157 			bpf_user_rnd_init_once();
4158 		if (insn->imm == BPF_FUNC_tail_call) {
4159 			/* If we tail call into other programs, we
4160 			 * cannot make any assumptions since they can
4161 			 * be replaced dynamically during runtime in
4162 			 * the program array.
4163 			 */
4164 			prog->cb_access = 1;
4165 			env->prog->aux->stack_depth = MAX_BPF_STACK;
4166 
4167 			/* mark bpf_tail_call as different opcode to avoid
4168 			 * conditional branch in the interpeter for every normal
4169 			 * call and to prevent accidental JITing by JIT compiler
4170 			 * that doesn't support bpf_tail_call yet
4171 			 */
4172 			insn->imm = 0;
4173 			insn->code = BPF_JMP | BPF_TAIL_CALL;
4174 			continue;
4175 		}
4176 
4177 		/* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup
4178 		 * handlers are currently limited to 64 bit only.
4179 		 */
4180 		if (ebpf_jit_enabled() && BITS_PER_LONG == 64 &&
4181 		    insn->imm == BPF_FUNC_map_lookup_elem) {
4182 			map_ptr = env->insn_aux_data[i + delta].map_ptr;
4183 			if (map_ptr == BPF_MAP_PTR_POISON ||
4184 			    !map_ptr->ops->map_gen_lookup)
4185 				goto patch_call_imm;
4186 
4187 			cnt = map_ptr->ops->map_gen_lookup(map_ptr, insn_buf);
4188 			if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
4189 				verbose("bpf verifier is misconfigured\n");
4190 				return -EINVAL;
4191 			}
4192 
4193 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf,
4194 						       cnt);
4195 			if (!new_prog)
4196 				return -ENOMEM;
4197 
4198 			delta += cnt - 1;
4199 
4200 			/* keep walking new program and skip insns we just inserted */
4201 			env->prog = prog = new_prog;
4202 			insn      = new_prog->insnsi + i + delta;
4203 			continue;
4204 		}
4205 
4206 		if (insn->imm == BPF_FUNC_redirect_map) {
4207 			u64 addr = (unsigned long)prog;
4208 			struct bpf_insn r4_ld[] = {
4209 				BPF_LD_IMM64(BPF_REG_4, addr),
4210 				*insn,
4211 			};
4212 			cnt = ARRAY_SIZE(r4_ld);
4213 
4214 			new_prog = bpf_patch_insn_data(env, i + delta, r4_ld, cnt);
4215 			if (!new_prog)
4216 				return -ENOMEM;
4217 
4218 			delta    += cnt - 1;
4219 			env->prog = prog = new_prog;
4220 			insn      = new_prog->insnsi + i + delta;
4221 		}
4222 patch_call_imm:
4223 		fn = prog->aux->ops->get_func_proto(insn->imm);
4224 		/* all functions that have prototype and verifier allowed
4225 		 * programs to call them, must be real in-kernel functions
4226 		 */
4227 		if (!fn->func) {
4228 			verbose("kernel subsystem misconfigured func %s#%d\n",
4229 				func_id_name(insn->imm), insn->imm);
4230 			return -EFAULT;
4231 		}
4232 		insn->imm = fn->func - __bpf_call_base;
4233 	}
4234 
4235 	return 0;
4236 }
4237 
4238 static void free_states(struct bpf_verifier_env *env)
4239 {
4240 	struct bpf_verifier_state_list *sl, *sln;
4241 	int i;
4242 
4243 	if (!env->explored_states)
4244 		return;
4245 
4246 	for (i = 0; i < env->prog->len; i++) {
4247 		sl = env->explored_states[i];
4248 
4249 		if (sl)
4250 			while (sl != STATE_LIST_MARK) {
4251 				sln = sl->next;
4252 				kfree(sl);
4253 				sl = sln;
4254 			}
4255 	}
4256 
4257 	kfree(env->explored_states);
4258 }
4259 
4260 int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
4261 {
4262 	char __user *log_ubuf = NULL;
4263 	struct bpf_verifier_env *env;
4264 	int ret = -EINVAL;
4265 
4266 	/* 'struct bpf_verifier_env' can be global, but since it's not small,
4267 	 * allocate/free it every time bpf_check() is called
4268 	 */
4269 	env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
4270 	if (!env)
4271 		return -ENOMEM;
4272 
4273 	env->insn_aux_data = vzalloc(sizeof(struct bpf_insn_aux_data) *
4274 				     (*prog)->len);
4275 	ret = -ENOMEM;
4276 	if (!env->insn_aux_data)
4277 		goto err_free_env;
4278 	env->prog = *prog;
4279 
4280 	/* grab the mutex to protect few globals used by verifier */
4281 	mutex_lock(&bpf_verifier_lock);
4282 
4283 	if (attr->log_level || attr->log_buf || attr->log_size) {
4284 		/* user requested verbose verifier output
4285 		 * and supplied buffer to store the verification trace
4286 		 */
4287 		log_level = attr->log_level;
4288 		log_ubuf = (char __user *) (unsigned long) attr->log_buf;
4289 		log_size = attr->log_size;
4290 		log_len = 0;
4291 
4292 		ret = -EINVAL;
4293 		/* log_* values have to be sane */
4294 		if (log_size < 128 || log_size > UINT_MAX >> 8 ||
4295 		    log_level == 0 || log_ubuf == NULL)
4296 			goto err_unlock;
4297 
4298 		ret = -ENOMEM;
4299 		log_buf = vmalloc(log_size);
4300 		if (!log_buf)
4301 			goto err_unlock;
4302 	} else {
4303 		log_level = 0;
4304 	}
4305 
4306 	env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT);
4307 	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
4308 		env->strict_alignment = true;
4309 
4310 	ret = replace_map_fd_with_map_ptr(env);
4311 	if (ret < 0)
4312 		goto skip_full_check;
4313 
4314 	env->explored_states = kcalloc(env->prog->len,
4315 				       sizeof(struct bpf_verifier_state_list *),
4316 				       GFP_USER);
4317 	ret = -ENOMEM;
4318 	if (!env->explored_states)
4319 		goto skip_full_check;
4320 
4321 	ret = check_cfg(env);
4322 	if (ret < 0)
4323 		goto skip_full_check;
4324 
4325 	env->allow_ptr_leaks = capable(CAP_SYS_ADMIN);
4326 
4327 	ret = do_check(env);
4328 
4329 skip_full_check:
4330 	while (pop_stack(env, NULL) >= 0);
4331 	free_states(env);
4332 
4333 	if (ret == 0)
4334 		/* program is valid, convert *(u32*)(ctx + off) accesses */
4335 		ret = convert_ctx_accesses(env);
4336 
4337 	if (ret == 0)
4338 		ret = fixup_bpf_calls(env);
4339 
4340 	if (log_level && log_len >= log_size - 1) {
4341 		BUG_ON(log_len >= log_size);
4342 		/* verifier log exceeded user supplied buffer */
4343 		ret = -ENOSPC;
4344 		/* fall through to return what was recorded */
4345 	}
4346 
4347 	/* copy verifier log back to user space including trailing zero */
4348 	if (log_level && copy_to_user(log_ubuf, log_buf, log_len + 1) != 0) {
4349 		ret = -EFAULT;
4350 		goto free_log_buf;
4351 	}
4352 
4353 	if (ret == 0 && env->used_map_cnt) {
4354 		/* if program passed verifier, update used_maps in bpf_prog_info */
4355 		env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt,
4356 							  sizeof(env->used_maps[0]),
4357 							  GFP_KERNEL);
4358 
4359 		if (!env->prog->aux->used_maps) {
4360 			ret = -ENOMEM;
4361 			goto free_log_buf;
4362 		}
4363 
4364 		memcpy(env->prog->aux->used_maps, env->used_maps,
4365 		       sizeof(env->used_maps[0]) * env->used_map_cnt);
4366 		env->prog->aux->used_map_cnt = env->used_map_cnt;
4367 
4368 		/* program is valid. Convert pseudo bpf_ld_imm64 into generic
4369 		 * bpf_ld_imm64 instructions
4370 		 */
4371 		convert_pseudo_ld_imm64(env);
4372 	}
4373 
4374 free_log_buf:
4375 	if (log_level)
4376 		vfree(log_buf);
4377 	if (!env->prog->aux->used_maps)
4378 		/* if we didn't copy map pointers into bpf_prog_info, release
4379 		 * them now. Otherwise free_bpf_prog_info() will release them.
4380 		 */
4381 		release_maps(env);
4382 	*prog = env->prog;
4383 err_unlock:
4384 	mutex_unlock(&bpf_verifier_lock);
4385 	vfree(env->insn_aux_data);
4386 err_free_env:
4387 	kfree(env);
4388 	return ret;
4389 }
4390 
4391 int bpf_analyzer(struct bpf_prog *prog, const struct bpf_ext_analyzer_ops *ops,
4392 		 void *priv)
4393 {
4394 	struct bpf_verifier_env *env;
4395 	int ret;
4396 
4397 	env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
4398 	if (!env)
4399 		return -ENOMEM;
4400 
4401 	env->insn_aux_data = vzalloc(sizeof(struct bpf_insn_aux_data) *
4402 				     prog->len);
4403 	ret = -ENOMEM;
4404 	if (!env->insn_aux_data)
4405 		goto err_free_env;
4406 	env->prog = prog;
4407 	env->analyzer_ops = ops;
4408 	env->analyzer_priv = priv;
4409 
4410 	/* grab the mutex to protect few globals used by verifier */
4411 	mutex_lock(&bpf_verifier_lock);
4412 
4413 	log_level = 0;
4414 
4415 	env->strict_alignment = false;
4416 	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
4417 		env->strict_alignment = true;
4418 
4419 	env->explored_states = kcalloc(env->prog->len,
4420 				       sizeof(struct bpf_verifier_state_list *),
4421 				       GFP_KERNEL);
4422 	ret = -ENOMEM;
4423 	if (!env->explored_states)
4424 		goto skip_full_check;
4425 
4426 	ret = check_cfg(env);
4427 	if (ret < 0)
4428 		goto skip_full_check;
4429 
4430 	env->allow_ptr_leaks = capable(CAP_SYS_ADMIN);
4431 
4432 	ret = do_check(env);
4433 
4434 skip_full_check:
4435 	while (pop_stack(env, NULL) >= 0);
4436 	free_states(env);
4437 
4438 	mutex_unlock(&bpf_verifier_lock);
4439 	vfree(env->insn_aux_data);
4440 err_free_env:
4441 	kfree(env);
4442 	return ret;
4443 }
4444 EXPORT_SYMBOL_GPL(bpf_analyzer);
4445