xref: /openbmc/linux/kernel/bpf/verifier.c (revision 4da722ca)
1 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
2  * Copyright (c) 2016 Facebook
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of version 2 of the GNU General Public
6  * License as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11  * General Public License for more details.
12  */
13 #include <linux/kernel.h>
14 #include <linux/types.h>
15 #include <linux/slab.h>
16 #include <linux/bpf.h>
17 #include <linux/bpf_verifier.h>
18 #include <linux/filter.h>
19 #include <net/netlink.h>
20 #include <linux/file.h>
21 #include <linux/vmalloc.h>
22 #include <linux/stringify.h>
23 
24 /* bpf_check() is a static code analyzer that walks eBPF program
25  * instruction by instruction and updates register/stack state.
26  * All paths of conditional branches are analyzed until 'bpf_exit' insn.
27  *
28  * The first pass is depth-first-search to check that the program is a DAG.
29  * It rejects the following programs:
30  * - larger than BPF_MAXINSNS insns
31  * - if loop is present (detected via back-edge)
32  * - unreachable insns exist (shouldn't be a forest. program = one function)
33  * - out of bounds or malformed jumps
34  * The second pass is all possible path descent from the 1st insn.
35  * Since it's analyzing all pathes through the program, the length of the
36  * analysis is limited to 64k insn, which may be hit even if total number of
37  * insn is less then 4K, but there are too many branches that change stack/regs.
38  * Number of 'branches to be analyzed' is limited to 1k
39  *
40  * On entry to each instruction, each register has a type, and the instruction
41  * changes the types of the registers depending on instruction semantics.
42  * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is
43  * copied to R1.
44  *
45  * All registers are 64-bit.
46  * R0 - return register
47  * R1-R5 argument passing registers
48  * R6-R9 callee saved registers
49  * R10 - frame pointer read-only
50  *
51  * At the start of BPF program the register R1 contains a pointer to bpf_context
52  * and has type PTR_TO_CTX.
53  *
54  * Verifier tracks arithmetic operations on pointers in case:
55  *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
56  *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
57  * 1st insn copies R10 (which has FRAME_PTR) type into R1
58  * and 2nd arithmetic instruction is pattern matched to recognize
59  * that it wants to construct a pointer to some element within stack.
60  * So after 2nd insn, the register R1 has type PTR_TO_STACK
61  * (and -20 constant is saved for further stack bounds checking).
62  * Meaning that this reg is a pointer to stack plus known immediate constant.
63  *
64  * Most of the time the registers have UNKNOWN_VALUE type, which
65  * means the register has some value, but it's not a valid pointer.
66  * (like pointer plus pointer becomes UNKNOWN_VALUE type)
67  *
68  * When verifier sees load or store instructions the type of base register
69  * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, FRAME_PTR. These are three pointer
70  * types recognized by check_mem_access() function.
71  *
72  * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value'
73  * and the range of [ptr, ptr + map's value_size) is accessible.
74  *
75  * registers used to pass values to function calls are checked against
76  * function argument constraints.
77  *
78  * ARG_PTR_TO_MAP_KEY is one of such argument constraints.
79  * It means that the register type passed to this function must be
80  * PTR_TO_STACK and it will be used inside the function as
81  * 'pointer to map element key'
82  *
83  * For example the argument constraints for bpf_map_lookup_elem():
84  *   .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
85  *   .arg1_type = ARG_CONST_MAP_PTR,
86  *   .arg2_type = ARG_PTR_TO_MAP_KEY,
87  *
88  * ret_type says that this function returns 'pointer to map elem value or null'
89  * function expects 1st argument to be a const pointer to 'struct bpf_map' and
90  * 2nd argument should be a pointer to stack, which will be used inside
91  * the helper function as a pointer to map element key.
92  *
93  * On the kernel side the helper function looks like:
94  * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
95  * {
96  *    struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
97  *    void *key = (void *) (unsigned long) r2;
98  *    void *value;
99  *
100  *    here kernel can access 'key' and 'map' pointers safely, knowing that
101  *    [key, key + map->key_size) bytes are valid and were initialized on
102  *    the stack of eBPF program.
103  * }
104  *
105  * Corresponding eBPF program may look like:
106  *    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),  // after this insn R2 type is FRAME_PTR
107  *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
108  *    BPF_LD_MAP_FD(BPF_REG_1, map_fd),      // after this insn R1 type is CONST_PTR_TO_MAP
109  *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
110  * here verifier looks at prototype of map_lookup_elem() and sees:
111  * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok,
112  * Now verifier knows that this map has key of R1->map_ptr->key_size bytes
113  *
114  * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far,
115  * Now verifier checks that [R2, R2 + map's key_size) are within stack limits
116  * and were initialized prior to this call.
117  * If it's ok, then verifier allows this BPF_CALL insn and looks at
118  * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets
119  * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function
120  * returns ether pointer to map value or NULL.
121  *
122  * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
123  * insn, the register holding that pointer in the true branch changes state to
124  * PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false
125  * branch. See check_cond_jmp_op().
126  *
127  * After the call R0 is set to return type of the function and registers R1-R5
128  * are set to NOT_INIT to indicate that they are no longer readable.
129  */
130 
131 /* verifier_state + insn_idx are pushed to stack when branch is encountered */
132 struct bpf_verifier_stack_elem {
133 	/* verifer state is 'st'
134 	 * before processing instruction 'insn_idx'
135 	 * and after processing instruction 'prev_insn_idx'
136 	 */
137 	struct bpf_verifier_state st;
138 	int insn_idx;
139 	int prev_insn_idx;
140 	struct bpf_verifier_stack_elem *next;
141 };
142 
143 #define BPF_COMPLEXITY_LIMIT_INSNS	98304
144 #define BPF_COMPLEXITY_LIMIT_STACK	1024
145 
146 #define BPF_MAP_PTR_POISON ((void *)0xeB9F + POISON_POINTER_DELTA)
147 
148 struct bpf_call_arg_meta {
149 	struct bpf_map *map_ptr;
150 	bool raw_mode;
151 	bool pkt_access;
152 	int regno;
153 	int access_size;
154 };
155 
156 /* verbose verifier prints what it's seeing
157  * bpf_check() is called under lock, so no race to access these global vars
158  */
159 static u32 log_level, log_size, log_len;
160 static char *log_buf;
161 
162 static DEFINE_MUTEX(bpf_verifier_lock);
163 
164 /* log_level controls verbosity level of eBPF verifier.
165  * verbose() is used to dump the verification trace to the log, so the user
166  * can figure out what's wrong with the program
167  */
168 static __printf(1, 2) void verbose(const char *fmt, ...)
169 {
170 	va_list args;
171 
172 	if (log_level == 0 || log_len >= log_size - 1)
173 		return;
174 
175 	va_start(args, fmt);
176 	log_len += vscnprintf(log_buf + log_len, log_size - log_len, fmt, args);
177 	va_end(args);
178 }
179 
180 /* string representation of 'enum bpf_reg_type' */
181 static const char * const reg_type_str[] = {
182 	[NOT_INIT]		= "?",
183 	[UNKNOWN_VALUE]		= "inv",
184 	[PTR_TO_CTX]		= "ctx",
185 	[CONST_PTR_TO_MAP]	= "map_ptr",
186 	[PTR_TO_MAP_VALUE]	= "map_value",
187 	[PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null",
188 	[PTR_TO_MAP_VALUE_ADJ]	= "map_value_adj",
189 	[FRAME_PTR]		= "fp",
190 	[PTR_TO_STACK]		= "fp",
191 	[CONST_IMM]		= "imm",
192 	[PTR_TO_PACKET]		= "pkt",
193 	[PTR_TO_PACKET_END]	= "pkt_end",
194 };
195 
196 #define __BPF_FUNC_STR_FN(x) [BPF_FUNC_ ## x] = __stringify(bpf_ ## x)
197 static const char * const func_id_str[] = {
198 	__BPF_FUNC_MAPPER(__BPF_FUNC_STR_FN)
199 };
200 #undef __BPF_FUNC_STR_FN
201 
202 static const char *func_id_name(int id)
203 {
204 	BUILD_BUG_ON(ARRAY_SIZE(func_id_str) != __BPF_FUNC_MAX_ID);
205 
206 	if (id >= 0 && id < __BPF_FUNC_MAX_ID && func_id_str[id])
207 		return func_id_str[id];
208 	else
209 		return "unknown";
210 }
211 
212 static void print_verifier_state(struct bpf_verifier_state *state)
213 {
214 	struct bpf_reg_state *reg;
215 	enum bpf_reg_type t;
216 	int i;
217 
218 	for (i = 0; i < MAX_BPF_REG; i++) {
219 		reg = &state->regs[i];
220 		t = reg->type;
221 		if (t == NOT_INIT)
222 			continue;
223 		verbose(" R%d=%s", i, reg_type_str[t]);
224 		if (t == CONST_IMM || t == PTR_TO_STACK)
225 			verbose("%lld", reg->imm);
226 		else if (t == PTR_TO_PACKET)
227 			verbose("(id=%d,off=%d,r=%d)",
228 				reg->id, reg->off, reg->range);
229 		else if (t == UNKNOWN_VALUE && reg->imm)
230 			verbose("%lld", reg->imm);
231 		else if (t == CONST_PTR_TO_MAP || t == PTR_TO_MAP_VALUE ||
232 			 t == PTR_TO_MAP_VALUE_OR_NULL ||
233 			 t == PTR_TO_MAP_VALUE_ADJ)
234 			verbose("(ks=%d,vs=%d,id=%u)",
235 				reg->map_ptr->key_size,
236 				reg->map_ptr->value_size,
237 				reg->id);
238 		if (reg->min_value != BPF_REGISTER_MIN_RANGE)
239 			verbose(",min_value=%lld",
240 				(long long)reg->min_value);
241 		if (reg->max_value != BPF_REGISTER_MAX_RANGE)
242 			verbose(",max_value=%llu",
243 				(unsigned long long)reg->max_value);
244 		if (reg->min_align)
245 			verbose(",min_align=%u", reg->min_align);
246 		if (reg->aux_off)
247 			verbose(",aux_off=%u", reg->aux_off);
248 		if (reg->aux_off_align)
249 			verbose(",aux_off_align=%u", reg->aux_off_align);
250 	}
251 	for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
252 		if (state->stack_slot_type[i] == STACK_SPILL)
253 			verbose(" fp%d=%s", -MAX_BPF_STACK + i,
254 				reg_type_str[state->spilled_regs[i / BPF_REG_SIZE].type]);
255 	}
256 	verbose("\n");
257 }
258 
259 static const char *const bpf_class_string[] = {
260 	[BPF_LD]    = "ld",
261 	[BPF_LDX]   = "ldx",
262 	[BPF_ST]    = "st",
263 	[BPF_STX]   = "stx",
264 	[BPF_ALU]   = "alu",
265 	[BPF_JMP]   = "jmp",
266 	[BPF_RET]   = "BUG",
267 	[BPF_ALU64] = "alu64",
268 };
269 
270 static const char *const bpf_alu_string[16] = {
271 	[BPF_ADD >> 4]  = "+=",
272 	[BPF_SUB >> 4]  = "-=",
273 	[BPF_MUL >> 4]  = "*=",
274 	[BPF_DIV >> 4]  = "/=",
275 	[BPF_OR  >> 4]  = "|=",
276 	[BPF_AND >> 4]  = "&=",
277 	[BPF_LSH >> 4]  = "<<=",
278 	[BPF_RSH >> 4]  = ">>=",
279 	[BPF_NEG >> 4]  = "neg",
280 	[BPF_MOD >> 4]  = "%=",
281 	[BPF_XOR >> 4]  = "^=",
282 	[BPF_MOV >> 4]  = "=",
283 	[BPF_ARSH >> 4] = "s>>=",
284 	[BPF_END >> 4]  = "endian",
285 };
286 
287 static const char *const bpf_ldst_string[] = {
288 	[BPF_W >> 3]  = "u32",
289 	[BPF_H >> 3]  = "u16",
290 	[BPF_B >> 3]  = "u8",
291 	[BPF_DW >> 3] = "u64",
292 };
293 
294 static const char *const bpf_jmp_string[16] = {
295 	[BPF_JA >> 4]   = "jmp",
296 	[BPF_JEQ >> 4]  = "==",
297 	[BPF_JGT >> 4]  = ">",
298 	[BPF_JGE >> 4]  = ">=",
299 	[BPF_JSET >> 4] = "&",
300 	[BPF_JNE >> 4]  = "!=",
301 	[BPF_JSGT >> 4] = "s>",
302 	[BPF_JSGE >> 4] = "s>=",
303 	[BPF_CALL >> 4] = "call",
304 	[BPF_EXIT >> 4] = "exit",
305 };
306 
307 static void print_bpf_insn(const struct bpf_verifier_env *env,
308 			   const struct bpf_insn *insn)
309 {
310 	u8 class = BPF_CLASS(insn->code);
311 
312 	if (class == BPF_ALU || class == BPF_ALU64) {
313 		if (BPF_SRC(insn->code) == BPF_X)
314 			verbose("(%02x) %sr%d %s %sr%d\n",
315 				insn->code, class == BPF_ALU ? "(u32) " : "",
316 				insn->dst_reg,
317 				bpf_alu_string[BPF_OP(insn->code) >> 4],
318 				class == BPF_ALU ? "(u32) " : "",
319 				insn->src_reg);
320 		else
321 			verbose("(%02x) %sr%d %s %s%d\n",
322 				insn->code, class == BPF_ALU ? "(u32) " : "",
323 				insn->dst_reg,
324 				bpf_alu_string[BPF_OP(insn->code) >> 4],
325 				class == BPF_ALU ? "(u32) " : "",
326 				insn->imm);
327 	} else if (class == BPF_STX) {
328 		if (BPF_MODE(insn->code) == BPF_MEM)
329 			verbose("(%02x) *(%s *)(r%d %+d) = r%d\n",
330 				insn->code,
331 				bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
332 				insn->dst_reg,
333 				insn->off, insn->src_reg);
334 		else if (BPF_MODE(insn->code) == BPF_XADD)
335 			verbose("(%02x) lock *(%s *)(r%d %+d) += r%d\n",
336 				insn->code,
337 				bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
338 				insn->dst_reg, insn->off,
339 				insn->src_reg);
340 		else
341 			verbose("BUG_%02x\n", insn->code);
342 	} else if (class == BPF_ST) {
343 		if (BPF_MODE(insn->code) != BPF_MEM) {
344 			verbose("BUG_st_%02x\n", insn->code);
345 			return;
346 		}
347 		verbose("(%02x) *(%s *)(r%d %+d) = %d\n",
348 			insn->code,
349 			bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
350 			insn->dst_reg,
351 			insn->off, insn->imm);
352 	} else if (class == BPF_LDX) {
353 		if (BPF_MODE(insn->code) != BPF_MEM) {
354 			verbose("BUG_ldx_%02x\n", insn->code);
355 			return;
356 		}
357 		verbose("(%02x) r%d = *(%s *)(r%d %+d)\n",
358 			insn->code, insn->dst_reg,
359 			bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
360 			insn->src_reg, insn->off);
361 	} else if (class == BPF_LD) {
362 		if (BPF_MODE(insn->code) == BPF_ABS) {
363 			verbose("(%02x) r0 = *(%s *)skb[%d]\n",
364 				insn->code,
365 				bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
366 				insn->imm);
367 		} else if (BPF_MODE(insn->code) == BPF_IND) {
368 			verbose("(%02x) r0 = *(%s *)skb[r%d + %d]\n",
369 				insn->code,
370 				bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
371 				insn->src_reg, insn->imm);
372 		} else if (BPF_MODE(insn->code) == BPF_IMM &&
373 			   BPF_SIZE(insn->code) == BPF_DW) {
374 			/* At this point, we already made sure that the second
375 			 * part of the ldimm64 insn is accessible.
376 			 */
377 			u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
378 			bool map_ptr = insn->src_reg == BPF_PSEUDO_MAP_FD;
379 
380 			if (map_ptr && !env->allow_ptr_leaks)
381 				imm = 0;
382 
383 			verbose("(%02x) r%d = 0x%llx\n", insn->code,
384 				insn->dst_reg, (unsigned long long)imm);
385 		} else {
386 			verbose("BUG_ld_%02x\n", insn->code);
387 			return;
388 		}
389 	} else if (class == BPF_JMP) {
390 		u8 opcode = BPF_OP(insn->code);
391 
392 		if (opcode == BPF_CALL) {
393 			verbose("(%02x) call %s#%d\n", insn->code,
394 				func_id_name(insn->imm), insn->imm);
395 		} else if (insn->code == (BPF_JMP | BPF_JA)) {
396 			verbose("(%02x) goto pc%+d\n",
397 				insn->code, insn->off);
398 		} else if (insn->code == (BPF_JMP | BPF_EXIT)) {
399 			verbose("(%02x) exit\n", insn->code);
400 		} else if (BPF_SRC(insn->code) == BPF_X) {
401 			verbose("(%02x) if r%d %s r%d goto pc%+d\n",
402 				insn->code, insn->dst_reg,
403 				bpf_jmp_string[BPF_OP(insn->code) >> 4],
404 				insn->src_reg, insn->off);
405 		} else {
406 			verbose("(%02x) if r%d %s 0x%x goto pc%+d\n",
407 				insn->code, insn->dst_reg,
408 				bpf_jmp_string[BPF_OP(insn->code) >> 4],
409 				insn->imm, insn->off);
410 		}
411 	} else {
412 		verbose("(%02x) %s\n", insn->code, bpf_class_string[class]);
413 	}
414 }
415 
416 static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx)
417 {
418 	struct bpf_verifier_stack_elem *elem;
419 	int insn_idx;
420 
421 	if (env->head == NULL)
422 		return -1;
423 
424 	memcpy(&env->cur_state, &env->head->st, sizeof(env->cur_state));
425 	insn_idx = env->head->insn_idx;
426 	if (prev_insn_idx)
427 		*prev_insn_idx = env->head->prev_insn_idx;
428 	elem = env->head->next;
429 	kfree(env->head);
430 	env->head = elem;
431 	env->stack_size--;
432 	return insn_idx;
433 }
434 
435 static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
436 					     int insn_idx, int prev_insn_idx)
437 {
438 	struct bpf_verifier_stack_elem *elem;
439 
440 	elem = kmalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL);
441 	if (!elem)
442 		goto err;
443 
444 	memcpy(&elem->st, &env->cur_state, sizeof(env->cur_state));
445 	elem->insn_idx = insn_idx;
446 	elem->prev_insn_idx = prev_insn_idx;
447 	elem->next = env->head;
448 	env->head = elem;
449 	env->stack_size++;
450 	if (env->stack_size > BPF_COMPLEXITY_LIMIT_STACK) {
451 		verbose("BPF program is too complex\n");
452 		goto err;
453 	}
454 	return &elem->st;
455 err:
456 	/* pop all elements and return */
457 	while (pop_stack(env, NULL) >= 0);
458 	return NULL;
459 }
460 
461 #define CALLER_SAVED_REGS 6
462 static const int caller_saved[CALLER_SAVED_REGS] = {
463 	BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
464 };
465 
466 static void mark_reg_not_init(struct bpf_reg_state *regs, u32 regno)
467 {
468 	BUG_ON(regno >= MAX_BPF_REG);
469 
470 	memset(&regs[regno], 0, sizeof(regs[regno]));
471 	regs[regno].type = NOT_INIT;
472 	regs[regno].min_value = BPF_REGISTER_MIN_RANGE;
473 	regs[regno].max_value = BPF_REGISTER_MAX_RANGE;
474 }
475 
476 static void init_reg_state(struct bpf_reg_state *regs)
477 {
478 	int i;
479 
480 	for (i = 0; i < MAX_BPF_REG; i++)
481 		mark_reg_not_init(regs, i);
482 
483 	/* frame pointer */
484 	regs[BPF_REG_FP].type = FRAME_PTR;
485 
486 	/* 1st arg to a function */
487 	regs[BPF_REG_1].type = PTR_TO_CTX;
488 }
489 
490 static void __mark_reg_unknown_value(struct bpf_reg_state *regs, u32 regno)
491 {
492 	regs[regno].type = UNKNOWN_VALUE;
493 	regs[regno].id = 0;
494 	regs[regno].imm = 0;
495 }
496 
497 static void mark_reg_unknown_value(struct bpf_reg_state *regs, u32 regno)
498 {
499 	BUG_ON(regno >= MAX_BPF_REG);
500 	__mark_reg_unknown_value(regs, regno);
501 }
502 
503 static void reset_reg_range_values(struct bpf_reg_state *regs, u32 regno)
504 {
505 	regs[regno].min_value = BPF_REGISTER_MIN_RANGE;
506 	regs[regno].max_value = BPF_REGISTER_MAX_RANGE;
507 	regs[regno].min_align = 0;
508 }
509 
510 static void mark_reg_unknown_value_and_range(struct bpf_reg_state *regs,
511 					     u32 regno)
512 {
513 	mark_reg_unknown_value(regs, regno);
514 	reset_reg_range_values(regs, regno);
515 }
516 
517 enum reg_arg_type {
518 	SRC_OP,		/* register is used as source operand */
519 	DST_OP,		/* register is used as destination operand */
520 	DST_OP_NO_MARK	/* same as above, check only, don't mark */
521 };
522 
523 static int check_reg_arg(struct bpf_reg_state *regs, u32 regno,
524 			 enum reg_arg_type t)
525 {
526 	if (regno >= MAX_BPF_REG) {
527 		verbose("R%d is invalid\n", regno);
528 		return -EINVAL;
529 	}
530 
531 	if (t == SRC_OP) {
532 		/* check whether register used as source operand can be read */
533 		if (regs[regno].type == NOT_INIT) {
534 			verbose("R%d !read_ok\n", regno);
535 			return -EACCES;
536 		}
537 	} else {
538 		/* check whether register used as dest operand can be written to */
539 		if (regno == BPF_REG_FP) {
540 			verbose("frame pointer is read only\n");
541 			return -EACCES;
542 		}
543 		if (t == DST_OP)
544 			mark_reg_unknown_value(regs, regno);
545 	}
546 	return 0;
547 }
548 
549 static bool is_spillable_regtype(enum bpf_reg_type type)
550 {
551 	switch (type) {
552 	case PTR_TO_MAP_VALUE:
553 	case PTR_TO_MAP_VALUE_OR_NULL:
554 	case PTR_TO_MAP_VALUE_ADJ:
555 	case PTR_TO_STACK:
556 	case PTR_TO_CTX:
557 	case PTR_TO_PACKET:
558 	case PTR_TO_PACKET_END:
559 	case FRAME_PTR:
560 	case CONST_PTR_TO_MAP:
561 		return true;
562 	default:
563 		return false;
564 	}
565 }
566 
567 /* check_stack_read/write functions track spill/fill of registers,
568  * stack boundary and alignment are checked in check_mem_access()
569  */
570 static int check_stack_write(struct bpf_verifier_state *state, int off,
571 			     int size, int value_regno)
572 {
573 	int i;
574 	/* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
575 	 * so it's aligned access and [off, off + size) are within stack limits
576 	 */
577 
578 	if (value_regno >= 0 &&
579 	    is_spillable_regtype(state->regs[value_regno].type)) {
580 
581 		/* register containing pointer is being spilled into stack */
582 		if (size != BPF_REG_SIZE) {
583 			verbose("invalid size of register spill\n");
584 			return -EACCES;
585 		}
586 
587 		/* save register state */
588 		state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE] =
589 			state->regs[value_regno];
590 
591 		for (i = 0; i < BPF_REG_SIZE; i++)
592 			state->stack_slot_type[MAX_BPF_STACK + off + i] = STACK_SPILL;
593 	} else {
594 		/* regular write of data into stack */
595 		state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE] =
596 			(struct bpf_reg_state) {};
597 
598 		for (i = 0; i < size; i++)
599 			state->stack_slot_type[MAX_BPF_STACK + off + i] = STACK_MISC;
600 	}
601 	return 0;
602 }
603 
604 static int check_stack_read(struct bpf_verifier_state *state, int off, int size,
605 			    int value_regno)
606 {
607 	u8 *slot_type;
608 	int i;
609 
610 	slot_type = &state->stack_slot_type[MAX_BPF_STACK + off];
611 
612 	if (slot_type[0] == STACK_SPILL) {
613 		if (size != BPF_REG_SIZE) {
614 			verbose("invalid size of register spill\n");
615 			return -EACCES;
616 		}
617 		for (i = 1; i < BPF_REG_SIZE; i++) {
618 			if (slot_type[i] != STACK_SPILL) {
619 				verbose("corrupted spill memory\n");
620 				return -EACCES;
621 			}
622 		}
623 
624 		if (value_regno >= 0)
625 			/* restore register state from stack */
626 			state->regs[value_regno] =
627 				state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE];
628 		return 0;
629 	} else {
630 		for (i = 0; i < size; i++) {
631 			if (slot_type[i] != STACK_MISC) {
632 				verbose("invalid read from stack off %d+%d size %d\n",
633 					off, i, size);
634 				return -EACCES;
635 			}
636 		}
637 		if (value_regno >= 0)
638 			/* have read misc data from the stack */
639 			mark_reg_unknown_value_and_range(state->regs,
640 							 value_regno);
641 		return 0;
642 	}
643 }
644 
645 /* check read/write into map element returned by bpf_map_lookup_elem() */
646 static int check_map_access(struct bpf_verifier_env *env, u32 regno, int off,
647 			    int size)
648 {
649 	struct bpf_map *map = env->cur_state.regs[regno].map_ptr;
650 
651 	if (off < 0 || size <= 0 || off + size > map->value_size) {
652 		verbose("invalid access to map value, value_size=%d off=%d size=%d\n",
653 			map->value_size, off, size);
654 		return -EACCES;
655 	}
656 	return 0;
657 }
658 
659 /* check read/write into an adjusted map element */
660 static int check_map_access_adj(struct bpf_verifier_env *env, u32 regno,
661 				int off, int size)
662 {
663 	struct bpf_verifier_state *state = &env->cur_state;
664 	struct bpf_reg_state *reg = &state->regs[regno];
665 	int err;
666 
667 	/* We adjusted the register to this map value, so we
668 	 * need to change off and size to min_value and max_value
669 	 * respectively to make sure our theoretical access will be
670 	 * safe.
671 	 */
672 	if (log_level)
673 		print_verifier_state(state);
674 	env->varlen_map_value_access = true;
675 	/* The minimum value is only important with signed
676 	 * comparisons where we can't assume the floor of a
677 	 * value is 0.  If we are using signed variables for our
678 	 * index'es we need to make sure that whatever we use
679 	 * will have a set floor within our range.
680 	 */
681 	if (reg->min_value < 0) {
682 		verbose("R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
683 			regno);
684 		return -EACCES;
685 	}
686 	err = check_map_access(env, regno, reg->min_value + off, size);
687 	if (err) {
688 		verbose("R%d min value is outside of the array range\n",
689 			regno);
690 		return err;
691 	}
692 
693 	/* If we haven't set a max value then we need to bail
694 	 * since we can't be sure we won't do bad things.
695 	 */
696 	if (reg->max_value == BPF_REGISTER_MAX_RANGE) {
697 		verbose("R%d unbounded memory access, make sure to bounds check any array access into a map\n",
698 			regno);
699 		return -EACCES;
700 	}
701 	return check_map_access(env, regno, reg->max_value + off, size);
702 }
703 
704 #define MAX_PACKET_OFF 0xffff
705 
706 static bool may_access_direct_pkt_data(struct bpf_verifier_env *env,
707 				       const struct bpf_call_arg_meta *meta,
708 				       enum bpf_access_type t)
709 {
710 	switch (env->prog->type) {
711 	case BPF_PROG_TYPE_LWT_IN:
712 	case BPF_PROG_TYPE_LWT_OUT:
713 		/* dst_input() and dst_output() can't write for now */
714 		if (t == BPF_WRITE)
715 			return false;
716 		/* fallthrough */
717 	case BPF_PROG_TYPE_SCHED_CLS:
718 	case BPF_PROG_TYPE_SCHED_ACT:
719 	case BPF_PROG_TYPE_XDP:
720 	case BPF_PROG_TYPE_LWT_XMIT:
721 		if (meta)
722 			return meta->pkt_access;
723 
724 		env->seen_direct_write = true;
725 		return true;
726 	default:
727 		return false;
728 	}
729 }
730 
731 static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
732 			       int size)
733 {
734 	struct bpf_reg_state *regs = env->cur_state.regs;
735 	struct bpf_reg_state *reg = &regs[regno];
736 
737 	off += reg->off;
738 	if (off < 0 || size <= 0 || off + size > reg->range) {
739 		verbose("invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n",
740 			off, size, regno, reg->id, reg->off, reg->range);
741 		return -EACCES;
742 	}
743 	return 0;
744 }
745 
746 /* check access to 'struct bpf_context' fields */
747 static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size,
748 			    enum bpf_access_type t, enum bpf_reg_type *reg_type)
749 {
750 	struct bpf_insn_access_aux info = {
751 		.reg_type = *reg_type,
752 	};
753 
754 	/* for analyzer ctx accesses are already validated and converted */
755 	if (env->analyzer_ops)
756 		return 0;
757 
758 	if (env->prog->aux->ops->is_valid_access &&
759 	    env->prog->aux->ops->is_valid_access(off, size, t, &info)) {
760 		/* A non zero info.ctx_field_size indicates that this field is a
761 		 * candidate for later verifier transformation to load the whole
762 		 * field and then apply a mask when accessed with a narrower
763 		 * access than actual ctx access size. A zero info.ctx_field_size
764 		 * will only allow for whole field access and rejects any other
765 		 * type of narrower access.
766 		 */
767 		env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
768 		*reg_type = info.reg_type;
769 
770 		/* remember the offset of last byte accessed in ctx */
771 		if (env->prog->aux->max_ctx_offset < off + size)
772 			env->prog->aux->max_ctx_offset = off + size;
773 		return 0;
774 	}
775 
776 	verbose("invalid bpf_context access off=%d size=%d\n", off, size);
777 	return -EACCES;
778 }
779 
780 static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
781 {
782 	if (env->allow_ptr_leaks)
783 		return false;
784 
785 	switch (env->cur_state.regs[regno].type) {
786 	case UNKNOWN_VALUE:
787 	case CONST_IMM:
788 		return false;
789 	default:
790 		return true;
791 	}
792 }
793 
794 static int check_pkt_ptr_alignment(const struct bpf_reg_state *reg,
795 				   int off, int size, bool strict)
796 {
797 	int ip_align;
798 	int reg_off;
799 
800 	/* Byte size accesses are always allowed. */
801 	if (!strict || size == 1)
802 		return 0;
803 
804 	reg_off = reg->off;
805 	if (reg->id) {
806 		if (reg->aux_off_align % size) {
807 			verbose("Packet access is only %u byte aligned, %d byte access not allowed\n",
808 				reg->aux_off_align, size);
809 			return -EACCES;
810 		}
811 		reg_off += reg->aux_off;
812 	}
813 
814 	/* For platforms that do not have a Kconfig enabling
815 	 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the value of
816 	 * NET_IP_ALIGN is universally set to '2'.  And on platforms
817 	 * that do set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, we get
818 	 * to this code only in strict mode where we want to emulate
819 	 * the NET_IP_ALIGN==2 checking.  Therefore use an
820 	 * unconditional IP align value of '2'.
821 	 */
822 	ip_align = 2;
823 	if ((ip_align + reg_off + off) % size != 0) {
824 		verbose("misaligned packet access off %d+%d+%d size %d\n",
825 			ip_align, reg_off, off, size);
826 		return -EACCES;
827 	}
828 
829 	return 0;
830 }
831 
832 static int check_val_ptr_alignment(const struct bpf_reg_state *reg,
833 				   int size, bool strict)
834 {
835 	if (strict && size != 1) {
836 		verbose("Unknown alignment. Only byte-sized access allowed in value access.\n");
837 		return -EACCES;
838 	}
839 
840 	return 0;
841 }
842 
843 static int check_ptr_alignment(struct bpf_verifier_env *env,
844 			       const struct bpf_reg_state *reg,
845 			       int off, int size)
846 {
847 	bool strict = env->strict_alignment;
848 
849 	switch (reg->type) {
850 	case PTR_TO_PACKET:
851 		return check_pkt_ptr_alignment(reg, off, size, strict);
852 	case PTR_TO_MAP_VALUE_ADJ:
853 		return check_val_ptr_alignment(reg, size, strict);
854 	default:
855 		if (off % size != 0) {
856 			verbose("misaligned access off %d size %d\n",
857 				off, size);
858 			return -EACCES;
859 		}
860 
861 		return 0;
862 	}
863 }
864 
865 /* check whether memory at (regno + off) is accessible for t = (read | write)
866  * if t==write, value_regno is a register which value is stored into memory
867  * if t==read, value_regno is a register which will receive the value from memory
868  * if t==write && value_regno==-1, some unknown value is stored into memory
869  * if t==read && value_regno==-1, don't care what we read from memory
870  */
871 static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno, int off,
872 			    int bpf_size, enum bpf_access_type t,
873 			    int value_regno)
874 {
875 	struct bpf_verifier_state *state = &env->cur_state;
876 	struct bpf_reg_state *reg = &state->regs[regno];
877 	int size, err = 0;
878 
879 	if (reg->type == PTR_TO_STACK)
880 		off += reg->imm;
881 
882 	size = bpf_size_to_bytes(bpf_size);
883 	if (size < 0)
884 		return size;
885 
886 	err = check_ptr_alignment(env, reg, off, size);
887 	if (err)
888 		return err;
889 
890 	if (reg->type == PTR_TO_MAP_VALUE ||
891 	    reg->type == PTR_TO_MAP_VALUE_ADJ) {
892 		if (t == BPF_WRITE && value_regno >= 0 &&
893 		    is_pointer_value(env, value_regno)) {
894 			verbose("R%d leaks addr into map\n", value_regno);
895 			return -EACCES;
896 		}
897 
898 		if (reg->type == PTR_TO_MAP_VALUE_ADJ)
899 			err = check_map_access_adj(env, regno, off, size);
900 		else
901 			err = check_map_access(env, regno, off, size);
902 		if (!err && t == BPF_READ && value_regno >= 0)
903 			mark_reg_unknown_value_and_range(state->regs,
904 							 value_regno);
905 
906 	} else if (reg->type == PTR_TO_CTX) {
907 		enum bpf_reg_type reg_type = UNKNOWN_VALUE;
908 
909 		if (t == BPF_WRITE && value_regno >= 0 &&
910 		    is_pointer_value(env, value_regno)) {
911 			verbose("R%d leaks addr into ctx\n", value_regno);
912 			return -EACCES;
913 		}
914 		err = check_ctx_access(env, insn_idx, off, size, t, &reg_type);
915 		if (!err && t == BPF_READ && value_regno >= 0) {
916 			mark_reg_unknown_value_and_range(state->regs,
917 							 value_regno);
918 			/* note that reg.[id|off|range] == 0 */
919 			state->regs[value_regno].type = reg_type;
920 			state->regs[value_regno].aux_off = 0;
921 			state->regs[value_regno].aux_off_align = 0;
922 		}
923 
924 	} else if (reg->type == FRAME_PTR || reg->type == PTR_TO_STACK) {
925 		if (off >= 0 || off < -MAX_BPF_STACK) {
926 			verbose("invalid stack off=%d size=%d\n", off, size);
927 			return -EACCES;
928 		}
929 
930 		if (env->prog->aux->stack_depth < -off)
931 			env->prog->aux->stack_depth = -off;
932 
933 		if (t == BPF_WRITE) {
934 			if (!env->allow_ptr_leaks &&
935 			    state->stack_slot_type[MAX_BPF_STACK + off] == STACK_SPILL &&
936 			    size != BPF_REG_SIZE) {
937 				verbose("attempt to corrupt spilled pointer on stack\n");
938 				return -EACCES;
939 			}
940 			err = check_stack_write(state, off, size, value_regno);
941 		} else {
942 			err = check_stack_read(state, off, size, value_regno);
943 		}
944 	} else if (state->regs[regno].type == PTR_TO_PACKET) {
945 		if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) {
946 			verbose("cannot write into packet\n");
947 			return -EACCES;
948 		}
949 		if (t == BPF_WRITE && value_regno >= 0 &&
950 		    is_pointer_value(env, value_regno)) {
951 			verbose("R%d leaks addr into packet\n", value_regno);
952 			return -EACCES;
953 		}
954 		err = check_packet_access(env, regno, off, size);
955 		if (!err && t == BPF_READ && value_regno >= 0)
956 			mark_reg_unknown_value_and_range(state->regs,
957 							 value_regno);
958 	} else {
959 		verbose("R%d invalid mem access '%s'\n",
960 			regno, reg_type_str[reg->type]);
961 		return -EACCES;
962 	}
963 
964 	if (!err && size <= 2 && value_regno >= 0 && env->allow_ptr_leaks &&
965 	    state->regs[value_regno].type == UNKNOWN_VALUE) {
966 		/* 1 or 2 byte load zero-extends, determine the number of
967 		 * zero upper bits. Not doing it fo 4 byte load, since
968 		 * such values cannot be added to ptr_to_packet anyway.
969 		 */
970 		state->regs[value_regno].imm = 64 - size * 8;
971 	}
972 	return err;
973 }
974 
975 static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn)
976 {
977 	struct bpf_reg_state *regs = env->cur_state.regs;
978 	int err;
979 
980 	if ((BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) ||
981 	    insn->imm != 0) {
982 		verbose("BPF_XADD uses reserved fields\n");
983 		return -EINVAL;
984 	}
985 
986 	/* check src1 operand */
987 	err = check_reg_arg(regs, insn->src_reg, SRC_OP);
988 	if (err)
989 		return err;
990 
991 	/* check src2 operand */
992 	err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
993 	if (err)
994 		return err;
995 
996 	if (is_pointer_value(env, insn->src_reg)) {
997 		verbose("R%d leaks addr into mem\n", insn->src_reg);
998 		return -EACCES;
999 	}
1000 
1001 	/* check whether atomic_add can read the memory */
1002 	err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
1003 			       BPF_SIZE(insn->code), BPF_READ, -1);
1004 	if (err)
1005 		return err;
1006 
1007 	/* check whether atomic_add can write into the same memory */
1008 	return check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
1009 				BPF_SIZE(insn->code), BPF_WRITE, -1);
1010 }
1011 
1012 /* when register 'regno' is passed into function that will read 'access_size'
1013  * bytes from that pointer, make sure that it's within stack boundary
1014  * and all elements of stack are initialized
1015  */
1016 static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
1017 				int access_size, bool zero_size_allowed,
1018 				struct bpf_call_arg_meta *meta)
1019 {
1020 	struct bpf_verifier_state *state = &env->cur_state;
1021 	struct bpf_reg_state *regs = state->regs;
1022 	int off, i;
1023 
1024 	if (regs[regno].type != PTR_TO_STACK) {
1025 		if (zero_size_allowed && access_size == 0 &&
1026 		    regs[regno].type == CONST_IMM &&
1027 		    regs[regno].imm  == 0)
1028 			return 0;
1029 
1030 		verbose("R%d type=%s expected=%s\n", regno,
1031 			reg_type_str[regs[regno].type],
1032 			reg_type_str[PTR_TO_STACK]);
1033 		return -EACCES;
1034 	}
1035 
1036 	off = regs[regno].imm;
1037 	if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 ||
1038 	    access_size <= 0) {
1039 		verbose("invalid stack type R%d off=%d access_size=%d\n",
1040 			regno, off, access_size);
1041 		return -EACCES;
1042 	}
1043 
1044 	if (env->prog->aux->stack_depth < -off)
1045 		env->prog->aux->stack_depth = -off;
1046 
1047 	if (meta && meta->raw_mode) {
1048 		meta->access_size = access_size;
1049 		meta->regno = regno;
1050 		return 0;
1051 	}
1052 
1053 	for (i = 0; i < access_size; i++) {
1054 		if (state->stack_slot_type[MAX_BPF_STACK + off + i] != STACK_MISC) {
1055 			verbose("invalid indirect read from stack off %d+%d size %d\n",
1056 				off, i, access_size);
1057 			return -EACCES;
1058 		}
1059 	}
1060 	return 0;
1061 }
1062 
1063 static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
1064 				   int access_size, bool zero_size_allowed,
1065 				   struct bpf_call_arg_meta *meta)
1066 {
1067 	struct bpf_reg_state *regs = env->cur_state.regs;
1068 
1069 	switch (regs[regno].type) {
1070 	case PTR_TO_PACKET:
1071 		return check_packet_access(env, regno, 0, access_size);
1072 	case PTR_TO_MAP_VALUE:
1073 		return check_map_access(env, regno, 0, access_size);
1074 	case PTR_TO_MAP_VALUE_ADJ:
1075 		return check_map_access_adj(env, regno, 0, access_size);
1076 	default: /* const_imm|ptr_to_stack or invalid ptr */
1077 		return check_stack_boundary(env, regno, access_size,
1078 					    zero_size_allowed, meta);
1079 	}
1080 }
1081 
1082 static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
1083 			  enum bpf_arg_type arg_type,
1084 			  struct bpf_call_arg_meta *meta)
1085 {
1086 	struct bpf_reg_state *regs = env->cur_state.regs, *reg = &regs[regno];
1087 	enum bpf_reg_type expected_type, type = reg->type;
1088 	int err = 0;
1089 
1090 	if (arg_type == ARG_DONTCARE)
1091 		return 0;
1092 
1093 	if (type == NOT_INIT) {
1094 		verbose("R%d !read_ok\n", regno);
1095 		return -EACCES;
1096 	}
1097 
1098 	if (arg_type == ARG_ANYTHING) {
1099 		if (is_pointer_value(env, regno)) {
1100 			verbose("R%d leaks addr into helper function\n", regno);
1101 			return -EACCES;
1102 		}
1103 		return 0;
1104 	}
1105 
1106 	if (type == PTR_TO_PACKET &&
1107 	    !may_access_direct_pkt_data(env, meta, BPF_READ)) {
1108 		verbose("helper access to the packet is not allowed\n");
1109 		return -EACCES;
1110 	}
1111 
1112 	if (arg_type == ARG_PTR_TO_MAP_KEY ||
1113 	    arg_type == ARG_PTR_TO_MAP_VALUE) {
1114 		expected_type = PTR_TO_STACK;
1115 		if (type != PTR_TO_PACKET && type != expected_type)
1116 			goto err_type;
1117 	} else if (arg_type == ARG_CONST_SIZE ||
1118 		   arg_type == ARG_CONST_SIZE_OR_ZERO) {
1119 		expected_type = CONST_IMM;
1120 		/* One exception. Allow UNKNOWN_VALUE registers when the
1121 		 * boundaries are known and don't cause unsafe memory accesses
1122 		 */
1123 		if (type != UNKNOWN_VALUE && type != expected_type)
1124 			goto err_type;
1125 	} else if (arg_type == ARG_CONST_MAP_PTR) {
1126 		expected_type = CONST_PTR_TO_MAP;
1127 		if (type != expected_type)
1128 			goto err_type;
1129 	} else if (arg_type == ARG_PTR_TO_CTX) {
1130 		expected_type = PTR_TO_CTX;
1131 		if (type != expected_type)
1132 			goto err_type;
1133 	} else if (arg_type == ARG_PTR_TO_MEM ||
1134 		   arg_type == ARG_PTR_TO_UNINIT_MEM) {
1135 		expected_type = PTR_TO_STACK;
1136 		/* One exception here. In case function allows for NULL to be
1137 		 * passed in as argument, it's a CONST_IMM type. Final test
1138 		 * happens during stack boundary checking.
1139 		 */
1140 		if (type == CONST_IMM && reg->imm == 0)
1141 			/* final test in check_stack_boundary() */;
1142 		else if (type != PTR_TO_PACKET && type != PTR_TO_MAP_VALUE &&
1143 			 type != PTR_TO_MAP_VALUE_ADJ && type != expected_type)
1144 			goto err_type;
1145 		meta->raw_mode = arg_type == ARG_PTR_TO_UNINIT_MEM;
1146 	} else {
1147 		verbose("unsupported arg_type %d\n", arg_type);
1148 		return -EFAULT;
1149 	}
1150 
1151 	if (arg_type == ARG_CONST_MAP_PTR) {
1152 		/* bpf_map_xxx(map_ptr) call: remember that map_ptr */
1153 		meta->map_ptr = reg->map_ptr;
1154 	} else if (arg_type == ARG_PTR_TO_MAP_KEY) {
1155 		/* bpf_map_xxx(..., map_ptr, ..., key) call:
1156 		 * check that [key, key + map->key_size) are within
1157 		 * stack limits and initialized
1158 		 */
1159 		if (!meta->map_ptr) {
1160 			/* in function declaration map_ptr must come before
1161 			 * map_key, so that it's verified and known before
1162 			 * we have to check map_key here. Otherwise it means
1163 			 * that kernel subsystem misconfigured verifier
1164 			 */
1165 			verbose("invalid map_ptr to access map->key\n");
1166 			return -EACCES;
1167 		}
1168 		if (type == PTR_TO_PACKET)
1169 			err = check_packet_access(env, regno, 0,
1170 						  meta->map_ptr->key_size);
1171 		else
1172 			err = check_stack_boundary(env, regno,
1173 						   meta->map_ptr->key_size,
1174 						   false, NULL);
1175 	} else if (arg_type == ARG_PTR_TO_MAP_VALUE) {
1176 		/* bpf_map_xxx(..., map_ptr, ..., value) call:
1177 		 * check [value, value + map->value_size) validity
1178 		 */
1179 		if (!meta->map_ptr) {
1180 			/* kernel subsystem misconfigured verifier */
1181 			verbose("invalid map_ptr to access map->value\n");
1182 			return -EACCES;
1183 		}
1184 		if (type == PTR_TO_PACKET)
1185 			err = check_packet_access(env, regno, 0,
1186 						  meta->map_ptr->value_size);
1187 		else
1188 			err = check_stack_boundary(env, regno,
1189 						   meta->map_ptr->value_size,
1190 						   false, NULL);
1191 	} else if (arg_type == ARG_CONST_SIZE ||
1192 		   arg_type == ARG_CONST_SIZE_OR_ZERO) {
1193 		bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO);
1194 
1195 		/* bpf_xxx(..., buf, len) call will access 'len' bytes
1196 		 * from stack pointer 'buf'. Check it
1197 		 * note: regno == len, regno - 1 == buf
1198 		 */
1199 		if (regno == 0) {
1200 			/* kernel subsystem misconfigured verifier */
1201 			verbose("ARG_CONST_SIZE cannot be first argument\n");
1202 			return -EACCES;
1203 		}
1204 
1205 		/* If the register is UNKNOWN_VALUE, the access check happens
1206 		 * using its boundaries. Otherwise, just use its imm
1207 		 */
1208 		if (type == UNKNOWN_VALUE) {
1209 			/* For unprivileged variable accesses, disable raw
1210 			 * mode so that the program is required to
1211 			 * initialize all the memory that the helper could
1212 			 * just partially fill up.
1213 			 */
1214 			meta = NULL;
1215 
1216 			if (reg->min_value < 0) {
1217 				verbose("R%d min value is negative, either use unsigned or 'var &= const'\n",
1218 					regno);
1219 				return -EACCES;
1220 			}
1221 
1222 			if (reg->min_value == 0) {
1223 				err = check_helper_mem_access(env, regno - 1, 0,
1224 							      zero_size_allowed,
1225 							      meta);
1226 				if (err)
1227 					return err;
1228 			}
1229 
1230 			if (reg->max_value == BPF_REGISTER_MAX_RANGE) {
1231 				verbose("R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n",
1232 					regno);
1233 				return -EACCES;
1234 			}
1235 			err = check_helper_mem_access(env, regno - 1,
1236 						      reg->max_value,
1237 						      zero_size_allowed, meta);
1238 			if (err)
1239 				return err;
1240 		} else {
1241 			/* register is CONST_IMM */
1242 			err = check_helper_mem_access(env, regno - 1, reg->imm,
1243 						      zero_size_allowed, meta);
1244 		}
1245 	}
1246 
1247 	return err;
1248 err_type:
1249 	verbose("R%d type=%s expected=%s\n", regno,
1250 		reg_type_str[type], reg_type_str[expected_type]);
1251 	return -EACCES;
1252 }
1253 
1254 static int check_map_func_compatibility(struct bpf_map *map, int func_id)
1255 {
1256 	if (!map)
1257 		return 0;
1258 
1259 	/* We need a two way check, first is from map perspective ... */
1260 	switch (map->map_type) {
1261 	case BPF_MAP_TYPE_PROG_ARRAY:
1262 		if (func_id != BPF_FUNC_tail_call)
1263 			goto error;
1264 		break;
1265 	case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
1266 		if (func_id != BPF_FUNC_perf_event_read &&
1267 		    func_id != BPF_FUNC_perf_event_output)
1268 			goto error;
1269 		break;
1270 	case BPF_MAP_TYPE_STACK_TRACE:
1271 		if (func_id != BPF_FUNC_get_stackid)
1272 			goto error;
1273 		break;
1274 	case BPF_MAP_TYPE_CGROUP_ARRAY:
1275 		if (func_id != BPF_FUNC_skb_under_cgroup &&
1276 		    func_id != BPF_FUNC_current_task_under_cgroup)
1277 			goto error;
1278 		break;
1279 	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
1280 	case BPF_MAP_TYPE_HASH_OF_MAPS:
1281 		if (func_id != BPF_FUNC_map_lookup_elem)
1282 			goto error;
1283 	default:
1284 		break;
1285 	}
1286 
1287 	/* ... and second from the function itself. */
1288 	switch (func_id) {
1289 	case BPF_FUNC_tail_call:
1290 		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
1291 			goto error;
1292 		break;
1293 	case BPF_FUNC_perf_event_read:
1294 	case BPF_FUNC_perf_event_output:
1295 		if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
1296 			goto error;
1297 		break;
1298 	case BPF_FUNC_get_stackid:
1299 		if (map->map_type != BPF_MAP_TYPE_STACK_TRACE)
1300 			goto error;
1301 		break;
1302 	case BPF_FUNC_current_task_under_cgroup:
1303 	case BPF_FUNC_skb_under_cgroup:
1304 		if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY)
1305 			goto error;
1306 		break;
1307 	default:
1308 		break;
1309 	}
1310 
1311 	return 0;
1312 error:
1313 	verbose("cannot pass map_type %d into func %s#%d\n",
1314 		map->map_type, func_id_name(func_id), func_id);
1315 	return -EINVAL;
1316 }
1317 
1318 static int check_raw_mode(const struct bpf_func_proto *fn)
1319 {
1320 	int count = 0;
1321 
1322 	if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM)
1323 		count++;
1324 	if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM)
1325 		count++;
1326 	if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM)
1327 		count++;
1328 	if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM)
1329 		count++;
1330 	if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM)
1331 		count++;
1332 
1333 	return count > 1 ? -EINVAL : 0;
1334 }
1335 
1336 static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
1337 {
1338 	struct bpf_verifier_state *state = &env->cur_state;
1339 	struct bpf_reg_state *regs = state->regs, *reg;
1340 	int i;
1341 
1342 	for (i = 0; i < MAX_BPF_REG; i++)
1343 		if (regs[i].type == PTR_TO_PACKET ||
1344 		    regs[i].type == PTR_TO_PACKET_END)
1345 			mark_reg_unknown_value(regs, i);
1346 
1347 	for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
1348 		if (state->stack_slot_type[i] != STACK_SPILL)
1349 			continue;
1350 		reg = &state->spilled_regs[i / BPF_REG_SIZE];
1351 		if (reg->type != PTR_TO_PACKET &&
1352 		    reg->type != PTR_TO_PACKET_END)
1353 			continue;
1354 		__mark_reg_unknown_value(state->spilled_regs,
1355 					 i / BPF_REG_SIZE);
1356 	}
1357 }
1358 
1359 static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
1360 {
1361 	struct bpf_verifier_state *state = &env->cur_state;
1362 	const struct bpf_func_proto *fn = NULL;
1363 	struct bpf_reg_state *regs = state->regs;
1364 	struct bpf_call_arg_meta meta;
1365 	bool changes_data;
1366 	int i, err;
1367 
1368 	/* find function prototype */
1369 	if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) {
1370 		verbose("invalid func %s#%d\n", func_id_name(func_id), func_id);
1371 		return -EINVAL;
1372 	}
1373 
1374 	if (env->prog->aux->ops->get_func_proto)
1375 		fn = env->prog->aux->ops->get_func_proto(func_id);
1376 
1377 	if (!fn) {
1378 		verbose("unknown func %s#%d\n", func_id_name(func_id), func_id);
1379 		return -EINVAL;
1380 	}
1381 
1382 	/* eBPF programs must be GPL compatible to use GPL-ed functions */
1383 	if (!env->prog->gpl_compatible && fn->gpl_only) {
1384 		verbose("cannot call GPL only function from proprietary program\n");
1385 		return -EINVAL;
1386 	}
1387 
1388 	changes_data = bpf_helper_changes_pkt_data(fn->func);
1389 
1390 	memset(&meta, 0, sizeof(meta));
1391 	meta.pkt_access = fn->pkt_access;
1392 
1393 	/* We only support one arg being in raw mode at the moment, which
1394 	 * is sufficient for the helper functions we have right now.
1395 	 */
1396 	err = check_raw_mode(fn);
1397 	if (err) {
1398 		verbose("kernel subsystem misconfigured func %s#%d\n",
1399 			func_id_name(func_id), func_id);
1400 		return err;
1401 	}
1402 
1403 	/* check args */
1404 	err = check_func_arg(env, BPF_REG_1, fn->arg1_type, &meta);
1405 	if (err)
1406 		return err;
1407 	err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta);
1408 	if (err)
1409 		return err;
1410 	err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta);
1411 	if (err)
1412 		return err;
1413 	err = check_func_arg(env, BPF_REG_4, fn->arg4_type, &meta);
1414 	if (err)
1415 		return err;
1416 	err = check_func_arg(env, BPF_REG_5, fn->arg5_type, &meta);
1417 	if (err)
1418 		return err;
1419 
1420 	/* Mark slots with STACK_MISC in case of raw mode, stack offset
1421 	 * is inferred from register state.
1422 	 */
1423 	for (i = 0; i < meta.access_size; i++) {
1424 		err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B, BPF_WRITE, -1);
1425 		if (err)
1426 			return err;
1427 	}
1428 
1429 	/* reset caller saved regs */
1430 	for (i = 0; i < CALLER_SAVED_REGS; i++)
1431 		mark_reg_not_init(regs, caller_saved[i]);
1432 
1433 	/* update return register */
1434 	if (fn->ret_type == RET_INTEGER) {
1435 		regs[BPF_REG_0].type = UNKNOWN_VALUE;
1436 	} else if (fn->ret_type == RET_VOID) {
1437 		regs[BPF_REG_0].type = NOT_INIT;
1438 	} else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL) {
1439 		struct bpf_insn_aux_data *insn_aux;
1440 
1441 		regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
1442 		regs[BPF_REG_0].max_value = regs[BPF_REG_0].min_value = 0;
1443 		/* remember map_ptr, so that check_map_access()
1444 		 * can check 'value_size' boundary of memory access
1445 		 * to map element returned from bpf_map_lookup_elem()
1446 		 */
1447 		if (meta.map_ptr == NULL) {
1448 			verbose("kernel subsystem misconfigured verifier\n");
1449 			return -EINVAL;
1450 		}
1451 		regs[BPF_REG_0].map_ptr = meta.map_ptr;
1452 		regs[BPF_REG_0].id = ++env->id_gen;
1453 		insn_aux = &env->insn_aux_data[insn_idx];
1454 		if (!insn_aux->map_ptr)
1455 			insn_aux->map_ptr = meta.map_ptr;
1456 		else if (insn_aux->map_ptr != meta.map_ptr)
1457 			insn_aux->map_ptr = BPF_MAP_PTR_POISON;
1458 	} else {
1459 		verbose("unknown return type %d of func %s#%d\n",
1460 			fn->ret_type, func_id_name(func_id), func_id);
1461 		return -EINVAL;
1462 	}
1463 
1464 	err = check_map_func_compatibility(meta.map_ptr, func_id);
1465 	if (err)
1466 		return err;
1467 
1468 	if (changes_data)
1469 		clear_all_pkt_pointers(env);
1470 	return 0;
1471 }
1472 
1473 static int check_packet_ptr_add(struct bpf_verifier_env *env,
1474 				struct bpf_insn *insn)
1475 {
1476 	struct bpf_reg_state *regs = env->cur_state.regs;
1477 	struct bpf_reg_state *dst_reg = &regs[insn->dst_reg];
1478 	struct bpf_reg_state *src_reg = &regs[insn->src_reg];
1479 	struct bpf_reg_state tmp_reg;
1480 	s32 imm;
1481 
1482 	if (BPF_SRC(insn->code) == BPF_K) {
1483 		/* pkt_ptr += imm */
1484 		imm = insn->imm;
1485 
1486 add_imm:
1487 		if (imm < 0) {
1488 			verbose("addition of negative constant to packet pointer is not allowed\n");
1489 			return -EACCES;
1490 		}
1491 		if (imm >= MAX_PACKET_OFF ||
1492 		    imm + dst_reg->off >= MAX_PACKET_OFF) {
1493 			verbose("constant %d is too large to add to packet pointer\n",
1494 				imm);
1495 			return -EACCES;
1496 		}
1497 		/* a constant was added to pkt_ptr.
1498 		 * Remember it while keeping the same 'id'
1499 		 */
1500 		dst_reg->off += imm;
1501 	} else {
1502 		bool had_id;
1503 
1504 		if (src_reg->type == PTR_TO_PACKET) {
1505 			/* R6=pkt(id=0,off=0,r=62) R7=imm22; r7 += r6 */
1506 			tmp_reg = *dst_reg;  /* save r7 state */
1507 			*dst_reg = *src_reg; /* copy pkt_ptr state r6 into r7 */
1508 			src_reg = &tmp_reg;  /* pretend it's src_reg state */
1509 			/* if the checks below reject it, the copy won't matter,
1510 			 * since we're rejecting the whole program. If all ok,
1511 			 * then imm22 state will be added to r7
1512 			 * and r7 will be pkt(id=0,off=22,r=62) while
1513 			 * r6 will stay as pkt(id=0,off=0,r=62)
1514 			 */
1515 		}
1516 
1517 		if (src_reg->type == CONST_IMM) {
1518 			/* pkt_ptr += reg where reg is known constant */
1519 			imm = src_reg->imm;
1520 			goto add_imm;
1521 		}
1522 		/* disallow pkt_ptr += reg
1523 		 * if reg is not uknown_value with guaranteed zero upper bits
1524 		 * otherwise pkt_ptr may overflow and addition will become
1525 		 * subtraction which is not allowed
1526 		 */
1527 		if (src_reg->type != UNKNOWN_VALUE) {
1528 			verbose("cannot add '%s' to ptr_to_packet\n",
1529 				reg_type_str[src_reg->type]);
1530 			return -EACCES;
1531 		}
1532 		if (src_reg->imm < 48) {
1533 			verbose("cannot add integer value with %lld upper zero bits to ptr_to_packet\n",
1534 				src_reg->imm);
1535 			return -EACCES;
1536 		}
1537 
1538 		had_id = (dst_reg->id != 0);
1539 
1540 		/* dst_reg stays as pkt_ptr type and since some positive
1541 		 * integer value was added to the pointer, increment its 'id'
1542 		 */
1543 		dst_reg->id = ++env->id_gen;
1544 
1545 		/* something was added to pkt_ptr, set range to zero */
1546 		dst_reg->aux_off += dst_reg->off;
1547 		dst_reg->off = 0;
1548 		dst_reg->range = 0;
1549 		if (had_id)
1550 			dst_reg->aux_off_align = min(dst_reg->aux_off_align,
1551 						     src_reg->min_align);
1552 		else
1553 			dst_reg->aux_off_align = src_reg->min_align;
1554 	}
1555 	return 0;
1556 }
1557 
1558 static int evaluate_reg_alu(struct bpf_verifier_env *env, struct bpf_insn *insn)
1559 {
1560 	struct bpf_reg_state *regs = env->cur_state.regs;
1561 	struct bpf_reg_state *dst_reg = &regs[insn->dst_reg];
1562 	u8 opcode = BPF_OP(insn->code);
1563 	s64 imm_log2;
1564 
1565 	/* for type == UNKNOWN_VALUE:
1566 	 * imm > 0 -> number of zero upper bits
1567 	 * imm == 0 -> don't track which is the same as all bits can be non-zero
1568 	 */
1569 
1570 	if (BPF_SRC(insn->code) == BPF_X) {
1571 		struct bpf_reg_state *src_reg = &regs[insn->src_reg];
1572 
1573 		if (src_reg->type == UNKNOWN_VALUE && src_reg->imm > 0 &&
1574 		    dst_reg->imm && opcode == BPF_ADD) {
1575 			/* dreg += sreg
1576 			 * where both have zero upper bits. Adding them
1577 			 * can only result making one more bit non-zero
1578 			 * in the larger value.
1579 			 * Ex. 0xffff (imm=48) + 1 (imm=63) = 0x10000 (imm=47)
1580 			 *     0xffff (imm=48) + 0xffff = 0x1fffe (imm=47)
1581 			 */
1582 			dst_reg->imm = min(dst_reg->imm, src_reg->imm);
1583 			dst_reg->imm--;
1584 			return 0;
1585 		}
1586 		if (src_reg->type == CONST_IMM && src_reg->imm > 0 &&
1587 		    dst_reg->imm && opcode == BPF_ADD) {
1588 			/* dreg += sreg
1589 			 * where dreg has zero upper bits and sreg is const.
1590 			 * Adding them can only result making one more bit
1591 			 * non-zero in the larger value.
1592 			 */
1593 			imm_log2 = __ilog2_u64((long long)src_reg->imm);
1594 			dst_reg->imm = min(dst_reg->imm, 63 - imm_log2);
1595 			dst_reg->imm--;
1596 			return 0;
1597 		}
1598 		/* all other cases non supported yet, just mark dst_reg */
1599 		dst_reg->imm = 0;
1600 		return 0;
1601 	}
1602 
1603 	/* sign extend 32-bit imm into 64-bit to make sure that
1604 	 * negative values occupy bit 63. Note ilog2() would have
1605 	 * been incorrect, since sizeof(insn->imm) == 4
1606 	 */
1607 	imm_log2 = __ilog2_u64((long long)insn->imm);
1608 
1609 	if (dst_reg->imm && opcode == BPF_LSH) {
1610 		/* reg <<= imm
1611 		 * if reg was a result of 2 byte load, then its imm == 48
1612 		 * which means that upper 48 bits are zero and shifting this reg
1613 		 * left by 4 would mean that upper 44 bits are still zero
1614 		 */
1615 		dst_reg->imm -= insn->imm;
1616 	} else if (dst_reg->imm && opcode == BPF_MUL) {
1617 		/* reg *= imm
1618 		 * if multiplying by 14 subtract 4
1619 		 * This is conservative calculation of upper zero bits.
1620 		 * It's not trying to special case insn->imm == 1 or 0 cases
1621 		 */
1622 		dst_reg->imm -= imm_log2 + 1;
1623 	} else if (opcode == BPF_AND) {
1624 		/* reg &= imm */
1625 		dst_reg->imm = 63 - imm_log2;
1626 	} else if (dst_reg->imm && opcode == BPF_ADD) {
1627 		/* reg += imm */
1628 		dst_reg->imm = min(dst_reg->imm, 63 - imm_log2);
1629 		dst_reg->imm--;
1630 	} else if (opcode == BPF_RSH) {
1631 		/* reg >>= imm
1632 		 * which means that after right shift, upper bits will be zero
1633 		 * note that verifier already checked that
1634 		 * 0 <= imm < 64 for shift insn
1635 		 */
1636 		dst_reg->imm += insn->imm;
1637 		if (unlikely(dst_reg->imm > 64))
1638 			/* some dumb code did:
1639 			 * r2 = *(u32 *)mem;
1640 			 * r2 >>= 32;
1641 			 * and all bits are zero now */
1642 			dst_reg->imm = 64;
1643 	} else {
1644 		/* all other alu ops, means that we don't know what will
1645 		 * happen to the value, mark it with unknown number of zero bits
1646 		 */
1647 		dst_reg->imm = 0;
1648 	}
1649 
1650 	if (dst_reg->imm < 0) {
1651 		/* all 64 bits of the register can contain non-zero bits
1652 		 * and such value cannot be added to ptr_to_packet, since it
1653 		 * may overflow, mark it as unknown to avoid further eval
1654 		 */
1655 		dst_reg->imm = 0;
1656 	}
1657 	return 0;
1658 }
1659 
1660 static int evaluate_reg_imm_alu_unknown(struct bpf_verifier_env *env,
1661 					struct bpf_insn *insn)
1662 {
1663 	struct bpf_reg_state *regs = env->cur_state.regs;
1664 	struct bpf_reg_state *dst_reg = &regs[insn->dst_reg];
1665 	struct bpf_reg_state *src_reg = &regs[insn->src_reg];
1666 	u8 opcode = BPF_OP(insn->code);
1667 	s64 imm_log2 = __ilog2_u64((long long)dst_reg->imm);
1668 
1669 	/* BPF_X code with src_reg->type UNKNOWN_VALUE here. */
1670 	if (src_reg->imm > 0 && dst_reg->imm) {
1671 		switch (opcode) {
1672 		case BPF_ADD:
1673 			/* dreg += sreg
1674 			 * where both have zero upper bits. Adding them
1675 			 * can only result making one more bit non-zero
1676 			 * in the larger value.
1677 			 * Ex. 0xffff (imm=48) + 1 (imm=63) = 0x10000 (imm=47)
1678 			 *     0xffff (imm=48) + 0xffff = 0x1fffe (imm=47)
1679 			 */
1680 			dst_reg->imm = min(src_reg->imm, 63 - imm_log2);
1681 			dst_reg->imm--;
1682 			break;
1683 		case BPF_AND:
1684 			/* dreg &= sreg
1685 			 * AND can not extend zero bits only shrink
1686 			 * Ex.  0x00..00ffffff
1687 			 *    & 0x0f..ffffffff
1688 			 *     ----------------
1689 			 *      0x00..00ffffff
1690 			 */
1691 			dst_reg->imm = max(src_reg->imm, 63 - imm_log2);
1692 			break;
1693 		case BPF_OR:
1694 			/* dreg |= sreg
1695 			 * OR can only extend zero bits
1696 			 * Ex.  0x00..00ffffff
1697 			 *    | 0x0f..ffffffff
1698 			 *     ----------------
1699 			 *      0x0f..00ffffff
1700 			 */
1701 			dst_reg->imm = min(src_reg->imm, 63 - imm_log2);
1702 			break;
1703 		case BPF_SUB:
1704 		case BPF_MUL:
1705 		case BPF_RSH:
1706 		case BPF_LSH:
1707 			/* These may be flushed out later */
1708 		default:
1709 			mark_reg_unknown_value(regs, insn->dst_reg);
1710 		}
1711 	} else {
1712 		mark_reg_unknown_value(regs, insn->dst_reg);
1713 	}
1714 
1715 	dst_reg->type = UNKNOWN_VALUE;
1716 	return 0;
1717 }
1718 
1719 static int evaluate_reg_imm_alu(struct bpf_verifier_env *env,
1720 				struct bpf_insn *insn)
1721 {
1722 	struct bpf_reg_state *regs = env->cur_state.regs;
1723 	struct bpf_reg_state *dst_reg = &regs[insn->dst_reg];
1724 	struct bpf_reg_state *src_reg = &regs[insn->src_reg];
1725 	u8 opcode = BPF_OP(insn->code);
1726 	u64 dst_imm = dst_reg->imm;
1727 
1728 	if (BPF_SRC(insn->code) == BPF_X && src_reg->type == UNKNOWN_VALUE)
1729 		return evaluate_reg_imm_alu_unknown(env, insn);
1730 
1731 	/* dst_reg->type == CONST_IMM here. Simulate execution of insns
1732 	 * containing ALU ops. Don't care about overflow or negative
1733 	 * values, just add/sub/... them; registers are in u64.
1734 	 */
1735 	if (opcode == BPF_ADD && BPF_SRC(insn->code) == BPF_K) {
1736 		dst_imm += insn->imm;
1737 	} else if (opcode == BPF_ADD && BPF_SRC(insn->code) == BPF_X &&
1738 		   src_reg->type == CONST_IMM) {
1739 		dst_imm += src_reg->imm;
1740 	} else if (opcode == BPF_SUB && BPF_SRC(insn->code) == BPF_K) {
1741 		dst_imm -= insn->imm;
1742 	} else if (opcode == BPF_SUB && BPF_SRC(insn->code) == BPF_X &&
1743 		   src_reg->type == CONST_IMM) {
1744 		dst_imm -= src_reg->imm;
1745 	} else if (opcode == BPF_MUL && BPF_SRC(insn->code) == BPF_K) {
1746 		dst_imm *= insn->imm;
1747 	} else if (opcode == BPF_MUL && BPF_SRC(insn->code) == BPF_X &&
1748 		   src_reg->type == CONST_IMM) {
1749 		dst_imm *= src_reg->imm;
1750 	} else if (opcode == BPF_OR && BPF_SRC(insn->code) == BPF_K) {
1751 		dst_imm |= insn->imm;
1752 	} else if (opcode == BPF_OR && BPF_SRC(insn->code) == BPF_X &&
1753 		   src_reg->type == CONST_IMM) {
1754 		dst_imm |= src_reg->imm;
1755 	} else if (opcode == BPF_AND && BPF_SRC(insn->code) == BPF_K) {
1756 		dst_imm &= insn->imm;
1757 	} else if (opcode == BPF_AND && BPF_SRC(insn->code) == BPF_X &&
1758 		   src_reg->type == CONST_IMM) {
1759 		dst_imm &= src_reg->imm;
1760 	} else if (opcode == BPF_RSH && BPF_SRC(insn->code) == BPF_K) {
1761 		dst_imm >>= insn->imm;
1762 	} else if (opcode == BPF_RSH && BPF_SRC(insn->code) == BPF_X &&
1763 		   src_reg->type == CONST_IMM) {
1764 		dst_imm >>= src_reg->imm;
1765 	} else if (opcode == BPF_LSH && BPF_SRC(insn->code) == BPF_K) {
1766 		dst_imm <<= insn->imm;
1767 	} else if (opcode == BPF_LSH && BPF_SRC(insn->code) == BPF_X &&
1768 		   src_reg->type == CONST_IMM) {
1769 		dst_imm <<= src_reg->imm;
1770 	} else {
1771 		mark_reg_unknown_value(regs, insn->dst_reg);
1772 		goto out;
1773 	}
1774 
1775 	dst_reg->imm = dst_imm;
1776 out:
1777 	return 0;
1778 }
1779 
1780 static void check_reg_overflow(struct bpf_reg_state *reg)
1781 {
1782 	if (reg->max_value > BPF_REGISTER_MAX_RANGE)
1783 		reg->max_value = BPF_REGISTER_MAX_RANGE;
1784 	if (reg->min_value < BPF_REGISTER_MIN_RANGE ||
1785 	    reg->min_value > BPF_REGISTER_MAX_RANGE)
1786 		reg->min_value = BPF_REGISTER_MIN_RANGE;
1787 }
1788 
1789 static u32 calc_align(u32 imm)
1790 {
1791 	if (!imm)
1792 		return 1U << 31;
1793 	return imm - ((imm - 1) & imm);
1794 }
1795 
1796 static void adjust_reg_min_max_vals(struct bpf_verifier_env *env,
1797 				    struct bpf_insn *insn)
1798 {
1799 	struct bpf_reg_state *regs = env->cur_state.regs, *dst_reg;
1800 	s64 min_val = BPF_REGISTER_MIN_RANGE;
1801 	u64 max_val = BPF_REGISTER_MAX_RANGE;
1802 	u8 opcode = BPF_OP(insn->code);
1803 	u32 dst_align, src_align;
1804 
1805 	dst_reg = &regs[insn->dst_reg];
1806 	src_align = 0;
1807 	if (BPF_SRC(insn->code) == BPF_X) {
1808 		check_reg_overflow(&regs[insn->src_reg]);
1809 		min_val = regs[insn->src_reg].min_value;
1810 		max_val = regs[insn->src_reg].max_value;
1811 
1812 		/* If the source register is a random pointer then the
1813 		 * min_value/max_value values represent the range of the known
1814 		 * accesses into that value, not the actual min/max value of the
1815 		 * register itself.  In this case we have to reset the reg range
1816 		 * values so we know it is not safe to look at.
1817 		 */
1818 		if (regs[insn->src_reg].type != CONST_IMM &&
1819 		    regs[insn->src_reg].type != UNKNOWN_VALUE) {
1820 			min_val = BPF_REGISTER_MIN_RANGE;
1821 			max_val = BPF_REGISTER_MAX_RANGE;
1822 			src_align = 0;
1823 		} else {
1824 			src_align = regs[insn->src_reg].min_align;
1825 		}
1826 	} else if (insn->imm < BPF_REGISTER_MAX_RANGE &&
1827 		   (s64)insn->imm > BPF_REGISTER_MIN_RANGE) {
1828 		min_val = max_val = insn->imm;
1829 		src_align = calc_align(insn->imm);
1830 	}
1831 
1832 	dst_align = dst_reg->min_align;
1833 
1834 	/* We don't know anything about what was done to this register, mark it
1835 	 * as unknown.
1836 	 */
1837 	if (min_val == BPF_REGISTER_MIN_RANGE &&
1838 	    max_val == BPF_REGISTER_MAX_RANGE) {
1839 		reset_reg_range_values(regs, insn->dst_reg);
1840 		return;
1841 	}
1842 
1843 	/* If one of our values was at the end of our ranges then we can't just
1844 	 * do our normal operations to the register, we need to set the values
1845 	 * to the min/max since they are undefined.
1846 	 */
1847 	if (min_val == BPF_REGISTER_MIN_RANGE)
1848 		dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
1849 	if (max_val == BPF_REGISTER_MAX_RANGE)
1850 		dst_reg->max_value = BPF_REGISTER_MAX_RANGE;
1851 
1852 	switch (opcode) {
1853 	case BPF_ADD:
1854 		if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE)
1855 			dst_reg->min_value += min_val;
1856 		if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE)
1857 			dst_reg->max_value += max_val;
1858 		dst_reg->min_align = min(src_align, dst_align);
1859 		break;
1860 	case BPF_SUB:
1861 		if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE)
1862 			dst_reg->min_value -= min_val;
1863 		if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE)
1864 			dst_reg->max_value -= max_val;
1865 		dst_reg->min_align = min(src_align, dst_align);
1866 		break;
1867 	case BPF_MUL:
1868 		if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE)
1869 			dst_reg->min_value *= min_val;
1870 		if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE)
1871 			dst_reg->max_value *= max_val;
1872 		dst_reg->min_align = max(src_align, dst_align);
1873 		break;
1874 	case BPF_AND:
1875 		/* Disallow AND'ing of negative numbers, ain't nobody got time
1876 		 * for that.  Otherwise the minimum is 0 and the max is the max
1877 		 * value we could AND against.
1878 		 */
1879 		if (min_val < 0)
1880 			dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
1881 		else
1882 			dst_reg->min_value = 0;
1883 		dst_reg->max_value = max_val;
1884 		dst_reg->min_align = max(src_align, dst_align);
1885 		break;
1886 	case BPF_LSH:
1887 		/* Gotta have special overflow logic here, if we're shifting
1888 		 * more than MAX_RANGE then just assume we have an invalid
1889 		 * range.
1890 		 */
1891 		if (min_val > ilog2(BPF_REGISTER_MAX_RANGE)) {
1892 			dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
1893 			dst_reg->min_align = 1;
1894 		} else {
1895 			if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE)
1896 				dst_reg->min_value <<= min_val;
1897 			if (!dst_reg->min_align)
1898 				dst_reg->min_align = 1;
1899 			dst_reg->min_align <<= min_val;
1900 		}
1901 		if (max_val > ilog2(BPF_REGISTER_MAX_RANGE))
1902 			dst_reg->max_value = BPF_REGISTER_MAX_RANGE;
1903 		else if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE)
1904 			dst_reg->max_value <<= max_val;
1905 		break;
1906 	case BPF_RSH:
1907 		/* RSH by a negative number is undefined, and the BPF_RSH is an
1908 		 * unsigned shift, so make the appropriate casts.
1909 		 */
1910 		if (min_val < 0 || dst_reg->min_value < 0) {
1911 			dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
1912 		} else {
1913 			dst_reg->min_value =
1914 				(u64)(dst_reg->min_value) >> min_val;
1915 		}
1916 		if (min_val < 0) {
1917 			dst_reg->min_align = 1;
1918 		} else {
1919 			dst_reg->min_align >>= (u64) min_val;
1920 			if (!dst_reg->min_align)
1921 				dst_reg->min_align = 1;
1922 		}
1923 		if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE)
1924 			dst_reg->max_value >>= max_val;
1925 		break;
1926 	default:
1927 		reset_reg_range_values(regs, insn->dst_reg);
1928 		break;
1929 	}
1930 
1931 	check_reg_overflow(dst_reg);
1932 }
1933 
1934 /* check validity of 32-bit and 64-bit arithmetic operations */
1935 static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
1936 {
1937 	struct bpf_reg_state *regs = env->cur_state.regs, *dst_reg;
1938 	u8 opcode = BPF_OP(insn->code);
1939 	int err;
1940 
1941 	if (opcode == BPF_END || opcode == BPF_NEG) {
1942 		if (opcode == BPF_NEG) {
1943 			if (BPF_SRC(insn->code) != 0 ||
1944 			    insn->src_reg != BPF_REG_0 ||
1945 			    insn->off != 0 || insn->imm != 0) {
1946 				verbose("BPF_NEG uses reserved fields\n");
1947 				return -EINVAL;
1948 			}
1949 		} else {
1950 			if (insn->src_reg != BPF_REG_0 || insn->off != 0 ||
1951 			    (insn->imm != 16 && insn->imm != 32 && insn->imm != 64)) {
1952 				verbose("BPF_END uses reserved fields\n");
1953 				return -EINVAL;
1954 			}
1955 		}
1956 
1957 		/* check src operand */
1958 		err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
1959 		if (err)
1960 			return err;
1961 
1962 		if (is_pointer_value(env, insn->dst_reg)) {
1963 			verbose("R%d pointer arithmetic prohibited\n",
1964 				insn->dst_reg);
1965 			return -EACCES;
1966 		}
1967 
1968 		/* check dest operand */
1969 		err = check_reg_arg(regs, insn->dst_reg, DST_OP);
1970 		if (err)
1971 			return err;
1972 
1973 	} else if (opcode == BPF_MOV) {
1974 
1975 		if (BPF_SRC(insn->code) == BPF_X) {
1976 			if (insn->imm != 0 || insn->off != 0) {
1977 				verbose("BPF_MOV uses reserved fields\n");
1978 				return -EINVAL;
1979 			}
1980 
1981 			/* check src operand */
1982 			err = check_reg_arg(regs, insn->src_reg, SRC_OP);
1983 			if (err)
1984 				return err;
1985 		} else {
1986 			if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
1987 				verbose("BPF_MOV uses reserved fields\n");
1988 				return -EINVAL;
1989 			}
1990 		}
1991 
1992 		/* check dest operand */
1993 		err = check_reg_arg(regs, insn->dst_reg, DST_OP);
1994 		if (err)
1995 			return err;
1996 
1997 		/* we are setting our register to something new, we need to
1998 		 * reset its range values.
1999 		 */
2000 		reset_reg_range_values(regs, insn->dst_reg);
2001 
2002 		if (BPF_SRC(insn->code) == BPF_X) {
2003 			if (BPF_CLASS(insn->code) == BPF_ALU64) {
2004 				/* case: R1 = R2
2005 				 * copy register state to dest reg
2006 				 */
2007 				regs[insn->dst_reg] = regs[insn->src_reg];
2008 			} else {
2009 				if (is_pointer_value(env, insn->src_reg)) {
2010 					verbose("R%d partial copy of pointer\n",
2011 						insn->src_reg);
2012 					return -EACCES;
2013 				}
2014 				mark_reg_unknown_value(regs, insn->dst_reg);
2015 			}
2016 		} else {
2017 			/* case: R = imm
2018 			 * remember the value we stored into this reg
2019 			 */
2020 			regs[insn->dst_reg].type = CONST_IMM;
2021 			regs[insn->dst_reg].imm = insn->imm;
2022 			regs[insn->dst_reg].id = 0;
2023 			regs[insn->dst_reg].max_value = insn->imm;
2024 			regs[insn->dst_reg].min_value = insn->imm;
2025 			regs[insn->dst_reg].min_align = calc_align(insn->imm);
2026 		}
2027 
2028 	} else if (opcode > BPF_END) {
2029 		verbose("invalid BPF_ALU opcode %x\n", opcode);
2030 		return -EINVAL;
2031 
2032 	} else {	/* all other ALU ops: and, sub, xor, add, ... */
2033 
2034 		if (BPF_SRC(insn->code) == BPF_X) {
2035 			if (insn->imm != 0 || insn->off != 0) {
2036 				verbose("BPF_ALU uses reserved fields\n");
2037 				return -EINVAL;
2038 			}
2039 			/* check src1 operand */
2040 			err = check_reg_arg(regs, insn->src_reg, SRC_OP);
2041 			if (err)
2042 				return err;
2043 		} else {
2044 			if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
2045 				verbose("BPF_ALU uses reserved fields\n");
2046 				return -EINVAL;
2047 			}
2048 		}
2049 
2050 		/* check src2 operand */
2051 		err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
2052 		if (err)
2053 			return err;
2054 
2055 		if ((opcode == BPF_MOD || opcode == BPF_DIV) &&
2056 		    BPF_SRC(insn->code) == BPF_K && insn->imm == 0) {
2057 			verbose("div by zero\n");
2058 			return -EINVAL;
2059 		}
2060 
2061 		if ((opcode == BPF_LSH || opcode == BPF_RSH ||
2062 		     opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
2063 			int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;
2064 
2065 			if (insn->imm < 0 || insn->imm >= size) {
2066 				verbose("invalid shift %d\n", insn->imm);
2067 				return -EINVAL;
2068 			}
2069 		}
2070 
2071 		/* check dest operand */
2072 		err = check_reg_arg(regs, insn->dst_reg, DST_OP_NO_MARK);
2073 		if (err)
2074 			return err;
2075 
2076 		dst_reg = &regs[insn->dst_reg];
2077 
2078 		/* first we want to adjust our ranges. */
2079 		adjust_reg_min_max_vals(env, insn);
2080 
2081 		/* pattern match 'bpf_add Rx, imm' instruction */
2082 		if (opcode == BPF_ADD && BPF_CLASS(insn->code) == BPF_ALU64 &&
2083 		    dst_reg->type == FRAME_PTR && BPF_SRC(insn->code) == BPF_K) {
2084 			dst_reg->type = PTR_TO_STACK;
2085 			dst_reg->imm = insn->imm;
2086 			return 0;
2087 		} else if (opcode == BPF_ADD &&
2088 			   BPF_CLASS(insn->code) == BPF_ALU64 &&
2089 			   dst_reg->type == PTR_TO_STACK &&
2090 			   ((BPF_SRC(insn->code) == BPF_X &&
2091 			     regs[insn->src_reg].type == CONST_IMM) ||
2092 			    BPF_SRC(insn->code) == BPF_K)) {
2093 			if (BPF_SRC(insn->code) == BPF_X)
2094 				dst_reg->imm += regs[insn->src_reg].imm;
2095 			else
2096 				dst_reg->imm += insn->imm;
2097 			return 0;
2098 		} else if (opcode == BPF_ADD &&
2099 			   BPF_CLASS(insn->code) == BPF_ALU64 &&
2100 			   (dst_reg->type == PTR_TO_PACKET ||
2101 			    (BPF_SRC(insn->code) == BPF_X &&
2102 			     regs[insn->src_reg].type == PTR_TO_PACKET))) {
2103 			/* ptr_to_packet += K|X */
2104 			return check_packet_ptr_add(env, insn);
2105 		} else if (BPF_CLASS(insn->code) == BPF_ALU64 &&
2106 			   dst_reg->type == UNKNOWN_VALUE &&
2107 			   env->allow_ptr_leaks) {
2108 			/* unknown += K|X */
2109 			return evaluate_reg_alu(env, insn);
2110 		} else if (BPF_CLASS(insn->code) == BPF_ALU64 &&
2111 			   dst_reg->type == CONST_IMM &&
2112 			   env->allow_ptr_leaks) {
2113 			/* reg_imm += K|X */
2114 			return evaluate_reg_imm_alu(env, insn);
2115 		} else if (is_pointer_value(env, insn->dst_reg)) {
2116 			verbose("R%d pointer arithmetic prohibited\n",
2117 				insn->dst_reg);
2118 			return -EACCES;
2119 		} else if (BPF_SRC(insn->code) == BPF_X &&
2120 			   is_pointer_value(env, insn->src_reg)) {
2121 			verbose("R%d pointer arithmetic prohibited\n",
2122 				insn->src_reg);
2123 			return -EACCES;
2124 		}
2125 
2126 		/* If we did pointer math on a map value then just set it to our
2127 		 * PTR_TO_MAP_VALUE_ADJ type so we can deal with any stores or
2128 		 * loads to this register appropriately, otherwise just mark the
2129 		 * register as unknown.
2130 		 */
2131 		if (env->allow_ptr_leaks &&
2132 		    BPF_CLASS(insn->code) == BPF_ALU64 && opcode == BPF_ADD &&
2133 		    (dst_reg->type == PTR_TO_MAP_VALUE ||
2134 		     dst_reg->type == PTR_TO_MAP_VALUE_ADJ))
2135 			dst_reg->type = PTR_TO_MAP_VALUE_ADJ;
2136 		else
2137 			mark_reg_unknown_value(regs, insn->dst_reg);
2138 	}
2139 
2140 	return 0;
2141 }
2142 
2143 static void find_good_pkt_pointers(struct bpf_verifier_state *state,
2144 				   struct bpf_reg_state *dst_reg)
2145 {
2146 	struct bpf_reg_state *regs = state->regs, *reg;
2147 	int i;
2148 
2149 	/* LLVM can generate two kind of checks:
2150 	 *
2151 	 * Type 1:
2152 	 *
2153 	 *   r2 = r3;
2154 	 *   r2 += 8;
2155 	 *   if (r2 > pkt_end) goto <handle exception>
2156 	 *   <access okay>
2157 	 *
2158 	 *   Where:
2159 	 *     r2 == dst_reg, pkt_end == src_reg
2160 	 *     r2=pkt(id=n,off=8,r=0)
2161 	 *     r3=pkt(id=n,off=0,r=0)
2162 	 *
2163 	 * Type 2:
2164 	 *
2165 	 *   r2 = r3;
2166 	 *   r2 += 8;
2167 	 *   if (pkt_end >= r2) goto <access okay>
2168 	 *   <handle exception>
2169 	 *
2170 	 *   Where:
2171 	 *     pkt_end == dst_reg, r2 == src_reg
2172 	 *     r2=pkt(id=n,off=8,r=0)
2173 	 *     r3=pkt(id=n,off=0,r=0)
2174 	 *
2175 	 * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8)
2176 	 * so that range of bytes [r3, r3 + 8) is safe to access.
2177 	 */
2178 
2179 	for (i = 0; i < MAX_BPF_REG; i++)
2180 		if (regs[i].type == PTR_TO_PACKET && regs[i].id == dst_reg->id)
2181 			/* keep the maximum range already checked */
2182 			regs[i].range = max(regs[i].range, dst_reg->off);
2183 
2184 	for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
2185 		if (state->stack_slot_type[i] != STACK_SPILL)
2186 			continue;
2187 		reg = &state->spilled_regs[i / BPF_REG_SIZE];
2188 		if (reg->type == PTR_TO_PACKET && reg->id == dst_reg->id)
2189 			reg->range = max(reg->range, dst_reg->off);
2190 	}
2191 }
2192 
2193 /* Adjusts the register min/max values in the case that the dst_reg is the
2194  * variable register that we are working on, and src_reg is a constant or we're
2195  * simply doing a BPF_K check.
2196  */
2197 static void reg_set_min_max(struct bpf_reg_state *true_reg,
2198 			    struct bpf_reg_state *false_reg, u64 val,
2199 			    u8 opcode)
2200 {
2201 	switch (opcode) {
2202 	case BPF_JEQ:
2203 		/* If this is false then we know nothing Jon Snow, but if it is
2204 		 * true then we know for sure.
2205 		 */
2206 		true_reg->max_value = true_reg->min_value = val;
2207 		break;
2208 	case BPF_JNE:
2209 		/* If this is true we know nothing Jon Snow, but if it is false
2210 		 * we know the value for sure;
2211 		 */
2212 		false_reg->max_value = false_reg->min_value = val;
2213 		break;
2214 	case BPF_JGT:
2215 		/* Unsigned comparison, the minimum value is 0. */
2216 		false_reg->min_value = 0;
2217 		/* fallthrough */
2218 	case BPF_JSGT:
2219 		/* If this is false then we know the maximum val is val,
2220 		 * otherwise we know the min val is val+1.
2221 		 */
2222 		false_reg->max_value = val;
2223 		true_reg->min_value = val + 1;
2224 		break;
2225 	case BPF_JGE:
2226 		/* Unsigned comparison, the minimum value is 0. */
2227 		false_reg->min_value = 0;
2228 		/* fallthrough */
2229 	case BPF_JSGE:
2230 		/* If this is false then we know the maximum value is val - 1,
2231 		 * otherwise we know the mimimum value is val.
2232 		 */
2233 		false_reg->max_value = val - 1;
2234 		true_reg->min_value = val;
2235 		break;
2236 	default:
2237 		break;
2238 	}
2239 
2240 	check_reg_overflow(false_reg);
2241 	check_reg_overflow(true_reg);
2242 }
2243 
2244 /* Same as above, but for the case that dst_reg is a CONST_IMM reg and src_reg
2245  * is the variable reg.
2246  */
2247 static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
2248 				struct bpf_reg_state *false_reg, u64 val,
2249 				u8 opcode)
2250 {
2251 	switch (opcode) {
2252 	case BPF_JEQ:
2253 		/* If this is false then we know nothing Jon Snow, but if it is
2254 		 * true then we know for sure.
2255 		 */
2256 		true_reg->max_value = true_reg->min_value = val;
2257 		break;
2258 	case BPF_JNE:
2259 		/* If this is true we know nothing Jon Snow, but if it is false
2260 		 * we know the value for sure;
2261 		 */
2262 		false_reg->max_value = false_reg->min_value = val;
2263 		break;
2264 	case BPF_JGT:
2265 		/* Unsigned comparison, the minimum value is 0. */
2266 		true_reg->min_value = 0;
2267 		/* fallthrough */
2268 	case BPF_JSGT:
2269 		/*
2270 		 * If this is false, then the val is <= the register, if it is
2271 		 * true the register <= to the val.
2272 		 */
2273 		false_reg->min_value = val;
2274 		true_reg->max_value = val - 1;
2275 		break;
2276 	case BPF_JGE:
2277 		/* Unsigned comparison, the minimum value is 0. */
2278 		true_reg->min_value = 0;
2279 		/* fallthrough */
2280 	case BPF_JSGE:
2281 		/* If this is false then constant < register, if it is true then
2282 		 * the register < constant.
2283 		 */
2284 		false_reg->min_value = val + 1;
2285 		true_reg->max_value = val;
2286 		break;
2287 	default:
2288 		break;
2289 	}
2290 
2291 	check_reg_overflow(false_reg);
2292 	check_reg_overflow(true_reg);
2293 }
2294 
2295 static void mark_map_reg(struct bpf_reg_state *regs, u32 regno, u32 id,
2296 			 enum bpf_reg_type type)
2297 {
2298 	struct bpf_reg_state *reg = &regs[regno];
2299 
2300 	if (reg->type == PTR_TO_MAP_VALUE_OR_NULL && reg->id == id) {
2301 		if (type == UNKNOWN_VALUE) {
2302 			__mark_reg_unknown_value(regs, regno);
2303 		} else if (reg->map_ptr->inner_map_meta) {
2304 			reg->type = CONST_PTR_TO_MAP;
2305 			reg->map_ptr = reg->map_ptr->inner_map_meta;
2306 		} else {
2307 			reg->type = type;
2308 		}
2309 		/* We don't need id from this point onwards anymore, thus we
2310 		 * should better reset it, so that state pruning has chances
2311 		 * to take effect.
2312 		 */
2313 		reg->id = 0;
2314 	}
2315 }
2316 
2317 /* The logic is similar to find_good_pkt_pointers(), both could eventually
2318  * be folded together at some point.
2319  */
2320 static void mark_map_regs(struct bpf_verifier_state *state, u32 regno,
2321 			  enum bpf_reg_type type)
2322 {
2323 	struct bpf_reg_state *regs = state->regs;
2324 	u32 id = regs[regno].id;
2325 	int i;
2326 
2327 	for (i = 0; i < MAX_BPF_REG; i++)
2328 		mark_map_reg(regs, i, id, type);
2329 
2330 	for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
2331 		if (state->stack_slot_type[i] != STACK_SPILL)
2332 			continue;
2333 		mark_map_reg(state->spilled_regs, i / BPF_REG_SIZE, id, type);
2334 	}
2335 }
2336 
2337 static int check_cond_jmp_op(struct bpf_verifier_env *env,
2338 			     struct bpf_insn *insn, int *insn_idx)
2339 {
2340 	struct bpf_verifier_state *other_branch, *this_branch = &env->cur_state;
2341 	struct bpf_reg_state *regs = this_branch->regs, *dst_reg;
2342 	u8 opcode = BPF_OP(insn->code);
2343 	int err;
2344 
2345 	if (opcode > BPF_EXIT) {
2346 		verbose("invalid BPF_JMP opcode %x\n", opcode);
2347 		return -EINVAL;
2348 	}
2349 
2350 	if (BPF_SRC(insn->code) == BPF_X) {
2351 		if (insn->imm != 0) {
2352 			verbose("BPF_JMP uses reserved fields\n");
2353 			return -EINVAL;
2354 		}
2355 
2356 		/* check src1 operand */
2357 		err = check_reg_arg(regs, insn->src_reg, SRC_OP);
2358 		if (err)
2359 			return err;
2360 
2361 		if (is_pointer_value(env, insn->src_reg)) {
2362 			verbose("R%d pointer comparison prohibited\n",
2363 				insn->src_reg);
2364 			return -EACCES;
2365 		}
2366 	} else {
2367 		if (insn->src_reg != BPF_REG_0) {
2368 			verbose("BPF_JMP uses reserved fields\n");
2369 			return -EINVAL;
2370 		}
2371 	}
2372 
2373 	/* check src2 operand */
2374 	err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
2375 	if (err)
2376 		return err;
2377 
2378 	dst_reg = &regs[insn->dst_reg];
2379 
2380 	/* detect if R == 0 where R was initialized to zero earlier */
2381 	if (BPF_SRC(insn->code) == BPF_K &&
2382 	    (opcode == BPF_JEQ || opcode == BPF_JNE) &&
2383 	    dst_reg->type == CONST_IMM && dst_reg->imm == insn->imm) {
2384 		if (opcode == BPF_JEQ) {
2385 			/* if (imm == imm) goto pc+off;
2386 			 * only follow the goto, ignore fall-through
2387 			 */
2388 			*insn_idx += insn->off;
2389 			return 0;
2390 		} else {
2391 			/* if (imm != imm) goto pc+off;
2392 			 * only follow fall-through branch, since
2393 			 * that's where the program will go
2394 			 */
2395 			return 0;
2396 		}
2397 	}
2398 
2399 	other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx);
2400 	if (!other_branch)
2401 		return -EFAULT;
2402 
2403 	/* detect if we are comparing against a constant value so we can adjust
2404 	 * our min/max values for our dst register.
2405 	 */
2406 	if (BPF_SRC(insn->code) == BPF_X) {
2407 		if (regs[insn->src_reg].type == CONST_IMM)
2408 			reg_set_min_max(&other_branch->regs[insn->dst_reg],
2409 					dst_reg, regs[insn->src_reg].imm,
2410 					opcode);
2411 		else if (dst_reg->type == CONST_IMM)
2412 			reg_set_min_max_inv(&other_branch->regs[insn->src_reg],
2413 					    &regs[insn->src_reg], dst_reg->imm,
2414 					    opcode);
2415 	} else {
2416 		reg_set_min_max(&other_branch->regs[insn->dst_reg],
2417 					dst_reg, insn->imm, opcode);
2418 	}
2419 
2420 	/* detect if R == 0 where R is returned from bpf_map_lookup_elem() */
2421 	if (BPF_SRC(insn->code) == BPF_K &&
2422 	    insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) &&
2423 	    dst_reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
2424 		/* Mark all identical map registers in each branch as either
2425 		 * safe or unknown depending R == 0 or R != 0 conditional.
2426 		 */
2427 		mark_map_regs(this_branch, insn->dst_reg,
2428 			      opcode == BPF_JEQ ? PTR_TO_MAP_VALUE : UNKNOWN_VALUE);
2429 		mark_map_regs(other_branch, insn->dst_reg,
2430 			      opcode == BPF_JEQ ? UNKNOWN_VALUE : PTR_TO_MAP_VALUE);
2431 	} else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGT &&
2432 		   dst_reg->type == PTR_TO_PACKET &&
2433 		   regs[insn->src_reg].type == PTR_TO_PACKET_END) {
2434 		find_good_pkt_pointers(this_branch, dst_reg);
2435 	} else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGE &&
2436 		   dst_reg->type == PTR_TO_PACKET_END &&
2437 		   regs[insn->src_reg].type == PTR_TO_PACKET) {
2438 		find_good_pkt_pointers(other_branch, &regs[insn->src_reg]);
2439 	} else if (is_pointer_value(env, insn->dst_reg)) {
2440 		verbose("R%d pointer comparison prohibited\n", insn->dst_reg);
2441 		return -EACCES;
2442 	}
2443 	if (log_level)
2444 		print_verifier_state(this_branch);
2445 	return 0;
2446 }
2447 
2448 /* return the map pointer stored inside BPF_LD_IMM64 instruction */
2449 static struct bpf_map *ld_imm64_to_map_ptr(struct bpf_insn *insn)
2450 {
2451 	u64 imm64 = ((u64) (u32) insn[0].imm) | ((u64) (u32) insn[1].imm) << 32;
2452 
2453 	return (struct bpf_map *) (unsigned long) imm64;
2454 }
2455 
2456 /* verify BPF_LD_IMM64 instruction */
2457 static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
2458 {
2459 	struct bpf_reg_state *regs = env->cur_state.regs;
2460 	int err;
2461 
2462 	if (BPF_SIZE(insn->code) != BPF_DW) {
2463 		verbose("invalid BPF_LD_IMM insn\n");
2464 		return -EINVAL;
2465 	}
2466 	if (insn->off != 0) {
2467 		verbose("BPF_LD_IMM64 uses reserved fields\n");
2468 		return -EINVAL;
2469 	}
2470 
2471 	err = check_reg_arg(regs, insn->dst_reg, DST_OP);
2472 	if (err)
2473 		return err;
2474 
2475 	if (insn->src_reg == 0) {
2476 		u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
2477 
2478 		regs[insn->dst_reg].type = CONST_IMM;
2479 		regs[insn->dst_reg].imm = imm;
2480 		regs[insn->dst_reg].id = 0;
2481 		return 0;
2482 	}
2483 
2484 	/* replace_map_fd_with_map_ptr() should have caught bad ld_imm64 */
2485 	BUG_ON(insn->src_reg != BPF_PSEUDO_MAP_FD);
2486 
2487 	regs[insn->dst_reg].type = CONST_PTR_TO_MAP;
2488 	regs[insn->dst_reg].map_ptr = ld_imm64_to_map_ptr(insn);
2489 	return 0;
2490 }
2491 
2492 static bool may_access_skb(enum bpf_prog_type type)
2493 {
2494 	switch (type) {
2495 	case BPF_PROG_TYPE_SOCKET_FILTER:
2496 	case BPF_PROG_TYPE_SCHED_CLS:
2497 	case BPF_PROG_TYPE_SCHED_ACT:
2498 		return true;
2499 	default:
2500 		return false;
2501 	}
2502 }
2503 
2504 /* verify safety of LD_ABS|LD_IND instructions:
2505  * - they can only appear in the programs where ctx == skb
2506  * - since they are wrappers of function calls, they scratch R1-R5 registers,
2507  *   preserve R6-R9, and store return value into R0
2508  *
2509  * Implicit input:
2510  *   ctx == skb == R6 == CTX
2511  *
2512  * Explicit input:
2513  *   SRC == any register
2514  *   IMM == 32-bit immediate
2515  *
2516  * Output:
2517  *   R0 - 8/16/32-bit skb data converted to cpu endianness
2518  */
2519 static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
2520 {
2521 	struct bpf_reg_state *regs = env->cur_state.regs;
2522 	u8 mode = BPF_MODE(insn->code);
2523 	int i, err;
2524 
2525 	if (!may_access_skb(env->prog->type)) {
2526 		verbose("BPF_LD_[ABS|IND] instructions not allowed for this program type\n");
2527 		return -EINVAL;
2528 	}
2529 
2530 	if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
2531 	    BPF_SIZE(insn->code) == BPF_DW ||
2532 	    (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
2533 		verbose("BPF_LD_[ABS|IND] uses reserved fields\n");
2534 		return -EINVAL;
2535 	}
2536 
2537 	/* check whether implicit source operand (register R6) is readable */
2538 	err = check_reg_arg(regs, BPF_REG_6, SRC_OP);
2539 	if (err)
2540 		return err;
2541 
2542 	if (regs[BPF_REG_6].type != PTR_TO_CTX) {
2543 		verbose("at the time of BPF_LD_ABS|IND R6 != pointer to skb\n");
2544 		return -EINVAL;
2545 	}
2546 
2547 	if (mode == BPF_IND) {
2548 		/* check explicit source operand */
2549 		err = check_reg_arg(regs, insn->src_reg, SRC_OP);
2550 		if (err)
2551 			return err;
2552 	}
2553 
2554 	/* reset caller saved regs to unreadable */
2555 	for (i = 0; i < CALLER_SAVED_REGS; i++)
2556 		mark_reg_not_init(regs, caller_saved[i]);
2557 
2558 	/* mark destination R0 register as readable, since it contains
2559 	 * the value fetched from the packet
2560 	 */
2561 	regs[BPF_REG_0].type = UNKNOWN_VALUE;
2562 	return 0;
2563 }
2564 
2565 /* non-recursive DFS pseudo code
2566  * 1  procedure DFS-iterative(G,v):
2567  * 2      label v as discovered
2568  * 3      let S be a stack
2569  * 4      S.push(v)
2570  * 5      while S is not empty
2571  * 6            t <- S.pop()
2572  * 7            if t is what we're looking for:
2573  * 8                return t
2574  * 9            for all edges e in G.adjacentEdges(t) do
2575  * 10               if edge e is already labelled
2576  * 11                   continue with the next edge
2577  * 12               w <- G.adjacentVertex(t,e)
2578  * 13               if vertex w is not discovered and not explored
2579  * 14                   label e as tree-edge
2580  * 15                   label w as discovered
2581  * 16                   S.push(w)
2582  * 17                   continue at 5
2583  * 18               else if vertex w is discovered
2584  * 19                   label e as back-edge
2585  * 20               else
2586  * 21                   // vertex w is explored
2587  * 22                   label e as forward- or cross-edge
2588  * 23           label t as explored
2589  * 24           S.pop()
2590  *
2591  * convention:
2592  * 0x10 - discovered
2593  * 0x11 - discovered and fall-through edge labelled
2594  * 0x12 - discovered and fall-through and branch edges labelled
2595  * 0x20 - explored
2596  */
2597 
2598 enum {
2599 	DISCOVERED = 0x10,
2600 	EXPLORED = 0x20,
2601 	FALLTHROUGH = 1,
2602 	BRANCH = 2,
2603 };
2604 
2605 #define STATE_LIST_MARK ((struct bpf_verifier_state_list *) -1L)
2606 
2607 static int *insn_stack;	/* stack of insns to process */
2608 static int cur_stack;	/* current stack index */
2609 static int *insn_state;
2610 
2611 /* t, w, e - match pseudo-code above:
2612  * t - index of current instruction
2613  * w - next instruction
2614  * e - edge
2615  */
2616 static int push_insn(int t, int w, int e, struct bpf_verifier_env *env)
2617 {
2618 	if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH))
2619 		return 0;
2620 
2621 	if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH))
2622 		return 0;
2623 
2624 	if (w < 0 || w >= env->prog->len) {
2625 		verbose("jump out of range from insn %d to %d\n", t, w);
2626 		return -EINVAL;
2627 	}
2628 
2629 	if (e == BRANCH)
2630 		/* mark branch target for state pruning */
2631 		env->explored_states[w] = STATE_LIST_MARK;
2632 
2633 	if (insn_state[w] == 0) {
2634 		/* tree-edge */
2635 		insn_state[t] = DISCOVERED | e;
2636 		insn_state[w] = DISCOVERED;
2637 		if (cur_stack >= env->prog->len)
2638 			return -E2BIG;
2639 		insn_stack[cur_stack++] = w;
2640 		return 1;
2641 	} else if ((insn_state[w] & 0xF0) == DISCOVERED) {
2642 		verbose("back-edge from insn %d to %d\n", t, w);
2643 		return -EINVAL;
2644 	} else if (insn_state[w] == EXPLORED) {
2645 		/* forward- or cross-edge */
2646 		insn_state[t] = DISCOVERED | e;
2647 	} else {
2648 		verbose("insn state internal bug\n");
2649 		return -EFAULT;
2650 	}
2651 	return 0;
2652 }
2653 
2654 /* non-recursive depth-first-search to detect loops in BPF program
2655  * loop == back-edge in directed graph
2656  */
2657 static int check_cfg(struct bpf_verifier_env *env)
2658 {
2659 	struct bpf_insn *insns = env->prog->insnsi;
2660 	int insn_cnt = env->prog->len;
2661 	int ret = 0;
2662 	int i, t;
2663 
2664 	insn_state = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
2665 	if (!insn_state)
2666 		return -ENOMEM;
2667 
2668 	insn_stack = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
2669 	if (!insn_stack) {
2670 		kfree(insn_state);
2671 		return -ENOMEM;
2672 	}
2673 
2674 	insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */
2675 	insn_stack[0] = 0; /* 0 is the first instruction */
2676 	cur_stack = 1;
2677 
2678 peek_stack:
2679 	if (cur_stack == 0)
2680 		goto check_state;
2681 	t = insn_stack[cur_stack - 1];
2682 
2683 	if (BPF_CLASS(insns[t].code) == BPF_JMP) {
2684 		u8 opcode = BPF_OP(insns[t].code);
2685 
2686 		if (opcode == BPF_EXIT) {
2687 			goto mark_explored;
2688 		} else if (opcode == BPF_CALL) {
2689 			ret = push_insn(t, t + 1, FALLTHROUGH, env);
2690 			if (ret == 1)
2691 				goto peek_stack;
2692 			else if (ret < 0)
2693 				goto err_free;
2694 			if (t + 1 < insn_cnt)
2695 				env->explored_states[t + 1] = STATE_LIST_MARK;
2696 		} else if (opcode == BPF_JA) {
2697 			if (BPF_SRC(insns[t].code) != BPF_K) {
2698 				ret = -EINVAL;
2699 				goto err_free;
2700 			}
2701 			/* unconditional jump with single edge */
2702 			ret = push_insn(t, t + insns[t].off + 1,
2703 					FALLTHROUGH, env);
2704 			if (ret == 1)
2705 				goto peek_stack;
2706 			else if (ret < 0)
2707 				goto err_free;
2708 			/* tell verifier to check for equivalent states
2709 			 * after every call and jump
2710 			 */
2711 			if (t + 1 < insn_cnt)
2712 				env->explored_states[t + 1] = STATE_LIST_MARK;
2713 		} else {
2714 			/* conditional jump with two edges */
2715 			env->explored_states[t] = STATE_LIST_MARK;
2716 			ret = push_insn(t, t + 1, FALLTHROUGH, env);
2717 			if (ret == 1)
2718 				goto peek_stack;
2719 			else if (ret < 0)
2720 				goto err_free;
2721 
2722 			ret = push_insn(t, t + insns[t].off + 1, BRANCH, env);
2723 			if (ret == 1)
2724 				goto peek_stack;
2725 			else if (ret < 0)
2726 				goto err_free;
2727 		}
2728 	} else {
2729 		/* all other non-branch instructions with single
2730 		 * fall-through edge
2731 		 */
2732 		ret = push_insn(t, t + 1, FALLTHROUGH, env);
2733 		if (ret == 1)
2734 			goto peek_stack;
2735 		else if (ret < 0)
2736 			goto err_free;
2737 	}
2738 
2739 mark_explored:
2740 	insn_state[t] = EXPLORED;
2741 	if (cur_stack-- <= 0) {
2742 		verbose("pop stack internal bug\n");
2743 		ret = -EFAULT;
2744 		goto err_free;
2745 	}
2746 	goto peek_stack;
2747 
2748 check_state:
2749 	for (i = 0; i < insn_cnt; i++) {
2750 		if (insn_state[i] != EXPLORED) {
2751 			verbose("unreachable insn %d\n", i);
2752 			ret = -EINVAL;
2753 			goto err_free;
2754 		}
2755 	}
2756 	ret = 0; /* cfg looks good */
2757 
2758 err_free:
2759 	kfree(insn_state);
2760 	kfree(insn_stack);
2761 	return ret;
2762 }
2763 
2764 /* the following conditions reduce the number of explored insns
2765  * from ~140k to ~80k for ultra large programs that use a lot of ptr_to_packet
2766  */
2767 static bool compare_ptrs_to_packet(struct bpf_verifier_env *env,
2768 				   struct bpf_reg_state *old,
2769 				   struct bpf_reg_state *cur)
2770 {
2771 	if (old->id != cur->id)
2772 		return false;
2773 
2774 	/* old ptr_to_packet is more conservative, since it allows smaller
2775 	 * range. Ex:
2776 	 * old(off=0,r=10) is equal to cur(off=0,r=20), because
2777 	 * old(off=0,r=10) means that with range=10 the verifier proceeded
2778 	 * further and found no issues with the program. Now we're in the same
2779 	 * spot with cur(off=0,r=20), so we're safe too, since anything further
2780 	 * will only be looking at most 10 bytes after this pointer.
2781 	 */
2782 	if (old->off == cur->off && old->range < cur->range)
2783 		return true;
2784 
2785 	/* old(off=20,r=10) is equal to cur(off=22,re=22 or 5 or 0)
2786 	 * since both cannot be used for packet access and safe(old)
2787 	 * pointer has smaller off that could be used for further
2788 	 * 'if (ptr > data_end)' check
2789 	 * Ex:
2790 	 * old(off=20,r=10) and cur(off=22,r=22) and cur(off=22,r=0) mean
2791 	 * that we cannot access the packet.
2792 	 * The safe range is:
2793 	 * [ptr, ptr + range - off)
2794 	 * so whenever off >=range, it means no safe bytes from this pointer.
2795 	 * When comparing old->off <= cur->off, it means that older code
2796 	 * went with smaller offset and that offset was later
2797 	 * used to figure out the safe range after 'if (ptr > data_end)' check
2798 	 * Say, 'old' state was explored like:
2799 	 * ... R3(off=0, r=0)
2800 	 * R4 = R3 + 20
2801 	 * ... now R4(off=20,r=0)  <-- here
2802 	 * if (R4 > data_end)
2803 	 * ... R4(off=20,r=20), R3(off=0,r=20) and R3 can be used to access.
2804 	 * ... the code further went all the way to bpf_exit.
2805 	 * Now the 'cur' state at the mark 'here' has R4(off=30,r=0).
2806 	 * old_R4(off=20,r=0) equal to cur_R4(off=30,r=0), since if the verifier
2807 	 * goes further, such cur_R4 will give larger safe packet range after
2808 	 * 'if (R4 > data_end)' and all further insn were already good with r=20,
2809 	 * so they will be good with r=30 and we can prune the search.
2810 	 */
2811 	if (!env->strict_alignment && old->off <= cur->off &&
2812 	    old->off >= old->range && cur->off >= cur->range)
2813 		return true;
2814 
2815 	return false;
2816 }
2817 
2818 /* compare two verifier states
2819  *
2820  * all states stored in state_list are known to be valid, since
2821  * verifier reached 'bpf_exit' instruction through them
2822  *
2823  * this function is called when verifier exploring different branches of
2824  * execution popped from the state stack. If it sees an old state that has
2825  * more strict register state and more strict stack state then this execution
2826  * branch doesn't need to be explored further, since verifier already
2827  * concluded that more strict state leads to valid finish.
2828  *
2829  * Therefore two states are equivalent if register state is more conservative
2830  * and explored stack state is more conservative than the current one.
2831  * Example:
2832  *       explored                   current
2833  * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC)
2834  * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC)
2835  *
2836  * In other words if current stack state (one being explored) has more
2837  * valid slots than old one that already passed validation, it means
2838  * the verifier can stop exploring and conclude that current state is valid too
2839  *
2840  * Similarly with registers. If explored state has register type as invalid
2841  * whereas register type in current state is meaningful, it means that
2842  * the current state will reach 'bpf_exit' instruction safely
2843  */
2844 static bool states_equal(struct bpf_verifier_env *env,
2845 			 struct bpf_verifier_state *old,
2846 			 struct bpf_verifier_state *cur)
2847 {
2848 	bool varlen_map_access = env->varlen_map_value_access;
2849 	struct bpf_reg_state *rold, *rcur;
2850 	int i;
2851 
2852 	for (i = 0; i < MAX_BPF_REG; i++) {
2853 		rold = &old->regs[i];
2854 		rcur = &cur->regs[i];
2855 
2856 		if (memcmp(rold, rcur, sizeof(*rold)) == 0)
2857 			continue;
2858 
2859 		/* If the ranges were not the same, but everything else was and
2860 		 * we didn't do a variable access into a map then we are a-ok.
2861 		 */
2862 		if (!varlen_map_access &&
2863 		    memcmp(rold, rcur, offsetofend(struct bpf_reg_state, id)) == 0)
2864 			continue;
2865 
2866 		/* If we didn't map access then again we don't care about the
2867 		 * mismatched range values and it's ok if our old type was
2868 		 * UNKNOWN and we didn't go to a NOT_INIT'ed reg.
2869 		 */
2870 		if (rold->type == NOT_INIT ||
2871 		    (!varlen_map_access && rold->type == UNKNOWN_VALUE &&
2872 		     rcur->type != NOT_INIT))
2873 			continue;
2874 
2875 		/* Don't care about the reg->id in this case. */
2876 		if (rold->type == PTR_TO_MAP_VALUE_OR_NULL &&
2877 		    rcur->type == PTR_TO_MAP_VALUE_OR_NULL &&
2878 		    rold->map_ptr == rcur->map_ptr)
2879 			continue;
2880 
2881 		if (rold->type == PTR_TO_PACKET && rcur->type == PTR_TO_PACKET &&
2882 		    compare_ptrs_to_packet(env, rold, rcur))
2883 			continue;
2884 
2885 		return false;
2886 	}
2887 
2888 	for (i = 0; i < MAX_BPF_STACK; i++) {
2889 		if (old->stack_slot_type[i] == STACK_INVALID)
2890 			continue;
2891 		if (old->stack_slot_type[i] != cur->stack_slot_type[i])
2892 			/* Ex: old explored (safe) state has STACK_SPILL in
2893 			 * this stack slot, but current has has STACK_MISC ->
2894 			 * this verifier states are not equivalent,
2895 			 * return false to continue verification of this path
2896 			 */
2897 			return false;
2898 		if (i % BPF_REG_SIZE)
2899 			continue;
2900 		if (old->stack_slot_type[i] != STACK_SPILL)
2901 			continue;
2902 		if (memcmp(&old->spilled_regs[i / BPF_REG_SIZE],
2903 			   &cur->spilled_regs[i / BPF_REG_SIZE],
2904 			   sizeof(old->spilled_regs[0])))
2905 			/* when explored and current stack slot types are
2906 			 * the same, check that stored pointers types
2907 			 * are the same as well.
2908 			 * Ex: explored safe path could have stored
2909 			 * (bpf_reg_state) {.type = PTR_TO_STACK, .imm = -8}
2910 			 * but current path has stored:
2911 			 * (bpf_reg_state) {.type = PTR_TO_STACK, .imm = -16}
2912 			 * such verifier states are not equivalent.
2913 			 * return false to continue verification of this path
2914 			 */
2915 			return false;
2916 		else
2917 			continue;
2918 	}
2919 	return true;
2920 }
2921 
2922 static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
2923 {
2924 	struct bpf_verifier_state_list *new_sl;
2925 	struct bpf_verifier_state_list *sl;
2926 
2927 	sl = env->explored_states[insn_idx];
2928 	if (!sl)
2929 		/* this 'insn_idx' instruction wasn't marked, so we will not
2930 		 * be doing state search here
2931 		 */
2932 		return 0;
2933 
2934 	while (sl != STATE_LIST_MARK) {
2935 		if (states_equal(env, &sl->state, &env->cur_state))
2936 			/* reached equivalent register/stack state,
2937 			 * prune the search
2938 			 */
2939 			return 1;
2940 		sl = sl->next;
2941 	}
2942 
2943 	/* there were no equivalent states, remember current one.
2944 	 * technically the current state is not proven to be safe yet,
2945 	 * but it will either reach bpf_exit (which means it's safe) or
2946 	 * it will be rejected. Since there are no loops, we won't be
2947 	 * seeing this 'insn_idx' instruction again on the way to bpf_exit
2948 	 */
2949 	new_sl = kmalloc(sizeof(struct bpf_verifier_state_list), GFP_USER);
2950 	if (!new_sl)
2951 		return -ENOMEM;
2952 
2953 	/* add new state to the head of linked list */
2954 	memcpy(&new_sl->state, &env->cur_state, sizeof(env->cur_state));
2955 	new_sl->next = env->explored_states[insn_idx];
2956 	env->explored_states[insn_idx] = new_sl;
2957 	return 0;
2958 }
2959 
2960 static int ext_analyzer_insn_hook(struct bpf_verifier_env *env,
2961 				  int insn_idx, int prev_insn_idx)
2962 {
2963 	if (!env->analyzer_ops || !env->analyzer_ops->insn_hook)
2964 		return 0;
2965 
2966 	return env->analyzer_ops->insn_hook(env, insn_idx, prev_insn_idx);
2967 }
2968 
2969 static int do_check(struct bpf_verifier_env *env)
2970 {
2971 	struct bpf_verifier_state *state = &env->cur_state;
2972 	struct bpf_insn *insns = env->prog->insnsi;
2973 	struct bpf_reg_state *regs = state->regs;
2974 	int insn_cnt = env->prog->len;
2975 	int insn_idx, prev_insn_idx = 0;
2976 	int insn_processed = 0;
2977 	bool do_print_state = false;
2978 
2979 	init_reg_state(regs);
2980 	insn_idx = 0;
2981 	env->varlen_map_value_access = false;
2982 	for (;;) {
2983 		struct bpf_insn *insn;
2984 		u8 class;
2985 		int err;
2986 
2987 		if (insn_idx >= insn_cnt) {
2988 			verbose("invalid insn idx %d insn_cnt %d\n",
2989 				insn_idx, insn_cnt);
2990 			return -EFAULT;
2991 		}
2992 
2993 		insn = &insns[insn_idx];
2994 		class = BPF_CLASS(insn->code);
2995 
2996 		if (++insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) {
2997 			verbose("BPF program is too large. Processed %d insn\n",
2998 				insn_processed);
2999 			return -E2BIG;
3000 		}
3001 
3002 		err = is_state_visited(env, insn_idx);
3003 		if (err < 0)
3004 			return err;
3005 		if (err == 1) {
3006 			/* found equivalent state, can prune the search */
3007 			if (log_level) {
3008 				if (do_print_state)
3009 					verbose("\nfrom %d to %d: safe\n",
3010 						prev_insn_idx, insn_idx);
3011 				else
3012 					verbose("%d: safe\n", insn_idx);
3013 			}
3014 			goto process_bpf_exit;
3015 		}
3016 
3017 		if (need_resched())
3018 			cond_resched();
3019 
3020 		if (log_level > 1 || (log_level && do_print_state)) {
3021 			if (log_level > 1)
3022 				verbose("%d:", insn_idx);
3023 			else
3024 				verbose("\nfrom %d to %d:",
3025 					prev_insn_idx, insn_idx);
3026 			print_verifier_state(&env->cur_state);
3027 			do_print_state = false;
3028 		}
3029 
3030 		if (log_level) {
3031 			verbose("%d: ", insn_idx);
3032 			print_bpf_insn(env, insn);
3033 		}
3034 
3035 		err = ext_analyzer_insn_hook(env, insn_idx, prev_insn_idx);
3036 		if (err)
3037 			return err;
3038 
3039 		if (class == BPF_ALU || class == BPF_ALU64) {
3040 			err = check_alu_op(env, insn);
3041 			if (err)
3042 				return err;
3043 
3044 		} else if (class == BPF_LDX) {
3045 			enum bpf_reg_type *prev_src_type, src_reg_type;
3046 
3047 			/* check for reserved fields is already done */
3048 
3049 			/* check src operand */
3050 			err = check_reg_arg(regs, insn->src_reg, SRC_OP);
3051 			if (err)
3052 				return err;
3053 
3054 			err = check_reg_arg(regs, insn->dst_reg, DST_OP_NO_MARK);
3055 			if (err)
3056 				return err;
3057 
3058 			src_reg_type = regs[insn->src_reg].type;
3059 
3060 			/* check that memory (src_reg + off) is readable,
3061 			 * the state of dst_reg will be updated by this func
3062 			 */
3063 			err = check_mem_access(env, insn_idx, insn->src_reg, insn->off,
3064 					       BPF_SIZE(insn->code), BPF_READ,
3065 					       insn->dst_reg);
3066 			if (err)
3067 				return err;
3068 
3069 			prev_src_type = &env->insn_aux_data[insn_idx].ptr_type;
3070 
3071 			if (*prev_src_type == NOT_INIT) {
3072 				/* saw a valid insn
3073 				 * dst_reg = *(u32 *)(src_reg + off)
3074 				 * save type to validate intersecting paths
3075 				 */
3076 				*prev_src_type = src_reg_type;
3077 
3078 			} else if (src_reg_type != *prev_src_type &&
3079 				   (src_reg_type == PTR_TO_CTX ||
3080 				    *prev_src_type == PTR_TO_CTX)) {
3081 				/* ABuser program is trying to use the same insn
3082 				 * dst_reg = *(u32*) (src_reg + off)
3083 				 * with different pointer types:
3084 				 * src_reg == ctx in one branch and
3085 				 * src_reg == stack|map in some other branch.
3086 				 * Reject it.
3087 				 */
3088 				verbose("same insn cannot be used with different pointers\n");
3089 				return -EINVAL;
3090 			}
3091 
3092 		} else if (class == BPF_STX) {
3093 			enum bpf_reg_type *prev_dst_type, dst_reg_type;
3094 
3095 			if (BPF_MODE(insn->code) == BPF_XADD) {
3096 				err = check_xadd(env, insn_idx, insn);
3097 				if (err)
3098 					return err;
3099 				insn_idx++;
3100 				continue;
3101 			}
3102 
3103 			/* check src1 operand */
3104 			err = check_reg_arg(regs, insn->src_reg, SRC_OP);
3105 			if (err)
3106 				return err;
3107 			/* check src2 operand */
3108 			err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
3109 			if (err)
3110 				return err;
3111 
3112 			dst_reg_type = regs[insn->dst_reg].type;
3113 
3114 			/* check that memory (dst_reg + off) is writeable */
3115 			err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
3116 					       BPF_SIZE(insn->code), BPF_WRITE,
3117 					       insn->src_reg);
3118 			if (err)
3119 				return err;
3120 
3121 			prev_dst_type = &env->insn_aux_data[insn_idx].ptr_type;
3122 
3123 			if (*prev_dst_type == NOT_INIT) {
3124 				*prev_dst_type = dst_reg_type;
3125 			} else if (dst_reg_type != *prev_dst_type &&
3126 				   (dst_reg_type == PTR_TO_CTX ||
3127 				    *prev_dst_type == PTR_TO_CTX)) {
3128 				verbose("same insn cannot be used with different pointers\n");
3129 				return -EINVAL;
3130 			}
3131 
3132 		} else if (class == BPF_ST) {
3133 			if (BPF_MODE(insn->code) != BPF_MEM ||
3134 			    insn->src_reg != BPF_REG_0) {
3135 				verbose("BPF_ST uses reserved fields\n");
3136 				return -EINVAL;
3137 			}
3138 			/* check src operand */
3139 			err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
3140 			if (err)
3141 				return err;
3142 
3143 			/* check that memory (dst_reg + off) is writeable */
3144 			err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
3145 					       BPF_SIZE(insn->code), BPF_WRITE,
3146 					       -1);
3147 			if (err)
3148 				return err;
3149 
3150 		} else if (class == BPF_JMP) {
3151 			u8 opcode = BPF_OP(insn->code);
3152 
3153 			if (opcode == BPF_CALL) {
3154 				if (BPF_SRC(insn->code) != BPF_K ||
3155 				    insn->off != 0 ||
3156 				    insn->src_reg != BPF_REG_0 ||
3157 				    insn->dst_reg != BPF_REG_0) {
3158 					verbose("BPF_CALL uses reserved fields\n");
3159 					return -EINVAL;
3160 				}
3161 
3162 				err = check_call(env, insn->imm, insn_idx);
3163 				if (err)
3164 					return err;
3165 
3166 			} else if (opcode == BPF_JA) {
3167 				if (BPF_SRC(insn->code) != BPF_K ||
3168 				    insn->imm != 0 ||
3169 				    insn->src_reg != BPF_REG_0 ||
3170 				    insn->dst_reg != BPF_REG_0) {
3171 					verbose("BPF_JA uses reserved fields\n");
3172 					return -EINVAL;
3173 				}
3174 
3175 				insn_idx += insn->off + 1;
3176 				continue;
3177 
3178 			} else if (opcode == BPF_EXIT) {
3179 				if (BPF_SRC(insn->code) != BPF_K ||
3180 				    insn->imm != 0 ||
3181 				    insn->src_reg != BPF_REG_0 ||
3182 				    insn->dst_reg != BPF_REG_0) {
3183 					verbose("BPF_EXIT uses reserved fields\n");
3184 					return -EINVAL;
3185 				}
3186 
3187 				/* eBPF calling convetion is such that R0 is used
3188 				 * to return the value from eBPF program.
3189 				 * Make sure that it's readable at this time
3190 				 * of bpf_exit, which means that program wrote
3191 				 * something into it earlier
3192 				 */
3193 				err = check_reg_arg(regs, BPF_REG_0, SRC_OP);
3194 				if (err)
3195 					return err;
3196 
3197 				if (is_pointer_value(env, BPF_REG_0)) {
3198 					verbose("R0 leaks addr as return value\n");
3199 					return -EACCES;
3200 				}
3201 
3202 process_bpf_exit:
3203 				insn_idx = pop_stack(env, &prev_insn_idx);
3204 				if (insn_idx < 0) {
3205 					break;
3206 				} else {
3207 					do_print_state = true;
3208 					continue;
3209 				}
3210 			} else {
3211 				err = check_cond_jmp_op(env, insn, &insn_idx);
3212 				if (err)
3213 					return err;
3214 			}
3215 		} else if (class == BPF_LD) {
3216 			u8 mode = BPF_MODE(insn->code);
3217 
3218 			if (mode == BPF_ABS || mode == BPF_IND) {
3219 				err = check_ld_abs(env, insn);
3220 				if (err)
3221 					return err;
3222 
3223 			} else if (mode == BPF_IMM) {
3224 				err = check_ld_imm(env, insn);
3225 				if (err)
3226 					return err;
3227 
3228 				insn_idx++;
3229 			} else {
3230 				verbose("invalid BPF_LD mode\n");
3231 				return -EINVAL;
3232 			}
3233 			reset_reg_range_values(regs, insn->dst_reg);
3234 		} else {
3235 			verbose("unknown insn class %d\n", class);
3236 			return -EINVAL;
3237 		}
3238 
3239 		insn_idx++;
3240 	}
3241 
3242 	verbose("processed %d insns, stack depth %d\n",
3243 		insn_processed, env->prog->aux->stack_depth);
3244 	return 0;
3245 }
3246 
3247 static int check_map_prealloc(struct bpf_map *map)
3248 {
3249 	return (map->map_type != BPF_MAP_TYPE_HASH &&
3250 		map->map_type != BPF_MAP_TYPE_PERCPU_HASH &&
3251 		map->map_type != BPF_MAP_TYPE_HASH_OF_MAPS) ||
3252 		!(map->map_flags & BPF_F_NO_PREALLOC);
3253 }
3254 
3255 static int check_map_prog_compatibility(struct bpf_map *map,
3256 					struct bpf_prog *prog)
3257 
3258 {
3259 	/* Make sure that BPF_PROG_TYPE_PERF_EVENT programs only use
3260 	 * preallocated hash maps, since doing memory allocation
3261 	 * in overflow_handler can crash depending on where nmi got
3262 	 * triggered.
3263 	 */
3264 	if (prog->type == BPF_PROG_TYPE_PERF_EVENT) {
3265 		if (!check_map_prealloc(map)) {
3266 			verbose("perf_event programs can only use preallocated hash map\n");
3267 			return -EINVAL;
3268 		}
3269 		if (map->inner_map_meta &&
3270 		    !check_map_prealloc(map->inner_map_meta)) {
3271 			verbose("perf_event programs can only use preallocated inner hash map\n");
3272 			return -EINVAL;
3273 		}
3274 	}
3275 	return 0;
3276 }
3277 
3278 /* look for pseudo eBPF instructions that access map FDs and
3279  * replace them with actual map pointers
3280  */
3281 static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env)
3282 {
3283 	struct bpf_insn *insn = env->prog->insnsi;
3284 	int insn_cnt = env->prog->len;
3285 	int i, j, err;
3286 
3287 	err = bpf_prog_calc_tag(env->prog);
3288 	if (err)
3289 		return err;
3290 
3291 	for (i = 0; i < insn_cnt; i++, insn++) {
3292 		if (BPF_CLASS(insn->code) == BPF_LDX &&
3293 		    (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) {
3294 			verbose("BPF_LDX uses reserved fields\n");
3295 			return -EINVAL;
3296 		}
3297 
3298 		if (BPF_CLASS(insn->code) == BPF_STX &&
3299 		    ((BPF_MODE(insn->code) != BPF_MEM &&
3300 		      BPF_MODE(insn->code) != BPF_XADD) || insn->imm != 0)) {
3301 			verbose("BPF_STX uses reserved fields\n");
3302 			return -EINVAL;
3303 		}
3304 
3305 		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) {
3306 			struct bpf_map *map;
3307 			struct fd f;
3308 
3309 			if (i == insn_cnt - 1 || insn[1].code != 0 ||
3310 			    insn[1].dst_reg != 0 || insn[1].src_reg != 0 ||
3311 			    insn[1].off != 0) {
3312 				verbose("invalid bpf_ld_imm64 insn\n");
3313 				return -EINVAL;
3314 			}
3315 
3316 			if (insn->src_reg == 0)
3317 				/* valid generic load 64-bit imm */
3318 				goto next_insn;
3319 
3320 			if (insn->src_reg != BPF_PSEUDO_MAP_FD) {
3321 				verbose("unrecognized bpf_ld_imm64 insn\n");
3322 				return -EINVAL;
3323 			}
3324 
3325 			f = fdget(insn->imm);
3326 			map = __bpf_map_get(f);
3327 			if (IS_ERR(map)) {
3328 				verbose("fd %d is not pointing to valid bpf_map\n",
3329 					insn->imm);
3330 				return PTR_ERR(map);
3331 			}
3332 
3333 			err = check_map_prog_compatibility(map, env->prog);
3334 			if (err) {
3335 				fdput(f);
3336 				return err;
3337 			}
3338 
3339 			/* store map pointer inside BPF_LD_IMM64 instruction */
3340 			insn[0].imm = (u32) (unsigned long) map;
3341 			insn[1].imm = ((u64) (unsigned long) map) >> 32;
3342 
3343 			/* check whether we recorded this map already */
3344 			for (j = 0; j < env->used_map_cnt; j++)
3345 				if (env->used_maps[j] == map) {
3346 					fdput(f);
3347 					goto next_insn;
3348 				}
3349 
3350 			if (env->used_map_cnt >= MAX_USED_MAPS) {
3351 				fdput(f);
3352 				return -E2BIG;
3353 			}
3354 
3355 			/* hold the map. If the program is rejected by verifier,
3356 			 * the map will be released by release_maps() or it
3357 			 * will be used by the valid program until it's unloaded
3358 			 * and all maps are released in free_bpf_prog_info()
3359 			 */
3360 			map = bpf_map_inc(map, false);
3361 			if (IS_ERR(map)) {
3362 				fdput(f);
3363 				return PTR_ERR(map);
3364 			}
3365 			env->used_maps[env->used_map_cnt++] = map;
3366 
3367 			fdput(f);
3368 next_insn:
3369 			insn++;
3370 			i++;
3371 		}
3372 	}
3373 
3374 	/* now all pseudo BPF_LD_IMM64 instructions load valid
3375 	 * 'struct bpf_map *' into a register instead of user map_fd.
3376 	 * These pointers will be used later by verifier to validate map access.
3377 	 */
3378 	return 0;
3379 }
3380 
3381 /* drop refcnt of maps used by the rejected program */
3382 static void release_maps(struct bpf_verifier_env *env)
3383 {
3384 	int i;
3385 
3386 	for (i = 0; i < env->used_map_cnt; i++)
3387 		bpf_map_put(env->used_maps[i]);
3388 }
3389 
3390 /* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */
3391 static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env)
3392 {
3393 	struct bpf_insn *insn = env->prog->insnsi;
3394 	int insn_cnt = env->prog->len;
3395 	int i;
3396 
3397 	for (i = 0; i < insn_cnt; i++, insn++)
3398 		if (insn->code == (BPF_LD | BPF_IMM | BPF_DW))
3399 			insn->src_reg = 0;
3400 }
3401 
3402 /* single env->prog->insni[off] instruction was replaced with the range
3403  * insni[off, off + cnt).  Adjust corresponding insn_aux_data by copying
3404  * [0, off) and [off, end) to new locations, so the patched range stays zero
3405  */
3406 static int adjust_insn_aux_data(struct bpf_verifier_env *env, u32 prog_len,
3407 				u32 off, u32 cnt)
3408 {
3409 	struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data;
3410 
3411 	if (cnt == 1)
3412 		return 0;
3413 	new_data = vzalloc(sizeof(struct bpf_insn_aux_data) * prog_len);
3414 	if (!new_data)
3415 		return -ENOMEM;
3416 	memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
3417 	memcpy(new_data + off + cnt - 1, old_data + off,
3418 	       sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
3419 	env->insn_aux_data = new_data;
3420 	vfree(old_data);
3421 	return 0;
3422 }
3423 
3424 static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off,
3425 					    const struct bpf_insn *patch, u32 len)
3426 {
3427 	struct bpf_prog *new_prog;
3428 
3429 	new_prog = bpf_patch_insn_single(env->prog, off, patch, len);
3430 	if (!new_prog)
3431 		return NULL;
3432 	if (adjust_insn_aux_data(env, new_prog->len, off, len))
3433 		return NULL;
3434 	return new_prog;
3435 }
3436 
3437 /* convert load instructions that access fields of 'struct __sk_buff'
3438  * into sequence of instructions that access fields of 'struct sk_buff'
3439  */
3440 static int convert_ctx_accesses(struct bpf_verifier_env *env)
3441 {
3442 	const struct bpf_verifier_ops *ops = env->prog->aux->ops;
3443 	int i, cnt, size, ctx_field_size, delta = 0;
3444 	const int insn_cnt = env->prog->len;
3445 	struct bpf_insn insn_buf[16], *insn;
3446 	struct bpf_prog *new_prog;
3447 	enum bpf_access_type type;
3448 	bool is_narrower_load;
3449 	u32 target_size;
3450 
3451 	if (ops->gen_prologue) {
3452 		cnt = ops->gen_prologue(insn_buf, env->seen_direct_write,
3453 					env->prog);
3454 		if (cnt >= ARRAY_SIZE(insn_buf)) {
3455 			verbose("bpf verifier is misconfigured\n");
3456 			return -EINVAL;
3457 		} else if (cnt) {
3458 			new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt);
3459 			if (!new_prog)
3460 				return -ENOMEM;
3461 
3462 			env->prog = new_prog;
3463 			delta += cnt - 1;
3464 		}
3465 	}
3466 
3467 	if (!ops->convert_ctx_access)
3468 		return 0;
3469 
3470 	insn = env->prog->insnsi + delta;
3471 
3472 	for (i = 0; i < insn_cnt; i++, insn++) {
3473 		if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) ||
3474 		    insn->code == (BPF_LDX | BPF_MEM | BPF_H) ||
3475 		    insn->code == (BPF_LDX | BPF_MEM | BPF_W) ||
3476 		    insn->code == (BPF_LDX | BPF_MEM | BPF_DW))
3477 			type = BPF_READ;
3478 		else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) ||
3479 			 insn->code == (BPF_STX | BPF_MEM | BPF_H) ||
3480 			 insn->code == (BPF_STX | BPF_MEM | BPF_W) ||
3481 			 insn->code == (BPF_STX | BPF_MEM | BPF_DW))
3482 			type = BPF_WRITE;
3483 		else
3484 			continue;
3485 
3486 		if (env->insn_aux_data[i + delta].ptr_type != PTR_TO_CTX)
3487 			continue;
3488 
3489 		ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size;
3490 		size = BPF_LDST_BYTES(insn);
3491 
3492 		/* If the read access is a narrower load of the field,
3493 		 * convert to a 4/8-byte load, to minimum program type specific
3494 		 * convert_ctx_access changes. If conversion is successful,
3495 		 * we will apply proper mask to the result.
3496 		 */
3497 		is_narrower_load = size < ctx_field_size;
3498 		if (is_narrower_load) {
3499 			u32 off = insn->off;
3500 			u8 size_code;
3501 
3502 			if (type == BPF_WRITE) {
3503 				verbose("bpf verifier narrow ctx access misconfigured\n");
3504 				return -EINVAL;
3505 			}
3506 
3507 			size_code = BPF_H;
3508 			if (ctx_field_size == 4)
3509 				size_code = BPF_W;
3510 			else if (ctx_field_size == 8)
3511 				size_code = BPF_DW;
3512 
3513 			insn->off = off & ~(ctx_field_size - 1);
3514 			insn->code = BPF_LDX | BPF_MEM | size_code;
3515 		}
3516 
3517 		target_size = 0;
3518 		cnt = ops->convert_ctx_access(type, insn, insn_buf, env->prog,
3519 					      &target_size);
3520 		if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) ||
3521 		    (ctx_field_size && !target_size)) {
3522 			verbose("bpf verifier is misconfigured\n");
3523 			return -EINVAL;
3524 		}
3525 
3526 		if (is_narrower_load && size < target_size) {
3527 			if (ctx_field_size <= 4)
3528 				insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
3529 								(1 << size * 8) - 1);
3530 			else
3531 				insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg,
3532 								(1 << size * 8) - 1);
3533 		}
3534 
3535 		new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
3536 		if (!new_prog)
3537 			return -ENOMEM;
3538 
3539 		delta += cnt - 1;
3540 
3541 		/* keep walking new program and skip insns we just inserted */
3542 		env->prog = new_prog;
3543 		insn      = new_prog->insnsi + i + delta;
3544 	}
3545 
3546 	return 0;
3547 }
3548 
3549 /* fixup insn->imm field of bpf_call instructions
3550  * and inline eligible helpers as explicit sequence of BPF instructions
3551  *
3552  * this function is called after eBPF program passed verification
3553  */
3554 static int fixup_bpf_calls(struct bpf_verifier_env *env)
3555 {
3556 	struct bpf_prog *prog = env->prog;
3557 	struct bpf_insn *insn = prog->insnsi;
3558 	const struct bpf_func_proto *fn;
3559 	const int insn_cnt = prog->len;
3560 	struct bpf_insn insn_buf[16];
3561 	struct bpf_prog *new_prog;
3562 	struct bpf_map *map_ptr;
3563 	int i, cnt, delta = 0;
3564 
3565 	for (i = 0; i < insn_cnt; i++, insn++) {
3566 		if (insn->code != (BPF_JMP | BPF_CALL))
3567 			continue;
3568 
3569 		if (insn->imm == BPF_FUNC_get_route_realm)
3570 			prog->dst_needed = 1;
3571 		if (insn->imm == BPF_FUNC_get_prandom_u32)
3572 			bpf_user_rnd_init_once();
3573 		if (insn->imm == BPF_FUNC_tail_call) {
3574 			/* If we tail call into other programs, we
3575 			 * cannot make any assumptions since they can
3576 			 * be replaced dynamically during runtime in
3577 			 * the program array.
3578 			 */
3579 			prog->cb_access = 1;
3580 			env->prog->aux->stack_depth = MAX_BPF_STACK;
3581 
3582 			/* mark bpf_tail_call as different opcode to avoid
3583 			 * conditional branch in the interpeter for every normal
3584 			 * call and to prevent accidental JITing by JIT compiler
3585 			 * that doesn't support bpf_tail_call yet
3586 			 */
3587 			insn->imm = 0;
3588 			insn->code = BPF_JMP | BPF_TAIL_CALL;
3589 			continue;
3590 		}
3591 
3592 		if (ebpf_jit_enabled() && insn->imm == BPF_FUNC_map_lookup_elem) {
3593 			map_ptr = env->insn_aux_data[i + delta].map_ptr;
3594 			if (map_ptr == BPF_MAP_PTR_POISON ||
3595 			    !map_ptr->ops->map_gen_lookup)
3596 				goto patch_call_imm;
3597 
3598 			cnt = map_ptr->ops->map_gen_lookup(map_ptr, insn_buf);
3599 			if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
3600 				verbose("bpf verifier is misconfigured\n");
3601 				return -EINVAL;
3602 			}
3603 
3604 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf,
3605 						       cnt);
3606 			if (!new_prog)
3607 				return -ENOMEM;
3608 
3609 			delta += cnt - 1;
3610 
3611 			/* keep walking new program and skip insns we just inserted */
3612 			env->prog = prog = new_prog;
3613 			insn      = new_prog->insnsi + i + delta;
3614 			continue;
3615 		}
3616 
3617 patch_call_imm:
3618 		fn = prog->aux->ops->get_func_proto(insn->imm);
3619 		/* all functions that have prototype and verifier allowed
3620 		 * programs to call them, must be real in-kernel functions
3621 		 */
3622 		if (!fn->func) {
3623 			verbose("kernel subsystem misconfigured func %s#%d\n",
3624 				func_id_name(insn->imm), insn->imm);
3625 			return -EFAULT;
3626 		}
3627 		insn->imm = fn->func - __bpf_call_base;
3628 	}
3629 
3630 	return 0;
3631 }
3632 
3633 static void free_states(struct bpf_verifier_env *env)
3634 {
3635 	struct bpf_verifier_state_list *sl, *sln;
3636 	int i;
3637 
3638 	if (!env->explored_states)
3639 		return;
3640 
3641 	for (i = 0; i < env->prog->len; i++) {
3642 		sl = env->explored_states[i];
3643 
3644 		if (sl)
3645 			while (sl != STATE_LIST_MARK) {
3646 				sln = sl->next;
3647 				kfree(sl);
3648 				sl = sln;
3649 			}
3650 	}
3651 
3652 	kfree(env->explored_states);
3653 }
3654 
3655 int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
3656 {
3657 	char __user *log_ubuf = NULL;
3658 	struct bpf_verifier_env *env;
3659 	int ret = -EINVAL;
3660 
3661 	/* 'struct bpf_verifier_env' can be global, but since it's not small,
3662 	 * allocate/free it every time bpf_check() is called
3663 	 */
3664 	env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
3665 	if (!env)
3666 		return -ENOMEM;
3667 
3668 	env->insn_aux_data = vzalloc(sizeof(struct bpf_insn_aux_data) *
3669 				     (*prog)->len);
3670 	ret = -ENOMEM;
3671 	if (!env->insn_aux_data)
3672 		goto err_free_env;
3673 	env->prog = *prog;
3674 
3675 	/* grab the mutex to protect few globals used by verifier */
3676 	mutex_lock(&bpf_verifier_lock);
3677 
3678 	if (attr->log_level || attr->log_buf || attr->log_size) {
3679 		/* user requested verbose verifier output
3680 		 * and supplied buffer to store the verification trace
3681 		 */
3682 		log_level = attr->log_level;
3683 		log_ubuf = (char __user *) (unsigned long) attr->log_buf;
3684 		log_size = attr->log_size;
3685 		log_len = 0;
3686 
3687 		ret = -EINVAL;
3688 		/* log_* values have to be sane */
3689 		if (log_size < 128 || log_size > UINT_MAX >> 8 ||
3690 		    log_level == 0 || log_ubuf == NULL)
3691 			goto err_unlock;
3692 
3693 		ret = -ENOMEM;
3694 		log_buf = vmalloc(log_size);
3695 		if (!log_buf)
3696 			goto err_unlock;
3697 	} else {
3698 		log_level = 0;
3699 	}
3700 
3701 	env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT);
3702 	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
3703 		env->strict_alignment = true;
3704 
3705 	ret = replace_map_fd_with_map_ptr(env);
3706 	if (ret < 0)
3707 		goto skip_full_check;
3708 
3709 	env->explored_states = kcalloc(env->prog->len,
3710 				       sizeof(struct bpf_verifier_state_list *),
3711 				       GFP_USER);
3712 	ret = -ENOMEM;
3713 	if (!env->explored_states)
3714 		goto skip_full_check;
3715 
3716 	ret = check_cfg(env);
3717 	if (ret < 0)
3718 		goto skip_full_check;
3719 
3720 	env->allow_ptr_leaks = capable(CAP_SYS_ADMIN);
3721 
3722 	ret = do_check(env);
3723 
3724 skip_full_check:
3725 	while (pop_stack(env, NULL) >= 0);
3726 	free_states(env);
3727 
3728 	if (ret == 0)
3729 		/* program is valid, convert *(u32*)(ctx + off) accesses */
3730 		ret = convert_ctx_accesses(env);
3731 
3732 	if (ret == 0)
3733 		ret = fixup_bpf_calls(env);
3734 
3735 	if (log_level && log_len >= log_size - 1) {
3736 		BUG_ON(log_len >= log_size);
3737 		/* verifier log exceeded user supplied buffer */
3738 		ret = -ENOSPC;
3739 		/* fall through to return what was recorded */
3740 	}
3741 
3742 	/* copy verifier log back to user space including trailing zero */
3743 	if (log_level && copy_to_user(log_ubuf, log_buf, log_len + 1) != 0) {
3744 		ret = -EFAULT;
3745 		goto free_log_buf;
3746 	}
3747 
3748 	if (ret == 0 && env->used_map_cnt) {
3749 		/* if program passed verifier, update used_maps in bpf_prog_info */
3750 		env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt,
3751 							  sizeof(env->used_maps[0]),
3752 							  GFP_KERNEL);
3753 
3754 		if (!env->prog->aux->used_maps) {
3755 			ret = -ENOMEM;
3756 			goto free_log_buf;
3757 		}
3758 
3759 		memcpy(env->prog->aux->used_maps, env->used_maps,
3760 		       sizeof(env->used_maps[0]) * env->used_map_cnt);
3761 		env->prog->aux->used_map_cnt = env->used_map_cnt;
3762 
3763 		/* program is valid. Convert pseudo bpf_ld_imm64 into generic
3764 		 * bpf_ld_imm64 instructions
3765 		 */
3766 		convert_pseudo_ld_imm64(env);
3767 	}
3768 
3769 free_log_buf:
3770 	if (log_level)
3771 		vfree(log_buf);
3772 	if (!env->prog->aux->used_maps)
3773 		/* if we didn't copy map pointers into bpf_prog_info, release
3774 		 * them now. Otherwise free_bpf_prog_info() will release them.
3775 		 */
3776 		release_maps(env);
3777 	*prog = env->prog;
3778 err_unlock:
3779 	mutex_unlock(&bpf_verifier_lock);
3780 	vfree(env->insn_aux_data);
3781 err_free_env:
3782 	kfree(env);
3783 	return ret;
3784 }
3785 
3786 int bpf_analyzer(struct bpf_prog *prog, const struct bpf_ext_analyzer_ops *ops,
3787 		 void *priv)
3788 {
3789 	struct bpf_verifier_env *env;
3790 	int ret;
3791 
3792 	env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
3793 	if (!env)
3794 		return -ENOMEM;
3795 
3796 	env->insn_aux_data = vzalloc(sizeof(struct bpf_insn_aux_data) *
3797 				     prog->len);
3798 	ret = -ENOMEM;
3799 	if (!env->insn_aux_data)
3800 		goto err_free_env;
3801 	env->prog = prog;
3802 	env->analyzer_ops = ops;
3803 	env->analyzer_priv = priv;
3804 
3805 	/* grab the mutex to protect few globals used by verifier */
3806 	mutex_lock(&bpf_verifier_lock);
3807 
3808 	log_level = 0;
3809 
3810 	env->strict_alignment = false;
3811 	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
3812 		env->strict_alignment = true;
3813 
3814 	env->explored_states = kcalloc(env->prog->len,
3815 				       sizeof(struct bpf_verifier_state_list *),
3816 				       GFP_KERNEL);
3817 	ret = -ENOMEM;
3818 	if (!env->explored_states)
3819 		goto skip_full_check;
3820 
3821 	ret = check_cfg(env);
3822 	if (ret < 0)
3823 		goto skip_full_check;
3824 
3825 	env->allow_ptr_leaks = capable(CAP_SYS_ADMIN);
3826 
3827 	ret = do_check(env);
3828 
3829 skip_full_check:
3830 	while (pop_stack(env, NULL) >= 0);
3831 	free_states(env);
3832 
3833 	mutex_unlock(&bpf_verifier_lock);
3834 	vfree(env->insn_aux_data);
3835 err_free_env:
3836 	kfree(env);
3837 	return ret;
3838 }
3839 EXPORT_SYMBOL_GPL(bpf_analyzer);
3840