xref: /openbmc/linux/kernel/bpf/core.c (revision fed8b7e366e7c8f81e957ef91aa8f0a38e038c66)
1 /*
2  * Linux Socket Filter - Kernel level socket filtering
3  *
4  * Based on the design of the Berkeley Packet Filter. The new
5  * internal format has been designed by PLUMgrid:
6  *
7  *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
8  *
9  * Authors:
10  *
11  *	Jay Schulist <jschlst@samba.org>
12  *	Alexei Starovoitov <ast@plumgrid.com>
13  *	Daniel Borkmann <dborkman@redhat.com>
14  *
15  * This program is free software; you can redistribute it and/or
16  * modify it under the terms of the GNU General Public License
17  * as published by the Free Software Foundation; either version
18  * 2 of the License, or (at your option) any later version.
19  *
20  * Andi Kleen - Fix a few bad bugs and races.
21  * Kris Katterjohn - Added many additional checks in bpf_check_classic()
22  */
23 
24 #include <linux/filter.h>
25 #include <linux/skbuff.h>
26 #include <linux/vmalloc.h>
27 #include <linux/random.h>
28 #include <linux/moduleloader.h>
29 #include <linux/bpf.h>
30 #include <linux/frame.h>
31 #include <linux/rbtree_latch.h>
32 #include <linux/kallsyms.h>
33 #include <linux/rcupdate.h>
34 #include <linux/perf_event.h>
35 
36 #include <asm/unaligned.h>
37 
38 /* Registers */
39 #define BPF_R0	regs[BPF_REG_0]
40 #define BPF_R1	regs[BPF_REG_1]
41 #define BPF_R2	regs[BPF_REG_2]
42 #define BPF_R3	regs[BPF_REG_3]
43 #define BPF_R4	regs[BPF_REG_4]
44 #define BPF_R5	regs[BPF_REG_5]
45 #define BPF_R6	regs[BPF_REG_6]
46 #define BPF_R7	regs[BPF_REG_7]
47 #define BPF_R8	regs[BPF_REG_8]
48 #define BPF_R9	regs[BPF_REG_9]
49 #define BPF_R10	regs[BPF_REG_10]
50 
51 /* Named registers */
52 #define DST	regs[insn->dst_reg]
53 #define SRC	regs[insn->src_reg]
54 #define FP	regs[BPF_REG_FP]
55 #define ARG1	regs[BPF_REG_ARG1]
56 #define CTX	regs[BPF_REG_CTX]
57 #define IMM	insn->imm
58 
59 /* No hurry in this branch
60  *
61  * Exported for the bpf jit load helper.
62  */
63 void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
64 {
65 	u8 *ptr = NULL;
66 
67 	if (k >= SKF_NET_OFF)
68 		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
69 	else if (k >= SKF_LL_OFF)
70 		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
71 
72 	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
73 		return ptr;
74 
75 	return NULL;
76 }
77 
78 struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
79 {
80 	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
81 	struct bpf_prog_aux *aux;
82 	struct bpf_prog *fp;
83 
84 	size = round_up(size, PAGE_SIZE);
85 	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
86 	if (fp == NULL)
87 		return NULL;
88 
89 	aux = kzalloc(sizeof(*aux), GFP_KERNEL | gfp_extra_flags);
90 	if (aux == NULL) {
91 		vfree(fp);
92 		return NULL;
93 	}
94 
95 	fp->pages = size / PAGE_SIZE;
96 	fp->aux = aux;
97 	fp->aux->prog = fp;
98 	fp->jit_requested = ebpf_jit_enabled();
99 
100 	INIT_LIST_HEAD_RCU(&fp->aux->ksym_lnode);
101 
102 	return fp;
103 }
104 EXPORT_SYMBOL_GPL(bpf_prog_alloc);
105 
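/* bpf_prog_realloc() below only ever grows the image: if the rounded-up
 * size still fits into the existing allocation, the old prog is returned
 * unchanged; otherwise the extra pages are charged to the owning user via
 * __bpf_prog_charge() before the larger buffer is populated.
 */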
106 struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
107 				  gfp_t gfp_extra_flags)
108 {
109 	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
110 	struct bpf_prog *fp;
111 	u32 pages, delta;
112 	int ret;
113 
114 	BUG_ON(fp_old == NULL);
115 
116 	size = round_up(size, PAGE_SIZE);
117 	pages = size / PAGE_SIZE;
118 	if (pages <= fp_old->pages)
119 		return fp_old;
120 
121 	delta = pages - fp_old->pages;
122 	ret = __bpf_prog_charge(fp_old->aux->user, delta);
123 	if (ret)
124 		return NULL;
125 
126 	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
127 	if (fp == NULL) {
128 		__bpf_prog_uncharge(fp_old->aux->user, delta);
129 	} else {
130 		memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
131 		fp->pages = pages;
132 		fp->aux->prog = fp;
133 
134 		/* We keep fp->aux from fp_old around in the new
135 		 * reallocated structure.
136 		 */
137 		fp_old->aux = NULL;
138 		__bpf_prog_free(fp_old);
139 	}
140 
141 	return fp;
142 }
143 
144 void __bpf_prog_free(struct bpf_prog *fp)
145 {
146 	kfree(fp->aux);
147 	vfree(fp);
148 }
149 
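/* The program tag computed below is a truncated SHA-1 digest over the
 * instruction stream with map fds masked out, giving a stable identity
 * for the program text (exposed to user space e.g. as prog_tag).
 */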
150 int bpf_prog_calc_tag(struct bpf_prog *fp)
151 {
152 	const u32 bits_offset = SHA_MESSAGE_BYTES - sizeof(__be64);
153 	u32 raw_size = bpf_prog_tag_scratch_size(fp);
154 	u32 digest[SHA_DIGEST_WORDS];
155 	u32 ws[SHA_WORKSPACE_WORDS];
156 	u32 i, bsize, psize, blocks;
157 	struct bpf_insn *dst;
158 	bool was_ld_map;
159 	u8 *raw, *todo;
160 	__be32 *result;
161 	__be64 *bits;
162 
163 	raw = vmalloc(raw_size);
164 	if (!raw)
165 		return -ENOMEM;
166 
167 	sha_init(digest);
168 	memset(ws, 0, sizeof(ws));
169 
170 	/* We need to take out the map fds for the digest calculation
171 	 * since they are unstable from the user space side.
172 	 */
173 	dst = (void *)raw;
174 	for (i = 0, was_ld_map = false; i < fp->len; i++) {
175 		dst[i] = fp->insnsi[i];
176 		if (!was_ld_map &&
177 		    dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
178 		    dst[i].src_reg == BPF_PSEUDO_MAP_FD) {
179 			was_ld_map = true;
180 			dst[i].imm = 0;
181 		} else if (was_ld_map &&
182 			   dst[i].code == 0 &&
183 			   dst[i].dst_reg == 0 &&
184 			   dst[i].src_reg == 0 &&
185 			   dst[i].off == 0) {
186 			was_ld_map = false;
187 			dst[i].imm = 0;
188 		} else {
189 			was_ld_map = false;
190 		}
191 	}
192 
193 	psize = bpf_prog_insn_size(fp);
194 	memset(&raw[psize], 0, raw_size - psize);
195 	raw[psize++] = 0x80;
196 
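	/* Standard SHA-1 style padding: a single 0x80 byte was appended
	 * above, the rest is zero-filled, and the original length in bits
	 * goes big-endian into the last 64 bits of the final block. If the
	 * 0x80 byte already spilled into that slot, one more block is used.
	 */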
197 	bsize  = round_up(psize, SHA_MESSAGE_BYTES);
198 	blocks = bsize / SHA_MESSAGE_BYTES;
199 	todo   = raw;
200 	if (bsize - psize >= sizeof(__be64)) {
201 		bits = (__be64 *)(todo + bsize - sizeof(__be64));
202 	} else {
203 		bits = (__be64 *)(todo + bsize + bits_offset);
204 		blocks++;
205 	}
206 	*bits = cpu_to_be64((psize - 1) << 3);
207 
208 	while (blocks--) {
209 		sha_transform(digest, todo, ws);
210 		todo += SHA_MESSAGE_BYTES;
211 	}
212 
213 	result = (__force __be32 *)digest;
214 	for (i = 0; i < SHA_DIGEST_WORDS; i++)
215 		result[i] = cpu_to_be32(digest[i]);
216 	memcpy(fp->tag, result, sizeof(fp->tag));
217 
218 	vfree(raw);
219 	return 0;
220 }
221 
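/* When a patch of 'delta' extra insns is inserted at position 'pos', any
 * relative jump or pseudo call whose source and target end up on opposite
 * sides of the patch must have its offset/imm widened or narrowed by
 * 'delta'. In the probe pass the helpers below only validate that the
 * adjusted value still fits the field, without writing it back.
 */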
222 static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, u32 delta,
223 				u32 curr, const bool probe_pass)
224 {
225 	const s64 imm_min = S32_MIN, imm_max = S32_MAX;
226 	s64 imm = insn->imm;
227 
228 	if (curr < pos && curr + imm + 1 > pos)
229 		imm += delta;
230 	else if (curr > pos + delta && curr + imm + 1 <= pos + delta)
231 		imm -= delta;
232 	if (imm < imm_min || imm > imm_max)
233 		return -ERANGE;
234 	if (!probe_pass)
235 		insn->imm = imm;
236 	return 0;
237 }
238 
239 static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, u32 delta,
240 				u32 curr, const bool probe_pass)
241 {
242 	const s32 off_min = S16_MIN, off_max = S16_MAX;
243 	s32 off = insn->off;
244 
245 	if (curr < pos && curr + off + 1 > pos)
246 		off += delta;
247 	else if (curr > pos + delta && curr + off + 1 <= pos + delta)
248 		off -= delta;
249 	if (off < off_min || off > off_max)
250 		return -ERANGE;
251 	if (!probe_pass)
252 		insn->off = off;
253 	return 0;
254 }
255 
256 static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, u32 delta,
257 			    const bool probe_pass)
258 {
259 	u32 i, insn_cnt = prog->len + (probe_pass ? delta : 0);
260 	struct bpf_insn *insn = prog->insnsi;
261 	int ret = 0;
262 
263 	for (i = 0; i < insn_cnt; i++, insn++) {
264 		u8 code;
265 
266 		/* In the probing pass we still operate on the original,
267 		 * unpatched image in order to check overflows before we
268 		 * do any other adjustments. Therefore skip the patchlet.
269 		 */
270 		if (probe_pass && i == pos) {
271 			i += delta + 1;
272 			insn++;
273 		}
274 		code = insn->code;
275 		if (BPF_CLASS(code) != BPF_JMP ||
276 		    BPF_OP(code) == BPF_EXIT)
277 			continue;
278 		/* Adjust offset of jmps if we cross patch boundaries. */
279 		if (BPF_OP(code) == BPF_CALL) {
280 			if (insn->src_reg != BPF_PSEUDO_CALL)
281 				continue;
282 			ret = bpf_adj_delta_to_imm(insn, pos, delta, i,
283 						   probe_pass);
284 		} else {
285 			ret = bpf_adj_delta_to_off(insn, pos, delta, i,
286 						   probe_pass);
287 		}
288 		if (ret)
289 			break;
290 	}
291 
292 	return ret;
293 }
294 
295 struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
296 				       const struct bpf_insn *patch, u32 len)
297 {
298 	u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
299 	const u32 cnt_max = S16_MAX;
300 	struct bpf_prog *prog_adj;
301 
302 	/* Since our patchlet doesn't expand the image, we're done. */
303 	if (insn_delta == 0) {
304 		memcpy(prog->insnsi + off, patch, sizeof(*patch));
305 		return prog;
306 	}
307 
308 	insn_adj_cnt = prog->len + insn_delta;
309 
310 	/* Reject anything that would potentially let the insn->off
311 	 * target overflow when we have excessive program expansions.
312 	 * We need to probe here before we do any reallocation where
313 	 * we afterwards may not fail anymore.
314 	 */
315 	if (insn_adj_cnt > cnt_max &&
316 	    bpf_adj_branches(prog, off, insn_delta, true))
317 		return NULL;
318 
319 	/* Several new instructions need to be inserted. Make room
320 	 * for them. Likely, there's no need for a new allocation as
321 	 * last page could have large enough tailroom.
322 	 */
323 	prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
324 				    GFP_USER);
325 	if (!prog_adj)
326 		return NULL;
327 
328 	prog_adj->len = insn_adj_cnt;
329 
330 	/* Patching happens in 3 steps:
331 	 *
332 	 * 1) Move over tail of insnsi from next instruction onwards,
333 	 *    so we can patch the single target insn with one or more
334 	 *    new ones (patching is always from 1 to n insns, n > 0).
335 	 * 2) Inject new instructions at the target location.
336 	 * 3) Adjust branch offsets if necessary.
337 	 */
338 	insn_rest = insn_adj_cnt - off - len;
339 
340 	memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
341 		sizeof(*patch) * insn_rest);
342 	memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);
343 
344 	/* We are guaranteed to not fail at this point; otherwise the
345 	 * ship has sailed and we could no longer revert to the original
346 	 * state. An offset overflow cannot happen here, we probed for it above.
347 	 */
348 	BUG_ON(bpf_adj_branches(prog_adj, off, insn_delta, false));
349 
350 	return prog_adj;
351 }
352 
353 void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
354 {
355 	int i;
356 
357 	for (i = 0; i < fp->aux->func_cnt; i++)
358 		bpf_prog_kallsyms_del(fp->aux->func[i]);
359 }
360 
361 void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
362 {
363 	bpf_prog_kallsyms_del_subprogs(fp);
364 	bpf_prog_kallsyms_del(fp);
365 }
366 
367 #ifdef CONFIG_BPF_JIT
368 # define BPF_JIT_LIMIT_DEFAULT	(PAGE_SIZE * 40000)
369 
370 /* All BPF JIT sysctl knobs here. */
371 int bpf_jit_enable   __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_ALWAYS_ON);
372 int bpf_jit_harden   __read_mostly;
373 int bpf_jit_kallsyms __read_mostly;
374 int bpf_jit_limit    __read_mostly = BPF_JIT_LIMIT_DEFAULT;
375 
376 static __always_inline void
377 bpf_get_prog_addr_region(const struct bpf_prog *prog,
378 			 unsigned long *symbol_start,
379 			 unsigned long *symbol_end)
380 {
381 	const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(prog);
382 	unsigned long addr = (unsigned long)hdr;
383 
384 	WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));
385 
386 	*symbol_start = addr;
387 	*symbol_end   = addr + hdr->pages * PAGE_SIZE;
388 }
389 
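/* Symbols for JITed programs are rendered as "bpf_prog_<tag>" or
 * "bpf_prog_<tag>_<name>", where <tag> is the hex-encoded program tag
 * and <name> the optional user-supplied program name from prog->aux.
 */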
390 static void bpf_get_prog_name(const struct bpf_prog *prog, char *sym)
391 {
392 	const char *end = sym + KSYM_NAME_LEN;
393 
394 	BUILD_BUG_ON(sizeof("bpf_prog_") +
395 		     sizeof(prog->tag) * 2 +
396 		     /* name has been null terminated.
397 		      * We would need +1 for the '_' preceding
398 		      * the name.  However, the null character
399 		      * is double counted between the name and the
400 		      * sizeof("bpf_prog_") above, so we omit
401 		      * the +1 here.
402 		      */
403 		     sizeof(prog->aux->name) > KSYM_NAME_LEN);
404 
405 	sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
406 	sym  = bin2hex(sym, prog->tag, sizeof(prog->tag));
407 	if (prog->aux->name[0])
408 		snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name);
409 	else
410 		*sym = 0;
411 }
412 
413 static __always_inline unsigned long
414 bpf_get_prog_addr_start(struct latch_tree_node *n)
415 {
416 	unsigned long symbol_start, symbol_end;
417 	const struct bpf_prog_aux *aux;
418 
419 	aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
420 	bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
421 
422 	return symbol_start;
423 }
424 
425 static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
426 					  struct latch_tree_node *b)
427 {
428 	return bpf_get_prog_addr_start(a) < bpf_get_prog_addr_start(b);
429 }
430 
431 static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
432 {
433 	unsigned long val = (unsigned long)key;
434 	unsigned long symbol_start, symbol_end;
435 	const struct bpf_prog_aux *aux;
436 
437 	aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
438 	bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
439 
440 	if (val < symbol_start)
441 		return -1;
442 	if (val >= symbol_end)
443 		return  1;
444 
445 	return 0;
446 }
447 
448 static const struct latch_tree_ops bpf_tree_ops = {
449 	.less	= bpf_tree_less,
450 	.comp	= bpf_tree_comp,
451 };
452 
453 static DEFINE_SPINLOCK(bpf_lock);
454 static LIST_HEAD(bpf_kallsyms);
455 static struct latch_tree_root bpf_tree __cacheline_aligned;
456 
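/* JITed programs are tracked in two structures under bpf_lock: the
 * bpf_kallsyms list for symbol enumeration and the latch tree keyed by
 * image start address, so that address lookups such as
 * is_bpf_text_address() can run under RCU without taking the lock.
 */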
457 static void bpf_prog_ksym_node_add(struct bpf_prog_aux *aux)
458 {
459 	WARN_ON_ONCE(!list_empty(&aux->ksym_lnode));
460 	list_add_tail_rcu(&aux->ksym_lnode, &bpf_kallsyms);
461 	latch_tree_insert(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
462 }
463 
464 static void bpf_prog_ksym_node_del(struct bpf_prog_aux *aux)
465 {
466 	if (list_empty(&aux->ksym_lnode))
467 		return;
468 
469 	latch_tree_erase(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
470 	list_del_rcu(&aux->ksym_lnode);
471 }
472 
473 static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
474 {
475 	return fp->jited && !bpf_prog_was_classic(fp);
476 }
477 
478 static bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
479 {
480 	return list_empty(&fp->aux->ksym_lnode) ||
481 	       fp->aux->ksym_lnode.prev == LIST_POISON2;
482 }
483 
484 void bpf_prog_kallsyms_add(struct bpf_prog *fp)
485 {
486 	if (!bpf_prog_kallsyms_candidate(fp) ||
487 	    !capable(CAP_SYS_ADMIN))
488 		return;
489 
490 	spin_lock_bh(&bpf_lock);
491 	bpf_prog_ksym_node_add(fp->aux);
492 	spin_unlock_bh(&bpf_lock);
493 }
494 
495 void bpf_prog_kallsyms_del(struct bpf_prog *fp)
496 {
497 	if (!bpf_prog_kallsyms_candidate(fp))
498 		return;
499 
500 	spin_lock_bh(&bpf_lock);
501 	bpf_prog_ksym_node_del(fp->aux);
502 	spin_unlock_bh(&bpf_lock);
503 }
504 
505 static struct bpf_prog *bpf_prog_kallsyms_find(unsigned long addr)
506 {
507 	struct latch_tree_node *n;
508 
509 	if (!bpf_jit_kallsyms_enabled())
510 		return NULL;
511 
512 	n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
513 	return n ?
514 	       container_of(n, struct bpf_prog_aux, ksym_tnode)->prog :
515 	       NULL;
516 }
517 
518 const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
519 				 unsigned long *off, char *sym)
520 {
521 	unsigned long symbol_start, symbol_end;
522 	struct bpf_prog *prog;
523 	char *ret = NULL;
524 
525 	rcu_read_lock();
526 	prog = bpf_prog_kallsyms_find(addr);
527 	if (prog) {
528 		bpf_get_prog_addr_region(prog, &symbol_start, &symbol_end);
529 		bpf_get_prog_name(prog, sym);
530 
531 		ret = sym;
532 		if (size)
533 			*size = symbol_end - symbol_start;
534 		if (off)
535 			*off  = addr - symbol_start;
536 	}
537 	rcu_read_unlock();
538 
539 	return ret;
540 }
541 
542 bool is_bpf_text_address(unsigned long addr)
543 {
544 	bool ret;
545 
546 	rcu_read_lock();
547 	ret = bpf_prog_kallsyms_find(addr) != NULL;
548 	rcu_read_unlock();
549 
550 	return ret;
551 }
552 
553 int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
554 		    char *sym)
555 {
556 	struct bpf_prog_aux *aux;
557 	unsigned int it = 0;
558 	int ret = -ERANGE;
559 
560 	if (!bpf_jit_kallsyms_enabled())
561 		return ret;
562 
563 	rcu_read_lock();
564 	list_for_each_entry_rcu(aux, &bpf_kallsyms, ksym_lnode) {
565 		if (it++ != symnum)
566 			continue;
567 
568 		bpf_get_prog_name(aux->prog, sym);
569 
570 		*value = (unsigned long)aux->prog->bpf_func;
571 		*type  = BPF_SYM_ELF_TYPE;
572 
573 		ret = 0;
574 		break;
575 	}
576 	rcu_read_unlock();
577 
578 	return ret;
579 }
580 
581 static atomic_long_t bpf_jit_current;
582 
583 #if defined(MODULES_VADDR)
584 static int __init bpf_jit_charge_init(void)
585 {
586 	/* Only used as heuristic here to derive limit. */
587 	bpf_jit_limit = min_t(u64, round_up((MODULES_END - MODULES_VADDR) >> 2,
588 					    PAGE_SIZE), INT_MAX);
589 	return 0;
590 }
591 pure_initcall(bpf_jit_charge_init);
592 #endif
593 
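/* Global accounting of module memory used for JIT images, in pages,
 * against bpf_jit_limit: once the limit is crossed, further allocations
 * are refused for unprivileged callers while CAP_SYS_ADMIN may still
 * exceed it.
 */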
594 static int bpf_jit_charge_modmem(u32 pages)
595 {
596 	if (atomic_long_add_return(pages, &bpf_jit_current) >
597 	    (bpf_jit_limit >> PAGE_SHIFT)) {
598 		if (!capable(CAP_SYS_ADMIN)) {
599 			atomic_long_sub(pages, &bpf_jit_current);
600 			return -EPERM;
601 		}
602 	}
603 
604 	return 0;
605 }
606 
607 static void bpf_jit_uncharge_modmem(u32 pages)
608 {
609 	atomic_long_sub(pages, &bpf_jit_current);
610 }
611 
612 struct bpf_binary_header *
613 bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
614 		     unsigned int alignment,
615 		     bpf_jit_fill_hole_t bpf_fill_ill_insns)
616 {
617 	struct bpf_binary_header *hdr;
618 	u32 size, hole, start, pages;
619 
620 	/* Most BPF filters are really small, but if some of them
621 	 * fill a page, allow at least 128 extra bytes to insert a
622 	 * random section of illegal instructions.
623 	 */
624 	size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
625 	pages = size / PAGE_SIZE;
626 
627 	if (bpf_jit_charge_modmem(pages))
628 		return NULL;
629 	hdr = module_alloc(size);
630 	if (!hdr) {
631 		bpf_jit_uncharge_modmem(pages);
632 		return NULL;
633 	}
634 
635 	/* Fill space with illegal/arch-dep instructions. */
636 	bpf_fill_ill_insns(hdr, size);
637 
638 	hdr->pages = pages;
639 	hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
640 		     PAGE_SIZE - sizeof(*hdr));
641 	start = (get_random_int() % hole) & ~(alignment - 1);
642 
643 	/* Leave a random number of instructions before BPF code. */
644 	*image_ptr = &hdr->image[start];
645 
646 	return hdr;
647 }
648 
649 void bpf_jit_binary_free(struct bpf_binary_header *hdr)
650 {
651 	u32 pages = hdr->pages;
652 
653 	module_memfree(hdr);
654 	bpf_jit_uncharge_modmem(pages);
655 }
656 
657 /* This symbol is only overridden by archs that have different
658  * requirements than the usual eBPF JITs, f.e. when they only
659  * implement cBPF JIT, do not set images read-only, etc.
660  */
661 void __weak bpf_jit_free(struct bpf_prog *fp)
662 {
663 	if (fp->jited) {
664 		struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);
665 
666 		bpf_jit_binary_unlock_ro(hdr);
667 		bpf_jit_binary_free(hdr);
668 
669 		WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
670 	}
671 
672 	bpf_prog_unlock_free(fp);
673 }
674 
675 int bpf_jit_get_func_addr(const struct bpf_prog *prog,
676 			  const struct bpf_insn *insn, bool extra_pass,
677 			  u64 *func_addr, bool *func_addr_fixed)
678 {
679 	s16 off = insn->off;
680 	s32 imm = insn->imm;
681 	u8 *addr;
682 
683 	*func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL;
684 	if (!*func_addr_fixed) {
685 		/* Place-holder address till the last pass has collected
686 		 * all addresses for JITed subprograms in which case we
687 		 * can pick them up from prog->aux.
688 		 */
689 		if (!extra_pass)
690 			addr = NULL;
691 		else if (prog->aux->func &&
692 			 off >= 0 && off < prog->aux->func_cnt)
693 			addr = (u8 *)prog->aux->func[off]->bpf_func;
694 		else
695 			return -EINVAL;
696 	} else {
697 		/* Address of a BPF helper call. Since part of the core
698 		 * kernel, it's always at a fixed location. __bpf_call_base
699 		 * and the helper with imm relative to it are both in core
700 		 * kernel.
701 		 */
702 		addr = (u8 *)__bpf_call_base + imm;
703 	}
704 
705 	*func_addr = (unsigned long)addr;
706 	return 0;
707 }
708 
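/* Constant blinding: an insn carrying a user-controlled immediate is
 * rewritten so the constant never appears verbatim in the JIT image.
 * The scratch register AX is loaded with (imm ^ imm_rnd), XORed with
 * imm_rnd at run time to recover imm, and the register form of the
 * original operation is used instead. Returns the number of insns
 * emitted into to_buff, or 0 if the insn is left as-is.
 */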
709 static int bpf_jit_blind_insn(const struct bpf_insn *from,
710 			      const struct bpf_insn *aux,
711 			      struct bpf_insn *to_buff)
712 {
713 	struct bpf_insn *to = to_buff;
714 	u32 imm_rnd = get_random_int();
715 	s16 off;
716 
717 	BUILD_BUG_ON(BPF_REG_AX  + 1 != MAX_BPF_JIT_REG);
718 	BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);
719 
720 	if (from->imm == 0 &&
721 	    (from->code == (BPF_ALU   | BPF_MOV | BPF_K) ||
722 	     from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
723 		*to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
724 		goto out;
725 	}
726 
727 	switch (from->code) {
728 	case BPF_ALU | BPF_ADD | BPF_K:
729 	case BPF_ALU | BPF_SUB | BPF_K:
730 	case BPF_ALU | BPF_AND | BPF_K:
731 	case BPF_ALU | BPF_OR  | BPF_K:
732 	case BPF_ALU | BPF_XOR | BPF_K:
733 	case BPF_ALU | BPF_MUL | BPF_K:
734 	case BPF_ALU | BPF_MOV | BPF_K:
735 	case BPF_ALU | BPF_DIV | BPF_K:
736 	case BPF_ALU | BPF_MOD | BPF_K:
737 		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
738 		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
739 		*to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX);
740 		break;
741 
742 	case BPF_ALU64 | BPF_ADD | BPF_K:
743 	case BPF_ALU64 | BPF_SUB | BPF_K:
744 	case BPF_ALU64 | BPF_AND | BPF_K:
745 	case BPF_ALU64 | BPF_OR  | BPF_K:
746 	case BPF_ALU64 | BPF_XOR | BPF_K:
747 	case BPF_ALU64 | BPF_MUL | BPF_K:
748 	case BPF_ALU64 | BPF_MOV | BPF_K:
749 	case BPF_ALU64 | BPF_DIV | BPF_K:
750 	case BPF_ALU64 | BPF_MOD | BPF_K:
751 		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
752 		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
753 		*to++ = BPF_ALU64_REG(from->code, from->dst_reg, BPF_REG_AX);
754 		break;
755 
756 	case BPF_JMP | BPF_JEQ  | BPF_K:
757 	case BPF_JMP | BPF_JNE  | BPF_K:
758 	case BPF_JMP | BPF_JGT  | BPF_K:
759 	case BPF_JMP | BPF_JLT  | BPF_K:
760 	case BPF_JMP | BPF_JGE  | BPF_K:
761 	case BPF_JMP | BPF_JLE  | BPF_K:
762 	case BPF_JMP | BPF_JSGT | BPF_K:
763 	case BPF_JMP | BPF_JSLT | BPF_K:
764 	case BPF_JMP | BPF_JSGE | BPF_K:
765 	case BPF_JMP | BPF_JSLE | BPF_K:
766 	case BPF_JMP | BPF_JSET | BPF_K:
767 		/* Accommodate for extra offset in case of a backjump. */
768 		off = from->off;
769 		if (off < 0)
770 			off -= 2;
771 		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
772 		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
773 		*to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
774 		break;
775 
776 	case BPF_LD | BPF_IMM | BPF_DW:
777 		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
778 		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
779 		*to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
780 		*to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
781 		break;
782 	case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
783 		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
784 		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
785 		*to++ = BPF_ALU64_REG(BPF_OR,  aux[0].dst_reg, BPF_REG_AX);
786 		break;
787 
788 	case BPF_ST | BPF_MEM | BPF_DW:
789 	case BPF_ST | BPF_MEM | BPF_W:
790 	case BPF_ST | BPF_MEM | BPF_H:
791 	case BPF_ST | BPF_MEM | BPF_B:
792 		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
793 		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
794 		*to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
795 		break;
796 	}
797 out:
798 	return to - to_buff;
799 }
800 
801 static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
802 					      gfp_t gfp_extra_flags)
803 {
804 	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
805 	struct bpf_prog *fp;
806 
807 	fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags, PAGE_KERNEL);
808 	if (fp != NULL) {
809 		/* aux->prog still points to the fp_other one, so
810 		 * when promoting the clone to the real program,
811 		 * this still needs to be adapted.
812 		 */
813 		memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
814 	}
815 
816 	return fp;
817 }
818 
819 static void bpf_prog_clone_free(struct bpf_prog *fp)
820 {
821 	/* aux was stolen by the other clone, so we cannot free
822 	 * it from this path! It will be freed eventually by the
823 	 * other program on release.
824 	 *
825 	 * At this point, we don't need a deferred release since
826 	 * clone is guaranteed to not be locked.
827 	 */
828 	fp->aux = NULL;
829 	__bpf_prog_free(fp);
830 }
831 
832 void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
833 {
834 	/* We have to repoint aux->prog to self, as we don't
835 	 * know whether fp here is the clone or the original.
836 	 */
837 	fp->aux->prog = fp;
838 	bpf_prog_clone_free(fp_other);
839 }
840 
841 struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
842 {
843 	struct bpf_insn insn_buff[16], aux[2];
844 	struct bpf_prog *clone, *tmp;
845 	int insn_delta, insn_cnt;
846 	struct bpf_insn *insn;
847 	int i, rewritten;
848 
849 	if (!bpf_jit_blinding_enabled(prog) || prog->blinded)
850 		return prog;
851 
852 	clone = bpf_prog_clone_create(prog, GFP_USER);
853 	if (!clone)
854 		return ERR_PTR(-ENOMEM);
855 
856 	insn_cnt = clone->len;
857 	insn = clone->insnsi;
858 
859 	for (i = 0; i < insn_cnt; i++, insn++) {
860 		/* We temporarily need to hold the original ld64 insn
861 		 * so that we can still access the first part in the
862 		 * second blinding run.
863 		 */
864 		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
865 		    insn[1].code == 0)
866 			memcpy(aux, insn, sizeof(aux));
867 
868 		rewritten = bpf_jit_blind_insn(insn, aux, insn_buff);
869 		if (!rewritten)
870 			continue;
871 
872 		tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
873 		if (!tmp) {
874 			/* Patching may have repointed aux->prog during
875 			 * realloc from the original one, so we need to
876 			 * fix it up here on error.
877 			 */
878 			bpf_jit_prog_release_other(prog, clone);
879 			return ERR_PTR(-ENOMEM);
880 		}
881 
882 		clone = tmp;
883 		insn_delta = rewritten - 1;
884 
885 		/* Walk new program and skip insns we just inserted. */
886 		insn = clone->insnsi + i + insn_delta;
887 		insn_cnt += insn_delta;
888 		i        += insn_delta;
889 	}
890 
891 	clone->blinded = 1;
892 	return clone;
893 }
894 #endif /* CONFIG_BPF_JIT */
895 
896 /* Base function for offset calculation. Needs to go into .text section,
897  * therefore keeping it non-static as well; will also be used by JITs
898  * anyway later on, so do not let the compiler omit it. This also needs
899  * to go into kallsyms for correlation from e.g. bpftool, so naming
900  * must not change.
901  */
902 noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
903 {
904 	return 0;
905 }
906 EXPORT_SYMBOL_GPL(__bpf_call_base);
907 
908 /* All UAPI available opcodes. */
909 #define BPF_INSN_MAP(INSN_2, INSN_3)		\
910 	/* 32 bit ALU operations. */		\
911 	/*   Register based. */			\
912 	INSN_3(ALU, ADD, X),			\
913 	INSN_3(ALU, SUB, X),			\
914 	INSN_3(ALU, AND, X),			\
915 	INSN_3(ALU, OR,  X),			\
916 	INSN_3(ALU, LSH, X),			\
917 	INSN_3(ALU, RSH, X),			\
918 	INSN_3(ALU, XOR, X),			\
919 	INSN_3(ALU, MUL, X),			\
920 	INSN_3(ALU, MOV, X),			\
921 	INSN_3(ALU, DIV, X),			\
922 	INSN_3(ALU, MOD, X),			\
923 	INSN_2(ALU, NEG),			\
924 	INSN_3(ALU, END, TO_BE),		\
925 	INSN_3(ALU, END, TO_LE),		\
926 	/*   Immediate based. */		\
927 	INSN_3(ALU, ADD, K),			\
928 	INSN_3(ALU, SUB, K),			\
929 	INSN_3(ALU, AND, K),			\
930 	INSN_3(ALU, OR,  K),			\
931 	INSN_3(ALU, LSH, K),			\
932 	INSN_3(ALU, RSH, K),			\
933 	INSN_3(ALU, XOR, K),			\
934 	INSN_3(ALU, MUL, K),			\
935 	INSN_3(ALU, MOV, K),			\
936 	INSN_3(ALU, DIV, K),			\
937 	INSN_3(ALU, MOD, K),			\
938 	/* 64 bit ALU operations. */		\
939 	/*   Register based. */			\
940 	INSN_3(ALU64, ADD,  X),			\
941 	INSN_3(ALU64, SUB,  X),			\
942 	INSN_3(ALU64, AND,  X),			\
943 	INSN_3(ALU64, OR,   X),			\
944 	INSN_3(ALU64, LSH,  X),			\
945 	INSN_3(ALU64, RSH,  X),			\
946 	INSN_3(ALU64, XOR,  X),			\
947 	INSN_3(ALU64, MUL,  X),			\
948 	INSN_3(ALU64, MOV,  X),			\
949 	INSN_3(ALU64, ARSH, X),			\
950 	INSN_3(ALU64, DIV,  X),			\
951 	INSN_3(ALU64, MOD,  X),			\
952 	INSN_2(ALU64, NEG),			\
953 	/*   Immediate based. */		\
954 	INSN_3(ALU64, ADD,  K),			\
955 	INSN_3(ALU64, SUB,  K),			\
956 	INSN_3(ALU64, AND,  K),			\
957 	INSN_3(ALU64, OR,   K),			\
958 	INSN_3(ALU64, LSH,  K),			\
959 	INSN_3(ALU64, RSH,  K),			\
960 	INSN_3(ALU64, XOR,  K),			\
961 	INSN_3(ALU64, MUL,  K),			\
962 	INSN_3(ALU64, MOV,  K),			\
963 	INSN_3(ALU64, ARSH, K),			\
964 	INSN_3(ALU64, DIV,  K),			\
965 	INSN_3(ALU64, MOD,  K),			\
966 	/* Call instruction. */			\
967 	INSN_2(JMP, CALL),			\
968 	/* Exit instruction. */			\
969 	INSN_2(JMP, EXIT),			\
970 	/* Jump instructions. */		\
971 	/*   Register based. */			\
972 	INSN_3(JMP, JEQ,  X),			\
973 	INSN_3(JMP, JNE,  X),			\
974 	INSN_3(JMP, JGT,  X),			\
975 	INSN_3(JMP, JLT,  X),			\
976 	INSN_3(JMP, JGE,  X),			\
977 	INSN_3(JMP, JLE,  X),			\
978 	INSN_3(JMP, JSGT, X),			\
979 	INSN_3(JMP, JSLT, X),			\
980 	INSN_3(JMP, JSGE, X),			\
981 	INSN_3(JMP, JSLE, X),			\
982 	INSN_3(JMP, JSET, X),			\
983 	/*   Immediate based. */		\
984 	INSN_3(JMP, JEQ,  K),			\
985 	INSN_3(JMP, JNE,  K),			\
986 	INSN_3(JMP, JGT,  K),			\
987 	INSN_3(JMP, JLT,  K),			\
988 	INSN_3(JMP, JGE,  K),			\
989 	INSN_3(JMP, JLE,  K),			\
990 	INSN_3(JMP, JSGT, K),			\
991 	INSN_3(JMP, JSLT, K),			\
992 	INSN_3(JMP, JSGE, K),			\
993 	INSN_3(JMP, JSLE, K),			\
994 	INSN_3(JMP, JSET, K),			\
995 	INSN_2(JMP, JA),			\
996 	/* Store instructions. */		\
997 	/*   Register based. */			\
998 	INSN_3(STX, MEM,  B),			\
999 	INSN_3(STX, MEM,  H),			\
1000 	INSN_3(STX, MEM,  W),			\
1001 	INSN_3(STX, MEM,  DW),			\
1002 	INSN_3(STX, XADD, W),			\
1003 	INSN_3(STX, XADD, DW),			\
1004 	/*   Immediate based. */		\
1005 	INSN_3(ST, MEM, B),			\
1006 	INSN_3(ST, MEM, H),			\
1007 	INSN_3(ST, MEM, W),			\
1008 	INSN_3(ST, MEM, DW),			\
1009 	/* Load instructions. */		\
1010 	/*   Register based. */			\
1011 	INSN_3(LDX, MEM, B),			\
1012 	INSN_3(LDX, MEM, H),			\
1013 	INSN_3(LDX, MEM, W),			\
1014 	INSN_3(LDX, MEM, DW),			\
1015 	/*   Immediate based. */		\
1016 	INSN_3(LD, IMM, DW)
1017 
1018 bool bpf_opcode_in_insntable(u8 code)
1019 {
1020 #define BPF_INSN_2_TBL(x, y)    [BPF_##x | BPF_##y] = true
1021 #define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true
1022 	static const bool public_insntable[256] = {
1023 		[0 ... 255] = false,
1024 		/* Now overwrite non-defaults ... */
1025 		BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL),
1026 		/* UAPI exposed, but rewritten opcodes. cBPF carry-over. */
1027 		[BPF_LD | BPF_ABS | BPF_B] = true,
1028 		[BPF_LD | BPF_ABS | BPF_H] = true,
1029 		[BPF_LD | BPF_ABS | BPF_W] = true,
1030 		[BPF_LD | BPF_IND | BPF_B] = true,
1031 		[BPF_LD | BPF_IND | BPF_H] = true,
1032 		[BPF_LD | BPF_IND | BPF_W] = true,
1033 	};
1034 #undef BPF_INSN_3_TBL
1035 #undef BPF_INSN_2_TBL
1036 	return public_insntable[code];
1037 }
1038 
1039 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
1040 /**
1041  *	___bpf_prog_run - run eBPF program on a given context
1042  *	@regs: is the array of eBPF pseudo-registers (the context is in R1)
1043  *	@insn: is the array of eBPF instructions
1044  *	@stack: is the program's stack area
1045  * Decode and execute eBPF instructions.
1046  */
1047 static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
1048 {
1049 	u64 tmp;
1050 #define BPF_INSN_2_LBL(x, y)    [BPF_##x | BPF_##y] = &&x##_##y
1051 #define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
1052 	static const void *jumptable[256] = {
1053 		[0 ... 255] = &&default_label,
1054 		/* Now overwrite non-defaults ... */
1055 		BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL),
1056 		/* Non-UAPI available opcodes. */
1057 		[BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
1058 		[BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
1059 	};
1060 #undef BPF_INSN_3_LBL
1061 #undef BPF_INSN_2_LBL
1062 	u32 tail_call_cnt = 0;
1063 
1064 #define CONT	 ({ insn++; goto select_insn; })
1065 #define CONT_JMP ({ insn++; goto select_insn; })
1066 
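	/* Dispatch is done via computed goto (direct threading): each
	 * opcode indexes straight into the jumptable above, and CONT /
	 * CONT_JMP advance to the next insn and jump back to select_insn.
	 */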
1067 select_insn:
1068 	goto *jumptable[insn->code];
1069 
1070 	/* ALU */
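	/* ALU(OPCODE, OP) expands into the four handlers for one operation:
	 * 64 bit and 32 bit, each in register and immediate form.
	 */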
1071 #define ALU(OPCODE, OP)			\
1072 	ALU64_##OPCODE##_X:		\
1073 		DST = DST OP SRC;	\
1074 		CONT;			\
1075 	ALU_##OPCODE##_X:		\
1076 		DST = (u32) DST OP (u32) SRC;	\
1077 		CONT;			\
1078 	ALU64_##OPCODE##_K:		\
1079 		DST = DST OP IMM;		\
1080 		CONT;			\
1081 	ALU_##OPCODE##_K:		\
1082 		DST = (u32) DST OP (u32) IMM;	\
1083 		CONT;
1084 
1085 	ALU(ADD,  +)
1086 	ALU(SUB,  -)
1087 	ALU(AND,  &)
1088 	ALU(OR,   |)
1089 	ALU(LSH, <<)
1090 	ALU(RSH, >>)
1091 	ALU(XOR,  ^)
1092 	ALU(MUL,  *)
1093 #undef ALU
1094 	ALU_NEG:
1095 		DST = (u32) -DST;
1096 		CONT;
1097 	ALU64_NEG:
1098 		DST = -DST;
1099 		CONT;
1100 	ALU_MOV_X:
1101 		DST = (u32) SRC;
1102 		CONT;
1103 	ALU_MOV_K:
1104 		DST = (u32) IMM;
1105 		CONT;
1106 	ALU64_MOV_X:
1107 		DST = SRC;
1108 		CONT;
1109 	ALU64_MOV_K:
1110 		DST = IMM;
1111 		CONT;
1112 	LD_IMM_DW:
1113 		DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
1114 		insn++;
1115 		CONT;
1116 	ALU64_ARSH_X:
1117 		(*(s64 *) &DST) >>= SRC;
1118 		CONT;
1119 	ALU64_ARSH_K:
1120 		(*(s64 *) &DST) >>= IMM;
1121 		CONT;
1122 	ALU64_MOD_X:
1123 		div64_u64_rem(DST, SRC, &tmp);
1124 		DST = tmp;
1125 		CONT;
1126 	ALU_MOD_X:
1127 		tmp = (u32) DST;
1128 		DST = do_div(tmp, (u32) SRC);
1129 		CONT;
1130 	ALU64_MOD_K:
1131 		div64_u64_rem(DST, IMM, &tmp);
1132 		DST = tmp;
1133 		CONT;
1134 	ALU_MOD_K:
1135 		tmp = (u32) DST;
1136 		DST = do_div(tmp, (u32) IMM);
1137 		CONT;
1138 	ALU64_DIV_X:
1139 		DST = div64_u64(DST, SRC);
1140 		CONT;
1141 	ALU_DIV_X:
1142 		tmp = (u32) DST;
1143 		do_div(tmp, (u32) SRC);
1144 		DST = (u32) tmp;
1145 		CONT;
1146 	ALU64_DIV_K:
1147 		DST = div64_u64(DST, IMM);
1148 		CONT;
1149 	ALU_DIV_K:
1150 		tmp = (u32) DST;
1151 		do_div(tmp, (u32) IMM);
1152 		DST = (u32) tmp;
1153 		CONT;
1154 	ALU_END_TO_BE:
1155 		switch (IMM) {
1156 		case 16:
1157 			DST = (__force u16) cpu_to_be16(DST);
1158 			break;
1159 		case 32:
1160 			DST = (__force u32) cpu_to_be32(DST);
1161 			break;
1162 		case 64:
1163 			DST = (__force u64) cpu_to_be64(DST);
1164 			break;
1165 		}
1166 		CONT;
1167 	ALU_END_TO_LE:
1168 		switch (IMM) {
1169 		case 16:
1170 			DST = (__force u16) cpu_to_le16(DST);
1171 			break;
1172 		case 32:
1173 			DST = (__force u32) cpu_to_le32(DST);
1174 			break;
1175 		case 64:
1176 			DST = (__force u64) cpu_to_le64(DST);
1177 			break;
1178 		}
1179 		CONT;
1180 
1181 	/* CALL */
1182 	JMP_CALL:
1183 		/* Function call scratches BPF_R1-BPF_R5 registers,
1184 		 * preserves BPF_R6-BPF_R9, and stores return value
1185 		 * into BPF_R0.
1186 		 */
1187 		BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
1188 						       BPF_R4, BPF_R5);
1189 		CONT;
1190 
1191 	JMP_CALL_ARGS:
1192 		BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2,
1193 							    BPF_R3, BPF_R4,
1194 							    BPF_R5,
1195 							    insn + insn->off + 1);
1196 		CONT;
1197 
1198 	JMP_TAIL_CALL: {
1199 		struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
1200 		struct bpf_array *array = container_of(map, struct bpf_array, map);
1201 		struct bpf_prog *prog;
1202 		u32 index = BPF_R3;
1203 
1204 		if (unlikely(index >= array->map.max_entries))
1205 			goto out;
1206 		if (unlikely(tail_call_cnt > MAX_TAIL_CALL_CNT))
1207 			goto out;
1208 
1209 		tail_call_cnt++;
1210 
1211 		prog = READ_ONCE(array->ptrs[index]);
1212 		if (!prog)
1213 			goto out;
1214 
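		/* A tail call does not return to the caller: dispatch simply
		 * restarts at the callee's first insn, reusing the current
		 * stack frame and registers, with the chain depth bounded by
		 * MAX_TAIL_CALL_CNT above.
		 */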
1215 		/* ARG1 at this point is guaranteed to point to CTX from
1216 		 * the verifier side due to the fact that the tail call is
1217 		 * handled like a helper, that is, bpf_tail_call_proto,
1218 		 * where arg1_type is ARG_PTR_TO_CTX.
1219 		 */
1220 		insn = prog->insnsi;
1221 		goto select_insn;
1222 out:
1223 		CONT;
1224 	}
1225 	/* JMP */
1226 	JMP_JA:
1227 		insn += insn->off;
1228 		CONT;
1229 	JMP_JEQ_X:
1230 		if (DST == SRC) {
1231 			insn += insn->off;
1232 			CONT_JMP;
1233 		}
1234 		CONT;
1235 	JMP_JEQ_K:
1236 		if (DST == IMM) {
1237 			insn += insn->off;
1238 			CONT_JMP;
1239 		}
1240 		CONT;
1241 	JMP_JNE_X:
1242 		if (DST != SRC) {
1243 			insn += insn->off;
1244 			CONT_JMP;
1245 		}
1246 		CONT;
1247 	JMP_JNE_K:
1248 		if (DST != IMM) {
1249 			insn += insn->off;
1250 			CONT_JMP;
1251 		}
1252 		CONT;
1253 	JMP_JGT_X:
1254 		if (DST > SRC) {
1255 			insn += insn->off;
1256 			CONT_JMP;
1257 		}
1258 		CONT;
1259 	JMP_JGT_K:
1260 		if (DST > IMM) {
1261 			insn += insn->off;
1262 			CONT_JMP;
1263 		}
1264 		CONT;
1265 	JMP_JLT_X:
1266 		if (DST < SRC) {
1267 			insn += insn->off;
1268 			CONT_JMP;
1269 		}
1270 		CONT;
1271 	JMP_JLT_K:
1272 		if (DST < IMM) {
1273 			insn += insn->off;
1274 			CONT_JMP;
1275 		}
1276 		CONT;
1277 	JMP_JGE_X:
1278 		if (DST >= SRC) {
1279 			insn += insn->off;
1280 			CONT_JMP;
1281 		}
1282 		CONT;
1283 	JMP_JGE_K:
1284 		if (DST >= IMM) {
1285 			insn += insn->off;
1286 			CONT_JMP;
1287 		}
1288 		CONT;
1289 	JMP_JLE_X:
1290 		if (DST <= SRC) {
1291 			insn += insn->off;
1292 			CONT_JMP;
1293 		}
1294 		CONT;
1295 	JMP_JLE_K:
1296 		if (DST <= IMM) {
1297 			insn += insn->off;
1298 			CONT_JMP;
1299 		}
1300 		CONT;
1301 	JMP_JSGT_X:
1302 		if (((s64) DST) > ((s64) SRC)) {
1303 			insn += insn->off;
1304 			CONT_JMP;
1305 		}
1306 		CONT;
1307 	JMP_JSGT_K:
1308 		if (((s64) DST) > ((s64) IMM)) {
1309 			insn += insn->off;
1310 			CONT_JMP;
1311 		}
1312 		CONT;
1313 	JMP_JSLT_X:
1314 		if (((s64) DST) < ((s64) SRC)) {
1315 			insn += insn->off;
1316 			CONT_JMP;
1317 		}
1318 		CONT;
1319 	JMP_JSLT_K:
1320 		if (((s64) DST) < ((s64) IMM)) {
1321 			insn += insn->off;
1322 			CONT_JMP;
1323 		}
1324 		CONT;
1325 	JMP_JSGE_X:
1326 		if (((s64) DST) >= ((s64) SRC)) {
1327 			insn += insn->off;
1328 			CONT_JMP;
1329 		}
1330 		CONT;
1331 	JMP_JSGE_K:
1332 		if (((s64) DST) >= ((s64) IMM)) {
1333 			insn += insn->off;
1334 			CONT_JMP;
1335 		}
1336 		CONT;
1337 	JMP_JSLE_X:
1338 		if (((s64) DST) <= ((s64) SRC)) {
1339 			insn += insn->off;
1340 			CONT_JMP;
1341 		}
1342 		CONT;
1343 	JMP_JSLE_K:
1344 		if (((s64) DST) <= ((s64) IMM)) {
1345 			insn += insn->off;
1346 			CONT_JMP;
1347 		}
1348 		CONT;
1349 	JMP_JSET_X:
1350 		if (DST & SRC) {
1351 			insn += insn->off;
1352 			CONT_JMP;
1353 		}
1354 		CONT;
1355 	JMP_JSET_K:
1356 		if (DST & IMM) {
1357 			insn += insn->off;
1358 			CONT_JMP;
1359 		}
1360 		CONT;
1361 	JMP_EXIT:
1362 		return BPF_R0;
1363 
1364 	/* STX, ST and LDX */
1365 #define LDST(SIZEOP, SIZE)						\
1366 	STX_MEM_##SIZEOP:						\
1367 		*(SIZE *)(unsigned long) (DST + insn->off) = SRC;	\
1368 		CONT;							\
1369 	ST_MEM_##SIZEOP:						\
1370 		*(SIZE *)(unsigned long) (DST + insn->off) = IMM;	\
1371 		CONT;							\
1372 	LDX_MEM_##SIZEOP:						\
1373 		DST = *(SIZE *)(unsigned long) (SRC + insn->off);	\
1374 		CONT;
1375 
1376 	LDST(B,   u8)
1377 	LDST(H,  u16)
1378 	LDST(W,  u32)
1379 	LDST(DW, u64)
1380 #undef LDST
1381 	STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
1382 		atomic_add((u32) SRC, (atomic_t *)(unsigned long)
1383 			   (DST + insn->off));
1384 		CONT;
1385 	STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
1386 		atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
1387 			     (DST + insn->off));
1388 		CONT;
1389 
1390 	default_label:
1391 		/* If we ever reach this, we have a bug somewhere. Die hard here
1392 		 * instead of just returning 0; we could be somewhere in a subprog,
1393 		 * so execution could continue otherwise which we do /not/ want.
1394 		 *
1395 		 * Note, verifier whitelists all opcodes in bpf_opcode_in_insntable().
1396 		 */
1397 		pr_warn("BPF interpreter: unknown opcode %02x\n", insn->code);
1398 		BUG_ON(1);
1399 		return 0;
1400 }
1401 STACK_FRAME_NON_STANDARD(___bpf_prog_run); /* jump table */
1402 
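/* The interpreter below is instantiated once per stack size in 32 byte
 * steps from 32 up to 512 bytes; bpf_prog_select_func() picks the
 * smallest variant that covers the verifier-computed stack depth.
 */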
1403 #define PROG_NAME(stack_size) __bpf_prog_run##stack_size
1404 #define DEFINE_BPF_PROG_RUN(stack_size) \
1405 static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
1406 { \
1407 	u64 stack[stack_size / sizeof(u64)]; \
1408 	u64 regs[MAX_BPF_REG]; \
1409 \
1410 	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
1411 	ARG1 = (u64) (unsigned long) ctx; \
1412 	return ___bpf_prog_run(regs, insn, stack); \
1413 }
1414 
1415 #define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size
1416 #define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \
1417 static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
1418 				      const struct bpf_insn *insn) \
1419 { \
1420 	u64 stack[stack_size / sizeof(u64)]; \
1421 	u64 regs[MAX_BPF_REG]; \
1422 \
1423 	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
1424 	BPF_R1 = r1; \
1425 	BPF_R2 = r2; \
1426 	BPF_R3 = r3; \
1427 	BPF_R4 = r4; \
1428 	BPF_R5 = r5; \
1429 	return ___bpf_prog_run(regs, insn, stack); \
1430 }
1431 
1432 #define EVAL1(FN, X) FN(X)
1433 #define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
1434 #define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
1435 #define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y)
1436 #define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y)
1437 #define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y)
1438 
1439 EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
1440 EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
1441 EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);
1442 
1443 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192);
1444 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384);
1445 EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);
1446 
1447 #define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),
1448 
1449 static unsigned int (*interpreters[])(const void *ctx,
1450 				      const struct bpf_insn *insn) = {
1451 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
1452 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
1453 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
1454 };
1455 #undef PROG_NAME_LIST
1456 #define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size),
1457 static u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5,
1458 				  const struct bpf_insn *insn) = {
1459 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
1460 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
1461 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
1462 };
1463 #undef PROG_NAME_LIST
1464 
1465 void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
1466 {
1467 	stack_depth = max_t(u32, stack_depth, 1);
1468 	insn->off = (s16) insn->imm;
1469 	insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] -
1470 		__bpf_call_base_args;
1471 	insn->code = BPF_JMP | BPF_CALL_ARGS;
1472 }
1473 
1474 #else
1475 static unsigned int __bpf_prog_ret0_warn(const void *ctx,
1476 					 const struct bpf_insn *insn)
1477 {
1478 	/* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
1479 	 * is not working properly, so warn about it!
1480 	 */
1481 	WARN_ON_ONCE(1);
1482 	return 0;
1483 }
1484 #endif
1485 
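/* All programs placed into one BPF_MAP_TYPE_PROG_ARRAY must agree on
 * program type and on whether they are JITed, since a tail call
 * transfers control directly into the target image; the first program
 * added to the array defines the expected combination.
 */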
1486 bool bpf_prog_array_compatible(struct bpf_array *array,
1487 			       const struct bpf_prog *fp)
1488 {
1489 	if (fp->kprobe_override)
1490 		return false;
1491 
1492 	if (!array->owner_prog_type) {
1493 		/* There's no owner yet where we could check for
1494 		 * compatibility.
1495 		 */
1496 		array->owner_prog_type = fp->type;
1497 		array->owner_jited = fp->jited;
1498 
1499 		return true;
1500 	}
1501 
1502 	return array->owner_prog_type == fp->type &&
1503 	       array->owner_jited == fp->jited;
1504 }
1505 
1506 static int bpf_check_tail_call(const struct bpf_prog *fp)
1507 {
1508 	struct bpf_prog_aux *aux = fp->aux;
1509 	int i;
1510 
1511 	for (i = 0; i < aux->used_map_cnt; i++) {
1512 		struct bpf_map *map = aux->used_maps[i];
1513 		struct bpf_array *array;
1514 
1515 		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
1516 			continue;
1517 
1518 		array = container_of(map, struct bpf_array, map);
1519 		if (!bpf_prog_array_compatible(array, fp))
1520 			return -EINVAL;
1521 	}
1522 
1523 	return 0;
1524 }
1525 
1526 static void bpf_prog_select_func(struct bpf_prog *fp)
1527 {
1528 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
1529 	u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
1530 
1531 	fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
1532 #else
1533 	fp->bpf_func = __bpf_prog_ret0_warn;
1534 #endif
1535 }
1536 
1537 /**
1538  *	bpf_prog_select_runtime - select exec runtime for BPF program
1539  *	@fp: bpf_prog populated with internal BPF program
1540  *	@err: pointer to error variable
1541  *
1542  * Try to JIT eBPF program, if JIT is not available, use interpreter.
1543  * The BPF program will be executed via BPF_PROG_RUN() macro.
1544  */
1545 struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
1546 {
1547 	/* In case of BPF to BPF calls, verifier did all the prep
1548 	 * work with regard to JITing, etc.
1549 	 */
1550 	if (fp->bpf_func)
1551 		goto finalize;
1552 
1553 	bpf_prog_select_func(fp);
1554 
1555 	/* eBPF JITs can rewrite the program in case constant
1556 	 * blinding is active. However, in case of error during
1557 	 * blinding, bpf_int_jit_compile() must always return a
1558 	 * valid program, which in this case would simply not
1559 	 * be JITed, but fall back to the interpreter.
1560 	 */
1561 	if (!bpf_prog_is_dev_bound(fp->aux)) {
1562 		fp = bpf_int_jit_compile(fp);
1563 #ifdef CONFIG_BPF_JIT_ALWAYS_ON
1564 		if (!fp->jited) {
1565 			*err = -ENOTSUPP;
1566 			return fp;
1567 		}
1568 #endif
1569 	} else {
1570 		*err = bpf_prog_offload_compile(fp);
1571 		if (*err)
1572 			return fp;
1573 	}
1574 
1575 finalize:
1576 	bpf_prog_lock_ro(fp);
1577 
1578 	/* The tail call compatibility check can only be done at
1579 	 * this late stage as we need to determine whether we deal
1580 	 * with JITed or non-JITed program concatenations, and not
1581 	 * all eBPF JITs might immediately support all features.
1582 	 */
1583 	*err = bpf_check_tail_call(fp);
1584 
1585 	return fp;
1586 }
1587 EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
1588 
1589 static unsigned int __bpf_prog_ret1(const void *ctx,
1590 				    const struct bpf_insn *insn)
1591 {
1592 	return 1;
1593 }
1594 
1595 static struct bpf_prog_dummy {
1596 	struct bpf_prog prog;
1597 } dummy_bpf_prog = {
1598 	.prog = {
1599 		.bpf_func = __bpf_prog_ret1,
1600 	},
1601 };
1602 
1603 /* To avoid allocating an empty bpf_prog_array for cgroups that don't
1604  * have any bpf program attached, use one global 'empty_prog_array'.
1605  * It will not be modified by the caller of bpf_prog_array_alloc()
1606  * (since the caller requested prog_cnt == 0), and that pointer should
1607  * still be 'freed' via bpf_prog_array_free().
1608  */
1609 static struct {
1610 	struct bpf_prog_array hdr;
1611 	struct bpf_prog *null_prog;
1612 } empty_prog_array = {
1613 	.null_prog = NULL,
1614 };
1615 
1616 struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
1617 {
1618 	if (prog_cnt)
1619 		return kzalloc(sizeof(struct bpf_prog_array) +
1620 			       sizeof(struct bpf_prog_array_item) *
1621 			       (prog_cnt + 1),
1622 			       flags);
1623 
1624 	return &empty_prog_array.hdr;
1625 }
1626 
1627 void bpf_prog_array_free(struct bpf_prog_array __rcu *progs)
1628 {
1629 	if (!progs ||
1630 	    progs == (struct bpf_prog_array __rcu *)&empty_prog_array.hdr)
1631 		return;
1632 	kfree_rcu(progs, rcu);
1633 }
1634 
1635 int bpf_prog_array_length(struct bpf_prog_array __rcu *array)
1636 {
1637 	struct bpf_prog_array_item *item;
1638 	u32 cnt = 0;
1639 
1640 	rcu_read_lock();
1641 	item = rcu_dereference(array)->items;
1642 	for (; item->prog; item++)
1643 		if (item->prog != &dummy_bpf_prog.prog)
1644 			cnt++;
1645 	rcu_read_unlock();
1646 	return cnt;
1647 }
1648 
1649 
1650 static bool bpf_prog_array_copy_core(struct bpf_prog_array __rcu *array,
1651 				     u32 *prog_ids,
1652 				     u32 request_cnt)
1653 {
1654 	struct bpf_prog_array_item *item;
1655 	int i = 0;
1656 
1657 	item = rcu_dereference_check(array, 1)->items;
1658 	for (; item->prog; item++) {
1659 		if (item->prog == &dummy_bpf_prog.prog)
1660 			continue;
1661 		prog_ids[i] = item->prog->aux->id;
1662 		if (++i == request_cnt) {
1663 			item++;
1664 			break;
1665 		}
1666 	}
1667 
1668 	return !!(item->prog);
1669 }
1670 
1671 int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *array,
1672 				__u32 __user *prog_ids, u32 cnt)
1673 {
1674 	unsigned long err = 0;
1675 	bool nospc;
1676 	u32 *ids;
1677 
1678 	/* users of this function are doing:
1679 	 * cnt = bpf_prog_array_length();
1680 	 * if (cnt > 0)
1681 	 *     bpf_prog_array_copy_to_user(..., cnt);
1682 	 * so below kcalloc doesn't need extra cnt > 0 check, but
1683 	 * bpf_prog_array_length() releases rcu lock and
1684 	 * prog array could have been swapped with empty or larger array,
1685 	 * so always copy 'cnt' prog_ids to the user.
1686 	 * In a rare race the user will see zero prog_ids
1687 	 * In a rare race the user will see zero prog_ids.
1688 	ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
1689 	if (!ids)
1690 		return -ENOMEM;
1691 	rcu_read_lock();
1692 	nospc = bpf_prog_array_copy_core(array, ids, cnt);
1693 	rcu_read_unlock();
1694 	err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
1695 	kfree(ids);
1696 	if (err)
1697 		return -EFAULT;
1698 	if (nospc)
1699 		return -ENOSPC;
1700 	return 0;
1701 }
1702 
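/* Deletion keeps the array intact for lock-free readers: the slot is not
 * compacted away but overwritten with dummy_bpf_prog (which just returns
 * 1), so concurrent walkers never observe a hole before the NULL
 * terminator.
 */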
1703 void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *array,
1704 				struct bpf_prog *old_prog)
1705 {
1706 	struct bpf_prog_array_item *item = array->items;
1707 
1708 	for (; item->prog; item++)
1709 		if (item->prog == old_prog) {
1710 			WRITE_ONCE(item->prog, &dummy_bpf_prog.prog);
1711 			break;
1712 		}
1713 }
1714 
1715 int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
1716 			struct bpf_prog *exclude_prog,
1717 			struct bpf_prog *include_prog,
1718 			struct bpf_prog_array **new_array)
1719 {
1720 	int new_prog_cnt, carry_prog_cnt = 0;
1721 	struct bpf_prog_array_item *existing;
1722 	struct bpf_prog_array *array;
1723 	bool found_exclude = false;
1724 	int new_prog_idx = 0;
1725 
1726 	/* Figure out how many existing progs we need to carry over to
1727 	 * the new array.
1728 	 */
1729 	if (old_array) {
1730 		existing = old_array->items;
1731 		for (; existing->prog; existing++) {
1732 			if (existing->prog == exclude_prog) {
1733 				found_exclude = true;
1734 				continue;
1735 			}
1736 			if (existing->prog != &dummy_bpf_prog.prog)
1737 				carry_prog_cnt++;
1738 			if (existing->prog == include_prog)
1739 				return -EEXIST;
1740 		}
1741 	}
1742 
1743 	if (exclude_prog && !found_exclude)
1744 		return -ENOENT;
1745 
1746 	/* How many progs (not NULL) will be in the new array? */
1747 	new_prog_cnt = carry_prog_cnt;
1748 	if (include_prog)
1749 		new_prog_cnt += 1;
1750 
1751 	/* Do we have any prog (not NULL) in the new array? */
1752 	if (!new_prog_cnt) {
1753 		*new_array = NULL;
1754 		return 0;
1755 	}
1756 
1757 	/* +1 as the end of prog_array is marked with NULL */
1758 	array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL);
1759 	if (!array)
1760 		return -ENOMEM;
1761 
1762 	/* Fill in the new prog array */
1763 	if (carry_prog_cnt) {
1764 		existing = old_array->items;
1765 		for (; existing->prog; existing++)
1766 			if (existing->prog != exclude_prog &&
1767 			    existing->prog != &dummy_bpf_prog.prog) {
1768 				array->items[new_prog_idx++].prog =
1769 					existing->prog;
1770 			}
1771 	}
1772 	if (include_prog)
1773 		array->items[new_prog_idx++].prog = include_prog;
1774 	array->items[new_prog_idx].prog = NULL;
1775 	*new_array = array;
1776 	return 0;
1777 }
1778 
1779 int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array,
1780 			     u32 *prog_ids, u32 request_cnt,
1781 			     u32 *prog_cnt)
1782 {
1783 	u32 cnt = 0;
1784 
1785 	if (array)
1786 		cnt = bpf_prog_array_length(array);
1787 
1788 	*prog_cnt = cnt;
1789 
1790 	/* return early if user requested only program count or nothing to copy */
1791 	if (!request_cnt || !cnt)
1792 		return 0;
1793 
1794 	/* this function is called under trace/bpf_trace.c: bpf_event_mutex */
1795 	return bpf_prog_array_copy_core(array, prog_ids, request_cnt) ? -ENOSPC
1796 								     : 0;
1797 }
1798 
1799 static void bpf_prog_free_deferred(struct work_struct *work)
1800 {
1801 	struct bpf_prog_aux *aux;
1802 	int i;
1803 
1804 	aux = container_of(work, struct bpf_prog_aux, work);
1805 	if (bpf_prog_is_dev_bound(aux))
1806 		bpf_prog_offload_destroy(aux->prog);
1807 #ifdef CONFIG_PERF_EVENTS
1808 	if (aux->prog->has_callchain_buf)
1809 		put_callchain_buffers();
1810 #endif
1811 	for (i = 0; i < aux->func_cnt; i++)
1812 		bpf_jit_free(aux->func[i]);
1813 	if (aux->func_cnt) {
1814 		kfree(aux->func);
1815 		bpf_prog_unlock_free(aux->prog);
1816 	} else {
1817 		bpf_jit_free(aux->prog);
1818 	}
1819 }
1820 
1821 /* Free internal BPF program */
1822 void bpf_prog_free(struct bpf_prog *fp)
1823 {
1824 	struct bpf_prog_aux *aux = fp->aux;
1825 
1826 	INIT_WORK(&aux->work, bpf_prog_free_deferred);
1827 	schedule_work(&aux->work);
1828 }
1829 EXPORT_SYMBOL_GPL(bpf_prog_free);
1830 
1831 /* RNG for unprivileged user space with separated state from prandom_u32(). */
1832 static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);
1833 
1834 void bpf_user_rnd_init_once(void)
1835 {
1836 	prandom_init_once(&bpf_user_rnd_state);
1837 }
1838 
1839 BPF_CALL_0(bpf_user_rnd_u32)
1840 {
1841 	/* Should someone ever have the rather unwise idea to use some
1842 	 * of the registers passed into this function, then note that
1843 	 * this function is called from native eBPF and classic-to-eBPF
1844 	 * transformations. Register assignments from both sides are
1845 	 * different, f.e. classic always sets fn(ctx, A, X) here.
1846 	 */
1847 	struct rnd_state *state;
1848 	u32 res;
1849 
1850 	state = &get_cpu_var(bpf_user_rnd_state);
1851 	res = prandom_u32_state(state);
1852 	put_cpu_var(bpf_user_rnd_state);
1853 
1854 	return res;
1855 }
1856 
1857 /* Weak definitions of helper functions in case we don't have bpf syscall. */
1858 const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
1859 const struct bpf_func_proto bpf_map_update_elem_proto __weak;
1860 const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
1861 const struct bpf_func_proto bpf_map_push_elem_proto __weak;
1862 const struct bpf_func_proto bpf_map_pop_elem_proto __weak;
1863 const struct bpf_func_proto bpf_map_peek_elem_proto __weak;
1864 
1865 const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
1866 const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
1867 const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
1868 const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
1869 
1870 const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
1871 const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
1872 const struct bpf_func_proto bpf_get_current_comm_proto __weak;
1873 const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak;
1874 const struct bpf_func_proto bpf_get_local_storage_proto __weak;
1875 
1876 const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
1877 {
1878 	return NULL;
1879 }
1880 
1881 u64 __weak
1882 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
1883 		 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
1884 {
1885 	return -ENOTSUPP;
1886 }
1887 EXPORT_SYMBOL_GPL(bpf_event_output);
1888 
1889 /* Always built-in helper functions. */
1890 const struct bpf_func_proto bpf_tail_call_proto = {
1891 	.func		= NULL,
1892 	.gpl_only	= false,
1893 	.ret_type	= RET_VOID,
1894 	.arg1_type	= ARG_PTR_TO_CTX,
1895 	.arg2_type	= ARG_CONST_MAP_PTR,
1896 	.arg3_type	= ARG_ANYTHING,
1897 };
1898 
1899 /* Stub for JITs that only support cBPF. eBPF programs are interpreted.
1900  * It is encouraged to implement bpf_int_jit_compile() instead, so that
1901  * eBPF and implicitly also cBPF can get JITed!
1902  */
1903 struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
1904 {
1905 	return prog;
1906 }
1907 
1908 /* Stub for JITs that support eBPF. All cBPF code gets transformed into
1909  * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
1910  */
1911 void __weak bpf_jit_compile(struct bpf_prog *prog)
1912 {
1913 }
1914 
1915 bool __weak bpf_helper_changes_pkt_data(void *func)
1916 {
1917 	return false;
1918 }
1919 
1920 /* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
1921  * skb_copy_bits(), so provide a weak definition of it for NET-less config.
1922  */
1923 int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
1924 			 int len)
1925 {
1926 	return -EFAULT;
1927 }
1928 
1929 /* All definitions of tracepoints related to BPF. */
1930 #define CREATE_TRACE_POINTS
1931 #include <linux/bpf_trace.h>
1932 
1933 EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);
1934