xref: /openbmc/linux/kernel/bpf/core.c (revision 6a6d6681ac1add9655b7ab5dd0b46b54aeb1b44f)
1 /*
2  * Linux Socket Filter - Kernel level socket filtering
3  *
4  * Based on the design of the Berkeley Packet Filter. The new
5  * internal format has been designed by PLUMgrid:
6  *
7  *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
8  *
9  * Authors:
10  *
11  *	Jay Schulist <jschlst@samba.org>
12  *	Alexei Starovoitov <ast@plumgrid.com>
13  *	Daniel Borkmann <dborkman@redhat.com>
14  *
15  * This program is free software; you can redistribute it and/or
16  * modify it under the terms of the GNU General Public License
17  * as published by the Free Software Foundation; either version
18  * 2 of the License, or (at your option) any later version.
19  *
20  * Andi Kleen - Fix a few bad bugs and races.
21  * Kris Katterjohn - Added many additional checks in bpf_check_classic()
22  */
23 
24 #include <uapi/linux/btf.h>
25 #include <linux/filter.h>
26 #include <linux/skbuff.h>
27 #include <linux/vmalloc.h>
28 #include <linux/random.h>
29 #include <linux/moduleloader.h>
30 #include <linux/bpf.h>
31 #include <linux/btf.h>
32 #include <linux/frame.h>
33 #include <linux/rbtree_latch.h>
34 #include <linux/kallsyms.h>
35 #include <linux/rcupdate.h>
36 #include <linux/perf_event.h>
37 
38 #include <asm/unaligned.h>
39 
40 /* Registers */
41 #define BPF_R0	regs[BPF_REG_0]
42 #define BPF_R1	regs[BPF_REG_1]
43 #define BPF_R2	regs[BPF_REG_2]
44 #define BPF_R3	regs[BPF_REG_3]
45 #define BPF_R4	regs[BPF_REG_4]
46 #define BPF_R5	regs[BPF_REG_5]
47 #define BPF_R6	regs[BPF_REG_6]
48 #define BPF_R7	regs[BPF_REG_7]
49 #define BPF_R8	regs[BPF_REG_8]
50 #define BPF_R9	regs[BPF_REG_9]
51 #define BPF_R10	regs[BPF_REG_10]
52 
53 /* Named registers */
54 #define DST	regs[insn->dst_reg]
55 #define SRC	regs[insn->src_reg]
56 #define FP	regs[BPF_REG_FP]
57 #define ARG1	regs[BPF_REG_ARG1]
58 #define CTX	regs[BPF_REG_CTX]
59 #define IMM	insn->imm
60 
61 /* No hurry in this branch
62  *
63  * Exported for the bpf jit load helper.
64  */
65 void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
66 {
67 	u8 *ptr = NULL;
68 
69 	if (k >= SKF_NET_OFF)
70 		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
71 	else if (k >= SKF_LL_OFF)
72 		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
73 
74 	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
75 		return ptr;
76 
77 	return NULL;
78 }
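
/* Illustrative example (not part of this file): a classic BPF filter that
 * relies on the special negative offsets handled above. SKF_NET_OFF + 9
 * reads the IPv4 protocol byte relative to the network header; the
 * constants and macros come from the <linux/filter.h> UAPI.
 *
 *	struct sock_filter code[] = {
 *		BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_NET_OFF + 9),
 *		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, IPPROTO_TCP, 0, 1),
 *		BPF_STMT(BPF_RET | BPF_K, 0xffff),
 *		BPF_STMT(BPF_RET | BPF_K, 0),
 *	};
 */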
79 
80 struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
81 {
82 	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
83 	struct bpf_prog_aux *aux;
84 	struct bpf_prog *fp;
85 
86 	size = round_up(size, PAGE_SIZE);
87 	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
88 	if (fp == NULL)
89 		return NULL;
90 
91 	aux = kzalloc(sizeof(*aux), GFP_KERNEL | gfp_extra_flags);
92 	if (aux == NULL) {
93 		vfree(fp);
94 		return NULL;
95 	}
96 
97 	fp->pages = size / PAGE_SIZE;
98 	fp->aux = aux;
99 	fp->aux->prog = fp;
100 	fp->jit_requested = ebpf_jit_enabled();
101 
102 	INIT_LIST_HEAD_RCU(&fp->aux->ksym_lnode);
103 
104 	return fp;
105 }
106 EXPORT_SYMBOL_GPL(bpf_prog_alloc);
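
/* Illustrative sketch (an assumption modelled on typical callers such as
 * the bpf(2) syscall path, not code from this file): allocating a program
 * for 'insn_cnt' instructions and filling in the instruction array.
 *
 *	fp = bpf_prog_alloc(bpf_prog_size(insn_cnt), GFP_USER);
 *	if (!fp)
 *		return -ENOMEM;
 *	fp->len = insn_cnt;
 *	memcpy(fp->insnsi, insns, bpf_prog_insn_size(fp));
 */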
107 
108 struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
109 				  gfp_t gfp_extra_flags)
110 {
111 	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
112 	struct bpf_prog *fp;
113 	u32 pages, delta;
114 	int ret;
115 
116 	BUG_ON(fp_old == NULL);
117 
118 	size = round_up(size, PAGE_SIZE);
119 	pages = size / PAGE_SIZE;
120 	if (pages <= fp_old->pages)
121 		return fp_old;
122 
123 	delta = pages - fp_old->pages;
124 	ret = __bpf_prog_charge(fp_old->aux->user, delta);
125 	if (ret)
126 		return NULL;
127 
128 	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
129 	if (fp == NULL) {
130 		__bpf_prog_uncharge(fp_old->aux->user, delta);
131 	} else {
132 		memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
133 		fp->pages = pages;
134 		fp->aux->prog = fp;
135 
136 		/* We keep fp->aux from fp_old around in the new
137 		 * reallocated structure.
138 		 */
139 		fp_old->aux = NULL;
140 		__bpf_prog_free(fp_old);
141 	}
142 
143 	return fp;
144 }
145 
146 void __bpf_prog_free(struct bpf_prog *fp)
147 {
148 	kfree(fp->aux);
149 	vfree(fp);
150 }
151 
152 int bpf_prog_calc_tag(struct bpf_prog *fp)
153 {
154 	const u32 bits_offset = SHA_MESSAGE_BYTES - sizeof(__be64);
155 	u32 raw_size = bpf_prog_tag_scratch_size(fp);
156 	u32 digest[SHA_DIGEST_WORDS];
157 	u32 ws[SHA_WORKSPACE_WORDS];
158 	u32 i, bsize, psize, blocks;
159 	struct bpf_insn *dst;
160 	bool was_ld_map;
161 	u8 *raw, *todo;
162 	__be32 *result;
163 	__be64 *bits;
164 
165 	raw = vmalloc(raw_size);
166 	if (!raw)
167 		return -ENOMEM;
168 
169 	sha_init(digest);
170 	memset(ws, 0, sizeof(ws));
171 
172 	/* We need to take out the map fds for the digest calculation
173 	 * since they are unstable from the user space side.
174 	 */
175 	dst = (void *)raw;
176 	for (i = 0, was_ld_map = false; i < fp->len; i++) {
177 		dst[i] = fp->insnsi[i];
178 		if (!was_ld_map &&
179 		    dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
180 		    dst[i].src_reg == BPF_PSEUDO_MAP_FD) {
181 			was_ld_map = true;
182 			dst[i].imm = 0;
183 		} else if (was_ld_map &&
184 			   dst[i].code == 0 &&
185 			   dst[i].dst_reg == 0 &&
186 			   dst[i].src_reg == 0 &&
187 			   dst[i].off == 0) {
188 			was_ld_map = false;
189 			dst[i].imm = 0;
190 		} else {
191 			was_ld_map = false;
192 		}
193 	}
194 
195 	psize = bpf_prog_insn_size(fp);
196 	memset(&raw[psize], 0, raw_size - psize);
197 	raw[psize++] = 0x80;
198 
199 	bsize  = round_up(psize, SHA_MESSAGE_BYTES);
200 	blocks = bsize / SHA_MESSAGE_BYTES;
201 	todo   = raw;
202 	if (bsize - psize >= sizeof(__be64)) {
203 		bits = (__be64 *)(todo + bsize - sizeof(__be64));
204 	} else {
205 		bits = (__be64 *)(todo + bsize + bits_offset);
206 		blocks++;
207 	}
208 	*bits = cpu_to_be64((psize - 1) << 3);
209 
210 	while (blocks--) {
211 		sha_transform(digest, todo, ws);
212 		todo += SHA_MESSAGE_BYTES;
213 	}
214 
215 	result = (__force __be32 *)digest;
216 	for (i = 0; i < SHA_DIGEST_WORDS; i++)
217 		result[i] = cpu_to_be32(digest[i]);
218 	memcpy(fp->tag, result, sizeof(fp->tag));
219 
220 	vfree(raw);
221 	return 0;
222 }
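
/* Illustrative example: a map reference in the program is the two-insn
 * pseudo instruction BPF_LD_IMM64_RAW(dst, BPF_PSEUDO_MAP_FD, fd), i.e.
 * BPF_LD | BPF_IMM | BPF_DW followed by a second half whose code, regs
 * and off are all zero. The loop above zeroes both imm halves of exactly
 * this pattern so the tag does not depend on the process-local fd value.
 */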
223 
224 static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, u32 delta,
225 				u32 curr, const bool probe_pass)
226 {
227 	const s64 imm_min = S32_MIN, imm_max = S32_MAX;
228 	s64 imm = insn->imm;
229 
230 	if (curr < pos && curr + imm + 1 > pos)
231 		imm += delta;
232 	else if (curr > pos + delta && curr + imm + 1 <= pos + delta)
233 		imm -= delta;
234 	if (imm < imm_min || imm > imm_max)
235 		return -ERANGE;
236 	if (!probe_pass)
237 		insn->imm = imm;
238 	return 0;
239 }
240 
241 static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, u32 delta,
242 				u32 curr, const bool probe_pass)
243 {
244 	const s32 off_min = S16_MIN, off_max = S16_MAX;
245 	s32 off = insn->off;
246 
247 	if (curr < pos && curr + off + 1 > pos)
248 		off += delta;
249 	else if (curr > pos + delta && curr + off + 1 <= pos + delta)
250 		off -= delta;
251 	if (off < off_min || off > off_max)
252 		return -ERANGE;
253 	if (!probe_pass)
254 		insn->off = off;
255 	return 0;
256 }
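
/* Worked example (illustrative): a jump at curr = 2 with off = 3 targets
 * insn curr + off + 1 = 6. If a patch at pos = 4 grows the image by
 * delta = 2, the target shifts to insn 8; the first branch above applies
 * (2 < 4 && 6 > 4), so off becomes 3 + 2 = 5 and 2 + 5 + 1 = 8 again.
 */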
257 
258 static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, u32 delta,
259 			    const bool probe_pass)
260 {
261 	u32 i, insn_cnt = prog->len + (probe_pass ? delta : 0);
262 	struct bpf_insn *insn = prog->insnsi;
263 	int ret = 0;
264 
265 	for (i = 0; i < insn_cnt; i++, insn++) {
266 		u8 code;
267 
268 		/* In the probing pass we still operate on the original,
269 		 * unpatched image in order to check overflows before we
270 		 * do any other adjustments. Therefore skip the patchlet.
271 		 */
272 		if (probe_pass && i == pos) {
273 			i += delta + 1;
274 			insn++;
275 		}
276 		code = insn->code;
277 		if (BPF_CLASS(code) != BPF_JMP ||
278 		    BPF_OP(code) == BPF_EXIT)
279 			continue;
280 		/* Adjust offset of jmps if we cross patch boundaries. */
281 		if (BPF_OP(code) == BPF_CALL) {
282 			if (insn->src_reg != BPF_PSEUDO_CALL)
283 				continue;
284 			ret = bpf_adj_delta_to_imm(insn, pos, delta, i,
285 						   probe_pass);
286 		} else {
287 			ret = bpf_adj_delta_to_off(insn, pos, delta, i,
288 						   probe_pass);
289 		}
290 		if (ret)
291 			break;
292 	}
293 
294 	return ret;
295 }
296 
297 struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
298 				       const struct bpf_insn *patch, u32 len)
299 {
300 	u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
301 	const u32 cnt_max = S16_MAX;
302 	struct bpf_prog *prog_adj;
303 
304 	/* Since our patchlet doesn't expand the image, we're done. */
305 	if (insn_delta == 0) {
306 		memcpy(prog->insnsi + off, patch, sizeof(*patch));
307 		return prog;
308 	}
309 
310 	insn_adj_cnt = prog->len + insn_delta;
311 
312 	/* Reject anything that would potentially let the insn->off
313 	 * target overflow when we have excessive program expansions.
314 	 * We need to probe here before we do any reallocation, as
315 	 * afterwards we are not allowed to fail anymore.
316 	 */
317 	if (insn_adj_cnt > cnt_max &&
318 	    bpf_adj_branches(prog, off, insn_delta, true))
319 		return NULL;
320 
321 	/* Several new instructions need to be inserted. Make room
322 	 * for them. Likely, there's no need for a new allocation as the
323 	 * last page could have enough tailroom left.
324 	 */
325 	prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
326 				    GFP_USER);
327 	if (!prog_adj)
328 		return NULL;
329 
330 	prog_adj->len = insn_adj_cnt;
331 
332 	/* Patching happens in 3 steps:
333 	 *
334 	 * 1) Move over tail of insnsi from next instruction onwards,
335 	 *    so we can patch the single target insn with one or more
336 	 *    new ones (patching is always from 1 to n insns, n > 0).
337 	 * 2) Inject new instructions at the target location.
338 	 * 3) Adjust branch offsets if necessary.
339 	 */
340 	insn_rest = insn_adj_cnt - off - len;
341 
342 	memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
343 		sizeof(*patch) * insn_rest);
344 	memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);
345 
346 	/* We are guaranteed to not fail at this point; otherwise the ship
347 	 * has sailed and we cannot revert to the original state anymore. An
348 	 * overflow cannot happen at this point.
349 	 */
350 	BUG_ON(bpf_adj_branches(prog_adj, off, insn_delta, false));
351 
352 	return prog_adj;
353 }
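
/* Illustrative sketch (an assumption modelled on verifier-style callers,
 * not code from this file): replacing the single instruction at 'off'
 * with a two-instruction patchlet.
 *
 *	struct bpf_insn patch[] = {
 *		BPF_MOV64_REG(BPF_REG_AX, BPF_REG_0),
 *		BPF_ALU64_IMM(BPF_ADD, BPF_REG_AX, 1),
 *	};
 *	struct bpf_prog *new_prog;
 *
 *	new_prog = bpf_patch_insn_single(prog, off, patch, ARRAY_SIZE(patch));
 *	if (!new_prog)
 *		return -ENOMEM;
 *	prog = new_prog;
 */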
354 
355 void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
356 {
357 	int i;
358 
359 	for (i = 0; i < fp->aux->func_cnt; i++)
360 		bpf_prog_kallsyms_del(fp->aux->func[i]);
361 }
362 
363 void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
364 {
365 	bpf_prog_kallsyms_del_subprogs(fp);
366 	bpf_prog_kallsyms_del(fp);
367 }
368 
369 #ifdef CONFIG_BPF_JIT
370 # define BPF_JIT_LIMIT_DEFAULT	(PAGE_SIZE * 40000)
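/* Worked example (illustrative): with 4 KiB pages the default above allows
 * 40000 pages, i.e. roughly 156 MiB of JIT image memory, before
 * bpf_jit_charge_modmem() starts rejecting allocations from unprivileged
 * users; on architectures defining MODULES_VADDR the limit is recomputed
 * below from a quarter of the module area instead.
 */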
371 
372 /* All BPF JIT sysctl knobs here. */
373 int bpf_jit_enable   __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_ALWAYS_ON);
374 int bpf_jit_harden   __read_mostly;
375 int bpf_jit_kallsyms __read_mostly;
376 int bpf_jit_limit    __read_mostly = BPF_JIT_LIMIT_DEFAULT;
377 
378 static __always_inline void
379 bpf_get_prog_addr_region(const struct bpf_prog *prog,
380 			 unsigned long *symbol_start,
381 			 unsigned long *symbol_end)
382 {
383 	const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(prog);
384 	unsigned long addr = (unsigned long)hdr;
385 
386 	WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));
387 
388 	*symbol_start = addr;
389 	*symbol_end   = addr + hdr->pages * PAGE_SIZE;
390 }
391 
392 static void bpf_get_prog_name(const struct bpf_prog *prog, char *sym)
393 {
394 	const char *end = sym + KSYM_NAME_LEN;
395 	const struct btf_type *type;
396 	const char *func_name;
397 
398 	BUILD_BUG_ON(sizeof("bpf_prog_") +
399 		     sizeof(prog->tag) * 2 +
400 		     /* name has been null terminated.
401 		      * We would need +1 for the '_' preceding
402 		      * the name.  However, the null character
403 		      * is double counted between the name and the
404 		      * sizeof("bpf_prog_") above, so we omit
405 		      * the +1 here.
406 		      */
407 		     sizeof(prog->aux->name) > KSYM_NAME_LEN);
408 
409 	sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
410 	sym  = bin2hex(sym, prog->tag, sizeof(prog->tag));
411 
412 	/* prog->aux->name will be ignored if full btf name is available */
413 	if (prog->aux->btf) {
414 		type = btf_type_by_id(prog->aux->btf,
415 				      prog->aux->func_info[prog->aux->func_idx].type_id);
416 		func_name = btf_name_by_offset(prog->aux->btf, type->name_off);
417 		snprintf(sym, (size_t)(end - sym), "_%s", func_name);
418 		return;
419 	}
420 
421 	if (prog->aux->name[0])
422 		snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name);
423 	else
424 		*sym = 0;
425 }
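
/* Illustrative example (tag value made up): for a program named "foo"
 * without BTF and with the shown tag, the resulting kallsyms entry is
 * "bpf_prog_8937c9e7f2a4b6d1_foo"; with BTF available, the btf func name
 * is used as the suffix instead of prog->aux->name.
 */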
426 
427 static __always_inline unsigned long
428 bpf_get_prog_addr_start(struct latch_tree_node *n)
429 {
430 	unsigned long symbol_start, symbol_end;
431 	const struct bpf_prog_aux *aux;
432 
433 	aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
434 	bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
435 
436 	return symbol_start;
437 }
438 
439 static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
440 					  struct latch_tree_node *b)
441 {
442 	return bpf_get_prog_addr_start(a) < bpf_get_prog_addr_start(b);
443 }
444 
445 static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
446 {
447 	unsigned long val = (unsigned long)key;
448 	unsigned long symbol_start, symbol_end;
449 	const struct bpf_prog_aux *aux;
450 
451 	aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
452 	bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
453 
454 	if (val < symbol_start)
455 		return -1;
456 	if (val >= symbol_end)
457 		return  1;
458 
459 	return 0;
460 }
461 
462 static const struct latch_tree_ops bpf_tree_ops = {
463 	.less	= bpf_tree_less,
464 	.comp	= bpf_tree_comp,
465 };
466 
467 static DEFINE_SPINLOCK(bpf_lock);
468 static LIST_HEAD(bpf_kallsyms);
469 static struct latch_tree_root bpf_tree __cacheline_aligned;
470 
471 static void bpf_prog_ksym_node_add(struct bpf_prog_aux *aux)
472 {
473 	WARN_ON_ONCE(!list_empty(&aux->ksym_lnode));
474 	list_add_tail_rcu(&aux->ksym_lnode, &bpf_kallsyms);
475 	latch_tree_insert(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
476 }
477 
478 static void bpf_prog_ksym_node_del(struct bpf_prog_aux *aux)
479 {
480 	if (list_empty(&aux->ksym_lnode))
481 		return;
482 
483 	latch_tree_erase(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
484 	list_del_rcu(&aux->ksym_lnode);
485 }
486 
487 static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
488 {
489 	return fp->jited && !bpf_prog_was_classic(fp);
490 }
491 
492 static bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
493 {
494 	return list_empty(&fp->aux->ksym_lnode) ||
495 	       fp->aux->ksym_lnode.prev == LIST_POISON2;
496 }
497 
498 void bpf_prog_kallsyms_add(struct bpf_prog *fp)
499 {
500 	if (!bpf_prog_kallsyms_candidate(fp) ||
501 	    !capable(CAP_SYS_ADMIN))
502 		return;
503 
504 	spin_lock_bh(&bpf_lock);
505 	bpf_prog_ksym_node_add(fp->aux);
506 	spin_unlock_bh(&bpf_lock);
507 }
508 
509 void bpf_prog_kallsyms_del(struct bpf_prog *fp)
510 {
511 	if (!bpf_prog_kallsyms_candidate(fp))
512 		return;
513 
514 	spin_lock_bh(&bpf_lock);
515 	bpf_prog_ksym_node_del(fp->aux);
516 	spin_unlock_bh(&bpf_lock);
517 }
518 
519 static struct bpf_prog *bpf_prog_kallsyms_find(unsigned long addr)
520 {
521 	struct latch_tree_node *n;
522 
523 	if (!bpf_jit_kallsyms_enabled())
524 		return NULL;
525 
526 	n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
527 	return n ?
528 	       container_of(n, struct bpf_prog_aux, ksym_tnode)->prog :
529 	       NULL;
530 }
531 
532 const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
533 				 unsigned long *off, char *sym)
534 {
535 	unsigned long symbol_start, symbol_end;
536 	struct bpf_prog *prog;
537 	char *ret = NULL;
538 
539 	rcu_read_lock();
540 	prog = bpf_prog_kallsyms_find(addr);
541 	if (prog) {
542 		bpf_get_prog_addr_region(prog, &symbol_start, &symbol_end);
543 		bpf_get_prog_name(prog, sym);
544 
545 		ret = sym;
546 		if (size)
547 			*size = symbol_end - symbol_start;
548 		if (off)
549 			*off  = addr - symbol_start;
550 	}
551 	rcu_read_unlock();
552 
553 	return ret;
554 }
555 
556 bool is_bpf_text_address(unsigned long addr)
557 {
558 	bool ret;
559 
560 	rcu_read_lock();
561 	ret = bpf_prog_kallsyms_find(addr) != NULL;
562 	rcu_read_unlock();
563 
564 	return ret;
565 }
566 
567 int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
568 		    char *sym)
569 {
570 	struct bpf_prog_aux *aux;
571 	unsigned int it = 0;
572 	int ret = -ERANGE;
573 
574 	if (!bpf_jit_kallsyms_enabled())
575 		return ret;
576 
577 	rcu_read_lock();
578 	list_for_each_entry_rcu(aux, &bpf_kallsyms, ksym_lnode) {
579 		if (it++ != symnum)
580 			continue;
581 
582 		bpf_get_prog_name(aux->prog, sym);
583 
584 		*value = (unsigned long)aux->prog->bpf_func;
585 		*type  = BPF_SYM_ELF_TYPE;
586 
587 		ret = 0;
588 		break;
589 	}
590 	rcu_read_unlock();
591 
592 	return ret;
593 }
594 
595 static atomic_long_t bpf_jit_current;
596 
597 #if defined(MODULES_VADDR)
598 static int __init bpf_jit_charge_init(void)
599 {
600 	/* Only used as heuristic here to derive limit. */
601 	bpf_jit_limit = min_t(u64, round_up((MODULES_END - MODULES_VADDR) >> 2,
602 					    PAGE_SIZE), INT_MAX);
603 	return 0;
604 }
605 pure_initcall(bpf_jit_charge_init);
606 #endif
607 
608 static int bpf_jit_charge_modmem(u32 pages)
609 {
610 	if (atomic_long_add_return(pages, &bpf_jit_current) >
611 	    (bpf_jit_limit >> PAGE_SHIFT)) {
612 		if (!capable(CAP_SYS_ADMIN)) {
613 			atomic_long_sub(pages, &bpf_jit_current);
614 			return -EPERM;
615 		}
616 	}
617 
618 	return 0;
619 }
620 
621 static void bpf_jit_uncharge_modmem(u32 pages)
622 {
623 	atomic_long_sub(pages, &bpf_jit_current);
624 }
625 
626 struct bpf_binary_header *
627 bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
628 		     unsigned int alignment,
629 		     bpf_jit_fill_hole_t bpf_fill_ill_insns)
630 {
631 	struct bpf_binary_header *hdr;
632 	u32 size, hole, start, pages;
633 
634 	/* Most BPF filters are really small, but if some of them
635 	 * fill a page, allow at least 128 extra bytes to insert a
636 	 * random section of illegal instructions.
637 	 */
638 	size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
639 	pages = size / PAGE_SIZE;
640 
641 	if (bpf_jit_charge_modmem(pages))
642 		return NULL;
643 	hdr = module_alloc(size);
644 	if (!hdr) {
645 		bpf_jit_uncharge_modmem(pages);
646 		return NULL;
647 	}
648 
649 	/* Fill space with illegal/arch-dep instructions. */
650 	bpf_fill_ill_insns(hdr, size);
651 
652 	hdr->pages = pages;
653 	hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
654 		     PAGE_SIZE - sizeof(*hdr));
655 	start = (get_random_int() % hole) & ~(alignment - 1);
656 
657 	/* Leave a random number of instructions before BPF code. */
658 	*image_ptr = &hdr->image[start];
659 
660 	return hdr;
661 }
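
/* Illustrative sketch (an assumption modelled on how arch JITs typically
 * consume this API; 'image_size' and 'jit_fill_hole' are hypothetical
 * arch-side names, not code from this file):
 *
 *	header = bpf_jit_binary_alloc(image_size, &image, 4, jit_fill_hole);
 *	if (!header)
 *		return prog;
 *	...emit instructions into 'image'...
 *	bpf_jit_binary_lock_ro(header);
 *	prog->bpf_func = (void *)image;
 *	prog->jited = 1;
 */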
662 
663 void bpf_jit_binary_free(struct bpf_binary_header *hdr)
664 {
665 	u32 pages = hdr->pages;
666 
667 	module_memfree(hdr);
668 	bpf_jit_uncharge_modmem(pages);
669 }
670 
671 /* This symbol is only overridden by archs that have different
672  * requirements than the usual eBPF JITs, f.e. when they only
673  * implement cBPF JIT, do not set images read-only, etc.
674  */
675 void __weak bpf_jit_free(struct bpf_prog *fp)
676 {
677 	if (fp->jited) {
678 		struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);
679 
680 		bpf_jit_binary_unlock_ro(hdr);
681 		bpf_jit_binary_free(hdr);
682 
683 		WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
684 	}
685 
686 	bpf_prog_unlock_free(fp);
687 }
688 
689 int bpf_jit_get_func_addr(const struct bpf_prog *prog,
690 			  const struct bpf_insn *insn, bool extra_pass,
691 			  u64 *func_addr, bool *func_addr_fixed)
692 {
693 	s16 off = insn->off;
694 	s32 imm = insn->imm;
695 	u8 *addr;
696 
697 	*func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL;
698 	if (!*func_addr_fixed) {
699 		/* Place-holder address till the last pass has collected
700 		 * all addresses for JITed subprograms, in which case we
701 		 * can pick them up from prog->aux.
702 		 */
703 		if (!extra_pass)
704 			addr = NULL;
705 		else if (prog->aux->func &&
706 			 off >= 0 && off < prog->aux->func_cnt)
707 			addr = (u8 *)prog->aux->func[off]->bpf_func;
708 		else
709 			return -EINVAL;
710 	} else {
711 		/* Address of a BPF helper call. Since part of the core
712 		 * kernel, it's always at a fixed location. __bpf_call_base
713 		 * and the helper with imm relative to it are both in core
714 		 * kernel.
715 		 */
716 		addr = (u8 *)__bpf_call_base + imm;
717 	}
718 
719 	*func_addr = (unsigned long)addr;
720 	return 0;
721 }
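
/* Illustrative sketch (an assumption, not code from this file): how a JIT
 * would resolve a BPF_CALL target while emitting code.
 *
 *	u64 func_addr;
 *	bool func_addr_fixed;
 *	int err;
 *
 *	err = bpf_jit_get_func_addr(prog, insn, extra_pass,
 *				    &func_addr, &func_addr_fixed);
 *	if (err)
 *		return err;
 *	...emit an arch-level call to func_addr; a fixed helper address may
 *	   allow a cheaper call sequence than a not-yet-known subprog one...
 */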
722 
723 static int bpf_jit_blind_insn(const struct bpf_insn *from,
724 			      const struct bpf_insn *aux,
725 			      struct bpf_insn *to_buff)
726 {
727 	struct bpf_insn *to = to_buff;
728 	u32 imm_rnd = get_random_int();
729 	s16 off;
730 
731 	BUILD_BUG_ON(BPF_REG_AX  + 1 != MAX_BPF_JIT_REG);
732 	BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);
733 
734 	if (from->imm == 0 &&
735 	    (from->code == (BPF_ALU   | BPF_MOV | BPF_K) ||
736 	     from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
737 		*to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
738 		goto out;
739 	}
740 
741 	switch (from->code) {
742 	case BPF_ALU | BPF_ADD | BPF_K:
743 	case BPF_ALU | BPF_SUB | BPF_K:
744 	case BPF_ALU | BPF_AND | BPF_K:
745 	case BPF_ALU | BPF_OR  | BPF_K:
746 	case BPF_ALU | BPF_XOR | BPF_K:
747 	case BPF_ALU | BPF_MUL | BPF_K:
748 	case BPF_ALU | BPF_MOV | BPF_K:
749 	case BPF_ALU | BPF_DIV | BPF_K:
750 	case BPF_ALU | BPF_MOD | BPF_K:
751 		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
752 		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
753 		*to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX);
754 		break;
755 
756 	case BPF_ALU64 | BPF_ADD | BPF_K:
757 	case BPF_ALU64 | BPF_SUB | BPF_K:
758 	case BPF_ALU64 | BPF_AND | BPF_K:
759 	case BPF_ALU64 | BPF_OR  | BPF_K:
760 	case BPF_ALU64 | BPF_XOR | BPF_K:
761 	case BPF_ALU64 | BPF_MUL | BPF_K:
762 	case BPF_ALU64 | BPF_MOV | BPF_K:
763 	case BPF_ALU64 | BPF_DIV | BPF_K:
764 	case BPF_ALU64 | BPF_MOD | BPF_K:
765 		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
766 		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
767 		*to++ = BPF_ALU64_REG(from->code, from->dst_reg, BPF_REG_AX);
768 		break;
769 
770 	case BPF_JMP | BPF_JEQ  | BPF_K:
771 	case BPF_JMP | BPF_JNE  | BPF_K:
772 	case BPF_JMP | BPF_JGT  | BPF_K:
773 	case BPF_JMP | BPF_JLT  | BPF_K:
774 	case BPF_JMP | BPF_JGE  | BPF_K:
775 	case BPF_JMP | BPF_JLE  | BPF_K:
776 	case BPF_JMP | BPF_JSGT | BPF_K:
777 	case BPF_JMP | BPF_JSLT | BPF_K:
778 	case BPF_JMP | BPF_JSGE | BPF_K:
779 	case BPF_JMP | BPF_JSLE | BPF_K:
780 	case BPF_JMP | BPF_JSET | BPF_K:
781 		/* Accommodate the extra offset in case of a backjump. */
782 		off = from->off;
783 		if (off < 0)
784 			off -= 2;
785 		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
786 		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
787 		*to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
788 		break;
789 
790 	case BPF_LD | BPF_IMM | BPF_DW:
791 		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
792 		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
793 		*to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
794 		*to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
795 		break;
796 	case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
797 		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
798 		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
799 		*to++ = BPF_ALU64_REG(BPF_OR,  aux[0].dst_reg, BPF_REG_AX);
800 		break;
801 
802 	case BPF_ST | BPF_MEM | BPF_DW:
803 	case BPF_ST | BPF_MEM | BPF_W:
804 	case BPF_ST | BPF_MEM | BPF_H:
805 	case BPF_ST | BPF_MEM | BPF_B:
806 		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
807 		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
808 		*to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
809 		break;
810 	}
811 out:
812 	return to - to_buff;
813 }
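
/* Worked example (illustrative): with imm_rnd == 0x11223344, the insn
 * BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1234) is rewritten above into
 *
 *	BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, 0x11223344 ^ 0x1234)
 *	BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, 0x11223344)
 *	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_AX)
 *
 * so the original constant never appears literally in the JIT image,
 * while R1 += 0x1234 still holds once the XOR removes the random mask.
 */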
814 
815 static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
816 					      gfp_t gfp_extra_flags)
817 {
818 	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
819 	struct bpf_prog *fp;
820 
821 	fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags, PAGE_KERNEL);
822 	if (fp != NULL) {
823 		/* aux->prog still points to the fp_other one, so
824 		 * when promoting the clone to the real program,
825 		 * this still needs to be adapted.
826 		 */
827 		memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
828 	}
829 
830 	return fp;
831 }
832 
833 static void bpf_prog_clone_free(struct bpf_prog *fp)
834 {
835 	/* aux was stolen by the other clone, so we cannot free
836 	 * it from this path! It will be freed eventually by the
837 	 * other program on release.
838 	 *
839 	 * At this point, we don't need a deferred release since
840 	 * clone is guaranteed to not be locked.
841 	 */
842 	fp->aux = NULL;
843 	__bpf_prog_free(fp);
844 }
845 
846 void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
847 {
848 	/* We have to repoint aux->prog to self, as we don't
849 	 * know whether fp here is the clone or the original.
850 	 */
851 	fp->aux->prog = fp;
852 	bpf_prog_clone_free(fp_other);
853 }
854 
855 struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
856 {
857 	struct bpf_insn insn_buff[16], aux[2];
858 	struct bpf_prog *clone, *tmp;
859 	int insn_delta, insn_cnt;
860 	struct bpf_insn *insn;
861 	int i, rewritten;
862 
863 	if (!bpf_jit_blinding_enabled(prog) || prog->blinded)
864 		return prog;
865 
866 	clone = bpf_prog_clone_create(prog, GFP_USER);
867 	if (!clone)
868 		return ERR_PTR(-ENOMEM);
869 
870 	insn_cnt = clone->len;
871 	insn = clone->insnsi;
872 
873 	for (i = 0; i < insn_cnt; i++, insn++) {
874 		/* We temporarily need to hold the original ld64 insn
875 		 * so that we can still access the first part in the
876 		 * second blinding run.
877 		 */
878 		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
879 		    insn[1].code == 0)
880 			memcpy(aux, insn, sizeof(aux));
881 
882 		rewritten = bpf_jit_blind_insn(insn, aux, insn_buff);
883 		if (!rewritten)
884 			continue;
885 
886 		tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
887 		if (!tmp) {
888 			/* Patching may have repointed aux->prog during
889 			 * realloc from the original one, so we need to
890 			 * fix it up here on error.
891 			 */
892 			bpf_jit_prog_release_other(prog, clone);
893 			return ERR_PTR(-ENOMEM);
894 		}
895 
896 		clone = tmp;
897 		insn_delta = rewritten - 1;
898 
899 		/* Walk new program and skip insns we just inserted. */
900 		insn = clone->insnsi + i + insn_delta;
901 		insn_cnt += insn_delta;
902 		i        += insn_delta;
903 	}
904 
905 	clone->blinded = 1;
906 	return clone;
907 }
908 #endif /* CONFIG_BPF_JIT */
909 
910 /* Base function for offset calculation. Needs to go into .text section,
911  * therefore keeping it non-static as well; will also be used by JITs
912  * anyway later on, so do not let the compiler omit it. This also needs
913  * to go into kallsyms for correlation from e.g. bpftool, so naming
914  * must not change.
915  */
916 noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
917 {
918 	return 0;
919 }
920 EXPORT_SYMBOL_GPL(__bpf_call_base);
921 
922 /* All UAPI available opcodes. */
923 #define BPF_INSN_MAP(INSN_2, INSN_3)		\
924 	/* 32 bit ALU operations. */		\
925 	/*   Register based. */			\
926 	INSN_3(ALU, ADD, X),			\
927 	INSN_3(ALU, SUB, X),			\
928 	INSN_3(ALU, AND, X),			\
929 	INSN_3(ALU, OR,  X),			\
930 	INSN_3(ALU, LSH, X),			\
931 	INSN_3(ALU, RSH, X),			\
932 	INSN_3(ALU, XOR, X),			\
933 	INSN_3(ALU, MUL, X),			\
934 	INSN_3(ALU, MOV, X),			\
935 	INSN_3(ALU, DIV, X),			\
936 	INSN_3(ALU, MOD, X),			\
937 	INSN_2(ALU, NEG),			\
938 	INSN_3(ALU, END, TO_BE),		\
939 	INSN_3(ALU, END, TO_LE),		\
940 	/*   Immediate based. */		\
941 	INSN_3(ALU, ADD, K),			\
942 	INSN_3(ALU, SUB, K),			\
943 	INSN_3(ALU, AND, K),			\
944 	INSN_3(ALU, OR,  K),			\
945 	INSN_3(ALU, LSH, K),			\
946 	INSN_3(ALU, RSH, K),			\
947 	INSN_3(ALU, XOR, K),			\
948 	INSN_3(ALU, MUL, K),			\
949 	INSN_3(ALU, MOV, K),			\
950 	INSN_3(ALU, DIV, K),			\
951 	INSN_3(ALU, MOD, K),			\
952 	/* 64 bit ALU operations. */		\
953 	/*   Register based. */			\
954 	INSN_3(ALU64, ADD,  X),			\
955 	INSN_3(ALU64, SUB,  X),			\
956 	INSN_3(ALU64, AND,  X),			\
957 	INSN_3(ALU64, OR,   X),			\
958 	INSN_3(ALU64, LSH,  X),			\
959 	INSN_3(ALU64, RSH,  X),			\
960 	INSN_3(ALU64, XOR,  X),			\
961 	INSN_3(ALU64, MUL,  X),			\
962 	INSN_3(ALU64, MOV,  X),			\
963 	INSN_3(ALU64, ARSH, X),			\
964 	INSN_3(ALU64, DIV,  X),			\
965 	INSN_3(ALU64, MOD,  X),			\
966 	INSN_2(ALU64, NEG),			\
967 	/*   Immediate based. */		\
968 	INSN_3(ALU64, ADD,  K),			\
969 	INSN_3(ALU64, SUB,  K),			\
970 	INSN_3(ALU64, AND,  K),			\
971 	INSN_3(ALU64, OR,   K),			\
972 	INSN_3(ALU64, LSH,  K),			\
973 	INSN_3(ALU64, RSH,  K),			\
974 	INSN_3(ALU64, XOR,  K),			\
975 	INSN_3(ALU64, MUL,  K),			\
976 	INSN_3(ALU64, MOV,  K),			\
977 	INSN_3(ALU64, ARSH, K),			\
978 	INSN_3(ALU64, DIV,  K),			\
979 	INSN_3(ALU64, MOD,  K),			\
980 	/* Call instruction. */			\
981 	INSN_2(JMP, CALL),			\
982 	/* Exit instruction. */			\
983 	INSN_2(JMP, EXIT),			\
984 	/* Jump instructions. */		\
985 	/*   Register based. */			\
986 	INSN_3(JMP, JEQ,  X),			\
987 	INSN_3(JMP, JNE,  X),			\
988 	INSN_3(JMP, JGT,  X),			\
989 	INSN_3(JMP, JLT,  X),			\
990 	INSN_3(JMP, JGE,  X),			\
991 	INSN_3(JMP, JLE,  X),			\
992 	INSN_3(JMP, JSGT, X),			\
993 	INSN_3(JMP, JSLT, X),			\
994 	INSN_3(JMP, JSGE, X),			\
995 	INSN_3(JMP, JSLE, X),			\
996 	INSN_3(JMP, JSET, X),			\
997 	/*   Immediate based. */		\
998 	INSN_3(JMP, JEQ,  K),			\
999 	INSN_3(JMP, JNE,  K),			\
1000 	INSN_3(JMP, JGT,  K),			\
1001 	INSN_3(JMP, JLT,  K),			\
1002 	INSN_3(JMP, JGE,  K),			\
1003 	INSN_3(JMP, JLE,  K),			\
1004 	INSN_3(JMP, JSGT, K),			\
1005 	INSN_3(JMP, JSLT, K),			\
1006 	INSN_3(JMP, JSGE, K),			\
1007 	INSN_3(JMP, JSLE, K),			\
1008 	INSN_3(JMP, JSET, K),			\
1009 	INSN_2(JMP, JA),			\
1010 	/* Store instructions. */		\
1011 	/*   Register based. */			\
1012 	INSN_3(STX, MEM,  B),			\
1013 	INSN_3(STX, MEM,  H),			\
1014 	INSN_3(STX, MEM,  W),			\
1015 	INSN_3(STX, MEM,  DW),			\
1016 	INSN_3(STX, XADD, W),			\
1017 	INSN_3(STX, XADD, DW),			\
1018 	/*   Immediate based. */		\
1019 	INSN_3(ST, MEM, B),			\
1020 	INSN_3(ST, MEM, H),			\
1021 	INSN_3(ST, MEM, W),			\
1022 	INSN_3(ST, MEM, DW),			\
1023 	/* Load instructions. */		\
1024 	/*   Register based. */			\
1025 	INSN_3(LDX, MEM, B),			\
1026 	INSN_3(LDX, MEM, H),			\
1027 	INSN_3(LDX, MEM, W),			\
1028 	INSN_3(LDX, MEM, DW),			\
1029 	/*   Immediate based. */		\
1030 	INSN_3(LD, IMM, DW)
1031 
1032 bool bpf_opcode_in_insntable(u8 code)
1033 {
1034 #define BPF_INSN_2_TBL(x, y)    [BPF_##x | BPF_##y] = true
1035 #define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true
1036 	static const bool public_insntable[256] = {
1037 		[0 ... 255] = false,
1038 		/* Now overwrite non-defaults ... */
1039 		BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL),
1040 		/* UAPI exposed, but rewritten opcodes. cBPF carry-over. */
1041 		[BPF_LD | BPF_ABS | BPF_B] = true,
1042 		[BPF_LD | BPF_ABS | BPF_H] = true,
1043 		[BPF_LD | BPF_ABS | BPF_W] = true,
1044 		[BPF_LD | BPF_IND | BPF_B] = true,
1045 		[BPF_LD | BPF_IND | BPF_H] = true,
1046 		[BPF_LD | BPF_IND | BPF_W] = true,
1047 	};
1048 #undef BPF_INSN_3_TBL
1049 #undef BPF_INSN_2_TBL
1050 	return public_insntable[code];
1051 }
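
/* Illustrative example: an opcode is a single byte composed of class, op
 * and source/size bits, e.g. BPF_ALU64 | BPF_ADD | BPF_X == 0x0f, which
 * the table above marks as valid; byte values that do not decode to any
 * combination listed in BPF_INSN_MAP() stay false.
 */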
1052 
1053 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
1054 /**
1055  *	___bpf_prog_run - run eBPF program on a given context
1056  *	@regs: is the array of MAX_BPF_REG eBPF pseudo-registers
1057  *	@insn: is the array of eBPF instructions
1058  *	@stack: is the eBPF storage stack
 *
1059  * Decode and execute eBPF instructions.
1060  */
1061 static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
1062 {
1063 	u64 tmp;
1064 #define BPF_INSN_2_LBL(x, y)    [BPF_##x | BPF_##y] = &&x##_##y
1065 #define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
1066 	static const void *jumptable[256] = {
1067 		[0 ... 255] = &&default_label,
1068 		/* Now overwrite non-defaults ... */
1069 		BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL),
1070 		/* Non-UAPI available opcodes. */
1071 		[BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
1072 		[BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
1073 	};
1074 #undef BPF_INSN_3_LBL
1075 #undef BPF_INSN_2_LBL
1076 	u32 tail_call_cnt = 0;
1077 
1078 #define CONT	 ({ insn++; goto select_insn; })
1079 #define CONT_JMP ({ insn++; goto select_insn; })
1080 
1081 select_insn:
1082 	goto *jumptable[insn->code];
1083 
1084 	/* ALU */
1085 #define ALU(OPCODE, OP)			\
1086 	ALU64_##OPCODE##_X:		\
1087 		DST = DST OP SRC;	\
1088 		CONT;			\
1089 	ALU_##OPCODE##_X:		\
1090 		DST = (u32) DST OP (u32) SRC;	\
1091 		CONT;			\
1092 	ALU64_##OPCODE##_K:		\
1093 		DST = DST OP IMM;		\
1094 		CONT;			\
1095 	ALU_##OPCODE##_K:		\
1096 		DST = (u32) DST OP (u32) IMM;	\
1097 		CONT;
1098 
1099 	ALU(ADD,  +)
1100 	ALU(SUB,  -)
1101 	ALU(AND,  &)
1102 	ALU(OR,   |)
1103 	ALU(LSH, <<)
1104 	ALU(RSH, >>)
1105 	ALU(XOR,  ^)
1106 	ALU(MUL,  *)
1107 #undef ALU
1108 	ALU_NEG:
1109 		DST = (u32) -DST;
1110 		CONT;
1111 	ALU64_NEG:
1112 		DST = -DST;
1113 		CONT;
1114 	ALU_MOV_X:
1115 		DST = (u32) SRC;
1116 		CONT;
1117 	ALU_MOV_K:
1118 		DST = (u32) IMM;
1119 		CONT;
1120 	ALU64_MOV_X:
1121 		DST = SRC;
1122 		CONT;
1123 	ALU64_MOV_K:
1124 		DST = IMM;
1125 		CONT;
1126 	LD_IMM_DW:
1127 		DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
1128 		insn++;
1129 		CONT;
1130 	ALU64_ARSH_X:
1131 		(*(s64 *) &DST) >>= SRC;
1132 		CONT;
1133 	ALU64_ARSH_K:
1134 		(*(s64 *) &DST) >>= IMM;
1135 		CONT;
1136 	ALU64_MOD_X:
1137 		div64_u64_rem(DST, SRC, &tmp);
1138 		DST = tmp;
1139 		CONT;
1140 	ALU_MOD_X:
1141 		tmp = (u32) DST;
1142 		DST = do_div(tmp, (u32) SRC);
1143 		CONT;
1144 	ALU64_MOD_K:
1145 		div64_u64_rem(DST, IMM, &tmp);
1146 		DST = tmp;
1147 		CONT;
1148 	ALU_MOD_K:
1149 		tmp = (u32) DST;
1150 		DST = do_div(tmp, (u32) IMM);
1151 		CONT;
1152 	ALU64_DIV_X:
1153 		DST = div64_u64(DST, SRC);
1154 		CONT;
1155 	ALU_DIV_X:
1156 		tmp = (u32) DST;
1157 		do_div(tmp, (u32) SRC);
1158 		DST = (u32) tmp;
1159 		CONT;
1160 	ALU64_DIV_K:
1161 		DST = div64_u64(DST, IMM);
1162 		CONT;
1163 	ALU_DIV_K:
1164 		tmp = (u32) DST;
1165 		do_div(tmp, (u32) IMM);
1166 		DST = (u32) tmp;
1167 		CONT;
1168 	ALU_END_TO_BE:
1169 		switch (IMM) {
1170 		case 16:
1171 			DST = (__force u16) cpu_to_be16(DST);
1172 			break;
1173 		case 32:
1174 			DST = (__force u32) cpu_to_be32(DST);
1175 			break;
1176 		case 64:
1177 			DST = (__force u64) cpu_to_be64(DST);
1178 			break;
1179 		}
1180 		CONT;
1181 	ALU_END_TO_LE:
1182 		switch (IMM) {
1183 		case 16:
1184 			DST = (__force u16) cpu_to_le16(DST);
1185 			break;
1186 		case 32:
1187 			DST = (__force u32) cpu_to_le32(DST);
1188 			break;
1189 		case 64:
1190 			DST = (__force u64) cpu_to_le64(DST);
1191 			break;
1192 		}
1193 		CONT;
1194 
1195 	/* CALL */
1196 	JMP_CALL:
1197 		/* Function call scratches BPF_R1-BPF_R5 registers,
1198 		 * preserves BPF_R6-BPF_R9, and stores return value
1199 		 * into BPF_R0.
1200 		 */
1201 		BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
1202 						       BPF_R4, BPF_R5);
1203 		CONT;
1204 
1205 	JMP_CALL_ARGS:
1206 		BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2,
1207 							    BPF_R3, BPF_R4,
1208 							    BPF_R5,
1209 							    insn + insn->off + 1);
1210 		CONT;
1211 
1212 	JMP_TAIL_CALL: {
1213 		struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
1214 		struct bpf_array *array = container_of(map, struct bpf_array, map);
1215 		struct bpf_prog *prog;
1216 		u32 index = BPF_R3;
1217 
1218 		if (unlikely(index >= array->map.max_entries))
1219 			goto out;
1220 		if (unlikely(tail_call_cnt > MAX_TAIL_CALL_CNT))
1221 			goto out;
1222 
1223 		tail_call_cnt++;
1224 
1225 		prog = READ_ONCE(array->ptrs[index]);
1226 		if (!prog)
1227 			goto out;
1228 
1229 		/* ARG1 at this point is guaranteed to point to CTX from
1230 		 * the verifier side due to the fact that the tail call is
1231 		 * handled like a helper, that is, bpf_tail_call_proto,
1232 		 * where arg1_type is ARG_PTR_TO_CTX.
1233 		 */
1234 		insn = prog->insnsi;
1235 		goto select_insn;
1236 out:
1237 		CONT;
1238 	}
1239 	/* JMP */
1240 	JMP_JA:
1241 		insn += insn->off;
1242 		CONT;
1243 	JMP_JEQ_X:
1244 		if (DST == SRC) {
1245 			insn += insn->off;
1246 			CONT_JMP;
1247 		}
1248 		CONT;
1249 	JMP_JEQ_K:
1250 		if (DST == IMM) {
1251 			insn += insn->off;
1252 			CONT_JMP;
1253 		}
1254 		CONT;
1255 	JMP_JNE_X:
1256 		if (DST != SRC) {
1257 			insn += insn->off;
1258 			CONT_JMP;
1259 		}
1260 		CONT;
1261 	JMP_JNE_K:
1262 		if (DST != IMM) {
1263 			insn += insn->off;
1264 			CONT_JMP;
1265 		}
1266 		CONT;
1267 	JMP_JGT_X:
1268 		if (DST > SRC) {
1269 			insn += insn->off;
1270 			CONT_JMP;
1271 		}
1272 		CONT;
1273 	JMP_JGT_K:
1274 		if (DST > IMM) {
1275 			insn += insn->off;
1276 			CONT_JMP;
1277 		}
1278 		CONT;
1279 	JMP_JLT_X:
1280 		if (DST < SRC) {
1281 			insn += insn->off;
1282 			CONT_JMP;
1283 		}
1284 		CONT;
1285 	JMP_JLT_K:
1286 		if (DST < IMM) {
1287 			insn += insn->off;
1288 			CONT_JMP;
1289 		}
1290 		CONT;
1291 	JMP_JGE_X:
1292 		if (DST >= SRC) {
1293 			insn += insn->off;
1294 			CONT_JMP;
1295 		}
1296 		CONT;
1297 	JMP_JGE_K:
1298 		if (DST >= IMM) {
1299 			insn += insn->off;
1300 			CONT_JMP;
1301 		}
1302 		CONT;
1303 	JMP_JLE_X:
1304 		if (DST <= SRC) {
1305 			insn += insn->off;
1306 			CONT_JMP;
1307 		}
1308 		CONT;
1309 	JMP_JLE_K:
1310 		if (DST <= IMM) {
1311 			insn += insn->off;
1312 			CONT_JMP;
1313 		}
1314 		CONT;
1315 	JMP_JSGT_X:
1316 		if (((s64) DST) > ((s64) SRC)) {
1317 			insn += insn->off;
1318 			CONT_JMP;
1319 		}
1320 		CONT;
1321 	JMP_JSGT_K:
1322 		if (((s64) DST) > ((s64) IMM)) {
1323 			insn += insn->off;
1324 			CONT_JMP;
1325 		}
1326 		CONT;
1327 	JMP_JSLT_X:
1328 		if (((s64) DST) < ((s64) SRC)) {
1329 			insn += insn->off;
1330 			CONT_JMP;
1331 		}
1332 		CONT;
1333 	JMP_JSLT_K:
1334 		if (((s64) DST) < ((s64) IMM)) {
1335 			insn += insn->off;
1336 			CONT_JMP;
1337 		}
1338 		CONT;
1339 	JMP_JSGE_X:
1340 		if (((s64) DST) >= ((s64) SRC)) {
1341 			insn += insn->off;
1342 			CONT_JMP;
1343 		}
1344 		CONT;
1345 	JMP_JSGE_K:
1346 		if (((s64) DST) >= ((s64) IMM)) {
1347 			insn += insn->off;
1348 			CONT_JMP;
1349 		}
1350 		CONT;
1351 	JMP_JSLE_X:
1352 		if (((s64) DST) <= ((s64) SRC)) {
1353 			insn += insn->off;
1354 			CONT_JMP;
1355 		}
1356 		CONT;
1357 	JMP_JSLE_K:
1358 		if (((s64) DST) <= ((s64) IMM)) {
1359 			insn += insn->off;
1360 			CONT_JMP;
1361 		}
1362 		CONT;
1363 	JMP_JSET_X:
1364 		if (DST & SRC) {
1365 			insn += insn->off;
1366 			CONT_JMP;
1367 		}
1368 		CONT;
1369 	JMP_JSET_K:
1370 		if (DST & IMM) {
1371 			insn += insn->off;
1372 			CONT_JMP;
1373 		}
1374 		CONT;
1375 	JMP_EXIT:
1376 		return BPF_R0;
1377 
1378 	/* STX and ST and LDX*/
1379 #define LDST(SIZEOP, SIZE)						\
1380 	STX_MEM_##SIZEOP:						\
1381 		*(SIZE *)(unsigned long) (DST + insn->off) = SRC;	\
1382 		CONT;							\
1383 	ST_MEM_##SIZEOP:						\
1384 		*(SIZE *)(unsigned long) (DST + insn->off) = IMM;	\
1385 		CONT;							\
1386 	LDX_MEM_##SIZEOP:						\
1387 		DST = *(SIZE *)(unsigned long) (SRC + insn->off);	\
1388 		CONT;
1389 
1390 	LDST(B,   u8)
1391 	LDST(H,  u16)
1392 	LDST(W,  u32)
1393 	LDST(DW, u64)
1394 #undef LDST
1395 	STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
1396 		atomic_add((u32) SRC, (atomic_t *)(unsigned long)
1397 			   (DST + insn->off));
1398 		CONT;
1399 	STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
1400 		atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
1401 			     (DST + insn->off));
1402 		CONT;
1403 
1404 	default_label:
1405 		/* If we ever reach this, we have a bug somewhere. Die hard here
1406 		 * instead of just returning 0; we could be somewhere in a subprog,
1407 		 * so execution could continue otherwise, which we do /not/ want.
1408 		 *
1409 		 * Note, verifier whitelists all opcodes in bpf_opcode_in_insntable().
1410 		 */
1411 		pr_warn("BPF interpreter: unknown opcode %02x\n", insn->code);
1412 		BUG_ON(1);
1413 		return 0;
1414 }
1415 STACK_FRAME_NON_STANDARD(___bpf_prog_run); /* jump table */
1416 
1417 #define PROG_NAME(stack_size) __bpf_prog_run##stack_size
1418 #define DEFINE_BPF_PROG_RUN(stack_size) \
1419 static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
1420 { \
1421 	u64 stack[stack_size / sizeof(u64)]; \
1422 	u64 regs[MAX_BPF_REG]; \
1423 \
1424 	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
1425 	ARG1 = (u64) (unsigned long) ctx; \
1426 	return ___bpf_prog_run(regs, insn, stack); \
1427 }
1428 
1429 #define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size
1430 #define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \
1431 static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
1432 				      const struct bpf_insn *insn) \
1433 { \
1434 	u64 stack[stack_size / sizeof(u64)]; \
1435 	u64 regs[MAX_BPF_REG]; \
1436 \
1437 	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
1438 	BPF_R1 = r1; \
1439 	BPF_R2 = r2; \
1440 	BPF_R3 = r3; \
1441 	BPF_R4 = r4; \
1442 	BPF_R5 = r5; \
1443 	return ___bpf_prog_run(regs, insn, stack); \
1444 }
1445 
1446 #define EVAL1(FN, X) FN(X)
1447 #define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
1448 #define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
1449 #define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y)
1450 #define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y)
1451 #define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y)
1452 
1453 EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
1454 EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
1455 EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);
1456 
1457 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192);
1458 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384);
1459 EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);
1460 
1461 #define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),
1462 
1463 static unsigned int (*interpreters[])(const void *ctx,
1464 				      const struct bpf_insn *insn) = {
1465 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
1466 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
1467 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
1468 };
1469 #undef PROG_NAME_LIST
1470 #define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size),
1471 static u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5,
1472 				  const struct bpf_insn *insn) = {
1473 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
1474 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
1475 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
1476 };
1477 #undef PROG_NAME_LIST
1478 
1479 void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
1480 {
1481 	stack_depth = max_t(u32, stack_depth, 1);
1482 	insn->off = (s16) insn->imm;
1483 	insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] -
1484 		__bpf_call_base_args;
1485 	insn->code = BPF_JMP | BPF_CALL_ARGS;
1486 }
1487 
1488 #else
1489 static unsigned int __bpf_prog_ret0_warn(const void *ctx,
1490 					 const struct bpf_insn *insn)
1491 {
1492 	/* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
1493 	 * is not working properly, so warn about it!
1494 	 */
1495 	WARN_ON_ONCE(1);
1496 	return 0;
1497 }
1498 #endif
1499 
1500 bool bpf_prog_array_compatible(struct bpf_array *array,
1501 			       const struct bpf_prog *fp)
1502 {
1503 	if (fp->kprobe_override)
1504 		return false;
1505 
1506 	if (!array->owner_prog_type) {
1507 		/* There's no owner yet where we could check for
1508 		 * compatibility.
1509 		 */
1510 		array->owner_prog_type = fp->type;
1511 		array->owner_jited = fp->jited;
1512 
1513 		return true;
1514 	}
1515 
1516 	return array->owner_prog_type == fp->type &&
1517 	       array->owner_jited == fp->jited;
1518 }
1519 
1520 static int bpf_check_tail_call(const struct bpf_prog *fp)
1521 {
1522 	struct bpf_prog_aux *aux = fp->aux;
1523 	int i;
1524 
1525 	for (i = 0; i < aux->used_map_cnt; i++) {
1526 		struct bpf_map *map = aux->used_maps[i];
1527 		struct bpf_array *array;
1528 
1529 		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
1530 			continue;
1531 
1532 		array = container_of(map, struct bpf_array, map);
1533 		if (!bpf_prog_array_compatible(array, fp))
1534 			return -EINVAL;
1535 	}
1536 
1537 	return 0;
1538 }
1539 
1540 static void bpf_prog_select_func(struct bpf_prog *fp)
1541 {
1542 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
1543 	u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
1544 
1545 	fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
1546 #else
1547 	fp->bpf_func = __bpf_prog_ret0_warn;
1548 #endif
1549 }
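
/* Worked example (illustrative): a program with aux->stack_depth == 65
 * rounds up to 96 bytes of stack and therefore selects interpreters[2],
 * i.e. __bpf_prog_run96() as generated by the EVAL*() expansions above.
 */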
1550 
1551 /**
1552  *	bpf_prog_select_runtime - select exec runtime for BPF program
1553  *	@fp: bpf_prog populated with internal BPF program
1554  *	@err: pointer to error variable
1555  *
1556  * Try to JIT eBPF program, if JIT is not available, use interpreter.
1557  * The BPF program will be executed via BPF_PROG_RUN() macro.
1558  */
1559 struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
1560 {
1561 	/* In case of BPF-to-BPF calls, the verifier did all the prep
1562 	 * work with regard to JITing, etc.
1563 	 */
1564 	if (fp->bpf_func)
1565 		goto finalize;
1566 
1567 	bpf_prog_select_func(fp);
1568 
1569 	/* eBPF JITs can rewrite the program in case constant
1570 	 * blinding is active. However, in case of error during
1571 	 * blinding, bpf_int_jit_compile() must always return a
1572 	 * valid program, which in this case would simply not
1573 	 * be JITed, but fall back to the interpreter instead.
1574 	 */
1575 	if (!bpf_prog_is_dev_bound(fp->aux)) {
1576 		fp = bpf_int_jit_compile(fp);
1577 #ifdef CONFIG_BPF_JIT_ALWAYS_ON
1578 		if (!fp->jited) {
1579 			*err = -ENOTSUPP;
1580 			return fp;
1581 		}
1582 #endif
1583 	} else {
1584 		*err = bpf_prog_offload_compile(fp);
1585 		if (*err)
1586 			return fp;
1587 	}
1588 
1589 finalize:
1590 	bpf_prog_lock_ro(fp);
1591 
1592 	/* The tail call compatibility check can only be done at
1593 	 * this late stage as we need to determine whether we deal
1594 	 * with JITed or non-JITed program concatenations, and not
1595 	 * all eBPF JITs might immediately support all features.
1596 	 */
1597 	*err = bpf_check_tail_call(fp);
1598 
1599 	return fp;
1600 }
1601 EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
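
/* Illustrative sketch (an assumption modelled on lib/test_bpf.c-style
 * usage, not code from this file): building a trivial program and running
 * it through the runtime selected above.
 *
 *	struct bpf_insn insns[] = {
 *		BPF_MOV64_IMM(BPF_REG_0, 42),
 *		BPF_EXIT_INSN(),
 *	};
 *	struct bpf_prog *fp;
 *	int err = 0;
 *
 *	fp = bpf_prog_alloc(bpf_prog_size(ARRAY_SIZE(insns)), 0);
 *	if (!fp)
 *		return -ENOMEM;
 *	fp->len = ARRAY_SIZE(insns);
 *	memcpy(fp->insnsi, insns, sizeof(insns));
 *	fp = bpf_prog_select_runtime(fp, &err);
 *	...on success, BPF_PROG_RUN(fp, ctx) now returns 42 via either the
 *	   JIT image or one of the interpreters, and bpf_prog_free(fp)
 *	   releases it again...
 */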
1602 
1603 static unsigned int __bpf_prog_ret1(const void *ctx,
1604 				    const struct bpf_insn *insn)
1605 {
1606 	return 1;
1607 }
1608 
1609 static struct bpf_prog_dummy {
1610 	struct bpf_prog prog;
1611 } dummy_bpf_prog = {
1612 	.prog = {
1613 		.bpf_func = __bpf_prog_ret1,
1614 	},
1615 };
1616 
1617 /* To avoid allocating an empty bpf_prog_array for cgroups that don't
1618  * have any bpf program attached, use one global 'empty_prog_array'.
1619  * It will not be modified by the caller of bpf_prog_array_alloc()
1620  * (since the caller requested prog_cnt == 0); that pointer should be
1621  * 'freed' by bpf_prog_array_free().
1622  */
1623 static struct {
1624 	struct bpf_prog_array hdr;
1625 	struct bpf_prog *null_prog;
1626 } empty_prog_array = {
1627 	.null_prog = NULL,
1628 };
1629 
1630 struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
1631 {
1632 	if (prog_cnt)
1633 		return kzalloc(sizeof(struct bpf_prog_array) +
1634 			       sizeof(struct bpf_prog_array_item) *
1635 			       (prog_cnt + 1),
1636 			       flags);
1637 
1638 	return &empty_prog_array.hdr;
1639 }
1640 
1641 void bpf_prog_array_free(struct bpf_prog_array __rcu *progs)
1642 {
1643 	if (!progs ||
1644 	    progs == (struct bpf_prog_array __rcu *)&empty_prog_array.hdr)
1645 		return;
1646 	kfree_rcu(progs, rcu);
1647 }
1648 
1649 int bpf_prog_array_length(struct bpf_prog_array __rcu *array)
1650 {
1651 	struct bpf_prog_array_item *item;
1652 	u32 cnt = 0;
1653 
1654 	rcu_read_lock();
1655 	item = rcu_dereference(array)->items;
1656 	for (; item->prog; item++)
1657 		if (item->prog != &dummy_bpf_prog.prog)
1658 			cnt++;
1659 	rcu_read_unlock();
1660 	return cnt;
1661 }
1662 
1663 
1664 static bool bpf_prog_array_copy_core(struct bpf_prog_array __rcu *array,
1665 				     u32 *prog_ids,
1666 				     u32 request_cnt)
1667 {
1668 	struct bpf_prog_array_item *item;
1669 	int i = 0;
1670 
1671 	item = rcu_dereference_check(array, 1)->items;
1672 	for (; item->prog; item++) {
1673 		if (item->prog == &dummy_bpf_prog.prog)
1674 			continue;
1675 		prog_ids[i] = item->prog->aux->id;
1676 		if (++i == request_cnt) {
1677 			item++;
1678 			break;
1679 		}
1680 	}
1681 
1682 	return !!(item->prog);
1683 }
1684 
1685 int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *array,
1686 				__u32 __user *prog_ids, u32 cnt)
1687 {
1688 	unsigned long err = 0;
1689 	bool nospc;
1690 	u32 *ids;
1691 
1692 	/* users of this function are doing:
1693 	 * cnt = bpf_prog_array_length();
1694 	 * if (cnt > 0)
1695 	 *     bpf_prog_array_copy_to_user(..., cnt);
1696 	 * so the kcalloc() below doesn't need an extra cnt > 0 check, but
1697 	 * bpf_prog_array_length() releases the RCU lock and the prog array
1698 	 * could have been swapped with an empty or larger array since then,
1699 	 * so always copy 'cnt' prog_ids to user space.
1700 	 * In a rare race the user will see zero prog_ids.
1701 	 */
1702 	ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
1703 	if (!ids)
1704 		return -ENOMEM;
1705 	rcu_read_lock();
1706 	nospc = bpf_prog_array_copy_core(array, ids, cnt);
1707 	rcu_read_unlock();
1708 	err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
1709 	kfree(ids);
1710 	if (err)
1711 		return -EFAULT;
1712 	if (nospc)
1713 		return -ENOSPC;
1714 	return 0;
1715 }
1716 
1717 void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *array,
1718 				struct bpf_prog *old_prog)
1719 {
1720 	struct bpf_prog_array_item *item = array->items;
1721 
1722 	for (; item->prog; item++)
1723 		if (item->prog == old_prog) {
1724 			WRITE_ONCE(item->prog, &dummy_bpf_prog.prog);
1725 			break;
1726 		}
1727 }
1728 
1729 int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
1730 			struct bpf_prog *exclude_prog,
1731 			struct bpf_prog *include_prog,
1732 			struct bpf_prog_array **new_array)
1733 {
1734 	int new_prog_cnt, carry_prog_cnt = 0;
1735 	struct bpf_prog_array_item *existing;
1736 	struct bpf_prog_array *array;
1737 	bool found_exclude = false;
1738 	int new_prog_idx = 0;
1739 
1740 	/* Figure out how many existing progs we need to carry over to
1741 	 * the new array.
1742 	 */
1743 	if (old_array) {
1744 		existing = old_array->items;
1745 		for (; existing->prog; existing++) {
1746 			if (existing->prog == exclude_prog) {
1747 				found_exclude = true;
1748 				continue;
1749 			}
1750 			if (existing->prog != &dummy_bpf_prog.prog)
1751 				carry_prog_cnt++;
1752 			if (existing->prog == include_prog)
1753 				return -EEXIST;
1754 		}
1755 	}
1756 
1757 	if (exclude_prog && !found_exclude)
1758 		return -ENOENT;
1759 
1760 	/* How many progs (not NULL) will be in the new array? */
1761 	new_prog_cnt = carry_prog_cnt;
1762 	if (include_prog)
1763 		new_prog_cnt += 1;
1764 
1765 	/* Do we have any prog (not NULL) in the new array? */
1766 	if (!new_prog_cnt) {
1767 		*new_array = NULL;
1768 		return 0;
1769 	}
1770 
1771 	/* +1 as the end of prog_array is marked with NULL */
1772 	array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL);
1773 	if (!array)
1774 		return -ENOMEM;
1775 
1776 	/* Fill in the new prog array */
1777 	if (carry_prog_cnt) {
1778 		existing = old_array->items;
1779 		for (; existing->prog; existing++)
1780 			if (existing->prog != exclude_prog &&
1781 			    existing->prog != &dummy_bpf_prog.prog) {
1782 				array->items[new_prog_idx++].prog =
1783 					existing->prog;
1784 			}
1785 	}
1786 	if (include_prog)
1787 		array->items[new_prog_idx++].prog = include_prog;
1788 	array->items[new_prog_idx].prog = NULL;
1789 	*new_array = array;
1790 	return 0;
1791 }
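
/* Illustrative sketch (an assumption mirroring attach paths such as
 * cgroup/perf; 'array_ptr' is a hypothetical RCU-protected slot, not code
 * from this file): swapping in an array that additionally contains 'prog'.
 *
 *	struct bpf_prog_array *new_array;
 *	int err;
 *
 *	err = bpf_prog_array_copy(old_array, NULL, prog, &new_array);
 *	if (err < 0)
 *		return err;
 *	rcu_assign_pointer(*array_ptr, new_array);
 *	bpf_prog_array_free(old_array);
 */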
1792 
1793 int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array,
1794 			     u32 *prog_ids, u32 request_cnt,
1795 			     u32 *prog_cnt)
1796 {
1797 	u32 cnt = 0;
1798 
1799 	if (array)
1800 		cnt = bpf_prog_array_length(array);
1801 
1802 	*prog_cnt = cnt;
1803 
1804 	/* return early if user requested only program count or nothing to copy */
1805 	if (!request_cnt || !cnt)
1806 		return 0;
1807 
1808 	/* this function is called under trace/bpf_trace.c: bpf_event_mutex */
1809 	return bpf_prog_array_copy_core(array, prog_ids, request_cnt) ? -ENOSPC
1810 								     : 0;
1811 }
1812 
1813 static void bpf_prog_free_deferred(struct work_struct *work)
1814 {
1815 	struct bpf_prog_aux *aux;
1816 	int i;
1817 
1818 	aux = container_of(work, struct bpf_prog_aux, work);
1819 	if (bpf_prog_is_dev_bound(aux))
1820 		bpf_prog_offload_destroy(aux->prog);
1821 #ifdef CONFIG_PERF_EVENTS
1822 	if (aux->prog->has_callchain_buf)
1823 		put_callchain_buffers();
1824 #endif
1825 	for (i = 0; i < aux->func_cnt; i++)
1826 		bpf_jit_free(aux->func[i]);
1827 	if (aux->func_cnt) {
1828 		kfree(aux->func);
1829 		bpf_prog_unlock_free(aux->prog);
1830 	} else {
1831 		bpf_jit_free(aux->prog);
1832 	}
1833 }
1834 
1835 /* Free internal BPF program */
1836 void bpf_prog_free(struct bpf_prog *fp)
1837 {
1838 	struct bpf_prog_aux *aux = fp->aux;
1839 
1840 	INIT_WORK(&aux->work, bpf_prog_free_deferred);
1841 	schedule_work(&aux->work);
1842 }
1843 EXPORT_SYMBOL_GPL(bpf_prog_free);
1844 
1845 /* RNG for unprivileged user space with separated state from prandom_u32(). */
1846 static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);
1847 
1848 void bpf_user_rnd_init_once(void)
1849 {
1850 	prandom_init_once(&bpf_user_rnd_state);
1851 }
1852 
1853 BPF_CALL_0(bpf_user_rnd_u32)
1854 {
1855 	/* Should someone ever have the rather unwise idea to use some
1856 	 * of the registers passed into this function, then note that
1857 	 * this function is called from native eBPF and classic-to-eBPF
1858 	 * transformations. Register assignments from both sides are
1859 	 * different, f.e. classic always sets fn(ctx, A, X) here.
1860 	 */
1861 	struct rnd_state *state;
1862 	u32 res;
1863 
1864 	state = &get_cpu_var(bpf_user_rnd_state);
1865 	res = prandom_u32_state(state);
1866 	put_cpu_var(bpf_user_rnd_state);
1867 
1868 	return res;
1869 }
1870 
1871 /* Weak definitions of helper functions in case we don't have bpf syscall. */
1872 const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
1873 const struct bpf_func_proto bpf_map_update_elem_proto __weak;
1874 const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
1875 const struct bpf_func_proto bpf_map_push_elem_proto __weak;
1876 const struct bpf_func_proto bpf_map_pop_elem_proto __weak;
1877 const struct bpf_func_proto bpf_map_peek_elem_proto __weak;
1878 
1879 const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
1880 const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
1881 const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
1882 const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
1883 
1884 const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
1885 const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
1886 const struct bpf_func_proto bpf_get_current_comm_proto __weak;
1887 const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak;
1888 const struct bpf_func_proto bpf_get_local_storage_proto __weak;
1889 
1890 const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
1891 {
1892 	return NULL;
1893 }
1894 
1895 u64 __weak
1896 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
1897 		 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
1898 {
1899 	return -ENOTSUPP;
1900 }
1901 EXPORT_SYMBOL_GPL(bpf_event_output);
1902 
1903 /* Always built-in helper functions. */
1904 const struct bpf_func_proto bpf_tail_call_proto = {
1905 	.func		= NULL,
1906 	.gpl_only	= false,
1907 	.ret_type	= RET_VOID,
1908 	.arg1_type	= ARG_PTR_TO_CTX,
1909 	.arg2_type	= ARG_CONST_MAP_PTR,
1910 	.arg3_type	= ARG_ANYTHING,
1911 };
1912 
1913 /* Stub for JITs that only support cBPF. eBPF programs are interpreted.
1914  * It is encouraged to implement bpf_int_jit_compile() instead, so that
1915  * eBPF and implicitly also cBPF can get JITed!
1916  */
1917 struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
1918 {
1919 	return prog;
1920 }
1921 
1922 /* Stub for JITs that support eBPF. All cBPF code gets transformed into
1923  * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
1924  */
1925 void __weak bpf_jit_compile(struct bpf_prog *prog)
1926 {
1927 }
1928 
1929 bool __weak bpf_helper_changes_pkt_data(void *func)
1930 {
1931 	return false;
1932 }
1933 
1934 /* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
1935  * skb_copy_bits(), so provide a weak definition of it for NET-less config.
1936  */
1937 int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
1938 			 int len)
1939 {
1940 	return -EFAULT;
1941 }
1942 
1943 /* All definitions of tracepoints related to BPF. */
1944 #define CREATE_TRACE_POINTS
1945 #include <linux/bpf_trace.h>
1946 
1947 EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);
1948