xref: /openbmc/linux/kernel/bpf/core.c (revision ff4a7481c3898ffc3cc271d6aca431d190c37247)
1 /*
2  * Linux Socket Filter - Kernel level socket filtering
3  *
4  * Based on the design of the Berkeley Packet Filter. The new
5  * internal format has been designed by PLUMgrid:
6  *
7  *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
8  *
9  * Authors:
10  *
11  *	Jay Schulist <jschlst@samba.org>
12  *	Alexei Starovoitov <ast@plumgrid.com>
13  *	Daniel Borkmann <dborkman@redhat.com>
14  *
15  * This program is free software; you can redistribute it and/or
16  * modify it under the terms of the GNU General Public License
17  * as published by the Free Software Foundation; either version
18  * 2 of the License, or (at your option) any later version.
19  *
20  * Andi Kleen - Fix a few bad bugs and races.
21  * Kris Katterjohn - Added many additional checks in bpf_check_classic()
22  */
23 
24 #include <uapi/linux/btf.h>
25 #include <linux/filter.h>
26 #include <linux/skbuff.h>
27 #include <linux/vmalloc.h>
28 #include <linux/random.h>
29 #include <linux/moduleloader.h>
30 #include <linux/bpf.h>
31 #include <linux/btf.h>
32 #include <linux/frame.h>
33 #include <linux/rbtree_latch.h>
34 #include <linux/kallsyms.h>
35 #include <linux/rcupdate.h>
36 #include <linux/perf_event.h>
37 
38 #include <asm/unaligned.h>
39 
40 /* Registers */
41 #define BPF_R0	regs[BPF_REG_0]
42 #define BPF_R1	regs[BPF_REG_1]
43 #define BPF_R2	regs[BPF_REG_2]
44 #define BPF_R3	regs[BPF_REG_3]
45 #define BPF_R4	regs[BPF_REG_4]
46 #define BPF_R5	regs[BPF_REG_5]
47 #define BPF_R6	regs[BPF_REG_6]
48 #define BPF_R7	regs[BPF_REG_7]
49 #define BPF_R8	regs[BPF_REG_8]
50 #define BPF_R9	regs[BPF_REG_9]
51 #define BPF_R10	regs[BPF_REG_10]
52 
53 /* Named registers */
54 #define DST	regs[insn->dst_reg]
55 #define SRC	regs[insn->src_reg]
56 #define FP	regs[BPF_REG_FP]
57 #define ARG1	regs[BPF_REG_ARG1]
58 #define CTX	regs[BPF_REG_CTX]
59 #define IMM	insn->imm
60 
61 /* No hurry in this branch
62  *
63  * Exported for the bpf jit load helper.
64  */
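/* Resolve a classic BPF negative-offset load (k encodes an offset from
 * SKF_NET_OFF or SKF_LL_OFF) into a pointer within the skb's linear
 * data, or return NULL if the requested bytes are not available there.
 */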
65 void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
66 {
67 	u8 *ptr = NULL;
68 
69 	if (k >= SKF_NET_OFF)
70 		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
71 	else if (k >= SKF_LL_OFF)
72 		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
73 
74 	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
75 		return ptr;
76 
77 	return NULL;
78 }
79 
80 struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
81 {
82 	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
83 	struct bpf_prog_aux *aux;
84 	struct bpf_prog *fp;
85 
86 	size = round_up(size, PAGE_SIZE);
87 	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
88 	if (fp == NULL)
89 		return NULL;
90 
91 	aux = kzalloc(sizeof(*aux), GFP_KERNEL | gfp_extra_flags);
92 	if (aux == NULL) {
93 		vfree(fp);
94 		return NULL;
95 	}
96 
97 	fp->pages = size / PAGE_SIZE;
98 	fp->aux = aux;
99 	fp->aux->prog = fp;
100 	fp->jit_requested = ebpf_jit_enabled();
101 
102 	INIT_LIST_HEAD_RCU(&fp->aux->ksym_lnode);
103 
104 	return fp;
105 }
106 EXPORT_SYMBOL_GPL(bpf_prog_alloc);
107 
108 int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog)
109 {
110 	if (!prog->aux->nr_linfo || !prog->jit_requested)
111 		return 0;
112 
113 	prog->aux->jited_linfo = kcalloc(prog->aux->nr_linfo,
114 					 sizeof(*prog->aux->jited_linfo),
115 					 GFP_KERNEL | __GFP_NOWARN);
116 	if (!prog->aux->jited_linfo)
117 		return -ENOMEM;
118 
119 	return 0;
120 }
121 
122 void bpf_prog_free_jited_linfo(struct bpf_prog *prog)
123 {
124 	kfree(prog->aux->jited_linfo);
125 	prog->aux->jited_linfo = NULL;
126 }
127 
128 void bpf_prog_free_unused_jited_linfo(struct bpf_prog *prog)
129 {
130 	if (prog->aux->jited_linfo && !prog->aux->jited_linfo[0])
131 		bpf_prog_free_jited_linfo(prog);
132 }
133 
134 /* The jit engine is responsible for providing an array
135  * for the insn_off to jited_off mapping (insn_to_jit_off).
136  *
137  * The index into this array is the insn_off.  Hence, the insn_off
138  * here is relative to the prog itself instead of the main prog.
139  * This array has one entry for each xlated bpf insn.
140  *
141  * jited_off is the byte off to the last byte of the jited insn.
142  *
143  * Hence, with
144  * insn_start:
145  *      The first bpf insn off of the prog.  The insn off
146  *      here is relative to the main prog.
147  *      e.g. if prog is a subprog, insn_start > 0
148  * linfo_idx:
149  *      The prog's idx to prog->aux->linfo and jited_linfo
150  *
151  * jited_linfo[linfo_idx] = prog->bpf_func
152  *
153  * For i > linfo_idx,
154  *
155  * jited_linfo[i] = prog->bpf_func +
156  *	insn_to_jit_off[linfo[i].insn_off - insn_start - 1]
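 *
 * For example (values assumed purely for illustration): with
 * insn_start = 10 and linfo[1].insn_off = 12, jited_linfo[1] becomes
 * prog->bpf_func + insn_to_jit_off[12 - 10 - 1], i.e. the jited address
 * right after the end of xlated insn 11, which is where the code for
 * insn 12 starts.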
157  */
158 void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
159 			       const u32 *insn_to_jit_off)
160 {
161 	u32 linfo_idx, insn_start, insn_end, nr_linfo, i;
162 	const struct bpf_line_info *linfo;
163 	void **jited_linfo;
164 
165 	if (!prog->aux->jited_linfo)
166 		/* Userspace did not provide linfo */
167 		return;
168 
169 	linfo_idx = prog->aux->linfo_idx;
170 	linfo = &prog->aux->linfo[linfo_idx];
171 	insn_start = linfo[0].insn_off;
172 	insn_end = insn_start + prog->len;
173 
174 	jited_linfo = &prog->aux->jited_linfo[linfo_idx];
175 	jited_linfo[0] = prog->bpf_func;
176 
177 	nr_linfo = prog->aux->nr_linfo - linfo_idx;
178 
179 	for (i = 1; i < nr_linfo && linfo[i].insn_off < insn_end; i++)
180 		/* The verifier ensures that linfo[i].insn_off is
181 		 * strictly increasing
182 		 */
183 		jited_linfo[i] = prog->bpf_func +
184 			insn_to_jit_off[linfo[i].insn_off - insn_start - 1];
185 }
186 
187 void bpf_prog_free_linfo(struct bpf_prog *prog)
188 {
189 	bpf_prog_free_jited_linfo(prog);
190 	kvfree(prog->aux->linfo);
191 }
192 
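/* Grow the program image to 'size' (rounded up to pages). If the old
 * image is already large enough it is returned as-is; otherwise the
 * insns are copied into a bigger allocation and fp_old, minus its aux
 * (which the new prog inherits), is freed.
 */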
193 struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
194 				  gfp_t gfp_extra_flags)
195 {
196 	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
197 	struct bpf_prog *fp;
198 	u32 pages, delta;
199 	int ret;
200 
201 	BUG_ON(fp_old == NULL);
202 
203 	size = round_up(size, PAGE_SIZE);
204 	pages = size / PAGE_SIZE;
205 	if (pages <= fp_old->pages)
206 		return fp_old;
207 
208 	delta = pages - fp_old->pages;
209 	ret = __bpf_prog_charge(fp_old->aux->user, delta);
210 	if (ret)
211 		return NULL;
212 
213 	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
214 	if (fp == NULL) {
215 		__bpf_prog_uncharge(fp_old->aux->user, delta);
216 	} else {
217 		memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
218 		fp->pages = pages;
219 		fp->aux->prog = fp;
220 
221 		/* We keep fp->aux from fp_old around in the new
222 		 * reallocated structure.
223 		 */
224 		fp_old->aux = NULL;
225 		__bpf_prog_free(fp_old);
226 	}
227 
228 	return fp;
229 }
230 
231 void __bpf_prog_free(struct bpf_prog *fp)
232 {
233 	kfree(fp->aux);
234 	vfree(fp);
235 }
236 
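/* Compute fp->tag as a truncated SHA-1 digest over the instruction
 * image, with map fd immediates masked out so that the tag stays stable
 * across loads.  This is the tag user space sees, e.g. in the prog's
 * fdinfo.
 */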
237 int bpf_prog_calc_tag(struct bpf_prog *fp)
238 {
239 	const u32 bits_offset = SHA_MESSAGE_BYTES - sizeof(__be64);
240 	u32 raw_size = bpf_prog_tag_scratch_size(fp);
241 	u32 digest[SHA_DIGEST_WORDS];
242 	u32 ws[SHA_WORKSPACE_WORDS];
243 	u32 i, bsize, psize, blocks;
244 	struct bpf_insn *dst;
245 	bool was_ld_map;
246 	u8 *raw, *todo;
247 	__be32 *result;
248 	__be64 *bits;
249 
250 	raw = vmalloc(raw_size);
251 	if (!raw)
252 		return -ENOMEM;
253 
254 	sha_init(digest);
255 	memset(ws, 0, sizeof(ws));
256 
257 	/* We need to take out the map fds for the digest calculation
258 	 * since they are unstable from the user space side.
259 	 */
260 	dst = (void *)raw;
261 	for (i = 0, was_ld_map = false; i < fp->len; i++) {
262 		dst[i] = fp->insnsi[i];
263 		if (!was_ld_map &&
264 		    dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
265 		    dst[i].src_reg == BPF_PSEUDO_MAP_FD) {
266 			was_ld_map = true;
267 			dst[i].imm = 0;
268 		} else if (was_ld_map &&
269 			   dst[i].code == 0 &&
270 			   dst[i].dst_reg == 0 &&
271 			   dst[i].src_reg == 0 &&
272 			   dst[i].off == 0) {
273 			was_ld_map = false;
274 			dst[i].imm = 0;
275 		} else {
276 			was_ld_map = false;
277 		}
278 	}
279 
280 	psize = bpf_prog_insn_size(fp);
281 	memset(&raw[psize], 0, raw_size - psize);
282 	raw[psize++] = 0x80;
283 
284 	bsize  = round_up(psize, SHA_MESSAGE_BYTES);
285 	blocks = bsize / SHA_MESSAGE_BYTES;
286 	todo   = raw;
287 	if (bsize - psize >= sizeof(__be64)) {
288 		bits = (__be64 *)(todo + bsize - sizeof(__be64));
289 	} else {
290 		bits = (__be64 *)(todo + bsize + bits_offset);
291 		blocks++;
292 	}
293 	*bits = cpu_to_be64((psize - 1) << 3);
294 
295 	while (blocks--) {
296 		sha_transform(digest, todo, ws);
297 		todo += SHA_MESSAGE_BYTES;
298 	}
299 
300 	result = (__force __be32 *)digest;
301 	for (i = 0; i < SHA_DIGEST_WORDS; i++)
302 		result[i] = cpu_to_be32(digest[i]);
303 	memcpy(fp->tag, result, sizeof(fp->tag));
304 
305 	vfree(raw);
306 	return 0;
307 }
308 
309 static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, u32 delta,
310 				u32 curr, const bool probe_pass)
311 {
312 	const s64 imm_min = S32_MIN, imm_max = S32_MAX;
313 	s64 imm = insn->imm;
314 
315 	if (curr < pos && curr + imm + 1 > pos)
316 		imm += delta;
317 	else if (curr > pos + delta && curr + imm + 1 <= pos + delta)
318 		imm -= delta;
319 	if (imm < imm_min || imm > imm_max)
320 		return -ERANGE;
321 	if (!probe_pass)
322 		insn->imm = imm;
323 	return 0;
324 }
325 
326 static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, u32 delta,
327 				u32 curr, const bool probe_pass)
328 {
329 	const s32 off_min = S16_MIN, off_max = S16_MAX;
330 	s32 off = insn->off;
331 
332 	if (curr < pos && curr + off + 1 > pos)
333 		off += delta;
334 	else if (curr > pos + delta && curr + off + 1 <= pos + delta)
335 		off -= delta;
336 	if (off < off_min || off > off_max)
337 		return -ERANGE;
338 	if (!probe_pass)
339 		insn->off = off;
340 	return 0;
341 }
342 
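/* Walk the insns (in the probe pass, including the prospective new ones)
 * and adjust jump/call targets that cross the patched region at 'pos' by
 * 'delta'.  With probe_pass set, only range-check the adjusted targets
 * without writing them back.
 */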
343 static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, u32 delta,
344 			    const bool probe_pass)
345 {
346 	u32 i, insn_cnt = prog->len + (probe_pass ? delta : 0);
347 	struct bpf_insn *insn = prog->insnsi;
348 	int ret = 0;
349 
350 	for (i = 0; i < insn_cnt; i++, insn++) {
351 		u8 code;
352 
353 		/* In the probing pass we still operate on the original,
354 		 * unpatched image in order to check overflows before we
355 		 * do any other adjustments. Therefore skip the patchlet.
356 		 */
357 		if (probe_pass && i == pos) {
358 			i += delta + 1;
359 			insn++;
360 		}
361 		code = insn->code;
362 		if (BPF_CLASS(code) != BPF_JMP ||
363 		    BPF_OP(code) == BPF_EXIT)
364 			continue;
365 		/* Adjust offset of jmps if we cross patch boundaries. */
366 		if (BPF_OP(code) == BPF_CALL) {
367 			if (insn->src_reg != BPF_PSEUDO_CALL)
368 				continue;
369 			ret = bpf_adj_delta_to_imm(insn, pos, delta, i,
370 						   probe_pass);
371 		} else {
372 			ret = bpf_adj_delta_to_off(insn, pos, delta, i,
373 						   probe_pass);
374 		}
375 		if (ret)
376 			break;
377 	}
378 
379 	return ret;
380 }
381 
382 static void bpf_adj_linfo(struct bpf_prog *prog, u32 off, u32 delta)
383 {
384 	struct bpf_line_info *linfo;
385 	u32 i, nr_linfo;
386 
387 	nr_linfo = prog->aux->nr_linfo;
388 	if (!nr_linfo || !delta)
389 		return;
390 
391 	linfo = prog->aux->linfo;
392 
393 	for (i = 0; i < nr_linfo; i++)
394 		if (off < linfo[i].insn_off)
395 			break;
396 
397 	/* Push insn_off of all remaining entries (off < insn_off) by delta */
398 	for (; i < nr_linfo; i++)
399 		linfo[i].insn_off += delta;
400 }
401 
402 struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
403 				       const struct bpf_insn *patch, u32 len)
404 {
405 	u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
406 	const u32 cnt_max = S16_MAX;
407 	struct bpf_prog *prog_adj;
408 
409 	/* Since our patchlet doesn't expand the image, we're done. */
410 	if (insn_delta == 0) {
411 		memcpy(prog->insnsi + off, patch, sizeof(*patch));
412 		return prog;
413 	}
414 
415 	insn_adj_cnt = prog->len + insn_delta;
416 
417 	/* Reject anything that would potentially let the insn->off
418 	 * target overflow when we have excessive program expansions.
419 	 * We need to probe here before we do any reallocation where
420 	 * we afterwards may not fail anymore.
421 	 */
422 	if (insn_adj_cnt > cnt_max &&
423 	    bpf_adj_branches(prog, off, insn_delta, true))
424 		return NULL;
425 
426 	/* Several new instructions need to be inserted. Make room
427 	 * for them. Likely, there's no need for a new allocation as
428 	 * the last page could have large enough tailroom.
429 	 */
430 	prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
431 				    GFP_USER);
432 	if (!prog_adj)
433 		return NULL;
434 
435 	prog_adj->len = insn_adj_cnt;
436 
437 	/* Patching happens in 3 steps:
438 	 *
439 	 * 1) Move over tail of insnsi from next instruction onwards,
440 	 *    so we can patch the single target insn with one or more
441 	 *    new ones (patching is always from 1 to n insns, n > 0).
442 	 * 2) Inject new instructions at the target location.
443 	 * 3) Adjust branch offsets if necessary.
444 	 */
445 	insn_rest = insn_adj_cnt - off - len;
446 
447 	memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
448 		sizeof(*patch) * insn_rest);
449 	memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);
450 
451 	/* We are guaranteed not to fail at this point; otherwise the
452 	 * ship has sailed and we could not revert to the original
453 	 * state. An overflow cannot happen at this point.
454 	 */
455 	BUG_ON(bpf_adj_branches(prog_adj, off, insn_delta, false));
456 
457 	bpf_adj_linfo(prog_adj, off, insn_delta);
458 
459 	return prog_adj;
460 }
461 
462 void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
463 {
464 	int i;
465 
466 	for (i = 0; i < fp->aux->func_cnt; i++)
467 		bpf_prog_kallsyms_del(fp->aux->func[i]);
468 }
469 
470 void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
471 {
472 	bpf_prog_kallsyms_del_subprogs(fp);
473 	bpf_prog_kallsyms_del(fp);
474 }
475 
476 #ifdef CONFIG_BPF_JIT
477 /* All BPF JIT sysctl knobs here. */
478 int bpf_jit_enable   __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_ALWAYS_ON);
479 int bpf_jit_harden   __read_mostly;
480 int bpf_jit_kallsyms __read_mostly;
481 long bpf_jit_limit   __read_mostly;
482 
483 static __always_inline void
484 bpf_get_prog_addr_region(const struct bpf_prog *prog,
485 			 unsigned long *symbol_start,
486 			 unsigned long *symbol_end)
487 {
488 	const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(prog);
489 	unsigned long addr = (unsigned long)hdr;
490 
491 	WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));
492 
493 	*symbol_start = addr;
494 	*symbol_end   = addr + hdr->pages * PAGE_SIZE;
495 }
496 
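/* Build the kallsyms name of a JITed prog: "bpf_prog_" followed by the
 * hex tag and, when available, "_<btf func name>" or "_<prog name>",
 * e.g. bpf_prog_5c65a8a40f0c9086_my_func (tag and name here are made
 * up for illustration).
 */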
497 static void bpf_get_prog_name(const struct bpf_prog *prog, char *sym)
498 {
499 	const char *end = sym + KSYM_NAME_LEN;
500 	const struct btf_type *type;
501 	const char *func_name;
502 
503 	BUILD_BUG_ON(sizeof("bpf_prog_") +
504 		     sizeof(prog->tag) * 2 +
505 		     /* name has been null terminated.
506 		      * We would need +1 for the '_' preceding
507 		      * the name; however, the null character
508 		      * is double counted between the name and the
509 		      * sizeof("bpf_prog_") above, so we omit
510 		      * the +1 here.
511 		      */
512 		     sizeof(prog->aux->name) > KSYM_NAME_LEN);
513 
514 	sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
515 	sym  = bin2hex(sym, prog->tag, sizeof(prog->tag));
516 
517 	/* prog->aux->name will be ignored if full btf name is available */
518 	if (prog->aux->func_info_cnt) {
519 		type = btf_type_by_id(prog->aux->btf,
520 				      prog->aux->func_info[prog->aux->func_idx].type_id);
521 		func_name = btf_name_by_offset(prog->aux->btf, type->name_off);
522 		snprintf(sym, (size_t)(end - sym), "_%s", func_name);
523 		return;
524 	}
525 
526 	if (prog->aux->name[0])
527 		snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name);
528 	else
529 		*sym = 0;
530 }
531 
532 static __always_inline unsigned long
533 bpf_get_prog_addr_start(struct latch_tree_node *n)
534 {
535 	unsigned long symbol_start, symbol_end;
536 	const struct bpf_prog_aux *aux;
537 
538 	aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
539 	bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
540 
541 	return symbol_start;
542 }
543 
544 static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
545 					  struct latch_tree_node *b)
546 {
547 	return bpf_get_prog_addr_start(a) < bpf_get_prog_addr_start(b);
548 }
549 
550 static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
551 {
552 	unsigned long val = (unsigned long)key;
553 	unsigned long symbol_start, symbol_end;
554 	const struct bpf_prog_aux *aux;
555 
556 	aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
557 	bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
558 
559 	if (val < symbol_start)
560 		return -1;
561 	if (val >= symbol_end)
562 		return  1;
563 
564 	return 0;
565 }
566 
567 static const struct latch_tree_ops bpf_tree_ops = {
568 	.less	= bpf_tree_less,
569 	.comp	= bpf_tree_comp,
570 };
571 
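/* bpf_kallsyms is an RCU list used for /proc/kallsyms style iteration,
 * bpf_tree is a latch tree for fast address -> prog lookups (e.g. from
 * the unwinder); writers are serialized by bpf_lock.
 */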
572 static DEFINE_SPINLOCK(bpf_lock);
573 static LIST_HEAD(bpf_kallsyms);
574 static struct latch_tree_root bpf_tree __cacheline_aligned;
575 
576 static void bpf_prog_ksym_node_add(struct bpf_prog_aux *aux)
577 {
578 	WARN_ON_ONCE(!list_empty(&aux->ksym_lnode));
579 	list_add_tail_rcu(&aux->ksym_lnode, &bpf_kallsyms);
580 	latch_tree_insert(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
581 }
582 
583 static void bpf_prog_ksym_node_del(struct bpf_prog_aux *aux)
584 {
585 	if (list_empty(&aux->ksym_lnode))
586 		return;
587 
588 	latch_tree_erase(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
589 	list_del_rcu(&aux->ksym_lnode);
590 }
591 
592 static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
593 {
594 	return fp->jited && !bpf_prog_was_classic(fp);
595 }
596 
597 static bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
598 {
599 	return list_empty(&fp->aux->ksym_lnode) ||
600 	       fp->aux->ksym_lnode.prev == LIST_POISON2;
601 }
602 
603 void bpf_prog_kallsyms_add(struct bpf_prog *fp)
604 {
605 	if (!bpf_prog_kallsyms_candidate(fp) ||
606 	    !capable(CAP_SYS_ADMIN))
607 		return;
608 
609 	spin_lock_bh(&bpf_lock);
610 	bpf_prog_ksym_node_add(fp->aux);
611 	spin_unlock_bh(&bpf_lock);
612 }
613 
614 void bpf_prog_kallsyms_del(struct bpf_prog *fp)
615 {
616 	if (!bpf_prog_kallsyms_candidate(fp))
617 		return;
618 
619 	spin_lock_bh(&bpf_lock);
620 	bpf_prog_ksym_node_del(fp->aux);
621 	spin_unlock_bh(&bpf_lock);
622 }
623 
624 static struct bpf_prog *bpf_prog_kallsyms_find(unsigned long addr)
625 {
626 	struct latch_tree_node *n;
627 
628 	if (!bpf_jit_kallsyms_enabled())
629 		return NULL;
630 
631 	n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
632 	return n ?
633 	       container_of(n, struct bpf_prog_aux, ksym_tnode)->prog :
634 	       NULL;
635 }
636 
637 const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
638 				 unsigned long *off, char *sym)
639 {
640 	unsigned long symbol_start, symbol_end;
641 	struct bpf_prog *prog;
642 	char *ret = NULL;
643 
644 	rcu_read_lock();
645 	prog = bpf_prog_kallsyms_find(addr);
646 	if (prog) {
647 		bpf_get_prog_addr_region(prog, &symbol_start, &symbol_end);
648 		bpf_get_prog_name(prog, sym);
649 
650 		ret = sym;
651 		if (size)
652 			*size = symbol_end - symbol_start;
653 		if (off)
654 			*off  = addr - symbol_start;
655 	}
656 	rcu_read_unlock();
657 
658 	return ret;
659 }
660 
661 bool is_bpf_text_address(unsigned long addr)
662 {
663 	bool ret;
664 
665 	rcu_read_lock();
666 	ret = bpf_prog_kallsyms_find(addr) != NULL;
667 	rcu_read_unlock();
668 
669 	return ret;
670 }
671 
672 int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
673 		    char *sym)
674 {
675 	struct bpf_prog_aux *aux;
676 	unsigned int it = 0;
677 	int ret = -ERANGE;
678 
679 	if (!bpf_jit_kallsyms_enabled())
680 		return ret;
681 
682 	rcu_read_lock();
683 	list_for_each_entry_rcu(aux, &bpf_kallsyms, ksym_lnode) {
684 		if (it++ != symnum)
685 			continue;
686 
687 		bpf_get_prog_name(aux->prog, sym);
688 
689 		*value = (unsigned long)aux->prog->bpf_func;
690 		*type  = BPF_SYM_ELF_TYPE;
691 
692 		ret = 0;
693 		break;
694 	}
695 	rcu_read_unlock();
696 
697 	return ret;
698 }
699 
700 static atomic_long_t bpf_jit_current;
701 
702 /* Can be overridden by an arch's JIT compiler if it has a custom,
703  * dedicated BPF backend memory area, or if neither of the two
704  * below applies.
705  */
706 u64 __weak bpf_jit_alloc_exec_limit(void)
707 {
708 #if defined(MODULES_VADDR)
709 	return MODULES_END - MODULES_VADDR;
710 #else
711 	return VMALLOC_END - VMALLOC_START;
712 #endif
713 }
714 
715 static int __init bpf_jit_charge_init(void)
716 {
717 	/* Only used as heuristic here to derive limit. */
718 	bpf_jit_limit = min_t(u64, round_up(bpf_jit_alloc_exec_limit() >> 2,
719 					    PAGE_SIZE), LONG_MAX);
720 	return 0;
721 }
722 pure_initcall(bpf_jit_charge_init);
723 
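/* Charge 'pages' against the global JIT image limit (bpf_jit_limit,
 * derived above from the module/vmalloc area size). Once the limit is
 * exceeded, unprivileged users get -EPERM and the charge is backed out;
 * CAP_SYS_ADMIN may exceed the limit.
 */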
724 static int bpf_jit_charge_modmem(u32 pages)
725 {
726 	if (atomic_long_add_return(pages, &bpf_jit_current) >
727 	    (bpf_jit_limit >> PAGE_SHIFT)) {
728 		if (!capable(CAP_SYS_ADMIN)) {
729 			atomic_long_sub(pages, &bpf_jit_current);
730 			return -EPERM;
731 		}
732 	}
733 
734 	return 0;
735 }
736 
737 static void bpf_jit_uncharge_modmem(u32 pages)
738 {
739 	atomic_long_sub(pages, &bpf_jit_current);
740 }
741 
742 void *__weak bpf_jit_alloc_exec(unsigned long size)
743 {
744 	return module_alloc(size);
745 }
746 
747 void __weak bpf_jit_free_exec(void *addr)
748 {
749 	module_memfree(addr);
750 }
751 
752 struct bpf_binary_header *
753 bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
754 		     unsigned int alignment,
755 		     bpf_jit_fill_hole_t bpf_fill_ill_insns)
756 {
757 	struct bpf_binary_header *hdr;
758 	u32 size, hole, start, pages;
759 
760 	/* Most BPF filters are really small, but if some of them
761 	 * fill a page, allow at least 128 extra bytes to insert a
762 	 * random section of illegal instructions.
763 	 */
764 	size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
765 	pages = size / PAGE_SIZE;
766 
767 	if (bpf_jit_charge_modmem(pages))
768 		return NULL;
769 	hdr = bpf_jit_alloc_exec(size);
770 	if (!hdr) {
771 		bpf_jit_uncharge_modmem(pages);
772 		return NULL;
773 	}
774 
775 	/* Fill space with illegal/arch-dep instructions. */
776 	bpf_fill_ill_insns(hdr, size);
777 
778 	hdr->pages = pages;
779 	hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
780 		     PAGE_SIZE - sizeof(*hdr));
781 	start = (get_random_int() % hole) & ~(alignment - 1);
782 
783 	/* Leave a random number of instructions before BPF code. */
784 	*image_ptr = &hdr->image[start];
785 
786 	return hdr;
787 }
788 
789 void bpf_jit_binary_free(struct bpf_binary_header *hdr)
790 {
791 	u32 pages = hdr->pages;
792 
793 	bpf_jit_free_exec(hdr);
794 	bpf_jit_uncharge_modmem(pages);
795 }
796 
797 /* This symbol is only overridden by archs that have different
798  * requirements than the usual eBPF JITs, f.e. when they only
799  * implement cBPF JIT, do not set images read-only, etc.
800  */
801 void __weak bpf_jit_free(struct bpf_prog *fp)
802 {
803 	if (fp->jited) {
804 		struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);
805 
806 		bpf_jit_binary_unlock_ro(hdr);
807 		bpf_jit_binary_free(hdr);
808 
809 		WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
810 	}
811 
812 	bpf_prog_unlock_free(fp);
813 }
814 
815 int bpf_jit_get_func_addr(const struct bpf_prog *prog,
816 			  const struct bpf_insn *insn, bool extra_pass,
817 			  u64 *func_addr, bool *func_addr_fixed)
818 {
819 	s16 off = insn->off;
820 	s32 imm = insn->imm;
821 	u8 *addr;
822 
823 	*func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL;
824 	if (!*func_addr_fixed) {
825 		/* Place-holder address till the last pass has collected
826 		 * all addresses for JITed subprograms in which case we
827 		 * can pick them up from prog->aux.
828 		 */
829 		if (!extra_pass)
830 			addr = NULL;
831 		else if (prog->aux->func &&
832 			 off >= 0 && off < prog->aux->func_cnt)
833 			addr = (u8 *)prog->aux->func[off]->bpf_func;
834 		else
835 			return -EINVAL;
836 	} else {
837 		/* Address of a BPF helper call. Since part of the core
838 		 * kernel, it's always at a fixed location. __bpf_call_base
839 		 * and the helper with imm relative to it are both in core
840 		 * kernel.
841 		 */
842 		addr = (u8 *)__bpf_call_base + imm;
843 	}
844 
845 	*func_addr = (unsigned long)addr;
846 	return 0;
847 }
848 
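/* Constant blinding: rewrite an insn carrying a user-controlled immediate
 * into a sequence that keeps the attacker-chosen constant out of the JIT
 * image (JIT spraying mitigation), e.g. for BPF_ALU64 | BPF_ADD | BPF_K:
 *
 *   mov64 AX, (imm ^ imm_rnd)
 *   xor64 AX, imm_rnd
 *   add64 dst, AX
 *
 * Returns the number of insns written to to_buff, or 0 if the insn does
 * not need to be rewritten.
 */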
849 static int bpf_jit_blind_insn(const struct bpf_insn *from,
850 			      const struct bpf_insn *aux,
851 			      struct bpf_insn *to_buff)
852 {
853 	struct bpf_insn *to = to_buff;
854 	u32 imm_rnd = get_random_int();
855 	s16 off;
856 
857 	BUILD_BUG_ON(BPF_REG_AX  + 1 != MAX_BPF_JIT_REG);
858 	BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);
859 
860 	if (from->imm == 0 &&
861 	    (from->code == (BPF_ALU   | BPF_MOV | BPF_K) ||
862 	     from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
863 		*to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
864 		goto out;
865 	}
866 
867 	switch (from->code) {
868 	case BPF_ALU | BPF_ADD | BPF_K:
869 	case BPF_ALU | BPF_SUB | BPF_K:
870 	case BPF_ALU | BPF_AND | BPF_K:
871 	case BPF_ALU | BPF_OR  | BPF_K:
872 	case BPF_ALU | BPF_XOR | BPF_K:
873 	case BPF_ALU | BPF_MUL | BPF_K:
874 	case BPF_ALU | BPF_MOV | BPF_K:
875 	case BPF_ALU | BPF_DIV | BPF_K:
876 	case BPF_ALU | BPF_MOD | BPF_K:
877 		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
878 		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
879 		*to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX);
880 		break;
881 
882 	case BPF_ALU64 | BPF_ADD | BPF_K:
883 	case BPF_ALU64 | BPF_SUB | BPF_K:
884 	case BPF_ALU64 | BPF_AND | BPF_K:
885 	case BPF_ALU64 | BPF_OR  | BPF_K:
886 	case BPF_ALU64 | BPF_XOR | BPF_K:
887 	case BPF_ALU64 | BPF_MUL | BPF_K:
888 	case BPF_ALU64 | BPF_MOV | BPF_K:
889 	case BPF_ALU64 | BPF_DIV | BPF_K:
890 	case BPF_ALU64 | BPF_MOD | BPF_K:
891 		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
892 		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
893 		*to++ = BPF_ALU64_REG(from->code, from->dst_reg, BPF_REG_AX);
894 		break;
895 
896 	case BPF_JMP | BPF_JEQ  | BPF_K:
897 	case BPF_JMP | BPF_JNE  | BPF_K:
898 	case BPF_JMP | BPF_JGT  | BPF_K:
899 	case BPF_JMP | BPF_JLT  | BPF_K:
900 	case BPF_JMP | BPF_JGE  | BPF_K:
901 	case BPF_JMP | BPF_JLE  | BPF_K:
902 	case BPF_JMP | BPF_JSGT | BPF_K:
903 	case BPF_JMP | BPF_JSLT | BPF_K:
904 	case BPF_JMP | BPF_JSGE | BPF_K:
905 	case BPF_JMP | BPF_JSLE | BPF_K:
906 	case BPF_JMP | BPF_JSET | BPF_K:
907 		/* Account for the extra offset in case of a backjump. */
908 		off = from->off;
909 		if (off < 0)
910 			off -= 2;
911 		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
912 		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
913 		*to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
914 		break;
915 
916 	case BPF_LD | BPF_IMM | BPF_DW:
917 		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
918 		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
919 		*to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
920 		*to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
921 		break;
922 	case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
923 		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
924 		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
925 		*to++ = BPF_ALU64_REG(BPF_OR,  aux[0].dst_reg, BPF_REG_AX);
926 		break;
927 
928 	case BPF_ST | BPF_MEM | BPF_DW:
929 	case BPF_ST | BPF_MEM | BPF_W:
930 	case BPF_ST | BPF_MEM | BPF_H:
931 	case BPF_ST | BPF_MEM | BPF_B:
932 		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
933 		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
934 		*to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
935 		break;
936 	}
937 out:
938 	return to - to_buff;
939 }
940 
941 static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
942 					      gfp_t gfp_extra_flags)
943 {
944 	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
945 	struct bpf_prog *fp;
946 
947 	fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags, PAGE_KERNEL);
948 	if (fp != NULL) {
949 		/* aux->prog still points to the fp_other one, so
950 		 * when promoting the clone to the real program,
951 		 * this still needs to be adapted.
952 		 */
953 		memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
954 	}
955 
956 	return fp;
957 }
958 
959 static void bpf_prog_clone_free(struct bpf_prog *fp)
960 {
961 	/* aux was stolen by the other clone, so we cannot free
962 	 * it from this path! It will be freed eventually by the
963 	 * other program on release.
964 	 *
965 	 * At this point, we don't need a deferred release since
966 	 * clone is guaranteed to not be locked.
967 	 */
968 	fp->aux = NULL;
969 	__bpf_prog_free(fp);
970 }
971 
972 void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
973 {
974 	/* We have to repoint aux->prog to self, as we don't
975 	 * know whether fp here is the clone or the original.
976 	 */
977 	fp->aux->prog = fp;
978 	bpf_prog_clone_free(fp_other);
979 }
980 
981 struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
982 {
983 	struct bpf_insn insn_buff[16], aux[2];
984 	struct bpf_prog *clone, *tmp;
985 	int insn_delta, insn_cnt;
986 	struct bpf_insn *insn;
987 	int i, rewritten;
988 
989 	if (!bpf_jit_blinding_enabled(prog) || prog->blinded)
990 		return prog;
991 
992 	clone = bpf_prog_clone_create(prog, GFP_USER);
993 	if (!clone)
994 		return ERR_PTR(-ENOMEM);
995 
996 	insn_cnt = clone->len;
997 	insn = clone->insnsi;
998 
999 	for (i = 0; i < insn_cnt; i++, insn++) {
1000 		/* We temporarily need to hold the original ld64 insn
1001 		 * so that we can still access the first part in the
1002 		 * second blinding run.
1003 		 */
1004 		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
1005 		    insn[1].code == 0)
1006 			memcpy(aux, insn, sizeof(aux));
1007 
1008 		rewritten = bpf_jit_blind_insn(insn, aux, insn_buff);
1009 		if (!rewritten)
1010 			continue;
1011 
1012 		tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
1013 		if (!tmp) {
1014 			/* Patching may have repointed aux->prog during
1015 			 * realloc from the original one, so we need to
1016 			 * fix it up here on error.
1017 			 */
1018 			bpf_jit_prog_release_other(prog, clone);
1019 			return ERR_PTR(-ENOMEM);
1020 		}
1021 
1022 		clone = tmp;
1023 		insn_delta = rewritten - 1;
1024 
1025 		/* Walk new program and skip insns we just inserted. */
1026 		insn = clone->insnsi + i + insn_delta;
1027 		insn_cnt += insn_delta;
1028 		i        += insn_delta;
1029 	}
1030 
1031 	clone->blinded = 1;
1032 	return clone;
1033 }
1034 #endif /* CONFIG_BPF_JIT */
1035 
1036 /* Base function for offset calculation. Needs to go into .text section,
1037  * therefore keeping it non-static as well; will also be used by JITs
1038  * anyway later on, so do not let the compiler omit it. This also needs
1039  * to go into kallsyms for correlation from e.g. bpftool, so naming
1040  * must not change.
1041  */
1042 noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
1043 {
1044 	return 0;
1045 }
1046 EXPORT_SYMBOL_GPL(__bpf_call_base);
1047 
1048 /* All UAPI available opcodes. */
1049 #define BPF_INSN_MAP(INSN_2, INSN_3)		\
1050 	/* 32 bit ALU operations. */		\
1051 	/*   Register based. */			\
1052 	INSN_3(ALU, ADD,  X),			\
1053 	INSN_3(ALU, SUB,  X),			\
1054 	INSN_3(ALU, AND,  X),			\
1055 	INSN_3(ALU, OR,   X),			\
1056 	INSN_3(ALU, LSH,  X),			\
1057 	INSN_3(ALU, RSH,  X),			\
1058 	INSN_3(ALU, XOR,  X),			\
1059 	INSN_3(ALU, MUL,  X),			\
1060 	INSN_3(ALU, MOV,  X),			\
1061 	INSN_3(ALU, ARSH, X),			\
1062 	INSN_3(ALU, DIV,  X),			\
1063 	INSN_3(ALU, MOD,  X),			\
1064 	INSN_2(ALU, NEG),			\
1065 	INSN_3(ALU, END, TO_BE),		\
1066 	INSN_3(ALU, END, TO_LE),		\
1067 	/*   Immediate based. */		\
1068 	INSN_3(ALU, ADD,  K),			\
1069 	INSN_3(ALU, SUB,  K),			\
1070 	INSN_3(ALU, AND,  K),			\
1071 	INSN_3(ALU, OR,   K),			\
1072 	INSN_3(ALU, LSH,  K),			\
1073 	INSN_3(ALU, RSH,  K),			\
1074 	INSN_3(ALU, XOR,  K),			\
1075 	INSN_3(ALU, MUL,  K),			\
1076 	INSN_3(ALU, MOV,  K),			\
1077 	INSN_3(ALU, ARSH, K),			\
1078 	INSN_3(ALU, DIV,  K),			\
1079 	INSN_3(ALU, MOD,  K),			\
1080 	/* 64 bit ALU operations. */		\
1081 	/*   Register based. */			\
1082 	INSN_3(ALU64, ADD,  X),			\
1083 	INSN_3(ALU64, SUB,  X),			\
1084 	INSN_3(ALU64, AND,  X),			\
1085 	INSN_3(ALU64, OR,   X),			\
1086 	INSN_3(ALU64, LSH,  X),			\
1087 	INSN_3(ALU64, RSH,  X),			\
1088 	INSN_3(ALU64, XOR,  X),			\
1089 	INSN_3(ALU64, MUL,  X),			\
1090 	INSN_3(ALU64, MOV,  X),			\
1091 	INSN_3(ALU64, ARSH, X),			\
1092 	INSN_3(ALU64, DIV,  X),			\
1093 	INSN_3(ALU64, MOD,  X),			\
1094 	INSN_2(ALU64, NEG),			\
1095 	/*   Immediate based. */		\
1096 	INSN_3(ALU64, ADD,  K),			\
1097 	INSN_3(ALU64, SUB,  K),			\
1098 	INSN_3(ALU64, AND,  K),			\
1099 	INSN_3(ALU64, OR,   K),			\
1100 	INSN_3(ALU64, LSH,  K),			\
1101 	INSN_3(ALU64, RSH,  K),			\
1102 	INSN_3(ALU64, XOR,  K),			\
1103 	INSN_3(ALU64, MUL,  K),			\
1104 	INSN_3(ALU64, MOV,  K),			\
1105 	INSN_3(ALU64, ARSH, K),			\
1106 	INSN_3(ALU64, DIV,  K),			\
1107 	INSN_3(ALU64, MOD,  K),			\
1108 	/* Call instruction. */			\
1109 	INSN_2(JMP, CALL),			\
1110 	/* Exit instruction. */			\
1111 	INSN_2(JMP, EXIT),			\
1112 	/* Jump instructions. */		\
1113 	/*   Register based. */			\
1114 	INSN_3(JMP, JEQ,  X),			\
1115 	INSN_3(JMP, JNE,  X),			\
1116 	INSN_3(JMP, JGT,  X),			\
1117 	INSN_3(JMP, JLT,  X),			\
1118 	INSN_3(JMP, JGE,  X),			\
1119 	INSN_3(JMP, JLE,  X),			\
1120 	INSN_3(JMP, JSGT, X),			\
1121 	INSN_3(JMP, JSLT, X),			\
1122 	INSN_3(JMP, JSGE, X),			\
1123 	INSN_3(JMP, JSLE, X),			\
1124 	INSN_3(JMP, JSET, X),			\
1125 	/*   Immediate based. */		\
1126 	INSN_3(JMP, JEQ,  K),			\
1127 	INSN_3(JMP, JNE,  K),			\
1128 	INSN_3(JMP, JGT,  K),			\
1129 	INSN_3(JMP, JLT,  K),			\
1130 	INSN_3(JMP, JGE,  K),			\
1131 	INSN_3(JMP, JLE,  K),			\
1132 	INSN_3(JMP, JSGT, K),			\
1133 	INSN_3(JMP, JSLT, K),			\
1134 	INSN_3(JMP, JSGE, K),			\
1135 	INSN_3(JMP, JSLE, K),			\
1136 	INSN_3(JMP, JSET, K),			\
1137 	INSN_2(JMP, JA),			\
1138 	/* Store instructions. */		\
1139 	/*   Register based. */			\
1140 	INSN_3(STX, MEM,  B),			\
1141 	INSN_3(STX, MEM,  H),			\
1142 	INSN_3(STX, MEM,  W),			\
1143 	INSN_3(STX, MEM,  DW),			\
1144 	INSN_3(STX, XADD, W),			\
1145 	INSN_3(STX, XADD, DW),			\
1146 	/*   Immediate based. */		\
1147 	INSN_3(ST, MEM, B),			\
1148 	INSN_3(ST, MEM, H),			\
1149 	INSN_3(ST, MEM, W),			\
1150 	INSN_3(ST, MEM, DW),			\
1151 	/* Load instructions. */		\
1152 	/*   Register based. */			\
1153 	INSN_3(LDX, MEM, B),			\
1154 	INSN_3(LDX, MEM, H),			\
1155 	INSN_3(LDX, MEM, W),			\
1156 	INSN_3(LDX, MEM, DW),			\
1157 	/*   Immediate based. */		\
1158 	INSN_3(LD, IMM, DW)
1159 
1160 bool bpf_opcode_in_insntable(u8 code)
1161 {
1162 #define BPF_INSN_2_TBL(x, y)    [BPF_##x | BPF_##y] = true
1163 #define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true
1164 	static const bool public_insntable[256] = {
1165 		[0 ... 255] = false,
1166 		/* Now overwrite non-defaults ... */
1167 		BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL),
1168 		/* UAPI exposed, but rewritten opcodes. cBPF carry-over. */
1169 		[BPF_LD | BPF_ABS | BPF_B] = true,
1170 		[BPF_LD | BPF_ABS | BPF_H] = true,
1171 		[BPF_LD | BPF_ABS | BPF_W] = true,
1172 		[BPF_LD | BPF_IND | BPF_B] = true,
1173 		[BPF_LD | BPF_IND | BPF_H] = true,
1174 		[BPF_LD | BPF_IND | BPF_W] = true,
1175 	};
1176 #undef BPF_INSN_3_TBL
1177 #undef BPF_INSN_2_TBL
1178 	return public_insntable[code];
1179 }
1180 
1181 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
1182 /**
1183  *	___bpf_prog_run - run eBPF program on a given context
1184  *	@regs: is the array of MAX_BPF_REG eBPF pseudo registers
1185  *	@insn: is the array of eBPF instructions
1186  *	@stack: is the eBPF storage stack
1187  * Decode and execute eBPF instructions.
1188  */
1189 static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
1190 {
1191 	u64 tmp;
1192 #define BPF_INSN_2_LBL(x, y)    [BPF_##x | BPF_##y] = &&x##_##y
1193 #define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
1194 	static const void *jumptable[256] = {
1195 		[0 ... 255] = &&default_label,
1196 		/* Now overwrite non-defaults ... */
1197 		BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL),
1198 		/* Non-UAPI available opcodes. */
1199 		[BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
1200 		[BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
1201 	};
1202 #undef BPF_INSN_3_LBL
1203 #undef BPF_INSN_2_LBL
1204 	u32 tail_call_cnt = 0;
1205 
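	/* Dispatch is done via computed goto: select_insn below indexes the
	 * jumptable with insn->code, and CONT/CONT_JMP advance to the next
	 * insn and jump back to select_insn.  Unknown opcodes land on
	 * default_label.
	 */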
1206 #define CONT	 ({ insn++; goto select_insn; })
1207 #define CONT_JMP ({ insn++; goto select_insn; })
1208 
1209 select_insn:
1210 	goto *jumptable[insn->code];
1211 
1212 	/* ALU */
1213 #define ALU(OPCODE, OP)			\
1214 	ALU64_##OPCODE##_X:		\
1215 		DST = DST OP SRC;	\
1216 		CONT;			\
1217 	ALU_##OPCODE##_X:		\
1218 		DST = (u32) DST OP (u32) SRC;	\
1219 		CONT;			\
1220 	ALU64_##OPCODE##_K:		\
1221 		DST = DST OP IMM;		\
1222 		CONT;			\
1223 	ALU_##OPCODE##_K:		\
1224 		DST = (u32) DST OP (u32) IMM;	\
1225 		CONT;
1226 
1227 	ALU(ADD,  +)
1228 	ALU(SUB,  -)
1229 	ALU(AND,  &)
1230 	ALU(OR,   |)
1231 	ALU(LSH, <<)
1232 	ALU(RSH, >>)
1233 	ALU(XOR,  ^)
1234 	ALU(MUL,  *)
1235 #undef ALU
1236 	ALU_NEG:
1237 		DST = (u32) -DST;
1238 		CONT;
1239 	ALU64_NEG:
1240 		DST = -DST;
1241 		CONT;
1242 	ALU_MOV_X:
1243 		DST = (u32) SRC;
1244 		CONT;
1245 	ALU_MOV_K:
1246 		DST = (u32) IMM;
1247 		CONT;
1248 	ALU64_MOV_X:
1249 		DST = SRC;
1250 		CONT;
1251 	ALU64_MOV_K:
1252 		DST = IMM;
1253 		CONT;
1254 	LD_IMM_DW:
1255 		DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
1256 		insn++;
1257 		CONT;
1258 	ALU_ARSH_X:
1259 		DST = (u64) (u32) ((*(s32 *) &DST) >> SRC);
1260 		CONT;
1261 	ALU_ARSH_K:
1262 		DST = (u64) (u32) ((*(s32 *) &DST) >> IMM);
1263 		CONT;
1264 	ALU64_ARSH_X:
1265 		(*(s64 *) &DST) >>= SRC;
1266 		CONT;
1267 	ALU64_ARSH_K:
1268 		(*(s64 *) &DST) >>= IMM;
1269 		CONT;
1270 	ALU64_MOD_X:
1271 		div64_u64_rem(DST, SRC, &tmp);
1272 		DST = tmp;
1273 		CONT;
1274 	ALU_MOD_X:
1275 		tmp = (u32) DST;
1276 		DST = do_div(tmp, (u32) SRC);
1277 		CONT;
1278 	ALU64_MOD_K:
1279 		div64_u64_rem(DST, IMM, &tmp);
1280 		DST = tmp;
1281 		CONT;
1282 	ALU_MOD_K:
1283 		tmp = (u32) DST;
1284 		DST = do_div(tmp, (u32) IMM);
1285 		CONT;
1286 	ALU64_DIV_X:
1287 		DST = div64_u64(DST, SRC);
1288 		CONT;
1289 	ALU_DIV_X:
1290 		tmp = (u32) DST;
1291 		do_div(tmp, (u32) SRC);
1292 		DST = (u32) tmp;
1293 		CONT;
1294 	ALU64_DIV_K:
1295 		DST = div64_u64(DST, IMM);
1296 		CONT;
1297 	ALU_DIV_K:
1298 		tmp = (u32) DST;
1299 		do_div(tmp, (u32) IMM);
1300 		DST = (u32) tmp;
1301 		CONT;
1302 	ALU_END_TO_BE:
1303 		switch (IMM) {
1304 		case 16:
1305 			DST = (__force u16) cpu_to_be16(DST);
1306 			break;
1307 		case 32:
1308 			DST = (__force u32) cpu_to_be32(DST);
1309 			break;
1310 		case 64:
1311 			DST = (__force u64) cpu_to_be64(DST);
1312 			break;
1313 		}
1314 		CONT;
1315 	ALU_END_TO_LE:
1316 		switch (IMM) {
1317 		case 16:
1318 			DST = (__force u16) cpu_to_le16(DST);
1319 			break;
1320 		case 32:
1321 			DST = (__force u32) cpu_to_le32(DST);
1322 			break;
1323 		case 64:
1324 			DST = (__force u64) cpu_to_le64(DST);
1325 			break;
1326 		}
1327 		CONT;
1328 
1329 	/* CALL */
1330 	JMP_CALL:
1331 		/* Function call scratches BPF_R1-BPF_R5 registers,
1332 		 * preserves BPF_R6-BPF_R9, and stores return value
1333 		 * into BPF_R0.
1334 		 */
1335 		BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
1336 						       BPF_R4, BPF_R5);
1337 		CONT;
1338 
1339 	JMP_CALL_ARGS:
1340 		BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2,
1341 							    BPF_R3, BPF_R4,
1342 							    BPF_R5,
1343 							    insn + insn->off + 1);
1344 		CONT;
1345 
1346 	JMP_TAIL_CALL: {
1347 		struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
1348 		struct bpf_array *array = container_of(map, struct bpf_array, map);
1349 		struct bpf_prog *prog;
1350 		u32 index = BPF_R3;
1351 
1352 		if (unlikely(index >= array->map.max_entries))
1353 			goto out;
1354 		if (unlikely(tail_call_cnt > MAX_TAIL_CALL_CNT))
1355 			goto out;
1356 
1357 		tail_call_cnt++;
1358 
1359 		prog = READ_ONCE(array->ptrs[index]);
1360 		if (!prog)
1361 			goto out;
1362 
1363 		/* ARG1 at this point is guaranteed to point to CTX from
1364 		 * the verifier side due to the fact that the tail call is
1365 		 * handled like a helper, that is, bpf_tail_call_proto,
1366 		 * where arg1_type is ARG_PTR_TO_CTX.
1367 		 */
1368 		insn = prog->insnsi;
1369 		goto select_insn;
1370 out:
1371 		CONT;
1372 	}
1373 	/* JMP */
1374 	JMP_JA:
1375 		insn += insn->off;
1376 		CONT;
1377 	JMP_JEQ_X:
1378 		if (DST == SRC) {
1379 			insn += insn->off;
1380 			CONT_JMP;
1381 		}
1382 		CONT;
1383 	JMP_JEQ_K:
1384 		if (DST == IMM) {
1385 			insn += insn->off;
1386 			CONT_JMP;
1387 		}
1388 		CONT;
1389 	JMP_JNE_X:
1390 		if (DST != SRC) {
1391 			insn += insn->off;
1392 			CONT_JMP;
1393 		}
1394 		CONT;
1395 	JMP_JNE_K:
1396 		if (DST != IMM) {
1397 			insn += insn->off;
1398 			CONT_JMP;
1399 		}
1400 		CONT;
1401 	JMP_JGT_X:
1402 		if (DST > SRC) {
1403 			insn += insn->off;
1404 			CONT_JMP;
1405 		}
1406 		CONT;
1407 	JMP_JGT_K:
1408 		if (DST > IMM) {
1409 			insn += insn->off;
1410 			CONT_JMP;
1411 		}
1412 		CONT;
1413 	JMP_JLT_X:
1414 		if (DST < SRC) {
1415 			insn += insn->off;
1416 			CONT_JMP;
1417 		}
1418 		CONT;
1419 	JMP_JLT_K:
1420 		if (DST < IMM) {
1421 			insn += insn->off;
1422 			CONT_JMP;
1423 		}
1424 		CONT;
1425 	JMP_JGE_X:
1426 		if (DST >= SRC) {
1427 			insn += insn->off;
1428 			CONT_JMP;
1429 		}
1430 		CONT;
1431 	JMP_JGE_K:
1432 		if (DST >= IMM) {
1433 			insn += insn->off;
1434 			CONT_JMP;
1435 		}
1436 		CONT;
1437 	JMP_JLE_X:
1438 		if (DST <= SRC) {
1439 			insn += insn->off;
1440 			CONT_JMP;
1441 		}
1442 		CONT;
1443 	JMP_JLE_K:
1444 		if (DST <= IMM) {
1445 			insn += insn->off;
1446 			CONT_JMP;
1447 		}
1448 		CONT;
1449 	JMP_JSGT_X:
1450 		if (((s64) DST) > ((s64) SRC)) {
1451 			insn += insn->off;
1452 			CONT_JMP;
1453 		}
1454 		CONT;
1455 	JMP_JSGT_K:
1456 		if (((s64) DST) > ((s64) IMM)) {
1457 			insn += insn->off;
1458 			CONT_JMP;
1459 		}
1460 		CONT;
1461 	JMP_JSLT_X:
1462 		if (((s64) DST) < ((s64) SRC)) {
1463 			insn += insn->off;
1464 			CONT_JMP;
1465 		}
1466 		CONT;
1467 	JMP_JSLT_K:
1468 		if (((s64) DST) < ((s64) IMM)) {
1469 			insn += insn->off;
1470 			CONT_JMP;
1471 		}
1472 		CONT;
1473 	JMP_JSGE_X:
1474 		if (((s64) DST) >= ((s64) SRC)) {
1475 			insn += insn->off;
1476 			CONT_JMP;
1477 		}
1478 		CONT;
1479 	JMP_JSGE_K:
1480 		if (((s64) DST) >= ((s64) IMM)) {
1481 			insn += insn->off;
1482 			CONT_JMP;
1483 		}
1484 		CONT;
1485 	JMP_JSLE_X:
1486 		if (((s64) DST) <= ((s64) SRC)) {
1487 			insn += insn->off;
1488 			CONT_JMP;
1489 		}
1490 		CONT;
1491 	JMP_JSLE_K:
1492 		if (((s64) DST) <= ((s64) IMM)) {
1493 			insn += insn->off;
1494 			CONT_JMP;
1495 		}
1496 		CONT;
1497 	JMP_JSET_X:
1498 		if (DST & SRC) {
1499 			insn += insn->off;
1500 			CONT_JMP;
1501 		}
1502 		CONT;
1503 	JMP_JSET_K:
1504 		if (DST & IMM) {
1505 			insn += insn->off;
1506 			CONT_JMP;
1507 		}
1508 		CONT;
1509 	JMP_EXIT:
1510 		return BPF_R0;
1511 
1512 	/* STX and ST and LDX */
1513 #define LDST(SIZEOP, SIZE)						\
1514 	STX_MEM_##SIZEOP:						\
1515 		*(SIZE *)(unsigned long) (DST + insn->off) = SRC;	\
1516 		CONT;							\
1517 	ST_MEM_##SIZEOP:						\
1518 		*(SIZE *)(unsigned long) (DST + insn->off) = IMM;	\
1519 		CONT;							\
1520 	LDX_MEM_##SIZEOP:						\
1521 		DST = *(SIZE *)(unsigned long) (SRC + insn->off);	\
1522 		CONT;
1523 
1524 	LDST(B,   u8)
1525 	LDST(H,  u16)
1526 	LDST(W,  u32)
1527 	LDST(DW, u64)
1528 #undef LDST
1529 	STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
1530 		atomic_add((u32) SRC, (atomic_t *)(unsigned long)
1531 			   (DST + insn->off));
1532 		CONT;
1533 	STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
1534 		atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
1535 			     (DST + insn->off));
1536 		CONT;
1537 
1538 	default_label:
1539 		/* If we ever reach this, we have a bug somewhere. Die hard here
1540 		 * instead of just returning 0; we could be somewhere in a subprog,
1541 		 * so execution could otherwise continue, which we do /not/ want.
1542 		 *
1543 		 * Note, the verifier whitelists all opcodes in bpf_opcode_in_insntable().
1544 		 */
1545 		pr_warn("BPF interpreter: unknown opcode %02x\n", insn->code);
1546 		BUG_ON(1);
1547 		return 0;
1548 }
1549 STACK_FRAME_NON_STANDARD(___bpf_prog_run); /* jump table */
1550 
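/* Generate one interpreter entry per supported stack size (32..512 bytes
 * in steps of 32), so a program only reserves as much interpreter stack
 * as the verifier computed for it (fp->aux->stack_depth).
 */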
1551 #define PROG_NAME(stack_size) __bpf_prog_run##stack_size
1552 #define DEFINE_BPF_PROG_RUN(stack_size) \
1553 static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
1554 { \
1555 	u64 stack[stack_size / sizeof(u64)]; \
1556 	u64 regs[MAX_BPF_REG]; \
1557 \
1558 	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
1559 	ARG1 = (u64) (unsigned long) ctx; \
1560 	return ___bpf_prog_run(regs, insn, stack); \
1561 }
1562 
1563 #define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size
1564 #define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \
1565 static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
1566 				      const struct bpf_insn *insn) \
1567 { \
1568 	u64 stack[stack_size / sizeof(u64)]; \
1569 	u64 regs[MAX_BPF_REG]; \
1570 \
1571 	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
1572 	BPF_R1 = r1; \
1573 	BPF_R2 = r2; \
1574 	BPF_R3 = r3; \
1575 	BPF_R4 = r4; \
1576 	BPF_R5 = r5; \
1577 	return ___bpf_prog_run(regs, insn, stack); \
1578 }
1579 
1580 #define EVAL1(FN, X) FN(X)
1581 #define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
1582 #define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
1583 #define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y)
1584 #define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y)
1585 #define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y)
1586 
1587 EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
1588 EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
1589 EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);
1590 
1591 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192);
1592 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384);
1593 EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);
1594 
1595 #define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),
1596 
1597 static unsigned int (*interpreters[])(const void *ctx,
1598 				      const struct bpf_insn *insn) = {
1599 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
1600 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
1601 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
1602 };
1603 #undef PROG_NAME_LIST
1604 #define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size),
1605 static u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5,
1606 				  const struct bpf_insn *insn) = {
1607 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
1608 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
1609 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
1610 };
1611 #undef PROG_NAME_LIST
1612 
1613 void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
1614 {
1615 	stack_depth = max_t(u32, stack_depth, 1);
1616 	insn->off = (s16) insn->imm;
1617 	insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] -
1618 		__bpf_call_base_args;
1619 	insn->code = BPF_JMP | BPF_CALL_ARGS;
1620 }
1621 
1622 #else
1623 static unsigned int __bpf_prog_ret0_warn(const void *ctx,
1624 					 const struct bpf_insn *insn)
1625 {
1626 	/* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
1627 	 * is not working properly, so warn about it!
1628 	 */
1629 	WARN_ON_ONCE(1);
1630 	return 0;
1631 }
1632 #endif
1633 
1634 bool bpf_prog_array_compatible(struct bpf_array *array,
1635 			       const struct bpf_prog *fp)
1636 {
1637 	if (fp->kprobe_override)
1638 		return false;
1639 
1640 	if (!array->owner_prog_type) {
1641 		/* There's no owner yet where we could check for
1642 		 * compatibility.
1643 		 */
1644 		array->owner_prog_type = fp->type;
1645 		array->owner_jited = fp->jited;
1646 
1647 		return true;
1648 	}
1649 
1650 	return array->owner_prog_type == fp->type &&
1651 	       array->owner_jited == fp->jited;
1652 }
1653 
1654 static int bpf_check_tail_call(const struct bpf_prog *fp)
1655 {
1656 	struct bpf_prog_aux *aux = fp->aux;
1657 	int i;
1658 
1659 	for (i = 0; i < aux->used_map_cnt; i++) {
1660 		struct bpf_map *map = aux->used_maps[i];
1661 		struct bpf_array *array;
1662 
1663 		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
1664 			continue;
1665 
1666 		array = container_of(map, struct bpf_array, map);
1667 		if (!bpf_prog_array_compatible(array, fp))
1668 			return -EINVAL;
1669 	}
1670 
1671 	return 0;
1672 }
1673 
1674 static void bpf_prog_select_func(struct bpf_prog *fp)
1675 {
1676 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
1677 	u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
1678 
1679 	fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
1680 #else
1681 	fp->bpf_func = __bpf_prog_ret0_warn;
1682 #endif
1683 }
1684 
1685 /**
1686  *	bpf_prog_select_runtime - select exec runtime for BPF program
1687  *	@fp: bpf_prog populated with internal BPF program
1688  *	@err: pointer to error variable
1689  *
1690  * Try to JIT eBPF program, if JIT is not available, use interpreter.
1691  * The BPF program will be executed via BPF_PROG_RUN() macro.
1692  */
1693 struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
1694 {
1695 	/* In case of BPF to BPF calls, verifier did all the prep
1696 	 * work with regards to JITing, etc.
1697 	 */
1698 	if (fp->bpf_func)
1699 		goto finalize;
1700 
1701 	bpf_prog_select_func(fp);
1702 
1703 	/* eBPF JITs can rewrite the program in case constant
1704 	 * blinding is active. However, in case of error during
1705 	 * blinding, bpf_int_jit_compile() must always return a
1706 	 * valid program, which in this case would simply not
1707 	 * be JITed, but fall back to the interpreter.
1708 	 */
1709 	if (!bpf_prog_is_dev_bound(fp->aux)) {
1710 		*err = bpf_prog_alloc_jited_linfo(fp);
1711 		if (*err)
1712 			return fp;
1713 
1714 		fp = bpf_int_jit_compile(fp);
1715 		if (!fp->jited) {
1716 			bpf_prog_free_jited_linfo(fp);
1717 #ifdef CONFIG_BPF_JIT_ALWAYS_ON
1718 			*err = -ENOTSUPP;
1719 			return fp;
1720 #endif
1721 		} else {
1722 			bpf_prog_free_unused_jited_linfo(fp);
1723 		}
1724 	} else {
1725 		*err = bpf_prog_offload_compile(fp);
1726 		if (*err)
1727 			return fp;
1728 	}
1729 
1730 finalize:
1731 	bpf_prog_lock_ro(fp);
1732 
1733 	/* The tail call compatibility check can only be done at
1734 	 * this late stage as we need to determine whether we deal
1735 	 * with JITed or non-JITed program concatenations, and not
1736 	 * all eBPF JITs might immediately support all features.
1737 	 */
1738 	*err = bpf_check_tail_call(fp);
1739 
1740 	return fp;
1741 }
1742 EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
1743 
1744 static unsigned int __bpf_prog_ret1(const void *ctx,
1745 				    const struct bpf_insn *insn)
1746 {
1747 	return 1;
1748 }
1749 
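/* Placeholder installed by bpf_prog_array_delete_safe() in place of a
 * removed entry, so that concurrent walkers of a prog array never see a
 * hole; its bpf_func simply returns 1 and it is skipped when counting
 * or copying prog ids.
 */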
1750 static struct bpf_prog_dummy {
1751 	struct bpf_prog prog;
1752 } dummy_bpf_prog = {
1753 	.prog = {
1754 		.bpf_func = __bpf_prog_ret1,
1755 	},
1756 };
1757 
1758 /* To avoid allocating an empty bpf_prog_array for cgroups that
1759  * don't have a bpf program attached, use one global 'empty_prog_array'.
1760  * It will not be modified by the caller of bpf_prog_array_alloc()
1761  * (since the caller requested prog_cnt == 0); that pointer should
1762  * still be 'freed' by bpf_prog_array_free().
1763  */
1764 static struct {
1765 	struct bpf_prog_array hdr;
1766 	struct bpf_prog *null_prog;
1767 } empty_prog_array = {
1768 	.null_prog = NULL,
1769 };
1770 
1771 struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
1772 {
1773 	if (prog_cnt)
1774 		return kzalloc(sizeof(struct bpf_prog_array) +
1775 			       sizeof(struct bpf_prog_array_item) *
1776 			       (prog_cnt + 1),
1777 			       flags);
1778 
1779 	return &empty_prog_array.hdr;
1780 }
1781 
1782 void bpf_prog_array_free(struct bpf_prog_array __rcu *progs)
1783 {
1784 	if (!progs ||
1785 	    progs == (struct bpf_prog_array __rcu *)&empty_prog_array.hdr)
1786 		return;
1787 	kfree_rcu(progs, rcu);
1788 }
1789 
1790 int bpf_prog_array_length(struct bpf_prog_array __rcu *array)
1791 {
1792 	struct bpf_prog_array_item *item;
1793 	u32 cnt = 0;
1794 
1795 	rcu_read_lock();
1796 	item = rcu_dereference(array)->items;
1797 	for (; item->prog; item++)
1798 		if (item->prog != &dummy_bpf_prog.prog)
1799 			cnt++;
1800 	rcu_read_unlock();
1801 	return cnt;
1802 }
1803 
1804 
1805 static bool bpf_prog_array_copy_core(struct bpf_prog_array __rcu *array,
1806 				     u32 *prog_ids,
1807 				     u32 request_cnt)
1808 {
1809 	struct bpf_prog_array_item *item;
1810 	int i = 0;
1811 
1812 	item = rcu_dereference_check(array, 1)->items;
1813 	for (; item->prog; item++) {
1814 		if (item->prog == &dummy_bpf_prog.prog)
1815 			continue;
1816 		prog_ids[i] = item->prog->aux->id;
1817 		if (++i == request_cnt) {
1818 			item++;
1819 			break;
1820 		}
1821 	}
1822 
1823 	return !!(item->prog);
1824 }
1825 
1826 int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *array,
1827 				__u32 __user *prog_ids, u32 cnt)
1828 {
1829 	unsigned long err = 0;
1830 	bool nospc;
1831 	u32 *ids;
1832 
1833 	/* Users of this function are doing:
1834 	 * cnt = bpf_prog_array_length();
1835 	 * if (cnt > 0)
1836 	 *     bpf_prog_array_copy_to_user(..., cnt);
1837 	 * so the kcalloc below doesn't need an extra cnt > 0 check, but
1838 	 * bpf_prog_array_length() releases the rcu lock and the prog
1839 	 * array could have been swapped with an empty or larger array,
1840 	 * so always copy 'cnt' prog_ids to the user.
1841 	 * In a rare race the user will see zero prog_ids.
1842 	 */
1843 	ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
1844 	if (!ids)
1845 		return -ENOMEM;
1846 	rcu_read_lock();
1847 	nospc = bpf_prog_array_copy_core(array, ids, cnt);
1848 	rcu_read_unlock();
1849 	err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
1850 	kfree(ids);
1851 	if (err)
1852 		return -EFAULT;
1853 	if (nospc)
1854 		return -ENOSPC;
1855 	return 0;
1856 }
1857 
1858 void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *array,
1859 				struct bpf_prog *old_prog)
1860 {
1861 	struct bpf_prog_array_item *item = array->items;
1862 
1863 	for (; item->prog; item++)
1864 		if (item->prog == old_prog) {
1865 			WRITE_ONCE(item->prog, &dummy_bpf_prog.prog);
1866 			break;
1867 		}
1868 }
1869 
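/* Build a new prog array from old_array with exclude_prog dropped and
 * include_prog appended.  Returns -ENOENT if exclude_prog was requested
 * but not found, -EEXIST if include_prog is already present, and sets
 * *new_array to NULL when the result would be empty.
 */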
1870 int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
1871 			struct bpf_prog *exclude_prog,
1872 			struct bpf_prog *include_prog,
1873 			struct bpf_prog_array **new_array)
1874 {
1875 	int new_prog_cnt, carry_prog_cnt = 0;
1876 	struct bpf_prog_array_item *existing;
1877 	struct bpf_prog_array *array;
1878 	bool found_exclude = false;
1879 	int new_prog_idx = 0;
1880 
1881 	/* Figure out how many existing progs we need to carry over to
1882 	 * the new array.
1883 	 */
1884 	if (old_array) {
1885 		existing = old_array->items;
1886 		for (; existing->prog; existing++) {
1887 			if (existing->prog == exclude_prog) {
1888 				found_exclude = true;
1889 				continue;
1890 			}
1891 			if (existing->prog != &dummy_bpf_prog.prog)
1892 				carry_prog_cnt++;
1893 			if (existing->prog == include_prog)
1894 				return -EEXIST;
1895 		}
1896 	}
1897 
1898 	if (exclude_prog && !found_exclude)
1899 		return -ENOENT;
1900 
1901 	/* How many progs (not NULL) will be in the new array? */
1902 	new_prog_cnt = carry_prog_cnt;
1903 	if (include_prog)
1904 		new_prog_cnt += 1;
1905 
1906 	/* Do we have any prog (not NULL) in the new array? */
1907 	if (!new_prog_cnt) {
1908 		*new_array = NULL;
1909 		return 0;
1910 	}
1911 
1912 	/* +1 as the end of prog_array is marked with NULL */
1913 	array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL);
1914 	if (!array)
1915 		return -ENOMEM;
1916 
1917 	/* Fill in the new prog array */
1918 	if (carry_prog_cnt) {
1919 		existing = old_array->items;
1920 		for (; existing->prog; existing++)
1921 			if (existing->prog != exclude_prog &&
1922 			    existing->prog != &dummy_bpf_prog.prog) {
1923 				array->items[new_prog_idx++].prog =
1924 					existing->prog;
1925 			}
1926 	}
1927 	if (include_prog)
1928 		array->items[new_prog_idx++].prog = include_prog;
1929 	array->items[new_prog_idx].prog = NULL;
1930 	*new_array = array;
1931 	return 0;
1932 }
1933 
1934 int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array,
1935 			     u32 *prog_ids, u32 request_cnt,
1936 			     u32 *prog_cnt)
1937 {
1938 	u32 cnt = 0;
1939 
1940 	if (array)
1941 		cnt = bpf_prog_array_length(array);
1942 
1943 	*prog_cnt = cnt;
1944 
1945 	/* return early if user requested only program count or nothing to copy */
1946 	if (!request_cnt || !cnt)
1947 		return 0;
1948 
1949 	/* this function is called under trace/bpf_trace.c: bpf_event_mutex */
1950 	return bpf_prog_array_copy_core(array, prog_ids, request_cnt) ? -ENOSPC
1951 								     : 0;
1952 }
1953 
1954 static void bpf_prog_free_deferred(struct work_struct *work)
1955 {
1956 	struct bpf_prog_aux *aux;
1957 	int i;
1958 
1959 	aux = container_of(work, struct bpf_prog_aux, work);
1960 	if (bpf_prog_is_dev_bound(aux))
1961 		bpf_prog_offload_destroy(aux->prog);
1962 #ifdef CONFIG_PERF_EVENTS
1963 	if (aux->prog->has_callchain_buf)
1964 		put_callchain_buffers();
1965 #endif
1966 	for (i = 0; i < aux->func_cnt; i++)
1967 		bpf_jit_free(aux->func[i]);
1968 	if (aux->func_cnt) {
1969 		kfree(aux->func);
1970 		bpf_prog_unlock_free(aux->prog);
1971 	} else {
1972 		bpf_jit_free(aux->prog);
1973 	}
1974 }
1975 
1976 /* Free internal BPF program */
1977 void bpf_prog_free(struct bpf_prog *fp)
1978 {
1979 	struct bpf_prog_aux *aux = fp->aux;
1980 
1981 	INIT_WORK(&aux->work, bpf_prog_free_deferred);
1982 	schedule_work(&aux->work);
1983 }
1984 EXPORT_SYMBOL_GPL(bpf_prog_free);
1985 
1986 /* RNG for unprivileged user space with state separated from prandom_u32(). */
1987 static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);
1988 
1989 void bpf_user_rnd_init_once(void)
1990 {
1991 	prandom_init_once(&bpf_user_rnd_state);
1992 }
1993 
1994 BPF_CALL_0(bpf_user_rnd_u32)
1995 {
1996 	/* Should someone ever have the rather unwise idea to use some
1997 	 * of the registers passed into this function, then note that
1998 	 * this function is called from native eBPF and classic-to-eBPF
1999 	 * transformations. Register assignments from both sides are
2000 	 * different, f.e. classic always sets fn(ctx, A, X) here.
2001 	 */
2002 	struct rnd_state *state;
2003 	u32 res;
2004 
2005 	state = &get_cpu_var(bpf_user_rnd_state);
2006 	res = prandom_u32_state(state);
2007 	put_cpu_var(bpf_user_rnd_state);
2008 
2009 	return res;
2010 }
2011 
2012 /* Weak definitions of helper functions in case we don't have bpf syscall. */
2013 const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
2014 const struct bpf_func_proto bpf_map_update_elem_proto __weak;
2015 const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
2016 const struct bpf_func_proto bpf_map_push_elem_proto __weak;
2017 const struct bpf_func_proto bpf_map_pop_elem_proto __weak;
2018 const struct bpf_func_proto bpf_map_peek_elem_proto __weak;
2019 
2020 const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
2021 const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
2022 const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
2023 const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
2024 
2025 const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
2026 const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
2027 const struct bpf_func_proto bpf_get_current_comm_proto __weak;
2028 const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak;
2029 const struct bpf_func_proto bpf_get_local_storage_proto __weak;
2030 
2031 const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
2032 {
2033 	return NULL;
2034 }
2035 
2036 u64 __weak
2037 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
2038 		 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
2039 {
2040 	return -ENOTSUPP;
2041 }
2042 EXPORT_SYMBOL_GPL(bpf_event_output);
2043 
2044 /* Always built-in helper functions. */
2045 const struct bpf_func_proto bpf_tail_call_proto = {
2046 	.func		= NULL,
2047 	.gpl_only	= false,
2048 	.ret_type	= RET_VOID,
2049 	.arg1_type	= ARG_PTR_TO_CTX,
2050 	.arg2_type	= ARG_CONST_MAP_PTR,
2051 	.arg3_type	= ARG_ANYTHING,
2052 };
2053 
2054 /* Stub for JITs that only support cBPF. eBPF programs are interpreted.
2055  * It is encouraged to implement bpf_int_jit_compile() instead, so that
2056  * eBPF and implicitly also cBPF can get JITed!
2057  */
2058 struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
2059 {
2060 	return prog;
2061 }
2062 
2063 /* Stub for JITs that support eBPF. All cBPF code gets transformed into
2064  * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
2065  */
2066 void __weak bpf_jit_compile(struct bpf_prog *prog)
2067 {
2068 }
2069 
2070 bool __weak bpf_helper_changes_pkt_data(void *func)
2071 {
2072 	return false;
2073 }
2074 
2075 /* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
2076  * skb_copy_bits(), so provide a weak definition of it for NET-less config.
2077  */
2078 int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
2079 			 int len)
2080 {
2081 	return -EFAULT;
2082 }
2083 
2084 /* All definitions of tracepoints related to BPF. */
2085 #define CREATE_TRACE_POINTS
2086 #include <linux/bpf_trace.h>
2087 
2088 EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);
2089