xref: /openbmc/linux/kernel/bpf/core.c (revision 5ffd8c73)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Linux Socket Filter - Kernel level socket filtering
4  *
5  * Based on the design of the Berkeley Packet Filter. The new
6  * internal format has been designed by PLUMgrid:
7  *
8  *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
9  *
10  * Authors:
11  *
12  *	Jay Schulist <jschlst@samba.org>
13  *	Alexei Starovoitov <ast@plumgrid.com>
14  *	Daniel Borkmann <dborkman@redhat.com>
15  *
16  * Andi Kleen - Fix a few bad bugs and races.
17  * Kris Katterjohn - Added many additional checks in bpf_check_classic()
18  */
19 
20 #include <uapi/linux/btf.h>
21 #include <linux/filter.h>
22 #include <linux/skbuff.h>
23 #include <linux/vmalloc.h>
24 #include <linux/random.h>
25 #include <linux/moduleloader.h>
26 #include <linux/bpf.h>
27 #include <linux/btf.h>
28 #include <linux/objtool.h>
29 #include <linux/rbtree_latch.h>
30 #include <linux/kallsyms.h>
31 #include <linux/rcupdate.h>
32 #include <linux/perf_event.h>
33 #include <linux/extable.h>
34 #include <linux/log2.h>
35 #include <linux/bpf_verifier.h>
36 #include <linux/nodemask.h>
37 #include <linux/nospec.h>
38 #include <linux/bpf_mem_alloc.h>
39 #include <linux/memcontrol.h>
40 
41 #include <asm/barrier.h>
42 #include <asm/unaligned.h>
43 
44 /* Registers */
45 #define BPF_R0	regs[BPF_REG_0]
46 #define BPF_R1	regs[BPF_REG_1]
47 #define BPF_R2	regs[BPF_REG_2]
48 #define BPF_R3	regs[BPF_REG_3]
49 #define BPF_R4	regs[BPF_REG_4]
50 #define BPF_R5	regs[BPF_REG_5]
51 #define BPF_R6	regs[BPF_REG_6]
52 #define BPF_R7	regs[BPF_REG_7]
53 #define BPF_R8	regs[BPF_REG_8]
54 #define BPF_R9	regs[BPF_REG_9]
55 #define BPF_R10	regs[BPF_REG_10]
56 
57 /* Named registers */
58 #define DST	regs[insn->dst_reg]
59 #define SRC	regs[insn->src_reg]
60 #define FP	regs[BPF_REG_FP]
61 #define AX	regs[BPF_REG_AX]
62 #define ARG1	regs[BPF_REG_ARG1]
63 #define CTX	regs[BPF_REG_CTX]
64 #define OFF	insn->off
65 #define IMM	insn->imm
66 
67 struct bpf_mem_alloc bpf_global_ma;
68 bool bpf_global_ma_set;
69 
70 /* No hurry in this branch
71  *
72  * Exported for the bpf jit load helper.
73  */
74 void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
75 {
76 	u8 *ptr = NULL;
77 
78 	if (k >= SKF_NET_OFF) {
79 		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
80 	} else if (k >= SKF_LL_OFF) {
81 		if (unlikely(!skb_mac_header_was_set(skb)))
82 			return NULL;
83 		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
84 	}
85 	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
86 		return ptr;
87 
88 	return NULL;
89 }
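
/* For illustration, with hypothetical offsets: a classic BPF filter can
 * read relative to the network or link-layer header by passing a
 * negative offset, e.g. k = SKF_NET_OFF + 14 requests the byte at
 * offset 14 into the network header. The helper above resolves such
 * offsets and bounds-checks the access against the linear skb data:
 *
 *	u8 *p = bpf_internal_load_pointer_neg_helper(skb,
 *						     SKF_NET_OFF + 14, 1);
 *	if (p)
 *		...one byte at network header offset 14 is readable...
 */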
90 
91 struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags)
92 {
93 	gfp_t gfp_flags = bpf_memcg_flags(GFP_KERNEL | __GFP_ZERO | gfp_extra_flags);
94 	struct bpf_prog_aux *aux;
95 	struct bpf_prog *fp;
96 
97 	size = round_up(size, PAGE_SIZE);
98 	fp = __vmalloc(size, gfp_flags);
99 	if (fp == NULL)
100 		return NULL;
101 
102 	aux = kzalloc(sizeof(*aux), bpf_memcg_flags(GFP_KERNEL | gfp_extra_flags));
103 	if (aux == NULL) {
104 		vfree(fp);
105 		return NULL;
106 	}
107 	fp->active = alloc_percpu_gfp(int, bpf_memcg_flags(GFP_KERNEL | gfp_extra_flags));
108 	if (!fp->active) {
109 		vfree(fp);
110 		kfree(aux);
111 		return NULL;
112 	}
113 
114 	fp->pages = size / PAGE_SIZE;
115 	fp->aux = aux;
116 	fp->aux->prog = fp;
117 	fp->jit_requested = ebpf_jit_enabled();
118 	fp->blinding_requested = bpf_jit_blinding_enabled(fp);
119 #ifdef CONFIG_CGROUP_BPF
120 	aux->cgroup_atype = CGROUP_BPF_ATTACH_TYPE_INVALID;
121 #endif
122 
123 	INIT_LIST_HEAD_RCU(&fp->aux->ksym.lnode);
124 	mutex_init(&fp->aux->used_maps_mutex);
125 	mutex_init(&fp->aux->dst_mutex);
126 
127 	return fp;
128 }
129 
130 struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
131 {
132 	gfp_t gfp_flags = bpf_memcg_flags(GFP_KERNEL | __GFP_ZERO | gfp_extra_flags);
133 	struct bpf_prog *prog;
134 	int cpu;
135 
136 	prog = bpf_prog_alloc_no_stats(size, gfp_extra_flags);
137 	if (!prog)
138 		return NULL;
139 
140 	prog->stats = alloc_percpu_gfp(struct bpf_prog_stats, gfp_flags);
141 	if (!prog->stats) {
142 		free_percpu(prog->active);
143 		kfree(prog->aux);
144 		vfree(prog);
145 		return NULL;
146 	}
147 
148 	for_each_possible_cpu(cpu) {
149 		struct bpf_prog_stats *pstats;
150 
151 		pstats = per_cpu_ptr(prog->stats, cpu);
152 		u64_stats_init(&pstats->syncp);
153 	}
154 	return prog;
155 }
156 EXPORT_SYMBOL_GPL(bpf_prog_alloc);
157 
158 int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog)
159 {
160 	if (!prog->aux->nr_linfo || !prog->jit_requested)
161 		return 0;
162 
163 	prog->aux->jited_linfo = kvcalloc(prog->aux->nr_linfo,
164 					  sizeof(*prog->aux->jited_linfo),
165 					  bpf_memcg_flags(GFP_KERNEL | __GFP_NOWARN));
166 	if (!prog->aux->jited_linfo)
167 		return -ENOMEM;
168 
169 	return 0;
170 }
171 
172 void bpf_prog_jit_attempt_done(struct bpf_prog *prog)
173 {
174 	if (prog->aux->jited_linfo &&
175 	    (!prog->jited || !prog->aux->jited_linfo[0])) {
176 		kvfree(prog->aux->jited_linfo);
177 		prog->aux->jited_linfo = NULL;
178 	}
179 
180 	kfree(prog->aux->kfunc_tab);
181 	prog->aux->kfunc_tab = NULL;
182 }
183 
184 /* The JIT engine is responsible for providing an array
185  * for the insn_off to jited_off mapping (insn_to_jit_off).
186  *
187  * The idx into this array is the insn_off.  Hence, the insn_off
188  * here is relative to the prog itself instead of the main prog.
189  * This array has one entry for each xlated bpf insn.
190  *
191  * jited_off is the byte offset to the end of the jited insn.
192  *
193  * Hence, with
194  * insn_start:
195  *      The first bpf insn off of the prog.  The insn off
196  *      here is relative to the main prog.
197  *      e.g. if prog is a subprog, insn_start > 0
198  * linfo_idx:
199  *      The prog's idx to prog->aux->linfo and jited_linfo
200  *
201  * jited_linfo[linfo_idx] = prog->bpf_func
202  *
203  * For i > linfo_idx,
204  *
205  * jited_linfo[i] = prog->bpf_func +
206  *	insn_to_jit_off[linfo[i].insn_off - insn_start - 1]
207  */
208 void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
209 			       const u32 *insn_to_jit_off)
210 {
211 	u32 linfo_idx, insn_start, insn_end, nr_linfo, i;
212 	const struct bpf_line_info *linfo;
213 	void **jited_linfo;
214 
215 	if (!prog->aux->jited_linfo)
216 		/* Userspace did not provide linfo */
217 		return;
218 
219 	linfo_idx = prog->aux->linfo_idx;
220 	linfo = &prog->aux->linfo[linfo_idx];
221 	insn_start = linfo[0].insn_off;
222 	insn_end = insn_start + prog->len;
223 
224 	jited_linfo = &prog->aux->jited_linfo[linfo_idx];
225 	jited_linfo[0] = prog->bpf_func;
226 
227 	nr_linfo = prog->aux->nr_linfo - linfo_idx;
228 
229 	for (i = 1; i < nr_linfo && linfo[i].insn_off < insn_end; i++)
230 		/* The verifier ensures that linfo[i].insn_off is
231 		 * strictly increasing
232 		 */
233 		jited_linfo[i] = prog->bpf_func +
234 			insn_to_jit_off[linfo[i].insn_off - insn_start - 1];
235 }
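
/* A worked example of the mapping above, with hypothetical numbers: for
 * a subprog with insn_start = 10 and line info at xlated insns
 * {10, 12, 15}, the loop fills
 *
 *	jited_linfo[0] = bpf_func;                       // insn 10
 *	jited_linfo[1] = bpf_func + insn_to_jit_off[1];  // insn 12
 *	jited_linfo[2] = bpf_func + insn_to_jit_off[4];  // insn 15
 *
 * i.e. each entry points at the first jited byte of its xlated insn,
 * which is the byte right after the end of the preceding insn.
 */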
236 
237 struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
238 				  gfp_t gfp_extra_flags)
239 {
240 	gfp_t gfp_flags = bpf_memcg_flags(GFP_KERNEL | __GFP_ZERO | gfp_extra_flags);
241 	struct bpf_prog *fp;
242 	u32 pages;
243 
244 	size = round_up(size, PAGE_SIZE);
245 	pages = size / PAGE_SIZE;
246 	if (pages <= fp_old->pages)
247 		return fp_old;
248 
249 	fp = __vmalloc(size, gfp_flags);
250 	if (fp) {
251 		memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
252 		fp->pages = pages;
253 		fp->aux->prog = fp;
254 
255 		/* We keep fp->aux from fp_old around in the new
256 		 * reallocated structure.
257 		 */
258 		fp_old->aux = NULL;
259 		fp_old->stats = NULL;
260 		fp_old->active = NULL;
261 		__bpf_prog_free(fp_old);
262 	}
263 
264 	return fp;
265 }
266 
267 void __bpf_prog_free(struct bpf_prog *fp)
268 {
269 	if (fp->aux) {
270 		mutex_destroy(&fp->aux->used_maps_mutex);
271 		mutex_destroy(&fp->aux->dst_mutex);
272 		kfree(fp->aux->poke_tab);
273 		kfree(fp->aux);
274 	}
275 	free_percpu(fp->stats);
276 	free_percpu(fp->active);
277 	vfree(fp);
278 }
279 
280 int bpf_prog_calc_tag(struct bpf_prog *fp)
281 {
282 	const u32 bits_offset = SHA1_BLOCK_SIZE - sizeof(__be64);
283 	u32 raw_size = bpf_prog_tag_scratch_size(fp);
284 	u32 digest[SHA1_DIGEST_WORDS];
285 	u32 ws[SHA1_WORKSPACE_WORDS];
286 	u32 i, bsize, psize, blocks;
287 	struct bpf_insn *dst;
288 	bool was_ld_map;
289 	u8 *raw, *todo;
290 	__be32 *result;
291 	__be64 *bits;
292 
293 	raw = vmalloc(raw_size);
294 	if (!raw)
295 		return -ENOMEM;
296 
297 	sha1_init(digest);
298 	memset(ws, 0, sizeof(ws));
299 
300 	/* We need to take out the map fds for the digest calculation
301 	 * since they are unstable from the user space side.
302 	 */
303 	dst = (void *)raw;
304 	for (i = 0, was_ld_map = false; i < fp->len; i++) {
305 		dst[i] = fp->insnsi[i];
306 		if (!was_ld_map &&
307 		    dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
308 		    (dst[i].src_reg == BPF_PSEUDO_MAP_FD ||
309 		     dst[i].src_reg == BPF_PSEUDO_MAP_VALUE)) {
310 			was_ld_map = true;
311 			dst[i].imm = 0;
312 		} else if (was_ld_map &&
313 			   dst[i].code == 0 &&
314 			   dst[i].dst_reg == 0 &&
315 			   dst[i].src_reg == 0 &&
316 			   dst[i].off == 0) {
317 			was_ld_map = false;
318 			dst[i].imm = 0;
319 		} else {
320 			was_ld_map = false;
321 		}
322 	}
323 
324 	psize = bpf_prog_insn_size(fp);
325 	memset(&raw[psize], 0, raw_size - psize);
326 	raw[psize++] = 0x80;
327 
328 	bsize  = round_up(psize, SHA1_BLOCK_SIZE);
329 	blocks = bsize / SHA1_BLOCK_SIZE;
330 	todo   = raw;
331 	if (bsize - psize >= sizeof(__be64)) {
332 		bits = (__be64 *)(todo + bsize - sizeof(__be64));
333 	} else {
334 		bits = (__be64 *)(todo + bsize + bits_offset);
335 		blocks++;
336 	}
337 	*bits = cpu_to_be64((psize - 1) << 3);
338 
339 	while (blocks--) {
340 		sha1_transform(digest, todo, ws);
341 		todo += SHA1_BLOCK_SIZE;
342 	}
343 
344 	result = (__force __be32 *)digest;
345 	for (i = 0; i < SHA1_DIGEST_WORDS; i++)
346 		result[i] = cpu_to_be32(digest[i]);
347 	memcpy(fp->tag, result, sizeof(fp->tag));
348 
349 	vfree(raw);
350 	return 0;
351 }
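
/* The padding math in bpf_prog_calc_tag follows the SHA-1 message
 * layout; with hypothetical numbers: a prog of 2 insns gives psize = 16
 * message bytes. Appending the 0x80 terminator makes psize = 17, which
 * rounds up to bsize = 64, i.e. one block. Since bsize - psize = 47 is
 * >= sizeof(__be64), the big-endian bit length (16 << 3 = 128) fits in
 * the last 8 bytes of that same block; otherwise an extra block would
 * be appended, which is what the blocks++ branch above does.
 */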
352 
353 static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old,
354 				s32 end_new, s32 curr, const bool probe_pass)
355 {
356 	const s64 imm_min = S32_MIN, imm_max = S32_MAX;
357 	s32 delta = end_new - end_old;
358 	s64 imm = insn->imm;
359 
360 	if (curr < pos && curr + imm + 1 >= end_old)
361 		imm += delta;
362 	else if (curr >= end_new && curr + imm + 1 < end_new)
363 		imm -= delta;
364 	if (imm < imm_min || imm > imm_max)
365 		return -ERANGE;
366 	if (!probe_pass)
367 		insn->imm = imm;
368 	return 0;
369 }
370 
371 static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old,
372 				s32 end_new, s32 curr, const bool probe_pass)
373 {
374 	const s32 off_min = S16_MIN, off_max = S16_MAX;
375 	s32 delta = end_new - end_old;
376 	s32 off;
377 
378 	if (insn->code == (BPF_JMP32 | BPF_JA))
379 		off = insn->imm;
380 	else
381 		off = insn->off;
382 
383 	if (curr < pos && curr + off + 1 >= end_old)
384 		off += delta;
385 	else if (curr >= end_new && curr + off + 1 < end_new)
386 		off -= delta;
387 	if (off < off_min || off > off_max)
388 		return -ERANGE;
389 	if (!probe_pass) {
390 		if (insn->code == (BPF_JMP32 | BPF_JA))
391 			insn->imm = off;
392 		else
393 			insn->off = off;
394 	}
395 	return 0;
396 }
397 
398 static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, s32 end_old,
399 			    s32 end_new, const bool probe_pass)
400 {
401 	u32 i, insn_cnt = prog->len + (probe_pass ? end_new - end_old : 0);
402 	struct bpf_insn *insn = prog->insnsi;
403 	int ret = 0;
404 
405 	for (i = 0; i < insn_cnt; i++, insn++) {
406 		u8 code;
407 
408 		/* In the probing pass we still operate on the original,
409 		 * unpatched image in order to check overflows before we
410 		 * do any other adjustments. Therefore skip the patchlet.
411 		 */
412 		if (probe_pass && i == pos) {
413 			i = end_new;
414 			insn = prog->insnsi + end_old;
415 		}
416 		if (bpf_pseudo_func(insn)) {
417 			ret = bpf_adj_delta_to_imm(insn, pos, end_old,
418 						   end_new, i, probe_pass);
419 			if (ret)
420 				return ret;
421 			continue;
422 		}
423 		code = insn->code;
424 		if ((BPF_CLASS(code) != BPF_JMP &&
425 		     BPF_CLASS(code) != BPF_JMP32) ||
426 		    BPF_OP(code) == BPF_EXIT)
427 			continue;
428 		/* Adjust offset of jmps if we cross patch boundaries. */
429 		if (BPF_OP(code) == BPF_CALL) {
430 			if (insn->src_reg != BPF_PSEUDO_CALL)
431 				continue;
432 			ret = bpf_adj_delta_to_imm(insn, pos, end_old,
433 						   end_new, i, probe_pass);
434 		} else {
435 			ret = bpf_adj_delta_to_off(insn, pos, end_old,
436 						   end_new, i, probe_pass);
437 		}
438 		if (ret)
439 			break;
440 	}
441 
442 	return ret;
443 }
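
/* A worked example of the adjustment above, with hypothetical numbers:
 * if the insn at pos = 5 is expanded into 3 insns (end_old = 6,
 * end_new = 8, delta = 2), a forward jump at insn 2 with off = 4
 * (old target 2 + 4 + 1 = 7, past the old patch end) is rewritten to
 * off = 6 so that it still lands on the same logical instruction in
 * the expanded image.
 */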
444 
445 static void bpf_adj_linfo(struct bpf_prog *prog, u32 off, u32 delta)
446 {
447 	struct bpf_line_info *linfo;
448 	u32 i, nr_linfo;
449 
450 	nr_linfo = prog->aux->nr_linfo;
451 	if (!nr_linfo || !delta)
452 		return;
453 
454 	linfo = prog->aux->linfo;
455 
456 	for (i = 0; i < nr_linfo; i++)
457 		if (off < linfo[i].insn_off)
458 			break;
459 
460 	/* Push insn_off of all entries past the patch point (off < insn_off) by delta */
461 	for (; i < nr_linfo; i++)
462 		linfo[i].insn_off += delta;
463 }
464 
465 struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
466 				       const struct bpf_insn *patch, u32 len)
467 {
468 	u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
469 	const u32 cnt_max = S16_MAX;
470 	struct bpf_prog *prog_adj;
471 	int err;
472 
473 	/* Since our patchlet doesn't expand the image, we're done. */
474 	if (insn_delta == 0) {
475 		memcpy(prog->insnsi + off, patch, sizeof(*patch));
476 		return prog;
477 	}
478 
479 	insn_adj_cnt = prog->len + insn_delta;
480 
481 	/* Reject anything that would potentially let the insn->off
482 	 * target overflow when we have excessive program expansions.
483 	 * We need to probe here before we do any reallocation, since
484 	 * afterwards we may no longer be able to fail.
485 	 */
486 	if (insn_adj_cnt > cnt_max &&
487 	    (err = bpf_adj_branches(prog, off, off + 1, off + len, true)))
488 		return ERR_PTR(err);
489 
490 	/* Several new instructions need to be inserted. Make room
491 	 * for them. Likely, there's no need for a new allocation as
492 	 * the last page could have large enough tailroom.
493 	 */
494 	prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
495 				    GFP_USER);
496 	if (!prog_adj)
497 		return ERR_PTR(-ENOMEM);
498 
499 	prog_adj->len = insn_adj_cnt;
500 
501 	/* Patching happens in 3 steps:
502 	 *
503 	 * 1) Move over tail of insnsi from next instruction onwards,
504 	 *    so we can patch the single target insn with one or more
505 	 *    new ones (patching is always from 1 to n insns, n > 0).
506 	 * 2) Inject new instructions at the target location.
507 	 * 3) Adjust branch offsets if necessary.
508 	 */
509 	insn_rest = insn_adj_cnt - off - len;
510 
511 	memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
512 		sizeof(*patch) * insn_rest);
513 	memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);
514 
515 	/* We are guaranteed to not fail at this point; the ship has
516 	 * sailed and we can no longer revert to the original state. An
517 	 * overflow cannot happen at this point.
518 	 */
519 	BUG_ON(bpf_adj_branches(prog_adj, off, off + 1, off + len, false));
520 
521 	bpf_adj_linfo(prog_adj, off, insn_delta);
522 
523 	return prog_adj;
524 }
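
/* A minimal usage sketch with a hypothetical patchlet, in the style of
 * the verifier's rewrites (patching is always 1 -> n insns, n > 0):
 *
 *	struct bpf_insn patchlet[] = {
 *		BPF_MOV64_REG(BPF_REG_AX, BPF_REG_1),
 *		BPF_ALU64_IMM(BPF_ADD, BPF_REG_AX, 8),
 *	};
 *
 *	prog = bpf_patch_insn_single(prog, off, patchlet,
 *				     ARRAY_SIZE(patchlet));
 *	if (IS_ERR(prog))
 *		return PTR_ERR(prog);
 *
 * On success the returned prog may be a reallocated copy, so callers
 * must only use the returned pointer from then on.
 */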
525 
526 int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt)
527 {
528 	/* Branch offsets can't overflow when the program is shrinking, so
529 	 * there's no need to call bpf_adj_branches(..., true) here.
530 	 */
531 	memmove(prog->insnsi + off, prog->insnsi + off + cnt,
532 		sizeof(struct bpf_insn) * (prog->len - off - cnt));
533 	prog->len -= cnt;
534 
535 	return WARN_ON_ONCE(bpf_adj_branches(prog, off, off + cnt, off, false));
536 }
537 
538 static void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
539 {
540 	int i;
541 
542 	for (i = 0; i < fp->aux->func_cnt; i++)
543 		bpf_prog_kallsyms_del(fp->aux->func[i]);
544 }
545 
546 void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
547 {
548 	bpf_prog_kallsyms_del_subprogs(fp);
549 	bpf_prog_kallsyms_del(fp);
550 }
551 
552 #ifdef CONFIG_BPF_JIT
553 /* All BPF JIT sysctl knobs here. */
554 int bpf_jit_enable   __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
555 int bpf_jit_kallsyms __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
556 int bpf_jit_harden   __read_mostly;
557 long bpf_jit_limit   __read_mostly;
558 long bpf_jit_limit_max __read_mostly;
559 
560 static void
561 bpf_prog_ksym_set_addr(struct bpf_prog *prog)
562 {
563 	WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));
564 
565 	prog->aux->ksym.start = (unsigned long) prog->bpf_func;
566 	prog->aux->ksym.end   = prog->aux->ksym.start + prog->jited_len;
567 }
568 
569 static void
570 bpf_prog_ksym_set_name(struct bpf_prog *prog)
571 {
572 	char *sym = prog->aux->ksym.name;
573 	const char *end = sym + KSYM_NAME_LEN;
574 	const struct btf_type *type;
575 	const char *func_name;
576 
577 	BUILD_BUG_ON(sizeof("bpf_prog_") +
578 		     sizeof(prog->tag) * 2 +
579 		     /* name has been null terminated.
580 		      * We would need +1 for the '_' preceding
581 		      * the name.  However, the null character
582 		      * is double counted between the name and the
583 		      * sizeof("bpf_prog_") above, so we omit
584 		      * the +1 here.
585 		      */
586 		     sizeof(prog->aux->name) > KSYM_NAME_LEN);
587 
588 	sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
589 	sym  = bin2hex(sym, prog->tag, sizeof(prog->tag));
590 
591 	/* prog->aux->name will be ignored if full btf name is available */
592 	if (prog->aux->func_info_cnt) {
593 		type = btf_type_by_id(prog->aux->btf,
594 				      prog->aux->func_info[prog->aux->func_idx].type_id);
595 		func_name = btf_name_by_offset(prog->aux->btf, type->name_off);
596 		snprintf(sym, (size_t)(end - sym), "_%s", func_name);
597 		return;
598 	}
599 
600 	if (prog->aux->name[0])
601 		snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name);
602 	else
603 		*sym = 0;
604 }
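
/* For illustration, with a hypothetical tag and name: the resulting
 * symbol is "bpf_prog_", the 8-byte tag as 16 hex chars, and an
 * optional suffix, e.g.
 *
 *	bpf_prog_8937c9e7fc398ab5_my_func
 *
 * which is what appears in /proc/kallsyms and perf once the program is
 * added via bpf_prog_kallsyms_add() below.
 */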
605 
606 static unsigned long bpf_get_ksym_start(struct latch_tree_node *n)
607 {
608 	return container_of(n, struct bpf_ksym, tnode)->start;
609 }
610 
611 static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
612 					  struct latch_tree_node *b)
613 {
614 	return bpf_get_ksym_start(a) < bpf_get_ksym_start(b);
615 }
616 
617 static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
618 {
619 	unsigned long val = (unsigned long)key;
620 	const struct bpf_ksym *ksym;
621 
622 	ksym = container_of(n, struct bpf_ksym, tnode);
623 
624 	if (val < ksym->start)
625 		return -1;
626 	/* Ensure that we detect return addresses as part of the program, for
627 	 * when the final instruction is a call whose return address shows up
628 	 * in a stack trace. Therefore, do val > ksym->end instead of val >= ksym->end.
629 	 */
630 	if (val > ksym->end)
631 		return  1;
632 
633 	return 0;
634 }
635 
636 static const struct latch_tree_ops bpf_tree_ops = {
637 	.less	= bpf_tree_less,
638 	.comp	= bpf_tree_comp,
639 };
640 
641 static DEFINE_SPINLOCK(bpf_lock);
642 static LIST_HEAD(bpf_kallsyms);
643 static struct latch_tree_root bpf_tree __cacheline_aligned;
644 
645 void bpf_ksym_add(struct bpf_ksym *ksym)
646 {
647 	spin_lock_bh(&bpf_lock);
648 	WARN_ON_ONCE(!list_empty(&ksym->lnode));
649 	list_add_tail_rcu(&ksym->lnode, &bpf_kallsyms);
650 	latch_tree_insert(&ksym->tnode, &bpf_tree, &bpf_tree_ops);
651 	spin_unlock_bh(&bpf_lock);
652 }
653 
654 static void __bpf_ksym_del(struct bpf_ksym *ksym)
655 {
656 	if (list_empty(&ksym->lnode))
657 		return;
658 
659 	latch_tree_erase(&ksym->tnode, &bpf_tree, &bpf_tree_ops);
660 	list_del_rcu(&ksym->lnode);
661 }
662 
663 void bpf_ksym_del(struct bpf_ksym *ksym)
664 {
665 	spin_lock_bh(&bpf_lock);
666 	__bpf_ksym_del(ksym);
667 	spin_unlock_bh(&bpf_lock);
668 }
669 
670 static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
671 {
672 	return fp->jited && !bpf_prog_was_classic(fp);
673 }
674 
675 void bpf_prog_kallsyms_add(struct bpf_prog *fp)
676 {
677 	if (!bpf_prog_kallsyms_candidate(fp) ||
678 	    !bpf_capable())
679 		return;
680 
681 	bpf_prog_ksym_set_addr(fp);
682 	bpf_prog_ksym_set_name(fp);
683 	fp->aux->ksym.prog = true;
684 
685 	bpf_ksym_add(&fp->aux->ksym);
686 }
687 
688 void bpf_prog_kallsyms_del(struct bpf_prog *fp)
689 {
690 	if (!bpf_prog_kallsyms_candidate(fp))
691 		return;
692 
693 	bpf_ksym_del(&fp->aux->ksym);
694 }
695 
696 static struct bpf_ksym *bpf_ksym_find(unsigned long addr)
697 {
698 	struct latch_tree_node *n;
699 
700 	n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
701 	return n ? container_of(n, struct bpf_ksym, tnode) : NULL;
702 }
703 
704 const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
705 				 unsigned long *off, char *sym)
706 {
707 	struct bpf_ksym *ksym;
708 	char *ret = NULL;
709 
710 	rcu_read_lock();
711 	ksym = bpf_ksym_find(addr);
712 	if (ksym) {
713 		unsigned long symbol_start = ksym->start;
714 		unsigned long symbol_end = ksym->end;
715 
716 		strncpy(sym, ksym->name, KSYM_NAME_LEN);
717 
718 		ret = sym;
719 		if (size)
720 			*size = symbol_end - symbol_start;
721 		if (off)
722 			*off  = addr - symbol_start;
723 	}
724 	rcu_read_unlock();
725 
726 	return ret;
727 }
728 
729 bool is_bpf_text_address(unsigned long addr)
730 {
731 	bool ret;
732 
733 	rcu_read_lock();
734 	ret = bpf_ksym_find(addr) != NULL;
735 	rcu_read_unlock();
736 
737 	return ret;
738 }
739 
740 static struct bpf_prog *bpf_prog_ksym_find(unsigned long addr)
741 {
742 	struct bpf_ksym *ksym = bpf_ksym_find(addr);
743 
744 	return ksym && ksym->prog ?
745 	       container_of(ksym, struct bpf_prog_aux, ksym)->prog :
746 	       NULL;
747 }
748 
749 const struct exception_table_entry *search_bpf_extables(unsigned long addr)
750 {
751 	const struct exception_table_entry *e = NULL;
752 	struct bpf_prog *prog;
753 
754 	rcu_read_lock();
755 	prog = bpf_prog_ksym_find(addr);
756 	if (!prog)
757 		goto out;
758 	if (!prog->aux->num_exentries)
759 		goto out;
760 
761 	e = search_extable(prog->aux->extable, prog->aux->num_exentries, addr);
762 out:
763 	rcu_read_unlock();
764 	return e;
765 }
766 
767 int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
768 		    char *sym)
769 {
770 	struct bpf_ksym *ksym;
771 	unsigned int it = 0;
772 	int ret = -ERANGE;
773 
774 	if (!bpf_jit_kallsyms_enabled())
775 		return ret;
776 
777 	rcu_read_lock();
778 	list_for_each_entry_rcu(ksym, &bpf_kallsyms, lnode) {
779 		if (it++ != symnum)
780 			continue;
781 
782 		strncpy(sym, ksym->name, KSYM_NAME_LEN);
783 
784 		*value = ksym->start;
785 		*type  = BPF_SYM_ELF_TYPE;
786 
787 		ret = 0;
788 		break;
789 	}
790 	rcu_read_unlock();
791 
792 	return ret;
793 }
794 
795 int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
796 				struct bpf_jit_poke_descriptor *poke)
797 {
798 	struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab;
799 	static const u32 poke_tab_max = 1024;
800 	u32 slot = prog->aux->size_poke_tab;
801 	u32 size = slot + 1;
802 
803 	if (size > poke_tab_max)
804 		return -ENOSPC;
805 	if (poke->tailcall_target || poke->tailcall_target_stable ||
806 	    poke->tailcall_bypass || poke->adj_off || poke->bypass_addr)
807 		return -EINVAL;
808 
809 	switch (poke->reason) {
810 	case BPF_POKE_REASON_TAIL_CALL:
811 		if (!poke->tail_call.map)
812 			return -EINVAL;
813 		break;
814 	default:
815 		return -EINVAL;
816 	}
817 
818 	tab = krealloc(tab, size * sizeof(*poke), GFP_KERNEL);
819 	if (!tab)
820 		return -ENOMEM;
821 
822 	memcpy(&tab[slot], poke, sizeof(*poke));
823 	prog->aux->size_poke_tab = size;
824 	prog->aux->poke_tab = tab;
825 
826 	return slot;
827 }
828 
829 /*
830  * BPF program pack allocator.
831  *
832  * Most BPF programs are pretty small. Allocating a whole page for each
833  * program is sometimes a waste. Many small BPF programs also add pressure
834  * to the instruction TLB. To solve this issue, we introduce a BPF program
835  * pack allocator. The prog_pack allocator uses HPAGE_PMD_SIZE pages (2MB
836  * on x86) to host BPF programs.
837  */
838 #define BPF_PROG_CHUNK_SHIFT	6
839 #define BPF_PROG_CHUNK_SIZE	(1 << BPF_PROG_CHUNK_SHIFT)
840 #define BPF_PROG_CHUNK_MASK	(~(BPF_PROG_CHUNK_SIZE - 1))
841 
842 struct bpf_prog_pack {
843 	struct list_head list;
844 	void *ptr;
845 	unsigned long bitmap[];
846 };
847 
848 void bpf_jit_fill_hole_with_zero(void *area, unsigned int size)
849 {
850 	memset(area, 0, size);
851 }
852 
853 #define BPF_PROG_SIZE_TO_NBITS(size)	(round_up(size, BPF_PROG_CHUNK_SIZE) / BPF_PROG_CHUNK_SIZE)
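
/* Chunk accounting example, with a hypothetical size: a 300 byte image
 * needs BPF_PROG_SIZE_TO_NBITS(300) = round_up(300, 64) / 64 = 5
 * chunks, so 5 consecutive bits get reserved in a pack's bitmap.
 */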
854 
855 static DEFINE_MUTEX(pack_mutex);
856 static LIST_HEAD(pack_list);
857 
858 /* PMD_SIZE is not available in some special configs, e.g. ARCH=arm with
859  * CONFIG_MMU=n. Use PAGE_SIZE in these cases.
860  */
861 #ifdef PMD_SIZE
862 #define BPF_PROG_PACK_SIZE (PMD_SIZE * num_possible_nodes())
863 #else
864 #define BPF_PROG_PACK_SIZE PAGE_SIZE
865 #endif
866 
867 #define BPF_PROG_CHUNK_COUNT (BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE)
868 
869 static struct bpf_prog_pack *alloc_new_pack(bpf_jit_fill_hole_t bpf_fill_ill_insns)
870 {
871 	struct bpf_prog_pack *pack;
872 
873 	pack = kzalloc(struct_size(pack, bitmap, BITS_TO_LONGS(BPF_PROG_CHUNK_COUNT)),
874 		       GFP_KERNEL);
875 	if (!pack)
876 		return NULL;
877 	pack->ptr = bpf_jit_alloc_exec(BPF_PROG_PACK_SIZE);
878 	if (!pack->ptr) {
879 		kfree(pack);
880 		return NULL;
881 	}
882 	bpf_fill_ill_insns(pack->ptr, BPF_PROG_PACK_SIZE);
883 	bitmap_zero(pack->bitmap, BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE);
884 	list_add_tail(&pack->list, &pack_list);
885 
886 	set_vm_flush_reset_perms(pack->ptr);
887 	set_memory_rox((unsigned long)pack->ptr, BPF_PROG_PACK_SIZE / PAGE_SIZE);
888 	return pack;
889 }
890 
891 void *bpf_prog_pack_alloc(u32 size, bpf_jit_fill_hole_t bpf_fill_ill_insns)
892 {
893 	unsigned int nbits = BPF_PROG_SIZE_TO_NBITS(size);
894 	struct bpf_prog_pack *pack;
895 	unsigned long pos;
896 	void *ptr = NULL;
897 
898 	mutex_lock(&pack_mutex);
899 	if (size > BPF_PROG_PACK_SIZE) {
900 		size = round_up(size, PAGE_SIZE);
901 		ptr = bpf_jit_alloc_exec(size);
902 		if (ptr) {
903 			bpf_fill_ill_insns(ptr, size);
904 			set_vm_flush_reset_perms(ptr);
905 			set_memory_rox((unsigned long)ptr, size / PAGE_SIZE);
906 		}
907 		goto out;
908 	}
909 	list_for_each_entry(pack, &pack_list, list) {
910 		pos = bitmap_find_next_zero_area(pack->bitmap, BPF_PROG_CHUNK_COUNT, 0,
911 						 nbits, 0);
912 		if (pos < BPF_PROG_CHUNK_COUNT)
913 			goto found_free_area;
914 	}
915 
916 	pack = alloc_new_pack(bpf_fill_ill_insns);
917 	if (!pack)
918 		goto out;
919 
920 	pos = 0;
921 
922 found_free_area:
923 	bitmap_set(pack->bitmap, pos, nbits);
924 	ptr = (void *)(pack->ptr) + (pos << BPF_PROG_CHUNK_SHIFT);
925 
926 out:
927 	mutex_unlock(&pack_mutex);
928 	return ptr;
929 }
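
/* A hedged usage sketch of the pack allocator: a caller such as an
 * arch JIT or trampoline typically does
 *
 *	void *image = bpf_prog_pack_alloc(size, bpf_jit_fill_hole_with_zero);
 *
 *	if (!image)
 *		return -ENOMEM;
 *
 * and later releases the chunks with bpf_prog_pack_free(). Note that
 * the returned memory is already RO+X, so it must be written via
 * bpf_arch_text_copy() rather than plain stores.
 */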
930 
931 void bpf_prog_pack_free(struct bpf_binary_header *hdr)
932 {
933 	struct bpf_prog_pack *pack = NULL, *tmp;
934 	unsigned int nbits;
935 	unsigned long pos;
936 
937 	mutex_lock(&pack_mutex);
938 	if (hdr->size > BPF_PROG_PACK_SIZE) {
939 		bpf_jit_free_exec(hdr);
940 		goto out;
941 	}
942 
943 	list_for_each_entry(tmp, &pack_list, list) {
944 		if ((void *)hdr >= tmp->ptr && (tmp->ptr + BPF_PROG_PACK_SIZE) > (void *)hdr) {
945 			pack = tmp;
946 			break;
947 		}
948 	}
949 
950 	if (WARN_ONCE(!pack, "bpf_prog_pack bug\n"))
951 		goto out;
952 
953 	nbits = BPF_PROG_SIZE_TO_NBITS(hdr->size);
954 	pos = ((unsigned long)hdr - (unsigned long)pack->ptr) >> BPF_PROG_CHUNK_SHIFT;
955 
956 	WARN_ONCE(bpf_arch_text_invalidate(hdr, hdr->size),
957 		  "bpf_prog_pack bug: missing bpf_arch_text_invalidate?\n");
958 
959 	bitmap_clear(pack->bitmap, pos, nbits);
960 	if (bitmap_find_next_zero_area(pack->bitmap, BPF_PROG_CHUNK_COUNT, 0,
961 				       BPF_PROG_CHUNK_COUNT, 0) == 0) {
962 		list_del(&pack->list);
963 		bpf_jit_free_exec(pack->ptr);
964 		kfree(pack);
965 	}
966 out:
967 	mutex_unlock(&pack_mutex);
968 }
969 
970 static atomic_long_t bpf_jit_current;
971 
972 /* Can be overridden by an arch's JIT compiler if it has a custom,
973  * dedicated BPF backend memory area, or if neither of the two
974  * below apply.
975  */
976 u64 __weak bpf_jit_alloc_exec_limit(void)
977 {
978 #if defined(MODULES_VADDR)
979 	return MODULES_END - MODULES_VADDR;
980 #else
981 	return VMALLOC_END - VMALLOC_START;
982 #endif
983 }
984 
985 static int __init bpf_jit_charge_init(void)
986 {
987 	/* Only used as a heuristic here to derive the limit. */
988 	bpf_jit_limit_max = bpf_jit_alloc_exec_limit();
989 	bpf_jit_limit = min_t(u64, round_up(bpf_jit_limit_max >> 1,
990 					    PAGE_SIZE), LONG_MAX);
991 	return 0;
992 }
993 pure_initcall(bpf_jit_charge_init);
994 
995 int bpf_jit_charge_modmem(u32 size)
996 {
997 	if (atomic_long_add_return(size, &bpf_jit_current) > READ_ONCE(bpf_jit_limit)) {
998 		if (!bpf_capable()) {
999 			atomic_long_sub(size, &bpf_jit_current);
1000 			return -EPERM;
1001 		}
1002 	}
1003 
1004 	return 0;
1005 }
1006 
1007 void bpf_jit_uncharge_modmem(u32 size)
1008 {
1009 	atomic_long_sub(size, &bpf_jit_current);
1010 }
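
/* The two functions above pair up around every executable allocation;
 * the canonical pattern (as used by bpf_jit_binary_alloc() below) is:
 *
 *	if (bpf_jit_charge_modmem(size))
 *		return NULL;
 *	hdr = bpf_jit_alloc_exec(size);
 *	if (!hdr) {
 *		bpf_jit_uncharge_modmem(size);
 *		return NULL;
 *	}
 */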
1011 
1012 void *__weak bpf_jit_alloc_exec(unsigned long size)
1013 {
1014 	return module_alloc(size);
1015 }
1016 
1017 void __weak bpf_jit_free_exec(void *addr)
1018 {
1019 	module_memfree(addr);
1020 }
1021 
1022 struct bpf_binary_header *
1023 bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
1024 		     unsigned int alignment,
1025 		     bpf_jit_fill_hole_t bpf_fill_ill_insns)
1026 {
1027 	struct bpf_binary_header *hdr;
1028 	u32 size, hole, start;
1029 
1030 	WARN_ON_ONCE(!is_power_of_2(alignment) ||
1031 		     alignment > BPF_IMAGE_ALIGNMENT);
1032 
1033 	/* Most BPF filters are really small, but if some of them
1034 	 * fill a page, allow at least 128 extra bytes to insert a
1035 	 * random section of illegal instructions.
1036 	 */
1037 	size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
1038 
1039 	if (bpf_jit_charge_modmem(size))
1040 		return NULL;
1041 	hdr = bpf_jit_alloc_exec(size);
1042 	if (!hdr) {
1043 		bpf_jit_uncharge_modmem(size);
1044 		return NULL;
1045 	}
1046 
1047 	/* Fill space with illegal/arch-dep instructions. */
1048 	bpf_fill_ill_insns(hdr, size);
1049 
1050 	hdr->size = size;
1051 	hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
1052 		     PAGE_SIZE - sizeof(*hdr));
1053 	start = get_random_u32_below(hole) & ~(alignment - 1);
1054 
1055 	/* Leave a random number of instructions before BPF code. */
1056 	*image_ptr = &hdr->image[start];
1057 
1058 	return hdr;
1059 }
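
/* For illustration, with hypothetical values: for proglen = 100 on a 4K
 * page, size = 4096 and hole = 4096 - sizeof(*hdr) - 100; the random
 * start in [0, hole) is rounded down to the requested alignment, so up
 * to roughly 3.9K of illegal instructions may precede the image, which
 * hardens the JIT against spraying attacks.
 */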
1060 
1061 void bpf_jit_binary_free(struct bpf_binary_header *hdr)
1062 {
1063 	u32 size = hdr->size;
1064 
1065 	bpf_jit_free_exec(hdr);
1066 	bpf_jit_uncharge_modmem(size);
1067 }
1068 
1069 /* Allocate the jit binary from the bpf_prog_pack allocator.
1070  * Since the allocated memory is RO+X, the JIT engine cannot write directly
1071  * to the memory. To solve this problem, a RW buffer is also allocated
1072  * at the same time. The JIT engine should calculate offsets based on the
1073  * RO memory address, but write the JITed program to the RW buffer. Once the
1074  * JIT engine finishes, it calls bpf_jit_binary_pack_finalize, which copies
1075  * the JITed program to the RO memory.
1076  */
1077 struct bpf_binary_header *
1078 bpf_jit_binary_pack_alloc(unsigned int proglen, u8 **image_ptr,
1079 			  unsigned int alignment,
1080 			  struct bpf_binary_header **rw_header,
1081 			  u8 **rw_image,
1082 			  bpf_jit_fill_hole_t bpf_fill_ill_insns)
1083 {
1084 	struct bpf_binary_header *ro_header;
1085 	u32 size, hole, start;
1086 
1087 	WARN_ON_ONCE(!is_power_of_2(alignment) ||
1088 		     alignment > BPF_IMAGE_ALIGNMENT);
1089 
1090 	/* add 16 bytes for a random section of illegal instructions */
1091 	size = round_up(proglen + sizeof(*ro_header) + 16, BPF_PROG_CHUNK_SIZE);
1092 
1093 	if (bpf_jit_charge_modmem(size))
1094 		return NULL;
1095 	ro_header = bpf_prog_pack_alloc(size, bpf_fill_ill_insns);
1096 	if (!ro_header) {
1097 		bpf_jit_uncharge_modmem(size);
1098 		return NULL;
1099 	}
1100 
1101 	*rw_header = kvmalloc(size, GFP_KERNEL);
1102 	if (!*rw_header) {
1103 		bpf_arch_text_copy(&ro_header->size, &size, sizeof(size));
1104 		bpf_prog_pack_free(ro_header);
1105 		bpf_jit_uncharge_modmem(size);
1106 		return NULL;
1107 	}
1108 
1109 	/* Fill space with illegal/arch-dep instructions. */
1110 	bpf_fill_ill_insns(*rw_header, size);
1111 	(*rw_header)->size = size;
1112 
1113 	hole = min_t(unsigned int, size - (proglen + sizeof(*ro_header)),
1114 		     BPF_PROG_CHUNK_SIZE - sizeof(*ro_header));
1115 	start = get_random_u32_below(hole) & ~(alignment - 1);
1116 
1117 	*image_ptr = &ro_header->image[start];
1118 	*rw_image = &(*rw_header)->image[start];
1119 
1120 	return ro_header;
1121 }
1122 
1123 /* Copy JITed text from rw_header to its final location, the ro_header. */
1124 int bpf_jit_binary_pack_finalize(struct bpf_prog *prog,
1125 				 struct bpf_binary_header *ro_header,
1126 				 struct bpf_binary_header *rw_header)
1127 {
1128 	void *ptr;
1129 
1130 	ptr = bpf_arch_text_copy(ro_header, rw_header, rw_header->size);
1131 
1132 	kvfree(rw_header);
1133 
1134 	if (IS_ERR(ptr)) {
1135 		bpf_prog_pack_free(ro_header);
1136 		return PTR_ERR(ptr);
1137 	}
1138 	return 0;
1139 }
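
/* A hedged sketch of the RO/RW flow from an arch JIT's point of view:
 *
 *	ro_header = bpf_jit_binary_pack_alloc(proglen, &image, align,
 *					      &rw_header, &rw_image,
 *					      jit_fill_hole);
 *	// ... emit instructions into rw_image, computing offsets
 *	// against the final "image" address ...
 *	ret = bpf_jit_binary_pack_finalize(prog, ro_header, rw_header);
 *
 * On failure before finalize, bpf_jit_binary_pack_free() must be called
 * with both headers so that the RW buffer is freed as well.
 */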
1140 
1141 /* bpf_jit_binary_pack_free is called in two different scenarios:
1142  *   1) when the program is freed after a successful JIT;
1143  *   2) when the JIT engine fails (before bpf_jit_binary_pack_finalize).
1144  * For case 2), we need to free both the RO memory and the RW buffer.
1145  *
1146  * bpf_jit_binary_pack_free requires a proper ro_header->size. However,
1147  * bpf_jit_binary_pack_alloc does not set it. Therefore, ro_header->size
1148  * must be set with either bpf_jit_binary_pack_finalize (normal path) or
1149  * bpf_arch_text_copy (when the JIT fails).
1150  */
1151 void bpf_jit_binary_pack_free(struct bpf_binary_header *ro_header,
1152 			      struct bpf_binary_header *rw_header)
1153 {
1154 	u32 size = ro_header->size;
1155 
1156 	bpf_prog_pack_free(ro_header);
1157 	kvfree(rw_header);
1158 	bpf_jit_uncharge_modmem(size);
1159 }
1160 
1161 struct bpf_binary_header *
1162 bpf_jit_binary_pack_hdr(const struct bpf_prog *fp)
1163 {
1164 	unsigned long real_start = (unsigned long)fp->bpf_func;
1165 	unsigned long addr;
1166 
1167 	addr = real_start & BPF_PROG_CHUNK_MASK;
1168 	return (void *)addr;
1169 }
1170 
1171 static inline struct bpf_binary_header *
1172 bpf_jit_binary_hdr(const struct bpf_prog *fp)
1173 {
1174 	unsigned long real_start = (unsigned long)fp->bpf_func;
1175 	unsigned long addr;
1176 
1177 	addr = real_start & PAGE_MASK;
1178 	return (void *)addr;
1179 }
1180 
1181 /* This symbol is only overridden by archs that have different
1182  * requirements than the usual eBPF JITs, f.e. when they only
1183  * implement cBPF JIT, do not set images read-only, etc.
1184  */
1185 void __weak bpf_jit_free(struct bpf_prog *fp)
1186 {
1187 	if (fp->jited) {
1188 		struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);
1189 
1190 		bpf_jit_binary_free(hdr);
1191 		WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
1192 	}
1193 
1194 	bpf_prog_unlock_free(fp);
1195 }
1196 
1197 int bpf_jit_get_func_addr(const struct bpf_prog *prog,
1198 			  const struct bpf_insn *insn, bool extra_pass,
1199 			  u64 *func_addr, bool *func_addr_fixed)
1200 {
1201 	s16 off = insn->off;
1202 	s32 imm = insn->imm;
1203 	u8 *addr;
1204 	int err;
1205 
1206 	*func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL;
1207 	if (!*func_addr_fixed) {
1208 		/* Place-holder address till the last pass has collected
1209 		 * all addresses for JITed subprograms in which case we
1210 		 * can pick them up from prog->aux.
1211 		 */
1212 		if (!extra_pass)
1213 			addr = NULL;
1214 		else if (prog->aux->func &&
1215 			 off >= 0 && off < prog->aux->func_cnt)
1216 			addr = (u8 *)prog->aux->func[off]->bpf_func;
1217 		else
1218 			return -EINVAL;
1219 	} else if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL &&
1220 		   bpf_jit_supports_far_kfunc_call()) {
1221 		err = bpf_get_kfunc_addr(prog, insn->imm, insn->off, &addr);
1222 		if (err)
1223 			return err;
1224 	} else {
1225 		/* Address of a BPF helper call. Since part of the core
1226 		 * kernel, it's always at a fixed location. __bpf_call_base
1227 		 * and the helper with imm relative to it are both in core
1228 		 * kernel.
1229 		 */
1230 		addr = (u8 *)__bpf_call_base + imm;
1231 	}
1232 
1233 	*func_addr = (unsigned long)addr;
1234 	return 0;
1235 }
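
/* Illustration of the fixed-address case above, with a hypothetical
 * layout: for a helper call, insn->imm holds the signed delta from
 * __bpf_call_base, so a helper living at __bpf_call_base + 0x1f30 is
 * encoded as imm = 0x1f30 and recovered via
 * addr = (u8 *)__bpf_call_base + imm.
 */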
1236 
1237 static int bpf_jit_blind_insn(const struct bpf_insn *from,
1238 			      const struct bpf_insn *aux,
1239 			      struct bpf_insn *to_buff,
1240 			      bool emit_zext)
1241 {
1242 	struct bpf_insn *to = to_buff;
1243 	u32 imm_rnd = get_random_u32();
1244 	s16 off;
1245 
1246 	BUILD_BUG_ON(BPF_REG_AX  + 1 != MAX_BPF_JIT_REG);
1247 	BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);
1248 
1249 	/* Constraints on AX register:
1250 	 *
1251 	 * AX register is inaccessible from user space. It is mapped in
1252 	 * all JITs, and used here for constant blinding rewrites. It is
1253 	 * typically "stateless" meaning its contents are only valid within
1254 	 * the executed instruction, but not across several instructions.
1255 	 * There are a few exceptions however which are further detailed
1256 	 * below.
1257 	 *
1258 	 * Constant blinding is only used by JITs, not in the interpreter.
1259 	 * The interpreter uses AX on occasion as a local temporary
1260 	 * register, e.g. in DIV or MOD instructions.
1261 	 *
1262 	 * In restricted circumstances, the verifier can also use the AX
1263 	 * register for rewrites as long as they do not interfere with
1264 	 * the above cases!
1265 	 */
1266 	if (from->dst_reg == BPF_REG_AX || from->src_reg == BPF_REG_AX)
1267 		goto out;
1268 
1269 	if (from->imm == 0 &&
1270 	    (from->code == (BPF_ALU   | BPF_MOV | BPF_K) ||
1271 	     from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
1272 		*to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
1273 		goto out;
1274 	}
1275 
1276 	switch (from->code) {
1277 	case BPF_ALU | BPF_ADD | BPF_K:
1278 	case BPF_ALU | BPF_SUB | BPF_K:
1279 	case BPF_ALU | BPF_AND | BPF_K:
1280 	case BPF_ALU | BPF_OR  | BPF_K:
1281 	case BPF_ALU | BPF_XOR | BPF_K:
1282 	case BPF_ALU | BPF_MUL | BPF_K:
1283 	case BPF_ALU | BPF_MOV | BPF_K:
1284 	case BPF_ALU | BPF_DIV | BPF_K:
1285 	case BPF_ALU | BPF_MOD | BPF_K:
1286 		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1287 		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1288 		*to++ = BPF_ALU32_REG_OFF(from->code, from->dst_reg, BPF_REG_AX, from->off);
1289 		break;
1290 
1291 	case BPF_ALU64 | BPF_ADD | BPF_K:
1292 	case BPF_ALU64 | BPF_SUB | BPF_K:
1293 	case BPF_ALU64 | BPF_AND | BPF_K:
1294 	case BPF_ALU64 | BPF_OR  | BPF_K:
1295 	case BPF_ALU64 | BPF_XOR | BPF_K:
1296 	case BPF_ALU64 | BPF_MUL | BPF_K:
1297 	case BPF_ALU64 | BPF_MOV | BPF_K:
1298 	case BPF_ALU64 | BPF_DIV | BPF_K:
1299 	case BPF_ALU64 | BPF_MOD | BPF_K:
1300 		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1301 		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1302 		*to++ = BPF_ALU64_REG_OFF(from->code, from->dst_reg, BPF_REG_AX, from->off);
1303 		break;
1304 
1305 	case BPF_JMP | BPF_JEQ  | BPF_K:
1306 	case BPF_JMP | BPF_JNE  | BPF_K:
1307 	case BPF_JMP | BPF_JGT  | BPF_K:
1308 	case BPF_JMP | BPF_JLT  | BPF_K:
1309 	case BPF_JMP | BPF_JGE  | BPF_K:
1310 	case BPF_JMP | BPF_JLE  | BPF_K:
1311 	case BPF_JMP | BPF_JSGT | BPF_K:
1312 	case BPF_JMP | BPF_JSLT | BPF_K:
1313 	case BPF_JMP | BPF_JSGE | BPF_K:
1314 	case BPF_JMP | BPF_JSLE | BPF_K:
1315 	case BPF_JMP | BPF_JSET | BPF_K:
1316 		/* Account for the extra offset in case of a backjump. */
1317 		off = from->off;
1318 		if (off < 0)
1319 			off -= 2;
1320 		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1321 		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1322 		*to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
1323 		break;
1324 
1325 	case BPF_JMP32 | BPF_JEQ  | BPF_K:
1326 	case BPF_JMP32 | BPF_JNE  | BPF_K:
1327 	case BPF_JMP32 | BPF_JGT  | BPF_K:
1328 	case BPF_JMP32 | BPF_JLT  | BPF_K:
1329 	case BPF_JMP32 | BPF_JGE  | BPF_K:
1330 	case BPF_JMP32 | BPF_JLE  | BPF_K:
1331 	case BPF_JMP32 | BPF_JSGT | BPF_K:
1332 	case BPF_JMP32 | BPF_JSLT | BPF_K:
1333 	case BPF_JMP32 | BPF_JSGE | BPF_K:
1334 	case BPF_JMP32 | BPF_JSLE | BPF_K:
1335 	case BPF_JMP32 | BPF_JSET | BPF_K:
1336 		/* Account for the extra offset in case of a backjump. */
1337 		off = from->off;
1338 		if (off < 0)
1339 			off -= 2;
1340 		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1341 		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1342 		*to++ = BPF_JMP32_REG(from->code, from->dst_reg, BPF_REG_AX,
1343 				      off);
1344 		break;
1345 
1346 	case BPF_LD | BPF_IMM | BPF_DW:
1347 		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
1348 		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1349 		*to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
1350 		*to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
1351 		break;
1352 	case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
1353 		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
1354 		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1355 		if (emit_zext)
1356 			*to++ = BPF_ZEXT_REG(BPF_REG_AX);
1357 		*to++ = BPF_ALU64_REG(BPF_OR,  aux[0].dst_reg, BPF_REG_AX);
1358 		break;
1359 
1360 	case BPF_ST | BPF_MEM | BPF_DW:
1361 	case BPF_ST | BPF_MEM | BPF_W:
1362 	case BPF_ST | BPF_MEM | BPF_H:
1363 	case BPF_ST | BPF_MEM | BPF_B:
1364 		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1365 		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1366 		*to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
1367 		break;
1368 	}
1369 out:
1370 	return to - to_buff;
1371 }
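
/* A worked blinding example (imm_rnd is random at blinding time): the
 * single insn
 *
 *	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1234)
 *
 * is rewritten by the ALU64 | BPF_K case above into
 *
 *	BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ 0x1234);
 *	BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
 *	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_AX);
 *
 * so the user-chosen constant 0x1234 never appears verbatim in the
 * JITed image; only the two random-looking halves do.
 */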
1372 
1373 static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
1374 					      gfp_t gfp_extra_flags)
1375 {
1376 	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
1377 	struct bpf_prog *fp;
1378 
1379 	fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags);
1380 	if (fp != NULL) {
1381 		/* aux->prog still points to the fp_other one, so
1382 		 * when promoting the clone to the real program,
1383 		 * this still needs to be adapted.
1384 		 */
1385 		memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
1386 	}
1387 
1388 	return fp;
1389 }
1390 
1391 static void bpf_prog_clone_free(struct bpf_prog *fp)
1392 {
1393 	/* aux was stolen by the other clone, so we cannot free
1394 	 * it from this path! It will be freed eventually by the
1395 	 * other program on release.
1396 	 *
1397 	 * At this point, we don't need a deferred release since
1398 	 * clone is guaranteed to not be locked.
1399 	 */
1400 	fp->aux = NULL;
1401 	fp->stats = NULL;
1402 	fp->active = NULL;
1403 	__bpf_prog_free(fp);
1404 }
1405 
1406 void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
1407 {
1408 	/* We have to repoint aux->prog to self, as we don't
1409 	 * know whether fp here is the clone or the original.
1410 	 */
1411 	fp->aux->prog = fp;
1412 	bpf_prog_clone_free(fp_other);
1413 }
1414 
1415 struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
1416 {
1417 	struct bpf_insn insn_buff[16], aux[2];
1418 	struct bpf_prog *clone, *tmp;
1419 	int insn_delta, insn_cnt;
1420 	struct bpf_insn *insn;
1421 	int i, rewritten;
1422 
1423 	if (!prog->blinding_requested || prog->blinded)
1424 		return prog;
1425 
1426 	clone = bpf_prog_clone_create(prog, GFP_USER);
1427 	if (!clone)
1428 		return ERR_PTR(-ENOMEM);
1429 
1430 	insn_cnt = clone->len;
1431 	insn = clone->insnsi;
1432 
1433 	for (i = 0; i < insn_cnt; i++, insn++) {
1434 		if (bpf_pseudo_func(insn)) {
1435 			/* ld_imm64 with the address of a bpf subprog is not
1436 			 * a user-controlled constant. Don't randomize it,
1437 			 * since it would conflict with jit_subprogs() logic.
1438 			 */
1439 			insn++;
1440 			i++;
1441 			continue;
1442 		}
1443 
1444 		/* We temporarily need to hold the original ld64 insn
1445 		 * so that we can still access the first part in the
1446 		 * second blinding run.
1447 		 */
1448 		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
1449 		    insn[1].code == 0)
1450 			memcpy(aux, insn, sizeof(aux));
1451 
1452 		rewritten = bpf_jit_blind_insn(insn, aux, insn_buff,
1453 						clone->aux->verifier_zext);
1454 		if (!rewritten)
1455 			continue;
1456 
1457 		tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
1458 		if (IS_ERR(tmp)) {
1459 			/* Patching may have repointed aux->prog during
1460 			 * realloc from the original one, so we need to
1461 			 * fix it up here on error.
1462 			 */
1463 			bpf_jit_prog_release_other(prog, clone);
1464 			return tmp;
1465 		}
1466 
1467 		clone = tmp;
1468 		insn_delta = rewritten - 1;
1469 
1470 		/* Walk new program and skip insns we just inserted. */
1471 		insn = clone->insnsi + i + insn_delta;
1472 		insn_cnt += insn_delta;
1473 		i        += insn_delta;
1474 	}
1475 
1476 	clone->blinded = 1;
1477 	return clone;
1478 }
1479 #endif /* CONFIG_BPF_JIT */
1480 
1481 /* Base function for offset calculation. Needs to go into .text section,
1482  * therefore keeping it non-static as well; will also be used by JITs
1483  * anyway later on, so do not let the compiler omit it. This also needs
1484  * to go into kallsyms for correlation from e.g. bpftool, so naming
1485  * must not change.
1486  */
1487 noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
1488 {
1489 	return 0;
1490 }
1491 EXPORT_SYMBOL_GPL(__bpf_call_base);
1492 
1493 /* All UAPI available opcodes. */
1494 #define BPF_INSN_MAP(INSN_2, INSN_3)		\
1495 	/* 32 bit ALU operations. */		\
1496 	/*   Register based. */			\
1497 	INSN_3(ALU, ADD,  X),			\
1498 	INSN_3(ALU, SUB,  X),			\
1499 	INSN_3(ALU, AND,  X),			\
1500 	INSN_3(ALU, OR,   X),			\
1501 	INSN_3(ALU, LSH,  X),			\
1502 	INSN_3(ALU, RSH,  X),			\
1503 	INSN_3(ALU, XOR,  X),			\
1504 	INSN_3(ALU, MUL,  X),			\
1505 	INSN_3(ALU, MOV,  X),			\
1506 	INSN_3(ALU, ARSH, X),			\
1507 	INSN_3(ALU, DIV,  X),			\
1508 	INSN_3(ALU, MOD,  X),			\
1509 	INSN_2(ALU, NEG),			\
1510 	INSN_3(ALU, END, TO_BE),		\
1511 	INSN_3(ALU, END, TO_LE),		\
1512 	/*   Immediate based. */		\
1513 	INSN_3(ALU, ADD,  K),			\
1514 	INSN_3(ALU, SUB,  K),			\
1515 	INSN_3(ALU, AND,  K),			\
1516 	INSN_3(ALU, OR,   K),			\
1517 	INSN_3(ALU, LSH,  K),			\
1518 	INSN_3(ALU, RSH,  K),			\
1519 	INSN_3(ALU, XOR,  K),			\
1520 	INSN_3(ALU, MUL,  K),			\
1521 	INSN_3(ALU, MOV,  K),			\
1522 	INSN_3(ALU, ARSH, K),			\
1523 	INSN_3(ALU, DIV,  K),			\
1524 	INSN_3(ALU, MOD,  K),			\
1525 	/* 64 bit ALU operations. */		\
1526 	/*   Register based. */			\
1527 	INSN_3(ALU64, ADD,  X),			\
1528 	INSN_3(ALU64, SUB,  X),			\
1529 	INSN_3(ALU64, AND,  X),			\
1530 	INSN_3(ALU64, OR,   X),			\
1531 	INSN_3(ALU64, LSH,  X),			\
1532 	INSN_3(ALU64, RSH,  X),			\
1533 	INSN_3(ALU64, XOR,  X),			\
1534 	INSN_3(ALU64, MUL,  X),			\
1535 	INSN_3(ALU64, MOV,  X),			\
1536 	INSN_3(ALU64, ARSH, X),			\
1537 	INSN_3(ALU64, DIV,  X),			\
1538 	INSN_3(ALU64, MOD,  X),			\
1539 	INSN_2(ALU64, NEG),			\
1540 	INSN_3(ALU64, END, TO_LE),		\
1541 	/*   Immediate based. */		\
1542 	INSN_3(ALU64, ADD,  K),			\
1543 	INSN_3(ALU64, SUB,  K),			\
1544 	INSN_3(ALU64, AND,  K),			\
1545 	INSN_3(ALU64, OR,   K),			\
1546 	INSN_3(ALU64, LSH,  K),			\
1547 	INSN_3(ALU64, RSH,  K),			\
1548 	INSN_3(ALU64, XOR,  K),			\
1549 	INSN_3(ALU64, MUL,  K),			\
1550 	INSN_3(ALU64, MOV,  K),			\
1551 	INSN_3(ALU64, ARSH, K),			\
1552 	INSN_3(ALU64, DIV,  K),			\
1553 	INSN_3(ALU64, MOD,  K),			\
1554 	/* Call instruction. */			\
1555 	INSN_2(JMP, CALL),			\
1556 	/* Exit instruction. */			\
1557 	INSN_2(JMP, EXIT),			\
1558 	/* 32-bit Jump instructions. */		\
1559 	/*   Register based. */			\
1560 	INSN_3(JMP32, JEQ,  X),			\
1561 	INSN_3(JMP32, JNE,  X),			\
1562 	INSN_3(JMP32, JGT,  X),			\
1563 	INSN_3(JMP32, JLT,  X),			\
1564 	INSN_3(JMP32, JGE,  X),			\
1565 	INSN_3(JMP32, JLE,  X),			\
1566 	INSN_3(JMP32, JSGT, X),			\
1567 	INSN_3(JMP32, JSLT, X),			\
1568 	INSN_3(JMP32, JSGE, X),			\
1569 	INSN_3(JMP32, JSLE, X),			\
1570 	INSN_3(JMP32, JSET, X),			\
1571 	/*   Immediate based. */		\
1572 	INSN_3(JMP32, JEQ,  K),			\
1573 	INSN_3(JMP32, JNE,  K),			\
1574 	INSN_3(JMP32, JGT,  K),			\
1575 	INSN_3(JMP32, JLT,  K),			\
1576 	INSN_3(JMP32, JGE,  K),			\
1577 	INSN_3(JMP32, JLE,  K),			\
1578 	INSN_3(JMP32, JSGT, K),			\
1579 	INSN_3(JMP32, JSLT, K),			\
1580 	INSN_3(JMP32, JSGE, K),			\
1581 	INSN_3(JMP32, JSLE, K),			\
1582 	INSN_3(JMP32, JSET, K),			\
1583 	/* Jump instructions. */		\
1584 	/*   Register based. */			\
1585 	INSN_3(JMP, JEQ,  X),			\
1586 	INSN_3(JMP, JNE,  X),			\
1587 	INSN_3(JMP, JGT,  X),			\
1588 	INSN_3(JMP, JLT,  X),			\
1589 	INSN_3(JMP, JGE,  X),			\
1590 	INSN_3(JMP, JLE,  X),			\
1591 	INSN_3(JMP, JSGT, X),			\
1592 	INSN_3(JMP, JSLT, X),			\
1593 	INSN_3(JMP, JSGE, X),			\
1594 	INSN_3(JMP, JSLE, X),			\
1595 	INSN_3(JMP, JSET, X),			\
1596 	/*   Immediate based. */		\
1597 	INSN_3(JMP, JEQ,  K),			\
1598 	INSN_3(JMP, JNE,  K),			\
1599 	INSN_3(JMP, JGT,  K),			\
1600 	INSN_3(JMP, JLT,  K),			\
1601 	INSN_3(JMP, JGE,  K),			\
1602 	INSN_3(JMP, JLE,  K),			\
1603 	INSN_3(JMP, JSGT, K),			\
1604 	INSN_3(JMP, JSLT, K),			\
1605 	INSN_3(JMP, JSGE, K),			\
1606 	INSN_3(JMP, JSLE, K),			\
1607 	INSN_3(JMP, JSET, K),			\
1608 	INSN_2(JMP, JA),			\
1609 	INSN_2(JMP32, JA),			\
1610 	/* Store instructions. */		\
1611 	/*   Register based. */			\
1612 	INSN_3(STX, MEM,  B),			\
1613 	INSN_3(STX, MEM,  H),			\
1614 	INSN_3(STX, MEM,  W),			\
1615 	INSN_3(STX, MEM,  DW),			\
1616 	INSN_3(STX, ATOMIC, W),			\
1617 	INSN_3(STX, ATOMIC, DW),		\
1618 	/*   Immediate based. */		\
1619 	INSN_3(ST, MEM, B),			\
1620 	INSN_3(ST, MEM, H),			\
1621 	INSN_3(ST, MEM, W),			\
1622 	INSN_3(ST, MEM, DW),			\
1623 	/* Load instructions. */		\
1624 	/*   Register based. */			\
1625 	INSN_3(LDX, MEM, B),			\
1626 	INSN_3(LDX, MEM, H),			\
1627 	INSN_3(LDX, MEM, W),			\
1628 	INSN_3(LDX, MEM, DW),			\
1629 	INSN_3(LDX, MEMSX, B),			\
1630 	INSN_3(LDX, MEMSX, H),			\
1631 	INSN_3(LDX, MEMSX, W),			\
1632 	/*   Immediate based. */		\
1633 	INSN_3(LD, IMM, DW)
1634 
1635 bool bpf_opcode_in_insntable(u8 code)
1636 {
1637 #define BPF_INSN_2_TBL(x, y)    [BPF_##x | BPF_##y] = true
1638 #define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true
1639 	static const bool public_insntable[256] = {
1640 		[0 ... 255] = false,
1641 		/* Now overwrite non-defaults ... */
1642 		BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL),
1643 		/* UAPI exposed, but rewritten opcodes. cBPF carry-over. */
1644 		[BPF_LD | BPF_ABS | BPF_B] = true,
1645 		[BPF_LD | BPF_ABS | BPF_H] = true,
1646 		[BPF_LD | BPF_ABS | BPF_W] = true,
1647 		[BPF_LD | BPF_IND | BPF_B] = true,
1648 		[BPF_LD | BPF_IND | BPF_H] = true,
1649 		[BPF_LD | BPF_IND | BPF_W] = true,
1650 	};
1651 #undef BPF_INSN_3_TBL
1652 #undef BPF_INSN_2_TBL
1653 	return public_insntable[code];
1654 }
1655 
1656 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
1657 /**
1658  *	___bpf_prog_run - run eBPF program on a given context
1659  *	@regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers
1660  *	@insn: is the array of eBPF instructions
1661  *
1662  * Decode and execute eBPF instructions.
1663  *
1664  * Return: whatever value is in %BPF_R0 at program exit
1665  */
1666 static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
1667 {
1668 #define BPF_INSN_2_LBL(x, y)    [BPF_##x | BPF_##y] = &&x##_##y
1669 #define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
1670 	static const void * const jumptable[256] __annotate_jump_table = {
1671 		[0 ... 255] = &&default_label,
1672 		/* Now overwrite non-defaults ... */
1673 		BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL),
1674 		/* Non-UAPI available opcodes. */
1675 		[BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
1676 		[BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
1677 		[BPF_ST  | BPF_NOSPEC] = &&ST_NOSPEC,
1678 		[BPF_LDX | BPF_PROBE_MEM | BPF_B] = &&LDX_PROBE_MEM_B,
1679 		[BPF_LDX | BPF_PROBE_MEM | BPF_H] = &&LDX_PROBE_MEM_H,
1680 		[BPF_LDX | BPF_PROBE_MEM | BPF_W] = &&LDX_PROBE_MEM_W,
1681 		[BPF_LDX | BPF_PROBE_MEM | BPF_DW] = &&LDX_PROBE_MEM_DW,
1682 		[BPF_LDX | BPF_PROBE_MEMSX | BPF_B] = &&LDX_PROBE_MEMSX_B,
1683 		[BPF_LDX | BPF_PROBE_MEMSX | BPF_H] = &&LDX_PROBE_MEMSX_H,
1684 		[BPF_LDX | BPF_PROBE_MEMSX | BPF_W] = &&LDX_PROBE_MEMSX_W,
1685 	};
1686 #undef BPF_INSN_3_LBL
1687 #undef BPF_INSN_2_LBL
1688 	u32 tail_call_cnt = 0;
1689 
1690 #define CONT	 ({ insn++; goto select_insn; })
1691 #define CONT_JMP ({ insn++; goto select_insn; })
1692 
1693 select_insn:
1694 	goto *jumptable[insn->code];
1695 
1696 	/* Explicitly mask the register-based shift amounts with 63 or 31
1697 	 * to avoid undefined behavior. Normally this won't affect the
1698 	 * generated code, for example, in case of native 64 bit archs such
1699 	 * as x86-64 or arm64, the compiler is optimizing the AND away for
1700 	 * the interpreter. In case of JITs, each of the JIT backends compiles
1701 	 * the BPF shift operations to machine instructions which produce
1702 	 * implementation-defined results in such a case; the resulting
1703 	 * contents of the register may be arbitrary, but program behaviour
1704 	 * as a whole remains defined. In other words, in case of JIT backends,
1705 	 * the AND must /not/ be added to the emitted LSH/RSH/ARSH translation.
1706 	 */
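	/* E.g. the 64-bit LSH below computes DST = DST << (SRC & 63), so
	 * a shift amount of 64 masks to 0 and yields a well-defined
	 * result instead of C undefined behavior.
	 */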
1707 	/* ALU (shifts) */
1708 #define SHT(OPCODE, OP)					\
1709 	ALU64_##OPCODE##_X:				\
1710 		DST = DST OP (SRC & 63);		\
1711 		CONT;					\
1712 	ALU_##OPCODE##_X:				\
1713 		DST = (u32) DST OP ((u32) SRC & 31);	\
1714 		CONT;					\
1715 	ALU64_##OPCODE##_K:				\
1716 		DST = DST OP IMM;			\
1717 		CONT;					\
1718 	ALU_##OPCODE##_K:				\
1719 		DST = (u32) DST OP (u32) IMM;		\
1720 		CONT;
1721 	/* ALU (rest) */
1722 #define ALU(OPCODE, OP)					\
1723 	ALU64_##OPCODE##_X:				\
1724 		DST = DST OP SRC;			\
1725 		CONT;					\
1726 	ALU_##OPCODE##_X:				\
1727 		DST = (u32) DST OP (u32) SRC;		\
1728 		CONT;					\
1729 	ALU64_##OPCODE##_K:				\
1730 		DST = DST OP IMM;			\
1731 		CONT;					\
1732 	ALU_##OPCODE##_K:				\
1733 		DST = (u32) DST OP (u32) IMM;		\
1734 		CONT;
1735 	ALU(ADD,  +)
1736 	ALU(SUB,  -)
1737 	ALU(AND,  &)
1738 	ALU(OR,   |)
1739 	ALU(XOR,  ^)
1740 	ALU(MUL,  *)
1741 	SHT(LSH, <<)
1742 	SHT(RSH, >>)
1743 #undef SHT
1744 #undef ALU
1745 	ALU_NEG:
1746 		DST = (u32) -DST;
1747 		CONT;
1748 	ALU64_NEG:
1749 		DST = -DST;
1750 		CONT;
1751 	ALU_MOV_X:
1752 		switch (OFF) {
1753 		case 0:
1754 			DST = (u32) SRC;
1755 			break;
1756 		case 8:
1757 			DST = (u32)(s8) SRC;
1758 			break;
1759 		case 16:
1760 			DST = (u32)(s16) SRC;
1761 			break;
1762 		}
1763 		CONT;
1764 	ALU_MOV_K:
1765 		DST = (u32) IMM;
1766 		CONT;
1767 	ALU64_MOV_X:
1768 		switch (OFF) {
1769 		case 0:
1770 			DST = SRC;
1771 			break;
1772 		case 8:
1773 			DST = (s8) SRC;
1774 			break;
1775 		case 16:
1776 			DST = (s16) SRC;
1777 			break;
1778 		case 32:
1779 			DST = (s32) SRC;
1780 			break;
1781 		}
1782 		CONT;
1783 	ALU64_MOV_K:
1784 		DST = IMM;
1785 		CONT;
1786 	LD_IMM_DW:
1787 		DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
1788 		insn++;
1789 		CONT;
1790 	ALU_ARSH_X:
1791 		DST = (u64) (u32) (((s32) DST) >> (SRC & 31));
1792 		CONT;
1793 	ALU_ARSH_K:
1794 		DST = (u64) (u32) (((s32) DST) >> IMM);
1795 		CONT;
1796 	ALU64_ARSH_X:
1797 		(*(s64 *) &DST) >>= (SRC & 63);
1798 		CONT;
1799 	ALU64_ARSH_K:
1800 		(*(s64 *) &DST) >>= IMM;
1801 		CONT;
1802 	ALU64_MOD_X:
1803 		switch (OFF) {
1804 		case 0:
1805 			div64_u64_rem(DST, SRC, &AX);
1806 			DST = AX;
1807 			break;
1808 		case 1:
1809 			AX = div64_s64(DST, SRC);
1810 			DST = DST - AX * SRC;
1811 			break;
1812 		}
1813 		CONT;
1814 	ALU_MOD_X:
1815 		switch (OFF) {
1816 		case 0:
1817 			AX = (u32) DST;
1818 			DST = do_div(AX, (u32) SRC);
1819 			break;
1820 		case 1:
1821 			AX = abs((s32)DST);
1822 			AX = do_div(AX, abs((s32)SRC));
1823 			if ((s32)DST < 0)
1824 				DST = (u32)-AX;
1825 			else
1826 				DST = (u32)AX;
1827 			break;
1828 		}
1829 		CONT;
1830 	ALU64_MOD_K:
1831 		switch (OFF) {
1832 		case 0:
1833 			div64_u64_rem(DST, IMM, &AX);
1834 			DST = AX;
1835 			break;
1836 		case 1:
1837 			AX = div64_s64(DST, IMM);
1838 			DST = DST - AX * IMM;
1839 			break;
1840 		}
1841 		CONT;
1842 	ALU_MOD_K:
1843 		switch (OFF) {
1844 		case 0:
1845 			AX = (u32) DST;
1846 			DST = do_div(AX, (u32) IMM);
1847 			break;
1848 		case 1:
1849 			AX = abs((s32)DST);
1850 			AX = do_div(AX, abs((s32)IMM));
1851 			if ((s32)DST < 0)
1852 				DST = (u32)-AX;
1853 			else
1854 				DST = (u32)AX;
1855 			break;
1856 		}
1857 		CONT;
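	/* Editorial note: in the OFF == 1 (signed) mod cases above, the
	 * quotient truncates toward zero, so the remainder takes the sign
	 * of the dividend: e.g. -7 smod 2 == -1 and 7 smod -2 == 1.
	 */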
1858 	ALU64_DIV_X:
1859 		switch (OFF) {
1860 		case 0:
1861 			DST = div64_u64(DST, SRC);
1862 			break;
1863 		case 1:
1864 			DST = div64_s64(DST, SRC);
1865 			break;
1866 		}
1867 		CONT;
1868 	ALU_DIV_X:
1869 		switch (OFF) {
1870 		case 0:
1871 			AX = (u32) DST;
1872 			do_div(AX, (u32) SRC);
1873 			DST = (u32) AX;
1874 			break;
1875 		case 1:
1876 			AX = abs((s32)DST);
1877 			do_div(AX, abs((s32)SRC));
1878 			if (((s32)DST < 0) == ((s32)SRC < 0))
1879 				DST = (u32)AX;
1880 			else
1881 				DST = (u32)-AX;
1882 			break;
1883 		}
1884 		CONT;
1885 	ALU64_DIV_K:
1886 		switch (OFF) {
1887 		case 0:
1888 			DST = div64_u64(DST, IMM);
1889 			break;
1890 		case 1:
1891 			DST = div64_s64(DST, IMM);
1892 			break;
1893 		}
1894 		CONT;
1895 	ALU_DIV_K:
1896 		switch (OFF) {
1897 		case 0:
1898 			AX = (u32) DST;
1899 			do_div(AX, (u32) IMM);
1900 			DST = (u32) AX;
1901 			break;
1902 		case 1:
1903 			AX = abs((s32)DST);
1904 			do_div(AX, abs((s32)IMM));
1905 			if (((s32)DST < 0) == ((s32)IMM < 0))
1906 				DST = (u32)AX;
1907 			else
1908 				DST = (u32)-AX;
1909 			break;
1910 		}
1911 		CONT;
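	/* Editorial note: none of the DIV/MOD handlers above check for a
	 * zero divisor; the verifier is expected to have patched runtime
	 * zero checks into the program during fixup, so the interpreter
	 * never divides by zero here.
	 */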
1912 	ALU_END_TO_BE:
1913 		switch (IMM) {
1914 		case 16:
1915 			DST = (__force u16) cpu_to_be16(DST);
1916 			break;
1917 		case 32:
1918 			DST = (__force u32) cpu_to_be32(DST);
1919 			break;
1920 		case 64:
1921 			DST = (__force u64) cpu_to_be64(DST);
1922 			break;
1923 		}
1924 		CONT;
1925 	ALU_END_TO_LE:
1926 		switch (IMM) {
1927 		case 16:
1928 			DST = (__force u16) cpu_to_le16(DST);
1929 			break;
1930 		case 32:
1931 			DST = (__force u32) cpu_to_le32(DST);
1932 			break;
1933 		case 64:
1934 			DST = (__force u64) cpu_to_le64(DST);
1935 			break;
1936 		}
1937 		CONT;
1938 	ALU64_END_TO_LE:
1939 		switch (IMM) {
1940 		case 16:
1941 			DST = (__force u16) __swab16(DST);
1942 			break;
1943 		case 32:
1944 			DST = (__force u32) __swab32(DST);
1945 			break;
1946 		case 64:
1947 			DST = (__force u64) __swab64(DST);
1948 			break;
1949 		}
1950 		CONT;
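	/* Editorial note: ALU_END_TO_BE/_LE above depend on host
	 * endianness (cpu_to_be*() is a no-op on big endian hosts,
	 * cpu_to_le*() on little endian ones), whereas the cpu v4
	 * ALU64_END_TO_LE handler uses __swab*() and thus always swaps:
	 * bswap16 of 0x1234 yields 0x3412 regardless of host byte order.
	 */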
1951 
1952 	/* CALL */
1953 	JMP_CALL:
1954 		/* Function call scratches BPF_R1-BPF_R5 registers,
1955 		 * preserves BPF_R6-BPF_R9, and stores return value
1956 		 * into BPF_R0.
1957 		 */
1958 		BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
1959 						       BPF_R4, BPF_R5);
1960 		CONT;
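	/* Editorial illustration: insn->imm of a helper call holds the
	 * helper's address relative to __bpf_call_base, roughly
	 *
	 *	insn->imm = (long) helper_func - (long) __bpf_call_base;
	 *
	 * so the addition above recovers the actual function pointer.
	 */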
1961 
1962 	JMP_CALL_ARGS:
1963 		BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2,
1964 							    BPF_R3, BPF_R4,
1965 							    BPF_R5,
1966 							    insn + insn->off + 1);
1967 		CONT;
1968 
1969 	JMP_TAIL_CALL: {
1970 		struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
1971 		struct bpf_array *array = container_of(map, struct bpf_array, map);
1972 		struct bpf_prog *prog;
1973 		u32 index = BPF_R3;
1974 
1975 		if (unlikely(index >= array->map.max_entries))
1976 			goto out;
1977 
1978 		if (unlikely(tail_call_cnt >= MAX_TAIL_CALL_CNT))
1979 			goto out;
1980 
1981 		tail_call_cnt++;
1982 
1983 		prog = READ_ONCE(array->ptrs[index]);
1984 		if (!prog)
1985 			goto out;
1986 
1987 		/* ARG1 at this point is guaranteed to point to CTX from
1988 		 * the verifier side due to the fact that the tail call is
1989 		 * handled like a helper, that is, bpf_tail_call_proto,
1990 		 * where arg1_type is ARG_PTR_TO_CTX.
1991 		 */
1992 		insn = prog->insnsi;
1993 		goto select_insn;
1994 out:
1995 		CONT;
1996 	}
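	/* Editorial note: the block above implements the program side of
	 * bpf_tail_call(ctx, &prog_array, index). The jump chain is capped
	 * at MAX_TAIL_CALL_CNT, and an empty or out-of-range slot falls
	 * through to the next instruction instead of failing the program.
	 */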
1997 	JMP_JA:
1998 		insn += insn->off;
1999 		CONT;
2000 	JMP32_JA:
2001 		insn += insn->imm;
2002 		CONT;
2003 	JMP_EXIT:
2004 		return BPF_R0;
2005 	/* JMP */
2006 #define COND_JMP(SIGN, OPCODE, CMP_OP)				\
2007 	JMP_##OPCODE##_X:					\
2008 		if ((SIGN##64) DST CMP_OP (SIGN##64) SRC) {	\
2009 			insn += insn->off;			\
2010 			CONT_JMP;				\
2011 		}						\
2012 		CONT;						\
2013 	JMP32_##OPCODE##_X:					\
2014 		if ((SIGN##32) DST CMP_OP (SIGN##32) SRC) {	\
2015 			insn += insn->off;			\
2016 			CONT_JMP;				\
2017 		}						\
2018 		CONT;						\
2019 	JMP_##OPCODE##_K:					\
2020 		if ((SIGN##64) DST CMP_OP (SIGN##64) IMM) {	\
2021 			insn += insn->off;			\
2022 			CONT_JMP;				\
2023 		}						\
2024 		CONT;						\
2025 	JMP32_##OPCODE##_K:					\
2026 		if ((SIGN##32) DST CMP_OP (SIGN##32) IMM) {	\
2027 			insn += insn->off;			\
2028 			CONT_JMP;				\
2029 		}						\
2030 		CONT;
2031 	COND_JMP(u, JEQ, ==)
2032 	COND_JMP(u, JNE, !=)
2033 	COND_JMP(u, JGT, >)
2034 	COND_JMP(u, JLT, <)
2035 	COND_JMP(u, JGE, >=)
2036 	COND_JMP(u, JLE, <=)
2037 	COND_JMP(u, JSET, &)
2038 	COND_JMP(s, JSGT, >)
2039 	COND_JMP(s, JSLT, <)
2040 	COND_JMP(s, JSGE, >=)
2041 	COND_JMP(s, JSLE, <=)
2042 #undef COND_JMP
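	/* Editorial illustration: COND_JMP(s, JSGT, >) above expands to,
	 * among others,
	 *
	 *	JMP_JSGT_X:
	 *		if ((s64) DST > (s64) SRC) {
	 *			insn += insn->off;
	 *			CONT_JMP;
	 *		}
	 *		CONT;
	 *
	 * i.e. signed 64-bit and 32-bit variants each for register and
	 * immediate comparands.
	 */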
2043 	/* ST, STX and LDX */
2044 	ST_NOSPEC:
2045 		/* Speculation barrier for mitigating Speculative Store Bypass.
2046 		 * In case of arm64, we rely on the firmware mitigation as
2047 		 * controlled via the ssbd kernel parameter. Whenever the
2048 		 * mitigation is enabled, it works for all of the kernel code
2049 		 * with no need to provide any additional instructions here.
2050 		 * In case of x86, we use 'lfence' insn for mitigation. We
2051 		 * reuse preexisting logic from Spectre v1 mitigation that
2052 		 * happens to produce the required code on x86 for v4 as well.
2053 		 */
2054 		barrier_nospec();
2055 		CONT;
2056 #define LDST(SIZEOP, SIZE)						\
2057 	STX_MEM_##SIZEOP:						\
2058 		*(SIZE *)(unsigned long) (DST + insn->off) = SRC;	\
2059 		CONT;							\
2060 	ST_MEM_##SIZEOP:						\
2061 		*(SIZE *)(unsigned long) (DST + insn->off) = IMM;	\
2062 		CONT;							\
2063 	LDX_MEM_##SIZEOP:						\
2064 		DST = *(SIZE *)(unsigned long) (SRC + insn->off);	\
2065 		CONT;							\
2066 	LDX_PROBE_MEM_##SIZEOP:						\
2067 		bpf_probe_read_kernel_common(&DST, sizeof(SIZE),	\
2068 			      (const void *)(long) (SRC + insn->off));	\
2069 		DST = *((SIZE *)&DST);					\
2070 		CONT;
2071 
2072 	LDST(B,   u8)
2073 	LDST(H,  u16)
2074 	LDST(W,  u32)
2075 	LDST(DW, u64)
2076 #undef LDST
2077 
2078 #define LDSX(SIZEOP, SIZE)						\
2079 	LDX_MEMSX_##SIZEOP:						\
2080 		DST = *(SIZE *)(unsigned long) (SRC + insn->off);	\
2081 		CONT;							\
2082 	LDX_PROBE_MEMSX_##SIZEOP:					\
2083 		bpf_probe_read_kernel_common(&DST, sizeof(SIZE),		\
2084 				      (const void *)(long) (SRC + insn->off));	\
2085 		DST = *((SIZE *)&DST);					\
2086 		CONT;
2087 
2088 	LDSX(B,   s8)
2089 	LDSX(H,  s16)
2090 	LDSX(W,  s32)
2091 #undef LDSX
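	/* Editorial note: because the LDSX() SIZE parameters are signed
	 * types (s8/s16/s32), the plain assignment to the u64 DST register
	 * sign-extends, e.g. LDX_MEMSX_B turns a 0xff byte in memory into
	 * 0xffffffffffffffff in DST.
	 */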
2092 
2093 #define ATOMIC_ALU_OP(BOP, KOP)						\
2094 		case BOP:						\
2095 			if (BPF_SIZE(insn->code) == BPF_W)		\
2096 				atomic_##KOP((u32) SRC, (atomic_t *)(unsigned long) \
2097 					     (DST + insn->off));	\
2098 			else						\
2099 				atomic64_##KOP((u64) SRC, (atomic64_t *)(unsigned long) \
2100 					       (DST + insn->off));	\
2101 			break;						\
2102 		case BOP | BPF_FETCH:					\
2103 			if (BPF_SIZE(insn->code) == BPF_W)		\
2104 				SRC = (u32) atomic_fetch_##KOP(		\
2105 					(u32) SRC,			\
2106 					(atomic_t *)(unsigned long) (DST + insn->off)); \
2107 			else						\
2108 				SRC = (u64) atomic64_fetch_##KOP(	\
2109 					(u64) SRC,			\
2110 					(atomic64_t *)(unsigned long) (DST + insn->off)); \
2111 			break;
2112 
2113 	STX_ATOMIC_DW:
2114 	STX_ATOMIC_W:
2115 		switch (IMM) {
2116 		ATOMIC_ALU_OP(BPF_ADD, add)
2117 		ATOMIC_ALU_OP(BPF_AND, and)
2118 		ATOMIC_ALU_OP(BPF_OR, or)
2119 		ATOMIC_ALU_OP(BPF_XOR, xor)
2120 #undef ATOMIC_ALU_OP
2121 
2122 		case BPF_XCHG:
2123 			if (BPF_SIZE(insn->code) == BPF_W)
2124 				SRC = (u32) atomic_xchg(
2125 					(atomic_t *)(unsigned long) (DST + insn->off),
2126 					(u32) SRC);
2127 			else
2128 				SRC = (u64) atomic64_xchg(
2129 					(atomic64_t *)(unsigned long) (DST + insn->off),
2130 					(u64) SRC);
2131 			break;
2132 		case BPF_CMPXCHG:
2133 			if (BPF_SIZE(insn->code) == BPF_W)
2134 				BPF_R0 = (u32) atomic_cmpxchg(
2135 					(atomic_t *)(unsigned long) (DST + insn->off),
2136 					(u32) BPF_R0, (u32) SRC);
2137 			else
2138 				BPF_R0 = (u64) atomic64_cmpxchg(
2139 					(atomic64_t *)(unsigned long) (DST + insn->off),
2140 					(u64) BPF_R0, (u64) SRC);
2141 			break;
2142 
2143 		default:
2144 			goto default_label;
2145 		}
2146 		CONT;
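	/* Editorial illustration: a fetching 64-bit atomic add, i.e.
	 * insn->imm == (BPF_ADD | BPF_FETCH) with BPF_SIZE(insn->code) ==
	 * BPF_DW, behaves like
	 *
	 *	SRC = atomic64_fetch_add(SRC, (atomic64_t *)(DST + insn->off));
	 *
	 * returning the previous memory value in the source register.
	 */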
2147 
2148 	default_label:
2149 		/* If we ever reach this, we have a bug somewhere. Die hard here
2150 		 * instead of just returning 0; we could be somewhere in a subprog,
2151 		 * so execution could otherwise continue, which we do /not/ want.
2152 		 *
2153 		 * Note, the verifier whitelists all opcodes in bpf_opcode_in_insntable().
2154 		 */
2155 		pr_warn("BPF interpreter: unknown opcode %02x (imm: 0x%x)\n",
2156 			insn->code, insn->imm);
2157 		BUG_ON(1);
2158 		return 0;
2159 }
2160 
2161 #define PROG_NAME(stack_size) __bpf_prog_run##stack_size
2162 #define DEFINE_BPF_PROG_RUN(stack_size) \
2163 static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
2164 { \
2165 	u64 stack[stack_size / sizeof(u64)]; \
2166 	u64 regs[MAX_BPF_EXT_REG] = {}; \
2167 \
2168 	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
2169 	ARG1 = (u64) (unsigned long) ctx; \
2170 	return ___bpf_prog_run(regs, insn); \
2171 }
2172 
2173 #define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size
2174 #define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \
2175 static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
2176 				      const struct bpf_insn *insn) \
2177 { \
2178 	u64 stack[stack_size / sizeof(u64)]; \
2179 	u64 regs[MAX_BPF_EXT_REG]; \
2180 \
2181 	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
2182 	BPF_R1 = r1; \
2183 	BPF_R2 = r2; \
2184 	BPF_R3 = r3; \
2185 	BPF_R4 = r4; \
2186 	BPF_R5 = r5; \
2187 	return ___bpf_prog_run(regs, insn); \
2188 }
2189 
2190 #define EVAL1(FN, X) FN(X)
2191 #define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
2192 #define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
2193 #define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y)
2194 #define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y)
2195 #define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y)
2196 
2197 EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
2198 EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
2199 EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);
2200 
2201 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192);
2202 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384);
2203 EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);
2204 
2205 #define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),
2206 
2207 static unsigned int (*interpreters[])(const void *ctx,
2208 				      const struct bpf_insn *insn) = {
2209 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
2210 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
2211 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
2212 };
2213 #undef PROG_NAME_LIST
2214 #define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size),
2215 static __maybe_unused
2216 u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5,
2217 			   const struct bpf_insn *insn) = {
2218 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
2219 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
2220 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
2221 };
2222 #undef PROG_NAME_LIST
2223 
2224 #ifdef CONFIG_BPF_SYSCALL
2225 void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
2226 {
2227 	stack_depth = max_t(u32, stack_depth, 1);
2228 	insn->off = (s16) insn->imm;
2229 	insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] -
2230 		__bpf_call_base_args;
2231 	insn->code = BPF_JMP | BPF_CALL_ARGS;
2232 }
2233 #endif
2234 #else
2235 static unsigned int __bpf_prog_ret0_warn(const void *ctx,
2236 					 const struct bpf_insn *insn)
2237 {
2238 	/* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
2239 	 * is not working properly, so warn about it!
2240 	 */
2241 	WARN_ON_ONCE(1);
2242 	return 0;
2243 }
2244 #endif
2245 
2246 bool bpf_prog_map_compatible(struct bpf_map *map,
2247 			     const struct bpf_prog *fp)
2248 {
2249 	enum bpf_prog_type prog_type = resolve_prog_type(fp);
2250 	bool ret;
2251 
2252 	if (fp->kprobe_override)
2253 		return false;
2254 
2255 	/* XDP programs inserted into maps are not guaranteed to run on
2256 	 * a particular netdev (and can run outside driver context entirely
2257 	 * in the case of devmap and cpumap). Until device checks
2258 	 * are implemented, prohibit adding dev-bound programs to program maps.
2259 	 */
2260 	if (bpf_prog_is_dev_bound(fp->aux))
2261 		return false;
2262 
2263 	spin_lock(&map->owner.lock);
2264 	if (!map->owner.type) {
2265 		/* There's no owner yet where we could check for
2266 		 * compatibility.
2267 		 */
2268 		map->owner.type  = prog_type;
2269 		map->owner.jited = fp->jited;
2270 		map->owner.xdp_has_frags = fp->aux->xdp_has_frags;
2271 		ret = true;
2272 	} else {
2273 		ret = map->owner.type  == prog_type &&
2274 		      map->owner.jited == fp->jited &&
2275 		      map->owner.xdp_has_frags == fp->aux->xdp_has_frags;
2276 	}
2277 	spin_unlock(&map->owner.lock);
2278 
2279 	return ret;
2280 }
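
/* Editorial note: the first program inserted into a prog map claims
 * ownership above (prog type, JITed-ness and frags support are recorded);
 * every later insertion must match those properties, which e.g. prevents
 * mixing JITed and non-JITed tail call targets in one map.
 */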
2281 
2282 static int bpf_check_tail_call(const struct bpf_prog *fp)
2283 {
2284 	struct bpf_prog_aux *aux = fp->aux;
2285 	int i, ret = 0;
2286 
2287 	mutex_lock(&aux->used_maps_mutex);
2288 	for (i = 0; i < aux->used_map_cnt; i++) {
2289 		struct bpf_map *map = aux->used_maps[i];
2290 
2291 		if (!map_type_contains_progs(map))
2292 			continue;
2293 
2294 		if (!bpf_prog_map_compatible(map, fp)) {
2295 			ret = -EINVAL;
2296 			goto out;
2297 		}
2298 	}
2299 
2300 out:
2301 	mutex_unlock(&aux->used_maps_mutex);
2302 	return ret;
2303 }
2304 
2305 static void bpf_prog_select_func(struct bpf_prog *fp)
2306 {
2307 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
2308 	u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
2309 
2310 	fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
2311 #else
2312 	fp->bpf_func = __bpf_prog_ret0_warn;
2313 #endif
2314 }
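
/* Editorial illustration: the interpreter variants differ only in stack
 * size. A program with e.g. fp->aux->stack_depth == 100 rounds up to 128
 * and selects interpreters[3], i.e. __bpf_prog_run128 with its 128 byte
 * on-stack scratch area.
 */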
2315 
2316 /**
2317  *	bpf_prog_select_runtime - select exec runtime for BPF program
2318  *	@fp: bpf_prog populated with BPF program
2319  *	@err: pointer to error variable
2320  *
2321  * Try to JIT eBPF program, if JIT is not available, use interpreter.
2322  * The BPF program will be executed via bpf_prog_run() function.
2323  *
2324  * Return: the &fp argument along with &err set to 0 for success or
2325  * a negative errno code on failure
2326  */
2327 struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
2328 {
2329 	/* In case of BPF to BPF calls, the verifier did all the prep
2330 	 * work with regard to JITing, etc.
2331 	 */
2332 	bool jit_needed = false;
2333 
2334 	if (fp->bpf_func)
2335 		goto finalize;
2336 
2337 	if (IS_ENABLED(CONFIG_BPF_JIT_ALWAYS_ON) ||
2338 	    bpf_prog_has_kfunc_call(fp))
2339 		jit_needed = true;
2340 
2341 	bpf_prog_select_func(fp);
2342 
2343 	/* eBPF JITs can rewrite the program in case constant
2344 	 * blinding is active. However, in case of error during
2345 	 * blinding, bpf_int_jit_compile() must always return a
2346 	 * valid program, which in this case would simply not
2347 	 * be JITed and instead falls back to the interpreter.
2348 	 */
2349 	if (!bpf_prog_is_offloaded(fp->aux)) {
2350 		*err = bpf_prog_alloc_jited_linfo(fp);
2351 		if (*err)
2352 			return fp;
2353 
2354 		fp = bpf_int_jit_compile(fp);
2355 		bpf_prog_jit_attempt_done(fp);
2356 		if (!fp->jited && jit_needed) {
2357 			*err = -ENOTSUPP;
2358 			return fp;
2359 		}
2360 	} else {
2361 		*err = bpf_prog_offload_compile(fp);
2362 		if (*err)
2363 			return fp;
2364 	}
2365 
2366 finalize:
2367 	bpf_prog_lock_ro(fp);
2368 
2369 	/* The tail call compatibility check can only be done at
2370 	 * this late stage, as we need to determine whether we deal
2371 	 * with JITed or non-JITed program concatenations, and not
2372 	 * all eBPF JITs might immediately support all features.
2373 	 */
2374 	*err = bpf_check_tail_call(fp);
2375 
2376 	return fp;
2377 }
2378 EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
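
/* Editorial sketch of typical usage (the real call sites live in the
 * syscall and filter setup paths):
 *
 *	int err;
 *
 *	prog = bpf_prog_select_runtime(prog, &err);
 *	if (err < 0)
 *		goto free_prog;
 *
 * Note that the returned prog pointer may differ from the one passed in,
 * e.g. when the JIT replaced the program.
 */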
2379 
2380 static unsigned int __bpf_prog_ret1(const void *ctx,
2381 				    const struct bpf_insn *insn)
2382 {
2383 	return 1;
2384 }
2385 
2386 static struct bpf_prog_dummy {
2387 	struct bpf_prog prog;
2388 } dummy_bpf_prog = {
2389 	.prog = {
2390 		.bpf_func = __bpf_prog_ret1,
2391 	},
2392 };
2393 
2394 struct bpf_empty_prog_array bpf_empty_prog_array = {
2395 	.null_prog = NULL,
2396 };
2397 EXPORT_SYMBOL(bpf_empty_prog_array);
2398 
2399 struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
2400 {
2401 	if (prog_cnt)
2402 		return kzalloc(sizeof(struct bpf_prog_array) +
2403 			       sizeof(struct bpf_prog_array_item) *
2404 			       (prog_cnt + 1),
2405 			       flags);
2406 
2407 	return &bpf_empty_prog_array.hdr;
2408 }
2409 
2410 void bpf_prog_array_free(struct bpf_prog_array *progs)
2411 {
2412 	if (!progs || progs == &bpf_empty_prog_array.hdr)
2413 		return;
2414 	kfree_rcu(progs, rcu);
2415 }
2416 
2417 static void __bpf_prog_array_free_sleepable_cb(struct rcu_head *rcu)
2418 {
2419 	struct bpf_prog_array *progs;
2420 
2421 	/* If RCU Tasks Trace grace period implies RCU grace period, there is
2422 	 * no need to call kfree_rcu(), just call kfree() directly.
2423 	 */
2424 	progs = container_of(rcu, struct bpf_prog_array, rcu);
2425 	if (rcu_trace_implies_rcu_gp())
2426 		kfree(progs);
2427 	else
2428 		kfree_rcu(progs, rcu);
2429 }
2430 
2431 void bpf_prog_array_free_sleepable(struct bpf_prog_array *progs)
2432 {
2433 	if (!progs || progs == &bpf_empty_prog_array.hdr)
2434 		return;
2435 	call_rcu_tasks_trace(&progs->rcu, __bpf_prog_array_free_sleepable_cb);
2436 }
2437 
2438 int bpf_prog_array_length(struct bpf_prog_array *array)
2439 {
2440 	struct bpf_prog_array_item *item;
2441 	u32 cnt = 0;
2442 
2443 	for (item = array->items; item->prog; item++)
2444 		if (item->prog != &dummy_bpf_prog.prog)
2445 			cnt++;
2446 	return cnt;
2447 }
2448 
2449 bool bpf_prog_array_is_empty(struct bpf_prog_array *array)
2450 {
2451 	struct bpf_prog_array_item *item;
2452 
2453 	for (item = array->items; item->prog; item++)
2454 		if (item->prog != &dummy_bpf_prog.prog)
2455 			return false;
2456 	return true;
2457 }
2458 
2459 static bool bpf_prog_array_copy_core(struct bpf_prog_array *array,
2460 				     u32 *prog_ids,
2461 				     u32 request_cnt)
2462 {
2463 	struct bpf_prog_array_item *item;
2464 	int i = 0;
2465 
2466 	for (item = array->items; item->prog; item++) {
2467 		if (item->prog == &dummy_bpf_prog.prog)
2468 			continue;
2469 		prog_ids[i] = item->prog->aux->id;
2470 		if (++i == request_cnt) {
2471 			item++;
2472 			break;
2473 		}
2474 	}
2475 
2476 	return !!(item->prog);
2477 }
2478 
2479 int bpf_prog_array_copy_to_user(struct bpf_prog_array *array,
2480 				__u32 __user *prog_ids, u32 cnt)
2481 {
2482 	unsigned long err = 0;
2483 	bool nospc;
2484 	u32 *ids;
2485 
2486 	/* users of this function are doing:
2487 	 * cnt = bpf_prog_array_length();
2488 	 * if (cnt > 0)
2489 	 *     bpf_prog_array_copy_to_user(..., cnt);
2490 	 * so below kcalloc doesn't need extra cnt > 0 check.
2491 	 */
2492 	ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
2493 	if (!ids)
2494 		return -ENOMEM;
2495 	nospc = bpf_prog_array_copy_core(array, ids, cnt);
2496 	err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
2497 	kfree(ids);
2498 	if (err)
2499 		return -EFAULT;
2500 	if (nospc)
2501 		return -ENOSPC;
2502 	return 0;
2503 }
2504 
2505 void bpf_prog_array_delete_safe(struct bpf_prog_array *array,
2506 				struct bpf_prog *old_prog)
2507 {
2508 	struct bpf_prog_array_item *item;
2509 
2510 	for (item = array->items; item->prog; item++)
2511 		if (item->prog == old_prog) {
2512 			WRITE_ONCE(item->prog, &dummy_bpf_prog.prog);
2513 			break;
2514 		}
2515 }
2516 
2517 /**
2518  * bpf_prog_array_delete_safe_at() - Replaces the program at the given
2519  *                                   index into the program array with
2520  *                                   a dummy no-op program.
2521  * @array: a bpf_prog_array
2522  * @index: the index of the program to replace
2523  *
2524  * Skips over dummy programs (by not counting them) when calculating
2525  * the position of the program to replace.
2526  *
2527  * Return:
2528  * * 0		- Success
2529  * * -EINVAL	- Invalid index value. Must be a non-negative integer.
2530  * * -ENOENT	- Index out of range
2531  */
2532 int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index)
2533 {
2534 	return bpf_prog_array_update_at(array, index, &dummy_bpf_prog.prog);
2535 }
2536 
2537 /**
2538  * bpf_prog_array_update_at() - Updates the program at the given index
2539  *                              into the program array.
2540  * @array: a bpf_prog_array
2541  * @index: the index of the program to update
2542  * @prog: the program to insert into the array
2543  *
2544  * Skips over dummy programs (by not counting them) when calculating
2545  * the position of the program to update.
2546  *
2547  * Return:
2548  * * 0		- Success
2549  * * -EINVAL	- Invalid index value. Must be a non-negative integer.
2550  * * -ENOENT	- Index out of range
2551  */
2552 int bpf_prog_array_update_at(struct bpf_prog_array *array, int index,
2553 			     struct bpf_prog *prog)
2554 {
2555 	struct bpf_prog_array_item *item;
2556 
2557 	if (unlikely(index < 0))
2558 		return -EINVAL;
2559 
2560 	for (item = array->items; item->prog; item++) {
2561 		if (item->prog == &dummy_bpf_prog.prog)
2562 			continue;
2563 		if (!index) {
2564 			WRITE_ONCE(item->prog, prog);
2565 			return 0;
2566 		}
2567 		index--;
2568 	}
2569 	return -ENOENT;
2570 }
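
/* Editorial illustration: given an array laid out as [P0, dummy, P1, NULL],
 * bpf_prog_array_delete_safe_at(array, 1) replaces P1, since the dummy
 * entry is not counted, leaving [P0, dummy, dummy, NULL].
 */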
2571 
2572 int bpf_prog_array_copy(struct bpf_prog_array *old_array,
2573 			struct bpf_prog *exclude_prog,
2574 			struct bpf_prog *include_prog,
2575 			u64 bpf_cookie,
2576 			struct bpf_prog_array **new_array)
2577 {
2578 	int new_prog_cnt, carry_prog_cnt = 0;
2579 	struct bpf_prog_array_item *existing, *new;
2580 	struct bpf_prog_array *array;
2581 	bool found_exclude = false;
2582 
2583 	/* Figure out how many existing progs we need to carry over to
2584 	 * the new array.
2585 	 */
2586 	if (old_array) {
2587 		existing = old_array->items;
2588 		for (; existing->prog; existing++) {
2589 			if (existing->prog == exclude_prog) {
2590 				found_exclude = true;
2591 				continue;
2592 			}
2593 			if (existing->prog != &dummy_bpf_prog.prog)
2594 				carry_prog_cnt++;
2595 			if (existing->prog == include_prog)
2596 				return -EEXIST;
2597 		}
2598 	}
2599 
2600 	if (exclude_prog && !found_exclude)
2601 		return -ENOENT;
2602 
2603 	/* How many progs (not NULL) will be in the new array? */
2604 	new_prog_cnt = carry_prog_cnt;
2605 	if (include_prog)
2606 		new_prog_cnt += 1;
2607 
2608 	/* Do we have any prog (not NULL) in the new array? */
2609 	if (!new_prog_cnt) {
2610 		*new_array = NULL;
2611 		return 0;
2612 	}
2613 
2614 	/* +1 as the end of prog_array is marked with NULL */
2615 	array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL);
2616 	if (!array)
2617 		return -ENOMEM;
2618 	new = array->items;
2619 
2620 	/* Fill in the new prog array */
2621 	if (carry_prog_cnt) {
2622 		existing = old_array->items;
2623 		for (; existing->prog; existing++) {
2624 			if (existing->prog == exclude_prog ||
2625 			    existing->prog == &dummy_bpf_prog.prog)
2626 				continue;
2627 
2628 			new->prog = existing->prog;
2629 			new->bpf_cookie = existing->bpf_cookie;
2630 			new++;
2631 		}
2632 	}
2633 	if (include_prog) {
2634 		new->prog = include_prog;
2635 		new->bpf_cookie = bpf_cookie;
2636 		new++;
2637 	}
2638 	new->prog = NULL;
2639 	*new_array = array;
2640 	return 0;
2641 }
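
/* Editorial illustration: attaching a new program P2 to an array holding
 * [P0, P1] is bpf_prog_array_copy(old, NULL, P2, cookie, &new), yielding
 * [P0, P1, P2]; detaching P1 passes it as exclude_prog instead. The caller
 * publishes the new array and frees the old one (bpf_prog_array_free()
 * defers the actual kfree via RCU).
 */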
2642 
2643 int bpf_prog_array_copy_info(struct bpf_prog_array *array,
2644 			     u32 *prog_ids, u32 request_cnt,
2645 			     u32 *prog_cnt)
2646 {
2647 	u32 cnt = 0;
2648 
2649 	if (array)
2650 		cnt = bpf_prog_array_length(array);
2651 
2652 	*prog_cnt = cnt;
2653 
2654 	/* return early if user requested only program count or nothing to copy */
2655 	if (!request_cnt || !cnt)
2656 		return 0;
2657 
2658 	/* this function is called under trace/bpf_trace.c: bpf_event_mutex */
2659 	return bpf_prog_array_copy_core(array, prog_ids, request_cnt) ? -ENOSPC
2660 								     : 0;
2661 }
2662 
2663 void __bpf_free_used_maps(struct bpf_prog_aux *aux,
2664 			  struct bpf_map **used_maps, u32 len)
2665 {
2666 	struct bpf_map *map;
2667 	u32 i;
2668 
2669 	for (i = 0; i < len; i++) {
2670 		map = used_maps[i];
2671 		if (map->ops->map_poke_untrack)
2672 			map->ops->map_poke_untrack(map, aux);
2673 		bpf_map_put(map);
2674 	}
2675 }
2676 
2677 static void bpf_free_used_maps(struct bpf_prog_aux *aux)
2678 {
2679 	__bpf_free_used_maps(aux, aux->used_maps, aux->used_map_cnt);
2680 	kfree(aux->used_maps);
2681 }
2682 
2683 void __bpf_free_used_btfs(struct bpf_prog_aux *aux,
2684 			  struct btf_mod_pair *used_btfs, u32 len)
2685 {
2686 #ifdef CONFIG_BPF_SYSCALL
2687 	struct btf_mod_pair *btf_mod;
2688 	u32 i;
2689 
2690 	for (i = 0; i < len; i++) {
2691 		btf_mod = &used_btfs[i];
2692 		if (btf_mod->module)
2693 			module_put(btf_mod->module);
2694 		btf_put(btf_mod->btf);
2695 	}
2696 #endif
2697 }
2698 
2699 static void bpf_free_used_btfs(struct bpf_prog_aux *aux)
2700 {
2701 	__bpf_free_used_btfs(aux, aux->used_btfs, aux->used_btf_cnt);
2702 	kfree(aux->used_btfs);
2703 }
2704 
2705 static void bpf_prog_free_deferred(struct work_struct *work)
2706 {
2707 	struct bpf_prog_aux *aux;
2708 	int i;
2709 
2710 	aux = container_of(work, struct bpf_prog_aux, work);
2711 #ifdef CONFIG_BPF_SYSCALL
2712 	bpf_free_kfunc_btf_tab(aux->kfunc_btf_tab);
2713 #endif
2714 #ifdef CONFIG_CGROUP_BPF
2715 	if (aux->cgroup_atype != CGROUP_BPF_ATTACH_TYPE_INVALID)
2716 		bpf_cgroup_atype_put(aux->cgroup_atype);
2717 #endif
2718 	bpf_free_used_maps(aux);
2719 	bpf_free_used_btfs(aux);
2720 	if (bpf_prog_is_dev_bound(aux))
2721 		bpf_prog_dev_bound_destroy(aux->prog);
2722 #ifdef CONFIG_PERF_EVENTS
2723 	if (aux->prog->has_callchain_buf)
2724 		put_callchain_buffers();
2725 #endif
2726 	if (aux->dst_trampoline)
2727 		bpf_trampoline_put(aux->dst_trampoline);
2728 	for (i = 0; i < aux->func_cnt; i++) {
2729 		/* We can just unlink the subprog poke descriptor table as
2730 		 * it was originally linked to the main program and is also
2731 		 * released along with it.
2732 		 */
2733 		aux->func[i]->aux->poke_tab = NULL;
2734 		bpf_jit_free(aux->func[i]);
2735 	}
2736 	if (aux->func_cnt) {
2737 		kfree(aux->func);
2738 		bpf_prog_unlock_free(aux->prog);
2739 	} else {
2740 		bpf_jit_free(aux->prog);
2741 	}
2742 }
2743 
2744 void bpf_prog_free(struct bpf_prog *fp)
2745 {
2746 	struct bpf_prog_aux *aux = fp->aux;
2747 
2748 	if (aux->dst_prog)
2749 		bpf_prog_put(aux->dst_prog);
2750 	INIT_WORK(&aux->work, bpf_prog_free_deferred);
2751 	schedule_work(&aux->work);
2752 }
2753 EXPORT_SYMBOL_GPL(bpf_prog_free);
2754 
2755 /* RNG for unprivileged user space with separate state from prandom_u32(). */
2756 static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);
2757 
2758 void bpf_user_rnd_init_once(void)
2759 {
2760 	prandom_init_once(&bpf_user_rnd_state);
2761 }
2762 
2763 BPF_CALL_0(bpf_user_rnd_u32)
2764 {
2765 	/* Should someone ever have the rather unwise idea to use some
2766 	 * of the registers passed into this function, then note that
2767 	 * this function is called from native eBPF and classic-to-eBPF
2768 	 * transformations. Register assignments from both sides are
2769 	 * different; e.g., classic BPF always sets up fn(ctx, A, X) here.
2770 	 */
2771 	struct rnd_state *state;
2772 	u32 res;
2773 
2774 	state = &get_cpu_var(bpf_user_rnd_state);
2775 	res = prandom_u32_state(state);
2776 	put_cpu_var(bpf_user_rnd_state);
2777 
2778 	return res;
2779 }
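
/* Editorial note: bpf_user_rnd_u32() backs the bpf_get_prandom_u32()
 * helper; bpf_user_rnd_init_once() is expected to have seeded the
 * per-cpu state before the first call.
 */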
2780 
2781 BPF_CALL_0(bpf_get_raw_cpu_id)
2782 {
2783 	return raw_smp_processor_id();
2784 }
2785 
2786 /* Weak definitions of helper functions in case we don't have bpf syscall. */
2787 const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
2788 const struct bpf_func_proto bpf_map_update_elem_proto __weak;
2789 const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
2790 const struct bpf_func_proto bpf_map_push_elem_proto __weak;
2791 const struct bpf_func_proto bpf_map_pop_elem_proto __weak;
2792 const struct bpf_func_proto bpf_map_peek_elem_proto __weak;
2793 const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto __weak;
2794 const struct bpf_func_proto bpf_spin_lock_proto __weak;
2795 const struct bpf_func_proto bpf_spin_unlock_proto __weak;
2796 const struct bpf_func_proto bpf_jiffies64_proto __weak;
2797 
2798 const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
2799 const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
2800 const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
2801 const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
2802 const struct bpf_func_proto bpf_ktime_get_boot_ns_proto __weak;
2803 const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto __weak;
2804 const struct bpf_func_proto bpf_ktime_get_tai_ns_proto __weak;
2805 
2806 const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
2807 const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
2808 const struct bpf_func_proto bpf_get_current_comm_proto __weak;
2809 const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak;
2810 const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto __weak;
2811 const struct bpf_func_proto bpf_get_local_storage_proto __weak;
2812 const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto __weak;
2813 const struct bpf_func_proto bpf_snprintf_btf_proto __weak;
2814 const struct bpf_func_proto bpf_seq_printf_btf_proto __weak;
2815 const struct bpf_func_proto bpf_set_retval_proto __weak;
2816 const struct bpf_func_proto bpf_get_retval_proto __weak;
2817 
2818 const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
2819 {
2820 	return NULL;
2821 }
2822 
2823 const struct bpf_func_proto * __weak bpf_get_trace_vprintk_proto(void)
2824 {
2825 	return NULL;
2826 }
2827 
2828 u64 __weak
2829 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
2830 		 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
2831 {
2832 	return -ENOTSUPP;
2833 }
2834 EXPORT_SYMBOL_GPL(bpf_event_output);
2835 
2836 /* Always built-in helper functions. */
2837 const struct bpf_func_proto bpf_tail_call_proto = {
2838 	.func		= NULL,
2839 	.gpl_only	= false,
2840 	.ret_type	= RET_VOID,
2841 	.arg1_type	= ARG_PTR_TO_CTX,
2842 	.arg2_type	= ARG_CONST_MAP_PTR,
2843 	.arg3_type	= ARG_ANYTHING,
2844 };
2845 
2846 /* Stub for JITs that only support cBPF. eBPF programs are interpreted.
2847  * Implementing bpf_int_jit_compile() instead is encouraged, so that
2848  * eBPF and implicitly also cBPF can get JITed!
2849  */
2850 struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
2851 {
2852 	return prog;
2853 }
2854 
2855 /* Stub for JITs that support eBPF. All cBPF code gets transformed into
2856  * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
2857  */
2858 void __weak bpf_jit_compile(struct bpf_prog *prog)
2859 {
2860 }
2861 
2862 bool __weak bpf_helper_changes_pkt_data(void *func)
2863 {
2864 	return false;
2865 }
2866 
2867 /* Return TRUE if the JIT backend wants the verifier to enable sub-register
2868  * usage analysis code and wants explicit zero extension inserted by the verifier.
2869  * Otherwise, return FALSE.
2870  *
2871  * The verifier inserts an explicit zero extension after BPF_CMPXCHGs even if
2872  * you don't override this. JITs that don't want these extra insns can detect
2873  * them using insn_is_zext.
2874  */
2875 bool __weak bpf_jit_needs_zext(void)
2876 {
2877 	return false;
2878 }
2879 
2880 /* Return TRUE if the JIT backend supports mixing bpf2bpf and tailcalls. */
2881 bool __weak bpf_jit_supports_subprog_tailcalls(void)
2882 {
2883 	return false;
2884 }
2885 
2886 bool __weak bpf_jit_supports_kfunc_call(void)
2887 {
2888 	return false;
2889 }
2890 
2891 bool __weak bpf_jit_supports_far_kfunc_call(void)
2892 {
2893 	return false;
2894 }
2895 
2896 /* To execute LD_ABS/LD_IND instructions, __bpf_prog_run() may call
2897  * skb_copy_bits(), so provide a weak definition of it for NET-less config.
2898  */
2899 int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
2900 			 int len)
2901 {
2902 	return -EFAULT;
2903 }
2904 
2905 int __weak bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
2906 			      void *addr1, void *addr2)
2907 {
2908 	return -ENOTSUPP;
2909 }
2910 
2911 void * __weak bpf_arch_text_copy(void *dst, void *src, size_t len)
2912 {
2913 	return ERR_PTR(-ENOTSUPP);
2914 }
2915 
2916 int __weak bpf_arch_text_invalidate(void *dst, size_t len)
2917 {
2918 	return -ENOTSUPP;
2919 }
2920 
2921 #ifdef CONFIG_BPF_SYSCALL
2922 static int __init bpf_global_ma_init(void)
2923 {
2924 	int ret;
2925 
2926 	ret = bpf_mem_alloc_init(&bpf_global_ma, 0, false);
2927 	bpf_global_ma_set = !ret;
2928 	return ret;
2929 }
2930 late_initcall(bpf_global_ma_init);
2931 #endif
2932 
2933 DEFINE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
2934 EXPORT_SYMBOL(bpf_stats_enabled_key);
2935 
2936 /* All definitions of tracepoints related to BPF. */
2937 #define CREATE_TRACE_POINTS
2938 #include <linux/bpf_trace.h>
2939 
2940 EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);
2941 EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_bulk_tx);
2942