// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 *	Jay Schulist <jschlst@samba.org>
 *	Alexei Starovoitov <ast@plumgrid.com>
 *	Daniel Borkmann <dborkman@redhat.com>
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */

#include <uapi/linux/btf.h>
#include <linux/filter.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/moduleloader.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/objtool.h>
#include <linux/rbtree_latch.h>
#include <linux/kallsyms.h>
#include <linux/rcupdate.h>
#include <linux/perf_event.h>
#include <linux/extable.h>
#include <linux/log2.h>
#include <linux/bpf_verifier.h>

#include <asm/barrier.h>
#include <asm/unaligned.h>

/* Registers */
#define BPF_R0	regs[BPF_REG_0]
#define BPF_R1	regs[BPF_REG_1]
#define BPF_R2	regs[BPF_REG_2]
#define BPF_R3	regs[BPF_REG_3]
#define BPF_R4	regs[BPF_REG_4]
#define BPF_R5	regs[BPF_REG_5]
#define BPF_R6	regs[BPF_REG_6]
#define BPF_R7	regs[BPF_REG_7]
#define BPF_R8	regs[BPF_REG_8]
#define BPF_R9	regs[BPF_REG_9]
#define BPF_R10	regs[BPF_REG_10]

/* Named registers */
#define DST	regs[insn->dst_reg]
#define SRC	regs[insn->src_reg]
#define FP	regs[BPF_REG_FP]
#define AX	regs[BPF_REG_AX]
#define ARG1	regs[BPF_REG_ARG1]
#define CTX	regs[BPF_REG_CTX]
#define IMM	insn->imm

/* No hurry in this branch
 *
 * Exported for the bpf jit load helper.
 */
void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
{
	u8 *ptr = NULL;

	if (k >= SKF_NET_OFF)
		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
	else if (k >= SKF_LL_OFF)
		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;

	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
		return ptr;

	return NULL;
}

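/* Illustrative sketch (not part of the original file): how a cBPF-style
 * caller could use the helper above to read one byte at the negative
 * offset SKF_NET_OFF + 12, i.e. 12 bytes past the network header. The
 * function name is hypothetical.
 */
static int __maybe_unused example_load_net_byte(const struct sk_buff *skb,
						u8 *out)
{
	void *p = bpf_internal_load_pointer_neg_helper(skb, SKF_NET_OFF + 12, 1);

	if (!p)
		return -EFAULT;	/* offset lies outside the linear skb data */
	*out = *(u8 *)p;
	return 0;
}
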
struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL_ACCOUNT | __GFP_ZERO | gfp_extra_flags;
	struct bpf_prog_aux *aux;
	struct bpf_prog *fp;

	size = round_up(size, PAGE_SIZE);
	fp = __vmalloc(size, gfp_flags);
	if (fp == NULL)
		return NULL;

	aux = kzalloc(sizeof(*aux), GFP_KERNEL_ACCOUNT | gfp_extra_flags);
	if (aux == NULL) {
		vfree(fp);
		return NULL;
	}
	fp->active = alloc_percpu_gfp(int, GFP_KERNEL_ACCOUNT | gfp_extra_flags);
	if (!fp->active) {
		vfree(fp);
		kfree(aux);
		return NULL;
	}

	fp->pages = size / PAGE_SIZE;
	fp->aux = aux;
	fp->aux->prog = fp;
	fp->jit_requested = ebpf_jit_enabled();

	INIT_LIST_HEAD_RCU(&fp->aux->ksym.lnode);
	mutex_init(&fp->aux->used_maps_mutex);
	mutex_init(&fp->aux->dst_mutex);

	return fp;
}

struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL_ACCOUNT | __GFP_ZERO | gfp_extra_flags;
	struct bpf_prog *prog;
	int cpu;

	prog = bpf_prog_alloc_no_stats(size, gfp_extra_flags);
	if (!prog)
		return NULL;

	prog->stats = alloc_percpu_gfp(struct bpf_prog_stats, gfp_flags);
	if (!prog->stats) {
		free_percpu(prog->active);
		kfree(prog->aux);
		vfree(prog);
		return NULL;
	}

	for_each_possible_cpu(cpu) {
		struct bpf_prog_stats *pstats;

		pstats = per_cpu_ptr(prog->stats, cpu);
		u64_stats_init(&pstats->syncp);
	}
	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_alloc);

int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog)
{
	if (!prog->aux->nr_linfo || !prog->jit_requested)
		return 0;

	prog->aux->jited_linfo = kvcalloc(prog->aux->nr_linfo,
					  sizeof(*prog->aux->jited_linfo),
					  GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
	if (!prog->aux->jited_linfo)
		return -ENOMEM;

	return 0;
}

void bpf_prog_jit_attempt_done(struct bpf_prog *prog)
{
	if (prog->aux->jited_linfo &&
	    (!prog->jited || !prog->aux->jited_linfo[0])) {
		kvfree(prog->aux->jited_linfo);
		prog->aux->jited_linfo = NULL;
	}

	kfree(prog->aux->kfunc_tab);
	prog->aux->kfunc_tab = NULL;
}

/* The JIT engine is responsible for providing an array
 * for the insn_off to jited_off mapping (insn_to_jit_off).
 *
 * The idx to this array is the insn_off.  Hence, the insn_off
 * here is relative to the prog itself instead of the main prog.
 * This array has one entry for each xlated bpf insn.
 *
 * jited_off is the byte off to the last byte of the jited insn.
 *
 * Hence, with
 * insn_start:
 *      The first bpf insn off of the prog.  The insn off
 *      here is relative to the main prog.
 *      e.g. if prog is a subprog, insn_start > 0
 * linfo_idx:
 *      The prog's idx to prog->aux->linfo and jited_linfo
 *
 * jited_linfo[linfo_idx] = prog->bpf_func
 *
 * For i > linfo_idx,
 *
 * jited_linfo[i] = prog->bpf_func +
 *	insn_to_jit_off[linfo[i].insn_off - insn_start - 1]
 */
void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
			       const u32 *insn_to_jit_off)
{
	u32 linfo_idx, insn_start, insn_end, nr_linfo, i;
	const struct bpf_line_info *linfo;
	void **jited_linfo;

	if (!prog->aux->jited_linfo)
		/* Userspace did not provide linfo */
		return;

	linfo_idx = prog->aux->linfo_idx;
	linfo = &prog->aux->linfo[linfo_idx];
	insn_start = linfo[0].insn_off;
	insn_end = insn_start + prog->len;

	jited_linfo = &prog->aux->jited_linfo[linfo_idx];
	jited_linfo[0] = prog->bpf_func;

	nr_linfo = prog->aux->nr_linfo - linfo_idx;

	for (i = 1; i < nr_linfo && linfo[i].insn_off < insn_end; i++)
		/* The verifier ensures that linfo[i].insn_off is
		 * strictly increasing
		 */
		jited_linfo[i] = prog->bpf_func +
			insn_to_jit_off[linfo[i].insn_off - insn_start - 1];
}

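/* Worked example (illustrative numbers only): suppose a subprog starts at
 * main-prog insn 10 (insn_start = 10) and has line info at insn_off 10,
 * 12 and 15. Then the loop above produces:
 *
 *	jited_linfo[0] = prog->bpf_func
 *	jited_linfo[1] = prog->bpf_func + insn_to_jit_off[12 - 10 - 1]
 *	jited_linfo[2] = prog->bpf_func + insn_to_jit_off[15 - 10 - 1]
 *
 * i.e. because insn_to_jit_off[i] points at the *last* byte of xlated
 * insn i, the jited address of insn X is one byte past the end of
 * insn X - 1.
 */
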
struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
				  gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL_ACCOUNT | __GFP_ZERO | gfp_extra_flags;
	struct bpf_prog *fp;
	u32 pages;

	size = round_up(size, PAGE_SIZE);
	pages = size / PAGE_SIZE;
	if (pages <= fp_old->pages)
		return fp_old;

	fp = __vmalloc(size, gfp_flags);
	if (fp) {
		memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
		fp->pages = pages;
		fp->aux->prog = fp;

		/* We keep fp->aux from fp_old around in the new
		 * reallocated structure.
		 */
		fp_old->aux = NULL;
		fp_old->stats = NULL;
		fp_old->active = NULL;
		__bpf_prog_free(fp_old);
	}

	return fp;
}

void __bpf_prog_free(struct bpf_prog *fp)
{
	if (fp->aux) {
		mutex_destroy(&fp->aux->used_maps_mutex);
		mutex_destroy(&fp->aux->dst_mutex);
		kfree(fp->aux->poke_tab);
		kfree(fp->aux);
	}
	free_percpu(fp->stats);
	free_percpu(fp->active);
	vfree(fp);
}

int bpf_prog_calc_tag(struct bpf_prog *fp)
{
	const u32 bits_offset = SHA1_BLOCK_SIZE - sizeof(__be64);
	u32 raw_size = bpf_prog_tag_scratch_size(fp);
	u32 digest[SHA1_DIGEST_WORDS];
	u32 ws[SHA1_WORKSPACE_WORDS];
	u32 i, bsize, psize, blocks;
	struct bpf_insn *dst;
	bool was_ld_map;
	u8 *raw, *todo;
	__be32 *result;
	__be64 *bits;

	raw = vmalloc(raw_size);
	if (!raw)
		return -ENOMEM;

	sha1_init(digest);
	memset(ws, 0, sizeof(ws));

	/* We need to take out the map fd for the digest calculation
	 * since map fds are unstable from the user-space side.
	 */
	dst = (void *)raw;
	for (i = 0, was_ld_map = false; i < fp->len; i++) {
		dst[i] = fp->insnsi[i];
		if (!was_ld_map &&
		    dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
		    (dst[i].src_reg == BPF_PSEUDO_MAP_FD ||
		     dst[i].src_reg == BPF_PSEUDO_MAP_VALUE)) {
			was_ld_map = true;
			dst[i].imm = 0;
		} else if (was_ld_map &&
			   dst[i].code == 0 &&
			   dst[i].dst_reg == 0 &&
			   dst[i].src_reg == 0 &&
			   dst[i].off == 0) {
			was_ld_map = false;
			dst[i].imm = 0;
		} else {
			was_ld_map = false;
		}
	}

	psize = bpf_prog_insn_size(fp);
	memset(&raw[psize], 0, raw_size - psize);
	raw[psize++] = 0x80;

	bsize  = round_up(psize, SHA1_BLOCK_SIZE);
	blocks = bsize / SHA1_BLOCK_SIZE;
	todo   = raw;
	if (bsize - psize >= sizeof(__be64)) {
		bits = (__be64 *)(todo + bsize - sizeof(__be64));
	} else {
		bits = (__be64 *)(todo + bsize + bits_offset);
		blocks++;
	}
	*bits = cpu_to_be64((psize - 1) << 3);

	while (blocks--) {
		sha1_transform(digest, todo, ws);
		todo += SHA1_BLOCK_SIZE;
	}

	result = (__force __be32 *)digest;
	for (i = 0; i < SHA1_DIGEST_WORDS; i++)
		result[i] = cpu_to_be32(digest[i]);
	memcpy(fp->tag, result, sizeof(fp->tag));

	vfree(raw);
	return 0;
}

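/* Worked example of the SHA-1 padding math above (illustrative): a
 * program with three insns has psize = 3 * sizeof(struct bpf_insn) = 24
 * bytes. After appending the 0x80 terminator byte psize is 25 and
 * bsize = round_up(25, 64) = 64; since bsize - psize = 39 >= 8, the
 * big-endian bit length ((25 - 1) << 3 = 192) lands at byte 56 of the
 * same 64-byte SHA1 block and a single sha1_transform() call digests
 * it. Only when fewer than 8 padding bytes remain does blocks get
 * bumped so the length field goes into an extra block.
 */
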
static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old,
				s32 end_new, s32 curr, const bool probe_pass)
{
	const s64 imm_min = S32_MIN, imm_max = S32_MAX;
	s32 delta = end_new - end_old;
	s64 imm = insn->imm;

	if (curr < pos && curr + imm + 1 >= end_old)
		imm += delta;
	else if (curr >= end_new && curr + imm + 1 < end_new)
		imm -= delta;
	if (imm < imm_min || imm > imm_max)
		return -ERANGE;
	if (!probe_pass)
		insn->imm = imm;
	return 0;
}

static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old,
				s32 end_new, s32 curr, const bool probe_pass)
{
	const s32 off_min = S16_MIN, off_max = S16_MAX;
	s32 delta = end_new - end_old;
	s32 off = insn->off;

	if (curr < pos && curr + off + 1 >= end_old)
		off += delta;
	else if (curr >= end_new && curr + off + 1 < end_new)
		off -= delta;
	if (off < off_min || off > off_max)
		return -ERANGE;
	if (!probe_pass)
		insn->off = off;
	return 0;
}

static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, s32 end_old,
			    s32 end_new, const bool probe_pass)
{
	u32 i, insn_cnt = prog->len + (probe_pass ? end_new - end_old : 0);
	struct bpf_insn *insn = prog->insnsi;
	int ret = 0;

	for (i = 0; i < insn_cnt; i++, insn++) {
		u8 code;

		/* In the probing pass we still operate on the original,
		 * unpatched image in order to check overflows before we
		 * do any other adjustments. Therefore skip the patchlet.
		 */
		if (probe_pass && i == pos) {
			i = end_new;
			insn = prog->insnsi + end_old;
		}
		code = insn->code;
		if ((BPF_CLASS(code) != BPF_JMP &&
		     BPF_CLASS(code) != BPF_JMP32) ||
		    BPF_OP(code) == BPF_EXIT)
			continue;
		/* Adjust offset of jmps if we cross patch boundaries. */
		if (BPF_OP(code) == BPF_CALL) {
			if (insn->src_reg != BPF_PSEUDO_CALL)
				continue;
			ret = bpf_adj_delta_to_imm(insn, pos, end_old,
						   end_new, i, probe_pass);
		} else {
			ret = bpf_adj_delta_to_off(insn, pos, end_old,
						   end_new, i, probe_pass);
		}
		if (ret)
			break;
	}

	return ret;
}

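/* Illustrative example (not part of the original file): if the single
 * insn at pos = 5 is replaced by three insns (end_old = 6, end_new = 8),
 * then a forward jump at insn 2 with off = 4 used to land on insn
 * 2 + 4 + 1 = 7, which now sits two slots later; the pass above rewrites
 * off to 6 so the jump still reaches the shifted target. Jumps whose
 * targets lie before the patchlet are left alone.
 */
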
static void bpf_adj_linfo(struct bpf_prog *prog, u32 off, u32 delta)
{
	struct bpf_line_info *linfo;
	u32 i, nr_linfo;

	nr_linfo = prog->aux->nr_linfo;
	if (!nr_linfo || !delta)
		return;

	linfo = prog->aux->linfo;

	for (i = 0; i < nr_linfo; i++)
		if (off < linfo[i].insn_off)
			break;

	/* Push all entries with insn_off > off by delta */
	for (; i < nr_linfo; i++)
		linfo[i].insn_off += delta;
}

struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
				       const struct bpf_insn *patch, u32 len)
{
	u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
	const u32 cnt_max = S16_MAX;
	struct bpf_prog *prog_adj;
	int err;

	/* Since our patchlet doesn't expand the image, we're done. */
	if (insn_delta == 0) {
		memcpy(prog->insnsi + off, patch, sizeof(*patch));
		return prog;
	}

	insn_adj_cnt = prog->len + insn_delta;

	/* Reject anything that would potentially let the insn->off
	 * target overflow when we have excessive program expansions.
	 * We need to probe here before we do any reallocation where
	 * we afterwards may not fail anymore.
	 */
	if (insn_adj_cnt > cnt_max &&
	    (err = bpf_adj_branches(prog, off, off + 1, off + len, true)))
		return ERR_PTR(err);

	/* Several new instructions need to be inserted. Make room
	 * for them. Likely, there's no need for a new allocation as
	 * the last page could have enough tailroom.
	 */
	prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
				    GFP_USER);
	if (!prog_adj)
		return ERR_PTR(-ENOMEM);

	prog_adj->len = insn_adj_cnt;

	/* Patching happens in 3 steps:
	 *
	 * 1) Move over tail of insnsi from next instruction onwards,
	 *    so we can patch the single target insn with one or more
	 *    new ones (patching is always from 1 to n insns, n > 0).
	 * 2) Inject new instructions at the target location.
	 * 3) Adjust branch offsets if necessary.
	 */
	insn_rest = insn_adj_cnt - off - len;

	memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
		sizeof(*patch) * insn_rest);
	memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);

	/* We are guaranteed to not fail at this point; otherwise
	 * the ship has sailed and we cannot revert to the original
	 * state. An overflow cannot happen at this point.
	 */
	BUG_ON(bpf_adj_branches(prog_adj, off, off + 1, off + len, false));

	bpf_adj_linfo(prog_adj, off, insn_delta);

	return prog_adj;
}

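/* Minimal usage sketch (hypothetical caller, mirroring how the verifier
 * patches programs): replace the insn at @off with a two-insn sequence.
 * The function may return a reallocated program, so the caller must
 * always adopt the return value; on error the old prog stays intact.
 */
static struct bpf_prog * __maybe_unused
example_expand_insn(struct bpf_prog *prog, u32 off)
{
	struct bpf_insn patch[] = {
		BPF_MOV64_IMM(BPF_REG_AX, 0),
		BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_AX),
	};

	return bpf_patch_insn_single(prog, off, patch, ARRAY_SIZE(patch));
}
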
int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt)
{
	/* Branch offsets can't overflow when program is shrinking, no need
	 * to call bpf_adj_branches(..., true) here
	 */
	memmove(prog->insnsi + off, prog->insnsi + off + cnt,
		sizeof(struct bpf_insn) * (prog->len - off - cnt));
	prog->len -= cnt;

	return WARN_ON_ONCE(bpf_adj_branches(prog, off, off + cnt, off, false));
}

static void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
{
	int i;

	for (i = 0; i < fp->aux->func_cnt; i++)
		bpf_prog_kallsyms_del(fp->aux->func[i]);
}

void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
{
	bpf_prog_kallsyms_del_subprogs(fp);
	bpf_prog_kallsyms_del(fp);
}

#ifdef CONFIG_BPF_JIT
/* All BPF JIT sysctl knobs here. */
int bpf_jit_enable   __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
int bpf_jit_kallsyms __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
int bpf_jit_harden   __read_mostly;
long bpf_jit_limit   __read_mostly;
long bpf_jit_limit_max __read_mostly;

static void
bpf_prog_ksym_set_addr(struct bpf_prog *prog)
{
	const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(prog);
	unsigned long addr = (unsigned long)hdr;

	WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));

	prog->aux->ksym.start = (unsigned long) prog->bpf_func;
	prog->aux->ksym.end   = addr + hdr->pages * PAGE_SIZE;
}

static void
bpf_prog_ksym_set_name(struct bpf_prog *prog)
{
	char *sym = prog->aux->ksym.name;
	const char *end = sym + KSYM_NAME_LEN;
	const struct btf_type *type;
	const char *func_name;

	BUILD_BUG_ON(sizeof("bpf_prog_") +
		     sizeof(prog->tag) * 2 +
		     /* name has been null terminated.
		      * We would need +1 for the '_' preceding
		      * the name.  However, the null character
		      * is double counted between the name and the
		      * sizeof("bpf_prog_") above, so we omit
		      * the +1 here.
		      */
		     sizeof(prog->aux->name) > KSYM_NAME_LEN);

	sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
	sym  = bin2hex(sym, prog->tag, sizeof(prog->tag));

	/* prog->aux->name will be ignored if full btf name is available */
	if (prog->aux->func_info_cnt) {
		type = btf_type_by_id(prog->aux->btf,
				      prog->aux->func_info[prog->aux->func_idx].type_id);
		func_name = btf_name_by_offset(prog->aux->btf, type->name_off);
		snprintf(sym, (size_t)(end - sym), "_%s", func_name);
		return;
	}

	if (prog->aux->name[0])
		snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name);
	else
		*sym = 0;
}

static unsigned long bpf_get_ksym_start(struct latch_tree_node *n)
{
	return container_of(n, struct bpf_ksym, tnode)->start;
}

static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
					  struct latch_tree_node *b)
{
	return bpf_get_ksym_start(a) < bpf_get_ksym_start(b);
}

static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
{
	unsigned long val = (unsigned long)key;
	const struct bpf_ksym *ksym;

	ksym = container_of(n, struct bpf_ksym, tnode);

	if (val < ksym->start)
		return -1;
	if (val >= ksym->end)
		return  1;

	return 0;
}

static const struct latch_tree_ops bpf_tree_ops = {
	.less	= bpf_tree_less,
	.comp	= bpf_tree_comp,
};

static DEFINE_SPINLOCK(bpf_lock);
static LIST_HEAD(bpf_kallsyms);
static struct latch_tree_root bpf_tree __cacheline_aligned;

void bpf_ksym_add(struct bpf_ksym *ksym)
{
	spin_lock_bh(&bpf_lock);
	WARN_ON_ONCE(!list_empty(&ksym->lnode));
	list_add_tail_rcu(&ksym->lnode, &bpf_kallsyms);
	latch_tree_insert(&ksym->tnode, &bpf_tree, &bpf_tree_ops);
	spin_unlock_bh(&bpf_lock);
}

static void __bpf_ksym_del(struct bpf_ksym *ksym)
{
	if (list_empty(&ksym->lnode))
		return;

	latch_tree_erase(&ksym->tnode, &bpf_tree, &bpf_tree_ops);
	list_del_rcu(&ksym->lnode);
}

void bpf_ksym_del(struct bpf_ksym *ksym)
{
	spin_lock_bh(&bpf_lock);
	__bpf_ksym_del(ksym);
	spin_unlock_bh(&bpf_lock);
}

static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
{
	return fp->jited && !bpf_prog_was_classic(fp);
}

static bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
{
	return list_empty(&fp->aux->ksym.lnode) ||
	       fp->aux->ksym.lnode.prev == LIST_POISON2;
}

void bpf_prog_kallsyms_add(struct bpf_prog *fp)
{
	if (!bpf_prog_kallsyms_candidate(fp) ||
	    !bpf_capable())
		return;

	bpf_prog_ksym_set_addr(fp);
	bpf_prog_ksym_set_name(fp);
	fp->aux->ksym.prog = true;

	bpf_ksym_add(&fp->aux->ksym);
}

void bpf_prog_kallsyms_del(struct bpf_prog *fp)
{
	if (!bpf_prog_kallsyms_candidate(fp))
		return;

	bpf_ksym_del(&fp->aux->ksym);
}

static struct bpf_ksym *bpf_ksym_find(unsigned long addr)
{
	struct latch_tree_node *n;

	n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
	return n ? container_of(n, struct bpf_ksym, tnode) : NULL;
}

const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
				 unsigned long *off, char *sym)
{
	struct bpf_ksym *ksym;
	char *ret = NULL;

	rcu_read_lock();
	ksym = bpf_ksym_find(addr);
	if (ksym) {
		unsigned long symbol_start = ksym->start;
		unsigned long symbol_end = ksym->end;

		strncpy(sym, ksym->name, KSYM_NAME_LEN);

		ret = sym;
		if (size)
			*size = symbol_end - symbol_start;
		if (off)
			*off  = addr - symbol_start;
	}
	rcu_read_unlock();

	return ret;
}

bool is_bpf_text_address(unsigned long addr)
{
	bool ret;

	rcu_read_lock();
	ret = bpf_ksym_find(addr) != NULL;
	rcu_read_unlock();

	return ret;
}

static struct bpf_prog *bpf_prog_ksym_find(unsigned long addr)
{
	struct bpf_ksym *ksym = bpf_ksym_find(addr);

	return ksym && ksym->prog ?
	       container_of(ksym, struct bpf_prog_aux, ksym)->prog :
	       NULL;
}

const struct exception_table_entry *search_bpf_extables(unsigned long addr)
{
	const struct exception_table_entry *e = NULL;
	struct bpf_prog *prog;

	rcu_read_lock();
	prog = bpf_prog_ksym_find(addr);
	if (!prog)
		goto out;
	if (!prog->aux->num_exentries)
		goto out;

	e = search_extable(prog->aux->extable, prog->aux->num_exentries, addr);
out:
	rcu_read_unlock();
	return e;
}

int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
		    char *sym)
{
	struct bpf_ksym *ksym;
	unsigned int it = 0;
	int ret = -ERANGE;

	if (!bpf_jit_kallsyms_enabled())
		return ret;

	rcu_read_lock();
	list_for_each_entry_rcu(ksym, &bpf_kallsyms, lnode) {
		if (it++ != symnum)
			continue;

		strncpy(sym, ksym->name, KSYM_NAME_LEN);

		*value = ksym->start;
		*type  = BPF_SYM_ELF_TYPE;

		ret = 0;
		break;
	}
	rcu_read_unlock();

	return ret;
}

int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
				struct bpf_jit_poke_descriptor *poke)
{
	struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab;
	static const u32 poke_tab_max = 1024;
	u32 slot = prog->aux->size_poke_tab;
	u32 size = slot + 1;

	if (size > poke_tab_max)
		return -ENOSPC;
	if (poke->tailcall_target || poke->tailcall_target_stable ||
	    poke->tailcall_bypass || poke->adj_off || poke->bypass_addr)
		return -EINVAL;

	switch (poke->reason) {
	case BPF_POKE_REASON_TAIL_CALL:
		if (!poke->tail_call.map)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	tab = krealloc(tab, size * sizeof(*poke), GFP_KERNEL);
	if (!tab)
		return -ENOMEM;

	memcpy(&tab[slot], poke, sizeof(*poke));
	prog->aux->size_poke_tab = size;
	prog->aux->poke_tab = tab;

	return slot;
}

static atomic_long_t bpf_jit_current;

/* Can be overridden by an arch's JIT compiler if it has a custom,
 * dedicated BPF backend memory area, or if neither of the two
 * below applies.
 */
u64 __weak bpf_jit_alloc_exec_limit(void)
{
#if defined(MODULES_VADDR)
	return MODULES_END - MODULES_VADDR;
#else
	return VMALLOC_END - VMALLOC_START;
#endif
}

static int __init bpf_jit_charge_init(void)
{
	/* Only used as heuristic here to derive limit. */
	bpf_jit_limit_max = bpf_jit_alloc_exec_limit();
	bpf_jit_limit = min_t(u64, round_up(bpf_jit_limit_max >> 2,
					    PAGE_SIZE), LONG_MAX);
	return 0;
}
pure_initcall(bpf_jit_charge_init);

int bpf_jit_charge_modmem(u32 pages)
{
	if (atomic_long_add_return(pages, &bpf_jit_current) >
	    (bpf_jit_limit >> PAGE_SHIFT)) {
		if (!bpf_capable()) {
			atomic_long_sub(pages, &bpf_jit_current);
			return -EPERM;
		}
	}

	return 0;
}

void bpf_jit_uncharge_modmem(u32 pages)
{
	atomic_long_sub(pages, &bpf_jit_current);
}

void *__weak bpf_jit_alloc_exec(unsigned long size)
{
	return module_alloc(size);
}

void __weak bpf_jit_free_exec(void *addr)
{
	module_memfree(addr);
}

struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
		     unsigned int alignment,
		     bpf_jit_fill_hole_t bpf_fill_ill_insns)
{
	struct bpf_binary_header *hdr;
	u32 size, hole, start, pages;

	WARN_ON_ONCE(!is_power_of_2(alignment) ||
		     alignment > BPF_IMAGE_ALIGNMENT);

	/* Most BPF filters are really small, but if some of them
	 * fill a page, allow at least 128 extra bytes to insert a
	 * random section of illegal instructions.
	 */
	size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
	pages = size / PAGE_SIZE;

	if (bpf_jit_charge_modmem(pages))
		return NULL;
	hdr = bpf_jit_alloc_exec(size);
	if (!hdr) {
		bpf_jit_uncharge_modmem(pages);
		return NULL;
	}

	/* Fill space with illegal/arch-dep instructions. */
	bpf_fill_ill_insns(hdr, size);

	hdr->pages = pages;
	hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
		     PAGE_SIZE - sizeof(*hdr));
	start = (get_random_int() % hole) & ~(alignment - 1);

	/* Leave a random number of instructions before BPF code. */
	*image_ptr = &hdr->image[start];

	return hdr;
}

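/* Sketch of how an arch JIT typically drives the allocator above; the
 * fill callback, alignment and function names here are hypothetical,
 * but the call pattern mirrors real JITs (see e.g.
 * arch/x86/net/bpf_jit_comp.c).
 */
static void __maybe_unused example_fill_ill_insns(void *area,
						  unsigned int size)
{
	memset(area, 0xcc, size);	/* e.g. x86 'int3' as trap filler */
}

static struct bpf_binary_header * __maybe_unused
example_jit_alloc(unsigned int proglen, u8 **image)
{
	/* 4-byte alignment chosen purely for illustration */
	return bpf_jit_binary_alloc(proglen, image, 4,
				    example_fill_ill_insns);
}
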
void bpf_jit_binary_free(struct bpf_binary_header *hdr)
{
	u32 pages = hdr->pages;

	bpf_jit_free_exec(hdr);
	bpf_jit_uncharge_modmem(pages);
}

/* This symbol is only overridden by archs that have different
 * requirements than the usual eBPF JITs, e.g. when they only
 * implement cBPF JIT, do not set images read-only, etc.
 */
void __weak bpf_jit_free(struct bpf_prog *fp)
{
	if (fp->jited) {
		struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);

		bpf_jit_binary_free(hdr);

		WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
	}

	bpf_prog_unlock_free(fp);
}

int bpf_jit_get_func_addr(const struct bpf_prog *prog,
			  const struct bpf_insn *insn, bool extra_pass,
			  u64 *func_addr, bool *func_addr_fixed)
{
	s16 off = insn->off;
	s32 imm = insn->imm;
	u8 *addr;

	*func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL;
	if (!*func_addr_fixed) {
		/* Place-holder address until the last pass has collected
		 * all addresses for JITed subprograms, in which case we
		 * can pick them up from prog->aux.
		 */
		if (!extra_pass)
			addr = NULL;
		else if (prog->aux->func &&
			 off >= 0 && off < prog->aux->func_cnt)
			addr = (u8 *)prog->aux->func[off]->bpf_func;
		else
			return -EINVAL;
	} else {
		/* Address of a BPF helper call. Since part of the core
		 * kernel, it's always at a fixed location. __bpf_call_base
		 * and the helper with imm relative to it are both in core
		 * kernel.
		 */
		addr = (u8 *)__bpf_call_base + imm;
	}

	*func_addr = (unsigned long)addr;
	return 0;
}

static int bpf_jit_blind_insn(const struct bpf_insn *from,
			      const struct bpf_insn *aux,
			      struct bpf_insn *to_buff,
			      bool emit_zext)
{
	struct bpf_insn *to = to_buff;
	u32 imm_rnd = get_random_int();
	s16 off;

	BUILD_BUG_ON(BPF_REG_AX  + 1 != MAX_BPF_JIT_REG);
	BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);

	/* Constraints on AX register:
	 *
	 * AX register is inaccessible from user space. It is mapped in
	 * all JITs, and used here for constant blinding rewrites. It is
	 * typically "stateless" meaning its contents are only valid within
	 * the executed instruction, but not across several instructions.
	 * There are a few exceptions however which are further detailed
	 * below.
	 *
	 * Constant blinding is only used by JITs, not in the interpreter.
	 * The interpreter uses AX in some occasions as a local temporary
	 * register e.g. in DIV or MOD instructions.
	 *
	 * In restricted circumstances, the verifier can also use the AX
	 * register for rewrites as long as they do not interfere with
	 * the above cases!
	 */
	if (from->dst_reg == BPF_REG_AX || from->src_reg == BPF_REG_AX)
		goto out;

	if (from->imm == 0 &&
	    (from->code == (BPF_ALU   | BPF_MOV | BPF_K) ||
	     from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
		*to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
		goto out;
	}

	switch (from->code) {
	case BPF_ALU | BPF_ADD | BPF_K:
	case BPF_ALU | BPF_SUB | BPF_K:
	case BPF_ALU | BPF_AND | BPF_K:
	case BPF_ALU | BPF_OR  | BPF_K:
	case BPF_ALU | BPF_XOR | BPF_K:
	case BPF_ALU | BPF_MUL | BPF_K:
	case BPF_ALU | BPF_MOV | BPF_K:
	case BPF_ALU | BPF_DIV | BPF_K:
	case BPF_ALU | BPF_MOD | BPF_K:
		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX);
		break;

	case BPF_ALU64 | BPF_ADD | BPF_K:
	case BPF_ALU64 | BPF_SUB | BPF_K:
	case BPF_ALU64 | BPF_AND | BPF_K:
	case BPF_ALU64 | BPF_OR  | BPF_K:
	case BPF_ALU64 | BPF_XOR | BPF_K:
	case BPF_ALU64 | BPF_MUL | BPF_K:
	case BPF_ALU64 | BPF_MOV | BPF_K:
	case BPF_ALU64 | BPF_DIV | BPF_K:
	case BPF_ALU64 | BPF_MOD | BPF_K:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU64_REG(from->code, from->dst_reg, BPF_REG_AX);
		break;

	case BPF_JMP | BPF_JEQ  | BPF_K:
	case BPF_JMP | BPF_JNE  | BPF_K:
	case BPF_JMP | BPF_JGT  | BPF_K:
	case BPF_JMP | BPF_JLT  | BPF_K:
	case BPF_JMP | BPF_JGE  | BPF_K:
	case BPF_JMP | BPF_JLE  | BPF_K:
	case BPF_JMP | BPF_JSGT | BPF_K:
	case BPF_JMP | BPF_JSLT | BPF_K:
	case BPF_JMP | BPF_JSGE | BPF_K:
	case BPF_JMP | BPF_JSLE | BPF_K:
	case BPF_JMP | BPF_JSET | BPF_K:
		/* Accommodate for extra offset in case of a backjump. */
		off = from->off;
		if (off < 0)
			off -= 2;
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
		break;

	case BPF_JMP32 | BPF_JEQ  | BPF_K:
	case BPF_JMP32 | BPF_JNE  | BPF_K:
	case BPF_JMP32 | BPF_JGT  | BPF_K:
	case BPF_JMP32 | BPF_JLT  | BPF_K:
	case BPF_JMP32 | BPF_JGE  | BPF_K:
	case BPF_JMP32 | BPF_JLE  | BPF_K:
	case BPF_JMP32 | BPF_JSGT | BPF_K:
	case BPF_JMP32 | BPF_JSLT | BPF_K:
	case BPF_JMP32 | BPF_JSGE | BPF_K:
	case BPF_JMP32 | BPF_JSLE | BPF_K:
	case BPF_JMP32 | BPF_JSET | BPF_K:
		/* Accommodate for extra offset in case of a backjump. */
		off = from->off;
		if (off < 0)
			off -= 2;
		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_JMP32_REG(from->code, from->dst_reg, BPF_REG_AX,
				      off);
		break;

	case BPF_LD | BPF_IMM | BPF_DW:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
		*to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
		break;
	case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		if (emit_zext)
			*to++ = BPF_ZEXT_REG(BPF_REG_AX);
		*to++ = BPF_ALU64_REG(BPF_OR,  aux[0].dst_reg, BPF_REG_AX);
		break;

	case BPF_ST | BPF_MEM | BPF_DW:
	case BPF_ST | BPF_MEM | BPF_W:
	case BPF_ST | BPF_MEM | BPF_H:
	case BPF_ST | BPF_MEM | BPF_B:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
		break;
	}
out:
	return to - to_buff;
}

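/* Worked example (illustrative values): blinding
 * BPF_ALU64_IMM(BPF_ADD, R1, 42) with imm_rnd = 0x1234 emits
 *
 *	BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, 0x1234 ^ 42);
 *	BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, 0x1234);
 *	BPF_ALU64_REG(BPF_ADD, R1, BPF_REG_AX);
 *
 * AX holds 42 again after the XOR, but the constant 42 never appears
 * verbatim in the JITed image, so user-controlled immediates cannot be
 * used to smuggle a chosen byte sequence into executable memory.
 */
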
static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
					      gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
	struct bpf_prog *fp;

	fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags);
	if (fp != NULL) {
		/* aux->prog still points to the fp_other one, so
		 * when promoting the clone to the real program,
		 * this still needs to be adapted.
		 */
		memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
	}

	return fp;
}

static void bpf_prog_clone_free(struct bpf_prog *fp)
{
	/* aux was stolen by the other clone, so we cannot free
	 * it from this path! It will be freed eventually by the
	 * other program on release.
	 *
	 * At this point, we don't need a deferred release since
	 * clone is guaranteed to not be locked.
	 */
	fp->aux = NULL;
	fp->stats = NULL;
	fp->active = NULL;
	__bpf_prog_free(fp);
}

void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
{
	/* We have to repoint aux->prog to self, as we don't
	 * know whether fp here is the clone or the original.
	 */
	fp->aux->prog = fp;
	bpf_prog_clone_free(fp_other);
}

struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
{
	struct bpf_insn insn_buff[16], aux[2];
	struct bpf_prog *clone, *tmp;
	int insn_delta, insn_cnt;
	struct bpf_insn *insn;
	int i, rewritten;

	if (!bpf_jit_blinding_enabled(prog) || prog->blinded)
		return prog;

	clone = bpf_prog_clone_create(prog, GFP_USER);
	if (!clone)
		return ERR_PTR(-ENOMEM);

	insn_cnt = clone->len;
	insn = clone->insnsi;

	for (i = 0; i < insn_cnt; i++, insn++) {
		/* We temporarily need to hold the original ld64 insn
		 * so that we can still access the first part in the
		 * second blinding run.
		 */
		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
		    insn[1].code == 0)
			memcpy(aux, insn, sizeof(aux));

		rewritten = bpf_jit_blind_insn(insn, aux, insn_buff,
						clone->aux->verifier_zext);
		if (!rewritten)
			continue;

		tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
		if (IS_ERR(tmp)) {
			/* Patching may have repointed aux->prog during
			 * realloc from the original one, so we need to
			 * fix it up here on error.
			 */
			bpf_jit_prog_release_other(prog, clone);
			return tmp;
		}

		clone = tmp;
		insn_delta = rewritten - 1;

		/* Walk new program and skip insns we just inserted. */
		insn = clone->insnsi + i + insn_delta;
		insn_cnt += insn_delta;
		i        += insn_delta;
	}

	clone->blinded = 1;
	return clone;
}
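
/* Usage sketch (mirrors the pattern in real arch JITs such as
 * arch/x86/net/bpf_jit_comp.c; exact flow per arch may differ):
 *
 *	tmp = bpf_jit_blind_constants(prog);
 *	if (IS_ERR(tmp))
 *		return orig_prog;	// blinding failed, keep original
 *	if (tmp != prog) {
 *		prog = tmp;		// JIT the blinded clone instead
 *		tmp_blinded = true;
 *	}
 *	...
 *	if (tmp_blinded)		// release whichever copy loses
 *		bpf_jit_prog_release_other(prog, prog == orig_prog ?
 *					   tmp : orig_prog);
 */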
#endif /* CONFIG_BPF_JIT */

/* Base function for offset calculation. Needs to go into .text section,
 * therefore keeping it non-static as well; will also be used by JITs
 * anyway later on, so do not let the compiler omit it. This also needs
 * to go into kallsyms for correlation from e.g. bpftool, so naming
 * must not change.
 */
noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	return 0;
}
EXPORT_SYMBOL_GPL(__bpf_call_base);

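/* Sketch of how callers encode helper calls against this base (the
 * actual verifier fixup code differs in detail; this is illustrative
 * only):
 *
 *	insn->imm = (long)helper_fn - (long)__bpf_call_base;
 *
 * The interpreter's JMP_CALL handler below undoes it by invoking
 * __bpf_call_base + insn->imm, and bpf_jit_get_func_addr() above does
 * the same arithmetic for JITs.
 */
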
/* All UAPI available opcodes. */
#define BPF_INSN_MAP(INSN_2, INSN_3)		\
	/* 32 bit ALU operations. */		\
	/*   Register based. */			\
	INSN_3(ALU, ADD,  X),			\
	INSN_3(ALU, SUB,  X),			\
	INSN_3(ALU, AND,  X),			\
	INSN_3(ALU, OR,   X),			\
	INSN_3(ALU, LSH,  X),			\
	INSN_3(ALU, RSH,  X),			\
	INSN_3(ALU, XOR,  X),			\
	INSN_3(ALU, MUL,  X),			\
	INSN_3(ALU, MOV,  X),			\
	INSN_3(ALU, ARSH, X),			\
	INSN_3(ALU, DIV,  X),			\
	INSN_3(ALU, MOD,  X),			\
	INSN_2(ALU, NEG),			\
	INSN_3(ALU, END, TO_BE),		\
	INSN_3(ALU, END, TO_LE),		\
	/*   Immediate based. */		\
	INSN_3(ALU, ADD,  K),			\
	INSN_3(ALU, SUB,  K),			\
	INSN_3(ALU, AND,  K),			\
	INSN_3(ALU, OR,   K),			\
	INSN_3(ALU, LSH,  K),			\
	INSN_3(ALU, RSH,  K),			\
	INSN_3(ALU, XOR,  K),			\
	INSN_3(ALU, MUL,  K),			\
	INSN_3(ALU, MOV,  K),			\
	INSN_3(ALU, ARSH, K),			\
	INSN_3(ALU, DIV,  K),			\
	INSN_3(ALU, MOD,  K),			\
	/* 64 bit ALU operations. */		\
	/*   Register based. */			\
	INSN_3(ALU64, ADD,  X),			\
	INSN_3(ALU64, SUB,  X),			\
	INSN_3(ALU64, AND,  X),			\
	INSN_3(ALU64, OR,   X),			\
	INSN_3(ALU64, LSH,  X),			\
	INSN_3(ALU64, RSH,  X),			\
	INSN_3(ALU64, XOR,  X),			\
	INSN_3(ALU64, MUL,  X),			\
	INSN_3(ALU64, MOV,  X),			\
	INSN_3(ALU64, ARSH, X),			\
	INSN_3(ALU64, DIV,  X),			\
	INSN_3(ALU64, MOD,  X),			\
	INSN_2(ALU64, NEG),			\
	/*   Immediate based. */		\
	INSN_3(ALU64, ADD,  K),			\
	INSN_3(ALU64, SUB,  K),			\
	INSN_3(ALU64, AND,  K),			\
	INSN_3(ALU64, OR,   K),			\
	INSN_3(ALU64, LSH,  K),			\
	INSN_3(ALU64, RSH,  K),			\
	INSN_3(ALU64, XOR,  K),			\
	INSN_3(ALU64, MUL,  K),			\
	INSN_3(ALU64, MOV,  K),			\
	INSN_3(ALU64, ARSH, K),			\
	INSN_3(ALU64, DIV,  K),			\
	INSN_3(ALU64, MOD,  K),			\
	/* Call instruction. */			\
	INSN_2(JMP, CALL),			\
	/* Exit instruction. */			\
	INSN_2(JMP, EXIT),			\
	/* 32-bit Jump instructions. */		\
	/*   Register based. */			\
	INSN_3(JMP32, JEQ,  X),			\
	INSN_3(JMP32, JNE,  X),			\
	INSN_3(JMP32, JGT,  X),			\
	INSN_3(JMP32, JLT,  X),			\
	INSN_3(JMP32, JGE,  X),			\
	INSN_3(JMP32, JLE,  X),			\
	INSN_3(JMP32, JSGT, X),			\
	INSN_3(JMP32, JSLT, X),			\
	INSN_3(JMP32, JSGE, X),			\
	INSN_3(JMP32, JSLE, X),			\
	INSN_3(JMP32, JSET, X),			\
	/*   Immediate based. */		\
	INSN_3(JMP32, JEQ,  K),			\
	INSN_3(JMP32, JNE,  K),			\
	INSN_3(JMP32, JGT,  K),			\
	INSN_3(JMP32, JLT,  K),			\
	INSN_3(JMP32, JGE,  K),			\
	INSN_3(JMP32, JLE,  K),			\
	INSN_3(JMP32, JSGT, K),			\
	INSN_3(JMP32, JSLT, K),			\
	INSN_3(JMP32, JSGE, K),			\
	INSN_3(JMP32, JSLE, K),			\
	INSN_3(JMP32, JSET, K),			\
	/* Jump instructions. */		\
	/*   Register based. */			\
	INSN_3(JMP, JEQ,  X),			\
	INSN_3(JMP, JNE,  X),			\
	INSN_3(JMP, JGT,  X),			\
	INSN_3(JMP, JLT,  X),			\
	INSN_3(JMP, JGE,  X),			\
	INSN_3(JMP, JLE,  X),			\
	INSN_3(JMP, JSGT, X),			\
	INSN_3(JMP, JSLT, X),			\
	INSN_3(JMP, JSGE, X),			\
	INSN_3(JMP, JSLE, X),			\
	INSN_3(JMP, JSET, X),			\
	/*   Immediate based. */		\
	INSN_3(JMP, JEQ,  K),			\
	INSN_3(JMP, JNE,  K),			\
	INSN_3(JMP, JGT,  K),			\
	INSN_3(JMP, JLT,  K),			\
	INSN_3(JMP, JGE,  K),			\
	INSN_3(JMP, JLE,  K),			\
	INSN_3(JMP, JSGT, K),			\
	INSN_3(JMP, JSLT, K),			\
	INSN_3(JMP, JSGE, K),			\
	INSN_3(JMP, JSLE, K),			\
	INSN_3(JMP, JSET, K),			\
	INSN_2(JMP, JA),			\
	/* Store instructions. */		\
	/*   Register based. */			\
	INSN_3(STX, MEM,  B),			\
	INSN_3(STX, MEM,  H),			\
	INSN_3(STX, MEM,  W),			\
	INSN_3(STX, MEM,  DW),			\
	INSN_3(STX, ATOMIC, W),			\
	INSN_3(STX, ATOMIC, DW),		\
	/*   Immediate based. */		\
	INSN_3(ST, MEM, B),			\
	INSN_3(ST, MEM, H),			\
	INSN_3(ST, MEM, W),			\
	INSN_3(ST, MEM, DW),			\
	/* Load instructions. */		\
	/*   Register based. */			\
	INSN_3(LDX, MEM, B),			\
	INSN_3(LDX, MEM, H),			\
	INSN_3(LDX, MEM, W),			\
	INSN_3(LDX, MEM, DW),			\
	/*   Immediate based. */		\
	INSN_3(LD, IMM, DW)

bool bpf_opcode_in_insntable(u8 code)
{
#define BPF_INSN_2_TBL(x, y)    [BPF_##x | BPF_##y] = true
#define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true
	static const bool public_insntable[256] = {
		[0 ... 255] = false,
		/* Now overwrite non-defaults ... */
		BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL),
		/* UAPI exposed, but rewritten opcodes. cBPF carry-over. */
		[BPF_LD | BPF_ABS | BPF_B] = true,
		[BPF_LD | BPF_ABS | BPF_H] = true,
		[BPF_LD | BPF_ABS | BPF_W] = true,
		[BPF_LD | BPF_IND | BPF_B] = true,
		[BPF_LD | BPF_IND | BPF_H] = true,
		[BPF_LD | BPF_IND | BPF_W] = true,
	};
#undef BPF_INSN_3_TBL
#undef BPF_INSN_2_TBL
	return public_insntable[code];
}

#ifndef CONFIG_BPF_JIT_ALWAYS_ON
u64 __weak bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr)
{
	memset(dst, 0, size);
	return -EFAULT;
}

/**
 *	___bpf_prog_run - run eBPF program on a given context
 *	@regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers
 *	@insn: is the array of eBPF instructions
 *
 * Decode and execute eBPF instructions.
 *
 * Return: whatever value is in %BPF_R0 at program exit
 */
static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
{
#define BPF_INSN_2_LBL(x, y)    [BPF_##x | BPF_##y] = &&x##_##y
#define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
	static const void * const jumptable[256] __annotate_jump_table = {
		[0 ... 255] = &&default_label,
		/* Now overwrite non-defaults ... */
		BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL),
		/* Non-UAPI available opcodes. */
		[BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
		[BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
		[BPF_ST  | BPF_NOSPEC] = &&ST_NOSPEC,
		[BPF_LDX | BPF_PROBE_MEM | BPF_B] = &&LDX_PROBE_MEM_B,
		[BPF_LDX | BPF_PROBE_MEM | BPF_H] = &&LDX_PROBE_MEM_H,
		[BPF_LDX | BPF_PROBE_MEM | BPF_W] = &&LDX_PROBE_MEM_W,
		[BPF_LDX | BPF_PROBE_MEM | BPF_DW] = &&LDX_PROBE_MEM_DW,
	};
#undef BPF_INSN_3_LBL
#undef BPF_INSN_2_LBL
	u32 tail_call_cnt = 0;

#define CONT	 ({ insn++; goto select_insn; })
#define CONT_JMP ({ insn++; goto select_insn; })

select_insn:
	goto *jumptable[insn->code];

	/* Explicitly mask the register-based shift amounts with 63 or 31
	 * to avoid undefined behavior. Normally this won't affect the
	 * generated code, for example, in case of native 64 bit archs such
	 * as x86-64 or arm64, the compiler is optimizing the AND away for
	 * the interpreter. In case of JITs, each of the JIT backends compiles
	 * the BPF shift operations to machine instructions which produce
	 * implementation-defined results in such a case; the resulting
	 * contents of the register may be arbitrary, but program behaviour
	 * as a whole remains defined. In other words, in case of JIT backends,
	 * the AND must /not/ be added to the emitted LSH/RSH/ARSH translation.
	 */
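	/* For illustration only (not executed): in plain C,
	 *
	 *	u64 x = 1;
	 *	x <<= 64;	// undefined behavior
	 *
	 * whereas the interpreter below computes x <<= (64 & 63), i.e. a
	 * well-defined shift by 0, which matches what x86-64 and arm64
	 * hardware do natively for register-based shift amounts.
	 */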
	/* ALU (shifts) */
#define SHT(OPCODE, OP)					\
	ALU64_##OPCODE##_X:				\
		DST = DST OP (SRC & 63);		\
		CONT;					\
	ALU_##OPCODE##_X:				\
		DST = (u32) DST OP ((u32) SRC & 31);	\
		CONT;					\
	ALU64_##OPCODE##_K:				\
		DST = DST OP IMM;			\
		CONT;					\
	ALU_##OPCODE##_K:				\
		DST = (u32) DST OP (u32) IMM;		\
		CONT;
	/* ALU (rest) */
#define ALU(OPCODE, OP)					\
	ALU64_##OPCODE##_X:				\
		DST = DST OP SRC;			\
		CONT;					\
	ALU_##OPCODE##_X:				\
		DST = (u32) DST OP (u32) SRC;		\
		CONT;					\
	ALU64_##OPCODE##_K:				\
		DST = DST OP IMM;			\
		CONT;					\
	ALU_##OPCODE##_K:				\
		DST = (u32) DST OP (u32) IMM;		\
		CONT;
	ALU(ADD,  +)
	ALU(SUB,  -)
	ALU(AND,  &)
	ALU(OR,   |)
	ALU(XOR,  ^)
	ALU(MUL,  *)
	SHT(LSH, <<)
	SHT(RSH, >>)
#undef SHT
#undef ALU
	ALU_NEG:
		DST = (u32) -DST;
		CONT;
	ALU64_NEG:
		DST = -DST;
		CONT;
	ALU_MOV_X:
		DST = (u32) SRC;
		CONT;
	ALU_MOV_K:
		DST = (u32) IMM;
		CONT;
	ALU64_MOV_X:
		DST = SRC;
		CONT;
	ALU64_MOV_K:
		DST = IMM;
		CONT;
	LD_IMM_DW:
		DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
		insn++;
		CONT;
	ALU_ARSH_X:
		DST = (u64) (u32) (((s32) DST) >> (SRC & 31));
		CONT;
	ALU_ARSH_K:
		DST = (u64) (u32) (((s32) DST) >> IMM);
		CONT;
	ALU64_ARSH_X:
		(*(s64 *) &DST) >>= (SRC & 63);
		CONT;
	ALU64_ARSH_K:
		(*(s64 *) &DST) >>= IMM;
		CONT;
	ALU64_MOD_X:
		div64_u64_rem(DST, SRC, &AX);
		DST = AX;
		CONT;
	ALU_MOD_X:
		AX = (u32) DST;
		DST = do_div(AX, (u32) SRC);
		CONT;
	ALU64_MOD_K:
		div64_u64_rem(DST, IMM, &AX);
		DST = AX;
		CONT;
	ALU_MOD_K:
		AX = (u32) DST;
		DST = do_div(AX, (u32) IMM);
		CONT;
	ALU64_DIV_X:
		DST = div64_u64(DST, SRC);
		CONT;
	ALU_DIV_X:
		AX = (u32) DST;
		do_div(AX, (u32) SRC);
		DST = (u32) AX;
		CONT;
	ALU64_DIV_K:
		DST = div64_u64(DST, IMM);
		CONT;
	ALU_DIV_K:
		AX = (u32) DST;
		do_div(AX, (u32) IMM);
		DST = (u32) AX;
		CONT;
	ALU_END_TO_BE:
		switch (IMM) {
		case 16:
			DST = (__force u16) cpu_to_be16(DST);
			break;
		case 32:
			DST = (__force u32) cpu_to_be32(DST);
			break;
		case 64:
			DST = (__force u64) cpu_to_be64(DST);
			break;
		}
		CONT;
	ALU_END_TO_LE:
		switch (IMM) {
		case 16:
			DST = (__force u16) cpu_to_le16(DST);
			break;
		case 32:
			DST = (__force u32) cpu_to_le32(DST);
			break;
		case 64:
			DST = (__force u64) cpu_to_le64(DST);
			break;
		}
		CONT;

	/* CALL */
	JMP_CALL:
		/* Function call scratches BPF_R1-BPF_R5 registers,
		 * preserves BPF_R6-BPF_R9, and stores return value
		 * into BPF_R0.
		 */
		BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
						       BPF_R4, BPF_R5);
		CONT;

	JMP_CALL_ARGS:
		BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2,
							    BPF_R3, BPF_R4,
							    BPF_R5,
							    insn + insn->off + 1);
		CONT;

	JMP_TAIL_CALL: {
		struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
		struct bpf_array *array = container_of(map, struct bpf_array, map);
		struct bpf_prog *prog;
		u32 index = BPF_R3;

		if (unlikely(index >= array->map.max_entries))
			goto out;
		if (unlikely(tail_call_cnt > MAX_TAIL_CALL_CNT))
			goto out;

		tail_call_cnt++;

		prog = READ_ONCE(array->ptrs[index]);
		if (!prog)
			goto out;

		/* ARG1 at this point is guaranteed to point to CTX from
		 * the verifier side due to the fact that the tail call is
		 * handled like a helper, that is, bpf_tail_call_proto,
		 * where arg1_type is ARG_PTR_TO_CTX.
		 */
		insn = prog->insnsi;
		goto select_insn;
out:
		CONT;
	}
	JMP_JA:
		insn += insn->off;
		CONT;
	JMP_EXIT:
		return BPF_R0;
	/* JMP */
#define COND_JMP(SIGN, OPCODE, CMP_OP)				\
	JMP_##OPCODE##_X:					\
		if ((SIGN##64) DST CMP_OP (SIGN##64) SRC) {	\
			insn += insn->off;			\
			CONT_JMP;				\
		}						\
		CONT;						\
	JMP32_##OPCODE##_X:					\
		if ((SIGN##32) DST CMP_OP (SIGN##32) SRC) {	\
			insn += insn->off;			\
			CONT_JMP;				\
		}						\
		CONT;						\
	JMP_##OPCODE##_K:					\
		if ((SIGN##64) DST CMP_OP (SIGN##64) IMM) {	\
			insn += insn->off;			\
			CONT_JMP;				\
		}						\
		CONT;						\
	JMP32_##OPCODE##_K:					\
		if ((SIGN##32) DST CMP_OP (SIGN##32) IMM) {	\
			insn += insn->off;			\
			CONT_JMP;				\
		}						\
		CONT;
	COND_JMP(u, JEQ, ==)
	COND_JMP(u, JNE, !=)
	COND_JMP(u, JGT, >)
	COND_JMP(u, JLT, <)
	COND_JMP(u, JGE, >=)
	COND_JMP(u, JLE, <=)
	COND_JMP(u, JSET, &)
	COND_JMP(s, JSGT, >)
	COND_JMP(s, JSLT, <)
	COND_JMP(s, JSGE, >=)
	COND_JMP(s, JSLE, <=)
#undef COND_JMP
	/* ST, STX and LDX */
	ST_NOSPEC:
		/* Speculation barrier for mitigating Speculative Store Bypass.
		 * In case of arm64, we rely on the firmware mitigation as
		 * controlled via the ssbd kernel parameter. Whenever the
		 * mitigation is enabled, it works for all of the kernel code
		 * with no need to provide any additional instructions here.
		 * In case of x86, we use 'lfence' insn for mitigation. We
		 * reuse preexisting logic from Spectre v1 mitigation that
		 * happens to produce the required code on x86 for v4 as well.
		 */
#ifdef CONFIG_X86
		barrier_nospec();
#endif
		CONT;
#define LDST(SIZEOP, SIZE)						\
	STX_MEM_##SIZEOP:						\
		*(SIZE *)(unsigned long) (DST + insn->off) = SRC;	\
		CONT;							\
	ST_MEM_##SIZEOP:						\
		*(SIZE *)(unsigned long) (DST + insn->off) = IMM;	\
		CONT;							\
	LDX_MEM_##SIZEOP:						\
		DST = *(SIZE *)(unsigned long) (SRC + insn->off);	\
		CONT;

	LDST(B,   u8)
	LDST(H,  u16)
	LDST(W,  u32)
	LDST(DW, u64)
#undef LDST
#define LDX_PROBE(SIZEOP, SIZE)							\
	LDX_PROBE_MEM_##SIZEOP:							\
		bpf_probe_read_kernel(&DST, SIZE, (const void *)(long) (SRC + insn->off));	\
		CONT;
	LDX_PROBE(B,  1)
	LDX_PROBE(H,  2)
	LDX_PROBE(W,  4)
	LDX_PROBE(DW, 8)
#undef LDX_PROBE

#define ATOMIC_ALU_OP(BOP, KOP)						\
		case BOP:						\
			if (BPF_SIZE(insn->code) == BPF_W)		\
				atomic_##KOP((u32) SRC, (atomic_t *)(unsigned long) \
					     (DST + insn->off));	\
			else						\
				atomic64_##KOP((u64) SRC, (atomic64_t *)(unsigned long) \
					       (DST + insn->off));	\
			break;						\
		case BOP | BPF_FETCH:					\
			if (BPF_SIZE(insn->code) == BPF_W)		\
				SRC = (u32) atomic_fetch_##KOP(		\
					(u32) SRC,			\
					(atomic_t *)(unsigned long) (DST + insn->off)); \
			else						\
				SRC = (u64) atomic64_fetch_##KOP(	\
					(u64) SRC,			\
					(atomic64_t *)(unsigned long) (DST + insn->off)); \
			break;

	STX_ATOMIC_DW:
	STX_ATOMIC_W:
		switch (IMM) {
		ATOMIC_ALU_OP(BPF_ADD, add)
		ATOMIC_ALU_OP(BPF_AND, and)
		ATOMIC_ALU_OP(BPF_OR, or)
		ATOMIC_ALU_OP(BPF_XOR, xor)
#undef ATOMIC_ALU_OP

		case BPF_XCHG:
			if (BPF_SIZE(insn->code) == BPF_W)
				SRC = (u32) atomic_xchg(
					(atomic_t *)(unsigned long) (DST + insn->off),
					(u32) SRC);
			else
				SRC = (u64) atomic64_xchg(
					(atomic64_t *)(unsigned long) (DST + insn->off),
					(u64) SRC);
			break;
		case BPF_CMPXCHG:
			if (BPF_SIZE(insn->code) == BPF_W)
				BPF_R0 = (u32) atomic_cmpxchg(
					(atomic_t *)(unsigned long) (DST + insn->off),
					(u32) BPF_R0, (u32) SRC);
			else
				BPF_R0 = (u64) atomic64_cmpxchg(
					(atomic64_t *)(unsigned long) (DST + insn->off),
					(u64) BPF_R0, (u64) SRC);
			break;

		default:
			goto default_label;
		}
		CONT;

	default_label:
		/* If we ever reach this, we have a bug somewhere. Die hard here
		 * instead of just returning 0; we could be somewhere in a subprog,
		 * so execution could continue otherwise which we do /not/ want.
		 *
		 * Note, verifier whitelists all opcodes in bpf_opcode_in_insntable().
		 */
		pr_warn("BPF interpreter: unknown opcode %02x (imm: 0x%x)\n",
			insn->code, insn->imm);
		BUG_ON(1);
		return 0;
}

#define PROG_NAME(stack_size) __bpf_prog_run##stack_size
#define DEFINE_BPF_PROG_RUN(stack_size) \
static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
{ \
	u64 stack[stack_size / sizeof(u64)]; \
	u64 regs[MAX_BPF_EXT_REG]; \
\
	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
	ARG1 = (u64) (unsigned long) ctx; \
	return ___bpf_prog_run(regs, insn); \
}

#define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size
#define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \
static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
				      const struct bpf_insn *insn) \
{ \
	u64 stack[stack_size / sizeof(u64)]; \
	u64 regs[MAX_BPF_EXT_REG]; \
\
	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
	BPF_R1 = r1; \
	BPF_R2 = r2; \
	BPF_R3 = r3; \
	BPF_R4 = r4; \
	BPF_R5 = r5; \
	return ___bpf_prog_run(regs, insn); \
}

#define EVAL1(FN, X) FN(X)
#define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
#define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
#define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y)
#define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y)
#define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y)

EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);

EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192);
EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384);
EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);

#define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),

static unsigned int (*interpreters[])(const void *ctx,
				      const struct bpf_insn *insn) = {
EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
};
#undef PROG_NAME_LIST
#define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size),
static u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5,
				  const struct bpf_insn *insn) = {
EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
};
#undef PROG_NAME_LIST

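/* Example of the stack-size specialization above (illustrative): a
 * program whose aux->stack_depth is 100 rounds up to 128, so it runs
 * via interpreters[(128 / 32) - 1], i.e. __bpf_prog_run128, which
 * reserves a 128-byte on-stack scratch area for the BPF stack frame
 * (FP points just past it).
 */
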
1803 void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
1804 {
1805 	stack_depth = max_t(u32, stack_depth, 1);
1806 	insn->off = (s16) insn->imm;
1807 	insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] -
1808 		__bpf_call_base_args;
1809 	insn->code = BPF_JMP | BPF_CALL_ARGS;
1810 }
1811 
1812 #else
1813 static unsigned int __bpf_prog_ret0_warn(const void *ctx,
1814 					 const struct bpf_insn *insn)
1815 {
1816 	/* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
1817 	 * is not working properly, so warn about it!
1818 	 */
1819 	WARN_ON_ONCE(1);
1820 	return 0;
1821 }
1822 #endif
1823 
1824 bool bpf_prog_array_compatible(struct bpf_array *array,
1825 			       const struct bpf_prog *fp)
1826 {
1827 	bool ret;
1828 
1829 	if (fp->kprobe_override)
1830 		return false;
1831 
1832 	spin_lock(&array->aux->owner.lock);
1833 
1834 	if (!array->aux->owner.type) {
1835 		/* There's no owner yet where we could check for
1836 		 * compatibility.
1837 		 */
1838 		array->aux->owner.type  = fp->type;
1839 		array->aux->owner.jited = fp->jited;
1840 		ret = true;
1841 	} else {
1842 		ret = array->aux->owner.type  == fp->type &&
1843 		      array->aux->owner.jited == fp->jited;
1844 	}
1845 	spin_unlock(&array->aux->owner.lock);
1846 	return ret;
1847 }
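
/* In other words, the first program added to a given prog array "claims"
 * it. A hypothetical sequence against one array:
 *
 *	bpf_prog_array_compatible(array, tc_prog)   // true; owner becomes
 *						    // {SCHED_CLS, jited}
 *	bpf_prog_array_compatible(array, tc_prog2)  // true, same type/jited
 *	bpf_prog_array_compatible(array, xdp_prog)  // false, type differs
 */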
1848 
1849 static int bpf_check_tail_call(const struct bpf_prog *fp)
1850 {
1851 	struct bpf_prog_aux *aux = fp->aux;
1852 	int i, ret = 0;
1853 
1854 	mutex_lock(&aux->used_maps_mutex);
1855 	for (i = 0; i < aux->used_map_cnt; i++) {
1856 		struct bpf_map *map = aux->used_maps[i];
1857 		struct bpf_array *array;
1858 
1859 		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
1860 			continue;
1861 
1862 		array = container_of(map, struct bpf_array, map);
1863 		if (!bpf_prog_array_compatible(array, fp)) {
1864 			ret = -EINVAL;
1865 			goto out;
1866 		}
1867 	}
1868 
1869 out:
1870 	mutex_unlock(&aux->used_maps_mutex);
1871 	return ret;
1872 }
1873 
1874 static void bpf_prog_select_func(struct bpf_prog *fp)
1875 {
1876 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
1877 	u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
1878 
1879 	fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
1880 #else
1881 	fp->bpf_func = __bpf_prog_ret0_warn;
1882 #endif
1883 }
1884 
1885 /**
1886  *	bpf_prog_select_runtime - select exec runtime for BPF program
1887  *	@fp: bpf_prog populated with internal BPF program
1888  *	@err: pointer to error variable
1889  *
1890  * Try to JIT the eBPF program; if JIT is not available, use the interpreter.
1891  * The BPF program will be executed via bpf_prog_run() function.
1892  *
1893  * Return: the &fp argument along with &err set to 0 for success or
1894  * a negative errno code on failure
1895  */
1896 struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
1897 {
1898 	/* In case of BPF-to-BPF calls, the verifier did all the prep
1899 	 * work with regard to JITing, etc.
1900 	 */
1901 	bool jit_needed = false;
1902 
1903 	if (fp->bpf_func)
1904 		goto finalize;
1905 
1906 	if (IS_ENABLED(CONFIG_BPF_JIT_ALWAYS_ON) ||
1907 	    bpf_prog_has_kfunc_call(fp))
1908 		jit_needed = true;
1909 
1910 	bpf_prog_select_func(fp);
1911 
1912 	/* eBPF JITs can rewrite the program in case constant
1913 	 * blinding is active. However, in case of error during
1914 	 * blinding, bpf_int_jit_compile() must always return a
1915 	 * valid program, which in this case would simply not
1916 	 * be JITed but would instead fall back to the interpreter.
1917 	 */
1918 	if (!bpf_prog_is_dev_bound(fp->aux)) {
1919 		*err = bpf_prog_alloc_jited_linfo(fp);
1920 		if (*err)
1921 			return fp;
1922 
1923 		fp = bpf_int_jit_compile(fp);
1924 		bpf_prog_jit_attempt_done(fp);
1925 		if (!fp->jited && jit_needed) {
1926 			*err = -ENOTSUPP;
1927 			return fp;
1928 		}
1929 	} else {
1930 		*err = bpf_prog_offload_compile(fp);
1931 		if (*err)
1932 			return fp;
1933 	}
1934 
1935 finalize:
1936 	bpf_prog_lock_ro(fp);
1937 
1938 	/* The tail call compatibility check can only be done at
1939 	 * this late stage as we need to determine whether we deal
1940 	 * with JITed or non-JITed program concatenations, and not
1941 	 * all eBPF JITs might immediately support all features.
1942 	 */
1943 	*err = bpf_check_tail_call(fp);
1944 
1945 	return fp;
1946 }
1947 EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
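
/* A rough caller-side sketch (illustrative only, not the actual syscall
 * path):
 *
 *	int err = 0;
 *
 *	fp = bpf_prog_select_runtime(fp, &err);
 *	if (err < 0)
 *		goto free_prog;	// fp is still valid, caller must free it
 *	// fp->bpf_func now points either at the JIT image or at one
 *	// of the stack-size-specialized interpreters
 */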
1948 
1949 static unsigned int __bpf_prog_ret1(const void *ctx,
1950 				    const struct bpf_insn *insn)
1951 {
1952 	return 1;
1953 }
1954 
1955 static struct bpf_prog_dummy {
1956 	struct bpf_prog prog;
1957 } dummy_bpf_prog = {
1958 	.prog = {
1959 		.bpf_func = __bpf_prog_ret1,
1960 	},
1961 };
1962 
1963 /* To avoid allocating an empty bpf_prog_array for cgroups that don't
1964  * have a bpf program attached, use one global 'empty_prog_array'. It
1965  * will not be modified by the caller of bpf_prog_array_alloc() (since
1966  * the caller requested prog_cnt == 0), but that pointer should still
1967  * be 'freed' by bpf_prog_array_free().
1968  */
1969 static struct {
1970 	struct bpf_prog_array hdr;
1971 	struct bpf_prog *null_prog;
1972 } empty_prog_array = {
1973 	.null_prog = NULL,
1974 };
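
/* The effect of this singleton, sketched:
 *
 *	struct bpf_prog_array *a = bpf_prog_array_alloc(0, GFP_KERNEL);
 *	// a == &empty_prog_array.hdr, no allocation took place
 *	bpf_prog_array_free(a);
 *	// recognized as the singleton, not handed to kfree_rcu()
 */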
1975 
1976 struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
1977 {
1978 	if (prog_cnt)
1979 		return kzalloc(sizeof(struct bpf_prog_array) +
1980 			       sizeof(struct bpf_prog_array_item) *
1981 			       (prog_cnt + 1),
1982 			       flags);
1983 
1984 	return &empty_prog_array.hdr;
1985 }
1986 
1987 void bpf_prog_array_free(struct bpf_prog_array *progs)
1988 {
1989 	if (!progs || progs == &empty_prog_array.hdr)
1990 		return;
1991 	kfree_rcu(progs, rcu);
1992 }
1993 
1994 int bpf_prog_array_length(struct bpf_prog_array *array)
1995 {
1996 	struct bpf_prog_array_item *item;
1997 	u32 cnt = 0;
1998 
1999 	for (item = array->items; item->prog; item++)
2000 		if (item->prog != &dummy_bpf_prog.prog)
2001 			cnt++;
2002 	return cnt;
2003 }
2004 
2005 bool bpf_prog_array_is_empty(struct bpf_prog_array *array)
2006 {
2007 	struct bpf_prog_array_item *item;
2008 
2009 	for (item = array->items; item->prog; item++)
2010 		if (item->prog != &dummy_bpf_prog.prog)
2011 			return false;
2012 	return true;
2013 }
2014 
2015 static bool bpf_prog_array_copy_core(struct bpf_prog_array *array,
2016 				     u32 *prog_ids,
2017 				     u32 request_cnt)
2018 {
2019 	struct bpf_prog_array_item *item;
2020 	int i = 0;
2021 
2022 	for (item = array->items; item->prog; item++) {
2023 		if (item->prog == &dummy_bpf_prog.prog)
2024 			continue;
2025 		prog_ids[i] = item->prog->aux->id;
2026 		if (++i == request_cnt) {
2027 			item++;
2028 			break;
2029 		}
2030 	}
2031 
2032 	return !!(item->prog);
2033 }
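
/* The boolean result means "more programs remained than request_cnt could
 * hold". E.g. copying two ids out of an array holding three real programs
 * stops with item->prog still non-NULL, so the caller maps the true
 * return to -ENOSPC.
 */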
2034 
2035 int bpf_prog_array_copy_to_user(struct bpf_prog_array *array,
2036 				__u32 __user *prog_ids, u32 cnt)
2037 {
2038 	unsigned long err = 0;
2039 	bool nospc;
2040 	u32 *ids;
2041 
2042 	/* users of this function are doing:
2043 	 * cnt = bpf_prog_array_length();
2044 	 * if (cnt > 0)
2045 	 *     bpf_prog_array_copy_to_user(..., cnt);
2046 	 * so the kcalloc below doesn't need an extra cnt > 0 check.
2047 	 */
2048 	ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
2049 	if (!ids)
2050 		return -ENOMEM;
2051 	nospc = bpf_prog_array_copy_core(array, ids, cnt);
2052 	err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
2053 	kfree(ids);
2054 	if (err)
2055 		return -EFAULT;
2056 	if (nospc)
2057 		return -ENOSPC;
2058 	return 0;
2059 }
2060 
2061 void bpf_prog_array_delete_safe(struct bpf_prog_array *array,
2062 				struct bpf_prog *old_prog)
2063 {
2064 	struct bpf_prog_array_item *item;
2065 
2066 	for (item = array->items; item->prog; item++)
2067 		if (item->prog == old_prog) {
2068 			WRITE_ONCE(item->prog, &dummy_bpf_prog.prog);
2069 			break;
2070 		}
2071 }
2072 
2073 /**
2074  * bpf_prog_array_delete_safe_at() - Replaces the program at the given
2075  *                                   index into the program array with
2076  *                                   a dummy no-op program.
2077  * @array: a bpf_prog_array
2078  * @index: the index of the program to replace
2079  *
2080  * Skips over dummy programs, by not counting them, when calculating
2081  * the position of the program to replace.
2082  *
2083  * Return:
2084  * * 0		- Success
2085  * * -EINVAL	- Invalid index value. Must be a non-negative integer.
2086  * * -ENOENT	- Index out of range
2087  */
2088 int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index)
2089 {
2090 	return bpf_prog_array_update_at(array, index, &dummy_bpf_prog.prog);
2091 }
2092 
2093 /**
2094  * bpf_prog_array_update_at() - Updates the program at the given index
2095  *                              into the program array.
2096  * @array: a bpf_prog_array
2097  * @index: the index of the program to update
2098  * @prog: the program to insert into the array
2099  *
2100  * Skips over dummy programs, by not counting them, when calculating
2101  * the position of the program to update.
2102  *
2103  * Return:
2104  * * 0		- Success
2105  * * -EINVAL	- Invalid index value. Must be a non-negative integer.
2106  * * -ENOENT	- Index out of range
2107  */
2108 int bpf_prog_array_update_at(struct bpf_prog_array *array, int index,
2109 			     struct bpf_prog *prog)
2110 {
2111 	struct bpf_prog_array_item *item;
2112 
2113 	if (unlikely(index < 0))
2114 		return -EINVAL;
2115 
2116 	for (item = array->items; item->prog; item++) {
2117 		if (item->prog == &dummy_bpf_prog.prog)
2118 			continue;
2119 		if (!index) {
2120 			WRITE_ONCE(item->prog, prog);
2121 			return 0;
2122 		}
2123 		index--;
2124 	}
2125 	return -ENOENT;
2126 }
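
/* Index walk-through: for a (hypothetical) array laid out as
 * { P0, dummy, P1, NULL }, dummy entries are not counted, so:
 *
 *	bpf_prog_array_update_at(array, 0, Pn)	// replaces P0
 *	bpf_prog_array_update_at(array, 1, Pn)	// skips dummy, replaces P1
 *	bpf_prog_array_update_at(array, 2, Pn)	// -ENOENT
 */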
2127 
2128 int bpf_prog_array_copy(struct bpf_prog_array *old_array,
2129 			struct bpf_prog *exclude_prog,
2130 			struct bpf_prog *include_prog,
2131 			u64 bpf_cookie,
2132 			struct bpf_prog_array **new_array)
2133 {
2134 	int new_prog_cnt, carry_prog_cnt = 0;
2135 	struct bpf_prog_array_item *existing, *new;
2136 	struct bpf_prog_array *array;
2137 	bool found_exclude = false;
2138 
2139 	/* Figure out how many existing progs we need to carry over to
2140 	 * the new array.
2141 	 */
2142 	if (old_array) {
2143 		existing = old_array->items;
2144 		for (; existing->prog; existing++) {
2145 			if (existing->prog == exclude_prog) {
2146 				found_exclude = true;
2147 				continue;
2148 			}
2149 			if (existing->prog != &dummy_bpf_prog.prog)
2150 				carry_prog_cnt++;
2151 			if (existing->prog == include_prog)
2152 				return -EEXIST;
2153 		}
2154 	}
2155 
2156 	if (exclude_prog && !found_exclude)
2157 		return -ENOENT;
2158 
2159 	/* How many progs (not NULL) will be in the new array? */
2160 	new_prog_cnt = carry_prog_cnt;
2161 	if (include_prog)
2162 		new_prog_cnt += 1;
2163 
2164 	/* Do we have any prog (not NULL) in the new array? */
2165 	if (!new_prog_cnt) {
2166 		*new_array = NULL;
2167 		return 0;
2168 	}
2169 
2170 	/* +1 as the end of prog_array is marked with NULL */
2171 	array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL);
2172 	if (!array)
2173 		return -ENOMEM;
2174 	new = array->items;
2175 
2176 	/* Fill in the new prog array */
2177 	if (carry_prog_cnt) {
2178 		existing = old_array->items;
2179 		for (; existing->prog; existing++) {
2180 			if (existing->prog == exclude_prog ||
2181 			    existing->prog == &dummy_bpf_prog.prog)
2182 				continue;
2183 
2184 			new->prog = existing->prog;
2185 			new->bpf_cookie = existing->bpf_cookie;
2186 			new++;
2187 		}
2188 	}
2189 	if (include_prog) {
2190 		new->prog = include_prog;
2191 		new->bpf_cookie = bpf_cookie;
2192 		new++;
2193 	}
2194 	new->prog = NULL;
2195 	*new_array = array;
2196 	return 0;
2197 }
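
/* Sketch of the copy-on-write update callers typically build around this
 * helper (e.g. the perf/tracing attach path); 'owner' and the locking
 * around it are illustrative assumptions:
 *
 *	struct bpf_prog_array *new_array, *old_array = ...;
 *	int err;
 *
 *	err = bpf_prog_array_copy(old_array, NULL, prog, cookie, &new_array);
 *	if (err < 0)
 *		return err;
 *	rcu_assign_pointer(owner->progs, new_array);
 *	bpf_prog_array_free(old_array);	// RCU-deferred kfree
 */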
2198 
2199 int bpf_prog_array_copy_info(struct bpf_prog_array *array,
2200 			     u32 *prog_ids, u32 request_cnt,
2201 			     u32 *prog_cnt)
2202 {
2203 	u32 cnt = 0;
2204 
2205 	if (array)
2206 		cnt = bpf_prog_array_length(array);
2207 
2208 	*prog_cnt = cnt;
2209 
2210 	/* return early if user requested only program count or nothing to copy */
2211 	if (!request_cnt || !cnt)
2212 		return 0;
2213 
2214 	/* this function is called under trace/bpf_trace.c: bpf_event_mutex */
2215 	return bpf_prog_array_copy_core(array, prog_ids, request_cnt) ? -ENOSPC
2216 								     : 0;
2217 }
2218 
2219 void __bpf_free_used_maps(struct bpf_prog_aux *aux,
2220 			  struct bpf_map **used_maps, u32 len)
2221 {
2222 	struct bpf_map *map;
2223 	u32 i;
2224 
2225 	for (i = 0; i < len; i++) {
2226 		map = used_maps[i];
2227 		if (map->ops->map_poke_untrack)
2228 			map->ops->map_poke_untrack(map, aux);
2229 		bpf_map_put(map);
2230 	}
2231 }
2232 
2233 static void bpf_free_used_maps(struct bpf_prog_aux *aux)
2234 {
2235 	__bpf_free_used_maps(aux, aux->used_maps, aux->used_map_cnt);
2236 	kfree(aux->used_maps);
2237 }
2238 
2239 void __bpf_free_used_btfs(struct bpf_prog_aux *aux,
2240 			  struct btf_mod_pair *used_btfs, u32 len)
2241 {
2242 #ifdef CONFIG_BPF_SYSCALL
2243 	struct btf_mod_pair *btf_mod;
2244 	u32 i;
2245 
2246 	for (i = 0; i < len; i++) {
2247 		btf_mod = &used_btfs[i];
2248 		if (btf_mod->module)
2249 			module_put(btf_mod->module);
2250 		btf_put(btf_mod->btf);
2251 	}
2252 #endif
2253 }
2254 
2255 static void bpf_free_used_btfs(struct bpf_prog_aux *aux)
2256 {
2257 	__bpf_free_used_btfs(aux, aux->used_btfs, aux->used_btf_cnt);
2258 	kfree(aux->used_btfs);
2259 }
2260 
2261 static void bpf_prog_free_deferred(struct work_struct *work)
2262 {
2263 	struct bpf_prog_aux *aux;
2264 	int i;
2265 
2266 	aux = container_of(work, struct bpf_prog_aux, work);
2267 #ifdef CONFIG_BPF_SYSCALL
2268 	bpf_free_kfunc_btf_tab(aux->kfunc_btf_tab);
2269 #endif
2270 	bpf_free_used_maps(aux);
2271 	bpf_free_used_btfs(aux);
2272 	if (bpf_prog_is_dev_bound(aux))
2273 		bpf_prog_offload_destroy(aux->prog);
2274 #ifdef CONFIG_PERF_EVENTS
2275 	if (aux->prog->has_callchain_buf)
2276 		put_callchain_buffers();
2277 #endif
2278 	if (aux->dst_trampoline)
2279 		bpf_trampoline_put(aux->dst_trampoline);
2280 	for (i = 0; i < aux->func_cnt; i++) {
2281 		/* We can just unlink the subprog poke descriptor table as
2282 		 * it was originally linked to the main program and is also
2283 		 * released along with it.
2284 		 */
2285 		aux->func[i]->aux->poke_tab = NULL;
2286 		bpf_jit_free(aux->func[i]);
2287 	}
2288 	if (aux->func_cnt) {
2289 		kfree(aux->func);
2290 		bpf_prog_unlock_free(aux->prog);
2291 	} else {
2292 		bpf_jit_free(aux->prog);
2293 	}
2294 }
2295 
2296 /* Free internal BPF program */
2297 void bpf_prog_free(struct bpf_prog *fp)
2298 {
2299 	struct bpf_prog_aux *aux = fp->aux;
2300 
2301 	if (aux->dst_prog)
2302 		bpf_prog_put(aux->dst_prog);
2303 	INIT_WORK(&aux->work, bpf_prog_free_deferred);
2304 	schedule_work(&aux->work);
2305 }
2306 EXPORT_SYMBOL_GPL(bpf_prog_free);
2307 
2308 /* RNG for unprivileged user space with separated state from prandom_u32(). */
2309 static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);
2310 
2311 void bpf_user_rnd_init_once(void)
2312 {
2313 	prandom_init_once(&bpf_user_rnd_state);
2314 }
2315 
2316 BPF_CALL_0(bpf_user_rnd_u32)
2317 {
2318 	/* Should someone ever have the rather unwise idea to use some
2319 	 * of the registers passed into this function, then note that
2320 	 * this function is called from native eBPF and classic-to-eBPF
2321 	 * transformations. Register assignments from both sides are
2322 	 * different, e.g. classic always sets fn(ctx, A, X) here.
2323 	 */
2324 	struct rnd_state *state;
2325 	u32 res;
2326 
2327 	state = &get_cpu_var(bpf_user_rnd_state);
2328 	res = prandom_u32_state(state);
2329 	put_cpu_var(bpf_user_rnd_state);
2330 
2331 	return res;
2332 }
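
/* This is the .func behind the program-facing bpf_get_prandom_u32()
 * helper (wired up in kernel/bpf/helpers.c), with its own per-cpu state
 * so that heavy BPF usage cannot drain or bias the state seen by other
 * prandom_u32() users. From BPF program code it reads simply as:
 *
 *	u32 r = bpf_get_prandom_u32();
 */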
2333 
2334 BPF_CALL_0(bpf_get_raw_cpu_id)
2335 {
2336 	return raw_smp_processor_id();
2337 }
2338 
2339 /* Weak definitions of helper functions in case we don't have bpf syscall. */
2340 const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
2341 const struct bpf_func_proto bpf_map_update_elem_proto __weak;
2342 const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
2343 const struct bpf_func_proto bpf_map_push_elem_proto __weak;
2344 const struct bpf_func_proto bpf_map_pop_elem_proto __weak;
2345 const struct bpf_func_proto bpf_map_peek_elem_proto __weak;
2346 const struct bpf_func_proto bpf_spin_lock_proto __weak;
2347 const struct bpf_func_proto bpf_spin_unlock_proto __weak;
2348 const struct bpf_func_proto bpf_jiffies64_proto __weak;
2349 
2350 const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
2351 const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
2352 const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
2353 const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
2354 const struct bpf_func_proto bpf_ktime_get_boot_ns_proto __weak;
2355 const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto __weak;
2356 
2357 const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
2358 const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
2359 const struct bpf_func_proto bpf_get_current_comm_proto __weak;
2360 const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak;
2361 const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto __weak;
2362 const struct bpf_func_proto bpf_get_local_storage_proto __weak;
2363 const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto __weak;
2364 const struct bpf_func_proto bpf_snprintf_btf_proto __weak;
2365 const struct bpf_func_proto bpf_seq_printf_btf_proto __weak;
2366 
2367 const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
2368 {
2369 	return NULL;
2370 }
2371 
2372 const struct bpf_func_proto * __weak bpf_get_trace_vprintk_proto(void)
2373 {
2374 	return NULL;
2375 }
2376 
2377 u64 __weak
2378 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
2379 		 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
2380 {
2381 	return -ENOTSUPP;
2382 }
2383 EXPORT_SYMBOL_GPL(bpf_event_output);
2384 
2385 /* Always built-in helper functions. */
2386 const struct bpf_func_proto bpf_tail_call_proto = {
2387 	.func		= NULL,
2388 	.gpl_only	= false,
2389 	.ret_type	= RET_VOID,
2390 	.arg1_type	= ARG_PTR_TO_CTX,
2391 	.arg2_type	= ARG_CONST_MAP_PTR,
2392 	.arg3_type	= ARG_ANYTHING,
2393 };
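
/* .func above is deliberately NULL: bpf_tail_call() never runs as an
 * ordinary helper call. The verifier rewrites it into the JMP|TAIL_CALL
 * instruction, which the interpreter and the JITs handle inline. From a
 * BPF program it still reads like a normal helper:
 *
 *	bpf_tail_call(ctx, &jmp_table, slot);
 *	// execution continues here only if the tail call failed
 */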
2394 
2395 /* Stub for JITs that only support cBPF. eBPF programs are interpreted.
2396  * It is encouraged to implement bpf_int_jit_compile() instead, so that
2397  * eBPF and implicitly also cBPF can get JITed!
2398  */
2399 struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
2400 {
2401 	return prog;
2402 }
2403 
2404 /* Stub for JITs that support eBPF. All cBPF code gets transformed into
2405  * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
2406  */
2407 void __weak bpf_jit_compile(struct bpf_prog *prog)
2408 {
2409 }
2410 
2411 bool __weak bpf_helper_changes_pkt_data(void *func)
2412 {
2413 	return false;
2414 }
2415 
2416 /* Return TRUE if the JIT backend wants the verifier to enable sub-register
2417  * usage analysis and to insert explicit zero extensions where needed.
2418  * Otherwise, return FALSE.
2419  *
2420  * The verifier inserts an explicit zero extension after BPF_CMPXCHGs even if
2421  * you don't override this. JITs that don't want these extra insns can detect
2422  * them using insn_is_zext.
2423  */
2424 bool __weak bpf_jit_needs_zext(void)
2425 {
2426 	return false;
2427 }
2428 
2429 bool __weak bpf_jit_supports_kfunc_call(void)
2430 {
2431 	return false;
2432 }
2433 
2434 /* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
2435  * skb_copy_bits(), so provide a weak definition of it for NET-less config.
2436  */
2437 int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
2438 			 int len)
2439 {
2440 	return -EFAULT;
2441 }
2442 
2443 int __weak bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
2444 			      void *addr1, void *addr2)
2445 {
2446 	return -ENOTSUPP;
2447 }
2448 
2449 DEFINE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
2450 EXPORT_SYMBOL(bpf_stats_enabled_key);
2451 
2452 /* All definitions of tracepoints related to BPF. */
2453 #define CREATE_TRACE_POINTS
2454 #include <linux/bpf_trace.h>
2455 
2456 EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);
2457 EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_bulk_tx);
2458