xref: /openbmc/linux/kernel/bpf/core.c (revision 816cd1688331e0ffa1927889c15e7ed56650a183)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Linux Socket Filter - Kernel level socket filtering
4  *
5  * Based on the design of the Berkeley Packet Filter. The new
6  * internal format has been designed by PLUMgrid:
7  *
8  *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
9  *
10  * Authors:
11  *
12  *	Jay Schulist <jschlst@samba.org>
13  *	Alexei Starovoitov <ast@plumgrid.com>
14  *	Daniel Borkmann <dborkman@redhat.com>
15  *
16  * Andi Kleen - Fix a few bad bugs and races.
17  * Kris Katterjohn - Added many additional checks in bpf_check_classic()
18  */
19 
20 #include <uapi/linux/btf.h>
21 #include <linux/filter.h>
22 #include <linux/skbuff.h>
23 #include <linux/vmalloc.h>
24 #include <linux/random.h>
25 #include <linux/moduleloader.h>
26 #include <linux/bpf.h>
27 #include <linux/btf.h>
28 #include <linux/objtool.h>
29 #include <linux/rbtree_latch.h>
30 #include <linux/kallsyms.h>
31 #include <linux/rcupdate.h>
32 #include <linux/perf_event.h>
33 #include <linux/extable.h>
34 #include <linux/log2.h>
35 #include <linux/bpf_verifier.h>
36 #include <linux/nodemask.h>
37 
38 #include <asm/barrier.h>
39 #include <asm/unaligned.h>
40 
41 /* Registers */
42 #define BPF_R0	regs[BPF_REG_0]
43 #define BPF_R1	regs[BPF_REG_1]
44 #define BPF_R2	regs[BPF_REG_2]
45 #define BPF_R3	regs[BPF_REG_3]
46 #define BPF_R4	regs[BPF_REG_4]
47 #define BPF_R5	regs[BPF_REG_5]
48 #define BPF_R6	regs[BPF_REG_6]
49 #define BPF_R7	regs[BPF_REG_7]
50 #define BPF_R8	regs[BPF_REG_8]
51 #define BPF_R9	regs[BPF_REG_9]
52 #define BPF_R10	regs[BPF_REG_10]
53 
54 /* Named registers */
55 #define DST	regs[insn->dst_reg]
56 #define SRC	regs[insn->src_reg]
57 #define FP	regs[BPF_REG_FP]
58 #define AX	regs[BPF_REG_AX]
59 #define ARG1	regs[BPF_REG_ARG1]
60 #define CTX	regs[BPF_REG_CTX]
61 #define IMM	insn->imm
62 
63 /* No hurry in this branch: this is the slow path for classic BPF loads at
64  * negative offsets (the SKF_NET_OFF and SKF_LL_OFF ancillary ranges).
65  * Exported for the bpf jit load helper.
66  */
67 void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
68 {
69 	u8 *ptr = NULL;
70 
71 	if (k >= SKF_NET_OFF) {
72 		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
73 	} else if (k >= SKF_LL_OFF) {
74 		if (unlikely(!skb_mac_header_was_set(skb)))
75 			return NULL;
76 		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
77 	}
78 	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
79 		return ptr;
80 
81 	return NULL;
82 }
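/* Illustrative use (made-up offset): a classic BPF load with
 * k = SKF_NET_OFF + 12 and size = 4 yields a pointer 12 bytes past the
 * network header (the IPv4 source address of an IPv4 packet), as long as
 * that range still lies within the skb's linear data; otherwise NULL is
 * returned.
 */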
83 
84 struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags)
85 {
86 	gfp_t gfp_flags = GFP_KERNEL_ACCOUNT | __GFP_ZERO | gfp_extra_flags;
87 	struct bpf_prog_aux *aux;
88 	struct bpf_prog *fp;
89 
90 	size = round_up(size, PAGE_SIZE);
91 	fp = __vmalloc(size, gfp_flags);
92 	if (fp == NULL)
93 		return NULL;
94 
95 	aux = kzalloc(sizeof(*aux), GFP_KERNEL_ACCOUNT | gfp_extra_flags);
96 	if (aux == NULL) {
97 		vfree(fp);
98 		return NULL;
99 	}
100 	fp->active = alloc_percpu_gfp(int, GFP_KERNEL_ACCOUNT | gfp_extra_flags);
101 	if (!fp->active) {
102 		vfree(fp);
103 		kfree(aux);
104 		return NULL;
105 	}
106 
107 	fp->pages = size / PAGE_SIZE;
108 	fp->aux = aux;
109 	fp->aux->prog = fp;
110 	fp->jit_requested = ebpf_jit_enabled();
111 	fp->blinding_requested = bpf_jit_blinding_enabled(fp);
112 #ifdef CONFIG_CGROUP_BPF
113 	aux->cgroup_atype = CGROUP_BPF_ATTACH_TYPE_INVALID;
114 #endif
115 
116 	INIT_LIST_HEAD_RCU(&fp->aux->ksym.lnode);
117 	mutex_init(&fp->aux->used_maps_mutex);
118 	mutex_init(&fp->aux->dst_mutex);
119 
120 	return fp;
121 }
122 
123 struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
124 {
125 	gfp_t gfp_flags = GFP_KERNEL_ACCOUNT | __GFP_ZERO | gfp_extra_flags;
126 	struct bpf_prog *prog;
127 	int cpu;
128 
129 	prog = bpf_prog_alloc_no_stats(size, gfp_extra_flags);
130 	if (!prog)
131 		return NULL;
132 
133 	prog->stats = alloc_percpu_gfp(struct bpf_prog_stats, gfp_flags);
134 	if (!prog->stats) {
135 		free_percpu(prog->active);
136 		kfree(prog->aux);
137 		vfree(prog);
138 		return NULL;
139 	}
140 
141 	for_each_possible_cpu(cpu) {
142 		struct bpf_prog_stats *pstats;
143 
144 		pstats = per_cpu_ptr(prog->stats, cpu);
145 		u64_stats_init(&pstats->syncp);
146 	}
147 	return prog;
148 }
149 EXPORT_SYMBOL_GPL(bpf_prog_alloc);
150 
151 int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog)
152 {
153 	if (!prog->aux->nr_linfo || !prog->jit_requested)
154 		return 0;
155 
156 	prog->aux->jited_linfo = kvcalloc(prog->aux->nr_linfo,
157 					  sizeof(*prog->aux->jited_linfo),
158 					  GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
159 	if (!prog->aux->jited_linfo)
160 		return -ENOMEM;
161 
162 	return 0;
163 }
164 
165 void bpf_prog_jit_attempt_done(struct bpf_prog *prog)
166 {
167 	if (prog->aux->jited_linfo &&
168 	    (!prog->jited || !prog->aux->jited_linfo[0])) {
169 		kvfree(prog->aux->jited_linfo);
170 		prog->aux->jited_linfo = NULL;
171 	}
172 
173 	kfree(prog->aux->kfunc_tab);
174 	prog->aux->kfunc_tab = NULL;
175 }
176 
177 /* The jit engine is responsible for providing an array
178  * for the insn_off to jited_off mapping (insn_to_jit_off).
179  *
180  * The idx to this array is the insn_off.  Hence, the insn_off
181  * here is relative to the prog itself instead of the main prog.
182  * This array has one entry for each xlated bpf insn.
183  *
184  * jited_off is the byte off to the end of the jited insn.
185  *
186  * Hence, with
187  * insn_start:
188  *      The first bpf insn off of the prog.  The insn off
189  *      here is relative to the main prog.
190  *      e.g. if prog is a subprog, insn_start > 0
191  * linfo_idx:
192  *      The prog's idx to prog->aux->linfo and jited_linfo
193  *
194  * jited_linfo[linfo_idx] = prog->bpf_func
195  *
196  * For i > linfo_idx,
197  *
198  * jited_linfo[i] = prog->bpf_func +
199  *	insn_to_jit_off[linfo[i].insn_off - insn_start - 1]
200  */
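/* A small worked example with made-up numbers: take a subprog with
 * linfo_idx == 3 whose first line info entry has insn_off == 10 (so
 * insn_start == 10).  jited_linfo[3] is set to prog->bpf_func.  If
 * linfo[4].insn_off == 15, then jited_linfo[4] becomes
 * prog->bpf_func + insn_to_jit_off[15 - 10 - 1], i.e. the byte right
 * after the jited code of the subprog's 5th xlated insn, which is where
 * the insn at xlated offset 15 starts in the image.
 */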
201 void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
202 			       const u32 *insn_to_jit_off)
203 {
204 	u32 linfo_idx, insn_start, insn_end, nr_linfo, i;
205 	const struct bpf_line_info *linfo;
206 	void **jited_linfo;
207 
208 	if (!prog->aux->jited_linfo)
209 		/* Userspace did not provide linfo */
210 		return;
211 
212 	linfo_idx = prog->aux->linfo_idx;
213 	linfo = &prog->aux->linfo[linfo_idx];
214 	insn_start = linfo[0].insn_off;
215 	insn_end = insn_start + prog->len;
216 
217 	jited_linfo = &prog->aux->jited_linfo[linfo_idx];
218 	jited_linfo[0] = prog->bpf_func;
219 
220 	nr_linfo = prog->aux->nr_linfo - linfo_idx;
221 
222 	for (i = 1; i < nr_linfo && linfo[i].insn_off < insn_end; i++)
223 		/* The verifier ensures that linfo[i].insn_off is
224 		 * strictly increasing
225 		 */
226 		jited_linfo[i] = prog->bpf_func +
227 			insn_to_jit_off[linfo[i].insn_off - insn_start - 1];
228 }
229 
230 struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
231 				  gfp_t gfp_extra_flags)
232 {
233 	gfp_t gfp_flags = GFP_KERNEL_ACCOUNT | __GFP_ZERO | gfp_extra_flags;
234 	struct bpf_prog *fp;
235 	u32 pages;
236 
237 	size = round_up(size, PAGE_SIZE);
238 	pages = size / PAGE_SIZE;
239 	if (pages <= fp_old->pages)
240 		return fp_old;
241 
242 	fp = __vmalloc(size, gfp_flags);
243 	if (fp) {
244 		memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
245 		fp->pages = pages;
246 		fp->aux->prog = fp;
247 
248 		/* We keep fp->aux from fp_old around in the new
249 		 * reallocated structure.
250 		 */
251 		fp_old->aux = NULL;
252 		fp_old->stats = NULL;
253 		fp_old->active = NULL;
254 		__bpf_prog_free(fp_old);
255 	}
256 
257 	return fp;
258 }
259 
260 void __bpf_prog_free(struct bpf_prog *fp)
261 {
262 	if (fp->aux) {
263 		mutex_destroy(&fp->aux->used_maps_mutex);
264 		mutex_destroy(&fp->aux->dst_mutex);
265 		kfree(fp->aux->poke_tab);
266 		kfree(fp->aux);
267 	}
268 	free_percpu(fp->stats);
269 	free_percpu(fp->active);
270 	vfree(fp);
271 }
272 
273 int bpf_prog_calc_tag(struct bpf_prog *fp)
274 {
275 	const u32 bits_offset = SHA1_BLOCK_SIZE - sizeof(__be64);
276 	u32 raw_size = bpf_prog_tag_scratch_size(fp);
277 	u32 digest[SHA1_DIGEST_WORDS];
278 	u32 ws[SHA1_WORKSPACE_WORDS];
279 	u32 i, bsize, psize, blocks;
280 	struct bpf_insn *dst;
281 	bool was_ld_map;
282 	u8 *raw, *todo;
283 	__be32 *result;
284 	__be64 *bits;
285 
286 	raw = vmalloc(raw_size);
287 	if (!raw)
288 		return -ENOMEM;
289 
290 	sha1_init(digest);
291 	memset(ws, 0, sizeof(ws));
292 
293 	/* We need to take the map fd out of the digest calculation
294 	 * since map fds are unstable from the user space side.
295 	 */
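	/* For example (hypothetical fd value): a BPF_LD_MAP_FD(BPF_REG_1, 42)
	 * ld_imm64 pair carries src_reg == BPF_PSEUDO_MAP_FD and the fd 42 in
	 * insn[0].imm; both halves of the pair get their imm cleared below so
	 * the resulting tag is independent of whichever fd number user space
	 * happened to receive.
	 */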
296 	dst = (void *)raw;
297 	for (i = 0, was_ld_map = false; i < fp->len; i++) {
298 		dst[i] = fp->insnsi[i];
299 		if (!was_ld_map &&
300 		    dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
301 		    (dst[i].src_reg == BPF_PSEUDO_MAP_FD ||
302 		     dst[i].src_reg == BPF_PSEUDO_MAP_VALUE)) {
303 			was_ld_map = true;
304 			dst[i].imm = 0;
305 		} else if (was_ld_map &&
306 			   dst[i].code == 0 &&
307 			   dst[i].dst_reg == 0 &&
308 			   dst[i].src_reg == 0 &&
309 			   dst[i].off == 0) {
310 			was_ld_map = false;
311 			dst[i].imm = 0;
312 		} else {
313 			was_ld_map = false;
314 		}
315 	}
316 
317 	psize = bpf_prog_insn_size(fp);
318 	memset(&raw[psize], 0, raw_size - psize);
319 	raw[psize++] = 0x80;
320 
321 	bsize  = round_up(psize, SHA1_BLOCK_SIZE);
322 	blocks = bsize / SHA1_BLOCK_SIZE;
323 	todo   = raw;
324 	if (bsize - psize >= sizeof(__be64)) {
325 		bits = (__be64 *)(todo + bsize - sizeof(__be64));
326 	} else {
327 		bits = (__be64 *)(todo + bsize + bits_offset);
328 		blocks++;
329 	}
330 	*bits = cpu_to_be64((psize - 1) << 3);
331 
332 	while (blocks--) {
333 		sha1_transform(digest, todo, ws);
334 		todo += SHA1_BLOCK_SIZE;
335 	}
336 
337 	result = (__force __be32 *)digest;
338 	for (i = 0; i < SHA1_DIGEST_WORDS; i++)
339 		result[i] = cpu_to_be32(digest[i]);
340 	memcpy(fp->tag, result, sizeof(fp->tag));
341 
342 	vfree(raw);
343 	return 0;
344 }
345 
346 static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old,
347 				s32 end_new, s32 curr, const bool probe_pass)
348 {
349 	const s64 imm_min = S32_MIN, imm_max = S32_MAX;
350 	s32 delta = end_new - end_old;
351 	s64 imm = insn->imm;
352 
353 	if (curr < pos && curr + imm + 1 >= end_old)
354 		imm += delta;
355 	else if (curr >= end_new && curr + imm + 1 < end_new)
356 		imm -= delta;
357 	if (imm < imm_min || imm > imm_max)
358 		return -ERANGE;
359 	if (!probe_pass)
360 		insn->imm = imm;
361 	return 0;
362 }
363 
364 static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old,
365 				s32 end_new, s32 curr, const bool probe_pass)
366 {
367 	const s32 off_min = S16_MIN, off_max = S16_MAX;
368 	s32 delta = end_new - end_old;
369 	s32 off = insn->off;
370 
371 	if (curr < pos && curr + off + 1 >= end_old)
372 		off += delta;
373 	else if (curr >= end_new && curr + off + 1 < end_new)
374 		off -= delta;
375 	if (off < off_min || off > off_max)
376 		return -ERANGE;
377 	if (!probe_pass)
378 		insn->off = off;
379 	return 0;
380 }
381 
382 static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, s32 end_old,
383 			    s32 end_new, const bool probe_pass)
384 {
385 	u32 i, insn_cnt = prog->len + (probe_pass ? end_new - end_old : 0);
386 	struct bpf_insn *insn = prog->insnsi;
387 	int ret = 0;
388 
389 	for (i = 0; i < insn_cnt; i++, insn++) {
390 		u8 code;
391 
392 		/* In the probing pass we still operate on the original,
393 		 * unpatched image in order to check overflows before we
394 		 * do any other adjustments. Therefore skip the patchlet.
395 		 */
396 		if (probe_pass && i == pos) {
397 			i = end_new;
398 			insn = prog->insnsi + end_old;
399 		}
400 		if (bpf_pseudo_func(insn)) {
401 			ret = bpf_adj_delta_to_imm(insn, pos, end_old,
402 						   end_new, i, probe_pass);
403 			if (ret)
404 				return ret;
405 			continue;
406 		}
407 		code = insn->code;
408 		if ((BPF_CLASS(code) != BPF_JMP &&
409 		     BPF_CLASS(code) != BPF_JMP32) ||
410 		    BPF_OP(code) == BPF_EXIT)
411 			continue;
412 		/* Adjust offset of jmps if we cross patch boundaries. */
413 		if (BPF_OP(code) == BPF_CALL) {
414 			if (insn->src_reg != BPF_PSEUDO_CALL)
415 				continue;
416 			ret = bpf_adj_delta_to_imm(insn, pos, end_old,
417 						   end_new, i, probe_pass);
418 		} else {
419 			ret = bpf_adj_delta_to_off(insn, pos, end_old,
420 						   end_new, i, probe_pass);
421 		}
422 		if (ret)
423 			break;
424 	}
425 
426 	return ret;
427 }
428 
429 static void bpf_adj_linfo(struct bpf_prog *prog, u32 off, u32 delta)
430 {
431 	struct bpf_line_info *linfo;
432 	u32 i, nr_linfo;
433 
434 	nr_linfo = prog->aux->nr_linfo;
435 	if (!nr_linfo || !delta)
436 		return;
437 
438 	linfo = prog->aux->linfo;
439 
440 	for (i = 0; i < nr_linfo; i++)
441 		if (off < linfo[i].insn_off)
442 			break;
443 
444 	/* Push insn_off of all remaining entries (those with insn_off > off) by delta */
445 	for (; i < nr_linfo; i++)
446 		linfo[i].insn_off += delta;
447 }
448 
449 struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
450 				       const struct bpf_insn *patch, u32 len)
451 {
452 	u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
453 	const u32 cnt_max = S16_MAX;
454 	struct bpf_prog *prog_adj;
455 	int err;
456 
457 	/* Since our patchlet doesn't expand the image, we're done. */
458 	if (insn_delta == 0) {
459 		memcpy(prog->insnsi + off, patch, sizeof(*patch));
460 		return prog;
461 	}
462 
463 	insn_adj_cnt = prog->len + insn_delta;
464 
465 	/* Reject anything that would potentially let the insn->off
466 	 * target overflow when we have excessive program expansions.
467 	 * We need to probe here before we do any reallocation where
468 	 * we afterwards may not fail anymore.
469 	 */
470 	if (insn_adj_cnt > cnt_max &&
471 	    (err = bpf_adj_branches(prog, off, off + 1, off + len, true)))
472 		return ERR_PTR(err);
473 
474 	/* Several new instructions need to be inserted. Make room
475 	 * for them. Likely, there's no need for a new allocation as
476 	 * the last page could have large enough tailroom.
477 	 */
478 	prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
479 				    GFP_USER);
480 	if (!prog_adj)
481 		return ERR_PTR(-ENOMEM);
482 
483 	prog_adj->len = insn_adj_cnt;
484 
485 	/* Patching happens in 3 steps:
486 	 *
487 	 * 1) Move over tail of insnsi from next instruction onwards,
488 	 *    so we can patch the single target insn with one or more
489 	 *    new ones (patching is always from 1 to n insns, n > 0).
490 	 * 2) Inject new instructions at the target location.
491 	 * 3) Adjust branch offsets if necessary.
492 	 */
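	/* Example (illustrative): patching the insn at off with a 3-insn
	 * patchlet (len == 3, insn_delta == 2) moves the tail that used to
	 * start at off + 1 to off + 3, copies the three patch insns to
	 * off..off + 2, and then lets bpf_adj_branches() fix up any jump or
	 * pseudo call whose target crosses the patched region.
	 */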
493 	insn_rest = insn_adj_cnt - off - len;
494 
495 	memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
496 		sizeof(*patch) * insn_rest);
497 	memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);
498 
499 	/* We are guaranteed not to fail at this point; otherwise the
500 	 * ship has sailed and we could not revert to the original state. An
501 	 * overflow cannot happen at this point.
502 	 */
503 	BUG_ON(bpf_adj_branches(prog_adj, off, off + 1, off + len, false));
504 
505 	bpf_adj_linfo(prog_adj, off, insn_delta);
506 
507 	return prog_adj;
508 }
509 
510 int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt)
511 {
512 	/* Branch offsets can't overflow when program is shrinking, no need
513 	 * to call bpf_adj_branches(..., true) here
514 	 */
515 	memmove(prog->insnsi + off, prog->insnsi + off + cnt,
516 		sizeof(struct bpf_insn) * (prog->len - off - cnt));
517 	prog->len -= cnt;
518 
519 	return WARN_ON_ONCE(bpf_adj_branches(prog, off, off + cnt, off, false));
520 }
521 
522 static void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
523 {
524 	int i;
525 
526 	for (i = 0; i < fp->aux->func_cnt; i++)
527 		bpf_prog_kallsyms_del(fp->aux->func[i]);
528 }
529 
530 void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
531 {
532 	bpf_prog_kallsyms_del_subprogs(fp);
533 	bpf_prog_kallsyms_del(fp);
534 }
535 
536 #ifdef CONFIG_BPF_JIT
537 /* All BPF JIT sysctl knobs here. */
538 int bpf_jit_enable   __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
539 int bpf_jit_kallsyms __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
540 int bpf_jit_harden   __read_mostly;
541 long bpf_jit_limit   __read_mostly;
542 long bpf_jit_limit_max __read_mostly;
543 
544 static void
545 bpf_prog_ksym_set_addr(struct bpf_prog *prog)
546 {
547 	WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));
548 
549 	prog->aux->ksym.start = (unsigned long) prog->bpf_func;
550 	prog->aux->ksym.end   = prog->aux->ksym.start + prog->jited_len;
551 }
552 
553 static void
554 bpf_prog_ksym_set_name(struct bpf_prog *prog)
555 {
556 	char *sym = prog->aux->ksym.name;
557 	const char *end = sym + KSYM_NAME_LEN;
558 	const struct btf_type *type;
559 	const char *func_name;
560 
561 	BUILD_BUG_ON(sizeof("bpf_prog_") +
562 		     sizeof(prog->tag) * 2 +
563 		     /* name has been null terminated.
564 		      * One would expect +1 for the '_' preceding
565 		      * the name.  However, the null character
566 		      * is double counted between the name and the
567 		      * sizeof("bpf_prog_") above, so we omit
568 		      * the +1 here.
569 		      */
570 		     sizeof(prog->aux->name) > KSYM_NAME_LEN);
571 
572 	sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
573 	sym  = bin2hex(sym, prog->tag, sizeof(prog->tag));
574 
575 	/* prog->aux->name will be ignored if full btf name is available */
576 	if (prog->aux->func_info_cnt) {
577 		type = btf_type_by_id(prog->aux->btf,
578 				      prog->aux->func_info[prog->aux->func_idx].type_id);
579 		func_name = btf_name_by_offset(prog->aux->btf, type->name_off);
580 		snprintf(sym, (size_t)(end - sym), "_%s", func_name);
581 		return;
582 	}
583 
584 	if (prog->aux->name[0])
585 		snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name);
586 	else
587 		*sym = 0;
588 }
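/* The resulting ksym name has the form "bpf_prog_<tag>[_<name>]", where
 * <tag> is the 16 hex character program tag and <name> comes from the BTF
 * function info when available, otherwise from prog->aux->name; e.g.
 * "bpf_prog_8937c9e8b73bda0a_my_prog" (tag value purely illustrative).
 */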
589 
590 static unsigned long bpf_get_ksym_start(struct latch_tree_node *n)
591 {
592 	return container_of(n, struct bpf_ksym, tnode)->start;
593 }
594 
595 static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
596 					  struct latch_tree_node *b)
597 {
598 	return bpf_get_ksym_start(a) < bpf_get_ksym_start(b);
599 }
600 
601 static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
602 {
603 	unsigned long val = (unsigned long)key;
604 	const struct bpf_ksym *ksym;
605 
606 	ksym = container_of(n, struct bpf_ksym, tnode);
607 
608 	if (val < ksym->start)
609 		return -1;
610 	if (val >= ksym->end)
611 		return  1;
612 
613 	return 0;
614 }
615 
616 static const struct latch_tree_ops bpf_tree_ops = {
617 	.less	= bpf_tree_less,
618 	.comp	= bpf_tree_comp,
619 };
620 
621 static DEFINE_SPINLOCK(bpf_lock);
622 static LIST_HEAD(bpf_kallsyms);
623 static struct latch_tree_root bpf_tree __cacheline_aligned;
624 
625 void bpf_ksym_add(struct bpf_ksym *ksym)
626 {
627 	spin_lock_bh(&bpf_lock);
628 	WARN_ON_ONCE(!list_empty(&ksym->lnode));
629 	list_add_tail_rcu(&ksym->lnode, &bpf_kallsyms);
630 	latch_tree_insert(&ksym->tnode, &bpf_tree, &bpf_tree_ops);
631 	spin_unlock_bh(&bpf_lock);
632 }
633 
634 static void __bpf_ksym_del(struct bpf_ksym *ksym)
635 {
636 	if (list_empty(&ksym->lnode))
637 		return;
638 
639 	latch_tree_erase(&ksym->tnode, &bpf_tree, &bpf_tree_ops);
640 	list_del_rcu(&ksym->lnode);
641 }
642 
643 void bpf_ksym_del(struct bpf_ksym *ksym)
644 {
645 	spin_lock_bh(&bpf_lock);
646 	__bpf_ksym_del(ksym);
647 	spin_unlock_bh(&bpf_lock);
648 }
649 
650 static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
651 {
652 	return fp->jited && !bpf_prog_was_classic(fp);
653 }
654 
655 static bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
656 {
657 	return list_empty(&fp->aux->ksym.lnode) ||
658 	       fp->aux->ksym.lnode.prev == LIST_POISON2;
659 }
660 
661 void bpf_prog_kallsyms_add(struct bpf_prog *fp)
662 {
663 	if (!bpf_prog_kallsyms_candidate(fp) ||
664 	    !bpf_capable())
665 		return;
666 
667 	bpf_prog_ksym_set_addr(fp);
668 	bpf_prog_ksym_set_name(fp);
669 	fp->aux->ksym.prog = true;
670 
671 	bpf_ksym_add(&fp->aux->ksym);
672 }
673 
674 void bpf_prog_kallsyms_del(struct bpf_prog *fp)
675 {
676 	if (!bpf_prog_kallsyms_candidate(fp))
677 		return;
678 
679 	bpf_ksym_del(&fp->aux->ksym);
680 }
681 
682 static struct bpf_ksym *bpf_ksym_find(unsigned long addr)
683 {
684 	struct latch_tree_node *n;
685 
686 	n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
687 	return n ? container_of(n, struct bpf_ksym, tnode) : NULL;
688 }
689 
690 const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
691 				 unsigned long *off, char *sym)
692 {
693 	struct bpf_ksym *ksym;
694 	char *ret = NULL;
695 
696 	rcu_read_lock();
697 	ksym = bpf_ksym_find(addr);
698 	if (ksym) {
699 		unsigned long symbol_start = ksym->start;
700 		unsigned long symbol_end = ksym->end;
701 
702 		strncpy(sym, ksym->name, KSYM_NAME_LEN);
703 
704 		ret = sym;
705 		if (size)
706 			*size = symbol_end - symbol_start;
707 		if (off)
708 			*off  = addr - symbol_start;
709 	}
710 	rcu_read_unlock();
711 
712 	return ret;
713 }
714 
715 bool is_bpf_text_address(unsigned long addr)
716 {
717 	bool ret;
718 
719 	rcu_read_lock();
720 	ret = bpf_ksym_find(addr) != NULL;
721 	rcu_read_unlock();
722 
723 	return ret;
724 }
725 
726 static struct bpf_prog *bpf_prog_ksym_find(unsigned long addr)
727 {
728 	struct bpf_ksym *ksym = bpf_ksym_find(addr);
729 
730 	return ksym && ksym->prog ?
731 	       container_of(ksym, struct bpf_prog_aux, ksym)->prog :
732 	       NULL;
733 }
734 
735 const struct exception_table_entry *search_bpf_extables(unsigned long addr)
736 {
737 	const struct exception_table_entry *e = NULL;
738 	struct bpf_prog *prog;
739 
740 	rcu_read_lock();
741 	prog = bpf_prog_ksym_find(addr);
742 	if (!prog)
743 		goto out;
744 	if (!prog->aux->num_exentries)
745 		goto out;
746 
747 	e = search_extable(prog->aux->extable, prog->aux->num_exentries, addr);
748 out:
749 	rcu_read_unlock();
750 	return e;
751 }
752 
753 int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
754 		    char *sym)
755 {
756 	struct bpf_ksym *ksym;
757 	unsigned int it = 0;
758 	int ret = -ERANGE;
759 
760 	if (!bpf_jit_kallsyms_enabled())
761 		return ret;
762 
763 	rcu_read_lock();
764 	list_for_each_entry_rcu(ksym, &bpf_kallsyms, lnode) {
765 		if (it++ != symnum)
766 			continue;
767 
768 		strncpy(sym, ksym->name, KSYM_NAME_LEN);
769 
770 		*value = ksym->start;
771 		*type  = BPF_SYM_ELF_TYPE;
772 
773 		ret = 0;
774 		break;
775 	}
776 	rcu_read_unlock();
777 
778 	return ret;
779 }
780 
781 int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
782 				struct bpf_jit_poke_descriptor *poke)
783 {
784 	struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab;
785 	static const u32 poke_tab_max = 1024;
786 	u32 slot = prog->aux->size_poke_tab;
787 	u32 size = slot + 1;
788 
789 	if (size > poke_tab_max)
790 		return -ENOSPC;
791 	if (poke->tailcall_target || poke->tailcall_target_stable ||
792 	    poke->tailcall_bypass || poke->adj_off || poke->bypass_addr)
793 		return -EINVAL;
794 
795 	switch (poke->reason) {
796 	case BPF_POKE_REASON_TAIL_CALL:
797 		if (!poke->tail_call.map)
798 			return -EINVAL;
799 		break;
800 	default:
801 		return -EINVAL;
802 	}
803 
804 	tab = krealloc(tab, size * sizeof(*poke), GFP_KERNEL);
805 	if (!tab)
806 		return -ENOMEM;
807 
808 	memcpy(&tab[slot], poke, sizeof(*poke));
809 	prog->aux->size_poke_tab = size;
810 	prog->aux->poke_tab = tab;
811 
812 	return slot;
813 }
814 
815 /*
816  * BPF program pack allocator.
817  *
818  * Most BPF programs are pretty small. Allocating a whole page for each
819  * program is sometimes a waste. Many small bpf programs also add pressure
820  * to the instruction TLB. To solve this issue, we introduce a BPF program pack
821  * allocator. The prog_pack allocator uses HPAGE_PMD_SIZE pages (2MB on x86)
822  * to host BPF programs.
823  */
824 #define BPF_PROG_CHUNK_SHIFT	6
825 #define BPF_PROG_CHUNK_SIZE	(1 << BPF_PROG_CHUNK_SHIFT)
826 #define BPF_PROG_CHUNK_MASK	(~(BPF_PROG_CHUNK_SIZE - 1))
827 
828 struct bpf_prog_pack {
829 	struct list_head list;
830 	void *ptr;
831 	unsigned long bitmap[];
832 };
833 
834 #define BPF_PROG_SIZE_TO_NBITS(size)	(round_up(size, BPF_PROG_CHUNK_SIZE) / BPF_PROG_CHUNK_SIZE)
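/* Illustrative sizing: with 64 byte chunks, a 1000 byte JIT image rounds up
 * to 1024 bytes and therefore occupies BPF_PROG_SIZE_TO_NBITS(1000) == 16
 * consecutive bits in a pack's bitmap.
 */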
835 
836 static size_t bpf_prog_pack_size = -1;
837 static size_t bpf_prog_pack_mask = -1;
838 
839 static int bpf_prog_chunk_count(void)
840 {
841 	WARN_ON_ONCE(bpf_prog_pack_size == -1);
842 	return bpf_prog_pack_size / BPF_PROG_CHUNK_SIZE;
843 }
844 
845 static DEFINE_MUTEX(pack_mutex);
846 static LIST_HEAD(pack_list);
847 
848 /* PMD_SIZE is not available in some special config, e.g. ARCH=arm with
849  * CONFIG_MMU=n. Use PAGE_SIZE in these cases.
850  */
851 #ifdef PMD_SIZE
852 #define BPF_HPAGE_SIZE PMD_SIZE
853 #define BPF_HPAGE_MASK PMD_MASK
854 #else
855 #define BPF_HPAGE_SIZE PAGE_SIZE
856 #define BPF_HPAGE_MASK PAGE_MASK
857 #endif
858 
859 static size_t select_bpf_prog_pack_size(void)
860 {
861 	size_t size;
862 	void *ptr;
863 
864 	size = BPF_HPAGE_SIZE * num_online_nodes();
865 	ptr = module_alloc(size);
866 
867 	/* Test whether we can get huge pages. If not, just use PAGE_SIZE
868 	 * packs.
869 	 */
870 	if (!ptr || !is_vm_area_hugepages(ptr)) {
871 		size = PAGE_SIZE;
872 		bpf_prog_pack_mask = PAGE_MASK;
873 	} else {
874 		bpf_prog_pack_mask = BPF_HPAGE_MASK;
875 	}
876 
877 	vfree(ptr);
878 	return size;
879 }
880 
881 static struct bpf_prog_pack *alloc_new_pack(bpf_jit_fill_hole_t bpf_fill_ill_insns)
882 {
883 	struct bpf_prog_pack *pack;
884 
885 	pack = kzalloc(struct_size(pack, bitmap, BITS_TO_LONGS(bpf_prog_chunk_count())),
886 		       GFP_KERNEL);
887 	if (!pack)
888 		return NULL;
889 	pack->ptr = module_alloc(bpf_prog_pack_size);
890 	if (!pack->ptr) {
891 		kfree(pack);
892 		return NULL;
893 	}
894 	bpf_fill_ill_insns(pack->ptr, bpf_prog_pack_size);
895 	bitmap_zero(pack->bitmap, bpf_prog_pack_size / BPF_PROG_CHUNK_SIZE);
896 	list_add_tail(&pack->list, &pack_list);
897 
898 	set_vm_flush_reset_perms(pack->ptr);
899 	set_memory_ro((unsigned long)pack->ptr, bpf_prog_pack_size / PAGE_SIZE);
900 	set_memory_x((unsigned long)pack->ptr, bpf_prog_pack_size / PAGE_SIZE);
901 	return pack;
902 }
903 
904 static void *bpf_prog_pack_alloc(u32 size, bpf_jit_fill_hole_t bpf_fill_ill_insns)
905 {
906 	unsigned int nbits = BPF_PROG_SIZE_TO_NBITS(size);
907 	struct bpf_prog_pack *pack;
908 	unsigned long pos;
909 	void *ptr = NULL;
910 
911 	mutex_lock(&pack_mutex);
912 	if (bpf_prog_pack_size == -1)
913 		bpf_prog_pack_size = select_bpf_prog_pack_size();
914 
915 	if (size > bpf_prog_pack_size) {
916 		size = round_up(size, PAGE_SIZE);
917 		ptr = module_alloc(size);
918 		if (ptr) {
919 			bpf_fill_ill_insns(ptr, size);
920 			set_vm_flush_reset_perms(ptr);
921 			set_memory_ro((unsigned long)ptr, size / PAGE_SIZE);
922 			set_memory_x((unsigned long)ptr, size / PAGE_SIZE);
923 		}
924 		goto out;
925 	}
926 	list_for_each_entry(pack, &pack_list, list) {
927 		pos = bitmap_find_next_zero_area(pack->bitmap, bpf_prog_chunk_count(), 0,
928 						 nbits, 0);
929 		if (pos < bpf_prog_chunk_count())
930 			goto found_free_area;
931 	}
932 
933 	pack = alloc_new_pack(bpf_fill_ill_insns);
934 	if (!pack)
935 		goto out;
936 
937 	pos = 0;
938 
939 found_free_area:
940 	bitmap_set(pack->bitmap, pos, nbits);
941 	ptr = (void *)(pack->ptr) + (pos << BPF_PROG_CHUNK_SHIFT);
942 
943 out:
944 	mutex_unlock(&pack_mutex);
945 	return ptr;
946 }
947 
948 static void bpf_prog_pack_free(struct bpf_binary_header *hdr)
949 {
950 	struct bpf_prog_pack *pack = NULL, *tmp;
951 	unsigned int nbits;
952 	unsigned long pos;
953 	void *pack_ptr;
954 
955 	mutex_lock(&pack_mutex);
956 	if (hdr->size > bpf_prog_pack_size) {
957 		module_memfree(hdr);
958 		goto out;
959 	}
960 
961 	pack_ptr = (void *)((unsigned long)hdr & bpf_prog_pack_mask);
962 
963 	list_for_each_entry(tmp, &pack_list, list) {
964 		if (tmp->ptr == pack_ptr) {
965 			pack = tmp;
966 			break;
967 		}
968 	}
969 
970 	if (WARN_ONCE(!pack, "bpf_prog_pack bug\n"))
971 		goto out;
972 
973 	nbits = BPF_PROG_SIZE_TO_NBITS(hdr->size);
974 	pos = ((unsigned long)hdr - (unsigned long)pack_ptr) >> BPF_PROG_CHUNK_SHIFT;
975 
976 	WARN_ONCE(bpf_arch_text_invalidate(hdr, hdr->size),
977 		  "bpf_prog_pack bug: missing bpf_arch_text_invalidate?\n");
978 
979 	bitmap_clear(pack->bitmap, pos, nbits);
980 	if (bitmap_find_next_zero_area(pack->bitmap, bpf_prog_chunk_count(), 0,
981 				       bpf_prog_chunk_count(), 0) == 0) {
982 		list_del(&pack->list);
983 		module_memfree(pack->ptr);
984 		kfree(pack);
985 	}
986 out:
987 	mutex_unlock(&pack_mutex);
988 }
989 
990 static atomic_long_t bpf_jit_current;
991 
992 /* Can be overridden by an arch's JIT compiler if it has a custom,
993  * dedicated BPF backend memory area, or if neither of the two
994  * below apply.
995  */
996 u64 __weak bpf_jit_alloc_exec_limit(void)
997 {
998 #if defined(MODULES_VADDR)
999 	return MODULES_END - MODULES_VADDR;
1000 #else
1001 	return VMALLOC_END - VMALLOC_START;
1002 #endif
1003 }
1004 
1005 static int __init bpf_jit_charge_init(void)
1006 {
1007 	/* Only used as heuristic here to derive limit. */
1008 	bpf_jit_limit_max = bpf_jit_alloc_exec_limit();
1009 	bpf_jit_limit = min_t(u64, round_up(bpf_jit_limit_max >> 2,
1010 					    PAGE_SIZE), LONG_MAX);
1011 	return 0;
1012 }
1013 pure_initcall(bpf_jit_charge_init);
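/* For instance (hypothetical arch numbers): with a 1 GiB exec area the
 * default bpf_jit_limit comes out as round_up((1 GiB) >> 2, PAGE_SIZE),
 * i.e. roughly 256 MiB of JIT image charges, beyond which allocations
 * from tasks lacking bpf_capable() start failing with -EPERM.
 */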
1014 
1015 int bpf_jit_charge_modmem(u32 size)
1016 {
1017 	if (atomic_long_add_return(size, &bpf_jit_current) > bpf_jit_limit) {
1018 		if (!bpf_capable()) {
1019 			atomic_long_sub(size, &bpf_jit_current);
1020 			return -EPERM;
1021 		}
1022 	}
1023 
1024 	return 0;
1025 }
1026 
1027 void bpf_jit_uncharge_modmem(u32 size)
1028 {
1029 	atomic_long_sub(size, &bpf_jit_current);
1030 }
1031 
1032 void *__weak bpf_jit_alloc_exec(unsigned long size)
1033 {
1034 	return module_alloc(size);
1035 }
1036 
1037 void __weak bpf_jit_free_exec(void *addr)
1038 {
1039 	module_memfree(addr);
1040 }
1041 
1042 struct bpf_binary_header *
1043 bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
1044 		     unsigned int alignment,
1045 		     bpf_jit_fill_hole_t bpf_fill_ill_insns)
1046 {
1047 	struct bpf_binary_header *hdr;
1048 	u32 size, hole, start;
1049 
1050 	WARN_ON_ONCE(!is_power_of_2(alignment) ||
1051 		     alignment > BPF_IMAGE_ALIGNMENT);
1052 
1053 	/* Most BPF filters are really small, but if some of them
1054 	 * fill a whole page, allow at least 128 extra bytes to insert a
1055 	 * random section of illegal instructions.
1056 	 */
1057 	size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
1058 
1059 	if (bpf_jit_charge_modmem(size))
1060 		return NULL;
1061 	hdr = bpf_jit_alloc_exec(size);
1062 	if (!hdr) {
1063 		bpf_jit_uncharge_modmem(size);
1064 		return NULL;
1065 	}
1066 
1067 	/* Fill space with illegal/arch-dep instructions. */
1068 	bpf_fill_ill_insns(hdr, size);
1069 
1070 	hdr->size = size;
1071 	hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
1072 		     PAGE_SIZE - sizeof(*hdr));
1073 	start = (get_random_int() % hole) & ~(alignment - 1);
1074 
1075 	/* Leave a random number of instructions before BPF code. */
1076 	*image_ptr = &hdr->image[start];
1077 
1078 	return hdr;
1079 }
1080 
1081 void bpf_jit_binary_free(struct bpf_binary_header *hdr)
1082 {
1083 	u32 size = hdr->size;
1084 
1085 	bpf_jit_free_exec(hdr);
1086 	bpf_jit_uncharge_modmem(size);
1087 }
1088 
1089 /* Allocate jit binary from bpf_prog_pack allocator.
1090  * Since the allocated memory is RO+X, the JIT engine cannot write directly
1091  * to the memory. To solve this problem, an RW buffer is also allocated
1092  * at the same time. The JIT engine should calculate offsets based on the
1093  * RO memory address, but write the JITed program to the RW buffer. Once the
1094  * JIT engine finishes, it calls bpf_jit_binary_pack_finalize, which copies
1095  * the JITed program to the RO memory.
1096  */
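/* A rough usage sketch from an arch JIT's point of view (illustrative only,
 * not lifted from any particular backend):
 *
 *	header = bpf_jit_binary_pack_alloc(proglen, &image, align,
 *					   &rw_header, &rw_image, fill_insns);
 *	... emit instructions into rw_image, computing addresses and
 *	    relative offsets against the final RO location 'image' ...
 *	if (!bpf_jit_binary_pack_finalize(prog, header, rw_header))
 *		prog->bpf_func = (void *)image;
 */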
1097 struct bpf_binary_header *
1098 bpf_jit_binary_pack_alloc(unsigned int proglen, u8 **image_ptr,
1099 			  unsigned int alignment,
1100 			  struct bpf_binary_header **rw_header,
1101 			  u8 **rw_image,
1102 			  bpf_jit_fill_hole_t bpf_fill_ill_insns)
1103 {
1104 	struct bpf_binary_header *ro_header;
1105 	u32 size, hole, start;
1106 
1107 	WARN_ON_ONCE(!is_power_of_2(alignment) ||
1108 		     alignment > BPF_IMAGE_ALIGNMENT);
1109 
1110 	/* add 16 bytes for a random section of illegal instructions */
1111 	size = round_up(proglen + sizeof(*ro_header) + 16, BPF_PROG_CHUNK_SIZE);
1112 
1113 	if (bpf_jit_charge_modmem(size))
1114 		return NULL;
1115 	ro_header = bpf_prog_pack_alloc(size, bpf_fill_ill_insns);
1116 	if (!ro_header) {
1117 		bpf_jit_uncharge_modmem(size);
1118 		return NULL;
1119 	}
1120 
1121 	*rw_header = kvmalloc(size, GFP_KERNEL);
1122 	if (!*rw_header) {
1123 		bpf_arch_text_copy(&ro_header->size, &size, sizeof(size));
1124 		bpf_prog_pack_free(ro_header);
1125 		bpf_jit_uncharge_modmem(size);
1126 		return NULL;
1127 	}
1128 
1129 	/* Fill space with illegal/arch-dep instructions. */
1130 	bpf_fill_ill_insns(*rw_header, size);
1131 	(*rw_header)->size = size;
1132 
1133 	hole = min_t(unsigned int, size - (proglen + sizeof(*ro_header)),
1134 		     BPF_PROG_CHUNK_SIZE - sizeof(*ro_header));
1135 	start = (get_random_int() % hole) & ~(alignment - 1);
1136 
1137 	*image_ptr = &ro_header->image[start];
1138 	*rw_image = &(*rw_header)->image[start];
1139 
1140 	return ro_header;
1141 }
1142 
1143 /* Copy JITed text from rw_header to its final location, the ro_header. */
1144 int bpf_jit_binary_pack_finalize(struct bpf_prog *prog,
1145 				 struct bpf_binary_header *ro_header,
1146 				 struct bpf_binary_header *rw_header)
1147 {
1148 	void *ptr;
1149 
1150 	ptr = bpf_arch_text_copy(ro_header, rw_header, rw_header->size);
1151 
1152 	kvfree(rw_header);
1153 
1154 	if (IS_ERR(ptr)) {
1155 		bpf_prog_pack_free(ro_header);
1156 		return PTR_ERR(ptr);
1157 	}
1158 	prog->aux->use_bpf_prog_pack = true;
1159 	return 0;
1160 }
1161 
1162 /* bpf_jit_binary_pack_free is called in two different scenarios:
1163  *   1) when the program is freed after a successful JIT;
1164  *   2) when the JIT engine fails (before bpf_jit_binary_pack_finalize).
1165  * For case 2), we need to free both the RO memory and the RW buffer.
1166  *
1167  * bpf_jit_binary_pack_free requires proper ro_header->size. However,
1168  * bpf_jit_binary_pack_alloc does not set it. Therefore, ro_header->size
1169  * must be set with either bpf_jit_binary_pack_finalize (normal path) or
1170  * bpf_arch_text_copy (when jit fails).
1171  */
1172 void bpf_jit_binary_pack_free(struct bpf_binary_header *ro_header,
1173 			      struct bpf_binary_header *rw_header)
1174 {
1175 	u32 size = ro_header->size;
1176 
1177 	bpf_prog_pack_free(ro_header);
1178 	kvfree(rw_header);
1179 	bpf_jit_uncharge_modmem(size);
1180 }
1181 
1182 static inline struct bpf_binary_header *
1183 bpf_jit_binary_hdr(const struct bpf_prog *fp)
1184 {
1185 	unsigned long real_start = (unsigned long)fp->bpf_func;
1186 	unsigned long addr;
1187 
1188 	if (fp->aux->use_bpf_prog_pack)
1189 		addr = real_start & BPF_PROG_CHUNK_MASK;
1190 	else
1191 		addr = real_start & PAGE_MASK;
1192 
1193 	return (void *)addr;
1194 }
1195 
1196 /* This symbol is only overridden by archs that have different
1197  * requirements than the usual eBPF JITs, f.e. when they only
1198  * implement cBPF JIT, do not set images read-only, etc.
1199  */
1200 void __weak bpf_jit_free(struct bpf_prog *fp)
1201 {
1202 	if (fp->jited) {
1203 		struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);
1204 
1205 		if (fp->aux->use_bpf_prog_pack)
1206 			bpf_jit_binary_pack_free(hdr, NULL /* rw_buffer */);
1207 		else
1208 			bpf_jit_binary_free(hdr);
1209 
1210 		WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
1211 	}
1212 
1213 	bpf_prog_unlock_free(fp);
1214 }
1215 
1216 int bpf_jit_get_func_addr(const struct bpf_prog *prog,
1217 			  const struct bpf_insn *insn, bool extra_pass,
1218 			  u64 *func_addr, bool *func_addr_fixed)
1219 {
1220 	s16 off = insn->off;
1221 	s32 imm = insn->imm;
1222 	u8 *addr;
1223 
1224 	*func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL;
1225 	if (!*func_addr_fixed) {
1226 		/* Place-holder address till the last pass has collected
1227 		 * all addresses for JITed subprograms in which case we
1228 		 * can pick them up from prog->aux.
1229 		 */
1230 		if (!extra_pass)
1231 			addr = NULL;
1232 		else if (prog->aux->func &&
1233 			 off >= 0 && off < prog->aux->func_cnt)
1234 			addr = (u8 *)prog->aux->func[off]->bpf_func;
1235 		else
1236 			return -EINVAL;
1237 	} else {
1238 		/* Address of a BPF helper call. Since part of the core
1239 		 * kernel, it's always at a fixed location. __bpf_call_base
1240 		 * and the helper with imm relative to it are both in core
1241 		 * kernel.
1242 		 */
1243 		addr = (u8 *)__bpf_call_base + imm;
1244 	}
1245 
1246 	*func_addr = (unsigned long)addr;
1247 	return 0;
1248 }
1249 
1250 static int bpf_jit_blind_insn(const struct bpf_insn *from,
1251 			      const struct bpf_insn *aux,
1252 			      struct bpf_insn *to_buff,
1253 			      bool emit_zext)
1254 {
1255 	struct bpf_insn *to = to_buff;
1256 	u32 imm_rnd = get_random_int();
1257 	s16 off;
1258 
1259 	BUILD_BUG_ON(BPF_REG_AX  + 1 != MAX_BPF_JIT_REG);
1260 	BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);
1261 
1262 	/* Constraints on AX register:
1263 	 *
1264 	 * AX register is inaccessible from user space. It is mapped in
1265 	 * all JITs, and used here for constant blinding rewrites. It is
1266 	 * typically "stateless" meaning its contents are only valid within
1267 	 * the executed instruction, but not across several instructions.
1268 	 * There are a few exceptions however which are further detailed
1269 	 * below.
1270 	 *
1271 	 * Constant blinding is only used by JITs, not in the interpreter.
1272 	 * The interpreter uses AX in some occasions as a local temporary
1273 	 * register e.g. in DIV or MOD instructions.
1274 	 *
1275 	 * In restricted circumstances, the verifier can also use the AX
1276 	 * register for rewrites as long as they do not interfere with
1277 	 * the above cases!
1278 	 */
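	/* Illustrative rewrite (random value made up): an insn such as
	 * BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 0x1234) is emitted below as
	 *
	 *	BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ 0x1234);
	 *	BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
	 *	BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_AX);
	 *
	 * so the original constant never appears verbatim in the image.
	 */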
1279 	if (from->dst_reg == BPF_REG_AX || from->src_reg == BPF_REG_AX)
1280 		goto out;
1281 
1282 	if (from->imm == 0 &&
1283 	    (from->code == (BPF_ALU   | BPF_MOV | BPF_K) ||
1284 	     from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
1285 		*to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
1286 		goto out;
1287 	}
1288 
1289 	switch (from->code) {
1290 	case BPF_ALU | BPF_ADD | BPF_K:
1291 	case BPF_ALU | BPF_SUB | BPF_K:
1292 	case BPF_ALU | BPF_AND | BPF_K:
1293 	case BPF_ALU | BPF_OR  | BPF_K:
1294 	case BPF_ALU | BPF_XOR | BPF_K:
1295 	case BPF_ALU | BPF_MUL | BPF_K:
1296 	case BPF_ALU | BPF_MOV | BPF_K:
1297 	case BPF_ALU | BPF_DIV | BPF_K:
1298 	case BPF_ALU | BPF_MOD | BPF_K:
1299 		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1300 		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1301 		*to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX);
1302 		break;
1303 
1304 	case BPF_ALU64 | BPF_ADD | BPF_K:
1305 	case BPF_ALU64 | BPF_SUB | BPF_K:
1306 	case BPF_ALU64 | BPF_AND | BPF_K:
1307 	case BPF_ALU64 | BPF_OR  | BPF_K:
1308 	case BPF_ALU64 | BPF_XOR | BPF_K:
1309 	case BPF_ALU64 | BPF_MUL | BPF_K:
1310 	case BPF_ALU64 | BPF_MOV | BPF_K:
1311 	case BPF_ALU64 | BPF_DIV | BPF_K:
1312 	case BPF_ALU64 | BPF_MOD | BPF_K:
1313 		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1314 		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1315 		*to++ = BPF_ALU64_REG(from->code, from->dst_reg, BPF_REG_AX);
1316 		break;
1317 
1318 	case BPF_JMP | BPF_JEQ  | BPF_K:
1319 	case BPF_JMP | BPF_JNE  | BPF_K:
1320 	case BPF_JMP | BPF_JGT  | BPF_K:
1321 	case BPF_JMP | BPF_JLT  | BPF_K:
1322 	case BPF_JMP | BPF_JGE  | BPF_K:
1323 	case BPF_JMP | BPF_JLE  | BPF_K:
1324 	case BPF_JMP | BPF_JSGT | BPF_K:
1325 	case BPF_JMP | BPF_JSLT | BPF_K:
1326 	case BPF_JMP | BPF_JSGE | BPF_K:
1327 	case BPF_JMP | BPF_JSLE | BPF_K:
1328 	case BPF_JMP | BPF_JSET | BPF_K:
1329 		/* Accommodate for extra offset in case of a backjump. */
1330 		off = from->off;
1331 		if (off < 0)
1332 			off -= 2;
1333 		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1334 		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1335 		*to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
1336 		break;
1337 
1338 	case BPF_JMP32 | BPF_JEQ  | BPF_K:
1339 	case BPF_JMP32 | BPF_JNE  | BPF_K:
1340 	case BPF_JMP32 | BPF_JGT  | BPF_K:
1341 	case BPF_JMP32 | BPF_JLT  | BPF_K:
1342 	case BPF_JMP32 | BPF_JGE  | BPF_K:
1343 	case BPF_JMP32 | BPF_JLE  | BPF_K:
1344 	case BPF_JMP32 | BPF_JSGT | BPF_K:
1345 	case BPF_JMP32 | BPF_JSLT | BPF_K:
1346 	case BPF_JMP32 | BPF_JSGE | BPF_K:
1347 	case BPF_JMP32 | BPF_JSLE | BPF_K:
1348 	case BPF_JMP32 | BPF_JSET | BPF_K:
1349 		/* Accommodate for extra offset in case of a backjump. */
1350 		off = from->off;
1351 		if (off < 0)
1352 			off -= 2;
1353 		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1354 		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1355 		*to++ = BPF_JMP32_REG(from->code, from->dst_reg, BPF_REG_AX,
1356 				      off);
1357 		break;
1358 
1359 	case BPF_LD | BPF_IMM | BPF_DW:
1360 		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
1361 		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1362 		*to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
1363 		*to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
1364 		break;
1365 	case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
1366 		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
1367 		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1368 		if (emit_zext)
1369 			*to++ = BPF_ZEXT_REG(BPF_REG_AX);
1370 		*to++ = BPF_ALU64_REG(BPF_OR,  aux[0].dst_reg, BPF_REG_AX);
1371 		break;
1372 
1373 	case BPF_ST | BPF_MEM | BPF_DW:
1374 	case BPF_ST | BPF_MEM | BPF_W:
1375 	case BPF_ST | BPF_MEM | BPF_H:
1376 	case BPF_ST | BPF_MEM | BPF_B:
1377 		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1378 		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1379 		*to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
1380 		break;
1381 	}
1382 out:
1383 	return to - to_buff;
1384 }
1385 
1386 static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
1387 					      gfp_t gfp_extra_flags)
1388 {
1389 	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
1390 	struct bpf_prog *fp;
1391 
1392 	fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags);
1393 	if (fp != NULL) {
1394 		/* aux->prog still points to the fp_other one, so
1395 		 * when promoting the clone to the real program,
1396 		 * this still needs to be adapted.
1397 		 */
1398 		memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
1399 	}
1400 
1401 	return fp;
1402 }
1403 
1404 static void bpf_prog_clone_free(struct bpf_prog *fp)
1405 {
1406 	/* aux was stolen by the other clone, so we cannot free
1407 	 * it from this path! It will be freed eventually by the
1408 	 * other program on release.
1409 	 *
1410 	 * At this point, we don't need a deferred release since
1411 	 * clone is guaranteed to not be locked.
1412 	 */
1413 	fp->aux = NULL;
1414 	fp->stats = NULL;
1415 	fp->active = NULL;
1416 	__bpf_prog_free(fp);
1417 }
1418 
1419 void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
1420 {
1421 	/* We have to repoint aux->prog to self, as we don't
1422 	 * know whether fp here is the clone or the original.
1423 	 */
1424 	fp->aux->prog = fp;
1425 	bpf_prog_clone_free(fp_other);
1426 }
1427 
1428 struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
1429 {
1430 	struct bpf_insn insn_buff[16], aux[2];
1431 	struct bpf_prog *clone, *tmp;
1432 	int insn_delta, insn_cnt;
1433 	struct bpf_insn *insn;
1434 	int i, rewritten;
1435 
1436 	if (!prog->blinding_requested || prog->blinded)
1437 		return prog;
1438 
1439 	clone = bpf_prog_clone_create(prog, GFP_USER);
1440 	if (!clone)
1441 		return ERR_PTR(-ENOMEM);
1442 
1443 	insn_cnt = clone->len;
1444 	insn = clone->insnsi;
1445 
1446 	for (i = 0; i < insn_cnt; i++, insn++) {
1447 		if (bpf_pseudo_func(insn)) {
1448 			/* ld_imm64 with an address of bpf subprog is not
1449 			 * a user controlled constant. Don't randomize it,
1450 			 * since it will conflict with jit_subprogs() logic.
1451 			 */
1452 			insn++;
1453 			i++;
1454 			continue;
1455 		}
1456 
1457 		/* We temporarily need to hold the original ld64 insn
1458 		 * so that we can still access the first part in the
1459 		 * second blinding run.
1460 		 */
1461 		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
1462 		    insn[1].code == 0)
1463 			memcpy(aux, insn, sizeof(aux));
1464 
1465 		rewritten = bpf_jit_blind_insn(insn, aux, insn_buff,
1466 						clone->aux->verifier_zext);
1467 		if (!rewritten)
1468 			continue;
1469 
1470 		tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
1471 		if (IS_ERR(tmp)) {
1472 			/* Patching may have repointed aux->prog during
1473 			 * realloc from the original one, so we need to
1474 			 * fix it up here on error.
1475 			 */
1476 			bpf_jit_prog_release_other(prog, clone);
1477 			return tmp;
1478 		}
1479 
1480 		clone = tmp;
1481 		insn_delta = rewritten - 1;
1482 
1483 		/* Walk new program and skip insns we just inserted. */
1484 		insn = clone->insnsi + i + insn_delta;
1485 		insn_cnt += insn_delta;
1486 		i        += insn_delta;
1487 	}
1488 
1489 	clone->blinded = 1;
1490 	return clone;
1491 }
1492 #endif /* CONFIG_BPF_JIT */
1493 
1494 /* Base function for offset calculation. Needs to go into .text section,
1495  * therefore keeping it non-static as well; will also be used by JITs
1496  * anyway later on, so do not let the compiler omit it. This also needs
1497  * to go into kallsyms for correlation from e.g. bpftool, so naming
1498  * must not change.
1499  */
1500 noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
1501 {
1502 	return 0;
1503 }
1504 EXPORT_SYMBOL_GPL(__bpf_call_base);
1505 
1506 /* All UAPI available opcodes. */
1507 #define BPF_INSN_MAP(INSN_2, INSN_3)		\
1508 	/* 32 bit ALU operations. */		\
1509 	/*   Register based. */			\
1510 	INSN_3(ALU, ADD,  X),			\
1511 	INSN_3(ALU, SUB,  X),			\
1512 	INSN_3(ALU, AND,  X),			\
1513 	INSN_3(ALU, OR,   X),			\
1514 	INSN_3(ALU, LSH,  X),			\
1515 	INSN_3(ALU, RSH,  X),			\
1516 	INSN_3(ALU, XOR,  X),			\
1517 	INSN_3(ALU, MUL,  X),			\
1518 	INSN_3(ALU, MOV,  X),			\
1519 	INSN_3(ALU, ARSH, X),			\
1520 	INSN_3(ALU, DIV,  X),			\
1521 	INSN_3(ALU, MOD,  X),			\
1522 	INSN_2(ALU, NEG),			\
1523 	INSN_3(ALU, END, TO_BE),		\
1524 	INSN_3(ALU, END, TO_LE),		\
1525 	/*   Immediate based. */		\
1526 	INSN_3(ALU, ADD,  K),			\
1527 	INSN_3(ALU, SUB,  K),			\
1528 	INSN_3(ALU, AND,  K),			\
1529 	INSN_3(ALU, OR,   K),			\
1530 	INSN_3(ALU, LSH,  K),			\
1531 	INSN_3(ALU, RSH,  K),			\
1532 	INSN_3(ALU, XOR,  K),			\
1533 	INSN_3(ALU, MUL,  K),			\
1534 	INSN_3(ALU, MOV,  K),			\
1535 	INSN_3(ALU, ARSH, K),			\
1536 	INSN_3(ALU, DIV,  K),			\
1537 	INSN_3(ALU, MOD,  K),			\
1538 	/* 64 bit ALU operations. */		\
1539 	/*   Register based. */			\
1540 	INSN_3(ALU64, ADD,  X),			\
1541 	INSN_3(ALU64, SUB,  X),			\
1542 	INSN_3(ALU64, AND,  X),			\
1543 	INSN_3(ALU64, OR,   X),			\
1544 	INSN_3(ALU64, LSH,  X),			\
1545 	INSN_3(ALU64, RSH,  X),			\
1546 	INSN_3(ALU64, XOR,  X),			\
1547 	INSN_3(ALU64, MUL,  X),			\
1548 	INSN_3(ALU64, MOV,  X),			\
1549 	INSN_3(ALU64, ARSH, X),			\
1550 	INSN_3(ALU64, DIV,  X),			\
1551 	INSN_3(ALU64, MOD,  X),			\
1552 	INSN_2(ALU64, NEG),			\
1553 	/*   Immediate based. */		\
1554 	INSN_3(ALU64, ADD,  K),			\
1555 	INSN_3(ALU64, SUB,  K),			\
1556 	INSN_3(ALU64, AND,  K),			\
1557 	INSN_3(ALU64, OR,   K),			\
1558 	INSN_3(ALU64, LSH,  K),			\
1559 	INSN_3(ALU64, RSH,  K),			\
1560 	INSN_3(ALU64, XOR,  K),			\
1561 	INSN_3(ALU64, MUL,  K),			\
1562 	INSN_3(ALU64, MOV,  K),			\
1563 	INSN_3(ALU64, ARSH, K),			\
1564 	INSN_3(ALU64, DIV,  K),			\
1565 	INSN_3(ALU64, MOD,  K),			\
1566 	/* Call instruction. */			\
1567 	INSN_2(JMP, CALL),			\
1568 	/* Exit instruction. */			\
1569 	INSN_2(JMP, EXIT),			\
1570 	/* 32-bit Jump instructions. */		\
1571 	/*   Register based. */			\
1572 	INSN_3(JMP32, JEQ,  X),			\
1573 	INSN_3(JMP32, JNE,  X),			\
1574 	INSN_3(JMP32, JGT,  X),			\
1575 	INSN_3(JMP32, JLT,  X),			\
1576 	INSN_3(JMP32, JGE,  X),			\
1577 	INSN_3(JMP32, JLE,  X),			\
1578 	INSN_3(JMP32, JSGT, X),			\
1579 	INSN_3(JMP32, JSLT, X),			\
1580 	INSN_3(JMP32, JSGE, X),			\
1581 	INSN_3(JMP32, JSLE, X),			\
1582 	INSN_3(JMP32, JSET, X),			\
1583 	/*   Immediate based. */		\
1584 	INSN_3(JMP32, JEQ,  K),			\
1585 	INSN_3(JMP32, JNE,  K),			\
1586 	INSN_3(JMP32, JGT,  K),			\
1587 	INSN_3(JMP32, JLT,  K),			\
1588 	INSN_3(JMP32, JGE,  K),			\
1589 	INSN_3(JMP32, JLE,  K),			\
1590 	INSN_3(JMP32, JSGT, K),			\
1591 	INSN_3(JMP32, JSLT, K),			\
1592 	INSN_3(JMP32, JSGE, K),			\
1593 	INSN_3(JMP32, JSLE, K),			\
1594 	INSN_3(JMP32, JSET, K),			\
1595 	/* Jump instructions. */		\
1596 	/*   Register based. */			\
1597 	INSN_3(JMP, JEQ,  X),			\
1598 	INSN_3(JMP, JNE,  X),			\
1599 	INSN_3(JMP, JGT,  X),			\
1600 	INSN_3(JMP, JLT,  X),			\
1601 	INSN_3(JMP, JGE,  X),			\
1602 	INSN_3(JMP, JLE,  X),			\
1603 	INSN_3(JMP, JSGT, X),			\
1604 	INSN_3(JMP, JSLT, X),			\
1605 	INSN_3(JMP, JSGE, X),			\
1606 	INSN_3(JMP, JSLE, X),			\
1607 	INSN_3(JMP, JSET, X),			\
1608 	/*   Immediate based. */		\
1609 	INSN_3(JMP, JEQ,  K),			\
1610 	INSN_3(JMP, JNE,  K),			\
1611 	INSN_3(JMP, JGT,  K),			\
1612 	INSN_3(JMP, JLT,  K),			\
1613 	INSN_3(JMP, JGE,  K),			\
1614 	INSN_3(JMP, JLE,  K),			\
1615 	INSN_3(JMP, JSGT, K),			\
1616 	INSN_3(JMP, JSLT, K),			\
1617 	INSN_3(JMP, JSGE, K),			\
1618 	INSN_3(JMP, JSLE, K),			\
1619 	INSN_3(JMP, JSET, K),			\
1620 	INSN_2(JMP, JA),			\
1621 	/* Store instructions. */		\
1622 	/*   Register based. */			\
1623 	INSN_3(STX, MEM,  B),			\
1624 	INSN_3(STX, MEM,  H),			\
1625 	INSN_3(STX, MEM,  W),			\
1626 	INSN_3(STX, MEM,  DW),			\
1627 	INSN_3(STX, ATOMIC, W),			\
1628 	INSN_3(STX, ATOMIC, DW),		\
1629 	/*   Immediate based. */		\
1630 	INSN_3(ST, MEM, B),			\
1631 	INSN_3(ST, MEM, H),			\
1632 	INSN_3(ST, MEM, W),			\
1633 	INSN_3(ST, MEM, DW),			\
1634 	/* Load instructions. */		\
1635 	/*   Register based. */			\
1636 	INSN_3(LDX, MEM, B),			\
1637 	INSN_3(LDX, MEM, H),			\
1638 	INSN_3(LDX, MEM, W),			\
1639 	INSN_3(LDX, MEM, DW),			\
1640 	/*   Immediate based. */		\
1641 	INSN_3(LD, IMM, DW)
1642 
1643 bool bpf_opcode_in_insntable(u8 code)
1644 {
1645 #define BPF_INSN_2_TBL(x, y)    [BPF_##x | BPF_##y] = true
1646 #define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true
1647 	static const bool public_insntable[256] = {
1648 		[0 ... 255] = false,
1649 		/* Now overwrite non-defaults ... */
1650 		BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL),
1651 		/* UAPI exposed, but rewritten opcodes. cBPF carry-over. */
1652 		[BPF_LD | BPF_ABS | BPF_B] = true,
1653 		[BPF_LD | BPF_ABS | BPF_H] = true,
1654 		[BPF_LD | BPF_ABS | BPF_W] = true,
1655 		[BPF_LD | BPF_IND | BPF_B] = true,
1656 		[BPF_LD | BPF_IND | BPF_H] = true,
1657 		[BPF_LD | BPF_IND | BPF_W] = true,
1658 	};
1659 #undef BPF_INSN_3_TBL
1660 #undef BPF_INSN_2_TBL
1661 	return public_insntable[code];
1662 }
1663 
1664 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
1665 u64 __weak bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr)
1666 {
1667 	memset(dst, 0, size);
1668 	return -EFAULT;
1669 }
1670 
1671 /**
1672  *	___bpf_prog_run - run eBPF program on a given context
1673  *	@regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers
1674  *	@insn: is the array of eBPF instructions
1675  *
1676  * Decode and execute eBPF instructions.
1677  *
1678  * Return: whatever value is in %BPF_R0 at program exit
1679  */
1680 static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
1681 {
1682 #define BPF_INSN_2_LBL(x, y)    [BPF_##x | BPF_##y] = &&x##_##y
1683 #define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
1684 	static const void * const jumptable[256] __annotate_jump_table = {
1685 		[0 ... 255] = &&default_label,
1686 		/* Now overwrite non-defaults ... */
1687 		BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL),
1688 		/* Non-UAPI available opcodes. */
1689 		[BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
1690 		[BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
1691 		[BPF_ST  | BPF_NOSPEC] = &&ST_NOSPEC,
1692 		[BPF_LDX | BPF_PROBE_MEM | BPF_B] = &&LDX_PROBE_MEM_B,
1693 		[BPF_LDX | BPF_PROBE_MEM | BPF_H] = &&LDX_PROBE_MEM_H,
1694 		[BPF_LDX | BPF_PROBE_MEM | BPF_W] = &&LDX_PROBE_MEM_W,
1695 		[BPF_LDX | BPF_PROBE_MEM | BPF_DW] = &&LDX_PROBE_MEM_DW,
1696 	};
1697 #undef BPF_INSN_3_LBL
1698 #undef BPF_INSN_2_LBL
1699 	u32 tail_call_cnt = 0;
1700 
1701 #define CONT	 ({ insn++; goto select_insn; })
1702 #define CONT_JMP ({ insn++; goto select_insn; })
1703 
1704 select_insn:
1705 	goto *jumptable[insn->code];
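	/* Dispatch note: each opcode handler below is a local label reached
	 * through the computed goto above (gcc's labels-as-values extension).
	 * Handlers normally end in CONT, which advances insn and jumps back
	 * to select_insn, one indirect branch per executed instruction.
	 */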
1706 
1707 	/* Explicitly mask the register-based shift amounts with 63 or 31
1708 	 * to avoid undefined behavior. Normally this won't affect the
1709 	 * generated code, for example, in case of native 64 bit archs such
1710 	 * as x86-64 or arm64, the compiler is optimizing the AND away for
1711 	 * the interpreter. In case of JITs, each of the JIT backends compiles
1712 	 * the BPF shift operations to machine instructions which produce
1713 	 * implementation-defined results in such a case; the resulting
1714 	 * contents of the register may be arbitrary, but program behaviour
1715 	 * as a whole remains defined. In other words, in case of JIT backends,
1716 	 * the AND must /not/ be added to the emitted LSH/RSH/ARSH translation.
1717 	 */
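	/* For example: a 64-bit register-based LSH with a runtime shift amount
	 * of 70 shifts by 70 & 63 == 6 in this interpreter, while a JIT is
	 * free to produce whatever its native shift instruction does for an
	 * out-of-range amount.
	 */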
1718 	/* ALU (shifts) */
1719 #define SHT(OPCODE, OP)					\
1720 	ALU64_##OPCODE##_X:				\
1721 		DST = DST OP (SRC & 63);		\
1722 		CONT;					\
1723 	ALU_##OPCODE##_X:				\
1724 		DST = (u32) DST OP ((u32) SRC & 31);	\
1725 		CONT;					\
1726 	ALU64_##OPCODE##_K:				\
1727 		DST = DST OP IMM;			\
1728 		CONT;					\
1729 	ALU_##OPCODE##_K:				\
1730 		DST = (u32) DST OP (u32) IMM;		\
1731 		CONT;
1732 	/* ALU (rest) */
1733 #define ALU(OPCODE, OP)					\
1734 	ALU64_##OPCODE##_X:				\
1735 		DST = DST OP SRC;			\
1736 		CONT;					\
1737 	ALU_##OPCODE##_X:				\
1738 		DST = (u32) DST OP (u32) SRC;		\
1739 		CONT;					\
1740 	ALU64_##OPCODE##_K:				\
1741 		DST = DST OP IMM;			\
1742 		CONT;					\
1743 	ALU_##OPCODE##_K:				\
1744 		DST = (u32) DST OP (u32) IMM;		\
1745 		CONT;
1746 	ALU(ADD,  +)
1747 	ALU(SUB,  -)
1748 	ALU(AND,  &)
1749 	ALU(OR,   |)
1750 	ALU(XOR,  ^)
1751 	ALU(MUL,  *)
1752 	SHT(LSH, <<)
1753 	SHT(RSH, >>)
1754 #undef SHT
1755 #undef ALU
1756 	ALU_NEG:
1757 		DST = (u32) -DST;
1758 		CONT;
1759 	ALU64_NEG:
1760 		DST = -DST;
1761 		CONT;
1762 	ALU_MOV_X:
1763 		DST = (u32) SRC;
1764 		CONT;
1765 	ALU_MOV_K:
1766 		DST = (u32) IMM;
1767 		CONT;
1768 	ALU64_MOV_X:
1769 		DST = SRC;
1770 		CONT;
1771 	ALU64_MOV_K:
1772 		DST = IMM;
1773 		CONT;
1774 	LD_IMM_DW:
1775 		DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
1776 		insn++;
1777 		CONT;
1778 	ALU_ARSH_X:
1779 		DST = (u64) (u32) (((s32) DST) >> (SRC & 31));
1780 		CONT;
1781 	ALU_ARSH_K:
1782 		DST = (u64) (u32) (((s32) DST) >> IMM);
1783 		CONT;
1784 	ALU64_ARSH_X:
1785 		(*(s64 *) &DST) >>= (SRC & 63);
1786 		CONT;
1787 	ALU64_ARSH_K:
1788 		(*(s64 *) &DST) >>= IMM;
1789 		CONT;
1790 	ALU64_MOD_X:
1791 		div64_u64_rem(DST, SRC, &AX);
1792 		DST = AX;
1793 		CONT;
1794 	ALU_MOD_X:
1795 		AX = (u32) DST;
1796 		DST = do_div(AX, (u32) SRC);
1797 		CONT;
1798 	ALU64_MOD_K:
1799 		div64_u64_rem(DST, IMM, &AX);
1800 		DST = AX;
1801 		CONT;
1802 	ALU_MOD_K:
1803 		AX = (u32) DST;
1804 		DST = do_div(AX, (u32) IMM);
1805 		CONT;
1806 	ALU64_DIV_X:
1807 		DST = div64_u64(DST, SRC);
1808 		CONT;
1809 	ALU_DIV_X:
1810 		AX = (u32) DST;
1811 		do_div(AX, (u32) SRC);
1812 		DST = (u32) AX;
1813 		CONT;
1814 	ALU64_DIV_K:
1815 		DST = div64_u64(DST, IMM);
1816 		CONT;
1817 	ALU_DIV_K:
1818 		AX = (u32) DST;
1819 		do_div(AX, (u32) IMM);
1820 		DST = (u32) AX;
1821 		CONT;
1822 	ALU_END_TO_BE:
1823 		switch (IMM) {
1824 		case 16:
1825 			DST = (__force u16) cpu_to_be16(DST);
1826 			break;
1827 		case 32:
1828 			DST = (__force u32) cpu_to_be32(DST);
1829 			break;
1830 		case 64:
1831 			DST = (__force u64) cpu_to_be64(DST);
1832 			break;
1833 		}
1834 		CONT;
1835 	ALU_END_TO_LE:
1836 		switch (IMM) {
1837 		case 16:
1838 			DST = (__force u16) cpu_to_le16(DST);
1839 			break;
1840 		case 32:
1841 			DST = (__force u32) cpu_to_le32(DST);
1842 			break;
1843 		case 64:
1844 			DST = (__force u64) cpu_to_le64(DST);
1845 			break;
1846 		}
1847 		CONT;
1848 
1849 	/* CALL */
1850 	JMP_CALL:
1851 		/* Function call scratches BPF_R1-BPF_R5 registers,
1852 		 * preserves BPF_R6-BPF_R9, and stores return value
1853 		 * into BPF_R0.
1854 		 */
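		/* insn->imm holds the helper's address as an offset from
		 * __bpf_call_base, as set up by the verifier.
		 */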
1855 		BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
1856 						       BPF_R4, BPF_R5);
1857 		CONT;
1858 
1859 	JMP_CALL_ARGS:
1860 		BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2,
1861 							    BPF_R3, BPF_R4,
1862 							    BPF_R5,
1863 							    insn + insn->off + 1);
1864 		CONT;
1865 
1866 	JMP_TAIL_CALL: {
1867 		struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
1868 		struct bpf_array *array = container_of(map, struct bpf_array, map);
1869 		struct bpf_prog *prog;
1870 		u32 index = BPF_R3;
1871 
1872 		if (unlikely(index >= array->map.max_entries))
1873 			goto out;
1874 
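		/* Bound the chain of tail calls at MAX_TAIL_CALL_CNT to
		 * guarantee termination; once the limit is hit the tail call
		 * becomes a no-op and execution continues with the next
		 * instruction.
		 */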
1875 		if (unlikely(tail_call_cnt >= MAX_TAIL_CALL_CNT))
1876 			goto out;
1877 
1878 		tail_call_cnt++;
1879 
1880 		prog = READ_ONCE(array->ptrs[index]);
1881 		if (!prog)
1882 			goto out;
1883 
1884 		/* ARG1 at this point is guaranteed to point to CTX from
1885 		 * the verifier side because the tail call is handled like
1886 		 * a helper (bpf_tail_call_proto), whose arg1_type is
1887 		 * ARG_PTR_TO_CTX.
1888 		 */
1889 		insn = prog->insnsi;
1890 		goto select_insn;
1891 out:
1892 		CONT;
1893 	}
1894 	JMP_JA:
1895 		insn += insn->off;
1896 		CONT;
1897 	JMP_EXIT:
1898 		return BPF_R0;
1899 	/* JMP */
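	/* JMP_* opcodes compare the full 64-bit register values, JMP32_*
	 * only the low 32 bits; the SIGN parameter selects signed (s) or
	 * unsigned (u) comparison casts.
	 */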
1900 #define COND_JMP(SIGN, OPCODE, CMP_OP)				\
1901 	JMP_##OPCODE##_X:					\
1902 		if ((SIGN##64) DST CMP_OP (SIGN##64) SRC) {	\
1903 			insn += insn->off;			\
1904 			CONT_JMP;				\
1905 		}						\
1906 		CONT;						\
1907 	JMP32_##OPCODE##_X:					\
1908 		if ((SIGN##32) DST CMP_OP (SIGN##32) SRC) {	\
1909 			insn += insn->off;			\
1910 			CONT_JMP;				\
1911 		}						\
1912 		CONT;						\
1913 	JMP_##OPCODE##_K:					\
1914 		if ((SIGN##64) DST CMP_OP (SIGN##64) IMM) {	\
1915 			insn += insn->off;			\
1916 			CONT_JMP;				\
1917 		}						\
1918 		CONT;						\
1919 	JMP32_##OPCODE##_K:					\
1920 		if ((SIGN##32) DST CMP_OP (SIGN##32) IMM) {	\
1921 			insn += insn->off;			\
1922 			CONT_JMP;				\
1923 		}						\
1924 		CONT;
1925 	COND_JMP(u, JEQ, ==)
1926 	COND_JMP(u, JNE, !=)
1927 	COND_JMP(u, JGT, >)
1928 	COND_JMP(u, JLT, <)
1929 	COND_JMP(u, JGE, >=)
1930 	COND_JMP(u, JLE, <=)
1931 	COND_JMP(u, JSET, &)
1932 	COND_JMP(s, JSGT, >)
1933 	COND_JMP(s, JSLT, <)
1934 	COND_JMP(s, JSGE, >=)
1935 	COND_JMP(s, JSLE, <=)
1936 #undef COND_JMP
1937 	/* ST, STX and LDX */
1938 	ST_NOSPEC:
1939 		/* Speculation barrier for mitigating Speculative Store Bypass.
1940 		 * In case of arm64, we rely on the firmware mitigation as
1941 		 * controlled via the ssbd kernel parameter. Whenever the
1942 		 * mitigation is enabled, it works for all of the kernel code
1943 		 * with no need to provide any additional instructions here.
1944 		 * In case of x86, we use 'lfence' insn for mitigation. We
1945 		 * reuse preexisting logic from Spectre v1 mitigation that
1946 		 * happens to produce the required code on x86 for v4 as well.
1947 		 */
1948 #ifdef CONFIG_X86
1949 		barrier_nospec();
1950 #endif
1951 		CONT;
1952 #define LDST(SIZEOP, SIZE)						\
1953 	STX_MEM_##SIZEOP:						\
1954 		*(SIZE *)(unsigned long) (DST + insn->off) = SRC;	\
1955 		CONT;							\
1956 	ST_MEM_##SIZEOP:						\
1957 		*(SIZE *)(unsigned long) (DST + insn->off) = IMM;	\
1958 		CONT;							\
1959 	LDX_MEM_##SIZEOP:						\
1960 		DST = *(SIZE *)(unsigned long) (SRC + insn->off);	\
1961 		CONT;							\
1962 	LDX_PROBE_MEM_##SIZEOP:						\
1963 		bpf_probe_read_kernel(&DST, sizeof(SIZE),		\
1964 				      (const void *)(long) (SRC + insn->off));	\
1965 		DST = *((SIZE *)&DST);					\
1966 		CONT;
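	/* The LDX_PROBE_MEM_* variants above load via bpf_probe_read_kernel(),
	 * so a faulting kernel address does not oops the kernel.
	 */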
1967 
1968 	LDST(B,   u8)
1969 	LDST(H,  u16)
1970 	LDST(W,  u32)
1971 	LDST(DW, u64)
1972 #undef LDST
1973 
1974 #define ATOMIC_ALU_OP(BOP, KOP)						\
1975 		case BOP:						\
1976 			if (BPF_SIZE(insn->code) == BPF_W)		\
1977 				atomic_##KOP((u32) SRC, (atomic_t *)(unsigned long) \
1978 					     (DST + insn->off));	\
1979 			else						\
1980 				atomic64_##KOP((u64) SRC, (atomic64_t *)(unsigned long) \
1981 					       (DST + insn->off));	\
1982 			break;						\
1983 		case BOP | BPF_FETCH:					\
1984 			if (BPF_SIZE(insn->code) == BPF_W)		\
1985 				SRC = (u32) atomic_fetch_##KOP(		\
1986 					(u32) SRC,			\
1987 					(atomic_t *)(unsigned long) (DST + insn->off)); \
1988 			else						\
1989 				SRC = (u64) atomic64_fetch_##KOP(	\
1990 					(u64) SRC,			\
1991 					(atomic64_t *)(unsigned long) (DST + insn->off)); \
1992 			break;
1993 
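	/* Atomic RMW instructions: BPF_W sized operands map to atomic_*() and
	 * BPF_DW sized ones to atomic64_*(); the BPF_FETCH variants
	 * additionally return the old memory value in the source register.
	 */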
1994 	STX_ATOMIC_DW:
1995 	STX_ATOMIC_W:
1996 		switch (IMM) {
1997 		ATOMIC_ALU_OP(BPF_ADD, add)
1998 		ATOMIC_ALU_OP(BPF_AND, and)
1999 		ATOMIC_ALU_OP(BPF_OR, or)
2000 		ATOMIC_ALU_OP(BPF_XOR, xor)
2001 #undef ATOMIC_ALU_OP
2002 
2003 		case BPF_XCHG:
2004 			if (BPF_SIZE(insn->code) == BPF_W)
2005 				SRC = (u32) atomic_xchg(
2006 					(atomic_t *)(unsigned long) (DST + insn->off),
2007 					(u32) SRC);
2008 			else
2009 				SRC = (u64) atomic64_xchg(
2010 					(atomic64_t *)(unsigned long) (DST + insn->off),
2011 					(u64) SRC);
2012 			break;
2013 		case BPF_CMPXCHG:
2014 			if (BPF_SIZE(insn->code) == BPF_W)
2015 				BPF_R0 = (u32) atomic_cmpxchg(
2016 					(atomic_t *)(unsigned long) (DST + insn->off),
2017 					(u32) BPF_R0, (u32) SRC);
2018 			else
2019 				BPF_R0 = (u64) atomic64_cmpxchg(
2020 					(atomic64_t *)(unsigned long) (DST + insn->off),
2021 					(u64) BPF_R0, (u64) SRC);
2022 			break;
2023 
2024 		default:
2025 			goto default_label;
2026 		}
2027 		CONT;
2028 
2029 	default_label:
2030 		/* If we ever reach this, we have a bug somewhere. Die hard here
2031 		 * instead of just returning 0; we could be somewhere in a subprog,
2032 		 * so execution could otherwise continue, which we do /not/ want.
2033 		 *
2034 		 * Note, the verifier whitelists all opcodes in bpf_opcode_in_insntable().
2035 		 */
2036 		pr_warn("BPF interpreter: unknown opcode %02x (imm: 0x%x)\n",
2037 			insn->code, insn->imm);
2038 		BUG_ON(1);
2039 		return 0;
2040 }
2041 
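/* Each __bpf_prog_run<stack_size>() variant below reserves <stack_size> bytes
 * of on-stack BPF scratch space, points FP one past its end (the BPF stack
 * grows downwards) and then enters the common interpreter loop above.
 */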
2042 #define PROG_NAME(stack_size) __bpf_prog_run##stack_size
2043 #define DEFINE_BPF_PROG_RUN(stack_size) \
2044 static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
2045 { \
2046 	u64 stack[stack_size / sizeof(u64)]; \
2047 	u64 regs[MAX_BPF_EXT_REG]; \
2048 \
2049 	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
2050 	ARG1 = (u64) (unsigned long) ctx; \
2051 	return ___bpf_prog_run(regs, insn); \
2052 }
2053 
2054 #define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size
2055 #define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \
2056 static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
2057 				      const struct bpf_insn *insn) \
2058 { \
2059 	u64 stack[stack_size / sizeof(u64)]; \
2060 	u64 regs[MAX_BPF_EXT_REG]; \
2061 \
2062 	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
2063 	BPF_R1 = r1; \
2064 	BPF_R2 = r2; \
2065 	BPF_R3 = r3; \
2066 	BPF_R4 = r4; \
2067 	BPF_R5 = r5; \
2068 	return ___bpf_prog_run(regs, insn); \
2069 }
2070 
2071 #define EVAL1(FN, X) FN(X)
2072 #define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
2073 #define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
2074 #define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y)
2075 #define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y)
2076 #define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y)
2077 
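/* EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, ...) expands the given macro once per
 * listed stack size, generating __bpf_prog_run32() through __bpf_prog_run512(),
 * i.e. one interpreter entry per 32-byte stack-size bucket.
 */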
2078 EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
2079 EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
2080 EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);
2081 
2082 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192);
2083 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384);
2084 EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);
2085 
2086 #define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),
2087 
2088 static unsigned int (*interpreters[])(const void *ctx,
2089 				      const struct bpf_insn *insn) = {
2090 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
2091 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
2092 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
2093 };
2094 #undef PROG_NAME_LIST
2095 #define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size),
2096 static u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5,
2097 				  const struct bpf_insn *insn) = {
2098 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
2099 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
2100 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
2101 };
2102 #undef PROG_NAME_LIST
2103 
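/* Turn a BPF-to-BPF call insn into a BPF_CALL_ARGS instruction for the
 * interpreter: the subprog offset is preserved in insn->off and insn->imm is
 * rewritten to select the __bpf_prog_run_args<N>() variant matching the
 * callee's stack depth.
 */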
2104 void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
2105 {
2106 	stack_depth = max_t(u32, stack_depth, 1);
2107 	insn->off = (s16) insn->imm;
2108 	insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] -
2109 		__bpf_call_base_args;
2110 	insn->code = BPF_JMP | BPF_CALL_ARGS;
2111 }
2112 
2113 #else
2114 static unsigned int __bpf_prog_ret0_warn(const void *ctx,
2115 					 const struct bpf_insn *insn)
2116 {
2117 	/* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
2118 	 * is not working properly, so warn about it!
2119 	 */
2120 	WARN_ON_ONCE(1);
2121 	return 0;
2122 }
2123 #endif
2124 
2125 bool bpf_prog_map_compatible(struct bpf_map *map,
2126 			     const struct bpf_prog *fp)
2127 {
2128 	bool ret;
2129 
2130 	if (fp->kprobe_override)
2131 		return false;
2132 
2133 	spin_lock(&map->owner.lock);
2134 	if (!map->owner.type) {
2135 		/* There's no owner yet where we could check for
2136 		 * compatibility.
2137 		 */
2138 		map->owner.type  = fp->type;
2139 		map->owner.jited = fp->jited;
2140 		map->owner.xdp_has_frags = fp->aux->xdp_has_frags;
2141 		ret = true;
2142 	} else {
2143 		ret = map->owner.type  == fp->type &&
2144 		      map->owner.jited == fp->jited &&
2145 		      map->owner.xdp_has_frags == fp->aux->xdp_has_frags;
2146 	}
2147 	spin_unlock(&map->owner.lock);
2148 
2149 	return ret;
2150 }
2151 
2152 static int bpf_check_tail_call(const struct bpf_prog *fp)
2153 {
2154 	struct bpf_prog_aux *aux = fp->aux;
2155 	int i, ret = 0;
2156 
2157 	mutex_lock(&aux->used_maps_mutex);
2158 	for (i = 0; i < aux->used_map_cnt; i++) {
2159 		struct bpf_map *map = aux->used_maps[i];
2160 
2161 		if (!map_type_contains_progs(map))
2162 			continue;
2163 
2164 		if (!bpf_prog_map_compatible(map, fp)) {
2165 			ret = -EINVAL;
2166 			goto out;
2167 		}
2168 	}
2169 
2170 out:
2171 	mutex_unlock(&aux->used_maps_mutex);
2172 	return ret;
2173 }
2174 
2175 static void bpf_prog_select_func(struct bpf_prog *fp)
2176 {
2177 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
2178 	u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
2179 
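	/* e.g. stack_depth 1..32 selects interpreters[0] (__bpf_prog_run32),
	 * 33..64 selects interpreters[1] (__bpf_prog_run64), and so on.
	 */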
2180 	fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
2181 #else
2182 	fp->bpf_func = __bpf_prog_ret0_warn;
2183 #endif
2184 }
2185 
2186 /**
2187  *	bpf_prog_select_runtime - select exec runtime for BPF program
2188  *	@fp: bpf_prog populated with BPF program
2189  *	@err: pointer to error variable
2190  *
2191  * Try to JIT eBPF program, if JIT is not available, use interpreter.
2192  * The BPF program will be executed via bpf_prog_run() function.
2193  *
2194  * Return: the &fp argument along with &err set to 0 for success or
2195  * a negative errno code on failure
2196  */
2197 struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
2198 {
2199 	/* In case of BPF to BPF calls, the verifier did all the prep
2200 	 * work with regard to JITing, etc.
2201 	 */
2202 	bool jit_needed = false;
2203 
2204 	if (fp->bpf_func)
2205 		goto finalize;
2206 
2207 	if (IS_ENABLED(CONFIG_BPF_JIT_ALWAYS_ON) ||
2208 	    bpf_prog_has_kfunc_call(fp))
2209 		jit_needed = true;
2210 
2211 	bpf_prog_select_func(fp);
2212 
2213 	/* eBPF JITs can rewrite the program in case constant
2214 	 * blinding is active. However, in case of error during
2215 	 * blinding, bpf_int_jit_compile() must always return a
2216 	 * valid program, which in this case would simply not
2217 	 * be JITed, but instead run in the interpreter.
2218 	 */
2219 	if (!bpf_prog_is_dev_bound(fp->aux)) {
2220 		*err = bpf_prog_alloc_jited_linfo(fp);
2221 		if (*err)
2222 			return fp;
2223 
2224 		fp = bpf_int_jit_compile(fp);
2225 		bpf_prog_jit_attempt_done(fp);
2226 		if (!fp->jited && jit_needed) {
2227 			*err = -ENOTSUPP;
2228 			return fp;
2229 		}
2230 	} else {
2231 		*err = bpf_prog_offload_compile(fp);
2232 		if (*err)
2233 			return fp;
2234 	}
2235 
2236 finalize:
2237 	bpf_prog_lock_ro(fp);
2238 
2239 	/* The tail call compatibility check can only be done at
2240 	 * this late stage as we need to determine whether we deal
2241 	 * with JITed or non-JITed program concatenations, and not
2242 	 * all eBPF JITs might immediately support all features.
2243 	 */
2244 	*err = bpf_check_tail_call(fp);
2245 
2246 	return fp;
2247 }
2248 EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
2249 
2250 static unsigned int __bpf_prog_ret1(const void *ctx,
2251 				    const struct bpf_insn *insn)
2252 {
2253 	return 1;
2254 }
2255 
2256 static struct bpf_prog_dummy {
2257 	struct bpf_prog prog;
2258 } dummy_bpf_prog = {
2259 	.prog = {
2260 		.bpf_func = __bpf_prog_ret1,
2261 	},
2262 };
2263 
2264 struct bpf_empty_prog_array bpf_empty_prog_array = {
2265 	.null_prog = NULL,
2266 };
2267 EXPORT_SYMBOL(bpf_empty_prog_array);
2268 
2269 struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
2270 {
2271 	if (prog_cnt)
2272 		return kzalloc(sizeof(struct bpf_prog_array) +
2273 			       sizeof(struct bpf_prog_array_item) *
2274 			       (prog_cnt + 1),
2275 			       flags);
2276 
2277 	return &bpf_empty_prog_array.hdr;
2278 }
2279 
2280 void bpf_prog_array_free(struct bpf_prog_array *progs)
2281 {
2282 	if (!progs || progs == &bpf_empty_prog_array.hdr)
2283 		return;
2284 	kfree_rcu(progs, rcu);
2285 }
2286 
2287 static void __bpf_prog_array_free_sleepable_cb(struct rcu_head *rcu)
2288 {
2289 	struct bpf_prog_array *progs;
2290 
2291 	progs = container_of(rcu, struct bpf_prog_array, rcu);
2292 	kfree_rcu(progs, rcu);
2293 }
2294 
2295 void bpf_prog_array_free_sleepable(struct bpf_prog_array *progs)
2296 {
2297 	if (!progs || progs == &bpf_empty_prog_array.hdr)
2298 		return;
2299 	call_rcu_tasks_trace(&progs->rcu, __bpf_prog_array_free_sleepable_cb);
2300 }
2301 
2302 int bpf_prog_array_length(struct bpf_prog_array *array)
2303 {
2304 	struct bpf_prog_array_item *item;
2305 	u32 cnt = 0;
2306 
2307 	for (item = array->items; item->prog; item++)
2308 		if (item->prog != &dummy_bpf_prog.prog)
2309 			cnt++;
2310 	return cnt;
2311 }
2312 
2313 bool bpf_prog_array_is_empty(struct bpf_prog_array *array)
2314 {
2315 	struct bpf_prog_array_item *item;
2316 
2317 	for (item = array->items; item->prog; item++)
2318 		if (item->prog != &dummy_bpf_prog.prog)
2319 			return false;
2320 	return true;
2321 }
2322 
2323 static bool bpf_prog_array_copy_core(struct bpf_prog_array *array,
2324 				     u32 *prog_ids,
2325 				     u32 request_cnt)
2326 {
2327 	struct bpf_prog_array_item *item;
2328 	int i = 0;
2329 
2330 	for (item = array->items; item->prog; item++) {
2331 		if (item->prog == &dummy_bpf_prog.prog)
2332 			continue;
2333 		prog_ids[i] = item->prog->aux->id;
2334 		if (++i == request_cnt) {
2335 			item++;
2336 			break;
2337 		}
2338 	}
2339 
2340 	return !!(item->prog);
2341 }
2342 
2343 int bpf_prog_array_copy_to_user(struct bpf_prog_array *array,
2344 				__u32 __user *prog_ids, u32 cnt)
2345 {
2346 	unsigned long err = 0;
2347 	bool nospc;
2348 	u32 *ids;
2349 
2350 	/* users of this function are doing:
2351 	 * cnt = bpf_prog_array_length();
2352 	 * if (cnt > 0)
2353 	 *     bpf_prog_array_copy_to_user(..., cnt);
2354 	 * so the kcalloc below doesn't need an extra cnt > 0 check.
2355 	 */
2356 	ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
2357 	if (!ids)
2358 		return -ENOMEM;
2359 	nospc = bpf_prog_array_copy_core(array, ids, cnt);
2360 	err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
2361 	kfree(ids);
2362 	if (err)
2363 		return -EFAULT;
2364 	if (nospc)
2365 		return -ENOSPC;
2366 	return 0;
2367 }
2368 
2369 void bpf_prog_array_delete_safe(struct bpf_prog_array *array,
2370 				struct bpf_prog *old_prog)
2371 {
2372 	struct bpf_prog_array_item *item;
2373 
2374 	for (item = array->items; item->prog; item++)
2375 		if (item->prog == old_prog) {
2376 			WRITE_ONCE(item->prog, &dummy_bpf_prog.prog);
2377 			break;
2378 		}
2379 }
2380 
2381 /**
2382  * bpf_prog_array_delete_safe_at() - Replaces the program at the given
2383  *                                   index into the program array with
2384  *                                   a dummy no-op program.
2385  * @array: a bpf_prog_array
2386  * @index: the index of the program to replace
2387  *
2388  * Skips over dummy programs, by not counting them, when calculating
2389  * the position of the program to replace.
2390  *
2391  * Return:
2392  * * 0		- Success
2393  * * -EINVAL	- Invalid index value. Must be a non-negative integer.
2394  * * -ENOENT	- Index out of range
2395  */
2396 int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index)
2397 {
2398 	return bpf_prog_array_update_at(array, index, &dummy_bpf_prog.prog);
2399 }
2400 
2401 /**
2402  * bpf_prog_array_update_at() - Updates the program at the given index
2403  *                              into the program array.
2404  * @array: a bpf_prog_array
2405  * @index: the index of the program to update
2406  * @prog: the program to insert into the array
2407  *
2408  * Skips over dummy programs, by not counting them, when calculating
2409  * the position of the program to update.
2410  *
2411  * Return:
2412  * * 0		- Success
2413  * * -EINVAL	- Invalid index value. Must be a non-negative integer.
2414  * * -ENOENT	- Index out of range
2415  */
2416 int bpf_prog_array_update_at(struct bpf_prog_array *array, int index,
2417 			     struct bpf_prog *prog)
2418 {
2419 	struct bpf_prog_array_item *item;
2420 
2421 	if (unlikely(index < 0))
2422 		return -EINVAL;
2423 
2424 	for (item = array->items; item->prog; item++) {
2425 		if (item->prog == &dummy_bpf_prog.prog)
2426 			continue;
2427 		if (!index) {
2428 			WRITE_ONCE(item->prog, prog);
2429 			return 0;
2430 		}
2431 		index--;
2432 	}
2433 	return -ENOENT;
2434 }
2435 
2436 int bpf_prog_array_copy(struct bpf_prog_array *old_array,
2437 			struct bpf_prog *exclude_prog,
2438 			struct bpf_prog *include_prog,
2439 			u64 bpf_cookie,
2440 			struct bpf_prog_array **new_array)
2441 {
2442 	int new_prog_cnt, carry_prog_cnt = 0;
2443 	struct bpf_prog_array_item *existing, *new;
2444 	struct bpf_prog_array *array;
2445 	bool found_exclude = false;
2446 
2447 	/* Figure out how many existing progs we need to carry over to
2448 	 * the new array.
2449 	 */
2450 	if (old_array) {
2451 		existing = old_array->items;
2452 		for (; existing->prog; existing++) {
2453 			if (existing->prog == exclude_prog) {
2454 				found_exclude = true;
2455 				continue;
2456 			}
2457 			if (existing->prog != &dummy_bpf_prog.prog)
2458 				carry_prog_cnt++;
2459 			if (existing->prog == include_prog)
2460 				return -EEXIST;
2461 		}
2462 	}
2463 
2464 	if (exclude_prog && !found_exclude)
2465 		return -ENOENT;
2466 
2467 	/* How many progs (not NULL) will be in the new array? */
2468 	new_prog_cnt = carry_prog_cnt;
2469 	if (include_prog)
2470 		new_prog_cnt += 1;
2471 
2472 	/* Do we have any prog (not NULL) in the new array? */
2473 	if (!new_prog_cnt) {
2474 		*new_array = NULL;
2475 		return 0;
2476 	}
2477 
2478 	/* +1 as the end of prog_array is marked with NULL */
2479 	array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL);
2480 	if (!array)
2481 		return -ENOMEM;
2482 	new = array->items;
2483 
2484 	/* Fill in the new prog array */
2485 	if (carry_prog_cnt) {
2486 		existing = old_array->items;
2487 		for (; existing->prog; existing++) {
2488 			if (existing->prog == exclude_prog ||
2489 			    existing->prog == &dummy_bpf_prog.prog)
2490 				continue;
2491 
2492 			new->prog = existing->prog;
2493 			new->bpf_cookie = existing->bpf_cookie;
2494 			new++;
2495 		}
2496 	}
2497 	if (include_prog) {
2498 		new->prog = include_prog;
2499 		new->bpf_cookie = bpf_cookie;
2500 		new++;
2501 	}
2502 	new->prog = NULL;
2503 	*new_array = array;
2504 	return 0;
2505 }
2506 
2507 int bpf_prog_array_copy_info(struct bpf_prog_array *array,
2508 			     u32 *prog_ids, u32 request_cnt,
2509 			     u32 *prog_cnt)
2510 {
2511 	u32 cnt = 0;
2512 
2513 	if (array)
2514 		cnt = bpf_prog_array_length(array);
2515 
2516 	*prog_cnt = cnt;
2517 
2518 	/* return early if user requested only program count or nothing to copy */
2519 	if (!request_cnt || !cnt)
2520 		return 0;
2521 
2522 	/* this function is called under trace/bpf_trace.c: bpf_event_mutex */
2523 	return bpf_prog_array_copy_core(array, prog_ids, request_cnt) ? -ENOSPC
2524 								     : 0;
2525 }
2526 
2527 void __bpf_free_used_maps(struct bpf_prog_aux *aux,
2528 			  struct bpf_map **used_maps, u32 len)
2529 {
2530 	struct bpf_map *map;
2531 	u32 i;
2532 
2533 	for (i = 0; i < len; i++) {
2534 		map = used_maps[i];
2535 		if (map->ops->map_poke_untrack)
2536 			map->ops->map_poke_untrack(map, aux);
2537 		bpf_map_put(map);
2538 	}
2539 }
2540 
2541 static void bpf_free_used_maps(struct bpf_prog_aux *aux)
2542 {
2543 	__bpf_free_used_maps(aux, aux->used_maps, aux->used_map_cnt);
2544 	kfree(aux->used_maps);
2545 }
2546 
2547 void __bpf_free_used_btfs(struct bpf_prog_aux *aux,
2548 			  struct btf_mod_pair *used_btfs, u32 len)
2549 {
2550 #ifdef CONFIG_BPF_SYSCALL
2551 	struct btf_mod_pair *btf_mod;
2552 	u32 i;
2553 
2554 	for (i = 0; i < len; i++) {
2555 		btf_mod = &used_btfs[i];
2556 		if (btf_mod->module)
2557 			module_put(btf_mod->module);
2558 		btf_put(btf_mod->btf);
2559 	}
2560 #endif
2561 }
2562 
2563 static void bpf_free_used_btfs(struct bpf_prog_aux *aux)
2564 {
2565 	__bpf_free_used_btfs(aux, aux->used_btfs, aux->used_btf_cnt);
2566 	kfree(aux->used_btfs);
2567 }
2568 
2569 static void bpf_prog_free_deferred(struct work_struct *work)
2570 {
2571 	struct bpf_prog_aux *aux;
2572 	int i;
2573 
2574 	aux = container_of(work, struct bpf_prog_aux, work);
2575 #ifdef CONFIG_BPF_SYSCALL
2576 	bpf_free_kfunc_btf_tab(aux->kfunc_btf_tab);
2577 #endif
2578 #ifdef CONFIG_CGROUP_BPF
2579 	if (aux->cgroup_atype != CGROUP_BPF_ATTACH_TYPE_INVALID)
2580 		bpf_cgroup_atype_put(aux->cgroup_atype);
2581 #endif
2582 	bpf_free_used_maps(aux);
2583 	bpf_free_used_btfs(aux);
2584 	if (bpf_prog_is_dev_bound(aux))
2585 		bpf_prog_offload_destroy(aux->prog);
2586 #ifdef CONFIG_PERF_EVENTS
2587 	if (aux->prog->has_callchain_buf)
2588 		put_callchain_buffers();
2589 #endif
2590 	if (aux->dst_trampoline)
2591 		bpf_trampoline_put(aux->dst_trampoline);
2592 	for (i = 0; i < aux->func_cnt; i++) {
2593 		/* We can just unlink the subprog poke descriptor table as
2594 		 * it was originally linked to the main program and is also
2595 		 * released along with it.
2596 		 */
2597 		aux->func[i]->aux->poke_tab = NULL;
2598 		bpf_jit_free(aux->func[i]);
2599 	}
2600 	if (aux->func_cnt) {
2601 		kfree(aux->func);
2602 		bpf_prog_unlock_free(aux->prog);
2603 	} else {
2604 		bpf_jit_free(aux->prog);
2605 	}
2606 }
2607 
2608 void bpf_prog_free(struct bpf_prog *fp)
2609 {
2610 	struct bpf_prog_aux *aux = fp->aux;
2611 
2612 	if (aux->dst_prog)
2613 		bpf_prog_put(aux->dst_prog);
2614 	INIT_WORK(&aux->work, bpf_prog_free_deferred);
2615 	schedule_work(&aux->work);
2616 }
2617 EXPORT_SYMBOL_GPL(bpf_prog_free);
2618 
2619 /* RNG for unprivileged user space with separated state from prandom_u32(). */
2620 static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);
2621 
2622 void bpf_user_rnd_init_once(void)
2623 {
2624 	prandom_init_once(&bpf_user_rnd_state);
2625 }
2626 
2627 BPF_CALL_0(bpf_user_rnd_u32)
2628 {
2629 	/* Should someone ever have the rather unwise idea of using some
2630 	 * of the registers passed into this function, then note that
2631 	 * this function is called from native eBPF and classic-to-eBPF
2632 	 * transformations. Register assignments from both sides are
2633 	 * different, e.g. classic always sets fn(ctx, A, X) here.
2634 	 */
2635 	struct rnd_state *state;
2636 	u32 res;
2637 
2638 	state = &get_cpu_var(bpf_user_rnd_state);
2639 	res = prandom_u32_state(state);
2640 	put_cpu_var(bpf_user_rnd_state);
2641 
2642 	return res;
2643 }
2644 
2645 BPF_CALL_0(bpf_get_raw_cpu_id)
2646 {
2647 	return raw_smp_processor_id();
2648 }
2649 
2650 /* Weak definitions of helper functions in case we don't have bpf syscall. */
2651 const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
2652 const struct bpf_func_proto bpf_map_update_elem_proto __weak;
2653 const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
2654 const struct bpf_func_proto bpf_map_push_elem_proto __weak;
2655 const struct bpf_func_proto bpf_map_pop_elem_proto __weak;
2656 const struct bpf_func_proto bpf_map_peek_elem_proto __weak;
2657 const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto __weak;
2658 const struct bpf_func_proto bpf_spin_lock_proto __weak;
2659 const struct bpf_func_proto bpf_spin_unlock_proto __weak;
2660 const struct bpf_func_proto bpf_jiffies64_proto __weak;
2661 
2662 const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
2663 const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
2664 const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
2665 const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
2666 const struct bpf_func_proto bpf_ktime_get_boot_ns_proto __weak;
2667 const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto __weak;
2668 
2669 const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
2670 const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
2671 const struct bpf_func_proto bpf_get_current_comm_proto __weak;
2672 const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak;
2673 const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto __weak;
2674 const struct bpf_func_proto bpf_get_local_storage_proto __weak;
2675 const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto __weak;
2676 const struct bpf_func_proto bpf_snprintf_btf_proto __weak;
2677 const struct bpf_func_proto bpf_seq_printf_btf_proto __weak;
2678 const struct bpf_func_proto bpf_set_retval_proto __weak;
2679 const struct bpf_func_proto bpf_get_retval_proto __weak;
2680 
2681 const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
2682 {
2683 	return NULL;
2684 }
2685 
2686 const struct bpf_func_proto * __weak bpf_get_trace_vprintk_proto(void)
2687 {
2688 	return NULL;
2689 }
2690 
2691 u64 __weak
2692 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
2693 		 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
2694 {
2695 	return -ENOTSUPP;
2696 }
2697 EXPORT_SYMBOL_GPL(bpf_event_output);
2698 
2699 /* Always built-in helper functions. */
2700 const struct bpf_func_proto bpf_tail_call_proto = {
2701 	.func		= NULL,
2702 	.gpl_only	= false,
2703 	.ret_type	= RET_VOID,
2704 	.arg1_type	= ARG_PTR_TO_CTX,
2705 	.arg2_type	= ARG_CONST_MAP_PTR,
2706 	.arg3_type	= ARG_ANYTHING,
2707 };
2708 
2709 /* Stub for JITs that only support cBPF. eBPF programs are interpreted.
2710  * It is encouraged to implement bpf_int_jit_compile() instead, so that
2711  * eBPF and implicitly also cBPF can get JITed!
2712  */
2713 struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
2714 {
2715 	return prog;
2716 }
2717 
2718 /* Stub for JITs that support eBPF. All cBPF code gets transformed into
2719  * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
2720  */
2721 void __weak bpf_jit_compile(struct bpf_prog *prog)
2722 {
2723 }
2724 
2725 bool __weak bpf_helper_changes_pkt_data(void *func)
2726 {
2727 	return false;
2728 }
2729 
2730 /* Return TRUE if the JIT backend wants the verifier to enable sub-register usage
2731  * analysis code and wants explicit zero extension inserted by the verifier.
2732  * Otherwise, return FALSE.
2733  *
2734  * The verifier inserts an explicit zero extension after BPF_CMPXCHGs even if
2735  * you don't override this. JITs that don't want these extra insns can detect
2736  * them using insn_is_zext.
2737  */
2738 bool __weak bpf_jit_needs_zext(void)
2739 {
2740 	return false;
2741 }
2742 
2743 /* Return TRUE if the JIT backend supports mixing bpf2bpf and tailcalls. */
2744 bool __weak bpf_jit_supports_subprog_tailcalls(void)
2745 {
2746 	return false;
2747 }
2748 
2749 bool __weak bpf_jit_supports_kfunc_call(void)
2750 {
2751 	return false;
2752 }
2753 
2754 /* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
2755  * skb_copy_bits(), so provide a weak definition of it for NET-less config.
2756  */
2757 int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
2758 			 int len)
2759 {
2760 	return -EFAULT;
2761 }
2762 
2763 int __weak bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
2764 			      void *addr1, void *addr2)
2765 {
2766 	return -ENOTSUPP;
2767 }
2768 
2769 void * __weak bpf_arch_text_copy(void *dst, void *src, size_t len)
2770 {
2771 	return ERR_PTR(-ENOTSUPP);
2772 }
2773 
2774 int __weak bpf_arch_text_invalidate(void *dst, size_t len)
2775 {
2776 	return -ENOTSUPP;
2777 }
2778 
2779 DEFINE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
2780 EXPORT_SYMBOL(bpf_stats_enabled_key);
2781 
2782 /* All definitions of tracepoints related to BPF. */
2783 #define CREATE_TRACE_POINTS
2784 #include <linux/bpf_trace.h>
2785 
2786 EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);
2787 EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_bulk_tx);
2788