xref: /openbmc/linux/kernel/bpf/core.c (revision d2ba09c1)
1 /*
2  * Linux Socket Filter - Kernel level socket filtering
3  *
4  * Based on the design of the Berkeley Packet Filter. The new
5  * internal format has been designed by PLUMgrid:
6  *
7  *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
8  *
9  * Authors:
10  *
11  *	Jay Schulist <jschlst@samba.org>
12  *	Alexei Starovoitov <ast@plumgrid.com>
13  *	Daniel Borkmann <dborkman@redhat.com>
14  *
15  * This program is free software; you can redistribute it and/or
16  * modify it under the terms of the GNU General Public License
17  * as published by the Free Software Foundation; either version
18  * 2 of the License, or (at your option) any later version.
19  *
20  * Andi Kleen - Fix a few bad bugs and races.
21  * Kris Katterjohn - Added many additional checks in bpf_check_classic()
22  */
23 
24 #include <linux/filter.h>
25 #include <linux/skbuff.h>
26 #include <linux/vmalloc.h>
27 #include <linux/random.h>
28 #include <linux/moduleloader.h>
29 #include <linux/bpf.h>
30 #include <linux/frame.h>
31 #include <linux/rbtree_latch.h>
32 #include <linux/kallsyms.h>
33 #include <linux/rcupdate.h>
34 #include <linux/perf_event.h>
35 
36 #include <asm/unaligned.h>
37 
38 /* Registers */
39 #define BPF_R0	regs[BPF_REG_0]
40 #define BPF_R1	regs[BPF_REG_1]
41 #define BPF_R2	regs[BPF_REG_2]
42 #define BPF_R3	regs[BPF_REG_3]
43 #define BPF_R4	regs[BPF_REG_4]
44 #define BPF_R5	regs[BPF_REG_5]
45 #define BPF_R6	regs[BPF_REG_6]
46 #define BPF_R7	regs[BPF_REG_7]
47 #define BPF_R8	regs[BPF_REG_8]
48 #define BPF_R9	regs[BPF_REG_9]
49 #define BPF_R10	regs[BPF_REG_10]
50 
51 /* Named registers */
52 #define DST	regs[insn->dst_reg]
53 #define SRC	regs[insn->src_reg]
54 #define FP	regs[BPF_REG_FP]
55 #define ARG1	regs[BPF_REG_ARG1]
56 #define CTX	regs[BPF_REG_CTX]
57 #define IMM	insn->imm
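/* With these shorthands, each interpreter case below reads like the insn
 * it implements. For example, the ALU64_ADD_X label in ___bpf_prog_run()
 * expands to:
 *
 *	DST = DST + SRC;
 *
 * which is regs[insn->dst_reg] = regs[insn->dst_reg] + regs[insn->src_reg].
 */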
58 
59 /* No hurry in this branch
60  *
61  * Exported for the bpf jit load helper.
62  */
63 void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
64 {
65 	u8 *ptr = NULL;
66 
67 	if (k >= SKF_NET_OFF)
68 		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
69 	else if (k >= SKF_LL_OFF)
70 		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
71 
72 	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
73 		return ptr;
74 
75 	return NULL;
76 }
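/* Illustrative use of the negative offset convention from a JIT load
 * helper (a sketch, not from the original source): k = SKF_NET_OFF + 9
 * addresses byte 9 of the network header, i.e. the IPv4 protocol field:
 *
 *	u8 *ptr = bpf_internal_load_pointer_neg_helper(skb,
 *						       SKF_NET_OFF + 9, 1);
 *	if (ptr)
 *		ip_proto = *ptr;
 *
 * NULL is returned when the requested bytes lie outside the linear data.
 */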
77 
78 struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
79 {
80 	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
81 	struct bpf_prog_aux *aux;
82 	struct bpf_prog *fp;
83 
84 	size = round_up(size, PAGE_SIZE);
85 	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
86 	if (fp == NULL)
87 		return NULL;
88 
89 	aux = kzalloc(sizeof(*aux), GFP_KERNEL | gfp_extra_flags);
90 	if (aux == NULL) {
91 		vfree(fp);
92 		return NULL;
93 	}
94 
95 	fp->pages = size / PAGE_SIZE;
96 	fp->aux = aux;
97 	fp->aux->prog = fp;
98 	fp->jit_requested = ebpf_jit_enabled();
99 
100 	INIT_LIST_HEAD_RCU(&fp->aux->ksym_lnode);
101 
102 	return fp;
103 }
104 EXPORT_SYMBOL_GPL(bpf_prog_alloc);
105 
106 struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
107 				  gfp_t gfp_extra_flags)
108 {
109 	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
110 	struct bpf_prog *fp;
111 	u32 pages, delta;
112 	int ret;
113 
114 	BUG_ON(fp_old == NULL);
115 
116 	size = round_up(size, PAGE_SIZE);
117 	pages = size / PAGE_SIZE;
118 	if (pages <= fp_old->pages)
119 		return fp_old;
120 
121 	delta = pages - fp_old->pages;
122 	ret = __bpf_prog_charge(fp_old->aux->user, delta);
123 	if (ret)
124 		return NULL;
125 
126 	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
127 	if (fp == NULL) {
128 		__bpf_prog_uncharge(fp_old->aux->user, delta);
129 	} else {
130 		memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
131 		fp->pages = pages;
132 		fp->aux->prog = fp;
133 
134 		/* We keep fp->aux from fp_old around in the new
135 		 * reallocated structure.
136 		 */
137 		fp_old->aux = NULL;
138 		__bpf_prog_free(fp_old);
139 	}
140 
141 	return fp;
142 }
143 
144 void __bpf_prog_free(struct bpf_prog *fp)
145 {
146 	kfree(fp->aux);
147 	vfree(fp);
148 }
149 
150 int bpf_prog_calc_tag(struct bpf_prog *fp)
151 {
152 	const u32 bits_offset = SHA_MESSAGE_BYTES - sizeof(__be64);
153 	u32 raw_size = bpf_prog_tag_scratch_size(fp);
154 	u32 digest[SHA_DIGEST_WORDS];
155 	u32 ws[SHA_WORKSPACE_WORDS];
156 	u32 i, bsize, psize, blocks;
157 	struct bpf_insn *dst;
158 	bool was_ld_map;
159 	u8 *raw, *todo;
160 	__be32 *result;
161 	__be64 *bits;
162 
163 	raw = vmalloc(raw_size);
164 	if (!raw)
165 		return -ENOMEM;
166 
167 	sha_init(digest);
168 	memset(ws, 0, sizeof(ws));
169 
170 	/* We need to take out the map fds for the digest calculation
171 	 * since they are unstable from the user space side.
172 	 */
173 	dst = (void *)raw;
174 	for (i = 0, was_ld_map = false; i < fp->len; i++) {
175 		dst[i] = fp->insnsi[i];
176 		if (!was_ld_map &&
177 		    dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
178 		    dst[i].src_reg == BPF_PSEUDO_MAP_FD) {
179 			was_ld_map = true;
180 			dst[i].imm = 0;
181 		} else if (was_ld_map &&
182 			   dst[i].code == 0 &&
183 			   dst[i].dst_reg == 0 &&
184 			   dst[i].src_reg == 0 &&
185 			   dst[i].off == 0) {
186 			was_ld_map = false;
187 			dst[i].imm = 0;
188 		} else {
189 			was_ld_map = false;
190 		}
191 	}
192 
193 	psize = bpf_prog_insn_size(fp);
194 	memset(&raw[psize], 0, raw_size - psize);
195 	raw[psize++] = 0x80;
196 
197 	bsize  = round_up(psize, SHA_MESSAGE_BYTES);
198 	blocks = bsize / SHA_MESSAGE_BYTES;
199 	todo   = raw;
200 	if (bsize - psize >= sizeof(__be64)) {
201 		bits = (__be64 *)(todo + bsize - sizeof(__be64));
202 	} else {
203 		bits = (__be64 *)(todo + bsize + bits_offset);
204 		blocks++;
205 	}
206 	*bits = cpu_to_be64((psize - 1) << 3);
207 
208 	while (blocks--) {
209 		sha_transform(digest, todo, ws);
210 		todo += SHA_MESSAGE_BYTES;
211 	}
212 
213 	result = (__force __be32 *)digest;
214 	for (i = 0; i < SHA_DIGEST_WORDS; i++)
215 		result[i] = cpu_to_be32(digest[i]);
216 	memcpy(fp->tag, result, sizeof(fp->tag));
217 
218 	vfree(raw);
219 	return 0;
220 }
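/* Worked example of the SHA-1 padding arithmetic above (values are
 * illustrative): a 3 insn program has psize = 3 * 8 = 24 bytes. After
 * appending the 0x80 terminator psize is 25, bsize rounds up to
 * SHA_MESSAGE_BYTES = 64 and blocks = 1; since 64 - 25 = 39 >= 8, the
 * big-endian bit count (24 << 3 = 192) is placed in the final 8 bytes
 * of that single block, exactly as SHA-1 message padding requires.
 */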
221 
222 static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, u32 delta,
223 				u32 curr, const bool probe_pass)
224 {
225 	const s64 imm_min = S32_MIN, imm_max = S32_MAX;
226 	s64 imm = insn->imm;
227 
228 	if (curr < pos && curr + imm + 1 > pos)
229 		imm += delta;
230 	else if (curr > pos + delta && curr + imm + 1 <= pos + delta)
231 		imm -= delta;
232 	if (imm < imm_min || imm > imm_max)
233 		return -ERANGE;
234 	if (!probe_pass)
235 		insn->imm = imm;
236 	return 0;
237 }
238 
239 static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, u32 delta,
240 				u32 curr, const bool probe_pass)
241 {
242 	const s32 off_min = S16_MIN, off_max = S16_MAX;
243 	s32 off = insn->off;
244 
245 	if (curr < pos && curr + off + 1 > pos)
246 		off += delta;
247 	else if (curr > pos + delta && curr + off + 1 <= pos + delta)
248 		off -= delta;
249 	if (off < off_min || off > off_max)
250 		return -ERANGE;
251 	if (!probe_pass)
252 		insn->off = off;
253 	return 0;
254 }
255 
256 static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, u32 delta,
257 			    const bool probe_pass)
258 {
259 	u32 i, insn_cnt = prog->len + (probe_pass ? delta : 0);
260 	struct bpf_insn *insn = prog->insnsi;
261 	int ret = 0;
262 
263 	for (i = 0; i < insn_cnt; i++, insn++) {
264 		u8 code;
265 
266 		/* In the probing pass we still operate on the original,
267 		 * unpatched image in order to check overflows before we
268 		 * do any other adjustments. Therefore skip the patchlet.
269 		 */
270 		if (probe_pass && i == pos) {
271 			i += delta + 1;
272 			insn++;
273 		}
274 		code = insn->code;
275 		if (BPF_CLASS(code) != BPF_JMP ||
276 		    BPF_OP(code) == BPF_EXIT)
277 			continue;
278 		/* Adjust offset of jmps if we cross patch boundaries. */
279 		if (BPF_OP(code) == BPF_CALL) {
280 			if (insn->src_reg != BPF_PSEUDO_CALL)
281 				continue;
282 			ret = bpf_adj_delta_to_imm(insn, pos, delta, i,
283 						   probe_pass);
284 		} else {
285 			ret = bpf_adj_delta_to_off(insn, pos, delta, i,
286 						   probe_pass);
287 		}
288 		if (ret)
289 			break;
290 	}
291 
292 	return ret;
293 }
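/* Example of the adjustment above (hypothetical program): patching the
 * insn at pos = 2 into len = 3 insns gives delta = 2. A forward jump
 * crossing the patchlet is widened accordingly:
 *
 *	0: JMP +3	--> target is insn 4, i.e. curr + off + 1 > pos,
 *			    so off becomes +5 after patching
 *	2: <patched insn, now 3 insns>
 *	4: target	--> lives at index 6 in the patched image
 *
 * Branches entirely before or after the patched range are unchanged.
 */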
294 
295 struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
296 				       const struct bpf_insn *patch, u32 len)
297 {
298 	u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
299 	const u32 cnt_max = S16_MAX;
300 	struct bpf_prog *prog_adj;
301 
302 	/* Since our patchlet doesn't expand the image, we're done. */
303 	if (insn_delta == 0) {
304 		memcpy(prog->insnsi + off, patch, sizeof(*patch));
305 		return prog;
306 	}
307 
308 	insn_adj_cnt = prog->len + insn_delta;
309 
310 	/* Reject anything that would potentially let the insn->off
311 	 * target overflow when we have excessive program expansions.
312 	 * We need to probe here before we do any reallocation where
313 	 * we afterwards may not fail anymore.
314 	 */
315 	if (insn_adj_cnt > cnt_max &&
316 	    bpf_adj_branches(prog, off, insn_delta, true))
317 		return NULL;
318 
319 	/* Several new instructions need to be inserted. Make room
320 	 * for them. Likely, there's no need for a new allocation as
321 	 * the last page could have large enough tailroom.
322 	 */
323 	prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
324 				    GFP_USER);
325 	if (!prog_adj)
326 		return NULL;
327 
328 	prog_adj->len = insn_adj_cnt;
329 
330 	/* Patching happens in 3 steps:
331 	 *
332 	 * 1) Move over tail of insnsi from next instruction onwards,
333 	 *    so we can patch the single target insn with one or more
334 	 *    new ones (patching is always from 1 to n insns, n > 0).
335 	 * 2) Inject new instructions at the target location.
336 	 * 3) Adjust branch offsets if necessary.
337 	 */
338 	insn_rest = insn_adj_cnt - off - len;
339 
340 	memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
341 		sizeof(*patch) * insn_rest);
342 	memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);
343 
344 	/* We are guaranteed not to fail at this point; otherwise
345 	 * the ship has sailed and there is no way to revert to the
346 	 * original state. An overflow cannot happen at this point.
347 	 */
348 	BUG_ON(bpf_adj_branches(prog_adj, off, insn_delta, false));
349 
350 	return prog_adj;
351 }
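/* A minimal usage sketch (the patch insns are hypothetical): replace the
 * insn at 'off' with a two insn sequence, growing the image by one slot:
 *
 *	struct bpf_insn patch[] = {
 *		BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, 0),
 *		BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_AX),
 *	};
 *
 *	prog = bpf_patch_insn_single(prog, off, patch, ARRAY_SIZE(patch));
 *	if (!prog)
 *		return -ENOMEM;
 *
 * On success the returned program must be used from then on, since the
 * realloc path may have freed the old image.
 */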
352 
353 #ifdef CONFIG_BPF_JIT
354 /* All BPF JIT sysctl knobs here. */
355 int bpf_jit_enable   __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_ALWAYS_ON);
356 int bpf_jit_harden   __read_mostly;
357 int bpf_jit_kallsyms __read_mostly;
358 
359 static __always_inline void
360 bpf_get_prog_addr_region(const struct bpf_prog *prog,
361 			 unsigned long *symbol_start,
362 			 unsigned long *symbol_end)
363 {
364 	const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(prog);
365 	unsigned long addr = (unsigned long)hdr;
366 
367 	WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));
368 
369 	*symbol_start = addr;
370 	*symbol_end   = addr + hdr->pages * PAGE_SIZE;
371 }
372 
373 static void bpf_get_prog_name(const struct bpf_prog *prog, char *sym)
374 {
375 	const char *end = sym + KSYM_NAME_LEN;
376 
377 	BUILD_BUG_ON(sizeof("bpf_prog_") +
378 		     sizeof(prog->tag) * 2 +
379 		     /* The name is null terminated.
380 		      * We would need +1 for the '_' preceding
381 		      * the name.  However, the null character
382 		      * is double counted between the name and the
383 		      * sizeof("bpf_prog_") above, so we omit
384 		      * the +1 here.
385 		      */
386 		     sizeof(prog->aux->name) > KSYM_NAME_LEN);
387 
388 	sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
389 	sym  = bin2hex(sym, prog->tag, sizeof(prog->tag));
390 	if (prog->aux->name[0])
391 		snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name);
392 	else
393 		*sym = 0;
394 }
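/* The resulting symbol looks as follows (the tag below is made up):
 *
 *	bpf_prog_8937c9e909b7a166		unnamed program
 *	bpf_prog_8937c9e909b7a166_mytracer	aux->name == "mytracer"
 *
 * i.e. "bpf_prog_", 16 hex chars of the tag, and an optional "_<name>".
 */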
395 
396 static __always_inline unsigned long
397 bpf_get_prog_addr_start(struct latch_tree_node *n)
398 {
399 	unsigned long symbol_start, symbol_end;
400 	const struct bpf_prog_aux *aux;
401 
402 	aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
403 	bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
404 
405 	return symbol_start;
406 }
407 
408 static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
409 					  struct latch_tree_node *b)
410 {
411 	return bpf_get_prog_addr_start(a) < bpf_get_prog_addr_start(b);
412 }
413 
414 static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
415 {
416 	unsigned long val = (unsigned long)key;
417 	unsigned long symbol_start, symbol_end;
418 	const struct bpf_prog_aux *aux;
419 
420 	aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
421 	bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
422 
423 	if (val < symbol_start)
424 		return -1;
425 	if (val >= symbol_end)
426 		return  1;
427 
428 	return 0;
429 }
430 
431 static const struct latch_tree_ops bpf_tree_ops = {
432 	.less	= bpf_tree_less,
433 	.comp	= bpf_tree_comp,
434 };
435 
436 static DEFINE_SPINLOCK(bpf_lock);
437 static LIST_HEAD(bpf_kallsyms);
438 static struct latch_tree_root bpf_tree __cacheline_aligned;
439 
440 static void bpf_prog_ksym_node_add(struct bpf_prog_aux *aux)
441 {
442 	WARN_ON_ONCE(!list_empty(&aux->ksym_lnode));
443 	list_add_tail_rcu(&aux->ksym_lnode, &bpf_kallsyms);
444 	latch_tree_insert(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
445 }
446 
447 static void bpf_prog_ksym_node_del(struct bpf_prog_aux *aux)
448 {
449 	if (list_empty(&aux->ksym_lnode))
450 		return;
451 
452 	latch_tree_erase(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
453 	list_del_rcu(&aux->ksym_lnode);
454 }
455 
456 static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
457 {
458 	return fp->jited && !bpf_prog_was_classic(fp);
459 }
460 
461 static bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
462 {
463 	return list_empty(&fp->aux->ksym_lnode) ||
464 	       fp->aux->ksym_lnode.prev == LIST_POISON2;
465 }
466 
467 void bpf_prog_kallsyms_add(struct bpf_prog *fp)
468 {
469 	if (!bpf_prog_kallsyms_candidate(fp) ||
470 	    !capable(CAP_SYS_ADMIN))
471 		return;
472 
473 	spin_lock_bh(&bpf_lock);
474 	bpf_prog_ksym_node_add(fp->aux);
475 	spin_unlock_bh(&bpf_lock);
476 }
477 
478 void bpf_prog_kallsyms_del(struct bpf_prog *fp)
479 {
480 	if (!bpf_prog_kallsyms_candidate(fp))
481 		return;
482 
483 	spin_lock_bh(&bpf_lock);
484 	bpf_prog_ksym_node_del(fp->aux);
485 	spin_unlock_bh(&bpf_lock);
486 }
487 
488 static struct bpf_prog *bpf_prog_kallsyms_find(unsigned long addr)
489 {
490 	struct latch_tree_node *n;
491 
492 	if (!bpf_jit_kallsyms_enabled())
493 		return NULL;
494 
495 	n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
496 	return n ?
497 	       container_of(n, struct bpf_prog_aux, ksym_tnode)->prog :
498 	       NULL;
499 }
500 
501 const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
502 				 unsigned long *off, char *sym)
503 {
504 	unsigned long symbol_start, symbol_end;
505 	struct bpf_prog *prog;
506 	char *ret = NULL;
507 
508 	rcu_read_lock();
509 	prog = bpf_prog_kallsyms_find(addr);
510 	if (prog) {
511 		bpf_get_prog_addr_region(prog, &symbol_start, &symbol_end);
512 		bpf_get_prog_name(prog, sym);
513 
514 		ret = sym;
515 		if (size)
516 			*size = symbol_end - symbol_start;
517 		if (off)
518 			*off  = addr - symbol_start;
519 	}
520 	rcu_read_unlock();
521 
522 	return ret;
523 }
524 
525 bool is_bpf_text_address(unsigned long addr)
526 {
527 	bool ret;
528 
529 	rcu_read_lock();
530 	ret = bpf_prog_kallsyms_find(addr) != NULL;
531 	rcu_read_unlock();
532 
533 	return ret;
534 }
535 
536 int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
537 		    char *sym)
538 {
539 	unsigned long symbol_start, symbol_end;
540 	struct bpf_prog_aux *aux;
541 	unsigned int it = 0;
542 	int ret = -ERANGE;
543 
544 	if (!bpf_jit_kallsyms_enabled())
545 		return ret;
546 
547 	rcu_read_lock();
548 	list_for_each_entry_rcu(aux, &bpf_kallsyms, ksym_lnode) {
549 		if (it++ != symnum)
550 			continue;
551 
552 		bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
553 		bpf_get_prog_name(aux->prog, sym);
554 
555 		*value = symbol_start;
556 		*type  = BPF_SYM_ELF_TYPE;
557 
558 		ret = 0;
559 		break;
560 	}
561 	rcu_read_unlock();
562 
563 	return ret;
564 }
565 
566 struct bpf_binary_header *
567 bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
568 		     unsigned int alignment,
569 		     bpf_jit_fill_hole_t bpf_fill_ill_insns)
570 {
571 	struct bpf_binary_header *hdr;
572 	unsigned int size, hole, start;
573 
574 	/* Most BPF filters are really small, but if some of them
575 	 * fill a page, allow at least 128 extra bytes to insert a
576 	 * random section of illegal instructions.
577 	 */
578 	size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
579 	hdr = module_alloc(size);
580 	if (hdr == NULL)
581 		return NULL;
582 
583 	/* Fill space with illegal/arch-dep instructions. */
584 	bpf_fill_ill_insns(hdr, size);
585 
586 	hdr->pages = size / PAGE_SIZE;
587 	hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
588 		     PAGE_SIZE - sizeof(*hdr));
589 	start = (get_random_int() % hole) & ~(alignment - 1);
590 
591 	/* Leave a randomly sized gap before the start of the BPF code. */
592 	*image_ptr = &hdr->image[start];
593 
594 	return hdr;
595 }
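/* Resulting image layout (proportions are illustrative):
 *
 *	hdr                                             hdr + size
 *	+-------------+------------+-------------+--------------+
 *	| binary      | random gap | JITed image | illegal      |
 *	| header      |            | (proglen)   | insns (fill) |
 *	+-------------+------------+-------------+--------------+
 *	              image start is *image_ptr = &hdr->image[start]
 *
 * Everything outside the JITed image keeps the fill pattern written by
 * bpf_fill_ill_insns() above.
 */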
596 
597 void bpf_jit_binary_free(struct bpf_binary_header *hdr)
598 {
599 	module_memfree(hdr);
600 }
601 
602 /* This symbol is only overridden by archs that have different
603  * requirements than the usual eBPF JITs, f.e. when they only
604  * implement cBPF JIT, do not set images read-only, etc.
605  */
606 void __weak bpf_jit_free(struct bpf_prog *fp)
607 {
608 	if (fp->jited) {
609 		struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);
610 
611 		bpf_jit_binary_unlock_ro(hdr);
612 		bpf_jit_binary_free(hdr);
613 
614 		WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
615 	}
616 
617 	bpf_prog_unlock_free(fp);
618 }
619 
620 static int bpf_jit_blind_insn(const struct bpf_insn *from,
621 			      const struct bpf_insn *aux,
622 			      struct bpf_insn *to_buff)
623 {
624 	struct bpf_insn *to = to_buff;
625 	u32 imm_rnd = get_random_int();
626 	s16 off;
627 
628 	BUILD_BUG_ON(BPF_REG_AX  + 1 != MAX_BPF_JIT_REG);
629 	BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);
630 
631 	if (from->imm == 0 &&
632 	    (from->code == (BPF_ALU   | BPF_MOV | BPF_K) ||
633 	     from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
634 		*to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
635 		goto out;
636 	}
637 
638 	switch (from->code) {
639 	case BPF_ALU | BPF_ADD | BPF_K:
640 	case BPF_ALU | BPF_SUB | BPF_K:
641 	case BPF_ALU | BPF_AND | BPF_K:
642 	case BPF_ALU | BPF_OR  | BPF_K:
643 	case BPF_ALU | BPF_XOR | BPF_K:
644 	case BPF_ALU | BPF_MUL | BPF_K:
645 	case BPF_ALU | BPF_MOV | BPF_K:
646 	case BPF_ALU | BPF_DIV | BPF_K:
647 	case BPF_ALU | BPF_MOD | BPF_K:
648 		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
649 		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
650 		*to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX);
651 		break;
652 
653 	case BPF_ALU64 | BPF_ADD | BPF_K:
654 	case BPF_ALU64 | BPF_SUB | BPF_K:
655 	case BPF_ALU64 | BPF_AND | BPF_K:
656 	case BPF_ALU64 | BPF_OR  | BPF_K:
657 	case BPF_ALU64 | BPF_XOR | BPF_K:
658 	case BPF_ALU64 | BPF_MUL | BPF_K:
659 	case BPF_ALU64 | BPF_MOV | BPF_K:
660 	case BPF_ALU64 | BPF_DIV | BPF_K:
661 	case BPF_ALU64 | BPF_MOD | BPF_K:
662 		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
663 		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
664 		*to++ = BPF_ALU64_REG(from->code, from->dst_reg, BPF_REG_AX);
665 		break;
666 
667 	case BPF_JMP | BPF_JEQ  | BPF_K:
668 	case BPF_JMP | BPF_JNE  | BPF_K:
669 	case BPF_JMP | BPF_JGT  | BPF_K:
670 	case BPF_JMP | BPF_JLT  | BPF_K:
671 	case BPF_JMP | BPF_JGE  | BPF_K:
672 	case BPF_JMP | BPF_JLE  | BPF_K:
673 	case BPF_JMP | BPF_JSGT | BPF_K:
674 	case BPF_JMP | BPF_JSLT | BPF_K:
675 	case BPF_JMP | BPF_JSGE | BPF_K:
676 	case BPF_JMP | BPF_JSLE | BPF_K:
677 	case BPF_JMP | BPF_JSET | BPF_K:
678 		/* Account for the extra offset in case of a backjump. */
679 		off = from->off;
680 		if (off < 0)
681 			off -= 2;
682 		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
683 		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
684 		*to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
685 		break;
686 
687 	case BPF_LD | BPF_IMM | BPF_DW:
688 		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
689 		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
690 		*to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
691 		*to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
692 		break;
693 	case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
694 		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
695 		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
696 		*to++ = BPF_ALU64_REG(BPF_OR,  aux[0].dst_reg, BPF_REG_AX);
697 		break;
698 
699 	case BPF_ST | BPF_MEM | BPF_DW:
700 	case BPF_ST | BPF_MEM | BPF_W:
701 	case BPF_ST | BPF_MEM | BPF_H:
702 	case BPF_ST | BPF_MEM | BPF_B:
703 		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
704 		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
705 		*to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
706 		break;
707 	}
708 out:
709 	return to - to_buff;
710 }
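/* Blinding example for a constant-carrying insn (imm_rnd is per insn
 * random, the constant is illustrative): the single insn
 *
 *	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1234)
 *
 * is rewritten by the BPF_ALU64 | BPF_ADD | BPF_K case above into
 *
 *	BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, 0x1234 ^ imm_rnd);
 *	BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
 *	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_AX);
 *
 * so the attacker-chosen constant never appears verbatim in the image.
 */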
711 
712 static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
713 					      gfp_t gfp_extra_flags)
714 {
715 	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
716 	struct bpf_prog *fp;
717 
718 	fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags, PAGE_KERNEL);
719 	if (fp != NULL) {
720 		/* aux->prog still points to the fp_other one, so
721 		 * when promoting the clone to the real program,
722 		 * this still needs to be adapted.
723 		 */
724 		memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
725 	}
726 
727 	return fp;
728 }
729 
730 static void bpf_prog_clone_free(struct bpf_prog *fp)
731 {
732 	/* aux was stolen by the other clone, so we cannot free
733 	 * it from this path! It will be freed eventually by the
734 	 * other program on release.
735 	 *
736 	 * At this point, we don't need a deferred release since
737 	 * clone is guaranteed to not be locked.
738 	 */
739 	fp->aux = NULL;
740 	__bpf_prog_free(fp);
741 }
742 
743 void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
744 {
745 	/* We have to repoint aux->prog to self, as we don't
746 	 * know whether fp here is the clone or the original.
747 	 */
748 	fp->aux->prog = fp;
749 	bpf_prog_clone_free(fp_other);
750 }
751 
752 struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
753 {
754 	struct bpf_insn insn_buff[16], aux[2];
755 	struct bpf_prog *clone, *tmp;
756 	int insn_delta, insn_cnt;
757 	struct bpf_insn *insn;
758 	int i, rewritten;
759 
760 	if (!bpf_jit_blinding_enabled(prog) || prog->blinded)
761 		return prog;
762 
763 	clone = bpf_prog_clone_create(prog, GFP_USER);
764 	if (!clone)
765 		return ERR_PTR(-ENOMEM);
766 
767 	insn_cnt = clone->len;
768 	insn = clone->insnsi;
769 
770 	for (i = 0; i < insn_cnt; i++, insn++) {
771 		/* We temporarily need to hold the original ld64 insn
772 		 * so that we can still access the first part in the
773 		 * second blinding run.
774 		 */
775 		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
776 		    insn[1].code == 0)
777 			memcpy(aux, insn, sizeof(aux));
778 
779 		rewritten = bpf_jit_blind_insn(insn, aux, insn_buff);
780 		if (!rewritten)
781 			continue;
782 
783 		tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
784 		if (!tmp) {
785 			/* Patching may have repointed aux->prog during
786 			 * realloc from the original one, so we need to
787 			 * fix it up here on error.
788 			 */
789 			bpf_jit_prog_release_other(prog, clone);
790 			return ERR_PTR(-ENOMEM);
791 		}
792 
793 		clone = tmp;
794 		insn_delta = rewritten - 1;
795 
796 		/* Walk new program and skip insns we just inserted. */
797 		insn = clone->insnsi + i + insn_delta;
798 		insn_cnt += insn_delta;
799 		i        += insn_delta;
800 	}
801 
802 	clone->blinded = 1;
803 	return clone;
804 }
805 #endif /* CONFIG_BPF_JIT */
806 
807 /* Base function for offset calculation. Needs to go into .text section,
808  * therefore keeping it non-static as well; will also be used by JITs
809  * anyway later on, so do not let the compiler omit it. This also needs
810  * to go into kallsyms for correlation from e.g. bpftool, so naming
811  * must not change.
812  */
813 noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
814 {
815 	return 0;
816 }
817 EXPORT_SYMBOL_GPL(__bpf_call_base);
818 
819 /* All UAPI available opcodes. */
820 #define BPF_INSN_MAP(INSN_2, INSN_3)		\
821 	/* 32 bit ALU operations. */		\
822 	/*   Register based. */			\
823 	INSN_3(ALU, ADD, X),			\
824 	INSN_3(ALU, SUB, X),			\
825 	INSN_3(ALU, AND, X),			\
826 	INSN_3(ALU, OR,  X),			\
827 	INSN_3(ALU, LSH, X),			\
828 	INSN_3(ALU, RSH, X),			\
829 	INSN_3(ALU, XOR, X),			\
830 	INSN_3(ALU, MUL, X),			\
831 	INSN_3(ALU, MOV, X),			\
832 	INSN_3(ALU, DIV, X),			\
833 	INSN_3(ALU, MOD, X),			\
834 	INSN_2(ALU, NEG),			\
835 	INSN_3(ALU, END, TO_BE),		\
836 	INSN_3(ALU, END, TO_LE),		\
837 	/*   Immediate based. */		\
838 	INSN_3(ALU, ADD, K),			\
839 	INSN_3(ALU, SUB, K),			\
840 	INSN_3(ALU, AND, K),			\
841 	INSN_3(ALU, OR,  K),			\
842 	INSN_3(ALU, LSH, K),			\
843 	INSN_3(ALU, RSH, K),			\
844 	INSN_3(ALU, XOR, K),			\
845 	INSN_3(ALU, MUL, K),			\
846 	INSN_3(ALU, MOV, K),			\
847 	INSN_3(ALU, DIV, K),			\
848 	INSN_3(ALU, MOD, K),			\
849 	/* 64 bit ALU operations. */		\
850 	/*   Register based. */			\
851 	INSN_3(ALU64, ADD,  X),			\
852 	INSN_3(ALU64, SUB,  X),			\
853 	INSN_3(ALU64, AND,  X),			\
854 	INSN_3(ALU64, OR,   X),			\
855 	INSN_3(ALU64, LSH,  X),			\
856 	INSN_3(ALU64, RSH,  X),			\
857 	INSN_3(ALU64, XOR,  X),			\
858 	INSN_3(ALU64, MUL,  X),			\
859 	INSN_3(ALU64, MOV,  X),			\
860 	INSN_3(ALU64, ARSH, X),			\
861 	INSN_3(ALU64, DIV,  X),			\
862 	INSN_3(ALU64, MOD,  X),			\
863 	INSN_2(ALU64, NEG),			\
864 	/*   Immediate based. */		\
865 	INSN_3(ALU64, ADD,  K),			\
866 	INSN_3(ALU64, SUB,  K),			\
867 	INSN_3(ALU64, AND,  K),			\
868 	INSN_3(ALU64, OR,   K),			\
869 	INSN_3(ALU64, LSH,  K),			\
870 	INSN_3(ALU64, RSH,  K),			\
871 	INSN_3(ALU64, XOR,  K),			\
872 	INSN_3(ALU64, MUL,  K),			\
873 	INSN_3(ALU64, MOV,  K),			\
874 	INSN_3(ALU64, ARSH, K),			\
875 	INSN_3(ALU64, DIV,  K),			\
876 	INSN_3(ALU64, MOD,  K),			\
877 	/* Call instruction. */			\
878 	INSN_2(JMP, CALL),			\
879 	/* Exit instruction. */			\
880 	INSN_2(JMP, EXIT),			\
881 	/* Jump instructions. */		\
882 	/*   Register based. */			\
883 	INSN_3(JMP, JEQ,  X),			\
884 	INSN_3(JMP, JNE,  X),			\
885 	INSN_3(JMP, JGT,  X),			\
886 	INSN_3(JMP, JLT,  X),			\
887 	INSN_3(JMP, JGE,  X),			\
888 	INSN_3(JMP, JLE,  X),			\
889 	INSN_3(JMP, JSGT, X),			\
890 	INSN_3(JMP, JSLT, X),			\
891 	INSN_3(JMP, JSGE, X),			\
892 	INSN_3(JMP, JSLE, X),			\
893 	INSN_3(JMP, JSET, X),			\
894 	/*   Immediate based. */		\
895 	INSN_3(JMP, JEQ,  K),			\
896 	INSN_3(JMP, JNE,  K),			\
897 	INSN_3(JMP, JGT,  K),			\
898 	INSN_3(JMP, JLT,  K),			\
899 	INSN_3(JMP, JGE,  K),			\
900 	INSN_3(JMP, JLE,  K),			\
901 	INSN_3(JMP, JSGT, K),			\
902 	INSN_3(JMP, JSLT, K),			\
903 	INSN_3(JMP, JSGE, K),			\
904 	INSN_3(JMP, JSLE, K),			\
905 	INSN_3(JMP, JSET, K),			\
906 	INSN_2(JMP, JA),			\
907 	/* Store instructions. */		\
908 	/*   Register based. */			\
909 	INSN_3(STX, MEM,  B),			\
910 	INSN_3(STX, MEM,  H),			\
911 	INSN_3(STX, MEM,  W),			\
912 	INSN_3(STX, MEM,  DW),			\
913 	INSN_3(STX, XADD, W),			\
914 	INSN_3(STX, XADD, DW),			\
915 	/*   Immediate based. */		\
916 	INSN_3(ST, MEM, B),			\
917 	INSN_3(ST, MEM, H),			\
918 	INSN_3(ST, MEM, W),			\
919 	INSN_3(ST, MEM, DW),			\
920 	/* Load instructions. */		\
921 	/*   Register based. */			\
922 	INSN_3(LDX, MEM, B),			\
923 	INSN_3(LDX, MEM, H),			\
924 	INSN_3(LDX, MEM, W),			\
925 	INSN_3(LDX, MEM, DW),			\
926 	/*   Immediate based. */		\
927 	INSN_3(LD, IMM, DW)
928 
929 bool bpf_opcode_in_insntable(u8 code)
930 {
931 #define BPF_INSN_2_TBL(x, y)    [BPF_##x | BPF_##y] = true
932 #define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true
933 	static const bool public_insntable[256] = {
934 		[0 ... 255] = false,
935 		/* Now overwrite non-defaults ... */
936 		BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL),
937 		/* UAPI exposed, but rewritten opcodes. cBPF carry-over. */
938 		[BPF_LD | BPF_ABS | BPF_B] = true,
939 		[BPF_LD | BPF_ABS | BPF_H] = true,
940 		[BPF_LD | BPF_ABS | BPF_W] = true,
941 		[BPF_LD | BPF_IND | BPF_B] = true,
942 		[BPF_LD | BPF_IND | BPF_H] = true,
943 		[BPF_LD | BPF_IND | BPF_W] = true,
944 	};
945 #undef BPF_INSN_3_TBL
946 #undef BPF_INSN_2_TBL
947 	return public_insntable[code];
948 }
949 
950 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
951 /**
952  *	___bpf_prog_run - run eBPF program on a given context
953  *	@regs: is the array of MAX_BPF_REG eBPF pseudo-registers
954  *	@insn: is the array of eBPF instructions
955  *	@stack: is the eBPF storage stack
956  * Decode and execute eBPF instructions.
957  */
958 static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
959 {
960 	u64 tmp;
961 #define BPF_INSN_2_LBL(x, y)    [BPF_##x | BPF_##y] = &&x##_##y
962 #define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
963 	static const void *jumptable[256] = {
964 		[0 ... 255] = &&default_label,
965 		/* Now overwrite non-defaults ... */
966 		BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL),
967 		/* Non-UAPI available opcodes. */
968 		[BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
969 		[BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
970 	};
971 #undef BPF_INSN_3_LBL
972 #undef BPF_INSN_2_LBL
973 	u32 tail_call_cnt = 0;
974 
975 #define CONT	 ({ insn++; goto select_insn; })
976 #define CONT_JMP ({ insn++; goto select_insn; })
977 
978 select_insn:
979 	goto *jumptable[insn->code];
980 
981 	/* ALU */
982 #define ALU(OPCODE, OP)			\
983 	ALU64_##OPCODE##_X:		\
984 		DST = DST OP SRC;	\
985 		CONT;			\
986 	ALU_##OPCODE##_X:		\
987 		DST = (u32) DST OP (u32) SRC;	\
988 		CONT;			\
989 	ALU64_##OPCODE##_K:		\
990 		DST = DST OP IMM;		\
991 		CONT;			\
992 	ALU_##OPCODE##_K:		\
993 		DST = (u32) DST OP (u32) IMM;	\
994 		CONT;
995 
996 	ALU(ADD,  +)
997 	ALU(SUB,  -)
998 	ALU(AND,  &)
999 	ALU(OR,   |)
1000 	ALU(LSH, <<)
1001 	ALU(RSH, >>)
1002 	ALU(XOR,  ^)
1003 	ALU(MUL,  *)
1004 #undef ALU
1005 	ALU_NEG:
1006 		DST = (u32) -DST;
1007 		CONT;
1008 	ALU64_NEG:
1009 		DST = -DST;
1010 		CONT;
1011 	ALU_MOV_X:
1012 		DST = (u32) SRC;
1013 		CONT;
1014 	ALU_MOV_K:
1015 		DST = (u32) IMM;
1016 		CONT;
1017 	ALU64_MOV_X:
1018 		DST = SRC;
1019 		CONT;
1020 	ALU64_MOV_K:
1021 		DST = IMM;
1022 		CONT;
1023 	LD_IMM_DW:
1024 		DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
1025 		insn++;
1026 		CONT;
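	/* LD_IMM_DW above assembles one 64-bit constant from two
	 * consecutive insns, e.g. with illustrative values:
	 *
	 *	insn[0].imm = 0xdeadbeef, insn[1].imm = 0x01020304
	 *	DST = 0x01020304deadbeef
	 *
	 * which is why insn is advanced one extra slot before CONT.
	 */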
1027 	ALU64_ARSH_X:
1028 		(*(s64 *) &DST) >>= SRC;
1029 		CONT;
1030 	ALU64_ARSH_K:
1031 		(*(s64 *) &DST) >>= IMM;
1032 		CONT;
1033 	ALU64_MOD_X:
1034 		div64_u64_rem(DST, SRC, &tmp);
1035 		DST = tmp;
1036 		CONT;
1037 	ALU_MOD_X:
1038 		tmp = (u32) DST;
1039 		DST = do_div(tmp, (u32) SRC);
1040 		CONT;
1041 	ALU64_MOD_K:
1042 		div64_u64_rem(DST, IMM, &tmp);
1043 		DST = tmp;
1044 		CONT;
1045 	ALU_MOD_K:
1046 		tmp = (u32) DST;
1047 		DST = do_div(tmp, (u32) IMM);
1048 		CONT;
1049 	ALU64_DIV_X:
1050 		DST = div64_u64(DST, SRC);
1051 		CONT;
1052 	ALU_DIV_X:
1053 		tmp = (u32) DST;
1054 		do_div(tmp, (u32) SRC);
1055 		DST = (u32) tmp;
1056 		CONT;
1057 	ALU64_DIV_K:
1058 		DST = div64_u64(DST, IMM);
1059 		CONT;
1060 	ALU_DIV_K:
1061 		tmp = (u32) DST;
1062 		do_div(tmp, (u32) IMM);
1063 		DST = (u32) tmp;
1064 		CONT;
1065 	ALU_END_TO_BE:
1066 		switch (IMM) {
1067 		case 16:
1068 			DST = (__force u16) cpu_to_be16(DST);
1069 			break;
1070 		case 32:
1071 			DST = (__force u32) cpu_to_be32(DST);
1072 			break;
1073 		case 64:
1074 			DST = (__force u64) cpu_to_be64(DST);
1075 			break;
1076 		}
1077 		CONT;
1078 	ALU_END_TO_LE:
1079 		switch (IMM) {
1080 		case 16:
1081 			DST = (__force u16) cpu_to_le16(DST);
1082 			break;
1083 		case 32:
1084 			DST = (__force u32) cpu_to_le32(DST);
1085 			break;
1086 		case 64:
1087 			DST = (__force u64) cpu_to_le64(DST);
1088 			break;
1089 		}
1090 		CONT;
1091 
1092 	/* CALL */
1093 	JMP_CALL:
1094 		/* Function call scratches BPF_R1-BPF_R5 registers,
1095 		 * preserves BPF_R6-BPF_R9, and stores return value
1096 		 * into BPF_R0.
1097 		 */
1098 		BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
1099 						       BPF_R4, BPF_R5);
1100 		CONT;
1101 
1102 	JMP_CALL_ARGS:
1103 		BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2,
1104 							    BPF_R3, BPF_R4,
1105 							    BPF_R5,
1106 							    insn + insn->off + 1);
1107 		CONT;
1108 
1109 	JMP_TAIL_CALL: {
1110 		struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
1111 		struct bpf_array *array = container_of(map, struct bpf_array, map);
1112 		struct bpf_prog *prog;
1113 		u32 index = BPF_R3;
1114 
1115 		if (unlikely(index >= array->map.max_entries))
1116 			goto out;
1117 		if (unlikely(tail_call_cnt > MAX_TAIL_CALL_CNT))
1118 			goto out;
1119 
1120 		tail_call_cnt++;
1121 
1122 		prog = READ_ONCE(array->ptrs[index]);
1123 		if (!prog)
1124 			goto out;
1125 
1126 		/* ARG1 at this point is guaranteed to point to CTX from
1127 		 * the verifier side due to the fact that the tail call is
1128 		 * handled like a helper, that is, bpf_tail_call_proto,
1129 		 * where arg1_type is ARG_PTR_TO_CTX.
1130 		 */
1131 		insn = prog->insnsi;
1132 		goto select_insn;
1133 out:
1134 		CONT;
1135 	}
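	/* From the program's point of view the above implements e.g.
	 * bpf_tail_call(ctx, &prog_array, 3): jump into prog_array slot 3
	 * without returning. The out: fallthrough resumes at the next insn
	 * when the index is out of range, the slot is empty, or more than
	 * MAX_TAIL_CALL_CNT nested tail calls were attempted.
	 */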
1136 	/* JMP */
1137 	JMP_JA:
1138 		insn += insn->off;
1139 		CONT;
1140 	JMP_JEQ_X:
1141 		if (DST == SRC) {
1142 			insn += insn->off;
1143 			CONT_JMP;
1144 		}
1145 		CONT;
1146 	JMP_JEQ_K:
1147 		if (DST == IMM) {
1148 			insn += insn->off;
1149 			CONT_JMP;
1150 		}
1151 		CONT;
1152 	JMP_JNE_X:
1153 		if (DST != SRC) {
1154 			insn += insn->off;
1155 			CONT_JMP;
1156 		}
1157 		CONT;
1158 	JMP_JNE_K:
1159 		if (DST != IMM) {
1160 			insn += insn->off;
1161 			CONT_JMP;
1162 		}
1163 		CONT;
1164 	JMP_JGT_X:
1165 		if (DST > SRC) {
1166 			insn += insn->off;
1167 			CONT_JMP;
1168 		}
1169 		CONT;
1170 	JMP_JGT_K:
1171 		if (DST > IMM) {
1172 			insn += insn->off;
1173 			CONT_JMP;
1174 		}
1175 		CONT;
1176 	JMP_JLT_X:
1177 		if (DST < SRC) {
1178 			insn += insn->off;
1179 			CONT_JMP;
1180 		}
1181 		CONT;
1182 	JMP_JLT_K:
1183 		if (DST < IMM) {
1184 			insn += insn->off;
1185 			CONT_JMP;
1186 		}
1187 		CONT;
1188 	JMP_JGE_X:
1189 		if (DST >= SRC) {
1190 			insn += insn->off;
1191 			CONT_JMP;
1192 		}
1193 		CONT;
1194 	JMP_JGE_K:
1195 		if (DST >= IMM) {
1196 			insn += insn->off;
1197 			CONT_JMP;
1198 		}
1199 		CONT;
1200 	JMP_JLE_X:
1201 		if (DST <= SRC) {
1202 			insn += insn->off;
1203 			CONT_JMP;
1204 		}
1205 		CONT;
1206 	JMP_JLE_K:
1207 		if (DST <= IMM) {
1208 			insn += insn->off;
1209 			CONT_JMP;
1210 		}
1211 		CONT;
1212 	JMP_JSGT_X:
1213 		if (((s64) DST) > ((s64) SRC)) {
1214 			insn += insn->off;
1215 			CONT_JMP;
1216 		}
1217 		CONT;
1218 	JMP_JSGT_K:
1219 		if (((s64) DST) > ((s64) IMM)) {
1220 			insn += insn->off;
1221 			CONT_JMP;
1222 		}
1223 		CONT;
1224 	JMP_JSLT_X:
1225 		if (((s64) DST) < ((s64) SRC)) {
1226 			insn += insn->off;
1227 			CONT_JMP;
1228 		}
1229 		CONT;
1230 	JMP_JSLT_K:
1231 		if (((s64) DST) < ((s64) IMM)) {
1232 			insn += insn->off;
1233 			CONT_JMP;
1234 		}
1235 		CONT;
1236 	JMP_JSGE_X:
1237 		if (((s64) DST) >= ((s64) SRC)) {
1238 			insn += insn->off;
1239 			CONT_JMP;
1240 		}
1241 		CONT;
1242 	JMP_JSGE_K:
1243 		if (((s64) DST) >= ((s64) IMM)) {
1244 			insn += insn->off;
1245 			CONT_JMP;
1246 		}
1247 		CONT;
1248 	JMP_JSLE_X:
1249 		if (((s64) DST) <= ((s64) SRC)) {
1250 			insn += insn->off;
1251 			CONT_JMP;
1252 		}
1253 		CONT;
1254 	JMP_JSLE_K:
1255 		if (((s64) DST) <= ((s64) IMM)) {
1256 			insn += insn->off;
1257 			CONT_JMP;
1258 		}
1259 		CONT;
1260 	JMP_JSET_X:
1261 		if (DST & SRC) {
1262 			insn += insn->off;
1263 			CONT_JMP;
1264 		}
1265 		CONT;
1266 	JMP_JSET_K:
1267 		if (DST & IMM) {
1268 			insn += insn->off;
1269 			CONT_JMP;
1270 		}
1271 		CONT;
1272 	JMP_EXIT:
1273 		return BPF_R0;
1274 
1275 	/* STX and ST and LDX */
1276 #define LDST(SIZEOP, SIZE)						\
1277 	STX_MEM_##SIZEOP:						\
1278 		*(SIZE *)(unsigned long) (DST + insn->off) = SRC;	\
1279 		CONT;							\
1280 	ST_MEM_##SIZEOP:						\
1281 		*(SIZE *)(unsigned long) (DST + insn->off) = IMM;	\
1282 		CONT;							\
1283 	LDX_MEM_##SIZEOP:						\
1284 		DST = *(SIZE *)(unsigned long) (SRC + insn->off);	\
1285 		CONT;
1286 
1287 	LDST(B,   u8)
1288 	LDST(H,  u16)
1289 	LDST(W,  u32)
1290 	LDST(DW, u64)
1291 #undef LDST
1292 	STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
1293 		atomic_add((u32) SRC, (atomic_t *)(unsigned long)
1294 			   (DST + insn->off));
1295 		CONT;
1296 	STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
1297 		atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
1298 			     (DST + insn->off));
1299 		CONT;
1300 
1301 	default_label:
1302 		/* If we ever reach this, we have a bug somewhere. Die hard here
1303 		 * instead of just returning 0; we could be somewhere in a subprog,
1304 		 * so otherwise execution could continue, which we do /not/ want.
1305 		 *
1306 		 * Note, verifier whitelists all opcodes in bpf_opcode_in_insntable().
1307 		 */
1308 		pr_warn("BPF interpreter: unknown opcode %02x\n", insn->code);
1309 		BUG_ON(1);
1310 		return 0;
1311 }
1312 STACK_FRAME_NON_STANDARD(___bpf_prog_run); /* jump table */
1313 
1314 #define PROG_NAME(stack_size) __bpf_prog_run##stack_size
1315 #define DEFINE_BPF_PROG_RUN(stack_size) \
1316 static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
1317 { \
1318 	u64 stack[stack_size / sizeof(u64)]; \
1319 	u64 regs[MAX_BPF_REG]; \
1320 \
1321 	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
1322 	ARG1 = (u64) (unsigned long) ctx; \
1323 	return ___bpf_prog_run(regs, insn, stack); \
1324 }
1325 
1326 #define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size
1327 #define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \
1328 static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
1329 				      const struct bpf_insn *insn) \
1330 { \
1331 	u64 stack[stack_size / sizeof(u64)]; \
1332 	u64 regs[MAX_BPF_REG]; \
1333 \
1334 	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
1335 	BPF_R1 = r1; \
1336 	BPF_R2 = r2; \
1337 	BPF_R3 = r3; \
1338 	BPF_R4 = r4; \
1339 	BPF_R5 = r5; \
1340 	return ___bpf_prog_run(regs, insn, stack); \
1341 }
1342 
1343 #define EVAL1(FN, X) FN(X)
1344 #define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
1345 #define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
1346 #define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y)
1347 #define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y)
1348 #define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y)
1349 
1350 EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
1351 EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
1352 EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);
1353 
1354 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192);
1355 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384);
1356 EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);
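/* The EVAL chains above stamp out one interpreter per stack size, e.g.
 * EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, ...) defines __bpf_prog_run32(),
 * __bpf_prog_run64() and so on, each with a fixed on-stack scratch area,
 * so that the variant matching aux->stack_depth can be picked at load
 * time instead of sizing the stack dynamically.
 */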
1357 
1358 #define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),
1359 
1360 static unsigned int (*interpreters[])(const void *ctx,
1361 				      const struct bpf_insn *insn) = {
1362 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
1363 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
1364 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
1365 };
1366 #undef PROG_NAME_LIST
1367 #define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size),
1368 static u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5,
1369 				  const struct bpf_insn *insn) = {
1370 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
1371 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
1372 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
1373 };
1374 #undef PROG_NAME_LIST
1375 
1376 void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
1377 {
1378 	stack_depth = max_t(u32, stack_depth, 1);
1379 	insn->off = (s16) insn->imm;
1380 	insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] -
1381 		__bpf_call_base_args;
1382 	insn->code = BPF_JMP | BPF_CALL_ARGS;
1383 }
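/* Index math above, worked through: a stack_depth of e.g. 40 bytes
 * rounds up to 64, selecting interpreters_args[(64 / 32) - 1], i.e. the
 * __bpf_prog_run_args64() variant with a 64 byte stack.
 */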
1384 
1385 #else
1386 static unsigned int __bpf_prog_ret0_warn(const void *ctx,
1387 					 const struct bpf_insn *insn)
1388 {
1389 	/* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
1390 	 * is not working properly, so warn about it!
1391 	 */
1392 	WARN_ON_ONCE(1);
1393 	return 0;
1394 }
1395 #endif
1396 
1397 bool bpf_prog_array_compatible(struct bpf_array *array,
1398 			       const struct bpf_prog *fp)
1399 {
1400 	if (fp->kprobe_override)
1401 		return false;
1402 
1403 	if (!array->owner_prog_type) {
1404 		/* There's no owner yet where we could check for
1405 		 * compatibility.
1406 		 */
1407 		array->owner_prog_type = fp->type;
1408 		array->owner_jited = fp->jited;
1409 
1410 		return true;
1411 	}
1412 
1413 	return array->owner_prog_type == fp->type &&
1414 	       array->owner_jited == fp->jited;
1415 }
1416 
1417 static int bpf_check_tail_call(const struct bpf_prog *fp)
1418 {
1419 	struct bpf_prog_aux *aux = fp->aux;
1420 	int i;
1421 
1422 	for (i = 0; i < aux->used_map_cnt; i++) {
1423 		struct bpf_map *map = aux->used_maps[i];
1424 		struct bpf_array *array;
1425 
1426 		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
1427 			continue;
1428 
1429 		array = container_of(map, struct bpf_array, map);
1430 		if (!bpf_prog_array_compatible(array, fp))
1431 			return -EINVAL;
1432 	}
1433 
1434 	return 0;
1435 }
1436 
1437 /**
1438  *	bpf_prog_select_runtime - select exec runtime for BPF program
1439  *	@fp: bpf_prog populated with internal BPF program
1440  *	@err: pointer to error variable
1441  *
1442  * Try to JIT eBPF program, if JIT is not available, use interpreter.
1443  * The BPF program will be executed via BPF_PROG_RUN() macro.
1444  */
1445 struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
1446 {
1447 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
1448 	u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
1449 
1450 	fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
1451 #else
1452 	fp->bpf_func = __bpf_prog_ret0_warn;
1453 #endif
1454 
1455 	/* eBPF JITs can rewrite the program in case constant
1456 	 * blinding is active. However, in case of error during
1457 	 * blinding, bpf_int_jit_compile() must always return a
1458 	 * valid program, which in this case would simply not
1459 	 * be JITed, but fall back to the interpreter.
1460 	 */
1461 	if (!bpf_prog_is_dev_bound(fp->aux)) {
1462 		fp = bpf_int_jit_compile(fp);
1463 #ifdef CONFIG_BPF_JIT_ALWAYS_ON
1464 		if (!fp->jited) {
1465 			*err = -ENOTSUPP;
1466 			return fp;
1467 		}
1468 #endif
1469 	} else {
1470 		*err = bpf_prog_offload_compile(fp);
1471 		if (*err)
1472 			return fp;
1473 	}
1474 	bpf_prog_lock_ro(fp);
1475 
1476 	/* The tail call compatibility check can only be done at
1477 	 * this late stage as we need to determine whether we deal
1478 	 * with JITed or non-JITed program concatenations, and not
1479 	 * all eBPF JITs might immediately support all features.
1480 	 */
1481 	*err = bpf_check_tail_call(fp);
1482 
1483 	return fp;
1484 }
1485 EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
1486 
1487 static unsigned int __bpf_prog_ret1(const void *ctx,
1488 				    const struct bpf_insn *insn)
1489 {
1490 	return 1;
1491 }
1492 
1493 static struct bpf_prog_dummy {
1494 	struct bpf_prog prog;
1495 } dummy_bpf_prog = {
1496 	.prog = {
1497 		.bpf_func = __bpf_prog_ret1,
1498 	},
1499 };
1500 
1501 /* To avoid allocating an empty bpf_prog_array for cgroups that
1502  * don't have a bpf program attached, use one global 'empty_prog_array'.
1503  * It will not be modified by the caller of bpf_prog_array_alloc()
1504  * (since the caller requested prog_cnt == 0);
1505  * that pointer should still be 'freed' by bpf_prog_array_free().
1506  */
1507 static struct {
1508 	struct bpf_prog_array hdr;
1509 	struct bpf_prog *null_prog;
1510 } empty_prog_array = {
1511 	.null_prog = NULL,
1512 };
1513 
1514 struct bpf_prog_array __rcu *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
1515 {
1516 	if (prog_cnt)
1517 		return kzalloc(sizeof(struct bpf_prog_array) +
1518 			       sizeof(struct bpf_prog *) * (prog_cnt + 1),
1519 			       flags);
1520 
1521 	return &empty_prog_array.hdr;
1522 }
1523 
1524 void bpf_prog_array_free(struct bpf_prog_array __rcu *progs)
1525 {
1526 	if (!progs ||
1527 	    progs == (struct bpf_prog_array __rcu *)&empty_prog_array.hdr)
1528 		return;
1529 	kfree_rcu(progs, rcu);
1530 }
1531 
1532 int bpf_prog_array_length(struct bpf_prog_array __rcu *progs)
1533 {
1534 	struct bpf_prog **prog;
1535 	u32 cnt = 0;
1536 
1537 	rcu_read_lock();
1538 	prog = rcu_dereference(progs)->progs;
1539 	for (; *prog; prog++)
1540 		if (*prog != &dummy_bpf_prog.prog)
1541 			cnt++;
1542 	rcu_read_unlock();
1543 	return cnt;
1544 }
1545 
1546 static bool bpf_prog_array_copy_core(struct bpf_prog **prog,
1547 				     u32 *prog_ids,
1548 				     u32 request_cnt)
1549 {
1550 	int i = 0;
1551 
1552 	for (; *prog; prog++) {
1553 		if (*prog == &dummy_bpf_prog.prog)
1554 			continue;
1555 		prog_ids[i] = (*prog)->aux->id;
1556 		if (++i == request_cnt) {
1557 			prog++;
1558 			break;
1559 		}
1560 	}
1561 
1562 	return !!(*prog);
1563 }
1564 
1565 int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
1566 				__u32 __user *prog_ids, u32 cnt)
1567 {
1568 	struct bpf_prog **prog;
1569 	unsigned long err = 0;
1570 	bool nospc;
1571 	u32 *ids;
1572 
1573 	/* Users of this function are doing:
1574 	 * cnt = bpf_prog_array_length();
1575 	 * if (cnt > 0)
1576 	 *     bpf_prog_array_copy_to_user(..., cnt);
1577 	 * so the kcalloc below doesn't need an extra cnt > 0 check, but
1578 	 * bpf_prog_array_length() releases the rcu lock, so the
1579 	 * prog array could have been swapped with an empty or larger array;
1580 	 * hence always copy 'cnt' prog_ids to the user.
1581 	 * In a rare race the user will see zero prog_ids.
1582 	 */
1583 	ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
1584 	if (!ids)
1585 		return -ENOMEM;
1586 	rcu_read_lock();
1587 	prog = rcu_dereference(progs)->progs;
1588 	nospc = bpf_prog_array_copy_core(prog, ids, cnt);
1589 	rcu_read_unlock();
1590 	err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
1591 	kfree(ids);
1592 	if (err)
1593 		return -EFAULT;
1594 	if (nospc)
1595 		return -ENOSPC;
1596 	return 0;
1597 }
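/* Typical caller pattern, mirroring the comment above ('uids' is a
 * hypothetical user buffer):
 *
 *	cnt = bpf_prog_array_length(progs);
 *	if (cnt)
 *		err = bpf_prog_array_copy_to_user(progs, uids, cnt);
 *
 * An -ENOSPC return means the array grew between the two calls; the
 * first 'cnt' ids were nevertheless copied out.
 */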
1598 
1599 void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *progs,
1600 				struct bpf_prog *old_prog)
1601 {
1602 	struct bpf_prog **prog = progs->progs;
1603 
1604 	for (; *prog; prog++)
1605 		if (*prog == old_prog) {
1606 			WRITE_ONCE(*prog, &dummy_bpf_prog.prog);
1607 			break;
1608 		}
1609 }
1610 
1611 int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
1612 			struct bpf_prog *exclude_prog,
1613 			struct bpf_prog *include_prog,
1614 			struct bpf_prog_array **new_array)
1615 {
1616 	int new_prog_cnt, carry_prog_cnt = 0;
1617 	struct bpf_prog **existing_prog;
1618 	struct bpf_prog_array *array;
1619 	int new_prog_idx = 0;
1620 
1621 	/* Figure out how many existing progs we need to carry over to
1622 	 * the new array.
1623 	 */
1624 	if (old_array) {
1625 		existing_prog = old_array->progs;
1626 		for (; *existing_prog; existing_prog++) {
1627 			if (*existing_prog != exclude_prog &&
1628 			    *existing_prog != &dummy_bpf_prog.prog)
1629 				carry_prog_cnt++;
1630 			if (*existing_prog == include_prog)
1631 				return -EEXIST;
1632 		}
1633 	}
1634 
1635 	/* How many progs (not NULL) will be in the new array? */
1636 	new_prog_cnt = carry_prog_cnt;
1637 	if (include_prog)
1638 		new_prog_cnt += 1;
1639 
1640 	/* Do we have any prog (not NULL) in the new array? */
1641 	if (!new_prog_cnt) {
1642 		*new_array = NULL;
1643 		return 0;
1644 	}
1645 
1646 	/* +1 as the end of prog_array is marked with NULL */
1647 	array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL);
1648 	if (!array)
1649 		return -ENOMEM;
1650 
1651 	/* Fill in the new prog array */
1652 	if (carry_prog_cnt) {
1653 		existing_prog = old_array->progs;
1654 		for (; *existing_prog; existing_prog++)
1655 			if (*existing_prog != exclude_prog &&
1656 			    *existing_prog != &dummy_bpf_prog.prog)
1657 				array->progs[new_prog_idx++] = *existing_prog;
1658 	}
1659 	if (include_prog)
1660 		array->progs[new_prog_idx++] = include_prog;
1661 	array->progs[new_prog_idx] = NULL;
1662 	*new_array = array;
1663 	return 0;
1664 }
1665 
1666 int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array,
1667 			     u32 *prog_ids, u32 request_cnt,
1668 			     u32 *prog_cnt)
1669 {
1670 	struct bpf_prog **prog;
1671 	u32 cnt = 0;
1672 
1673 	if (array)
1674 		cnt = bpf_prog_array_length(array);
1675 
1676 	*prog_cnt = cnt;
1677 
1678 	/* return early if user requested only program count or nothing to copy */
1679 	if (!request_cnt || !cnt)
1680 		return 0;
1681 
1682 	/* this function is called under trace/bpf_trace.c: bpf_event_mutex */
1683 	prog = rcu_dereference_check(array, 1)->progs;
1684 	return bpf_prog_array_copy_core(prog, prog_ids, request_cnt) ? -ENOSPC
1685 								     : 0;
1686 }
1687 
1688 static void bpf_prog_free_deferred(struct work_struct *work)
1689 {
1690 	struct bpf_prog_aux *aux;
1691 	int i;
1692 
1693 	aux = container_of(work, struct bpf_prog_aux, work);
1694 	if (bpf_prog_is_dev_bound(aux))
1695 		bpf_prog_offload_destroy(aux->prog);
1696 #ifdef CONFIG_PERF_EVENTS
1697 	if (aux->prog->has_callchain_buf)
1698 		put_callchain_buffers();
1699 #endif
1700 	for (i = 0; i < aux->func_cnt; i++)
1701 		bpf_jit_free(aux->func[i]);
1702 	if (aux->func_cnt) {
1703 		kfree(aux->func);
1704 		bpf_prog_unlock_free(aux->prog);
1705 	} else {
1706 		bpf_jit_free(aux->prog);
1707 	}
1708 }
1709 
1710 /* Free internal BPF program */
1711 void bpf_prog_free(struct bpf_prog *fp)
1712 {
1713 	struct bpf_prog_aux *aux = fp->aux;
1714 
1715 	INIT_WORK(&aux->work, bpf_prog_free_deferred);
1716 	schedule_work(&aux->work);
1717 }
1718 EXPORT_SYMBOL_GPL(bpf_prog_free);
1719 
1720 /* RNG for unprivileged user space with separated state from prandom_u32(). */
1721 static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);
1722 
1723 void bpf_user_rnd_init_once(void)
1724 {
1725 	prandom_init_once(&bpf_user_rnd_state);
1726 }
1727 
1728 BPF_CALL_0(bpf_user_rnd_u32)
1729 {
1730 	/* Should someone ever have the rather unwise idea to use some
1731 	 * of the registers passed into this function, then note that
1732 	 * this function is called from native eBPF and classic-to-eBPF
1733 	 * transformations. Register assignments from both sides are
1734 	 * different, f.e. classic always sets fn(ctx, A, X) here.
1735 	 */
1736 	struct rnd_state *state;
1737 	u32 res;
1738 
1739 	state = &get_cpu_var(bpf_user_rnd_state);
1740 	res = prandom_u32_state(state);
1741 	put_cpu_var(bpf_user_rnd_state);
1742 
1743 	return res;
1744 }
1745 
1746 /* Weak definitions of helper functions in case we don't have bpf syscall. */
1747 const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
1748 const struct bpf_func_proto bpf_map_update_elem_proto __weak;
1749 const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
1750 
1751 const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
1752 const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
1753 const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
1754 const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
1755 
1756 const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
1757 const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
1758 const struct bpf_func_proto bpf_get_current_comm_proto __weak;
1759 const struct bpf_func_proto bpf_sock_map_update_proto __weak;
1760 const struct bpf_func_proto bpf_sock_hash_update_proto __weak;
1761 
1762 const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
1763 {
1764 	return NULL;
1765 }
1766 
1767 u64 __weak
1768 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
1769 		 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
1770 {
1771 	return -ENOTSUPP;
1772 }
1773 EXPORT_SYMBOL_GPL(bpf_event_output);
1774 
1775 /* Always built-in helper functions. */
1776 const struct bpf_func_proto bpf_tail_call_proto = {
1777 	.func		= NULL,
1778 	.gpl_only	= false,
1779 	.ret_type	= RET_VOID,
1780 	.arg1_type	= ARG_PTR_TO_CTX,
1781 	.arg2_type	= ARG_CONST_MAP_PTR,
1782 	.arg3_type	= ARG_ANYTHING,
1783 };
1784 
1785 /* Stub for JITs that only support cBPF. eBPF programs are interpreted.
1786  * It is encouraged to implement bpf_int_jit_compile() instead, so that
1787  * eBPF and implicitly also cBPF can get JITed!
1788  */
1789 struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
1790 {
1791 	return prog;
1792 }
1793 
1794 /* Stub for JITs that support eBPF. All cBPF code gets transformed into
1795  * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
1796  */
1797 void __weak bpf_jit_compile(struct bpf_prog *prog)
1798 {
1799 }
1800 
1801 bool __weak bpf_helper_changes_pkt_data(void *func)
1802 {
1803 	return false;
1804 }
1805 
1806 /* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
1807  * skb_copy_bits(), so provide a weak definition of it for NET-less config.
1808  */
1809 int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
1810 			 int len)
1811 {
1812 	return -EFAULT;
1813 }
1814 
1815 /* All definitions of tracepoints related to BPF. */
1816 #define CREATE_TRACE_POINTS
1817 #include <linux/bpf_trace.h>
1818 
1819 EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);
1820