/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 *	Jay Schulist <jschlst@samba.org>
 *	Alexei Starovoitov <ast@plumgrid.com>
 *	Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */

#include <linux/filter.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/moduleloader.h>
#include <linux/bpf.h>
#include <linux/frame.h>
#include <linux/rbtree_latch.h>
#include <linux/kallsyms.h>
#include <linux/rcupdate.h>

#include <asm/unaligned.h>

/* Registers */
#define BPF_R0	regs[BPF_REG_0]
#define BPF_R1	regs[BPF_REG_1]
#define BPF_R2	regs[BPF_REG_2]
#define BPF_R3	regs[BPF_REG_3]
#define BPF_R4	regs[BPF_REG_4]
#define BPF_R5	regs[BPF_REG_5]
#define BPF_R6	regs[BPF_REG_6]
#define BPF_R7	regs[BPF_REG_7]
#define BPF_R8	regs[BPF_REG_8]
#define BPF_R9	regs[BPF_REG_9]
#define BPF_R10	regs[BPF_REG_10]

/* Named registers */
#define DST	regs[insn->dst_reg]
#define SRC	regs[insn->src_reg]
#define FP	regs[BPF_REG_FP]
#define ARG1	regs[BPF_REG_ARG1]
#define CTX	regs[BPF_REG_CTX]
#define IMM	insn->imm

/* No hurry in this branch
 *
 * Exported for the bpf jit load helper.
 */
void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
{
	u8 *ptr = NULL;

	if (k >= SKF_NET_OFF)
		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
	else if (k >= SKF_LL_OFF)
		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;

	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
		return ptr;

	return NULL;
}
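
/* Illustrative sketch (not part of this file's API surface): a classic
 * BPF-style offset of SKF_NET_OFF + 9 resolves to the 10th byte of the
 * network header, i.e. the IPv4 protocol field:
 *
 *	u8 *proto = bpf_internal_load_pointer_neg_helper(skb,
 *			SKF_NET_OFF + offsetof(struct iphdr, protocol),
 *			sizeof(u8));
 *	if (proto)
 *		... *proto now holds e.g. IPPROTO_TCP ...
 *
 * A NULL return means the requested bytes lie outside the linear skb area.
 */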

struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
	struct bpf_prog_aux *aux;
	struct bpf_prog *fp;

	size = round_up(size, PAGE_SIZE);
	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
	if (fp == NULL)
		return NULL;

	aux = kzalloc(sizeof(*aux), GFP_KERNEL | gfp_extra_flags);
	if (aux == NULL) {
		vfree(fp);
		return NULL;
	}

	fp->pages = size / PAGE_SIZE;
	fp->aux = aux;
	fp->aux->prog = fp;
	fp->jit_requested = ebpf_jit_enabled();

	INIT_LIST_HEAD_RCU(&fp->aux->ksym_lnode);

	return fp;
}
EXPORT_SYMBOL_GPL(bpf_prog_alloc);
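
/* Minimal usage sketch, assuming a caller that already holds a validated
 * instruction array (variable names below are illustrative only):
 *
 *	fp = bpf_prog_alloc(bpf_prog_size(insn_cnt), GFP_USER);
 *	if (!fp)
 *		return -ENOMEM;
 *	fp->len = insn_cnt;
 *	memcpy(fp->insnsi, insns, bpf_prog_insn_size(fp));
 *
 * The program is later finalized via bpf_prog_select_runtime() and
 * eventually released with bpf_prog_free().
 */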

struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
				  gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
	struct bpf_prog *fp;
	u32 pages, delta;
	int ret;

	BUG_ON(fp_old == NULL);

	size = round_up(size, PAGE_SIZE);
	pages = size / PAGE_SIZE;
	if (pages <= fp_old->pages)
		return fp_old;

	delta = pages - fp_old->pages;
	ret = __bpf_prog_charge(fp_old->aux->user, delta);
	if (ret)
		return NULL;

	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
	if (fp == NULL) {
		__bpf_prog_uncharge(fp_old->aux->user, delta);
	} else {
		memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
		fp->pages = pages;
		fp->aux->prog = fp;

		/* We keep fp->aux from fp_old around in the new
		 * reallocated structure.
		 */
		fp_old->aux = NULL;
		__bpf_prog_free(fp_old);
	}

	return fp;
}

void __bpf_prog_free(struct bpf_prog *fp)
{
	kfree(fp->aux);
	vfree(fp);
}

int bpf_prog_calc_tag(struct bpf_prog *fp)
{
	const u32 bits_offset = SHA_MESSAGE_BYTES - sizeof(__be64);
	u32 raw_size = bpf_prog_tag_scratch_size(fp);
	u32 digest[SHA_DIGEST_WORDS];
	u32 ws[SHA_WORKSPACE_WORDS];
	u32 i, bsize, psize, blocks;
	struct bpf_insn *dst;
	bool was_ld_map;
	u8 *raw, *todo;
	__be32 *result;
	__be64 *bits;

	raw = vmalloc(raw_size);
	if (!raw)
		return -ENOMEM;

	sha_init(digest);
	memset(ws, 0, sizeof(ws));

	/* We need to take out the map fds for the digest calculation
	 * since they are unstable from the user space side.
	 */
	dst = (void *)raw;
	for (i = 0, was_ld_map = false; i < fp->len; i++) {
		dst[i] = fp->insnsi[i];
		if (!was_ld_map &&
		    dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
		    dst[i].src_reg == BPF_PSEUDO_MAP_FD) {
			was_ld_map = true;
			dst[i].imm = 0;
		} else if (was_ld_map &&
			   dst[i].code == 0 &&
			   dst[i].dst_reg == 0 &&
			   dst[i].src_reg == 0 &&
			   dst[i].off == 0) {
			was_ld_map = false;
			dst[i].imm = 0;
		} else {
			was_ld_map = false;
		}
	}

	psize = bpf_prog_insn_size(fp);
	memset(&raw[psize], 0, raw_size - psize);
	raw[psize++] = 0x80;

	bsize  = round_up(psize, SHA_MESSAGE_BYTES);
	blocks = bsize / SHA_MESSAGE_BYTES;
	todo   = raw;
	if (bsize - psize >= sizeof(__be64)) {
		bits = (__be64 *)(todo + bsize - sizeof(__be64));
	} else {
		bits = (__be64 *)(todo + bsize + bits_offset);
		blocks++;
	}
	*bits = cpu_to_be64((psize - 1) << 3);

	while (blocks--) {
		sha_transform(digest, todo, ws);
		todo += SHA_MESSAGE_BYTES;
	}

	result = (__force __be32 *)digest;
	for (i = 0; i < SHA_DIGEST_WORDS; i++)
		result[i] = cpu_to_be32(digest[i]);
	memcpy(fp->tag, result, sizeof(fp->tag));

	vfree(raw);
	return 0;
}
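
/* The resulting tag is the SHA-1 digest (truncated to sizeof(fp->tag))
 * of the instruction image with map fd immediates zeroed out, so two
 * loads of the same program yield the same tag regardless of which fd
 * numbers user space happened to use. This is the tag exposed via
 * /proc/<pid>/fdinfo/<fd> and friends.
 */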

static void bpf_adj_branches(struct bpf_prog *prog, u32 pos, u32 delta)
{
	struct bpf_insn *insn = prog->insnsi;
	u32 i, insn_cnt = prog->len;
	bool pseudo_call;
	u8 code;
	int off;

	for (i = 0; i < insn_cnt; i++, insn++) {
		code = insn->code;
		if (BPF_CLASS(code) != BPF_JMP)
			continue;
		if (BPF_OP(code) == BPF_EXIT)
			continue;
		if (BPF_OP(code) == BPF_CALL) {
			if (insn->src_reg == BPF_PSEUDO_CALL)
				pseudo_call = true;
			else
				continue;
		} else {
			pseudo_call = false;
		}
		off = pseudo_call ? insn->imm : insn->off;

		/* Adjust offset of jmps if we cross boundaries. */
		if (i < pos && i + off + 1 > pos)
			off += delta;
		else if (i > pos + delta && i + off + 1 <= pos + delta)
			off -= delta;

		if (pseudo_call)
			insn->imm = off;
		else
			insn->off = off;
	}
}
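
/* Worked example: with pos == 2 and delta == 1 (one insn inserted after
 * index 2), a jump at i == 1 with off == 2 originally landed on index 4;
 * since the patch point lies between branch and target (1 < pos and
 * 1 + 2 + 1 > pos), off becomes 3 so the branch still reaches the same
 * insn, which now sits at index 5.
 */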

struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
				       const struct bpf_insn *patch, u32 len)
{
	u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
	struct bpf_prog *prog_adj;

	/* Since our patchlet doesn't expand the image, we're done. */
	if (insn_delta == 0) {
		memcpy(prog->insnsi + off, patch, sizeof(*patch));
		return prog;
	}

	insn_adj_cnt = prog->len + insn_delta;

	/* Several new instructions need to be inserted. Make room
	 * for them. Likely, there's no need for a new allocation as
	 * last page could have large enough tailroom.
	 */
	prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
				    GFP_USER);
	if (!prog_adj)
		return NULL;

	prog_adj->len = insn_adj_cnt;

	/* Patching happens in 3 steps:
	 *
	 * 1) Move over tail of insnsi from next instruction onwards,
	 *    so we can patch the single target insn with one or more
	 *    new ones (patching is always from 1 to n insns, n > 0).
	 * 2) Inject new instructions at the target location.
	 * 3) Adjust branch offsets if necessary.
	 */
	insn_rest = insn_adj_cnt - off - len;

	memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
		sizeof(*patch) * insn_rest);
	memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);

	bpf_adj_branches(prog_adj, off, insn_delta);

	return prog_adj;
}
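
/* Usage sketch (in the spirit of what the verifier does when rewriting
 * single insns into sequences; names below are illustrative):
 *
 *	struct bpf_insn patchlet[] = {
 *		BPF_MOV64_REG(BPF_REG_AX, BPF_REG_0),
 *		BPF_ALU64_IMM(BPF_ADD, BPF_REG_AX, 1),
 *	};
 *
 *	new_prog = bpf_patch_insn_single(prog, target_idx, patchlet,
 *					 ARRAY_SIZE(patchlet));
 *	if (!new_prog)
 *		return -ENOMEM;
 *
 * On success the old prog must not be used anymore, as it may have been
 * reallocated and freed.
 */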

#ifdef CONFIG_BPF_JIT
static __always_inline void
bpf_get_prog_addr_region(const struct bpf_prog *prog,
			 unsigned long *symbol_start,
			 unsigned long *symbol_end)
{
	const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(prog);
	unsigned long addr = (unsigned long)hdr;

	WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));

	*symbol_start = addr;
	*symbol_end   = addr + hdr->pages * PAGE_SIZE;
}

static void bpf_get_prog_name(const struct bpf_prog *prog, char *sym)
{
	const char *end = sym + KSYM_NAME_LEN;

	BUILD_BUG_ON(sizeof("bpf_prog_") +
		     sizeof(prog->tag) * 2 +
		     /* name has been null terminated.
		      * We would need +1 for the '_' preceding
		      * the name. However, the null character
		      * is double counted between the name and the
		      * sizeof("bpf_prog_") above, so we omit
		      * the +1 here.
		      */
		     sizeof(prog->aux->name) > KSYM_NAME_LEN);

	sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
	sym  = bin2hex(sym, prog->tag, sizeof(prog->tag));
	if (prog->aux->name[0])
		snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name);
	else
		*sym = 0;
}
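
/* The generated symbol thus has the form bpf_prog_<tag>[_<name>], e.g.
 * "bpf_prog_8815f7b246c77550_foo" for a program loaded under the name
 * "foo" (the tag value here is made up for illustration).
 */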

static __always_inline unsigned long
bpf_get_prog_addr_start(struct latch_tree_node *n)
{
	unsigned long symbol_start, symbol_end;
	const struct bpf_prog_aux *aux;

	aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
	bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);

	return symbol_start;
}

static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
					  struct latch_tree_node *b)
{
	return bpf_get_prog_addr_start(a) < bpf_get_prog_addr_start(b);
}

static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
{
	unsigned long val = (unsigned long)key;
	unsigned long symbol_start, symbol_end;
	const struct bpf_prog_aux *aux;

	aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
	bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);

	if (val < symbol_start)
		return -1;
	if (val >= symbol_end)
		return  1;

	return 0;
}

static const struct latch_tree_ops bpf_tree_ops = {
	.less	= bpf_tree_less,
	.comp	= bpf_tree_comp,
};

static DEFINE_SPINLOCK(bpf_lock);
static LIST_HEAD(bpf_kallsyms);
static struct latch_tree_root bpf_tree __cacheline_aligned;

int bpf_jit_kallsyms __read_mostly;

static void bpf_prog_ksym_node_add(struct bpf_prog_aux *aux)
{
	WARN_ON_ONCE(!list_empty(&aux->ksym_lnode));
	list_add_tail_rcu(&aux->ksym_lnode, &bpf_kallsyms);
	latch_tree_insert(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
}

static void bpf_prog_ksym_node_del(struct bpf_prog_aux *aux)
{
	if (list_empty(&aux->ksym_lnode))
		return;

	latch_tree_erase(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
	list_del_rcu(&aux->ksym_lnode);
}

static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
{
	return fp->jited && !bpf_prog_was_classic(fp);
}

static bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
{
	return list_empty(&fp->aux->ksym_lnode) ||
	       fp->aux->ksym_lnode.prev == LIST_POISON2;
}

void bpf_prog_kallsyms_add(struct bpf_prog *fp)
{
	if (!bpf_prog_kallsyms_candidate(fp) ||
	    !capable(CAP_SYS_ADMIN))
		return;

	spin_lock_bh(&bpf_lock);
	bpf_prog_ksym_node_add(fp->aux);
	spin_unlock_bh(&bpf_lock);
}

void bpf_prog_kallsyms_del(struct bpf_prog *fp)
{
	if (!bpf_prog_kallsyms_candidate(fp))
		return;

	spin_lock_bh(&bpf_lock);
	bpf_prog_ksym_node_del(fp->aux);
	spin_unlock_bh(&bpf_lock);
}

static struct bpf_prog *bpf_prog_kallsyms_find(unsigned long addr)
{
	struct latch_tree_node *n;

	if (!bpf_jit_kallsyms_enabled())
		return NULL;

	n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
	return n ?
	       container_of(n, struct bpf_prog_aux, ksym_tnode)->prog :
	       NULL;
}

const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
				 unsigned long *off, char *sym)
{
	unsigned long symbol_start, symbol_end;
	struct bpf_prog *prog;
	char *ret = NULL;

	rcu_read_lock();
	prog = bpf_prog_kallsyms_find(addr);
	if (prog) {
		bpf_get_prog_addr_region(prog, &symbol_start, &symbol_end);
		bpf_get_prog_name(prog, sym);

		ret = sym;
		if (size)
			*size = symbol_end - symbol_start;
		if (off)
			*off  = addr - symbol_start;
	}
	rcu_read_unlock();

	return ret;
}

bool is_bpf_text_address(unsigned long addr)
{
	bool ret;

	rcu_read_lock();
	ret = bpf_prog_kallsyms_find(addr) != NULL;
	rcu_read_unlock();

	return ret;
}

int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
		    char *sym)
{
	unsigned long symbol_start, symbol_end;
	struct bpf_prog_aux *aux;
	unsigned int it = 0;
	int ret = -ERANGE;

	if (!bpf_jit_kallsyms_enabled())
		return ret;

	rcu_read_lock();
	list_for_each_entry_rcu(aux, &bpf_kallsyms, ksym_lnode) {
		if (it++ != symnum)
			continue;

		bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
		bpf_get_prog_name(aux->prog, sym);

		*value = symbol_start;
		*type  = BPF_SYM_ELF_TYPE;

		ret = 0;
		break;
	}
	rcu_read_unlock();

	return ret;
}

struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
		     unsigned int alignment,
		     bpf_jit_fill_hole_t bpf_fill_ill_insns)
{
	struct bpf_binary_header *hdr;
	unsigned int size, hole, start;

	/* Most BPF filters are really small, but if some of them
	 * fill a page, allow at least 128 extra bytes to insert a
	 * random section of illegal instructions.
	 */
	size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
	hdr = module_alloc(size);
	if (hdr == NULL)
		return NULL;

	/* Fill space with illegal/arch-dep instructions. */
	bpf_fill_ill_insns(hdr, size);

	hdr->pages = size / PAGE_SIZE;
	hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
		     PAGE_SIZE - sizeof(*hdr));
	start = (get_random_int() % hole) & ~(alignment - 1);

	/* Leave a random number of instructions before BPF code. */
	*image_ptr = &hdr->image[start];

	return hdr;
}

void bpf_jit_binary_free(struct bpf_binary_header *hdr)
{
	module_memfree(hdr);
}
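
/* Sketch of how an arch JIT typically pairs these helpers (callback name
 * and alignment are illustrative; e.g. x86 fills holes with int3
 * breakpoints):
 *
 *	static void jit_fill_hole(void *area, unsigned int size)
 *	{
 *		memset(area, 0xcc, size);
 *	}
 *
 *	header = bpf_jit_binary_alloc(proglen, &image, 4, jit_fill_hole);
 *	if (!header)
 *		... fall back to the interpreter ...
 *	... emit instructions into image ...
 *	bpf_jit_binary_lock_ro(header);
 *
 * bpf_jit_binary_free() is the counterpart used from bpf_jit_free().
 */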

/* This symbol is only overridden by archs that have different
 * requirements than the usual eBPF JITs, f.e. when they only
 * implement cBPF JIT, do not set images read-only, etc.
 */
void __weak bpf_jit_free(struct bpf_prog *fp)
{
	if (fp->jited) {
		struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);

		bpf_jit_binary_unlock_ro(hdr);
		bpf_jit_binary_free(hdr);

		WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
	}

	bpf_prog_unlock_free(fp);
}

int bpf_jit_harden __read_mostly;

static int bpf_jit_blind_insn(const struct bpf_insn *from,
			      const struct bpf_insn *aux,
			      struct bpf_insn *to_buff)
{
	struct bpf_insn *to = to_buff;
	u32 imm_rnd = get_random_int();
	s16 off;

	BUILD_BUG_ON(BPF_REG_AX  + 1 != MAX_BPF_JIT_REG);
	BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);

	if (from->imm == 0 &&
	    (from->code == (BPF_ALU   | BPF_MOV | BPF_K) ||
	     from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
		*to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
		goto out;
	}

	switch (from->code) {
	case BPF_ALU | BPF_ADD | BPF_K:
	case BPF_ALU | BPF_SUB | BPF_K:
	case BPF_ALU | BPF_AND | BPF_K:
	case BPF_ALU | BPF_OR  | BPF_K:
	case BPF_ALU | BPF_XOR | BPF_K:
	case BPF_ALU | BPF_MUL | BPF_K:
	case BPF_ALU | BPF_MOV | BPF_K:
	case BPF_ALU | BPF_DIV | BPF_K:
	case BPF_ALU | BPF_MOD | BPF_K:
		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX);
		break;

	case BPF_ALU64 | BPF_ADD | BPF_K:
	case BPF_ALU64 | BPF_SUB | BPF_K:
	case BPF_ALU64 | BPF_AND | BPF_K:
	case BPF_ALU64 | BPF_OR  | BPF_K:
	case BPF_ALU64 | BPF_XOR | BPF_K:
	case BPF_ALU64 | BPF_MUL | BPF_K:
	case BPF_ALU64 | BPF_MOV | BPF_K:
	case BPF_ALU64 | BPF_DIV | BPF_K:
	case BPF_ALU64 | BPF_MOD | BPF_K:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU64_REG(from->code, from->dst_reg, BPF_REG_AX);
		break;

	case BPF_JMP | BPF_JEQ  | BPF_K:
	case BPF_JMP | BPF_JNE  | BPF_K:
	case BPF_JMP | BPF_JGT  | BPF_K:
	case BPF_JMP | BPF_JLT  | BPF_K:
	case BPF_JMP | BPF_JGE  | BPF_K:
	case BPF_JMP | BPF_JLE  | BPF_K:
	case BPF_JMP | BPF_JSGT | BPF_K:
	case BPF_JMP | BPF_JSLT | BPF_K:
	case BPF_JMP | BPF_JSGE | BPF_K:
	case BPF_JMP | BPF_JSLE | BPF_K:
	case BPF_JMP | BPF_JSET | BPF_K:
		/* Account for the extra offset in case of a backjump. */
		off = from->off;
		if (off < 0)
			off -= 2;
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
		break;

	case BPF_LD | BPF_ABS | BPF_W:
	case BPF_LD | BPF_ABS | BPF_H:
	case BPF_LD | BPF_ABS | BPF_B:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_LD_IND(from->code, BPF_REG_AX, 0);
		break;

	case BPF_LD | BPF_IND | BPF_W:
	case BPF_LD | BPF_IND | BPF_H:
	case BPF_LD | BPF_IND | BPF_B:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU32_REG(BPF_ADD, BPF_REG_AX, from->src_reg);
		*to++ = BPF_LD_IND(from->code, BPF_REG_AX, 0);
		break;

	case BPF_LD | BPF_IMM | BPF_DW:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
		*to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
		break;
	case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU64_REG(BPF_OR,  aux[0].dst_reg, BPF_REG_AX);
		break;

	case BPF_ST | BPF_MEM | BPF_DW:
	case BPF_ST | BPF_MEM | BPF_W:
	case BPF_ST | BPF_MEM | BPF_H:
	case BPF_ST | BPF_MEM | BPF_B:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
		break;
	}
out:
	return to - to_buff;
}
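
/* Blinding example: a single BPF_ALU64_IMM(BPF_ADD, R1, 0x1234) is
 * rewritten into three insns so the attacker-chosen constant never
 * appears verbatim in the JIT image:
 *
 *	BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ 0x1234);
 *	BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
 *	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_AX);
 *
 * with imm_rnd freshly drawn for each rewritten insn.
 */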

static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
					      gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
	struct bpf_prog *fp;

	fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags, PAGE_KERNEL);
	if (fp != NULL) {
		/* aux->prog still points to the fp_other one, so
		 * when promoting the clone to the real program,
		 * this still needs to be adapted.
		 */
		memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
	}

	return fp;
}

static void bpf_prog_clone_free(struct bpf_prog *fp)
{
	/* aux was stolen by the other clone, so we cannot free
	 * it from this path! It will be freed eventually by the
	 * other program on release.
	 *
	 * At this point, we don't need a deferred release since
	 * clone is guaranteed to not be locked.
	 */
	fp->aux = NULL;
	__bpf_prog_free(fp);
}

void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
{
	/* We have to repoint aux->prog to self, as we don't
	 * know whether fp here is the clone or the original.
	 */
	fp->aux->prog = fp;
	bpf_prog_clone_free(fp_other);
}

struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
{
	struct bpf_insn insn_buff[16], aux[2];
	struct bpf_prog *clone, *tmp;
	int insn_delta, insn_cnt;
	struct bpf_insn *insn;
	int i, rewritten;

	if (!bpf_jit_blinding_enabled(prog) || prog->blinded)
		return prog;

	clone = bpf_prog_clone_create(prog, GFP_USER);
	if (!clone)
		return ERR_PTR(-ENOMEM);

	insn_cnt = clone->len;
	insn = clone->insnsi;

	for (i = 0; i < insn_cnt; i++, insn++) {
		/* We temporarily need to hold the original ld64 insn
		 * so that we can still access the first part in the
		 * second blinding run.
		 */
		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
		    insn[1].code == 0)
			memcpy(aux, insn, sizeof(aux));

		rewritten = bpf_jit_blind_insn(insn, aux, insn_buff);
		if (!rewritten)
			continue;

		tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
		if (!tmp) {
			/* Patching may have repointed aux->prog during
			 * realloc from the original one, so we need to
			 * fix it up here on error.
			 */
			bpf_jit_prog_release_other(prog, clone);
			return ERR_PTR(-ENOMEM);
		}

		clone = tmp;
		insn_delta = rewritten - 1;

		/* Walk new program and skip insns we just inserted. */
		insn = clone->insnsi + i + insn_delta;
		insn_cnt += insn_delta;
		i        += insn_delta;
	}

	clone->blinded = 1;
	return clone;
}
#endif /* CONFIG_BPF_JIT */

/* Base function for offset calculation. Needs to go into .text section,
 * therefore keeping it non-static as well; will also be used by JITs
 * anyway later on, so do not let the compiler omit it. This also needs
 * to go into kallsyms for correlation from e.g. bpftool, so naming
 * must not change.
 */
noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	return 0;
}
EXPORT_SYMBOL_GPL(__bpf_call_base);

/**
 *	___bpf_prog_run - run eBPF program on a given context
 *	@regs: is the array of MAX_BPF_REG eBPF pseudo-registers
 *	@insn: is the array of eBPF instructions
 *	@stack: is the eBPF storage stack
 *
 * Decode and execute eBPF instructions.
 */
static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
{
	u64 tmp;
	static const void *jumptable[256] = {
		[0 ... 255] = &&default_label,
		/* Now overwrite non-defaults ... */
		/* 32 bit ALU operations */
		[BPF_ALU | BPF_ADD | BPF_X] = &&ALU_ADD_X,
		[BPF_ALU | BPF_ADD | BPF_K] = &&ALU_ADD_K,
		[BPF_ALU | BPF_SUB | BPF_X] = &&ALU_SUB_X,
		[BPF_ALU | BPF_SUB | BPF_K] = &&ALU_SUB_K,
		[BPF_ALU | BPF_AND | BPF_X] = &&ALU_AND_X,
		[BPF_ALU | BPF_AND | BPF_K] = &&ALU_AND_K,
		[BPF_ALU | BPF_OR | BPF_X]  = &&ALU_OR_X,
		[BPF_ALU | BPF_OR | BPF_K]  = &&ALU_OR_K,
		[BPF_ALU | BPF_LSH | BPF_X] = &&ALU_LSH_X,
		[BPF_ALU | BPF_LSH | BPF_K] = &&ALU_LSH_K,
		[BPF_ALU | BPF_RSH | BPF_X] = &&ALU_RSH_X,
		[BPF_ALU | BPF_RSH | BPF_K] = &&ALU_RSH_K,
		[BPF_ALU | BPF_XOR | BPF_X] = &&ALU_XOR_X,
		[BPF_ALU | BPF_XOR | BPF_K] = &&ALU_XOR_K,
		[BPF_ALU | BPF_MUL | BPF_X] = &&ALU_MUL_X,
		[BPF_ALU | BPF_MUL | BPF_K] = &&ALU_MUL_K,
		[BPF_ALU | BPF_MOV | BPF_X] = &&ALU_MOV_X,
		[BPF_ALU | BPF_MOV | BPF_K] = &&ALU_MOV_K,
		[BPF_ALU | BPF_DIV | BPF_X] = &&ALU_DIV_X,
		[BPF_ALU | BPF_DIV | BPF_K] = &&ALU_DIV_K,
		[BPF_ALU | BPF_MOD | BPF_X] = &&ALU_MOD_X,
		[BPF_ALU | BPF_MOD | BPF_K] = &&ALU_MOD_K,
		[BPF_ALU | BPF_NEG] = &&ALU_NEG,
		[BPF_ALU | BPF_END | BPF_TO_BE] = &&ALU_END_TO_BE,
		[BPF_ALU | BPF_END | BPF_TO_LE] = &&ALU_END_TO_LE,
		/* 64 bit ALU operations */
		[BPF_ALU64 | BPF_ADD | BPF_X] = &&ALU64_ADD_X,
		[BPF_ALU64 | BPF_ADD | BPF_K] = &&ALU64_ADD_K,
		[BPF_ALU64 | BPF_SUB | BPF_X] = &&ALU64_SUB_X,
		[BPF_ALU64 | BPF_SUB | BPF_K] = &&ALU64_SUB_K,
		[BPF_ALU64 | BPF_AND | BPF_X] = &&ALU64_AND_X,
		[BPF_ALU64 | BPF_AND | BPF_K] = &&ALU64_AND_K,
		[BPF_ALU64 | BPF_OR | BPF_X] = &&ALU64_OR_X,
		[BPF_ALU64 | BPF_OR | BPF_K] = &&ALU64_OR_K,
		[BPF_ALU64 | BPF_LSH | BPF_X] = &&ALU64_LSH_X,
		[BPF_ALU64 | BPF_LSH | BPF_K] = &&ALU64_LSH_K,
		[BPF_ALU64 | BPF_RSH | BPF_X] = &&ALU64_RSH_X,
		[BPF_ALU64 | BPF_RSH | BPF_K] = &&ALU64_RSH_K,
		[BPF_ALU64 | BPF_XOR | BPF_X] = &&ALU64_XOR_X,
		[BPF_ALU64 | BPF_XOR | BPF_K] = &&ALU64_XOR_K,
		[BPF_ALU64 | BPF_MUL | BPF_X] = &&ALU64_MUL_X,
		[BPF_ALU64 | BPF_MUL | BPF_K] = &&ALU64_MUL_K,
		[BPF_ALU64 | BPF_MOV | BPF_X] = &&ALU64_MOV_X,
		[BPF_ALU64 | BPF_MOV | BPF_K] = &&ALU64_MOV_K,
		[BPF_ALU64 | BPF_ARSH | BPF_X] = &&ALU64_ARSH_X,
		[BPF_ALU64 | BPF_ARSH | BPF_K] = &&ALU64_ARSH_K,
		[BPF_ALU64 | BPF_DIV | BPF_X] = &&ALU64_DIV_X,
		[BPF_ALU64 | BPF_DIV | BPF_K] = &&ALU64_DIV_K,
		[BPF_ALU64 | BPF_MOD | BPF_X] = &&ALU64_MOD_X,
		[BPF_ALU64 | BPF_MOD | BPF_K] = &&ALU64_MOD_K,
		[BPF_ALU64 | BPF_NEG] = &&ALU64_NEG,
		/* Call instruction */
		[BPF_JMP | BPF_CALL] = &&JMP_CALL,
		[BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
		[BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
		/* Jumps */
		[BPF_JMP | BPF_JA] = &&JMP_JA,
		[BPF_JMP | BPF_JEQ | BPF_X] = &&JMP_JEQ_X,
		[BPF_JMP | BPF_JEQ | BPF_K] = &&JMP_JEQ_K,
		[BPF_JMP | BPF_JNE | BPF_X] = &&JMP_JNE_X,
		[BPF_JMP | BPF_JNE | BPF_K] = &&JMP_JNE_K,
		[BPF_JMP | BPF_JGT | BPF_X] = &&JMP_JGT_X,
		[BPF_JMP | BPF_JGT | BPF_K] = &&JMP_JGT_K,
		[BPF_JMP | BPF_JLT | BPF_X] = &&JMP_JLT_X,
		[BPF_JMP | BPF_JLT | BPF_K] = &&JMP_JLT_K,
		[BPF_JMP | BPF_JGE | BPF_X] = &&JMP_JGE_X,
		[BPF_JMP | BPF_JGE | BPF_K] = &&JMP_JGE_K,
		[BPF_JMP | BPF_JLE | BPF_X] = &&JMP_JLE_X,
		[BPF_JMP | BPF_JLE | BPF_K] = &&JMP_JLE_K,
		[BPF_JMP | BPF_JSGT | BPF_X] = &&JMP_JSGT_X,
		[BPF_JMP | BPF_JSGT | BPF_K] = &&JMP_JSGT_K,
		[BPF_JMP | BPF_JSLT | BPF_X] = &&JMP_JSLT_X,
		[BPF_JMP | BPF_JSLT | BPF_K] = &&JMP_JSLT_K,
		[BPF_JMP | BPF_JSGE | BPF_X] = &&JMP_JSGE_X,
		[BPF_JMP | BPF_JSGE | BPF_K] = &&JMP_JSGE_K,
		[BPF_JMP | BPF_JSLE | BPF_X] = &&JMP_JSLE_X,
		[BPF_JMP | BPF_JSLE | BPF_K] = &&JMP_JSLE_K,
		[BPF_JMP | BPF_JSET | BPF_X] = &&JMP_JSET_X,
		[BPF_JMP | BPF_JSET | BPF_K] = &&JMP_JSET_K,
		/* Program return */
		[BPF_JMP | BPF_EXIT] = &&JMP_EXIT,
		/* Store instructions */
		[BPF_STX | BPF_MEM | BPF_B] = &&STX_MEM_B,
		[BPF_STX | BPF_MEM | BPF_H] = &&STX_MEM_H,
		[BPF_STX | BPF_MEM | BPF_W] = &&STX_MEM_W,
		[BPF_STX | BPF_MEM | BPF_DW] = &&STX_MEM_DW,
		[BPF_STX | BPF_XADD | BPF_W] = &&STX_XADD_W,
		[BPF_STX | BPF_XADD | BPF_DW] = &&STX_XADD_DW,
		[BPF_ST | BPF_MEM | BPF_B] = &&ST_MEM_B,
		[BPF_ST | BPF_MEM | BPF_H] = &&ST_MEM_H,
		[BPF_ST | BPF_MEM | BPF_W] = &&ST_MEM_W,
		[BPF_ST | BPF_MEM | BPF_DW] = &&ST_MEM_DW,
		/* Load instructions */
		[BPF_LDX | BPF_MEM | BPF_B] = &&LDX_MEM_B,
		[BPF_LDX | BPF_MEM | BPF_H] = &&LDX_MEM_H,
		[BPF_LDX | BPF_MEM | BPF_W] = &&LDX_MEM_W,
		[BPF_LDX | BPF_MEM | BPF_DW] = &&LDX_MEM_DW,
		[BPF_LD | BPF_ABS | BPF_W] = &&LD_ABS_W,
		[BPF_LD | BPF_ABS | BPF_H] = &&LD_ABS_H,
		[BPF_LD | BPF_ABS | BPF_B] = &&LD_ABS_B,
		[BPF_LD | BPF_IND | BPF_W] = &&LD_IND_W,
		[BPF_LD | BPF_IND | BPF_H] = &&LD_IND_H,
		[BPF_LD | BPF_IND | BPF_B] = &&LD_IND_B,
		[BPF_LD | BPF_IMM | BPF_DW] = &&LD_IMM_DW,
	};
	u32 tail_call_cnt = 0;
	void *ptr;
	int off;

#define CONT	 ({ insn++; goto select_insn; })
#define CONT_JMP ({ insn++; goto select_insn; })
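
/* CONT and CONT_JMP are currently identical; the separate name for the
 * jump case is presumably kept so that branch-heavy dispatch could later
 * be tuned (e.g. with explicit prediction hints) without touching every
 * label that uses it.
 */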

select_insn:
	goto *jumptable[insn->code];

	/* ALU */
#define ALU(OPCODE, OP)			\
	ALU64_##OPCODE##_X:		\
		DST = DST OP SRC;	\
		CONT;			\
	ALU_##OPCODE##_X:		\
		DST = (u32) DST OP (u32) SRC;	\
		CONT;			\
	ALU64_##OPCODE##_K:		\
		DST = DST OP IMM;		\
		CONT;			\
	ALU_##OPCODE##_K:		\
		DST = (u32) DST OP (u32) IMM;	\
		CONT;

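	/* For instance, ALU(ADD, +) below expands to the four handlers
	 * ALU64_ADD_X, ALU_ADD_X, ALU64_ADD_K and ALU_ADD_K, i.e. the
	 * 64-bit and 32-bit register/immediate flavors of the add insn.
	 */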
	ALU(ADD,  +)
	ALU(SUB,  -)
	ALU(AND,  &)
	ALU(OR,   |)
	ALU(LSH, <<)
	ALU(RSH, >>)
	ALU(XOR,  ^)
	ALU(MUL,  *)
#undef ALU
	ALU_NEG:
		DST = (u32) -DST;
		CONT;
	ALU64_NEG:
		DST = -DST;
		CONT;
	ALU_MOV_X:
		DST = (u32) SRC;
		CONT;
	ALU_MOV_K:
		DST = (u32) IMM;
		CONT;
	ALU64_MOV_X:
		DST = SRC;
		CONT;
	ALU64_MOV_K:
		DST = IMM;
		CONT;
	LD_IMM_DW:
		DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
		insn++;
		CONT;
	ALU64_ARSH_X:
		(*(s64 *) &DST) >>= SRC;
		CONT;
	ALU64_ARSH_K:
		(*(s64 *) &DST) >>= IMM;
		CONT;
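	/* In the DIV and MOD handlers below, a zero divisor in the
	 * register (_X) variants aborts execution with a return value
	 * of 0; the immediate (_K) variants need no such check since
	 * the verifier rejects division by a constant zero up front.
	 */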
	ALU64_MOD_X:
		if (unlikely(SRC == 0))
			return 0;
		div64_u64_rem(DST, SRC, &tmp);
		DST = tmp;
		CONT;
	ALU_MOD_X:
		if (unlikely(SRC == 0))
			return 0;
		tmp = (u32) DST;
		DST = do_div(tmp, (u32) SRC);
		CONT;
	ALU64_MOD_K:
		div64_u64_rem(DST, IMM, &tmp);
		DST = tmp;
		CONT;
	ALU_MOD_K:
		tmp = (u32) DST;
		DST = do_div(tmp, (u32) IMM);
		CONT;
	ALU64_DIV_X:
		if (unlikely(SRC == 0))
			return 0;
		DST = div64_u64(DST, SRC);
		CONT;
	ALU_DIV_X:
		if (unlikely(SRC == 0))
			return 0;
		tmp = (u32) DST;
		do_div(tmp, (u32) SRC);
		DST = (u32) tmp;
		CONT;
	ALU64_DIV_K:
		DST = div64_u64(DST, IMM);
		CONT;
	ALU_DIV_K:
		tmp = (u32) DST;
		do_div(tmp, (u32) IMM);
		DST = (u32) tmp;
		CONT;
	ALU_END_TO_BE:
		switch (IMM) {
		case 16:
			DST = (__force u16) cpu_to_be16(DST);
			break;
		case 32:
			DST = (__force u32) cpu_to_be32(DST);
			break;
		case 64:
			DST = (__force u64) cpu_to_be64(DST);
			break;
		}
		CONT;
	ALU_END_TO_LE:
		switch (IMM) {
		case 16:
			DST = (__force u16) cpu_to_le16(DST);
			break;
		case 32:
			DST = (__force u32) cpu_to_le32(DST);
			break;
		case 64:
			DST = (__force u64) cpu_to_le64(DST);
			break;
		}
		CONT;

	/* CALL */
	JMP_CALL:
		/* Function call scratches BPF_R1-BPF_R5 registers,
		 * preserves BPF_R6-BPF_R9, and stores return value
		 * into BPF_R0.
		 */
		BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
						       BPF_R4, BPF_R5);
		CONT;

	JMP_CALL_ARGS:
		BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2,
							    BPF_R3, BPF_R4,
							    BPF_R5,
							    insn + insn->off + 1);
		CONT;

	JMP_TAIL_CALL: {
		struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
		struct bpf_array *array = container_of(map, struct bpf_array, map);
		struct bpf_prog *prog;
		u32 index = BPF_R3;

		if (unlikely(index >= array->map.max_entries))
			goto out;
		if (unlikely(tail_call_cnt > MAX_TAIL_CALL_CNT))
			goto out;

		tail_call_cnt++;

		prog = READ_ONCE(array->ptrs[index]);
		if (!prog)
			goto out;

		/* ARG1 at this point is guaranteed to point to CTX from
		 * the verifier side due to the fact that the tail call is
		 * handled like a helper, that is, bpf_tail_call_proto,
		 * where arg1_type is ARG_PTR_TO_CTX.
		 */
		insn = prog->insnsi;
		goto select_insn;
out:
		CONT;
	}
	/* JMP */
	JMP_JA:
		insn += insn->off;
		CONT;
	JMP_JEQ_X:
		if (DST == SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JEQ_K:
		if (DST == IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JNE_X:
		if (DST != SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JNE_K:
		if (DST != IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGT_X:
		if (DST > SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGT_K:
		if (DST > IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JLT_X:
		if (DST < SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JLT_K:
		if (DST < IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGE_X:
		if (DST >= SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGE_K:
		if (DST >= IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JLE_X:
		if (DST <= SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JLE_K:
		if (DST <= IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGT_X:
		if (((s64) DST) > ((s64) SRC)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGT_K:
		if (((s64) DST) > ((s64) IMM)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSLT_X:
		if (((s64) DST) < ((s64) SRC)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSLT_K:
		if (((s64) DST) < ((s64) IMM)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGE_X:
		if (((s64) DST) >= ((s64) SRC)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGE_K:
		if (((s64) DST) >= ((s64) IMM)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSLE_X:
		if (((s64) DST) <= ((s64) SRC)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSLE_K:
		if (((s64) DST) <= ((s64) IMM)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSET_X:
		if (DST & SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSET_K:
		if (DST & IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_EXIT:
		return BPF_R0;

	/* STX and ST and LDX */
#define LDST(SIZEOP, SIZE)						\
	STX_MEM_##SIZEOP:						\
		*(SIZE *)(unsigned long) (DST + insn->off) = SRC;	\
		CONT;							\
	ST_MEM_##SIZEOP:						\
		*(SIZE *)(unsigned long) (DST + insn->off) = IMM;	\
		CONT;							\
	LDX_MEM_##SIZEOP:						\
		DST = *(SIZE *)(unsigned long) (SRC + insn->off);	\
		CONT;

	LDST(B,   u8)
	LDST(H,  u16)
	LDST(W,  u32)
	LDST(DW, u64)
#undef LDST
	STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
		atomic_add((u32) SRC, (atomic_t *)(unsigned long)
			   (DST + insn->off));
		CONT;
	STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
		atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
			     (DST + insn->off));
		CONT;
	LD_ABS_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + imm32)) */
		off = IMM;
load_word:
		/* BPF_LD + BPF_ABS and BPF_LD + BPF_IND insns are only
		 * appearing in the programs where ctx == skb
		 * (see may_access_skb() in the verifier). All programs
		 * keep 'ctx' in regs[BPF_REG_CTX] == BPF_R6,
		 * bpf_convert_filter() saves it in BPF_R6, internal BPF
		 * verifier will check that BPF_R6 == ctx.
		 *
		 * BPF_ABS and BPF_IND are wrappers of function calls,
		 * so they scratch BPF_R1-BPF_R5 registers, preserve
		 * BPF_R6-BPF_R9, and store return value into BPF_R0.
		 *
		 * Implicit input:
		 *   ctx == skb == BPF_R6 == CTX
		 *
		 * Explicit input:
		 *   SRC == any register
		 *   IMM == 32-bit immediate
		 *
		 * Output:
		 *   BPF_R0 - 8/16/32-bit skb data converted to cpu endianness
		 */

		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 4, &tmp);
		if (likely(ptr != NULL)) {
			BPF_R0 = get_unaligned_be32(ptr);
			CONT;
		}

		return 0;
	LD_ABS_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + imm32)) */
		off = IMM;
load_half:
		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 2, &tmp);
		if (likely(ptr != NULL)) {
			BPF_R0 = get_unaligned_be16(ptr);
			CONT;
		}

		return 0;
	LD_ABS_B: /* BPF_R0 = *(u8 *) (skb->data + imm32) */
		off = IMM;
load_byte:
		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 1, &tmp);
		if (likely(ptr != NULL)) {
			BPF_R0 = *(u8 *)ptr;
			CONT;
		}

		return 0;
	LD_IND_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + src_reg + imm32)) */
		off = IMM + SRC;
		goto load_word;
	LD_IND_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + src_reg + imm32)) */
		off = IMM + SRC;
		goto load_half;
	LD_IND_B: /* BPF_R0 = *(u8 *) (skb->data + src_reg + imm32) */
		off = IMM + SRC;
		goto load_byte;

	default_label:
		/* If we ever reach this, we have a bug somewhere. */
		WARN_RATELIMIT(1, "unknown opcode %02x\n", insn->code);
		return 0;
}
STACK_FRAME_NON_STANDARD(___bpf_prog_run); /* jump table */

#define PROG_NAME(stack_size) __bpf_prog_run##stack_size
#define DEFINE_BPF_PROG_RUN(stack_size) \
static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
{ \
	u64 stack[stack_size / sizeof(u64)]; \
	u64 regs[MAX_BPF_REG]; \
\
	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
	ARG1 = (u64) (unsigned long) ctx; \
	return ___bpf_prog_run(regs, insn, stack); \
}

#define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size
#define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \
static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
				      const struct bpf_insn *insn) \
{ \
	u64 stack[stack_size / sizeof(u64)]; \
	u64 regs[MAX_BPF_REG]; \
\
	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
	BPF_R1 = r1; \
	BPF_R2 = r2; \
	BPF_R3 = r3; \
	BPF_R4 = r4; \
	BPF_R5 = r5; \
	return ___bpf_prog_run(regs, insn, stack); \
}

#define EVAL1(FN, X) FN(X)
#define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
#define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
#define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y)
#define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y)
#define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y)

EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);

EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192);
EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384);
EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);
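
/* Each generated interpreter variant differs only in its fixed stack
 * frame size. The dispatch below picks the smallest variant that covers
 * the program's stack depth, e.g. a prog with aux->stack_depth == 40
 * rounds up to 64 and runs via __bpf_prog_run64():
 *
 *	interpreters[(round_up(40, 32) / 32) - 1] == __bpf_prog_run64
 */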

#define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),

static unsigned int (*interpreters[])(const void *ctx,
				      const struct bpf_insn *insn) = {
EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
};
#undef PROG_NAME_LIST
#define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size),
static u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5,
				  const struct bpf_insn *insn) = {
EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
};
#undef PROG_NAME_LIST

void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
{
	stack_depth = max_t(u32, stack_depth, 1);
	insn->off = (s16) insn->imm;
	insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] -
		__bpf_call_base_args;
	insn->code = BPF_JMP | BPF_CALL_ARGS;
}

bool bpf_prog_array_compatible(struct bpf_array *array,
			       const struct bpf_prog *fp)
{
	if (fp->kprobe_override)
		return false;

	if (!array->owner_prog_type) {
		/* There's no owner yet where we could check for
		 * compatibility.
		 */
		array->owner_prog_type = fp->type;
		array->owner_jited = fp->jited;

		return true;
	}

	return array->owner_prog_type == fp->type &&
	       array->owner_jited == fp->jited;
}

static int bpf_check_tail_call(const struct bpf_prog *fp)
{
	struct bpf_prog_aux *aux = fp->aux;
	int i;

	for (i = 0; i < aux->used_map_cnt; i++) {
		struct bpf_map *map = aux->used_maps[i];
		struct bpf_array *array;

		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
			continue;

		array = container_of(map, struct bpf_array, map);
		if (!bpf_prog_array_compatible(array, fp))
			return -EINVAL;
	}

	return 0;
}

/**
 *	bpf_prog_select_runtime - select exec runtime for BPF program
 *	@fp: bpf_prog populated with internal BPF program
 *	@err: pointer to error variable
 *
 * Try to JIT eBPF program, if JIT is not available, use interpreter.
 * The BPF program will be executed via BPF_PROG_RUN() macro.
 */
struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
{
	u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);

	fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];

	/* eBPF JITs can rewrite the program in case constant
	 * blinding is active. However, in case of error during
	 * blinding, bpf_int_jit_compile() must always return a
	 * valid program, which in this case would simply not
	 * be JITed, but falls back to the interpreter.
	 */
	if (!bpf_prog_is_dev_bound(fp->aux)) {
		fp = bpf_int_jit_compile(fp);
	} else {
		*err = bpf_prog_offload_compile(fp);
		if (*err)
			return fp;
	}
	bpf_prog_lock_ro(fp);

1450 	 * this late stage as we need to determine, if we deal
1451 	 * with JITed or non JITed program concatenations and not
1452 	 * all eBPF JITs might immediately support all features.
1453 	 */
1454 	*err = bpf_check_tail_call(fp);
1455 
1456 	return fp;
1457 }
1458 EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
1459 
1460 static unsigned int __bpf_prog_ret1(const void *ctx,
1461 				    const struct bpf_insn *insn)
1462 {
1463 	return 1;
1464 }
1465 
1466 static struct bpf_prog_dummy {
1467 	struct bpf_prog prog;
1468 } dummy_bpf_prog = {
1469 	.prog = {
1470 		.bpf_func = __bpf_prog_ret1,
1471 	},
1472 };
1473 
/* To avoid allocating an empty bpf_prog_array for cgroups that don't have
 * a bpf program attached, use one global 'empty_prog_array'. It will not
 * be modified by the caller of bpf_prog_array_alloc() (since the caller
 * requested prog_cnt == 0), and that pointer should still be 'freed' via
 * bpf_prog_array_free().
 */
static struct {
	struct bpf_prog_array hdr;
	struct bpf_prog *null_prog;
} empty_prog_array = {
	.null_prog = NULL,
};

struct bpf_prog_array __rcu *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
{
	if (prog_cnt)
		return kzalloc(sizeof(struct bpf_prog_array) +
			       sizeof(struct bpf_prog *) * (prog_cnt + 1),
			       flags);

	return &empty_prog_array.hdr;
}

void bpf_prog_array_free(struct bpf_prog_array __rcu *progs)
{
	if (!progs ||
	    progs == (struct bpf_prog_array __rcu *)&empty_prog_array.hdr)
		return;
	kfree_rcu(progs, rcu);
}

int bpf_prog_array_length(struct bpf_prog_array __rcu *progs)
{
	struct bpf_prog **prog;
	u32 cnt = 0;

	rcu_read_lock();
	prog = rcu_dereference(progs)->progs;
	for (; *prog; prog++)
		if (*prog != &dummy_bpf_prog.prog)
			cnt++;
	rcu_read_unlock();
	return cnt;
}

int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
				__u32 __user *prog_ids, u32 cnt)
{
	struct bpf_prog **prog;
	u32 i = 0, id;

	rcu_read_lock();
	prog = rcu_dereference(progs)->progs;
	for (; *prog; prog++) {
		if (*prog == &dummy_bpf_prog.prog)
			continue;
		id = (*prog)->aux->id;
		if (copy_to_user(prog_ids + i, &id, sizeof(id))) {
			rcu_read_unlock();
			return -EFAULT;
		}
		if (++i == cnt) {
			prog++;
			break;
		}
	}
	rcu_read_unlock();
	if (*prog)
		return -ENOSPC;
	return 0;
}

void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *progs,
				struct bpf_prog *old_prog)
{
	struct bpf_prog **prog = progs->progs;

	for (; *prog; prog++)
		if (*prog == old_prog) {
			WRITE_ONCE(*prog, &dummy_bpf_prog.prog);
			break;
		}
}

int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
			struct bpf_prog *exclude_prog,
			struct bpf_prog *include_prog,
			struct bpf_prog_array **new_array)
{
	int new_prog_cnt, carry_prog_cnt = 0;
	struct bpf_prog **existing_prog;
	struct bpf_prog_array *array;
	int new_prog_idx = 0;

	/* Figure out how many existing progs we need to carry over to
	 * the new array.
	 */
	if (old_array) {
		existing_prog = old_array->progs;
		for (; *existing_prog; existing_prog++) {
			if (*existing_prog != exclude_prog &&
			    *existing_prog != &dummy_bpf_prog.prog)
				carry_prog_cnt++;
			if (*existing_prog == include_prog)
				return -EEXIST;
		}
	}

	/* How many progs (not NULL) will be in the new array? */
	new_prog_cnt = carry_prog_cnt;
	if (include_prog)
		new_prog_cnt += 1;

	/* Do we have any prog (not NULL) in the new array? */
	if (!new_prog_cnt) {
		*new_array = NULL;
		return 0;
	}

	/* +1 as the end of prog_array is marked with NULL */
	array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL);
	if (!array)
		return -ENOMEM;

	/* Fill in the new prog array */
	if (carry_prog_cnt) {
		existing_prog = old_array->progs;
		for (; *existing_prog; existing_prog++)
			if (*existing_prog != exclude_prog &&
			    *existing_prog != &dummy_bpf_prog.prog)
				array->progs[new_prog_idx++] = *existing_prog;
	}
	if (include_prog)
		array->progs[new_prog_idx++] = include_prog;
	array->progs[new_prog_idx] = NULL;
	*new_array = array;
	return 0;
}
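
/* Example: with old_array == {A, B, dummy, NULL}, exclude_prog == B and
 * include_prog == C, the function allocates and returns {A, C, NULL};
 * dummy entries from earlier deletions are dropped, and asking to
 * include a program that is already present yields -EEXIST.
 */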

int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array,
			     __u32 __user *prog_ids, u32 request_cnt,
			     __u32 __user *prog_cnt)
{
	u32 cnt = 0;

	if (array)
		cnt = bpf_prog_array_length(array);

	if (copy_to_user(prog_cnt, &cnt, sizeof(cnt)))
		return -EFAULT;

	/* return early if user requested only program count or nothing to copy */
	if (!request_cnt || !cnt)
		return 0;

	return bpf_prog_array_copy_to_user(array, prog_ids, request_cnt);
}

static void bpf_prog_free_deferred(struct work_struct *work)
{
	struct bpf_prog_aux *aux;
	int i;

	aux = container_of(work, struct bpf_prog_aux, work);
	if (bpf_prog_is_dev_bound(aux))
		bpf_prog_offload_destroy(aux->prog);
	for (i = 0; i < aux->func_cnt; i++)
		bpf_jit_free(aux->func[i]);
	if (aux->func_cnt) {
		kfree(aux->func);
		bpf_prog_unlock_free(aux->prog);
	} else {
		bpf_jit_free(aux->prog);
	}
}

/* Free internal BPF program */
void bpf_prog_free(struct bpf_prog *fp)
{
	struct bpf_prog_aux *aux = fp->aux;

	INIT_WORK(&aux->work, bpf_prog_free_deferred);
	schedule_work(&aux->work);
}
EXPORT_SYMBOL_GPL(bpf_prog_free);

/* RNG for unprivileged user space with separated state from prandom_u32(). */
static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);

void bpf_user_rnd_init_once(void)
{
	prandom_init_once(&bpf_user_rnd_state);
}

BPF_CALL_0(bpf_user_rnd_u32)
{
	/* Should someone ever have the rather unwise idea to use some
	 * of the registers passed into this function, then note that
	 * this function is called from native eBPF and classic-to-eBPF
	 * transformations. Register assignments from both sides are
	 * different, f.e. classic always sets fn(ctx, A, X) here.
	 */
	struct rnd_state *state;
	u32 res;

	state = &get_cpu_var(bpf_user_rnd_state);
	res = prandom_u32_state(state);
	put_cpu_var(bpf_user_rnd_state);

	return res;
}

/* Weak definitions of helper functions in case we don't have bpf syscall. */
const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
const struct bpf_func_proto bpf_map_update_elem_proto __weak;
const struct bpf_func_proto bpf_map_delete_elem_proto __weak;

const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;

const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
const struct bpf_func_proto bpf_get_current_comm_proto __weak;
const struct bpf_func_proto bpf_sock_map_update_proto __weak;

const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
{
	return NULL;
}

u64 __weak
bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
	return -ENOTSUPP;
}

/* Always built-in helper functions. */
const struct bpf_func_proto bpf_tail_call_proto = {
	.func		= NULL,
	.gpl_only	= false,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

/* Stub for JITs that only support cBPF. eBPF programs are interpreted.
 * It is encouraged to implement bpf_int_jit_compile() instead, so that
 * eBPF and implicitly also cBPF can get JITed!
 */
struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
{
	return prog;
}

/* Stub for JITs that support eBPF. All cBPF code gets transformed into
 * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
 */
void __weak bpf_jit_compile(struct bpf_prog *prog)
{
}

bool __weak bpf_helper_changes_pkt_data(void *func)
{
	return false;
}

/* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
 * skb_copy_bits(), so provide a weak definition of it for NET-less config.
 */
int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
			 int len)
{
	return -EFAULT;
}

/* All definitions of tracepoints related to BPF. */
#define CREATE_TRACE_POINTS
#include <linux/bpf_trace.h>

EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);

/* These are only used within the BPF_SYSCALL code */
#ifdef CONFIG_BPF_SYSCALL
EXPORT_TRACEPOINT_SYMBOL_GPL(bpf_prog_get_type);
EXPORT_TRACEPOINT_SYMBOL_GPL(bpf_prog_put_rcu);
#endif