// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 *	Jay Schulist <jschlst@samba.org>
 *	Alexei Starovoitov <ast@plumgrid.com>
 *	Daniel Borkmann <dborkman@redhat.com>
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */

#include <uapi/linux/btf.h>
#include <linux/filter.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/moduleloader.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/objtool.h>
#include <linux/rbtree_latch.h>
#include <linux/kallsyms.h>
#include <linux/rcupdate.h>
#include <linux/perf_event.h>
#include <linux/extable.h>
#include <linux/log2.h>
#include <linux/bpf_verifier.h>
#include <linux/nodemask.h>
#include <linux/nospec.h>
#include <linux/bpf_mem_alloc.h>
#include <linux/memcontrol.h>

#include <asm/barrier.h>
#include <asm/unaligned.h>

/* Registers */
#define BPF_R0	regs[BPF_REG_0]
#define BPF_R1	regs[BPF_REG_1]
#define BPF_R2	regs[BPF_REG_2]
#define BPF_R3	regs[BPF_REG_3]
#define BPF_R4	regs[BPF_REG_4]
#define BPF_R5	regs[BPF_REG_5]
#define BPF_R6	regs[BPF_REG_6]
#define BPF_R7	regs[BPF_REG_7]
#define BPF_R8	regs[BPF_REG_8]
#define BPF_R9	regs[BPF_REG_9]
#define BPF_R10	regs[BPF_REG_10]

/* Named registers */
#define DST	regs[insn->dst_reg]
#define SRC	regs[insn->src_reg]
#define FP	regs[BPF_REG_FP]
#define AX	regs[BPF_REG_AX]
#define ARG1	regs[BPF_REG_ARG1]
#define CTX	regs[BPF_REG_CTX]
#define OFF	insn->off
#define IMM	insn->imm

struct bpf_mem_alloc bpf_global_ma;
bool bpf_global_ma_set;

/* No hurry in this branch
 *
 * Exported for the bpf jit load helper.
 */
void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
{
	u8 *ptr = NULL;

	if (k >= SKF_NET_OFF) {
		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
	} else if (k >= SKF_LL_OFF) {
		if (unlikely(!skb_mac_header_was_set(skb)))
			return NULL;
		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
	}
	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
		return ptr;

	return NULL;
}
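
/* Example (illustrative, not part of the original source): a caller that
 * wants 4 bytes at offset 12 into the network header (the IPv4 source
 * address) passes the special negative-offset encoding:
 *
 *	u8 *p = bpf_internal_load_pointer_neg_helper(skb, SKF_NET_OFF + 12, 4);
 *
 * p is non-NULL only if the whole 4-byte read lies within
 * [skb->head, skb_tail_pointer(skb)].
 */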

struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = bpf_memcg_flags(GFP_KERNEL | __GFP_ZERO | gfp_extra_flags);
	struct bpf_prog_aux *aux;
	struct bpf_prog *fp;

	size = round_up(size, PAGE_SIZE);
	fp = __vmalloc(size, gfp_flags);
	if (fp == NULL)
		return NULL;

	aux = kzalloc(sizeof(*aux), bpf_memcg_flags(GFP_KERNEL | gfp_extra_flags));
	if (aux == NULL) {
		vfree(fp);
		return NULL;
	}
	fp->active = alloc_percpu_gfp(int, bpf_memcg_flags(GFP_KERNEL | gfp_extra_flags));
	if (!fp->active) {
		vfree(fp);
		kfree(aux);
		return NULL;
	}

	fp->pages = size / PAGE_SIZE;
	fp->aux = aux;
	fp->aux->prog = fp;
	fp->jit_requested = ebpf_jit_enabled();
	fp->blinding_requested = bpf_jit_blinding_enabled(fp);
#ifdef CONFIG_CGROUP_BPF
	aux->cgroup_atype = CGROUP_BPF_ATTACH_TYPE_INVALID;
#endif

	INIT_LIST_HEAD_RCU(&fp->aux->ksym.lnode);
	mutex_init(&fp->aux->used_maps_mutex);
	mutex_init(&fp->aux->dst_mutex);

	return fp;
}

struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = bpf_memcg_flags(GFP_KERNEL | __GFP_ZERO | gfp_extra_flags);
	struct bpf_prog *prog;
	int cpu;

	prog = bpf_prog_alloc_no_stats(size, gfp_extra_flags);
	if (!prog)
		return NULL;

	prog->stats = alloc_percpu_gfp(struct bpf_prog_stats, gfp_flags);
	if (!prog->stats) {
		free_percpu(prog->active);
		kfree(prog->aux);
		vfree(prog);
		return NULL;
	}

	for_each_possible_cpu(cpu) {
		struct bpf_prog_stats *pstats;

		pstats = per_cpu_ptr(prog->stats, cpu);
		u64_stats_init(&pstats->syncp);
	}
	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_alloc);

int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog)
{
	if (!prog->aux->nr_linfo || !prog->jit_requested)
		return 0;

	prog->aux->jited_linfo = kvcalloc(prog->aux->nr_linfo,
					  sizeof(*prog->aux->jited_linfo),
					  bpf_memcg_flags(GFP_KERNEL | __GFP_NOWARN));
	if (!prog->aux->jited_linfo)
		return -ENOMEM;

	return 0;
}

void bpf_prog_jit_attempt_done(struct bpf_prog *prog)
{
	if (prog->aux->jited_linfo &&
	    (!prog->jited || !prog->aux->jited_linfo[0])) {
		kvfree(prog->aux->jited_linfo);
		prog->aux->jited_linfo = NULL;
	}

	kfree(prog->aux->kfunc_tab);
	prog->aux->kfunc_tab = NULL;
}

/* The jit engine is responsible for providing an array
 * for the insn_off to jited_off mapping (insn_to_jit_off).
 *
 * The idx to this array is the insn_off.  Hence, the insn_off
 * here is relative to the prog itself instead of the main prog.
 * This array has one entry for each xlated bpf insn.
 *
 * jited_off is the byte off to the end of the jited insn.
 *
 * Hence, with
 * insn_start:
 *      The first bpf insn off of the prog.  The insn off
 *      here is relative to the main prog.
 *      e.g. if prog is a subprog, insn_start > 0
 * linfo_idx:
 *      The prog's idx to prog->aux->linfo and jited_linfo
 *
 * jited_linfo[linfo_idx] = prog->bpf_func
 *
 * For i > linfo_idx,
 *
 * jited_linfo[i] = prog->bpf_func +
 *	insn_to_jit_off[linfo[i].insn_off - insn_start - 1]
 */
void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
			       const u32 *insn_to_jit_off)
{
	u32 linfo_idx, insn_start, insn_end, nr_linfo, i;
	const struct bpf_line_info *linfo;
	void **jited_linfo;

	if (!prog->aux->jited_linfo)
		/* Userspace did not provide linfo */
		return;

	linfo_idx = prog->aux->linfo_idx;
	linfo = &prog->aux->linfo[linfo_idx];
	insn_start = linfo[0].insn_off;
	insn_end = insn_start + prog->len;

	jited_linfo = &prog->aux->jited_linfo[linfo_idx];
	jited_linfo[0] = prog->bpf_func;

	nr_linfo = prog->aux->nr_linfo - linfo_idx;

	for (i = 1; i < nr_linfo && linfo[i].insn_off < insn_end; i++)
		/* The verifier ensures that linfo[i].insn_off is
		 * strictly increasing
		 */
		jited_linfo[i] = prog->bpf_func +
			insn_to_jit_off[linfo[i].insn_off - insn_start - 1];
}
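
/* Worked example (illustrative, not part of the original source): for a
 * subprog with insn_start = 10 and a line info entry at insn_off = 12:
 *
 *	jited_linfo[i] = prog->bpf_func + insn_to_jit_off[12 - 10 - 1]
 *	               = prog->bpf_func + insn_to_jit_off[1]
 *
 * i.e. the end of the jited image of this subprog's xlated insn 1, which
 * is exactly where the jited code for insn_off 12 begins.
 */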

struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
				  gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = bpf_memcg_flags(GFP_KERNEL | __GFP_ZERO | gfp_extra_flags);
	struct bpf_prog *fp;
	u32 pages;

	size = round_up(size, PAGE_SIZE);
	pages = size / PAGE_SIZE;
	if (pages <= fp_old->pages)
		return fp_old;

	fp = __vmalloc(size, gfp_flags);
	if (fp) {
		memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
		fp->pages = pages;
		fp->aux->prog = fp;

		/* We keep fp->aux from fp_old around in the new
		 * reallocated structure.
		 */
		fp_old->aux = NULL;
		fp_old->stats = NULL;
		fp_old->active = NULL;
		__bpf_prog_free(fp_old);
	}

	return fp;
}

void __bpf_prog_free(struct bpf_prog *fp)
{
	if (fp->aux) {
		mutex_destroy(&fp->aux->used_maps_mutex);
		mutex_destroy(&fp->aux->dst_mutex);
		kfree(fp->aux->poke_tab);
		kfree(fp->aux);
	}
	free_percpu(fp->stats);
	free_percpu(fp->active);
	vfree(fp);
}

int bpf_prog_calc_tag(struct bpf_prog *fp)
{
	const u32 bits_offset = SHA1_BLOCK_SIZE - sizeof(__be64);
	u32 raw_size = bpf_prog_tag_scratch_size(fp);
	u32 digest[SHA1_DIGEST_WORDS];
	u32 ws[SHA1_WORKSPACE_WORDS];
	u32 i, bsize, psize, blocks;
	struct bpf_insn *dst;
	bool was_ld_map;
	u8 *raw, *todo;
	__be32 *result;
	__be64 *bits;

	raw = vmalloc(raw_size);
	if (!raw)
		return -ENOMEM;

	sha1_init(digest);
	memset(ws, 0, sizeof(ws));

	/* We need to take out the map fd for the digest calculation
	 * since map fds are unstable from the user space side.
	 */
	dst = (void *)raw;
	for (i = 0, was_ld_map = false; i < fp->len; i++) {
		dst[i] = fp->insnsi[i];
		if (!was_ld_map &&
		    dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
		    (dst[i].src_reg == BPF_PSEUDO_MAP_FD ||
		     dst[i].src_reg == BPF_PSEUDO_MAP_VALUE)) {
			was_ld_map = true;
			dst[i].imm = 0;
		} else if (was_ld_map &&
			   dst[i].code == 0 &&
			   dst[i].dst_reg == 0 &&
			   dst[i].src_reg == 0 &&
			   dst[i].off == 0) {
			was_ld_map = false;
			dst[i].imm = 0;
		} else {
			was_ld_map = false;
		}
	}

	psize = bpf_prog_insn_size(fp);
	memset(&raw[psize], 0, raw_size - psize);
	raw[psize++] = 0x80;

	bsize  = round_up(psize, SHA1_BLOCK_SIZE);
	blocks = bsize / SHA1_BLOCK_SIZE;
	todo   = raw;
	if (bsize - psize >= sizeof(__be64)) {
		bits = (__be64 *)(todo + bsize - sizeof(__be64));
	} else {
		bits = (__be64 *)(todo + bsize + bits_offset);
		blocks++;
	}
	*bits = cpu_to_be64((psize - 1) << 3);

	while (blocks--) {
		sha1_transform(digest, todo, ws);
		todo += SHA1_BLOCK_SIZE;
	}

	result = (__force __be32 *)digest;
	for (i = 0; i < SHA1_DIGEST_WORDS; i++)
		result[i] = cpu_to_be32(digest[i]);
	memcpy(fp->tag, result, sizeof(fp->tag));

	vfree(raw);
	return 0;
}
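
/* Worked example (illustrative, not part of the original source): a 2-insn
 * program has psize = 2 * 8 + 1 = 17 bytes after the 0x80 pad byte, so
 * bsize = round_up(17, SHA1_BLOCK_SIZE) = 64. Since 64 - 17 >= 8, the
 * big-endian length word fits in the same block:
 *
 *	*bits = cpu_to_be64((17 - 1) << 3)	// 128 bits of message
 *
 * and a single sha1_transform() round produces the tag.
 */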

static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old,
				s32 end_new, s32 curr, const bool probe_pass)
{
	const s64 imm_min = S32_MIN, imm_max = S32_MAX;
	s32 delta = end_new - end_old;
	s64 imm = insn->imm;

	if (curr < pos && curr + imm + 1 >= end_old)
		imm += delta;
	else if (curr >= end_new && curr + imm + 1 < end_new)
		imm -= delta;
	if (imm < imm_min || imm > imm_max)
		return -ERANGE;
	if (!probe_pass)
		insn->imm = imm;
	return 0;
}

static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old,
				s32 end_new, s32 curr, const bool probe_pass)
{
	s64 off_min, off_max, off;
	s32 delta = end_new - end_old;

	if (insn->code == (BPF_JMP32 | BPF_JA)) {
		off = insn->imm;
		off_min = S32_MIN;
		off_max = S32_MAX;
	} else {
		off = insn->off;
		off_min = S16_MIN;
		off_max = S16_MAX;
	}

	if (curr < pos && curr + off + 1 >= end_old)
		off += delta;
	else if (curr >= end_new && curr + off + 1 < end_new)
		off -= delta;
	if (off < off_min || off > off_max)
		return -ERANGE;
	if (!probe_pass) {
		if (insn->code == (BPF_JMP32 | BPF_JA))
			insn->imm = off;
		else
			insn->off = off;
	}
	return 0;
}

static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, s32 end_old,
			    s32 end_new, const bool probe_pass)
{
	u32 i, insn_cnt = prog->len + (probe_pass ? end_new - end_old : 0);
	struct bpf_insn *insn = prog->insnsi;
	int ret = 0;

	for (i = 0; i < insn_cnt; i++, insn++) {
		u8 code;

		/* In the probing pass we still operate on the original,
		 * unpatched image in order to check overflows before we
		 * do any other adjustments. Therefore skip the patchlet.
		 */
		if (probe_pass && i == pos) {
			i = end_new;
			insn = prog->insnsi + end_old;
		}
		if (bpf_pseudo_func(insn)) {
			ret = bpf_adj_delta_to_imm(insn, pos, end_old,
						   end_new, i, probe_pass);
			if (ret)
				return ret;
			continue;
		}
		code = insn->code;
		if ((BPF_CLASS(code) != BPF_JMP &&
		     BPF_CLASS(code) != BPF_JMP32) ||
		    BPF_OP(code) == BPF_EXIT)
			continue;
		/* Adjust offset of jmps if we cross patch boundaries. */
		if (BPF_OP(code) == BPF_CALL) {
			if (insn->src_reg != BPF_PSEUDO_CALL)
				continue;
			ret = bpf_adj_delta_to_imm(insn, pos, end_old,
						   end_new, i, probe_pass);
		} else {
			ret = bpf_adj_delta_to_off(insn, pos, end_old,
						   end_new, i, probe_pass);
		}
		if (ret)
			break;
	}

	return ret;
}
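
/* Worked example (illustrative, not part of the original source): patching
 * one insn at pos = 5 with a 3-insn patchlet gives end_old = 6, end_new = 8
 * and delta = 2. A jump at curr = 2 with off = +4 targets insn
 * 2 + 4 + 1 = 7, which is past the patched region (curr < pos and
 * curr + off + 1 >= end_old), so off is rewritten to +6 to keep the jump
 * pointing at the same logical instruction after the expansion.
 */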

static void bpf_adj_linfo(struct bpf_prog *prog, u32 off, u32 delta)
{
	struct bpf_line_info *linfo;
	u32 i, nr_linfo;

	nr_linfo = prog->aux->nr_linfo;
	if (!nr_linfo || !delta)
		return;

	linfo = prog->aux->linfo;

	for (i = 0; i < nr_linfo; i++)
		if (off < linfo[i].insn_off)
			break;

	/* Push all off < linfo[i].insn_off by delta */
	for (; i < nr_linfo; i++)
		linfo[i].insn_off += delta;
}

struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
				       const struct bpf_insn *patch, u32 len)
{
	u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
	const u32 cnt_max = S16_MAX;
	struct bpf_prog *prog_adj;
	int err;

	/* Since our patchlet doesn't expand the image, we're done. */
	if (insn_delta == 0) {
		memcpy(prog->insnsi + off, patch, sizeof(*patch));
		return prog;
	}

	insn_adj_cnt = prog->len + insn_delta;

	/* Reject anything that would potentially let the insn->off
	 * target overflow when we have excessive program expansions.
	 * We need to probe here before we do any reallocation where
	 * we afterwards may not fail anymore.
	 */
	if (insn_adj_cnt > cnt_max &&
	    (err = bpf_adj_branches(prog, off, off + 1, off + len, true)))
		return ERR_PTR(err);

	/* Several new instructions need to be inserted. Make room
	 * for them. Likely, there's no need for a new allocation as
	 * last page could have large enough tailroom.
	 */
	prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
				    GFP_USER);
	if (!prog_adj)
		return ERR_PTR(-ENOMEM);

	prog_adj->len = insn_adj_cnt;

	/* Patching happens in 3 steps:
	 *
	 * 1) Move over tail of insnsi from next instruction onwards,
	 *    so we can patch the single target insn with one or more
	 *    new ones (patching is always from 1 to n insns, n > 0).
	 * 2) Inject new instructions at the target location.
	 * 3) Adjust branch offsets if necessary.
	 */
	insn_rest = insn_adj_cnt - off - len;

	memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
		sizeof(*patch) * insn_rest);
	memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);
	/* We are guaranteed not to fail at this point, otherwise
	 * the ship has sailed: there is no way back to the original
	 * state. An overflow cannot happen at this point.
	 */
	BUG_ON(bpf_adj_branches(prog_adj, off, off + 1, off + len, false));

	bpf_adj_linfo(prog_adj, off, insn_delta);

	return prog_adj;
}
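
/* Usage sketch (illustrative, not part of the original source): callers
 * such as the verifier replace one insn at index off with a patchlet and
 * must continue with the program pointer they get back, since the prog
 * may have been reallocated:
 *
 *	prog = bpf_patch_insn_single(prog, off, patchlet, patchlet_len);
 *	if (IS_ERR(prog))
 *		return PTR_ERR(prog);
 */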

int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt)
{
	int err;

	/* Branch offsets can't overflow when program is shrinking, no need
	 * to call bpf_adj_branches(..., true) here
	 */
	memmove(prog->insnsi + off, prog->insnsi + off + cnt,
		sizeof(struct bpf_insn) * (prog->len - off - cnt));
	prog->len -= cnt;

	err = bpf_adj_branches(prog, off, off + cnt, off, false);
	WARN_ON_ONCE(err);
	return err;
}

static void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
{
	int i;

	for (i = 0; i < fp->aux->func_cnt; i++)
		bpf_prog_kallsyms_del(fp->aux->func[i]);
}

void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
{
	bpf_prog_kallsyms_del_subprogs(fp);
	bpf_prog_kallsyms_del(fp);
}

#ifdef CONFIG_BPF_JIT
/* All BPF JIT sysctl knobs here. */
int bpf_jit_enable   __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
int bpf_jit_kallsyms __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
int bpf_jit_harden   __read_mostly;
long bpf_jit_limit   __read_mostly;
long bpf_jit_limit_max __read_mostly;

static void
bpf_prog_ksym_set_addr(struct bpf_prog *prog)
{
	WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));

	prog->aux->ksym.start = (unsigned long) prog->bpf_func;
	prog->aux->ksym.end   = prog->aux->ksym.start + prog->jited_len;
}

static void
bpf_prog_ksym_set_name(struct bpf_prog *prog)
{
	char *sym = prog->aux->ksym.name;
	const char *end = sym + KSYM_NAME_LEN;
	const struct btf_type *type;
	const char *func_name;

	BUILD_BUG_ON(sizeof("bpf_prog_") +
		     sizeof(prog->tag) * 2 +
		     /* name has been null terminated.
		      * We would need +1 for the '_' preceding
		      * the name.  However, the null character
		      * is double counted between the name and the
		      * sizeof("bpf_prog_") above, so we omit
		      * the +1 here.
		      */
		     sizeof(prog->aux->name) > KSYM_NAME_LEN);

	sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
	sym  = bin2hex(sym, prog->tag, sizeof(prog->tag));

	/* prog->aux->name will be ignored if full btf name is available */
	if (prog->aux->func_info_cnt) {
		type = btf_type_by_id(prog->aux->btf,
				      prog->aux->func_info[prog->aux->func_idx].type_id);
		func_name = btf_name_by_offset(prog->aux->btf, type->name_off);
		snprintf(sym, (size_t)(end - sym), "_%s", func_name);
		return;
	}

	if (prog->aux->name[0])
		snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name);
	else
		*sym = 0;
}
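
/* Example (illustrative, not part of the original source): the 8-byte tag
 * renders as 16 hex chars, so a program with BTF function name "xdp_drop"
 * appears in kallsyms as something like
 *
 *	bpf_prog_6deef7357e7b4530_xdp_drop
 *
 * while a program with neither BTF nor a user-supplied name is just
 * "bpf_prog_<tag>".
 */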

static unsigned long bpf_get_ksym_start(struct latch_tree_node *n)
{
	return container_of(n, struct bpf_ksym, tnode)->start;
}

static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
					  struct latch_tree_node *b)
{
	return bpf_get_ksym_start(a) < bpf_get_ksym_start(b);
}

static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
{
	unsigned long val = (unsigned long)key;
	const struct bpf_ksym *ksym;

	ksym = container_of(n, struct bpf_ksym, tnode);

	if (val < ksym->start)
		return -1;
	/* Ensure that we detect return addresses as part of the program, when
	 * the final instruction is a call for a program part of the stack
	 * trace. Therefore, do val > ksym->end instead of val >= ksym->end.
	 */
	if (val > ksym->end)
		return  1;

	return 0;
}
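
/* Example (illustrative, not part of the original source): if a program's
 * jited image ends with a call, the return address recorded in a stack
 * trace is ksym->end, one byte past the last insn. Treating ksym->end as
 * inside the symbol (val > ksym->end rather than val >= ksym->end) lets
 * such an address still resolve to this program.
 */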

static const struct latch_tree_ops bpf_tree_ops = {
	.less	= bpf_tree_less,
	.comp	= bpf_tree_comp,
};

static DEFINE_SPINLOCK(bpf_lock);
static LIST_HEAD(bpf_kallsyms);
static struct latch_tree_root bpf_tree __cacheline_aligned;

void bpf_ksym_add(struct bpf_ksym *ksym)
{
	spin_lock_bh(&bpf_lock);
	WARN_ON_ONCE(!list_empty(&ksym->lnode));
	list_add_tail_rcu(&ksym->lnode, &bpf_kallsyms);
	latch_tree_insert(&ksym->tnode, &bpf_tree, &bpf_tree_ops);
	spin_unlock_bh(&bpf_lock);
}

static void __bpf_ksym_del(struct bpf_ksym *ksym)
{
	if (list_empty(&ksym->lnode))
		return;

	latch_tree_erase(&ksym->tnode, &bpf_tree, &bpf_tree_ops);
	list_del_rcu(&ksym->lnode);
}

void bpf_ksym_del(struct bpf_ksym *ksym)
{
	spin_lock_bh(&bpf_lock);
	__bpf_ksym_del(ksym);
	spin_unlock_bh(&bpf_lock);
}

static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
{
	return fp->jited && !bpf_prog_was_classic(fp);
}

void bpf_prog_kallsyms_add(struct bpf_prog *fp)
{
	if (!bpf_prog_kallsyms_candidate(fp) ||
	    !bpf_capable())
		return;

	bpf_prog_ksym_set_addr(fp);
	bpf_prog_ksym_set_name(fp);
	fp->aux->ksym.prog = true;

	bpf_ksym_add(&fp->aux->ksym);
}

void bpf_prog_kallsyms_del(struct bpf_prog *fp)
{
	if (!bpf_prog_kallsyms_candidate(fp))
		return;

	bpf_ksym_del(&fp->aux->ksym);
}

static struct bpf_ksym *bpf_ksym_find(unsigned long addr)
{
	struct latch_tree_node *n;

	n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
	return n ? container_of(n, struct bpf_ksym, tnode) : NULL;
}

const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
				 unsigned long *off, char *sym)
{
	struct bpf_ksym *ksym;
	char *ret = NULL;

	rcu_read_lock();
	ksym = bpf_ksym_find(addr);
	if (ksym) {
		unsigned long symbol_start = ksym->start;
		unsigned long symbol_end = ksym->end;

		strncpy(sym, ksym->name, KSYM_NAME_LEN);

		ret = sym;
		if (size)
			*size = symbol_end - symbol_start;
		if (off)
			*off  = addr - symbol_start;
	}
	rcu_read_unlock();

	return ret;
}

bool is_bpf_text_address(unsigned long addr)
{
	bool ret;

	rcu_read_lock();
	ret = bpf_ksym_find(addr) != NULL;
	rcu_read_unlock();

	return ret;
}

static struct bpf_prog *bpf_prog_ksym_find(unsigned long addr)
{
	struct bpf_ksym *ksym = bpf_ksym_find(addr);

	return ksym && ksym->prog ?
	       container_of(ksym, struct bpf_prog_aux, ksym)->prog :
	       NULL;
}

const struct exception_table_entry *search_bpf_extables(unsigned long addr)
{
	const struct exception_table_entry *e = NULL;
	struct bpf_prog *prog;

	rcu_read_lock();
	prog = bpf_prog_ksym_find(addr);
	if (!prog)
		goto out;
	if (!prog->aux->num_exentries)
		goto out;

	e = search_extable(prog->aux->extable, prog->aux->num_exentries, addr);
out:
	rcu_read_unlock();
	return e;
}

int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
		    char *sym)
{
	struct bpf_ksym *ksym;
	unsigned int it = 0;
	int ret = -ERANGE;

	if (!bpf_jit_kallsyms_enabled())
		return ret;

	rcu_read_lock();
	list_for_each_entry_rcu(ksym, &bpf_kallsyms, lnode) {
		if (it++ != symnum)
			continue;

		strncpy(sym, ksym->name, KSYM_NAME_LEN);

		*value = ksym->start;
		*type  = BPF_SYM_ELF_TYPE;

		ret = 0;
		break;
	}
	rcu_read_unlock();

	return ret;
}

int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
				struct bpf_jit_poke_descriptor *poke)
{
	struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab;
	static const u32 poke_tab_max = 1024;
	u32 slot = prog->aux->size_poke_tab;
	u32 size = slot + 1;

	if (size > poke_tab_max)
		return -ENOSPC;
	if (poke->tailcall_target || poke->tailcall_target_stable ||
	    poke->tailcall_bypass || poke->adj_off || poke->bypass_addr)
		return -EINVAL;

	switch (poke->reason) {
	case BPF_POKE_REASON_TAIL_CALL:
		if (!poke->tail_call.map)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	tab = krealloc(tab, size * sizeof(*poke), GFP_KERNEL);
	if (!tab)
		return -ENOMEM;

	memcpy(&tab[slot], poke, sizeof(*poke));
	prog->aux->size_poke_tab = size;
	prog->aux->poke_tab = tab;

	return slot;
}

/*
 * BPF program pack allocator.
 *
 * Most BPF programs are pretty small. Allocating a whole page for each
 * program is sometimes a waste. Many small bpf programs also add pressure
 * to the instruction TLB. To solve this issue, we introduce a BPF program
 * pack allocator. The prog_pack allocator uses HPAGE_PMD_SIZE page (2MB on
 * x86) to host BPF programs.
 */
#define BPF_PROG_CHUNK_SHIFT	6
#define BPF_PROG_CHUNK_SIZE	(1 << BPF_PROG_CHUNK_SHIFT)
#define BPF_PROG_CHUNK_MASK	(~(BPF_PROG_CHUNK_SIZE - 1))

struct bpf_prog_pack {
	struct list_head list;
	void *ptr;
	unsigned long bitmap[];
};

void bpf_jit_fill_hole_with_zero(void *area, unsigned int size)
{
	memset(area, 0, size);
}

#define BPF_PROG_SIZE_TO_NBITS(size)	(round_up(size, BPF_PROG_CHUNK_SIZE) / BPF_PROG_CHUNK_SIZE)
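
/* Worked example (illustrative, not part of the original source): with
 * 64-byte chunks, a 200-byte program occupies
 *
 *	BPF_PROG_SIZE_TO_NBITS(200) = round_up(200, 64) / 64 = 4
 *
 * bitmap bits, i.e. 256 bytes of a shared pack instead of a whole page.
 */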

static DEFINE_MUTEX(pack_mutex);
static LIST_HEAD(pack_list);

/* PMD_SIZE is not available in some special configs, e.g. ARCH=arm with
 * CONFIG_MMU=n. Use PAGE_SIZE in these cases.
 */
#ifdef PMD_SIZE
/* PMD_SIZE is really big for some archs. It doesn't make sense to
 * reserve too much memory in one allocation. Hardcode BPF_PROG_PACK_SIZE to
 * 2MiB * num_possible_nodes(). On most architectures PMD_SIZE will be
 * greater than or equal to 2MB.
 */
#define BPF_PROG_PACK_SIZE (SZ_2M * num_possible_nodes())
#else
#define BPF_PROG_PACK_SIZE PAGE_SIZE
#endif

#define BPF_PROG_CHUNK_COUNT (BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE)

static struct bpf_prog_pack *alloc_new_pack(bpf_jit_fill_hole_t bpf_fill_ill_insns)
{
	struct bpf_prog_pack *pack;

	pack = kzalloc(struct_size(pack, bitmap, BITS_TO_LONGS(BPF_PROG_CHUNK_COUNT)),
		       GFP_KERNEL);
	if (!pack)
		return NULL;
	pack->ptr = bpf_jit_alloc_exec(BPF_PROG_PACK_SIZE);
	if (!pack->ptr) {
		kfree(pack);
		return NULL;
	}
	bpf_fill_ill_insns(pack->ptr, BPF_PROG_PACK_SIZE);
	bitmap_zero(pack->bitmap, BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE);
	list_add_tail(&pack->list, &pack_list);

	set_vm_flush_reset_perms(pack->ptr);
	set_memory_rox((unsigned long)pack->ptr, BPF_PROG_PACK_SIZE / PAGE_SIZE);
	return pack;
}

void *bpf_prog_pack_alloc(u32 size, bpf_jit_fill_hole_t bpf_fill_ill_insns)
{
	unsigned int nbits = BPF_PROG_SIZE_TO_NBITS(size);
	struct bpf_prog_pack *pack;
	unsigned long pos;
	void *ptr = NULL;

	mutex_lock(&pack_mutex);
	if (size > BPF_PROG_PACK_SIZE) {
		size = round_up(size, PAGE_SIZE);
		ptr = bpf_jit_alloc_exec(size);
		if (ptr) {
			bpf_fill_ill_insns(ptr, size);
			set_vm_flush_reset_perms(ptr);
			set_memory_rox((unsigned long)ptr, size / PAGE_SIZE);
		}
		goto out;
	}
	list_for_each_entry(pack, &pack_list, list) {
		pos = bitmap_find_next_zero_area(pack->bitmap, BPF_PROG_CHUNK_COUNT, 0,
						 nbits, 0);
		if (pos < BPF_PROG_CHUNK_COUNT)
			goto found_free_area;
	}

	pack = alloc_new_pack(bpf_fill_ill_insns);
	if (!pack)
		goto out;

	pos = 0;

found_free_area:
	bitmap_set(pack->bitmap, pos, nbits);
	ptr = (void *)(pack->ptr) + (pos << BPF_PROG_CHUNK_SHIFT);

out:
	mutex_unlock(&pack_mutex);
	return ptr;
}
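
/* Usage sketch (illustrative, not part of the original source): a JIT
 * requesting a 200-byte image receives a chunk-aligned slice of a shared
 * RO+X pack:
 *
 *	ptr = bpf_prog_pack_alloc(200, bpf_jit_fill_hole_with_zero);
 *
 * Requests larger than BPF_PROG_PACK_SIZE fall back to a private
 * bpf_jit_alloc_exec() allocation rounded up to whole pages.
 */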

void bpf_prog_pack_free(struct bpf_binary_header *hdr)
{
	struct bpf_prog_pack *pack = NULL, *tmp;
	unsigned int nbits;
	unsigned long pos;

	mutex_lock(&pack_mutex);
	if (hdr->size > BPF_PROG_PACK_SIZE) {
		bpf_jit_free_exec(hdr);
		goto out;
	}

	list_for_each_entry(tmp, &pack_list, list) {
		if ((void *)hdr >= tmp->ptr && (tmp->ptr + BPF_PROG_PACK_SIZE) > (void *)hdr) {
			pack = tmp;
			break;
		}
	}

	if (WARN_ONCE(!pack, "bpf_prog_pack bug\n"))
		goto out;

	nbits = BPF_PROG_SIZE_TO_NBITS(hdr->size);
	pos = ((unsigned long)hdr - (unsigned long)pack->ptr) >> BPF_PROG_CHUNK_SHIFT;

	WARN_ONCE(bpf_arch_text_invalidate(hdr, hdr->size),
		  "bpf_prog_pack bug: missing bpf_arch_text_invalidate?\n");

	bitmap_clear(pack->bitmap, pos, nbits);
	if (bitmap_find_next_zero_area(pack->bitmap, BPF_PROG_CHUNK_COUNT, 0,
				       BPF_PROG_CHUNK_COUNT, 0) == 0) {
		list_del(&pack->list);
		bpf_jit_free_exec(pack->ptr);
		kfree(pack);
	}
out:
	mutex_unlock(&pack_mutex);
}

static atomic_long_t bpf_jit_current;

/* Can be overridden by an arch's JIT compiler if it has a custom,
 * dedicated BPF backend memory area, or if neither of the two
 * below apply.
 */
u64 __weak bpf_jit_alloc_exec_limit(void)
{
#if defined(MODULES_VADDR)
	return MODULES_END - MODULES_VADDR;
#else
	return VMALLOC_END - VMALLOC_START;
#endif
}

static int __init bpf_jit_charge_init(void)
{
	/* Only used as a heuristic here to derive the limit. */
	bpf_jit_limit_max = bpf_jit_alloc_exec_limit();
	bpf_jit_limit = min_t(u64, round_up(bpf_jit_limit_max >> 1,
					    PAGE_SIZE), LONG_MAX);
	return 0;
}
pure_initcall(bpf_jit_charge_init);
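
/* Worked example (illustrative, not part of the original source): on an
 * arch whose module area spans roughly 1 GiB, bpf_jit_limit_max is that
 * full span and the default bpf_jit_limit is half of it, rounded up to a
 * page, so unprivileged JIT allocations start failing once roughly 512 MiB
 * of images are charged via bpf_jit_charge_modmem().
 */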

int bpf_jit_charge_modmem(u32 size)
{
	if (atomic_long_add_return(size, &bpf_jit_current) > READ_ONCE(bpf_jit_limit)) {
		if (!bpf_capable()) {
			atomic_long_sub(size, &bpf_jit_current);
			return -EPERM;
		}
	}

	return 0;
}

void bpf_jit_uncharge_modmem(u32 size)
{
	atomic_long_sub(size, &bpf_jit_current);
}

void *__weak bpf_jit_alloc_exec(unsigned long size)
{
	return module_alloc(size);
}

void __weak bpf_jit_free_exec(void *addr)
{
	module_memfree(addr);
}

struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
		     unsigned int alignment,
		     bpf_jit_fill_hole_t bpf_fill_ill_insns)
{
	struct bpf_binary_header *hdr;
	u32 size, hole, start;

	WARN_ON_ONCE(!is_power_of_2(alignment) ||
		     alignment > BPF_IMAGE_ALIGNMENT);

1046738cbe72SDaniel Borkmann 	/* Most BPF filters are really small, but if some of them
1047738cbe72SDaniel Borkmann 	 * fill a page, allow at least 128 extra bytes to insert a
1048738cbe72SDaniel Borkmann 	 * random section of illegal instructions.
1049738cbe72SDaniel Borkmann 	 */
1050738cbe72SDaniel Borkmann 	size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
1051ede95a63SDaniel Borkmann 
10523486beddSSong Liu 	if (bpf_jit_charge_modmem(size))
1053738cbe72SDaniel Borkmann 		return NULL;
1054dc002bb6SArd Biesheuvel 	hdr = bpf_jit_alloc_exec(size);
1055ede95a63SDaniel Borkmann 	if (!hdr) {
10563486beddSSong Liu 		bpf_jit_uncharge_modmem(size);
1057ede95a63SDaniel Borkmann 		return NULL;
1058ede95a63SDaniel Borkmann 	}
1059738cbe72SDaniel Borkmann 
1060738cbe72SDaniel Borkmann 	/* Fill space with illegal/arch-dep instructions. */
1061738cbe72SDaniel Borkmann 	bpf_fill_ill_insns(hdr, size);
1062738cbe72SDaniel Borkmann 
1063ed2d9e1aSSong Liu 	hdr->size = size;
1064738cbe72SDaniel Borkmann 	hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
1065738cbe72SDaniel Borkmann 		     PAGE_SIZE - sizeof(*hdr));
10668032bf12SJason A. Donenfeld 	start = get_random_u32_below(hole) & ~(alignment - 1);
1067738cbe72SDaniel Borkmann 
1068738cbe72SDaniel Borkmann 	/* Leave a random number of instructions before BPF code. */
1069738cbe72SDaniel Borkmann 	*image_ptr = &hdr->image[start];
1070738cbe72SDaniel Borkmann 
1071738cbe72SDaniel Borkmann 	return hdr;
1072738cbe72SDaniel Borkmann }
1073738cbe72SDaniel Borkmann 
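/* Illustrative sketch, not part of this file: how an arch JIT would
 * typically consume bpf_jit_binary_alloc(). jit_fill_hole(), emit_prog()
 * and the 0xcc trap byte are hypothetical/arch-specific stand-ins.
 */
#if 0
static void jit_fill_hole(void *area, unsigned int size)
{
	/* Pad with bytes that trap if ever executed (int3 on x86). */
	memset(area, 0xcc, size);
}

static int jit_compile(struct bpf_prog *prog, unsigned int proglen)
{
	struct bpf_binary_header *header;
	u8 *image;

	header = bpf_jit_binary_alloc(proglen, &image, 4, jit_fill_hole);
	if (!header)
		return -ENOMEM;

	emit_prog(image, prog);		/* write machine code to image */
	bpf_jit_binary_lock_ro(header);	/* flip the region to RO+X */

	prog->bpf_func = (void *)image;
	prog->jited = 1;
	return 0;
}
#endif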
1074738cbe72SDaniel Borkmann void bpf_jit_binary_free(struct bpf_binary_header *hdr)
1075738cbe72SDaniel Borkmann {
1076ed2d9e1aSSong Liu 	u32 size = hdr->size;
1077ede95a63SDaniel Borkmann 
1078dc002bb6SArd Biesheuvel 	bpf_jit_free_exec(hdr);
1079ed2d9e1aSSong Liu 	bpf_jit_uncharge_modmem(size);
1080738cbe72SDaniel Borkmann }
10814f3446bbSDaniel Borkmann 
108233c98058SSong Liu /* Allocate jit binary from bpf_prog_pack allocator.
108333c98058SSong Liu  * Since the allocated memory is RO+X, the JIT engine cannot write directly
108433c98058SSong Liu  * to the memory. To solve this problem, a RW buffer is also allocated
108533c98058SSong Liu  * at the same time. The JIT engine should calculate offsets based on the
108633c98058SSong Liu  * RO memory address, but write JITed program to the RW buffer. Once the
108733c98058SSong Liu  * JIT engine finishes, it calls bpf_jit_binary_pack_finalize, which copies
108833c98058SSong Liu  * the JITed program to the RO memory.
108933c98058SSong Liu  */
109033c98058SSong Liu struct bpf_binary_header *
109133c98058SSong Liu bpf_jit_binary_pack_alloc(unsigned int proglen, u8 **image_ptr,
109233c98058SSong Liu 			  unsigned int alignment,
109333c98058SSong Liu 			  struct bpf_binary_header **rw_header,
109433c98058SSong Liu 			  u8 **rw_image,
109533c98058SSong Liu 			  bpf_jit_fill_hole_t bpf_fill_ill_insns)
109633c98058SSong Liu {
109733c98058SSong Liu 	struct bpf_binary_header *ro_header;
109833c98058SSong Liu 	u32 size, hole, start;
109933c98058SSong Liu 
110033c98058SSong Liu 	WARN_ON_ONCE(!is_power_of_2(alignment) ||
110133c98058SSong Liu 		     alignment > BPF_IMAGE_ALIGNMENT);
110233c98058SSong Liu 
110333c98058SSong Liu 	/* add 16 bytes for a random section of illegal instructions */
110433c98058SSong Liu 	size = round_up(proglen + sizeof(*ro_header) + 16, BPF_PROG_CHUNK_SIZE);
110533c98058SSong Liu 
110633c98058SSong Liu 	if (bpf_jit_charge_modmem(size))
110733c98058SSong Liu 		return NULL;
1108d88bb5eeSSong Liu 	ro_header = bpf_prog_pack_alloc(size, bpf_fill_ill_insns);
110933c98058SSong Liu 	if (!ro_header) {
111033c98058SSong Liu 		bpf_jit_uncharge_modmem(size);
111133c98058SSong Liu 		return NULL;
111233c98058SSong Liu 	}
111333c98058SSong Liu 
111433c98058SSong Liu 	*rw_header = kvmalloc(size, GFP_KERNEL);
111533c98058SSong Liu 	if (!*rw_header) {
1116d24d2a2bSSong Liu 		bpf_arch_text_copy(&ro_header->size, &size, sizeof(size));
111733c98058SSong Liu 		bpf_prog_pack_free(ro_header);
111833c98058SSong Liu 		bpf_jit_uncharge_modmem(size);
111933c98058SSong Liu 		return NULL;
112033c98058SSong Liu 	}
112133c98058SSong Liu 
112233c98058SSong Liu 	/* Fill space with illegal/arch-dep instructions. */
112333c98058SSong Liu 	bpf_fill_ill_insns(*rw_header, size);
112433c98058SSong Liu 	(*rw_header)->size = size;
112533c98058SSong Liu 
112633c98058SSong Liu 	hole = min_t(unsigned int, size - (proglen + sizeof(*ro_header)),
112733c98058SSong Liu 		     BPF_PROG_CHUNK_SIZE - sizeof(*ro_header));
11288032bf12SJason A. Donenfeld 	start = get_random_u32_below(hole) & ~(alignment - 1);
112933c98058SSong Liu 
113033c98058SSong Liu 	*image_ptr = &ro_header->image[start];
113133c98058SSong Liu 	*rw_image = &(*rw_header)->image[start];
113233c98058SSong Liu 
113333c98058SSong Liu 	return ro_header;
113433c98058SSong Liu }
113533c98058SSong Liu 
113633c98058SSong Liu /* Copy JITed text from rw_header to its final location, the ro_header. */
113733c98058SSong Liu int bpf_jit_binary_pack_finalize(struct bpf_prog *prog,
113833c98058SSong Liu 				 struct bpf_binary_header *ro_header,
113933c98058SSong Liu 				 struct bpf_binary_header *rw_header)
114033c98058SSong Liu {
114133c98058SSong Liu 	void *ptr;
114233c98058SSong Liu 
114333c98058SSong Liu 	ptr = bpf_arch_text_copy(ro_header, rw_header, rw_header->size);
114433c98058SSong Liu 
114533c98058SSong Liu 	kvfree(rw_header);
114633c98058SSong Liu 
114733c98058SSong Liu 	if (IS_ERR(ptr)) {
114833c98058SSong Liu 		bpf_prog_pack_free(ro_header);
114933c98058SSong Liu 		return PTR_ERR(ptr);
115033c98058SSong Liu 	}
115133c98058SSong Liu 	return 0;
115233c98058SSong Liu }
115333c98058SSong Liu 
115433c98058SSong Liu /* bpf_jit_binary_pack_free is called in two different scenarios:
115533c98058SSong Liu  *   1) when the program is freed (after bpf_jit_binary_pack_finalize);
115633c98058SSong Liu  *   2) when the JIT engine fails (before bpf_jit_binary_pack_finalize).
115733c98058SSong Liu  * For case 2), we need to free both the RO memory and the RW buffer.
1158676b2daaSSong Liu  *
1159676b2daaSSong Liu  * bpf_jit_binary_pack_free requires proper ro_header->size. However,
1160676b2daaSSong Liu  * bpf_jit_binary_pack_alloc does not set it. Therefore, ro_header->size
1161676b2daaSSong Liu  * must be set with either bpf_jit_binary_pack_finalize (normal path) or
1162676b2daaSSong Liu  * bpf_arch_text_copy (when jit fails).
116333c98058SSong Liu  */
116433c98058SSong Liu void bpf_jit_binary_pack_free(struct bpf_binary_header *ro_header,
116533c98058SSong Liu 			      struct bpf_binary_header *rw_header)
116633c98058SSong Liu {
1167676b2daaSSong Liu 	u32 size = ro_header->size;
116833c98058SSong Liu 
116933c98058SSong Liu 	bpf_prog_pack_free(ro_header);
117033c98058SSong Liu 	kvfree(rw_header);
117133c98058SSong Liu 	bpf_jit_uncharge_modmem(size);
117233c98058SSong Liu }
117333c98058SSong Liu 
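/* Illustrative sketch, not part of this file: the intended calling
 * sequence for the pack-based API above. emit_prog() is a hypothetical
 * stand-in for the arch code emitter; note that it writes into rw_image
 * while computing branch targets against the final ro_image address.
 */
#if 0
static int jit_compile_packed(struct bpf_prog *prog, unsigned int proglen)
{
	struct bpf_binary_header *ro_header, *rw_header;
	u8 *ro_image, *rw_image;
	int ret;

	ro_header = bpf_jit_binary_pack_alloc(proglen, &ro_image, 8,
					      &rw_header, &rw_image,
					      jit_fill_hole);
	if (!ro_header)
		return -ENOMEM;

	emit_prog(rw_image, ro_image, prog);

	/* Copy RW -> RO+X; frees rw_header regardless of the outcome. */
	ret = bpf_jit_binary_pack_finalize(prog, ro_header, rw_header);
	if (ret)
		return ret;

	prog->bpf_func = (void *)ro_image;
	prog->jited = 1;
	return 0;
}
#endif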
11741d5f82d9SSong Liu struct bpf_binary_header *
11751d5f82d9SSong Liu bpf_jit_binary_pack_hdr(const struct bpf_prog *fp)
11761d5f82d9SSong Liu {
11771d5f82d9SSong Liu 	unsigned long real_start = (unsigned long)fp->bpf_func;
11781d5f82d9SSong Liu 	unsigned long addr;
11791d5f82d9SSong Liu 
11801d5f82d9SSong Liu 	addr = real_start & BPF_PROG_CHUNK_MASK;
11811d5f82d9SSong Liu 	return (void *)addr;
11821d5f82d9SSong Liu }
11831d5f82d9SSong Liu 
118433c98058SSong Liu static inline struct bpf_binary_header *
118533c98058SSong Liu bpf_jit_binary_hdr(const struct bpf_prog *fp)
118633c98058SSong Liu {
118733c98058SSong Liu 	unsigned long real_start = (unsigned long)fp->bpf_func;
118833c98058SSong Liu 	unsigned long addr;
118933c98058SSong Liu 
119033c98058SSong Liu 	addr = real_start & PAGE_MASK;
119133c98058SSong Liu 	return (void *)addr;
119233c98058SSong Liu }
119333c98058SSong Liu 
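/* Both hdr lookups above rely on the same invariant: the image starts
 * within the first chunk/page of its header (see the 'start' offset
 * computation in the allocators), so masking the program address down
 * to the chunk or page boundary recovers the header.
 */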
119474451e66SDaniel Borkmann /* This symbol is only overridden by archs that have different
119574451e66SDaniel Borkmann  * requirements than the usual eBPF JITs, e.g. when they only
119674451e66SDaniel Borkmann  * implement cBPF JIT, do not set images read-only, etc.
119774451e66SDaniel Borkmann  */
119874451e66SDaniel Borkmann void __weak bpf_jit_free(struct bpf_prog *fp)
119974451e66SDaniel Borkmann {
120074451e66SDaniel Borkmann 	if (fp->jited) {
120174451e66SDaniel Borkmann 		struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);
120274451e66SDaniel Borkmann 
120374451e66SDaniel Borkmann 		bpf_jit_binary_free(hdr);
120474451e66SDaniel Borkmann 		WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
120574451e66SDaniel Borkmann 	}
120674451e66SDaniel Borkmann 
120774451e66SDaniel Borkmann 	bpf_prog_unlock_free(fp);
120874451e66SDaniel Borkmann }
120974451e66SDaniel Borkmann 
1210e2c95a61SDaniel Borkmann int bpf_jit_get_func_addr(const struct bpf_prog *prog,
1211e2c95a61SDaniel Borkmann 			  const struct bpf_insn *insn, bool extra_pass,
1212e2c95a61SDaniel Borkmann 			  u64 *func_addr, bool *func_addr_fixed)
1213e2c95a61SDaniel Borkmann {
1214e2c95a61SDaniel Borkmann 	s16 off = insn->off;
1215e2c95a61SDaniel Borkmann 	s32 imm = insn->imm;
1216e2c95a61SDaniel Borkmann 	u8 *addr;
12171cf3bfc6SIlya Leoshkevich 	int err;
1218e2c95a61SDaniel Borkmann 
1219e2c95a61SDaniel Borkmann 	*func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL;
1220e2c95a61SDaniel Borkmann 	if (!*func_addr_fixed) {
1221e2c95a61SDaniel Borkmann 		/* Place-holder address till the last pass has collected
1222e2c95a61SDaniel Borkmann 		 * all addresses for JITed subprograms in which case we
1223e2c95a61SDaniel Borkmann 		 * can pick them up from prog->aux.
1224e2c95a61SDaniel Borkmann 		 */
1225e2c95a61SDaniel Borkmann 		if (!extra_pass)
1226e2c95a61SDaniel Borkmann 			addr = NULL;
1227e2c95a61SDaniel Borkmann 		else if (prog->aux->func &&
1228e2c95a61SDaniel Borkmann 			 off >= 0 && off < prog->aux->func_cnt)
1229e2c95a61SDaniel Borkmann 			addr = (u8 *)prog->aux->func[off]->bpf_func;
1230e2c95a61SDaniel Borkmann 		else
1231e2c95a61SDaniel Borkmann 			return -EINVAL;
12321cf3bfc6SIlya Leoshkevich 	} else if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL &&
12331cf3bfc6SIlya Leoshkevich 		   bpf_jit_supports_far_kfunc_call()) {
12341cf3bfc6SIlya Leoshkevich 		err = bpf_get_kfunc_addr(prog, insn->imm, insn->off, &addr);
12351cf3bfc6SIlya Leoshkevich 		if (err)
12361cf3bfc6SIlya Leoshkevich 			return err;
1237e2c95a61SDaniel Borkmann 	} else {
1238e2c95a61SDaniel Borkmann 		/* Address of a BPF helper call. Since part of the core
1239e2c95a61SDaniel Borkmann 		 * kernel, it's always at a fixed location. __bpf_call_base
1240e2c95a61SDaniel Borkmann 		 * and the helper with imm relative to it are both in core
1241e2c95a61SDaniel Borkmann 		 * kernel.
1242e2c95a61SDaniel Borkmann 		 */
1243e2c95a61SDaniel Borkmann 		addr = (u8 *)__bpf_call_base + imm;
1244e2c95a61SDaniel Borkmann 	}
1245e2c95a61SDaniel Borkmann 
1246e2c95a61SDaniel Borkmann 	*func_addr = (unsigned long)addr;
1247e2c95a61SDaniel Borkmann 	return 0;
1248e2c95a61SDaniel Borkmann }
1249e2c95a61SDaniel Borkmann 
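/* Illustrative sketch, not part of this file: how an arch JIT consumes
 * the helper above when lowering a BPF_JMP | BPF_CALL instruction.
 * emit_call() and ctx are hypothetical emitter pieces.
 */
#if 0
	case BPF_JMP | BPF_CALL: {
		bool func_addr_fixed;
		u64 func_addr;
		int ret;

		ret = bpf_jit_get_func_addr(prog, insn, extra_pass,
					    &func_addr, &func_addr_fixed);
		if (ret < 0)
			return ret;
		emit_call(ctx, func_addr, func_addr_fixed);
		break;
	}
#endif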
12504f3446bbSDaniel Borkmann static int bpf_jit_blind_insn(const struct bpf_insn *from,
12514f3446bbSDaniel Borkmann 			      const struct bpf_insn *aux,
1252ede7c460SNaveen N. Rao 			      struct bpf_insn *to_buff,
1253ede7c460SNaveen N. Rao 			      bool emit_zext)
12544f3446bbSDaniel Borkmann {
12554f3446bbSDaniel Borkmann 	struct bpf_insn *to = to_buff;
1256a251c17aSJason A. Donenfeld 	u32 imm_rnd = get_random_u32();
12574f3446bbSDaniel Borkmann 	s16 off;
12584f3446bbSDaniel Borkmann 
12594f3446bbSDaniel Borkmann 	BUILD_BUG_ON(BPF_REG_AX  + 1 != MAX_BPF_JIT_REG);
12604f3446bbSDaniel Borkmann 	BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);
12614f3446bbSDaniel Borkmann 
12629b73bfddSDaniel Borkmann 	/* Constraints on AX register:
12639b73bfddSDaniel Borkmann 	 *
12649b73bfddSDaniel Borkmann 	 * AX register is inaccessible from user space. It is mapped in
12659b73bfddSDaniel Borkmann 	 * all JITs, and used here for constant blinding rewrites. It is
12669b73bfddSDaniel Borkmann 	 * typically "stateless" meaning its contents are only valid within
12679b73bfddSDaniel Borkmann 	 * the executed instruction, but not across several instructions.
12689b73bfddSDaniel Borkmann 	 * There are a few exceptions however which are further detailed
12699b73bfddSDaniel Borkmann 	 * below.
12709b73bfddSDaniel Borkmann 	 *
12719b73bfddSDaniel Borkmann 	 * Constant blinding is only used by JITs, not in the interpreter.
12729b73bfddSDaniel Borkmann 	 * The interpreter occasionally uses AX as a local temporary
12739b73bfddSDaniel Borkmann 	 * register, e.g. in DIV or MOD instructions.
12749b73bfddSDaniel Borkmann 	 *
12759b73bfddSDaniel Borkmann 	 * In restricted circumstances, the verifier can also use the AX
12769b73bfddSDaniel Borkmann 	 * register for rewrites as long as they do not interfere with
12779b73bfddSDaniel Borkmann 	 * the above cases!
12789b73bfddSDaniel Borkmann 	 */
12799b73bfddSDaniel Borkmann 	if (from->dst_reg == BPF_REG_AX || from->src_reg == BPF_REG_AX)
12809b73bfddSDaniel Borkmann 		goto out;
12819b73bfddSDaniel Borkmann 
12824f3446bbSDaniel Borkmann 	if (from->imm == 0 &&
12834f3446bbSDaniel Borkmann 	    (from->code == (BPF_ALU   | BPF_MOV | BPF_K) ||
12844f3446bbSDaniel Borkmann 	     from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
12854f3446bbSDaniel Borkmann 		*to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
12864f3446bbSDaniel Borkmann 		goto out;
12874f3446bbSDaniel Borkmann 	}
12884f3446bbSDaniel Borkmann 
12894f3446bbSDaniel Borkmann 	switch (from->code) {
12904f3446bbSDaniel Borkmann 	case BPF_ALU | BPF_ADD | BPF_K:
12914f3446bbSDaniel Borkmann 	case BPF_ALU | BPF_SUB | BPF_K:
12924f3446bbSDaniel Borkmann 	case BPF_ALU | BPF_AND | BPF_K:
12934f3446bbSDaniel Borkmann 	case BPF_ALU | BPF_OR  | BPF_K:
12944f3446bbSDaniel Borkmann 	case BPF_ALU | BPF_XOR | BPF_K:
12954f3446bbSDaniel Borkmann 	case BPF_ALU | BPF_MUL | BPF_K:
12964f3446bbSDaniel Borkmann 	case BPF_ALU | BPF_MOV | BPF_K:
12974f3446bbSDaniel Borkmann 	case BPF_ALU | BPF_DIV | BPF_K:
12984f3446bbSDaniel Borkmann 	case BPF_ALU | BPF_MOD | BPF_K:
12994f3446bbSDaniel Borkmann 		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
13004f3446bbSDaniel Borkmann 		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
13017058e3a3SYonghong Song 		*to++ = BPF_ALU32_REG_OFF(from->code, from->dst_reg, BPF_REG_AX, from->off);
13024f3446bbSDaniel Borkmann 		break;
13034f3446bbSDaniel Borkmann 
13044f3446bbSDaniel Borkmann 	case BPF_ALU64 | BPF_ADD | BPF_K:
13054f3446bbSDaniel Borkmann 	case BPF_ALU64 | BPF_SUB | BPF_K:
13064f3446bbSDaniel Borkmann 	case BPF_ALU64 | BPF_AND | BPF_K:
13074f3446bbSDaniel Borkmann 	case BPF_ALU64 | BPF_OR  | BPF_K:
13084f3446bbSDaniel Borkmann 	case BPF_ALU64 | BPF_XOR | BPF_K:
13094f3446bbSDaniel Borkmann 	case BPF_ALU64 | BPF_MUL | BPF_K:
13104f3446bbSDaniel Borkmann 	case BPF_ALU64 | BPF_MOV | BPF_K:
13114f3446bbSDaniel Borkmann 	case BPF_ALU64 | BPF_DIV | BPF_K:
13124f3446bbSDaniel Borkmann 	case BPF_ALU64 | BPF_MOD | BPF_K:
13134f3446bbSDaniel Borkmann 		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
13144f3446bbSDaniel Borkmann 		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
13157058e3a3SYonghong Song 		*to++ = BPF_ALU64_REG_OFF(from->code, from->dst_reg, BPF_REG_AX, from->off);
13164f3446bbSDaniel Borkmann 		break;
13174f3446bbSDaniel Borkmann 
13184f3446bbSDaniel Borkmann 	case BPF_JMP | BPF_JEQ  | BPF_K:
13194f3446bbSDaniel Borkmann 	case BPF_JMP | BPF_JNE  | BPF_K:
13204f3446bbSDaniel Borkmann 	case BPF_JMP | BPF_JGT  | BPF_K:
132192b31a9aSDaniel Borkmann 	case BPF_JMP | BPF_JLT  | BPF_K:
13224f3446bbSDaniel Borkmann 	case BPF_JMP | BPF_JGE  | BPF_K:
132392b31a9aSDaniel Borkmann 	case BPF_JMP | BPF_JLE  | BPF_K:
13244f3446bbSDaniel Borkmann 	case BPF_JMP | BPF_JSGT | BPF_K:
132592b31a9aSDaniel Borkmann 	case BPF_JMP | BPF_JSLT | BPF_K:
13264f3446bbSDaniel Borkmann 	case BPF_JMP | BPF_JSGE | BPF_K:
132792b31a9aSDaniel Borkmann 	case BPF_JMP | BPF_JSLE | BPF_K:
13284f3446bbSDaniel Borkmann 	case BPF_JMP | BPF_JSET | BPF_K:
13294f3446bbSDaniel Borkmann 		/* Account for the extra offset in case of a backjump. */
13304f3446bbSDaniel Borkmann 		off = from->off;
13314f3446bbSDaniel Borkmann 		if (off < 0)
13324f3446bbSDaniel Borkmann 			off -= 2;
13334f3446bbSDaniel Borkmann 		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
13344f3446bbSDaniel Borkmann 		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
13354f3446bbSDaniel Borkmann 		*to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
13364f3446bbSDaniel Borkmann 		break;
13374f3446bbSDaniel Borkmann 
1338a7b76c88SJiong Wang 	case BPF_JMP32 | BPF_JEQ  | BPF_K:
1339a7b76c88SJiong Wang 	case BPF_JMP32 | BPF_JNE  | BPF_K:
1340a7b76c88SJiong Wang 	case BPF_JMP32 | BPF_JGT  | BPF_K:
1341a7b76c88SJiong Wang 	case BPF_JMP32 | BPF_JLT  | BPF_K:
1342a7b76c88SJiong Wang 	case BPF_JMP32 | BPF_JGE  | BPF_K:
1343a7b76c88SJiong Wang 	case BPF_JMP32 | BPF_JLE  | BPF_K:
1344a7b76c88SJiong Wang 	case BPF_JMP32 | BPF_JSGT | BPF_K:
1345a7b76c88SJiong Wang 	case BPF_JMP32 | BPF_JSLT | BPF_K:
1346a7b76c88SJiong Wang 	case BPF_JMP32 | BPF_JSGE | BPF_K:
1347a7b76c88SJiong Wang 	case BPF_JMP32 | BPF_JSLE | BPF_K:
1348a7b76c88SJiong Wang 	case BPF_JMP32 | BPF_JSET | BPF_K:
1349a7b76c88SJiong Wang 		/* Account for the extra offset in case of a backjump. */
1350a7b76c88SJiong Wang 		off = from->off;
1351a7b76c88SJiong Wang 		if (off < 0)
1352a7b76c88SJiong Wang 			off -= 2;
1353a7b76c88SJiong Wang 		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1354a7b76c88SJiong Wang 		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1355a7b76c88SJiong Wang 		*to++ = BPF_JMP32_REG(from->code, from->dst_reg, BPF_REG_AX,
1356a7b76c88SJiong Wang 				      off);
1357a7b76c88SJiong Wang 		break;
1358a7b76c88SJiong Wang 
13594f3446bbSDaniel Borkmann 	case BPF_LD | BPF_IMM | BPF_DW:
13604f3446bbSDaniel Borkmann 		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
13614f3446bbSDaniel Borkmann 		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
13624f3446bbSDaniel Borkmann 		*to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
13634f3446bbSDaniel Borkmann 		*to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
13644f3446bbSDaniel Borkmann 		break;
13654f3446bbSDaniel Borkmann 	case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
13664f3446bbSDaniel Borkmann 		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
13674f3446bbSDaniel Borkmann 		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1368ede7c460SNaveen N. Rao 		if (emit_zext)
1369ede7c460SNaveen N. Rao 			*to++ = BPF_ZEXT_REG(BPF_REG_AX);
13704f3446bbSDaniel Borkmann 		*to++ = BPF_ALU64_REG(BPF_OR,  aux[0].dst_reg, BPF_REG_AX);
13714f3446bbSDaniel Borkmann 		break;
13724f3446bbSDaniel Borkmann 
13734f3446bbSDaniel Borkmann 	case BPF_ST | BPF_MEM | BPF_DW:
13744f3446bbSDaniel Borkmann 	case BPF_ST | BPF_MEM | BPF_W:
13754f3446bbSDaniel Borkmann 	case BPF_ST | BPF_MEM | BPF_H:
13764f3446bbSDaniel Borkmann 	case BPF_ST | BPF_MEM | BPF_B:
13774f3446bbSDaniel Borkmann 		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
13784f3446bbSDaniel Borkmann 		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
13794f3446bbSDaniel Borkmann 		*to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
13804f3446bbSDaniel Borkmann 		break;
13814f3446bbSDaniel Borkmann 	}
13824f3446bbSDaniel Borkmann out:
13834f3446bbSDaniel Borkmann 	return to - to_buff;
13844f3446bbSDaniel Borkmann }
13854f3446bbSDaniel Borkmann 
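/* Worked example of the rewrite above (imm_rnd value illustrative):
 * with imm_rnd = 0x5ee0d0af, the single instruction
 *
 *	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x2a)
 *
 * is blinded into the equivalent three-instruction sequence
 *
 *	BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, 0x5ee0d0af ^ 0x2a)
 *	BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, 0x5ee0d0af)
 *	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_AX)
 *
 * AX ends up holding (imm_rnd ^ 0x2a) ^ imm_rnd == 0x2a, but the
 * attacker-controlled constant never appears verbatim in the image.
 */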
13864f3446bbSDaniel Borkmann static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
13874f3446bbSDaniel Borkmann 					      gfp_t gfp_extra_flags)
13884f3446bbSDaniel Borkmann {
138919809c2dSMichal Hocko 	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
13904f3446bbSDaniel Borkmann 	struct bpf_prog *fp;
13914f3446bbSDaniel Borkmann 
139288dca4caSChristoph Hellwig 	fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags);
13934f3446bbSDaniel Borkmann 	if (fp != NULL) {
13944f3446bbSDaniel Borkmann 		/* aux->prog still points to the fp_other one, so
13954f3446bbSDaniel Borkmann 		 * when promoting the clone to the real program,
13964f3446bbSDaniel Borkmann 		 * this still needs to be adapted.
13974f3446bbSDaniel Borkmann 		 */
13984f3446bbSDaniel Borkmann 		memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
13994f3446bbSDaniel Borkmann 	}
14004f3446bbSDaniel Borkmann 
14014f3446bbSDaniel Borkmann 	return fp;
14024f3446bbSDaniel Borkmann }
14034f3446bbSDaniel Borkmann 
14044f3446bbSDaniel Borkmann static void bpf_prog_clone_free(struct bpf_prog *fp)
14054f3446bbSDaniel Borkmann {
14064f3446bbSDaniel Borkmann 	/* aux was stolen by the other clone, so we cannot free
14074f3446bbSDaniel Borkmann 	 * it from this path! It will be freed eventually by the
14084f3446bbSDaniel Borkmann 	 * other program on release.
14094f3446bbSDaniel Borkmann 	 *
14104f3446bbSDaniel Borkmann 	 * At this point, we don't need a deferred release since
14114f3446bbSDaniel Borkmann 	 * clone is guaranteed to not be locked.
14124f3446bbSDaniel Borkmann 	 */
14134f3446bbSDaniel Borkmann 	fp->aux = NULL;
141453f523f3SCong Wang 	fp->stats = NULL;
141553f523f3SCong Wang 	fp->active = NULL;
14164f3446bbSDaniel Borkmann 	__bpf_prog_free(fp);
14174f3446bbSDaniel Borkmann }
14184f3446bbSDaniel Borkmann 
14194f3446bbSDaniel Borkmann void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
14204f3446bbSDaniel Borkmann {
14214f3446bbSDaniel Borkmann 	/* We have to repoint aux->prog to self, as we don't
14224f3446bbSDaniel Borkmann 	 * know whether fp here is the clone or the original.
14234f3446bbSDaniel Borkmann 	 */
14244f3446bbSDaniel Borkmann 	fp->aux->prog = fp;
14254f3446bbSDaniel Borkmann 	bpf_prog_clone_free(fp_other);
14264f3446bbSDaniel Borkmann }
14274f3446bbSDaniel Borkmann 
14284f3446bbSDaniel Borkmann struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
14294f3446bbSDaniel Borkmann {
14304f3446bbSDaniel Borkmann 	struct bpf_insn insn_buff[16], aux[2];
14314f3446bbSDaniel Borkmann 	struct bpf_prog *clone, *tmp;
14324f3446bbSDaniel Borkmann 	int insn_delta, insn_cnt;
14334f3446bbSDaniel Borkmann 	struct bpf_insn *insn;
14344f3446bbSDaniel Borkmann 	int i, rewritten;
14354f3446bbSDaniel Borkmann 
1436d2a3b7c5SHou Tao 	if (!prog->blinding_requested || prog->blinded)
14374f3446bbSDaniel Borkmann 		return prog;
14384f3446bbSDaniel Borkmann 
14394f3446bbSDaniel Borkmann 	clone = bpf_prog_clone_create(prog, GFP_USER);
14404f3446bbSDaniel Borkmann 	if (!clone)
14414f3446bbSDaniel Borkmann 		return ERR_PTR(-ENOMEM);
14424f3446bbSDaniel Borkmann 
14434f3446bbSDaniel Borkmann 	insn_cnt = clone->len;
14444f3446bbSDaniel Borkmann 	insn = clone->insnsi;
14454f3446bbSDaniel Borkmann 
14464f3446bbSDaniel Borkmann 	for (i = 0; i < insn_cnt; i++, insn++) {
14474b6313cfSAlexei Starovoitov 		if (bpf_pseudo_func(insn)) {
14484b6313cfSAlexei Starovoitov 			/* ld_imm64 with the address of a bpf subprog is not
14494b6313cfSAlexei Starovoitov 			 * a user-controlled constant. Don't randomize it,
14504b6313cfSAlexei Starovoitov 			 * since it will conflict with jit_subprogs() logic.
14514b6313cfSAlexei Starovoitov 			 */
14524b6313cfSAlexei Starovoitov 			insn++;
14534b6313cfSAlexei Starovoitov 			i++;
14544b6313cfSAlexei Starovoitov 			continue;
14554b6313cfSAlexei Starovoitov 		}
14564b6313cfSAlexei Starovoitov 
14574f3446bbSDaniel Borkmann 		/* We temporarily need to hold the original ld64 insn
14584f3446bbSDaniel Borkmann 		 * so that we can still access the first part in the
14594f3446bbSDaniel Borkmann 		 * second blinding run.
14604f3446bbSDaniel Borkmann 		 */
14614f3446bbSDaniel Borkmann 		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
14624f3446bbSDaniel Borkmann 		    insn[1].code == 0)
14634f3446bbSDaniel Borkmann 			memcpy(aux, insn, sizeof(aux));
14644f3446bbSDaniel Borkmann 
1465ede7c460SNaveen N. Rao 		rewritten = bpf_jit_blind_insn(insn, aux, insn_buff,
1466ede7c460SNaveen N. Rao 						clone->aux->verifier_zext);
14674f3446bbSDaniel Borkmann 		if (!rewritten)
14684f3446bbSDaniel Borkmann 			continue;
14694f3446bbSDaniel Borkmann 
14704f3446bbSDaniel Borkmann 		tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
14714f73379eSAlexei Starovoitov 		if (IS_ERR(tmp)) {
14724f3446bbSDaniel Borkmann 			/* Patching may have repointed aux->prog during
14734f3446bbSDaniel Borkmann 			 * realloc from the original one, so we need to
14744f3446bbSDaniel Borkmann 			 * fix it up here on error.
14754f3446bbSDaniel Borkmann 			 */
14764f3446bbSDaniel Borkmann 			bpf_jit_prog_release_other(prog, clone);
14774f73379eSAlexei Starovoitov 			return tmp;
14784f3446bbSDaniel Borkmann 		}
14794f3446bbSDaniel Borkmann 
14804f3446bbSDaniel Borkmann 		clone = tmp;
14814f3446bbSDaniel Borkmann 		insn_delta = rewritten - 1;
14824f3446bbSDaniel Borkmann 
14834f3446bbSDaniel Borkmann 		/* Walk new program and skip insns we just inserted. */
14844f3446bbSDaniel Borkmann 		insn = clone->insnsi + i + insn_delta;
14854f3446bbSDaniel Borkmann 		insn_cnt += insn_delta;
14864f3446bbSDaniel Borkmann 		i        += insn_delta;
14874f3446bbSDaniel Borkmann 	}
14884f3446bbSDaniel Borkmann 
14891c2a088aSAlexei Starovoitov 	clone->blinded = 1;
14904f3446bbSDaniel Borkmann 	return clone;
14914f3446bbSDaniel Borkmann }
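/* Illustrative sketch, not part of this file: the pattern arch JITs use
 * around bpf_jit_blind_constants() in their bpf_int_jit_compile().
 */
#if 0
	struct bpf_prog *tmp, *orig_prog = prog;
	bool tmp_blinded = false;

	tmp = bpf_jit_blind_constants(prog);
	if (IS_ERR(tmp))
		return orig_prog;	/* blinding failed, leave prog as is */
	if (tmp != prog) {
		tmp_blinded = true;
		prog = tmp;		/* JIT the blinded clone instead */
	}

	/* ... JIT compilation of 'prog' ... */

	if (tmp_blinded)
		bpf_jit_prog_release_other(prog, prog == orig_prog ?
					   tmp : orig_prog);
#endif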
1492b954d834SDaniel Borkmann #endif /* CONFIG_BPF_JIT */
1493738cbe72SDaniel Borkmann 
1494f5bffecdSAlexei Starovoitov /* Base function for offset calculation. Needs to go into .text section,
1495f5bffecdSAlexei Starovoitov  * therefore keeping it non-static as well; will also be used by JITs
14967105e828SDaniel Borkmann  * anyway later on, so do not let the compiler omit it. This also needs
14977105e828SDaniel Borkmann  * to go into kallsyms for correlation from e.g. bpftool, so naming
14987105e828SDaniel Borkmann  * must not change.
1499f5bffecdSAlexei Starovoitov  */
1500f5bffecdSAlexei Starovoitov noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
1501f5bffecdSAlexei Starovoitov {
1502f5bffecdSAlexei Starovoitov 	return 0;
1503f5bffecdSAlexei Starovoitov }
15044d9c5c53SAlexei Starovoitov EXPORT_SYMBOL_GPL(__bpf_call_base);
1505f5bffecdSAlexei Starovoitov 
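/* Example of the offset scheme built on __bpf_call_base: for helper
 * calls the verifier stores insn->imm = helper_address - __bpf_call_base,
 * so the interpreter's JMP_CALL below (and bpf_jit_get_func_addr()
 * above) can reconstruct the pointer as __bpf_call_base + insn->imm.
 */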
15065e581dadSDaniel Borkmann /* All UAPI available opcodes. */
15075e581dadSDaniel Borkmann #define BPF_INSN_MAP(INSN_2, INSN_3)		\
15085e581dadSDaniel Borkmann 	/* 32 bit ALU operations. */		\
15095e581dadSDaniel Borkmann 	/*   Register based. */			\
15105e581dadSDaniel Borkmann 	INSN_3(ALU, ADD,  X),			\
15115e581dadSDaniel Borkmann 	INSN_3(ALU, SUB,  X),			\
15125e581dadSDaniel Borkmann 	INSN_3(ALU, AND,  X),			\
15135e581dadSDaniel Borkmann 	INSN_3(ALU, OR,   X),			\
15145e581dadSDaniel Borkmann 	INSN_3(ALU, LSH,  X),			\
15155e581dadSDaniel Borkmann 	INSN_3(ALU, RSH,  X),			\
15165e581dadSDaniel Borkmann 	INSN_3(ALU, XOR,  X),			\
15175e581dadSDaniel Borkmann 	INSN_3(ALU, MUL,  X),			\
15185e581dadSDaniel Borkmann 	INSN_3(ALU, MOV,  X),			\
15192dc6b100SJiong Wang 	INSN_3(ALU, ARSH, X),			\
15205e581dadSDaniel Borkmann 	INSN_3(ALU, DIV,  X),			\
15215e581dadSDaniel Borkmann 	INSN_3(ALU, MOD,  X),			\
15225e581dadSDaniel Borkmann 	INSN_2(ALU, NEG),			\
15235e581dadSDaniel Borkmann 	INSN_3(ALU, END, TO_BE),		\
15245e581dadSDaniel Borkmann 	INSN_3(ALU, END, TO_LE),		\
15255e581dadSDaniel Borkmann 	/*   Immediate based. */		\
15265e581dadSDaniel Borkmann 	INSN_3(ALU, ADD,  K),			\
15275e581dadSDaniel Borkmann 	INSN_3(ALU, SUB,  K),			\
15285e581dadSDaniel Borkmann 	INSN_3(ALU, AND,  K),			\
15295e581dadSDaniel Borkmann 	INSN_3(ALU, OR,   K),			\
15305e581dadSDaniel Borkmann 	INSN_3(ALU, LSH,  K),			\
15315e581dadSDaniel Borkmann 	INSN_3(ALU, RSH,  K),			\
15325e581dadSDaniel Borkmann 	INSN_3(ALU, XOR,  K),			\
15335e581dadSDaniel Borkmann 	INSN_3(ALU, MUL,  K),			\
15345e581dadSDaniel Borkmann 	INSN_3(ALU, MOV,  K),			\
15352dc6b100SJiong Wang 	INSN_3(ALU, ARSH, K),			\
15365e581dadSDaniel Borkmann 	INSN_3(ALU, DIV,  K),			\
15375e581dadSDaniel Borkmann 	INSN_3(ALU, MOD,  K),			\
15385e581dadSDaniel Borkmann 	/* 64 bit ALU operations. */		\
15395e581dadSDaniel Borkmann 	/*   Register based. */			\
15405e581dadSDaniel Borkmann 	INSN_3(ALU64, ADD,  X),			\
15415e581dadSDaniel Borkmann 	INSN_3(ALU64, SUB,  X),			\
15425e581dadSDaniel Borkmann 	INSN_3(ALU64, AND,  X),			\
15435e581dadSDaniel Borkmann 	INSN_3(ALU64, OR,   X),			\
15445e581dadSDaniel Borkmann 	INSN_3(ALU64, LSH,  X),			\
15455e581dadSDaniel Borkmann 	INSN_3(ALU64, RSH,  X),			\
15465e581dadSDaniel Borkmann 	INSN_3(ALU64, XOR,  X),			\
15475e581dadSDaniel Borkmann 	INSN_3(ALU64, MUL,  X),			\
15485e581dadSDaniel Borkmann 	INSN_3(ALU64, MOV,  X),			\
15495e581dadSDaniel Borkmann 	INSN_3(ALU64, ARSH, X),			\
15505e581dadSDaniel Borkmann 	INSN_3(ALU64, DIV,  X),			\
15515e581dadSDaniel Borkmann 	INSN_3(ALU64, MOD,  X),			\
15525e581dadSDaniel Borkmann 	INSN_2(ALU64, NEG),			\
15530845c3dbSYonghong Song 	INSN_3(ALU64, END, TO_LE),		\
15545e581dadSDaniel Borkmann 	/*   Immediate based. */		\
15555e581dadSDaniel Borkmann 	INSN_3(ALU64, ADD,  K),			\
15565e581dadSDaniel Borkmann 	INSN_3(ALU64, SUB,  K),			\
15575e581dadSDaniel Borkmann 	INSN_3(ALU64, AND,  K),			\
15585e581dadSDaniel Borkmann 	INSN_3(ALU64, OR,   K),			\
15595e581dadSDaniel Borkmann 	INSN_3(ALU64, LSH,  K),			\
15605e581dadSDaniel Borkmann 	INSN_3(ALU64, RSH,  K),			\
15615e581dadSDaniel Borkmann 	INSN_3(ALU64, XOR,  K),			\
15625e581dadSDaniel Borkmann 	INSN_3(ALU64, MUL,  K),			\
15635e581dadSDaniel Borkmann 	INSN_3(ALU64, MOV,  K),			\
15645e581dadSDaniel Borkmann 	INSN_3(ALU64, ARSH, K),			\
15655e581dadSDaniel Borkmann 	INSN_3(ALU64, DIV,  K),			\
15665e581dadSDaniel Borkmann 	INSN_3(ALU64, MOD,  K),			\
15675e581dadSDaniel Borkmann 	/* Call instruction. */			\
15685e581dadSDaniel Borkmann 	INSN_2(JMP, CALL),			\
15695e581dadSDaniel Borkmann 	/* Exit instruction. */			\
15705e581dadSDaniel Borkmann 	INSN_2(JMP, EXIT),			\
1571503a8865SJiong Wang 	/* 32-bit Jump instructions. */		\
1572503a8865SJiong Wang 	/*   Register based. */			\
1573503a8865SJiong Wang 	INSN_3(JMP32, JEQ,  X),			\
1574503a8865SJiong Wang 	INSN_3(JMP32, JNE,  X),			\
1575503a8865SJiong Wang 	INSN_3(JMP32, JGT,  X),			\
1576503a8865SJiong Wang 	INSN_3(JMP32, JLT,  X),			\
1577503a8865SJiong Wang 	INSN_3(JMP32, JGE,  X),			\
1578503a8865SJiong Wang 	INSN_3(JMP32, JLE,  X),			\
1579503a8865SJiong Wang 	INSN_3(JMP32, JSGT, X),			\
1580503a8865SJiong Wang 	INSN_3(JMP32, JSLT, X),			\
1581503a8865SJiong Wang 	INSN_3(JMP32, JSGE, X),			\
1582503a8865SJiong Wang 	INSN_3(JMP32, JSLE, X),			\
1583503a8865SJiong Wang 	INSN_3(JMP32, JSET, X),			\
1584503a8865SJiong Wang 	/*   Immediate based. */		\
1585503a8865SJiong Wang 	INSN_3(JMP32, JEQ,  K),			\
1586503a8865SJiong Wang 	INSN_3(JMP32, JNE,  K),			\
1587503a8865SJiong Wang 	INSN_3(JMP32, JGT,  K),			\
1588503a8865SJiong Wang 	INSN_3(JMP32, JLT,  K),			\
1589503a8865SJiong Wang 	INSN_3(JMP32, JGE,  K),			\
1590503a8865SJiong Wang 	INSN_3(JMP32, JLE,  K),			\
1591503a8865SJiong Wang 	INSN_3(JMP32, JSGT, K),			\
1592503a8865SJiong Wang 	INSN_3(JMP32, JSLT, K),			\
1593503a8865SJiong Wang 	INSN_3(JMP32, JSGE, K),			\
1594503a8865SJiong Wang 	INSN_3(JMP32, JSLE, K),			\
1595503a8865SJiong Wang 	INSN_3(JMP32, JSET, K),			\
15965e581dadSDaniel Borkmann 	/* Jump instructions. */		\
15975e581dadSDaniel Borkmann 	/*   Register based. */			\
15985e581dadSDaniel Borkmann 	INSN_3(JMP, JEQ,  X),			\
15995e581dadSDaniel Borkmann 	INSN_3(JMP, JNE,  X),			\
16005e581dadSDaniel Borkmann 	INSN_3(JMP, JGT,  X),			\
16015e581dadSDaniel Borkmann 	INSN_3(JMP, JLT,  X),			\
16025e581dadSDaniel Borkmann 	INSN_3(JMP, JGE,  X),			\
16035e581dadSDaniel Borkmann 	INSN_3(JMP, JLE,  X),			\
16045e581dadSDaniel Borkmann 	INSN_3(JMP, JSGT, X),			\
16055e581dadSDaniel Borkmann 	INSN_3(JMP, JSLT, X),			\
16065e581dadSDaniel Borkmann 	INSN_3(JMP, JSGE, X),			\
16075e581dadSDaniel Borkmann 	INSN_3(JMP, JSLE, X),			\
16085e581dadSDaniel Borkmann 	INSN_3(JMP, JSET, X),			\
16095e581dadSDaniel Borkmann 	/*   Immediate based. */		\
16105e581dadSDaniel Borkmann 	INSN_3(JMP, JEQ,  K),			\
16115e581dadSDaniel Borkmann 	INSN_3(JMP, JNE,  K),			\
16125e581dadSDaniel Borkmann 	INSN_3(JMP, JGT,  K),			\
16135e581dadSDaniel Borkmann 	INSN_3(JMP, JLT,  K),			\
16145e581dadSDaniel Borkmann 	INSN_3(JMP, JGE,  K),			\
16155e581dadSDaniel Borkmann 	INSN_3(JMP, JLE,  K),			\
16165e581dadSDaniel Borkmann 	INSN_3(JMP, JSGT, K),			\
16175e581dadSDaniel Borkmann 	INSN_3(JMP, JSLT, K),			\
16185e581dadSDaniel Borkmann 	INSN_3(JMP, JSGE, K),			\
16195e581dadSDaniel Borkmann 	INSN_3(JMP, JSLE, K),			\
16205e581dadSDaniel Borkmann 	INSN_3(JMP, JSET, K),			\
16215e581dadSDaniel Borkmann 	INSN_2(JMP, JA),			\
16224cd58e9aSYonghong Song 	INSN_2(JMP32, JA),			\
16235e581dadSDaniel Borkmann 	/* Store instructions. */		\
16245e581dadSDaniel Borkmann 	/*   Register based. */			\
16255e581dadSDaniel Borkmann 	INSN_3(STX, MEM,  B),			\
16265e581dadSDaniel Borkmann 	INSN_3(STX, MEM,  H),			\
16275e581dadSDaniel Borkmann 	INSN_3(STX, MEM,  W),			\
16285e581dadSDaniel Borkmann 	INSN_3(STX, MEM,  DW),			\
162991c960b0SBrendan Jackman 	INSN_3(STX, ATOMIC, W),			\
163091c960b0SBrendan Jackman 	INSN_3(STX, ATOMIC, DW),		\
16315e581dadSDaniel Borkmann 	/*   Immediate based. */		\
16325e581dadSDaniel Borkmann 	INSN_3(ST, MEM, B),			\
16335e581dadSDaniel Borkmann 	INSN_3(ST, MEM, H),			\
16345e581dadSDaniel Borkmann 	INSN_3(ST, MEM, W),			\
16355e581dadSDaniel Borkmann 	INSN_3(ST, MEM, DW),			\
16365e581dadSDaniel Borkmann 	/* Load instructions. */		\
16375e581dadSDaniel Borkmann 	/*   Register based. */			\
16385e581dadSDaniel Borkmann 	INSN_3(LDX, MEM, B),			\
16395e581dadSDaniel Borkmann 	INSN_3(LDX, MEM, H),			\
16405e581dadSDaniel Borkmann 	INSN_3(LDX, MEM, W),			\
16415e581dadSDaniel Borkmann 	INSN_3(LDX, MEM, DW),			\
16421f9a1ea8SYonghong Song 	INSN_3(LDX, MEMSX, B),			\
16431f9a1ea8SYonghong Song 	INSN_3(LDX, MEMSX, H),			\
16441f9a1ea8SYonghong Song 	INSN_3(LDX, MEMSX, W),			\
16455e581dadSDaniel Borkmann 	/*   Immediate based. */		\
1646e0cea7ceSDaniel Borkmann 	INSN_3(LD, IMM, DW)
16475e581dadSDaniel Borkmann 
16485e581dadSDaniel Borkmann bool bpf_opcode_in_insntable(u8 code)
16495e581dadSDaniel Borkmann {
16505e581dadSDaniel Borkmann #define BPF_INSN_2_TBL(x, y)    [BPF_##x | BPF_##y] = true
16515e581dadSDaniel Borkmann #define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true
16525e581dadSDaniel Borkmann 	static const bool public_insntable[256] = {
16535e581dadSDaniel Borkmann 		[0 ... 255] = false,
16545e581dadSDaniel Borkmann 		/* Now overwrite non-defaults ... */
16555e581dadSDaniel Borkmann 		BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL),
1656e0cea7ceSDaniel Borkmann 		/* UAPI exposed, but rewritten opcodes. cBPF carry-over. */
1657e0cea7ceSDaniel Borkmann 		[BPF_LD | BPF_ABS | BPF_B] = true,
1658e0cea7ceSDaniel Borkmann 		[BPF_LD | BPF_ABS | BPF_H] = true,
1659e0cea7ceSDaniel Borkmann 		[BPF_LD | BPF_ABS | BPF_W] = true,
1660e0cea7ceSDaniel Borkmann 		[BPF_LD | BPF_IND | BPF_B] = true,
1661e0cea7ceSDaniel Borkmann 		[BPF_LD | BPF_IND | BPF_H] = true,
1662e0cea7ceSDaniel Borkmann 		[BPF_LD | BPF_IND | BPF_W] = true,
16635e581dadSDaniel Borkmann 	};
16645e581dadSDaniel Borkmann #undef BPF_INSN_3_TBL
16655e581dadSDaniel Borkmann #undef BPF_INSN_2_TBL
16665e581dadSDaniel Borkmann 	return public_insntable[code];
16675e581dadSDaniel Borkmann }
16685e581dadSDaniel Borkmann 
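/* Illustrative sketch, not part of this file: the kind of sanity check
 * this table enables on the verifier side, rejecting opcodes that are
 * not part of the public instruction set.
 */
#if 0
	if (!bpf_opcode_in_insntable(insn->code)) {
		verbose(env, "unknown insn code %02x\n", insn->code);
		return -EINVAL;
	}
#endif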
1669290af866SAlexei Starovoitov #ifndef CONFIG_BPF_JIT_ALWAYS_ON
1670f5bffecdSAlexei Starovoitov /**
1671019d0454SRandy Dunlap  *	___bpf_prog_run - run eBPF program on a given context
1672de1da68dSValdis Kletnieks  *	@regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers
16737ae457c1SAlexei Starovoitov  *	@insn: is the array of eBPF instructions
1674f5bffecdSAlexei Starovoitov  *
16757ae457c1SAlexei Starovoitov  * Decode and execute eBPF instructions.
1676019d0454SRandy Dunlap  *
1677019d0454SRandy Dunlap  * Return: whatever value is in %BPF_R0 at program exit
1678f5bffecdSAlexei Starovoitov  */
16792ec9898eSHe Fengqing static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
1680f5bffecdSAlexei Starovoitov {
16815e581dadSDaniel Borkmann #define BPF_INSN_2_LBL(x, y)    [BPF_##x | BPF_##y] = &&x##_##y
16825e581dadSDaniel Borkmann #define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
1683e55a7325SJosh Poimboeuf 	static const void * const jumptable[256] __annotate_jump_table = {
1684f5bffecdSAlexei Starovoitov 		[0 ... 255] = &&default_label,
1685f5bffecdSAlexei Starovoitov 		/* Now overwrite non-defaults ... */
16865e581dadSDaniel Borkmann 		BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL),
16875e581dadSDaniel Borkmann 		/* Non-UAPI available opcodes. */
16881ea47e01SAlexei Starovoitov 		[BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
168971189fa9SAlexei Starovoitov 		[BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
1690f5e81d11SDaniel Borkmann 		[BPF_ST  | BPF_NOSPEC] = &&ST_NOSPEC,
16912a02759eSAlexei Starovoitov 		[BPF_LDX | BPF_PROBE_MEM | BPF_B] = &&LDX_PROBE_MEM_B,
16922a02759eSAlexei Starovoitov 		[BPF_LDX | BPF_PROBE_MEM | BPF_H] = &&LDX_PROBE_MEM_H,
16932a02759eSAlexei Starovoitov 		[BPF_LDX | BPF_PROBE_MEM | BPF_W] = &&LDX_PROBE_MEM_W,
16942a02759eSAlexei Starovoitov 		[BPF_LDX | BPF_PROBE_MEM | BPF_DW] = &&LDX_PROBE_MEM_DW,
16951f9a1ea8SYonghong Song 		[BPF_LDX | BPF_PROBE_MEMSX | BPF_B] = &&LDX_PROBE_MEMSX_B,
16961f9a1ea8SYonghong Song 		[BPF_LDX | BPF_PROBE_MEMSX | BPF_H] = &&LDX_PROBE_MEMSX_H,
16971f9a1ea8SYonghong Song 		[BPF_LDX | BPF_PROBE_MEMSX | BPF_W] = &&LDX_PROBE_MEMSX_W,
1698f5bffecdSAlexei Starovoitov 	};
16995e581dadSDaniel Borkmann #undef BPF_INSN_3_LBL
17005e581dadSDaniel Borkmann #undef BPF_INSN_2_LBL
170104fd61abSAlexei Starovoitov 	u32 tail_call_cnt = 0;
1702f5bffecdSAlexei Starovoitov 
1703f5bffecdSAlexei Starovoitov #define CONT	 ({ insn++; goto select_insn; })
1704f5bffecdSAlexei Starovoitov #define CONT_JMP ({ insn++; goto select_insn; })
1705f5bffecdSAlexei Starovoitov 
1706f5bffecdSAlexei Starovoitov select_insn:
1707f5bffecdSAlexei Starovoitov 	goto *jumptable[insn->code];
1708f5bffecdSAlexei Starovoitov 
170928131e9dSDaniel Borkmann 	/* Explicitly mask the register-based shift amounts with 63 or 31
171028131e9dSDaniel Borkmann 	 * to avoid undefined behavior. Normally this won't affect the
171128131e9dSDaniel Borkmann 	 * generated code, for example, in case of native 64 bit archs such
171228131e9dSDaniel Borkmann 	 * as x86-64 or arm64, the compiler is optimizing the AND away for
171328131e9dSDaniel Borkmann 	 * the interpreter. In case of JITs, each of the JIT backends compiles
171428131e9dSDaniel Borkmann 	 * the BPF shift operations to machine instructions which produce
171528131e9dSDaniel Borkmann 	 * implementation-defined results in such a case; the resulting
171628131e9dSDaniel Borkmann 	 * contents of the register may be arbitrary, but program behaviour
171728131e9dSDaniel Borkmann 	 * as a whole remains defined. In other words, in case of JIT backends,
171828131e9dSDaniel Borkmann 	 * the AND must /not/ be added to the emitted LSH/RSH/ARSH translation.
171928131e9dSDaniel Borkmann 	 */
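	/* Example: BPF_ALU64 | BPF_LSH | BPF_X with a runtime SRC of 70
	 * computes DST << (70 & 63), i.e. DST << 6, instead of hitting
	 * undefined behavior with an out-of-range shift count.
	 */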
172028131e9dSDaniel Borkmann 	/* ALU (shifts) */
172128131e9dSDaniel Borkmann #define SHT(OPCODE, OP)					\
172228131e9dSDaniel Borkmann 	ALU64_##OPCODE##_X:				\
172328131e9dSDaniel Borkmann 		DST = DST OP (SRC & 63);		\
172428131e9dSDaniel Borkmann 		CONT;					\
172528131e9dSDaniel Borkmann 	ALU_##OPCODE##_X:				\
172628131e9dSDaniel Borkmann 		DST = (u32) DST OP ((u32) SRC & 31);	\
172728131e9dSDaniel Borkmann 		CONT;					\
172828131e9dSDaniel Borkmann 	ALU64_##OPCODE##_K:				\
172928131e9dSDaniel Borkmann 		DST = DST OP IMM;			\
173028131e9dSDaniel Borkmann 		CONT;					\
173128131e9dSDaniel Borkmann 	ALU_##OPCODE##_K:				\
173228131e9dSDaniel Borkmann 		DST = (u32) DST OP (u32) IMM;		\
173328131e9dSDaniel Borkmann 		CONT;
173428131e9dSDaniel Borkmann 	/* ALU (rest) */
1735f5bffecdSAlexei Starovoitov #define ALU(OPCODE, OP)					\
1736f5bffecdSAlexei Starovoitov 	ALU64_##OPCODE##_X:				\
1737f5bffecdSAlexei Starovoitov 		DST = DST OP SRC;			\
1738f5bffecdSAlexei Starovoitov 		CONT;					\
1739f5bffecdSAlexei Starovoitov 	ALU_##OPCODE##_X:				\
1740f5bffecdSAlexei Starovoitov 		DST = (u32) DST OP (u32) SRC;		\
1741f5bffecdSAlexei Starovoitov 		CONT;					\
1742f5bffecdSAlexei Starovoitov 	ALU64_##OPCODE##_K:				\
1743f5bffecdSAlexei Starovoitov 		DST = DST OP IMM;			\
1744f5bffecdSAlexei Starovoitov 		CONT;					\
1745f5bffecdSAlexei Starovoitov 	ALU_##OPCODE##_K:				\
1746f5bffecdSAlexei Starovoitov 		DST = (u32) DST OP (u32) IMM;		\
1747f5bffecdSAlexei Starovoitov 		CONT;
1748f5bffecdSAlexei Starovoitov 	ALU(ADD,  +)
1749f5bffecdSAlexei Starovoitov 	ALU(SUB,  -)
1750f5bffecdSAlexei Starovoitov 	ALU(AND,  &)
1751f5bffecdSAlexei Starovoitov 	ALU(OR,   |)
1752f5bffecdSAlexei Starovoitov 	ALU(XOR,  ^)
1753f5bffecdSAlexei Starovoitov 	ALU(MUL,  *)
175428131e9dSDaniel Borkmann 	SHT(LSH, <<)
175528131e9dSDaniel Borkmann 	SHT(RSH, >>)
175628131e9dSDaniel Borkmann #undef SHT
1757f5bffecdSAlexei Starovoitov #undef ALU
1758f5bffecdSAlexei Starovoitov 	ALU_NEG:
1759f5bffecdSAlexei Starovoitov 		DST = (u32) -DST;
1760f5bffecdSAlexei Starovoitov 		CONT;
1761f5bffecdSAlexei Starovoitov 	ALU64_NEG:
1762f5bffecdSAlexei Starovoitov 		DST = -DST;
1763f5bffecdSAlexei Starovoitov 		CONT;
1764f5bffecdSAlexei Starovoitov 	ALU_MOV_X:
17658100928cSYonghong Song 		switch (OFF) {
17668100928cSYonghong Song 		case 0:
1767f5bffecdSAlexei Starovoitov 			DST = (u32) SRC;
17688100928cSYonghong Song 			break;
17698100928cSYonghong Song 		case 8:
17708100928cSYonghong Song 			DST = (u32)(s8) SRC;
17718100928cSYonghong Song 			break;
17728100928cSYonghong Song 		case 16:
17738100928cSYonghong Song 			DST = (u32)(s16) SRC;
17748100928cSYonghong Song 			break;
17758100928cSYonghong Song 		}
1776f5bffecdSAlexei Starovoitov 		CONT;
1777f5bffecdSAlexei Starovoitov 	ALU_MOV_K:
1778f5bffecdSAlexei Starovoitov 		DST = (u32) IMM;
1779f5bffecdSAlexei Starovoitov 		CONT;
1780f5bffecdSAlexei Starovoitov 	ALU64_MOV_X:
17818100928cSYonghong Song 		switch (OFF) {
17828100928cSYonghong Song 		case 0:
1783f5bffecdSAlexei Starovoitov 			DST = SRC;
17848100928cSYonghong Song 			break;
17858100928cSYonghong Song 		case 8:
17868100928cSYonghong Song 			DST = (s8) SRC;
17878100928cSYonghong Song 			break;
17888100928cSYonghong Song 		case 16:
17898100928cSYonghong Song 			DST = (s16) SRC;
17908100928cSYonghong Song 			break;
17918100928cSYonghong Song 		case 32:
17928100928cSYonghong Song 			DST = (s32) SRC;
17938100928cSYonghong Song 			break;
17948100928cSYonghong Song 		}
1795f5bffecdSAlexei Starovoitov 		CONT;
1796f5bffecdSAlexei Starovoitov 	ALU64_MOV_K:
1797f5bffecdSAlexei Starovoitov 		DST = IMM;
1798f5bffecdSAlexei Starovoitov 		CONT;
179902ab695bSAlexei Starovoitov 	LD_IMM_DW:
180002ab695bSAlexei Starovoitov 		DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
180102ab695bSAlexei Starovoitov 		insn++;
180202ab695bSAlexei Starovoitov 		CONT;
18032dc6b100SJiong Wang 	ALU_ARSH_X:
180428131e9dSDaniel Borkmann 		DST = (u64) (u32) (((s32) DST) >> (SRC & 31));
18052dc6b100SJiong Wang 		CONT;
18062dc6b100SJiong Wang 	ALU_ARSH_K:
180775672ddaSJiong Wang 		DST = (u64) (u32) (((s32) DST) >> IMM);
18082dc6b100SJiong Wang 		CONT;
1809f5bffecdSAlexei Starovoitov 	ALU64_ARSH_X:
181028131e9dSDaniel Borkmann 		(*(s64 *) &DST) >>= (SRC & 63);
1811f5bffecdSAlexei Starovoitov 		CONT;
1812f5bffecdSAlexei Starovoitov 	ALU64_ARSH_K:
1813f5bffecdSAlexei Starovoitov 		(*(s64 *) &DST) >>= IMM;
1814f5bffecdSAlexei Starovoitov 		CONT;
1815f5bffecdSAlexei Starovoitov 	ALU64_MOD_X:
1816ec0e2da9SYonghong Song 		switch (OFF) {
1817ec0e2da9SYonghong Song 		case 0:
1818144cd91cSDaniel Borkmann 			div64_u64_rem(DST, SRC, &AX);
1819144cd91cSDaniel Borkmann 			DST = AX;
1820ec0e2da9SYonghong Song 			break;
1821ec0e2da9SYonghong Song 		case 1:
1822ec0e2da9SYonghong Song 			AX = div64_s64(DST, SRC);
1823ec0e2da9SYonghong Song 			DST = DST - AX * SRC;
1824ec0e2da9SYonghong Song 			break;
1825ec0e2da9SYonghong Song 		}
1826f5bffecdSAlexei Starovoitov 		CONT;
1827f5bffecdSAlexei Starovoitov 	ALU_MOD_X:
1828ec0e2da9SYonghong Song 		switch (OFF) {
1829ec0e2da9SYonghong Song 		case 0:
1830144cd91cSDaniel Borkmann 			AX = (u32) DST;
1831144cd91cSDaniel Borkmann 			DST = do_div(AX, (u32) SRC);
1832ec0e2da9SYonghong Song 			break;
1833ec0e2da9SYonghong Song 		case 1:
1834ec0e2da9SYonghong Song 			AX = abs((s32)DST);
1835ec0e2da9SYonghong Song 			AX = do_div(AX, abs((s32)SRC));
1836ec0e2da9SYonghong Song 			if ((s32)DST < 0)
1837ec0e2da9SYonghong Song 				DST = (u32)-AX;
1838ec0e2da9SYonghong Song 			else
1839ec0e2da9SYonghong Song 				DST = (u32)AX;
1840ec0e2da9SYonghong Song 			break;
1841ec0e2da9SYonghong Song 		}
1842f5bffecdSAlexei Starovoitov 		CONT;
1843f5bffecdSAlexei Starovoitov 	ALU64_MOD_K:
1844ec0e2da9SYonghong Song 		switch (OFF) {
1845ec0e2da9SYonghong Song 		case 0:
1846144cd91cSDaniel Borkmann 			div64_u64_rem(DST, IMM, &AX);
1847144cd91cSDaniel Borkmann 			DST = AX;
1848ec0e2da9SYonghong Song 			break;
1849ec0e2da9SYonghong Song 		case 1:
1850ec0e2da9SYonghong Song 			AX = div64_s64(DST, IMM);
1851ec0e2da9SYonghong Song 			DST = DST - AX * IMM;
1852ec0e2da9SYonghong Song 			break;
1853ec0e2da9SYonghong Song 		}
1854f5bffecdSAlexei Starovoitov 		CONT;
1855f5bffecdSAlexei Starovoitov 	ALU_MOD_K:
1856ec0e2da9SYonghong Song 		switch (OFF) {
1857ec0e2da9SYonghong Song 		case 0:
1858144cd91cSDaniel Borkmann 			AX = (u32) DST;
1859144cd91cSDaniel Borkmann 			DST = do_div(AX, (u32) IMM);
1860ec0e2da9SYonghong Song 			break;
1861ec0e2da9SYonghong Song 		case 1:
1862ec0e2da9SYonghong Song 			AX = abs((s32)DST);
1863ec0e2da9SYonghong Song 			AX = do_div(AX, abs((s32)IMM));
1864ec0e2da9SYonghong Song 			if ((s32)DST < 0)
1865ec0e2da9SYonghong Song 				DST = (u32)-AX;
1866ec0e2da9SYonghong Song 			else
1867ec0e2da9SYonghong Song 				DST = (u32)AX;
1868ec0e2da9SYonghong Song 			break;
1869ec0e2da9SYonghong Song 		}
1870f5bffecdSAlexei Starovoitov 		CONT;
1871f5bffecdSAlexei Starovoitov 	ALU64_DIV_X:
1872ec0e2da9SYonghong Song 		switch (OFF) {
1873ec0e2da9SYonghong Song 		case 0:
1874876a7ae6SAlexei Starovoitov 			DST = div64_u64(DST, SRC);
1875ec0e2da9SYonghong Song 			break;
1876ec0e2da9SYonghong Song 		case 1:
1877ec0e2da9SYonghong Song 			DST = div64_s64(DST, SRC);
1878ec0e2da9SYonghong Song 			break;
1879ec0e2da9SYonghong Song 		}
1880f5bffecdSAlexei Starovoitov 		CONT;
1881f5bffecdSAlexei Starovoitov 	ALU_DIV_X:
1882ec0e2da9SYonghong Song 		switch (OFF) {
1883ec0e2da9SYonghong Song 		case 0:
1884144cd91cSDaniel Borkmann 			AX = (u32) DST;
1885144cd91cSDaniel Borkmann 			do_div(AX, (u32) SRC);
1886144cd91cSDaniel Borkmann 			DST = (u32) AX;
1887ec0e2da9SYonghong Song 			break;
1888ec0e2da9SYonghong Song 		case 1:
1889ec0e2da9SYonghong Song 			AX = abs((s32)DST);
1890ec0e2da9SYonghong Song 			do_div(AX, abs((s32)SRC));
189109fedc73SYonghong Song 			if (((s32)DST < 0) == ((s32)SRC < 0))
1892ec0e2da9SYonghong Song 				DST = (u32)AX;
1893ec0e2da9SYonghong Song 			else
1894ec0e2da9SYonghong Song 				DST = (u32)-AX;
1895ec0e2da9SYonghong Song 			break;
1896ec0e2da9SYonghong Song 		}
1897f5bffecdSAlexei Starovoitov 		CONT;
1898f5bffecdSAlexei Starovoitov 	ALU64_DIV_K:
1899ec0e2da9SYonghong Song 		switch (OFF) {
1900ec0e2da9SYonghong Song 		case 0:
1901876a7ae6SAlexei Starovoitov 			DST = div64_u64(DST, IMM);
1902ec0e2da9SYonghong Song 			break;
1903ec0e2da9SYonghong Song 		case 1:
1904ec0e2da9SYonghong Song 			DST = div64_s64(DST, IMM);
1905ec0e2da9SYonghong Song 			break;
1906ec0e2da9SYonghong Song 		}
1907f5bffecdSAlexei Starovoitov 		CONT;
1908f5bffecdSAlexei Starovoitov 	ALU_DIV_K:
1909ec0e2da9SYonghong Song 		switch (OFF) {
1910ec0e2da9SYonghong Song 		case 0:
1911144cd91cSDaniel Borkmann 			AX = (u32) DST;
1912144cd91cSDaniel Borkmann 			do_div(AX, (u32) IMM);
1913144cd91cSDaniel Borkmann 			DST = (u32) AX;
1914ec0e2da9SYonghong Song 			break;
1915ec0e2da9SYonghong Song 		case 1:
1916ec0e2da9SYonghong Song 			AX = abs((s32)DST);
1917ec0e2da9SYonghong Song 			do_div(AX, abs((s32)IMM));
191809fedc73SYonghong Song 			if (((s32)DST < 0) == ((s32)IMM < 0))
1919ec0e2da9SYonghong Song 				DST = (u32)AX;
1920ec0e2da9SYonghong Song 			else
1921ec0e2da9SYonghong Song 				DST = (u32)-AX;
1922ec0e2da9SYonghong Song 			break;
1923ec0e2da9SYonghong Song 		}
1924f5bffecdSAlexei Starovoitov 		CONT;
1925f5bffecdSAlexei Starovoitov 	ALU_END_TO_BE:
1926f5bffecdSAlexei Starovoitov 		switch (IMM) {
1927f5bffecdSAlexei Starovoitov 		case 16:
1928f5bffecdSAlexei Starovoitov 			DST = (__force u16) cpu_to_be16(DST);
1929f5bffecdSAlexei Starovoitov 			break;
1930f5bffecdSAlexei Starovoitov 		case 32:
1931f5bffecdSAlexei Starovoitov 			DST = (__force u32) cpu_to_be32(DST);
1932f5bffecdSAlexei Starovoitov 			break;
1933f5bffecdSAlexei Starovoitov 		case 64:
1934f5bffecdSAlexei Starovoitov 			DST = (__force u64) cpu_to_be64(DST);
1935f5bffecdSAlexei Starovoitov 			break;
1936f5bffecdSAlexei Starovoitov 		}
1937f5bffecdSAlexei Starovoitov 		CONT;
1938f5bffecdSAlexei Starovoitov 	ALU_END_TO_LE:
1939f5bffecdSAlexei Starovoitov 		switch (IMM) {
1940f5bffecdSAlexei Starovoitov 		case 16:
1941f5bffecdSAlexei Starovoitov 			DST = (__force u16) cpu_to_le16(DST);
1942f5bffecdSAlexei Starovoitov 			break;
1943f5bffecdSAlexei Starovoitov 		case 32:
1944f5bffecdSAlexei Starovoitov 			DST = (__force u32) cpu_to_le32(DST);
1945f5bffecdSAlexei Starovoitov 			break;
1946f5bffecdSAlexei Starovoitov 		case 64:
1947f5bffecdSAlexei Starovoitov 			DST = (__force u64) cpu_to_le64(DST);
1948f5bffecdSAlexei Starovoitov 			break;
1949f5bffecdSAlexei Starovoitov 		}
1950f5bffecdSAlexei Starovoitov 		CONT;
19510845c3dbSYonghong Song 	ALU64_END_TO_LE:
19520845c3dbSYonghong Song 		switch (IMM) {
19530845c3dbSYonghong Song 		case 16:
19540845c3dbSYonghong Song 			DST = (__force u16) __swab16(DST);
19550845c3dbSYonghong Song 			break;
19560845c3dbSYonghong Song 		case 32:
19570845c3dbSYonghong Song 			DST = (__force u32) __swab32(DST);
19580845c3dbSYonghong Song 			break;
19590845c3dbSYonghong Song 		case 64:
19600845c3dbSYonghong Song 			DST = (__force u64) __swab64(DST);
19610845c3dbSYonghong Song 			break;
19620845c3dbSYonghong Song 		}
19630845c3dbSYonghong Song 		CONT;
1964f5bffecdSAlexei Starovoitov 
1965f5bffecdSAlexei Starovoitov 	/* CALL */
1966f5bffecdSAlexei Starovoitov 	JMP_CALL:
1967f5bffecdSAlexei Starovoitov 		/* Function call scratches BPF_R1-BPF_R5 registers,
1968f5bffecdSAlexei Starovoitov 		 * preserves BPF_R6-BPF_R9, and stores return value
1969f5bffecdSAlexei Starovoitov 		 * into BPF_R0.
1970f5bffecdSAlexei Starovoitov 		 */
1971f5bffecdSAlexei Starovoitov 		BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
1972f5bffecdSAlexei Starovoitov 						       BPF_R4, BPF_R5);
1973f5bffecdSAlexei Starovoitov 		CONT;
1974f5bffecdSAlexei Starovoitov 
19751ea47e01SAlexei Starovoitov 	JMP_CALL_ARGS:
19761ea47e01SAlexei Starovoitov 		BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2,
19771ea47e01SAlexei Starovoitov 							    BPF_R3, BPF_R4,
19781ea47e01SAlexei Starovoitov 							    BPF_R5,
19791ea47e01SAlexei Starovoitov 							    insn + insn->off + 1);
19801ea47e01SAlexei Starovoitov 		CONT;
19811ea47e01SAlexei Starovoitov 
198204fd61abSAlexei Starovoitov 	JMP_TAIL_CALL: {
198304fd61abSAlexei Starovoitov 		struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
198404fd61abSAlexei Starovoitov 		struct bpf_array *array = container_of(map, struct bpf_array, map);
198504fd61abSAlexei Starovoitov 		struct bpf_prog *prog;
198690caccddSAlexei Starovoitov 		u32 index = BPF_R3;
198704fd61abSAlexei Starovoitov 
198804fd61abSAlexei Starovoitov 		if (unlikely(index >= array->map.max_entries))
198904fd61abSAlexei Starovoitov 			goto out;
1990ebf7f6f0STiezhu Yang 
1991ebf7f6f0STiezhu Yang 		if (unlikely(tail_call_cnt >= MAX_TAIL_CALL_CNT))
199204fd61abSAlexei Starovoitov 			goto out;
199304fd61abSAlexei Starovoitov 
199404fd61abSAlexei Starovoitov 		tail_call_cnt++;
199504fd61abSAlexei Starovoitov 
19962a36f0b9SWang Nan 		prog = READ_ONCE(array->ptrs[index]);
19971ca1cc98SDaniel Borkmann 		if (!prog)
199804fd61abSAlexei Starovoitov 			goto out;
199904fd61abSAlexei Starovoitov 
2000c4675f93SDaniel Borkmann 		/* ARG1 at this point is guaranteed to point to CTX from
2001c4675f93SDaniel Borkmann 		 * the verifier side due to the fact that the tail call is
20020142dddcSChris Packham 		 * handled like a helper, that is, bpf_tail_call_proto,
2003c4675f93SDaniel Borkmann 		 * where arg1_type is ARG_PTR_TO_CTX.
2004c4675f93SDaniel Borkmann 		 */
200504fd61abSAlexei Starovoitov 		insn = prog->insnsi;
200604fd61abSAlexei Starovoitov 		goto select_insn;
200704fd61abSAlexei Starovoitov out:
200804fd61abSAlexei Starovoitov 		CONT;
200904fd61abSAlexei Starovoitov 	}
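	/* BPF-side view (illustrative): a program invokes
	 *
	 *	bpf_tail_call(ctx, &prog_array, index);
	 *
	 * and on success never returns to the caller; the code above
	 * implements that by resetting 'insn' to the target program and
	 * bounding call chains via tail_call_cnt and MAX_TAIL_CALL_CNT.
	 */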
2010f5bffecdSAlexei Starovoitov 	JMP_JA:
2011f5bffecdSAlexei Starovoitov 		insn += insn->off;
2012f5bffecdSAlexei Starovoitov 		CONT;
20134cd58e9aSYonghong Song 	JMP32_JA:
20144cd58e9aSYonghong Song 		insn += insn->imm;
20154cd58e9aSYonghong Song 		CONT;
2016f5bffecdSAlexei Starovoitov 	JMP_EXIT:
2017f5bffecdSAlexei Starovoitov 		return BPF_R0;
2018503a8865SJiong Wang 	/* JMP */
2019503a8865SJiong Wang #define COND_JMP(SIGN, OPCODE, CMP_OP)				\
2020503a8865SJiong Wang 	JMP_##OPCODE##_X:					\
2021503a8865SJiong Wang 		if ((SIGN##64) DST CMP_OP (SIGN##64) SRC) {	\
2022503a8865SJiong Wang 			insn += insn->off;			\
2023503a8865SJiong Wang 			CONT_JMP;				\
2024503a8865SJiong Wang 		}						\
2025503a8865SJiong Wang 		CONT;						\
2026503a8865SJiong Wang 	JMP32_##OPCODE##_X:					\
2027503a8865SJiong Wang 		if ((SIGN##32) DST CMP_OP (SIGN##32) SRC) {	\
2028503a8865SJiong Wang 			insn += insn->off;			\
2029503a8865SJiong Wang 			CONT_JMP;				\
2030503a8865SJiong Wang 		}						\
2031503a8865SJiong Wang 		CONT;						\
2032503a8865SJiong Wang 	JMP_##OPCODE##_K:					\
2033503a8865SJiong Wang 		if ((SIGN##64) DST CMP_OP (SIGN##64) IMM) {	\
2034503a8865SJiong Wang 			insn += insn->off;			\
2035503a8865SJiong Wang 			CONT_JMP;				\
2036503a8865SJiong Wang 		}						\
2037503a8865SJiong Wang 		CONT;						\
2038503a8865SJiong Wang 	JMP32_##OPCODE##_K:					\
2039503a8865SJiong Wang 		if ((SIGN##32) DST CMP_OP (SIGN##32) IMM) {	\
2040503a8865SJiong Wang 			insn += insn->off;			\
2041503a8865SJiong Wang 			CONT_JMP;				\
2042503a8865SJiong Wang 		}						\
2043503a8865SJiong Wang 		CONT;
2044503a8865SJiong Wang 	COND_JMP(u, JEQ, ==)
2045503a8865SJiong Wang 	COND_JMP(u, JNE, !=)
2046503a8865SJiong Wang 	COND_JMP(u, JGT, >)
2047503a8865SJiong Wang 	COND_JMP(u, JLT, <)
2048503a8865SJiong Wang 	COND_JMP(u, JGE, >=)
2049503a8865SJiong Wang 	COND_JMP(u, JLE, <=)
2050503a8865SJiong Wang 	COND_JMP(u, JSET, &)
2051503a8865SJiong Wang 	COND_JMP(s, JSGT, >)
2052503a8865SJiong Wang 	COND_JMP(s, JSLT, <)
2053503a8865SJiong Wang 	COND_JMP(s, JSGE, >=)
2054503a8865SJiong Wang 	COND_JMP(s, JSLE, <=)
2055503a8865SJiong Wang #undef COND_JMP
2056f5e81d11SDaniel Borkmann 	/* ST, STX and LDX */
2057f5e81d11SDaniel Borkmann 	ST_NOSPEC:
2058f5e81d11SDaniel Borkmann 		/* Speculation barrier for mitigating Speculative Store Bypass.
2059f5e81d11SDaniel Borkmann 		 * In case of arm64, we rely on the firmware mitigation as
2060f5e81d11SDaniel Borkmann 		 * controlled via the ssbd kernel parameter. Whenever the
2061f5e81d11SDaniel Borkmann 		 * mitigation is enabled, it works for all of the kernel code
2062f5e81d11SDaniel Borkmann 		 * with no need to provide any additional instructions here.
2063f5e81d11SDaniel Borkmann 		 * In case of x86, we use 'lfence' insn for mitigation. We
2064f5e81d11SDaniel Borkmann 		 * reuse preexisting logic from Spectre v1 mitigation that
2065f5e81d11SDaniel Borkmann 		 * happens to produce the required code on x86 for v4 as well.
2066f5e81d11SDaniel Borkmann 		 */
2067f5e81d11SDaniel Borkmann 		barrier_nospec();
2068f5e81d11SDaniel Borkmann 		CONT;
2069f5bffecdSAlexei Starovoitov #define LDST(SIZEOP, SIZE)						\
2070f5bffecdSAlexei Starovoitov 	STX_MEM_##SIZEOP:						\
2071f5bffecdSAlexei Starovoitov 		*(SIZE *)(unsigned long) (DST + insn->off) = SRC;	\
2072f5bffecdSAlexei Starovoitov 		CONT;							\
2073f5bffecdSAlexei Starovoitov 	ST_MEM_##SIZEOP:						\
2074f5bffecdSAlexei Starovoitov 		*(SIZE *)(unsigned long) (DST + insn->off) = IMM;	\
2075f5bffecdSAlexei Starovoitov 		CONT;							\
2076f5bffecdSAlexei Starovoitov 	LDX_MEM_##SIZEOP:						\
2077f5bffecdSAlexei Starovoitov 		DST = *(SIZE *)(unsigned long) (SRC + insn->off);	\
2078caff1fa4SMenglong Dong 		CONT;							\
2079caff1fa4SMenglong Dong 	LDX_PROBE_MEM_##SIZEOP:						\
20806a5a148aSArnd Bergmann 		bpf_probe_read_kernel_common(&DST, sizeof(SIZE),	\
2081caff1fa4SMenglong Dong 			      (const void *)(long) (SRC + insn->off));	\
2082caff1fa4SMenglong Dong 		DST = *((SIZE *)&DST);					\
2083f5bffecdSAlexei Starovoitov 		CONT;
2084f5bffecdSAlexei Starovoitov 
2085f5bffecdSAlexei Starovoitov 	LDST(B,   u8)
2086f5bffecdSAlexei Starovoitov 	LDST(H,  u16)
2087f5bffecdSAlexei Starovoitov 	LDST(W,  u32)
2088f5bffecdSAlexei Starovoitov 	LDST(DW, u64)
2089f5bffecdSAlexei Starovoitov #undef LDST
20902a02759eSAlexei Starovoitov 
20911f9a1ea8SYonghong Song #define LDSX(SIZEOP, SIZE)						\
20921f9a1ea8SYonghong Song 	LDX_MEMSX_##SIZEOP:						\
20931f9a1ea8SYonghong Song 		DST = *(SIZE *)(unsigned long) (SRC + insn->off);	\
20941f9a1ea8SYonghong Song 		CONT;							\
20951f9a1ea8SYonghong Song 	LDX_PROBE_MEMSX_##SIZEOP:					\
20966a5a148aSArnd Bergmann 		bpf_probe_read_kernel_common(&DST, sizeof(SIZE),		\
20971f9a1ea8SYonghong Song 				      (const void *)(long) (SRC + insn->off));	\
20981f9a1ea8SYonghong Song 		DST = *((SIZE *)&DST);					\
20991f9a1ea8SYonghong Song 		CONT;
21001f9a1ea8SYonghong Song 
21011f9a1ea8SYonghong Song 	LDSX(B,   s8)
21021f9a1ea8SYonghong Song 	LDSX(H,  s16)
21031f9a1ea8SYonghong Song 	LDSX(W,  s32)
21041f9a1ea8SYonghong Song #undef LDSX
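	/* Sketch of how LDSX() differs from LDST(): LDX_MEMSX_B expands to
	 *
	 *	DST = *(s8 *)(unsigned long) (SRC + insn->off);
	 *
	 * where the signed s8 cast makes the store into the u64 register
	 * sign-extend, in contrast to the zero-extending u8 load in LDST().
	 */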
21051f9a1ea8SYonghong Song 
210646291067SBrendan Jackman #define ATOMIC_ALU_OP(BOP, KOP)						\
210746291067SBrendan Jackman 		case BOP:						\
210846291067SBrendan Jackman 			if (BPF_SIZE(insn->code) == BPF_W)		\
210946291067SBrendan Jackman 				atomic_##KOP((u32) SRC, (atomic_t *)(unsigned long) \
211046291067SBrendan Jackman 					     (DST + insn->off));	\
211146291067SBrendan Jackman 			else						\
211246291067SBrendan Jackman 				atomic64_##KOP((u64) SRC, (atomic64_t *)(unsigned long) \
211346291067SBrendan Jackman 					       (DST + insn->off));	\
211446291067SBrendan Jackman 			break;						\
211546291067SBrendan Jackman 		case BOP | BPF_FETCH:					\
211646291067SBrendan Jackman 			if (BPF_SIZE(insn->code) == BPF_W)		\
211746291067SBrendan Jackman 				SRC = (u32) atomic_fetch_##KOP(		\
211846291067SBrendan Jackman 					(u32) SRC,			\
211946291067SBrendan Jackman 					(atomic_t *)(unsigned long) (DST + insn->off)); \
212046291067SBrendan Jackman 			else						\
212146291067SBrendan Jackman 				SRC = (u64) atomic64_fetch_##KOP(	\
212246291067SBrendan Jackman 					(u64) SRC,			\
212346291067SBrendan Jackman 					(atomic64_t *)(unsigned long) (DST + insn->off)); \
212446291067SBrendan Jackman 			break;
212546291067SBrendan Jackman 
212646291067SBrendan Jackman 	STX_ATOMIC_DW:
212791c960b0SBrendan Jackman 	STX_ATOMIC_W:
212891c960b0SBrendan Jackman 		switch (IMM) {
212946291067SBrendan Jackman 		ATOMIC_ALU_OP(BPF_ADD, add)
2130981f94c3SBrendan Jackman 		ATOMIC_ALU_OP(BPF_AND, and)
2131981f94c3SBrendan Jackman 		ATOMIC_ALU_OP(BPF_OR, or)
2132981f94c3SBrendan Jackman 		ATOMIC_ALU_OP(BPF_XOR, xor)
213346291067SBrendan Jackman #undef ATOMIC_ALU_OP
213446291067SBrendan Jackman 
21355ffa2550SBrendan Jackman 		case BPF_XCHG:
213646291067SBrendan Jackman 			if (BPF_SIZE(insn->code) == BPF_W)
21375ffa2550SBrendan Jackman 				SRC = (u32) atomic_xchg(
21385ffa2550SBrendan Jackman 					(atomic_t *)(unsigned long) (DST + insn->off),
21395ffa2550SBrendan Jackman 					(u32) SRC);
214046291067SBrendan Jackman 			else
21415ffa2550SBrendan Jackman 				SRC = (u64) atomic64_xchg(
21425ffa2550SBrendan Jackman 					(atomic64_t *)(unsigned long) (DST + insn->off),
21435ffa2550SBrendan Jackman 					(u64) SRC);
21445ffa2550SBrendan Jackman 			break;
21455ffa2550SBrendan Jackman 		case BPF_CMPXCHG:
214646291067SBrendan Jackman 			if (BPF_SIZE(insn->code) == BPF_W)
214746291067SBrendan Jackman 				BPF_R0 = (u32) atomic_cmpxchg(
214846291067SBrendan Jackman 					(atomic_t *)(unsigned long) (DST + insn->off),
214946291067SBrendan Jackman 					(u32) BPF_R0, (u32) SRC);
215046291067SBrendan Jackman 			else
21515ffa2550SBrendan Jackman 				BPF_R0 = (u64) atomic64_cmpxchg(
21525ffa2550SBrendan Jackman 					(atomic64_t *)(unsigned long) (DST + insn->off),
21535ffa2550SBrendan Jackman 					(u64) BPF_R0, (u64) SRC);
21545ffa2550SBrendan Jackman 			break;
215546291067SBrendan Jackman 
215691c960b0SBrendan Jackman 		default:
215791c960b0SBrendan Jackman 			goto default_label;
215891c960b0SBrendan Jackman 		}
2159f5bffecdSAlexei Starovoitov 		CONT;
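	/* Sketch of one ATOMIC_ALU_OP() expansion for reference: with
	 * IMM == BPF_ADD and a BPF_W sized insn, the switch above reduces to
	 *
	 *	atomic_add((u32) SRC, (atomic_t *)(unsigned long)
	 *		   (DST + insn->off));
	 *
	 * while BPF_ADD | BPF_FETCH additionally writes the old value back
	 * into SRC via atomic_fetch_add().
	 */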
2160f5bffecdSAlexei Starovoitov 
2161f5bffecdSAlexei Starovoitov 	default_label:
21625e581dadSDaniel Borkmann 		/* If we ever reach this, we have a bug somewhere. Die hard here
21635e581dadSDaniel Borkmann 		 * instead of just returning 0; we could be somewhere in a subprog,
21645e581dadSDaniel Borkmann 		 * so execution could otherwise continue, which we do /not/ want.
21655e581dadSDaniel Borkmann 		 *
21665e581dadSDaniel Borkmann 		 * Note, the verifier whitelists all opcodes in bpf_opcode_in_insntable().
21675e581dadSDaniel Borkmann 		 */
216891c960b0SBrendan Jackman 		pr_warn("BPF interpreter: unknown opcode %02x (imm: 0x%x)\n",
216991c960b0SBrendan Jackman 			insn->code, insn->imm);
21705e581dadSDaniel Borkmann 		BUG_ON(1);
2171f5bffecdSAlexei Starovoitov 		return 0;
2172f5bffecdSAlexei Starovoitov }
2173f696b8f4SAlexei Starovoitov 
2174b870aa90SAlexei Starovoitov #define PROG_NAME(stack_size) __bpf_prog_run##stack_size
2175b870aa90SAlexei Starovoitov #define DEFINE_BPF_PROG_RUN(stack_size) \
2176b870aa90SAlexei Starovoitov static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
2177b870aa90SAlexei Starovoitov { \
2178b870aa90SAlexei Starovoitov 	u64 stack[stack_size / sizeof(u64)]; \
2179a6a7aabaSAlexander Potapenko 	u64 regs[MAX_BPF_EXT_REG] = {}; \
2180b870aa90SAlexei Starovoitov \
2181d812ae6eSMartin KaFai Lau 	kmsan_unpoison_memory(stack, sizeof(stack)); \
2182b870aa90SAlexei Starovoitov 	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
2183b870aa90SAlexei Starovoitov 	ARG1 = (u64) (unsigned long) ctx; \
21842ec9898eSHe Fengqing 	return ___bpf_prog_run(regs, insn); \
2185f696b8f4SAlexei Starovoitov }
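/* Sketch (mechanical expansion of the macro above, not part of the
 * original source): DEFINE_BPF_PROG_RUN(512) defines
 *
 *	static unsigned int __bpf_prog_run512(const void *ctx,
 *					      const struct bpf_insn *insn)
 *	{
 *		u64 stack[512 / sizeof(u64)];	(64 slots)
 *		u64 regs[MAX_BPF_EXT_REG] = {};
 *		...
 *	}
 *
 * so the variants differ only in how much interpreter stack they reserve.
 */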
2186f5bffecdSAlexei Starovoitov 
21871ea47e01SAlexei Starovoitov #define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size
21881ea47e01SAlexei Starovoitov #define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \
21891ea47e01SAlexei Starovoitov static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
21901ea47e01SAlexei Starovoitov 				      const struct bpf_insn *insn) \
21911ea47e01SAlexei Starovoitov { \
21921ea47e01SAlexei Starovoitov 	u64 stack[stack_size / sizeof(u64)]; \
2193144cd91cSDaniel Borkmann 	u64 regs[MAX_BPF_EXT_REG]; \
21941ea47e01SAlexei Starovoitov \
2195d812ae6eSMartin KaFai Lau 	kmsan_unpoison_memory(stack, sizeof(stack)); \
21961ea47e01SAlexei Starovoitov 	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
21971ea47e01SAlexei Starovoitov 	BPF_R1 = r1; \
21981ea47e01SAlexei Starovoitov 	BPF_R2 = r2; \
21991ea47e01SAlexei Starovoitov 	BPF_R3 = r3; \
22001ea47e01SAlexei Starovoitov 	BPF_R4 = r4; \
22011ea47e01SAlexei Starovoitov 	BPF_R5 = r5; \
22022ec9898eSHe Fengqing 	return ___bpf_prog_run(regs, insn); \
22031ea47e01SAlexei Starovoitov }
22041ea47e01SAlexei Starovoitov 
2205b870aa90SAlexei Starovoitov #define EVAL1(FN, X) FN(X)
2206b870aa90SAlexei Starovoitov #define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
2207b870aa90SAlexei Starovoitov #define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
2208b870aa90SAlexei Starovoitov #define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y)
2209b870aa90SAlexei Starovoitov #define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y)
2210b870aa90SAlexei Starovoitov #define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y)
2211b870aa90SAlexei Starovoitov 
2212b870aa90SAlexei Starovoitov EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
2213b870aa90SAlexei Starovoitov EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
2214b870aa90SAlexei Starovoitov EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);
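/* Sketch: EVAL6(FN, 32, 64, 96, 128, 160, 192) expands to
 * FN(32) FN(64) FN(96) FN(128) FN(160) FN(192), so the three lines above
 * instantiate sixteen interpreter variants, one per 32-byte stack-size
 * bucket from 32 up to 512 bytes.
 */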
2215b870aa90SAlexei Starovoitov 
22161ea47e01SAlexei Starovoitov EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192);
22171ea47e01SAlexei Starovoitov EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384);
22181ea47e01SAlexei Starovoitov EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);
22191ea47e01SAlexei Starovoitov 
2220b870aa90SAlexei Starovoitov #define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),
2221b870aa90SAlexei Starovoitov 
2222b870aa90SAlexei Starovoitov static unsigned int (*interpreters[])(const void *ctx,
2223b870aa90SAlexei Starovoitov 				      const struct bpf_insn *insn) = {
2224b870aa90SAlexei Starovoitov EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
2225b870aa90SAlexei Starovoitov EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
2226b870aa90SAlexei Starovoitov EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
2227b870aa90SAlexei Starovoitov };
22281ea47e01SAlexei Starovoitov #undef PROG_NAME_LIST
22291ea47e01SAlexei Starovoitov #define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size),
2230ba49f976SArnd Bergmann static __maybe_unused
2231ba49f976SArnd Bergmann u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5,
22321ea47e01SAlexei Starovoitov 			   const struct bpf_insn *insn) = {
22331ea47e01SAlexei Starovoitov EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
22341ea47e01SAlexei Starovoitov EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
22351ea47e01SAlexei Starovoitov EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
22361ea47e01SAlexei Starovoitov };
22371ea47e01SAlexei Starovoitov #undef PROG_NAME_LIST
22381ea47e01SAlexei Starovoitov 
2239ba49f976SArnd Bergmann #ifdef CONFIG_BPF_SYSCALL
22401ea47e01SAlexei Starovoitov void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
22411ea47e01SAlexei Starovoitov {
22421ea47e01SAlexei Starovoitov 	stack_depth = max_t(u32, stack_depth, 1);
22431ea47e01SAlexei Starovoitov 	insn->off = (s16) insn->imm;
22441ea47e01SAlexei Starovoitov 	insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] -
22451ea47e01SAlexei Starovoitov 		__bpf_call_base_args;
22461ea47e01SAlexei Starovoitov 	insn->code = BPF_JMP | BPF_CALL_ARGS;
22471ea47e01SAlexei Starovoitov }
2248ba49f976SArnd Bergmann #endif
2249290af866SAlexei Starovoitov #else
2250fa9dd599SDaniel Borkmann static unsigned int __bpf_prog_ret0_warn(const void *ctx,
2251290af866SAlexei Starovoitov 					 const struct bpf_insn *insn)
2252290af866SAlexei Starovoitov {
2253fa9dd599SDaniel Borkmann 	/* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
2254fa9dd599SDaniel Borkmann 	 * is not working properly, so warn about it!
2255fa9dd599SDaniel Borkmann 	 */
2256fa9dd599SDaniel Borkmann 	WARN_ON_ONCE(1);
2257290af866SAlexei Starovoitov 	return 0;
2258290af866SAlexei Starovoitov }
2259290af866SAlexei Starovoitov #endif
2260290af866SAlexei Starovoitov 
2261f45d5b6cSToke Hoiland-Jorgensen bool bpf_prog_map_compatible(struct bpf_map *map,
22623324b584SDaniel Borkmann 			     const struct bpf_prog *fp)
2263f5bffecdSAlexei Starovoitov {
22641c123c56SToke Høiland-Jørgensen 	enum bpf_prog_type prog_type = resolve_prog_type(fp);
226554713c85SToke Høiland-Jørgensen 	bool ret;
22665d5e3b4cSXu Kuohai 	struct bpf_prog_aux *aux = fp->aux;
226754713c85SToke Høiland-Jørgensen 
22689802d865SJosef Bacik 	if (fp->kprobe_override)
22699802d865SJosef Bacik 		return false;
22709802d865SJosef Bacik 
22713d76a4d3SStanislav Fomichev 	/* XDP programs inserted into maps are not guaranteed to run on
22723d76a4d3SStanislav Fomichev 	 * a particular netdev (and can run outside driver context entirely
22733d76a4d3SStanislav Fomichev 	 * in the case of devmap and cpumap). Until device checks
22743d76a4d3SStanislav Fomichev 	 * are implemented, prohibit adding dev-bound programs to program maps.
22753d76a4d3SStanislav Fomichev 	 */
22765d5e3b4cSXu Kuohai 	if (bpf_prog_is_dev_bound(aux))
22773d76a4d3SStanislav Fomichev 		return false;
22783d76a4d3SStanislav Fomichev 
2279f45d5b6cSToke Hoiland-Jorgensen 	spin_lock(&map->owner.lock);
2280f45d5b6cSToke Hoiland-Jorgensen 	if (!map->owner.type) {
22813324b584SDaniel Borkmann 		/* There's no owner yet where we could check for
22823324b584SDaniel Borkmann 		 * compatibility.
22833324b584SDaniel Borkmann 		 */
22841c123c56SToke Høiland-Jørgensen 		map->owner.type  = prog_type;
2285f45d5b6cSToke Hoiland-Jorgensen 		map->owner.jited = fp->jited;
22865d5e3b4cSXu Kuohai 		map->owner.xdp_has_frags = aux->xdp_has_frags;
22875d5e3b4cSXu Kuohai 		map->owner.attach_func_proto = aux->attach_func_proto;
228854713c85SToke Høiland-Jørgensen 		ret = true;
228954713c85SToke Høiland-Jørgensen 	} else {
22901c123c56SToke Høiland-Jørgensen 		ret = map->owner.type  == prog_type &&
2291f45d5b6cSToke Hoiland-Jorgensen 		      map->owner.jited == fp->jited &&
22925d5e3b4cSXu Kuohai 		      map->owner.xdp_has_frags == aux->xdp_has_frags;
22935d5e3b4cSXu Kuohai 		if (ret &&
22945d5e3b4cSXu Kuohai 		    map->owner.attach_func_proto != aux->attach_func_proto) {
22955d5e3b4cSXu Kuohai 			switch (prog_type) {
22965d5e3b4cSXu Kuohai 			case BPF_PROG_TYPE_TRACING:
22975d5e3b4cSXu Kuohai 			case BPF_PROG_TYPE_LSM:
22985d5e3b4cSXu Kuohai 			case BPF_PROG_TYPE_EXT:
22995d5e3b4cSXu Kuohai 			case BPF_PROG_TYPE_STRUCT_OPS:
23005d5e3b4cSXu Kuohai 				ret = false;
23015d5e3b4cSXu Kuohai 				break;
23025d5e3b4cSXu Kuohai 			default:
23035d5e3b4cSXu Kuohai 				break;
23045d5e3b4cSXu Kuohai 			}
23055d5e3b4cSXu Kuohai 		}
230604fd61abSAlexei Starovoitov 	}
2307f45d5b6cSToke Hoiland-Jorgensen 	spin_unlock(&map->owner.lock);
2308f45d5b6cSToke Hoiland-Jorgensen 
230954713c85SToke Høiland-Jørgensen 	return ret;
23103324b584SDaniel Borkmann }
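/* Sketch of the ownership rule above (illustrative): the first program
 * inserted into a prog map stamps its type, jited flag and xdp_has_frags
 * on map->owner; every later candidate must match all three, so e.g.
 * mixing a JITed and a non-JITed program in one tail-call map is rejected.
 */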
23113324b584SDaniel Borkmann 
23123324b584SDaniel Borkmann static int bpf_check_tail_call(const struct bpf_prog *fp)
231304fd61abSAlexei Starovoitov {
231404fd61abSAlexei Starovoitov 	struct bpf_prog_aux *aux = fp->aux;
2315984fe94fSYiFei Zhu 	int i, ret = 0;
231604fd61abSAlexei Starovoitov 
2317984fe94fSYiFei Zhu 	mutex_lock(&aux->used_maps_mutex);
231804fd61abSAlexei Starovoitov 	for (i = 0; i < aux->used_map_cnt; i++) {
23193324b584SDaniel Borkmann 		struct bpf_map *map = aux->used_maps[i];
232004fd61abSAlexei Starovoitov 
2321f45d5b6cSToke Hoiland-Jorgensen 		if (!map_type_contains_progs(map))
232204fd61abSAlexei Starovoitov 			continue;
23233324b584SDaniel Borkmann 
2324f45d5b6cSToke Hoiland-Jorgensen 		if (!bpf_prog_map_compatible(map, fp)) {
2325984fe94fSYiFei Zhu 			ret = -EINVAL;
2326984fe94fSYiFei Zhu 			goto out;
2327984fe94fSYiFei Zhu 		}
232804fd61abSAlexei Starovoitov 	}
232904fd61abSAlexei Starovoitov 
2330984fe94fSYiFei Zhu out:
2331984fe94fSYiFei Zhu 	mutex_unlock(&aux->used_maps_mutex);
2332984fe94fSYiFei Zhu 	return ret;
233304fd61abSAlexei Starovoitov }
233404fd61abSAlexei Starovoitov 
23359facc336SDaniel Borkmann static void bpf_prog_select_func(struct bpf_prog *fp)
23369facc336SDaniel Borkmann {
23379facc336SDaniel Borkmann #ifndef CONFIG_BPF_JIT_ALWAYS_ON
23389facc336SDaniel Borkmann 	u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
23399facc336SDaniel Borkmann 
23409facc336SDaniel Borkmann 	fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
23419facc336SDaniel Borkmann #else
23429facc336SDaniel Borkmann 	fp->bpf_func = __bpf_prog_ret0_warn;
23439facc336SDaniel Borkmann #endif
23449facc336SDaniel Borkmann }
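/* Worked example for the bucketing above (hypothetical stack depths):
 *
 *	stack_depth = 40:  round_up(40, 32)  = 64,  64/32  - 1 = 1
 *			   -> fp->bpf_func = __bpf_prog_run64
 *	stack_depth = 512: round_up(512, 32) = 512, 512/32 - 1 = 15
 *			   -> fp->bpf_func = __bpf_prog_run512
 */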
23459facc336SDaniel Borkmann 
2346f5bffecdSAlexei Starovoitov /**
23473324b584SDaniel Borkmann  *	bpf_prog_select_runtime - select exec runtime for BPF program
234806edc59cSChristoph Hellwig  *	@fp: bpf_prog populated with BPF program
2349d1c55ab5SDaniel Borkmann  *	@err: pointer to error variable
2350f5bffecdSAlexei Starovoitov  *
23513324b584SDaniel Borkmann  * Try to JIT the eBPF program; if the JIT is not available, use the interpreter.
2352fb7dd8bcSAndrii Nakryiko  * The BPF program will be executed via bpf_prog_run() function.
2353019d0454SRandy Dunlap  *
2354019d0454SRandy Dunlap  * Return: the &fp argument along with &err set to 0 for success or
2355019d0454SRandy Dunlap  * a negative errno code on failure
2356f5bffecdSAlexei Starovoitov  */
2357d1c55ab5SDaniel Borkmann struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
2358f5bffecdSAlexei Starovoitov {
23599facc336SDaniel Borkmann 	/* In case of BPF to BPF calls, the verifier did all the prep
23609facc336SDaniel Borkmann 	 * work with regard to JITing, etc.
23619facc336SDaniel Borkmann 	 */
2362e6ac2450SMartin KaFai Lau 	bool jit_needed = false;
2363e6ac2450SMartin KaFai Lau 
23649facc336SDaniel Borkmann 	if (fp->bpf_func)
23659facc336SDaniel Borkmann 		goto finalize;
23668007e40aSMartin KaFai Lau 
2367e6ac2450SMartin KaFai Lau 	if (IS_ENABLED(CONFIG_BPF_JIT_ALWAYS_ON) ||
2368e6ac2450SMartin KaFai Lau 	    bpf_prog_has_kfunc_call(fp))
2369e6ac2450SMartin KaFai Lau 		jit_needed = true;
2370e6ac2450SMartin KaFai Lau 
23719facc336SDaniel Borkmann 	bpf_prog_select_func(fp);
2372f5bffecdSAlexei Starovoitov 
2373d1c55ab5SDaniel Borkmann 	/* eBPF JITs can rewrite the program in case constant
2374d1c55ab5SDaniel Borkmann 	 * blinding is active. However, in case of error during
2375d1c55ab5SDaniel Borkmann 	 * blinding, bpf_int_jit_compile() must always return a
2376d1c55ab5SDaniel Borkmann 	 * valid program, which in this case would simply not
2377d1c55ab5SDaniel Borkmann 	 * be JITed, but fall back to the interpreter.
2378d1c55ab5SDaniel Borkmann 	 */
23799d03ebc7SStanislav Fomichev 	if (!bpf_prog_is_offloaded(fp->aux)) {
2380c454a46bSMartin KaFai Lau 		*err = bpf_prog_alloc_jited_linfo(fp);
2381c454a46bSMartin KaFai Lau 		if (*err)
2382c454a46bSMartin KaFai Lau 			return fp;
2383c454a46bSMartin KaFai Lau 
2384d1c55ab5SDaniel Borkmann 		fp = bpf_int_jit_compile(fp);
2385e16301fbSMartin KaFai Lau 		bpf_prog_jit_attempt_done(fp);
2386e6ac2450SMartin KaFai Lau 		if (!fp->jited && jit_needed) {
2387290af866SAlexei Starovoitov 			*err = -ENOTSUPP;
2388290af866SAlexei Starovoitov 			return fp;
2389c454a46bSMartin KaFai Lau 		}
2390c454a46bSMartin KaFai Lau 	} else {
2391ab3f0063SJakub Kicinski 		*err = bpf_prog_offload_compile(fp);
2392ab3f0063SJakub Kicinski 		if (*err)
2393ab3f0063SJakub Kicinski 			return fp;
2394ab3f0063SJakub Kicinski 	}
23959facc336SDaniel Borkmann 
23969facc336SDaniel Borkmann finalize:
2397e3540e5aSGreg Kroah-Hartman 	bpf_prog_lock_ro(fp);
239804fd61abSAlexei Starovoitov 
23993324b584SDaniel Borkmann 	/* The tail call compatibility check can only be done at
24003324b584SDaniel Borkmann 	 * this late stage, as we need to determine whether we deal
24013324b584SDaniel Borkmann 	 * with JITed or non-JITed program concatenations, and not
24023324b584SDaniel Borkmann 	 * all eBPF JITs might immediately support all features.
24033324b584SDaniel Borkmann 	 */
2404d1c55ab5SDaniel Borkmann 	*err = bpf_check_tail_call(fp);
2405d1c55ab5SDaniel Borkmann 
2406d1c55ab5SDaniel Borkmann 	return fp;
2407f5bffecdSAlexei Starovoitov }
24087ae457c1SAlexei Starovoitov EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
2409f5bffecdSAlexei Starovoitov 
2410e87c6bc3SYonghong Song static unsigned int __bpf_prog_ret1(const void *ctx,
2411e87c6bc3SYonghong Song 				    const struct bpf_insn *insn)
2412e87c6bc3SYonghong Song {
2413e87c6bc3SYonghong Song 	return 1;
2414e87c6bc3SYonghong Song }
2415e87c6bc3SYonghong Song 
2416e87c6bc3SYonghong Song static struct bpf_prog_dummy {
2417e87c6bc3SYonghong Song 	struct bpf_prog prog;
2418e87c6bc3SYonghong Song } dummy_bpf_prog = {
2419e87c6bc3SYonghong Song 	.prog = {
2420e87c6bc3SYonghong Song 		.bpf_func = __bpf_prog_ret1,
2421e87c6bc3SYonghong Song 	},
2422e87c6bc3SYonghong Song };
2423e87c6bc3SYonghong Song 
242446531a30SPavel Begunkov struct bpf_empty_prog_array bpf_empty_prog_array = {
2425324bda9eSAlexei Starovoitov 	.null_prog = NULL,
2426324bda9eSAlexei Starovoitov };
242746531a30SPavel Begunkov EXPORT_SYMBOL(bpf_empty_prog_array);
2428324bda9eSAlexei Starovoitov 
2429d29ab6e1SRoman Gushchin struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
2430324bda9eSAlexei Starovoitov {
2431324bda9eSAlexei Starovoitov 	if (prog_cnt)
2432324bda9eSAlexei Starovoitov 		return kzalloc(sizeof(struct bpf_prog_array) +
2433394e40a2SRoman Gushchin 			       sizeof(struct bpf_prog_array_item) *
2434394e40a2SRoman Gushchin 			       (prog_cnt + 1),
2435324bda9eSAlexei Starovoitov 			       flags);
2436324bda9eSAlexei Starovoitov 
243746531a30SPavel Begunkov 	return &bpf_empty_prog_array.hdr;
2438324bda9eSAlexei Starovoitov }
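/* Sketch: bpf_prog_array_alloc(2, GFP_KERNEL) allocates room for two
 * items plus the NULL sentinel item, while prog_cnt == 0 hands back the
 * shared &bpf_empty_prog_array.hdr, which bpf_prog_array_free() below
 * recognizes and never actually frees.
 */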
2439324bda9eSAlexei Starovoitov 
244054e9c9d4SStanislav Fomichev void bpf_prog_array_free(struct bpf_prog_array *progs)
2441324bda9eSAlexei Starovoitov {
244246531a30SPavel Begunkov 	if (!progs || progs == &bpf_empty_prog_array.hdr)
2443324bda9eSAlexei Starovoitov 		return;
2444324bda9eSAlexei Starovoitov 	kfree_rcu(progs, rcu);
2445324bda9eSAlexei Starovoitov }
2446324bda9eSAlexei Starovoitov 
24478c7dcb84SDelyan Kratunov static void __bpf_prog_array_free_sleepable_cb(struct rcu_head *rcu)
24488c7dcb84SDelyan Kratunov {
24498c7dcb84SDelyan Kratunov 	struct bpf_prog_array *progs;
24508c7dcb84SDelyan Kratunov 
24514835f9eeSHou Tao 	/* If RCU Tasks Trace grace period implies RCU grace period, there is
24524835f9eeSHou Tao 	 * no need to call kfree_rcu(); just call kfree() directly.
24534835f9eeSHou Tao 	 */
24548c7dcb84SDelyan Kratunov 	progs = container_of(rcu, struct bpf_prog_array, rcu);
24554835f9eeSHou Tao 	if (rcu_trace_implies_rcu_gp())
24564835f9eeSHou Tao 		kfree(progs);
24574835f9eeSHou Tao 	else
24588c7dcb84SDelyan Kratunov 		kfree_rcu(progs, rcu);
24598c7dcb84SDelyan Kratunov }
24608c7dcb84SDelyan Kratunov 
24618c7dcb84SDelyan Kratunov void bpf_prog_array_free_sleepable(struct bpf_prog_array *progs)
24628c7dcb84SDelyan Kratunov {
24638c7dcb84SDelyan Kratunov 	if (!progs || progs == &bpf_empty_prog_array.hdr)
24648c7dcb84SDelyan Kratunov 		return;
24658c7dcb84SDelyan Kratunov 	call_rcu_tasks_trace(&progs->rcu, __bpf_prog_array_free_sleepable_cb);
24668c7dcb84SDelyan Kratunov }
24678c7dcb84SDelyan Kratunov 
246854e9c9d4SStanislav Fomichev int bpf_prog_array_length(struct bpf_prog_array *array)
2469468e2f64SAlexei Starovoitov {
2470394e40a2SRoman Gushchin 	struct bpf_prog_array_item *item;
2471468e2f64SAlexei Starovoitov 	u32 cnt = 0;
2472468e2f64SAlexei Starovoitov 
247354e9c9d4SStanislav Fomichev 	for (item = array->items; item->prog; item++)
2474394e40a2SRoman Gushchin 		if (item->prog != &dummy_bpf_prog.prog)
2475468e2f64SAlexei Starovoitov 			cnt++;
2476468e2f64SAlexei Starovoitov 	return cnt;
2477468e2f64SAlexei Starovoitov }
2478468e2f64SAlexei Starovoitov 
24790d01da6aSStanislav Fomichev bool bpf_prog_array_is_empty(struct bpf_prog_array *array)
24800d01da6aSStanislav Fomichev {
24810d01da6aSStanislav Fomichev 	struct bpf_prog_array_item *item;
24820d01da6aSStanislav Fomichev 
24830d01da6aSStanislav Fomichev 	for (item = array->items; item->prog; item++)
24840d01da6aSStanislav Fomichev 		if (item->prog != &dummy_bpf_prog.prog)
24850d01da6aSStanislav Fomichev 			return false;
24860d01da6aSStanislav Fomichev 	return true;
24870d01da6aSStanislav Fomichev }
2488394e40a2SRoman Gushchin 
248954e9c9d4SStanislav Fomichev static bool bpf_prog_array_copy_core(struct bpf_prog_array *array,
24903a38bb98SYonghong Song 				     u32 *prog_ids,
24913a38bb98SYonghong Song 				     u32 request_cnt)
24923a38bb98SYonghong Song {
2493394e40a2SRoman Gushchin 	struct bpf_prog_array_item *item;
24943a38bb98SYonghong Song 	int i = 0;
24953a38bb98SYonghong Song 
249654e9c9d4SStanislav Fomichev 	for (item = array->items; item->prog; item++) {
2497394e40a2SRoman Gushchin 		if (item->prog == &dummy_bpf_prog.prog)
24983a38bb98SYonghong Song 			continue;
2499394e40a2SRoman Gushchin 		prog_ids[i] = item->prog->aux->id;
25003a38bb98SYonghong Song 		if (++i == request_cnt) {
2501394e40a2SRoman Gushchin 			item++;
25023a38bb98SYonghong Song 			break;
25033a38bb98SYonghong Song 		}
25043a38bb98SYonghong Song 	}
25053a38bb98SYonghong Song 
2506394e40a2SRoman Gushchin 	return !!(item->prog);
25073a38bb98SYonghong Song }
25083a38bb98SYonghong Song 
250954e9c9d4SStanislav Fomichev int bpf_prog_array_copy_to_user(struct bpf_prog_array *array,
2510468e2f64SAlexei Starovoitov 				__u32 __user *prog_ids, u32 cnt)
2511468e2f64SAlexei Starovoitov {
25120911287cSAlexei Starovoitov 	unsigned long err = 0;
25130911287cSAlexei Starovoitov 	bool nospc;
25143a38bb98SYonghong Song 	u32 *ids;
2515468e2f64SAlexei Starovoitov 
25160911287cSAlexei Starovoitov 	/* users of this function are doing:
25170911287cSAlexei Starovoitov 	 * cnt = bpf_prog_array_length();
25180911287cSAlexei Starovoitov 	 * if (cnt > 0)
25190911287cSAlexei Starovoitov 	 *     bpf_prog_array_copy_to_user(..., cnt);
252054e9c9d4SStanislav Fomichev 	 * so the kcalloc below doesn't need an extra cnt > 0 check.
25210911287cSAlexei Starovoitov 	 */
25229c481b90SDaniel Borkmann 	ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
25230911287cSAlexei Starovoitov 	if (!ids)
25240911287cSAlexei Starovoitov 		return -ENOMEM;
2525394e40a2SRoman Gushchin 	nospc = bpf_prog_array_copy_core(array, ids, cnt);
25260911287cSAlexei Starovoitov 	err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
25270911287cSAlexei Starovoitov 	kfree(ids);
25280911287cSAlexei Starovoitov 	if (err)
25290911287cSAlexei Starovoitov 		return -EFAULT;
25300911287cSAlexei Starovoitov 	if (nospc)
2531468e2f64SAlexei Starovoitov 		return -ENOSPC;
2532468e2f64SAlexei Starovoitov 	return 0;
2533468e2f64SAlexei Starovoitov }
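/* Usage sketch of the pattern described in the comment above
 * (hypothetical caller):
 *
 *	u32 cnt = bpf_prog_array_length(array);
 *
 *	if (cnt)
 *		err = bpf_prog_array_copy_to_user(array, uprog_ids, cnt);
 */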
2534468e2f64SAlexei Starovoitov 
253554e9c9d4SStanislav Fomichev void bpf_prog_array_delete_safe(struct bpf_prog_array *array,
2536e87c6bc3SYonghong Song 				struct bpf_prog *old_prog)
2537e87c6bc3SYonghong Song {
253854e9c9d4SStanislav Fomichev 	struct bpf_prog_array_item *item;
2539e87c6bc3SYonghong Song 
254054e9c9d4SStanislav Fomichev 	for (item = array->items; item->prog; item++)
2541394e40a2SRoman Gushchin 		if (item->prog == old_prog) {
2542394e40a2SRoman Gushchin 			WRITE_ONCE(item->prog, &dummy_bpf_prog.prog);
2543e87c6bc3SYonghong Song 			break;
2544e87c6bc3SYonghong Song 		}
2545e87c6bc3SYonghong Song }
2546e87c6bc3SYonghong Song 
2547ce3aa9ccSJakub Sitnicki /**
2548ce3aa9ccSJakub Sitnicki  * bpf_prog_array_delete_safe_at() - Replaces the program at the given
2549ce3aa9ccSJakub Sitnicki  *                                   index into the program array with
2550ce3aa9ccSJakub Sitnicki  *                                   a dummy no-op program.
2551ce3aa9ccSJakub Sitnicki  * @array: a bpf_prog_array
2552ce3aa9ccSJakub Sitnicki  * @index: the index of the program to replace
2553ce3aa9ccSJakub Sitnicki  *
2554ce3aa9ccSJakub Sitnicki  * Skips over dummy programs, by not counting them, when calculating
2555b8c1a309SRandy Dunlap  * the position of the program to replace.
2556ce3aa9ccSJakub Sitnicki  *
2557ce3aa9ccSJakub Sitnicki  * Return:
2558ce3aa9ccSJakub Sitnicki  * * 0		- Success
2559ce3aa9ccSJakub Sitnicki  * * -EINVAL	- Invalid index value. Must be a non-negative integer.
2560ce3aa9ccSJakub Sitnicki  * * -ENOENT	- Index out of range
2561ce3aa9ccSJakub Sitnicki  */
2562ce3aa9ccSJakub Sitnicki int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index)
2563ce3aa9ccSJakub Sitnicki {
2564ce3aa9ccSJakub Sitnicki 	return bpf_prog_array_update_at(array, index, &dummy_bpf_prog.prog);
2565ce3aa9ccSJakub Sitnicki }
2566ce3aa9ccSJakub Sitnicki 
2567ce3aa9ccSJakub Sitnicki /**
2568ce3aa9ccSJakub Sitnicki  * bpf_prog_array_update_at() - Updates the program at the given index
2569ce3aa9ccSJakub Sitnicki  *                              into the program array.
2570ce3aa9ccSJakub Sitnicki  * @array: a bpf_prog_array
2571ce3aa9ccSJakub Sitnicki  * @index: the index of the program to update
2572ce3aa9ccSJakub Sitnicki  * @prog: the program to insert into the array
2573ce3aa9ccSJakub Sitnicki  *
2574ce3aa9ccSJakub Sitnicki  * Skips over dummy programs, by not counting them, when calculating
2575ce3aa9ccSJakub Sitnicki  * the position of the program to update.
2576ce3aa9ccSJakub Sitnicki  *
2577ce3aa9ccSJakub Sitnicki  * Return:
2578ce3aa9ccSJakub Sitnicki  * * 0		- Success
2579ce3aa9ccSJakub Sitnicki  * * -EINVAL	- Invalid index value. Must be a non-negative integer.
2580ce3aa9ccSJakub Sitnicki  * * -ENOENT	- Index out of range
2581ce3aa9ccSJakub Sitnicki  */
2582ce3aa9ccSJakub Sitnicki int bpf_prog_array_update_at(struct bpf_prog_array *array, int index,
2583ce3aa9ccSJakub Sitnicki 			     struct bpf_prog *prog)
2584ce3aa9ccSJakub Sitnicki {
2585ce3aa9ccSJakub Sitnicki 	struct bpf_prog_array_item *item;
2586ce3aa9ccSJakub Sitnicki 
2587ce3aa9ccSJakub Sitnicki 	if (unlikely(index < 0))
2588ce3aa9ccSJakub Sitnicki 		return -EINVAL;
2589ce3aa9ccSJakub Sitnicki 
2590ce3aa9ccSJakub Sitnicki 	for (item = array->items; item->prog; item++) {
2591ce3aa9ccSJakub Sitnicki 		if (item->prog == &dummy_bpf_prog.prog)
2592ce3aa9ccSJakub Sitnicki 			continue;
2593ce3aa9ccSJakub Sitnicki 		if (!index) {
2594ce3aa9ccSJakub Sitnicki 			WRITE_ONCE(item->prog, prog);
2595ce3aa9ccSJakub Sitnicki 			return 0;
2596ce3aa9ccSJakub Sitnicki 		}
2597ce3aa9ccSJakub Sitnicki 		index--;
2598ce3aa9ccSJakub Sitnicki 	}
2599ce3aa9ccSJakub Sitnicki 	return -ENOENT;
2600ce3aa9ccSJakub Sitnicki }
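/* Usage sketch (hypothetical array contents): given items
 * { progA, dummy, progB, NULL }, bpf_prog_array_update_at(array, 1, progC)
 * skips the dummy slot and replaces progB, while index 2 runs off the end
 * and returns -ENOENT.
 */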
2601ce3aa9ccSJakub Sitnicki 
260254e9c9d4SStanislav Fomichev int bpf_prog_array_copy(struct bpf_prog_array *old_array,
2603e87c6bc3SYonghong Song 			struct bpf_prog *exclude_prog,
2604e87c6bc3SYonghong Song 			struct bpf_prog *include_prog,
260582e6b1eeSAndrii Nakryiko 			u64 bpf_cookie,
2606e87c6bc3SYonghong Song 			struct bpf_prog_array **new_array)
2607e87c6bc3SYonghong Song {
2608e87c6bc3SYonghong Song 	int new_prog_cnt, carry_prog_cnt = 0;
260982e6b1eeSAndrii Nakryiko 	struct bpf_prog_array_item *existing, *new;
2610e87c6bc3SYonghong Song 	struct bpf_prog_array *array;
2611170a7e3eSSean Young 	bool found_exclude = false;
2612e87c6bc3SYonghong Song 
2613e87c6bc3SYonghong Song 	/* Figure out how many existing progs we need to carry over to
2614e87c6bc3SYonghong Song 	 * the new array.
2615e87c6bc3SYonghong Song 	 */
2616e87c6bc3SYonghong Song 	if (old_array) {
2617394e40a2SRoman Gushchin 		existing = old_array->items;
2618394e40a2SRoman Gushchin 		for (; existing->prog; existing++) {
2619394e40a2SRoman Gushchin 			if (existing->prog == exclude_prog) {
2620170a7e3eSSean Young 				found_exclude = true;
2621170a7e3eSSean Young 				continue;
2622170a7e3eSSean Young 			}
2623394e40a2SRoman Gushchin 			if (existing->prog != &dummy_bpf_prog.prog)
2624e87c6bc3SYonghong Song 				carry_prog_cnt++;
2625394e40a2SRoman Gushchin 			if (existing->prog == include_prog)
2626e87c6bc3SYonghong Song 				return -EEXIST;
2627e87c6bc3SYonghong Song 		}
2628e87c6bc3SYonghong Song 	}
2629e87c6bc3SYonghong Song 
2630170a7e3eSSean Young 	if (exclude_prog && !found_exclude)
2631170a7e3eSSean Young 		return -ENOENT;
2632170a7e3eSSean Young 
2633e87c6bc3SYonghong Song 	/* How many progs (not NULL) will be in the new array? */
2634e87c6bc3SYonghong Song 	new_prog_cnt = carry_prog_cnt;
2635e87c6bc3SYonghong Song 	if (include_prog)
2636e87c6bc3SYonghong Song 		new_prog_cnt += 1;
2637e87c6bc3SYonghong Song 
2638e87c6bc3SYonghong Song 	/* Do we have any prog (not NULL) in the new array? */
2639e87c6bc3SYonghong Song 	if (!new_prog_cnt) {
2640e87c6bc3SYonghong Song 		*new_array = NULL;
2641e87c6bc3SYonghong Song 		return 0;
2642e87c6bc3SYonghong Song 	}
2643e87c6bc3SYonghong Song 
2644e87c6bc3SYonghong Song 	/* +1 as the end of prog_array is marked with NULL */
2645e87c6bc3SYonghong Song 	array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL);
2646e87c6bc3SYonghong Song 	if (!array)
2647e87c6bc3SYonghong Song 		return -ENOMEM;
264882e6b1eeSAndrii Nakryiko 	new = array->items;
2649e87c6bc3SYonghong Song 
2650e87c6bc3SYonghong Song 	/* Fill in the new prog array */
2651e87c6bc3SYonghong Song 	if (carry_prog_cnt) {
2652394e40a2SRoman Gushchin 		existing = old_array->items;
265382e6b1eeSAndrii Nakryiko 		for (; existing->prog; existing++) {
265482e6b1eeSAndrii Nakryiko 			if (existing->prog == exclude_prog ||
265582e6b1eeSAndrii Nakryiko 			    existing->prog == &dummy_bpf_prog.prog)
265682e6b1eeSAndrii Nakryiko 				continue;
265782e6b1eeSAndrii Nakryiko 
265882e6b1eeSAndrii Nakryiko 			new->prog = existing->prog;
265982e6b1eeSAndrii Nakryiko 			new->bpf_cookie = existing->bpf_cookie;
266082e6b1eeSAndrii Nakryiko 			new++;
2661394e40a2SRoman Gushchin 		}
2662e87c6bc3SYonghong Song 	}
266382e6b1eeSAndrii Nakryiko 	if (include_prog) {
266482e6b1eeSAndrii Nakryiko 		new->prog = include_prog;
266582e6b1eeSAndrii Nakryiko 		new->bpf_cookie = bpf_cookie;
266682e6b1eeSAndrii Nakryiko 		new++;
266782e6b1eeSAndrii Nakryiko 	}
266882e6b1eeSAndrii Nakryiko 	new->prog = NULL;
2669e87c6bc3SYonghong Song 	*new_array = array;
2670e87c6bc3SYonghong Song 	return 0;
2671e87c6bc3SYonghong Song }
2672e87c6bc3SYonghong Song 
267354e9c9d4SStanislav Fomichev int bpf_prog_array_copy_info(struct bpf_prog_array *array,
26743a38bb98SYonghong Song 			     u32 *prog_ids, u32 request_cnt,
26753a38bb98SYonghong Song 			     u32 *prog_cnt)
2676f371b304SYonghong Song {
2677f371b304SYonghong Song 	u32 cnt = 0;
2678f371b304SYonghong Song 
2679f371b304SYonghong Song 	if (array)
2680f371b304SYonghong Song 		cnt = bpf_prog_array_length(array);
2681f371b304SYonghong Song 
26823a38bb98SYonghong Song 	*prog_cnt = cnt;
2683f371b304SYonghong Song 
2684f371b304SYonghong Song 	/* return early if user requested only program count or nothing to copy */
2685f371b304SYonghong Song 	if (!request_cnt || !cnt)
2686f371b304SYonghong Song 		return 0;
2687f371b304SYonghong Song 
26883a38bb98SYonghong Song 	/* this function is called under trace/bpf_trace.c: bpf_event_mutex */
2689394e40a2SRoman Gushchin 	return bpf_prog_array_copy_core(array, prog_ids, request_cnt) ? -ENOSPC
26903a38bb98SYonghong Song 								     : 0;
2691f371b304SYonghong Song }
2692f371b304SYonghong Song 
2693a2ea0746SDaniel Borkmann void __bpf_free_used_maps(struct bpf_prog_aux *aux,
2694a2ea0746SDaniel Borkmann 			  struct bpf_map **used_maps, u32 len)
26956332be04SDaniel Borkmann {
2696da765a2fSDaniel Borkmann 	struct bpf_map *map;
26972ad2f2edSHou Tao 	bool sleepable;
2698a2ea0746SDaniel Borkmann 	u32 i;
26996332be04SDaniel Borkmann 
27002ad2f2edSHou Tao 	sleepable = aux->sleepable;
2701a2ea0746SDaniel Borkmann 	for (i = 0; i < len; i++) {
2702a2ea0746SDaniel Borkmann 		map = used_maps[i];
2703da765a2fSDaniel Borkmann 		if (map->ops->map_poke_untrack)
2704da765a2fSDaniel Borkmann 			map->ops->map_poke_untrack(map, aux);
27052ad2f2edSHou Tao 		if (sleepable)
27062ad2f2edSHou Tao 			atomic64_dec(&map->sleepable_refcnt);
2707da765a2fSDaniel Borkmann 		bpf_map_put(map);
2708da765a2fSDaniel Borkmann 	}
2709a2ea0746SDaniel Borkmann }
2710a2ea0746SDaniel Borkmann 
2711a2ea0746SDaniel Borkmann static void bpf_free_used_maps(struct bpf_prog_aux *aux)
2712a2ea0746SDaniel Borkmann {
2713a2ea0746SDaniel Borkmann 	__bpf_free_used_maps(aux, aux->used_maps, aux->used_map_cnt);
27146332be04SDaniel Borkmann 	kfree(aux->used_maps);
27156332be04SDaniel Borkmann }
27166332be04SDaniel Borkmann 
2717541c3badSAndrii Nakryiko void __bpf_free_used_btfs(struct bpf_prog_aux *aux,
2718541c3badSAndrii Nakryiko 			  struct btf_mod_pair *used_btfs, u32 len)
2719541c3badSAndrii Nakryiko {
2720541c3badSAndrii Nakryiko #ifdef CONFIG_BPF_SYSCALL
2721541c3badSAndrii Nakryiko 	struct btf_mod_pair *btf_mod;
2722541c3badSAndrii Nakryiko 	u32 i;
2723541c3badSAndrii Nakryiko 
2724541c3badSAndrii Nakryiko 	for (i = 0; i < len; i++) {
2725541c3badSAndrii Nakryiko 		btf_mod = &used_btfs[i];
2726541c3badSAndrii Nakryiko 		if (btf_mod->module)
2727541c3badSAndrii Nakryiko 			module_put(btf_mod->module);
2728541c3badSAndrii Nakryiko 		btf_put(btf_mod->btf);
2729541c3badSAndrii Nakryiko 	}
2730541c3badSAndrii Nakryiko #endif
2731541c3badSAndrii Nakryiko }
2732541c3badSAndrii Nakryiko 
2733541c3badSAndrii Nakryiko static void bpf_free_used_btfs(struct bpf_prog_aux *aux)
2734541c3badSAndrii Nakryiko {
2735541c3badSAndrii Nakryiko 	__bpf_free_used_btfs(aux, aux->used_btfs, aux->used_btf_cnt);
2736541c3badSAndrii Nakryiko 	kfree(aux->used_btfs);
2737541c3badSAndrii Nakryiko }
2738541c3badSAndrii Nakryiko 
273960a3b225SDaniel Borkmann static void bpf_prog_free_deferred(struct work_struct *work)
274060a3b225SDaniel Borkmann {
274109756af4SAlexei Starovoitov 	struct bpf_prog_aux *aux;
27421c2a088aSAlexei Starovoitov 	int i;
274360a3b225SDaniel Borkmann 
274409756af4SAlexei Starovoitov 	aux = container_of(work, struct bpf_prog_aux, work);
27452357672cSKumar Kartikeya Dwivedi #ifdef CONFIG_BPF_SYSCALL
27462357672cSKumar Kartikeya Dwivedi 	bpf_free_kfunc_btf_tab(aux->kfunc_btf_tab);
27472357672cSKumar Kartikeya Dwivedi #endif
2748c0e19f2cSStanislav Fomichev #ifdef CONFIG_CGROUP_BPF
2749c0e19f2cSStanislav Fomichev 	if (aux->cgroup_atype != CGROUP_BPF_ATTACH_TYPE_INVALID)
2750c0e19f2cSStanislav Fomichev 		bpf_cgroup_atype_put(aux->cgroup_atype);
2751c0e19f2cSStanislav Fomichev #endif
27526332be04SDaniel Borkmann 	bpf_free_used_maps(aux);
2753541c3badSAndrii Nakryiko 	bpf_free_used_btfs(aux);
2754ab3f0063SJakub Kicinski 	if (bpf_prog_is_dev_bound(aux))
27552b3486bcSStanislav Fomichev 		bpf_prog_dev_bound_destroy(aux->prog);
2756c195651eSYonghong Song #ifdef CONFIG_PERF_EVENTS
2757c195651eSYonghong Song 	if (aux->prog->has_callchain_buf)
2758c195651eSYonghong Song 		put_callchain_buffers();
2759c195651eSYonghong Song #endif
27603aac1eadSToke Høiland-Jørgensen 	if (aux->dst_trampoline)
27613aac1eadSToke Høiland-Jørgensen 		bpf_trampoline_put(aux->dst_trampoline);
2762f263a814SJohn Fastabend 	for (i = 0; i < aux->func_cnt; i++) {
2763f263a814SJohn Fastabend 		/* We can just unlink the subprog poke descriptor table as
2764f263a814SJohn Fastabend 		 * it was originally linked to the main program and is also
2765f263a814SJohn Fastabend 		 * released along with it.
2766f263a814SJohn Fastabend 		 */
2767f263a814SJohn Fastabend 		aux->func[i]->aux->poke_tab = NULL;
27681c2a088aSAlexei Starovoitov 		bpf_jit_free(aux->func[i]);
2769f263a814SJohn Fastabend 	}
27701c2a088aSAlexei Starovoitov 	if (aux->func_cnt) {
27711c2a088aSAlexei Starovoitov 		kfree(aux->func);
27721c2a088aSAlexei Starovoitov 		bpf_prog_unlock_free(aux->prog);
27731c2a088aSAlexei Starovoitov 	} else {
277409756af4SAlexei Starovoitov 		bpf_jit_free(aux->prog);
277560a3b225SDaniel Borkmann 	}
27761c2a088aSAlexei Starovoitov }
277760a3b225SDaniel Borkmann 
27787ae457c1SAlexei Starovoitov void bpf_prog_free(struct bpf_prog *fp)
2779f5bffecdSAlexei Starovoitov {
278009756af4SAlexei Starovoitov 	struct bpf_prog_aux *aux = fp->aux;
278160a3b225SDaniel Borkmann 
27823aac1eadSToke Høiland-Jørgensen 	if (aux->dst_prog)
27833aac1eadSToke Høiland-Jørgensen 		bpf_prog_put(aux->dst_prog);
278409756af4SAlexei Starovoitov 	INIT_WORK(&aux->work, bpf_prog_free_deferred);
278509756af4SAlexei Starovoitov 	schedule_work(&aux->work);
2786f5bffecdSAlexei Starovoitov }
27877ae457c1SAlexei Starovoitov EXPORT_SYMBOL_GPL(bpf_prog_free);
2788f89b7755SAlexei Starovoitov 
27893ad00405SDaniel Borkmann /* RNG for unprivileged user space with state separated from prandom_u32(). */
27903ad00405SDaniel Borkmann static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);
27913ad00405SDaniel Borkmann 
27923ad00405SDaniel Borkmann void bpf_user_rnd_init_once(void)
27933ad00405SDaniel Borkmann {
27943ad00405SDaniel Borkmann 	prandom_init_once(&bpf_user_rnd_state);
27953ad00405SDaniel Borkmann }
27963ad00405SDaniel Borkmann 
2797f3694e00SDaniel Borkmann BPF_CALL_0(bpf_user_rnd_u32)
27983ad00405SDaniel Borkmann {
27993ad00405SDaniel Borkmann 	/* Should someone ever have the rather unwise idea to use some
28003ad00405SDaniel Borkmann 	 * of the registers passed into this function, then note that
28013ad00405SDaniel Borkmann 	 * this function is called from native eBPF and classic-to-eBPF
28023ad00405SDaniel Borkmann 	 * transformations. Register assignments from both sides are
28033ad00405SDaniel Borkmann 	 * different, f.e. classic always sets fn(ctx, A, X) here.
28043ad00405SDaniel Borkmann 	 */
28053ad00405SDaniel Borkmann 	struct rnd_state *state;
28063ad00405SDaniel Borkmann 	u32 res;
28073ad00405SDaniel Borkmann 
28083ad00405SDaniel Borkmann 	state = &get_cpu_var(bpf_user_rnd_state);
28093ad00405SDaniel Borkmann 	res = prandom_u32_state(state);
2810b761fe22SShaohua Li 	put_cpu_var(bpf_user_rnd_state);
28113ad00405SDaniel Borkmann 
28123ad00405SDaniel Borkmann 	return res;
28133ad00405SDaniel Borkmann }
28143ad00405SDaniel Borkmann 
28156890896bSStanislav Fomichev BPF_CALL_0(bpf_get_raw_cpu_id)
28166890896bSStanislav Fomichev {
28176890896bSStanislav Fomichev 	return raw_smp_processor_id();
28186890896bSStanislav Fomichev }
28196890896bSStanislav Fomichev 
28203ba67dabSDaniel Borkmann /* Weak definitions of helper functions in case we don't have bpf syscall. */
28213ba67dabSDaniel Borkmann const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
28223ba67dabSDaniel Borkmann const struct bpf_func_proto bpf_map_update_elem_proto __weak;
28233ba67dabSDaniel Borkmann const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
2824f1a2e44aSMauricio Vasquez B const struct bpf_func_proto bpf_map_push_elem_proto __weak;
2825f1a2e44aSMauricio Vasquez B const struct bpf_func_proto bpf_map_pop_elem_proto __weak;
2826f1a2e44aSMauricio Vasquez B const struct bpf_func_proto bpf_map_peek_elem_proto __weak;
282707343110SFeng Zhou const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto __weak;
2828d83525caSAlexei Starovoitov const struct bpf_func_proto bpf_spin_lock_proto __weak;
2829d83525caSAlexei Starovoitov const struct bpf_func_proto bpf_spin_unlock_proto __weak;
28305576b991SMartin KaFai Lau const struct bpf_func_proto bpf_jiffies64_proto __weak;
28313ba67dabSDaniel Borkmann 
283203e69b50SDaniel Borkmann const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
2833c04167ceSDaniel Borkmann const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
28342d0e30c3SDaniel Borkmann const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
283517ca8cbfSDaniel Borkmann const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
283671d19214SMaciej Żenczykowski const struct bpf_func_proto bpf_ktime_get_boot_ns_proto __weak;
2837d0551261SDmitrii Banshchikov const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto __weak;
2838c8996c98SJesper Dangaard Brouer const struct bpf_func_proto bpf_ktime_get_tai_ns_proto __weak;
2839bd570ff9SDaniel Borkmann 
2840ffeedafbSAlexei Starovoitov const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
2841ffeedafbSAlexei Starovoitov const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
2842ffeedafbSAlexei Starovoitov const struct bpf_func_proto bpf_get_current_comm_proto __weak;
2843bf6fa2c8SYonghong Song const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak;
28440f09abd1SDaniel Borkmann const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto __weak;
2845cd339431SRoman Gushchin const struct bpf_func_proto bpf_get_local_storage_proto __weak;
2846b4490c5cSCarlos Neira const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto __weak;
2847c4d0bfb4SAlan Maguire const struct bpf_func_proto bpf_snprintf_btf_proto __weak;
2848eb411377SAlan Maguire const struct bpf_func_proto bpf_seq_printf_btf_proto __weak;
284969fd337aSStanislav Fomichev const struct bpf_func_proto bpf_set_retval_proto __weak;
285069fd337aSStanislav Fomichev const struct bpf_func_proto bpf_get_retval_proto __weak;
2851bd570ff9SDaniel Borkmann 
28520756ea3eSAlexei Starovoitov const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
28530756ea3eSAlexei Starovoitov {
28540756ea3eSAlexei Starovoitov 	return NULL;
28550756ea3eSAlexei Starovoitov }
285603e69b50SDaniel Borkmann 
285710aceb62SDave Marchevsky const struct bpf_func_proto * __weak bpf_get_trace_vprintk_proto(void)
285810aceb62SDave Marchevsky {
285910aceb62SDave Marchevsky 	return NULL;
286010aceb62SDave Marchevsky }
286110aceb62SDave Marchevsky 
2862555c8a86SDaniel Borkmann u64 __weak
2863555c8a86SDaniel Borkmann bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
2864555c8a86SDaniel Borkmann 		 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
2865bd570ff9SDaniel Borkmann {
2866555c8a86SDaniel Borkmann 	return -ENOTSUPP;
2867bd570ff9SDaniel Borkmann }
28686cb5fb38SJakub Kicinski EXPORT_SYMBOL_GPL(bpf_event_output);
2869bd570ff9SDaniel Borkmann 
28703324b584SDaniel Borkmann /* Always built-in helper functions. */
28713324b584SDaniel Borkmann const struct bpf_func_proto bpf_tail_call_proto = {
28723324b584SDaniel Borkmann 	.func		= NULL,
28733324b584SDaniel Borkmann 	.gpl_only	= false,
28743324b584SDaniel Borkmann 	.ret_type	= RET_VOID,
28753324b584SDaniel Borkmann 	.arg1_type	= ARG_PTR_TO_CTX,
28763324b584SDaniel Borkmann 	.arg2_type	= ARG_CONST_MAP_PTR,
28773324b584SDaniel Borkmann 	.arg3_type	= ARG_ANYTHING,
28783324b584SDaniel Borkmann };
28793324b584SDaniel Borkmann 
28809383191dSDaniel Borkmann /* Stub for JITs that only support cBPF. eBPF programs are interpreted.
28819383191dSDaniel Borkmann  * It is encouraged to implement bpf_int_jit_compile() instead, so that
28829383191dSDaniel Borkmann  * eBPF and implicitly also cBPF can get JITed!
28839383191dSDaniel Borkmann  */
2884d1c55ab5SDaniel Borkmann struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
28853324b584SDaniel Borkmann {
2886d1c55ab5SDaniel Borkmann 	return prog;
28873324b584SDaniel Borkmann }
28883324b584SDaniel Borkmann 
28899383191dSDaniel Borkmann /* Stub for JITs that support eBPF. All cBPF code gets transformed into
28909383191dSDaniel Borkmann  * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
28919383191dSDaniel Borkmann  */
28929383191dSDaniel Borkmann void __weak bpf_jit_compile(struct bpf_prog *prog)
28939383191dSDaniel Borkmann {
28949383191dSDaniel Borkmann }
28959383191dSDaniel Borkmann 
289617bedab2SMartin KaFai Lau bool __weak bpf_helper_changes_pkt_data(void *func)
2897969bf05eSAlexei Starovoitov {
2898969bf05eSAlexei Starovoitov 	return false;
2899969bf05eSAlexei Starovoitov }
2900969bf05eSAlexei Starovoitov 
2901a4b1d3c1SJiong Wang /* Return TRUE if the JIT backend wants the verifier to enable sub-register
2902a4b1d3c1SJiong Wang  * usage analysis code and wants explicit zero extension inserted by the verifier.
2903a4b1d3c1SJiong Wang  * Otherwise, return FALSE.
290439491867SBrendan Jackman  *
290539491867SBrendan Jackman  * The verifier inserts an explicit zero extension after BPF_CMPXCHGs even if
290639491867SBrendan Jackman  * you don't override this. JITs that don't want these extra insns can detect
290739491867SBrendan Jackman  * them using insn_is_zext.
2908a4b1d3c1SJiong Wang  */
2909a4b1d3c1SJiong Wang bool __weak bpf_jit_needs_zext(void)
2910a4b1d3c1SJiong Wang {
2911a4b1d3c1SJiong Wang 	return false;
2912a4b1d3c1SJiong Wang }
2913a4b1d3c1SJiong Wang 
291495acd881STony Ambardar /* Return TRUE if the JIT backend supports mixing bpf2bpf and tailcalls. */
291595acd881STony Ambardar bool __weak bpf_jit_supports_subprog_tailcalls(void)
291695acd881STony Ambardar {
291795acd881STony Ambardar 	return false;
291895acd881STony Ambardar }
291995acd881STony Ambardar 
2920e6ac2450SMartin KaFai Lau bool __weak bpf_jit_supports_kfunc_call(void)
2921e6ac2450SMartin KaFai Lau {
2922e6ac2450SMartin KaFai Lau 	return false;
2923e6ac2450SMartin KaFai Lau }
2924e6ac2450SMartin KaFai Lau 
29251cf3bfc6SIlya Leoshkevich bool __weak bpf_jit_supports_far_kfunc_call(void)
29261cf3bfc6SIlya Leoshkevich {
29271cf3bfc6SIlya Leoshkevich 	return false;
29281cf3bfc6SIlya Leoshkevich }
29291cf3bfc6SIlya Leoshkevich 
2930f89b7755SAlexei Starovoitov /* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
2931f89b7755SAlexei Starovoitov  * skb_copy_bits(), so provide a weak definition of it for NET-less config.
2932f89b7755SAlexei Starovoitov  */
2933f89b7755SAlexei Starovoitov int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
2934f89b7755SAlexei Starovoitov 			 int len)
2935f89b7755SAlexei Starovoitov {
2936f89b7755SAlexei Starovoitov 	return -EFAULT;
2937f89b7755SAlexei Starovoitov }
2938a67edbf4SDaniel Borkmann 
29395964b200SAlexei Starovoitov int __weak bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
29405964b200SAlexei Starovoitov 			      void *addr1, void *addr2)
29415964b200SAlexei Starovoitov {
29425964b200SAlexei Starovoitov 	return -ENOTSUPP;
29435964b200SAlexei Starovoitov }
29445964b200SAlexei Starovoitov 
2945ebc1415dSSong Liu void * __weak bpf_arch_text_copy(void *dst, void *src, size_t len)
2946ebc1415dSSong Liu {
2947ebc1415dSSong Liu 	return ERR_PTR(-ENOTSUPP);
2948ebc1415dSSong Liu }
2949ebc1415dSSong Liu 
2950fe736565SSong Liu int __weak bpf_arch_text_invalidate(void *dst, size_t len)
2951fe736565SSong Liu {
2952fe736565SSong Liu 	return -ENOTSUPP;
2953fe736565SSong Liu }
2954fe736565SSong Liu 
2955958cf2e2SKumar Kartikeya Dwivedi #ifdef CONFIG_BPF_SYSCALL
2956958cf2e2SKumar Kartikeya Dwivedi static int __init bpf_global_ma_init(void)
2957958cf2e2SKumar Kartikeya Dwivedi {
2958958cf2e2SKumar Kartikeya Dwivedi 	int ret;
2959958cf2e2SKumar Kartikeya Dwivedi 
2960958cf2e2SKumar Kartikeya Dwivedi 	ret = bpf_mem_alloc_init(&bpf_global_ma, 0, false);
2961958cf2e2SKumar Kartikeya Dwivedi 	bpf_global_ma_set = !ret;
2962958cf2e2SKumar Kartikeya Dwivedi 	return ret;
2963958cf2e2SKumar Kartikeya Dwivedi }
2964958cf2e2SKumar Kartikeya Dwivedi late_initcall(bpf_global_ma_init);
2965958cf2e2SKumar Kartikeya Dwivedi #endif
2966958cf2e2SKumar Kartikeya Dwivedi 
2967492ecee8SAlexei Starovoitov DEFINE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
2968492ecee8SAlexei Starovoitov EXPORT_SYMBOL(bpf_stats_enabled_key);
2969492ecee8SAlexei Starovoitov 
2970a67edbf4SDaniel Borkmann /* All definitions of tracepoints related to BPF. */
2971a67edbf4SDaniel Borkmann #define CREATE_TRACE_POINTS
2972a67edbf4SDaniel Borkmann #include <linux/bpf_trace.h>
2973a67edbf4SDaniel Borkmann 
2974a67edbf4SDaniel Borkmann EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);
2975e7d47989SToshiaki Makita EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_bulk_tx);
2976