xref: /openbmc/linux/arch/powerpc/net/bpf_jit_comp.c (revision e2aa5e65)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * eBPF JIT compiler
 *
 * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
 *		  IBM Corporation
 *
 * Based on the powerpc classic BPF JIT compiler by Matt Evans
 */
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <asm/asm-compat.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <asm/kprobes.h>
#include <linux/bpf.h>

#include "bpf_jit.h"

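/*
 * Fill the JIT buffer with trap instructions so that a stray jump into
 * unpopulated space traps cleanly instead of executing leftover bytes.
 * size is in bytes; each powerpc instruction is 4 bytes wide.
 */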
static void bpf_jit_fill_ill_insns(void *area, unsigned int size)
{
	memset32(area, BREAKPOINT_INSTRUCTION, size / 4);
}

/* Fix the branch target addresses for subprog calls */
static int bpf_jit_fixup_subprog_calls(struct bpf_prog *fp, u32 *image,
				       struct codegen_context *ctx, u32 *addrs)
{
	const struct bpf_insn *insn = fp->insnsi;
	bool func_addr_fixed;
	u64 func_addr;
	u32 tmp_idx;
	int i, ret;

	for (i = 0; i < fp->len; i++) {
		/*
		 * During the extra pass, only the branch target addresses for
		 * the subprog calls need to be fixed. All other instructions
		 * can be left untouched.
		 *
		 * The JITed image length does not change because we already
		 * ensure that the JITed instruction sequences for these calls
		 * are of fixed length by padding them with NOPs.
		 */
		if (insn[i].code == (BPF_JMP | BPF_CALL) &&
		    insn[i].src_reg == BPF_PSEUDO_CALL) {
			ret = bpf_jit_get_func_addr(fp, &insn[i], true,
						    &func_addr,
						    &func_addr_fixed);
			if (ret < 0)
				return ret;

			/*
			 * Save ctx->idx as it currently points to the end of
			 * the JITed image, and temporarily set it to the
			 * offset of the instruction sequence for this subprog
			 * call.
			 */
			tmp_idx = ctx->idx;
			ctx->idx = addrs[i] / 4;
			bpf_jit_emit_func_call_rel(image, ctx, func_addr);

			/*
			 * Restore ctx->idx here. This is safe as the length
			 * of the JITed sequence remains unchanged.
			 */
			ctx->idx = tmp_idx;
		}
	}

	return 0;
}

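/*
 * JIT state preserved in fp->aux->jit_data between the initial compile and
 * the extra pass, so the image need not be rebuilt when subprog call
 * addresses are fixed up.
 */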
struct powerpc64_jit_data {
	struct bpf_binary_header *header;
	u32 *addrs;
	u8 *image;
	u32 proglen;
	struct codegen_context ctx;
};

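/*
 * This JIT does not implicitly zero-extend 32-bit operations, so ask the
 * verifier to insert explicit zero-extension instructions where needed.
 */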
bool bpf_jit_needs_zext(void)
{
	return true;
}

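/*
 * Main entry point: translate an eBPF program into native powerpc code.
 * The image is built over several passes; programs with subprog calls
 * come back for an extra pass that only patches the call targets.
 */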
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
{
	u32 proglen;
	u32 alloclen;
	u8 *image = NULL;
	u32 *code_base;
	u32 *addrs;
	struct powerpc64_jit_data *jit_data;
	struct codegen_context cgctx;
	int pass;
	int flen;
	struct bpf_binary_header *bpf_hdr;
	struct bpf_prog *org_fp = fp;
	struct bpf_prog *tmp_fp;
	bool bpf_blinded = false;
	bool extra_pass = false;
	u32 extable_len;
	u32 fixup_len;

	if (!fp->jit_requested)
		return org_fp;

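	/*
	 * Blind constants embedded in the program to harden against JIT
	 * spraying; on failure, fall back to the unmodified program.
	 */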
	tmp_fp = bpf_jit_blind_constants(org_fp);
	if (IS_ERR(tmp_fp))
		return org_fp;

	if (tmp_fp != org_fp) {
		bpf_blinded = true;
		fp = tmp_fp;
	}

	jit_data = fp->aux->jit_data;
	if (!jit_data) {
		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
		if (!jit_data) {
			fp = org_fp;
			goto out;
		}
		fp->aux->jit_data = jit_data;
	}

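	/*
	 * A surviving addrs[] array means this is the extra pass: reuse the
	 * saved context and image rather than compiling from scratch.
	 */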
	flen = fp->len;
	addrs = jit_data->addrs;
	if (addrs) {
		cgctx = jit_data->ctx;
		image = jit_data->image;
		bpf_hdr = jit_data->header;
		proglen = jit_data->proglen;
		extra_pass = true;
		goto skip_init_ctx;
	}

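	/*
	 * addrs[i] holds the byte offset of BPF instruction i within the
	 * JITed image; the extra slot marks the end of the program.
	 */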
	addrs = kcalloc(flen + 1, sizeof(*addrs), GFP_KERNEL);
	if (addrs == NULL) {
		fp = org_fp;
		goto out_addrs;
	}

	memset(&cgctx, 0, sizeof(struct codegen_context));
	memcpy(cgctx.b2p, b2p, sizeof(cgctx.b2p));

	/* Make sure that the stack is quadword aligned. */
	cgctx.stack_size = round_up(fp->aux->stack_depth, 16);

	/* Scouting faux-generate pass 0 */
	if (bpf_jit_build_body(fp, 0, &cgctx, addrs, 0)) {
		/* We hit something illegal or unsupported. */
		fp = org_fp;
		goto out_addrs;
	}

	/*
	 * If we have seen a tail call, we need a second pass.
	 * This is because bpf_jit_emit_common_epilogue() is called
	 * from bpf_jit_emit_tail_call() with a not yet stable ctx->seen.
	 */
	if (cgctx.seen & SEEN_TAILCALL) {
		cgctx.idx = 0;
		if (bpf_jit_build_body(fp, 0, &cgctx, addrs, 0)) {
			fp = org_fp;
			goto out_addrs;
		}
	}

	bpf_jit_realloc_regs(&cgctx);
	/*
	 * Pretend to build the prologue, given the features we've seen.
	 * This will update cgctx.idx as it pretends to output instructions,
	 * then we can calculate the total size from idx.
	 */
	bpf_jit_build_prologue(0, &cgctx);
	bpf_jit_build_epilogue(0, &cgctx);

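	/*
	 * Image layout: [function descriptor][JITed body][fixup stubs]
	 * [exception table]. Size each component before allocating.
	 */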
	fixup_len = fp->aux->num_exentries * BPF_FIXUP_LEN * 4;
	extable_len = fp->aux->num_exentries * sizeof(struct exception_table_entry);

	proglen = cgctx.idx * 4;
	alloclen = proglen + FUNCTION_DESCR_SIZE + fixup_len + extable_len;

	bpf_hdr = bpf_jit_binary_alloc(alloclen, &image, 4, bpf_jit_fill_ill_insns);
	if (!bpf_hdr) {
		fp = org_fp;
		goto out_addrs;
	}

	if (extable_len)
		fp->aux->extable = (void *)image + FUNCTION_DESCR_SIZE + proglen + fixup_len;

skip_init_ctx:
	code_base = (u32 *)(image + FUNCTION_DESCR_SIZE);

	if (extra_pass) {
		/*
		 * Do not touch the prologue and epilogue as they will remain
		 * unchanged. Only fix the branch target addresses for subprog
		 * calls in the body.
		 *
		 * This does not change the offsets and lengths of the subprog
		 * call instruction sequences, and hence does not change the
		 * size of the JITed image either.
		 */
		bpf_jit_fixup_subprog_calls(fp, code_base, &cgctx, addrs);

		/* There is no need to perform the usual passes. */
		goto skip_codegen_passes;
	}

	/* Code generation passes 1-2 */
	for (pass = 1; pass < 3; pass++) {
		/* Now build the prologue, body code & epilogue for real. */
		cgctx.idx = 0;
		bpf_jit_build_prologue(code_base, &cgctx);
		if (bpf_jit_build_body(fp, code_base, &cgctx, addrs, pass)) {
			bpf_jit_binary_free(bpf_hdr);
			fp = org_fp;
			goto out_addrs;
		}
		bpf_jit_build_epilogue(code_base, &cgctx);

		if (bpf_jit_enable > 1)
			pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass,
				proglen - (cgctx.idx * 4), cgctx.seen);
	}

skip_codegen_passes:
	if (bpf_jit_enable > 1)
		/*
		 * Note that we dump from code_base rather than image, since
		 * the opcodes live at code_base.
		 */
		bpf_jit_dump(flen, proglen, pass, code_base);

#ifdef PPC64_ELF_ABI_v1
	/* Function descriptor nastiness: Address + TOC */
	((u64 *)image)[0] = (u64)code_base;
	((u64 *)image)[1] = local_paca->kernel_toc;
#endif

	fp->bpf_func = (void *)image;
	fp->jited = 1;
	fp->jited_len = proglen + FUNCTION_DESCR_SIZE;

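	/*
	 * Flush the instruction cache over the entire image so the CPU
	 * fetches the newly written instructions rather than stale data.
	 */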
	bpf_flush_icache(bpf_hdr, (u8 *)bpf_hdr + (bpf_hdr->pages * PAGE_SIZE));
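
	/*
	 * Finalize now (lock the image read-only, fill in line info, free
	 * the temporaries) unless this is a subprog still awaiting the
	 * extra pass, in which case stash the JIT state for reuse.
	 */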
	if (!fp->is_func || extra_pass) {
		bpf_jit_binary_lock_ro(bpf_hdr);
		bpf_prog_fill_jited_linfo(fp, addrs);
out_addrs:
		kfree(addrs);
		kfree(jit_data);
		fp->aux->jit_data = NULL;
	} else {
		jit_data->addrs = addrs;
		jit_data->ctx = cgctx;
		jit_data->proglen = proglen;
		jit_data->image = image;
		jit_data->header = bpf_hdr;
	}

out:
	if (bpf_blinded)
		bpf_jit_prog_release_other(fp, fp == org_fp ? tmp_fp : org_fp);

	return fp;
}

/*
 * The caller should check for (BPF_MODE(code) == BPF_PROBE_MEM) before calling
 * this function, as this only applies to BPF_PROBE_MEM, for now.
 */
int bpf_add_extable_entry(struct bpf_prog *fp, u32 *image, int pass, struct codegen_context *ctx,
			  int insn_idx, int jmp_off, int dst_reg)
{
	off_t offset;
	unsigned long pc;
	struct exception_table_entry *ex;
	u32 *fixup;

	/* Populate extable entries only in the last pass */
	if (pass != 2)
		return 0;

	if (!fp->aux->extable ||
	    WARN_ON_ONCE(ctx->exentry_idx >= fp->aux->num_exentries))
		return -EINVAL;

	pc = (unsigned long)&image[insn_idx];

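	/*
	 * The fixup stubs sit immediately below the exception table; locate
	 * this entry's stub relative to the start of the extable.
	 */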
	fixup = (void *)fp->aux->extable -
		(fp->aux->num_exentries * BPF_FIXUP_LEN * 4) +
		(ctx->exentry_idx * BPF_FIXUP_LEN * 4);

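	/*
	 * The stub zeroes the destination register (both halves on PPC32)
	 * and then branches back past the faulting load.
	 */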
	fixup[0] = PPC_RAW_LI(dst_reg, 0);
	if (IS_ENABLED(CONFIG_PPC32))
		fixup[1] = PPC_RAW_LI(dst_reg - 1, 0); /* clear higher 32-bit register too */

	fixup[BPF_FIXUP_LEN - 1] =
		PPC_RAW_BRANCH((long)(pc + jmp_off) - (long)&fixup[BPF_FIXUP_LEN - 1]);

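	/*
	 * Extable entries store 32-bit offsets relative to themselves, so
	 * check that both the faulting instruction and the fixup stub are
	 * within reach before recording them.
	 */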
	ex = &fp->aux->extable[ctx->exentry_idx];

	offset = pc - (long)&ex->insn;
	if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN))
		return -ERANGE;
	ex->insn = offset;

	offset = (long)fixup - (long)&ex->fixup;
	if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN))
		return -ERANGE;
	ex->fixup = offset;

	ctx->exentry_idx++;
	return 0;
}
321