/*
 * bpf_jit_comp64.c: eBPF JIT compiler
 *
 * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
 *		  IBM Corporation
 *
 * Based on the powerpc classic BPF JIT compiler by Matt Evans
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <asm/kprobes.h>
#include <linux/bpf.h>

#include "bpf_jit64.h"

static void bpf_jit_fill_ill_insns(void *area, unsigned int size)
{
	memset32(area, BREAKPOINT_INSTRUCTION, size/4);
}

static inline void bpf_flush_icache(void *start, void *end)
{
	smp_wmb();
	flush_icache_range((unsigned long)start, (unsigned long)end);
}

static inline bool bpf_is_seen_register(struct codegen_context *ctx, int i)
{
	return (ctx->seen & (1 << (31 - b2p[i])));
}

static inline void bpf_set_seen_register(struct codegen_context *ctx, int i)
{
	ctx->seen |= (1 << (31 - b2p[i]));
}

static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
{
	/*
	 * We only need a stack frame if:
	 * - we call other functions (kernel helpers), or
	 * - the bpf program uses its stack area
	 * The latter condition is deduced from the usage of BPF_REG_FP
	 */
	return ctx->seen & SEEN_FUNC || bpf_is_seen_register(ctx, BPF_REG_FP);
}

/*
 * When not setting up our own stackframe, the redzone usage is:
 *
 *		[	prev sp		] <-------------
 *		[	  ...		]		|
 * sp (r1) --->	[    stack pointer	] --------------
 *		[   nv gpr save area	] 6*8
 *		[    tail_call_cnt	] 8
 *		[    local_tmp_var	] 8
 *		[   unused red zone	] 208 bytes protected
 */
static int bpf_jit_stack_local(struct codegen_context *ctx)
{
	if (bpf_has_stack_frame(ctx))
		return STACK_FRAME_MIN_SIZE + ctx->stack_size;
	else
		return -(BPF_PPC_STACK_SAVE + 16);
}

static int bpf_jit_stack_tailcallcnt(struct codegen_context *ctx)
{
	return bpf_jit_stack_local(ctx) + 8;
}
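
/*
 * For illustration (actual values come from bpf_jit64.h): with no stack
 * frame, bpf_jit_stack_local() resolves to -(BPF_PPC_STACK_SAVE + 16),
 * i.e. the local_tmp_var slot sitting just below the NV GPR save area in
 * the red zone, and bpf_jit_stack_tailcallcnt() is 8 bytes above it --
 * the same -(BPF_PPC_STACK_SAVE + 8) slot the prologue initializes. With
 * a stack frame, both land inside our own frame, just above the BPF
 * program's stack area.
 */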

static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
{
	if (reg >= BPF_PPC_NVR_MIN && reg < 32)
		return (bpf_has_stack_frame(ctx) ?
			(BPF_PPC_STACKFRAME + ctx->stack_size) : 0)
				- (8 * (32 - reg));

	pr_err("BPF JIT is asking about unknown registers");
	BUG();
}

static void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
{
	int i;

	/*
	 * Initialize tail_call_cnt if we do tail calls.
	 * Otherwise, put in NOPs so that it can be skipped when we are
	 * invoked through a tail call.
	 */
	if (ctx->seen & SEEN_TAILCALL) {
		PPC_LI(b2p[TMP_REG_1], 0);
		/* this goes in the redzone */
		PPC_BPF_STL(b2p[TMP_REG_1], 1, -(BPF_PPC_STACK_SAVE + 8));
	} else {
		PPC_NOP();
		PPC_NOP();
	}

#define BPF_TAILCALL_PROLOGUE_SIZE	8
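/*
 * The two instructions emitted above (li + std, or two nops) are exactly
 * 8 bytes: bpf_jit_emit_tail_call() branches to the target program's
 * bpf_func + BPF_TAILCALL_PROLOGUE_SIZE, skipping this initialization so
 * that tail_call_cnt carries over across tail calls.
 */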

	if (bpf_has_stack_frame(ctx)) {
		/*
		 * We need a stack frame, but we don't necessarily need to
		 * save/restore LR unless we call other functions
		 */
		if (ctx->seen & SEEN_FUNC) {
			EMIT(PPC_INST_MFLR | __PPC_RT(R0));
			PPC_BPF_STL(0, 1, PPC_LR_STKOFF);
		}

		PPC_BPF_STLU(1, 1, -(BPF_PPC_STACKFRAME + ctx->stack_size));
	}

	/*
	 * Back up non-volatile regs -- BPF registers 6-10
	 * If we haven't created our own stack frame, we save these
	 * in the protected zone below the previous stack frame
	 */
	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
		if (bpf_is_seen_register(ctx, i))
			PPC_BPF_STL(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i]));

	/* Setup frame pointer to point to the bpf stack area */
	if (bpf_is_seen_register(ctx, BPF_REG_FP))
		PPC_ADDI(b2p[BPF_REG_FP], 1,
				STACK_FRAME_MIN_SIZE + ctx->stack_size);
}

static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx)
{
	int i;

	/* Restore NVRs */
	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
		if (bpf_is_seen_register(ctx, i))
			PPC_BPF_LL(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i]));

	/* Tear down our stack frame */
	if (bpf_has_stack_frame(ctx)) {
		PPC_ADDI(1, 1, BPF_PPC_STACKFRAME + ctx->stack_size);
		if (ctx->seen & SEEN_FUNC) {
			PPC_BPF_LL(0, 1, PPC_LR_STKOFF);
			PPC_MTLR(0);
		}
	}
}

static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
{
	bpf_jit_emit_common_epilogue(image, ctx);

	/* Move result to r3 */
	PPC_MR(3, b2p[BPF_REG_0]);

	PPC_BLR();
}

static void bpf_jit_emit_func_call(u32 *image, struct codegen_context *ctx, u64 func)
{
	unsigned int i, ctx_idx = ctx->idx;

	/* Load function address into r12 */
	PPC_LI64(12, func);

	/* For bpf-to-bpf function calls, the callee's address is unknown
	 * until the last extra pass. As seen above, we use PPC_LI64() to
	 * load the callee's address, but this may optimize the number of
	 * instructions required based on the nature of the address.
	 *
	 * Since we don't want the number of instructions emitted to change,
	 * we pad the optimized PPC_LI64() call with NOPs to guarantee that
	 * we always have a five-instruction sequence, which is the maximum
	 * that PPC_LI64() can emit.
	 */
	for (i = ctx->idx - ctx_idx; i < 5; i++)
		PPC_NOP();
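	/*
	 * For illustration (the exact expansion is up to PPC_LI64() in
	 * bpf_jit.h): a full 64-bit kernel address is typically built with
	 * five instructions (lis/ori/sldi/oris/ori), while func == 0 on the
	 * initial pass may need just one. The NOP padding keeps this call
	 * site a fixed five-instruction slot, so the real address can be
	 * filled in on the extra pass without shifting any other offsets.
	 */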

#ifdef PPC64_ELF_ABI_v1
	/*
	 * Load TOC from function descriptor at offset 8.
	 * We can clobber r2 since we get called through a
	 * function pointer (so caller will save/restore r2)
	 * and since we don't use a TOC ourself.
	 */
	PPC_BPF_LL(2, 12, 8);
	/* Load actual entry point from function descriptor */
	PPC_BPF_LL(12, 12, 0);
#endif

	PPC_MTLR(12);
	PPC_BLRL();
}

static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
{
	/*
	 * By now, the eBPF program has already setup parameters in r3, r4 and r5
	 * r3/BPF_REG_1 - pointer to ctx -- passed as is to the next bpf program
	 * r4/BPF_REG_2 - pointer to bpf_array
	 * r5/BPF_REG_3 - index in bpf_array
	 */
	int b2p_bpf_array = b2p[BPF_REG_2];
	int b2p_index = b2p[BPF_REG_3];

	/*
	 * if (index >= array->map.max_entries)
	 *   goto out;
	 */
	PPC_LWZ(b2p[TMP_REG_1], b2p_bpf_array, offsetof(struct bpf_array, map.max_entries));
	PPC_RLWINM(b2p_index, b2p_index, 0, 0, 31);
	PPC_CMPLW(b2p_index, b2p[TMP_REG_1]);
	PPC_BCC(COND_GE, out);

	/*
	 * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
	 *   goto out;
	 */
	PPC_LD(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));
	PPC_CMPLWI(b2p[TMP_REG_1], MAX_TAIL_CALL_CNT);
	PPC_BCC(COND_GT, out);

	/*
	 * tail_call_cnt++;
	 */
	PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], 1);
	PPC_BPF_STL(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));

	/* prog = array->ptrs[index]; */
	PPC_MULI(b2p[TMP_REG_1], b2p_index, 8);
	PPC_ADD(b2p[TMP_REG_1], b2p[TMP_REG_1], b2p_bpf_array);
	PPC_LD(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_array, ptrs));

	/*
	 * if (prog == NULL)
	 *   goto out;
	 */
	PPC_CMPLDI(b2p[TMP_REG_1], 0);
	PPC_BCC(COND_EQ, out);

	/* goto *(prog->bpf_func + prologue_size); */
	PPC_LD(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_prog, bpf_func));
#ifdef PPC64_ELF_ABI_v1
	/* skip past the function descriptor */
	PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1],
			FUNCTION_DESCR_SIZE + BPF_TAILCALL_PROLOGUE_SIZE);
#else
	PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], BPF_TAILCALL_PROLOGUE_SIZE);
#endif
	PPC_MTCTR(b2p[TMP_REG_1]);

	/* tear down stack, restore NVRs, ... */
	bpf_jit_emit_common_epilogue(image, ctx);

	PPC_BCTR();
	/* out: */
}

/* Assemble the body code between the prologue & epilogue */
static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
			      struct codegen_context *ctx,
			      u32 *addrs, bool extra_pass)
{
	const struct bpf_insn *insn = fp->insnsi;
	int flen = fp->len;
	int i;

	/* Start of epilogue code - will only be valid 2nd pass onwards */
	u32 exit_addr = addrs[flen];

	for (i = 0; i < flen; i++) {
		u32 code = insn[i].code;
		u32 dst_reg = b2p[insn[i].dst_reg];
		u32 src_reg = b2p[insn[i].src_reg];
		s16 off = insn[i].off;
		s32 imm = insn[i].imm;
		u64 imm64;
		u8 *func;
		u32 true_cond;

		/*
		 * addrs[] maps a BPF bytecode address into a real offset from
		 * the start of the body code.
		 */
		addrs[i] = ctx->idx * 4;

		/*
		 * As an optimization, we note down which non-volatile registers
		 * are used so that we can only save/restore those in our
		 * prologue and epilogue. We do this here regardless of whether
		 * the actual BPF instruction uses src/dst registers or not
		 * (for instance, BPF_CALL does not use them). The expectation
		 * is that those instructions will have src_reg/dst_reg set to
		 * 0. Even otherwise, we just lose some prologue/epilogue
		 * optimization but everything else should work without
		 * any issues.
		 */
		if (dst_reg >= BPF_PPC_NVR_MIN && dst_reg < 32)
			bpf_set_seen_register(ctx, insn[i].dst_reg);
		if (src_reg >= BPF_PPC_NVR_MIN && src_reg < 32)
			bpf_set_seen_register(ctx, insn[i].src_reg);

		switch (code) {
		/*
		 * Arithmetic operations: ADD/SUB/MUL/DIV/MOD/NEG
		 */
		case BPF_ALU | BPF_ADD | BPF_X: /* (u32) dst += (u32) src */
		case BPF_ALU64 | BPF_ADD | BPF_X: /* dst += src */
			PPC_ADD(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_SUB | BPF_X: /* (u32) dst -= (u32) src */
		case BPF_ALU64 | BPF_SUB | BPF_X: /* dst -= src */
			PPC_SUB(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
		case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
		case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
		case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
			if (BPF_OP(code) == BPF_SUB)
				imm = -imm;
			if (imm) {
				if (imm >= -32768 && imm < 32768)
					PPC_ADDI(dst_reg, dst_reg, IMM_L(imm));
				else {
					PPC_LI32(b2p[TMP_REG_1], imm);
					PPC_ADD(dst_reg, dst_reg, b2p[TMP_REG_1]);
				}
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
		case BPF_ALU64 | BPF_MUL | BPF_X: /* dst *= src */
			if (BPF_CLASS(code) == BPF_ALU)
				PPC_MULW(dst_reg, dst_reg, src_reg);
			else
				PPC_MULD(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_MUL | BPF_K: /* (u32) dst *= (u32) imm */
		case BPF_ALU64 | BPF_MUL | BPF_K: /* dst *= imm */
			if (imm >= -32768 && imm < 32768)
				PPC_MULI(dst_reg, dst_reg, IMM_L(imm));
			else {
				PPC_LI32(b2p[TMP_REG_1], imm);
				if (BPF_CLASS(code) == BPF_ALU)
					PPC_MULW(dst_reg, dst_reg,
							b2p[TMP_REG_1]);
				else
					PPC_MULD(dst_reg, dst_reg,
							b2p[TMP_REG_1]);
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
		case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
			if (BPF_OP(code) == BPF_MOD) {
				PPC_DIVWU(b2p[TMP_REG_1], dst_reg, src_reg);
				PPC_MULW(b2p[TMP_REG_1], src_reg,
						b2p[TMP_REG_1]);
				PPC_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]);
			} else
				PPC_DIVWU(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
		case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */
			if (BPF_OP(code) == BPF_MOD) {
				PPC_DIVD(b2p[TMP_REG_1], dst_reg, src_reg);
				PPC_MULD(b2p[TMP_REG_1], src_reg,
						b2p[TMP_REG_1]);
				PPC_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]);
			} else
				PPC_DIVD(dst_reg, dst_reg, src_reg);
			break;
		case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
		case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */
		case BPF_ALU64 | BPF_MOD | BPF_K: /* dst %= imm */
		case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
			if (imm == 0)
				return -EINVAL;
			else if (imm == 1)
				goto bpf_alu32_trunc;

			PPC_LI32(b2p[TMP_REG_1], imm);
			switch (BPF_CLASS(code)) {
			case BPF_ALU:
				if (BPF_OP(code) == BPF_MOD) {
					PPC_DIVWU(b2p[TMP_REG_2], dst_reg,
							b2p[TMP_REG_1]);
					PPC_MULW(b2p[TMP_REG_1],
							b2p[TMP_REG_1],
							b2p[TMP_REG_2]);
					PPC_SUB(dst_reg, dst_reg,
							b2p[TMP_REG_1]);
				} else
					PPC_DIVWU(dst_reg, dst_reg,
							b2p[TMP_REG_1]);
				break;
			case BPF_ALU64:
				if (BPF_OP(code) == BPF_MOD) {
					PPC_DIVD(b2p[TMP_REG_2], dst_reg,
							b2p[TMP_REG_1]);
					PPC_MULD(b2p[TMP_REG_1],
							b2p[TMP_REG_1],
							b2p[TMP_REG_2]);
					PPC_SUB(dst_reg, dst_reg,
							b2p[TMP_REG_1]);
				} else
					PPC_DIVD(dst_reg, dst_reg,
							b2p[TMP_REG_1]);
				break;
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_NEG: /* (u32) dst = -dst */
		case BPF_ALU64 | BPF_NEG: /* dst = -dst */
			PPC_NEG(dst_reg, dst_reg);
			goto bpf_alu32_trunc;

		/*
		 * Logical operations: AND/OR/XOR/[A]LSH/[A]RSH
		 */
		case BPF_ALU | BPF_AND | BPF_X: /* (u32) dst = dst & src */
		case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
			PPC_AND(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_AND | BPF_K: /* (u32) dst = dst & imm */
		case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
			if (!IMM_H(imm))
				PPC_ANDI(dst_reg, dst_reg, IMM_L(imm));
			else {
				/* Sign-extended */
				PPC_LI32(b2p[TMP_REG_1], imm);
				PPC_AND(dst_reg, dst_reg, b2p[TMP_REG_1]);
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
		case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
			PPC_OR(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_OR | BPF_K:/* dst = (u32) dst | (u32) imm */
		case BPF_ALU64 | BPF_OR | BPF_K:/* dst = dst | imm */
			if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
				/* Sign-extended */
				PPC_LI32(b2p[TMP_REG_1], imm);
				PPC_OR(dst_reg, dst_reg, b2p[TMP_REG_1]);
			} else {
				if (IMM_L(imm))
					PPC_ORI(dst_reg, dst_reg, IMM_L(imm));
				if (IMM_H(imm))
					PPC_ORIS(dst_reg, dst_reg, IMM_H(imm));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_XOR | BPF_X: /* (u32) dst ^= src */
		case BPF_ALU64 | BPF_XOR | BPF_X: /* dst ^= src */
			PPC_XOR(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_XOR | BPF_K: /* (u32) dst ^= (u32) imm */
		case BPF_ALU64 | BPF_XOR | BPF_K: /* dst ^= imm */
			if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
				/* Sign-extended */
				PPC_LI32(b2p[TMP_REG_1], imm);
				PPC_XOR(dst_reg, dst_reg, b2p[TMP_REG_1]);
			} else {
				if (IMM_L(imm))
					PPC_XORI(dst_reg, dst_reg, IMM_L(imm));
				if (IMM_H(imm))
					PPC_XORIS(dst_reg, dst_reg, IMM_H(imm));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_LSH | BPF_X: /* (u32) dst <<= (u32) src */
			/* slw clears top 32 bits */
			PPC_SLW(dst_reg, dst_reg, src_reg);
			break;
		case BPF_ALU64 | BPF_LSH | BPF_X: /* dst <<= src; */
			PPC_SLD(dst_reg, dst_reg, src_reg);
			break;
		case BPF_ALU | BPF_LSH | BPF_K: /* (u32) dst <<= (u32) imm */
			/* with imm 0, we still need to clear top 32 bits */
			PPC_SLWI(dst_reg, dst_reg, imm);
			break;
		case BPF_ALU64 | BPF_LSH | BPF_K: /* dst <<= imm */
			if (imm != 0)
				PPC_SLDI(dst_reg, dst_reg, imm);
			break;
		case BPF_ALU | BPF_RSH | BPF_X: /* (u32) dst >>= (u32) src */
			PPC_SRW(dst_reg, dst_reg, src_reg);
			break;
		case BPF_ALU64 | BPF_RSH | BPF_X: /* dst >>= src */
			PPC_SRD(dst_reg, dst_reg, src_reg);
			break;
		case BPF_ALU | BPF_RSH | BPF_K: /* (u32) dst >>= (u32) imm */
			PPC_SRWI(dst_reg, dst_reg, imm);
			break;
		case BPF_ALU64 | BPF_RSH | BPF_K: /* dst >>= imm */
			if (imm != 0)
				PPC_SRDI(dst_reg, dst_reg, imm);
			break;
		case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
			PPC_SRAD(dst_reg, dst_reg, src_reg);
			break;
		case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */
			if (imm != 0)
				PPC_SRADI(dst_reg, dst_reg, imm);
			break;

		/*
		 * MOV
		 */
		case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */
		case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
			PPC_MR(dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */
		case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = (s64) imm */
			PPC_LI32(dst_reg, imm);
			if (imm < 0)
				goto bpf_alu32_trunc;
			break;

bpf_alu32_trunc:
		/* Truncate to 32-bits */
		if (BPF_CLASS(code) == BPF_ALU)
			PPC_RLWINM(dst_reg, dst_reg, 0, 0, 31);
		break;

		/*
		 * BPF_FROM_BE/LE
		 */
		case BPF_ALU | BPF_END | BPF_FROM_LE:
		case BPF_ALU | BPF_END | BPF_FROM_BE:
#ifdef __BIG_ENDIAN__
			if (BPF_SRC(code) == BPF_FROM_BE)
				goto emit_clear;
#else /* !__BIG_ENDIAN__ */
			if (BPF_SRC(code) == BPF_FROM_LE)
				goto emit_clear;
#endif
			switch (imm) {
			case 16:
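				/*
				 * Worked example for illustration:
				 * dst = 0x1234. rlwinm (rotate left 8,
				 * mask 0x0000ff00) gives 0x3400; rlwimi
				 * (rotate left 24, insert bits 24-31)
				 * adds 0x12 in the low byte: 0x3412.
				 */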
				/* Rotate 8 bits left & mask with 0x0000ff00 */
				PPC_RLWINM(b2p[TMP_REG_1], dst_reg, 8, 16, 23);
				/* Rotate 8 bits right & insert LSB to reg */
				PPC_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 24, 31);
				/* Move result back to dst_reg */
				PPC_MR(dst_reg, b2p[TMP_REG_1]);
				break;
			case 32:
				/*
				 * Rotate word left by 8 bits:
				 * 2 bytes are already in their final position
				 * -- byte 2 and 4 (of bytes 1, 2, 3 and 4)
				 */
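				/*
				 * Worked example for illustration:
				 * dst = 0x12345678. Rotating left by 8
				 * gives 0x34567812 (bytes 2 and 4 already
				 * in place); the two rlwimi below insert
				 * 0x78 into byte 1 and 0x34 into byte 3,
				 * yielding 0x78563412.
				 */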
				PPC_RLWINM(b2p[TMP_REG_1], dst_reg, 8, 0, 31);
				/* Rotate 24 bits and insert byte 1 */
				PPC_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 0, 7);
				/* Rotate 24 bits and insert byte 3 */
				PPC_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 16, 23);
				PPC_MR(dst_reg, b2p[TMP_REG_1]);
				break;
			case 64:
				/*
				 * Way easier and faster(?) to store the value
				 * into stack and then use ldbrx
				 *
				 * ctx->seen will be reliable in pass2, but
				 * the instructions generated will remain the
				 * same across all passes
				 */
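				/*
				 * ldbrx loads the doubleword with its
				 * bytes reversed, so a normal store
				 * followed by a byte-reversed load swaps
				 * all eight bytes.
				 */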
				PPC_STD(dst_reg, 1, bpf_jit_stack_local(ctx));
				PPC_ADDI(b2p[TMP_REG_1], 1, bpf_jit_stack_local(ctx));
				PPC_LDBRX(dst_reg, 0, b2p[TMP_REG_1]);
				break;
			}
			break;

emit_clear:
			switch (imm) {
			case 16:
				/* zero-extend 16 bits into 64 bits */
				PPC_RLDICL(dst_reg, dst_reg, 0, 48);
				break;
			case 32:
				/* zero-extend 32 bits into 64 bits */
				PPC_RLDICL(dst_reg, dst_reg, 0, 32);
				break;
			case 64:
				/* nop */
				break;
			}
			break;

		/*
		 * BPF_ST(X)
		 */
		case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI(b2p[TMP_REG_1], imm);
				src_reg = b2p[TMP_REG_1];
			}
			PPC_STB(src_reg, dst_reg, off);
			break;
		case BPF_STX | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI(b2p[TMP_REG_1], imm);
				src_reg = b2p[TMP_REG_1];
			}
			PPC_STH(src_reg, dst_reg, off);
			break;
		case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI32(b2p[TMP_REG_1], imm);
				src_reg = b2p[TMP_REG_1];
			}
			PPC_STW(src_reg, dst_reg, off);
			break;
		case BPF_STX | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI32(b2p[TMP_REG_1], imm);
				src_reg = b2p[TMP_REG_1];
			}
			PPC_STD(src_reg, dst_reg, off);
			break;

		/*
		 * BPF_STX XADD (atomic_add)
		 */
		/* *(u32 *)(dst + off) += src */
		case BPF_STX | BPF_XADD | BPF_W:
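			/*
			 * Rather than looping back, the lwarx/stwcx.
			 * sequence below is simply retried once; if the
			 * second attempt also fails, we exit through the
			 * epilogue with a return value of 0.
			 */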
			/* Get EA into TMP_REG_1 */
			PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
			/* error if EA is not word-aligned */
			PPC_ANDI(b2p[TMP_REG_2], b2p[TMP_REG_1], 0x03);
			PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + 12);
			PPC_LI(b2p[BPF_REG_0], 0);
			PPC_JMP(exit_addr);
			/* load value from memory into TMP_REG_2 */
			PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
			/* add value from src_reg into this */
			PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
			/* store result back */
			PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
			/* we're done if this succeeded */
			PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (7*4));
			/* otherwise, let's try once more */
			PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
			PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
			PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
			/* exit if the store was not successful */
			PPC_LI(b2p[BPF_REG_0], 0);
			PPC_BCC(COND_NE, exit_addr);
			break;
		/* *(u64 *)(dst + off) += src */
		case BPF_STX | BPF_XADD | BPF_DW:
			PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
			/* error if EA is not doubleword-aligned */
			PPC_ANDI(b2p[TMP_REG_2], b2p[TMP_REG_1], 0x07);
			PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (3*4));
			PPC_LI(b2p[BPF_REG_0], 0);
			PPC_JMP(exit_addr);
			PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
			PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
			PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
			PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (7*4));
			PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
			PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
			PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
			PPC_LI(b2p[BPF_REG_0], 0);
			PPC_BCC(COND_NE, exit_addr);
			break;

		/*
		 * BPF_LDX
		 */
		/* dst = *(u8 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_B:
			PPC_LBZ(dst_reg, src_reg, off);
			break;
		/* dst = *(u16 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_H:
			PPC_LHZ(dst_reg, src_reg, off);
			break;
		/* dst = *(u32 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_W:
			PPC_LWZ(dst_reg, src_reg, off);
			break;
		/* dst = *(u64 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_DW:
			PPC_LD(dst_reg, src_reg, off);
			break;

		/*
		 * Doubleword load
		 * 16 byte instruction that uses two 'struct bpf_insn'
		 */
		case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
			imm64 = ((u64)(u32) insn[i].imm) |
				    (((u64)(u32) insn[i+1].imm) << 32);
			/* Adjust for two bpf instructions */
			addrs[++i] = ctx->idx * 4;
			PPC_LI64(dst_reg, imm64);
			break;

		/*
		 * Return/Exit
		 */
		case BPF_JMP | BPF_EXIT:
			/*
			 * If this isn't the very last instruction, branch to
			 * the epilogue. If we _are_ the last instruction,
			 * we'll just fall through to the epilogue.
			 */
			if (i != flen - 1)
				PPC_JMP(exit_addr);
			/* else fall through to the epilogue */
			break;

		/*
		 * Call kernel helper or bpf function
		 */
		case BPF_JMP | BPF_CALL:
			ctx->seen |= SEEN_FUNC;

			/* bpf function call */
			if (insn[i].src_reg == BPF_PSEUDO_CALL)
				if (!extra_pass)
					func = NULL;
				else if (fp->aux->func && off < fp->aux->func_cnt)
					/* use the subprog id from the off
					 * field to lookup the callee address
					 */
					func = (u8 *) fp->aux->func[off]->bpf_func;
				else
					return -EINVAL;
			/* kernel helper call */
			else
				func = (u8 *) __bpf_call_base + imm;

			bpf_jit_emit_func_call(image, ctx, (u64)func);

			/* move return value from r3 to BPF_REG_0 */
			PPC_MR(b2p[BPF_REG_0], 3);
			break;

		/*
		 * Jumps and branches
		 */
		case BPF_JMP | BPF_JA:
			PPC_JMP(addrs[i + 1 + off]);
			break;

		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSGT | BPF_K:
		case BPF_JMP | BPF_JSGT | BPF_X:
			true_cond = COND_GT;
			goto cond_branch;
		case BPF_JMP | BPF_JLT | BPF_K:
		case BPF_JMP | BPF_JLT | BPF_X:
		case BPF_JMP | BPF_JSLT | BPF_K:
		case BPF_JMP | BPF_JSLT | BPF_X:
			true_cond = COND_LT;
			goto cond_branch;
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JSGE | BPF_K:
		case BPF_JMP | BPF_JSGE | BPF_X:
			true_cond = COND_GE;
			goto cond_branch;
		case BPF_JMP | BPF_JLE | BPF_K:
		case BPF_JMP | BPF_JLE | BPF_X:
		case BPF_JMP | BPF_JSLE | BPF_K:
		case BPF_JMP | BPF_JSLE | BPF_X:
			true_cond = COND_LE;
			goto cond_branch;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
			true_cond = COND_EQ;
			goto cond_branch;
		case BPF_JMP | BPF_JNE | BPF_K:
		case BPF_JMP | BPF_JNE | BPF_X:
			true_cond = COND_NE;
			goto cond_branch;
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			true_cond = COND_NE;
			/* Fall through */

cond_branch:
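			/*
			 * true_cond now holds the CR0 condition to branch
			 * on. Emit the appropriate compare (or and. for
			 * JSET) below, followed by a conditional branch to
			 * the BPF target.
			 */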
			switch (code) {
			case BPF_JMP | BPF_JGT | BPF_X:
			case BPF_JMP | BPF_JLT | BPF_X:
			case BPF_JMP | BPF_JGE | BPF_X:
			case BPF_JMP | BPF_JLE | BPF_X:
			case BPF_JMP | BPF_JEQ | BPF_X:
			case BPF_JMP | BPF_JNE | BPF_X:
				/* unsigned comparison */
				PPC_CMPLD(dst_reg, src_reg);
				break;
			case BPF_JMP | BPF_JSGT | BPF_X:
			case BPF_JMP | BPF_JSLT | BPF_X:
			case BPF_JMP | BPF_JSGE | BPF_X:
			case BPF_JMP | BPF_JSLE | BPF_X:
				/* signed comparison */
				PPC_CMPD(dst_reg, src_reg);
				break;
			case BPF_JMP | BPF_JSET | BPF_X:
				PPC_AND_DOT(b2p[TMP_REG_1], dst_reg, src_reg);
				break;
			case BPF_JMP | BPF_JNE | BPF_K:
			case BPF_JMP | BPF_JEQ | BPF_K:
			case BPF_JMP | BPF_JGT | BPF_K:
			case BPF_JMP | BPF_JLT | BPF_K:
			case BPF_JMP | BPF_JGE | BPF_K:
			case BPF_JMP | BPF_JLE | BPF_K:
				/*
				 * Need sign-extended load, so only positive
				 * values can be used as imm in cmpldi
				 */
				if (imm >= 0 && imm < 32768)
					PPC_CMPLDI(dst_reg, imm);
				else {
					/* sign-extending load */
					PPC_LI32(b2p[TMP_REG_1], imm);
					/* ... but unsigned comparison */
					PPC_CMPLD(dst_reg, b2p[TMP_REG_1]);
				}
				break;
			case BPF_JMP | BPF_JSGT | BPF_K:
			case BPF_JMP | BPF_JSLT | BPF_K:
			case BPF_JMP | BPF_JSGE | BPF_K:
			case BPF_JMP | BPF_JSLE | BPF_K:
				/*
				 * signed comparison, so any 16-bit value
				 * can be used in cmpdi
				 */
				if (imm >= -32768 && imm < 32768)
					PPC_CMPDI(dst_reg, imm);
				else {
					PPC_LI32(b2p[TMP_REG_1], imm);
					PPC_CMPD(dst_reg, b2p[TMP_REG_1]);
				}
				break;
			case BPF_JMP | BPF_JSET | BPF_K:
				/* andi does not sign-extend the immediate */
				if (imm >= 0 && imm < 32768)
					/* PPC_ANDI is _only/always_ dot-form */
					PPC_ANDI(b2p[TMP_REG_1], dst_reg, imm);
				else {
					PPC_LI32(b2p[TMP_REG_1], imm);
					PPC_AND_DOT(b2p[TMP_REG_1], dst_reg,
						    b2p[TMP_REG_1]);
				}
				break;
			}
			PPC_BCC(true_cond, addrs[i + 1 + off]);
			break;

		/*
		 * Tail call
		 */
		case BPF_JMP | BPF_TAIL_CALL:
			ctx->seen |= SEEN_TAILCALL;
			bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
			break;

		default:
			/*
			 * The filter contains something cruel & unusual.
			 * We don't handle it, but also there shouldn't be
			 * anything missing from our list.
			 */
			pr_err_ratelimited("eBPF filter opcode %04x (@%d) unsupported\n",
					code, i);
			return -ENOTSUPP;
		}
	}

	/* Set end-of-body-code address for exit. */
	addrs[i] = ctx->idx * 4;

	return 0;
}

struct powerpc64_jit_data {
	struct bpf_binary_header *header;
	u32 *addrs;
	u8 *image;
	u32 proglen;
	struct codegen_context ctx;
};

struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
{
	u32 proglen;
	u32 alloclen;
	u8 *image = NULL;
	u32 *code_base;
	u32 *addrs;
	struct powerpc64_jit_data *jit_data;
	struct codegen_context cgctx;
	int pass;
	int flen;
	struct bpf_binary_header *bpf_hdr;
	struct bpf_prog *org_fp = fp;
	struct bpf_prog *tmp_fp;
	bool bpf_blinded = false;
	bool extra_pass = false;

	if (!fp->jit_requested)
		return org_fp;

	tmp_fp = bpf_jit_blind_constants(org_fp);
	if (IS_ERR(tmp_fp))
		return org_fp;

	if (tmp_fp != org_fp) {
		bpf_blinded = true;
		fp = tmp_fp;
	}

	jit_data = fp->aux->jit_data;
	if (!jit_data) {
		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
		if (!jit_data) {
			fp = org_fp;
			goto out;
		}
		fp->aux->jit_data = jit_data;
	}

	flen = fp->len;
	addrs = jit_data->addrs;
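	/*
	 * If addrs is already populated, this is the extra pass done for
	 * bpf-to-bpf calls: reuse the image, context and addrs from the
	 * initial compile and simply re-emit the code now that the callee
	 * addresses are known.
	 */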
	if (addrs) {
		cgctx = jit_data->ctx;
		image = jit_data->image;
		bpf_hdr = jit_data->header;
		proglen = jit_data->proglen;
		alloclen = proglen + FUNCTION_DESCR_SIZE;
		extra_pass = true;
		goto skip_init_ctx;
	}

	addrs = kcalloc(flen + 1, sizeof(*addrs), GFP_KERNEL);
	if (addrs == NULL) {
		fp = org_fp;
		goto out_addrs;
	}

	memset(&cgctx, 0, sizeof(struct codegen_context));

	/* Make sure that the stack is quadword aligned. */
	cgctx.stack_size = round_up(fp->aux->stack_depth, 16);

	/* Scouting faux-generate pass 0 */
	if (bpf_jit_build_body(fp, 0, &cgctx, addrs, false)) {
		/* We hit something illegal or unsupported. */
		fp = org_fp;
		goto out_addrs;
	}

	/*
	 * Pretend to build prologue, given the features we've seen.  This will
	 * update cgctx.idx as it pretends to output instructions, then we can
	 * calculate total size from idx.
	 */
	bpf_jit_build_prologue(0, &cgctx);
	bpf_jit_build_epilogue(0, &cgctx);
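	/*
	 * Note: with a NULL image pointer the emit macros only advance
	 * cgctx.idx without writing anything, so these sizing passes do not
	 * emit any instructions.
	 */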

	proglen = cgctx.idx * 4;
	alloclen = proglen + FUNCTION_DESCR_SIZE;

	bpf_hdr = bpf_jit_binary_alloc(alloclen, &image, 4,
			bpf_jit_fill_ill_insns);
	if (!bpf_hdr) {
		fp = org_fp;
		goto out_addrs;
	}

skip_init_ctx:
	code_base = (u32 *)(image + FUNCTION_DESCR_SIZE);

	/* Code generation passes 1-2 */
	for (pass = 1; pass < 3; pass++) {
		/* Now build the prologue, body code & epilogue for real. */
		cgctx.idx = 0;
		bpf_jit_build_prologue(code_base, &cgctx);
		bpf_jit_build_body(fp, code_base, &cgctx, addrs, extra_pass);
		bpf_jit_build_epilogue(code_base, &cgctx);

		if (bpf_jit_enable > 1)
			pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass,
				proglen - (cgctx.idx * 4), cgctx.seen);
	}

	if (bpf_jit_enable > 1)
		/*
		 * Note that we output the base address of the code_base
		 * rather than image, since opcodes are in code_base.
		 */
		bpf_jit_dump(flen, proglen, pass, code_base);

#ifdef PPC64_ELF_ABI_v1
	/* Function descriptor nastiness: Address + TOC */
	((u64 *)image)[0] = (u64)code_base;
	((u64 *)image)[1] = local_paca->kernel_toc;
#endif

	fp->bpf_func = (void *)image;
	fp->jited = 1;
	fp->jited_len = alloclen;

	bpf_flush_icache(bpf_hdr, (u8 *)bpf_hdr + (bpf_hdr->pages * PAGE_SIZE));
	if (!fp->is_func || extra_pass) {
out_addrs:
		kfree(addrs);
		kfree(jit_data);
		fp->aux->jit_data = NULL;
	} else {
		jit_data->addrs = addrs;
		jit_data->ctx = cgctx;
		jit_data->proglen = proglen;
		jit_data->image = image;
		jit_data->header = bpf_hdr;
	}

out:
	if (bpf_blinded)
		bpf_jit_prog_release_other(fp, fp == org_fp ? tmp_fp : org_fp);

	return fp;
}

/* Overriding bpf_jit_free() as we don't set images read-only. */
void bpf_jit_free(struct bpf_prog *fp)
{
	unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
	struct bpf_binary_header *bpf_hdr = (void *)addr;

	if (fp->jited)
		bpf_jit_binary_free(bpf_hdr);

	bpf_prog_unlock_free(fp);
}