// SPDX-License-Identifier: GPL-2.0-only
/*
 * bpf_jit_comp64.c: eBPF JIT compiler
 *
 * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
 *		  IBM Corporation
 *
 * Based on the powerpc classic BPF JIT compiler by Matt Evans
 */
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <asm/asm-compat.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <asm/kprobes.h>
#include <linux/bpf.h>
#include <asm/security_features.h>

#include "bpf_jit64.h"

static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
{
	/*
	 * We only need a stack frame if:
	 * - we call other functions (kernel helpers), or
	 * - the bpf program uses its stack area
	 * The latter condition is deduced from the usage of BPF_REG_FP
	 */
	return ctx->seen & SEEN_FUNC || bpf_is_seen_register(ctx, b2p[BPF_REG_FP]);
}

/*
 * When not setting up our own stackframe, the redzone usage is:
 *
 *		[	prev sp		] <-------------
 *		[	  ...       	] 		|
 * sp (r1) --->	[    stack pointer	] --------------
 *		[   nv gpr save area	] 5*8
 *		[    tail_call_cnt	] 8
 *		[    local_tmp_var	] 16
 *		[   unused red zone	] 208 bytes protected
 */
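/*
 * For example, with BPF_PPC_STACK_SAVE of 5*8 = 40 bytes for r27-r31 and
 * 24 bytes of JIT-internal locals (values assumed from bpf_jit64.h), the
 * redzone slots work out to: local_tmp_var at -64(r1)..-49(r1),
 * tail_call_cnt at -48(r1), and the nv gpr save area at -40(r1)..-1(r1).
 */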
static int bpf_jit_stack_local(struct codegen_context *ctx)
{
	if (bpf_has_stack_frame(ctx))
		return STACK_FRAME_MIN_SIZE + ctx->stack_size;
	else
		return -(BPF_PPC_STACK_SAVE + 24);
}

static int bpf_jit_stack_tailcallcnt(struct codegen_context *ctx)
{
	return bpf_jit_stack_local(ctx) + 16;
}

static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
{
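	/*
	 * Non-volatile GPRs are laid out 8 bytes apart just below the top of
	 * our stack frame (or below the previous frame when running
	 * frameless): r27 ends up 40 bytes below that boundary, r31 just
	 * 8 bytes below it.
	 */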
	if (reg >= BPF_PPC_NVR_MIN && reg < 32)
		return (bpf_has_stack_frame(ctx) ?
			(BPF_PPC_STACKFRAME + ctx->stack_size) : 0)
				- (8 * (32 - reg));

	pr_err("BPF JIT is asking about unknown registers");
	BUG();
}

void bpf_jit_realloc_regs(struct codegen_context *ctx)
{
}

void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
{
	int i;

	/*
	 * Initialize tail_call_cnt if we do tail calls.
	 * Otherwise, put in NOPs so that it can be skipped when we are
	 * invoked through a tail call.
	 */
	if (ctx->seen & SEEN_TAILCALL) {
		EMIT(PPC_RAW_LI(b2p[TMP_REG_1], 0));
		/* this goes in the redzone */
		PPC_BPF_STL(b2p[TMP_REG_1], 1, -(BPF_PPC_STACK_SAVE + 8));
	} else {
		EMIT(PPC_RAW_NOP());
		EMIT(PPC_RAW_NOP());
	}

#define BPF_TAILCALL_PROLOGUE_SIZE	8
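	/*
	 * Those two fixed-size instructions above are all that a tail call
	 * needs to skip: bpf_jit_emit_tail_call() branches to
	 * bpf_func + BPF_TAILCALL_PROLOGUE_SIZE (8 bytes, i.e. two
	 * instructions) so that tail_call_cnt is not re-initialized.
	 */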

	if (bpf_has_stack_frame(ctx)) {
		/*
		 * We need a stack frame, but we don't necessarily need to
		 * save/restore LR unless we call other functions
		 */
		if (ctx->seen & SEEN_FUNC) {
			EMIT(PPC_RAW_MFLR(_R0));
			PPC_BPF_STL(0, 1, PPC_LR_STKOFF);
		}

		PPC_BPF_STLU(1, 1, -(BPF_PPC_STACKFRAME + ctx->stack_size));
	}

	/*
	 * Back up non-volatile regs -- BPF registers 6-10
	 * If we haven't created our own stack frame, we save these
	 * in the protected zone below the previous stack frame
	 */
	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
		if (bpf_is_seen_register(ctx, b2p[i]))
			PPC_BPF_STL(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i]));

	/* Setup frame pointer to point to the bpf stack area */
	if (bpf_is_seen_register(ctx, b2p[BPF_REG_FP]))
		EMIT(PPC_RAW_ADDI(b2p[BPF_REG_FP], 1,
				STACK_FRAME_MIN_SIZE + ctx->stack_size));
}

static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx)
{
	int i;

	/* Restore NVRs */
	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
		if (bpf_is_seen_register(ctx, b2p[i]))
			PPC_BPF_LL(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i]));

	/* Tear down our stack frame */
	if (bpf_has_stack_frame(ctx)) {
		EMIT(PPC_RAW_ADDI(1, 1, BPF_PPC_STACKFRAME + ctx->stack_size));
		if (ctx->seen & SEEN_FUNC) {
			PPC_BPF_LL(0, 1, PPC_LR_STKOFF);
			EMIT(PPC_RAW_MTLR(0));
		}
	}
}

void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
{
	bpf_jit_emit_common_epilogue(image, ctx);

	/* Move result to r3 */
	EMIT(PPC_RAW_MR(3, b2p[BPF_REG_0]));

	EMIT(PPC_RAW_BLR());
}

static void bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx,
				       u64 func)
{
#ifdef PPC64_ELF_ABI_v1
	/* func points to the function descriptor */
	PPC_LI64(b2p[TMP_REG_2], func);
	/* Load actual entry point from function descriptor */
	PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_2], 0);
	/* ... and move it to CTR */
	EMIT(PPC_RAW_MTCTR(b2p[TMP_REG_1]));
	/*
	 * Load TOC from function descriptor at offset 8.
	 * We can clobber r2 since we get called through a
	 * function pointer (so caller will save/restore r2)
	 * and since we don't use a TOC ourselves.
	 */
	PPC_BPF_LL(2, b2p[TMP_REG_2], 8);
#else
	/* We can clobber r12 */
	PPC_FUNC_ADDR(12, func);
	EMIT(PPC_RAW_MTCTR(12));
#endif
	EMIT(PPC_RAW_BCTRL());
}

void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 func)
{
	unsigned int i, ctx_idx = ctx->idx;

	/* Load function address into r12 */
	PPC_LI64(12, func);

	/* For bpf-to-bpf function calls, the callee's address is unknown
	 * until the last extra pass. As seen above, we use PPC_LI64() to
	 * load the callee's address, but this may optimize the number of
	 * instructions required based on the nature of the address.
	 *
	 * Since we don't want the number of instructions emitted to change,
	 * we pad the optimized PPC_LI64() call with NOPs to guarantee that
	 * we always have a five-instruction sequence, which is the maximum
	 * that PPC_LI64() can emit.
	 */
	for (i = ctx->idx - ctx_idx; i < 5; i++)
		EMIT(PPC_RAW_NOP());
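	/*
	 * Note: the worst case for PPC_LI64() with an arbitrary 64-bit
	 * address is five instructions (roughly lis, ori, sldi, oris, ori),
	 * hence the padding up to five above.
	 */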

#ifdef PPC64_ELF_ABI_v1
	/*
	 * Load TOC from function descriptor at offset 8.
	 * We can clobber r2 since we get called through a
	 * function pointer (so caller will save/restore r2)
	 * and since we don't use a TOC ourselves.
	 */
	PPC_BPF_LL(2, 12, 8);
	/* Load actual entry point from function descriptor */
	PPC_BPF_LL(12, 12, 0);
#endif

	EMIT(PPC_RAW_MTCTR(12));
	EMIT(PPC_RAW_BCTRL());
}

static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
{
	/*
	 * By now, the eBPF program has already set up parameters in r3, r4 and r5
	 * r3/BPF_REG_1 - pointer to ctx -- passed as is to the next bpf program
	 * r4/BPF_REG_2 - pointer to bpf_array
	 * r5/BPF_REG_3 - index in bpf_array
	 */
	int b2p_bpf_array = b2p[BPF_REG_2];
	int b2p_index = b2p[BPF_REG_3];

	/*
	 * if (index >= array->map.max_entries)
	 *   goto out;
	 */
	EMIT(PPC_RAW_LWZ(b2p[TMP_REG_1], b2p_bpf_array, offsetof(struct bpf_array, map.max_entries)));
	EMIT(PPC_RAW_RLWINM(b2p_index, b2p_index, 0, 0, 31));
	EMIT(PPC_RAW_CMPLW(b2p_index, b2p[TMP_REG_1]));
	PPC_BCC(COND_GE, out);

	/*
	 * if (tail_call_cnt >= MAX_TAIL_CALL_CNT)
	 *   goto out;
	 */
	PPC_BPF_LL(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));
	EMIT(PPC_RAW_CMPLWI(b2p[TMP_REG_1], MAX_TAIL_CALL_CNT));
	PPC_BCC(COND_GE, out);

	/*
	 * tail_call_cnt++;
	 */
	EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], 1));
	PPC_BPF_STL(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));

	/* prog = array->ptrs[index]; */
	EMIT(PPC_RAW_MULI(b2p[TMP_REG_1], b2p_index, 8));
	EMIT(PPC_RAW_ADD(b2p[TMP_REG_1], b2p[TMP_REG_1], b2p_bpf_array));
	PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_array, ptrs));

	/*
	 * if (prog == NULL)
	 *   goto out;
	 */
	EMIT(PPC_RAW_CMPLDI(b2p[TMP_REG_1], 0));
	PPC_BCC(COND_EQ, out);

	/* goto *(prog->bpf_func + prologue_size); */
	PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_prog, bpf_func));
#ifdef PPC64_ELF_ABI_v1
	/* skip past the function descriptor */
	EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1],
			FUNCTION_DESCR_SIZE + BPF_TAILCALL_PROLOGUE_SIZE));
#else
	EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], BPF_TAILCALL_PROLOGUE_SIZE));
#endif
	EMIT(PPC_RAW_MTCTR(b2p[TMP_REG_1]));

	/* tear down stack, restore NVRs, ... */
	bpf_jit_emit_common_epilogue(image, ctx);

	EMIT(PPC_RAW_BCTR());

	/* out: */
	return 0;
}

/*
 * We spill into the redzone always, even if the bpf program has its own stackframe.
 * Offsets hardcoded based on BPF_PPC_STACK_SAVE -- see bpf_jit_stack_local()
 */
void bpf_stf_barrier(void);

asm (
"		.global bpf_stf_barrier		;"
"	bpf_stf_barrier:			;"
"		std	21,-64(1)		;"
"		std	22,-56(1)		;"
"		sync				;"
"		ld	21,-64(1)		;"
"		ld	22,-56(1)		;"
"		ori	31,31,0			;"
"		.rept 14			;"
"		b	1f			;"
"	1:					;"
"		.endr				;"
"		blr				;"
);
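/*
 * The fallback barrier above spills r21/r22 into the local_tmp_var redzone
 * slots (-64/-56 from r1), issues a sync, reloads the registers, executes the
 * special ori 31,31,0 no-op and then a run of taken branches -- the software
 * fallback sequence for the store-forwarding (STF) barrier mitigation.
 */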

/* Assemble the body code between the prologue & epilogue */
int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *ctx,
		       u32 *addrs, int pass)
{
	enum stf_barrier_type stf_barrier = stf_barrier_type_get();
	const struct bpf_insn *insn = fp->insnsi;
	int flen = fp->len;
	int i, ret;

	/* Start of epilogue code - will only be valid 2nd pass onwards */
	u32 exit_addr = addrs[flen];

	for (i = 0; i < flen; i++) {
		u32 code = insn[i].code;
		u32 dst_reg = b2p[insn[i].dst_reg];
		u32 src_reg = b2p[insn[i].src_reg];
		u32 size = BPF_SIZE(code);
		s16 off = insn[i].off;
		s32 imm = insn[i].imm;
		bool func_addr_fixed;
		u64 func_addr;
		u64 imm64;
		u32 true_cond;
		u32 tmp_idx;
		int j;

		/*
		 * addrs[] maps a BPF bytecode address into a real offset from
		 * the start of the body code.
		 */
		addrs[i] = ctx->idx * 4;

		/*
		 * As an optimization, we note down which non-volatile registers
		 * are used so that we can only save/restore those in our
		 * prologue and epilogue. We do this here regardless of whether
		 * the actual BPF instruction uses src/dst registers or not
		 * (for instance, BPF_CALL does not use them). The expectation
		 * is that those instructions will have src_reg/dst_reg set to
		 * 0. Even otherwise, we just lose some prologue/epilogue
		 * optimization but everything else should work without
		 * any issues.
		 */
		if (dst_reg >= BPF_PPC_NVR_MIN && dst_reg < 32)
			bpf_set_seen_register(ctx, dst_reg);
		if (src_reg >= BPF_PPC_NVR_MIN && src_reg < 32)
			bpf_set_seen_register(ctx, src_reg);

		switch (code) {
		/*
		 * Arithmetic operations: ADD/SUB/MUL/DIV/MOD/NEG
		 */
		case BPF_ALU | BPF_ADD | BPF_X: /* (u32) dst += (u32) src */
		case BPF_ALU64 | BPF_ADD | BPF_X: /* dst += src */
			EMIT(PPC_RAW_ADD(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_SUB | BPF_X: /* (u32) dst -= (u32) src */
		case BPF_ALU64 | BPF_SUB | BPF_X: /* dst -= src */
			EMIT(PPC_RAW_SUB(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
		case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
			if (!imm) {
				goto bpf_alu32_trunc;
			} else if (imm >= -32768 && imm < 32768) {
				EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm)));
			} else {
				PPC_LI32(b2p[TMP_REG_1], imm);
				EMIT(PPC_RAW_ADD(dst_reg, dst_reg, b2p[TMP_REG_1]));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
		case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
			if (!imm) {
				goto bpf_alu32_trunc;
			} else if (imm > -32768 && imm <= 32768) {
				EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(-imm)));
			} else {
				PPC_LI32(b2p[TMP_REG_1], imm);
				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
		case BPF_ALU64 | BPF_MUL | BPF_X: /* dst *= src */
			if (BPF_CLASS(code) == BPF_ALU)
				EMIT(PPC_RAW_MULW(dst_reg, dst_reg, src_reg));
			else
				EMIT(PPC_RAW_MULD(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_MUL | BPF_K: /* (u32) dst *= (u32) imm */
		case BPF_ALU64 | BPF_MUL | BPF_K: /* dst *= imm */
			if (imm >= -32768 && imm < 32768)
				EMIT(PPC_RAW_MULI(dst_reg, dst_reg, IMM_L(imm)));
			else {
				PPC_LI32(b2p[TMP_REG_1], imm);
				if (BPF_CLASS(code) == BPF_ALU)
					EMIT(PPC_RAW_MULW(dst_reg, dst_reg,
							b2p[TMP_REG_1]));
				else
					EMIT(PPC_RAW_MULD(dst_reg, dst_reg,
							b2p[TMP_REG_1]));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
		case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
			if (BPF_OP(code) == BPF_MOD) {
				EMIT(PPC_RAW_DIVWU(b2p[TMP_REG_1], dst_reg, src_reg));
				EMIT(PPC_RAW_MULW(b2p[TMP_REG_1], src_reg,
						b2p[TMP_REG_1]));
				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]));
			} else
				EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
		case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */
			if (BPF_OP(code) == BPF_MOD) {
				EMIT(PPC_RAW_DIVDU(b2p[TMP_REG_1], dst_reg, src_reg));
				EMIT(PPC_RAW_MULD(b2p[TMP_REG_1], src_reg,
						b2p[TMP_REG_1]));
				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]));
			} else
				EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
		case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */
		case BPF_ALU64 | BPF_MOD | BPF_K: /* dst %= imm */
		case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
			if (imm == 0)
				return -EINVAL;
			if (imm == 1) {
				if (BPF_OP(code) == BPF_DIV) {
					goto bpf_alu32_trunc;
				} else {
					EMIT(PPC_RAW_LI(dst_reg, 0));
					break;
				}
			}

			PPC_LI32(b2p[TMP_REG_1], imm);
			switch (BPF_CLASS(code)) {
			case BPF_ALU:
				if (BPF_OP(code) == BPF_MOD) {
					EMIT(PPC_RAW_DIVWU(b2p[TMP_REG_2],
							dst_reg,
							b2p[TMP_REG_1]));
					EMIT(PPC_RAW_MULW(b2p[TMP_REG_1],
							b2p[TMP_REG_1],
							b2p[TMP_REG_2]));
					EMIT(PPC_RAW_SUB(dst_reg, dst_reg,
							b2p[TMP_REG_1]));
				} else
					EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg,
							b2p[TMP_REG_1]));
				break;
			case BPF_ALU64:
				if (BPF_OP(code) == BPF_MOD) {
					EMIT(PPC_RAW_DIVDU(b2p[TMP_REG_2],
							dst_reg,
							b2p[TMP_REG_1]));
					EMIT(PPC_RAW_MULD(b2p[TMP_REG_1],
							b2p[TMP_REG_1],
							b2p[TMP_REG_2]));
					EMIT(PPC_RAW_SUB(dst_reg, dst_reg,
							b2p[TMP_REG_1]));
				} else
					EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg,
							b2p[TMP_REG_1]));
				break;
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_NEG: /* (u32) dst = -dst */
		case BPF_ALU64 | BPF_NEG: /* dst = -dst */
			EMIT(PPC_RAW_NEG(dst_reg, dst_reg));
			goto bpf_alu32_trunc;

		/*
		 * Logical operations: AND/OR/XOR/[A]LSH/[A]RSH
		 */
		case BPF_ALU | BPF_AND | BPF_X: /* (u32) dst = dst & src */
		case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
			EMIT(PPC_RAW_AND(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_AND | BPF_K: /* (u32) dst = dst & imm */
		case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
			if (!IMM_H(imm))
				EMIT(PPC_RAW_ANDI(dst_reg, dst_reg, IMM_L(imm)));
			else {
				/* Sign-extended */
				PPC_LI32(b2p[TMP_REG_1], imm);
				EMIT(PPC_RAW_AND(dst_reg, dst_reg, b2p[TMP_REG_1]));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
		case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
			EMIT(PPC_RAW_OR(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_OR | BPF_K:/* dst = (u32) dst | (u32) imm */
		case BPF_ALU64 | BPF_OR | BPF_K:/* dst = dst | imm */
			if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
				/* Sign-extended */
				PPC_LI32(b2p[TMP_REG_1], imm);
				EMIT(PPC_RAW_OR(dst_reg, dst_reg, b2p[TMP_REG_1]));
			} else {
				if (IMM_L(imm))
					EMIT(PPC_RAW_ORI(dst_reg, dst_reg, IMM_L(imm)));
				if (IMM_H(imm))
					EMIT(PPC_RAW_ORIS(dst_reg, dst_reg, IMM_H(imm)));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_XOR | BPF_X: /* (u32) dst ^= src */
		case BPF_ALU64 | BPF_XOR | BPF_X: /* dst ^= src */
			EMIT(PPC_RAW_XOR(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_XOR | BPF_K: /* (u32) dst ^= (u32) imm */
		case BPF_ALU64 | BPF_XOR | BPF_K: /* dst ^= imm */
			if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
				/* Sign-extended */
				PPC_LI32(b2p[TMP_REG_1], imm);
				EMIT(PPC_RAW_XOR(dst_reg, dst_reg, b2p[TMP_REG_1]));
			} else {
				if (IMM_L(imm))
					EMIT(PPC_RAW_XORI(dst_reg, dst_reg, IMM_L(imm)));
				if (IMM_H(imm))
					EMIT(PPC_RAW_XORIS(dst_reg, dst_reg, IMM_H(imm)));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_LSH | BPF_X: /* (u32) dst <<= (u32) src */
			/* slw clears top 32 bits */
			EMIT(PPC_RAW_SLW(dst_reg, dst_reg, src_reg));
			/* skip zero extension move, but set address map. */
			if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
			break;
		case BPF_ALU64 | BPF_LSH | BPF_X: /* dst <<= src; */
			EMIT(PPC_RAW_SLD(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU | BPF_LSH | BPF_K: /* (u32) dst <<= (u32) imm */
			/* with imm 0, we still need to clear top 32 bits */
			EMIT(PPC_RAW_SLWI(dst_reg, dst_reg, imm));
			if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
			break;
		case BPF_ALU64 | BPF_LSH | BPF_K: /* dst <<= imm */
			if (imm != 0)
				EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, imm));
			break;
		case BPF_ALU | BPF_RSH | BPF_X: /* (u32) dst >>= (u32) src */
			EMIT(PPC_RAW_SRW(dst_reg, dst_reg, src_reg));
			if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
			break;
		case BPF_ALU64 | BPF_RSH | BPF_X: /* dst >>= src */
			EMIT(PPC_RAW_SRD(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU | BPF_RSH | BPF_K: /* (u32) dst >>= (u32) imm */
			EMIT(PPC_RAW_SRWI(dst_reg, dst_reg, imm));
			if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
			break;
		case BPF_ALU64 | BPF_RSH | BPF_K: /* dst >>= imm */
			if (imm != 0)
				EMIT(PPC_RAW_SRDI(dst_reg, dst_reg, imm));
			break;
		case BPF_ALU | BPF_ARSH | BPF_X: /* (s32) dst >>= src */
			EMIT(PPC_RAW_SRAW(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
			EMIT(PPC_RAW_SRAD(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU | BPF_ARSH | BPF_K: /* (s32) dst >>= imm */
			EMIT(PPC_RAW_SRAWI(dst_reg, dst_reg, imm));
			goto bpf_alu32_trunc;
		case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */
			if (imm != 0)
				EMIT(PPC_RAW_SRADI(dst_reg, dst_reg, imm));
			break;

		/*
		 * MOV
		 */
		case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */
		case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
			if (imm == 1) {
				/* special mov32 for zext */
				EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
				break;
			}
			EMIT(PPC_RAW_MR(dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */
		case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = (s64) imm */
			PPC_LI32(dst_reg, imm);
			if (imm < 0)
				goto bpf_alu32_trunc;
			else if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
			break;

bpf_alu32_trunc:
		/* Truncate to 32-bits */
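		/*
		 * rlwinm dst, dst, 0, 0, 31 keeps only the low word, so the
		 * result of a 32-bit ALU op is zero-extended unless the
		 * verifier has already guaranteed zero extension.
		 */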
		if (BPF_CLASS(code) == BPF_ALU && !fp->aux->verifier_zext)
			EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
		break;

		/*
		 * BPF_FROM_BE/LE
		 */
		case BPF_ALU | BPF_END | BPF_FROM_LE:
		case BPF_ALU | BPF_END | BPF_FROM_BE:
#ifdef __BIG_ENDIAN__
			if (BPF_SRC(code) == BPF_FROM_BE)
				goto emit_clear;
#else /* !__BIG_ENDIAN__ */
			if (BPF_SRC(code) == BPF_FROM_LE)
				goto emit_clear;
#endif
			switch (imm) {
			case 16:
				/* Rotate 8 bits left & mask with 0x0000ff00 */
				EMIT(PPC_RAW_RLWINM(b2p[TMP_REG_1], dst_reg, 8, 16, 23));
				/* Rotate 8 bits right & insert LSB to reg */
				EMIT(PPC_RAW_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 24, 31));
				/* Move result back to dst_reg */
				EMIT(PPC_RAW_MR(dst_reg, b2p[TMP_REG_1]));
				break;
			case 32:
				/*
				 * Rotate word left by 8 bits:
				 * 2 bytes are already in their final position
				 * -- byte 2 and 4 (of bytes 1, 2, 3 and 4)
				 */
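				/*
				 * e.g. for input bytes 1 2 3 4 (MSB..LSB),
				 * the rotate yields 2 3 4 1 and the two
				 * rlwimi inserts below fix up byte positions
				 * 1 and 3, giving the swapped 4 3 2 1.
				 */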
				EMIT(PPC_RAW_RLWINM(b2p[TMP_REG_1], dst_reg, 8, 0, 31));
				/* Rotate 24 bits and insert byte 1 */
				EMIT(PPC_RAW_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 0, 7));
				/* Rotate 24 bits and insert byte 3 */
				EMIT(PPC_RAW_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 16, 23));
				EMIT(PPC_RAW_MR(dst_reg, b2p[TMP_REG_1]));
				break;
			case 64:
				/* Store the value to stack and then use byte-reverse loads */
				PPC_BPF_STL(dst_reg, 1, bpf_jit_stack_local(ctx));
				EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], 1, bpf_jit_stack_local(ctx)));
				if (cpu_has_feature(CPU_FTR_ARCH_206)) {
					EMIT(PPC_RAW_LDBRX(dst_reg, 0, b2p[TMP_REG_1]));
				} else {
					EMIT(PPC_RAW_LWBRX(dst_reg, 0, b2p[TMP_REG_1]));
					if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
						EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, 32));
					EMIT(PPC_RAW_LI(b2p[TMP_REG_2], 4));
					EMIT(PPC_RAW_LWBRX(b2p[TMP_REG_2], b2p[TMP_REG_2], b2p[TMP_REG_1]));
					if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
						EMIT(PPC_RAW_SLDI(b2p[TMP_REG_2], b2p[TMP_REG_2], 32));
					EMIT(PPC_RAW_OR(dst_reg, dst_reg, b2p[TMP_REG_2]));
				}
				break;
			}
			break;

emit_clear:
			switch (imm) {
			case 16:
				/* zero-extend 16 bits into 64 bits */
				EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 48));
				if (insn_is_zext(&insn[i + 1]))
					addrs[++i] = ctx->idx * 4;
				break;
			case 32:
				if (!fp->aux->verifier_zext)
					/* zero-extend 32 bits into 64 bits */
					EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 32));
				break;
			case 64:
				/* nop */
				break;
			}
			break;

		/*
		 * BPF_ST NOSPEC (speculation barrier)
		 */
		case BPF_ST | BPF_NOSPEC:
			if (!security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) ||
					!security_ftr_enabled(SEC_FTR_STF_BARRIER))
				break;

			switch (stf_barrier) {
			case STF_BARRIER_EIEIO:
				EMIT(PPC_RAW_EIEIO() | 0x02000000);
				break;
			case STF_BARRIER_SYNC_ORI:
				EMIT(PPC_RAW_SYNC());
				EMIT(PPC_RAW_LD(b2p[TMP_REG_1], _R13, 0));
				EMIT(PPC_RAW_ORI(_R31, _R31, 0));
				break;
			case STF_BARRIER_FALLBACK:
				EMIT(PPC_RAW_MFLR(b2p[TMP_REG_1]));
				PPC_LI64(12, dereference_kernel_function_descriptor(bpf_stf_barrier));
				EMIT(PPC_RAW_MTCTR(12));
				EMIT(PPC_RAW_BCTRL());
				EMIT(PPC_RAW_MTLR(b2p[TMP_REG_1]));
				break;
			case STF_BARRIER_NONE:
				break;
			}
			break;

		/*
		 * BPF_ST(X)
		 */
		case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				EMIT(PPC_RAW_LI(b2p[TMP_REG_1], imm));
				src_reg = b2p[TMP_REG_1];
			}
			EMIT(PPC_RAW_STB(src_reg, dst_reg, off));
			break;
		case BPF_STX | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				EMIT(PPC_RAW_LI(b2p[TMP_REG_1], imm));
				src_reg = b2p[TMP_REG_1];
			}
			EMIT(PPC_RAW_STH(src_reg, dst_reg, off));
			break;
		case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI32(b2p[TMP_REG_1], imm);
				src_reg = b2p[TMP_REG_1];
			}
			EMIT(PPC_RAW_STW(src_reg, dst_reg, off));
			break;
		case BPF_STX | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI32(b2p[TMP_REG_1], imm);
				src_reg = b2p[TMP_REG_1];
			}
			PPC_BPF_STL(src_reg, dst_reg, off);
			break;

		/*
		 * BPF_STX ATOMIC (atomic ops)
		 */
		case BPF_STX | BPF_ATOMIC | BPF_W:
			if (imm != BPF_ADD) {
				pr_err_ratelimited(
					"eBPF filter atomic op code %02x (@%d) unsupported\n",
					code, i);
				return -ENOTSUPP;
			}

			/* *(u32 *)(dst + off) += src */

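			/*
			 * Classic load-and-reserve/store-conditional loop:
			 * lwarx takes a reservation on the word, the add is
			 * applied, and stwcx. only succeeds if the
			 * reservation still holds; otherwise we branch back
			 * and retry.
			 */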
			/* Get EA into TMP_REG_1 */
			EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], dst_reg, off));
			tmp_idx = ctx->idx * 4;
			/* load value from memory into TMP_REG_2 */
			EMIT(PPC_RAW_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0));
			/* add value from src_reg into this */
			EMIT(PPC_RAW_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg));
			/* store result back */
			EMIT(PPC_RAW_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]));
			/* loop back and retry if the store-conditional failed */
			PPC_BCC_SHORT(COND_NE, tmp_idx);
			break;
		case BPF_STX | BPF_ATOMIC | BPF_DW:
			if (imm != BPF_ADD) {
				pr_err_ratelimited(
					"eBPF filter atomic op code %02x (@%d) unsupported\n",
					code, i);
				return -ENOTSUPP;
			}
			/* *(u64 *)(dst + off) += src */

			EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], dst_reg, off));
			tmp_idx = ctx->idx * 4;
			EMIT(PPC_RAW_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0));
			EMIT(PPC_RAW_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg));
			EMIT(PPC_RAW_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]));
			PPC_BCC_SHORT(COND_NE, tmp_idx);
			break;

		/*
		 * BPF_LDX
		 */
		/* dst = *(u8 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_B:
		case BPF_LDX | BPF_PROBE_MEM | BPF_B:
		/* dst = *(u16 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_H:
		case BPF_LDX | BPF_PROBE_MEM | BPF_H:
		/* dst = *(u32 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_W:
		case BPF_LDX | BPF_PROBE_MEM | BPF_W:
		/* dst = *(u64 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_DW:
		case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
			/*
			 * A PTR_TO_BTF_ID pointer accessed with BPF_PROBE_MEM mode is either a
			 * valid kernel pointer or NULL, but never a userspace address. So, only
			 * execute the BPF_PROBE_MEM load if the address is a kernel address
			 * (see is_kernel_addr()); otherwise set dst_reg to 0 and move on.
			 */
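			/*
			 * The guard below computes src + off into a temp,
			 * compares it against the lowest kernel address
			 * (PAGE_OFFSET, or the top bit set on BOOK3E), and if
			 * the address is not a kernel one it loads 0 into
			 * dst_reg and branches over the load instruction(s).
			 */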
			if (BPF_MODE(code) == BPF_PROBE_MEM) {
				EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], src_reg, off));
				if (IS_ENABLED(CONFIG_PPC_BOOK3E_64))
					PPC_LI64(b2p[TMP_REG_2], 0x8000000000000000ul);
				else /* BOOK3S_64 */
					PPC_LI64(b2p[TMP_REG_2], PAGE_OFFSET);
				EMIT(PPC_RAW_CMPLD(b2p[TMP_REG_1], b2p[TMP_REG_2]));
				PPC_BCC(COND_GT, (ctx->idx + 4) * 4);
				EMIT(PPC_RAW_LI(dst_reg, 0));
				/*
				 * Check if 'off' is word aligned because PPC_BPF_LL()
				 * (BPF_DW case) generates two instructions if 'off' is not
				 * word-aligned and one instruction otherwise.
				 */
				if (BPF_SIZE(code) == BPF_DW && (off & 3))
					PPC_JMP((ctx->idx + 3) * 4);
				else
					PPC_JMP((ctx->idx + 2) * 4);
			}

			switch (size) {
			case BPF_B:
				EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
				break;
			case BPF_H:
				EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off));
				break;
			case BPF_W:
				EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off));
				break;
			case BPF_DW:
				PPC_BPF_LL(dst_reg, src_reg, off);
				break;
			}

			if (size != BPF_DW && insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;

			if (BPF_MODE(code) == BPF_PROBE_MEM) {
				ret = bpf_add_extable_entry(fp, image, pass, ctx, ctx->idx - 1,
							    4, dst_reg);
				if (ret)
					return ret;
			}
			break;

		/*
		 * Doubleword load
		 * 16 byte instruction that uses two 'struct bpf_insn'
		 */
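		/*
		 * e.g. a 64-bit constant 0x1122334455667788 is split across
		 * the pair as insn[i].imm = 0x55667788 (low word) and
		 * insn[i+1].imm = 0x11223344 (high word).
		 */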
		case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
			imm64 = ((u64)(u32) insn[i].imm) |
				    (((u64)(u32) insn[i+1].imm) << 32);
			tmp_idx = ctx->idx;
			PPC_LI64(dst_reg, imm64);
			/* padding to allow full 5 instructions for later patching */
			for (j = ctx->idx - tmp_idx; j < 5; j++)
				EMIT(PPC_RAW_NOP());
			/* Adjust for two bpf instructions */
			addrs[++i] = ctx->idx * 4;
			break;

		/*
		 * Return/Exit
		 */
		case BPF_JMP | BPF_EXIT:
			/*
			 * If this isn't the very last instruction, branch to
			 * the epilogue. If we _are_ the last instruction,
			 * we'll just fall through to the epilogue.
			 */
			if (i != flen - 1)
				PPC_JMP(exit_addr);
			/* else fall through to the epilogue */
			break;

		/*
		 * Call kernel helper or bpf function
		 */
		case BPF_JMP | BPF_CALL:
			ctx->seen |= SEEN_FUNC;

			ret = bpf_jit_get_func_addr(fp, &insn[i], false,
						    &func_addr, &func_addr_fixed);
			if (ret < 0)
				return ret;

			if (func_addr_fixed)
				bpf_jit_emit_func_call_hlp(image, ctx, func_addr);
			else
				bpf_jit_emit_func_call_rel(image, ctx, func_addr);
			/* move return value from r3 to BPF_REG_0 */
			EMIT(PPC_RAW_MR(b2p[BPF_REG_0], 3));
			break;

		/*
		 * Jumps and branches
		 */
		case BPF_JMP | BPF_JA:
			PPC_JMP(addrs[i + 1 + off]);
			break;

		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSGT | BPF_K:
		case BPF_JMP | BPF_JSGT | BPF_X:
		case BPF_JMP32 | BPF_JGT | BPF_K:
		case BPF_JMP32 | BPF_JGT | BPF_X:
		case BPF_JMP32 | BPF_JSGT | BPF_K:
		case BPF_JMP32 | BPF_JSGT | BPF_X:
			true_cond = COND_GT;
			goto cond_branch;
		case BPF_JMP | BPF_JLT | BPF_K:
		case BPF_JMP | BPF_JLT | BPF_X:
		case BPF_JMP | BPF_JSLT | BPF_K:
		case BPF_JMP | BPF_JSLT | BPF_X:
		case BPF_JMP32 | BPF_JLT | BPF_K:
		case BPF_JMP32 | BPF_JLT | BPF_X:
		case BPF_JMP32 | BPF_JSLT | BPF_K:
		case BPF_JMP32 | BPF_JSLT | BPF_X:
			true_cond = COND_LT;
			goto cond_branch;
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JSGE | BPF_K:
		case BPF_JMP | BPF_JSGE | BPF_X:
		case BPF_JMP32 | BPF_JGE | BPF_K:
		case BPF_JMP32 | BPF_JGE | BPF_X:
		case BPF_JMP32 | BPF_JSGE | BPF_K:
		case BPF_JMP32 | BPF_JSGE | BPF_X:
			true_cond = COND_GE;
			goto cond_branch;
		case BPF_JMP | BPF_JLE | BPF_K:
		case BPF_JMP | BPF_JLE | BPF_X:
		case BPF_JMP | BPF_JSLE | BPF_K:
		case BPF_JMP | BPF_JSLE | BPF_X:
		case BPF_JMP32 | BPF_JLE | BPF_K:
		case BPF_JMP32 | BPF_JLE | BPF_X:
		case BPF_JMP32 | BPF_JSLE | BPF_K:
		case BPF_JMP32 | BPF_JSLE | BPF_X:
			true_cond = COND_LE;
			goto cond_branch;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP32 | BPF_JEQ | BPF_K:
		case BPF_JMP32 | BPF_JEQ | BPF_X:
			true_cond = COND_EQ;
			goto cond_branch;
		case BPF_JMP | BPF_JNE | BPF_K:
		case BPF_JMP | BPF_JNE | BPF_X:
		case BPF_JMP32 | BPF_JNE | BPF_K:
		case BPF_JMP32 | BPF_JNE | BPF_X:
			true_cond = COND_NE;
			goto cond_branch;
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
		case BPF_JMP32 | BPF_JSET | BPF_K:
		case BPF_JMP32 | BPF_JSET | BPF_X:
			true_cond = COND_NE;
			/* Fall through */

cond_branch:
			switch (code) {
			case BPF_JMP | BPF_JGT | BPF_X:
			case BPF_JMP | BPF_JLT | BPF_X:
			case BPF_JMP | BPF_JGE | BPF_X:
			case BPF_JMP | BPF_JLE | BPF_X:
			case BPF_JMP | BPF_JEQ | BPF_X:
			case BPF_JMP | BPF_JNE | BPF_X:
			case BPF_JMP32 | BPF_JGT | BPF_X:
			case BPF_JMP32 | BPF_JLT | BPF_X:
			case BPF_JMP32 | BPF_JGE | BPF_X:
			case BPF_JMP32 | BPF_JLE | BPF_X:
			case BPF_JMP32 | BPF_JEQ | BPF_X:
			case BPF_JMP32 | BPF_JNE | BPF_X:
				/* unsigned comparison */
				if (BPF_CLASS(code) == BPF_JMP32)
					EMIT(PPC_RAW_CMPLW(dst_reg, src_reg));
				else
					EMIT(PPC_RAW_CMPLD(dst_reg, src_reg));
				break;
			case BPF_JMP | BPF_JSGT | BPF_X:
			case BPF_JMP | BPF_JSLT | BPF_X:
			case BPF_JMP | BPF_JSGE | BPF_X:
			case BPF_JMP | BPF_JSLE | BPF_X:
			case BPF_JMP32 | BPF_JSGT | BPF_X:
			case BPF_JMP32 | BPF_JSLT | BPF_X:
			case BPF_JMP32 | BPF_JSGE | BPF_X:
			case BPF_JMP32 | BPF_JSLE | BPF_X:
				/* signed comparison */
				if (BPF_CLASS(code) == BPF_JMP32)
					EMIT(PPC_RAW_CMPW(dst_reg, src_reg));
				else
					EMIT(PPC_RAW_CMPD(dst_reg, src_reg));
				break;
			case BPF_JMP | BPF_JSET | BPF_X:
			case BPF_JMP32 | BPF_JSET | BPF_X:
				if (BPF_CLASS(code) == BPF_JMP) {
					EMIT(PPC_RAW_AND_DOT(b2p[TMP_REG_1], dst_reg,
						    src_reg));
				} else {
					int tmp_reg = b2p[TMP_REG_1];

					EMIT(PPC_RAW_AND(tmp_reg, dst_reg, src_reg));
					EMIT(PPC_RAW_RLWINM_DOT(tmp_reg, tmp_reg, 0, 0,
						       31));
				}
				break;
			case BPF_JMP | BPF_JNE | BPF_K:
			case BPF_JMP | BPF_JEQ | BPF_K:
			case BPF_JMP | BPF_JGT | BPF_K:
			case BPF_JMP | BPF_JLT | BPF_K:
			case BPF_JMP | BPF_JGE | BPF_K:
			case BPF_JMP | BPF_JLE | BPF_K:
			case BPF_JMP32 | BPF_JNE | BPF_K:
			case BPF_JMP32 | BPF_JEQ | BPF_K:
			case BPF_JMP32 | BPF_JGT | BPF_K:
			case BPF_JMP32 | BPF_JLT | BPF_K:
			case BPF_JMP32 | BPF_JGE | BPF_K:
			case BPF_JMP32 | BPF_JLE | BPF_K:
			{
				bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32;

				/*
				 * Need sign-extended load, so only positive
				 * values can be used as imm in cmpldi
				 */
				if (imm >= 0 && imm < 32768) {
					if (is_jmp32)
						EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
					else
						EMIT(PPC_RAW_CMPLDI(dst_reg, imm));
				} else {
					/* sign-extending load */
					PPC_LI32(b2p[TMP_REG_1], imm);
					/* ... but unsigned comparison */
					if (is_jmp32)
						EMIT(PPC_RAW_CMPLW(dst_reg,
							  b2p[TMP_REG_1]));
					else
						EMIT(PPC_RAW_CMPLD(dst_reg,
							  b2p[TMP_REG_1]));
				}
				break;
			}
			case BPF_JMP | BPF_JSGT | BPF_K:
			case BPF_JMP | BPF_JSLT | BPF_K:
			case BPF_JMP | BPF_JSGE | BPF_K:
			case BPF_JMP | BPF_JSLE | BPF_K:
			case BPF_JMP32 | BPF_JSGT | BPF_K:
			case BPF_JMP32 | BPF_JSLT | BPF_K:
			case BPF_JMP32 | BPF_JSGE | BPF_K:
			case BPF_JMP32 | BPF_JSLE | BPF_K:
			{
				bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32;

				/*
				 * signed comparison, so any 16-bit value
				 * can be used in cmpdi
				 */
				if (imm >= -32768 && imm < 32768) {
					if (is_jmp32)
						EMIT(PPC_RAW_CMPWI(dst_reg, imm));
					else
						EMIT(PPC_RAW_CMPDI(dst_reg, imm));
				} else {
					PPC_LI32(b2p[TMP_REG_1], imm);
					if (is_jmp32)
						EMIT(PPC_RAW_CMPW(dst_reg,
							 b2p[TMP_REG_1]));
					else
						EMIT(PPC_RAW_CMPD(dst_reg,
							 b2p[TMP_REG_1]));
				}
				break;
			}
			case BPF_JMP | BPF_JSET | BPF_K:
			case BPF_JMP32 | BPF_JSET | BPF_K:
				/* andi does not sign-extend the immediate */
				if (imm >= 0 && imm < 32768)
					/* PPC_ANDI is _only/always_ dot-form */
					EMIT(PPC_RAW_ANDI(b2p[TMP_REG_1], dst_reg, imm));
				else {
					int tmp_reg = b2p[TMP_REG_1];

					PPC_LI32(tmp_reg, imm);
					if (BPF_CLASS(code) == BPF_JMP) {
						EMIT(PPC_RAW_AND_DOT(tmp_reg, dst_reg,
							    tmp_reg));
					} else {
						EMIT(PPC_RAW_AND(tmp_reg, dst_reg,
							tmp_reg));
						EMIT(PPC_RAW_RLWINM_DOT(tmp_reg, tmp_reg,
							       0, 0, 31));
					}
				}
				break;
			}
			PPC_BCC(true_cond, addrs[i + 1 + off]);
			break;

		/*
		 * Tail call
		 */
		case BPF_JMP | BPF_TAIL_CALL:
			ctx->seen |= SEEN_TAILCALL;
			ret = bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
			if (ret < 0)
				return ret;
			break;

		default:
			/*
			 * The filter contains something cruel & unusual.
			 * We don't handle it, but also there shouldn't be
			 * anything missing from our list.
			 */
			pr_err_ratelimited("eBPF filter opcode %04x (@%d) unsupported\n",
					code, i);
			return -ENOTSUPP;
		}
	}

	/* Set end-of-body-code address for exit. */
	addrs[i] = ctx->idx * 4;

	return 0;
}