// SPDX-License-Identifier: GPL-2.0-only
/*
 * eBPF JIT compiler for PPC32
 *
 * Copyright 2020 Christophe Leroy <christophe.leroy@csgroup.eu>
 *		  CS GROUP France
 *
 * Based on PPC64 eBPF JIT compiler by Naveen N. Rao
 */
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <asm/asm-compat.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <asm/kprobes.h>
#include <linux/bpf.h>

#include "bpf_jit.h"

/*
 * Stack layout:
 *
 *		[	prev sp		] <-------------
 *		[   nv gpr save area	] 16 * 4	|
 * fp (r31) -->	[   ebpf stack space	] upto 512	|
 *		[     frame header	] 16		|
 * sp (r1) --->	[    stack pointer	] --------------
 */

/* for gpr non volatile registers r17 to r31 (15) + tail call */
#define BPF_PPC_STACK_SAVE	(15 * 4 + 4)
/* stack frame, ensure this is quadword aligned */
#define BPF_PPC_STACKFRAME(ctx)	(STACK_FRAME_MIN_SIZE + BPF_PPC_STACK_SAVE + (ctx)->stack_size)

#define PPC_EX32(r, i)		EMIT(PPC_RAW_LI((r), (i) < 0 ? -1 : 0))
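/*
 * PPC_EX32() materializes the high word of a sign-extended 32-bit
 * immediate: e.g. for imm = -2, PPC_LI32() puts 0xfffffffe in the low
 * register and PPC_EX32() puts -1 (0xffffffff) in the high one, giving
 * the 64-bit value 0xffffffff_fffffffe.
 */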

/* PPC NVR range -- update this if we ever use NVRs below r17 */
#define BPF_PPC_NVR_MIN		_R17
#define BPF_PPC_TC		_R16

/* BPF register usage */
#define TMP_REG			(MAX_BPF_JIT_REG + 0)

/* BPF to ppc register mappings */
void bpf_jit_init_reg_mapping(struct codegen_context *ctx)
{
	/* function return value */
	ctx->b2p[BPF_REG_0] = _R12;
	/* function arguments */
	ctx->b2p[BPF_REG_1] = _R4;
	ctx->b2p[BPF_REG_2] = _R6;
	ctx->b2p[BPF_REG_3] = _R8;
	ctx->b2p[BPF_REG_4] = _R10;
	ctx->b2p[BPF_REG_5] = _R22;
	/* non volatile registers */
	ctx->b2p[BPF_REG_6] = _R24;
	ctx->b2p[BPF_REG_7] = _R26;
	ctx->b2p[BPF_REG_8] = _R28;
	ctx->b2p[BPF_REG_9] = _R30;
	/* frame pointer aka BPF_REG_10 */
	ctx->b2p[BPF_REG_FP] = _R18;
	/* eBPF jit internal registers */
	ctx->b2p[BPF_REG_AX] = _R20;
	ctx->b2p[TMP_REG] = _R31;		/* 32 bits */
}
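
/*
 * Each 64-bit BPF register lives in a pair of adjacent GPRs: the
 * mapping above names the low word, and (reg - 1) holds the high word.
 * For example, BPF_REG_1 occupies r3:r4 -- r4 carries bits 0-31 and r3
 * bits 32-63 -- which is also where the ppc32 ABI places the first
 * 64-bit function argument.
 */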

static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
{
	if ((reg >= BPF_PPC_NVR_MIN && reg < 32) || reg == BPF_PPC_TC)
		return BPF_PPC_STACKFRAME(ctx) - 4 * (32 - reg);

	WARN(true, "BPF JIT is asking about unknown registers, will crash the stack");
	/* Use the hole we have left for alignment */
	return BPF_PPC_STACKFRAME(ctx) - 4;
}
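
/*
 * Example: r31 is saved at BPF_PPC_STACKFRAME(ctx) - 4 and r17 at
 * BPF_PPC_STACKFRAME(ctx) - 60, i.e. the NVR save area occupies the
 * top of the frame, directly below the previous stack pointer.
 */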

#define SEEN_VREG_MASK		0x1ff80000 /* Volatile registers r3-r12 */
#define SEEN_NVREG_FULL_MASK	0x0003ffff /* Non volatile registers r14-r31 */
#define SEEN_NVREG_TEMP_MASK	0x00001e01 /* BPF_REG_5, BPF_REG_AX, TMP_REG */

static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
{
	/*
	 * We only need a stack frame if:
	 * - we call other functions (kernel helpers), or
	 * - we use non volatile registers, or
	 * - we use the tail call counter, or
	 * - the bpf program uses its stack area
	 * The latter condition is deduced from the usage of BPF_REG_FP
	 */
	return ctx->seen & (SEEN_FUNC | SEEN_TAILCALL | SEEN_NVREG_FULL_MASK) ||
	       bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP));
}

void bpf_jit_realloc_regs(struct codegen_context *ctx)
{
	unsigned int nvreg_mask;

	if (ctx->seen & SEEN_FUNC)
		nvreg_mask = SEEN_NVREG_TEMP_MASK;
	else
		nvreg_mask = SEEN_NVREG_FULL_MASK;

	while (ctx->seen & nvreg_mask &&
	      (ctx->seen & SEEN_VREG_MASK) != SEEN_VREG_MASK) {
		int old = 32 - fls(ctx->seen & (nvreg_mask & 0xaaaaaaab));
		int new = 32 - fls(~ctx->seen & (SEEN_VREG_MASK & 0xaaaaaaaa));
		int i;

		for (i = BPF_REG_0; i <= TMP_REG; i++) {
			if (ctx->b2p[i] != old)
				continue;
			ctx->b2p[i] = new;
			bpf_set_seen_register(ctx, new);
			bpf_clear_seen_register(ctx, old);
			if (i != TMP_REG) {
				bpf_set_seen_register(ctx, new - 1);
				bpf_clear_seen_register(ctx, old - 1);
			}
			break;
		}
	}
}
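
/*
 * Illustration (an assumption about intent, deduced from the masks
 * above): a program that uses BPF_REG_6 (r23:r24) but never calls a
 * helper can have it remapped onto a free volatile pair such as
 * r9:r10, avoiding the save/restore of r23/r24 in the prologue and
 * epilogue. When helpers are called, only BPF_REG_5, BPF_REG_AX and
 * TMP_REG are candidates, as their values need not survive the call.
 */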

void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
{
	int i;

	/* Initialize tail_call_cnt, to be skipped if we do tail calls. */
	if (ctx->seen & SEEN_TAILCALL)
		EMIT(PPC_RAW_LI(_R4, 0));
	else
		EMIT(PPC_RAW_NOP());

#define BPF_TAILCALL_PROLOGUE_SIZE	4

	if (bpf_has_stack_frame(ctx))
		EMIT(PPC_RAW_STWU(_R1, _R1, -BPF_PPC_STACKFRAME(ctx)));

	if (ctx->seen & SEEN_TAILCALL)
		EMIT(PPC_RAW_STW(_R4, _R1, bpf_jit_stack_offsetof(ctx, BPF_PPC_TC)));

	/* First arg comes in as a 32-bit pointer. */
	EMIT(PPC_RAW_MR(bpf_to_ppc(BPF_REG_1), _R3));
	EMIT(PPC_RAW_LI(bpf_to_ppc(BPF_REG_1) - 1, 0));

	/*
	 * We need a stack frame, but we don't necessarily need to
	 * save/restore LR unless we call other functions
	 */
	if (ctx->seen & SEEN_FUNC)
		EMIT(PPC_RAW_MFLR(_R0));

	/*
	 * Back up non-volatile regs -- registers r18-r31
	 */
	for (i = BPF_PPC_NVR_MIN; i <= 31; i++)
		if (bpf_is_seen_register(ctx, i))
			EMIT(PPC_RAW_STW(i, _R1, bpf_jit_stack_offsetof(ctx, i)));

	/* Setup frame pointer to point to the bpf stack area */
	if (bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP))) {
		EMIT(PPC_RAW_LI(bpf_to_ppc(BPF_REG_FP) - 1, 0));
		EMIT(PPC_RAW_ADDI(bpf_to_ppc(BPF_REG_FP), _R1,
				  STACK_FRAME_MIN_SIZE + ctx->stack_size));
	}

	if (ctx->seen & SEEN_FUNC)
		EMIT(PPC_RAW_STW(_R0, _R1, BPF_PPC_STACKFRAME(ctx) + PPC_LR_STKOFF));
}

static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx)
{
	int i;

	/* Restore NVRs */
	for (i = BPF_PPC_NVR_MIN; i <= 31; i++)
		if (bpf_is_seen_register(ctx, i))
			EMIT(PPC_RAW_LWZ(i, _R1, bpf_jit_stack_offsetof(ctx, i)));

	if (ctx->seen & SEEN_FUNC)
		EMIT(PPC_RAW_LWZ(_R0, _R1, BPF_PPC_STACKFRAME(ctx) + PPC_LR_STKOFF));

	/* Tear down our stack frame */
	if (bpf_has_stack_frame(ctx))
		EMIT(PPC_RAW_ADDI(_R1, _R1, BPF_PPC_STACKFRAME(ctx)));

	if (ctx->seen & SEEN_FUNC)
		EMIT(PPC_RAW_MTLR(_R0));
}

void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
{
	EMIT(PPC_RAW_MR(_R3, bpf_to_ppc(BPF_REG_0)));

	bpf_jit_emit_common_epilogue(image, ctx);

	EMIT(PPC_RAW_BLR());
}

/* Relative offset needs to be calculated based on final image location */
int bpf_jit_emit_func_call_rel(u32 *image, u32 *fimage, struct codegen_context *ctx, u64 func)
{
	s32 rel = (s32)func - (s32)(fimage + ctx->idx);

	if (image && rel < 0x2000000 && rel >= -0x2000000) {
		EMIT(PPC_RAW_BL(rel));
	} else {
		/* Load function address into r0 */
		EMIT(PPC_RAW_LIS(_R0, IMM_H(func)));
		EMIT(PPC_RAW_ORI(_R0, _R0, IMM_L(func)));
		EMIT(PPC_RAW_MTCTR(_R0));
		EMIT(PPC_RAW_BCTRL());
	}

	return 0;
}
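
/*
 * For example, a helper within +/- 32 MB of the call site is reached
 * with a single "bl rel", while a distant (or not yet resolved)
 * address such as 0xc0123456 becomes:
 *	lis	r0, 0xc012
 *	ori	r0, r0, 0x3456
 *	mtctr	r0
 *	bctrl
 */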

static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
{
	/*
	 * By now, the eBPF program has already setup parameters in r3-r8
	 * r3-r4/BPF_REG_1 - pointer to ctx -- passed as is to the next bpf program
	 * r5-r6/BPF_REG_2 - pointer to bpf_array
	 * r7-r8/BPF_REG_3 - index in bpf_array
	 */
	int b2p_bpf_array = bpf_to_ppc(BPF_REG_2);
	int b2p_index = bpf_to_ppc(BPF_REG_3);

	/*
	 * if (index >= array->map.max_entries)
	 *   goto out;
	 */
	EMIT(PPC_RAW_LWZ(_R0, b2p_bpf_array, offsetof(struct bpf_array, map.max_entries)));
	EMIT(PPC_RAW_CMPLW(b2p_index, _R0));
	EMIT(PPC_RAW_LWZ(_R0, _R1, bpf_jit_stack_offsetof(ctx, BPF_PPC_TC)));
	PPC_BCC_SHORT(COND_GE, out);

	/*
	 * if (tail_call_cnt >= MAX_TAIL_CALL_CNT)
	 *   goto out;
	 */
	EMIT(PPC_RAW_CMPLWI(_R0, MAX_TAIL_CALL_CNT));
	/* tail_call_cnt++; */
	EMIT(PPC_RAW_ADDIC(_R0, _R0, 1));
	PPC_BCC_SHORT(COND_GE, out);

	/* prog = array->ptrs[index]; */
	EMIT(PPC_RAW_RLWINM(_R3, b2p_index, 2, 0, 29));
	EMIT(PPC_RAW_ADD(_R3, _R3, b2p_bpf_array));
	EMIT(PPC_RAW_LWZ(_R3, _R3, offsetof(struct bpf_array, ptrs)));

	/*
	 * if (prog == NULL)
	 *   goto out;
	 */
	EMIT(PPC_RAW_CMPLWI(_R3, 0));
	PPC_BCC_SHORT(COND_EQ, out);

	/* goto *(prog->bpf_func + prologue_size); */
	EMIT(PPC_RAW_LWZ(_R3, _R3, offsetof(struct bpf_prog, bpf_func)));
	EMIT(PPC_RAW_ADDIC(_R3, _R3, BPF_TAILCALL_PROLOGUE_SIZE));
	EMIT(PPC_RAW_MTCTR(_R3));

	EMIT(PPC_RAW_MR(_R3, bpf_to_ppc(BPF_REG_1)));

	/* Put tail_call_cnt in r4 */
	EMIT(PPC_RAW_MR(_R4, _R0));

	/* tear down stack, restore NVRs, ... */
	bpf_jit_emit_common_epilogue(image, ctx);

	EMIT(PPC_RAW_BCTR());

	/* out: */
	return 0;
}

/* Assemble the body code between the prologue & epilogue */
int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct codegen_context *ctx,
		       u32 *addrs, int pass, bool extra_pass)
{
	const struct bpf_insn *insn = fp->insnsi;
	int flen = fp->len;
	int i, ret;

	/* Start of epilogue code - will only be valid 2nd pass onwards */
	u32 exit_addr = addrs[flen];

	for (i = 0; i < flen; i++) {
		u32 code = insn[i].code;
		u32 prevcode = i ? insn[i - 1].code : 0;
		u32 dst_reg = bpf_to_ppc(insn[i].dst_reg);
		u32 dst_reg_h = dst_reg - 1;
		u32 src_reg = bpf_to_ppc(insn[i].src_reg);
		u32 src_reg_h = src_reg - 1;
		u32 src2_reg = dst_reg;
		u32 src2_reg_h = dst_reg_h;
		u32 ax_reg = bpf_to_ppc(BPF_REG_AX);
		u32 tmp_reg = bpf_to_ppc(TMP_REG);
		u32 size = BPF_SIZE(code);
		u32 save_reg, ret_reg;
		s16 off = insn[i].off;
		s32 imm = insn[i].imm;
		bool func_addr_fixed;
		u64 func_addr;
		u32 true_cond;
		u32 tmp_idx;
		int j;

		if (i && (BPF_CLASS(code) == BPF_ALU64 || BPF_CLASS(code) == BPF_ALU) &&
		    (BPF_CLASS(prevcode) == BPF_ALU64 || BPF_CLASS(prevcode) == BPF_ALU) &&
		    BPF_OP(prevcode) == BPF_MOV && BPF_SRC(prevcode) == BPF_X &&
		    insn[i - 1].dst_reg == insn[i].dst_reg && insn[i - 1].imm != 1) {
			src2_reg = bpf_to_ppc(insn[i - 1].src_reg);
			src2_reg_h = src2_reg - 1;
			ctx->idx = addrs[i - 1] / 4;
		}

		/*
		 * addrs[] maps a BPF bytecode address into a real offset from
		 * the start of the body code.
		 */
		addrs[i] = ctx->idx * 4;

		/*
		 * As an optimization, we note down which registers
		 * are used so that we can only save/restore those in our
		 * prologue and epilogue. We do this here regardless of whether
		 * the actual BPF instruction uses src/dst registers or not
		 * (for instance, BPF_CALL does not use them). The expectation
		 * is that those instructions will have src_reg/dst_reg set to
		 * 0. Even otherwise, we just lose some prologue/epilogue
		 * optimization but everything else should work without
		 * any issues.
		 */
		if (dst_reg >= 3 && dst_reg < 32) {
			bpf_set_seen_register(ctx, dst_reg);
			bpf_set_seen_register(ctx, dst_reg_h);
		}

		if (src_reg >= 3 && src_reg < 32) {
			bpf_set_seen_register(ctx, src_reg);
			bpf_set_seen_register(ctx, src_reg_h);
		}

		switch (code) {
		/*
		 * Arithmetic operations: ADD/SUB/MUL/DIV/MOD/NEG
		 */
		case BPF_ALU | BPF_ADD | BPF_X: /* (u32) dst += (u32) src */
			EMIT(PPC_RAW_ADD(dst_reg, src2_reg, src_reg));
			break;
		case BPF_ALU64 | BPF_ADD | BPF_X: /* dst += src */
			EMIT(PPC_RAW_ADDC(dst_reg, src2_reg, src_reg));
			EMIT(PPC_RAW_ADDE(dst_reg_h, src2_reg_h, src_reg_h));
			break;
		case BPF_ALU | BPF_SUB | BPF_X: /* (u32) dst -= (u32) src */
			EMIT(PPC_RAW_SUB(dst_reg, src2_reg, src_reg));
			break;
		case BPF_ALU64 | BPF_SUB | BPF_X: /* dst -= src */
			EMIT(PPC_RAW_SUBFC(dst_reg, src_reg, src2_reg));
			EMIT(PPC_RAW_SUBFE(dst_reg_h, src_reg_h, src2_reg_h));
			break;
		case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
			imm = -imm;
			fallthrough;
		case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
			if (!imm) {
				EMIT(PPC_RAW_MR(dst_reg, src2_reg));
			} else if (IMM_HA(imm) & 0xffff) {
				EMIT(PPC_RAW_ADDIS(dst_reg, src2_reg, IMM_HA(imm)));
				src2_reg = dst_reg;
			}
			if (IMM_L(imm))
				EMIT(PPC_RAW_ADDI(dst_reg, src2_reg, IMM_L(imm)));
			break;
		case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
			imm = -imm;
			fallthrough;
		case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
			if (!imm) {
				EMIT(PPC_RAW_MR(dst_reg, src2_reg));
				EMIT(PPC_RAW_MR(dst_reg_h, src2_reg_h));
				break;
			}
			if (imm >= -32768 && imm < 32768) {
				EMIT(PPC_RAW_ADDIC(dst_reg, src2_reg, imm));
			} else {
				PPC_LI32(_R0, imm);
				EMIT(PPC_RAW_ADDC(dst_reg, src2_reg, _R0));
			}
			if (imm >= 0 || (BPF_OP(code) == BPF_SUB && imm == 0x80000000))
				EMIT(PPC_RAW_ADDZE(dst_reg_h, src2_reg_h));
			else
				EMIT(PPC_RAW_ADDME(dst_reg_h, src2_reg_h));
			break;
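		/*
		 * The carry out of the low word is folded into the high word
		 * with addze/addme: e.g. "dst += 1" becomes
		 *	addic	rD, rS, 1
		 *	addze	rDh, rSh
		 * while "dst += -1" uses addme, which adds the carry plus
		 * 0xffffffff (the sign-extended high word of -1).
		 */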
		case BPF_ALU64 | BPF_MUL | BPF_X: /* dst *= src */
			bpf_set_seen_register(ctx, tmp_reg);
			EMIT(PPC_RAW_MULW(_R0, src2_reg, src_reg_h));
			EMIT(PPC_RAW_MULW(dst_reg_h, src2_reg_h, src_reg));
			EMIT(PPC_RAW_MULHWU(tmp_reg, src2_reg, src_reg));
			EMIT(PPC_RAW_MULW(dst_reg, src2_reg, src_reg));
			EMIT(PPC_RAW_ADD(dst_reg_h, dst_reg_h, _R0));
			EMIT(PPC_RAW_ADD(dst_reg_h, dst_reg_h, tmp_reg));
			break;
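		/*
		 * The 64x64->64 product above is decomposed into 32-bit
		 * pieces:
		 *	lo(dst) = lo(a) * lo(b)			(mulw)
		 *	hi(dst) = lo(a)*hi(b) + hi(a)*lo(b)
		 *		  + high half of lo(a)*lo(b)	(mulhwu)
		 * The hi(a)*hi(b) term would only affect bits 64-127, so it
		 * is dropped.
		 */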
		case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
			EMIT(PPC_RAW_MULW(dst_reg, src2_reg, src_reg));
			break;
		case BPF_ALU | BPF_MUL | BPF_K: /* (u32) dst *= (u32) imm */
			if (imm == 1) {
				EMIT(PPC_RAW_MR(dst_reg, src2_reg));
			} else if (imm == -1) {
				EMIT(PPC_RAW_SUBFIC(dst_reg, src2_reg, 0));
			} else if (is_power_of_2((u32)imm)) {
				EMIT(PPC_RAW_SLWI(dst_reg, src2_reg, ilog2(imm)));
			} else if (imm >= -32768 && imm < 32768) {
				EMIT(PPC_RAW_MULI(dst_reg, src2_reg, imm));
			} else {
				PPC_LI32(_R0, imm);
				EMIT(PPC_RAW_MULW(dst_reg, src2_reg, _R0));
			}
			break;
		case BPF_ALU64 | BPF_MUL | BPF_K: /* dst *= imm */
			if (!imm) {
				PPC_LI32(dst_reg, 0);
				PPC_LI32(dst_reg_h, 0);
			} else if (imm == 1) {
				EMIT(PPC_RAW_MR(dst_reg, src2_reg));
				EMIT(PPC_RAW_MR(dst_reg_h, src2_reg_h));
			} else if (imm == -1) {
				EMIT(PPC_RAW_SUBFIC(dst_reg, src2_reg, 0));
				EMIT(PPC_RAW_SUBFZE(dst_reg_h, src2_reg_h));
			} else if (imm > 0 && is_power_of_2(imm)) {
				imm = ilog2(imm);
				EMIT(PPC_RAW_RLWINM(dst_reg_h, src2_reg_h, imm, 0, 31 - imm));
				EMIT(PPC_RAW_RLWIMI(dst_reg_h, dst_reg, imm, 32 - imm, 31));
				EMIT(PPC_RAW_SLWI(dst_reg, src2_reg, imm));
			} else {
				bpf_set_seen_register(ctx, tmp_reg);
				PPC_LI32(tmp_reg, imm);
				EMIT(PPC_RAW_MULW(dst_reg_h, src2_reg_h, tmp_reg));
				if (imm < 0)
					EMIT(PPC_RAW_SUB(dst_reg_h, dst_reg_h, src2_reg));
				EMIT(PPC_RAW_MULHWU(_R0, src2_reg, tmp_reg));
				EMIT(PPC_RAW_MULW(dst_reg, src2_reg, tmp_reg));
				EMIT(PPC_RAW_ADD(dst_reg_h, dst_reg_h, _R0));
			}
			break;
		case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
			EMIT(PPC_RAW_DIVWU(dst_reg, src2_reg, src_reg));
			break;
		case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
			EMIT(PPC_RAW_DIVWU(_R0, src2_reg, src_reg));
			EMIT(PPC_RAW_MULW(_R0, src_reg, _R0));
			EMIT(PPC_RAW_SUB(dst_reg, src2_reg, _R0));
			break;
		case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
			return -EOPNOTSUPP;
		case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */
			return -EOPNOTSUPP;
		case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */
			if (!imm)
				return -EINVAL;
			if (imm == 1) {
				EMIT(PPC_RAW_MR(dst_reg, src2_reg));
			} else if (is_power_of_2((u32)imm)) {
				EMIT(PPC_RAW_SRWI(dst_reg, src2_reg, ilog2(imm)));
			} else {
				PPC_LI32(_R0, imm);
				EMIT(PPC_RAW_DIVWU(dst_reg, src2_reg, _R0));
			}
			break;
		case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
			if (!imm)
				return -EINVAL;

			if (!is_power_of_2((u32)imm)) {
				bpf_set_seen_register(ctx, tmp_reg);
				PPC_LI32(tmp_reg, imm);
				EMIT(PPC_RAW_DIVWU(_R0, src2_reg, tmp_reg));
				EMIT(PPC_RAW_MULW(_R0, tmp_reg, _R0));
				EMIT(PPC_RAW_SUB(dst_reg, src2_reg, _R0));
			} else if (imm == 1) {
				EMIT(PPC_RAW_LI(dst_reg, 0));
			} else {
				imm = ilog2((u32)imm);
				EMIT(PPC_RAW_RLWINM(dst_reg, src2_reg, 0, 32 - imm, 31));
			}
			break;
		case BPF_ALU64 | BPF_MOD | BPF_K: /* dst %= imm */
			if (!imm)
				return -EINVAL;
			if (imm < 0)
				imm = -imm;
			if (!is_power_of_2(imm))
				return -EOPNOTSUPP;
			if (imm == 1)
				EMIT(PPC_RAW_LI(dst_reg, 0));
			else
				EMIT(PPC_RAW_RLWINM(dst_reg, src2_reg, 0, 32 - ilog2(imm), 31));
			EMIT(PPC_RAW_LI(dst_reg_h, 0));
			break;
		case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
			if (!imm)
				return -EINVAL;
			if (!is_power_of_2(abs(imm)))
				return -EOPNOTSUPP;

			if (imm < 0) {
				EMIT(PPC_RAW_SUBFIC(dst_reg, src2_reg, 0));
				EMIT(PPC_RAW_SUBFZE(dst_reg_h, src2_reg_h));
				imm = -imm;
				src2_reg = dst_reg;
				src2_reg_h = dst_reg_h;
			}
			if (imm == 1) {
				EMIT(PPC_RAW_MR(dst_reg, src2_reg));
				EMIT(PPC_RAW_MR(dst_reg_h, src2_reg_h));
			} else {
				imm = ilog2(imm);
				EMIT(PPC_RAW_RLWINM(dst_reg, src2_reg, 32 - imm, imm, 31));
				EMIT(PPC_RAW_RLWIMI(dst_reg, src2_reg_h, 32 - imm, 0, imm - 1));
				EMIT(PPC_RAW_SRAWI(dst_reg_h, src2_reg_h, imm));
			}
			break;
		case BPF_ALU | BPF_NEG: /* (u32) dst = -dst */
			EMIT(PPC_RAW_NEG(dst_reg, src2_reg));
			break;
		case BPF_ALU64 | BPF_NEG: /* dst = -dst */
			EMIT(PPC_RAW_SUBFIC(dst_reg, src2_reg, 0));
			EMIT(PPC_RAW_SUBFZE(dst_reg_h, src2_reg_h));
			break;

		/*
		 * Logical operations: AND/OR/XOR/[A]LSH/[A]RSH
		 */
		case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
			EMIT(PPC_RAW_AND(dst_reg, src2_reg, src_reg));
			EMIT(PPC_RAW_AND(dst_reg_h, src2_reg_h, src_reg_h));
			break;
		case BPF_ALU | BPF_AND | BPF_X: /* (u32) dst = dst & src */
			EMIT(PPC_RAW_AND(dst_reg, src2_reg, src_reg));
			break;
		case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
			if (imm >= 0)
				EMIT(PPC_RAW_LI(dst_reg_h, 0));
			fallthrough;
		case BPF_ALU | BPF_AND | BPF_K: /* (u32) dst = dst & imm */
			if (!IMM_H(imm)) {
				EMIT(PPC_RAW_ANDI(dst_reg, src2_reg, IMM_L(imm)));
			} else if (!IMM_L(imm)) {
				EMIT(PPC_RAW_ANDIS(dst_reg, src2_reg, IMM_H(imm)));
			} else if (imm == (((1 << fls(imm)) - 1) ^ ((1 << (ffs(imm) - 1)) - 1))) {
				EMIT(PPC_RAW_RLWINM(dst_reg, src2_reg, 0,
						    32 - fls(imm), 32 - ffs(imm)));
			} else {
				PPC_LI32(_R0, imm);
				EMIT(PPC_RAW_AND(dst_reg, src2_reg, _R0));
			}
			break;
		case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
			EMIT(PPC_RAW_OR(dst_reg, src2_reg, src_reg));
			EMIT(PPC_RAW_OR(dst_reg_h, src2_reg_h, src_reg_h));
			break;
		case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
			EMIT(PPC_RAW_OR(dst_reg, src2_reg, src_reg));
			break;
		case BPF_ALU64 | BPF_OR | BPF_K:/* dst = dst | imm */
			/* Sign-extended */
			if (imm < 0)
				EMIT(PPC_RAW_LI(dst_reg_h, -1));
			fallthrough;
		case BPF_ALU | BPF_OR | BPF_K:/* dst = (u32) dst | (u32) imm */
			if (IMM_L(imm)) {
				EMIT(PPC_RAW_ORI(dst_reg, src2_reg, IMM_L(imm)));
				src2_reg = dst_reg;
			}
			if (IMM_H(imm))
				EMIT(PPC_RAW_ORIS(dst_reg, src2_reg, IMM_H(imm)));
			break;
		case BPF_ALU64 | BPF_XOR | BPF_X: /* dst ^= src */
			if (dst_reg == src_reg) {
				EMIT(PPC_RAW_LI(dst_reg, 0));
				EMIT(PPC_RAW_LI(dst_reg_h, 0));
			} else {
				EMIT(PPC_RAW_XOR(dst_reg, src2_reg, src_reg));
				EMIT(PPC_RAW_XOR(dst_reg_h, src2_reg_h, src_reg_h));
			}
			break;
		case BPF_ALU | BPF_XOR | BPF_X: /* (u32) dst ^= src */
			if (dst_reg == src_reg)
				EMIT(PPC_RAW_LI(dst_reg, 0));
			else
				EMIT(PPC_RAW_XOR(dst_reg, src2_reg, src_reg));
			break;
		case BPF_ALU64 | BPF_XOR | BPF_K: /* dst ^= imm */
			if (imm < 0)
				EMIT(PPC_RAW_NOR(dst_reg_h, src2_reg_h, src2_reg_h));
			fallthrough;
		case BPF_ALU | BPF_XOR | BPF_K: /* (u32) dst ^= (u32) imm */
			if (IMM_L(imm)) {
				EMIT(PPC_RAW_XORI(dst_reg, src2_reg, IMM_L(imm)));
				src2_reg = dst_reg;
			}
			if (IMM_H(imm))
				EMIT(PPC_RAW_XORIS(dst_reg, src2_reg, IMM_H(imm)));
			break;
		case BPF_ALU | BPF_LSH | BPF_X: /* (u32) dst <<= (u32) src */
			EMIT(PPC_RAW_SLW(dst_reg, src2_reg, src_reg));
			break;
		case BPF_ALU64 | BPF_LSH | BPF_X: /* dst <<= src; */
			bpf_set_seen_register(ctx, tmp_reg);
			EMIT(PPC_RAW_SUBFIC(_R0, src_reg, 32));
			EMIT(PPC_RAW_SLW(dst_reg_h, src2_reg_h, src_reg));
			EMIT(PPC_RAW_ADDI(tmp_reg, src_reg, 32));
			EMIT(PPC_RAW_SRW(_R0, src2_reg, _R0));
			EMIT(PPC_RAW_SLW(tmp_reg, src2_reg, tmp_reg));
			EMIT(PPC_RAW_OR(dst_reg_h, dst_reg_h, _R0));
			EMIT(PPC_RAW_SLW(dst_reg, src2_reg, src_reg));
			EMIT(PPC_RAW_OR(dst_reg_h, dst_reg_h, tmp_reg));
			break;
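		/*
		 * The case above is a 64-bit left shift by a variable amount
		 * n (0 <= n < 64) built from 32-bit shifts:
		 *	dst_h = (a_h << n) | (a_l >> (32 - n)) | (a_l << (n - 32))
		 *	dst   = a_l << n
		 * PPC shifts take a 6-bit amount and return 0 for amounts of
		 * 32 to 63, which makes the out-of-range terms vanish.
		 */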
		case BPF_ALU | BPF_LSH | BPF_K: /* (u32) dst <<= (u32) imm */
			if (imm)
				EMIT(PPC_RAW_SLWI(dst_reg, src2_reg, imm));
			else
				EMIT(PPC_RAW_MR(dst_reg, src2_reg));
			break;
		case BPF_ALU64 | BPF_LSH | BPF_K: /* dst <<= imm */
			if (imm < 0)
				return -EINVAL;
			if (!imm) {
				EMIT(PPC_RAW_MR(dst_reg, src2_reg));
			} else if (imm < 32) {
				EMIT(PPC_RAW_RLWINM(dst_reg_h, src2_reg_h, imm, 0, 31 - imm));
				EMIT(PPC_RAW_RLWIMI(dst_reg_h, src2_reg, imm, 32 - imm, 31));
				EMIT(PPC_RAW_RLWINM(dst_reg, src2_reg, imm, 0, 31 - imm));
			} else if (imm < 64) {
				EMIT(PPC_RAW_RLWINM(dst_reg_h, src2_reg, imm, 0, 31 - imm));
				EMIT(PPC_RAW_LI(dst_reg, 0));
			} else {
				EMIT(PPC_RAW_LI(dst_reg_h, 0));
				EMIT(PPC_RAW_LI(dst_reg, 0));
			}
			break;
		case BPF_ALU | BPF_RSH | BPF_X: /* (u32) dst >>= (u32) src */
			EMIT(PPC_RAW_SRW(dst_reg, src2_reg, src_reg));
			break;
		case BPF_ALU64 | BPF_RSH | BPF_X: /* dst >>= src */
			bpf_set_seen_register(ctx, tmp_reg);
			EMIT(PPC_RAW_SUBFIC(_R0, src_reg, 32));
			EMIT(PPC_RAW_SRW(dst_reg, src2_reg, src_reg));
			EMIT(PPC_RAW_ADDI(tmp_reg, src_reg, 32));
			EMIT(PPC_RAW_SLW(_R0, src2_reg_h, _R0));
			EMIT(PPC_RAW_SRW(tmp_reg, src2_reg_h, tmp_reg));
			EMIT(PPC_RAW_OR(dst_reg, dst_reg, _R0));
			EMIT(PPC_RAW_SRW(dst_reg_h, src2_reg_h, src_reg));
			EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp_reg));
			break;
		case BPF_ALU | BPF_RSH | BPF_K: /* (u32) dst >>= (u32) imm */
			if (imm)
				EMIT(PPC_RAW_SRWI(dst_reg, src2_reg, imm));
			else
				EMIT(PPC_RAW_MR(dst_reg, src2_reg));
			break;
		case BPF_ALU64 | BPF_RSH | BPF_K: /* dst >>= imm */
			if (imm < 0)
				return -EINVAL;
			if (!imm) {
				EMIT(PPC_RAW_MR(dst_reg, src2_reg));
				EMIT(PPC_RAW_MR(dst_reg_h, src2_reg_h));
			} else if (imm < 32) {
				EMIT(PPC_RAW_RLWINM(dst_reg, src2_reg, 32 - imm, imm, 31));
				EMIT(PPC_RAW_RLWIMI(dst_reg, src2_reg_h, 32 - imm, 0, imm - 1));
				EMIT(PPC_RAW_RLWINM(dst_reg_h, src2_reg_h, 32 - imm, imm, 31));
			} else if (imm < 64) {
				EMIT(PPC_RAW_RLWINM(dst_reg, src2_reg_h, 64 - imm, imm - 32, 31));
				EMIT(PPC_RAW_LI(dst_reg_h, 0));
			} else {
				EMIT(PPC_RAW_LI(dst_reg, 0));
				EMIT(PPC_RAW_LI(dst_reg_h, 0));
			}
			break;
		case BPF_ALU | BPF_ARSH | BPF_X: /* (s32) dst >>= src */
			EMIT(PPC_RAW_SRAW(dst_reg, src2_reg, src_reg));
			break;
		case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
			bpf_set_seen_register(ctx, tmp_reg);
			EMIT(PPC_RAW_SUBFIC(_R0, src_reg, 32));
			EMIT(PPC_RAW_SRW(dst_reg, src2_reg, src_reg));
			EMIT(PPC_RAW_SLW(_R0, src2_reg_h, _R0));
			EMIT(PPC_RAW_ADDI(tmp_reg, src_reg, 32));
			EMIT(PPC_RAW_OR(dst_reg, dst_reg, _R0));
			EMIT(PPC_RAW_RLWINM(_R0, tmp_reg, 0, 26, 26));
			EMIT(PPC_RAW_SRAW(tmp_reg, src2_reg_h, tmp_reg));
			EMIT(PPC_RAW_SRAW(dst_reg_h, src2_reg_h, src_reg));
			EMIT(PPC_RAW_SLW(tmp_reg, tmp_reg, _R0));
			EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp_reg));
			break;
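		/*
		 * Same funnel construction as the shifts above, except the
		 * high-to-low term must be an arithmetic shift. "rlwinm _R0,
		 * tmp_reg, 0, 26, 26" isolates bit 0x20 of n + 32: for
		 * n < 32 it yields 32, so the final slw discards the sraw
		 * result (which is pure sign bits there); for n >= 32 it
		 * yields 0 and the sraw result is ORed in unchanged.
		 */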
		case BPF_ALU | BPF_ARSH | BPF_K: /* (s32) dst >>= imm */
			if (imm)
				EMIT(PPC_RAW_SRAWI(dst_reg, src2_reg, imm));
			else
				EMIT(PPC_RAW_MR(dst_reg, src2_reg));
			break;
		case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */
			if (imm < 0)
				return -EINVAL;
			if (!imm) {
				EMIT(PPC_RAW_MR(dst_reg, src2_reg));
				EMIT(PPC_RAW_MR(dst_reg_h, src2_reg_h));
			} else if (imm < 32) {
				EMIT(PPC_RAW_RLWINM(dst_reg, src2_reg, 32 - imm, imm, 31));
				EMIT(PPC_RAW_RLWIMI(dst_reg, src2_reg_h, 32 - imm, 0, imm - 1));
				EMIT(PPC_RAW_SRAWI(dst_reg_h, src2_reg_h, imm));
			} else if (imm < 64) {
				EMIT(PPC_RAW_SRAWI(dst_reg, src2_reg_h, imm - 32));
				EMIT(PPC_RAW_SRAWI(dst_reg_h, src2_reg_h, 31));
			} else {
				EMIT(PPC_RAW_SRAWI(dst_reg, src2_reg_h, 31));
				EMIT(PPC_RAW_SRAWI(dst_reg_h, src2_reg_h, 31));
			}
			break;

		/*
		 * MOV
		 */
		case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
			if (dst_reg == src_reg)
				break;
			EMIT(PPC_RAW_MR(dst_reg, src_reg));
			EMIT(PPC_RAW_MR(dst_reg_h, src_reg_h));
			break;
		case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */
			/* special mov32 for zext */
			if (imm == 1)
				EMIT(PPC_RAW_LI(dst_reg_h, 0));
			else if (dst_reg != src_reg)
				EMIT(PPC_RAW_MR(dst_reg, src_reg));
			break;
		case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = (s64) imm */
			PPC_LI32(dst_reg, imm);
			PPC_EX32(dst_reg_h, imm);
			break;
		case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */
			PPC_LI32(dst_reg, imm);
			break;

		/*
		 * BPF_FROM_BE/LE
		 */
		case BPF_ALU | BPF_END | BPF_FROM_LE:
			switch (imm) {
			case 16:
				/* Copy 16 bits to upper part */
				EMIT(PPC_RAW_RLWIMI(dst_reg, src2_reg, 16, 0, 15));
				/* Rotate 8 bits right & mask */
				EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 24, 16, 31));
				break;
			case 32:
				/*
				 * Rotate word left by 8 bits:
				 * 2 bytes are already in their final position
				 * -- byte 2 and 4 (of bytes 1, 2, 3 and 4)
				 */
				EMIT(PPC_RAW_RLWINM(_R0, src2_reg, 8, 0, 31));
				/* Rotate 24 bits and insert byte 1 */
				EMIT(PPC_RAW_RLWIMI(_R0, src2_reg, 24, 0, 7));
				/* Rotate 24 bits and insert byte 3 */
				EMIT(PPC_RAW_RLWIMI(_R0, src2_reg, 24, 16, 23));
				EMIT(PPC_RAW_MR(dst_reg, _R0));
				break;
			case 64:
				bpf_set_seen_register(ctx, tmp_reg);
				EMIT(PPC_RAW_RLWINM(tmp_reg, src2_reg, 8, 0, 31));
				EMIT(PPC_RAW_RLWINM(_R0, src2_reg_h, 8, 0, 31));
				/* Rotate 24 bits and insert byte 1 */
				EMIT(PPC_RAW_RLWIMI(tmp_reg, src2_reg, 24, 0, 7));
				EMIT(PPC_RAW_RLWIMI(_R0, src2_reg_h, 24, 0, 7));
				/* Rotate 24 bits and insert byte 3 */
				EMIT(PPC_RAW_RLWIMI(tmp_reg, src2_reg, 24, 16, 23));
				EMIT(PPC_RAW_RLWIMI(_R0, src2_reg_h, 24, 16, 23));
				EMIT(PPC_RAW_MR(dst_reg, _R0));
				EMIT(PPC_RAW_MR(dst_reg_h, tmp_reg));
				break;
			}
			break;
		case BPF_ALU | BPF_END | BPF_FROM_BE:
			switch (imm) {
			case 16:
				/* zero-extend 16 bits into 32 bits */
				EMIT(PPC_RAW_RLWINM(dst_reg, src2_reg, 0, 16, 31));
				break;
			case 32:
			case 64:
				/* nop */
				break;
			}
			break;

		/*
		 * BPF_ST NOSPEC (speculation barrier)
		 */
		case BPF_ST | BPF_NOSPEC:
			break;

		/*
		 * BPF_ST(X)
		 */
		case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */
			EMIT(PPC_RAW_STB(src_reg, dst_reg, off));
			break;
		case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
			PPC_LI32(_R0, imm);
			EMIT(PPC_RAW_STB(_R0, dst_reg, off));
			break;
		case BPF_STX | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = src */
			EMIT(PPC_RAW_STH(src_reg, dst_reg, off));
			break;
		case BPF_ST | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = imm */
			PPC_LI32(_R0, imm);
			EMIT(PPC_RAW_STH(_R0, dst_reg, off));
			break;
		case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
			EMIT(PPC_RAW_STW(src_reg, dst_reg, off));
			break;
		case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
			PPC_LI32(_R0, imm);
			EMIT(PPC_RAW_STW(_R0, dst_reg, off));
			break;
		case BPF_STX | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = src */
			EMIT(PPC_RAW_STW(src_reg_h, dst_reg, off));
			EMIT(PPC_RAW_STW(src_reg, dst_reg, off + 4));
			break;
		case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
			PPC_LI32(_R0, imm);
			EMIT(PPC_RAW_STW(_R0, dst_reg, off + 4));
			PPC_EX32(_R0, imm);
			EMIT(PPC_RAW_STW(_R0, dst_reg, off));
			break;
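		/*
		 * Note the big-endian layout of the doubleword accesses
		 * above: the high word lives at (dst + off) and the low word
		 * at (dst + off + 4).
		 */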

		/*
		 * BPF_STX ATOMIC (atomic ops)
		 */
		case BPF_STX | BPF_ATOMIC | BPF_W:
			save_reg = _R0;
			ret_reg = src_reg;

			bpf_set_seen_register(ctx, tmp_reg);
			bpf_set_seen_register(ctx, ax_reg);

			/* Get offset into TMP_REG */
			EMIT(PPC_RAW_LI(tmp_reg, off));
			/*
			 * Enforce full ordering for operations with BPF_FETCH by emitting a 'sync'
			 * before and after the operation.
			 *
			 * This is a requirement in the Linux Kernel Memory Model.
			 * See __cmpxchg_u32() in asm/cmpxchg.h as an example.
			 */
			if ((imm & BPF_FETCH) && IS_ENABLED(CONFIG_SMP))
				EMIT(PPC_RAW_SYNC());
			tmp_idx = ctx->idx * 4;
			/* load value from memory into r0 */
			EMIT(PPC_RAW_LWARX(_R0, tmp_reg, dst_reg, 0));

			/* Save old value in BPF_REG_AX */
			if (imm & BPF_FETCH)
				EMIT(PPC_RAW_MR(ax_reg, _R0));

			switch (imm) {
			case BPF_ADD:
			case BPF_ADD | BPF_FETCH:
				EMIT(PPC_RAW_ADD(_R0, _R0, src_reg));
				break;
			case BPF_AND:
			case BPF_AND | BPF_FETCH:
				EMIT(PPC_RAW_AND(_R0, _R0, src_reg));
				break;
			case BPF_OR:
			case BPF_OR | BPF_FETCH:
				EMIT(PPC_RAW_OR(_R0, _R0, src_reg));
				break;
			case BPF_XOR:
			case BPF_XOR | BPF_FETCH:
				EMIT(PPC_RAW_XOR(_R0, _R0, src_reg));
				break;
			case BPF_CMPXCHG:
				/*
				 * Return old value in BPF_REG_0 for BPF_CMPXCHG &
				 * in src_reg for other cases.
				 */
				ret_reg = bpf_to_ppc(BPF_REG_0);

				/* Compare with old value in BPF_REG_0 */
				EMIT(PPC_RAW_CMPW(bpf_to_ppc(BPF_REG_0), _R0));
				/* Don't set if different from old value */
				PPC_BCC_SHORT(COND_NE, (ctx->idx + 3) * 4);
				fallthrough;
			case BPF_XCHG:
				save_reg = src_reg;
				break;
			default:
				pr_err_ratelimited("eBPF filter atomic op code %02x (@%d) unsupported\n",
						   code, i);
				return -EOPNOTSUPP;
			}

			/* store new value */
			EMIT(PPC_RAW_STWCX(save_reg, tmp_reg, dst_reg));
			/* we're done if this succeeded */
			PPC_BCC_SHORT(COND_NE, tmp_idx);

			/* For the BPF_FETCH variant, get old data into src_reg */
			if (imm & BPF_FETCH) {
				/* Emit 'sync' to enforce full ordering */
				if (IS_ENABLED(CONFIG_SMP))
					EMIT(PPC_RAW_SYNC());
				EMIT(PPC_RAW_MR(ret_reg, ax_reg));
				if (!fp->aux->verifier_zext)
					EMIT(PPC_RAW_LI(ret_reg - 1, 0)); /* higher 32-bit */
			}
			break;
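		/*
		 * Roughly, for a plain BPF_ADD the sequence emitted above is
		 * the classic larx/stcx. retry loop:
		 *	lwarx	r0, tmp, dst	# load-reserve old value
		 *	add	r0, r0, src
		 *	stwcx.	r0, tmp, dst	# store iff still reserved
		 *	bne-	<lwarx>		# reservation lost, retry
		 */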

		case BPF_STX | BPF_ATOMIC | BPF_DW: /* *(u64 *)(dst + off) += src */
			return -EOPNOTSUPP;

		/*
		 * BPF_LDX
		 */
		case BPF_LDX | BPF_MEM | BPF_B: /* dst = *(u8 *)(ul) (src + off) */
		case BPF_LDX | BPF_PROBE_MEM | BPF_B:
		case BPF_LDX | BPF_MEM | BPF_H: /* dst = *(u16 *)(ul) (src + off) */
		case BPF_LDX | BPF_PROBE_MEM | BPF_H:
		case BPF_LDX | BPF_MEM | BPF_W: /* dst = *(u32 *)(ul) (src + off) */
		case BPF_LDX | BPF_PROBE_MEM | BPF_W:
		case BPF_LDX | BPF_MEM | BPF_DW: /* dst = *(u64 *)(ul) (src + off) */
		case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
			/*
			 * As PTR_TO_BTF_ID that uses BPF_PROBE_MEM mode could either be a valid
			 * kernel pointer or NULL but not a userspace address, execute BPF_PROBE_MEM
			 * load only if addr is kernel address (see is_kernel_addr()), otherwise
			 * set dst_reg=0 and move on.
			 */
			if (BPF_MODE(code) == BPF_PROBE_MEM) {
				PPC_LI32(_R0, TASK_SIZE - off);
				EMIT(PPC_RAW_CMPLW(src_reg, _R0));
				PPC_BCC_SHORT(COND_GT, (ctx->idx + 4) * 4);
				EMIT(PPC_RAW_LI(dst_reg, 0));
				/*
				 * For BPF_DW case, "li reg_h,0" would be needed when
				 * !fp->aux->verifier_zext. Emit NOP otherwise.
				 *
				 * Note that "li reg_h,0" is emitted for BPF_B/H/W case,
				 * if necessary. So, jump there instead of emitting an
				 * additional "li reg_h,0" instruction.
				 */
				if (size == BPF_DW && !fp->aux->verifier_zext)
					EMIT(PPC_RAW_LI(dst_reg_h, 0));
				else
					EMIT(PPC_RAW_NOP());
				/*
				 * Need to jump two instructions instead of one for BPF_DW case
				 * as there are two load instructions for dst_reg_h & dst_reg
				 * respectively.
				 */
				if (size == BPF_DW)
					PPC_JMP((ctx->idx + 3) * 4);
				else
					PPC_JMP((ctx->idx + 2) * 4);
			}

			switch (size) {
			case BPF_B:
				EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
				break;
			case BPF_H:
				EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off));
				break;
			case BPF_W:
				EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off));
				break;
			case BPF_DW:
				EMIT(PPC_RAW_LWZ(dst_reg_h, src_reg, off));
				EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off + 4));
				break;
			}

			if (size != BPF_DW && !fp->aux->verifier_zext)
				EMIT(PPC_RAW_LI(dst_reg_h, 0));

			if (BPF_MODE(code) == BPF_PROBE_MEM) {
				int insn_idx = ctx->idx - 1;
				int jmp_off = 4;

				/*
				 * In case of BPF_DW, two lwz instructions are emitted, one
				 * for higher 32-bit and another for lower 32-bit. So, set
				 * ex->insn to the first of the two and jump over both
				 * instructions in fixup.
				 *
				 * Similarly, with !verifier_zext, two instructions are
				 * emitted for BPF_B/H/W case. So, set ex->insn to the
				 * instruction that could fault and skip over both
				 * instructions.
				 */
				if (size == BPF_DW || !fp->aux->verifier_zext) {
					insn_idx -= 1;
					jmp_off += 4;
				}

				ret = bpf_add_extable_entry(fp, image, fimage, pass, ctx, insn_idx,
							    jmp_off, dst_reg);
				if (ret)
					return ret;
			}
			break;

		/*
		 * Doubleword load
		 * 16-byte instruction that uses two 'struct bpf_insn'
		 */
		case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
			tmp_idx = ctx->idx;
			PPC_LI32(dst_reg_h, (u32)insn[i + 1].imm);
			PPC_LI32(dst_reg, (u32)insn[i].imm);
			/* padding to allow full 4 instructions for later patching */
			if (!image)
				for (j = ctx->idx - tmp_idx; j < 4; j++)
					EMIT(PPC_RAW_NOP());
			/* Adjust for two bpf instructions */
			addrs[++i] = ctx->idx * 4;
			break;
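		/*
		 * PPC_LI32() emits one or two instructions per 32-bit half
		 * depending on the value, so the group above is padded to a
		 * fixed four instructions while sizing (image == NULL): a
		 * later pass can then patch in any immediate without
		 * perturbing the addrs[] offsets.
		 */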

		/*
		 * Return/Exit
		 */
		case BPF_JMP | BPF_EXIT:
			/*
			 * If this isn't the very last instruction, branch to
			 * the epilogue. If we _are_ the last instruction,
			 * we'll just fall through to the epilogue.
			 */
			if (i != flen - 1) {
				ret = bpf_jit_emit_exit_insn(image, ctx, _R0, exit_addr);
				if (ret)
					return ret;
			}
			/* else fall through to the epilogue */
			break;

		/*
		 * Call kernel helper or bpf function
		 */
		case BPF_JMP | BPF_CALL:
			ctx->seen |= SEEN_FUNC;

			ret = bpf_jit_get_func_addr(fp, &insn[i], extra_pass,
						    &func_addr, &func_addr_fixed);
			if (ret < 0)
				return ret;

			if (bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_5))) {
				EMIT(PPC_RAW_STW(bpf_to_ppc(BPF_REG_5) - 1, _R1, 8));
				EMIT(PPC_RAW_STW(bpf_to_ppc(BPF_REG_5), _R1, 12));
			}

			ret = bpf_jit_emit_func_call_rel(image, fimage, ctx, func_addr);
			if (ret)
				return ret;

			EMIT(PPC_RAW_MR(bpf_to_ppc(BPF_REG_0) - 1, _R3));
			EMIT(PPC_RAW_MR(bpf_to_ppc(BPF_REG_0), _R4));
			break;

		/*
		 * Jumps and branches
		 */
		case BPF_JMP | BPF_JA:
			PPC_JMP(addrs[i + 1 + off]);
			break;

		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSGT | BPF_K:
		case BPF_JMP | BPF_JSGT | BPF_X:
		case BPF_JMP32 | BPF_JGT | BPF_K:
		case BPF_JMP32 | BPF_JGT | BPF_X:
		case BPF_JMP32 | BPF_JSGT | BPF_K:
		case BPF_JMP32 | BPF_JSGT | BPF_X:
			true_cond = COND_GT;
			goto cond_branch;
		case BPF_JMP | BPF_JLT | BPF_K:
		case BPF_JMP | BPF_JLT | BPF_X:
		case BPF_JMP | BPF_JSLT | BPF_K:
		case BPF_JMP | BPF_JSLT | BPF_X:
		case BPF_JMP32 | BPF_JLT | BPF_K:
		case BPF_JMP32 | BPF_JLT | BPF_X:
		case BPF_JMP32 | BPF_JSLT | BPF_K:
		case BPF_JMP32 | BPF_JSLT | BPF_X:
			true_cond = COND_LT;
			goto cond_branch;
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JSGE | BPF_K:
		case BPF_JMP | BPF_JSGE | BPF_X:
		case BPF_JMP32 | BPF_JGE | BPF_K:
		case BPF_JMP32 | BPF_JGE | BPF_X:
		case BPF_JMP32 | BPF_JSGE | BPF_K:
		case BPF_JMP32 | BPF_JSGE | BPF_X:
			true_cond = COND_GE;
			goto cond_branch;
		case BPF_JMP | BPF_JLE | BPF_K:
		case BPF_JMP | BPF_JLE | BPF_X:
		case BPF_JMP | BPF_JSLE | BPF_K:
		case BPF_JMP | BPF_JSLE | BPF_X:
		case BPF_JMP32 | BPF_JLE | BPF_K:
		case BPF_JMP32 | BPF_JLE | BPF_X:
		case BPF_JMP32 | BPF_JSLE | BPF_K:
		case BPF_JMP32 | BPF_JSLE | BPF_X:
			true_cond = COND_LE;
			goto cond_branch;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP32 | BPF_JEQ | BPF_K:
		case BPF_JMP32 | BPF_JEQ | BPF_X:
			true_cond = COND_EQ;
			goto cond_branch;
		case BPF_JMP | BPF_JNE | BPF_K:
		case BPF_JMP | BPF_JNE | BPF_X:
		case BPF_JMP32 | BPF_JNE | BPF_K:
		case BPF_JMP32 | BPF_JNE | BPF_X:
			true_cond = COND_NE;
			goto cond_branch;
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
		case BPF_JMP32 | BPF_JSET | BPF_K:
		case BPF_JMP32 | BPF_JSET | BPF_X:
			true_cond = COND_NE;
			/* fallthrough; */

cond_branch:
			switch (code) {
			case BPF_JMP | BPF_JGT | BPF_X:
			case BPF_JMP | BPF_JLT | BPF_X:
			case BPF_JMP | BPF_JGE | BPF_X:
			case BPF_JMP | BPF_JLE | BPF_X:
			case BPF_JMP | BPF_JEQ | BPF_X:
			case BPF_JMP | BPF_JNE | BPF_X:
				/* unsigned comparison */
				EMIT(PPC_RAW_CMPLW(dst_reg_h, src_reg_h));
				PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
				EMIT(PPC_RAW_CMPLW(dst_reg, src_reg));
				break;
			case BPF_JMP32 | BPF_JGT | BPF_X:
			case BPF_JMP32 | BPF_JLT | BPF_X:
			case BPF_JMP32 | BPF_JGE | BPF_X:
			case BPF_JMP32 | BPF_JLE | BPF_X:
			case BPF_JMP32 | BPF_JEQ | BPF_X:
			case BPF_JMP32 | BPF_JNE | BPF_X:
				/* unsigned comparison */
				EMIT(PPC_RAW_CMPLW(dst_reg, src_reg));
				break;
			case BPF_JMP | BPF_JSGT | BPF_X:
			case BPF_JMP | BPF_JSLT | BPF_X:
			case BPF_JMP | BPF_JSGE | BPF_X:
			case BPF_JMP | BPF_JSLE | BPF_X:
				/* signed comparison */
				EMIT(PPC_RAW_CMPW(dst_reg_h, src_reg_h));
				PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
				EMIT(PPC_RAW_CMPLW(dst_reg, src_reg));
				break;
			case BPF_JMP32 | BPF_JSGT | BPF_X:
			case BPF_JMP32 | BPF_JSLT | BPF_X:
			case BPF_JMP32 | BPF_JSGE | BPF_X:
			case BPF_JMP32 | BPF_JSLE | BPF_X:
				/* signed comparison */
				EMIT(PPC_RAW_CMPW(dst_reg, src_reg));
				break;
			case BPF_JMP | BPF_JSET | BPF_X:
				EMIT(PPC_RAW_AND_DOT(_R0, dst_reg_h, src_reg_h));
				PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
				EMIT(PPC_RAW_AND_DOT(_R0, dst_reg, src_reg));
				break;
			case BPF_JMP32 | BPF_JSET | BPF_X:
				EMIT(PPC_RAW_AND_DOT(_R0, dst_reg, src_reg));
				break;
			case BPF_JMP | BPF_JNE | BPF_K:
			case BPF_JMP | BPF_JEQ | BPF_K:
			case BPF_JMP | BPF_JGT | BPF_K:
			case BPF_JMP | BPF_JLT | BPF_K:
			case BPF_JMP | BPF_JGE | BPF_K:
			case BPF_JMP | BPF_JLE | BPF_K:
				/*
				 * Need sign-extended load, so only positive
				 * values can be used as imm in cmplwi
				 */
				if (imm >= 0 && imm < 32768) {
					EMIT(PPC_RAW_CMPLWI(dst_reg_h, 0));
					PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
					EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
				} else {
					/* sign-extending load ... but unsigned comparison */
					PPC_EX32(_R0, imm);
					EMIT(PPC_RAW_CMPLW(dst_reg_h, _R0));
					PPC_LI32(_R0, imm);
					PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
					EMIT(PPC_RAW_CMPLW(dst_reg, _R0));
				}
				break;
			case BPF_JMP32 | BPF_JNE | BPF_K:
			case BPF_JMP32 | BPF_JEQ | BPF_K:
			case BPF_JMP32 | BPF_JGT | BPF_K:
			case BPF_JMP32 | BPF_JLT | BPF_K:
			case BPF_JMP32 | BPF_JGE | BPF_K:
			case BPF_JMP32 | BPF_JLE | BPF_K:
				if (imm >= 0 && imm < 65536) {
					EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
				} else {
					PPC_LI32(_R0, imm);
					EMIT(PPC_RAW_CMPLW(dst_reg, _R0));
				}
				break;
			case BPF_JMP | BPF_JSGT | BPF_K:
			case BPF_JMP | BPF_JSLT | BPF_K:
			case BPF_JMP | BPF_JSGE | BPF_K:
			case BPF_JMP | BPF_JSLE | BPF_K:
				if (imm >= 0 && imm < 65536) {
					EMIT(PPC_RAW_CMPWI(dst_reg_h, imm < 0 ? -1 : 0));
					PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
					EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
				} else {
					/* sign-extending load */
					EMIT(PPC_RAW_CMPWI(dst_reg_h, imm < 0 ? -1 : 0));
					PPC_LI32(_R0, imm);
					PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
					EMIT(PPC_RAW_CMPLW(dst_reg, _R0));
				}
				break;
			case BPF_JMP32 | BPF_JSGT | BPF_K:
			case BPF_JMP32 | BPF_JSLT | BPF_K:
			case BPF_JMP32 | BPF_JSGE | BPF_K:
			case BPF_JMP32 | BPF_JSLE | BPF_K:
				/*
				 * signed comparison, so any 16-bit value
				 * can be used in cmpwi
				 */
				if (imm >= -32768 && imm < 32768) {
					EMIT(PPC_RAW_CMPWI(dst_reg, imm));
				} else {
					/* sign-extending load */
					PPC_LI32(_R0, imm);
					EMIT(PPC_RAW_CMPW(dst_reg, _R0));
				}
				break;
			case BPF_JMP | BPF_JSET | BPF_K:
				/* andi does not sign-extend the immediate */
				if (imm >= 0 && imm < 32768) {
					/* PPC_ANDI is _only/always_ dot-form */
					EMIT(PPC_RAW_ANDI(_R0, dst_reg, imm));
				} else {
					PPC_LI32(_R0, imm);
					if (imm < 0) {
						EMIT(PPC_RAW_CMPWI(dst_reg_h, 0));
						PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
					}
					EMIT(PPC_RAW_AND_DOT(_R0, dst_reg, _R0));
				}
				break;
			case BPF_JMP32 | BPF_JSET | BPF_K:
				/* andi does not sign-extend the immediate */
				if (imm >= 0 && imm < 32768) {
					/* PPC_ANDI is _only/always_ dot-form */
					EMIT(PPC_RAW_ANDI(_R0, dst_reg, imm));
				} else {
					PPC_LI32(_R0, imm);
					EMIT(PPC_RAW_AND_DOT(_R0, dst_reg, _R0));
				}
				break;
			}
			PPC_BCC(true_cond, addrs[i + 1 + off]);
			break;
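		/*
		 * Illustration: a 64-bit "if (dst > src) goto l" thus becomes
		 *	cmplw	dst_h, src_h
		 *	bne	1f
		 *	cmplw	dst, src
		 * 1:	bgt	<l>
		 * i.e. the low-word comparison only decides the branch when
		 * the high words are equal.
		 */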

		/*
		 * Tail call
		 */
		case BPF_JMP | BPF_TAIL_CALL:
			ctx->seen |= SEEN_TAILCALL;
			ret = bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
			if (ret < 0)
				return ret;
			break;

		default:
			/*
			 * The filter contains something cruel & unusual.
			 * We don't handle it, but also there shouldn't be
			 * anything missing from our list.
			 */
			pr_err_ratelimited("eBPF filter opcode %04x (@%d) unsupported\n", code, i);
			return -EOPNOTSUPP;
		}
		if (BPF_CLASS(code) == BPF_ALU && !fp->aux->verifier_zext &&
		    !insn_is_zext(&insn[i + 1]) && !(BPF_OP(code) == BPF_END && imm == 64))
			EMIT(PPC_RAW_LI(dst_reg_h, 0));
	}

	/* Set end-of-body-code address for exit. */
	addrs[i] = ctx->idx * 4;

	return 0;
}