// SPDX-License-Identifier: GPL-2.0-only
/*
 * eBPF JIT compiler for PPC32
 *
 * Copyright 2020 Christophe Leroy <christophe.leroy@csgroup.eu>
 *		  CS GROUP France
 *
 * Based on PPC64 eBPF JIT compiler by Naveen N. Rao
 */
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <asm/asm-compat.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <asm/kprobes.h>
#include <linux/bpf.h>

#include "bpf_jit.h"

/*
 * Stack layout:
 *
 *		[	prev sp		] <-------------
 *		[   nv gpr save area	] 16 * 4	|
 * fp (r31) -->	[   ebpf stack space	] upto 512	|
 *		[     frame header	] 16		|
 * sp (r1) --->	[    stack pointer	] --------------
 */

/* for gpr non volatile registers r17 to r31 (15) + tail call */
#define BPF_PPC_STACK_SAVE	(15 * 4 + 4)
/* stack frame, ensure this is quadword aligned */
#define BPF_PPC_STACKFRAME(ctx)	(STACK_FRAME_MIN_SIZE + BPF_PPC_STACK_SAVE + (ctx)->stack_size)

/* BPF register usage */
#define TMP_REG	(MAX_BPF_JIT_REG + 0)

/* BPF to ppc register mappings */
const int b2p[MAX_BPF_JIT_REG + 1] = {
	/* function return value */
	[BPF_REG_0] = 12,
	/* function arguments */
	[BPF_REG_1] = 4,
	[BPF_REG_2] = 6,
	[BPF_REG_3] = 8,
	[BPF_REG_4] = 10,
	[BPF_REG_5] = 22,
	/* non volatile registers */
	[BPF_REG_6] = 24,
	[BPF_REG_7] = 26,
	[BPF_REG_8] = 28,
	[BPF_REG_9] = 30,
	/* frame pointer aka BPF_REG_10 */
	[BPF_REG_FP] = 18,
	/* eBPF jit internal registers */
	[BPF_REG_AX] = 20,
	[TMP_REG] = 31,		/* 32 bits */
};

static int bpf_to_ppc(struct codegen_context *ctx, int reg)
{
	return ctx->b2p[reg];
}

/* PPC NVR range -- update this if we ever use NVRs below r17 */
#define BPF_PPC_NVR_MIN		17
#define BPF_PPC_TC		16

static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
{
	if ((reg >= BPF_PPC_NVR_MIN && reg < 32) || reg == BPF_PPC_TC)
		return BPF_PPC_STACKFRAME(ctx) - 4 * (32 - reg);

	WARN(true, "BPF JIT is asking about unknown registers, will crash the stack");
	/* Use the hole we have left for alignment */
	return BPF_PPC_STACKFRAME(ctx) - 4;
}

void bpf_jit_realloc_regs(struct codegen_context *ctx)
{
	if (ctx->seen & SEEN_FUNC)
		return;

	while (ctx->seen & SEEN_NVREG_MASK &&
	      (ctx->seen & SEEN_VREG_MASK) != SEEN_VREG_MASK) {
		int old = 32 - fls(ctx->seen & (SEEN_NVREG_MASK & 0xaaaaaaab));
		int new = 32 - fls(~ctx->seen & (SEEN_VREG_MASK & 0xaaaaaaaa));
		int i;

		for (i = BPF_REG_0; i <= TMP_REG; i++) {
			if (ctx->b2p[i] != old)
				continue;
			ctx->b2p[i] = new;
			bpf_set_seen_register(ctx, new);
			bpf_clear_seen_register(ctx, old);
			if (i != TMP_REG) {
				bpf_set_seen_register(ctx, new - 1);
				bpf_clear_seen_register(ctx, old - 1);
			}
			break;
		}
	}
}

void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
{
	int i;

	/* First arg comes in as a 32 bits pointer. */
	EMIT(PPC_RAW_MR(bpf_to_ppc(ctx, BPF_REG_1), _R3));
	EMIT(PPC_RAW_LI(bpf_to_ppc(ctx, BPF_REG_1) - 1, 0));
	EMIT(PPC_RAW_STWU(_R1, _R1, -BPF_PPC_STACKFRAME(ctx)));

	/*
	 * Initialize tail_call_cnt in stack frame if we do tail calls.
	 * Otherwise, put in NOPs so that it can be skipped when we are
	 * invoked through a tail call.
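	 * A tail call enters at bpf_func + BPF_TAILCALL_PROLOGUE_SIZE, i.e.
	 * just past these first four instructions, and so re-uses the
	 * caller's stack frame and the tail_call_cnt already stored there.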
	 */
	if (ctx->seen & SEEN_TAILCALL)
		EMIT(PPC_RAW_STW(bpf_to_ppc(ctx, BPF_REG_1) - 1, _R1,
				 bpf_jit_stack_offsetof(ctx, BPF_PPC_TC)));
	else
		EMIT(PPC_RAW_NOP());

#define BPF_TAILCALL_PROLOGUE_SIZE	16

	/*
	 * We need a stack frame, but we don't necessarily need to
	 * save/restore LR unless we call other functions
	 */
	if (ctx->seen & SEEN_FUNC)
		EMIT(PPC_RAW_MFLR(_R0));

	/*
	 * Back up non-volatile regs -- registers r17-r31
	 */
	for (i = BPF_PPC_NVR_MIN; i <= 31; i++)
		if (bpf_is_seen_register(ctx, i))
			EMIT(PPC_RAW_STW(i, _R1, bpf_jit_stack_offsetof(ctx, i)));

	/* If needed retrieve arguments 9 and 10, ie 5th 64 bits arg.*/
	if (bpf_is_seen_register(ctx, bpf_to_ppc(ctx, BPF_REG_5))) {
		EMIT(PPC_RAW_LWZ(bpf_to_ppc(ctx, BPF_REG_5) - 1, _R1, BPF_PPC_STACKFRAME(ctx) + 8));
		EMIT(PPC_RAW_LWZ(bpf_to_ppc(ctx, BPF_REG_5), _R1, BPF_PPC_STACKFRAME(ctx) + 12));
	}

	/* Setup frame pointer to point to the bpf stack area */
	if (bpf_is_seen_register(ctx, bpf_to_ppc(ctx, BPF_REG_FP))) {
		EMIT(PPC_RAW_LI(bpf_to_ppc(ctx, BPF_REG_FP) - 1, 0));
		EMIT(PPC_RAW_ADDI(bpf_to_ppc(ctx, BPF_REG_FP), _R1,
				  STACK_FRAME_MIN_SIZE + ctx->stack_size));
	}

	if (ctx->seen & SEEN_FUNC)
		EMIT(PPC_RAW_STW(_R0, _R1, BPF_PPC_STACKFRAME(ctx) + PPC_LR_STKOFF));
}

static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx)
{
	int i;

	/* Restore NVRs */
	for (i = BPF_PPC_NVR_MIN; i <= 31; i++)
		if (bpf_is_seen_register(ctx, i))
			EMIT(PPC_RAW_LWZ(i, _R1, bpf_jit_stack_offsetof(ctx, i)));
}

void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
{
	EMIT(PPC_RAW_MR(_R3, bpf_to_ppc(ctx, BPF_REG_0)));

	bpf_jit_emit_common_epilogue(image, ctx);

	/* Tear down our stack frame */

	if (ctx->seen & SEEN_FUNC)
		EMIT(PPC_RAW_LWZ(_R0, _R1, BPF_PPC_STACKFRAME(ctx) + PPC_LR_STKOFF));

	EMIT(PPC_RAW_ADDI(_R1, _R1, BPF_PPC_STACKFRAME(ctx)));

	if (ctx->seen & SEEN_FUNC)
		EMIT(PPC_RAW_MTLR(_R0));

	EMIT(PPC_RAW_BLR());
}

void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 func)
{
	s32 rel = (s32)func - (s32)(image + ctx->idx);

	if (image && rel < 0x2000000 && rel >= -0x2000000) {
		PPC_BL_ABS(func);
	} else {
		/* Load function address into r0 */
		EMIT(PPC_RAW_LIS(_R0, IMM_H(func)));
		EMIT(PPC_RAW_ORI(_R0, _R0, IMM_L(func)));
		EMIT(PPC_RAW_MTCTR(_R0));
		EMIT(PPC_RAW_BCTRL());
	}
}

static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
{
	/*
	 * By now, the eBPF program has already setup parameters in r3-r8
	 * r3-r4/BPF_REG_1 - pointer to ctx -- passed as is to the next bpf program
	 * r5-r6/BPF_REG_2 - pointer to bpf_array
	 * r7-r8/BPF_REG_3 - index in bpf_array
	 */
	int b2p_bpf_array = bpf_to_ppc(ctx, BPF_REG_2);
	int b2p_index = bpf_to_ppc(ctx, BPF_REG_3);

	/*
	 * if (index >= array->map.max_entries)
	 *   goto out;
	 */
	EMIT(PPC_RAW_LWZ(_R0, b2p_bpf_array, offsetof(struct bpf_array, map.max_entries)));
	EMIT(PPC_RAW_CMPLW(b2p_index, _R0));
	EMIT(PPC_RAW_LWZ(_R0, _R1, bpf_jit_stack_offsetof(ctx, BPF_PPC_TC)));
	PPC_BCC(COND_GE, out);

	/*
	 * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
	 *   goto out;
	 */
	EMIT(PPC_RAW_CMPLWI(_R0, MAX_TAIL_CALL_CNT));
	/* tail_call_cnt++; */
	EMIT(PPC_RAW_ADDIC(_R0, _R0, 1));
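	/*
	 * addic only updates XER[CA], not CR0, so the branch below still
	 * tests the cmplwi result from above.
	 */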
	PPC_BCC(COND_GT, out);

	/* prog = array->ptrs[index]; */
	EMIT(PPC_RAW_RLWINM(_R3, b2p_index, 2, 0, 29));
	EMIT(PPC_RAW_ADD(_R3, _R3, b2p_bpf_array));
	EMIT(PPC_RAW_LWZ(_R3, _R3, offsetof(struct bpf_array, ptrs)));
	EMIT(PPC_RAW_STW(_R0, _R1, bpf_jit_stack_offsetof(ctx, BPF_PPC_TC)));

	/*
	 * if (prog == NULL)
	 *   goto out;
	 */
	EMIT(PPC_RAW_CMPLWI(_R3, 0));
	PPC_BCC(COND_EQ, out);

	/* goto *(prog->bpf_func + prologue_size); */
	EMIT(PPC_RAW_LWZ(_R3, _R3, offsetof(struct bpf_prog, bpf_func)));

	if (ctx->seen & SEEN_FUNC)
		EMIT(PPC_RAW_LWZ(_R0, _R1, BPF_PPC_STACKFRAME(ctx) + PPC_LR_STKOFF));

	EMIT(PPC_RAW_ADDIC(_R3, _R3, BPF_TAILCALL_PROLOGUE_SIZE));

	if (ctx->seen & SEEN_FUNC)
		EMIT(PPC_RAW_MTLR(_R0));

	EMIT(PPC_RAW_MTCTR(_R3));

	EMIT(PPC_RAW_MR(_R3, bpf_to_ppc(ctx, BPF_REG_1)));

	/* restore NVRs, ... */
	bpf_jit_emit_common_epilogue(image, ctx);

	EMIT(PPC_RAW_BCTR());

	/* out: */
	return 0;
}

/* Assemble the body code between the prologue & epilogue */
int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *ctx,
		       u32 *addrs, bool extra_pass)
{
	const struct bpf_insn *insn = fp->insnsi;
	int flen = fp->len;
	int i, ret;

	/* Start of epilogue code - will only be valid 2nd pass onwards */
	u32 exit_addr = addrs[flen];

	for (i = 0; i < flen; i++) {
		u32 code = insn[i].code;
		u32 dst_reg = bpf_to_ppc(ctx, insn[i].dst_reg);
		u32 dst_reg_h = dst_reg - 1;
		u32 src_reg = bpf_to_ppc(ctx, insn[i].src_reg);
		u32 src_reg_h = src_reg - 1;
		u32 tmp_reg = bpf_to_ppc(ctx, TMP_REG);
		s16 off = insn[i].off;
		s32 imm = insn[i].imm;
		bool func_addr_fixed;
		u64 func_addr;
		u32 true_cond;

		/*
		 * addrs[] maps a BPF bytecode address into a real offset from
		 * the start of the body code.
		 */
		addrs[i] = ctx->idx * 4;

		/*
		 * As an optimization, we note down which registers
		 * are used so that we can only save/restore those in our
		 * prologue and epilogue. We do this here regardless of whether
		 * the actual BPF instruction uses src/dst registers or not
		 * (for instance, BPF_CALL does not use them). The expectation
		 * is that those instructions will have src_reg/dst_reg set to
		 * 0. Even otherwise, we just lose some prologue/epilogue
		 * optimization but everything else should work without
		 * any issues.
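		 *
		 * On PPC32 each BPF register is mapped to a pair of GPRs: the
		 * GPR listed in b2p[] holds the low 32 bits and the one just
		 * below it (reg - 1) holds the high 32 bits, so both halves of
		 * the pair are marked as seen below.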
		 */
		if (dst_reg >= 3 && dst_reg < 32) {
			bpf_set_seen_register(ctx, dst_reg);
			bpf_set_seen_register(ctx, dst_reg_h);
		}

		if (src_reg >= 3 && src_reg < 32) {
			bpf_set_seen_register(ctx, src_reg);
			bpf_set_seen_register(ctx, src_reg_h);
		}

		switch (code) {
		/*
		 * Arithmetic operations: ADD/SUB/MUL/DIV/MOD/NEG
		 */
		case BPF_ALU | BPF_ADD | BPF_X: /* (u32) dst += (u32) src */
			EMIT(PPC_RAW_ADD(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU64 | BPF_ADD | BPF_X: /* dst += src */
			EMIT(PPC_RAW_ADDC(dst_reg, dst_reg, src_reg));
			EMIT(PPC_RAW_ADDE(dst_reg_h, dst_reg_h, src_reg_h));
			break;
		case BPF_ALU | BPF_SUB | BPF_X: /* (u32) dst -= (u32) src */
			EMIT(PPC_RAW_SUB(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU64 | BPF_SUB | BPF_X: /* dst -= src */
			EMIT(PPC_RAW_SUBFC(dst_reg, src_reg, dst_reg));
			EMIT(PPC_RAW_SUBFE(dst_reg_h, src_reg_h, dst_reg_h));
			break;
		case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
			imm = -imm;
			fallthrough;
		case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
			if (IMM_HA(imm) & 0xffff)
				EMIT(PPC_RAW_ADDIS(dst_reg, dst_reg, IMM_HA(imm)));
			if (IMM_L(imm))
				EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm)));
			break;
		case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
			imm = -imm;
			fallthrough;
		case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
			if (!imm)
				break;

			if (imm >= -32768 && imm < 32768) {
				EMIT(PPC_RAW_ADDIC(dst_reg, dst_reg, imm));
			} else {
				PPC_LI32(_R0, imm);
				EMIT(PPC_RAW_ADDC(dst_reg, dst_reg, _R0));
			}
			if (imm >= 0 || (BPF_OP(code) == BPF_SUB && imm == 0x80000000))
				EMIT(PPC_RAW_ADDZE(dst_reg_h, dst_reg_h));
			else
				EMIT(PPC_RAW_ADDME(dst_reg_h, dst_reg_h));
			break;
		case BPF_ALU64 | BPF_MUL | BPF_X: /* dst *= src */
			bpf_set_seen_register(ctx, tmp_reg);
			EMIT(PPC_RAW_MULW(_R0, dst_reg, src_reg_h));
			EMIT(PPC_RAW_MULW(dst_reg_h, dst_reg_h, src_reg));
			EMIT(PPC_RAW_MULHWU(tmp_reg, dst_reg, src_reg));
			EMIT(PPC_RAW_MULW(dst_reg, dst_reg, src_reg));
			EMIT(PPC_RAW_ADD(dst_reg_h, dst_reg_h, _R0));
			EMIT(PPC_RAW_ADD(dst_reg_h, dst_reg_h, tmp_reg));
			break;
		case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
			EMIT(PPC_RAW_MULW(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU | BPF_MUL | BPF_K: /* (u32) dst *= (u32) imm */
			if (imm >= -32768 && imm < 32768) {
				EMIT(PPC_RAW_MULI(dst_reg, dst_reg, imm));
			} else {
				PPC_LI32(_R0, imm);
				EMIT(PPC_RAW_MULW(dst_reg, dst_reg, _R0));
			}
			break;
		case BPF_ALU64 | BPF_MUL | BPF_K: /* dst *= imm */
			if (!imm) {
				PPC_LI32(dst_reg, 0);
				PPC_LI32(dst_reg_h, 0);
				break;
			}
			if (imm == 1)
				break;
			if (imm == -1) {
				EMIT(PPC_RAW_SUBFIC(dst_reg, dst_reg, 0));
				EMIT(PPC_RAW_SUBFZE(dst_reg_h, dst_reg_h));
				break;
			}
			bpf_set_seen_register(ctx, tmp_reg);
			PPC_LI32(tmp_reg, imm);
			EMIT(PPC_RAW_MULW(dst_reg_h, dst_reg_h, tmp_reg));
			if (imm < 0)
				EMIT(PPC_RAW_SUB(dst_reg_h, dst_reg_h, dst_reg));
			EMIT(PPC_RAW_MULHWU(_R0, dst_reg, tmp_reg));
			EMIT(PPC_RAW_MULW(dst_reg, dst_reg, tmp_reg));
			EMIT(PPC_RAW_ADD(dst_reg_h, dst_reg_h, _R0));
			break;
		case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
			EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
			EMIT(PPC_RAW_DIVWU(_R0, dst_reg, src_reg));
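			/* _R0 = dst / src; multiply back and subtract to get the remainder */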
			EMIT(PPC_RAW_MULW(_R0, src_reg, _R0));
			EMIT(PPC_RAW_SUB(dst_reg, dst_reg, _R0));
			break;
		case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
			return -EOPNOTSUPP;
		case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */
			return -EOPNOTSUPP;
		case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */
			if (!imm)
				return -EINVAL;
			if (imm == 1)
				break;

			PPC_LI32(_R0, imm);
			EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, _R0));
			break;
		case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
			if (!imm)
				return -EINVAL;

			if (!is_power_of_2((u32)imm)) {
				bpf_set_seen_register(ctx, tmp_reg);
				PPC_LI32(tmp_reg, imm);
				EMIT(PPC_RAW_DIVWU(_R0, dst_reg, tmp_reg));
				EMIT(PPC_RAW_MULW(_R0, tmp_reg, _R0));
				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, _R0));
				break;
			}
			if (imm == 1)
				EMIT(PPC_RAW_LI(dst_reg, 0));
			else
				EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 32 - ilog2((u32)imm), 31));

			break;
		case BPF_ALU64 | BPF_MOD | BPF_K: /* dst %= imm */
			if (!imm)
				return -EINVAL;
			if (imm < 0)
				imm = -imm;
			if (!is_power_of_2(imm))
				return -EOPNOTSUPP;
			if (imm == 1)
				EMIT(PPC_RAW_LI(dst_reg, 0));
			else
				EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 32 - ilog2(imm), 31));
			EMIT(PPC_RAW_LI(dst_reg_h, 0));
			break;
		case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
			if (!imm)
				return -EINVAL;
			if (!is_power_of_2(abs(imm)))
				return -EOPNOTSUPP;

			if (imm < 0) {
				EMIT(PPC_RAW_SUBFIC(dst_reg, dst_reg, 0));
				EMIT(PPC_RAW_SUBFZE(dst_reg_h, dst_reg_h));
				imm = -imm;
			}
			if (imm == 1)
				break;
			imm = ilog2(imm);
			EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 32 - imm, imm, 31));
			EMIT(PPC_RAW_RLWIMI(dst_reg, dst_reg_h, 32 - imm, 0, imm - 1));
			EMIT(PPC_RAW_SRAWI(dst_reg_h, dst_reg_h, imm));
			break;
		case BPF_ALU | BPF_NEG: /* (u32) dst = -dst */
			EMIT(PPC_RAW_NEG(dst_reg, dst_reg));
			break;
		case BPF_ALU64 | BPF_NEG: /* dst = -dst */
			EMIT(PPC_RAW_SUBFIC(dst_reg, dst_reg, 0));
			EMIT(PPC_RAW_SUBFZE(dst_reg_h, dst_reg_h));
			break;

		/*
		 * Logical operations: AND/OR/XOR/[A]LSH/[A]RSH
		 */
		case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
			EMIT(PPC_RAW_AND(dst_reg, dst_reg, src_reg));
			EMIT(PPC_RAW_AND(dst_reg_h, dst_reg_h, src_reg_h));
			break;
		case BPF_ALU | BPF_AND | BPF_X: /* (u32) dst = dst & src */
			EMIT(PPC_RAW_AND(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
			if (imm >= 0)
				EMIT(PPC_RAW_LI(dst_reg_h, 0));
			fallthrough;
		case BPF_ALU | BPF_AND | BPF_K: /* (u32) dst = dst & imm */
			if (!IMM_H(imm)) {
				EMIT(PPC_RAW_ANDI(dst_reg, dst_reg, IMM_L(imm)));
			} else if (!IMM_L(imm)) {
				EMIT(PPC_RAW_ANDIS(dst_reg, dst_reg, IMM_H(imm)));
			} else if (imm == (((1 << fls(imm)) - 1) ^ ((1 << (ffs(imm) - 1)) - 1))) {
				EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0,
						    32 - fls(imm), 32 - ffs(imm)));
			} else {
				PPC_LI32(_R0, imm);
				EMIT(PPC_RAW_AND(dst_reg, dst_reg, _R0));
			}
			break;
		case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
			EMIT(PPC_RAW_OR(dst_reg, dst_reg, src_reg));
			EMIT(PPC_RAW_OR(dst_reg_h, dst_reg_h, src_reg_h));
			break;
		case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
			EMIT(PPC_RAW_OR(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU64 | BPF_OR | BPF_K:/* dst = dst | imm */
			/* Sign-extended */
			if (imm < 0)
				EMIT(PPC_RAW_LI(dst_reg_h, -1));
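			/* the low 32 bits are OR'ed by the 32-bit case below */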
			fallthrough;
		case BPF_ALU | BPF_OR | BPF_K:/* dst = (u32) dst | (u32) imm */
			if (IMM_L(imm))
				EMIT(PPC_RAW_ORI(dst_reg, dst_reg, IMM_L(imm)));
			if (IMM_H(imm))
				EMIT(PPC_RAW_ORIS(dst_reg, dst_reg, IMM_H(imm)));
			break;
		case BPF_ALU64 | BPF_XOR | BPF_X: /* dst ^= src */
			if (dst_reg == src_reg) {
				EMIT(PPC_RAW_LI(dst_reg, 0));
				EMIT(PPC_RAW_LI(dst_reg_h, 0));
			} else {
				EMIT(PPC_RAW_XOR(dst_reg, dst_reg, src_reg));
				EMIT(PPC_RAW_XOR(dst_reg_h, dst_reg_h, src_reg_h));
			}
			break;
		case BPF_ALU | BPF_XOR | BPF_X: /* (u32) dst ^= src */
			if (dst_reg == src_reg)
				EMIT(PPC_RAW_LI(dst_reg, 0));
			else
				EMIT(PPC_RAW_XOR(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU64 | BPF_XOR | BPF_K: /* dst ^= imm */
			if (imm < 0)
				EMIT(PPC_RAW_NOR(dst_reg_h, dst_reg_h, dst_reg_h));
			fallthrough;
		case BPF_ALU | BPF_XOR | BPF_K: /* (u32) dst ^= (u32) imm */
			if (IMM_L(imm))
				EMIT(PPC_RAW_XORI(dst_reg, dst_reg, IMM_L(imm)));
			if (IMM_H(imm))
				EMIT(PPC_RAW_XORIS(dst_reg, dst_reg, IMM_H(imm)));
			break;
		case BPF_ALU | BPF_LSH | BPF_X: /* (u32) dst <<= (u32) src */
			EMIT(PPC_RAW_SLW(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU64 | BPF_LSH | BPF_X: /* dst <<= src; */
			bpf_set_seen_register(ctx, tmp_reg);
			EMIT(PPC_RAW_SUBFIC(_R0, src_reg, 32));
			EMIT(PPC_RAW_SLW(dst_reg_h, dst_reg_h, src_reg));
			EMIT(PPC_RAW_ADDI(tmp_reg, src_reg, 32));
			EMIT(PPC_RAW_SRW(_R0, dst_reg, _R0));
			EMIT(PPC_RAW_SLW(tmp_reg, dst_reg, tmp_reg));
			EMIT(PPC_RAW_OR(dst_reg_h, dst_reg_h, _R0));
			EMIT(PPC_RAW_SLW(dst_reg, dst_reg, src_reg));
			EMIT(PPC_RAW_OR(dst_reg_h, dst_reg_h, tmp_reg));
			break;
		case BPF_ALU | BPF_LSH | BPF_K: /* (u32) dst <<= (u32) imm */
			if (!imm)
				break;
			EMIT(PPC_RAW_SLWI(dst_reg, dst_reg, imm));
			break;
		case BPF_ALU64 | BPF_LSH | BPF_K: /* dst <<= imm */
			if (imm < 0)
				return -EINVAL;
			if (!imm)
				break;
			if (imm < 32) {
				EMIT(PPC_RAW_RLWINM(dst_reg_h, dst_reg_h, imm, 0, 31 - imm));
				EMIT(PPC_RAW_RLWIMI(dst_reg_h, dst_reg, imm, 32 - imm, 31));
				EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, imm, 0, 31 - imm));
				break;
			}
			if (imm < 64)
				EMIT(PPC_RAW_RLWINM(dst_reg_h, dst_reg, imm, 0, 31 - imm));
			else
				EMIT(PPC_RAW_LI(dst_reg_h, 0));
			EMIT(PPC_RAW_LI(dst_reg, 0));
			break;
		case BPF_ALU | BPF_RSH | BPF_X: /* (u32) dst >>= (u32) src */
			EMIT(PPC_RAW_SRW(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU64 | BPF_RSH | BPF_X: /* dst >>= src */
			bpf_set_seen_register(ctx, tmp_reg);
			EMIT(PPC_RAW_SUBFIC(_R0, src_reg, 32));
			EMIT(PPC_RAW_SRW(dst_reg, dst_reg, src_reg));
			EMIT(PPC_RAW_ADDI(tmp_reg, src_reg, 32));
			EMIT(PPC_RAW_SLW(_R0, dst_reg_h, _R0));
			EMIT(PPC_RAW_SRW(tmp_reg, dst_reg_h, tmp_reg));
			EMIT(PPC_RAW_OR(dst_reg, dst_reg, _R0));
			EMIT(PPC_RAW_SRW(dst_reg_h, dst_reg_h, src_reg));
			EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp_reg));
			break;
		case BPF_ALU | BPF_RSH | BPF_K: /* (u32) dst >>= (u32) imm */
			if (!imm)
				break;
			EMIT(PPC_RAW_SRWI(dst_reg, dst_reg, imm));
			break;
		case BPF_ALU64 | BPF_RSH | BPF_K: /* dst >>= imm */
			if (imm < 0)
				return -EINVAL;
			if (!imm)
				break;
			if (imm < 32) {
				EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 32 - imm, imm, 31));
				EMIT(PPC_RAW_RLWIMI(dst_reg, dst_reg_h, 32 - imm, 0, imm - 1));
				EMIT(PPC_RAW_RLWINM(dst_reg_h, dst_reg_h, 32 - imm, imm, 31));
				break;
			}
			if (imm < 64)
				EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg_h, 64 - imm, imm - 32, 31));
			else
				EMIT(PPC_RAW_LI(dst_reg, 0));
			EMIT(PPC_RAW_LI(dst_reg_h, 0));
			break;
		case BPF_ALU | BPF_ARSH | BPF_X: /* (s32) dst >>= src */
			EMIT(PPC_RAW_SRAW(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
			bpf_set_seen_register(ctx, tmp_reg);
			EMIT(PPC_RAW_SUBFIC(_R0, src_reg, 32));
			EMIT(PPC_RAW_SRW(dst_reg, dst_reg, src_reg));
			EMIT(PPC_RAW_SLW(_R0, dst_reg_h, _R0));
			EMIT(PPC_RAW_ADDI(tmp_reg, src_reg, 32));
			EMIT(PPC_RAW_OR(dst_reg, dst_reg, _R0));
			EMIT(PPC_RAW_RLWINM(_R0, tmp_reg, 0, 26, 26));
			EMIT(PPC_RAW_SRAW(tmp_reg, dst_reg_h, tmp_reg));
			EMIT(PPC_RAW_SRAW(dst_reg_h, dst_reg_h, src_reg));
			EMIT(PPC_RAW_SLW(tmp_reg, tmp_reg, _R0));
			EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp_reg));
			break;
		case BPF_ALU | BPF_ARSH | BPF_K: /* (s32) dst >>= imm */
			if (!imm)
				break;
			EMIT(PPC_RAW_SRAWI(dst_reg, dst_reg, imm));
			break;
		case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */
			if (imm < 0)
				return -EINVAL;
			if (!imm)
				break;
			if (imm < 32) {
				EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 32 - imm, imm, 31));
				EMIT(PPC_RAW_RLWIMI(dst_reg, dst_reg_h, 32 - imm, 0, imm - 1));
				EMIT(PPC_RAW_SRAWI(dst_reg_h, dst_reg_h, imm));
				break;
			}
			if (imm < 64)
				EMIT(PPC_RAW_SRAWI(dst_reg, dst_reg_h, imm - 32));
			else
				EMIT(PPC_RAW_SRAWI(dst_reg, dst_reg_h, 31));
			EMIT(PPC_RAW_SRAWI(dst_reg_h, dst_reg_h, 31));
			break;

		/*
		 * MOV
		 */
		case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
			if (dst_reg == src_reg)
				break;
			EMIT(PPC_RAW_MR(dst_reg, src_reg));
			EMIT(PPC_RAW_MR(dst_reg_h, src_reg_h));
			break;
		case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */
			/* special mov32 for zext */
			if (imm == 1)
				EMIT(PPC_RAW_LI(dst_reg_h, 0));
			else if (dst_reg != src_reg)
				EMIT(PPC_RAW_MR(dst_reg, src_reg));
			break;
		case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = (s64) imm */
			PPC_LI32(dst_reg, imm);
			PPC_EX32(dst_reg_h, imm);
			break;
		case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */
			PPC_LI32(dst_reg, imm);
			break;

		/*
		 * BPF_FROM_BE/LE
		 */
		case BPF_ALU | BPF_END | BPF_FROM_LE:
			switch (imm) {
			case 16:
				/* Copy 16 bits to upper part */
				EMIT(PPC_RAW_RLWIMI(dst_reg, dst_reg, 16, 0, 15));
				/* Rotate 8 bits right & mask */
				EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 24, 16, 31));
				break;
			case 32:
				/*
				 * Rotate word left by 8 bits:
				 * 2 bytes are already in their final position
				 * -- byte 2 and 4 (of bytes 1, 2, 3 and 4)
				 */
				EMIT(PPC_RAW_RLWINM(_R0, dst_reg, 8, 0, 31));
				/* Rotate 24 bits and insert byte 1 */
				EMIT(PPC_RAW_RLWIMI(_R0, dst_reg, 24, 0, 7));
				/* Rotate 24 bits and insert byte 3 */
				EMIT(PPC_RAW_RLWIMI(_R0, dst_reg, 24, 16, 23));
				EMIT(PPC_RAW_MR(dst_reg, _R0));
				break;
			case 64:
				bpf_set_seen_register(ctx, tmp_reg);
				EMIT(PPC_RAW_RLWINM(tmp_reg, dst_reg, 8, 0, 31));
				EMIT(PPC_RAW_RLWINM(_R0, dst_reg_h, 8, 0, 31));
				/* Rotate 24 bits and insert byte 1 */
				EMIT(PPC_RAW_RLWIMI(tmp_reg, dst_reg, 24, 0, 7));
				EMIT(PPC_RAW_RLWIMI(_R0, dst_reg_h, 24, 0, 7));
				/* Rotate 24 bits and insert byte 3 */
				EMIT(PPC_RAW_RLWIMI(tmp_reg, dst_reg, 24, 16, 23));
				EMIT(PPC_RAW_RLWIMI(_R0, dst_reg_h, 24, 16, 23));
				EMIT(PPC_RAW_MR(dst_reg, _R0));
				EMIT(PPC_RAW_MR(dst_reg_h, tmp_reg));
				break;
			}
			break;
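		/*
		 * PPC32 here is big-endian, so BPF_FROM_BE needs no byte
		 * swapping: only the 16-bit case must clear the upper
		 * halfword.
		 */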
		case BPF_ALU | BPF_END | BPF_FROM_BE:
			switch (imm) {
			case 16:
				/* zero-extend 16 bits into 32 bits */
				EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 16, 31));
				break;
			case 32:
			case 64:
				/* nop */
				break;
			}
			break;

		/*
		 * BPF_ST NOSPEC (speculation barrier)
		 */
		case BPF_ST | BPF_NOSPEC:
			break;

		/*
		 * BPF_ST(X)
		 */
		case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */
			EMIT(PPC_RAW_STB(src_reg, dst_reg, off));
			break;
		case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
			PPC_LI32(_R0, imm);
			EMIT(PPC_RAW_STB(_R0, dst_reg, off));
			break;
		case BPF_STX | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = src */
			EMIT(PPC_RAW_STH(src_reg, dst_reg, off));
			break;
		case BPF_ST | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = imm */
			PPC_LI32(_R0, imm);
			EMIT(PPC_RAW_STH(_R0, dst_reg, off));
			break;
		case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
			EMIT(PPC_RAW_STW(src_reg, dst_reg, off));
			break;
		case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
			PPC_LI32(_R0, imm);
			EMIT(PPC_RAW_STW(_R0, dst_reg, off));
			break;
		case BPF_STX | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = src */
			EMIT(PPC_RAW_STW(src_reg_h, dst_reg, off));
			EMIT(PPC_RAW_STW(src_reg, dst_reg, off + 4));
			break;
		case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
			PPC_LI32(_R0, imm);
			EMIT(PPC_RAW_STW(_R0, dst_reg, off + 4));
			PPC_EX32(_R0, imm);
			EMIT(PPC_RAW_STW(_R0, dst_reg, off));
			break;

		/*
		 * BPF_STX ATOMIC (atomic ops)
		 */
		case BPF_STX | BPF_ATOMIC | BPF_W:
			if (imm != BPF_ADD) {
				pr_err_ratelimited("eBPF filter atomic op code %02x (@%d) unsupported\n",
						   code, i);
				return -ENOTSUPP;
			}

			/* *(u32 *)(dst + off) += src */

			bpf_set_seen_register(ctx, tmp_reg);
			/* Get offset into TMP_REG */
			EMIT(PPC_RAW_LI(tmp_reg, off));
			/* load value from memory into r0 */
			EMIT(PPC_RAW_LWARX(_R0, tmp_reg, dst_reg, 0));
			/* add value from src_reg into this */
			EMIT(PPC_RAW_ADD(_R0, _R0, src_reg));
			/* store result back */
			EMIT(PPC_RAW_STWCX(_R0, tmp_reg, dst_reg));
			/* we're done if this succeeded */
			PPC_BCC_SHORT(COND_NE, (ctx->idx - 3) * 4);
			break;

		case BPF_STX | BPF_ATOMIC | BPF_DW: /* *(u64 *)(dst + off) += src */
			return -EOPNOTSUPP;

		/*
		 * BPF_LDX
		 */
		case BPF_LDX | BPF_MEM | BPF_B: /* dst = *(u8 *)(ul) (src + off) */
			EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
			if (!fp->aux->verifier_zext)
				EMIT(PPC_RAW_LI(dst_reg_h, 0));
			break;
		case BPF_LDX | BPF_MEM | BPF_H: /* dst = *(u16 *)(ul) (src + off) */
			EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off));
			if (!fp->aux->verifier_zext)
				EMIT(PPC_RAW_LI(dst_reg_h, 0));
			break;
		case BPF_LDX | BPF_MEM | BPF_W: /* dst = *(u32 *)(ul) (src + off) */
			EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off));
			if (!fp->aux->verifier_zext)
				EMIT(PPC_RAW_LI(dst_reg_h, 0));
			break;
		case BPF_LDX | BPF_MEM | BPF_DW: /* dst = *(u64 *)(ul) (src + off) */
			EMIT(PPC_RAW_LWZ(dst_reg_h, src_reg, off));
			EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off + 4));
			break;

		/*
		 * Doubleword load
		 * 16 byte instruction that uses two 'struct bpf_insn'
		 */
		case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
			PPC_LI32(dst_reg_h, (u32)insn[i + 1].imm);
			PPC_LI32(dst_reg, (u32)insn[i].imm);
			/* Adjust for two bpf instructions */
			addrs[++i] = ctx->idx * 4;
			break;

		/*
		 * Return/Exit
		 */
		case BPF_JMP | BPF_EXIT:
			/*
			 * If this isn't the very last instruction, branch to
			 * the epilogue. If we _are_ the last instruction,
			 * we'll just fall through to the epilogue.
			 */
			if (i != flen - 1)
				PPC_JMP(exit_addr);
			/* else fall through to the epilogue */
			break;

		/*
		 * Call kernel helper or bpf function
		 */
		case BPF_JMP | BPF_CALL:
			ctx->seen |= SEEN_FUNC;

			ret = bpf_jit_get_func_addr(fp, &insn[i], extra_pass,
						    &func_addr, &func_addr_fixed);
			if (ret < 0)
				return ret;

			if (bpf_is_seen_register(ctx, bpf_to_ppc(ctx, BPF_REG_5))) {
				EMIT(PPC_RAW_STW(bpf_to_ppc(ctx, BPF_REG_5) - 1, _R1, 8));
				EMIT(PPC_RAW_STW(bpf_to_ppc(ctx, BPF_REG_5), _R1, 12));
			}

			bpf_jit_emit_func_call_rel(image, ctx, func_addr);

			EMIT(PPC_RAW_MR(bpf_to_ppc(ctx, BPF_REG_0) - 1, _R3));
			EMIT(PPC_RAW_MR(bpf_to_ppc(ctx, BPF_REG_0), _R4));
			break;

		/*
		 * Jumps and branches
		 */
		case BPF_JMP | BPF_JA:
			PPC_JMP(addrs[i + 1 + off]);
			break;

		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSGT | BPF_K:
		case BPF_JMP | BPF_JSGT | BPF_X:
		case BPF_JMP32 | BPF_JGT | BPF_K:
		case BPF_JMP32 | BPF_JGT | BPF_X:
		case BPF_JMP32 | BPF_JSGT | BPF_K:
		case BPF_JMP32 | BPF_JSGT | BPF_X:
			true_cond = COND_GT;
			goto cond_branch;
		case BPF_JMP | BPF_JLT | BPF_K:
		case BPF_JMP | BPF_JLT | BPF_X:
		case BPF_JMP | BPF_JSLT | BPF_K:
		case BPF_JMP | BPF_JSLT | BPF_X:
		case BPF_JMP32 | BPF_JLT | BPF_K:
		case BPF_JMP32 | BPF_JLT | BPF_X:
		case BPF_JMP32 | BPF_JSLT | BPF_K:
		case BPF_JMP32 | BPF_JSLT | BPF_X:
			true_cond = COND_LT;
			goto cond_branch;
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JSGE | BPF_K:
		case BPF_JMP | BPF_JSGE | BPF_X:
		case BPF_JMP32 | BPF_JGE | BPF_K:
		case BPF_JMP32 | BPF_JGE | BPF_X:
		case BPF_JMP32 | BPF_JSGE | BPF_K:
		case BPF_JMP32 | BPF_JSGE | BPF_X:
			true_cond = COND_GE;
			goto cond_branch;
		case BPF_JMP | BPF_JLE | BPF_K:
		case BPF_JMP | BPF_JLE | BPF_X:
		case BPF_JMP | BPF_JSLE | BPF_K:
		case BPF_JMP | BPF_JSLE | BPF_X:
		case BPF_JMP32 | BPF_JLE | BPF_K:
		case BPF_JMP32 | BPF_JLE | BPF_X:
		case BPF_JMP32 | BPF_JSLE | BPF_K:
		case BPF_JMP32 | BPF_JSLE | BPF_X:
			true_cond = COND_LE;
			goto cond_branch;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP32 | BPF_JEQ | BPF_K:
		case BPF_JMP32 | BPF_JEQ | BPF_X:
			true_cond = COND_EQ;
			goto cond_branch;
		case BPF_JMP | BPF_JNE | BPF_K:
		case BPF_JMP | BPF_JNE | BPF_X:
		case BPF_JMP32 | BPF_JNE | BPF_K:
		case BPF_JMP32 | BPF_JNE | BPF_X:
			true_cond = COND_NE;
			goto cond_branch;
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
		case BPF_JMP32 | BPF_JSET | BPF_K:
		case BPF_JMP32 | BPF_JSET | BPF_X:
			true_cond = COND_NE;
			/* fallthrough; */

cond_branch:
			switch (code) {
			case BPF_JMP | BPF_JGT | BPF_X:
			case BPF_JMP | BPF_JLT | BPF_X:
			case BPF_JMP | BPF_JGE | BPF_X:
			case BPF_JMP | BPF_JLE | BPF_X:
			case BPF_JMP | BPF_JEQ | BPF_X:
			case BPF_JMP | BPF_JNE | BPF_X:
				/* unsigned comparison */
				EMIT(PPC_RAW_CMPLW(dst_reg_h, src_reg_h));
				PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
				EMIT(PPC_RAW_CMPLW(dst_reg, src_reg));
				break;
			case BPF_JMP32 | BPF_JGT | BPF_X:
			case BPF_JMP32 | BPF_JLT | BPF_X:
			case BPF_JMP32 | BPF_JGE | BPF_X:
			case BPF_JMP32 | BPF_JLE | BPF_X:
			case BPF_JMP32 | BPF_JEQ | BPF_X:
			case BPF_JMP32 | BPF_JNE | BPF_X:
				/* unsigned comparison */
				EMIT(PPC_RAW_CMPLW(dst_reg, src_reg));
				break;
			case BPF_JMP | BPF_JSGT | BPF_X:
			case BPF_JMP | BPF_JSLT | BPF_X:
			case BPF_JMP | BPF_JSGE | BPF_X:
			case BPF_JMP | BPF_JSLE | BPF_X:
				/* signed comparison */
				EMIT(PPC_RAW_CMPW(dst_reg_h, src_reg_h));
				PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
				EMIT(PPC_RAW_CMPLW(dst_reg, src_reg));
				break;
			case BPF_JMP32 | BPF_JSGT | BPF_X:
			case BPF_JMP32 | BPF_JSLT | BPF_X:
			case BPF_JMP32 | BPF_JSGE | BPF_X:
			case BPF_JMP32 | BPF_JSLE | BPF_X:
				/* signed comparison */
				EMIT(PPC_RAW_CMPW(dst_reg, src_reg));
				break;
			case BPF_JMP | BPF_JSET | BPF_X:
				EMIT(PPC_RAW_AND_DOT(_R0, dst_reg_h, src_reg_h));
				PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
				EMIT(PPC_RAW_AND_DOT(_R0, dst_reg, src_reg));
				break;
			case BPF_JMP32 | BPF_JSET | BPF_X:
				EMIT(PPC_RAW_AND_DOT(_R0, dst_reg, src_reg));
				break;
			case BPF_JMP | BPF_JNE | BPF_K:
			case BPF_JMP | BPF_JEQ | BPF_K:
			case BPF_JMP | BPF_JGT | BPF_K:
			case BPF_JMP | BPF_JLT | BPF_K:
			case BPF_JMP | BPF_JGE | BPF_K:
			case BPF_JMP | BPF_JLE | BPF_K:
				/*
				 * Need sign-extended load, so only positive
				 * values can be used as imm in cmplwi
				 */
				if (imm >= 0 && imm < 32768) {
					EMIT(PPC_RAW_CMPLWI(dst_reg_h, 0));
					PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
					EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
				} else {
					/* sign-extending load ... but unsigned comparison */
					PPC_EX32(_R0, imm);
					EMIT(PPC_RAW_CMPLW(dst_reg_h, _R0));
					PPC_LI32(_R0, imm);
					PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
					EMIT(PPC_RAW_CMPLW(dst_reg, _R0));
				}
				break;
			case BPF_JMP32 | BPF_JNE | BPF_K:
			case BPF_JMP32 | BPF_JEQ | BPF_K:
			case BPF_JMP32 | BPF_JGT | BPF_K:
			case BPF_JMP32 | BPF_JLT | BPF_K:
			case BPF_JMP32 | BPF_JGE | BPF_K:
			case BPF_JMP32 | BPF_JLE | BPF_K:
				if (imm >= 0 && imm < 65536) {
					EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
				} else {
					PPC_LI32(_R0, imm);
					EMIT(PPC_RAW_CMPLW(dst_reg, _R0));
				}
				break;
			case BPF_JMP | BPF_JSGT | BPF_K:
			case BPF_JMP | BPF_JSLT | BPF_K:
			case BPF_JMP | BPF_JSGE | BPF_K:
			case BPF_JMP | BPF_JSLE | BPF_K:
				if (imm >= 0 && imm < 65536) {
					EMIT(PPC_RAW_CMPWI(dst_reg_h, imm < 0 ? -1 : 0));
					PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
					EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
				} else {
					/* sign-extending load */
					EMIT(PPC_RAW_CMPWI(dst_reg_h, imm < 0 ? -1 : 0));
					PPC_LI32(_R0, imm);
					PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
					EMIT(PPC_RAW_CMPLW(dst_reg, _R0));
				}
				break;
			case BPF_JMP32 | BPF_JSGT | BPF_K:
			case BPF_JMP32 | BPF_JSLT | BPF_K:
			case BPF_JMP32 | BPF_JSGE | BPF_K:
			case BPF_JMP32 | BPF_JSLE | BPF_K:
				/*
				 * signed comparison, so any 16-bit value
				 * can be used in cmpwi
				 */
				if (imm >= -32768 && imm < 32768) {
					EMIT(PPC_RAW_CMPWI(dst_reg, imm));
				} else {
					/* sign-extending load */
					PPC_LI32(_R0, imm);
					EMIT(PPC_RAW_CMPW(dst_reg, _R0));
				}
				break;
			case BPF_JMP | BPF_JSET | BPF_K:
				/* andi does not sign-extend the immediate */
				if (imm >= 0 && imm < 32768) {
					/* PPC_ANDI is _only/always_ dot-form */
					EMIT(PPC_RAW_ANDI(_R0, dst_reg, imm));
				} else {
					PPC_LI32(_R0, imm);
					if (imm < 0) {
						EMIT(PPC_RAW_CMPWI(dst_reg_h, 0));
						PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
					}
					EMIT(PPC_RAW_AND_DOT(_R0, dst_reg, _R0));
				}
				break;
			case BPF_JMP32 | BPF_JSET | BPF_K:
				/* andi does not sign-extend the immediate */
				if (imm >= 0 && imm < 32768) {
					/* PPC_ANDI is _only/always_ dot-form */
					EMIT(PPC_RAW_ANDI(_R0, dst_reg, imm));
				} else {
					PPC_LI32(_R0, imm);
					EMIT(PPC_RAW_AND_DOT(_R0, dst_reg, _R0));
				}
				break;
			}
			PPC_BCC(true_cond, addrs[i + 1 + off]);
			break;

		/*
		 * Tail call
		 */
		case BPF_JMP | BPF_TAIL_CALL:
			ctx->seen |= SEEN_TAILCALL;
			ret = bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
			if (ret < 0)
				return ret;
			break;

		default:
			/*
			 * The filter contains something cruel & unusual.
			 * We don't handle it, but also there shouldn't be
			 * anything missing from our list.
			 */
			pr_err_ratelimited("eBPF filter opcode %04x (@%d) unsupported\n", code, i);
			return -EOPNOTSUPP;
		}
		if (BPF_CLASS(code) == BPF_ALU && !fp->aux->verifier_zext &&
		    !insn_is_zext(&insn[i + 1]) && !(BPF_OP(code) == BPF_END && imm == 64))
			EMIT(PPC_RAW_LI(dst_reg_h, 0));
	}

	/* Set end-of-body-code address for exit. */
	addrs[i] = ctx->idx * 4;

	return 0;
}