// SPDX-License-Identifier: GPL-2.0-only
/*
 * eBPF JIT compiler for PPC32
 *
 * Copyright 2020 Christophe Leroy <christophe.leroy@csgroup.eu>
 *		  CS GROUP France
 *
 * Based on PPC64 eBPF JIT compiler by Naveen N. Rao
 */
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <asm/asm-compat.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <asm/kprobes.h>
#include <linux/bpf.h>

#include "bpf_jit.h"

/*
 * Stack layout:
 *
 *		[	prev sp		] <-------------
 *		[   nv gpr save area	] 16 * 4	|
 * fp (r31) -->	[   ebpf stack space	] up to 512	|
 *		[     frame header	] 16		|
 * sp (r1) --->	[    stack pointer	] --------------
 */

/* for gpr non volatile registers r17 to r31 (15) + tail call */
#define BPF_PPC_STACK_SAVE	(15 * 4 + 4)
/* stack frame, ensure this is quadword aligned */
#define BPF_PPC_STACKFRAME(ctx)	(STACK_FRAME_MIN_SIZE + BPF_PPC_STACK_SAVE + (ctx)->stack_size)

/* BPF register usage */
#define TMP_REG	(MAX_BPF_JIT_REG + 0)

/* BPF to ppc register mappings */
const int b2p[MAX_BPF_JIT_REG + 1] = {
	/* function return value */
	[BPF_REG_0] = 12,
	/* function arguments */
	[BPF_REG_1] = 4,
	[BPF_REG_2] = 6,
	[BPF_REG_3] = 8,
	[BPF_REG_4] = 10,
	[BPF_REG_5] = 22,
	/* non volatile registers */
	[BPF_REG_6] = 24,
	[BPF_REG_7] = 26,
	[BPF_REG_8] = 28,
	[BPF_REG_9] = 30,
	/* frame pointer aka BPF_REG_10 */
	[BPF_REG_FP] = 18,
	/* eBPF jit internal registers */
	[BPF_REG_AX] = 20,
	[TMP_REG] = 31,		/* 32 bits */
};

static int bpf_to_ppc(struct codegen_context *ctx, int reg)
{
	return ctx->b2p[reg];
}

/* PPC NVR range -- update this if we ever use NVRs below r17 */
#define BPF_PPC_NVR_MIN		17
#define BPF_PPC_TC		16

static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
{
	if ((reg >= BPF_PPC_NVR_MIN && reg < 32) || reg == BPF_PPC_TC)
		return BPF_PPC_STACKFRAME(ctx) - 4 * (32 - reg);

	WARN(true, "BPF JIT is asking about unknown registers, will crash the stack");
	/* Use the hole we have left for alignment */
	return BPF_PPC_STACKFRAME(ctx) - 4;
}

void bpf_jit_realloc_regs(struct codegen_context *ctx)
{
	if (ctx->seen & SEEN_FUNC)
		return;

	while (ctx->seen & SEEN_NVREG_MASK &&
	       (ctx->seen & SEEN_VREG_MASK) != SEEN_VREG_MASK) {
		int old = 32 - fls(ctx->seen & (SEEN_NVREG_MASK & 0xaaaaaaab));
		int new = 32 - fls(~ctx->seen & (SEEN_VREG_MASK & 0xaaaaaaaa));
		int i;

		for (i = BPF_REG_0; i <= TMP_REG; i++) {
			if (ctx->b2p[i] != old)
				continue;
			ctx->b2p[i] = new;
			bpf_set_seen_register(ctx, new);
			bpf_clear_seen_register(ctx, old);
			if (i != TMP_REG) {
				bpf_set_seen_register(ctx, new - 1);
				bpf_clear_seen_register(ctx, old - 1);
			}
			break;
		}
	}
}
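
/*
 * Each 64-bit BPF register is held in a pair of adjacent ppc32 GPRs:
 * b2p[] records the GPR carrying the low 32 bits, and the high 32 bits
 * live in the GPR just below it (e.g. BPF_REG_1 -> r4 low / r3 high).
 * This is why code below derives the high word as "reg - 1", and why
 * bpf_jit_realloc_regs() moves pairs (old, old - 1) together when it
 * remaps a non-volatile pair onto an unused volatile pair so the
 * prologue/epilogue can skip saving and restoring NVRs.
 */
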
void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
{
	int i;

	/* First arg comes in as a 32 bits pointer. */
	EMIT(PPC_RAW_MR(bpf_to_ppc(ctx, BPF_REG_1), _R3));
	EMIT(PPC_RAW_LI(bpf_to_ppc(ctx, BPF_REG_1) - 1, 0));
	EMIT(PPC_RAW_STWU(_R1, _R1, -BPF_PPC_STACKFRAME(ctx)));

	/*
	 * Initialize tail_call_cnt in stack frame if we do tail calls.
	 * Otherwise, put in NOPs so that it can be skipped when we are
	 * invoked through a tail call.
	 */
	if (ctx->seen & SEEN_TAILCALL)
		EMIT(PPC_RAW_STW(bpf_to_ppc(ctx, BPF_REG_1) - 1, _R1,
				 bpf_jit_stack_offsetof(ctx, BPF_PPC_TC)));
	else
		EMIT(PPC_RAW_NOP());

#define BPF_TAILCALL_PROLOGUE_SIZE	16

	/*
	 * We need a stack frame, but we don't necessarily need to
	 * save/restore LR unless we call other functions
	 */
	if (ctx->seen & SEEN_FUNC)
		EMIT(PPC_RAW_MFLR(_R0));

	/*
	 * Back up non-volatile regs -- registers r17-r31
	 */
	for (i = BPF_PPC_NVR_MIN; i <= 31; i++)
		if (bpf_is_seen_register(ctx, i))
			EMIT(PPC_RAW_STW(i, _R1, bpf_jit_stack_offsetof(ctx, i)));

	/* If needed retrieve arguments 9 and 10, ie 5th 64 bits arg.*/
	if (bpf_is_seen_register(ctx, bpf_to_ppc(ctx, BPF_REG_5))) {
		EMIT(PPC_RAW_LWZ(bpf_to_ppc(ctx, BPF_REG_5) - 1, _R1, BPF_PPC_STACKFRAME(ctx) + 8));
		EMIT(PPC_RAW_LWZ(bpf_to_ppc(ctx, BPF_REG_5), _R1, BPF_PPC_STACKFRAME(ctx) + 12));
	}

	/* Setup frame pointer to point to the bpf stack area */
	if (bpf_is_seen_register(ctx, bpf_to_ppc(ctx, BPF_REG_FP))) {
		EMIT(PPC_RAW_LI(bpf_to_ppc(ctx, BPF_REG_FP) - 1, 0));
		EMIT(PPC_RAW_ADDI(bpf_to_ppc(ctx, BPF_REG_FP), _R1,
				  STACK_FRAME_MIN_SIZE + ctx->stack_size));
	}

	if (ctx->seen & SEEN_FUNC)
		EMIT(PPC_RAW_STW(_R0, _R1, BPF_PPC_STACKFRAME(ctx) + PPC_LR_STKOFF));
}

static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx)
{
	int i;

	/* Restore NVRs */
	for (i = BPF_PPC_NVR_MIN; i <= 31; i++)
		if (bpf_is_seen_register(ctx, i))
			EMIT(PPC_RAW_LWZ(i, _R1, bpf_jit_stack_offsetof(ctx, i)));
}

void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
{
	EMIT(PPC_RAW_MR(_R3, bpf_to_ppc(ctx, BPF_REG_0)));

	bpf_jit_emit_common_epilogue(image, ctx);

	/* Tear down our stack frame */

	if (ctx->seen & SEEN_FUNC)
		EMIT(PPC_RAW_LWZ(_R0, _R1, BPF_PPC_STACKFRAME(ctx) + PPC_LR_STKOFF));

	EMIT(PPC_RAW_ADDI(_R1, _R1, BPF_PPC_STACKFRAME(ctx)));

	if (ctx->seen & SEEN_FUNC)
		EMIT(PPC_RAW_MTLR(_R0));

	EMIT(PPC_RAW_BLR());
}

void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 func)
{
	s32 rel = (s32)func - (s32)(image + ctx->idx);

	if (image && rel < 0x2000000 && rel >= -0x2000000) {
		PPC_BL_ABS(func);
		EMIT(PPC_RAW_NOP());
		EMIT(PPC_RAW_NOP());
		EMIT(PPC_RAW_NOP());
	} else {
		/* Load function address into r0 */
		EMIT(PPC_RAW_LIS(_R0, IMM_H(func)));
		EMIT(PPC_RAW_ORI(_R0, _R0, IMM_L(func)));
		EMIT(PPC_RAW_MTCTR(_R0));
		EMIT(PPC_RAW_BCTRL());
	}
}

static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
{
	/*
	 * By now, the eBPF program has already setup parameters in r3-r8
	 * r3-r4/BPF_REG_1 - pointer to ctx -- passed as is to the next bpf program
	 * r5-r6/BPF_REG_2 - pointer to bpf_array
	 * r7-r8/BPF_REG_3 - index in bpf_array
	 */
	int b2p_bpf_array = bpf_to_ppc(ctx, BPF_REG_2);
	int b2p_index = bpf_to_ppc(ctx, BPF_REG_3);

	/*
	 * if (index >= array->map.max_entries)
	 *   goto out;
	 */
	EMIT(PPC_RAW_LWZ(_R0, b2p_bpf_array, offsetof(struct bpf_array, map.max_entries)));
	EMIT(PPC_RAW_CMPLW(b2p_index, _R0));
	EMIT(PPC_RAW_LWZ(_R0, _R1, bpf_jit_stack_offsetof(ctx, BPF_PPC_TC)));
	PPC_BCC(COND_GE, out);

	/*
	 * if (tail_call_cnt >= MAX_TAIL_CALL_CNT)
	 *   goto out;
	 */
	EMIT(PPC_RAW_CMPLWI(_R0, MAX_TAIL_CALL_CNT));
	/* tail_call_cnt++; */
	EMIT(PPC_RAW_ADDIC(_R0, _R0, 1));
	PPC_BCC(COND_GE, out);

	/* prog = array->ptrs[index]; */
	EMIT(PPC_RAW_RLWINM(_R3, b2p_index, 2, 0, 29));
	EMIT(PPC_RAW_ADD(_R3, _R3, b2p_bpf_array));
	EMIT(PPC_RAW_LWZ(_R3, _R3, offsetof(struct bpf_array, ptrs)));
	EMIT(PPC_RAW_STW(_R0, _R1, bpf_jit_stack_offsetof(ctx, BPF_PPC_TC)));

	/*
	 * if (prog == NULL)
	 *   goto out;
	 */
	EMIT(PPC_RAW_CMPLWI(_R3, 0));
	PPC_BCC(COND_EQ, out);

	/* goto *(prog->bpf_func + prologue_size); */
	EMIT(PPC_RAW_LWZ(_R3, _R3, offsetof(struct bpf_prog, bpf_func)));

	if (ctx->seen & SEEN_FUNC)
		EMIT(PPC_RAW_LWZ(_R0, _R1, BPF_PPC_STACKFRAME(ctx) + PPC_LR_STKOFF));

	EMIT(PPC_RAW_ADDIC(_R3, _R3, BPF_TAILCALL_PROLOGUE_SIZE));

	if (ctx->seen & SEEN_FUNC)
		EMIT(PPC_RAW_MTLR(_R0));

	EMIT(PPC_RAW_MTCTR(_R3));

	EMIT(PPC_RAW_MR(_R3, bpf_to_ppc(ctx, BPF_REG_1)));

	/* tear down stack, restore NVRs, ... */
	bpf_jit_emit_common_epilogue(image, ctx);

	EMIT(PPC_RAW_BCTR());

	/* out: */
	return 0;
}
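
/*
 * In C terms, the sequence emitted above is:
 *
 *	if (index >= array->map.max_entries)
 *		goto out;
 *	if (tail_call_cnt >= MAX_TAIL_CALL_CNT)
 *		goto out;
 *	tail_call_cnt++;
 *	prog = array->ptrs[index];
 *	if (!prog)
 *		goto out;
 *	goto *(prog->bpf_func + BPF_TAILCALL_PROLOGUE_SIZE);
 *
 * Branching past the first BPF_TAILCALL_PROLOGUE_SIZE bytes of the
 * target program skips its stack frame setup, so the current frame and
 * the tail_call_cnt stored in it are reused by the callee.
 */
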
/* Assemble the body code between the prologue & epilogue */
int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *ctx,
		       u32 *addrs, int pass)
{
	const struct bpf_insn *insn = fp->insnsi;
	int flen = fp->len;
	int i, ret;

	/* Start of epilogue code - will only be valid 2nd pass onwards */
	u32 exit_addr = addrs[flen];

	for (i = 0; i < flen; i++) {
		u32 code = insn[i].code;
		u32 dst_reg = bpf_to_ppc(ctx, insn[i].dst_reg);
		u32 dst_reg_h = dst_reg - 1;
		u32 src_reg = bpf_to_ppc(ctx, insn[i].src_reg);
		u32 src_reg_h = src_reg - 1;
		u32 tmp_reg = bpf_to_ppc(ctx, TMP_REG);
		u32 size = BPF_SIZE(code);
		s16 off = insn[i].off;
		s32 imm = insn[i].imm;
		bool func_addr_fixed;
		u64 func_addr;
		u32 true_cond;
		u32 tmp_idx;
		int j;

		/*
		 * addrs[] maps a BPF bytecode address into a real offset from
		 * the start of the body code.
		 */
		addrs[i] = ctx->idx * 4;

		/*
		 * As an optimization, we note down which registers
		 * are used so that we can only save/restore those in our
		 * prologue and epilogue. We do this here regardless of whether
		 * the actual BPF instruction uses src/dst registers or not
		 * (for instance, BPF_CALL does not use them). The expectation
		 * is that those instructions will have src_reg/dst_reg set to
		 * 0. Even otherwise, we just lose some prologue/epilogue
		 * optimization but everything else should work without
		 * any issues.
		 */
		if (dst_reg >= 3 && dst_reg < 32) {
			bpf_set_seen_register(ctx, dst_reg);
			bpf_set_seen_register(ctx, dst_reg_h);
		}

		if (src_reg >= 3 && src_reg < 32) {
			bpf_set_seen_register(ctx, src_reg);
			bpf_set_seen_register(ctx, src_reg_h);
		}

		switch (code) {
		/*
		 * Arithmetic operations: ADD/SUB/MUL/DIV/MOD/NEG
		 */
		case BPF_ALU | BPF_ADD | BPF_X: /* (u32) dst += (u32) src */
			EMIT(PPC_RAW_ADD(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU64 | BPF_ADD | BPF_X: /* dst += src */
			EMIT(PPC_RAW_ADDC(dst_reg, dst_reg, src_reg));
			EMIT(PPC_RAW_ADDE(dst_reg_h, dst_reg_h, src_reg_h));
			break;
		case BPF_ALU | BPF_SUB | BPF_X: /* (u32) dst -= (u32) src */
			EMIT(PPC_RAW_SUB(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU64 | BPF_SUB | BPF_X: /* dst -= src */
			EMIT(PPC_RAW_SUBFC(dst_reg, src_reg, dst_reg));
			EMIT(PPC_RAW_SUBFE(dst_reg_h, src_reg_h, dst_reg_h));
			break;
		case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
			imm = -imm;
			fallthrough;
		case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
			if (IMM_HA(imm) & 0xffff)
				EMIT(PPC_RAW_ADDIS(dst_reg, dst_reg, IMM_HA(imm)));
			if (IMM_L(imm))
				EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm)));
			break;
		case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
			imm = -imm;
			fallthrough;
		case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
			if (!imm)
				break;

			if (imm >= -32768 && imm < 32768) {
				EMIT(PPC_RAW_ADDIC(dst_reg, dst_reg, imm));
			} else {
				PPC_LI32(_R0, imm);
				EMIT(PPC_RAW_ADDC(dst_reg, dst_reg, _R0));
			}
			if (imm >= 0 || (BPF_OP(code) == BPF_SUB && imm == 0x80000000))
				EMIT(PPC_RAW_ADDZE(dst_reg_h, dst_reg_h));
			else
				EMIT(PPC_RAW_ADDME(dst_reg_h, dst_reg_h));
			break;
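
		/*
		 * 64-bit add/sub is built from the carrying pairs addc/adde
		 * and subfc/subfe: the low words combine first and the carry
		 * (or borrow) propagates into the high words. For immediates,
		 * addze/addme add the carry plus 0 or -1 to the high word,
		 * which matches sign-extending the 32-bit imm to 64 bits.
		 */
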
		case BPF_ALU64 | BPF_MUL | BPF_X: /* dst *= src */
			bpf_set_seen_register(ctx, tmp_reg);
			EMIT(PPC_RAW_MULW(_R0, dst_reg, src_reg_h));
			EMIT(PPC_RAW_MULW(dst_reg_h, dst_reg_h, src_reg));
			EMIT(PPC_RAW_MULHWU(tmp_reg, dst_reg, src_reg));
			EMIT(PPC_RAW_MULW(dst_reg, dst_reg, src_reg));
			EMIT(PPC_RAW_ADD(dst_reg_h, dst_reg_h, _R0));
			EMIT(PPC_RAW_ADD(dst_reg_h, dst_reg_h, tmp_reg));
			break;
		case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
			EMIT(PPC_RAW_MULW(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU | BPF_MUL | BPF_K: /* (u32) dst *= (u32) imm */
			if (imm >= -32768 && imm < 32768) {
				EMIT(PPC_RAW_MULI(dst_reg, dst_reg, imm));
			} else {
				PPC_LI32(_R0, imm);
				EMIT(PPC_RAW_MULW(dst_reg, dst_reg, _R0));
			}
			break;
		case BPF_ALU64 | BPF_MUL | BPF_K: /* dst *= imm */
			if (!imm) {
				PPC_LI32(dst_reg, 0);
				PPC_LI32(dst_reg_h, 0);
				break;
			}
			if (imm == 1)
				break;
			if (imm == -1) {
				EMIT(PPC_RAW_SUBFIC(dst_reg, dst_reg, 0));
				EMIT(PPC_RAW_SUBFZE(dst_reg_h, dst_reg_h));
				break;
			}
			bpf_set_seen_register(ctx, tmp_reg);
			PPC_LI32(tmp_reg, imm);
			EMIT(PPC_RAW_MULW(dst_reg_h, dst_reg_h, tmp_reg));
			if (imm < 0)
				EMIT(PPC_RAW_SUB(dst_reg_h, dst_reg_h, dst_reg));
			EMIT(PPC_RAW_MULHWU(_R0, dst_reg, tmp_reg));
			EMIT(PPC_RAW_MULW(dst_reg, dst_reg, tmp_reg));
			EMIT(PPC_RAW_ADD(dst_reg_h, dst_reg_h, _R0));
			break;
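
		/*
		 * The 64x64 multiplies above use the schoolbook split modulo
		 * 2^64: with dst = (dh:dl) and src = (sh:sl),
		 * dst * src = dl*sl + ((dh*sl + dl*sh) << 32), so the low
		 * word is mulw(dl, sl) and the high word accumulates
		 * mulhwu(dl, sl), mulw(dh, sl) and mulw(dl, sh). For a
		 * negative imm the extra subtract accounts for sh == -1,
		 * where the dl*sh term reduces to -dl.
		 */
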
		case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
			EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
			EMIT(PPC_RAW_DIVWU(_R0, dst_reg, src_reg));
			EMIT(PPC_RAW_MULW(_R0, src_reg, _R0));
			EMIT(PPC_RAW_SUB(dst_reg, dst_reg, _R0));
			break;
		case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
			return -EOPNOTSUPP;
		case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */
			return -EOPNOTSUPP;
		case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */
			if (!imm)
				return -EINVAL;
			if (imm == 1)
				break;

			PPC_LI32(_R0, imm);
			EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, _R0));
			break;
		case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
			if (!imm)
				return -EINVAL;

			if (!is_power_of_2((u32)imm)) {
				bpf_set_seen_register(ctx, tmp_reg);
				PPC_LI32(tmp_reg, imm);
				EMIT(PPC_RAW_DIVWU(_R0, dst_reg, tmp_reg));
				EMIT(PPC_RAW_MULW(_R0, tmp_reg, _R0));
				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, _R0));
				break;
			}
			if (imm == 1)
				EMIT(PPC_RAW_LI(dst_reg, 0));
			else
				EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 32 - ilog2((u32)imm), 31));

			break;
		case BPF_ALU64 | BPF_MOD | BPF_K: /* dst %= imm */
			if (!imm)
				return -EINVAL;
			if (imm < 0)
				imm = -imm;
			if (!is_power_of_2(imm))
				return -EOPNOTSUPP;
			if (imm == 1)
				EMIT(PPC_RAW_LI(dst_reg, 0));
			else
				EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 32 - ilog2(imm), 31));
			EMIT(PPC_RAW_LI(dst_reg_h, 0));
			break;
		case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
			if (!imm)
				return -EINVAL;
			if (!is_power_of_2(abs(imm)))
				return -EOPNOTSUPP;

			if (imm < 0) {
				EMIT(PPC_RAW_SUBFIC(dst_reg, dst_reg, 0));
				EMIT(PPC_RAW_SUBFZE(dst_reg_h, dst_reg_h));
				imm = -imm;
			}
			if (imm == 1)
				break;
			imm = ilog2(imm);
			EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 32 - imm, imm, 31));
			EMIT(PPC_RAW_RLWIMI(dst_reg, dst_reg_h, 32 - imm, 0, imm - 1));
			EMIT(PPC_RAW_SRAWI(dst_reg_h, dst_reg_h, imm));
			break;
		case BPF_ALU | BPF_NEG: /* (u32) dst = -dst */
			EMIT(PPC_RAW_NEG(dst_reg, dst_reg));
			break;
		case BPF_ALU64 | BPF_NEG: /* dst = -dst */
			EMIT(PPC_RAW_SUBFIC(dst_reg, dst_reg, 0));
			EMIT(PPC_RAW_SUBFZE(dst_reg_h, dst_reg_h));
			break;
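
		/*
		 * Power-of-two 64-bit div/mod above reduce to bit operations:
		 * dst %= 2^k keeps the low k bits of the low word (rlwinm
		 * with mask 32-k..31) and zeroes the high word, while
		 * dst /= 2^k is a 64-bit right shift by k: rlwinm + rlwimi
		 * rebuild the low word from both halves and srawi shifts the
		 * high word. Other 64-bit divisors have no inline sequence
		 * here and are rejected with -EOPNOTSUPP.
		 */
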
		/*
		 * Logical operations: AND/OR/XOR/[A]LSH/[A]RSH
		 */
		case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
			EMIT(PPC_RAW_AND(dst_reg, dst_reg, src_reg));
			EMIT(PPC_RAW_AND(dst_reg_h, dst_reg_h, src_reg_h));
			break;
		case BPF_ALU | BPF_AND | BPF_X: /* (u32) dst = dst & src */
			EMIT(PPC_RAW_AND(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
			if (imm >= 0)
				EMIT(PPC_RAW_LI(dst_reg_h, 0));
			fallthrough;
		case BPF_ALU | BPF_AND | BPF_K: /* (u32) dst = dst & imm */
			if (!IMM_H(imm)) {
				EMIT(PPC_RAW_ANDI(dst_reg, dst_reg, IMM_L(imm)));
			} else if (!IMM_L(imm)) {
				EMIT(PPC_RAW_ANDIS(dst_reg, dst_reg, IMM_H(imm)));
			} else if (imm == (((1 << fls(imm)) - 1) ^ ((1 << (ffs(imm) - 1)) - 1))) {
				EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0,
						    32 - fls(imm), 32 - ffs(imm)));
			} else {
				PPC_LI32(_R0, imm);
				EMIT(PPC_RAW_AND(dst_reg, dst_reg, _R0));
			}
			break;
		case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
			EMIT(PPC_RAW_OR(dst_reg, dst_reg, src_reg));
			EMIT(PPC_RAW_OR(dst_reg_h, dst_reg_h, src_reg_h));
			break;
		case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
			EMIT(PPC_RAW_OR(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU64 | BPF_OR | BPF_K: /* dst = dst | imm */
			/* Sign-extended */
			if (imm < 0)
				EMIT(PPC_RAW_LI(dst_reg_h, -1));
			fallthrough;
		case BPF_ALU | BPF_OR | BPF_K: /* dst = (u32) dst | (u32) imm */
			if (IMM_L(imm))
				EMIT(PPC_RAW_ORI(dst_reg, dst_reg, IMM_L(imm)));
			if (IMM_H(imm))
				EMIT(PPC_RAW_ORIS(dst_reg, dst_reg, IMM_H(imm)));
			break;
		case BPF_ALU64 | BPF_XOR | BPF_X: /* dst ^= src */
			if (dst_reg == src_reg) {
				EMIT(PPC_RAW_LI(dst_reg, 0));
				EMIT(PPC_RAW_LI(dst_reg_h, 0));
			} else {
				EMIT(PPC_RAW_XOR(dst_reg, dst_reg, src_reg));
				EMIT(PPC_RAW_XOR(dst_reg_h, dst_reg_h, src_reg_h));
			}
			break;
		case BPF_ALU | BPF_XOR | BPF_X: /* (u32) dst ^= src */
			if (dst_reg == src_reg)
				EMIT(PPC_RAW_LI(dst_reg, 0));
			else
				EMIT(PPC_RAW_XOR(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU64 | BPF_XOR | BPF_K: /* dst ^= imm */
			if (imm < 0)
				EMIT(PPC_RAW_NOR(dst_reg_h, dst_reg_h, dst_reg_h));
			fallthrough;
		case BPF_ALU | BPF_XOR | BPF_K: /* (u32) dst ^= (u32) imm */
			if (IMM_L(imm))
				EMIT(PPC_RAW_XORI(dst_reg, dst_reg, IMM_L(imm)));
			if (IMM_H(imm))
				EMIT(PPC_RAW_XORIS(dst_reg, dst_reg, IMM_H(imm)));
			break;
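
		/*
		 * The third AND branch above catches immediates that are one
		 * contiguous run of set bits (bit ffs(imm)-1 up to bit
		 * fls(imm)-1). Such a mask is exactly what a single rlwinm
		 * can apply: in big-endian bit numbering the run spans bits
		 * 32 - fls(imm) to 32 - ffs(imm), which saves the
		 * two-instruction PPC_LI32 load of the immediate.
		 */
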
		case BPF_ALU | BPF_LSH | BPF_X: /* (u32) dst <<= (u32) src */
			EMIT(PPC_RAW_SLW(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU64 | BPF_LSH | BPF_X: /* dst <<= src; */
			bpf_set_seen_register(ctx, tmp_reg);
			EMIT(PPC_RAW_SUBFIC(_R0, src_reg, 32));
			EMIT(PPC_RAW_SLW(dst_reg_h, dst_reg_h, src_reg));
			EMIT(PPC_RAW_ADDI(tmp_reg, src_reg, 32));
			EMIT(PPC_RAW_SRW(_R0, dst_reg, _R0));
			EMIT(PPC_RAW_SLW(tmp_reg, dst_reg, tmp_reg));
			EMIT(PPC_RAW_OR(dst_reg_h, dst_reg_h, _R0));
			EMIT(PPC_RAW_SLW(dst_reg, dst_reg, src_reg));
			EMIT(PPC_RAW_OR(dst_reg_h, dst_reg_h, tmp_reg));
			break;
		case BPF_ALU | BPF_LSH | BPF_K: /* (u32) dst <<= (u32) imm */
			if (!imm)
				break;
			EMIT(PPC_RAW_SLWI(dst_reg, dst_reg, imm));
			break;
		case BPF_ALU64 | BPF_LSH | BPF_K: /* dst <<= imm */
			if (imm < 0)
				return -EINVAL;
			if (!imm)
				break;
			if (imm < 32) {
				EMIT(PPC_RAW_RLWINM(dst_reg_h, dst_reg_h, imm, 0, 31 - imm));
				EMIT(PPC_RAW_RLWIMI(dst_reg_h, dst_reg, imm, 32 - imm, 31));
				EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, imm, 0, 31 - imm));
				break;
			}
			if (imm < 64)
				EMIT(PPC_RAW_RLWINM(dst_reg_h, dst_reg, imm, 0, 31 - imm));
			else
				EMIT(PPC_RAW_LI(dst_reg_h, 0));
			EMIT(PPC_RAW_LI(dst_reg, 0));
			break;
		case BPF_ALU | BPF_RSH | BPF_X: /* (u32) dst >>= (u32) src */
			EMIT(PPC_RAW_SRW(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU64 | BPF_RSH | BPF_X: /* dst >>= src */
			bpf_set_seen_register(ctx, tmp_reg);
			EMIT(PPC_RAW_SUBFIC(_R0, src_reg, 32));
			EMIT(PPC_RAW_SRW(dst_reg, dst_reg, src_reg));
			EMIT(PPC_RAW_ADDI(tmp_reg, src_reg, 32));
			EMIT(PPC_RAW_SLW(_R0, dst_reg_h, _R0));
			EMIT(PPC_RAW_SRW(tmp_reg, dst_reg_h, tmp_reg));
			EMIT(PPC_RAW_OR(dst_reg, dst_reg, _R0));
			EMIT(PPC_RAW_SRW(dst_reg_h, dst_reg_h, src_reg));
			EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp_reg));
			break;
		case BPF_ALU | BPF_RSH | BPF_K: /* (u32) dst >>= (u32) imm */
			if (!imm)
				break;
			EMIT(PPC_RAW_SRWI(dst_reg, dst_reg, imm));
			break;
		case BPF_ALU64 | BPF_RSH | BPF_K: /* dst >>= imm */
			if (imm < 0)
				return -EINVAL;
			if (!imm)
				break;
			if (imm < 32) {
				EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 32 - imm, imm, 31));
				EMIT(PPC_RAW_RLWIMI(dst_reg, dst_reg_h, 32 - imm, 0, imm - 1));
				EMIT(PPC_RAW_RLWINM(dst_reg_h, dst_reg_h, 32 - imm, imm, 31));
				break;
			}
			if (imm < 64)
				EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg_h, 64 - imm, imm - 32, 31));
			else
				EMIT(PPC_RAW_LI(dst_reg, 0));
			EMIT(PPC_RAW_LI(dst_reg_h, 0));
			break;
		case BPF_ALU | BPF_ARSH | BPF_X: /* (s32) dst >>= src */
			EMIT(PPC_RAW_SRAW(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
			bpf_set_seen_register(ctx, tmp_reg);
			EMIT(PPC_RAW_SUBFIC(_R0, src_reg, 32));
			EMIT(PPC_RAW_SRW(dst_reg, dst_reg, src_reg));
			EMIT(PPC_RAW_SLW(_R0, dst_reg_h, _R0));
			EMIT(PPC_RAW_ADDI(tmp_reg, src_reg, 32));
			EMIT(PPC_RAW_OR(dst_reg, dst_reg, _R0));
			EMIT(PPC_RAW_RLWINM(_R0, tmp_reg, 0, 26, 26));
			EMIT(PPC_RAW_SRAW(tmp_reg, dst_reg_h, tmp_reg));
			EMIT(PPC_RAW_SRAW(dst_reg_h, dst_reg_h, src_reg));
			EMIT(PPC_RAW_SLW(tmp_reg, tmp_reg, _R0));
			EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp_reg));
			break;
		case BPF_ALU | BPF_ARSH | BPF_K: /* (s32) dst >>= imm */
			if (!imm)
				break;
			EMIT(PPC_RAW_SRAWI(dst_reg, dst_reg, imm));
			break;
		case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */
			if (imm < 0)
				return -EINVAL;
			if (!imm)
				break;
			if (imm < 32) {
				EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 32 - imm, imm, 31));
				EMIT(PPC_RAW_RLWIMI(dst_reg, dst_reg_h, 32 - imm, 0, imm - 1));
				EMIT(PPC_RAW_SRAWI(dst_reg_h, dst_reg_h, imm));
				break;
			}
			if (imm < 64)
				EMIT(PPC_RAW_SRAWI(dst_reg, dst_reg_h, imm - 32));
			else
				EMIT(PPC_RAW_SRAWI(dst_reg, dst_reg_h, 31));
			EMIT(PPC_RAW_SRAWI(dst_reg_h, dst_reg_h, 31));
			break;
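
		/*
		 * The variable 64-bit shifts above rely on slw/srw taking
		 * the shift amount modulo 64 and returning 0 for amounts
		 * 32..63: with r0 = 32 - n and tmp = n + 32, exactly one of
		 * the two "spill" terms (the other word shifted by 32 - n,
		 * or by n - 32) is non-zero for any n in 0..63, so OR-ing
		 * the partial results is branch-free and correct for every
		 * shift amount.
		 */
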
		/*
		 * MOV
		 */
		case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
			if (dst_reg == src_reg)
				break;
			EMIT(PPC_RAW_MR(dst_reg, src_reg));
			EMIT(PPC_RAW_MR(dst_reg_h, src_reg_h));
			break;
		case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */
			/* special mov32 for zext */
			if (imm == 1)
				EMIT(PPC_RAW_LI(dst_reg_h, 0));
			else if (dst_reg != src_reg)
				EMIT(PPC_RAW_MR(dst_reg, src_reg));
			break;
		case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = (s64) imm */
			PPC_LI32(dst_reg, imm);
			PPC_EX32(dst_reg_h, imm);
			break;
		case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */
			PPC_LI32(dst_reg, imm);
			break;

		/*
		 * BPF_FROM_BE/LE
		 */
		case BPF_ALU | BPF_END | BPF_FROM_LE:
			switch (imm) {
			case 16:
				/* Copy 16 bits to upper part */
				EMIT(PPC_RAW_RLWIMI(dst_reg, dst_reg, 16, 0, 15));
				/* Rotate 8 bits right & mask */
				EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 24, 16, 31));
				break;
			case 32:
				/*
				 * Rotate word left by 8 bits:
				 * 2 bytes are already in their final position
				 * -- byte 2 and 4 (of bytes 1, 2, 3 and 4)
				 */
				EMIT(PPC_RAW_RLWINM(_R0, dst_reg, 8, 0, 31));
				/* Rotate 24 bits and insert byte 1 */
				EMIT(PPC_RAW_RLWIMI(_R0, dst_reg, 24, 0, 7));
				/* Rotate 24 bits and insert byte 3 */
				EMIT(PPC_RAW_RLWIMI(_R0, dst_reg, 24, 16, 23));
				EMIT(PPC_RAW_MR(dst_reg, _R0));
				break;
			case 64:
				bpf_set_seen_register(ctx, tmp_reg);
				EMIT(PPC_RAW_RLWINM(tmp_reg, dst_reg, 8, 0, 31));
				EMIT(PPC_RAW_RLWINM(_R0, dst_reg_h, 8, 0, 31));
				/* Rotate 24 bits and insert byte 1 */
				EMIT(PPC_RAW_RLWIMI(tmp_reg, dst_reg, 24, 0, 7));
				EMIT(PPC_RAW_RLWIMI(_R0, dst_reg_h, 24, 0, 7));
				/* Rotate 24 bits and insert byte 3 */
				EMIT(PPC_RAW_RLWIMI(tmp_reg, dst_reg, 24, 16, 23));
				EMIT(PPC_RAW_RLWIMI(_R0, dst_reg_h, 24, 16, 23));
				EMIT(PPC_RAW_MR(dst_reg, _R0));
				EMIT(PPC_RAW_MR(dst_reg_h, tmp_reg));
				break;
			}
			break;
		case BPF_ALU | BPF_END | BPF_FROM_BE:
			switch (imm) {
			case 16:
				/* zero-extend 16 bits into 32 bits */
				EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 16, 31));
				break;
			case 32:
			case 64:
				/* nop */
				break;
			}
			break;
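
		/*
		 * The 32-bit swab above is the classic three-instruction
		 * rlwinm/rlwimi sequence: rotating left by 8 already puts
		 * two of the four bytes in place, and two rlwimi with a
		 * 24-bit rotate insert the remaining two, all in registers.
		 * The 64-bit form byte-swaps each half and then exchanges
		 * the halves. FROM_BE is a no-op for 32/64 because this
		 * target is big-endian.
		 */
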
		/*
		 * BPF_ST NOSPEC (speculation barrier)
		 */
		case BPF_ST | BPF_NOSPEC:
			break;

		/*
		 * BPF_ST(X)
		 */
		case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */
			EMIT(PPC_RAW_STB(src_reg, dst_reg, off));
			break;
		case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
			PPC_LI32(_R0, imm);
			EMIT(PPC_RAW_STB(_R0, dst_reg, off));
			break;
		case BPF_STX | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = src */
			EMIT(PPC_RAW_STH(src_reg, dst_reg, off));
			break;
		case BPF_ST | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = imm */
			PPC_LI32(_R0, imm);
			EMIT(PPC_RAW_STH(_R0, dst_reg, off));
			break;
		case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
			EMIT(PPC_RAW_STW(src_reg, dst_reg, off));
			break;
		case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
			PPC_LI32(_R0, imm);
			EMIT(PPC_RAW_STW(_R0, dst_reg, off));
			break;
		case BPF_STX | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = src */
			EMIT(PPC_RAW_STW(src_reg_h, dst_reg, off));
			EMIT(PPC_RAW_STW(src_reg, dst_reg, off + 4));
			break;
		case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
			PPC_LI32(_R0, imm);
			EMIT(PPC_RAW_STW(_R0, dst_reg, off + 4));
			PPC_EX32(_R0, imm);
			EMIT(PPC_RAW_STW(_R0, dst_reg, off));
			break;

		/*
		 * BPF_STX ATOMIC (atomic ops)
		 */
		case BPF_STX | BPF_ATOMIC | BPF_W:
			if (imm != BPF_ADD) {
				pr_err_ratelimited("eBPF filter atomic op code %02x (@%d) unsupported\n",
						   code, i);
				return -ENOTSUPP;
			}

			/* *(u32 *)(dst + off) += src */

			bpf_set_seen_register(ctx, tmp_reg);
			/* Get offset into TMP_REG */
			EMIT(PPC_RAW_LI(tmp_reg, off));
			/* load value from memory into r0 */
			EMIT(PPC_RAW_LWARX(_R0, tmp_reg, dst_reg, 0));
			/* add value from src_reg into this */
			EMIT(PPC_RAW_ADD(_R0, _R0, src_reg));
			/* store result back */
			EMIT(PPC_RAW_STWCX(_R0, tmp_reg, dst_reg));
			/* we're done if this succeeded */
			PPC_BCC_SHORT(COND_NE, (ctx->idx - 3) * 4);
			break;
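
		/*
		 * The lwarx/stwcx. pair above is the usual load-reserve /
		 * store-conditional loop: if the reservation is lost because
		 * another CPU wrote the word in between, stwcx. leaves
		 * CR0[EQ] clear and the bne branches back three instructions
		 * to retry the whole read-modify-write.
		 */
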
		case BPF_STX | BPF_ATOMIC | BPF_DW: /* *(u64 *)(dst + off) += src */
			return -EOPNOTSUPP;

		/*
		 * BPF_LDX
		 */
		case BPF_LDX | BPF_MEM | BPF_B: /* dst = *(u8 *)(ul) (src + off) */
		case BPF_LDX | BPF_PROBE_MEM | BPF_B:
		case BPF_LDX | BPF_MEM | BPF_H: /* dst = *(u16 *)(ul) (src + off) */
		case BPF_LDX | BPF_PROBE_MEM | BPF_H:
		case BPF_LDX | BPF_MEM | BPF_W: /* dst = *(u32 *)(ul) (src + off) */
		case BPF_LDX | BPF_PROBE_MEM | BPF_W:
		case BPF_LDX | BPF_MEM | BPF_DW: /* dst = *(u64 *)(ul) (src + off) */
		case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
			/*
			 * As PTR_TO_BTF_ID that uses BPF_PROBE_MEM mode could either be a valid
			 * kernel pointer or NULL but not a userspace address, execute BPF_PROBE_MEM
			 * load only if addr is kernel address (see is_kernel_addr()), otherwise
			 * set dst_reg=0 and move on.
			 */
			if (BPF_MODE(code) == BPF_PROBE_MEM) {
				PPC_LI32(_R0, TASK_SIZE - off);
				EMIT(PPC_RAW_CMPLW(src_reg, _R0));
				PPC_BCC(COND_GT, (ctx->idx + 5) * 4);
				EMIT(PPC_RAW_LI(dst_reg, 0));
				/*
				 * For BPF_DW case, "li reg_h,0" would be needed when
				 * !fp->aux->verifier_zext. Emit NOP otherwise.
				 *
				 * Note that "li reg_h,0" is emitted for BPF_B/H/W case,
				 * if necessary. So, jump there instead of emitting an
				 * additional "li reg_h,0" instruction.
				 */
				if (size == BPF_DW && !fp->aux->verifier_zext)
					EMIT(PPC_RAW_LI(dst_reg_h, 0));
				else
					EMIT(PPC_RAW_NOP());
				/*
				 * Need to jump two instructions instead of one for BPF_DW case
				 * as there are two load instructions for dst_reg_h & dst_reg
				 * respectively.
				 */
				if (size == BPF_DW)
					PPC_JMP((ctx->idx + 3) * 4);
				else
					PPC_JMP((ctx->idx + 2) * 4);
			}

			switch (size) {
			case BPF_B:
				EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
				break;
			case BPF_H:
				EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off));
				break;
			case BPF_W:
				EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off));
				break;
			case BPF_DW:
				EMIT(PPC_RAW_LWZ(dst_reg_h, src_reg, off));
				EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off + 4));
				break;
			}

			if (size != BPF_DW && !fp->aux->verifier_zext)
				EMIT(PPC_RAW_LI(dst_reg_h, 0));

			if (BPF_MODE(code) == BPF_PROBE_MEM) {
				int insn_idx = ctx->idx - 1;
				int jmp_off = 4;

				/*
				 * In case of BPF_DW, two lwz instructions are emitted, one
				 * for higher 32-bit and another for lower 32-bit. So, set
				 * ex->insn to the first of the two and jump over both
				 * instructions in fixup.
				 *
				 * Similarly, with !verifier_zext, two instructions are
				 * emitted for BPF_B/H/W case. So, set ex->insn to the
				 * instruction that could fault and skip over both
				 * instructions.
				 */
				if (size == BPF_DW || !fp->aux->verifier_zext) {
					insn_idx -= 1;
					jmp_off += 4;
				}

				ret = bpf_add_extable_entry(fp, image, pass, ctx, insn_idx,
							    jmp_off, dst_reg);
				if (ret)
					return ret;
			}
			break;
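
		/*
		 * For PROBE_MEM the guard above compares src against
		 * TASK_SIZE - off, so src + off is only dereferenced when it
		 * falls in the kernel address range; otherwise dst is zeroed
		 * and the loads are skipped. Should a guarded load still
		 * fault, the extable entry recorded just above lets the
		 * fixup path zero dst_reg and resume jmp_off bytes past the
		 * faulting instruction instead of oopsing.
		 */
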
		/*
		 * Doubleword load
		 * 16 byte instruction that uses two 'struct bpf_insn'
		 */
		case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
			tmp_idx = ctx->idx;
			PPC_LI32(dst_reg_h, (u32)insn[i + 1].imm);
			PPC_LI32(dst_reg, (u32)insn[i].imm);
			/* padding to allow full 4 instructions for later patching */
			for (j = ctx->idx - tmp_idx; j < 4; j++)
				EMIT(PPC_RAW_NOP());
			/* Adjust for two bpf instructions */
			addrs[++i] = ctx->idx * 4;
			break;

		/*
		 * Return/Exit
		 */
		case BPF_JMP | BPF_EXIT:
			/*
			 * If this isn't the very last instruction, branch to
			 * the epilogue. If we _are_ the last instruction,
			 * we'll just fall through to the epilogue.
			 */
			if (i != flen - 1)
				PPC_JMP(exit_addr);
			/* else fall through to the epilogue */
			break;

		/*
		 * Call kernel helper or bpf function
		 */
		case BPF_JMP | BPF_CALL:
			ctx->seen |= SEEN_FUNC;

			ret = bpf_jit_get_func_addr(fp, &insn[i], false,
						    &func_addr, &func_addr_fixed);
			if (ret < 0)
				return ret;

			if (bpf_is_seen_register(ctx, bpf_to_ppc(ctx, BPF_REG_5))) {
				EMIT(PPC_RAW_STW(bpf_to_ppc(ctx, BPF_REG_5) - 1, _R1, 8));
				EMIT(PPC_RAW_STW(bpf_to_ppc(ctx, BPF_REG_5), _R1, 12));
			}

			bpf_jit_emit_func_call_rel(image, ctx, func_addr);

			EMIT(PPC_RAW_MR(bpf_to_ppc(ctx, BPF_REG_0) - 1, _R3));
			EMIT(PPC_RAW_MR(bpf_to_ppc(ctx, BPF_REG_0), _R4));
			break;

		/*
		 * Jumps and branches
		 */
		case BPF_JMP | BPF_JA:
			PPC_JMP(addrs[i + 1 + off]);
			break;

		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSGT | BPF_K:
		case BPF_JMP | BPF_JSGT | BPF_X:
		case BPF_JMP32 | BPF_JGT | BPF_K:
		case BPF_JMP32 | BPF_JGT | BPF_X:
		case BPF_JMP32 | BPF_JSGT | BPF_K:
		case BPF_JMP32 | BPF_JSGT | BPF_X:
			true_cond = COND_GT;
			goto cond_branch;
		case BPF_JMP | BPF_JLT | BPF_K:
		case BPF_JMP | BPF_JLT | BPF_X:
		case BPF_JMP | BPF_JSLT | BPF_K:
		case BPF_JMP | BPF_JSLT | BPF_X:
		case BPF_JMP32 | BPF_JLT | BPF_K:
		case BPF_JMP32 | BPF_JLT | BPF_X:
		case BPF_JMP32 | BPF_JSLT | BPF_K:
		case BPF_JMP32 | BPF_JSLT | BPF_X:
			true_cond = COND_LT;
			goto cond_branch;
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JSGE | BPF_K:
		case BPF_JMP | BPF_JSGE | BPF_X:
		case BPF_JMP32 | BPF_JGE | BPF_K:
		case BPF_JMP32 | BPF_JGE | BPF_X:
		case BPF_JMP32 | BPF_JSGE | BPF_K:
		case BPF_JMP32 | BPF_JSGE | BPF_X:
			true_cond = COND_GE;
			goto cond_branch;
		case BPF_JMP | BPF_JLE | BPF_K:
		case BPF_JMP | BPF_JLE | BPF_X:
		case BPF_JMP | BPF_JSLE | BPF_K:
		case BPF_JMP | BPF_JSLE | BPF_X:
		case BPF_JMP32 | BPF_JLE | BPF_K:
		case BPF_JMP32 | BPF_JLE | BPF_X:
		case BPF_JMP32 | BPF_JSLE | BPF_K:
		case BPF_JMP32 | BPF_JSLE | BPF_X:
			true_cond = COND_LE;
			goto cond_branch;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP32 | BPF_JEQ | BPF_K:
		case BPF_JMP32 | BPF_JEQ | BPF_X:
			true_cond = COND_EQ;
			goto cond_branch;
		case BPF_JMP | BPF_JNE | BPF_K:
		case BPF_JMP | BPF_JNE | BPF_X:
		case BPF_JMP32 | BPF_JNE | BPF_K:
		case BPF_JMP32 | BPF_JNE | BPF_X:
			true_cond = COND_NE;
			goto cond_branch;
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
		case BPF_JMP32 | BPF_JSET | BPF_K:
		case BPF_JMP32 | BPF_JSET | BPF_X:
			true_cond = COND_NE;
			/* fallthrough; */

cond_branch:
			switch (code) {
			case BPF_JMP | BPF_JGT | BPF_X:
			case BPF_JMP | BPF_JLT | BPF_X:
			case BPF_JMP | BPF_JGE | BPF_X:
			case BPF_JMP | BPF_JLE | BPF_X:
			case BPF_JMP | BPF_JEQ | BPF_X:
			case BPF_JMP | BPF_JNE | BPF_X:
				/* unsigned comparison */
				EMIT(PPC_RAW_CMPLW(dst_reg_h, src_reg_h));
				PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
				EMIT(PPC_RAW_CMPLW(dst_reg, src_reg));
				break;
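
			/*
			 * 64-bit compares are split in two: the high words
			 * decide the outcome unless they are equal, in which
			 * case the short bne falls through to the low-word
			 * compare. The low words always compare unsigned
			 * (cmplw) even for signed predicates, since only the
			 * high words carry the sign.
			 */
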
			case BPF_JMP32 | BPF_JGT | BPF_X:
			case BPF_JMP32 | BPF_JLT | BPF_X:
			case BPF_JMP32 | BPF_JGE | BPF_X:
			case BPF_JMP32 | BPF_JLE | BPF_X:
			case BPF_JMP32 | BPF_JEQ | BPF_X:
			case BPF_JMP32 | BPF_JNE | BPF_X:
				/* unsigned comparison */
				EMIT(PPC_RAW_CMPLW(dst_reg, src_reg));
				break;
			case BPF_JMP | BPF_JSGT | BPF_X:
			case BPF_JMP | BPF_JSLT | BPF_X:
			case BPF_JMP | BPF_JSGE | BPF_X:
			case BPF_JMP | BPF_JSLE | BPF_X:
				/* signed comparison */
				EMIT(PPC_RAW_CMPW(dst_reg_h, src_reg_h));
				PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
				EMIT(PPC_RAW_CMPLW(dst_reg, src_reg));
				break;
			case BPF_JMP32 | BPF_JSGT | BPF_X:
			case BPF_JMP32 | BPF_JSLT | BPF_X:
			case BPF_JMP32 | BPF_JSGE | BPF_X:
			case BPF_JMP32 | BPF_JSLE | BPF_X:
				/* signed comparison */
				EMIT(PPC_RAW_CMPW(dst_reg, src_reg));
				break;
			case BPF_JMP | BPF_JSET | BPF_X:
				EMIT(PPC_RAW_AND_DOT(_R0, dst_reg_h, src_reg_h));
				PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
				EMIT(PPC_RAW_AND_DOT(_R0, dst_reg, src_reg));
				break;
			case BPF_JMP32 | BPF_JSET | BPF_X:
				EMIT(PPC_RAW_AND_DOT(_R0, dst_reg, src_reg));
				break;
			case BPF_JMP | BPF_JNE | BPF_K:
			case BPF_JMP | BPF_JEQ | BPF_K:
			case BPF_JMP | BPF_JGT | BPF_K:
			case BPF_JMP | BPF_JLT | BPF_K:
			case BPF_JMP | BPF_JGE | BPF_K:
			case BPF_JMP | BPF_JLE | BPF_K:
				/*
				 * Need sign-extended load, so only positive
				 * values can be used as imm in cmplwi
				 */
				if (imm >= 0 && imm < 32768) {
					EMIT(PPC_RAW_CMPLWI(dst_reg_h, 0));
					PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
					EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
				} else {
					/* sign-extending load ... but unsigned comparison */
					PPC_EX32(_R0, imm);
					EMIT(PPC_RAW_CMPLW(dst_reg_h, _R0));
					PPC_LI32(_R0, imm);
					PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
					EMIT(PPC_RAW_CMPLW(dst_reg, _R0));
				}
				break;
			case BPF_JMP32 | BPF_JNE | BPF_K:
			case BPF_JMP32 | BPF_JEQ | BPF_K:
			case BPF_JMP32 | BPF_JGT | BPF_K:
			case BPF_JMP32 | BPF_JLT | BPF_K:
			case BPF_JMP32 | BPF_JGE | BPF_K:
			case BPF_JMP32 | BPF_JLE | BPF_K:
				if (imm >= 0 && imm < 65536) {
					EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
				} else {
					PPC_LI32(_R0, imm);
					EMIT(PPC_RAW_CMPLW(dst_reg, _R0));
				}
				break;
			case BPF_JMP | BPF_JSGT | BPF_K:
			case BPF_JMP | BPF_JSLT | BPF_K:
			case BPF_JMP | BPF_JSGE | BPF_K:
			case BPF_JMP | BPF_JSLE | BPF_K:
				if (imm >= 0 && imm < 65536) {
					EMIT(PPC_RAW_CMPWI(dst_reg_h, imm < 0 ? -1 : 0));
					PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
					EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
				} else {
					/* sign-extending load */
					EMIT(PPC_RAW_CMPWI(dst_reg_h, imm < 0 ? -1 : 0));
					PPC_LI32(_R0, imm);
					PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
					EMIT(PPC_RAW_CMPLW(dst_reg, _R0));
				}
				break;
			case BPF_JMP32 | BPF_JSGT | BPF_K:
			case BPF_JMP32 | BPF_JSLT | BPF_K:
			case BPF_JMP32 | BPF_JSGE | BPF_K:
			case BPF_JMP32 | BPF_JSLE | BPF_K:
				/*
				 * signed comparison, so any 16-bit value
				 * can be used in cmpwi
				 */
				if (imm >= -32768 && imm < 32768) {
					EMIT(PPC_RAW_CMPWI(dst_reg, imm));
				} else {
					/* sign-extending load */
					PPC_LI32(_R0, imm);
					EMIT(PPC_RAW_CMPW(dst_reg, _R0));
				}
				break;
			case BPF_JMP | BPF_JSET | BPF_K:
				/* andi does not sign-extend the immediate */
				if (imm >= 0 && imm < 32768) {
					/* PPC_ANDI is _only/always_ dot-form */
					EMIT(PPC_RAW_ANDI(_R0, dst_reg, imm));
				} else {
					PPC_LI32(_R0, imm);
					if (imm < 0) {
						EMIT(PPC_RAW_CMPWI(dst_reg_h, 0));
						PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
					}
					EMIT(PPC_RAW_AND_DOT(_R0, dst_reg, _R0));
				}
				break;
			case BPF_JMP32 | BPF_JSET | BPF_K:
				/* andi does not sign-extend the immediate */
				if (imm >= 0 && imm < 32768) {
					/* PPC_ANDI is _only/always_ dot-form */
					EMIT(PPC_RAW_ANDI(_R0, dst_reg, imm));
				} else {
					PPC_LI32(_R0, imm);
					EMIT(PPC_RAW_AND_DOT(_R0, dst_reg, _R0));
				}
				break;
			}
			PPC_BCC(true_cond, addrs[i + 1 + off]);
			break;

		/*
		 * Tail call
		 */
		case BPF_JMP | BPF_TAIL_CALL:
			ctx->seen |= SEEN_TAILCALL;
			ret = bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
			if (ret < 0)
				return ret;
			break;

		default:
			/*
			 * The filter contains something cruel & unusual.
			 * We don't handle it, but also there shouldn't be
			 * anything missing from our list.
			 */
			pr_err_ratelimited("eBPF filter opcode %04x (@%d) unsupported\n", code, i);
			return -EOPNOTSUPP;
		}
		if (BPF_CLASS(code) == BPF_ALU && !fp->aux->verifier_zext &&
		    !insn_is_zext(&insn[i + 1]) && !(BPF_OP(code) == BPF_END && imm == 64))
			EMIT(PPC_RAW_LI(dst_reg_h, 0));
	}

	/* Set end-of-body-code address for exit. */
	addrs[i] = ctx->idx * 4;

	return 0;
}