// SPDX-License-Identifier: GPL-2.0-only
/*
 * BPF JIT compiler for ARM64
 *
 * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
 */

#define pr_fmt(fmt) "bpf_jit: " fmt

#include <linux/bitfield.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/printk.h>
#include <linux/slab.h>

#include <asm/asm-extable.h>
#include <asm/byteorder.h>
#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
#include <asm/insn.h>
#include <asm/set_memory.h>

#include "bpf_jit.h"

#define TMP_REG_1 (MAX_BPF_JIT_REG + 0)
#define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
#define TCALL_CNT (MAX_BPF_JIT_REG + 2)
#define TMP_REG_3 (MAX_BPF_JIT_REG + 3)

/* Map BPF registers to A64 registers */
static const int bpf2a64[] = {
	/* return value from in-kernel function, and exit value from eBPF */
	[BPF_REG_0] = A64_R(7),
	/* arguments from eBPF program to in-kernel function */
	[BPF_REG_1] = A64_R(0),
	[BPF_REG_2] = A64_R(1),
	[BPF_REG_3] = A64_R(2),
	[BPF_REG_4] = A64_R(3),
	[BPF_REG_5] = A64_R(4),
	/* callee saved registers that in-kernel function will preserve */
	[BPF_REG_6] = A64_R(19),
	[BPF_REG_7] = A64_R(20),
	[BPF_REG_8] = A64_R(21),
	[BPF_REG_9] = A64_R(22),
	/* read-only frame pointer to access stack */
	[BPF_REG_FP] = A64_R(25),
	/* temporary registers for internal BPF JIT */
	[TMP_REG_1] = A64_R(10),
	[TMP_REG_2] = A64_R(11),
	[TMP_REG_3] = A64_R(12),
	/* tail_call_cnt */
	[TCALL_CNT] = A64_R(26),
	/* temporary register for blinding constants */
	[BPF_REG_AX] = A64_R(9),
};

struct jit_ctx {
	const struct bpf_prog *prog;
	int idx;
	int epilogue_offset;
	int *offset;
	int exentry_idx;
	__le32 *image;
	u32 stack_size;
};

static inline void emit(const u32 insn, struct jit_ctx *ctx)
{
	if (ctx->image != NULL)
		ctx->image[ctx->idx] = cpu_to_le32(insn);

	ctx->idx++;
}

static inline void emit_a64_mov_i(const int is64, const int reg,
				  const s32 val, struct jit_ctx *ctx)
{
	u16 hi = val >> 16;
	u16 lo = val & 0xffff;

	if (hi & 0x8000) {
		if (hi == 0xffff) {
			emit(A64_MOVN(is64, reg, (u16)~lo, 0), ctx);
		} else {
			emit(A64_MOVN(is64, reg, (u16)~hi, 16), ctx);
			if (lo != 0xffff)
				emit(A64_MOVK(is64, reg, lo, 0), ctx);
		}
	} else {
		emit(A64_MOVZ(is64, reg, lo, 0), ctx);
		if (hi)
			emit(A64_MOVK(is64, reg, hi, 16), ctx);
	}
}

static int i64_i16_blocks(const u64 val, bool inverse)
{
	return (((val >> 0) & 0xffff) != (inverse ? 0xffff : 0x0000)) +
	       (((val >> 16) & 0xffff) != (inverse ? 0xffff : 0x0000)) +
	       (((val >> 32) & 0xffff) != (inverse ? 0xffff : 0x0000)) +
	       (((val >> 48) & 0xffff) != (inverse ? 0xffff : 0x0000));
}

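/*
 * Materialize a 64-bit immediate with as few instructions as possible:
 * i64_i16_blocks() counts how many 16-bit chunks of the value differ from
 * 0x0000 (or from 0xffff when @inverse), and emit_a64_mov_i64() below
 * starts from MOVZ or MOVN depending on which form needs fewer MOVKs.
 * For example, 0xffffffff00001234 is built with just MOVN+MOVK, because
 * only two of its halfwords differ from 0xffff.
 */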
static inline void emit_a64_mov_i64(const int reg, const u64 val,
				    struct jit_ctx *ctx)
{
	u64 nrm_tmp = val, rev_tmp = ~val;
	bool inverse;
	int shift;

	if (!(nrm_tmp >> 32))
		return emit_a64_mov_i(0, reg, (u32)val, ctx);

	inverse = i64_i16_blocks(nrm_tmp, true) < i64_i16_blocks(nrm_tmp, false);
	shift = max(round_down((inverse ? (fls64(rev_tmp) - 1) :
				(fls64(nrm_tmp) - 1)), 16), 0);
	if (inverse)
		emit(A64_MOVN(1, reg, (rev_tmp >> shift) & 0xffff, shift), ctx);
	else
		emit(A64_MOVZ(1, reg, (nrm_tmp >> shift) & 0xffff, shift), ctx);
	shift -= 16;
	while (shift >= 0) {
		if (((nrm_tmp >> shift) & 0xffff) != (inverse ? 0xffff : 0x0000))
			emit(A64_MOVK(1, reg, (nrm_tmp >> shift) & 0xffff, shift), ctx);
		shift -= 16;
	}
}

/*
 * Kernel addresses in the vmalloc space use at most 48 bits, and the
 * remaining bits are guaranteed to be 0xf. So we can compose the address
 * with a fixed length movn/movk/movk sequence.
 */
static inline void emit_addr_mov_i64(const int reg, const u64 val,
				     struct jit_ctx *ctx)
{
	u64 tmp = val;
	int shift = 0;

	emit(A64_MOVN(1, reg, ~tmp & 0xffff, shift), ctx);
	while (shift < 32) {
		tmp >>= 16;
		shift += 16;
		emit(A64_MOVK(1, reg, tmp & 0xffff, shift), ctx);
	}
}

static inline int bpf2a64_offset(int bpf_insn, int off,
				 const struct jit_ctx *ctx)
{
	/* BPF JMP offset is relative to the next instruction */
	bpf_insn++;
	/*
	 * Whereas arm64 branch instructions encode the offset
	 * from the branch itself, so we must subtract 1 from the
	 * instruction offset.
	 */
	return ctx->offset[bpf_insn + off] - (ctx->offset[bpf_insn] - 1);
}

static void jit_fill_hole(void *area, unsigned int size)
{
	__le32 *ptr;
	/* We are guaranteed to have aligned memory. */
	for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
		*ptr++ = cpu_to_le32(AARCH64_BREAK_FAULT);
}

static inline int epilogue_offset(const struct jit_ctx *ctx)
{
	int to = ctx->epilogue_offset;
	int from = ctx->idx;

	return to - from;
}

static bool is_addsub_imm(u32 imm)
{
	/* Either imm12 or shifted imm12. */
	return !(imm & ~0xfff) || !(imm & ~0xfff000);
}

/* Tail call offset to jump into */
#if IS_ENABLED(CONFIG_ARM64_BTI_KERNEL)
#define PROLOGUE_OFFSET 8
#else
#define PROLOGUE_OFFSET 7
#endif

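/*
 * A tail call branches to bpf_func + PROLOGUE_OFFSET instructions (see
 * emit_bpf_tail_call()), skipping the callee's register saves and its
 * tail_call_cnt initialization and landing on the BTI_J pad (when
 * enabled) just before the stack-size SUB. The callee therefore inherits
 * the caller's frame and the live tail_call_cnt in x26; build_prologue()
 * checks this offset on every pass to catch any drift.
 */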
static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf)
{
	const struct bpf_prog *prog = ctx->prog;
	const u8 r6 = bpf2a64[BPF_REG_6];
	const u8 r7 = bpf2a64[BPF_REG_7];
	const u8 r8 = bpf2a64[BPF_REG_8];
	const u8 r9 = bpf2a64[BPF_REG_9];
	const u8 fp = bpf2a64[BPF_REG_FP];
	const u8 tcc = bpf2a64[TCALL_CNT];
	const int idx0 = ctx->idx;
	int cur_offset;

	/*
	 * BPF prog stack layout
	 *
	 *                         high
	 * original A64_SP =>   0:+-----+ BPF prologue
	 *                        |FP/LR|
	 * current A64_FP =>  -16:+-----+
	 *                        | ... | callee saved registers
	 * BPF fp register => -64:+-----+ <= (BPF_FP)
	 *                        |     |
	 *                        | ... | BPF prog stack
	 *                        |     |
	 *                        +-----+ <= (BPF_FP - prog->aux->stack_depth)
	 *                        |RSVD | padding
	 * current A64_SP =>      +-----+ <= (BPF_FP - ctx->stack_size)
	 *                        |     |
	 *                        | ... | Function call stack
	 *                        |     |
	 *                        +-----+
	 *                          low
	 *
	 */

	/* BTI landing pad */
	if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL))
		emit(A64_BTI_C, ctx);

	/* Save FP and LR registers to stay aligned with ARM64 AAPCS */
	emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
	emit(A64_MOV(1, A64_FP, A64_SP), ctx);

	/* Save callee-saved registers */
	emit(A64_PUSH(r6, r7, A64_SP), ctx);
	emit(A64_PUSH(r8, r9, A64_SP), ctx);
	emit(A64_PUSH(fp, tcc, A64_SP), ctx);

	/* Set up BPF prog stack base register */
	emit(A64_MOV(1, fp, A64_SP), ctx);

	if (!ebpf_from_cbpf) {
		/* Initialize tail_call_cnt */
		emit(A64_MOVZ(1, tcc, 0, 0), ctx);

		cur_offset = ctx->idx - idx0;
		if (cur_offset != PROLOGUE_OFFSET) {
			pr_err_once("PROLOGUE_OFFSET = %d, expected %d!\n",
				    cur_offset, PROLOGUE_OFFSET);
			return -1;
		}

		/* BTI landing pad for the tail call, done with a BR */
		if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL))
			emit(A64_BTI_J, ctx);
	}

	/* Stack must be multiples of 16B */
	ctx->stack_size = round_up(prog->aux->stack_depth, 16);

	/* Set up function call stack */
	emit(A64_SUB_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
	return 0;
}

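/*
 * out_offset is the fixed length of the tail-call sequence emitted by
 * emit_bpf_tail_call(). It is learned on the first (sizing) pass of
 * build_body() and every later pass must emit exactly the same number of
 * instructions, otherwise the early-exit branches encoded relative to
 * "out:" would be wrong; hence the consistency check at the end.
 */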
static int out_offset = -1; /* initialized on the first pass of build_body() */
static int emit_bpf_tail_call(struct jit_ctx *ctx)
{
	/* bpf_tail_call(void *prog_ctx, struct bpf_array *array, u64 index) */
	const u8 r2 = bpf2a64[BPF_REG_2];
	const u8 r3 = bpf2a64[BPF_REG_3];

	const u8 tmp = bpf2a64[TMP_REG_1];
	const u8 prg = bpf2a64[TMP_REG_2];
	const u8 tcc = bpf2a64[TCALL_CNT];
	const int idx0 = ctx->idx;
#define cur_offset (ctx->idx - idx0)
#define jmp_offset (out_offset - (cur_offset))
	size_t off;

	/* if (index >= array->map.max_entries)
	 *     goto out;
	 */
	off = offsetof(struct bpf_array, map.max_entries);
	emit_a64_mov_i64(tmp, off, ctx);
	emit(A64_LDR32(tmp, r2, tmp), ctx);
	emit(A64_MOV(0, r3, r3), ctx);
	emit(A64_CMP(0, r3, tmp), ctx);
	emit(A64_B_(A64_COND_CS, jmp_offset), ctx);

	/* if (tail_call_cnt > MAX_TAIL_CALL_CNT)
	 *     goto out;
	 * tail_call_cnt++;
	 */
	emit_a64_mov_i64(tmp, MAX_TAIL_CALL_CNT, ctx);
	emit(A64_CMP(1, tcc, tmp), ctx);
	emit(A64_B_(A64_COND_HI, jmp_offset), ctx);
	emit(A64_ADD_I(1, tcc, tcc, 1), ctx);

	/* prog = array->ptrs[index];
	 * if (prog == NULL)
	 *     goto out;
	 */
	off = offsetof(struct bpf_array, ptrs);
	emit_a64_mov_i64(tmp, off, ctx);
	emit(A64_ADD(1, tmp, r2, tmp), ctx);
	emit(A64_LSL(1, prg, r3, 3), ctx);
	emit(A64_LDR64(prg, tmp, prg), ctx);
	emit(A64_CBZ(1, prg, jmp_offset), ctx);

	/* goto *(prog->bpf_func + prologue_offset); */
	off = offsetof(struct bpf_prog, bpf_func);
	emit_a64_mov_i64(tmp, off, ctx);
	emit(A64_LDR64(tmp, prg, tmp), ctx);
	emit(A64_ADD_I(1, tmp, tmp, sizeof(u32) * PROLOGUE_OFFSET), ctx);
	emit(A64_ADD_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
	emit(A64_BR(tmp), ctx);

	/* out: */
	if (out_offset == -1)
		out_offset = cur_offset;
	if (cur_offset != out_offset) {
		pr_err_once("tail_call out_offset = %d, expected %d!\n",
			    cur_offset, out_offset);
		return -1;
	}
	return 0;
#undef cur_offset
#undef jmp_offset
}

static void build_epilogue(struct jit_ctx *ctx)
{
	const u8 r0 = bpf2a64[BPF_REG_0];
	const u8 r6 = bpf2a64[BPF_REG_6];
	const u8 r7 = bpf2a64[BPF_REG_7];
	const u8 r8 = bpf2a64[BPF_REG_8];
	const u8 r9 = bpf2a64[BPF_REG_9];
	const u8 fp = bpf2a64[BPF_REG_FP];

	/* We're done with BPF stack */
	emit(A64_ADD_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);

	/* Restore fp (x25) and x26 */
	emit(A64_POP(fp, A64_R(26), A64_SP), ctx);

	/* Restore callee-saved registers */
	emit(A64_POP(r8, r9, A64_SP), ctx);
	emit(A64_POP(r6, r7, A64_SP), ctx);

	/* Restore FP/LR registers */
	emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);

	/* Set return value */
	emit(A64_MOV(1, A64_R(0), r0), ctx);

	emit(A64_RET(A64_LR), ctx);
}

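/*
 * The 32-bit ex->fixup word is split in two: bits [26:0] hold the (always
 * positive, see add_exception_handler()) distance from the fixup field to
 * the instruction following the faulting load, and bits [31:27] hold the
 * A64 register number that must be zeroed before resuming there.
 */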
#define BPF_FIXUP_OFFSET_MASK	GENMASK(26, 0)
#define BPF_FIXUP_REG_MASK	GENMASK(31, 27)

bool ex_handler_bpf(const struct exception_table_entry *ex,
		    struct pt_regs *regs)
{
	off_t offset = FIELD_GET(BPF_FIXUP_OFFSET_MASK, ex->fixup);
	int dst_reg = FIELD_GET(BPF_FIXUP_REG_MASK, ex->fixup);

	regs->regs[dst_reg] = 0;
	regs->pc = (unsigned long)&ex->fixup - offset;
	return true;
}

/* For accesses to BTF pointers, add an entry to the exception table */
static int add_exception_handler(const struct bpf_insn *insn,
				 struct jit_ctx *ctx,
				 int dst_reg)
{
	off_t offset;
	unsigned long pc;
	struct exception_table_entry *ex;

	if (!ctx->image)
		/* First pass */
		return 0;

	if (BPF_MODE(insn->code) != BPF_PROBE_MEM)
		return 0;

	if (!ctx->prog->aux->extable ||
	    WARN_ON_ONCE(ctx->exentry_idx >= ctx->prog->aux->num_exentries))
		return -EINVAL;

	ex = &ctx->prog->aux->extable[ctx->exentry_idx];
	pc = (unsigned long)&ctx->image[ctx->idx - 1];

	offset = pc - (long)&ex->insn;
	if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN))
		return -ERANGE;
	ex->insn = offset;

	/*
	 * Since the extable follows the program, the fixup offset is always
	 * negative and limited to BPF_JIT_REGION_SIZE. Store a positive value
	 * to keep things simple, and put the destination register in the upper
	 * bits. We don't need to worry about buildtime or runtime sort
	 * modifying the upper bits because the table is already sorted, and
	 * isn't part of the main exception table.
	 */
	offset = (long)&ex->fixup - (pc + AARCH64_INSN_SIZE);
	if (!FIELD_FIT(BPF_FIXUP_OFFSET_MASK, offset))
		return -ERANGE;

	ex->fixup = FIELD_PREP(BPF_FIXUP_OFFSET_MASK, offset) |
		    FIELD_PREP(BPF_FIXUP_REG_MASK, dst_reg);

	ex->type = EX_TYPE_BPF;

	ctx->exentry_idx++;
	return 0;
}

/* JITs an eBPF instruction.
 * Returns:
 * 0  - successfully JITed an 8-byte eBPF instruction.
 * >0 - successfully JITed a 16-byte eBPF instruction.
 * <0 - failed to JIT.
 */
static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
		      bool extra_pass)
{
	const u8 code = insn->code;
	const u8 dst = bpf2a64[insn->dst_reg];
	const u8 src = bpf2a64[insn->src_reg];
	const u8 tmp = bpf2a64[TMP_REG_1];
	const u8 tmp2 = bpf2a64[TMP_REG_2];
	const u8 tmp3 = bpf2a64[TMP_REG_3];
	const s16 off = insn->off;
	const s32 imm = insn->imm;
	const int i = insn - ctx->prog->insnsi;
	const bool is64 = BPF_CLASS(code) == BPF_ALU64 ||
			  BPF_CLASS(code) == BPF_JMP;
	const bool isdw = BPF_SIZE(code) == BPF_DW;
	u8 jmp_cond, reg;
	s32 jmp_offset;
	u32 a64_insn;
	int ret;

#define check_imm(bits, imm) do {				\
	if ((((imm) > 0) && ((imm) >> (bits))) ||		\
	    (((imm) < 0) && (~(imm) >> (bits)))) {		\
		pr_info("[%2d] imm=%d(0x%x) out of range\n",	\
			i, imm, imm);				\
		return -EINVAL;					\
	}							\
} while (0)
#define check_imm19(imm) check_imm(19, imm)
#define check_imm26(imm) check_imm(26, imm)

	switch (code) {
	/* dst = src */
	case BPF_ALU | BPF_MOV | BPF_X:
	case BPF_ALU64 | BPF_MOV | BPF_X:
		emit(A64_MOV(is64, dst, src), ctx);
		break;
	/* dst = dst OP src */
	case BPF_ALU | BPF_ADD | BPF_X:
	case BPF_ALU64 | BPF_ADD | BPF_X:
		emit(A64_ADD(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_SUB | BPF_X:
	case BPF_ALU64 | BPF_SUB | BPF_X:
		emit(A64_SUB(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_AND | BPF_X:
	case BPF_ALU64 | BPF_AND | BPF_X:
		emit(A64_AND(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_OR | BPF_X:
	case BPF_ALU64 | BPF_OR | BPF_X:
		emit(A64_ORR(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_XOR | BPF_X:
	case BPF_ALU64 | BPF_XOR | BPF_X:
		emit(A64_EOR(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_MUL | BPF_X:
	case BPF_ALU64 | BPF_MUL | BPF_X:
		emit(A64_MUL(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_DIV | BPF_X:
	case BPF_ALU64 | BPF_DIV | BPF_X:
		emit(A64_UDIV(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_MOD | BPF_X:
	case BPF_ALU64 | BPF_MOD | BPF_X:
		emit(A64_UDIV(is64, tmp, dst, src), ctx);
		emit(A64_MSUB(is64, dst, dst, tmp, src), ctx);
		break;
	case BPF_ALU | BPF_LSH | BPF_X:
	case BPF_ALU64 | BPF_LSH | BPF_X:
		emit(A64_LSLV(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_RSH | BPF_X:
	case BPF_ALU64 | BPF_RSH | BPF_X:
		emit(A64_LSRV(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_ARSH | BPF_X:
	case BPF_ALU64 | BPF_ARSH | BPF_X:
		emit(A64_ASRV(is64, dst, dst, src), ctx);
		break;
	/* dst = -dst */
	case BPF_ALU | BPF_NEG:
	case BPF_ALU64 | BPF_NEG:
		emit(A64_NEG(is64, dst, dst), ctx);
		break;
	/* dst = BSWAP##imm(dst) */
	case BPF_ALU | BPF_END | BPF_FROM_LE:
	case BPF_ALU | BPF_END | BPF_FROM_BE:
#ifdef CONFIG_CPU_BIG_ENDIAN
		if (BPF_SRC(code) == BPF_FROM_BE)
			goto emit_bswap_uxt;
#else /* !CONFIG_CPU_BIG_ENDIAN */
		if (BPF_SRC(code) == BPF_FROM_LE)
			goto emit_bswap_uxt;
#endif
		switch (imm) {
		case 16:
			emit(A64_REV16(is64, dst, dst), ctx);
			/* zero-extend 16 bits into 64 bits */
			emit(A64_UXTH(is64, dst, dst), ctx);
			break;
		case 32:
			emit(A64_REV32(is64, dst, dst), ctx);
			/* upper 32 bits already cleared */
			break;
		case 64:
			emit(A64_REV64(dst, dst), ctx);
			break;
		}
		break;
emit_bswap_uxt:
		switch (imm) {
		case 16:
			/* zero-extend 16 bits into 64 bits */
			emit(A64_UXTH(is64, dst, dst), ctx);
			break;
		case 32:
			/* zero-extend 32 bits into 64 bits */
			emit(A64_UXTW(is64, dst, dst), ctx);
			break;
		case 64:
			/* nop */
			break;
		}
		break;

	/* dst = imm */
	case BPF_ALU | BPF_MOV | BPF_K:
	case BPF_ALU64 | BPF_MOV | BPF_K:
		emit_a64_mov_i(is64, dst, imm, ctx);
		break;
	/* dst = dst OP imm */
	case BPF_ALU | BPF_ADD | BPF_K:
	case BPF_ALU64 | BPF_ADD | BPF_K:
		if (is_addsub_imm(imm)) {
			emit(A64_ADD_I(is64, dst, dst, imm), ctx);
		} else if (is_addsub_imm(-imm)) {
			emit(A64_SUB_I(is64, dst, dst, -imm), ctx);
		} else {
			emit_a64_mov_i(is64, tmp, imm, ctx);
			emit(A64_ADD(is64, dst, dst, tmp), ctx);
		}
		break;
	case BPF_ALU | BPF_SUB | BPF_K:
	case BPF_ALU64 | BPF_SUB | BPF_K:
		if (is_addsub_imm(imm)) {
			emit(A64_SUB_I(is64, dst, dst, imm), ctx);
		} else if (is_addsub_imm(-imm)) {
			emit(A64_ADD_I(is64, dst, dst, -imm), ctx);
		} else {
			emit_a64_mov_i(is64, tmp, imm, ctx);
			emit(A64_SUB(is64, dst, dst, tmp), ctx);
		}
		break;
	case BPF_ALU | BPF_AND | BPF_K:
	case BPF_ALU64 | BPF_AND | BPF_K:
		a64_insn = A64_AND_I(is64, dst, dst, imm);
		if (a64_insn != AARCH64_BREAK_FAULT) {
			emit(a64_insn, ctx);
		} else {
			emit_a64_mov_i(is64, tmp, imm, ctx);
			emit(A64_AND(is64, dst, dst, tmp), ctx);
		}
		break;
	case BPF_ALU | BPF_OR | BPF_K:
	case BPF_ALU64 | BPF_OR | BPF_K:
		a64_insn = A64_ORR_I(is64, dst, dst, imm);
		if (a64_insn != AARCH64_BREAK_FAULT) {
			emit(a64_insn, ctx);
		} else {
			emit_a64_mov_i(is64, tmp, imm, ctx);
			emit(A64_ORR(is64, dst, dst, tmp), ctx);
		}
		break;
	case BPF_ALU | BPF_XOR | BPF_K:
	case BPF_ALU64 | BPF_XOR | BPF_K:
		a64_insn = A64_EOR_I(is64, dst, dst, imm);
		if (a64_insn != AARCH64_BREAK_FAULT) {
			emit(a64_insn, ctx);
		} else {
			emit_a64_mov_i(is64, tmp, imm, ctx);
			emit(A64_EOR(is64, dst, dst, tmp), ctx);
		}
		break;
	case BPF_ALU | BPF_MUL | BPF_K:
	case BPF_ALU64 | BPF_MUL | BPF_K:
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_MUL(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_DIV | BPF_K:
	case BPF_ALU64 | BPF_DIV | BPF_K:
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_UDIV(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_MOD | BPF_K:
	case BPF_ALU64 | BPF_MOD | BPF_K:
		emit_a64_mov_i(is64, tmp2, imm, ctx);
		emit(A64_UDIV(is64, tmp, dst, tmp2), ctx);
		emit(A64_MSUB(is64, dst, dst, tmp, tmp2), ctx);
		break;
	case BPF_ALU | BPF_LSH | BPF_K:
	case BPF_ALU64 | BPF_LSH | BPF_K:
		emit(A64_LSL(is64, dst, dst, imm), ctx);
		break;
	case BPF_ALU | BPF_RSH | BPF_K:
	case BPF_ALU64 | BPF_RSH | BPF_K:
		emit(A64_LSR(is64, dst, dst, imm), ctx);
		break;
	case BPF_ALU | BPF_ARSH | BPF_K:
	case BPF_ALU64 | BPF_ARSH | BPF_K:
		emit(A64_ASR(is64, dst, dst, imm), ctx);
		break;

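	/*
	 * Conditional jumps compare (or TST for JSET) and then emit a
	 * conditional branch. Unsigned BPF conditions map to the arm64
	 * unsigned condition codes (JGT->HI, JGE->CS, JLT->CC, JLE->LS),
	 * signed ones to GT/GE/LT/LE, and JNE/JSET branch on NE. The
	 * BPF_JMP32 variants compare only the low 32 bits since is64 is
	 * false for them.
	 */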
	/* JUMP off */
	case BPF_JMP | BPF_JA:
		jmp_offset = bpf2a64_offset(i, off, ctx);
		check_imm26(jmp_offset);
		emit(A64_B(jmp_offset), ctx);
		break;
	/* IF (dst COND src) JUMP off */
	case BPF_JMP | BPF_JEQ | BPF_X:
	case BPF_JMP | BPF_JGT | BPF_X:
	case BPF_JMP | BPF_JLT | BPF_X:
	case BPF_JMP | BPF_JGE | BPF_X:
	case BPF_JMP | BPF_JLE | BPF_X:
	case BPF_JMP | BPF_JNE | BPF_X:
	case BPF_JMP | BPF_JSGT | BPF_X:
	case BPF_JMP | BPF_JSLT | BPF_X:
	case BPF_JMP | BPF_JSGE | BPF_X:
	case BPF_JMP | BPF_JSLE | BPF_X:
	case BPF_JMP32 | BPF_JEQ | BPF_X:
	case BPF_JMP32 | BPF_JGT | BPF_X:
	case BPF_JMP32 | BPF_JLT | BPF_X:
	case BPF_JMP32 | BPF_JGE | BPF_X:
	case BPF_JMP32 | BPF_JLE | BPF_X:
	case BPF_JMP32 | BPF_JNE | BPF_X:
	case BPF_JMP32 | BPF_JSGT | BPF_X:
	case BPF_JMP32 | BPF_JSLT | BPF_X:
	case BPF_JMP32 | BPF_JSGE | BPF_X:
	case BPF_JMP32 | BPF_JSLE | BPF_X:
		emit(A64_CMP(is64, dst, src), ctx);
emit_cond_jmp:
		jmp_offset = bpf2a64_offset(i, off, ctx);
		check_imm19(jmp_offset);
		switch (BPF_OP(code)) {
		case BPF_JEQ:
			jmp_cond = A64_COND_EQ;
			break;
		case BPF_JGT:
			jmp_cond = A64_COND_HI;
			break;
		case BPF_JLT:
			jmp_cond = A64_COND_CC;
			break;
		case BPF_JGE:
			jmp_cond = A64_COND_CS;
			break;
		case BPF_JLE:
			jmp_cond = A64_COND_LS;
			break;
		case BPF_JSET:
		case BPF_JNE:
			jmp_cond = A64_COND_NE;
			break;
		case BPF_JSGT:
			jmp_cond = A64_COND_GT;
			break;
		case BPF_JSLT:
			jmp_cond = A64_COND_LT;
			break;
		case BPF_JSGE:
			jmp_cond = A64_COND_GE;
			break;
		case BPF_JSLE:
			jmp_cond = A64_COND_LE;
			break;
		default:
			return -EFAULT;
		}
		emit(A64_B_(jmp_cond, jmp_offset), ctx);
		break;
	case BPF_JMP | BPF_JSET | BPF_X:
	case BPF_JMP32 | BPF_JSET | BPF_X:
		emit(A64_TST(is64, dst, src), ctx);
		goto emit_cond_jmp;
	/* IF (dst COND imm) JUMP off */
	case BPF_JMP | BPF_JEQ | BPF_K:
	case BPF_JMP | BPF_JGT | BPF_K:
	case BPF_JMP | BPF_JLT | BPF_K:
	case BPF_JMP | BPF_JGE | BPF_K:
	case BPF_JMP | BPF_JLE | BPF_K:
	case BPF_JMP | BPF_JNE | BPF_K:
	case BPF_JMP | BPF_JSGT | BPF_K:
	case BPF_JMP | BPF_JSLT | BPF_K:
	case BPF_JMP | BPF_JSGE | BPF_K:
	case BPF_JMP | BPF_JSLE | BPF_K:
	case BPF_JMP32 | BPF_JEQ | BPF_K:
	case BPF_JMP32 | BPF_JGT | BPF_K:
	case BPF_JMP32 | BPF_JLT | BPF_K:
	case BPF_JMP32 | BPF_JGE | BPF_K:
	case BPF_JMP32 | BPF_JLE | BPF_K:
	case BPF_JMP32 | BPF_JNE | BPF_K:
	case BPF_JMP32 | BPF_JSGT | BPF_K:
	case BPF_JMP32 | BPF_JSLT | BPF_K:
	case BPF_JMP32 | BPF_JSGE | BPF_K:
	case BPF_JMP32 | BPF_JSLE | BPF_K:
		if (is_addsub_imm(imm)) {
			emit(A64_CMP_I(is64, dst, imm), ctx);
		} else if (is_addsub_imm(-imm)) {
			emit(A64_CMN_I(is64, dst, -imm), ctx);
		} else {
			emit_a64_mov_i(is64, tmp, imm, ctx);
			emit(A64_CMP(is64, dst, tmp), ctx);
		}
		goto emit_cond_jmp;
	case BPF_JMP | BPF_JSET | BPF_K:
	case BPF_JMP32 | BPF_JSET | BPF_K:
		a64_insn = A64_TST_I(is64, dst, imm);
		if (a64_insn != AARCH64_BREAK_FAULT) {
			emit(a64_insn, ctx);
		} else {
			emit_a64_mov_i(is64, tmp, imm, ctx);
			emit(A64_TST(is64, dst, tmp), ctx);
		}
		goto emit_cond_jmp;
	/* function call */
	case BPF_JMP | BPF_CALL:
	{
		const u8 r0 = bpf2a64[BPF_REG_0];
		bool func_addr_fixed;
		u64 func_addr;

		ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass,
					    &func_addr, &func_addr_fixed);
		if (ret < 0)
			return ret;
		emit_addr_mov_i64(tmp, func_addr, ctx);
		emit(A64_BLR(tmp), ctx);
		emit(A64_MOV(1, r0, A64_R(0)), ctx);
		break;
	}
	/* tail call */
	case BPF_JMP | BPF_TAIL_CALL:
		if (emit_bpf_tail_call(ctx))
			return -EFAULT;
		break;
	/* function return */
	case BPF_JMP | BPF_EXIT:
		/* Optimization: when last instruction is EXIT,
		   simply fallthrough to epilogue. */
		if (i == ctx->prog->len - 1)
			break;
		jmp_offset = epilogue_offset(ctx);
		check_imm26(jmp_offset);
		emit(A64_B(jmp_offset), ctx);
		break;

	/* dst = imm64 */
	case BPF_LD | BPF_IMM | BPF_DW:
	{
		const struct bpf_insn insn1 = insn[1];
		u64 imm64;

		imm64 = (u64)insn1.imm << 32 | (u32)imm;
		emit_a64_mov_i64(dst, imm64, ctx);

		return 1;
	}

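	/*
	 * BPF_PROBE_MEM loads are emitted exactly like normal LDX loads,
	 * but add_exception_handler() records an extable entry for the
	 * load instruction so that a fault zeroes the destination register
	 * and execution resumes after the load (see ex_handler_bpf()).
	 */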
	/* LDX: dst = *(size *)(src + off) */
	case BPF_LDX | BPF_MEM | BPF_W:
	case BPF_LDX | BPF_MEM | BPF_H:
	case BPF_LDX | BPF_MEM | BPF_B:
	case BPF_LDX | BPF_MEM | BPF_DW:
	case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
	case BPF_LDX | BPF_PROBE_MEM | BPF_W:
	case BPF_LDX | BPF_PROBE_MEM | BPF_H:
	case BPF_LDX | BPF_PROBE_MEM | BPF_B:
		emit_a64_mov_i(1, tmp, off, ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			emit(A64_LDR32(dst, src, tmp), ctx);
			break;
		case BPF_H:
			emit(A64_LDRH(dst, src, tmp), ctx);
			break;
		case BPF_B:
			emit(A64_LDRB(dst, src, tmp), ctx);
			break;
		case BPF_DW:
			emit(A64_LDR64(dst, src, tmp), ctx);
			break;
		}

		ret = add_exception_handler(insn, ctx, dst);
		if (ret)
			return ret;
		break;

	/* speculation barrier */
	case BPF_ST | BPF_NOSPEC:
		/*
		 * Nothing required here.
		 *
		 * In case of arm64, we rely on the firmware mitigation of
		 * Speculative Store Bypass as controlled via the ssbd kernel
		 * parameter. Whenever the mitigation is enabled, it works
		 * for all of the kernel code with no need to provide any
		 * additional instructions.
		 */
		break;

	/* ST: *(size *)(dst + off) = imm */
	case BPF_ST | BPF_MEM | BPF_W:
	case BPF_ST | BPF_MEM | BPF_H:
	case BPF_ST | BPF_MEM | BPF_B:
	case BPF_ST | BPF_MEM | BPF_DW:
		/* Load imm to a register then store it */
		emit_a64_mov_i(1, tmp2, off, ctx);
		emit_a64_mov_i(1, tmp, imm, ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			emit(A64_STR32(tmp, dst, tmp2), ctx);
			break;
		case BPF_H:
			emit(A64_STRH(tmp, dst, tmp2), ctx);
			break;
		case BPF_B:
			emit(A64_STRB(tmp, dst, tmp2), ctx);
			break;
		case BPF_DW:
			emit(A64_STR64(tmp, dst, tmp2), ctx);
			break;
		}
		break;

	/* STX: *(size *)(dst + off) = src */
	case BPF_STX | BPF_MEM | BPF_W:
	case BPF_STX | BPF_MEM | BPF_H:
	case BPF_STX | BPF_MEM | BPF_B:
	case BPF_STX | BPF_MEM | BPF_DW:
		emit_a64_mov_i(1, tmp, off, ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			emit(A64_STR32(src, dst, tmp), ctx);
			break;
		case BPF_H:
			emit(A64_STRH(src, dst, tmp), ctx);
			break;
		case BPF_B:
			emit(A64_STRB(src, dst, tmp), ctx);
			break;
		case BPF_DW:
			emit(A64_STR64(src, dst, tmp), ctx);
			break;
		}
		break;

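	/*
	 * Atomic adds use a single STADD when the CPU has LSE atomics;
	 * otherwise fall back to an LDXR/ADD/STXR loop, where the CBNZ on
	 * the store-exclusive status register branches back -3 instructions
	 * to retry until the exclusive store succeeds.
	 */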
	case BPF_STX | BPF_ATOMIC | BPF_W:
	case BPF_STX | BPF_ATOMIC | BPF_DW:
		if (insn->imm != BPF_ADD) {
			pr_err_once("unknown atomic op code %02x\n", insn->imm);
			return -EINVAL;
		}

		/* STX XADD: lock *(u32 *)(dst + off) += src
		 * and
		 * STX XADD: lock *(u64 *)(dst + off) += src
		 */

		if (!off) {
			reg = dst;
		} else {
			emit_a64_mov_i(1, tmp, off, ctx);
			emit(A64_ADD(1, tmp, tmp, dst), ctx);
			reg = tmp;
		}
		if (cpus_have_cap(ARM64_HAS_LSE_ATOMICS)) {
			emit(A64_STADD(isdw, reg, src), ctx);
		} else {
			emit(A64_LDXR(isdw, tmp2, reg), ctx);
			emit(A64_ADD(isdw, tmp2, tmp2, src), ctx);
			emit(A64_STXR(isdw, tmp2, reg, tmp3), ctx);
			jmp_offset = -3;
			check_imm19(jmp_offset);
			emit(A64_CBNZ(0, tmp3, jmp_offset), ctx);
		}
		break;

	default:
		pr_err_once("unknown opcode %02x\n", code);
		return -EINVAL;
	}

	return 0;
}

static int build_body(struct jit_ctx *ctx, bool extra_pass)
{
	const struct bpf_prog *prog = ctx->prog;
	int i;

	/*
	 * - offset[0] offset of the end of prologue,
	 *   start of the 1st instruction.
	 * - offset[1] - offset of the end of 1st instruction,
	 *   start of the 2nd instruction
	 * [....]
	 * - offset[3] - offset of the end of 3rd instruction,
	 *   start of 4th instruction
	 */
	for (i = 0; i < prog->len; i++) {
		const struct bpf_insn *insn = &prog->insnsi[i];
		int ret;

		if (ctx->image == NULL)
			ctx->offset[i] = ctx->idx;
		ret = build_insn(insn, ctx, extra_pass);
		if (ret > 0) {
			i++;
			if (ctx->image == NULL)
				ctx->offset[i] = ctx->idx;
			continue;
		}
		if (ret)
			return ret;
	}
	/*
	 * offset is allocated with prog->len + 1 so fill in
	 * the last element with the offset after the last
	 * instruction (end of program)
	 */
	if (ctx->image == NULL)
		ctx->offset[i] = ctx->idx;

	return 0;
}

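/*
 * jit_fill_hole() pre-fills the image with AARCH64_BREAK_FAULT and the
 * instruction encoders return that same value when an operand cannot be
 * encoded, so finding it in the final image means some instruction failed
 * to encode and the program must be rejected.
 */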
static int validate_code(struct jit_ctx *ctx)
{
	int i;

	for (i = 0; i < ctx->idx; i++) {
		u32 a64_insn = le32_to_cpu(ctx->image[i]);

		if (a64_insn == AARCH64_BREAK_FAULT)
			return -1;
	}

	if (WARN_ON_ONCE(ctx->exentry_idx != ctx->prog->aux->num_exentries))
		return -1;

	return 0;
}

static inline void bpf_flush_icache(void *start, void *end)
{
	flush_icache_range((unsigned long)start, (unsigned long)end);
}

struct arm64_jit_data {
	struct bpf_binary_header *header;
	u8 *image;
	struct jit_ctx ctx;
};

struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
	int image_size, prog_size, extable_size;
	struct bpf_prog *tmp, *orig_prog = prog;
	struct bpf_binary_header *header;
	struct arm64_jit_data *jit_data;
	bool was_classic = bpf_prog_was_classic(prog);
	bool tmp_blinded = false;
	bool extra_pass = false;
	struct jit_ctx ctx;
	u8 *image_ptr;

	if (!prog->jit_requested)
		return orig_prog;

	tmp = bpf_jit_blind_constants(prog);
	/* If blinding was requested and we failed during blinding,
	 * we must fall back to the interpreter.
	 */
	if (IS_ERR(tmp))
		return orig_prog;
	if (tmp != prog) {
		tmp_blinded = true;
		prog = tmp;
	}

	jit_data = prog->aux->jit_data;
	if (!jit_data) {
		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
		if (!jit_data) {
			prog = orig_prog;
			goto out;
		}
		prog->aux->jit_data = jit_data;
	}
	if (jit_data->ctx.offset) {
		ctx = jit_data->ctx;
		image_ptr = jit_data->image;
		header = jit_data->header;
		extra_pass = true;
		prog_size = sizeof(u32) * ctx.idx;
		goto skip_init_ctx;
	}
	memset(&ctx, 0, sizeof(ctx));
	ctx.prog = prog;

	ctx.offset = kcalloc(prog->len + 1, sizeof(int), GFP_KERNEL);
	if (ctx.offset == NULL) {
		prog = orig_prog;
		goto out_off;
	}

	/* 1. Initial fake pass to compute ctx->idx. */

	/* Fake pass to fill in ctx->offset. */
	if (build_body(&ctx, extra_pass)) {
		prog = orig_prog;
		goto out_off;
	}

	if (build_prologue(&ctx, was_classic)) {
		prog = orig_prog;
		goto out_off;
	}

	ctx.epilogue_offset = ctx.idx;
	build_epilogue(&ctx);

	extable_size = prog->aux->num_exentries *
		sizeof(struct exception_table_entry);

	/* Now we know the actual image size. */
	prog_size = sizeof(u32) * ctx.idx;
	image_size = prog_size + extable_size;
	header = bpf_jit_binary_alloc(image_size, &image_ptr,
				      sizeof(u32), jit_fill_hole);
	if (header == NULL) {
		prog = orig_prog;
		goto out_off;
	}

	/* 2. Now, the actual pass. */

	ctx.image = (__le32 *)image_ptr;
	if (extable_size)
		prog->aux->extable = (void *)image_ptr + prog_size;
skip_init_ctx:
	ctx.idx = 0;
	ctx.exentry_idx = 0;

	build_prologue(&ctx, was_classic);

	if (build_body(&ctx, extra_pass)) {
		bpf_jit_binary_free(header);
		prog = orig_prog;
		goto out_off;
	}

	build_epilogue(&ctx);

	/* 3. Extra pass to validate JITed code. */
	if (validate_code(&ctx)) {
		bpf_jit_binary_free(header);
		prog = orig_prog;
		goto out_off;
	}

	/* And we're done. */
	if (bpf_jit_enable > 1)
		bpf_jit_dump(prog->len, prog_size, 2, ctx.image);

	bpf_flush_icache(header, ctx.image + ctx.idx);

	if (!prog->is_func || extra_pass) {
		if (extra_pass && ctx.idx != jit_data->ctx.idx) {
			pr_err_once("multi-func JIT bug %d != %d\n",
				    ctx.idx, jit_data->ctx.idx);
			bpf_jit_binary_free(header);
			prog->bpf_func = NULL;
			prog->jited = 0;
			goto out_off;
		}
		bpf_jit_binary_lock_ro(header);
	} else {
		jit_data->ctx = ctx;
		jit_data->image = image_ptr;
		jit_data->header = header;
	}
	prog->bpf_func = (void *)ctx.image;
	prog->jited = 1;
	prog->jited_len = prog_size;

	if (!prog->is_func || extra_pass) {
		bpf_prog_fill_jited_linfo(prog, ctx.offset + 1);
out_off:
		kfree(ctx.offset);
		kfree(jit_data);
		prog->aux->jit_data = NULL;
	}
out:
	if (tmp_blinded)
		bpf_jit_prog_release_other(prog, prog == orig_prog ?
					   tmp : orig_prog);
	return prog;
}

u64 bpf_jit_alloc_exec_limit(void)
{
	return BPF_JIT_REGION_SIZE;
}

void *bpf_jit_alloc_exec(unsigned long size)
{
	return __vmalloc_node_range(size, PAGE_SIZE, BPF_JIT_REGION_START,
				    BPF_JIT_REGION_END, GFP_KERNEL,
				    PAGE_KERNEL, 0, NUMA_NO_NODE,
				    __builtin_return_address(0));
}

void bpf_jit_free_exec(void *addr)
{
	return vfree(addr);
}