/*
 * BPF JIT compiler for ARM64
 *
 * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) "bpf_jit: " fmt

#include <linux/filter.h>
#include <linux/printk.h>
#include <linux/skbuff.h>
#include <linux/slab.h>

#include <asm/byteorder.h>
#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>

#include "bpf_jit.h"

int bpf_jit_enable __read_mostly;

#define TMP_REG_1 (MAX_BPF_REG + 0)
#define TMP_REG_2 (MAX_BPF_REG + 1)

/* Map BPF registers to A64 registers */
static const int bpf2a64[] = {
	/* return value from in-kernel function, and exit value from eBPF */
	[BPF_REG_0] = A64_R(7),
	/* arguments from eBPF program to in-kernel function */
	[BPF_REG_1] = A64_R(0),
	[BPF_REG_2] = A64_R(1),
	[BPF_REG_3] = A64_R(2),
	[BPF_REG_4] = A64_R(3),
	[BPF_REG_5] = A64_R(4),
	/* callee saved registers that in-kernel function will preserve */
	[BPF_REG_6] = A64_R(19),
	[BPF_REG_7] = A64_R(20),
	[BPF_REG_8] = A64_R(21),
	[BPF_REG_9] = A64_R(22),
	/* read-only frame pointer to access stack */
	[BPF_REG_FP] = A64_R(25),
	/* temporary registers for internal BPF JIT */
	[TMP_REG_1] = A64_R(23),
	[TMP_REG_2] = A64_R(24),
};

struct jit_ctx {
	const struct bpf_prog *prog;
	int idx;
	int tmp_used;
	int epilogue_offset;
	int *offset;
	u32 *image;
};

static inline void emit(const u32 insn, struct jit_ctx *ctx)
{
	if (ctx->image != NULL)
		ctx->image[ctx->idx] = cpu_to_le32(insn);

	ctx->idx++;
}

static inline void emit_a64_mov_i64(const int reg, const u64 val,
				    struct jit_ctx *ctx)
{
	u64 tmp = val;
	int shift = 0;

	emit(A64_MOVZ(1, reg, tmp & 0xffff, shift), ctx);
	tmp >>= 16;
	shift += 16;
	while (tmp) {
		if (tmp & 0xffff)
			emit(A64_MOVK(1, reg, tmp & 0xffff, shift), ctx);
		tmp >>= 16;
		shift += 16;
	}
}

static inline void emit_a64_mov_i(const int is64, const int reg,
				  const s32 val, struct jit_ctx *ctx)
{
	u16 hi = val >> 16;
	u16 lo = val & 0xffff;

	if (hi & 0x8000) {
		if (hi == 0xffff) {
			emit(A64_MOVN(is64, reg, (u16)~lo, 0), ctx);
		} else {
			emit(A64_MOVN(is64, reg, (u16)~hi, 16), ctx);
			emit(A64_MOVK(is64, reg, lo, 0), ctx);
		}
	} else {
		emit(A64_MOVZ(is64, reg, lo, 0), ctx);
		if (hi)
			emit(A64_MOVK(is64, reg, hi, 16), ctx);
	}
}

static inline int bpf2a64_offset(int bpf_to, int bpf_from,
				 const struct jit_ctx *ctx)
{
	int to = ctx->offset[bpf_to];
	/* -1 to account for the Branch instruction */
	int from = ctx->offset[bpf_from] - 1;

	return to - from;
}
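/*
 * Fill callback for bpf_jit_binary_alloc(): seed the whole image with
 * AARCH64_BREAK_FAULT (a BRK encoding from <asm/debug-monitors.h>), so
 * that any hole that is never overwritten traps if it is ever reached,
 * instead of executing stale memory. The arm64 instruction generators
 * also return AARCH64_BREAK_FAULT when they fail to encode an
 * instruction, which is what validate_code() below scans for.
 */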
static void jit_fill_hole(void *area, unsigned int size)
{
	u32 *ptr;
	/* We are guaranteed to have aligned memory. */
	for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
		*ptr++ = cpu_to_le32(AARCH64_BREAK_FAULT);
}

static inline int epilogue_offset(const struct jit_ctx *ctx)
{
	int to = ctx->epilogue_offset;
	int from = ctx->idx;

	return to - from;
}

/* Stack must be a multiple of 16B */
#define STACK_ALIGN(sz) (((sz) + 15) & ~15)

#define _STACK_SIZE \
	(MAX_BPF_STACK \
	 + 4 /* extra for skb_copy_bits buffer */)

#define STACK_SIZE STACK_ALIGN(_STACK_SIZE)
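/*
 * A worked example of the sizing above, assuming MAX_BPF_STACK is 512
 * bytes: _STACK_SIZE = 512 + 4 = 516, which STACK_ALIGN() rounds up to
 * STACK_SIZE = 528. The extra 4 bytes are the scratch buffer that the
 * LD_ABS/LD_IND path below hands (via bpf_load_pointer()) down to
 * skb_copy_bits().
 */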
static void build_prologue(struct jit_ctx *ctx)
{
	const u8 r6 = bpf2a64[BPF_REG_6];
	const u8 r7 = bpf2a64[BPF_REG_7];
	const u8 r8 = bpf2a64[BPF_REG_8];
	const u8 r9 = bpf2a64[BPF_REG_9];
	const u8 fp = bpf2a64[BPF_REG_FP];
	const u8 tmp1 = bpf2a64[TMP_REG_1];
	const u8 tmp2 = bpf2a64[TMP_REG_2];

	/*
	 * BPF prog stack layout
	 *
	 *                         high
	 * original A64_SP =>   0:+-----+ BPF prologue
	 *                        |FP/LR|
	 * current A64_FP =>  -16:+-----+
	 *                        | ... | callee saved registers
	 *                        +-----+
	 *                        |     | x25/x26
	 * BPF fp register => -80:+-----+ <= (BPF_FP)
	 *                        |     |
	 *                        | ... | BPF prog stack
	 *                        |     |
	 *                        +-----+ <= (BPF_FP - MAX_BPF_STACK)
	 *                        |RSVD | JIT scratchpad
	 * current A64_SP =>      +-----+ <= (BPF_FP - STACK_SIZE)
	 *                        |     |
	 *                        | ... | Function call stack
	 *                        |     |
	 *                        +-----+
	 *                          low
	 *
	 */

	/* Save FP and LR registers to stay aligned with the ARM64 AAPCS */
	emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
	emit(A64_MOV(1, A64_FP, A64_SP), ctx);

	/* Save callee-saved registers */
	emit(A64_PUSH(r6, r7, A64_SP), ctx);
	emit(A64_PUSH(r8, r9, A64_SP), ctx);
	if (ctx->tmp_used)
		emit(A64_PUSH(tmp1, tmp2, A64_SP), ctx);

	/* Save fp (x25) and x26. SP requires 16-byte alignment */
	emit(A64_PUSH(fp, A64_R(26), A64_SP), ctx);

	/* Set up BPF prog stack base register (x25) */
	emit(A64_MOV(1, fp, A64_SP), ctx);

	/* Set up function call stack */
	emit(A64_SUB_I(1, A64_SP, A64_SP, STACK_SIZE), ctx);
}

static void build_epilogue(struct jit_ctx *ctx)
{
	const u8 r0 = bpf2a64[BPF_REG_0];
	const u8 r6 = bpf2a64[BPF_REG_6];
	const u8 r7 = bpf2a64[BPF_REG_7];
	const u8 r8 = bpf2a64[BPF_REG_8];
	const u8 r9 = bpf2a64[BPF_REG_9];
	const u8 fp = bpf2a64[BPF_REG_FP];
	const u8 tmp1 = bpf2a64[TMP_REG_1];
	const u8 tmp2 = bpf2a64[TMP_REG_2];

	/* We're done with BPF stack */
	emit(A64_ADD_I(1, A64_SP, A64_SP, STACK_SIZE), ctx);

	/* Restore fp (x25) and x26 */
	emit(A64_POP(fp, A64_R(26), A64_SP), ctx);

	/* Restore callee-saved registers */
	if (ctx->tmp_used)
		emit(A64_POP(tmp1, tmp2, A64_SP), ctx);
	emit(A64_POP(r8, r9, A64_SP), ctx);
	emit(A64_POP(r6, r7, A64_SP), ctx);

	/* Restore FP/LR registers */
	emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);

	/* Set return value */
	emit(A64_MOV(1, A64_R(0), r0), ctx);

	emit(A64_RET(A64_LR), ctx);
}

/* JITs an eBPF instruction.
 * Returns:
 * 0  - successfully JITed an 8-byte eBPF instruction.
 * >0 - successfully JITed a 16-byte eBPF instruction.
 * <0 - failed to JIT.
 */
static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
{
	const u8 code = insn->code;
	const u8 dst = bpf2a64[insn->dst_reg];
	const u8 src = bpf2a64[insn->src_reg];
	const u8 tmp = bpf2a64[TMP_REG_1];
	const u8 tmp2 = bpf2a64[TMP_REG_2];
	const s16 off = insn->off;
	const s32 imm = insn->imm;
	const int i = insn - ctx->prog->insnsi;
	const bool is64 = BPF_CLASS(code) == BPF_ALU64;
	u8 jmp_cond;
	s32 jmp_offset;

#define check_imm(bits, imm) do {				\
	if ((((imm) > 0) && ((imm) >> (bits))) ||		\
	    (((imm) < 0) && (~(imm) >> (bits)))) {		\
		pr_info("[%2d] imm=%d(0x%x) out of range\n",	\
			i, imm, imm);				\
		return -EINVAL;					\
	}							\
} while (0)
#define check_imm19(imm) check_imm(19, imm)
#define check_imm26(imm) check_imm(26, imm)

	switch (code) {
	/* dst = src */
	case BPF_ALU | BPF_MOV | BPF_X:
	case BPF_ALU64 | BPF_MOV | BPF_X:
		emit(A64_MOV(is64, dst, src), ctx);
		break;
	/* dst = dst OP src */
	case BPF_ALU | BPF_ADD | BPF_X:
	case BPF_ALU64 | BPF_ADD | BPF_X:
		emit(A64_ADD(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_SUB | BPF_X:
	case BPF_ALU64 | BPF_SUB | BPF_X:
		emit(A64_SUB(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_AND | BPF_X:
	case BPF_ALU64 | BPF_AND | BPF_X:
		emit(A64_AND(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_OR | BPF_X:
	case BPF_ALU64 | BPF_OR | BPF_X:
		emit(A64_ORR(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_XOR | BPF_X:
	case BPF_ALU64 | BPF_XOR | BPF_X:
		emit(A64_EOR(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_MUL | BPF_X:
	case BPF_ALU64 | BPF_MUL | BPF_X:
		emit(A64_MUL(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_DIV | BPF_X:
	case BPF_ALU64 | BPF_DIV | BPF_X:
	case BPF_ALU | BPF_MOD | BPF_X:
	case BPF_ALU64 | BPF_MOD | BPF_X:
	{
		const u8 r0 = bpf2a64[BPF_REG_0];

		/* if (src == 0) return 0 */
		jmp_offset = 3; /* skip ahead to else path */
		check_imm19(jmp_offset);
		emit(A64_CBNZ(is64, src, jmp_offset), ctx);
		emit(A64_MOVZ(1, r0, 0, 0), ctx);
		jmp_offset = epilogue_offset(ctx);
		check_imm26(jmp_offset);
		emit(A64_B(jmp_offset), ctx);
		/* else */
		switch (BPF_OP(code)) {
		case BPF_DIV:
			emit(A64_UDIV(is64, dst, dst, src), ctx);
			break;
		case BPF_MOD:
			ctx->tmp_used = 1;
			emit(A64_UDIV(is64, tmp, dst, src), ctx);
			emit(A64_MUL(is64, tmp, tmp, src), ctx);
			emit(A64_SUB(is64, dst, dst, tmp), ctx);
			break;
		}
		break;
	}
	case BPF_ALU | BPF_LSH | BPF_X:
	case BPF_ALU64 | BPF_LSH | BPF_X:
		emit(A64_LSLV(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_RSH | BPF_X:
	case BPF_ALU64 | BPF_RSH | BPF_X:
		emit(A64_LSRV(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_ARSH | BPF_X:
	case BPF_ALU64 | BPF_ARSH | BPF_X:
		emit(A64_ASRV(is64, dst, dst, src), ctx);
		break;
	/* dst = -dst */
	case BPF_ALU | BPF_NEG:
	case BPF_ALU64 | BPF_NEG:
		emit(A64_NEG(is64, dst, dst), ctx);
		break;
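	/*
	 * Endianness conversions: BPF_FROM_BE means "make dst big-endian",
	 * BPF_FROM_LE "make dst little-endian". Whichever direction matches
	 * the host byte order needs no byte swap, only the zero-extension
	 * to 16/32 bits that the instruction still implies, hence the jump
	 * to emit_bswap_uxt. Only the non-native direction emits an actual
	 * REV16/REV32/REV64.
	 */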
	/* dst = BSWAP##imm(dst) */
	case BPF_ALU | BPF_END | BPF_FROM_LE:
	case BPF_ALU | BPF_END | BPF_FROM_BE:
#ifdef CONFIG_CPU_BIG_ENDIAN
		if (BPF_SRC(code) == BPF_FROM_BE)
			goto emit_bswap_uxt;
#else /* !CONFIG_CPU_BIG_ENDIAN */
		if (BPF_SRC(code) == BPF_FROM_LE)
			goto emit_bswap_uxt;
#endif
		switch (imm) {
		case 16:
			emit(A64_REV16(is64, dst, dst), ctx);
			/* zero-extend 16 bits into 64 bits */
			emit(A64_UXTH(is64, dst, dst), ctx);
			break;
		case 32:
			emit(A64_REV32(is64, dst, dst), ctx);
			/* upper 32 bits already cleared */
			break;
		case 64:
			emit(A64_REV64(dst, dst), ctx);
			break;
		}
		break;
emit_bswap_uxt:
		switch (imm) {
		case 16:
			/* zero-extend 16 bits into 64 bits */
			emit(A64_UXTH(is64, dst, dst), ctx);
			break;
		case 32:
			/* zero-extend 32 bits into 64 bits */
			emit(A64_UXTW(is64, dst, dst), ctx);
			break;
		case 64:
			/* nop */
			break;
		}
		break;
	/* dst = imm */
	case BPF_ALU | BPF_MOV | BPF_K:
	case BPF_ALU64 | BPF_MOV | BPF_K:
		emit_a64_mov_i(is64, dst, imm, ctx);
		break;
	/* dst = dst OP imm */
	case BPF_ALU | BPF_ADD | BPF_K:
	case BPF_ALU64 | BPF_ADD | BPF_K:
		ctx->tmp_used = 1;
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_ADD(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_SUB | BPF_K:
	case BPF_ALU64 | BPF_SUB | BPF_K:
		ctx->tmp_used = 1;
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_SUB(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_AND | BPF_K:
	case BPF_ALU64 | BPF_AND | BPF_K:
		ctx->tmp_used = 1;
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_AND(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_OR | BPF_K:
	case BPF_ALU64 | BPF_OR | BPF_K:
		ctx->tmp_used = 1;
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_ORR(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_XOR | BPF_K:
	case BPF_ALU64 | BPF_XOR | BPF_K:
		ctx->tmp_used = 1;
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_EOR(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_MUL | BPF_K:
	case BPF_ALU64 | BPF_MUL | BPF_K:
		ctx->tmp_used = 1;
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_MUL(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_DIV | BPF_K:
	case BPF_ALU64 | BPF_DIV | BPF_K:
		ctx->tmp_used = 1;
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_UDIV(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_MOD | BPF_K:
	case BPF_ALU64 | BPF_MOD | BPF_K:
		ctx->tmp_used = 1;
		emit_a64_mov_i(is64, tmp2, imm, ctx);
		emit(A64_UDIV(is64, tmp, dst, tmp2), ctx);
		emit(A64_MUL(is64, tmp, tmp, tmp2), ctx);
		emit(A64_SUB(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_LSH | BPF_K:
	case BPF_ALU64 | BPF_LSH | BPF_K:
		emit(A64_LSL(is64, dst, dst, imm), ctx);
		break;
	case BPF_ALU | BPF_RSH | BPF_K:
	case BPF_ALU64 | BPF_RSH | BPF_K:
		emit(A64_LSR(is64, dst, dst, imm), ctx);
		break;
	case BPF_ALU | BPF_ARSH | BPF_K:
	case BPF_ALU64 | BPF_ARSH | BPF_K:
		emit(A64_ASR(is64, dst, dst, imm), ctx);
		break;

	/* JUMP off */
	case BPF_JMP | BPF_JA:
		jmp_offset = bpf2a64_offset(i + off, i, ctx);
		check_imm26(jmp_offset);
		emit(A64_B(jmp_offset), ctx);
		break;
	/* IF (dst COND src) JUMP off */
	case BPF_JMP | BPF_JEQ | BPF_X:
	case BPF_JMP | BPF_JGT | BPF_X:
	case BPF_JMP | BPF_JGE | BPF_X:
	case BPF_JMP | BPF_JNE | BPF_X:
	case BPF_JMP | BPF_JSGT | BPF_X:
	case BPF_JMP | BPF_JSGE | BPF_X:
		emit(A64_CMP(1, dst, src), ctx);
emit_cond_jmp:
		jmp_offset = bpf2a64_offset(i + off, i, ctx);
		check_imm19(jmp_offset);
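		/*
		 * Map the BPF condition onto an A64 condition code. BPF
		 * JGT/JGE are unsigned comparisons, so they use HI
		 * (unsigned higher) and CS (carry set, i.e. unsigned >=),
		 * while the signed JSGT/JSGE use GT/GE.
		 */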
		switch (BPF_OP(code)) {
		case BPF_JEQ:
			jmp_cond = A64_COND_EQ;
			break;
		case BPF_JGT:
			jmp_cond = A64_COND_HI;
			break;
		case BPF_JGE:
			jmp_cond = A64_COND_CS;
			break;
		case BPF_JNE:
			jmp_cond = A64_COND_NE;
			break;
		case BPF_JSGT:
			jmp_cond = A64_COND_GT;
			break;
		case BPF_JSGE:
			jmp_cond = A64_COND_GE;
			break;
		default:
			return -EFAULT;
		}
		emit(A64_B_(jmp_cond, jmp_offset), ctx);
		break;
	case BPF_JMP | BPF_JSET | BPF_X:
		emit(A64_TST(1, dst, src), ctx);
		goto emit_cond_jmp;
	/* IF (dst COND imm) JUMP off */
	case BPF_JMP | BPF_JEQ | BPF_K:
	case BPF_JMP | BPF_JGT | BPF_K:
	case BPF_JMP | BPF_JGE | BPF_K:
	case BPF_JMP | BPF_JNE | BPF_K:
	case BPF_JMP | BPF_JSGT | BPF_K:
	case BPF_JMP | BPF_JSGE | BPF_K:
		ctx->tmp_used = 1;
		emit_a64_mov_i(1, tmp, imm, ctx);
		emit(A64_CMP(1, dst, tmp), ctx);
		goto emit_cond_jmp;
	case BPF_JMP | BPF_JSET | BPF_K:
		ctx->tmp_used = 1;
		emit_a64_mov_i(1, tmp, imm, ctx);
		emit(A64_TST(1, dst, tmp), ctx);
		goto emit_cond_jmp;
	/* function call */
	case BPF_JMP | BPF_CALL:
	{
		const u8 r0 = bpf2a64[BPF_REG_0];
		const u64 func = (u64)__bpf_call_base + imm;

		ctx->tmp_used = 1;
		emit_a64_mov_i64(tmp, func, ctx);
		emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
		emit(A64_MOV(1, A64_FP, A64_SP), ctx);
		emit(A64_BLR(tmp), ctx);
		emit(A64_MOV(1, r0, A64_R(0)), ctx);
		emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);
		break;
	}
	/* function return */
	case BPF_JMP | BPF_EXIT:
		/* Optimization: when the last instruction is EXIT,
		 * simply fall through to the epilogue.
		 */
		if (i == ctx->prog->len - 1)
			break;
		jmp_offset = epilogue_offset(ctx);
		check_imm26(jmp_offset);
		emit(A64_B(jmp_offset), ctx);
		break;

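	/*
	 * BPF_LD | BPF_IMM | BPF_DW is the only 16-byte eBPF instruction:
	 * it spans two 8-byte slots, with the low 32 bits of the constant
	 * in this insn's imm and the high 32 bits in the next slot's imm
	 * (all other fields of which must be zero). For example, loading
	 * 0x1122334455667788 encodes imm = 0x55667788 and insn[1].imm =
	 * 0x11223344. Returning 1 below makes build_body() skip the second
	 * slot.
	 */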
	/* dst = imm64 */
	case BPF_LD | BPF_IMM | BPF_DW:
	{
		const struct bpf_insn insn1 = insn[1];
		u64 imm64;

		if (insn1.code != 0 || insn1.src_reg != 0 ||
		    insn1.dst_reg != 0 || insn1.off != 0) {
			/* Note: verifier in BPF core must catch invalid
			 * instructions.
			 */
			pr_err_once("Invalid BPF_LD_IMM64 instruction\n");
			return -EINVAL;
		}

		imm64 = (u64)insn1.imm << 32 | (u32)imm;
		emit_a64_mov_i64(dst, imm64, ctx);

		return 1;
	}

	/* LDX: dst = *(size *)(src + off) */
	case BPF_LDX | BPF_MEM | BPF_W:
	case BPF_LDX | BPF_MEM | BPF_H:
	case BPF_LDX | BPF_MEM | BPF_B:
	case BPF_LDX | BPF_MEM | BPF_DW:
		ctx->tmp_used = 1;
		emit_a64_mov_i(1, tmp, off, ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			emit(A64_LDR32(dst, src, tmp), ctx);
			break;
		case BPF_H:
			emit(A64_LDRH(dst, src, tmp), ctx);
			break;
		case BPF_B:
			emit(A64_LDRB(dst, src, tmp), ctx);
			break;
		case BPF_DW:
			emit(A64_LDR64(dst, src, tmp), ctx);
			break;
		}
		break;

	/* ST: *(size *)(dst + off) = imm */
	case BPF_ST | BPF_MEM | BPF_W:
	case BPF_ST | BPF_MEM | BPF_H:
	case BPF_ST | BPF_MEM | BPF_B:
	case BPF_ST | BPF_MEM | BPF_DW:
		/* Load imm to a register then store it */
		ctx->tmp_used = 1;
		emit_a64_mov_i(1, tmp2, off, ctx);
		emit_a64_mov_i(1, tmp, imm, ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			emit(A64_STR32(tmp, dst, tmp2), ctx);
			break;
		case BPF_H:
			emit(A64_STRH(tmp, dst, tmp2), ctx);
			break;
		case BPF_B:
			emit(A64_STRB(tmp, dst, tmp2), ctx);
			break;
		case BPF_DW:
			emit(A64_STR64(tmp, dst, tmp2), ctx);
			break;
		}
		break;

	/* STX: *(size *)(dst + off) = src */
	case BPF_STX | BPF_MEM | BPF_W:
	case BPF_STX | BPF_MEM | BPF_H:
	case BPF_STX | BPF_MEM | BPF_B:
	case BPF_STX | BPF_MEM | BPF_DW:
		ctx->tmp_used = 1;
		emit_a64_mov_i(1, tmp, off, ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			emit(A64_STR32(src, dst, tmp), ctx);
			break;
		case BPF_H:
			emit(A64_STRH(src, dst, tmp), ctx);
			break;
		case BPF_B:
			emit(A64_STRB(src, dst, tmp), ctx);
			break;
		case BPF_DW:
			emit(A64_STR64(src, dst, tmp), ctx);
			break;
		}
		break;
	/* STX XADD: lock *(u32 *)(dst + off) += src */
	case BPF_STX | BPF_XADD | BPF_W:
	/* STX XADD: lock *(u64 *)(dst + off) += src */
	case BPF_STX | BPF_XADD | BPF_DW:
		goto notyet;

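	/*
	 * Classic BPF packet loads. A sketch of the call sequence emitted
	 * below, in C (bpf_load_pointer() is the helper from
	 * <linux/filter.h>):
	 *
	 *	void *p = bpf_load_pointer(skb, imm [+ src], size, buffer);
	 *	if (!p)
	 *		return 0;	// branch to epilogue with R0 = 0
	 *	R0 = ntohx(*(size *)p);
	 *
	 * where buffer is the JIT scratchpad just below the BPF stack
	 * (BPF_FP - STACK_SIZE) reserved by _STACK_SIZE above.
	 */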
	/* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */
	case BPF_LD | BPF_ABS | BPF_W:
	case BPF_LD | BPF_ABS | BPF_H:
	case BPF_LD | BPF_ABS | BPF_B:
	/* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + src + imm)) */
	case BPF_LD | BPF_IND | BPF_W:
	case BPF_LD | BPF_IND | BPF_H:
	case BPF_LD | BPF_IND | BPF_B:
	{
		const u8 r0 = bpf2a64[BPF_REG_0]; /* r0 = return value */
		const u8 r6 = bpf2a64[BPF_REG_6]; /* r6 = pointer to sk_buff */
		const u8 fp = bpf2a64[BPF_REG_FP];
		const u8 r1 = bpf2a64[BPF_REG_1]; /* r1: struct sk_buff *skb */
		const u8 r2 = bpf2a64[BPF_REG_2]; /* r2: int k */
		const u8 r3 = bpf2a64[BPF_REG_3]; /* r3: unsigned int size */
		const u8 r4 = bpf2a64[BPF_REG_4]; /* r4: void *buffer */
		const u8 r5 = bpf2a64[BPF_REG_5]; /* r5: void *(*func)(...) */
		int size;

		emit(A64_MOV(1, r1, r6), ctx);
		emit_a64_mov_i(0, r2, imm, ctx);
		if (BPF_MODE(code) == BPF_IND)
			emit(A64_ADD(0, r2, r2, src), ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			size = 4;
			break;
		case BPF_H:
			size = 2;
			break;
		case BPF_B:
			size = 1;
			break;
		default:
			return -EINVAL;
		}
		emit_a64_mov_i64(r3, size, ctx);
		emit(A64_SUB_I(1, r4, fp, STACK_SIZE), ctx);
		emit_a64_mov_i64(r5, (unsigned long)bpf_load_pointer, ctx);
		emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
		emit(A64_MOV(1, A64_FP, A64_SP), ctx);
		emit(A64_BLR(r5), ctx);
		emit(A64_MOV(1, r0, A64_R(0)), ctx);
		emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);

		jmp_offset = epilogue_offset(ctx);
		check_imm19(jmp_offset);
		emit(A64_CBZ(1, r0, jmp_offset), ctx);
		emit(A64_MOV(1, r5, r0), ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			emit(A64_LDR32(r0, r5, A64_ZR), ctx);
#ifndef CONFIG_CPU_BIG_ENDIAN
			emit(A64_REV32(0, r0, r0), ctx);
#endif
			break;
		case BPF_H:
			emit(A64_LDRH(r0, r5, A64_ZR), ctx);
#ifndef CONFIG_CPU_BIG_ENDIAN
			emit(A64_REV16(0, r0, r0), ctx);
#endif
			break;
		case BPF_B:
			emit(A64_LDRB(r0, r5, A64_ZR), ctx);
			break;
		}
		break;
	}
notyet:
	pr_info_once("*** NOT YET: opcode %02x ***\n", code);
	return -EFAULT;

	default:
		pr_err_once("unknown opcode %02x\n", code);
		return -EINVAL;
	}

	return 0;
}

static int build_body(struct jit_ctx *ctx)
{
	const struct bpf_prog *prog = ctx->prog;
	int i;

	for (i = 0; i < prog->len; i++) {
		const struct bpf_insn *insn = &prog->insnsi[i];
		int ret;

		ret = build_insn(insn, ctx);

		if (ctx->image == NULL)
			ctx->offset[i] = ctx->idx;

		if (ret > 0) {
			i++;
			continue;
		}
		if (ret)
			return ret;
	}

	return 0;
}

static int validate_code(struct jit_ctx *ctx)
{
	int i;

	for (i = 0; i < ctx->idx; i++) {
		u32 a64_insn = le32_to_cpu(ctx->image[i]);

		if (a64_insn == AARCH64_BREAK_FAULT)
			return -1;
	}

	return 0;
}

static inline void bpf_flush_icache(void *start, void *end)
{
	flush_icache_range((unsigned long)start, (unsigned long)end);
}

void bpf_jit_compile(struct bpf_prog *prog)
{
	/* Nothing to do here. We support Internal BPF. */
}

void bpf_int_jit_compile(struct bpf_prog *prog)
{
	struct bpf_binary_header *header;
	struct jit_ctx ctx;
	int image_size;
	u8 *image_ptr;

	if (!bpf_jit_enable)
		return;

	if (!prog || !prog->len)
		return;

	memset(&ctx, 0, sizeof(ctx));
	ctx.prog = prog;

	ctx.offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
	if (ctx.offset == NULL)
		return;

	/* 1. Initial fake pass to compute ctx->idx (and therefore the
	 * image size), and to fill in ctx->offset and ctx->tmp_used.
	 */
	if (build_body(&ctx))
		goto out;

	build_prologue(&ctx);

	ctx.epilogue_offset = ctx.idx;
	build_epilogue(&ctx);

	/* Now we know the actual image size. */
	image_size = sizeof(u32) * ctx.idx;
	header = bpf_jit_binary_alloc(image_size, &image_ptr,
				      sizeof(u32), jit_fill_hole);
	if (header == NULL)
		goto out;

	/* 2. Now, the actual pass. */
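	/* emit() writes into ctx.image this time instead of just counting.
	 * This pass must produce exactly the instruction count computed by
	 * pass 1, which is why ctx.idx is reset while the tmp_used and
	 * offset[] results from pass 1 are kept.
	 */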
	ctx.image = (u32 *)image_ptr;
	ctx.idx = 0;

	build_prologue(&ctx);

	if (build_body(&ctx)) {
		bpf_jit_binary_free(header);
		goto out;
	}

	build_epilogue(&ctx);

	/* 3. Extra pass to validate JITed code. */
	if (validate_code(&ctx)) {
		bpf_jit_binary_free(header);
		goto out;
	}

	/* And we're done. */
	if (bpf_jit_enable > 1)
		bpf_jit_dump(prog->len, image_size, 2, ctx.image);

	bpf_flush_icache(header, ctx.image + ctx.idx);

	set_memory_ro((unsigned long)header, header->pages);
	prog->bpf_func = (void *)ctx.image;
	prog->jited = 1;
out:
	kfree(ctx.offset);
}

void bpf_jit_free(struct bpf_prog *prog)
{
	unsigned long addr = (unsigned long)prog->bpf_func & PAGE_MASK;
	struct bpf_binary_header *header = (void *)addr;

	if (!prog->jited)
		goto free_filter;

	set_memory_rw(addr, header->pages);
	bpf_jit_binary_free(header);

free_filter:
	bpf_prog_unlock_free(prog);
}