/*
 * BPF JIT compiler for ARM64
 *
 * Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) "bpf_jit: " fmt

#include <linux/filter.h>
#include <linux/printk.h>
#include <linux/skbuff.h>
#include <linux/slab.h>

#include <asm/byteorder.h>
#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>

#include "bpf_jit.h"

int bpf_jit_enable __read_mostly;

#define TMP_REG_1 (MAX_BPF_REG + 0)
#define TMP_REG_2 (MAX_BPF_REG + 1)

/* Map BPF registers to A64 registers */
static const int bpf2a64[] = {
        /* return value from in-kernel function, and exit value from eBPF */
        [BPF_REG_0] = A64_R(7),
        /* arguments from eBPF program to in-kernel function */
        [BPF_REG_1] = A64_R(0),
        [BPF_REG_2] = A64_R(1),
        [BPF_REG_3] = A64_R(2),
        [BPF_REG_4] = A64_R(3),
        [BPF_REG_5] = A64_R(4),
        /* callee saved registers that in-kernel function will preserve */
        [BPF_REG_6] = A64_R(19),
        [BPF_REG_7] = A64_R(20),
        [BPF_REG_8] = A64_R(21),
        [BPF_REG_9] = A64_R(22),
        /* read-only frame pointer to access stack */
        [BPF_REG_FP] = A64_FP,
        /* temporary register for internal BPF JIT */
        [TMP_REG_1] = A64_R(23),
        [TMP_REG_2] = A64_R(24),
};

struct jit_ctx {
        const struct bpf_prog *prog;
        int idx;
        int tmp_used;
        int epilogue_offset;
        int *offset;
        u32 *image;
};

static inline void emit(const u32 insn, struct jit_ctx *ctx)
{
        if (ctx->image != NULL)
                ctx->image[ctx->idx] = cpu_to_le32(insn);

        ctx->idx++;
}

static inline void emit_a64_mov_i64(const int reg, const u64 val,
                                    struct jit_ctx *ctx)
{
        u64 tmp = val;
        int shift = 0;

        emit(A64_MOVZ(1, reg, tmp & 0xffff, shift), ctx);
        tmp >>= 16;
        shift += 16;
        while (tmp) {
                if (tmp & 0xffff)
                        emit(A64_MOVK(1, reg, tmp & 0xffff, shift), ctx);
                tmp >>= 16;
                shift += 16;
        }
}

static inline void emit_a64_mov_i(const int is64, const int reg,
                                  const s32 val, struct jit_ctx *ctx)
{
        u16 hi = val >> 16;
        u16 lo = val & 0xffff;

        if (hi & 0x8000) {
                if (hi == 0xffff) {
                        emit(A64_MOVN(is64, reg, (u16)~lo, 0), ctx);
                } else {
                        emit(A64_MOVN(is64, reg, (u16)~hi, 16), ctx);
                        emit(A64_MOVK(is64, reg, lo, 0), ctx);
                }
        } else {
                emit(A64_MOVZ(is64, reg, lo, 0), ctx);
                if (hi)
                        emit(A64_MOVK(is64, reg, hi, 16), ctx);
        }
}

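/* ctx->offset[] is populated by the first pass in build_body() (while
 * ctx->image is still NULL): offset[i] is the index of the first A64
 * instruction emitted after the code for eBPF instruction i. An eBPF
 * jump with offset 'off' at instruction i targets instruction
 * i + off + 1, whose code starts at offset[i + off], which is why
 * callers pass bpf_to = i + off.
 */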
static inline int bpf2a64_offset(int bpf_to, int bpf_from,
                                 const struct jit_ctx *ctx)
{
        int to = ctx->offset[bpf_to];
        /* -1 to account for the Branch instruction */
        int from = ctx->offset[bpf_from] - 1;

        return to - from;
}

static void jit_fill_hole(void *area, unsigned int size)
{
        u32 *ptr;
        /* We are guaranteed to have aligned memory. */
        for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
                *ptr++ = cpu_to_le32(AARCH64_BREAK_FAULT);
}

static inline int epilogue_offset(const struct jit_ctx *ctx)
{
        int to = ctx->epilogue_offset;
        int from = ctx->idx;

        return to - from;
}

/* Stack must be a multiple of 16B */
#define STACK_ALIGN(sz) (((sz) + 15) & ~15)

static void build_prologue(struct jit_ctx *ctx)
{
        const u8 r6 = bpf2a64[BPF_REG_6];
        const u8 r7 = bpf2a64[BPF_REG_7];
        const u8 r8 = bpf2a64[BPF_REG_8];
        const u8 r9 = bpf2a64[BPF_REG_9];
        const u8 fp = bpf2a64[BPF_REG_FP];
        const u8 ra = bpf2a64[BPF_REG_A];
        const u8 rx = bpf2a64[BPF_REG_X];
        const u8 tmp1 = bpf2a64[TMP_REG_1];
        const u8 tmp2 = bpf2a64[TMP_REG_2];
        int stack_size = MAX_BPF_STACK;

        stack_size += 4; /* extra for skb_copy_bits buffer */
        stack_size = STACK_ALIGN(stack_size);

        /* Save callee-saved registers */
        emit(A64_PUSH(r6, r7, A64_SP), ctx);
        emit(A64_PUSH(r8, r9, A64_SP), ctx);
        if (ctx->tmp_used)
                emit(A64_PUSH(tmp1, tmp2, A64_SP), ctx);

        /* Set up BPF stack */
        emit(A64_SUB_I(1, A64_SP, A64_SP, stack_size), ctx);

        /* Set up frame pointer */
        emit(A64_MOV(1, fp, A64_SP), ctx);

        /* Clear registers A and X */
        emit_a64_mov_i64(ra, 0, ctx);
        emit_a64_mov_i64(rx, 0, ctx);
}

static void build_epilogue(struct jit_ctx *ctx)
{
        const u8 r0 = bpf2a64[BPF_REG_0];
        const u8 r6 = bpf2a64[BPF_REG_6];
        const u8 r7 = bpf2a64[BPF_REG_7];
        const u8 r8 = bpf2a64[BPF_REG_8];
        const u8 r9 = bpf2a64[BPF_REG_9];
        const u8 fp = bpf2a64[BPF_REG_FP];
        const u8 tmp1 = bpf2a64[TMP_REG_1];
        const u8 tmp2 = bpf2a64[TMP_REG_2];
        int stack_size = MAX_BPF_STACK;

        stack_size += 4; /* extra for skb_copy_bits buffer */
        stack_size = STACK_ALIGN(stack_size);

        /* We're done with BPF stack */
        emit(A64_ADD_I(1, A64_SP, A64_SP, stack_size), ctx);

        /* Restore callee-saved registers */
        if (ctx->tmp_used)
                emit(A64_POP(tmp1, tmp2, A64_SP), ctx);
        emit(A64_POP(r8, r9, A64_SP), ctx);
        emit(A64_POP(r6, r7, A64_SP), ctx);

        /* Restore frame pointer */
        emit(A64_MOV(1, fp, A64_SP), ctx);

        /* Set return value */
        emit(A64_MOV(1, A64_R(0), r0), ctx);

        emit(A64_RET(A64_LR), ctx);
}

/* JITs an eBPF instruction.
 * Returns:
 * 0 - successfully JITed an 8-byte eBPF instruction.
 * >0 - successfully JITed a 16-byte eBPF instruction.
 * <0 - failed to JIT.
 */
static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
{
        const u8 code = insn->code;
        const u8 dst = bpf2a64[insn->dst_reg];
        const u8 src = bpf2a64[insn->src_reg];
        const u8 tmp = bpf2a64[TMP_REG_1];
        const u8 tmp2 = bpf2a64[TMP_REG_2];
        const s16 off = insn->off;
        const s32 imm = insn->imm;
        const int i = insn - ctx->prog->insnsi;
        const bool is64 = BPF_CLASS(code) == BPF_ALU64;
        u8 jmp_cond;
        s32 jmp_offset;

        switch (code) {
        /* dst = src */
        case BPF_ALU | BPF_MOV | BPF_X:
        case BPF_ALU64 | BPF_MOV | BPF_X:
                emit(A64_MOV(is64, dst, src), ctx);
                break;
        /* dst = dst OP src */
        case BPF_ALU | BPF_ADD | BPF_X:
        case BPF_ALU64 | BPF_ADD | BPF_X:
                emit(A64_ADD(is64, dst, dst, src), ctx);
                break;
        case BPF_ALU | BPF_SUB | BPF_X:
        case BPF_ALU64 | BPF_SUB | BPF_X:
                emit(A64_SUB(is64, dst, dst, src), ctx);
                break;
        case BPF_ALU | BPF_AND | BPF_X:
        case BPF_ALU64 | BPF_AND | BPF_X:
                emit(A64_AND(is64, dst, dst, src), ctx);
                break;
        case BPF_ALU | BPF_OR | BPF_X:
        case BPF_ALU64 | BPF_OR | BPF_X:
                emit(A64_ORR(is64, dst, dst, src), ctx);
                break;
        case BPF_ALU | BPF_XOR | BPF_X:
        case BPF_ALU64 | BPF_XOR | BPF_X:
                emit(A64_EOR(is64, dst, dst, src), ctx);
                break;
        case BPF_ALU | BPF_MUL | BPF_X:
        case BPF_ALU64 | BPF_MUL | BPF_X:
                emit(A64_MUL(is64, dst, dst, src), ctx);
                break;
        case BPF_ALU | BPF_DIV | BPF_X:
        case BPF_ALU64 | BPF_DIV | BPF_X:
                emit(A64_UDIV(is64, dst, dst, src), ctx);
                break;
        case BPF_ALU | BPF_MOD | BPF_X:
        case BPF_ALU64 | BPF_MOD | BPF_X:
                ctx->tmp_used = 1;
                emit(A64_UDIV(is64, tmp, dst, src), ctx);
                emit(A64_MUL(is64, tmp, tmp, src), ctx);
                emit(A64_SUB(is64, dst, dst, tmp), ctx);
                break;
        case BPF_ALU | BPF_LSH | BPF_X:
        case BPF_ALU64 | BPF_LSH | BPF_X:
                emit(A64_LSLV(is64, dst, dst, src), ctx);
                break;
        case BPF_ALU | BPF_RSH | BPF_X:
        case BPF_ALU64 | BPF_RSH | BPF_X:
                emit(A64_LSRV(is64, dst, dst, src), ctx);
                break;
        case BPF_ALU | BPF_ARSH | BPF_X:
        case BPF_ALU64 | BPF_ARSH | BPF_X:
                emit(A64_ASRV(is64, dst, dst, src), ctx);
                break;
        /* dst = -dst */
        case BPF_ALU | BPF_NEG:
        case BPF_ALU64 | BPF_NEG:
                emit(A64_NEG(is64, dst, dst), ctx);
                break;
        /* dst = BSWAP##imm(dst) */
        case BPF_ALU | BPF_END | BPF_FROM_LE:
        case BPF_ALU | BPF_END | BPF_FROM_BE:
#ifdef CONFIG_CPU_BIG_ENDIAN
                if (BPF_SRC(code) == BPF_FROM_BE)
                        goto emit_bswap_uxt;
#else /* !CONFIG_CPU_BIG_ENDIAN */
                if (BPF_SRC(code) == BPF_FROM_LE)
                        goto emit_bswap_uxt;
#endif
                switch (imm) {
                case 16:
                        emit(A64_REV16(is64, dst, dst), ctx);
                        /* zero-extend 16 bits into 64 bits */
                        emit(A64_UXTH(is64, dst, dst), ctx);
                        break;
                case 32:
                        emit(A64_REV32(is64, dst, dst), ctx);
                        /* upper 32 bits already cleared */
                        break;
                case 64:
                        emit(A64_REV64(dst, dst), ctx);
                        break;
                }
                break;
emit_bswap_uxt:
                switch (imm) {
                case 16:
                        /* zero-extend 16 bits into 64 bits */
                        emit(A64_UXTH(is64, dst, dst), ctx);
                        break;
                case 32:
                        /* zero-extend 32 bits into 64 bits */
                        emit(A64_UXTW(is64, dst, dst), ctx);
                        break;
                case 64:
                        /* nop */
                        break;
                }
                break;
        /* dst = imm */
        case BPF_ALU | BPF_MOV | BPF_K:
        case BPF_ALU64 | BPF_MOV | BPF_K:
                emit_a64_mov_i(is64, dst, imm, ctx);
                break;
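        /*
         * For the dst = dst OP imm cases below, the A64 helpers used here
         * (A64_ADD, A64_SUB, A64_AND, ...) take register operands, so the
         * BPF immediate is first loaded into tmp with emit_a64_mov_i();
         * only the shift cases use true immediate encodings
         * (A64_LSL/A64_LSR/A64_ASR).
         */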
        /* dst = dst OP imm */
        case BPF_ALU | BPF_ADD | BPF_K:
        case BPF_ALU64 | BPF_ADD | BPF_K:
                ctx->tmp_used = 1;
                emit_a64_mov_i(is64, tmp, imm, ctx);
                emit(A64_ADD(is64, dst, dst, tmp), ctx);
                break;
        case BPF_ALU | BPF_SUB | BPF_K:
        case BPF_ALU64 | BPF_SUB | BPF_K:
                ctx->tmp_used = 1;
                emit_a64_mov_i(is64, tmp, imm, ctx);
                emit(A64_SUB(is64, dst, dst, tmp), ctx);
                break;
        case BPF_ALU | BPF_AND | BPF_K:
        case BPF_ALU64 | BPF_AND | BPF_K:
                ctx->tmp_used = 1;
                emit_a64_mov_i(is64, tmp, imm, ctx);
                emit(A64_AND(is64, dst, dst, tmp), ctx);
                break;
        case BPF_ALU | BPF_OR | BPF_K:
        case BPF_ALU64 | BPF_OR | BPF_K:
                ctx->tmp_used = 1;
                emit_a64_mov_i(is64, tmp, imm, ctx);
                emit(A64_ORR(is64, dst, dst, tmp), ctx);
                break;
        case BPF_ALU | BPF_XOR | BPF_K:
        case BPF_ALU64 | BPF_XOR | BPF_K:
                ctx->tmp_used = 1;
                emit_a64_mov_i(is64, tmp, imm, ctx);
                emit(A64_EOR(is64, dst, dst, tmp), ctx);
                break;
        case BPF_ALU | BPF_MUL | BPF_K:
        case BPF_ALU64 | BPF_MUL | BPF_K:
                ctx->tmp_used = 1;
                emit_a64_mov_i(is64, tmp, imm, ctx);
                emit(A64_MUL(is64, dst, dst, tmp), ctx);
                break;
        case BPF_ALU | BPF_DIV | BPF_K:
        case BPF_ALU64 | BPF_DIV | BPF_K:
                ctx->tmp_used = 1;
                emit_a64_mov_i(is64, tmp, imm, ctx);
                emit(A64_UDIV(is64, dst, dst, tmp), ctx);
                break;
        case BPF_ALU | BPF_MOD | BPF_K:
        case BPF_ALU64 | BPF_MOD | BPF_K:
                ctx->tmp_used = 1;
                emit_a64_mov_i(is64, tmp2, imm, ctx);
                emit(A64_UDIV(is64, tmp, dst, tmp2), ctx);
                emit(A64_MUL(is64, tmp, tmp, tmp2), ctx);
                emit(A64_SUB(is64, dst, dst, tmp), ctx);
                break;
        case BPF_ALU | BPF_LSH | BPF_K:
        case BPF_ALU64 | BPF_LSH | BPF_K:
                emit(A64_LSL(is64, dst, dst, imm), ctx);
                break;
        case BPF_ALU | BPF_RSH | BPF_K:
        case BPF_ALU64 | BPF_RSH | BPF_K:
                emit(A64_LSR(is64, dst, dst, imm), ctx);
                break;
        case BPF_ALU | BPF_ARSH | BPF_K:
        case BPF_ALU64 | BPF_ARSH | BPF_K:
                emit(A64_ASR(is64, dst, dst, imm), ctx);
                break;

#define check_imm(bits, imm) do {                               \
        if ((((imm) > 0) && ((imm) >> (bits))) ||               \
            (((imm) < 0) && (~(imm) >> (bits)))) {              \
                pr_info("[%2d] imm=%d(0x%x) out of range\n",    \
                        i, imm, imm);                           \
                return -EINVAL;                                 \
        }                                                       \
} while (0)
#define check_imm19(imm) check_imm(19, imm)
#define check_imm26(imm) check_imm(26, imm)

        /* JUMP off */
        case BPF_JMP | BPF_JA:
                jmp_offset = bpf2a64_offset(i + off, i, ctx);
                check_imm26(jmp_offset);
                emit(A64_B(jmp_offset), ctx);
                break;
        /* IF (dst COND src) JUMP off */
        case BPF_JMP | BPF_JEQ | BPF_X:
        case BPF_JMP | BPF_JGT | BPF_X:
        case BPF_JMP | BPF_JGE | BPF_X:
        case BPF_JMP | BPF_JNE | BPF_X:
        case BPF_JMP | BPF_JSGT | BPF_X:
        case BPF_JMP | BPF_JSGE | BPF_X:
                emit(A64_CMP(1, dst, src), ctx);
emit_cond_jmp:
                jmp_offset = bpf2a64_offset(i + off, i, ctx);
                check_imm19(jmp_offset);
                switch (BPF_OP(code)) {
                case BPF_JEQ:
                        jmp_cond = A64_COND_EQ;
                        break;
                case BPF_JGT:
                        jmp_cond = A64_COND_HI;
                        break;
                case BPF_JGE:
                        jmp_cond = A64_COND_CS;
                        break;
                case BPF_JNE:
                        jmp_cond = A64_COND_NE;
                        break;
                case BPF_JSGT:
                        jmp_cond = A64_COND_GT;
                        break;
                case BPF_JSGE:
                        jmp_cond = A64_COND_GE;
                        break;
                default:
                        return -EFAULT;
                }
                emit(A64_B_(jmp_cond, jmp_offset), ctx);
                break;
        case BPF_JMP | BPF_JSET | BPF_X:
                emit(A64_TST(1, dst, src), ctx);
                goto emit_cond_jmp;
        /* IF (dst COND imm) JUMP off */
        case BPF_JMP | BPF_JEQ | BPF_K:
        case BPF_JMP | BPF_JGT | BPF_K:
        case BPF_JMP | BPF_JGE | BPF_K:
        case BPF_JMP | BPF_JNE | BPF_K:
        case BPF_JMP | BPF_JSGT | BPF_K:
        case BPF_JMP | BPF_JSGE | BPF_K:
                ctx->tmp_used = 1;
                emit_a64_mov_i(1, tmp, imm, ctx);
                emit(A64_CMP(1, dst, tmp), ctx);
                goto emit_cond_jmp;
        case BPF_JMP | BPF_JSET | BPF_K:
                ctx->tmp_used = 1;
                emit_a64_mov_i(1, tmp, imm, ctx);
                emit(A64_TST(1, dst, tmp), ctx);
                goto emit_cond_jmp;
        /* function call */
        case BPF_JMP | BPF_CALL:
        {
                const u8 r0 = bpf2a64[BPF_REG_0];
                const u64 func = (u64)__bpf_call_base + imm;

                ctx->tmp_used = 1;
                emit_a64_mov_i64(tmp, func, ctx);
                emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
                emit(A64_MOV(1, A64_FP, A64_SP), ctx);
                emit(A64_BLR(tmp), ctx);
                emit(A64_MOV(1, r0, A64_R(0)), ctx);
                emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);
                break;
        }
        /* function return */
        case BPF_JMP | BPF_EXIT:
                /* Optimization: when last instruction is EXIT,
                   simply fallthrough to epilogue. */
                if (i == ctx->prog->len - 1)
                        break;
                jmp_offset = epilogue_offset(ctx);
                check_imm26(jmp_offset);
                emit(A64_B(jmp_offset), ctx);
                break;

        /* dst = imm64 */
        case BPF_LD | BPF_IMM | BPF_DW:
        {
                const struct bpf_insn insn1 = insn[1];
                u64 imm64;

                if (insn1.code != 0 || insn1.src_reg != 0 ||
                    insn1.dst_reg != 0 || insn1.off != 0) {
                        /* Note: verifier in BPF core must catch invalid
                         * instructions.
                         */
                        pr_err_once("Invalid BPF_LD_IMM64 instruction\n");
                        return -EINVAL;
                }

                imm64 = (u64)insn1.imm << 32 | (u32)imm;
                emit_a64_mov_i64(dst, imm64, ctx);

                return 1;
        }

        /* LDX: dst = *(size *)(src + off) */
        case BPF_LDX | BPF_MEM | BPF_W:
        case BPF_LDX | BPF_MEM | BPF_H:
        case BPF_LDX | BPF_MEM | BPF_B:
        case BPF_LDX | BPF_MEM | BPF_DW:
                ctx->tmp_used = 1;
                emit_a64_mov_i(1, tmp, off, ctx);
                switch (BPF_SIZE(code)) {
                case BPF_W:
                        emit(A64_LDR32(dst, src, tmp), ctx);
                        break;
                case BPF_H:
                        emit(A64_LDRH(dst, src, tmp), ctx);
                        break;
                case BPF_B:
                        emit(A64_LDRB(dst, src, tmp), ctx);
                        break;
                case BPF_DW:
                        emit(A64_LDR64(dst, src, tmp), ctx);
                        break;
                }
                break;

        /* ST: *(size *)(dst + off) = imm */
        case BPF_ST | BPF_MEM | BPF_W:
        case BPF_ST | BPF_MEM | BPF_H:
        case BPF_ST | BPF_MEM | BPF_B:
        case BPF_ST | BPF_MEM | BPF_DW:
                goto notyet;

        /* STX: *(size *)(dst + off) = src */
        case BPF_STX | BPF_MEM | BPF_W:
        case BPF_STX | BPF_MEM | BPF_H:
        case BPF_STX | BPF_MEM | BPF_B:
        case BPF_STX | BPF_MEM | BPF_DW:
                ctx->tmp_used = 1;
                emit_a64_mov_i(1, tmp, off, ctx);
                switch (BPF_SIZE(code)) {
                case BPF_W:
                        emit(A64_STR32(src, dst, tmp), ctx);
                        break;
                case BPF_H:
                        emit(A64_STRH(src, dst, tmp), ctx);
                        break;
                case BPF_B:
                        emit(A64_STRB(src, dst, tmp), ctx);
                        break;
                case BPF_DW:
                        emit(A64_STR64(src, dst, tmp), ctx);
                        break;
                }
                break;
        /* STX XADD: lock *(u32 *)(dst + off) += src */
        case BPF_STX | BPF_XADD | BPF_W:
        /* STX XADD: lock *(u64 *)(dst + off) += src */
        case BPF_STX | BPF_XADD | BPF_DW:
                goto notyet;

        /* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */
        case BPF_LD | BPF_ABS | BPF_W:
        case BPF_LD | BPF_ABS | BPF_H:
        case BPF_LD | BPF_ABS | BPF_B:
        /* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + src + imm)) */
        case BPF_LD | BPF_IND | BPF_W:
        case BPF_LD | BPF_IND | BPF_H:
        case BPF_LD | BPF_IND | BPF_B:
        {
                const u8 r0 = bpf2a64[BPF_REG_0]; /* r0 = return value */
                const u8 r6 = bpf2a64[BPF_REG_6]; /* r6 = pointer to sk_buff */
                const u8 fp = bpf2a64[BPF_REG_FP];
                const u8 r1 = bpf2a64[BPF_REG_1]; /* r1: struct sk_buff *skb */
                const u8 r2 = bpf2a64[BPF_REG_2]; /* r2: int k */
                const u8 r3 = bpf2a64[BPF_REG_3]; /* r3: unsigned int size */
                const u8 r4 = bpf2a64[BPF_REG_4]; /* r4: void *buffer */
                const u8 r5 = bpf2a64[BPF_REG_5]; /* r5: void *(*func)(...) */
                int size;

                emit(A64_MOV(1, r1, r6), ctx);
                emit_a64_mov_i(0, r2, imm, ctx);
                if (BPF_MODE(code) == BPF_IND)
                        emit(A64_ADD(0, r2, r2, src), ctx);
                switch (BPF_SIZE(code)) {
                case BPF_W:
                        size = 4;
                        break;
                case BPF_H:
                        size = 2;
                        break;
                case BPF_B:
                        size = 1;
                        break;
                default:
                        return -EINVAL;
                }
                emit_a64_mov_i64(r3, size, ctx);
                emit(A64_ADD_I(1, r4, fp, MAX_BPF_STACK), ctx);
                emit_a64_mov_i64(r5, (unsigned long)bpf_load_pointer, ctx);
                emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
                emit(A64_MOV(1, A64_FP, A64_SP), ctx);
                emit(A64_BLR(r5), ctx);
                emit(A64_MOV(1, r0, A64_R(0)), ctx);
                emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);

                jmp_offset = epilogue_offset(ctx);
                check_imm19(jmp_offset);
                emit(A64_CBZ(1, r0, jmp_offset), ctx);
                emit(A64_MOV(1, r5, r0), ctx);
                switch (BPF_SIZE(code)) {
                case BPF_W:
                        emit(A64_LDR32(r0, r5, A64_ZR), ctx);
#ifndef CONFIG_CPU_BIG_ENDIAN
                        emit(A64_REV32(0, r0, r0), ctx);
#endif
                        break;
                case BPF_H:
                        emit(A64_LDRH(r0, r5, A64_ZR), ctx);
#ifndef CONFIG_CPU_BIG_ENDIAN
                        emit(A64_REV16(0, r0, r0), ctx);
#endif
                        break;
                case BPF_B:
                        emit(A64_LDRB(r0, r5, A64_ZR), ctx);
                        break;
                }
                break;
        }
notyet:
                pr_info_once("*** NOT YET: opcode %02x ***\n", code);
                return -EFAULT;

        default:
                pr_err_once("unknown opcode %02x\n", code);
                return -EINVAL;
        }

        return 0;
}

static int build_body(struct jit_ctx *ctx)
{
        const struct bpf_prog *prog = ctx->prog;
        int i;

        for (i = 0; i < prog->len; i++) {
                const struct bpf_insn *insn = &prog->insnsi[i];
                int ret;

                ret = build_insn(insn, ctx);

                if (ctx->image == NULL)
                        ctx->offset[i] = ctx->idx;

                if (ret > 0) {
                        i++;
                        continue;
                }
                if (ret)
                        return ret;
        }

        return 0;
}

static inline void bpf_flush_icache(void *start, void *end)
{
        flush_icache_range((unsigned long)start, (unsigned long)end);
}

void bpf_jit_compile(struct bpf_prog *prog)
{
        /* Nothing to do here. We support Internal BPF. */
}

void bpf_int_jit_compile(struct bpf_prog *prog)
{
        struct bpf_binary_header *header;
        struct jit_ctx ctx;
        int image_size;
        u8 *image_ptr;

        if (!bpf_jit_enable)
                return;

        if (!prog || !prog->len)
                return;

        memset(&ctx, 0, sizeof(ctx));
        ctx.prog = prog;

        ctx.offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
        if (ctx.offset == NULL)
                return;

        /* 1. Initial fake pass to compute ctx->idx. */

        /* Fake pass to fill in ctx->offset and ctx->tmp_used. */
        if (build_body(&ctx))
                goto out;

        build_prologue(&ctx);

        ctx.epilogue_offset = ctx.idx;
        build_epilogue(&ctx);

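        /*
         * ctx.image is still NULL during this first pass, so emit() has only
         * been advancing ctx.idx; it now holds the total number of A64
         * instructions the real pass will emit.
         */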
        /* Now we know the actual image size. */
        image_size = sizeof(u32) * ctx.idx;
        header = bpf_jit_binary_alloc(image_size, &image_ptr,
                                      sizeof(u32), jit_fill_hole);
        if (header == NULL)
                goto out;

        /* 2. Now, the actual pass. */

        ctx.image = (u32 *)image_ptr;
        ctx.idx = 0;

        build_prologue(&ctx);

        if (build_body(&ctx)) {
                bpf_jit_binary_free(header);
                goto out;
        }

        build_epilogue(&ctx);

        /* And we're done. */
        if (bpf_jit_enable > 1)
                bpf_jit_dump(prog->len, image_size, 2, ctx.image);

        bpf_flush_icache(ctx.image, ctx.image + ctx.idx);

        set_memory_ro((unsigned long)header, header->pages);
        prog->bpf_func = (void *)ctx.image;
        prog->jited = true;
out:
        kfree(ctx.offset);
}

void bpf_jit_free(struct bpf_prog *prog)
{
        unsigned long addr = (unsigned long)prog->bpf_func & PAGE_MASK;
        struct bpf_binary_header *header = (void *)addr;

        if (!prog->jited)
                goto free_filter;

        set_memory_rw(addr, header->pages);
        bpf_jit_binary_free(header);

free_filter:
        bpf_prog_unlock_free(prog);
}