1 /* 2 * BPF JIT compiler for ARM64 3 * 4 * Copyright (C) 2014-2015 Zi Shen Lim <zlim.lnx@gmail.com> 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License version 2 as 8 * published by the Free Software Foundation. 9 * 10 * This program is distributed in the hope that it will be useful, 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * GNU General Public License for more details. 14 * 15 * You should have received a copy of the GNU General Public License 16 * along with this program. If not, see <http://www.gnu.org/licenses/>. 17 */ 18 19 #define pr_fmt(fmt) "bpf_jit: " fmt 20 21 #include <linux/filter.h> 22 #include <linux/printk.h> 23 #include <linux/skbuff.h> 24 #include <linux/slab.h> 25 26 #include <asm/byteorder.h> 27 #include <asm/cacheflush.h> 28 #include <asm/debug-monitors.h> 29 30 #include "bpf_jit.h" 31 32 int bpf_jit_enable __read_mostly; 33 34 #define TMP_REG_1 (MAX_BPF_REG + 0) 35 #define TMP_REG_2 (MAX_BPF_REG + 1) 36 37 /* Map BPF registers to A64 registers */ 38 static const int bpf2a64[] = { 39 /* return value from in-kernel function, and exit value from eBPF */ 40 [BPF_REG_0] = A64_R(7), 41 /* arguments from eBPF program to in-kernel function */ 42 [BPF_REG_1] = A64_R(0), 43 [BPF_REG_2] = A64_R(1), 44 [BPF_REG_3] = A64_R(2), 45 [BPF_REG_4] = A64_R(3), 46 [BPF_REG_5] = A64_R(4), 47 /* callee saved registers that in-kernel function will preserve */ 48 [BPF_REG_6] = A64_R(19), 49 [BPF_REG_7] = A64_R(20), 50 [BPF_REG_8] = A64_R(21), 51 [BPF_REG_9] = A64_R(22), 52 /* read-only frame pointer to access stack */ 53 [BPF_REG_FP] = A64_FP, 54 /* temporary register for internal BPF JIT */ 55 [TMP_REG_1] = A64_R(23), 56 [TMP_REG_2] = A64_R(24), 57 }; 58 59 struct jit_ctx { 60 const struct bpf_prog *prog; 61 int idx; 62 int tmp_used; 63 int epilogue_offset; 64 int *offset; 65 u32 *image; 66 }; 67 68 static inline void emit(const u32 insn, struct jit_ctx *ctx) 69 { 70 if (ctx->image != NULL) 71 ctx->image[ctx->idx] = cpu_to_le32(insn); 72 73 ctx->idx++; 74 } 75 76 static inline void emit_a64_mov_i64(const int reg, const u64 val, 77 struct jit_ctx *ctx) 78 { 79 u64 tmp = val; 80 int shift = 0; 81 82 emit(A64_MOVZ(1, reg, tmp & 0xffff, shift), ctx); 83 tmp >>= 16; 84 shift += 16; 85 while (tmp) { 86 if (tmp & 0xffff) 87 emit(A64_MOVK(1, reg, tmp & 0xffff, shift), ctx); 88 tmp >>= 16; 89 shift += 16; 90 } 91 } 92 93 static inline void emit_a64_mov_i(const int is64, const int reg, 94 const s32 val, struct jit_ctx *ctx) 95 { 96 u16 hi = val >> 16; 97 u16 lo = val & 0xffff; 98 99 if (hi & 0x8000) { 100 if (hi == 0xffff) { 101 emit(A64_MOVN(is64, reg, (u16)~lo, 0), ctx); 102 } else { 103 emit(A64_MOVN(is64, reg, (u16)~hi, 16), ctx); 104 emit(A64_MOVK(is64, reg, lo, 0), ctx); 105 } 106 } else { 107 emit(A64_MOVZ(is64, reg, lo, 0), ctx); 108 if (hi) 109 emit(A64_MOVK(is64, reg, hi, 16), ctx); 110 } 111 } 112 113 static inline int bpf2a64_offset(int bpf_to, int bpf_from, 114 const struct jit_ctx *ctx) 115 { 116 int to = ctx->offset[bpf_to]; 117 /* -1 to account for the Branch instruction */ 118 int from = ctx->offset[bpf_from] - 1; 119 120 return to - from; 121 } 122 123 static void jit_fill_hole(void *area, unsigned int size) 124 { 125 u32 *ptr; 126 /* We are guaranteed to have aligned memory. */ 127 for (ptr = area; size >= sizeof(u32); size -= sizeof(u32)) 128 *ptr++ = cpu_to_le32(AARCH64_BREAK_FAULT); 129 } 130 131 static inline int epilogue_offset(const struct jit_ctx *ctx) 132 { 133 int to = ctx->epilogue_offset; 134 int from = ctx->idx; 135 136 return to - from; 137 } 138 139 /* Stack must be multiples of 16B */ 140 #define STACK_ALIGN(sz) (((sz) + 15) & ~15) 141 142 static void build_prologue(struct jit_ctx *ctx) 143 { 144 const u8 r6 = bpf2a64[BPF_REG_6]; 145 const u8 r7 = bpf2a64[BPF_REG_7]; 146 const u8 r8 = bpf2a64[BPF_REG_8]; 147 const u8 r9 = bpf2a64[BPF_REG_9]; 148 const u8 fp = bpf2a64[BPF_REG_FP]; 149 const u8 ra = bpf2a64[BPF_REG_A]; 150 const u8 rx = bpf2a64[BPF_REG_X]; 151 const u8 tmp1 = bpf2a64[TMP_REG_1]; 152 const u8 tmp2 = bpf2a64[TMP_REG_2]; 153 int stack_size = MAX_BPF_STACK; 154 155 stack_size += 4; /* extra for skb_copy_bits buffer */ 156 stack_size = STACK_ALIGN(stack_size); 157 158 /* Save callee-saved register */ 159 emit(A64_PUSH(r6, r7, A64_SP), ctx); 160 emit(A64_PUSH(r8, r9, A64_SP), ctx); 161 if (ctx->tmp_used) 162 emit(A64_PUSH(tmp1, tmp2, A64_SP), ctx); 163 164 /* Set up BPF stack */ 165 emit(A64_SUB_I(1, A64_SP, A64_SP, stack_size), ctx); 166 167 /* Set up frame pointer */ 168 emit(A64_MOV(1, fp, A64_SP), ctx); 169 170 /* Clear registers A and X */ 171 emit_a64_mov_i64(ra, 0, ctx); 172 emit_a64_mov_i64(rx, 0, ctx); 173 } 174 175 static void build_epilogue(struct jit_ctx *ctx) 176 { 177 const u8 r0 = bpf2a64[BPF_REG_0]; 178 const u8 r6 = bpf2a64[BPF_REG_6]; 179 const u8 r7 = bpf2a64[BPF_REG_7]; 180 const u8 r8 = bpf2a64[BPF_REG_8]; 181 const u8 r9 = bpf2a64[BPF_REG_9]; 182 const u8 fp = bpf2a64[BPF_REG_FP]; 183 const u8 tmp1 = bpf2a64[TMP_REG_1]; 184 const u8 tmp2 = bpf2a64[TMP_REG_2]; 185 int stack_size = MAX_BPF_STACK; 186 187 stack_size += 4; /* extra for skb_copy_bits buffer */ 188 stack_size = STACK_ALIGN(stack_size); 189 190 /* We're done with BPF stack */ 191 emit(A64_ADD_I(1, A64_SP, A64_SP, stack_size), ctx); 192 193 /* Restore callee-saved register */ 194 if (ctx->tmp_used) 195 emit(A64_POP(tmp1, tmp2, A64_SP), ctx); 196 emit(A64_POP(r8, r9, A64_SP), ctx); 197 emit(A64_POP(r6, r7, A64_SP), ctx); 198 199 /* Restore frame pointer */ 200 emit(A64_MOV(1, fp, A64_SP), ctx); 201 202 /* Set return value */ 203 emit(A64_MOV(1, A64_R(0), r0), ctx); 204 205 emit(A64_RET(A64_LR), ctx); 206 } 207 208 /* JITs an eBPF instruction. 209 * Returns: 210 * 0 - successfully JITed an 8-byte eBPF instruction. 211 * >0 - successfully JITed a 16-byte eBPF instruction. 212 * <0 - failed to JIT. 213 */ 214 static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) 215 { 216 const u8 code = insn->code; 217 const u8 dst = bpf2a64[insn->dst_reg]; 218 const u8 src = bpf2a64[insn->src_reg]; 219 const u8 tmp = bpf2a64[TMP_REG_1]; 220 const u8 tmp2 = bpf2a64[TMP_REG_2]; 221 const s16 off = insn->off; 222 const s32 imm = insn->imm; 223 const int i = insn - ctx->prog->insnsi; 224 const bool is64 = BPF_CLASS(code) == BPF_ALU64; 225 u8 jmp_cond; 226 s32 jmp_offset; 227 228 #define check_imm(bits, imm) do { \ 229 if ((((imm) > 0) && ((imm) >> (bits))) || \ 230 (((imm) < 0) && (~(imm) >> (bits)))) { \ 231 pr_info("[%2d] imm=%d(0x%x) out of range\n", \ 232 i, imm, imm); \ 233 return -EINVAL; \ 234 } \ 235 } while (0) 236 #define check_imm19(imm) check_imm(19, imm) 237 #define check_imm26(imm) check_imm(26, imm) 238 239 switch (code) { 240 /* dst = src */ 241 case BPF_ALU | BPF_MOV | BPF_X: 242 case BPF_ALU64 | BPF_MOV | BPF_X: 243 emit(A64_MOV(is64, dst, src), ctx); 244 break; 245 /* dst = dst OP src */ 246 case BPF_ALU | BPF_ADD | BPF_X: 247 case BPF_ALU64 | BPF_ADD | BPF_X: 248 emit(A64_ADD(is64, dst, dst, src), ctx); 249 break; 250 case BPF_ALU | BPF_SUB | BPF_X: 251 case BPF_ALU64 | BPF_SUB | BPF_X: 252 emit(A64_SUB(is64, dst, dst, src), ctx); 253 break; 254 case BPF_ALU | BPF_AND | BPF_X: 255 case BPF_ALU64 | BPF_AND | BPF_X: 256 emit(A64_AND(is64, dst, dst, src), ctx); 257 break; 258 case BPF_ALU | BPF_OR | BPF_X: 259 case BPF_ALU64 | BPF_OR | BPF_X: 260 emit(A64_ORR(is64, dst, dst, src), ctx); 261 break; 262 case BPF_ALU | BPF_XOR | BPF_X: 263 case BPF_ALU64 | BPF_XOR | BPF_X: 264 emit(A64_EOR(is64, dst, dst, src), ctx); 265 break; 266 case BPF_ALU | BPF_MUL | BPF_X: 267 case BPF_ALU64 | BPF_MUL | BPF_X: 268 emit(A64_MUL(is64, dst, dst, src), ctx); 269 break; 270 case BPF_ALU | BPF_DIV | BPF_X: 271 case BPF_ALU64 | BPF_DIV | BPF_X: 272 case BPF_ALU | BPF_MOD | BPF_X: 273 case BPF_ALU64 | BPF_MOD | BPF_X: 274 { 275 const u8 r0 = bpf2a64[BPF_REG_0]; 276 277 /* if (src == 0) return 0 */ 278 jmp_offset = 3; /* skip ahead to else path */ 279 check_imm19(jmp_offset); 280 emit(A64_CBNZ(is64, src, jmp_offset), ctx); 281 emit(A64_MOVZ(1, r0, 0, 0), ctx); 282 jmp_offset = epilogue_offset(ctx); 283 check_imm26(jmp_offset); 284 emit(A64_B(jmp_offset), ctx); 285 /* else */ 286 switch (BPF_OP(code)) { 287 case BPF_DIV: 288 emit(A64_UDIV(is64, dst, dst, src), ctx); 289 break; 290 case BPF_MOD: 291 ctx->tmp_used = 1; 292 emit(A64_UDIV(is64, tmp, dst, src), ctx); 293 emit(A64_MUL(is64, tmp, tmp, src), ctx); 294 emit(A64_SUB(is64, dst, dst, tmp), ctx); 295 break; 296 } 297 break; 298 } 299 case BPF_ALU | BPF_LSH | BPF_X: 300 case BPF_ALU64 | BPF_LSH | BPF_X: 301 emit(A64_LSLV(is64, dst, dst, src), ctx); 302 break; 303 case BPF_ALU | BPF_RSH | BPF_X: 304 case BPF_ALU64 | BPF_RSH | BPF_X: 305 emit(A64_LSRV(is64, dst, dst, src), ctx); 306 break; 307 case BPF_ALU | BPF_ARSH | BPF_X: 308 case BPF_ALU64 | BPF_ARSH | BPF_X: 309 emit(A64_ASRV(is64, dst, dst, src), ctx); 310 break; 311 /* dst = -dst */ 312 case BPF_ALU | BPF_NEG: 313 case BPF_ALU64 | BPF_NEG: 314 emit(A64_NEG(is64, dst, dst), ctx); 315 break; 316 /* dst = BSWAP##imm(dst) */ 317 case BPF_ALU | BPF_END | BPF_FROM_LE: 318 case BPF_ALU | BPF_END | BPF_FROM_BE: 319 #ifdef CONFIG_CPU_BIG_ENDIAN 320 if (BPF_SRC(code) == BPF_FROM_BE) 321 goto emit_bswap_uxt; 322 #else /* !CONFIG_CPU_BIG_ENDIAN */ 323 if (BPF_SRC(code) == BPF_FROM_LE) 324 goto emit_bswap_uxt; 325 #endif 326 switch (imm) { 327 case 16: 328 emit(A64_REV16(is64, dst, dst), ctx); 329 /* zero-extend 16 bits into 64 bits */ 330 emit(A64_UXTH(is64, dst, dst), ctx); 331 break; 332 case 32: 333 emit(A64_REV32(is64, dst, dst), ctx); 334 /* upper 32 bits already cleared */ 335 break; 336 case 64: 337 emit(A64_REV64(dst, dst), ctx); 338 break; 339 } 340 break; 341 emit_bswap_uxt: 342 switch (imm) { 343 case 16: 344 /* zero-extend 16 bits into 64 bits */ 345 emit(A64_UXTH(is64, dst, dst), ctx); 346 break; 347 case 32: 348 /* zero-extend 32 bits into 64 bits */ 349 emit(A64_UXTW(is64, dst, dst), ctx); 350 break; 351 case 64: 352 /* nop */ 353 break; 354 } 355 break; 356 /* dst = imm */ 357 case BPF_ALU | BPF_MOV | BPF_K: 358 case BPF_ALU64 | BPF_MOV | BPF_K: 359 emit_a64_mov_i(is64, dst, imm, ctx); 360 break; 361 /* dst = dst OP imm */ 362 case BPF_ALU | BPF_ADD | BPF_K: 363 case BPF_ALU64 | BPF_ADD | BPF_K: 364 ctx->tmp_used = 1; 365 emit_a64_mov_i(is64, tmp, imm, ctx); 366 emit(A64_ADD(is64, dst, dst, tmp), ctx); 367 break; 368 case BPF_ALU | BPF_SUB | BPF_K: 369 case BPF_ALU64 | BPF_SUB | BPF_K: 370 ctx->tmp_used = 1; 371 emit_a64_mov_i(is64, tmp, imm, ctx); 372 emit(A64_SUB(is64, dst, dst, tmp), ctx); 373 break; 374 case BPF_ALU | BPF_AND | BPF_K: 375 case BPF_ALU64 | BPF_AND | BPF_K: 376 ctx->tmp_used = 1; 377 emit_a64_mov_i(is64, tmp, imm, ctx); 378 emit(A64_AND(is64, dst, dst, tmp), ctx); 379 break; 380 case BPF_ALU | BPF_OR | BPF_K: 381 case BPF_ALU64 | BPF_OR | BPF_K: 382 ctx->tmp_used = 1; 383 emit_a64_mov_i(is64, tmp, imm, ctx); 384 emit(A64_ORR(is64, dst, dst, tmp), ctx); 385 break; 386 case BPF_ALU | BPF_XOR | BPF_K: 387 case BPF_ALU64 | BPF_XOR | BPF_K: 388 ctx->tmp_used = 1; 389 emit_a64_mov_i(is64, tmp, imm, ctx); 390 emit(A64_EOR(is64, dst, dst, tmp), ctx); 391 break; 392 case BPF_ALU | BPF_MUL | BPF_K: 393 case BPF_ALU64 | BPF_MUL | BPF_K: 394 ctx->tmp_used = 1; 395 emit_a64_mov_i(is64, tmp, imm, ctx); 396 emit(A64_MUL(is64, dst, dst, tmp), ctx); 397 break; 398 case BPF_ALU | BPF_DIV | BPF_K: 399 case BPF_ALU64 | BPF_DIV | BPF_K: 400 ctx->tmp_used = 1; 401 emit_a64_mov_i(is64, tmp, imm, ctx); 402 emit(A64_UDIV(is64, dst, dst, tmp), ctx); 403 break; 404 case BPF_ALU | BPF_MOD | BPF_K: 405 case BPF_ALU64 | BPF_MOD | BPF_K: 406 ctx->tmp_used = 1; 407 emit_a64_mov_i(is64, tmp2, imm, ctx); 408 emit(A64_UDIV(is64, tmp, dst, tmp2), ctx); 409 emit(A64_MUL(is64, tmp, tmp, tmp2), ctx); 410 emit(A64_SUB(is64, dst, dst, tmp), ctx); 411 break; 412 case BPF_ALU | BPF_LSH | BPF_K: 413 case BPF_ALU64 | BPF_LSH | BPF_K: 414 emit(A64_LSL(is64, dst, dst, imm), ctx); 415 break; 416 case BPF_ALU | BPF_RSH | BPF_K: 417 case BPF_ALU64 | BPF_RSH | BPF_K: 418 emit(A64_LSR(is64, dst, dst, imm), ctx); 419 break; 420 case BPF_ALU | BPF_ARSH | BPF_K: 421 case BPF_ALU64 | BPF_ARSH | BPF_K: 422 emit(A64_ASR(is64, dst, dst, imm), ctx); 423 break; 424 425 /* JUMP off */ 426 case BPF_JMP | BPF_JA: 427 jmp_offset = bpf2a64_offset(i + off, i, ctx); 428 check_imm26(jmp_offset); 429 emit(A64_B(jmp_offset), ctx); 430 break; 431 /* IF (dst COND src) JUMP off */ 432 case BPF_JMP | BPF_JEQ | BPF_X: 433 case BPF_JMP | BPF_JGT | BPF_X: 434 case BPF_JMP | BPF_JGE | BPF_X: 435 case BPF_JMP | BPF_JNE | BPF_X: 436 case BPF_JMP | BPF_JSGT | BPF_X: 437 case BPF_JMP | BPF_JSGE | BPF_X: 438 emit(A64_CMP(1, dst, src), ctx); 439 emit_cond_jmp: 440 jmp_offset = bpf2a64_offset(i + off, i, ctx); 441 check_imm19(jmp_offset); 442 switch (BPF_OP(code)) { 443 case BPF_JEQ: 444 jmp_cond = A64_COND_EQ; 445 break; 446 case BPF_JGT: 447 jmp_cond = A64_COND_HI; 448 break; 449 case BPF_JGE: 450 jmp_cond = A64_COND_CS; 451 break; 452 case BPF_JNE: 453 jmp_cond = A64_COND_NE; 454 break; 455 case BPF_JSGT: 456 jmp_cond = A64_COND_GT; 457 break; 458 case BPF_JSGE: 459 jmp_cond = A64_COND_GE; 460 break; 461 default: 462 return -EFAULT; 463 } 464 emit(A64_B_(jmp_cond, jmp_offset), ctx); 465 break; 466 case BPF_JMP | BPF_JSET | BPF_X: 467 emit(A64_TST(1, dst, src), ctx); 468 goto emit_cond_jmp; 469 /* IF (dst COND imm) JUMP off */ 470 case BPF_JMP | BPF_JEQ | BPF_K: 471 case BPF_JMP | BPF_JGT | BPF_K: 472 case BPF_JMP | BPF_JGE | BPF_K: 473 case BPF_JMP | BPF_JNE | BPF_K: 474 case BPF_JMP | BPF_JSGT | BPF_K: 475 case BPF_JMP | BPF_JSGE | BPF_K: 476 ctx->tmp_used = 1; 477 emit_a64_mov_i(1, tmp, imm, ctx); 478 emit(A64_CMP(1, dst, tmp), ctx); 479 goto emit_cond_jmp; 480 case BPF_JMP | BPF_JSET | BPF_K: 481 ctx->tmp_used = 1; 482 emit_a64_mov_i(1, tmp, imm, ctx); 483 emit(A64_TST(1, dst, tmp), ctx); 484 goto emit_cond_jmp; 485 /* function call */ 486 case BPF_JMP | BPF_CALL: 487 { 488 const u8 r0 = bpf2a64[BPF_REG_0]; 489 const u64 func = (u64)__bpf_call_base + imm; 490 491 ctx->tmp_used = 1; 492 emit_a64_mov_i64(tmp, func, ctx); 493 emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx); 494 emit(A64_MOV(1, A64_FP, A64_SP), ctx); 495 emit(A64_BLR(tmp), ctx); 496 emit(A64_MOV(1, r0, A64_R(0)), ctx); 497 emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx); 498 break; 499 } 500 /* function return */ 501 case BPF_JMP | BPF_EXIT: 502 /* Optimization: when last instruction is EXIT, 503 simply fallthrough to epilogue. */ 504 if (i == ctx->prog->len - 1) 505 break; 506 jmp_offset = epilogue_offset(ctx); 507 check_imm26(jmp_offset); 508 emit(A64_B(jmp_offset), ctx); 509 break; 510 511 /* dst = imm64 */ 512 case BPF_LD | BPF_IMM | BPF_DW: 513 { 514 const struct bpf_insn insn1 = insn[1]; 515 u64 imm64; 516 517 if (insn1.code != 0 || insn1.src_reg != 0 || 518 insn1.dst_reg != 0 || insn1.off != 0) { 519 /* Note: verifier in BPF core must catch invalid 520 * instructions. 521 */ 522 pr_err_once("Invalid BPF_LD_IMM64 instruction\n"); 523 return -EINVAL; 524 } 525 526 imm64 = (u64)insn1.imm << 32 | (u32)imm; 527 emit_a64_mov_i64(dst, imm64, ctx); 528 529 return 1; 530 } 531 532 /* LDX: dst = *(size *)(src + off) */ 533 case BPF_LDX | BPF_MEM | BPF_W: 534 case BPF_LDX | BPF_MEM | BPF_H: 535 case BPF_LDX | BPF_MEM | BPF_B: 536 case BPF_LDX | BPF_MEM | BPF_DW: 537 ctx->tmp_used = 1; 538 emit_a64_mov_i(1, tmp, off, ctx); 539 switch (BPF_SIZE(code)) { 540 case BPF_W: 541 emit(A64_LDR32(dst, src, tmp), ctx); 542 break; 543 case BPF_H: 544 emit(A64_LDRH(dst, src, tmp), ctx); 545 break; 546 case BPF_B: 547 emit(A64_LDRB(dst, src, tmp), ctx); 548 break; 549 case BPF_DW: 550 emit(A64_LDR64(dst, src, tmp), ctx); 551 break; 552 } 553 break; 554 555 /* ST: *(size *)(dst + off) = imm */ 556 case BPF_ST | BPF_MEM | BPF_W: 557 case BPF_ST | BPF_MEM | BPF_H: 558 case BPF_ST | BPF_MEM | BPF_B: 559 case BPF_ST | BPF_MEM | BPF_DW: 560 goto notyet; 561 562 /* STX: *(size *)(dst + off) = src */ 563 case BPF_STX | BPF_MEM | BPF_W: 564 case BPF_STX | BPF_MEM | BPF_H: 565 case BPF_STX | BPF_MEM | BPF_B: 566 case BPF_STX | BPF_MEM | BPF_DW: 567 ctx->tmp_used = 1; 568 emit_a64_mov_i(1, tmp, off, ctx); 569 switch (BPF_SIZE(code)) { 570 case BPF_W: 571 emit(A64_STR32(src, dst, tmp), ctx); 572 break; 573 case BPF_H: 574 emit(A64_STRH(src, dst, tmp), ctx); 575 break; 576 case BPF_B: 577 emit(A64_STRB(src, dst, tmp), ctx); 578 break; 579 case BPF_DW: 580 emit(A64_STR64(src, dst, tmp), ctx); 581 break; 582 } 583 break; 584 /* STX XADD: lock *(u32 *)(dst + off) += src */ 585 case BPF_STX | BPF_XADD | BPF_W: 586 /* STX XADD: lock *(u64 *)(dst + off) += src */ 587 case BPF_STX | BPF_XADD | BPF_DW: 588 goto notyet; 589 590 /* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */ 591 case BPF_LD | BPF_ABS | BPF_W: 592 case BPF_LD | BPF_ABS | BPF_H: 593 case BPF_LD | BPF_ABS | BPF_B: 594 /* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + src + imm)) */ 595 case BPF_LD | BPF_IND | BPF_W: 596 case BPF_LD | BPF_IND | BPF_H: 597 case BPF_LD | BPF_IND | BPF_B: 598 { 599 const u8 r0 = bpf2a64[BPF_REG_0]; /* r0 = return value */ 600 const u8 r6 = bpf2a64[BPF_REG_6]; /* r6 = pointer to sk_buff */ 601 const u8 fp = bpf2a64[BPF_REG_FP]; 602 const u8 r1 = bpf2a64[BPF_REG_1]; /* r1: struct sk_buff *skb */ 603 const u8 r2 = bpf2a64[BPF_REG_2]; /* r2: int k */ 604 const u8 r3 = bpf2a64[BPF_REG_3]; /* r3: unsigned int size */ 605 const u8 r4 = bpf2a64[BPF_REG_4]; /* r4: void *buffer */ 606 const u8 r5 = bpf2a64[BPF_REG_5]; /* r5: void *(*func)(...) */ 607 int size; 608 609 emit(A64_MOV(1, r1, r6), ctx); 610 emit_a64_mov_i(0, r2, imm, ctx); 611 if (BPF_MODE(code) == BPF_IND) 612 emit(A64_ADD(0, r2, r2, src), ctx); 613 switch (BPF_SIZE(code)) { 614 case BPF_W: 615 size = 4; 616 break; 617 case BPF_H: 618 size = 2; 619 break; 620 case BPF_B: 621 size = 1; 622 break; 623 default: 624 return -EINVAL; 625 } 626 emit_a64_mov_i64(r3, size, ctx); 627 emit(A64_ADD_I(1, r4, fp, MAX_BPF_STACK), ctx); 628 emit_a64_mov_i64(r5, (unsigned long)bpf_load_pointer, ctx); 629 emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx); 630 emit(A64_MOV(1, A64_FP, A64_SP), ctx); 631 emit(A64_BLR(r5), ctx); 632 emit(A64_MOV(1, r0, A64_R(0)), ctx); 633 emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx); 634 635 jmp_offset = epilogue_offset(ctx); 636 check_imm19(jmp_offset); 637 emit(A64_CBZ(1, r0, jmp_offset), ctx); 638 emit(A64_MOV(1, r5, r0), ctx); 639 switch (BPF_SIZE(code)) { 640 case BPF_W: 641 emit(A64_LDR32(r0, r5, A64_ZR), ctx); 642 #ifndef CONFIG_CPU_BIG_ENDIAN 643 emit(A64_REV32(0, r0, r0), ctx); 644 #endif 645 break; 646 case BPF_H: 647 emit(A64_LDRH(r0, r5, A64_ZR), ctx); 648 #ifndef CONFIG_CPU_BIG_ENDIAN 649 emit(A64_REV16(0, r0, r0), ctx); 650 #endif 651 break; 652 case BPF_B: 653 emit(A64_LDRB(r0, r5, A64_ZR), ctx); 654 break; 655 } 656 break; 657 } 658 notyet: 659 pr_info_once("*** NOT YET: opcode %02x ***\n", code); 660 return -EFAULT; 661 662 default: 663 pr_err_once("unknown opcode %02x\n", code); 664 return -EINVAL; 665 } 666 667 return 0; 668 } 669 670 static int build_body(struct jit_ctx *ctx) 671 { 672 const struct bpf_prog *prog = ctx->prog; 673 int i; 674 675 for (i = 0; i < prog->len; i++) { 676 const struct bpf_insn *insn = &prog->insnsi[i]; 677 int ret; 678 679 ret = build_insn(insn, ctx); 680 681 if (ctx->image == NULL) 682 ctx->offset[i] = ctx->idx; 683 684 if (ret > 0) { 685 i++; 686 continue; 687 } 688 if (ret) 689 return ret; 690 } 691 692 return 0; 693 } 694 695 static inline void bpf_flush_icache(void *start, void *end) 696 { 697 flush_icache_range((unsigned long)start, (unsigned long)end); 698 } 699 700 void bpf_jit_compile(struct bpf_prog *prog) 701 { 702 /* Nothing to do here. We support Internal BPF. */ 703 } 704 705 void bpf_int_jit_compile(struct bpf_prog *prog) 706 { 707 struct bpf_binary_header *header; 708 struct jit_ctx ctx; 709 int image_size; 710 u8 *image_ptr; 711 712 if (!bpf_jit_enable) 713 return; 714 715 if (!prog || !prog->len) 716 return; 717 718 memset(&ctx, 0, sizeof(ctx)); 719 ctx.prog = prog; 720 721 ctx.offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL); 722 if (ctx.offset == NULL) 723 return; 724 725 /* 1. Initial fake pass to compute ctx->idx. */ 726 727 /* Fake pass to fill in ctx->offset and ctx->tmp_used. */ 728 if (build_body(&ctx)) 729 goto out; 730 731 build_prologue(&ctx); 732 733 ctx.epilogue_offset = ctx.idx; 734 build_epilogue(&ctx); 735 736 /* Now we know the actual image size. */ 737 image_size = sizeof(u32) * ctx.idx; 738 header = bpf_jit_binary_alloc(image_size, &image_ptr, 739 sizeof(u32), jit_fill_hole); 740 if (header == NULL) 741 goto out; 742 743 /* 2. Now, the actual pass. */ 744 745 ctx.image = (u32 *)image_ptr; 746 ctx.idx = 0; 747 748 build_prologue(&ctx); 749 750 if (build_body(&ctx)) { 751 bpf_jit_binary_free(header); 752 goto out; 753 } 754 755 build_epilogue(&ctx); 756 757 /* And we're done. */ 758 if (bpf_jit_enable > 1) 759 bpf_jit_dump(prog->len, image_size, 2, ctx.image); 760 761 bpf_flush_icache(ctx.image, ctx.image + ctx.idx); 762 763 set_memory_ro((unsigned long)header, header->pages); 764 prog->bpf_func = (void *)ctx.image; 765 prog->jited = 1; 766 out: 767 kfree(ctx.offset); 768 } 769 770 void bpf_jit_free(struct bpf_prog *prog) 771 { 772 unsigned long addr = (unsigned long)prog->bpf_func & PAGE_MASK; 773 struct bpf_binary_header *header = (void *)addr; 774 775 if (!prog->jited) 776 goto free_filter; 777 778 set_memory_rw(addr, header->pages); 779 bpf_jit_binary_free(header); 780 781 free_filter: 782 bpf_prog_unlock_free(prog); 783 } 784