/*
 * Just-In-Time compiler for BPF filters on 32bit ARM
 *
 * Copyright (c) 2011 Mircea Gherzan <mgherzan@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License.
 */

#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/filter.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>

#include <asm/cacheflush.h>
#include <asm/hwcap.h>
#include <asm/opcodes.h>

#include "bpf_jit_32.h"

/*
 * ABI:
 *
 * r0	scratch register
 * r4	BPF register A
 * r5	BPF register X
 * r6	pointer to the skb
 * r7	skb->data
 * r8	skb_headlen(skb)
 */

#define r_scratch	ARM_R0
/* r1-r3 are (also) used for the unaligned loads on the non-ARMv7 slowpath */
#define r_off		ARM_R1
#define r_A		ARM_R4
#define r_X		ARM_R5
#define r_skb		ARM_R6
#define r_skb_data	ARM_R7
#define r_skb_hl	ARM_R8

#define SCRATCH_SP_OFFSET	0
#define SCRATCH_OFF(k)		(SCRATCH_SP_OFFSET + 4 * (k))

#define SEEN_MEM		((1 << BPF_MEMWORDS) - 1)
#define SEEN_MEM_WORD(k)	(1 << (k))
#define SEEN_X			(1 << BPF_MEMWORDS)
#define SEEN_CALL		(1 << (BPF_MEMWORDS + 1))
#define SEEN_SKB		(1 << (BPF_MEMWORDS + 2))
#define SEEN_DATA		(1 << (BPF_MEMWORDS + 3))

#define FLAG_NEED_X_RESET	(1 << 0)
#define FLAG_IMM_OVERFLOW	(1 << 1)

struct jit_ctx {
	const struct bpf_prog *skf;
	unsigned idx;
	unsigned prologue_bytes;
	int ret0_fp_idx;
	u32 seen;
	u32 flags;
	u32 *offsets;
	u32 *target;
#if __LINUX_ARM_ARCH__ < 7
	u16 epilogue_bytes;
	u16 imm_count;
	u32 *imms;
#endif
};

int bpf_jit_enable __read_mostly;

static inline int call_neg_helper(struct sk_buff *skb, int offset, void *ret,
				  unsigned int size)
{
	void *ptr = bpf_internal_load_pointer_neg_helper(skb, offset, size);

	if (!ptr)
		return -EFAULT;
	memcpy(ret, ptr, size);
	return 0;
}

static u64 jit_get_skb_b(struct sk_buff *skb, int offset)
{
	u8 ret;
	int err;

	if (offset < 0)
		err = call_neg_helper(skb, offset, &ret, 1);
	else
		err = skb_copy_bits(skb, offset, &ret, 1);

	return (u64)err << 32 | ret;
}

static u64 jit_get_skb_h(struct sk_buff *skb, int offset)
{
	u16 ret;
	int err;

	if (offset < 0)
		err = call_neg_helper(skb, offset, &ret, 2);
	else
		err = skb_copy_bits(skb, offset, &ret, 2);

	return (u64)err << 32 | ntohs(ret);
}

static u64 jit_get_skb_w(struct sk_buff *skb, int offset)
{
	u32 ret;
	int err;

	if (offset < 0)
		err = call_neg_helper(skb, offset, &ret, 4);
	else
		err = skb_copy_bits(skb, offset, &ret, 4);

	return (u64)err << 32 | ntohl(ret);
}
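/*
 * Note on the helpers above: under the AAPCS a u64 return value travels
 * in r0/r1, with the low word in r0 on little-endian ARM.  The
 * "(u64)err << 32 | value" packing therefore puts the error code in r1
 * and the loaded value in r0, which is what the generated slow path
 * checks ("cmp r1, #0") before moving r0 into r_A.
 */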
/*
 * Wrappers which handle both OABI and EABI and assure Thumb2 interworking
 * (where the assembly routines like __aeabi_uidiv could cause problems).
 */
static u32 jit_udiv(u32 dividend, u32 divisor)
{
	return dividend / divisor;
}

static u32 jit_mod(u32 dividend, u32 divisor)
{
	return dividend % divisor;
}

static inline void _emit(int cond, u32 inst, struct jit_ctx *ctx)
{
	inst |= (cond << 28);
	inst = __opcode_to_mem_arm(inst);

	if (ctx->target != NULL)
		ctx->target[ctx->idx] = inst;

	ctx->idx++;
}

/*
 * Emit an instruction that will be executed unconditionally.
 */
static inline void emit(u32 inst, struct jit_ctx *ctx)
{
	_emit(ARM_COND_AL, inst, ctx);
}

static u16 saved_regs(struct jit_ctx *ctx)
{
	u16 ret = 0;

	if ((ctx->skf->len > 1) ||
	    (ctx->skf->insns[0].code == (BPF_RET | BPF_A)))
		ret |= 1 << r_A;

#ifdef CONFIG_FRAME_POINTER
	ret |= (1 << ARM_FP) | (1 << ARM_IP) | (1 << ARM_LR) | (1 << ARM_PC);
#else
	if (ctx->seen & SEEN_CALL)
		ret |= 1 << ARM_LR;
#endif
	if (ctx->seen & (SEEN_DATA | SEEN_SKB))
		ret |= 1 << r_skb;
	if (ctx->seen & SEEN_DATA)
		ret |= (1 << r_skb_data) | (1 << r_skb_hl);
	if (ctx->seen & SEEN_X)
		ret |= 1 << r_X;

	return ret;
}

static inline int mem_words_used(struct jit_ctx *ctx)
{
	/* yes, we do waste some stack space IF there are "holes" in the set */
	return fls(ctx->seen & SEEN_MEM);
}

static void jit_fill_hole(void *area, unsigned int size)
{
	u32 *ptr;
	/* We are guaranteed to have aligned memory. */
	for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
		*ptr++ = __opcode_to_mem_arm(ARM_INST_UDF);
}

static void build_prologue(struct jit_ctx *ctx)
{
	u16 reg_set = saved_regs(ctx);
	u16 off;

#ifdef CONFIG_FRAME_POINTER
	emit(ARM_MOV_R(ARM_IP, ARM_SP), ctx);
	emit(ARM_PUSH(reg_set), ctx);
	emit(ARM_SUB_I(ARM_FP, ARM_IP, 4), ctx);
#else
	if (reg_set)
		emit(ARM_PUSH(reg_set), ctx);
#endif

	if (ctx->seen & (SEEN_DATA | SEEN_SKB))
		emit(ARM_MOV_R(r_skb, ARM_R0), ctx);

	if (ctx->seen & SEEN_DATA) {
		off = offsetof(struct sk_buff, data);
		emit(ARM_LDR_I(r_skb_data, r_skb, off), ctx);
		/* headlen = len - data_len */
		off = offsetof(struct sk_buff, len);
		emit(ARM_LDR_I(r_skb_hl, r_skb, off), ctx);
		off = offsetof(struct sk_buff, data_len);
		emit(ARM_LDR_I(r_scratch, r_skb, off), ctx);
		emit(ARM_SUB_R(r_skb_hl, r_skb_hl, r_scratch), ctx);
	}

	if (ctx->flags & FLAG_NEED_X_RESET)
		emit(ARM_MOV_I(r_X, 0), ctx);

	/* do not leak kernel data to userspace */
	if (bpf_needs_clear_a(&ctx->skf->insns[0]))
		emit(ARM_MOV_I(r_A, 0), ctx);

	/* stack space for the BPF_MEM words */
	if (ctx->seen & SEEN_MEM)
		emit(ARM_SUB_I(ARM_SP, ARM_SP, mem_words_used(ctx) * 4), ctx);
}

static void build_epilogue(struct jit_ctx *ctx)
{
	u16 reg_set = saved_regs(ctx);

	if (ctx->seen & SEEN_MEM)
		emit(ARM_ADD_I(ARM_SP, ARM_SP, mem_words_used(ctx) * 4), ctx);

	reg_set &= ~(1 << ARM_LR);

#ifdef CONFIG_FRAME_POINTER
	/* the first instruction of the prologue was: mov ip, sp */
	reg_set &= ~(1 << ARM_IP);
	reg_set |= (1 << ARM_SP);
	emit(ARM_LDM(ARM_SP, reg_set), ctx);
#else
	if (reg_set) {
		if (ctx->seen & SEEN_CALL)
			reg_set |= 1 << ARM_PC;
		emit(ARM_POP(reg_set), ctx);
	}

	if (!(ctx->seen & SEEN_CALL))
		emit(ARM_BX(ARM_LR), ctx);
#endif
}
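/*
 * Stack layout after the prologue (a sketch for the non-frame-pointer
 * case, assuming SEEN_CALL/SEEN_DATA/SEEN_X are all set and M scratch
 * words are in use; lower addresses at the bottom):
 *
 *	[saved r4-r8, lr]	pushed by build_prologue()
 *	[scratch word M-1]
 *	...
 *	[scratch word 0]	<- ARM_SP, hence SCRATCH_OFF(k) == 4 * k
 */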
static int16_t imm8m(u32 x)
{
	u32 rot;

	for (rot = 0; rot < 16; rot++)
		if ((x & ~ror32(0xff, 2 * rot)) == 0)
			return rol32(x, 2 * rot) | (rot << 8);

	return -1;
}
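/*
 * Worked example: imm8m(0x00ff0000) matches at rot = 8, since
 * ror32(0xff, 16) == 0x00ff0000, and returns
 * rol32(0x00ff0000, 16) | (8 << 8) == 0x8ff -- the same encoded
 * immediate that emit_swap16() below uses to clear bits [23:16].
 */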
#if __LINUX_ARM_ARCH__ < 7

static u16 imm_offset(u32 k, struct jit_ctx *ctx)
{
	unsigned i = 0, offset;
	u16 imm;

	/* on the "fake" run we just count them (duplicates included) */
	if (ctx->target == NULL) {
		ctx->imm_count++;
		return 0;
	}

	while ((i < ctx->imm_count) && ctx->imms[i]) {
		if (ctx->imms[i] == k)
			break;
		i++;
	}

	if (ctx->imms[i] == 0)
		ctx->imms[i] = k;

	/* constants go just after the epilogue */
	offset = ctx->offsets[ctx->skf->len];
	offset += ctx->prologue_bytes;
	offset += ctx->epilogue_bytes;
	offset += i * 4;

	ctx->target[offset / 4] = k;

	/* PC in ARM mode == address of the instruction + 8 */
	imm = offset - (8 + ctx->idx * 4);

	if (imm & ~0xfff) {
		/*
		 * literal pool is too far, signal it into flags. we
		 * can only detect it on the second pass unfortunately.
		 */
		ctx->flags |= FLAG_IMM_OVERFLOW;
		return 0;
	}

	return imm;
}

#endif /* __LINUX_ARM_ARCH__ < 7 */

/*
 * Move an immediate that's not an imm8m to a core register.
 */
static inline void emit_mov_i_no8m(int rd, u32 val, struct jit_ctx *ctx)
{
#if __LINUX_ARM_ARCH__ < 7
	emit(ARM_LDR_I(rd, ARM_PC, imm_offset(val, ctx)), ctx);
#else
	emit(ARM_MOVW(rd, val & 0xffff), ctx);
	if (val > 0xffff)
		emit(ARM_MOVT(rd, val >> 16), ctx);
#endif
}

static inline void emit_mov_i(int rd, u32 val, struct jit_ctx *ctx)
{
	int imm12 = imm8m(val);

	if (imm12 >= 0)
		emit(ARM_MOV_I(rd, imm12), ctx);
	else
		emit_mov_i_no8m(rd, val, ctx);
}

#if __LINUX_ARM_ARCH__ < 6

static void emit_load_be32(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
{
	_emit(cond, ARM_LDRB_I(ARM_R3, r_addr, 1), ctx);
	_emit(cond, ARM_LDRB_I(ARM_R1, r_addr, 0), ctx);
	_emit(cond, ARM_LDRB_I(ARM_R2, r_addr, 3), ctx);
	_emit(cond, ARM_LSL_I(ARM_R3, ARM_R3, 16), ctx);
	_emit(cond, ARM_LDRB_I(ARM_R0, r_addr, 2), ctx);
	_emit(cond, ARM_ORR_S(ARM_R3, ARM_R3, ARM_R1, SRTYPE_LSL, 24), ctx);
	_emit(cond, ARM_ORR_R(ARM_R3, ARM_R3, ARM_R2), ctx);
	_emit(cond, ARM_ORR_S(r_res, ARM_R3, ARM_R0, SRTYPE_LSL, 8), ctx);
}

static void emit_load_be16(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
{
	_emit(cond, ARM_LDRB_I(ARM_R1, r_addr, 0), ctx);
	_emit(cond, ARM_LDRB_I(ARM_R2, r_addr, 1), ctx);
	_emit(cond, ARM_ORR_S(r_res, ARM_R2, ARM_R1, SRTYPE_LSL, 8), ctx);
}

static inline void emit_swap16(u8 r_dst, u8 r_src, struct jit_ctx *ctx)
{
	/* r_dst = (r_src << 8) | (r_src >> 8) */
	emit(ARM_LSL_I(ARM_R1, r_src, 8), ctx);
	emit(ARM_ORR_S(r_dst, ARM_R1, r_src, SRTYPE_LSR, 8), ctx);

	/*
	 * we need to mask out the bits set in r_dst[23:16] due to
	 * the first shift instruction.
	 *
	 * note that 0x8ff is the encoded immediate 0x00ff0000.
	 */
	emit(ARM_BIC_I(r_dst, r_dst, 0x8ff), ctx);
}

#else  /* ARMv6+ */

static void emit_load_be32(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
{
	_emit(cond, ARM_LDR_I(r_res, r_addr, 0), ctx);
#ifdef __LITTLE_ENDIAN
	_emit(cond, ARM_REV(r_res, r_res), ctx);
#endif
}

static void emit_load_be16(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
{
	_emit(cond, ARM_LDRH_I(r_res, r_addr, 0), ctx);
#ifdef __LITTLE_ENDIAN
	_emit(cond, ARM_REV16(r_res, r_res), ctx);
#endif
}

static inline void emit_swap16(u8 r_dst __maybe_unused,
			       u8 r_src __maybe_unused,
			       struct jit_ctx *ctx __maybe_unused)
{
#ifdef __LITTLE_ENDIAN
	emit(ARM_REV16(r_dst, r_src), ctx);
#endif
}

#endif /* __LINUX_ARM_ARCH__ < 6 */

/* Compute the immediate value for a PC-relative branch. */
static inline u32 b_imm(unsigned tgt, struct jit_ctx *ctx)
{
	u32 imm;

	if (ctx->target == NULL)
		return 0;
	/*
	 * BPF allows only forward jumps and the offset of the target is
	 * still the one computed during the first pass.
	 */
	imm = ctx->offsets[tgt] + ctx->prologue_bytes - (ctx->idx * 4 + 8);

	return imm >> 2;
}
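/*
 * Worked example for b_imm(): a branch emitted at byte offset 0x20 in
 * the image whose target resolves to byte offset 0x30 yields
 * imm = (0x30 - (0x20 + 8)) >> 2 = 2, matching the ARM rule that
 * "b <label>" reaches PC + 8 + 4 * imm24.
 */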
#define OP_IMM3(op, r1, r2, imm_val, ctx)				\
	do {								\
		imm12 = imm8m(imm_val);					\
		if (imm12 < 0) {					\
			emit_mov_i_no8m(r_scratch, imm_val, ctx);	\
			emit(op ## _R((r1), (r2), r_scratch), ctx);	\
		} else {						\
			emit(op ## _I((r1), (r2), imm12), ctx);		\
		}							\
	} while (0)

static inline void emit_err_ret(u8 cond, struct jit_ctx *ctx)
{
	if (ctx->ret0_fp_idx >= 0) {
		_emit(cond, ARM_B(b_imm(ctx->ret0_fp_idx, ctx)), ctx);
		/* NOP to keep the size constant between passes */
		emit(ARM_MOV_R(ARM_R0, ARM_R0), ctx);
	} else {
		_emit(cond, ARM_MOV_I(ARM_R0, 0), ctx);
		_emit(cond, ARM_B(b_imm(ctx->skf->len, ctx)), ctx);
	}
}

static inline void emit_blx_r(u8 tgt_reg, struct jit_ctx *ctx)
{
#if __LINUX_ARM_ARCH__ < 5
	emit(ARM_MOV_R(ARM_LR, ARM_PC), ctx);

	if (elf_hwcap & HWCAP_THUMB)
		emit(ARM_BX(tgt_reg), ctx);
	else
		emit(ARM_MOV_R(ARM_PC, tgt_reg), ctx);
#else
	emit(ARM_BLX_R(tgt_reg), ctx);
#endif
}

static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx,
				int bpf_op)
{
#if __LINUX_ARM_ARCH__ == 7
	if (elf_hwcap & HWCAP_IDIVA) {
		if (bpf_op == BPF_DIV)
			emit(ARM_UDIV(rd, rm, rn), ctx);
		else {
			emit(ARM_UDIV(ARM_R3, rm, rn), ctx);
			emit(ARM_MLS(rd, rn, ARM_R3, rm), ctx);
		}
		return;
	}
#endif

	/*
	 * For BPF_ALU | BPF_DIV | BPF_K instructions, rm is ARM_R4
	 * (r_A) and rn is ARM_R0 (r_scratch) so load rn first into
	 * ARM_R1 to avoid accidentally overwriting ARM_R0 with rm
	 * before using it as a source for ARM_R1.
	 *
	 * For BPF_ALU | BPF_DIV | BPF_X rm is ARM_R4 (r_A) and rn is
	 * ARM_R5 (r_X) so there is no particular register overlap
	 * issue.
	 */
	if (rn != ARM_R1)
		emit(ARM_MOV_R(ARM_R1, rn), ctx);
	if (rm != ARM_R0)
		emit(ARM_MOV_R(ARM_R0, rm), ctx);

	ctx->seen |= SEEN_CALL;
	emit_mov_i(ARM_R3, bpf_op == BPF_DIV ? (u32)jit_udiv : (u32)jit_mod,
		   ctx);
	emit_blx_r(ARM_R3, ctx);

	if (rd != ARM_R0)
		emit(ARM_MOV_R(rd, ARM_R0), ctx);
}
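/*
 * The helper's address goes through ARM_R3 here (and in the packet-load
 * slow paths below): r0/r1 already carry the arguments, and r3 is
 * call-clobbered under the AAPCS, so nothing extra needs to be saved
 * around the call.
 */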
static inline void update_on_xread(struct jit_ctx *ctx)
{
	if (!(ctx->seen & SEEN_X))
		ctx->flags |= FLAG_NEED_X_RESET;

	ctx->seen |= SEEN_X;
}

static int build_body(struct jit_ctx *ctx)
{
	void *load_func[] = {jit_get_skb_b, jit_get_skb_h, jit_get_skb_w};
	const struct bpf_prog *prog = ctx->skf;
	const struct sock_filter *inst;
	unsigned i, load_order, off, condt;
	int imm12;
	u32 k;

	for (i = 0; i < prog->len; i++) {
		u16 code;

		inst = &(prog->insns[i]);
		/* K as an immediate value operand */
		k = inst->k;
		code = bpf_anc_helper(inst);

		/* compute offsets only in the fake pass */
		if (ctx->target == NULL)
			ctx->offsets[i] = ctx->idx * 4;

		switch (code) {
		case BPF_LD | BPF_IMM:
			emit_mov_i(r_A, k, ctx);
			break;
		case BPF_LD | BPF_W | BPF_LEN:
			ctx->seen |= SEEN_SKB;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
			emit(ARM_LDR_I(r_A, r_skb,
				       offsetof(struct sk_buff, len)), ctx);
			break;
		case BPF_LD | BPF_MEM:
			/* A = scratch[k] */
			ctx->seen |= SEEN_MEM_WORD(k);
			emit(ARM_LDR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx);
			break;
		case BPF_LD | BPF_W | BPF_ABS:
			load_order = 2;
			goto load;
		case BPF_LD | BPF_H | BPF_ABS:
			load_order = 1;
			goto load;
		case BPF_LD | BPF_B | BPF_ABS:
			load_order = 0;
load:
			emit_mov_i(r_off, k, ctx);
load_common:
			ctx->seen |= SEEN_DATA | SEEN_CALL;

			if (load_order > 0) {
				emit(ARM_SUB_I(r_scratch, r_skb_hl,
					       1 << load_order), ctx);
				emit(ARM_CMP_R(r_scratch, r_off), ctx);
				condt = ARM_COND_GE;
			} else {
				emit(ARM_CMP_R(r_skb_hl, r_off), ctx);
				condt = ARM_COND_HI;
			}

			/*
			 * test for negative offset, only if we are
			 * currently scheduled to take the fast
			 * path. this will update the flags so that
			 * the slowpath instructions are ignored if the
			 * offset is negative.
			 *
			 * for load_order == 0 the HI condition will
			 * make loads at offset 0 take the slow path too.
			 */
			_emit(condt, ARM_CMP_I(r_off, 0), ctx);

			_emit(condt, ARM_ADD_R(r_scratch, r_off, r_skb_data),
			      ctx);

			if (load_order == 0)
				_emit(condt, ARM_LDRB_I(r_A, r_scratch, 0),
				      ctx);
			else if (load_order == 1)
				emit_load_be16(condt, r_A, r_scratch, ctx);
			else if (load_order == 2)
				emit_load_be32(condt, r_A, r_scratch, ctx);

			_emit(condt, ARM_B(b_imm(i + 1, ctx)), ctx);

			/* the slowpath */
			emit_mov_i(ARM_R3, (u32)load_func[load_order], ctx);
			emit(ARM_MOV_R(ARM_R0, r_skb), ctx);
			/* the offset is already in R1 */
			emit_blx_r(ARM_R3, ctx);
			/* check the result of skb_copy_bits */
			emit(ARM_CMP_I(ARM_R1, 0), ctx);
			emit_err_ret(ARM_COND_NE, ctx);
			emit(ARM_MOV_R(r_A, ARM_R0), ctx);
			break;
		case BPF_LD | BPF_W | BPF_IND:
			load_order = 2;
			goto load_ind;
		case BPF_LD | BPF_H | BPF_IND:
			load_order = 1;
			goto load_ind;
		case BPF_LD | BPF_B | BPF_IND:
			load_order = 0;
load_ind:
			update_on_xread(ctx);
			OP_IMM3(ARM_ADD, r_off, r_X, k, ctx);
			goto load_common;
		case BPF_LDX | BPF_IMM:
			ctx->seen |= SEEN_X;
			emit_mov_i(r_X, k, ctx);
			break;
		case BPF_LDX | BPF_W | BPF_LEN:
			ctx->seen |= SEEN_X | SEEN_SKB;
			emit(ARM_LDR_I(r_X, r_skb,
				       offsetof(struct sk_buff, len)), ctx);
			break;
		case BPF_LDX | BPF_MEM:
			ctx->seen |= SEEN_X | SEEN_MEM_WORD(k);
			emit(ARM_LDR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx);
			break;
		case BPF_LDX | BPF_B | BPF_MSH:
			/* x = ((*(frame + k)) & 0xf) << 2; */
			ctx->seen |= SEEN_X | SEEN_DATA | SEEN_CALL;
			/* the interpreter should deal with the negative K */
			if ((int)k < 0)
				return -1;
			/* offset in r1: we might have to take the slow path */
			emit_mov_i(r_off, k, ctx);
			emit(ARM_CMP_R(r_skb_hl, r_off), ctx);

			/* load in r0: common with the slowpath */
			_emit(ARM_COND_HI, ARM_LDRB_R(ARM_R0, r_skb_data,
						      ARM_R1), ctx);
			/*
			 * emit_mov_i() might generate one or two instructions,
			 * the same holds for emit_blx_r()
			 */
			_emit(ARM_COND_HI, ARM_B(b_imm(i + 1, ctx) - 2), ctx);

			emit(ARM_MOV_R(ARM_R0, r_skb), ctx);
			/* r_off is r1 */
			emit_mov_i(ARM_R3, (u32)jit_get_skb_b, ctx);
			emit_blx_r(ARM_R3, ctx);
			/* check the return value of skb_copy_bits */
			emit(ARM_CMP_I(ARM_R1, 0), ctx);
			emit_err_ret(ARM_COND_NE, ctx);

			emit(ARM_AND_I(r_X, ARM_R0, 0x00f), ctx);
			emit(ARM_LSL_I(r_X, r_X, 2), ctx);
			break;
		case BPF_ST:
			ctx->seen |= SEEN_MEM_WORD(k);
			emit(ARM_STR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx);
			break;
		case BPF_STX:
			update_on_xread(ctx);
			ctx->seen |= SEEN_MEM_WORD(k);
			emit(ARM_STR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx);
			break;
		case BPF_ALU | BPF_ADD | BPF_K:
			/* A += K */
			OP_IMM3(ARM_ADD, r_A, r_A, k, ctx);
			break;
		case BPF_ALU | BPF_ADD | BPF_X:
			update_on_xread(ctx);
			emit(ARM_ADD_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_ALU | BPF_SUB | BPF_K:
			/* A -= K */
			OP_IMM3(ARM_SUB, r_A, r_A, k, ctx);
			break;
		case BPF_ALU | BPF_SUB | BPF_X:
			update_on_xread(ctx);
			emit(ARM_SUB_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_ALU | BPF_MUL | BPF_K:
			/* A *= K */
			emit_mov_i(r_scratch, k, ctx);
			emit(ARM_MUL(r_A, r_A, r_scratch), ctx);
			break;
		case BPF_ALU | BPF_MUL | BPF_X:
			update_on_xread(ctx);
			emit(ARM_MUL(r_A, r_A, r_X), ctx);
			break;
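		/*
		 * Division and modulus: a constant divisor of zero was
		 * already rejected by the classic BPF checker, so only
		 * k == 1 is special-cased here.  The BPF_X variants must
		 * check the divisor at run time and bail out through the
		 * error return, as the interpreter does.
		 */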
		case BPF_ALU | BPF_DIV | BPF_K:
			if (k == 1)
				break;
			emit_mov_i(r_scratch, k, ctx);
			emit_udivmod(r_A, r_A, r_scratch, ctx, BPF_DIV);
			break;
		case BPF_ALU | BPF_DIV | BPF_X:
			update_on_xread(ctx);
			emit(ARM_CMP_I(r_X, 0), ctx);
			emit_err_ret(ARM_COND_EQ, ctx);
			emit_udivmod(r_A, r_A, r_X, ctx, BPF_DIV);
			break;
		case BPF_ALU | BPF_MOD | BPF_K:
			if (k == 1) {
				emit_mov_i(r_A, 0, ctx);
				break;
			}
			emit_mov_i(r_scratch, k, ctx);
			emit_udivmod(r_A, r_A, r_scratch, ctx, BPF_MOD);
			break;
		case BPF_ALU | BPF_MOD | BPF_X:
			update_on_xread(ctx);
			emit(ARM_CMP_I(r_X, 0), ctx);
			emit_err_ret(ARM_COND_EQ, ctx);
			emit_udivmod(r_A, r_A, r_X, ctx, BPF_MOD);
			break;
		case BPF_ALU | BPF_OR | BPF_K:
			/* A |= K */
			OP_IMM3(ARM_ORR, r_A, r_A, k, ctx);
			break;
		case BPF_ALU | BPF_OR | BPF_X:
			update_on_xread(ctx);
			emit(ARM_ORR_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_ALU | BPF_XOR | BPF_K:
			/* A ^= K; */
			OP_IMM3(ARM_EOR, r_A, r_A, k, ctx);
			break;
		case BPF_ANC | SKF_AD_ALU_XOR_X:
		case BPF_ALU | BPF_XOR | BPF_X:
			/* A ^= X */
			update_on_xread(ctx);
			emit(ARM_EOR_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_ALU | BPF_AND | BPF_K:
			/* A &= K */
			OP_IMM3(ARM_AND, r_A, r_A, k, ctx);
			break;
		case BPF_ALU | BPF_AND | BPF_X:
			update_on_xread(ctx);
			emit(ARM_AND_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_ALU | BPF_LSH | BPF_K:
			if (unlikely(k > 31))
				return -1;
			emit(ARM_LSL_I(r_A, r_A, k), ctx);
			break;
		case BPF_ALU | BPF_LSH | BPF_X:
			update_on_xread(ctx);
			emit(ARM_LSL_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_ALU | BPF_RSH | BPF_K:
			if (unlikely(k > 31))
				return -1;
			if (k)
				emit(ARM_LSR_I(r_A, r_A, k), ctx);
			break;
		case BPF_ALU | BPF_RSH | BPF_X:
			update_on_xread(ctx);
			emit(ARM_LSR_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_ALU | BPF_NEG:
			/* A = -A */
			emit(ARM_RSB_I(r_A, r_A, 0), ctx);
			break;
		case BPF_JMP | BPF_JA:
			/* pc += K */
			emit(ARM_B(b_imm(i + k + 1, ctx)), ctx);
			break;
		case BPF_JMP | BPF_JEQ | BPF_K:
			/* pc += (A == K) ? pc->jt : pc->jf */
			condt = ARM_COND_EQ;
			goto cmp_imm;
		case BPF_JMP | BPF_JGT | BPF_K:
			/* pc += (A > K) ? pc->jt : pc->jf */
			condt = ARM_COND_HI;
			goto cmp_imm;
		case BPF_JMP | BPF_JGE | BPF_K:
			/* pc += (A >= K) ? pc->jt : pc->jf */
			condt = ARM_COND_HS;
cmp_imm:
			imm12 = imm8m(k);
			if (imm12 < 0) {
				emit_mov_i_no8m(r_scratch, k, ctx);
				emit(ARM_CMP_R(r_A, r_scratch), ctx);
			} else {
				emit(ARM_CMP_I(r_A, imm12), ctx);
			}
cond_jump:
			if (inst->jt)
				_emit(condt, ARM_B(b_imm(i + inst->jt + 1,
						   ctx)), ctx);
			if (inst->jf)
				_emit(condt ^ 1, ARM_B(b_imm(i + inst->jf + 1,
							     ctx)), ctx);
			break;
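		/*
		 * Note the "condt ^ 1" trick above: ARM condition codes
		 * come in complementary pairs that differ only in bit 0
		 * (EQ/NE, HS/LO, HI/LS, ...), so XOR-ing with 1 yields
		 * the inverse condition for the jf branch.
		 */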
		case BPF_JMP | BPF_JEQ | BPF_X:
			/* pc += (A == X) ? pc->jt : pc->jf */
			condt = ARM_COND_EQ;
			goto cmp_x;
		case BPF_JMP | BPF_JGT | BPF_X:
			/* pc += (A > X) ? pc->jt : pc->jf */
			condt = ARM_COND_HI;
			goto cmp_x;
		case BPF_JMP | BPF_JGE | BPF_X:
			/* pc += (A >= X) ? pc->jt : pc->jf */
			condt = ARM_COND_CS;
cmp_x:
			update_on_xread(ctx);
			emit(ARM_CMP_R(r_A, r_X), ctx);
			goto cond_jump;
		case BPF_JMP | BPF_JSET | BPF_K:
			/* pc += (A & K) ? pc->jt : pc->jf */
			condt = ARM_COND_NE;
			/* not set iff all zeroes iff Z==1 iff EQ */

			imm12 = imm8m(k);
			if (imm12 < 0) {
				emit_mov_i_no8m(r_scratch, k, ctx);
				emit(ARM_TST_R(r_A, r_scratch), ctx);
			} else {
				emit(ARM_TST_I(r_A, imm12), ctx);
			}
			goto cond_jump;
		case BPF_JMP | BPF_JSET | BPF_X:
			/* pc += (A & X) ? pc->jt : pc->jf */
			update_on_xread(ctx);
			condt = ARM_COND_NE;
			emit(ARM_TST_R(r_A, r_X), ctx);
			goto cond_jump;
		case BPF_RET | BPF_A:
			emit(ARM_MOV_R(ARM_R0, r_A), ctx);
			goto b_epilogue;
		case BPF_RET | BPF_K:
			if ((k == 0) && (ctx->ret0_fp_idx < 0))
				ctx->ret0_fp_idx = i;
			emit_mov_i(ARM_R0, k, ctx);
b_epilogue:
			if (i != ctx->skf->len - 1)
				emit(ARM_B(b_imm(prog->len, ctx)), ctx);
			break;
		case BPF_MISC | BPF_TAX:
			/* X = A */
			ctx->seen |= SEEN_X;
			emit(ARM_MOV_R(r_X, r_A), ctx);
			break;
		case BPF_MISC | BPF_TXA:
			/* A = X */
			update_on_xread(ctx);
			emit(ARM_MOV_R(r_A, r_X), ctx);
			break;
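		/*
		 * The BPF_ANC cases below come from bpf_anc_helper(),
		 * which rewrites LD|ABS loads at the magic negative
		 * offsets (SKF_AD_OFF + SKF_AD_*) into direct reads of
		 * skb or per-cpu fields instead of packet data.
		 */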
		case BPF_ANC | SKF_AD_PROTOCOL:
			/* A = ntohs(skb->protocol) */
			ctx->seen |= SEEN_SKB;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  protocol) != 2);
			off = offsetof(struct sk_buff, protocol);
			emit(ARM_LDRH_I(r_scratch, r_skb, off), ctx);
			emit_swap16(r_A, r_scratch, ctx);
			break;
		case BPF_ANC | SKF_AD_CPU:
			/* r_scratch = current_thread_info() */
			OP_IMM3(ARM_BIC, r_scratch, ARM_SP, THREAD_SIZE - 1, ctx);
			/* A = current_thread_info()->cpu */
			BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info, cpu) != 4);
			off = offsetof(struct thread_info, cpu);
			emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
			break;
		case BPF_ANC | SKF_AD_IFINDEX:
		case BPF_ANC | SKF_AD_HATYPE:
			/* A = skb->dev->ifindex */
			/* A = skb->dev->type */
			ctx->seen |= SEEN_SKB;
			off = offsetof(struct sk_buff, dev);
			emit(ARM_LDR_I(r_scratch, r_skb, off), ctx);

			emit(ARM_CMP_I(r_scratch, 0), ctx);
			emit_err_ret(ARM_COND_EQ, ctx);

			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
						  ifindex) != 4);
			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
						  type) != 2);

			if (code == (BPF_ANC | SKF_AD_IFINDEX)) {
				off = offsetof(struct net_device, ifindex);
				emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
			} else {
				/*
				 * offset of field "type" in "struct
				 * net_device" is above what can be
				 * used in the ldrh rd, [rn, #imm]
				 * instruction, so load the offset in
				 * a register and use ldrh rd, [rn, rm]
				 */
				off = offsetof(struct net_device, type);
				emit_mov_i(ARM_R3, off, ctx);
				emit(ARM_LDRH_R(r_A, r_scratch, ARM_R3), ctx);
			}
			break;
		case BPF_ANC | SKF_AD_MARK:
			ctx->seen |= SEEN_SKB;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
			off = offsetof(struct sk_buff, mark);
			emit(ARM_LDR_I(r_A, r_skb, off), ctx);
			break;
		case BPF_ANC | SKF_AD_RXHASH:
			ctx->seen |= SEEN_SKB;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
			off = offsetof(struct sk_buff, hash);
			emit(ARM_LDR_I(r_A, r_skb, off), ctx);
			break;
		case BPF_ANC | SKF_AD_VLAN_TAG:
		case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
			ctx->seen |= SEEN_SKB;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
			off = offsetof(struct sk_buff, vlan_tci);
			emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
			if (code == (BPF_ANC | SKF_AD_VLAN_TAG))
				OP_IMM3(ARM_AND, r_A, r_A, ~VLAN_TAG_PRESENT, ctx);
			else {
				OP_IMM3(ARM_LSR, r_A, r_A, 12, ctx);
				OP_IMM3(ARM_AND, r_A, r_A, 0x1, ctx);
			}
			break;
		case BPF_ANC | SKF_AD_PKTTYPE:
			ctx->seen |= SEEN_SKB;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  __pkt_type_offset[0]) != 1);
			off = PKT_TYPE_OFFSET();
			emit(ARM_LDRB_I(r_A, r_skb, off), ctx);
			emit(ARM_AND_I(r_A, r_A, PKT_TYPE_MAX), ctx);
#ifdef __BIG_ENDIAN_BITFIELD
			emit(ARM_LSR_I(r_A, r_A, 5), ctx);
#endif
			break;
		case BPF_ANC | SKF_AD_QUEUE:
			ctx->seen |= SEEN_SKB;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  queue_mapping) != 2);
			BUILD_BUG_ON(offsetof(struct sk_buff,
					      queue_mapping) > 0xff);
			off = offsetof(struct sk_buff, queue_mapping);
			emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
			break;
		case BPF_ANC | SKF_AD_PAY_OFFSET:
			ctx->seen |= SEEN_SKB | SEEN_CALL;

			emit(ARM_MOV_R(ARM_R0, r_skb), ctx);
			emit_mov_i(ARM_R3, (unsigned int)skb_get_poff, ctx);
			emit_blx_r(ARM_R3, ctx);
			emit(ARM_MOV_R(r_A, ARM_R0), ctx);
			break;
		case BPF_LDX | BPF_W | BPF_ABS:
			/*
			 * load a 32bit word from struct seccomp_data.
			 * seccomp_check_filter() will already have checked
			 * that k is 32bit aligned and lies within the
			 * struct seccomp_data.
			 */
			ctx->seen |= SEEN_SKB;
			emit(ARM_LDR_I(r_A, r_skb, k), ctx);
			break;
		default:
			return -1;
		}

		if (ctx->flags & FLAG_IMM_OVERFLOW)
			/*
			 * this instruction generated an overflow when
			 * trying to access the literal pool, so
			 * delegate this filter to the kernel interpreter.
			 */
			return -1;
	}

	/* compute offsets only during the first pass */
	if (ctx->target == NULL)
		ctx->offsets[i] = ctx->idx * 4;

	return 0;
}
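/*
 * The JIT entry point below makes two passes over the filter: a "fake"
 * pass with ctx.target == NULL that only sizes the image and records the
 * per-instruction offsets, and a second pass that emits into the real
 * buffer.  On pre-ARMv7 the epilogue is sized up front as well, since the
 * literal pool used by emit_mov_i_no8m() is placed right behind it.
 */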
void bpf_jit_compile(struct bpf_prog *fp)
{
	struct bpf_binary_header *header;
	struct jit_ctx ctx;
	unsigned tmp_idx;
	unsigned alloc_size;
	u8 *target_ptr;

	if (!bpf_jit_enable)
		return;

	memset(&ctx, 0, sizeof(ctx));
	ctx.skf = fp;
	ctx.ret0_fp_idx = -1;

	ctx.offsets = kzalloc(4 * (ctx.skf->len + 1), GFP_KERNEL);
	if (ctx.offsets == NULL)
		return;

	/* fake pass to fill in the ctx->seen */
	if (unlikely(build_body(&ctx)))
		goto out;

	tmp_idx = ctx.idx;
	build_prologue(&ctx);
	ctx.prologue_bytes = (ctx.idx - tmp_idx) * 4;

#if __LINUX_ARM_ARCH__ < 7
	tmp_idx = ctx.idx;
	build_epilogue(&ctx);
	ctx.epilogue_bytes = (ctx.idx - tmp_idx) * 4;

	ctx.idx += ctx.imm_count;
	if (ctx.imm_count) {
		ctx.imms = kzalloc(4 * ctx.imm_count, GFP_KERNEL);
		if (ctx.imms == NULL)
			goto out;
	}
#else
	/* there's nothing after the epilogue on ARMv7 */
	build_epilogue(&ctx);
#endif
	alloc_size = 4 * ctx.idx;
	header = bpf_jit_binary_alloc(alloc_size, &target_ptr,
				      4, jit_fill_hole);
	if (header == NULL)
		goto out;

	ctx.target = (u32 *) target_ptr;
	ctx.idx = 0;

	build_prologue(&ctx);
	if (build_body(&ctx) < 0) {
#if __LINUX_ARM_ARCH__ < 7
		if (ctx.imm_count)
			kfree(ctx.imms);
#endif
		bpf_jit_binary_free(header);
		goto out;
	}
	build_epilogue(&ctx);

	flush_icache_range((u32)header, (u32)(ctx.target + ctx.idx));

#if __LINUX_ARM_ARCH__ < 7
	if (ctx.imm_count)
		kfree(ctx.imms);
#endif

	if (bpf_jit_enable > 1)
		/* there are 2 passes here */
		bpf_jit_dump(fp->len, alloc_size, 2, ctx.target);

	set_memory_ro((unsigned long)header, header->pages);
	fp->bpf_func = (void *)ctx.target;
	fp->jited = 1;
out:
	kfree(ctx.offsets);
	return;
}

void bpf_jit_free(struct bpf_prog *fp)
{
	unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
	struct bpf_binary_header *header = (void *)addr;

	if (!fp->jited)
		goto free_filter;

	set_memory_rw(addr, header->pages);
	bpf_jit_binary_free(header);

free_filter:
	bpf_prog_unlock_free(fp);
}