1 /* 2 * Just-In-Time compiler for BPF filters on 32bit ARM 3 * 4 * Copyright (c) 2011 Mircea Gherzan <mgherzan@gmail.com> 5 * 6 * This program is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License as published by the 8 * Free Software Foundation; version 2 of the License. 9 */ 10 11 #include <linux/bitops.h> 12 #include <linux/compiler.h> 13 #include <linux/errno.h> 14 #include <linux/filter.h> 15 #include <linux/netdevice.h> 16 #include <linux/string.h> 17 #include <linux/slab.h> 18 #include <linux/if_vlan.h> 19 20 #include <asm/cacheflush.h> 21 #include <asm/hwcap.h> 22 #include <asm/opcodes.h> 23 24 #include "bpf_jit_32.h" 25 26 /* 27 * ABI: 28 * 29 * r0 scratch register 30 * r4 BPF register A 31 * r5 BPF register X 32 * r6 pointer to the skb 33 * r7 skb->data 34 * r8 skb_headlen(skb) 35 */ 36 37 #define r_scratch ARM_R0 38 /* r1-r3 are (also) used for the unaligned loads on the non-ARMv7 slowpath */ 39 #define r_off ARM_R1 40 #define r_A ARM_R4 41 #define r_X ARM_R5 42 #define r_skb ARM_R6 43 #define r_skb_data ARM_R7 44 #define r_skb_hl ARM_R8 45 46 #define SCRATCH_SP_OFFSET 0 47 #define SCRATCH_OFF(k) (SCRATCH_SP_OFFSET + 4 * (k)) 48 49 #define SEEN_MEM ((1 << BPF_MEMWORDS) - 1) 50 #define SEEN_MEM_WORD(k) (1 << (k)) 51 #define SEEN_X (1 << BPF_MEMWORDS) 52 #define SEEN_CALL (1 << (BPF_MEMWORDS + 1)) 53 #define SEEN_SKB (1 << (BPF_MEMWORDS + 2)) 54 #define SEEN_DATA (1 << (BPF_MEMWORDS + 3)) 55 56 #define FLAG_NEED_X_RESET (1 << 0) 57 #define FLAG_IMM_OVERFLOW (1 << 1) 58 59 struct jit_ctx { 60 const struct bpf_prog *skf; 61 unsigned idx; 62 unsigned prologue_bytes; 63 int ret0_fp_idx; 64 u32 seen; 65 u32 flags; 66 u32 *offsets; 67 u32 *target; 68 #if __LINUX_ARM_ARCH__ < 7 69 u16 epilogue_bytes; 70 u16 imm_count; 71 u32 *imms; 72 #endif 73 }; 74 75 int bpf_jit_enable __read_mostly; 76 77 static inline int call_neg_helper(struct sk_buff *skb, int offset, void *ret, 78 unsigned int size) 79 { 80 void *ptr = bpf_internal_load_pointer_neg_helper(skb, offset, size); 81 82 if (!ptr) 83 return -EFAULT; 84 memcpy(ret, ptr, size); 85 return 0; 86 } 87 88 static u64 jit_get_skb_b(struct sk_buff *skb, int offset) 89 { 90 u8 ret; 91 int err; 92 93 if (offset < 0) 94 err = call_neg_helper(skb, offset, &ret, 1); 95 else 96 err = skb_copy_bits(skb, offset, &ret, 1); 97 98 return (u64)err << 32 | ret; 99 } 100 101 static u64 jit_get_skb_h(struct sk_buff *skb, int offset) 102 { 103 u16 ret; 104 int err; 105 106 if (offset < 0) 107 err = call_neg_helper(skb, offset, &ret, 2); 108 else 109 err = skb_copy_bits(skb, offset, &ret, 2); 110 111 return (u64)err << 32 | ntohs(ret); 112 } 113 114 static u64 jit_get_skb_w(struct sk_buff *skb, int offset) 115 { 116 u32 ret; 117 int err; 118 119 if (offset < 0) 120 err = call_neg_helper(skb, offset, &ret, 4); 121 else 122 err = skb_copy_bits(skb, offset, &ret, 4); 123 124 return (u64)err << 32 | ntohl(ret); 125 } 126 127 /* 128 * Wrappers which handle both OABI and EABI and assures Thumb2 interworking 129 * (where the assembly routines like __aeabi_uidiv could cause problems). 130 */ 131 static u32 jit_udiv(u32 dividend, u32 divisor) 132 { 133 return dividend / divisor; 134 } 135 136 static u32 jit_mod(u32 dividend, u32 divisor) 137 { 138 return dividend % divisor; 139 } 140 141 static inline void _emit(int cond, u32 inst, struct jit_ctx *ctx) 142 { 143 inst |= (cond << 28); 144 inst = __opcode_to_mem_arm(inst); 145 146 if (ctx->target != NULL) 147 ctx->target[ctx->idx] = inst; 148 149 ctx->idx++; 150 } 151 152 /* 153 * Emit an instruction that will be executed unconditionally. 154 */ 155 static inline void emit(u32 inst, struct jit_ctx *ctx) 156 { 157 _emit(ARM_COND_AL, inst, ctx); 158 } 159 160 static u16 saved_regs(struct jit_ctx *ctx) 161 { 162 u16 ret = 0; 163 164 if ((ctx->skf->len > 1) || 165 (ctx->skf->insns[0].code == (BPF_RET | BPF_A))) 166 ret |= 1 << r_A; 167 168 #ifdef CONFIG_FRAME_POINTER 169 ret |= (1 << ARM_FP) | (1 << ARM_IP) | (1 << ARM_LR) | (1 << ARM_PC); 170 #else 171 if (ctx->seen & SEEN_CALL) 172 ret |= 1 << ARM_LR; 173 #endif 174 if (ctx->seen & (SEEN_DATA | SEEN_SKB)) 175 ret |= 1 << r_skb; 176 if (ctx->seen & SEEN_DATA) 177 ret |= (1 << r_skb_data) | (1 << r_skb_hl); 178 if (ctx->seen & SEEN_X) 179 ret |= 1 << r_X; 180 181 return ret; 182 } 183 184 static inline int mem_words_used(struct jit_ctx *ctx) 185 { 186 /* yes, we do waste some stack space IF there are "holes" in the set" */ 187 return fls(ctx->seen & SEEN_MEM); 188 } 189 190 static inline bool is_load_to_a(u16 inst) 191 { 192 switch (inst) { 193 case BPF_LD | BPF_W | BPF_LEN: 194 case BPF_LD | BPF_W | BPF_ABS: 195 case BPF_LD | BPF_H | BPF_ABS: 196 case BPF_LD | BPF_B | BPF_ABS: 197 return true; 198 default: 199 return false; 200 } 201 } 202 203 static void jit_fill_hole(void *area, unsigned int size) 204 { 205 u32 *ptr; 206 /* We are guaranteed to have aligned memory. */ 207 for (ptr = area; size >= sizeof(u32); size -= sizeof(u32)) 208 *ptr++ = __opcode_to_mem_arm(ARM_INST_UDF); 209 } 210 211 static void build_prologue(struct jit_ctx *ctx) 212 { 213 u16 reg_set = saved_regs(ctx); 214 u16 first_inst = ctx->skf->insns[0].code; 215 u16 off; 216 217 #ifdef CONFIG_FRAME_POINTER 218 emit(ARM_MOV_R(ARM_IP, ARM_SP), ctx); 219 emit(ARM_PUSH(reg_set), ctx); 220 emit(ARM_SUB_I(ARM_FP, ARM_IP, 4), ctx); 221 #else 222 if (reg_set) 223 emit(ARM_PUSH(reg_set), ctx); 224 #endif 225 226 if (ctx->seen & (SEEN_DATA | SEEN_SKB)) 227 emit(ARM_MOV_R(r_skb, ARM_R0), ctx); 228 229 if (ctx->seen & SEEN_DATA) { 230 off = offsetof(struct sk_buff, data); 231 emit(ARM_LDR_I(r_skb_data, r_skb, off), ctx); 232 /* headlen = len - data_len */ 233 off = offsetof(struct sk_buff, len); 234 emit(ARM_LDR_I(r_skb_hl, r_skb, off), ctx); 235 off = offsetof(struct sk_buff, data_len); 236 emit(ARM_LDR_I(r_scratch, r_skb, off), ctx); 237 emit(ARM_SUB_R(r_skb_hl, r_skb_hl, r_scratch), ctx); 238 } 239 240 if (ctx->flags & FLAG_NEED_X_RESET) 241 emit(ARM_MOV_I(r_X, 0), ctx); 242 243 /* do not leak kernel data to userspace */ 244 if ((first_inst != (BPF_RET | BPF_K)) && !(is_load_to_a(first_inst))) 245 emit(ARM_MOV_I(r_A, 0), ctx); 246 247 /* stack space for the BPF_MEM words */ 248 if (ctx->seen & SEEN_MEM) 249 emit(ARM_SUB_I(ARM_SP, ARM_SP, mem_words_used(ctx) * 4), ctx); 250 } 251 252 static void build_epilogue(struct jit_ctx *ctx) 253 { 254 u16 reg_set = saved_regs(ctx); 255 256 if (ctx->seen & SEEN_MEM) 257 emit(ARM_ADD_I(ARM_SP, ARM_SP, mem_words_used(ctx) * 4), ctx); 258 259 reg_set &= ~(1 << ARM_LR); 260 261 #ifdef CONFIG_FRAME_POINTER 262 /* the first instruction of the prologue was: mov ip, sp */ 263 reg_set &= ~(1 << ARM_IP); 264 reg_set |= (1 << ARM_SP); 265 emit(ARM_LDM(ARM_SP, reg_set), ctx); 266 #else 267 if (reg_set) { 268 if (ctx->seen & SEEN_CALL) 269 reg_set |= 1 << ARM_PC; 270 emit(ARM_POP(reg_set), ctx); 271 } 272 273 if (!(ctx->seen & SEEN_CALL)) 274 emit(ARM_BX(ARM_LR), ctx); 275 #endif 276 } 277 278 static int16_t imm8m(u32 x) 279 { 280 u32 rot; 281 282 for (rot = 0; rot < 16; rot++) 283 if ((x & ~ror32(0xff, 2 * rot)) == 0) 284 return rol32(x, 2 * rot) | (rot << 8); 285 286 return -1; 287 } 288 289 #if __LINUX_ARM_ARCH__ < 7 290 291 static u16 imm_offset(u32 k, struct jit_ctx *ctx) 292 { 293 unsigned i = 0, offset; 294 u16 imm; 295 296 /* on the "fake" run we just count them (duplicates included) */ 297 if (ctx->target == NULL) { 298 ctx->imm_count++; 299 return 0; 300 } 301 302 while ((i < ctx->imm_count) && ctx->imms[i]) { 303 if (ctx->imms[i] == k) 304 break; 305 i++; 306 } 307 308 if (ctx->imms[i] == 0) 309 ctx->imms[i] = k; 310 311 /* constants go just after the epilogue */ 312 offset = ctx->offsets[ctx->skf->len]; 313 offset += ctx->prologue_bytes; 314 offset += ctx->epilogue_bytes; 315 offset += i * 4; 316 317 ctx->target[offset / 4] = k; 318 319 /* PC in ARM mode == address of the instruction + 8 */ 320 imm = offset - (8 + ctx->idx * 4); 321 322 if (imm & ~0xfff) { 323 /* 324 * literal pool is too far, signal it into flags. we 325 * can only detect it on the second pass unfortunately. 326 */ 327 ctx->flags |= FLAG_IMM_OVERFLOW; 328 return 0; 329 } 330 331 return imm; 332 } 333 334 #endif /* __LINUX_ARM_ARCH__ */ 335 336 /* 337 * Move an immediate that's not an imm8m to a core register. 338 */ 339 static inline void emit_mov_i_no8m(int rd, u32 val, struct jit_ctx *ctx) 340 { 341 #if __LINUX_ARM_ARCH__ < 7 342 emit(ARM_LDR_I(rd, ARM_PC, imm_offset(val, ctx)), ctx); 343 #else 344 emit(ARM_MOVW(rd, val & 0xffff), ctx); 345 if (val > 0xffff) 346 emit(ARM_MOVT(rd, val >> 16), ctx); 347 #endif 348 } 349 350 static inline void emit_mov_i(int rd, u32 val, struct jit_ctx *ctx) 351 { 352 int imm12 = imm8m(val); 353 354 if (imm12 >= 0) 355 emit(ARM_MOV_I(rd, imm12), ctx); 356 else 357 emit_mov_i_no8m(rd, val, ctx); 358 } 359 360 #if __LINUX_ARM_ARCH__ < 6 361 362 static void emit_load_be32(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx) 363 { 364 _emit(cond, ARM_LDRB_I(ARM_R3, r_addr, 1), ctx); 365 _emit(cond, ARM_LDRB_I(ARM_R1, r_addr, 0), ctx); 366 _emit(cond, ARM_LDRB_I(ARM_R2, r_addr, 3), ctx); 367 _emit(cond, ARM_LSL_I(ARM_R3, ARM_R3, 16), ctx); 368 _emit(cond, ARM_LDRB_I(ARM_R0, r_addr, 2), ctx); 369 _emit(cond, ARM_ORR_S(ARM_R3, ARM_R3, ARM_R1, SRTYPE_LSL, 24), ctx); 370 _emit(cond, ARM_ORR_R(ARM_R3, ARM_R3, ARM_R2), ctx); 371 _emit(cond, ARM_ORR_S(r_res, ARM_R3, ARM_R0, SRTYPE_LSL, 8), ctx); 372 } 373 374 static void emit_load_be16(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx) 375 { 376 _emit(cond, ARM_LDRB_I(ARM_R1, r_addr, 0), ctx); 377 _emit(cond, ARM_LDRB_I(ARM_R2, r_addr, 1), ctx); 378 _emit(cond, ARM_ORR_S(r_res, ARM_R2, ARM_R1, SRTYPE_LSL, 8), ctx); 379 } 380 381 static inline void emit_swap16(u8 r_dst, u8 r_src, struct jit_ctx *ctx) 382 { 383 /* r_dst = (r_src << 8) | (r_src >> 8) */ 384 emit(ARM_LSL_I(ARM_R1, r_src, 8), ctx); 385 emit(ARM_ORR_S(r_dst, ARM_R1, r_src, SRTYPE_LSR, 8), ctx); 386 387 /* 388 * we need to mask out the bits set in r_dst[23:16] due to 389 * the first shift instruction. 390 * 391 * note that 0x8ff is the encoded immediate 0x00ff0000. 392 */ 393 emit(ARM_BIC_I(r_dst, r_dst, 0x8ff), ctx); 394 } 395 396 #else /* ARMv6+ */ 397 398 static void emit_load_be32(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx) 399 { 400 _emit(cond, ARM_LDR_I(r_res, r_addr, 0), ctx); 401 #ifdef __LITTLE_ENDIAN 402 _emit(cond, ARM_REV(r_res, r_res), ctx); 403 #endif 404 } 405 406 static void emit_load_be16(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx) 407 { 408 _emit(cond, ARM_LDRH_I(r_res, r_addr, 0), ctx); 409 #ifdef __LITTLE_ENDIAN 410 _emit(cond, ARM_REV16(r_res, r_res), ctx); 411 #endif 412 } 413 414 static inline void emit_swap16(u8 r_dst __maybe_unused, 415 u8 r_src __maybe_unused, 416 struct jit_ctx *ctx __maybe_unused) 417 { 418 #ifdef __LITTLE_ENDIAN 419 emit(ARM_REV16(r_dst, r_src), ctx); 420 #endif 421 } 422 423 #endif /* __LINUX_ARM_ARCH__ < 6 */ 424 425 426 /* Compute the immediate value for a PC-relative branch. */ 427 static inline u32 b_imm(unsigned tgt, struct jit_ctx *ctx) 428 { 429 u32 imm; 430 431 if (ctx->target == NULL) 432 return 0; 433 /* 434 * BPF allows only forward jumps and the offset of the target is 435 * still the one computed during the first pass. 436 */ 437 imm = ctx->offsets[tgt] + ctx->prologue_bytes - (ctx->idx * 4 + 8); 438 439 return imm >> 2; 440 } 441 442 #define OP_IMM3(op, r1, r2, imm_val, ctx) \ 443 do { \ 444 imm12 = imm8m(imm_val); \ 445 if (imm12 < 0) { \ 446 emit_mov_i_no8m(r_scratch, imm_val, ctx); \ 447 emit(op ## _R((r1), (r2), r_scratch), ctx); \ 448 } else { \ 449 emit(op ## _I((r1), (r2), imm12), ctx); \ 450 } \ 451 } while (0) 452 453 static inline void emit_err_ret(u8 cond, struct jit_ctx *ctx) 454 { 455 if (ctx->ret0_fp_idx >= 0) { 456 _emit(cond, ARM_B(b_imm(ctx->ret0_fp_idx, ctx)), ctx); 457 /* NOP to keep the size constant between passes */ 458 emit(ARM_MOV_R(ARM_R0, ARM_R0), ctx); 459 } else { 460 _emit(cond, ARM_MOV_I(ARM_R0, 0), ctx); 461 _emit(cond, ARM_B(b_imm(ctx->skf->len, ctx)), ctx); 462 } 463 } 464 465 static inline void emit_blx_r(u8 tgt_reg, struct jit_ctx *ctx) 466 { 467 #if __LINUX_ARM_ARCH__ < 5 468 emit(ARM_MOV_R(ARM_LR, ARM_PC), ctx); 469 470 if (elf_hwcap & HWCAP_THUMB) 471 emit(ARM_BX(tgt_reg), ctx); 472 else 473 emit(ARM_MOV_R(ARM_PC, tgt_reg), ctx); 474 #else 475 emit(ARM_BLX_R(tgt_reg), ctx); 476 #endif 477 } 478 479 static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx, 480 int bpf_op) 481 { 482 #if __LINUX_ARM_ARCH__ == 7 483 if (elf_hwcap & HWCAP_IDIVA) { 484 if (bpf_op == BPF_DIV) 485 emit(ARM_UDIV(rd, rm, rn), ctx); 486 else { 487 emit(ARM_UDIV(ARM_R3, rm, rn), ctx); 488 emit(ARM_MLS(rd, rn, ARM_R3, rm), ctx); 489 } 490 return; 491 } 492 #endif 493 494 /* 495 * For BPF_ALU | BPF_DIV | BPF_K instructions, rm is ARM_R4 496 * (r_A) and rn is ARM_R0 (r_scratch) so load rn first into 497 * ARM_R1 to avoid accidentally overwriting ARM_R0 with rm 498 * before using it as a source for ARM_R1. 499 * 500 * For BPF_ALU | BPF_DIV | BPF_X rm is ARM_R4 (r_A) and rn is 501 * ARM_R5 (r_X) so there is no particular register overlap 502 * issues. 503 */ 504 if (rn != ARM_R1) 505 emit(ARM_MOV_R(ARM_R1, rn), ctx); 506 if (rm != ARM_R0) 507 emit(ARM_MOV_R(ARM_R0, rm), ctx); 508 509 ctx->seen |= SEEN_CALL; 510 emit_mov_i(ARM_R3, bpf_op == BPF_DIV ? (u32)jit_udiv : (u32)jit_mod, 511 ctx); 512 emit_blx_r(ARM_R3, ctx); 513 514 if (rd != ARM_R0) 515 emit(ARM_MOV_R(rd, ARM_R0), ctx); 516 } 517 518 static inline void update_on_xread(struct jit_ctx *ctx) 519 { 520 if (!(ctx->seen & SEEN_X)) 521 ctx->flags |= FLAG_NEED_X_RESET; 522 523 ctx->seen |= SEEN_X; 524 } 525 526 static int build_body(struct jit_ctx *ctx) 527 { 528 void *load_func[] = {jit_get_skb_b, jit_get_skb_h, jit_get_skb_w}; 529 const struct bpf_prog *prog = ctx->skf; 530 const struct sock_filter *inst; 531 unsigned i, load_order, off, condt; 532 int imm12; 533 u32 k; 534 535 for (i = 0; i < prog->len; i++) { 536 u16 code; 537 538 inst = &(prog->insns[i]); 539 /* K as an immediate value operand */ 540 k = inst->k; 541 code = bpf_anc_helper(inst); 542 543 /* compute offsets only in the fake pass */ 544 if (ctx->target == NULL) 545 ctx->offsets[i] = ctx->idx * 4; 546 547 switch (code) { 548 case BPF_LD | BPF_IMM: 549 emit_mov_i(r_A, k, ctx); 550 break; 551 case BPF_LD | BPF_W | BPF_LEN: 552 ctx->seen |= SEEN_SKB; 553 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4); 554 emit(ARM_LDR_I(r_A, r_skb, 555 offsetof(struct sk_buff, len)), ctx); 556 break; 557 case BPF_LD | BPF_MEM: 558 /* A = scratch[k] */ 559 ctx->seen |= SEEN_MEM_WORD(k); 560 emit(ARM_LDR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx); 561 break; 562 case BPF_LD | BPF_W | BPF_ABS: 563 load_order = 2; 564 goto load; 565 case BPF_LD | BPF_H | BPF_ABS: 566 load_order = 1; 567 goto load; 568 case BPF_LD | BPF_B | BPF_ABS: 569 load_order = 0; 570 load: 571 emit_mov_i(r_off, k, ctx); 572 load_common: 573 ctx->seen |= SEEN_DATA | SEEN_CALL; 574 575 if (load_order > 0) { 576 emit(ARM_SUB_I(r_scratch, r_skb_hl, 577 1 << load_order), ctx); 578 emit(ARM_CMP_R(r_scratch, r_off), ctx); 579 condt = ARM_COND_GE; 580 } else { 581 emit(ARM_CMP_R(r_skb_hl, r_off), ctx); 582 condt = ARM_COND_HI; 583 } 584 585 /* 586 * test for negative offset, only if we are 587 * currently scheduled to take the fast 588 * path. this will update the flags so that 589 * the slowpath instruction are ignored if the 590 * offset is negative. 591 * 592 * for loard_order == 0 the HI condition will 593 * make loads at offset 0 take the slow path too. 594 */ 595 _emit(condt, ARM_CMP_I(r_off, 0), ctx); 596 597 _emit(condt, ARM_ADD_R(r_scratch, r_off, r_skb_data), 598 ctx); 599 600 if (load_order == 0) 601 _emit(condt, ARM_LDRB_I(r_A, r_scratch, 0), 602 ctx); 603 else if (load_order == 1) 604 emit_load_be16(condt, r_A, r_scratch, ctx); 605 else if (load_order == 2) 606 emit_load_be32(condt, r_A, r_scratch, ctx); 607 608 _emit(condt, ARM_B(b_imm(i + 1, ctx)), ctx); 609 610 /* the slowpath */ 611 emit_mov_i(ARM_R3, (u32)load_func[load_order], ctx); 612 emit(ARM_MOV_R(ARM_R0, r_skb), ctx); 613 /* the offset is already in R1 */ 614 emit_blx_r(ARM_R3, ctx); 615 /* check the result of skb_copy_bits */ 616 emit(ARM_CMP_I(ARM_R1, 0), ctx); 617 emit_err_ret(ARM_COND_NE, ctx); 618 emit(ARM_MOV_R(r_A, ARM_R0), ctx); 619 break; 620 case BPF_LD | BPF_W | BPF_IND: 621 load_order = 2; 622 goto load_ind; 623 case BPF_LD | BPF_H | BPF_IND: 624 load_order = 1; 625 goto load_ind; 626 case BPF_LD | BPF_B | BPF_IND: 627 load_order = 0; 628 load_ind: 629 update_on_xread(ctx); 630 OP_IMM3(ARM_ADD, r_off, r_X, k, ctx); 631 goto load_common; 632 case BPF_LDX | BPF_IMM: 633 ctx->seen |= SEEN_X; 634 emit_mov_i(r_X, k, ctx); 635 break; 636 case BPF_LDX | BPF_W | BPF_LEN: 637 ctx->seen |= SEEN_X | SEEN_SKB; 638 emit(ARM_LDR_I(r_X, r_skb, 639 offsetof(struct sk_buff, len)), ctx); 640 break; 641 case BPF_LDX | BPF_MEM: 642 ctx->seen |= SEEN_X | SEEN_MEM_WORD(k); 643 emit(ARM_LDR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx); 644 break; 645 case BPF_LDX | BPF_B | BPF_MSH: 646 /* x = ((*(frame + k)) & 0xf) << 2; */ 647 ctx->seen |= SEEN_X | SEEN_DATA | SEEN_CALL; 648 /* the interpreter should deal with the negative K */ 649 if ((int)k < 0) 650 return -1; 651 /* offset in r1: we might have to take the slow path */ 652 emit_mov_i(r_off, k, ctx); 653 emit(ARM_CMP_R(r_skb_hl, r_off), ctx); 654 655 /* load in r0: common with the slowpath */ 656 _emit(ARM_COND_HI, ARM_LDRB_R(ARM_R0, r_skb_data, 657 ARM_R1), ctx); 658 /* 659 * emit_mov_i() might generate one or two instructions, 660 * the same holds for emit_blx_r() 661 */ 662 _emit(ARM_COND_HI, ARM_B(b_imm(i + 1, ctx) - 2), ctx); 663 664 emit(ARM_MOV_R(ARM_R0, r_skb), ctx); 665 /* r_off is r1 */ 666 emit_mov_i(ARM_R3, (u32)jit_get_skb_b, ctx); 667 emit_blx_r(ARM_R3, ctx); 668 /* check the return value of skb_copy_bits */ 669 emit(ARM_CMP_I(ARM_R1, 0), ctx); 670 emit_err_ret(ARM_COND_NE, ctx); 671 672 emit(ARM_AND_I(r_X, ARM_R0, 0x00f), ctx); 673 emit(ARM_LSL_I(r_X, r_X, 2), ctx); 674 break; 675 case BPF_ST: 676 ctx->seen |= SEEN_MEM_WORD(k); 677 emit(ARM_STR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx); 678 break; 679 case BPF_STX: 680 update_on_xread(ctx); 681 ctx->seen |= SEEN_MEM_WORD(k); 682 emit(ARM_STR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx); 683 break; 684 case BPF_ALU | BPF_ADD | BPF_K: 685 /* A += K */ 686 OP_IMM3(ARM_ADD, r_A, r_A, k, ctx); 687 break; 688 case BPF_ALU | BPF_ADD | BPF_X: 689 update_on_xread(ctx); 690 emit(ARM_ADD_R(r_A, r_A, r_X), ctx); 691 break; 692 case BPF_ALU | BPF_SUB | BPF_K: 693 /* A -= K */ 694 OP_IMM3(ARM_SUB, r_A, r_A, k, ctx); 695 break; 696 case BPF_ALU | BPF_SUB | BPF_X: 697 update_on_xread(ctx); 698 emit(ARM_SUB_R(r_A, r_A, r_X), ctx); 699 break; 700 case BPF_ALU | BPF_MUL | BPF_K: 701 /* A *= K */ 702 emit_mov_i(r_scratch, k, ctx); 703 emit(ARM_MUL(r_A, r_A, r_scratch), ctx); 704 break; 705 case BPF_ALU | BPF_MUL | BPF_X: 706 update_on_xread(ctx); 707 emit(ARM_MUL(r_A, r_A, r_X), ctx); 708 break; 709 case BPF_ALU | BPF_DIV | BPF_K: 710 if (k == 1) 711 break; 712 emit_mov_i(r_scratch, k, ctx); 713 emit_udivmod(r_A, r_A, r_scratch, ctx, BPF_DIV); 714 break; 715 case BPF_ALU | BPF_DIV | BPF_X: 716 update_on_xread(ctx); 717 emit(ARM_CMP_I(r_X, 0), ctx); 718 emit_err_ret(ARM_COND_EQ, ctx); 719 emit_udivmod(r_A, r_A, r_X, ctx, BPF_DIV); 720 break; 721 case BPF_ALU | BPF_MOD | BPF_K: 722 if (k == 1) { 723 emit_mov_i(r_A, 0, ctx); 724 break; 725 } 726 emit_mov_i(r_scratch, k, ctx); 727 emit_udivmod(r_A, r_A, r_scratch, ctx, BPF_MOD); 728 break; 729 case BPF_ALU | BPF_MOD | BPF_X: 730 update_on_xread(ctx); 731 emit(ARM_CMP_I(r_X, 0), ctx); 732 emit_err_ret(ARM_COND_EQ, ctx); 733 emit_udivmod(r_A, r_A, r_X, ctx, BPF_MOD); 734 break; 735 case BPF_ALU | BPF_OR | BPF_K: 736 /* A |= K */ 737 OP_IMM3(ARM_ORR, r_A, r_A, k, ctx); 738 break; 739 case BPF_ALU | BPF_OR | BPF_X: 740 update_on_xread(ctx); 741 emit(ARM_ORR_R(r_A, r_A, r_X), ctx); 742 break; 743 case BPF_ALU | BPF_XOR | BPF_K: 744 /* A ^= K; */ 745 OP_IMM3(ARM_EOR, r_A, r_A, k, ctx); 746 break; 747 case BPF_ANC | SKF_AD_ALU_XOR_X: 748 case BPF_ALU | BPF_XOR | BPF_X: 749 /* A ^= X */ 750 update_on_xread(ctx); 751 emit(ARM_EOR_R(r_A, r_A, r_X), ctx); 752 break; 753 case BPF_ALU | BPF_AND | BPF_K: 754 /* A &= K */ 755 OP_IMM3(ARM_AND, r_A, r_A, k, ctx); 756 break; 757 case BPF_ALU | BPF_AND | BPF_X: 758 update_on_xread(ctx); 759 emit(ARM_AND_R(r_A, r_A, r_X), ctx); 760 break; 761 case BPF_ALU | BPF_LSH | BPF_K: 762 if (unlikely(k > 31)) 763 return -1; 764 emit(ARM_LSL_I(r_A, r_A, k), ctx); 765 break; 766 case BPF_ALU | BPF_LSH | BPF_X: 767 update_on_xread(ctx); 768 emit(ARM_LSL_R(r_A, r_A, r_X), ctx); 769 break; 770 case BPF_ALU | BPF_RSH | BPF_K: 771 if (unlikely(k > 31)) 772 return -1; 773 emit(ARM_LSR_I(r_A, r_A, k), ctx); 774 break; 775 case BPF_ALU | BPF_RSH | BPF_X: 776 update_on_xread(ctx); 777 emit(ARM_LSR_R(r_A, r_A, r_X), ctx); 778 break; 779 case BPF_ALU | BPF_NEG: 780 /* A = -A */ 781 emit(ARM_RSB_I(r_A, r_A, 0), ctx); 782 break; 783 case BPF_JMP | BPF_JA: 784 /* pc += K */ 785 emit(ARM_B(b_imm(i + k + 1, ctx)), ctx); 786 break; 787 case BPF_JMP | BPF_JEQ | BPF_K: 788 /* pc += (A == K) ? pc->jt : pc->jf */ 789 condt = ARM_COND_EQ; 790 goto cmp_imm; 791 case BPF_JMP | BPF_JGT | BPF_K: 792 /* pc += (A > K) ? pc->jt : pc->jf */ 793 condt = ARM_COND_HI; 794 goto cmp_imm; 795 case BPF_JMP | BPF_JGE | BPF_K: 796 /* pc += (A >= K) ? pc->jt : pc->jf */ 797 condt = ARM_COND_HS; 798 cmp_imm: 799 imm12 = imm8m(k); 800 if (imm12 < 0) { 801 emit_mov_i_no8m(r_scratch, k, ctx); 802 emit(ARM_CMP_R(r_A, r_scratch), ctx); 803 } else { 804 emit(ARM_CMP_I(r_A, imm12), ctx); 805 } 806 cond_jump: 807 if (inst->jt) 808 _emit(condt, ARM_B(b_imm(i + inst->jt + 1, 809 ctx)), ctx); 810 if (inst->jf) 811 _emit(condt ^ 1, ARM_B(b_imm(i + inst->jf + 1, 812 ctx)), ctx); 813 break; 814 case BPF_JMP | BPF_JEQ | BPF_X: 815 /* pc += (A == X) ? pc->jt : pc->jf */ 816 condt = ARM_COND_EQ; 817 goto cmp_x; 818 case BPF_JMP | BPF_JGT | BPF_X: 819 /* pc += (A > X) ? pc->jt : pc->jf */ 820 condt = ARM_COND_HI; 821 goto cmp_x; 822 case BPF_JMP | BPF_JGE | BPF_X: 823 /* pc += (A >= X) ? pc->jt : pc->jf */ 824 condt = ARM_COND_CS; 825 cmp_x: 826 update_on_xread(ctx); 827 emit(ARM_CMP_R(r_A, r_X), ctx); 828 goto cond_jump; 829 case BPF_JMP | BPF_JSET | BPF_K: 830 /* pc += (A & K) ? pc->jt : pc->jf */ 831 condt = ARM_COND_NE; 832 /* not set iff all zeroes iff Z==1 iff EQ */ 833 834 imm12 = imm8m(k); 835 if (imm12 < 0) { 836 emit_mov_i_no8m(r_scratch, k, ctx); 837 emit(ARM_TST_R(r_A, r_scratch), ctx); 838 } else { 839 emit(ARM_TST_I(r_A, imm12), ctx); 840 } 841 goto cond_jump; 842 case BPF_JMP | BPF_JSET | BPF_X: 843 /* pc += (A & X) ? pc->jt : pc->jf */ 844 update_on_xread(ctx); 845 condt = ARM_COND_NE; 846 emit(ARM_TST_R(r_A, r_X), ctx); 847 goto cond_jump; 848 case BPF_RET | BPF_A: 849 emit(ARM_MOV_R(ARM_R0, r_A), ctx); 850 goto b_epilogue; 851 case BPF_RET | BPF_K: 852 if ((k == 0) && (ctx->ret0_fp_idx < 0)) 853 ctx->ret0_fp_idx = i; 854 emit_mov_i(ARM_R0, k, ctx); 855 b_epilogue: 856 if (i != ctx->skf->len - 1) 857 emit(ARM_B(b_imm(prog->len, ctx)), ctx); 858 break; 859 case BPF_MISC | BPF_TAX: 860 /* X = A */ 861 ctx->seen |= SEEN_X; 862 emit(ARM_MOV_R(r_X, r_A), ctx); 863 break; 864 case BPF_MISC | BPF_TXA: 865 /* A = X */ 866 update_on_xread(ctx); 867 emit(ARM_MOV_R(r_A, r_X), ctx); 868 break; 869 case BPF_ANC | SKF_AD_PROTOCOL: 870 /* A = ntohs(skb->protocol) */ 871 ctx->seen |= SEEN_SKB; 872 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, 873 protocol) != 2); 874 off = offsetof(struct sk_buff, protocol); 875 emit(ARM_LDRH_I(r_scratch, r_skb, off), ctx); 876 emit_swap16(r_A, r_scratch, ctx); 877 break; 878 case BPF_ANC | SKF_AD_CPU: 879 /* r_scratch = current_thread_info() */ 880 OP_IMM3(ARM_BIC, r_scratch, ARM_SP, THREAD_SIZE - 1, ctx); 881 /* A = current_thread_info()->cpu */ 882 BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info, cpu) != 4); 883 off = offsetof(struct thread_info, cpu); 884 emit(ARM_LDR_I(r_A, r_scratch, off), ctx); 885 break; 886 case BPF_ANC | SKF_AD_IFINDEX: 887 case BPF_ANC | SKF_AD_HATYPE: 888 /* A = skb->dev->ifindex */ 889 /* A = skb->dev->type */ 890 ctx->seen |= SEEN_SKB; 891 off = offsetof(struct sk_buff, dev); 892 emit(ARM_LDR_I(r_scratch, r_skb, off), ctx); 893 894 emit(ARM_CMP_I(r_scratch, 0), ctx); 895 emit_err_ret(ARM_COND_EQ, ctx); 896 897 BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, 898 ifindex) != 4); 899 BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, 900 type) != 2); 901 902 if (code == (BPF_ANC | SKF_AD_IFINDEX)) { 903 off = offsetof(struct net_device, ifindex); 904 emit(ARM_LDR_I(r_A, r_scratch, off), ctx); 905 } else { 906 /* 907 * offset of field "type" in "struct 908 * net_device" is above what can be 909 * used in the ldrh rd, [rn, #imm] 910 * instruction, so load the offset in 911 * a register and use ldrh rd, [rn, rm] 912 */ 913 off = offsetof(struct net_device, type); 914 emit_mov_i(ARM_R3, off, ctx); 915 emit(ARM_LDRH_R(r_A, r_scratch, ARM_R3), ctx); 916 } 917 break; 918 case BPF_ANC | SKF_AD_MARK: 919 ctx->seen |= SEEN_SKB; 920 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4); 921 off = offsetof(struct sk_buff, mark); 922 emit(ARM_LDR_I(r_A, r_skb, off), ctx); 923 break; 924 case BPF_ANC | SKF_AD_RXHASH: 925 ctx->seen |= SEEN_SKB; 926 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4); 927 off = offsetof(struct sk_buff, hash); 928 emit(ARM_LDR_I(r_A, r_skb, off), ctx); 929 break; 930 case BPF_ANC | SKF_AD_VLAN_TAG: 931 case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT: 932 ctx->seen |= SEEN_SKB; 933 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2); 934 off = offsetof(struct sk_buff, vlan_tci); 935 emit(ARM_LDRH_I(r_A, r_skb, off), ctx); 936 if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) 937 OP_IMM3(ARM_AND, r_A, r_A, ~VLAN_TAG_PRESENT, ctx); 938 else { 939 OP_IMM3(ARM_LSR, r_A, r_A, 12, ctx); 940 OP_IMM3(ARM_AND, r_A, r_A, 0x1, ctx); 941 } 942 break; 943 case BPF_ANC | SKF_AD_PKTTYPE: 944 ctx->seen |= SEEN_SKB; 945 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, 946 __pkt_type_offset[0]) != 1); 947 off = PKT_TYPE_OFFSET(); 948 emit(ARM_LDRB_I(r_A, r_skb, off), ctx); 949 emit(ARM_AND_I(r_A, r_A, PKT_TYPE_MAX), ctx); 950 #ifdef __BIG_ENDIAN_BITFIELD 951 emit(ARM_LSR_I(r_A, r_A, 5), ctx); 952 #endif 953 break; 954 case BPF_ANC | SKF_AD_QUEUE: 955 ctx->seen |= SEEN_SKB; 956 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, 957 queue_mapping) != 2); 958 BUILD_BUG_ON(offsetof(struct sk_buff, 959 queue_mapping) > 0xff); 960 off = offsetof(struct sk_buff, queue_mapping); 961 emit(ARM_LDRH_I(r_A, r_skb, off), ctx); 962 break; 963 case BPF_ANC | SKF_AD_PAY_OFFSET: 964 ctx->seen |= SEEN_SKB | SEEN_CALL; 965 966 emit(ARM_MOV_R(ARM_R0, r_skb), ctx); 967 emit_mov_i(ARM_R3, (unsigned int)skb_get_poff, ctx); 968 emit_blx_r(ARM_R3, ctx); 969 emit(ARM_MOV_R(r_A, ARM_R0), ctx); 970 break; 971 case BPF_LDX | BPF_W | BPF_ABS: 972 /* 973 * load a 32bit word from struct seccomp_data. 974 * seccomp_check_filter() will already have checked 975 * that k is 32bit aligned and lies within the 976 * struct seccomp_data. 977 */ 978 ctx->seen |= SEEN_SKB; 979 emit(ARM_LDR_I(r_A, r_skb, k), ctx); 980 break; 981 default: 982 return -1; 983 } 984 985 if (ctx->flags & FLAG_IMM_OVERFLOW) 986 /* 987 * this instruction generated an overflow when 988 * trying to access the literal pool, so 989 * delegate this filter to the kernel interpreter. 990 */ 991 return -1; 992 } 993 994 /* compute offsets only during the first pass */ 995 if (ctx->target == NULL) 996 ctx->offsets[i] = ctx->idx * 4; 997 998 return 0; 999 } 1000 1001 1002 void bpf_jit_compile(struct bpf_prog *fp) 1003 { 1004 struct bpf_binary_header *header; 1005 struct jit_ctx ctx; 1006 unsigned tmp_idx; 1007 unsigned alloc_size; 1008 u8 *target_ptr; 1009 1010 if (!bpf_jit_enable) 1011 return; 1012 1013 memset(&ctx, 0, sizeof(ctx)); 1014 ctx.skf = fp; 1015 ctx.ret0_fp_idx = -1; 1016 1017 ctx.offsets = kzalloc(4 * (ctx.skf->len + 1), GFP_KERNEL); 1018 if (ctx.offsets == NULL) 1019 return; 1020 1021 /* fake pass to fill in the ctx->seen */ 1022 if (unlikely(build_body(&ctx))) 1023 goto out; 1024 1025 tmp_idx = ctx.idx; 1026 build_prologue(&ctx); 1027 ctx.prologue_bytes = (ctx.idx - tmp_idx) * 4; 1028 1029 #if __LINUX_ARM_ARCH__ < 7 1030 tmp_idx = ctx.idx; 1031 build_epilogue(&ctx); 1032 ctx.epilogue_bytes = (ctx.idx - tmp_idx) * 4; 1033 1034 ctx.idx += ctx.imm_count; 1035 if (ctx.imm_count) { 1036 ctx.imms = kzalloc(4 * ctx.imm_count, GFP_KERNEL); 1037 if (ctx.imms == NULL) 1038 goto out; 1039 } 1040 #else 1041 /* there's nothing after the epilogue on ARMv7 */ 1042 build_epilogue(&ctx); 1043 #endif 1044 alloc_size = 4 * ctx.idx; 1045 header = bpf_jit_binary_alloc(alloc_size, &target_ptr, 1046 4, jit_fill_hole); 1047 if (header == NULL) 1048 goto out; 1049 1050 ctx.target = (u32 *) target_ptr; 1051 ctx.idx = 0; 1052 1053 build_prologue(&ctx); 1054 if (build_body(&ctx) < 0) { 1055 #if __LINUX_ARM_ARCH__ < 7 1056 if (ctx.imm_count) 1057 kfree(ctx.imms); 1058 #endif 1059 bpf_jit_binary_free(header); 1060 goto out; 1061 } 1062 build_epilogue(&ctx); 1063 1064 flush_icache_range((u32)header, (u32)(ctx.target + ctx.idx)); 1065 1066 #if __LINUX_ARM_ARCH__ < 7 1067 if (ctx.imm_count) 1068 kfree(ctx.imms); 1069 #endif 1070 1071 if (bpf_jit_enable > 1) 1072 /* there are 2 passes here */ 1073 bpf_jit_dump(fp->len, alloc_size, 2, ctx.target); 1074 1075 set_memory_ro((unsigned long)header, header->pages); 1076 fp->bpf_func = (void *)ctx.target; 1077 fp->jited = 1; 1078 out: 1079 kfree(ctx.offsets); 1080 return; 1081 } 1082 1083 void bpf_jit_free(struct bpf_prog *fp) 1084 { 1085 unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK; 1086 struct bpf_binary_header *header = (void *)addr; 1087 1088 if (!fp->jited) 1089 goto free_filter; 1090 1091 set_memory_rw(addr, header->pages); 1092 bpf_jit_binary_free(header); 1093 1094 free_filter: 1095 bpf_prog_unlock_free(fp); 1096 } 1097