1 /* bpf_jit_comp.c : BPF JIT compiler 2 * 3 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com) 4 * Internal BPF Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com 5 * 6 * This program is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU General Public License 8 * as published by the Free Software Foundation; version 2 9 * of the License. 10 */ 11 #include <linux/netdevice.h> 12 #include <linux/filter.h> 13 #include <linux/if_vlan.h> 14 #include <asm/cacheflush.h> 15 #include <linux/bpf.h> 16 17 int bpf_jit_enable __read_mostly; 18 19 /* 20 * assembly code in arch/x86/net/bpf_jit.S 21 */ 22 extern u8 sk_load_word[], sk_load_half[], sk_load_byte[]; 23 extern u8 sk_load_word_positive_offset[], sk_load_half_positive_offset[]; 24 extern u8 sk_load_byte_positive_offset[]; 25 extern u8 sk_load_word_negative_offset[], sk_load_half_negative_offset[]; 26 extern u8 sk_load_byte_negative_offset[]; 27 28 static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len) 29 { 30 if (len == 1) 31 *ptr = bytes; 32 else if (len == 2) 33 *(u16 *)ptr = bytes; 34 else { 35 *(u32 *)ptr = bytes; 36 barrier(); 37 } 38 return ptr + len; 39 } 40 41 #define EMIT(bytes, len) \ 42 do { prog = emit_code(prog, bytes, len); cnt += len; } while (0) 43 44 #define EMIT1(b1) EMIT(b1, 1) 45 #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2) 46 #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3) 47 #define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4) 48 #define EMIT1_off32(b1, off) \ 49 do {EMIT1(b1); EMIT(off, 4); } while (0) 50 #define EMIT2_off32(b1, b2, off) \ 51 do {EMIT2(b1, b2); EMIT(off, 4); } while (0) 52 #define EMIT3_off32(b1, b2, b3, off) \ 53 do {EMIT3(b1, b2, b3); EMIT(off, 4); } while (0) 54 #define EMIT4_off32(b1, b2, b3, b4, off) \ 55 do {EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0) 56 57 static bool is_imm8(int value) 58 { 59 return value <= 127 && value >= -128; 60 } 61 62 static bool is_simm32(s64 value) 63 { 64 return value == (s64) (s32) value; 65 } 66 67 /* mov dst, src */ 68 #define EMIT_mov(DST, SRC) \ 69 do {if (DST != SRC) \ 70 EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \ 71 } while (0) 72 73 static int bpf_size_to_x86_bytes(int bpf_size) 74 { 75 if (bpf_size == BPF_W) 76 return 4; 77 else if (bpf_size == BPF_H) 78 return 2; 79 else if (bpf_size == BPF_B) 80 return 1; 81 else if (bpf_size == BPF_DW) 82 return 4; /* imm32 */ 83 else 84 return 0; 85 } 86 87 /* list of x86 cond jumps opcodes (. + s8) 88 * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32) 89 */ 90 #define X86_JB 0x72 91 #define X86_JAE 0x73 92 #define X86_JE 0x74 93 #define X86_JNE 0x75 94 #define X86_JBE 0x76 95 #define X86_JA 0x77 96 #define X86_JGE 0x7D 97 #define X86_JG 0x7F 98 99 static void bpf_flush_icache(void *start, void *end) 100 { 101 mm_segment_t old_fs = get_fs(); 102 103 set_fs(KERNEL_DS); 104 smp_wmb(); 105 flush_icache_range((unsigned long)start, (unsigned long)end); 106 set_fs(old_fs); 107 } 108 109 #define CHOOSE_LOAD_FUNC(K, func) \ 110 ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset) 111 112 /* pick a register outside of BPF range for JIT internal work */ 113 #define AUX_REG (MAX_BPF_REG + 1) 114 115 /* the following table maps BPF registers to x64 registers. 116 * x64 register r12 is unused, since if used as base address register 117 * in load/store instructions, it always needs an extra byte of encoding 118 */ 119 static const int reg2hex[] = { 120 [BPF_REG_0] = 0, /* rax */ 121 [BPF_REG_1] = 7, /* rdi */ 122 [BPF_REG_2] = 6, /* rsi */ 123 [BPF_REG_3] = 2, /* rdx */ 124 [BPF_REG_4] = 1, /* rcx */ 125 [BPF_REG_5] = 0, /* r8 */ 126 [BPF_REG_6] = 3, /* rbx callee saved */ 127 [BPF_REG_7] = 5, /* r13 callee saved */ 128 [BPF_REG_8] = 6, /* r14 callee saved */ 129 [BPF_REG_9] = 7, /* r15 callee saved */ 130 [BPF_REG_FP] = 5, /* rbp readonly */ 131 [AUX_REG] = 3, /* r11 temp register */ 132 }; 133 134 /* is_ereg() == true if BPF register 'reg' maps to x64 r8..r15 135 * which need extra byte of encoding. 136 * rax,rcx,...,rbp have simpler encoding 137 */ 138 static bool is_ereg(u32 reg) 139 { 140 return (1 << reg) & (BIT(BPF_REG_5) | 141 BIT(AUX_REG) | 142 BIT(BPF_REG_7) | 143 BIT(BPF_REG_8) | 144 BIT(BPF_REG_9)); 145 } 146 147 /* add modifiers if 'reg' maps to x64 registers r8..r15 */ 148 static u8 add_1mod(u8 byte, u32 reg) 149 { 150 if (is_ereg(reg)) 151 byte |= 1; 152 return byte; 153 } 154 155 static u8 add_2mod(u8 byte, u32 r1, u32 r2) 156 { 157 if (is_ereg(r1)) 158 byte |= 1; 159 if (is_ereg(r2)) 160 byte |= 4; 161 return byte; 162 } 163 164 /* encode 'dst_reg' register into x64 opcode 'byte' */ 165 static u8 add_1reg(u8 byte, u32 dst_reg) 166 { 167 return byte + reg2hex[dst_reg]; 168 } 169 170 /* encode 'dst_reg' and 'src_reg' registers into x64 opcode 'byte' */ 171 static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg) 172 { 173 return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3); 174 } 175 176 static void jit_fill_hole(void *area, unsigned int size) 177 { 178 /* fill whole space with int3 instructions */ 179 memset(area, 0xcc, size); 180 } 181 182 struct jit_context { 183 int cleanup_addr; /* epilogue code offset */ 184 bool seen_ld_abs; 185 }; 186 187 /* maximum number of bytes emitted while JITing one eBPF insn */ 188 #define BPF_MAX_INSN_SIZE 128 189 #define BPF_INSN_SAFETY 64 190 191 #define STACKSIZE \ 192 (MAX_BPF_STACK + \ 193 32 /* space for rbx, r13, r14, r15 */ + \ 194 8 /* space for skb_copy_bits() buffer */) 195 196 #define PROLOGUE_SIZE 51 197 198 /* emit x64 prologue code for BPF program and check it's size. 199 * bpf_tail_call helper will skip it while jumping into another program 200 */ 201 static void emit_prologue(u8 **pprog) 202 { 203 u8 *prog = *pprog; 204 int cnt = 0; 205 206 EMIT1(0x55); /* push rbp */ 207 EMIT3(0x48, 0x89, 0xE5); /* mov rbp,rsp */ 208 209 /* sub rsp, STACKSIZE */ 210 EMIT3_off32(0x48, 0x81, 0xEC, STACKSIZE); 211 212 /* all classic BPF filters use R6(rbx) save it */ 213 214 /* mov qword ptr [rbp-X],rbx */ 215 EMIT3_off32(0x48, 0x89, 0x9D, -STACKSIZE); 216 217 /* bpf_convert_filter() maps classic BPF register X to R7 and uses R8 218 * as temporary, so all tcpdump filters need to spill/fill R7(r13) and 219 * R8(r14). R9(r15) spill could be made conditional, but there is only 220 * one 'bpf_error' return path out of helper functions inside bpf_jit.S 221 * The overhead of extra spill is negligible for any filter other 222 * than synthetic ones. Therefore not worth adding complexity. 223 */ 224 225 /* mov qword ptr [rbp-X],r13 */ 226 EMIT3_off32(0x4C, 0x89, 0xAD, -STACKSIZE + 8); 227 /* mov qword ptr [rbp-X],r14 */ 228 EMIT3_off32(0x4C, 0x89, 0xB5, -STACKSIZE + 16); 229 /* mov qword ptr [rbp-X],r15 */ 230 EMIT3_off32(0x4C, 0x89, 0xBD, -STACKSIZE + 24); 231 232 /* clear A and X registers */ 233 EMIT2(0x31, 0xc0); /* xor eax, eax */ 234 EMIT3(0x4D, 0x31, 0xED); /* xor r13, r13 */ 235 236 /* clear tail_cnt: mov qword ptr [rbp-X], rax */ 237 EMIT3_off32(0x48, 0x89, 0x85, -STACKSIZE + 32); 238 239 BUILD_BUG_ON(cnt != PROLOGUE_SIZE); 240 *pprog = prog; 241 } 242 243 /* generate the following code: 244 * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ... 245 * if (index >= array->map.max_entries) 246 * goto out; 247 * if (++tail_call_cnt > MAX_TAIL_CALL_CNT) 248 * goto out; 249 * prog = array->ptrs[index]; 250 * if (prog == NULL) 251 * goto out; 252 * goto *(prog->bpf_func + prologue_size); 253 * out: 254 */ 255 static void emit_bpf_tail_call(u8 **pprog) 256 { 257 u8 *prog = *pprog; 258 int label1, label2, label3; 259 int cnt = 0; 260 261 /* rdi - pointer to ctx 262 * rsi - pointer to bpf_array 263 * rdx - index in bpf_array 264 */ 265 266 /* if (index >= array->map.max_entries) 267 * goto out; 268 */ 269 EMIT4(0x48, 0x8B, 0x46, /* mov rax, qword ptr [rsi + 16] */ 270 offsetof(struct bpf_array, map.max_entries)); 271 EMIT3(0x48, 0x39, 0xD0); /* cmp rax, rdx */ 272 #define OFFSET1 47 /* number of bytes to jump */ 273 EMIT2(X86_JBE, OFFSET1); /* jbe out */ 274 label1 = cnt; 275 276 /* if (tail_call_cnt > MAX_TAIL_CALL_CNT) 277 * goto out; 278 */ 279 EMIT2_off32(0x8B, 0x85, -STACKSIZE + 36); /* mov eax, dword ptr [rbp - 516] */ 280 EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */ 281 #define OFFSET2 36 282 EMIT2(X86_JA, OFFSET2); /* ja out */ 283 label2 = cnt; 284 EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */ 285 EMIT2_off32(0x89, 0x85, -STACKSIZE + 36); /* mov dword ptr [rbp - 516], eax */ 286 287 /* prog = array->ptrs[index]; */ 288 EMIT4_off32(0x48, 0x8D, 0x84, 0xD6, /* lea rax, [rsi + rdx * 8 + offsetof(...)] */ 289 offsetof(struct bpf_array, ptrs)); 290 EMIT3(0x48, 0x8B, 0x00); /* mov rax, qword ptr [rax] */ 291 292 /* if (prog == NULL) 293 * goto out; 294 */ 295 EMIT4(0x48, 0x83, 0xF8, 0x00); /* cmp rax, 0 */ 296 #define OFFSET3 10 297 EMIT2(X86_JE, OFFSET3); /* je out */ 298 label3 = cnt; 299 300 /* goto *(prog->bpf_func + prologue_size); */ 301 EMIT4(0x48, 0x8B, 0x40, /* mov rax, qword ptr [rax + 32] */ 302 offsetof(struct bpf_prog, bpf_func)); 303 EMIT4(0x48, 0x83, 0xC0, PROLOGUE_SIZE); /* add rax, prologue_size */ 304 305 /* now we're ready to jump into next BPF program 306 * rdi == ctx (1st arg) 307 * rax == prog->bpf_func + prologue_size 308 */ 309 EMIT2(0xFF, 0xE0); /* jmp rax */ 310 311 /* out: */ 312 BUILD_BUG_ON(cnt - label1 != OFFSET1); 313 BUILD_BUG_ON(cnt - label2 != OFFSET2); 314 BUILD_BUG_ON(cnt - label3 != OFFSET3); 315 *pprog = prog; 316 } 317 318 319 static void emit_load_skb_data_hlen(u8 **pprog) 320 { 321 u8 *prog = *pprog; 322 int cnt = 0; 323 324 /* r9d = skb->len - skb->data_len (headlen) 325 * r10 = skb->data 326 */ 327 /* mov %r9d, off32(%rdi) */ 328 EMIT3_off32(0x44, 0x8b, 0x8f, offsetof(struct sk_buff, len)); 329 330 /* sub %r9d, off32(%rdi) */ 331 EMIT3_off32(0x44, 0x2b, 0x8f, offsetof(struct sk_buff, data_len)); 332 333 /* mov %r10, off32(%rdi) */ 334 EMIT3_off32(0x4c, 0x8b, 0x97, offsetof(struct sk_buff, data)); 335 *pprog = prog; 336 } 337 338 static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, 339 int oldproglen, struct jit_context *ctx) 340 { 341 struct bpf_insn *insn = bpf_prog->insnsi; 342 int insn_cnt = bpf_prog->len; 343 bool seen_ld_abs = ctx->seen_ld_abs | (oldproglen == 0); 344 bool seen_exit = false; 345 u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY]; 346 int i, cnt = 0; 347 int proglen = 0; 348 u8 *prog = temp; 349 350 emit_prologue(&prog); 351 352 if (seen_ld_abs) 353 emit_load_skb_data_hlen(&prog); 354 355 for (i = 0; i < insn_cnt; i++, insn++) { 356 const s32 imm32 = insn->imm; 357 u32 dst_reg = insn->dst_reg; 358 u32 src_reg = insn->src_reg; 359 u8 b1 = 0, b2 = 0, b3 = 0; 360 s64 jmp_offset; 361 u8 jmp_cond; 362 bool reload_skb_data; 363 int ilen; 364 u8 *func; 365 366 switch (insn->code) { 367 /* ALU */ 368 case BPF_ALU | BPF_ADD | BPF_X: 369 case BPF_ALU | BPF_SUB | BPF_X: 370 case BPF_ALU | BPF_AND | BPF_X: 371 case BPF_ALU | BPF_OR | BPF_X: 372 case BPF_ALU | BPF_XOR | BPF_X: 373 case BPF_ALU64 | BPF_ADD | BPF_X: 374 case BPF_ALU64 | BPF_SUB | BPF_X: 375 case BPF_ALU64 | BPF_AND | BPF_X: 376 case BPF_ALU64 | BPF_OR | BPF_X: 377 case BPF_ALU64 | BPF_XOR | BPF_X: 378 switch (BPF_OP(insn->code)) { 379 case BPF_ADD: b2 = 0x01; break; 380 case BPF_SUB: b2 = 0x29; break; 381 case BPF_AND: b2 = 0x21; break; 382 case BPF_OR: b2 = 0x09; break; 383 case BPF_XOR: b2 = 0x31; break; 384 } 385 if (BPF_CLASS(insn->code) == BPF_ALU64) 386 EMIT1(add_2mod(0x48, dst_reg, src_reg)); 387 else if (is_ereg(dst_reg) || is_ereg(src_reg)) 388 EMIT1(add_2mod(0x40, dst_reg, src_reg)); 389 EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg)); 390 break; 391 392 /* mov dst, src */ 393 case BPF_ALU64 | BPF_MOV | BPF_X: 394 EMIT_mov(dst_reg, src_reg); 395 break; 396 397 /* mov32 dst, src */ 398 case BPF_ALU | BPF_MOV | BPF_X: 399 if (is_ereg(dst_reg) || is_ereg(src_reg)) 400 EMIT1(add_2mod(0x40, dst_reg, src_reg)); 401 EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg)); 402 break; 403 404 /* neg dst */ 405 case BPF_ALU | BPF_NEG: 406 case BPF_ALU64 | BPF_NEG: 407 if (BPF_CLASS(insn->code) == BPF_ALU64) 408 EMIT1(add_1mod(0x48, dst_reg)); 409 else if (is_ereg(dst_reg)) 410 EMIT1(add_1mod(0x40, dst_reg)); 411 EMIT2(0xF7, add_1reg(0xD8, dst_reg)); 412 break; 413 414 case BPF_ALU | BPF_ADD | BPF_K: 415 case BPF_ALU | BPF_SUB | BPF_K: 416 case BPF_ALU | BPF_AND | BPF_K: 417 case BPF_ALU | BPF_OR | BPF_K: 418 case BPF_ALU | BPF_XOR | BPF_K: 419 case BPF_ALU64 | BPF_ADD | BPF_K: 420 case BPF_ALU64 | BPF_SUB | BPF_K: 421 case BPF_ALU64 | BPF_AND | BPF_K: 422 case BPF_ALU64 | BPF_OR | BPF_K: 423 case BPF_ALU64 | BPF_XOR | BPF_K: 424 if (BPF_CLASS(insn->code) == BPF_ALU64) 425 EMIT1(add_1mod(0x48, dst_reg)); 426 else if (is_ereg(dst_reg)) 427 EMIT1(add_1mod(0x40, dst_reg)); 428 429 switch (BPF_OP(insn->code)) { 430 case BPF_ADD: b3 = 0xC0; break; 431 case BPF_SUB: b3 = 0xE8; break; 432 case BPF_AND: b3 = 0xE0; break; 433 case BPF_OR: b3 = 0xC8; break; 434 case BPF_XOR: b3 = 0xF0; break; 435 } 436 437 if (is_imm8(imm32)) 438 EMIT3(0x83, add_1reg(b3, dst_reg), imm32); 439 else 440 EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32); 441 break; 442 443 case BPF_ALU64 | BPF_MOV | BPF_K: 444 /* optimization: if imm32 is positive, 445 * use 'mov eax, imm32' (which zero-extends imm32) 446 * to save 2 bytes 447 */ 448 if (imm32 < 0) { 449 /* 'mov rax, imm32' sign extends imm32 */ 450 b1 = add_1mod(0x48, dst_reg); 451 b2 = 0xC7; 452 b3 = 0xC0; 453 EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32); 454 break; 455 } 456 457 case BPF_ALU | BPF_MOV | BPF_K: 458 /* mov %eax, imm32 */ 459 if (is_ereg(dst_reg)) 460 EMIT1(add_1mod(0x40, dst_reg)); 461 EMIT1_off32(add_1reg(0xB8, dst_reg), imm32); 462 break; 463 464 case BPF_LD | BPF_IMM | BPF_DW: 465 if (insn[1].code != 0 || insn[1].src_reg != 0 || 466 insn[1].dst_reg != 0 || insn[1].off != 0) { 467 /* verifier must catch invalid insns */ 468 pr_err("invalid BPF_LD_IMM64 insn\n"); 469 return -EINVAL; 470 } 471 472 /* movabsq %rax, imm64 */ 473 EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg)); 474 EMIT(insn[0].imm, 4); 475 EMIT(insn[1].imm, 4); 476 477 insn++; 478 i++; 479 break; 480 481 /* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */ 482 case BPF_ALU | BPF_MOD | BPF_X: 483 case BPF_ALU | BPF_DIV | BPF_X: 484 case BPF_ALU | BPF_MOD | BPF_K: 485 case BPF_ALU | BPF_DIV | BPF_K: 486 case BPF_ALU64 | BPF_MOD | BPF_X: 487 case BPF_ALU64 | BPF_DIV | BPF_X: 488 case BPF_ALU64 | BPF_MOD | BPF_K: 489 case BPF_ALU64 | BPF_DIV | BPF_K: 490 EMIT1(0x50); /* push rax */ 491 EMIT1(0x52); /* push rdx */ 492 493 if (BPF_SRC(insn->code) == BPF_X) 494 /* mov r11, src_reg */ 495 EMIT_mov(AUX_REG, src_reg); 496 else 497 /* mov r11, imm32 */ 498 EMIT3_off32(0x49, 0xC7, 0xC3, imm32); 499 500 /* mov rax, dst_reg */ 501 EMIT_mov(BPF_REG_0, dst_reg); 502 503 /* xor edx, edx 504 * equivalent to 'xor rdx, rdx', but one byte less 505 */ 506 EMIT2(0x31, 0xd2); 507 508 if (BPF_SRC(insn->code) == BPF_X) { 509 /* if (src_reg == 0) return 0 */ 510 511 /* cmp r11, 0 */ 512 EMIT4(0x49, 0x83, 0xFB, 0x00); 513 514 /* jne .+9 (skip over pop, pop, xor and jmp) */ 515 EMIT2(X86_JNE, 1 + 1 + 2 + 5); 516 EMIT1(0x5A); /* pop rdx */ 517 EMIT1(0x58); /* pop rax */ 518 EMIT2(0x31, 0xc0); /* xor eax, eax */ 519 520 /* jmp cleanup_addr 521 * addrs[i] - 11, because there are 11 bytes 522 * after this insn: div, mov, pop, pop, mov 523 */ 524 jmp_offset = ctx->cleanup_addr - (addrs[i] - 11); 525 EMIT1_off32(0xE9, jmp_offset); 526 } 527 528 if (BPF_CLASS(insn->code) == BPF_ALU64) 529 /* div r11 */ 530 EMIT3(0x49, 0xF7, 0xF3); 531 else 532 /* div r11d */ 533 EMIT3(0x41, 0xF7, 0xF3); 534 535 if (BPF_OP(insn->code) == BPF_MOD) 536 /* mov r11, rdx */ 537 EMIT3(0x49, 0x89, 0xD3); 538 else 539 /* mov r11, rax */ 540 EMIT3(0x49, 0x89, 0xC3); 541 542 EMIT1(0x5A); /* pop rdx */ 543 EMIT1(0x58); /* pop rax */ 544 545 /* mov dst_reg, r11 */ 546 EMIT_mov(dst_reg, AUX_REG); 547 break; 548 549 case BPF_ALU | BPF_MUL | BPF_K: 550 case BPF_ALU | BPF_MUL | BPF_X: 551 case BPF_ALU64 | BPF_MUL | BPF_K: 552 case BPF_ALU64 | BPF_MUL | BPF_X: 553 EMIT1(0x50); /* push rax */ 554 EMIT1(0x52); /* push rdx */ 555 556 /* mov r11, dst_reg */ 557 EMIT_mov(AUX_REG, dst_reg); 558 559 if (BPF_SRC(insn->code) == BPF_X) 560 /* mov rax, src_reg */ 561 EMIT_mov(BPF_REG_0, src_reg); 562 else 563 /* mov rax, imm32 */ 564 EMIT3_off32(0x48, 0xC7, 0xC0, imm32); 565 566 if (BPF_CLASS(insn->code) == BPF_ALU64) 567 EMIT1(add_1mod(0x48, AUX_REG)); 568 else if (is_ereg(AUX_REG)) 569 EMIT1(add_1mod(0x40, AUX_REG)); 570 /* mul(q) r11 */ 571 EMIT2(0xF7, add_1reg(0xE0, AUX_REG)); 572 573 /* mov r11, rax */ 574 EMIT_mov(AUX_REG, BPF_REG_0); 575 576 EMIT1(0x5A); /* pop rdx */ 577 EMIT1(0x58); /* pop rax */ 578 579 /* mov dst_reg, r11 */ 580 EMIT_mov(dst_reg, AUX_REG); 581 break; 582 583 /* shifts */ 584 case BPF_ALU | BPF_LSH | BPF_K: 585 case BPF_ALU | BPF_RSH | BPF_K: 586 case BPF_ALU | BPF_ARSH | BPF_K: 587 case BPF_ALU64 | BPF_LSH | BPF_K: 588 case BPF_ALU64 | BPF_RSH | BPF_K: 589 case BPF_ALU64 | BPF_ARSH | BPF_K: 590 if (BPF_CLASS(insn->code) == BPF_ALU64) 591 EMIT1(add_1mod(0x48, dst_reg)); 592 else if (is_ereg(dst_reg)) 593 EMIT1(add_1mod(0x40, dst_reg)); 594 595 switch (BPF_OP(insn->code)) { 596 case BPF_LSH: b3 = 0xE0; break; 597 case BPF_RSH: b3 = 0xE8; break; 598 case BPF_ARSH: b3 = 0xF8; break; 599 } 600 EMIT3(0xC1, add_1reg(b3, dst_reg), imm32); 601 break; 602 603 case BPF_ALU | BPF_LSH | BPF_X: 604 case BPF_ALU | BPF_RSH | BPF_X: 605 case BPF_ALU | BPF_ARSH | BPF_X: 606 case BPF_ALU64 | BPF_LSH | BPF_X: 607 case BPF_ALU64 | BPF_RSH | BPF_X: 608 case BPF_ALU64 | BPF_ARSH | BPF_X: 609 610 /* check for bad case when dst_reg == rcx */ 611 if (dst_reg == BPF_REG_4) { 612 /* mov r11, dst_reg */ 613 EMIT_mov(AUX_REG, dst_reg); 614 dst_reg = AUX_REG; 615 } 616 617 if (src_reg != BPF_REG_4) { /* common case */ 618 EMIT1(0x51); /* push rcx */ 619 620 /* mov rcx, src_reg */ 621 EMIT_mov(BPF_REG_4, src_reg); 622 } 623 624 /* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */ 625 if (BPF_CLASS(insn->code) == BPF_ALU64) 626 EMIT1(add_1mod(0x48, dst_reg)); 627 else if (is_ereg(dst_reg)) 628 EMIT1(add_1mod(0x40, dst_reg)); 629 630 switch (BPF_OP(insn->code)) { 631 case BPF_LSH: b3 = 0xE0; break; 632 case BPF_RSH: b3 = 0xE8; break; 633 case BPF_ARSH: b3 = 0xF8; break; 634 } 635 EMIT2(0xD3, add_1reg(b3, dst_reg)); 636 637 if (src_reg != BPF_REG_4) 638 EMIT1(0x59); /* pop rcx */ 639 640 if (insn->dst_reg == BPF_REG_4) 641 /* mov dst_reg, r11 */ 642 EMIT_mov(insn->dst_reg, AUX_REG); 643 break; 644 645 case BPF_ALU | BPF_END | BPF_FROM_BE: 646 switch (imm32) { 647 case 16: 648 /* emit 'ror %ax, 8' to swap lower 2 bytes */ 649 EMIT1(0x66); 650 if (is_ereg(dst_reg)) 651 EMIT1(0x41); 652 EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8); 653 654 /* emit 'movzwl eax, ax' */ 655 if (is_ereg(dst_reg)) 656 EMIT3(0x45, 0x0F, 0xB7); 657 else 658 EMIT2(0x0F, 0xB7); 659 EMIT1(add_2reg(0xC0, dst_reg, dst_reg)); 660 break; 661 case 32: 662 /* emit 'bswap eax' to swap lower 4 bytes */ 663 if (is_ereg(dst_reg)) 664 EMIT2(0x41, 0x0F); 665 else 666 EMIT1(0x0F); 667 EMIT1(add_1reg(0xC8, dst_reg)); 668 break; 669 case 64: 670 /* emit 'bswap rax' to swap 8 bytes */ 671 EMIT3(add_1mod(0x48, dst_reg), 0x0F, 672 add_1reg(0xC8, dst_reg)); 673 break; 674 } 675 break; 676 677 case BPF_ALU | BPF_END | BPF_FROM_LE: 678 switch (imm32) { 679 case 16: 680 /* emit 'movzwl eax, ax' to zero extend 16-bit 681 * into 64 bit 682 */ 683 if (is_ereg(dst_reg)) 684 EMIT3(0x45, 0x0F, 0xB7); 685 else 686 EMIT2(0x0F, 0xB7); 687 EMIT1(add_2reg(0xC0, dst_reg, dst_reg)); 688 break; 689 case 32: 690 /* emit 'mov eax, eax' to clear upper 32-bits */ 691 if (is_ereg(dst_reg)) 692 EMIT1(0x45); 693 EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg)); 694 break; 695 case 64: 696 /* nop */ 697 break; 698 } 699 break; 700 701 /* ST: *(u8*)(dst_reg + off) = imm */ 702 case BPF_ST | BPF_MEM | BPF_B: 703 if (is_ereg(dst_reg)) 704 EMIT2(0x41, 0xC6); 705 else 706 EMIT1(0xC6); 707 goto st; 708 case BPF_ST | BPF_MEM | BPF_H: 709 if (is_ereg(dst_reg)) 710 EMIT3(0x66, 0x41, 0xC7); 711 else 712 EMIT2(0x66, 0xC7); 713 goto st; 714 case BPF_ST | BPF_MEM | BPF_W: 715 if (is_ereg(dst_reg)) 716 EMIT2(0x41, 0xC7); 717 else 718 EMIT1(0xC7); 719 goto st; 720 case BPF_ST | BPF_MEM | BPF_DW: 721 EMIT2(add_1mod(0x48, dst_reg), 0xC7); 722 723 st: if (is_imm8(insn->off)) 724 EMIT2(add_1reg(0x40, dst_reg), insn->off); 725 else 726 EMIT1_off32(add_1reg(0x80, dst_reg), insn->off); 727 728 EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code))); 729 break; 730 731 /* STX: *(u8*)(dst_reg + off) = src_reg */ 732 case BPF_STX | BPF_MEM | BPF_B: 733 /* emit 'mov byte ptr [rax + off], al' */ 734 if (is_ereg(dst_reg) || is_ereg(src_reg) || 735 /* have to add extra byte for x86 SIL, DIL regs */ 736 src_reg == BPF_REG_1 || src_reg == BPF_REG_2) 737 EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88); 738 else 739 EMIT1(0x88); 740 goto stx; 741 case BPF_STX | BPF_MEM | BPF_H: 742 if (is_ereg(dst_reg) || is_ereg(src_reg)) 743 EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89); 744 else 745 EMIT2(0x66, 0x89); 746 goto stx; 747 case BPF_STX | BPF_MEM | BPF_W: 748 if (is_ereg(dst_reg) || is_ereg(src_reg)) 749 EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89); 750 else 751 EMIT1(0x89); 752 goto stx; 753 case BPF_STX | BPF_MEM | BPF_DW: 754 EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89); 755 stx: if (is_imm8(insn->off)) 756 EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off); 757 else 758 EMIT1_off32(add_2reg(0x80, dst_reg, src_reg), 759 insn->off); 760 break; 761 762 /* LDX: dst_reg = *(u8*)(src_reg + off) */ 763 case BPF_LDX | BPF_MEM | BPF_B: 764 /* emit 'movzx rax, byte ptr [rax + off]' */ 765 EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6); 766 goto ldx; 767 case BPF_LDX | BPF_MEM | BPF_H: 768 /* emit 'movzx rax, word ptr [rax + off]' */ 769 EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7); 770 goto ldx; 771 case BPF_LDX | BPF_MEM | BPF_W: 772 /* emit 'mov eax, dword ptr [rax+0x14]' */ 773 if (is_ereg(dst_reg) || is_ereg(src_reg)) 774 EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B); 775 else 776 EMIT1(0x8B); 777 goto ldx; 778 case BPF_LDX | BPF_MEM | BPF_DW: 779 /* emit 'mov rax, qword ptr [rax+0x14]' */ 780 EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B); 781 ldx: /* if insn->off == 0 we can save one extra byte, but 782 * special case of x86 r13 which always needs an offset 783 * is not worth the hassle 784 */ 785 if (is_imm8(insn->off)) 786 EMIT2(add_2reg(0x40, src_reg, dst_reg), insn->off); 787 else 788 EMIT1_off32(add_2reg(0x80, src_reg, dst_reg), 789 insn->off); 790 break; 791 792 /* STX XADD: lock *(u32*)(dst_reg + off) += src_reg */ 793 case BPF_STX | BPF_XADD | BPF_W: 794 /* emit 'lock add dword ptr [rax + off], eax' */ 795 if (is_ereg(dst_reg) || is_ereg(src_reg)) 796 EMIT3(0xF0, add_2mod(0x40, dst_reg, src_reg), 0x01); 797 else 798 EMIT2(0xF0, 0x01); 799 goto xadd; 800 case BPF_STX | BPF_XADD | BPF_DW: 801 EMIT3(0xF0, add_2mod(0x48, dst_reg, src_reg), 0x01); 802 xadd: if (is_imm8(insn->off)) 803 EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off); 804 else 805 EMIT1_off32(add_2reg(0x80, dst_reg, src_reg), 806 insn->off); 807 break; 808 809 /* call */ 810 case BPF_JMP | BPF_CALL: 811 func = (u8 *) __bpf_call_base + imm32; 812 jmp_offset = func - (image + addrs[i]); 813 if (seen_ld_abs) { 814 reload_skb_data = bpf_helper_changes_skb_data(func); 815 if (reload_skb_data) { 816 EMIT1(0x57); /* push %rdi */ 817 jmp_offset += 22; /* pop, mov, sub, mov */ 818 } else { 819 EMIT2(0x41, 0x52); /* push %r10 */ 820 EMIT2(0x41, 0x51); /* push %r9 */ 821 /* need to adjust jmp offset, since 822 * pop %r9, pop %r10 take 4 bytes after call insn 823 */ 824 jmp_offset += 4; 825 } 826 } 827 if (!imm32 || !is_simm32(jmp_offset)) { 828 pr_err("unsupported bpf func %d addr %p image %p\n", 829 imm32, func, image); 830 return -EINVAL; 831 } 832 EMIT1_off32(0xE8, jmp_offset); 833 if (seen_ld_abs) { 834 if (reload_skb_data) { 835 EMIT1(0x5F); /* pop %rdi */ 836 emit_load_skb_data_hlen(&prog); 837 } else { 838 EMIT2(0x41, 0x59); /* pop %r9 */ 839 EMIT2(0x41, 0x5A); /* pop %r10 */ 840 } 841 } 842 break; 843 844 case BPF_JMP | BPF_CALL | BPF_X: 845 emit_bpf_tail_call(&prog); 846 break; 847 848 /* cond jump */ 849 case BPF_JMP | BPF_JEQ | BPF_X: 850 case BPF_JMP | BPF_JNE | BPF_X: 851 case BPF_JMP | BPF_JGT | BPF_X: 852 case BPF_JMP | BPF_JGE | BPF_X: 853 case BPF_JMP | BPF_JSGT | BPF_X: 854 case BPF_JMP | BPF_JSGE | BPF_X: 855 /* cmp dst_reg, src_reg */ 856 EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x39, 857 add_2reg(0xC0, dst_reg, src_reg)); 858 goto emit_cond_jmp; 859 860 case BPF_JMP | BPF_JSET | BPF_X: 861 /* test dst_reg, src_reg */ 862 EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x85, 863 add_2reg(0xC0, dst_reg, src_reg)); 864 goto emit_cond_jmp; 865 866 case BPF_JMP | BPF_JSET | BPF_K: 867 /* test dst_reg, imm32 */ 868 EMIT1(add_1mod(0x48, dst_reg)); 869 EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32); 870 goto emit_cond_jmp; 871 872 case BPF_JMP | BPF_JEQ | BPF_K: 873 case BPF_JMP | BPF_JNE | BPF_K: 874 case BPF_JMP | BPF_JGT | BPF_K: 875 case BPF_JMP | BPF_JGE | BPF_K: 876 case BPF_JMP | BPF_JSGT | BPF_K: 877 case BPF_JMP | BPF_JSGE | BPF_K: 878 /* cmp dst_reg, imm8/32 */ 879 EMIT1(add_1mod(0x48, dst_reg)); 880 881 if (is_imm8(imm32)) 882 EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32); 883 else 884 EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32); 885 886 emit_cond_jmp: /* convert BPF opcode to x86 */ 887 switch (BPF_OP(insn->code)) { 888 case BPF_JEQ: 889 jmp_cond = X86_JE; 890 break; 891 case BPF_JSET: 892 case BPF_JNE: 893 jmp_cond = X86_JNE; 894 break; 895 case BPF_JGT: 896 /* GT is unsigned '>', JA in x86 */ 897 jmp_cond = X86_JA; 898 break; 899 case BPF_JGE: 900 /* GE is unsigned '>=', JAE in x86 */ 901 jmp_cond = X86_JAE; 902 break; 903 case BPF_JSGT: 904 /* signed '>', GT in x86 */ 905 jmp_cond = X86_JG; 906 break; 907 case BPF_JSGE: 908 /* signed '>=', GE in x86 */ 909 jmp_cond = X86_JGE; 910 break; 911 default: /* to silence gcc warning */ 912 return -EFAULT; 913 } 914 jmp_offset = addrs[i + insn->off] - addrs[i]; 915 if (is_imm8(jmp_offset)) { 916 EMIT2(jmp_cond, jmp_offset); 917 } else if (is_simm32(jmp_offset)) { 918 EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset); 919 } else { 920 pr_err("cond_jmp gen bug %llx\n", jmp_offset); 921 return -EFAULT; 922 } 923 924 break; 925 926 case BPF_JMP | BPF_JA: 927 jmp_offset = addrs[i + insn->off] - addrs[i]; 928 if (!jmp_offset) 929 /* optimize out nop jumps */ 930 break; 931 emit_jmp: 932 if (is_imm8(jmp_offset)) { 933 EMIT2(0xEB, jmp_offset); 934 } else if (is_simm32(jmp_offset)) { 935 EMIT1_off32(0xE9, jmp_offset); 936 } else { 937 pr_err("jmp gen bug %llx\n", jmp_offset); 938 return -EFAULT; 939 } 940 break; 941 942 case BPF_LD | BPF_IND | BPF_W: 943 func = sk_load_word; 944 goto common_load; 945 case BPF_LD | BPF_ABS | BPF_W: 946 func = CHOOSE_LOAD_FUNC(imm32, sk_load_word); 947 common_load: 948 ctx->seen_ld_abs = seen_ld_abs = true; 949 jmp_offset = func - (image + addrs[i]); 950 if (!func || !is_simm32(jmp_offset)) { 951 pr_err("unsupported bpf func %d addr %p image %p\n", 952 imm32, func, image); 953 return -EINVAL; 954 } 955 if (BPF_MODE(insn->code) == BPF_ABS) { 956 /* mov %esi, imm32 */ 957 EMIT1_off32(0xBE, imm32); 958 } else { 959 /* mov %rsi, src_reg */ 960 EMIT_mov(BPF_REG_2, src_reg); 961 if (imm32) { 962 if (is_imm8(imm32)) 963 /* add %esi, imm8 */ 964 EMIT3(0x83, 0xC6, imm32); 965 else 966 /* add %esi, imm32 */ 967 EMIT2_off32(0x81, 0xC6, imm32); 968 } 969 } 970 /* skb pointer is in R6 (%rbx), it will be copied into 971 * %rdi if skb_copy_bits() call is necessary. 972 * sk_load_* helpers also use %r10 and %r9d. 973 * See bpf_jit.S 974 */ 975 EMIT1_off32(0xE8, jmp_offset); /* call */ 976 break; 977 978 case BPF_LD | BPF_IND | BPF_H: 979 func = sk_load_half; 980 goto common_load; 981 case BPF_LD | BPF_ABS | BPF_H: 982 func = CHOOSE_LOAD_FUNC(imm32, sk_load_half); 983 goto common_load; 984 case BPF_LD | BPF_IND | BPF_B: 985 func = sk_load_byte; 986 goto common_load; 987 case BPF_LD | BPF_ABS | BPF_B: 988 func = CHOOSE_LOAD_FUNC(imm32, sk_load_byte); 989 goto common_load; 990 991 case BPF_JMP | BPF_EXIT: 992 if (seen_exit) { 993 jmp_offset = ctx->cleanup_addr - addrs[i]; 994 goto emit_jmp; 995 } 996 seen_exit = true; 997 /* update cleanup_addr */ 998 ctx->cleanup_addr = proglen; 999 /* mov rbx, qword ptr [rbp-X] */ 1000 EMIT3_off32(0x48, 0x8B, 0x9D, -STACKSIZE); 1001 /* mov r13, qword ptr [rbp-X] */ 1002 EMIT3_off32(0x4C, 0x8B, 0xAD, -STACKSIZE + 8); 1003 /* mov r14, qword ptr [rbp-X] */ 1004 EMIT3_off32(0x4C, 0x8B, 0xB5, -STACKSIZE + 16); 1005 /* mov r15, qword ptr [rbp-X] */ 1006 EMIT3_off32(0x4C, 0x8B, 0xBD, -STACKSIZE + 24); 1007 1008 EMIT1(0xC9); /* leave */ 1009 EMIT1(0xC3); /* ret */ 1010 break; 1011 1012 default: 1013 /* By design x64 JIT should support all BPF instructions 1014 * This error will be seen if new instruction was added 1015 * to interpreter, but not to JIT 1016 * or if there is junk in bpf_prog 1017 */ 1018 pr_err("bpf_jit: unknown opcode %02x\n", insn->code); 1019 return -EINVAL; 1020 } 1021 1022 ilen = prog - temp; 1023 if (ilen > BPF_MAX_INSN_SIZE) { 1024 pr_err("bpf_jit_compile fatal insn size error\n"); 1025 return -EFAULT; 1026 } 1027 1028 if (image) { 1029 if (unlikely(proglen + ilen > oldproglen)) { 1030 pr_err("bpf_jit_compile fatal error\n"); 1031 return -EFAULT; 1032 } 1033 memcpy(image + proglen, temp, ilen); 1034 } 1035 proglen += ilen; 1036 addrs[i] = proglen; 1037 prog = temp; 1038 } 1039 return proglen; 1040 } 1041 1042 void bpf_jit_compile(struct bpf_prog *prog) 1043 { 1044 } 1045 1046 void bpf_int_jit_compile(struct bpf_prog *prog) 1047 { 1048 struct bpf_binary_header *header = NULL; 1049 int proglen, oldproglen = 0; 1050 struct jit_context ctx = {}; 1051 u8 *image = NULL; 1052 int *addrs; 1053 int pass; 1054 int i; 1055 1056 if (!bpf_jit_enable) 1057 return; 1058 1059 if (!prog || !prog->len) 1060 return; 1061 1062 addrs = kmalloc(prog->len * sizeof(*addrs), GFP_KERNEL); 1063 if (!addrs) 1064 return; 1065 1066 /* Before first pass, make a rough estimation of addrs[] 1067 * each bpf instruction is translated to less than 64 bytes 1068 */ 1069 for (proglen = 0, i = 0; i < prog->len; i++) { 1070 proglen += 64; 1071 addrs[i] = proglen; 1072 } 1073 ctx.cleanup_addr = proglen; 1074 1075 /* JITed image shrinks with every pass and the loop iterates 1076 * until the image stops shrinking. Very large bpf programs 1077 * may converge on the last pass. In such case do one more 1078 * pass to emit the final image 1079 */ 1080 for (pass = 0; pass < 10 || image; pass++) { 1081 proglen = do_jit(prog, addrs, image, oldproglen, &ctx); 1082 if (proglen <= 0) { 1083 image = NULL; 1084 if (header) 1085 bpf_jit_binary_free(header); 1086 goto out; 1087 } 1088 if (image) { 1089 if (proglen != oldproglen) { 1090 pr_err("bpf_jit: proglen=%d != oldproglen=%d\n", 1091 proglen, oldproglen); 1092 goto out; 1093 } 1094 break; 1095 } 1096 if (proglen == oldproglen) { 1097 header = bpf_jit_binary_alloc(proglen, &image, 1098 1, jit_fill_hole); 1099 if (!header) 1100 goto out; 1101 } 1102 oldproglen = proglen; 1103 } 1104 1105 if (bpf_jit_enable > 1) 1106 bpf_jit_dump(prog->len, proglen, pass + 1, image); 1107 1108 if (image) { 1109 bpf_flush_icache(header, image + proglen); 1110 set_memory_ro((unsigned long)header, header->pages); 1111 prog->bpf_func = (void *)image; 1112 prog->jited = true; 1113 } 1114 out: 1115 kfree(addrs); 1116 } 1117 1118 void bpf_jit_free(struct bpf_prog *fp) 1119 { 1120 unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK; 1121 struct bpf_binary_header *header = (void *)addr; 1122 1123 if (!fp->jited) 1124 goto free_filter; 1125 1126 set_memory_rw(addr, header->pages); 1127 bpf_jit_binary_free(header); 1128 1129 free_filter: 1130 bpf_prog_unlock_free(fp); 1131 } 1132