/* bpf_jit_comp.c : BPF JIT compiler
 *
 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
 * Internal BPF Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <asm/cacheflush.h>
#include <asm/set_memory.h>
#include <linux/bpf.h>

/*
 * assembly code in arch/x86/net/bpf_jit.S
 */
extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
extern u8 sk_load_word_positive_offset[], sk_load_half_positive_offset[];
extern u8 sk_load_byte_positive_offset[];
extern u8 sk_load_word_negative_offset[], sk_load_half_negative_offset[];
extern u8 sk_load_byte_negative_offset[];

static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
{
	if (len == 1)
		*ptr = bytes;
	else if (len == 2)
		*(u16 *)ptr = bytes;
	else {
		*(u32 *)ptr = bytes;
		barrier();
	}
	return ptr + len;
}

#define EMIT(bytes, len) \
	do { prog = emit_code(prog, bytes, len); cnt += len; } while (0)

#define EMIT1(b1)		EMIT(b1, 1)
#define EMIT2(b1, b2)		EMIT((b1) + ((b2) << 8), 2)
#define EMIT3(b1, b2, b3)	EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
#define EMIT4(b1, b2, b3, b4)	EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
#define EMIT1_off32(b1, off) \
	do { EMIT1(b1); EMIT(off, 4); } while (0)
#define EMIT2_off32(b1, b2, off) \
	do { EMIT2(b1, b2); EMIT(off, 4); } while (0)
#define EMIT3_off32(b1, b2, b3, off) \
	do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
#define EMIT4_off32(b1, b2, b3, b4, off) \
	do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)

static bool is_imm8(int value)
{
	return value <= 127 && value >= -128;
}

static bool is_simm32(s64 value)
{
	return value == (s64) (s32) value;
}
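/* For illustration: EMIT3(0x48, 0x89, 0xE5) packs 'mov rbp,rsp'
 * little-endian into one u32 (0x00E58948). emit_code() stores the full
 * u32 but advances the pointer by only len bytes, so the spare zero
 * byte is overwritten by the next EMIT.
 */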
/* mov dst, src */
#define EMIT_mov(DST, SRC) \
	do { if (DST != SRC) \
		EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
	} while (0)

static int bpf_size_to_x86_bytes(int bpf_size)
{
	if (bpf_size == BPF_W)
		return 4;
	else if (bpf_size == BPF_H)
		return 2;
	else if (bpf_size == BPF_B)
		return 1;
	else if (bpf_size == BPF_DW)
		return 4; /* imm32 */
	else
		return 0;
}

/* list of x86 cond jump opcodes (. + s8)
 * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
 */
#define X86_JB  0x72
#define X86_JAE 0x73
#define X86_JE  0x74
#define X86_JNE 0x75
#define X86_JBE 0x76
#define X86_JA  0x77
#define X86_JL  0x7C
#define X86_JGE 0x7D
#define X86_JLE 0x7E
#define X86_JG  0x7F

static void bpf_flush_icache(void *start, void *end)
{
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	smp_wmb();
	flush_icache_range((unsigned long)start, (unsigned long)end);
	set_fs(old_fs);
}

#define CHOOSE_LOAD_FUNC(K, func) \
	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)

/* pick a register outside of BPF range for JIT internal work */
#define AUX_REG (MAX_BPF_JIT_REG + 1)

/* The following table maps BPF registers to x64 registers.
 *
 * x64 register r12 is unused, since if used as base address
 * register in load/store instructions, it always needs an
 * extra byte of encoding and is callee saved.
 *
 * r9 caches skb->len - skb->data_len
 * r10 caches skb->data, and is used for blinding (if enabled)
 */
static const int reg2hex[] = {
	[BPF_REG_0] = 0,  /* rax */
	[BPF_REG_1] = 7,  /* rdi */
	[BPF_REG_2] = 6,  /* rsi */
	[BPF_REG_3] = 2,  /* rdx */
	[BPF_REG_4] = 1,  /* rcx */
	[BPF_REG_5] = 0,  /* r8 */
	[BPF_REG_6] = 3,  /* rbx, callee saved */
	[BPF_REG_7] = 5,  /* r13, callee saved */
	[BPF_REG_8] = 6,  /* r14, callee saved */
	[BPF_REG_9] = 7,  /* r15, callee saved */
	[BPF_REG_FP] = 5, /* rbp, readonly */
	[BPF_REG_AX] = 2, /* r10 temp register */
	[AUX_REG] = 3,    /* r11 temp register */
};

/* is_ereg() == true if BPF register 'reg' maps to x64 r8..r15,
 * which need an extra byte of encoding.
 * rax,rcx,...,rbp have simpler encoding
 */
static bool is_ereg(u32 reg)
{
	return (1 << reg) & (BIT(BPF_REG_5) |
			     BIT(AUX_REG) |
			     BIT(BPF_REG_7) |
			     BIT(BPF_REG_8) |
			     BIT(BPF_REG_9) |
			     BIT(BPF_REG_AX));
}

static bool is_axreg(u32 reg)
{
	return reg == BPF_REG_0;
}

/* add modifiers if 'reg' maps to x64 registers r8..r15 */
static u8 add_1mod(u8 byte, u32 reg)
{
	if (is_ereg(reg))
		byte |= 1;
	return byte;
}

static u8 add_2mod(u8 byte, u32 r1, u32 r2)
{
	if (is_ereg(r1))
		byte |= 1;
	if (is_ereg(r2))
		byte |= 4;
	return byte;
}

/* encode 'dst_reg' register into x64 opcode 'byte' */
static u8 add_1reg(u8 byte, u32 dst_reg)
{
	return byte + reg2hex[dst_reg];
}

/* encode 'dst_reg' and 'src_reg' registers into x64 opcode 'byte' */
static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
{
	return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
}
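/* Worked example of the helpers above: EMIT_mov(BPF_REG_1, BPF_REG_2)
 * keeps REX at 0x48 (neither register is extended) and builds ModRM
 * 0xC0 + 7 + (6 << 3) = 0xF7, i.e. '48 89 F7' = mov rdi,rsi. With an
 * extended destination such as BPF_REG_8 (r14) and src_reg ==
 * BPF_REG_0 (rax), add_2mod() turns REX into 0x49 and the bytes
 * become '49 89 C6' = mov r14,rax.
 */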
static void jit_fill_hole(void *area, unsigned int size)
{
	/* fill whole space with int3 instructions */
	memset(area, 0xcc, size);
}

struct jit_context {
	int cleanup_addr; /* epilogue code offset */
	bool seen_ld_abs;
	bool seen_ax_reg;
};

/* maximum number of bytes emitted while JITing one eBPF insn */
#define BPF_MAX_INSN_SIZE	128
#define BPF_INSN_SAFETY		64

#define AUX_STACK_SPACE \
	(32 /* space for rbx, r13, r14, r15 */ + \
	 8 /* space for skb_copy_bits() buffer */)

#define PROLOGUE_SIZE 37

/* emit x64 prologue code for BPF program and check its size.
 * bpf_tail_call helper will skip it while jumping into another program
 */
static void emit_prologue(u8 **pprog, u32 stack_depth)
{
	u8 *prog = *pprog;
	int cnt = 0;

	EMIT1(0x55); /* push rbp */
	EMIT3(0x48, 0x89, 0xE5); /* mov rbp,rsp */

	/* sub rsp, rounded_stack_depth + AUX_STACK_SPACE */
	EMIT3_off32(0x48, 0x81, 0xEC,
		    round_up(stack_depth, 8) + AUX_STACK_SPACE);

	/* sub rbp, AUX_STACK_SPACE */
	EMIT4(0x48, 0x83, 0xED, AUX_STACK_SPACE);

	/* all classic BPF filters use R6(rbx), so save it */

	/* mov qword ptr [rbp+0],rbx */
	EMIT4(0x48, 0x89, 0x5D, 0);

	/* bpf_convert_filter() maps classic BPF register X to R7 and uses R8
	 * as temporary, so all tcpdump filters need to spill/fill R7(r13) and
	 * R8(r14). The R9(r15) spill could be made conditional, but there is
	 * only one 'bpf_error' return path out of helper functions inside
	 * bpf_jit.S. The overhead of the extra spill is negligible for any
	 * filter other than synthetic ones. Therefore not worth adding
	 * complexity.
	 */

	/* mov qword ptr [rbp+8],r13 */
	EMIT4(0x4C, 0x89, 0x6D, 8);
	/* mov qword ptr [rbp+16],r14 */
	EMIT4(0x4C, 0x89, 0x75, 16);
	/* mov qword ptr [rbp+24],r15 */
	EMIT4(0x4C, 0x89, 0x7D, 24);

	/* Clear the tail call counter (tail_call_cnt): for eBPF tail calls
	 * we need to reset the counter to 0. It's done in two instructions:
	 * resetting the rax register to 0 (xor on eax gets zero extended)
	 * and moving it to the counter location.
	 */

	/* xor eax, eax */
	EMIT2(0x31, 0xc0);
	/* mov qword ptr [rbp+32], rax */
	EMIT4(0x48, 0x89, 0x45, 32);

	BUILD_BUG_ON(cnt != PROLOGUE_SIZE);
	*pprog = prog;
}
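/* A sketch of the frame emit_prologue() builds, with offsets relative
 * to the adjusted rbp (which is what BPF_REG_FP maps to):
 *
 *   rbp - round_up(stack_depth, 8) .. rbp - 1 : BPF program stack
 *   rbp + 0 / +8 / +16 / +24                  : saved rbx/r13/r14/r15
 *   rbp + 32                                  : qword zeroed above; the
 *                                               tail call counter dword
 *                                               itself is accessed at
 *                                               rbp + 36 by
 *                                               emit_bpf_tail_call()
 */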
/* generate the following code:
 * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ...
 *   if (index >= array->map.max_entries)
 *     goto out;
 *   if (++tail_call_cnt > MAX_TAIL_CALL_CNT)
 *     goto out;
 *   prog = array->ptrs[index];
 *   if (prog == NULL)
 *     goto out;
 *   goto *(prog->bpf_func + prologue_size);
 * out:
 */
static void emit_bpf_tail_call(u8 **pprog)
{
	u8 *prog = *pprog;
	int label1, label2, label3;
	int cnt = 0;

	/* rdi - pointer to ctx
	 * rsi - pointer to bpf_array
	 * rdx - index in bpf_array
	 */

	/* if (index >= array->map.max_entries)
	 *   goto out;
	 */
	EMIT2(0x89, 0xD2);                        /* mov edx, edx */
	EMIT3(0x39, 0x56,                         /* cmp dword ptr [rsi + 16], edx */
	      offsetof(struct bpf_array, map.max_entries));
#define OFFSET1 43 /* number of bytes to jump */
	EMIT2(X86_JBE, OFFSET1);                  /* jbe out */
	label1 = cnt;

	/* if (tail_call_cnt > MAX_TAIL_CALL_CNT)
	 *   goto out;
	 */
	EMIT2_off32(0x8B, 0x85, 36);              /* mov eax, dword ptr [rbp + 36] */
	EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);     /* cmp eax, MAX_TAIL_CALL_CNT */
#define OFFSET2 32
	EMIT2(X86_JA, OFFSET2);                   /* ja out */
	label2 = cnt;
	EMIT3(0x83, 0xC0, 0x01);                  /* add eax, 1 */
	EMIT2_off32(0x89, 0x85, 36);              /* mov dword ptr [rbp + 36], eax */

	/* prog = array->ptrs[index]; */
	EMIT4_off32(0x48, 0x8B, 0x84, 0xD6,       /* mov rax, [rsi + rdx * 8 + offsetof(...)] */
		    offsetof(struct bpf_array, ptrs));

	/* if (prog == NULL)
	 *   goto out;
	 */
	EMIT3(0x48, 0x85, 0xC0);                  /* test rax,rax */
#define OFFSET3 10
	EMIT2(X86_JE, OFFSET3);                   /* je out */
	label3 = cnt;

	/* goto *(prog->bpf_func + prologue_size); */
	EMIT4(0x48, 0x8B, 0x40,                   /* mov rax, qword ptr [rax + 32] */
	      offsetof(struct bpf_prog, bpf_func));
	EMIT4(0x48, 0x83, 0xC0, PROLOGUE_SIZE);   /* add rax, prologue_size */

	/* now we're ready to jump into next BPF program
	 * rdi == ctx (1st arg)
	 * rax == prog->bpf_func + prologue_size
	 */
	EMIT2(0xFF, 0xE0);                        /* jmp rax */

	/* out: */
	BUILD_BUG_ON(cnt - label1 != OFFSET1);
	BUILD_BUG_ON(cnt - label2 != OFFSET2);
	BUILD_BUG_ON(cnt - label3 != OFFSET3);
	*pprog = prog;
}
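/* The hardcoded OFFSET[123] values above are simply the byte counts of
 * the emitted instructions between each forward jump and the 'out'
 * label, e.g. OFFSET3 == 10 covers 'mov rax,[rax+off8]' (4 bytes) +
 * 'add rax,imm8' (4 bytes) + 'jmp rax' (2 bytes). The BUILD_BUG_ON()s
 * keep these constants honest whenever the sequence changes.
 */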
static void emit_load_skb_data_hlen(u8 **pprog)
{
	u8 *prog = *pprog;
	int cnt = 0;

	/* r9d = skb->len - skb->data_len (headlen)
	 * r10 = skb->data
	 */
	/* mov %r9d, off32(%rdi) */
	EMIT3_off32(0x44, 0x8b, 0x8f, offsetof(struct sk_buff, len));

	/* sub %r9d, off32(%rdi) */
	EMIT3_off32(0x44, 0x2b, 0x8f, offsetof(struct sk_buff, data_len));

	/* mov %r10, off32(%rdi) */
	EMIT3_off32(0x4c, 0x8b, 0x97, offsetof(struct sk_buff, data));
	*pprog = prog;
}

static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
		  int oldproglen, struct jit_context *ctx)
{
	struct bpf_insn *insn = bpf_prog->insnsi;
	int insn_cnt = bpf_prog->len;
	bool seen_ld_abs = ctx->seen_ld_abs | (oldproglen == 0);
	bool seen_ax_reg = ctx->seen_ax_reg | (oldproglen == 0);
	bool seen_exit = false;
	u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
	int i, cnt = 0;
	int proglen = 0;
	u8 *prog = temp;

	emit_prologue(&prog, bpf_prog->aux->stack_depth);

	if (seen_ld_abs)
		emit_load_skb_data_hlen(&prog);

	for (i = 0; i < insn_cnt; i++, insn++) {
		const s32 imm32 = insn->imm;
		u32 dst_reg = insn->dst_reg;
		u32 src_reg = insn->src_reg;
		u8 b1 = 0, b2 = 0, b3 = 0;
		s64 jmp_offset;
		u8 jmp_cond;
		bool reload_skb_data;
		int ilen;
		u8 *func;

		if (dst_reg == BPF_REG_AX || src_reg == BPF_REG_AX)
			ctx->seen_ax_reg = seen_ax_reg = true;

		switch (insn->code) {
			/* ALU */
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU64 | BPF_ADD | BPF_X:
		case BPF_ALU64 | BPF_SUB | BPF_X:
		case BPF_ALU64 | BPF_AND | BPF_X:
		case BPF_ALU64 | BPF_OR | BPF_X:
		case BPF_ALU64 | BPF_XOR | BPF_X:
			switch (BPF_OP(insn->code)) {
			case BPF_ADD: b2 = 0x01; break;
			case BPF_SUB: b2 = 0x29; break;
			case BPF_AND: b2 = 0x21; break;
			case BPF_OR: b2 = 0x09; break;
			case BPF_XOR: b2 = 0x31; break;
			}
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_2mod(0x48, dst_reg, src_reg));
			else if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT1(add_2mod(0x40, dst_reg, src_reg));
			EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
			break;

			/* mov dst, src */
		case BPF_ALU64 | BPF_MOV | BPF_X:
			EMIT_mov(dst_reg, src_reg);
			break;

			/* mov32 dst, src */
		case BPF_ALU | BPF_MOV | BPF_X:
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT1(add_2mod(0x40, dst_reg, src_reg));
			EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
			break;

			/* neg dst */
		case BPF_ALU | BPF_NEG:
		case BPF_ALU64 | BPF_NEG:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));
			EMIT2(0xF7, add_1reg(0xD8, dst_reg));
			break;

		case BPF_ALU | BPF_ADD | BPF_K:
		case BPF_ALU | BPF_SUB | BPF_K:
		case BPF_ALU | BPF_AND | BPF_K:
		case BPF_ALU | BPF_OR | BPF_K:
		case BPF_ALU | BPF_XOR | BPF_K:
		case BPF_ALU64 | BPF_ADD | BPF_K:
		case BPF_ALU64 | BPF_SUB | BPF_K:
		case BPF_ALU64 | BPF_AND | BPF_K:
		case BPF_ALU64 | BPF_OR | BPF_K:
		case BPF_ALU64 | BPF_XOR | BPF_K:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			/* b3 holds the 'normal' opcode; the b2 short form is
			 * only valid when dst is eax/rax.
			 */
			switch (BPF_OP(insn->code)) {
			case BPF_ADD:
				b3 = 0xC0;
				b2 = 0x05;
				break;
			case BPF_SUB:
				b3 = 0xE8;
				b2 = 0x2D;
				break;
			case BPF_AND:
				b3 = 0xE0;
				b2 = 0x25;
				break;
			case BPF_OR:
				b3 = 0xC8;
				b2 = 0x0D;
				break;
			case BPF_XOR:
				b3 = 0xF0;
				b2 = 0x35;
				break;
			}

			if (is_imm8(imm32))
				EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
			else if (is_axreg(dst_reg))
				EMIT1_off32(b2, imm32);
			else
				EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
			break;
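			/* Example for the ALU-K cases above:
			 * BPF_ALU64|BPF_ADD|BPF_K with dst_reg == BPF_REG_1
			 * (rdi) and imm32 == 1 takes the imm8 path and emits
			 * '48 83 C7 01' = add rdi,0x1. The same add with rax
			 * as destination and a large imm32 uses the short
			 * form '48 05 imm32' = add rax,imm32.
			 */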
		case BPF_ALU64 | BPF_MOV | BPF_K:
			/* optimization: if imm32 is positive,
			 * use 'mov eax, imm32' (which zero-extends imm32)
			 * to save 2 bytes
			 */
			if (imm32 < 0) {
				/* 'mov rax, imm32' sign extends imm32 */
				b1 = add_1mod(0x48, dst_reg);
				b2 = 0xC7;
				b3 = 0xC0;
				EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
				break;
			}

		case BPF_ALU | BPF_MOV | BPF_K:
			/* optimization: if imm32 is zero, use 'xor <dst>,<dst>'
			 * to save 3 bytes.
			 */
			if (imm32 == 0) {
				if (is_ereg(dst_reg))
					EMIT1(add_2mod(0x40, dst_reg, dst_reg));
				b2 = 0x31; /* xor */
				b3 = 0xC0;
				EMIT2(b2, add_2reg(b3, dst_reg, dst_reg));
				break;
			}

			/* mov %eax, imm32 */
			if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));
			EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
			break;

		case BPF_LD | BPF_IMM | BPF_DW:
			/* optimization: if imm64 is zero, use 'xor <dst>,<dst>'
			 * to save 7 bytes.
			 */
			if (insn[0].imm == 0 && insn[1].imm == 0) {
				b1 = add_2mod(0x48, dst_reg, dst_reg);
				b2 = 0x31; /* xor */
				b3 = 0xC0;
				EMIT3(b1, b2, add_2reg(b3, dst_reg, dst_reg));

				insn++;
				i++;
				break;
			}

			/* movabsq %rax, imm64 */
			EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
			EMIT(insn[0].imm, 4);
			EMIT(insn[1].imm, 4);

			insn++;
			i++;
			break;

			/* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
		case BPF_ALU | BPF_MOD | BPF_X:
		case BPF_ALU | BPF_DIV | BPF_X:
		case BPF_ALU | BPF_MOD | BPF_K:
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU64 | BPF_MOD | BPF_X:
		case BPF_ALU64 | BPF_DIV | BPF_X:
		case BPF_ALU64 | BPF_MOD | BPF_K:
		case BPF_ALU64 | BPF_DIV | BPF_K:
			EMIT1(0x50); /* push rax */
			EMIT1(0x52); /* push rdx */

			if (BPF_SRC(insn->code) == BPF_X)
				/* mov r11, src_reg */
				EMIT_mov(AUX_REG, src_reg);
			else
				/* mov r11, imm32 */
				EMIT3_off32(0x49, 0xC7, 0xC3, imm32);

			/* mov rax, dst_reg */
			EMIT_mov(BPF_REG_0, dst_reg);

			/* xor edx, edx
			 * equivalent to 'xor rdx, rdx', but one byte less
			 */
			EMIT2(0x31, 0xd2);

			if (BPF_CLASS(insn->code) == BPF_ALU64)
				/* div r11 */
				EMIT3(0x49, 0xF7, 0xF3);
			else
				/* div r11d */
				EMIT3(0x41, 0xF7, 0xF3);

			if (BPF_OP(insn->code) == BPF_MOD)
				/* mov r11, rdx */
				EMIT3(0x49, 0x89, 0xD3);
			else
				/* mov r11, rax */
				EMIT3(0x49, 0x89, 0xC3);

			EMIT1(0x5A); /* pop rdx */
			EMIT1(0x58); /* pop rax */

			/* mov dst_reg, r11 */
			EMIT_mov(dst_reg, AUX_REG);
			break;

		case BPF_ALU | BPF_MUL | BPF_K:
		case BPF_ALU | BPF_MUL | BPF_X:
		case BPF_ALU64 | BPF_MUL | BPF_K:
		case BPF_ALU64 | BPF_MUL | BPF_X:
			EMIT1(0x50); /* push rax */
			EMIT1(0x52); /* push rdx */

			/* mov r11, dst_reg */
			EMIT_mov(AUX_REG, dst_reg);

			if (BPF_SRC(insn->code) == BPF_X)
				/* mov rax, src_reg */
				EMIT_mov(BPF_REG_0, src_reg);
			else
				/* mov rax, imm32 */
				EMIT3_off32(0x48, 0xC7, 0xC0, imm32);

			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, AUX_REG));
			else if (is_ereg(AUX_REG))
				EMIT1(add_1mod(0x40, AUX_REG));
			/* mul(q) r11 */
			EMIT2(0xF7, add_1reg(0xE0, AUX_REG));

			/* mov r11, rax */
			EMIT_mov(AUX_REG, BPF_REG_0);

			EMIT1(0x5A); /* pop rdx */
			EMIT1(0x58); /* pop rax */

			/* mov dst_reg, r11 */
			EMIT_mov(dst_reg, AUX_REG);
			break;

			/* shifts */
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_K:
		case BPF_ALU | BPF_ARSH | BPF_K:
		case BPF_ALU64 | BPF_LSH | BPF_K:
		case BPF_ALU64 | BPF_RSH | BPF_K:
		case BPF_ALU64 | BPF_ARSH | BPF_K:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			switch (BPF_OP(insn->code)) {
			case BPF_LSH: b3 = 0xE0; break;
			case BPF_RSH: b3 = 0xE8; break;
			case BPF_ARSH: b3 = 0xF8; break;
			}
			EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
			break;

		case BPF_ALU | BPF_LSH | BPF_X:
		case BPF_ALU | BPF_RSH | BPF_X:
		case BPF_ALU | BPF_ARSH | BPF_X:
		case BPF_ALU64 | BPF_LSH | BPF_X:
		case BPF_ALU64 | BPF_RSH | BPF_X:
		case BPF_ALU64 | BPF_ARSH | BPF_X:

			/* check for bad case when dst_reg == rcx */
			if (dst_reg == BPF_REG_4) {
				/* mov r11, dst_reg */
				EMIT_mov(AUX_REG, dst_reg);
				dst_reg = AUX_REG;
			}

			if (src_reg != BPF_REG_4) { /* common case */
				EMIT1(0x51); /* push rcx */

				/* mov rcx, src_reg */
				EMIT_mov(BPF_REG_4, src_reg);
			}

			/* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			switch (BPF_OP(insn->code)) {
			case BPF_LSH: b3 = 0xE0; break;
			case BPF_RSH: b3 = 0xE8; break;
			case BPF_ARSH: b3 = 0xF8; break;
			}
			EMIT2(0xD3, add_1reg(b3, dst_reg));

			if (src_reg != BPF_REG_4)
				EMIT1(0x59); /* pop rcx */

			if (insn->dst_reg == BPF_REG_4)
				/* mov dst_reg, r11 */
				EMIT_mov(insn->dst_reg, AUX_REG);
			break;

		case BPF_ALU | BPF_END | BPF_FROM_BE:
			switch (imm32) {
			case 16:
				/* emit 'ror %ax, 8' to swap lower 2 bytes */
				EMIT1(0x66);
				if (is_ereg(dst_reg))
					EMIT1(0x41);
				EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);

				/* emit 'movzwl eax, ax' */
				if (is_ereg(dst_reg))
					EMIT3(0x45, 0x0F, 0xB7);
				else
					EMIT2(0x0F, 0xB7);
				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 32:
				/* emit 'bswap eax' to swap lower 4 bytes */
				if (is_ereg(dst_reg))
					EMIT2(0x41, 0x0F);
				else
					EMIT1(0x0F);
				EMIT1(add_1reg(0xC8, dst_reg));
				break;
			case 64:
				/* emit 'bswap rax' to swap 8 bytes */
				EMIT3(add_1mod(0x48, dst_reg), 0x0F,
				      add_1reg(0xC8, dst_reg));
				break;
			}
			break;

		case BPF_ALU | BPF_END | BPF_FROM_LE:
			switch (imm32) {
			case 16:
				/* emit 'movzwl eax, ax' to zero extend 16-bit
				 * into 64 bit
				 */
				if (is_ereg(dst_reg))
					EMIT3(0x45, 0x0F, 0xB7);
				else
					EMIT2(0x0F, 0xB7);
				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 32:
				/* emit 'mov eax, eax' to clear upper 32-bits */
				if (is_ereg(dst_reg))
					EMIT1(0x45);
				EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 64:
				/* nop */
				break;
			}
			break;
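			/* Example for the BPF_END cases above: BPF_FROM_BE
			 * with imm32 == 64 on BPF_REG_0 emits '48 0F C8' =
			 * bswap rax; the 32-bit variant drops the REX prefix
			 * ('0F C8' = bswap eax), which is why only the ereg
			 * case needs the extra 0x41 byte.
			 */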
			/* ST: *(u8*)(dst_reg + off) = imm */
		case BPF_ST | BPF_MEM | BPF_B:
			if (is_ereg(dst_reg))
				EMIT2(0x41, 0xC6);
			else
				EMIT1(0xC6);
			goto st;
		case BPF_ST | BPF_MEM | BPF_H:
			if (is_ereg(dst_reg))
				EMIT3(0x66, 0x41, 0xC7);
			else
				EMIT2(0x66, 0xC7);
			goto st;
		case BPF_ST | BPF_MEM | BPF_W:
			if (is_ereg(dst_reg))
				EMIT2(0x41, 0xC7);
			else
				EMIT1(0xC7);
			goto st;
		case BPF_ST | BPF_MEM | BPF_DW:
			EMIT2(add_1mod(0x48, dst_reg), 0xC7);

st:			if (is_imm8(insn->off))
				EMIT2(add_1reg(0x40, dst_reg), insn->off);
			else
				EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);

			EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
			break;

			/* STX: *(u8*)(dst_reg + off) = src_reg */
		case BPF_STX | BPF_MEM | BPF_B:
			/* emit 'mov byte ptr [rax + off], al' */
			if (is_ereg(dst_reg) || is_ereg(src_reg) ||
			    /* have to add extra byte for x86 SIL, DIL regs */
			    src_reg == BPF_REG_1 || src_reg == BPF_REG_2)
				EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
			else
				EMIT1(0x88);
			goto stx;
		case BPF_STX | BPF_MEM | BPF_H:
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
			else
				EMIT2(0x66, 0x89);
			goto stx;
		case BPF_STX | BPF_MEM | BPF_W:
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
			else
				EMIT1(0x89);
			goto stx;
		case BPF_STX | BPF_MEM | BPF_DW:
			EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
stx:			if (is_imm8(insn->off))
				EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
			else
				EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
					    insn->off);
			break;

			/* LDX: dst_reg = *(u8*)(src_reg + off) */
		case BPF_LDX | BPF_MEM | BPF_B:
			/* emit 'movzx rax, byte ptr [rax + off]' */
			EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
			goto ldx;
		case BPF_LDX | BPF_MEM | BPF_H:
			/* emit 'movzx rax, word ptr [rax + off]' */
			EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
			goto ldx;
		case BPF_LDX | BPF_MEM | BPF_W:
			/* emit 'mov eax, dword ptr [rax+0x14]' */
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
			else
				EMIT1(0x8B);
			goto ldx;
		case BPF_LDX | BPF_MEM | BPF_DW:
			/* emit 'mov rax, qword ptr [rax+0x14]' */
			EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
ldx:			/* if insn->off == 0 we could save one extra byte, but
			 * the special case of x86 r13, which always needs an
			 * offset, is not worth the hassle
			 */
			if (is_imm8(insn->off))
				EMIT2(add_2reg(0x40, src_reg, dst_reg), insn->off);
			else
				EMIT1_off32(add_2reg(0x80, src_reg, dst_reg),
					    insn->off);
			break;

			/* STX XADD: lock *(u32*)(dst_reg + off) += src_reg */
		case BPF_STX | BPF_XADD | BPF_W:
			/* emit 'lock add dword ptr [rax + off], eax' */
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT3(0xF0, add_2mod(0x40, dst_reg, src_reg), 0x01);
			else
				EMIT2(0xF0, 0x01);
			goto xadd;
		case BPF_STX | BPF_XADD | BPF_DW:
			EMIT3(0xF0, add_2mod(0x48, dst_reg, src_reg), 0x01);
xadd:			if (is_imm8(insn->off))
				EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
			else
				EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
					    insn->off);
			break;
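			/* Example for the ST/STX/LDX cases above:
			 * BPF_STX|BPF_MEM|BPF_DW with dst_reg == BPF_REG_1,
			 * src_reg == BPF_REG_0 and off == 8 emits
			 * '48 89 47 08' = mov qword ptr [rdi+8],rax. The 0x40
			 * base of the ModRM byte selects the disp8 form; 0x80
			 * selects the disp32 form used for larger offsets.
			 */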
			/* call */
		case BPF_JMP | BPF_CALL:
			func = (u8 *) __bpf_call_base + imm32;
			jmp_offset = func - (image + addrs[i]);
			if (seen_ld_abs) {
				reload_skb_data = bpf_helper_changes_pkt_data(func);
				if (reload_skb_data) {
					EMIT1(0x57); /* push %rdi */
					jmp_offset += 22; /* pop, mov, sub, mov */
				} else {
					EMIT2(0x41, 0x52); /* push %r10 */
					EMIT2(0x41, 0x51); /* push %r9 */
					/* need to adjust jmp offset, since
					 * pop %r9, pop %r10 take 4 bytes after call insn
					 */
					jmp_offset += 4;
				}
			}
			if (!imm32 || !is_simm32(jmp_offset)) {
				pr_err("unsupported bpf func %d addr %p image %p\n",
				       imm32, func, image);
				return -EINVAL;
			}
			EMIT1_off32(0xE8, jmp_offset);
			if (seen_ld_abs) {
				if (reload_skb_data) {
					EMIT1(0x5F); /* pop %rdi */
					emit_load_skb_data_hlen(&prog);
				} else {
					EMIT2(0x41, 0x59); /* pop %r9 */
					EMIT2(0x41, 0x5A); /* pop %r10 */
				}
			}
			break;

		case BPF_JMP | BPF_TAIL_CALL:
			emit_bpf_tail_call(&prog);
			break;

			/* cond jump */
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JNE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JLT | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JLE | BPF_X:
		case BPF_JMP | BPF_JSGT | BPF_X:
		case BPF_JMP | BPF_JSLT | BPF_X:
		case BPF_JMP | BPF_JSGE | BPF_X:
		case BPF_JMP | BPF_JSLE | BPF_X:
			/* cmp dst_reg, src_reg */
			EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x39,
			      add_2reg(0xC0, dst_reg, src_reg));
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JSET | BPF_X:
			/* test dst_reg, src_reg */
			EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x85,
			      add_2reg(0xC0, dst_reg, src_reg));
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JSET | BPF_K:
			/* test dst_reg, imm32 */
			EMIT1(add_1mod(0x48, dst_reg));
			EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JNE | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JLT | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JLE | BPF_K:
		case BPF_JMP | BPF_JSGT | BPF_K:
		case BPF_JMP | BPF_JSLT | BPF_K:
		case BPF_JMP | BPF_JSGE | BPF_K:
		case BPF_JMP | BPF_JSLE | BPF_K:
			/* cmp dst_reg, imm8/32 */
			EMIT1(add_1mod(0x48, dst_reg));

			if (is_imm8(imm32))
				EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
			else
				EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32);

emit_cond_jmp:		/* convert BPF opcode to x86 */
			switch (BPF_OP(insn->code)) {
			case BPF_JEQ:
				jmp_cond = X86_JE;
				break;
			case BPF_JSET:
			case BPF_JNE:
				jmp_cond = X86_JNE;
				break;
			case BPF_JGT:
				/* GT is unsigned '>', JA in x86 */
				jmp_cond = X86_JA;
				break;
			case BPF_JLT:
				/* LT is unsigned '<', JB in x86 */
				jmp_cond = X86_JB;
				break;
			case BPF_JGE:
				/* GE is unsigned '>=', JAE in x86 */
				jmp_cond = X86_JAE;
				break;
			case BPF_JLE:
				/* LE is unsigned '<=', JBE in x86 */
				jmp_cond = X86_JBE;
				break;
			case BPF_JSGT:
				/* signed '>', GT in x86 */
				jmp_cond = X86_JG;
				break;
			case BPF_JSLT:
				/* signed '<', LT in x86 */
				jmp_cond = X86_JL;
				break;
			case BPF_JSGE:
				/* signed '>=', GE in x86 */
				jmp_cond = X86_JGE;
				break;
			case BPF_JSLE:
				/* signed '<=', LE in x86 */
				jmp_cond = X86_JLE;
				break;
			default: /* to silence gcc warning */
				return -EFAULT;
			}
			jmp_offset = addrs[i + insn->off] - addrs[i];
			if (is_imm8(jmp_offset)) {
				EMIT2(jmp_cond, jmp_offset);
			} else if (is_simm32(jmp_offset)) {
				EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
			} else {
				pr_err("cond_jmp gen bug %llx\n", jmp_offset);
				return -EFAULT;
			}

			break;
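			/* Example for the conditional jumps above: a BPF_JEQ
			 * with an in-range target emits the two-byte '74 xx'
			 * (je . + imm8); once jmp_offset no longer fits in s8,
			 * the same condition becomes '0F 84 xx xx xx xx', i.e.
			 * jmp_cond + 0x10 with the extra 0x0F prefix noted at
			 * the X86_J* defines.
			 */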
		case BPF_JMP | BPF_JA:
			jmp_offset = addrs[i + insn->off] - addrs[i];
			if (!jmp_offset)
				/* optimize out nop jumps */
				break;
emit_jmp:
			if (is_imm8(jmp_offset)) {
				EMIT2(0xEB, jmp_offset);
			} else if (is_simm32(jmp_offset)) {
				EMIT1_off32(0xE9, jmp_offset);
			} else {
				pr_err("jmp gen bug %llx\n", jmp_offset);
				return -EFAULT;
			}
			break;

		case BPF_LD | BPF_IND | BPF_W:
			func = sk_load_word;
			goto common_load;
		case BPF_LD | BPF_ABS | BPF_W:
			func = CHOOSE_LOAD_FUNC(imm32, sk_load_word);
common_load:
			ctx->seen_ld_abs = seen_ld_abs = true;
			jmp_offset = func - (image + addrs[i]);
			if (!func || !is_simm32(jmp_offset)) {
				pr_err("unsupported bpf func %d addr %p image %p\n",
				       imm32, func, image);
				return -EINVAL;
			}
			if (BPF_MODE(insn->code) == BPF_ABS) {
				/* mov %esi, imm32 */
				EMIT1_off32(0xBE, imm32);
			} else {
				/* mov %rsi, src_reg */
				EMIT_mov(BPF_REG_2, src_reg);
				if (imm32) {
					if (is_imm8(imm32))
						/* add %esi, imm8 */
						EMIT3(0x83, 0xC6, imm32);
					else
						/* add %esi, imm32 */
						EMIT2_off32(0x81, 0xC6, imm32);
				}
			}
			/* skb pointer is in R6 (%rbx), it will be copied into
			 * %rdi if skb_copy_bits() call is necessary.
			 * sk_load_* helpers also use %r10 and %r9d.
			 * See bpf_jit.S
			 */
			if (seen_ax_reg)
				/* r10 = skb->data, mov %r10, off32(%rbx) */
				EMIT3_off32(0x4c, 0x8b, 0x93,
					    offsetof(struct sk_buff, data));
			EMIT1_off32(0xE8, jmp_offset); /* call */
			break;

		case BPF_LD | BPF_IND | BPF_H:
			func = sk_load_half;
			goto common_load;
		case BPF_LD | BPF_ABS | BPF_H:
			func = CHOOSE_LOAD_FUNC(imm32, sk_load_half);
			goto common_load;
		case BPF_LD | BPF_IND | BPF_B:
			func = sk_load_byte;
			goto common_load;
		case BPF_LD | BPF_ABS | BPF_B:
			func = CHOOSE_LOAD_FUNC(imm32, sk_load_byte);
			goto common_load;

		case BPF_JMP | BPF_EXIT:
			if (seen_exit) {
				jmp_offset = ctx->cleanup_addr - addrs[i];
				goto emit_jmp;
			}
			seen_exit = true;
			/* update cleanup_addr */
			ctx->cleanup_addr = proglen;
			/* mov rbx, qword ptr [rbp+0] */
			EMIT4(0x48, 0x8B, 0x5D, 0);
			/* mov r13, qword ptr [rbp+8] */
			EMIT4(0x4C, 0x8B, 0x6D, 8);
			/* mov r14, qword ptr [rbp+16] */
			EMIT4(0x4C, 0x8B, 0x75, 16);
			/* mov r15, qword ptr [rbp+24] */
			EMIT4(0x4C, 0x8B, 0x7D, 24);

			/* add rbp, AUX_STACK_SPACE */
			EMIT4(0x48, 0x83, 0xC5, AUX_STACK_SPACE);
			EMIT1(0xC9); /* leave */
			EMIT1(0xC3); /* ret */
			break;

		default:
			/* By design the x64 JIT should support all BPF
			 * instructions. This error will be seen if a new
			 * instruction was added to the interpreter but not
			 * to the JIT, or if there is junk in bpf_prog.
			 */
			pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
			return -EINVAL;
		}

		ilen = prog - temp;
		if (ilen > BPF_MAX_INSN_SIZE) {
			pr_err("bpf_jit: fatal insn size error\n");
			return -EFAULT;
		}

		if (image) {
			if (unlikely(proglen + ilen > oldproglen)) {
				pr_err("bpf_jit: fatal error\n");
				return -EFAULT;
			}
			memcpy(image + proglen, temp, ilen);
		}
		proglen += ilen;
		addrs[i] = proglen;
		prog = temp;
	}
	return proglen;
}
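/* Note on jump resolution: after insn i is emitted, addrs[i] holds the
 * image offset at which insn i ends, i.e. where the next insn starts.
 * That is why the jump cases in do_jit() can compute their rel8/rel32
 * displacement simply as addrs[i + insn->off] - addrs[i].
 */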
struct x64_jit_data {
	struct bpf_binary_header *header;
	int *addrs;
	u8 *image;
	int proglen;
	struct jit_context ctx;
};

struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
	struct bpf_binary_header *header = NULL;
	struct bpf_prog *tmp, *orig_prog = prog;
	struct x64_jit_data *jit_data;
	int proglen, oldproglen = 0;
	struct jit_context ctx = {};
	bool tmp_blinded = false;
	bool extra_pass = false;
	u8 *image = NULL;
	int *addrs;
	int pass;
	int i;

	if (!prog->jit_requested)
		return orig_prog;

	tmp = bpf_jit_blind_constants(prog);
	/* If blinding was requested and we failed during blinding,
	 * we must fall back to the interpreter.
	 */
	if (IS_ERR(tmp))
		return orig_prog;
	if (tmp != prog) {
		tmp_blinded = true;
		prog = tmp;
	}

	jit_data = prog->aux->jit_data;
	if (!jit_data) {
		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
		if (!jit_data) {
			prog = orig_prog;
			goto out;
		}
		prog->aux->jit_data = jit_data;
	}
	addrs = jit_data->addrs;
	if (addrs) {
		ctx = jit_data->ctx;
		oldproglen = jit_data->proglen;
		image = jit_data->image;
		header = jit_data->header;
		extra_pass = true;
		goto skip_init_addrs;
	}
	addrs = kmalloc(prog->len * sizeof(*addrs), GFP_KERNEL);
	if (!addrs) {
		prog = orig_prog;
		goto out_addrs;
	}

	/* Before first pass, make a rough estimation of addrs[]:
	 * each bpf instruction is translated to less than 64 bytes
	 */
	for (proglen = 0, i = 0; i < prog->len; i++) {
		proglen += 64;
		addrs[i] = proglen;
	}
	ctx.cleanup_addr = proglen;
skip_init_addrs:

	/* JITed image shrinks with every pass and the loop iterates
	 * until the image stops shrinking. Very large bpf programs
	 * may converge on the last pass. In such a case, do one more
	 * pass to emit the final image.
	 */
	for (pass = 0; pass < 10 || image; pass++) {
		proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
		if (proglen <= 0) {
			image = NULL;
			if (header)
				bpf_jit_binary_free(header);
			prog = orig_prog;
			goto out_addrs;
		}
		if (image) {
			if (proglen != oldproglen) {
				pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
				       proglen, oldproglen);
				prog = orig_prog;
				goto out_addrs;
			}
			break;
		}
		if (proglen == oldproglen) {
			header = bpf_jit_binary_alloc(proglen, &image,
						      1, jit_fill_hole);
			if (!header) {
				prog = orig_prog;
				goto out_addrs;
			}
		}
		oldproglen = proglen;
	}

	if (bpf_jit_enable > 1)
		bpf_jit_dump(prog->len, proglen, pass + 1, image);

	if (image) {
		bpf_flush_icache(header, image + proglen);
		if (!prog->is_func || extra_pass) {
			bpf_jit_binary_lock_ro(header);
		} else {
			jit_data->addrs = addrs;
			jit_data->ctx = ctx;
			jit_data->proglen = proglen;
			jit_data->image = image;
			jit_data->header = header;
		}
		prog->bpf_func = (void *)image;
		prog->jited = 1;
		prog->jited_len = proglen;
	} else {
		prog = orig_prog;
	}

	if (!prog->is_func || extra_pass) {
out_addrs:
		kfree(addrs);
		kfree(jit_data);
		prog->aux->jit_data = NULL;
	}
out:
	if (tmp_blinded)
		bpf_jit_prog_release_other(prog, prog == orig_prog ?
					   tmp : orig_prog);
	return prog;
}