1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * bpf_jit_comp.c: BPF JIT compiler 4 * 5 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com) 6 * Internal BPF Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com 7 */ 8 #include <linux/netdevice.h> 9 #include <linux/filter.h> 10 #include <linux/if_vlan.h> 11 #include <linux/bpf.h> 12 #include <linux/memory.h> 13 #include <linux/sort.h> 14 #include <asm/extable.h> 15 #include <asm/set_memory.h> 16 #include <asm/nospec-branch.h> 17 #include <asm/text-patching.h> 18 #include <asm/asm-prototypes.h> 19 20 static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len) 21 { 22 if (len == 1) 23 *ptr = bytes; 24 else if (len == 2) 25 *(u16 *)ptr = bytes; 26 else { 27 *(u32 *)ptr = bytes; 28 barrier(); 29 } 30 return ptr + len; 31 } 32 33 #define EMIT(bytes, len) \ 34 do { prog = emit_code(prog, bytes, len); cnt += len; } while (0) 35 36 #define EMIT1(b1) EMIT(b1, 1) 37 #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2) 38 #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3) 39 #define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4) 40 41 #define EMIT1_off32(b1, off) \ 42 do { EMIT1(b1); EMIT(off, 4); } while (0) 43 #define EMIT2_off32(b1, b2, off) \ 44 do { EMIT2(b1, b2); EMIT(off, 4); } while (0) 45 #define EMIT3_off32(b1, b2, b3, off) \ 46 do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0) 47 #define EMIT4_off32(b1, b2, b3, b4, off) \ 48 do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0) 49 50 static bool is_imm8(int value) 51 { 52 return value <= 127 && value >= -128; 53 } 54 55 static bool is_simm32(s64 value) 56 { 57 return value == (s64)(s32)value; 58 } 59 60 static bool is_uimm32(u64 value) 61 { 62 return value == (u64)(u32)value; 63 } 64 65 /* mov dst, src */ 66 #define EMIT_mov(DST, SRC) \ 67 do { \ 68 if (DST != SRC) \ 69 EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \ 70 } while (0) 71 72 static int bpf_size_to_x86_bytes(int bpf_size) 73 { 74 if (bpf_size == BPF_W) 75 return 4; 76 else if (bpf_size == BPF_H) 77 return 2; 78 else if (bpf_size == BPF_B) 79 return 1; 80 else if (bpf_size == BPF_DW) 81 return 4; /* imm32 */ 82 else 83 return 0; 84 } 85 86 /* 87 * List of x86 cond jumps opcodes (. + s8) 88 * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32) 89 */ 90 #define X86_JB 0x72 91 #define X86_JAE 0x73 92 #define X86_JE 0x74 93 #define X86_JNE 0x75 94 #define X86_JBE 0x76 95 #define X86_JA 0x77 96 #define X86_JL 0x7C 97 #define X86_JGE 0x7D 98 #define X86_JLE 0x7E 99 #define X86_JG 0x7F 100 101 /* Pick a register outside of BPF range for JIT internal work */ 102 #define AUX_REG (MAX_BPF_JIT_REG + 1) 103 #define X86_REG_R9 (MAX_BPF_JIT_REG + 2) 104 105 /* 106 * The following table maps BPF registers to x86-64 registers. 107 * 108 * x86-64 register R12 is unused, since if used as base address 109 * register in load/store instructions, it always needs an 110 * extra byte of encoding and is callee saved. 111 * 112 * x86-64 register R9 is not used by BPF programs, but can be used by BPF 113 * trampoline. x86-64 register R10 is used for blinding (if enabled). 
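 *
 * As a concrete example of how the mapping below combines with the REX
 * helpers further down: BPF_REG_7 lives in R13, so reg2hex[BPF_REG_7] is 5
 * (the low three bits of R13) and is_ereg() supplies the extra REX bit.
 * EMIT_mov(BPF_REG_7, BPF_REG_1) therefore encodes 'mov r13, rdi' as the
 * three bytes 49 89 fd (REX.WB prefix, opcode 0x89, ModRM 0xfd).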
114 */ 115 static const int reg2hex[] = { 116 [BPF_REG_0] = 0, /* RAX */ 117 [BPF_REG_1] = 7, /* RDI */ 118 [BPF_REG_2] = 6, /* RSI */ 119 [BPF_REG_3] = 2, /* RDX */ 120 [BPF_REG_4] = 1, /* RCX */ 121 [BPF_REG_5] = 0, /* R8 */ 122 [BPF_REG_6] = 3, /* RBX callee saved */ 123 [BPF_REG_7] = 5, /* R13 callee saved */ 124 [BPF_REG_8] = 6, /* R14 callee saved */ 125 [BPF_REG_9] = 7, /* R15 callee saved */ 126 [BPF_REG_FP] = 5, /* RBP readonly */ 127 [BPF_REG_AX] = 2, /* R10 temp register */ 128 [AUX_REG] = 3, /* R11 temp register */ 129 [X86_REG_R9] = 1, /* R9 register, 6th function argument */ 130 }; 131 132 static const int reg2pt_regs[] = { 133 [BPF_REG_0] = offsetof(struct pt_regs, ax), 134 [BPF_REG_1] = offsetof(struct pt_regs, di), 135 [BPF_REG_2] = offsetof(struct pt_regs, si), 136 [BPF_REG_3] = offsetof(struct pt_regs, dx), 137 [BPF_REG_4] = offsetof(struct pt_regs, cx), 138 [BPF_REG_5] = offsetof(struct pt_regs, r8), 139 [BPF_REG_6] = offsetof(struct pt_regs, bx), 140 [BPF_REG_7] = offsetof(struct pt_regs, r13), 141 [BPF_REG_8] = offsetof(struct pt_regs, r14), 142 [BPF_REG_9] = offsetof(struct pt_regs, r15), 143 }; 144 145 /* 146 * is_ereg() == true if BPF register 'reg' maps to x86-64 r8..r15 147 * which need extra byte of encoding. 148 * rax,rcx,...,rbp have simpler encoding 149 */ 150 static bool is_ereg(u32 reg) 151 { 152 return (1 << reg) & (BIT(BPF_REG_5) | 153 BIT(AUX_REG) | 154 BIT(BPF_REG_7) | 155 BIT(BPF_REG_8) | 156 BIT(BPF_REG_9) | 157 BIT(X86_REG_R9) | 158 BIT(BPF_REG_AX)); 159 } 160 161 /* 162 * is_ereg_8l() == true if BPF register 'reg' is mapped to access x86-64 163 * lower 8-bit registers dil,sil,bpl,spl,r8b..r15b, which need extra byte 164 * of encoding. al,cl,dl,bl have simpler encoding. 165 */ 166 static bool is_ereg_8l(u32 reg) 167 { 168 return is_ereg(reg) || 169 (1 << reg) & (BIT(BPF_REG_1) | 170 BIT(BPF_REG_2) | 171 BIT(BPF_REG_FP)); 172 } 173 174 static bool is_axreg(u32 reg) 175 { 176 return reg == BPF_REG_0; 177 } 178 179 /* Add modifiers if 'reg' maps to x86-64 registers R8..R15 */ 180 static u8 add_1mod(u8 byte, u32 reg) 181 { 182 if (is_ereg(reg)) 183 byte |= 1; 184 return byte; 185 } 186 187 static u8 add_2mod(u8 byte, u32 r1, u32 r2) 188 { 189 if (is_ereg(r1)) 190 byte |= 1; 191 if (is_ereg(r2)) 192 byte |= 4; 193 return byte; 194 } 195 196 /* Encode 'dst_reg' register into x86-64 opcode 'byte' */ 197 static u8 add_1reg(u8 byte, u32 dst_reg) 198 { 199 return byte + reg2hex[dst_reg]; 200 } 201 202 /* Encode 'dst_reg' and 'src_reg' registers into x86-64 opcode 'byte' */ 203 static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg) 204 { 205 return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3); 206 } 207 208 static void jit_fill_hole(void *area, unsigned int size) 209 { 210 /* Fill whole space with INT3 instructions */ 211 memset(area, 0xcc, size); 212 } 213 214 struct jit_context { 215 int cleanup_addr; /* Epilogue code offset */ 216 }; 217 218 /* Maximum number of bytes emitted while JITing one eBPF insn */ 219 #define BPF_MAX_INSN_SIZE 128 220 #define BPF_INSN_SAFETY 64 221 222 /* Number of bytes emit_patch() needs to generate instructions */ 223 #define X86_PATCH_SIZE 5 224 /* Number of bytes that will be skipped on tailcall */ 225 #define X86_TAIL_CALL_OFFSET 11 226 227 static void push_callee_regs(u8 **pprog, bool *callee_regs_used) 228 { 229 u8 *prog = *pprog; 230 int cnt = 0; 231 232 if (callee_regs_used[0]) 233 EMIT1(0x53); /* push rbx */ 234 if (callee_regs_used[1]) 235 EMIT2(0x41, 0x55); /* push r13 */ 236 if (callee_regs_used[2]) 237 EMIT2(0x41, 
0x56); /* push r14 */ 238 if (callee_regs_used[3]) 239 EMIT2(0x41, 0x57); /* push r15 */ 240 *pprog = prog; 241 } 242 243 static void pop_callee_regs(u8 **pprog, bool *callee_regs_used) 244 { 245 u8 *prog = *pprog; 246 int cnt = 0; 247 248 if (callee_regs_used[3]) 249 EMIT2(0x41, 0x5F); /* pop r15 */ 250 if (callee_regs_used[2]) 251 EMIT2(0x41, 0x5E); /* pop r14 */ 252 if (callee_regs_used[1]) 253 EMIT2(0x41, 0x5D); /* pop r13 */ 254 if (callee_regs_used[0]) 255 EMIT1(0x5B); /* pop rbx */ 256 *pprog = prog; 257 } 258 259 /* 260 * Emit x86-64 prologue code for BPF program. 261 * bpf_tail_call helper will skip the first X86_TAIL_CALL_OFFSET bytes 262 * while jumping to another program 263 */ 264 static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf, 265 bool tail_call_reachable, bool is_subprog) 266 { 267 u8 *prog = *pprog; 268 int cnt = X86_PATCH_SIZE; 269 270 /* BPF trampoline can be made to work without these nops, 271 * but let's waste 5 bytes for now and optimize later 272 */ 273 memcpy(prog, ideal_nops[NOP_ATOMIC5], cnt); 274 prog += cnt; 275 if (!ebpf_from_cbpf) { 276 if (tail_call_reachable && !is_subprog) 277 EMIT2(0x31, 0xC0); /* xor eax, eax */ 278 else 279 EMIT2(0x66, 0x90); /* nop2 */ 280 } 281 EMIT1(0x55); /* push rbp */ 282 EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */ 283 /* sub rsp, rounded_stack_depth */ 284 if (stack_depth) 285 EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth, 8)); 286 if (tail_call_reachable) 287 EMIT1(0x50); /* push rax */ 288 *pprog = prog; 289 } 290 291 static int emit_patch(u8 **pprog, void *func, void *ip, u8 opcode) 292 { 293 u8 *prog = *pprog; 294 int cnt = 0; 295 s64 offset; 296 297 offset = func - (ip + X86_PATCH_SIZE); 298 if (!is_simm32(offset)) { 299 pr_err("Target call %p is out of range\n", func); 300 return -ERANGE; 301 } 302 EMIT1_off32(opcode, offset); 303 *pprog = prog; 304 return 0; 305 } 306 307 static int emit_call(u8 **pprog, void *func, void *ip) 308 { 309 return emit_patch(pprog, func, ip, 0xE8); 310 } 311 312 static int emit_jump(u8 **pprog, void *func, void *ip) 313 { 314 return emit_patch(pprog, func, ip, 0xE9); 315 } 316 317 static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t, 318 void *old_addr, void *new_addr, 319 const bool text_live) 320 { 321 const u8 *nop_insn = ideal_nops[NOP_ATOMIC5]; 322 u8 old_insn[X86_PATCH_SIZE]; 323 u8 new_insn[X86_PATCH_SIZE]; 324 u8 *prog; 325 int ret; 326 327 memcpy(old_insn, nop_insn, X86_PATCH_SIZE); 328 if (old_addr) { 329 prog = old_insn; 330 ret = t == BPF_MOD_CALL ? 331 emit_call(&prog, old_addr, ip) : 332 emit_jump(&prog, old_addr, ip); 333 if (ret) 334 return ret; 335 } 336 337 memcpy(new_insn, nop_insn, X86_PATCH_SIZE); 338 if (new_addr) { 339 prog = new_insn; 340 ret = t == BPF_MOD_CALL ? 
341 emit_call(&prog, new_addr, ip) : 342 emit_jump(&prog, new_addr, ip); 343 if (ret) 344 return ret; 345 } 346 347 ret = -EBUSY; 348 mutex_lock(&text_mutex); 349 if (memcmp(ip, old_insn, X86_PATCH_SIZE)) 350 goto out; 351 ret = 1; 352 if (memcmp(ip, new_insn, X86_PATCH_SIZE)) { 353 if (text_live) 354 text_poke_bp(ip, new_insn, X86_PATCH_SIZE, NULL); 355 else 356 memcpy(ip, new_insn, X86_PATCH_SIZE); 357 ret = 0; 358 } 359 out: 360 mutex_unlock(&text_mutex); 361 return ret; 362 } 363 364 int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t, 365 void *old_addr, void *new_addr) 366 { 367 if (!is_kernel_text((long)ip) && 368 !is_bpf_text_address((long)ip)) 369 /* BPF poking in modules is not supported */ 370 return -EINVAL; 371 372 return __bpf_arch_text_poke(ip, t, old_addr, new_addr, true); 373 } 374 375 static int get_pop_bytes(bool *callee_regs_used) 376 { 377 int bytes = 0; 378 379 if (callee_regs_used[3]) 380 bytes += 2; 381 if (callee_regs_used[2]) 382 bytes += 2; 383 if (callee_regs_used[1]) 384 bytes += 2; 385 if (callee_regs_used[0]) 386 bytes += 1; 387 388 return bytes; 389 } 390 391 /* 392 * Generate the following code: 393 * 394 * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ... 395 * if (index >= array->map.max_entries) 396 * goto out; 397 * if (++tail_call_cnt > MAX_TAIL_CALL_CNT) 398 * goto out; 399 * prog = array->ptrs[index]; 400 * if (prog == NULL) 401 * goto out; 402 * goto *(prog->bpf_func + prologue_size); 403 * out: 404 */ 405 static void emit_bpf_tail_call_indirect(u8 **pprog, bool *callee_regs_used, 406 u32 stack_depth) 407 { 408 int tcc_off = -4 - round_up(stack_depth, 8); 409 u8 *prog = *pprog; 410 int pop_bytes = 0; 411 int off1 = 42; 412 int off2 = 31; 413 int off3 = 9; 414 int cnt = 0; 415 416 /* count the additional bytes used for popping callee regs from stack 417 * that need to be taken into account for each of the offsets that 418 * are used for bailing out of the tail call 419 */ 420 pop_bytes = get_pop_bytes(callee_regs_used); 421 off1 += pop_bytes; 422 off2 += pop_bytes; 423 off3 += pop_bytes; 424 425 if (stack_depth) { 426 off1 += 7; 427 off2 += 7; 428 off3 += 7; 429 } 430 431 /* 432 * rdi - pointer to ctx 433 * rsi - pointer to bpf_array 434 * rdx - index in bpf_array 435 */ 436 437 /* 438 * if (index >= array->map.max_entries) 439 * goto out; 440 */ 441 EMIT2(0x89, 0xD2); /* mov edx, edx */ 442 EMIT3(0x39, 0x56, /* cmp dword ptr [rsi + 16], edx */ 443 offsetof(struct bpf_array, map.max_entries)); 444 #define OFFSET1 (off1 + RETPOLINE_RCX_BPF_JIT_SIZE) /* Number of bytes to jump */ 445 EMIT2(X86_JBE, OFFSET1); /* jbe out */ 446 447 /* 448 * if (tail_call_cnt > MAX_TAIL_CALL_CNT) 449 * goto out; 450 */ 451 EMIT2_off32(0x8B, 0x85, tcc_off); /* mov eax, dword ptr [rbp - tcc_off] */ 452 EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */ 453 #define OFFSET2 (off2 + RETPOLINE_RCX_BPF_JIT_SIZE) 454 EMIT2(X86_JA, OFFSET2); /* ja out */ 455 EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */ 456 EMIT2_off32(0x89, 0x85, tcc_off); /* mov dword ptr [rbp - tcc_off], eax */ 457 458 /* prog = array->ptrs[index]; */ 459 EMIT4_off32(0x48, 0x8B, 0x8C, 0xD6, /* mov rcx, [rsi + rdx * 8 + offsetof(...)] */ 460 offsetof(struct bpf_array, ptrs)); 461 462 /* 463 * if (prog == NULL) 464 * goto out; 465 */ 466 EMIT3(0x48, 0x85, 0xC9); /* test rcx,rcx */ 467 #define OFFSET3 (off3 + RETPOLINE_RCX_BPF_JIT_SIZE) 468 EMIT2(X86_JE, OFFSET3); /* je out */ 469 470 *pprog = prog; 471 pop_callee_regs(pprog, callee_regs_used); 472 prog = *pprog; 473 474 
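	/*
	 * Unwind what the prologue of the current program set up before
	 * jumping into the next one: the callee-saved regs were popped
	 * just above, the 'pop rax' below undoes the prologue's
	 * 'push rax' (the tail_call_cnt slot) and 'add rsp' undoes its
	 * 'sub rsp'. rbp is left untouched because the target is entered
	 * X86_TAIL_CALL_OFFSET bytes in, i.e. past its own
	 * 'push rbp; mov rbp, rsp'.
	 */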
	EMIT1(0x58);                              /* pop rax */
	if (stack_depth)
		EMIT3_off32(0x48, 0x81, 0xC4,     /* add rsp, sd */
			    round_up(stack_depth, 8));

	/* goto *(prog->bpf_func + X86_TAIL_CALL_OFFSET); */
	EMIT4(0x48, 0x8B, 0x49,                   /* mov rcx, qword ptr [rcx + 32] */
	      offsetof(struct bpf_prog, bpf_func));
	EMIT4(0x48, 0x83, 0xC1,                   /* add rcx, X86_TAIL_CALL_OFFSET */
	      X86_TAIL_CALL_OFFSET);
	/*
	 * Now we're ready to jump into next BPF program
	 * rdi == ctx (1st arg)
	 * rcx == prog->bpf_func + X86_TAIL_CALL_OFFSET
	 */
	RETPOLINE_RCX_BPF_JIT();

	/* out: */
	*pprog = prog;
}

static void emit_bpf_tail_call_direct(struct bpf_jit_poke_descriptor *poke,
				      u8 **pprog, int addr, u8 *image,
				      bool *callee_regs_used, u32 stack_depth)
{
	int tcc_off = -4 - round_up(stack_depth, 8);
	u8 *prog = *pprog;
	int pop_bytes = 0;
	int off1 = 20;
	int poke_off;
	int cnt = 0;

	/* count the additional bytes used for popping callee regs from stack
	 * that need to be taken into account for the jump offset that is used
	 * for bailing out of the tail call when the limit is reached
	 */
	pop_bytes = get_pop_bytes(callee_regs_used);
	off1 += pop_bytes;

	/*
	 * total bytes for:
	 * - nop5/ jmpq $off
	 * - pop callee regs
	 * - add rsp, $val if depth > 0
	 * - pop rax
	 */
	poke_off = X86_PATCH_SIZE + pop_bytes + 1;
	if (stack_depth) {
		poke_off += 7;
		off1 += 7;
	}

	/*
	 * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
	 *	goto out;
	 */
	EMIT2_off32(0x8B, 0x85, tcc_off);         /* mov eax, dword ptr [rbp - tcc_off] */
	EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);     /* cmp eax, MAX_TAIL_CALL_CNT */
	EMIT2(X86_JA, off1);                      /* ja out */
	EMIT3(0x83, 0xC0, 0x01);                  /* add eax, 1 */
	EMIT2_off32(0x89, 0x85, tcc_off);         /* mov dword ptr [rbp - tcc_off], eax */

	poke->tailcall_bypass = image + (addr - poke_off - X86_PATCH_SIZE);
	poke->adj_off = X86_TAIL_CALL_OFFSET;
	poke->tailcall_target = image + (addr - X86_PATCH_SIZE);
	poke->bypass_addr = (u8 *)poke->tailcall_target + X86_PATCH_SIZE;

	emit_jump(&prog, (u8 *)poke->tailcall_target + X86_PATCH_SIZE,
		  poke->tailcall_bypass);

	*pprog = prog;
	pop_callee_regs(pprog, callee_regs_used);
	prog = *pprog;
	EMIT1(0x58);                              /* pop rax */
	if (stack_depth)
		EMIT3_off32(0x48, 0x81, 0xC4, round_up(stack_depth, 8));

	memcpy(prog, ideal_nops[NOP_ATOMIC5], X86_PATCH_SIZE);
	prog += X86_PATCH_SIZE;
	/* out: */

	*pprog = prog;
}

static void bpf_tail_call_direct_fixup(struct bpf_prog *prog)
{
	struct bpf_jit_poke_descriptor *poke;
	struct bpf_array *array;
	struct bpf_prog *target;
	int i, ret;

	for (i = 0; i < prog->aux->size_poke_tab; i++) {
		poke = &prog->aux->poke_tab[i];
		WARN_ON_ONCE(READ_ONCE(poke->tailcall_target_stable));

		if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
			continue;

		array = container_of(poke->tail_call.map, struct bpf_array, map);
		mutex_lock(&array->aux->poke_mutex);
		target = array->ptrs[poke->tail_call.key];
		if (target) {
			/* Plain memcpy is used when the image is not live yet
			 * and still not locked as read-only. Once the poke
			 * location is active (poke->tailcall_target_stable),
			 * any parallel bpf_arch_text_poke() might still occur
			 * on the read-write image until we finally lock it as
			 * read-only. Both modifications on the given image are
			 * under text_mutex to avoid
			 * interference.
584 */ 585 ret = __bpf_arch_text_poke(poke->tailcall_target, 586 BPF_MOD_JUMP, NULL, 587 (u8 *)target->bpf_func + 588 poke->adj_off, false); 589 BUG_ON(ret < 0); 590 ret = __bpf_arch_text_poke(poke->tailcall_bypass, 591 BPF_MOD_JUMP, 592 (u8 *)poke->tailcall_target + 593 X86_PATCH_SIZE, NULL, false); 594 BUG_ON(ret < 0); 595 } 596 WRITE_ONCE(poke->tailcall_target_stable, true); 597 mutex_unlock(&array->aux->poke_mutex); 598 } 599 } 600 601 static void emit_mov_imm32(u8 **pprog, bool sign_propagate, 602 u32 dst_reg, const u32 imm32) 603 { 604 u8 *prog = *pprog; 605 u8 b1, b2, b3; 606 int cnt = 0; 607 608 /* 609 * Optimization: if imm32 is positive, use 'mov %eax, imm32' 610 * (which zero-extends imm32) to save 2 bytes. 611 */ 612 if (sign_propagate && (s32)imm32 < 0) { 613 /* 'mov %rax, imm32' sign extends imm32 */ 614 b1 = add_1mod(0x48, dst_reg); 615 b2 = 0xC7; 616 b3 = 0xC0; 617 EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32); 618 goto done; 619 } 620 621 /* 622 * Optimization: if imm32 is zero, use 'xor %eax, %eax' 623 * to save 3 bytes. 624 */ 625 if (imm32 == 0) { 626 if (is_ereg(dst_reg)) 627 EMIT1(add_2mod(0x40, dst_reg, dst_reg)); 628 b2 = 0x31; /* xor */ 629 b3 = 0xC0; 630 EMIT2(b2, add_2reg(b3, dst_reg, dst_reg)); 631 goto done; 632 } 633 634 /* mov %eax, imm32 */ 635 if (is_ereg(dst_reg)) 636 EMIT1(add_1mod(0x40, dst_reg)); 637 EMIT1_off32(add_1reg(0xB8, dst_reg), imm32); 638 done: 639 *pprog = prog; 640 } 641 642 static void emit_mov_imm64(u8 **pprog, u32 dst_reg, 643 const u32 imm32_hi, const u32 imm32_lo) 644 { 645 u8 *prog = *pprog; 646 int cnt = 0; 647 648 if (is_uimm32(((u64)imm32_hi << 32) | (u32)imm32_lo)) { 649 /* 650 * For emitting plain u32, where sign bit must not be 651 * propagated LLVM tends to load imm64 over mov32 652 * directly, so save couple of bytes by just doing 653 * 'mov %eax, imm32' instead. 
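	 *
	 * E.g. a BPF_LD_IMM64 of 0x12345678 becomes the 5-byte
	 * 'mov eax, 0x12345678' (b8 78 56 34 12) instead of the 10-byte
	 * 'movabsq rax, 0x12345678' (48 b8 78 56 34 12 00 00 00 00).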
654 */ 655 emit_mov_imm32(&prog, false, dst_reg, imm32_lo); 656 } else { 657 /* movabsq %rax, imm64 */ 658 EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg)); 659 EMIT(imm32_lo, 4); 660 EMIT(imm32_hi, 4); 661 } 662 663 *pprog = prog; 664 } 665 666 static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg) 667 { 668 u8 *prog = *pprog; 669 int cnt = 0; 670 671 if (is64) { 672 /* mov dst, src */ 673 EMIT_mov(dst_reg, src_reg); 674 } else { 675 /* mov32 dst, src */ 676 if (is_ereg(dst_reg) || is_ereg(src_reg)) 677 EMIT1(add_2mod(0x40, dst_reg, src_reg)); 678 EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg)); 679 } 680 681 *pprog = prog; 682 } 683 684 /* LDX: dst_reg = *(u8*)(src_reg + off) */ 685 static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off) 686 { 687 u8 *prog = *pprog; 688 int cnt = 0; 689 690 switch (size) { 691 case BPF_B: 692 /* Emit 'movzx rax, byte ptr [rax + off]' */ 693 EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6); 694 break; 695 case BPF_H: 696 /* Emit 'movzx rax, word ptr [rax + off]' */ 697 EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7); 698 break; 699 case BPF_W: 700 /* Emit 'mov eax, dword ptr [rax+0x14]' */ 701 if (is_ereg(dst_reg) || is_ereg(src_reg)) 702 EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B); 703 else 704 EMIT1(0x8B); 705 break; 706 case BPF_DW: 707 /* Emit 'mov rax, qword ptr [rax+0x14]' */ 708 EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B); 709 break; 710 } 711 /* 712 * If insn->off == 0 we can save one extra byte, but 713 * special case of x86 R13 which always needs an offset 714 * is not worth the hassle 715 */ 716 if (is_imm8(off)) 717 EMIT2(add_2reg(0x40, src_reg, dst_reg), off); 718 else 719 EMIT1_off32(add_2reg(0x80, src_reg, dst_reg), off); 720 *pprog = prog; 721 } 722 723 /* STX: *(u8*)(dst_reg + off) = src_reg */ 724 static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off) 725 { 726 u8 *prog = *pprog; 727 int cnt = 0; 728 729 switch (size) { 730 case BPF_B: 731 /* Emit 'mov byte ptr [rax + off], al' */ 732 if (is_ereg(dst_reg) || is_ereg_8l(src_reg)) 733 /* Add extra byte for eregs or SIL,DIL,BPL in src_reg */ 734 EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88); 735 else 736 EMIT1(0x88); 737 break; 738 case BPF_H: 739 if (is_ereg(dst_reg) || is_ereg(src_reg)) 740 EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89); 741 else 742 EMIT2(0x66, 0x89); 743 break; 744 case BPF_W: 745 if (is_ereg(dst_reg) || is_ereg(src_reg)) 746 EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89); 747 else 748 EMIT1(0x89); 749 break; 750 case BPF_DW: 751 EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89); 752 break; 753 } 754 if (is_imm8(off)) 755 EMIT2(add_2reg(0x40, dst_reg, src_reg), off); 756 else 757 EMIT1_off32(add_2reg(0x80, dst_reg, src_reg), off); 758 *pprog = prog; 759 } 760 761 static bool ex_handler_bpf(const struct exception_table_entry *x, 762 struct pt_regs *regs, int trapnr, 763 unsigned long error_code, unsigned long fault_addr) 764 { 765 u32 reg = x->fixup >> 8; 766 767 /* jump over faulting load and clear dest register */ 768 *(unsigned long *)((void *)regs + reg) = 0; 769 regs->ip += x->fixup & 0xff; 770 return true; 771 } 772 773 static void detect_reg_usage(struct bpf_insn *insn, int insn_cnt, 774 bool *regs_used, bool *tail_call_seen) 775 { 776 int i; 777 778 for (i = 1; i <= insn_cnt; i++, insn++) { 779 if (insn->code == (BPF_JMP | BPF_TAIL_CALL)) 780 *tail_call_seen = true; 781 if (insn->dst_reg == BPF_REG_6 || insn->src_reg == BPF_REG_6) 782 regs_used[0] = true; 783 if (insn->dst_reg == 
BPF_REG_7 || insn->src_reg == BPF_REG_7) 784 regs_used[1] = true; 785 if (insn->dst_reg == BPF_REG_8 || insn->src_reg == BPF_REG_8) 786 regs_used[2] = true; 787 if (insn->dst_reg == BPF_REG_9 || insn->src_reg == BPF_REG_9) 788 regs_used[3] = true; 789 } 790 } 791 792 static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, 793 int oldproglen, struct jit_context *ctx) 794 { 795 bool tail_call_reachable = bpf_prog->aux->tail_call_reachable; 796 struct bpf_insn *insn = bpf_prog->insnsi; 797 bool callee_regs_used[4] = {}; 798 int insn_cnt = bpf_prog->len; 799 bool tail_call_seen = false; 800 bool seen_exit = false; 801 u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY]; 802 int i, cnt = 0, excnt = 0; 803 int proglen = 0; 804 u8 *prog = temp; 805 806 detect_reg_usage(insn, insn_cnt, callee_regs_used, 807 &tail_call_seen); 808 809 /* tail call's presence in current prog implies it is reachable */ 810 tail_call_reachable |= tail_call_seen; 811 812 emit_prologue(&prog, bpf_prog->aux->stack_depth, 813 bpf_prog_was_classic(bpf_prog), tail_call_reachable, 814 bpf_prog->aux->func_idx != 0); 815 push_callee_regs(&prog, callee_regs_used); 816 addrs[0] = prog - temp; 817 818 for (i = 1; i <= insn_cnt; i++, insn++) { 819 const s32 imm32 = insn->imm; 820 u32 dst_reg = insn->dst_reg; 821 u32 src_reg = insn->src_reg; 822 u8 b2 = 0, b3 = 0; 823 s64 jmp_offset; 824 u8 jmp_cond; 825 int ilen; 826 u8 *func; 827 828 switch (insn->code) { 829 /* ALU */ 830 case BPF_ALU | BPF_ADD | BPF_X: 831 case BPF_ALU | BPF_SUB | BPF_X: 832 case BPF_ALU | BPF_AND | BPF_X: 833 case BPF_ALU | BPF_OR | BPF_X: 834 case BPF_ALU | BPF_XOR | BPF_X: 835 case BPF_ALU64 | BPF_ADD | BPF_X: 836 case BPF_ALU64 | BPF_SUB | BPF_X: 837 case BPF_ALU64 | BPF_AND | BPF_X: 838 case BPF_ALU64 | BPF_OR | BPF_X: 839 case BPF_ALU64 | BPF_XOR | BPF_X: 840 switch (BPF_OP(insn->code)) { 841 case BPF_ADD: b2 = 0x01; break; 842 case BPF_SUB: b2 = 0x29; break; 843 case BPF_AND: b2 = 0x21; break; 844 case BPF_OR: b2 = 0x09; break; 845 case BPF_XOR: b2 = 0x31; break; 846 } 847 if (BPF_CLASS(insn->code) == BPF_ALU64) 848 EMIT1(add_2mod(0x48, dst_reg, src_reg)); 849 else if (is_ereg(dst_reg) || is_ereg(src_reg)) 850 EMIT1(add_2mod(0x40, dst_reg, src_reg)); 851 EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg)); 852 break; 853 854 case BPF_ALU64 | BPF_MOV | BPF_X: 855 case BPF_ALU | BPF_MOV | BPF_X: 856 emit_mov_reg(&prog, 857 BPF_CLASS(insn->code) == BPF_ALU64, 858 dst_reg, src_reg); 859 break; 860 861 /* neg dst */ 862 case BPF_ALU | BPF_NEG: 863 case BPF_ALU64 | BPF_NEG: 864 if (BPF_CLASS(insn->code) == BPF_ALU64) 865 EMIT1(add_1mod(0x48, dst_reg)); 866 else if (is_ereg(dst_reg)) 867 EMIT1(add_1mod(0x40, dst_reg)); 868 EMIT2(0xF7, add_1reg(0xD8, dst_reg)); 869 break; 870 871 case BPF_ALU | BPF_ADD | BPF_K: 872 case BPF_ALU | BPF_SUB | BPF_K: 873 case BPF_ALU | BPF_AND | BPF_K: 874 case BPF_ALU | BPF_OR | BPF_K: 875 case BPF_ALU | BPF_XOR | BPF_K: 876 case BPF_ALU64 | BPF_ADD | BPF_K: 877 case BPF_ALU64 | BPF_SUB | BPF_K: 878 case BPF_ALU64 | BPF_AND | BPF_K: 879 case BPF_ALU64 | BPF_OR | BPF_K: 880 case BPF_ALU64 | BPF_XOR | BPF_K: 881 if (BPF_CLASS(insn->code) == BPF_ALU64) 882 EMIT1(add_1mod(0x48, dst_reg)); 883 else if (is_ereg(dst_reg)) 884 EMIT1(add_1mod(0x40, dst_reg)); 885 886 /* 887 * b3 holds 'normal' opcode, b2 short form only valid 888 * in case dst is eax/rax. 
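		 *
		 * E.g. for BPF_ALU64 | BPF_ADD | BPF_K with dst_reg == BPF_REG_1 (rdi):
		 *   imm32 = 5       -> 48 83 c7 05          (add rdi, 5, imm8 form)
		 *   imm32 = 0x10000 -> 48 81 c7 00 00 01 00 (add rdi, 0x10000)
		 * while dst_reg == BPF_REG_0 (rax) can use the short form:
		 *   imm32 = 0x10000 -> 48 05 00 00 01 00    (add rax, 0x10000)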
889 */ 890 switch (BPF_OP(insn->code)) { 891 case BPF_ADD: 892 b3 = 0xC0; 893 b2 = 0x05; 894 break; 895 case BPF_SUB: 896 b3 = 0xE8; 897 b2 = 0x2D; 898 break; 899 case BPF_AND: 900 b3 = 0xE0; 901 b2 = 0x25; 902 break; 903 case BPF_OR: 904 b3 = 0xC8; 905 b2 = 0x0D; 906 break; 907 case BPF_XOR: 908 b3 = 0xF0; 909 b2 = 0x35; 910 break; 911 } 912 913 if (is_imm8(imm32)) 914 EMIT3(0x83, add_1reg(b3, dst_reg), imm32); 915 else if (is_axreg(dst_reg)) 916 EMIT1_off32(b2, imm32); 917 else 918 EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32); 919 break; 920 921 case BPF_ALU64 | BPF_MOV | BPF_K: 922 case BPF_ALU | BPF_MOV | BPF_K: 923 emit_mov_imm32(&prog, BPF_CLASS(insn->code) == BPF_ALU64, 924 dst_reg, imm32); 925 break; 926 927 case BPF_LD | BPF_IMM | BPF_DW: 928 emit_mov_imm64(&prog, dst_reg, insn[1].imm, insn[0].imm); 929 insn++; 930 i++; 931 break; 932 933 /* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */ 934 case BPF_ALU | BPF_MOD | BPF_X: 935 case BPF_ALU | BPF_DIV | BPF_X: 936 case BPF_ALU | BPF_MOD | BPF_K: 937 case BPF_ALU | BPF_DIV | BPF_K: 938 case BPF_ALU64 | BPF_MOD | BPF_X: 939 case BPF_ALU64 | BPF_DIV | BPF_X: 940 case BPF_ALU64 | BPF_MOD | BPF_K: 941 case BPF_ALU64 | BPF_DIV | BPF_K: 942 EMIT1(0x50); /* push rax */ 943 EMIT1(0x52); /* push rdx */ 944 945 if (BPF_SRC(insn->code) == BPF_X) 946 /* mov r11, src_reg */ 947 EMIT_mov(AUX_REG, src_reg); 948 else 949 /* mov r11, imm32 */ 950 EMIT3_off32(0x49, 0xC7, 0xC3, imm32); 951 952 /* mov rax, dst_reg */ 953 EMIT_mov(BPF_REG_0, dst_reg); 954 955 /* 956 * xor edx, edx 957 * equivalent to 'xor rdx, rdx', but one byte less 958 */ 959 EMIT2(0x31, 0xd2); 960 961 if (BPF_CLASS(insn->code) == BPF_ALU64) 962 /* div r11 */ 963 EMIT3(0x49, 0xF7, 0xF3); 964 else 965 /* div r11d */ 966 EMIT3(0x41, 0xF7, 0xF3); 967 968 if (BPF_OP(insn->code) == BPF_MOD) 969 /* mov r11, rdx */ 970 EMIT3(0x49, 0x89, 0xD3); 971 else 972 /* mov r11, rax */ 973 EMIT3(0x49, 0x89, 0xC3); 974 975 EMIT1(0x5A); /* pop rdx */ 976 EMIT1(0x58); /* pop rax */ 977 978 /* mov dst_reg, r11 */ 979 EMIT_mov(dst_reg, AUX_REG); 980 break; 981 982 case BPF_ALU | BPF_MUL | BPF_K: 983 case BPF_ALU | BPF_MUL | BPF_X: 984 case BPF_ALU64 | BPF_MUL | BPF_K: 985 case BPF_ALU64 | BPF_MUL | BPF_X: 986 { 987 bool is64 = BPF_CLASS(insn->code) == BPF_ALU64; 988 989 if (dst_reg != BPF_REG_0) 990 EMIT1(0x50); /* push rax */ 991 if (dst_reg != BPF_REG_3) 992 EMIT1(0x52); /* push rdx */ 993 994 /* mov r11, dst_reg */ 995 EMIT_mov(AUX_REG, dst_reg); 996 997 if (BPF_SRC(insn->code) == BPF_X) 998 emit_mov_reg(&prog, is64, BPF_REG_0, src_reg); 999 else 1000 emit_mov_imm32(&prog, is64, BPF_REG_0, imm32); 1001 1002 if (is64) 1003 EMIT1(add_1mod(0x48, AUX_REG)); 1004 else if (is_ereg(AUX_REG)) 1005 EMIT1(add_1mod(0x40, AUX_REG)); 1006 /* mul(q) r11 */ 1007 EMIT2(0xF7, add_1reg(0xE0, AUX_REG)); 1008 1009 if (dst_reg != BPF_REG_3) 1010 EMIT1(0x5A); /* pop rdx */ 1011 if (dst_reg != BPF_REG_0) { 1012 /* mov dst_reg, rax */ 1013 EMIT_mov(dst_reg, BPF_REG_0); 1014 EMIT1(0x58); /* pop rax */ 1015 } 1016 break; 1017 } 1018 /* Shifts */ 1019 case BPF_ALU | BPF_LSH | BPF_K: 1020 case BPF_ALU | BPF_RSH | BPF_K: 1021 case BPF_ALU | BPF_ARSH | BPF_K: 1022 case BPF_ALU64 | BPF_LSH | BPF_K: 1023 case BPF_ALU64 | BPF_RSH | BPF_K: 1024 case BPF_ALU64 | BPF_ARSH | BPF_K: 1025 if (BPF_CLASS(insn->code) == BPF_ALU64) 1026 EMIT1(add_1mod(0x48, dst_reg)); 1027 else if (is_ereg(dst_reg)) 1028 EMIT1(add_1mod(0x40, dst_reg)); 1029 1030 switch (BPF_OP(insn->code)) { 1031 case BPF_LSH: b3 = 0xE0; break; 1032 case BPF_RSH: b3 = 
0xE8; break; 1033 case BPF_ARSH: b3 = 0xF8; break; 1034 } 1035 1036 if (imm32 == 1) 1037 EMIT2(0xD1, add_1reg(b3, dst_reg)); 1038 else 1039 EMIT3(0xC1, add_1reg(b3, dst_reg), imm32); 1040 break; 1041 1042 case BPF_ALU | BPF_LSH | BPF_X: 1043 case BPF_ALU | BPF_RSH | BPF_X: 1044 case BPF_ALU | BPF_ARSH | BPF_X: 1045 case BPF_ALU64 | BPF_LSH | BPF_X: 1046 case BPF_ALU64 | BPF_RSH | BPF_X: 1047 case BPF_ALU64 | BPF_ARSH | BPF_X: 1048 1049 /* Check for bad case when dst_reg == rcx */ 1050 if (dst_reg == BPF_REG_4) { 1051 /* mov r11, dst_reg */ 1052 EMIT_mov(AUX_REG, dst_reg); 1053 dst_reg = AUX_REG; 1054 } 1055 1056 if (src_reg != BPF_REG_4) { /* common case */ 1057 EMIT1(0x51); /* push rcx */ 1058 1059 /* mov rcx, src_reg */ 1060 EMIT_mov(BPF_REG_4, src_reg); 1061 } 1062 1063 /* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */ 1064 if (BPF_CLASS(insn->code) == BPF_ALU64) 1065 EMIT1(add_1mod(0x48, dst_reg)); 1066 else if (is_ereg(dst_reg)) 1067 EMIT1(add_1mod(0x40, dst_reg)); 1068 1069 switch (BPF_OP(insn->code)) { 1070 case BPF_LSH: b3 = 0xE0; break; 1071 case BPF_RSH: b3 = 0xE8; break; 1072 case BPF_ARSH: b3 = 0xF8; break; 1073 } 1074 EMIT2(0xD3, add_1reg(b3, dst_reg)); 1075 1076 if (src_reg != BPF_REG_4) 1077 EMIT1(0x59); /* pop rcx */ 1078 1079 if (insn->dst_reg == BPF_REG_4) 1080 /* mov dst_reg, r11 */ 1081 EMIT_mov(insn->dst_reg, AUX_REG); 1082 break; 1083 1084 case BPF_ALU | BPF_END | BPF_FROM_BE: 1085 switch (imm32) { 1086 case 16: 1087 /* Emit 'ror %ax, 8' to swap lower 2 bytes */ 1088 EMIT1(0x66); 1089 if (is_ereg(dst_reg)) 1090 EMIT1(0x41); 1091 EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8); 1092 1093 /* Emit 'movzwl eax, ax' */ 1094 if (is_ereg(dst_reg)) 1095 EMIT3(0x45, 0x0F, 0xB7); 1096 else 1097 EMIT2(0x0F, 0xB7); 1098 EMIT1(add_2reg(0xC0, dst_reg, dst_reg)); 1099 break; 1100 case 32: 1101 /* Emit 'bswap eax' to swap lower 4 bytes */ 1102 if (is_ereg(dst_reg)) 1103 EMIT2(0x41, 0x0F); 1104 else 1105 EMIT1(0x0F); 1106 EMIT1(add_1reg(0xC8, dst_reg)); 1107 break; 1108 case 64: 1109 /* Emit 'bswap rax' to swap 8 bytes */ 1110 EMIT3(add_1mod(0x48, dst_reg), 0x0F, 1111 add_1reg(0xC8, dst_reg)); 1112 break; 1113 } 1114 break; 1115 1116 case BPF_ALU | BPF_END | BPF_FROM_LE: 1117 switch (imm32) { 1118 case 16: 1119 /* 1120 * Emit 'movzwl eax, ax' to zero extend 16-bit 1121 * into 64 bit 1122 */ 1123 if (is_ereg(dst_reg)) 1124 EMIT3(0x45, 0x0F, 0xB7); 1125 else 1126 EMIT2(0x0F, 0xB7); 1127 EMIT1(add_2reg(0xC0, dst_reg, dst_reg)); 1128 break; 1129 case 32: 1130 /* Emit 'mov eax, eax' to clear upper 32-bits */ 1131 if (is_ereg(dst_reg)) 1132 EMIT1(0x45); 1133 EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg)); 1134 break; 1135 case 64: 1136 /* nop */ 1137 break; 1138 } 1139 break; 1140 1141 /* ST: *(u8*)(dst_reg + off) = imm */ 1142 case BPF_ST | BPF_MEM | BPF_B: 1143 if (is_ereg(dst_reg)) 1144 EMIT2(0x41, 0xC6); 1145 else 1146 EMIT1(0xC6); 1147 goto st; 1148 case BPF_ST | BPF_MEM | BPF_H: 1149 if (is_ereg(dst_reg)) 1150 EMIT3(0x66, 0x41, 0xC7); 1151 else 1152 EMIT2(0x66, 0xC7); 1153 goto st; 1154 case BPF_ST | BPF_MEM | BPF_W: 1155 if (is_ereg(dst_reg)) 1156 EMIT2(0x41, 0xC7); 1157 else 1158 EMIT1(0xC7); 1159 goto st; 1160 case BPF_ST | BPF_MEM | BPF_DW: 1161 EMIT2(add_1mod(0x48, dst_reg), 0xC7); 1162 1163 st: if (is_imm8(insn->off)) 1164 EMIT2(add_1reg(0x40, dst_reg), insn->off); 1165 else 1166 EMIT1_off32(add_1reg(0x80, dst_reg), insn->off); 1167 1168 EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code))); 1169 break; 1170 1171 /* STX: *(u8*)(dst_reg + off) = src_reg */ 1172 case BPF_STX | BPF_MEM 
| BPF_B: 1173 case BPF_STX | BPF_MEM | BPF_H: 1174 case BPF_STX | BPF_MEM | BPF_W: 1175 case BPF_STX | BPF_MEM | BPF_DW: 1176 emit_stx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off); 1177 break; 1178 1179 /* LDX: dst_reg = *(u8*)(src_reg + off) */ 1180 case BPF_LDX | BPF_MEM | BPF_B: 1181 case BPF_LDX | BPF_PROBE_MEM | BPF_B: 1182 case BPF_LDX | BPF_MEM | BPF_H: 1183 case BPF_LDX | BPF_PROBE_MEM | BPF_H: 1184 case BPF_LDX | BPF_MEM | BPF_W: 1185 case BPF_LDX | BPF_PROBE_MEM | BPF_W: 1186 case BPF_LDX | BPF_MEM | BPF_DW: 1187 case BPF_LDX | BPF_PROBE_MEM | BPF_DW: 1188 emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off); 1189 if (BPF_MODE(insn->code) == BPF_PROBE_MEM) { 1190 struct exception_table_entry *ex; 1191 u8 *_insn = image + proglen; 1192 s64 delta; 1193 1194 if (!bpf_prog->aux->extable) 1195 break; 1196 1197 if (excnt >= bpf_prog->aux->num_exentries) { 1198 pr_err("ex gen bug\n"); 1199 return -EFAULT; 1200 } 1201 ex = &bpf_prog->aux->extable[excnt++]; 1202 1203 delta = _insn - (u8 *)&ex->insn; 1204 if (!is_simm32(delta)) { 1205 pr_err("extable->insn doesn't fit into 32-bit\n"); 1206 return -EFAULT; 1207 } 1208 ex->insn = delta; 1209 1210 delta = (u8 *)ex_handler_bpf - (u8 *)&ex->handler; 1211 if (!is_simm32(delta)) { 1212 pr_err("extable->handler doesn't fit into 32-bit\n"); 1213 return -EFAULT; 1214 } 1215 ex->handler = delta; 1216 1217 if (dst_reg > BPF_REG_9) { 1218 pr_err("verifier error\n"); 1219 return -EFAULT; 1220 } 1221 /* 1222 * Compute size of x86 insn and its target dest x86 register. 1223 * ex_handler_bpf() will use lower 8 bits to adjust 1224 * pt_regs->ip to jump over this x86 instruction 1225 * and upper bits to figure out which pt_regs to zero out. 1226 * End result: x86 insn "mov rbx, qword ptr [rax+0x14]" 1227 * of 4 bytes will be ignored and rbx will be zero inited. 
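				 *
				 * For that example the fixup word is
				 * 4 | (offsetof(struct pt_regs, bx) << 8):
				 * the x86 insn length in bits 0-7 and the
				 * pt_regs offset of the destination register
				 * in the bits above.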
1228 */ 1229 ex->fixup = (prog - temp) | (reg2pt_regs[dst_reg] << 8); 1230 } 1231 break; 1232 1233 /* STX XADD: lock *(u32*)(dst_reg + off) += src_reg */ 1234 case BPF_STX | BPF_XADD | BPF_W: 1235 /* Emit 'lock add dword ptr [rax + off], eax' */ 1236 if (is_ereg(dst_reg) || is_ereg(src_reg)) 1237 EMIT3(0xF0, add_2mod(0x40, dst_reg, src_reg), 0x01); 1238 else 1239 EMIT2(0xF0, 0x01); 1240 goto xadd; 1241 case BPF_STX | BPF_XADD | BPF_DW: 1242 EMIT3(0xF0, add_2mod(0x48, dst_reg, src_reg), 0x01); 1243 xadd: if (is_imm8(insn->off)) 1244 EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off); 1245 else 1246 EMIT1_off32(add_2reg(0x80, dst_reg, src_reg), 1247 insn->off); 1248 break; 1249 1250 /* call */ 1251 case BPF_JMP | BPF_CALL: 1252 func = (u8 *) __bpf_call_base + imm32; 1253 if (tail_call_reachable) { 1254 EMIT3_off32(0x48, 0x8B, 0x85, 1255 -(bpf_prog->aux->stack_depth + 8)); 1256 if (!imm32 || emit_call(&prog, func, image + addrs[i - 1] + 7)) 1257 return -EINVAL; 1258 } else { 1259 if (!imm32 || emit_call(&prog, func, image + addrs[i - 1])) 1260 return -EINVAL; 1261 } 1262 break; 1263 1264 case BPF_JMP | BPF_TAIL_CALL: 1265 if (imm32) 1266 emit_bpf_tail_call_direct(&bpf_prog->aux->poke_tab[imm32 - 1], 1267 &prog, addrs[i], image, 1268 callee_regs_used, 1269 bpf_prog->aux->stack_depth); 1270 else 1271 emit_bpf_tail_call_indirect(&prog, 1272 callee_regs_used, 1273 bpf_prog->aux->stack_depth); 1274 break; 1275 1276 /* cond jump */ 1277 case BPF_JMP | BPF_JEQ | BPF_X: 1278 case BPF_JMP | BPF_JNE | BPF_X: 1279 case BPF_JMP | BPF_JGT | BPF_X: 1280 case BPF_JMP | BPF_JLT | BPF_X: 1281 case BPF_JMP | BPF_JGE | BPF_X: 1282 case BPF_JMP | BPF_JLE | BPF_X: 1283 case BPF_JMP | BPF_JSGT | BPF_X: 1284 case BPF_JMP | BPF_JSLT | BPF_X: 1285 case BPF_JMP | BPF_JSGE | BPF_X: 1286 case BPF_JMP | BPF_JSLE | BPF_X: 1287 case BPF_JMP32 | BPF_JEQ | BPF_X: 1288 case BPF_JMP32 | BPF_JNE | BPF_X: 1289 case BPF_JMP32 | BPF_JGT | BPF_X: 1290 case BPF_JMP32 | BPF_JLT | BPF_X: 1291 case BPF_JMP32 | BPF_JGE | BPF_X: 1292 case BPF_JMP32 | BPF_JLE | BPF_X: 1293 case BPF_JMP32 | BPF_JSGT | BPF_X: 1294 case BPF_JMP32 | BPF_JSLT | BPF_X: 1295 case BPF_JMP32 | BPF_JSGE | BPF_X: 1296 case BPF_JMP32 | BPF_JSLE | BPF_X: 1297 /* cmp dst_reg, src_reg */ 1298 if (BPF_CLASS(insn->code) == BPF_JMP) 1299 EMIT1(add_2mod(0x48, dst_reg, src_reg)); 1300 else if (is_ereg(dst_reg) || is_ereg(src_reg)) 1301 EMIT1(add_2mod(0x40, dst_reg, src_reg)); 1302 EMIT2(0x39, add_2reg(0xC0, dst_reg, src_reg)); 1303 goto emit_cond_jmp; 1304 1305 case BPF_JMP | BPF_JSET | BPF_X: 1306 case BPF_JMP32 | BPF_JSET | BPF_X: 1307 /* test dst_reg, src_reg */ 1308 if (BPF_CLASS(insn->code) == BPF_JMP) 1309 EMIT1(add_2mod(0x48, dst_reg, src_reg)); 1310 else if (is_ereg(dst_reg) || is_ereg(src_reg)) 1311 EMIT1(add_2mod(0x40, dst_reg, src_reg)); 1312 EMIT2(0x85, add_2reg(0xC0, dst_reg, src_reg)); 1313 goto emit_cond_jmp; 1314 1315 case BPF_JMP | BPF_JSET | BPF_K: 1316 case BPF_JMP32 | BPF_JSET | BPF_K: 1317 /* test dst_reg, imm32 */ 1318 if (BPF_CLASS(insn->code) == BPF_JMP) 1319 EMIT1(add_1mod(0x48, dst_reg)); 1320 else if (is_ereg(dst_reg)) 1321 EMIT1(add_1mod(0x40, dst_reg)); 1322 EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32); 1323 goto emit_cond_jmp; 1324 1325 case BPF_JMP | BPF_JEQ | BPF_K: 1326 case BPF_JMP | BPF_JNE | BPF_K: 1327 case BPF_JMP | BPF_JGT | BPF_K: 1328 case BPF_JMP | BPF_JLT | BPF_K: 1329 case BPF_JMP | BPF_JGE | BPF_K: 1330 case BPF_JMP | BPF_JLE | BPF_K: 1331 case BPF_JMP | BPF_JSGT | BPF_K: 1332 case BPF_JMP | BPF_JSLT | BPF_K: 1333 case 
BPF_JMP | BPF_JSGE | BPF_K: 1334 case BPF_JMP | BPF_JSLE | BPF_K: 1335 case BPF_JMP32 | BPF_JEQ | BPF_K: 1336 case BPF_JMP32 | BPF_JNE | BPF_K: 1337 case BPF_JMP32 | BPF_JGT | BPF_K: 1338 case BPF_JMP32 | BPF_JLT | BPF_K: 1339 case BPF_JMP32 | BPF_JGE | BPF_K: 1340 case BPF_JMP32 | BPF_JLE | BPF_K: 1341 case BPF_JMP32 | BPF_JSGT | BPF_K: 1342 case BPF_JMP32 | BPF_JSLT | BPF_K: 1343 case BPF_JMP32 | BPF_JSGE | BPF_K: 1344 case BPF_JMP32 | BPF_JSLE | BPF_K: 1345 /* test dst_reg, dst_reg to save one extra byte */ 1346 if (imm32 == 0) { 1347 if (BPF_CLASS(insn->code) == BPF_JMP) 1348 EMIT1(add_2mod(0x48, dst_reg, dst_reg)); 1349 else if (is_ereg(dst_reg)) 1350 EMIT1(add_2mod(0x40, dst_reg, dst_reg)); 1351 EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg)); 1352 goto emit_cond_jmp; 1353 } 1354 1355 /* cmp dst_reg, imm8/32 */ 1356 if (BPF_CLASS(insn->code) == BPF_JMP) 1357 EMIT1(add_1mod(0x48, dst_reg)); 1358 else if (is_ereg(dst_reg)) 1359 EMIT1(add_1mod(0x40, dst_reg)); 1360 1361 if (is_imm8(imm32)) 1362 EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32); 1363 else 1364 EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32); 1365 1366 emit_cond_jmp: /* Convert BPF opcode to x86 */ 1367 switch (BPF_OP(insn->code)) { 1368 case BPF_JEQ: 1369 jmp_cond = X86_JE; 1370 break; 1371 case BPF_JSET: 1372 case BPF_JNE: 1373 jmp_cond = X86_JNE; 1374 break; 1375 case BPF_JGT: 1376 /* GT is unsigned '>', JA in x86 */ 1377 jmp_cond = X86_JA; 1378 break; 1379 case BPF_JLT: 1380 /* LT is unsigned '<', JB in x86 */ 1381 jmp_cond = X86_JB; 1382 break; 1383 case BPF_JGE: 1384 /* GE is unsigned '>=', JAE in x86 */ 1385 jmp_cond = X86_JAE; 1386 break; 1387 case BPF_JLE: 1388 /* LE is unsigned '<=', JBE in x86 */ 1389 jmp_cond = X86_JBE; 1390 break; 1391 case BPF_JSGT: 1392 /* Signed '>', GT in x86 */ 1393 jmp_cond = X86_JG; 1394 break; 1395 case BPF_JSLT: 1396 /* Signed '<', LT in x86 */ 1397 jmp_cond = X86_JL; 1398 break; 1399 case BPF_JSGE: 1400 /* Signed '>=', GE in x86 */ 1401 jmp_cond = X86_JGE; 1402 break; 1403 case BPF_JSLE: 1404 /* Signed '<=', LE in x86 */ 1405 jmp_cond = X86_JLE; 1406 break; 1407 default: /* to silence GCC warning */ 1408 return -EFAULT; 1409 } 1410 jmp_offset = addrs[i + insn->off] - addrs[i]; 1411 if (is_imm8(jmp_offset)) { 1412 EMIT2(jmp_cond, jmp_offset); 1413 } else if (is_simm32(jmp_offset)) { 1414 EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset); 1415 } else { 1416 pr_err("cond_jmp gen bug %llx\n", jmp_offset); 1417 return -EFAULT; 1418 } 1419 1420 break; 1421 1422 case BPF_JMP | BPF_JA: 1423 if (insn->off == -1) 1424 /* -1 jmp instructions will always jump 1425 * backwards two bytes. Explicitly handling 1426 * this case avoids wasting too many passes 1427 * when there are long sequences of replaced 1428 * dead code. 
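			 *
			 * (A BPF 'ja -1' branches to itself, and the
			 * matching x86 encoding is the self-referencing
			 * two-byte 'eb fe', hence the fixed -2 offset
			 * independent of addrs[].)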
1429 */ 1430 jmp_offset = -2; 1431 else 1432 jmp_offset = addrs[i + insn->off] - addrs[i]; 1433 1434 if (!jmp_offset) 1435 /* Optimize out nop jumps */ 1436 break; 1437 emit_jmp: 1438 if (is_imm8(jmp_offset)) { 1439 EMIT2(0xEB, jmp_offset); 1440 } else if (is_simm32(jmp_offset)) { 1441 EMIT1_off32(0xE9, jmp_offset); 1442 } else { 1443 pr_err("jmp gen bug %llx\n", jmp_offset); 1444 return -EFAULT; 1445 } 1446 break; 1447 1448 case BPF_JMP | BPF_EXIT: 1449 if (seen_exit) { 1450 jmp_offset = ctx->cleanup_addr - addrs[i]; 1451 goto emit_jmp; 1452 } 1453 seen_exit = true; 1454 /* Update cleanup_addr */ 1455 ctx->cleanup_addr = proglen; 1456 pop_callee_regs(&prog, callee_regs_used); 1457 EMIT1(0xC9); /* leave */ 1458 EMIT1(0xC3); /* ret */ 1459 break; 1460 1461 default: 1462 /* 1463 * By design x86-64 JIT should support all BPF instructions. 1464 * This error will be seen if new instruction was added 1465 * to the interpreter, but not to the JIT, or if there is 1466 * junk in bpf_prog. 1467 */ 1468 pr_err("bpf_jit: unknown opcode %02x\n", insn->code); 1469 return -EINVAL; 1470 } 1471 1472 ilen = prog - temp; 1473 if (ilen > BPF_MAX_INSN_SIZE) { 1474 pr_err("bpf_jit: fatal insn size error\n"); 1475 return -EFAULT; 1476 } 1477 1478 if (image) { 1479 if (unlikely(proglen + ilen > oldproglen)) { 1480 pr_err("bpf_jit: fatal error\n"); 1481 return -EFAULT; 1482 } 1483 memcpy(image + proglen, temp, ilen); 1484 } 1485 proglen += ilen; 1486 addrs[i] = proglen; 1487 prog = temp; 1488 } 1489 1490 if (image && excnt != bpf_prog->aux->num_exentries) { 1491 pr_err("extable is not populated\n"); 1492 return -EFAULT; 1493 } 1494 return proglen; 1495 } 1496 1497 static void save_regs(const struct btf_func_model *m, u8 **prog, int nr_args, 1498 int stack_size) 1499 { 1500 int i; 1501 /* Store function arguments to stack. 1502 * For a function that accepts two pointers the sequence will be: 1503 * mov QWORD PTR [rbp-0x10],rdi 1504 * mov QWORD PTR [rbp-0x8],rsi 1505 */ 1506 for (i = 0; i < min(nr_args, 6); i++) 1507 emit_stx(prog, bytes_to_bpf_size(m->arg_size[i]), 1508 BPF_REG_FP, 1509 i == 5 ? X86_REG_R9 : BPF_REG_1 + i, 1510 -(stack_size - i * 8)); 1511 } 1512 1513 static void restore_regs(const struct btf_func_model *m, u8 **prog, int nr_args, 1514 int stack_size) 1515 { 1516 int i; 1517 1518 /* Restore function arguments from stack. 1519 * For a function that accepts two pointers the sequence will be: 1520 * EMIT4(0x48, 0x8B, 0x7D, 0xF0); mov rdi,QWORD PTR [rbp-0x10] 1521 * EMIT4(0x48, 0x8B, 0x75, 0xF8); mov rsi,QWORD PTR [rbp-0x8] 1522 */ 1523 for (i = 0; i < min(nr_args, 6); i++) 1524 emit_ldx(prog, bytes_to_bpf_size(m->arg_size[i]), 1525 i == 5 ? 
X86_REG_R9 : BPF_REG_1 + i, 1526 BPF_REG_FP, 1527 -(stack_size - i * 8)); 1528 } 1529 1530 static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog, 1531 struct bpf_prog *p, int stack_size, bool mod_ret) 1532 { 1533 u8 *prog = *pprog; 1534 int cnt = 0; 1535 1536 if (p->aux->sleepable) { 1537 if (emit_call(&prog, __bpf_prog_enter_sleepable, prog)) 1538 return -EINVAL; 1539 } else { 1540 if (emit_call(&prog, __bpf_prog_enter, prog)) 1541 return -EINVAL; 1542 /* remember prog start time returned by __bpf_prog_enter */ 1543 emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0); 1544 } 1545 1546 /* arg1: lea rdi, [rbp - stack_size] */ 1547 EMIT4(0x48, 0x8D, 0x7D, -stack_size); 1548 /* arg2: progs[i]->insnsi for interpreter */ 1549 if (!p->jited) 1550 emit_mov_imm64(&prog, BPF_REG_2, 1551 (long) p->insnsi >> 32, 1552 (u32) (long) p->insnsi); 1553 /* call JITed bpf program or interpreter */ 1554 if (emit_call(&prog, p->bpf_func, prog)) 1555 return -EINVAL; 1556 1557 /* BPF_TRAMP_MODIFY_RETURN trampolines can modify the return 1558 * of the previous call which is then passed on the stack to 1559 * the next BPF program. 1560 */ 1561 if (mod_ret) 1562 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8); 1563 1564 if (p->aux->sleepable) { 1565 if (emit_call(&prog, __bpf_prog_exit_sleepable, prog)) 1566 return -EINVAL; 1567 } else { 1568 /* arg1: mov rdi, progs[i] */ 1569 emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, 1570 (u32) (long) p); 1571 /* arg2: mov rsi, rbx <- start time in nsec */ 1572 emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6); 1573 if (emit_call(&prog, __bpf_prog_exit, prog)) 1574 return -EINVAL; 1575 } 1576 1577 *pprog = prog; 1578 return 0; 1579 } 1580 1581 static void emit_nops(u8 **pprog, unsigned int len) 1582 { 1583 unsigned int i, noplen; 1584 u8 *prog = *pprog; 1585 int cnt = 0; 1586 1587 while (len > 0) { 1588 noplen = len; 1589 1590 if (noplen > ASM_NOP_MAX) 1591 noplen = ASM_NOP_MAX; 1592 1593 for (i = 0; i < noplen; i++) 1594 EMIT1(ideal_nops[noplen][i]); 1595 len -= noplen; 1596 } 1597 1598 *pprog = prog; 1599 } 1600 1601 static void emit_align(u8 **pprog, u32 align) 1602 { 1603 u8 *target, *prog = *pprog; 1604 1605 target = PTR_ALIGN(prog, align); 1606 if (target != prog) 1607 emit_nops(&prog, target - prog); 1608 1609 *pprog = prog; 1610 } 1611 1612 static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond) 1613 { 1614 u8 *prog = *pprog; 1615 int cnt = 0; 1616 s64 offset; 1617 1618 offset = func - (ip + 2 + 4); 1619 if (!is_simm32(offset)) { 1620 pr_err("Target %p is out of range\n", func); 1621 return -EINVAL; 1622 } 1623 EMIT2_off32(0x0F, jmp_cond + 0x10, offset); 1624 *pprog = prog; 1625 return 0; 1626 } 1627 1628 static int invoke_bpf(const struct btf_func_model *m, u8 **pprog, 1629 struct bpf_tramp_progs *tp, int stack_size) 1630 { 1631 int i; 1632 u8 *prog = *pprog; 1633 1634 for (i = 0; i < tp->nr_progs; i++) { 1635 if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size, false)) 1636 return -EINVAL; 1637 } 1638 *pprog = prog; 1639 return 0; 1640 } 1641 1642 static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog, 1643 struct bpf_tramp_progs *tp, int stack_size, 1644 u8 **branches) 1645 { 1646 u8 *prog = *pprog; 1647 int i, cnt = 0; 1648 1649 /* The first fmod_ret program will receive a garbage return value. 1650 * Set this to 0 to avoid confusing the program. 
	 */
	emit_mov_imm32(&prog, false, BPF_REG_0, 0);
	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
	for (i = 0; i < tp->nr_progs; i++) {
		if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size, true))
			return -EINVAL;

		/* mod_ret prog stored return value into [rbp - 8]. Emit:
		 * if (*(u64 *)(rbp - 8) != 0)
		 *	goto do_fexit;
		 */
		/* cmp QWORD PTR [rbp - 0x8], 0x0 */
		EMIT4(0x48, 0x83, 0x7d, 0xf8); EMIT1(0x00);

		/* Save the location of the branch and generate 6 nops
		 * (4 bytes for an offset and 2 bytes for the jump). These nops
		 * are replaced with a conditional jump once do_fexit (i.e. the
		 * start of the fexit invocation) is finalized.
		 */
		branches[i] = prog;
		emit_nops(&prog, 4 + 2);
	}

	*pprog = prog;
	return 0;
}

/* Example:
 * __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
 * its 'struct btf_func_model' will be nr_args=2
 * The assembly code when eth_type_trans is executing after trampoline:
 *
 * push rbp
 * mov rbp, rsp
 * sub rsp, 16                     // space for skb and dev
 * push rbx                        // temp regs to pass start time
 * mov qword ptr [rbp - 16], rdi   // save skb pointer to stack
 * mov qword ptr [rbp - 8], rsi    // save dev pointer to stack
 * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
 * mov rbx, rax                    // remember start time if bpf stats are enabled
 * lea rdi, [rbp - 16]             // R1==ctx of bpf prog
 * call addr_of_jited_FENTRY_prog
 * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off
 * mov rsi, rbx                    // prog start time
 * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
 * mov rdi, qword ptr [rbp - 16]   // restore skb pointer from stack
 * mov rsi, qword ptr [rbp - 8]    // restore dev pointer from stack
 * pop rbx
 * leave
 * ret
 *
 * eth_type_trans has a 5 byte nop at the beginning. These 5 bytes will be
 * replaced with 'call generated_bpf_trampoline'. When it returns
 * eth_type_trans will continue executing with original skb and dev pointers.
1705 * 1706 * The assembly code when eth_type_trans is called from trampoline: 1707 * 1708 * push rbp 1709 * mov rbp, rsp 1710 * sub rsp, 24 // space for skb, dev, return value 1711 * push rbx // temp regs to pass start time 1712 * mov qword ptr [rbp - 24], rdi // save skb pointer to stack 1713 * mov qword ptr [rbp - 16], rsi // save dev pointer to stack 1714 * call __bpf_prog_enter // rcu_read_lock and preempt_disable 1715 * mov rbx, rax // remember start time if bpf stats are enabled 1716 * lea rdi, [rbp - 24] // R1==ctx of bpf prog 1717 * call addr_of_jited_FENTRY_prog // bpf prog can access skb and dev 1718 * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off 1719 * mov rsi, rbx // prog start time 1720 * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math 1721 * mov rdi, qword ptr [rbp - 24] // restore skb pointer from stack 1722 * mov rsi, qword ptr [rbp - 16] // restore dev pointer from stack 1723 * call eth_type_trans+5 // execute body of eth_type_trans 1724 * mov qword ptr [rbp - 8], rax // save return value 1725 * call __bpf_prog_enter // rcu_read_lock and preempt_disable 1726 * mov rbx, rax // remember start time in bpf stats are enabled 1727 * lea rdi, [rbp - 24] // R1==ctx of bpf prog 1728 * call addr_of_jited_FEXIT_prog // bpf prog can access skb, dev, return value 1729 * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off 1730 * mov rsi, rbx // prog start time 1731 * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math 1732 * mov rax, qword ptr [rbp - 8] // restore eth_type_trans's return value 1733 * pop rbx 1734 * leave 1735 * add rsp, 8 // skip eth_type_trans's frame 1736 * ret // return to its caller 1737 */ 1738 int arch_prepare_bpf_trampoline(void *image, void *image_end, 1739 const struct btf_func_model *m, u32 flags, 1740 struct bpf_tramp_progs *tprogs, 1741 void *orig_call) 1742 { 1743 int ret, i, cnt = 0, nr_args = m->nr_args; 1744 int stack_size = nr_args * 8; 1745 struct bpf_tramp_progs *fentry = &tprogs[BPF_TRAMP_FENTRY]; 1746 struct bpf_tramp_progs *fexit = &tprogs[BPF_TRAMP_FEXIT]; 1747 struct bpf_tramp_progs *fmod_ret = &tprogs[BPF_TRAMP_MODIFY_RETURN]; 1748 u8 **branches = NULL; 1749 u8 *prog; 1750 1751 /* x86-64 supports up to 6 arguments. 7+ can be added in the future */ 1752 if (nr_args > 6) 1753 return -ENOTSUPP; 1754 1755 if ((flags & BPF_TRAMP_F_RESTORE_REGS) && 1756 (flags & BPF_TRAMP_F_SKIP_FRAME)) 1757 return -EINVAL; 1758 1759 if (flags & BPF_TRAMP_F_CALL_ORIG) 1760 stack_size += 8; /* room for return value of orig_call */ 1761 1762 if (flags & BPF_TRAMP_F_SKIP_FRAME) 1763 /* skip patched call instruction and point orig_call to actual 1764 * body of the kernel function. 
1765 */ 1766 orig_call += X86_PATCH_SIZE; 1767 1768 prog = image; 1769 1770 EMIT1(0x55); /* push rbp */ 1771 EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */ 1772 EMIT4(0x48, 0x83, 0xEC, stack_size); /* sub rsp, stack_size */ 1773 EMIT1(0x53); /* push rbx */ 1774 1775 save_regs(m, &prog, nr_args, stack_size); 1776 1777 if (fentry->nr_progs) 1778 if (invoke_bpf(m, &prog, fentry, stack_size)) 1779 return -EINVAL; 1780 1781 if (fmod_ret->nr_progs) { 1782 branches = kcalloc(fmod_ret->nr_progs, sizeof(u8 *), 1783 GFP_KERNEL); 1784 if (!branches) 1785 return -ENOMEM; 1786 1787 if (invoke_bpf_mod_ret(m, &prog, fmod_ret, stack_size, 1788 branches)) { 1789 ret = -EINVAL; 1790 goto cleanup; 1791 } 1792 } 1793 1794 if (flags & BPF_TRAMP_F_CALL_ORIG) { 1795 if (fentry->nr_progs || fmod_ret->nr_progs) 1796 restore_regs(m, &prog, nr_args, stack_size); 1797 1798 /* call original function */ 1799 if (emit_call(&prog, orig_call, prog)) { 1800 ret = -EINVAL; 1801 goto cleanup; 1802 } 1803 /* remember return value in a stack for bpf prog to access */ 1804 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8); 1805 } 1806 1807 if (fmod_ret->nr_progs) { 1808 /* From Intel 64 and IA-32 Architectures Optimization 1809 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler 1810 * Coding Rule 11: All branch targets should be 16-byte 1811 * aligned. 1812 */ 1813 emit_align(&prog, 16); 1814 /* Update the branches saved in invoke_bpf_mod_ret with the 1815 * aligned address of do_fexit. 1816 */ 1817 for (i = 0; i < fmod_ret->nr_progs; i++) 1818 emit_cond_near_jump(&branches[i], prog, branches[i], 1819 X86_JNE); 1820 } 1821 1822 if (fexit->nr_progs) 1823 if (invoke_bpf(m, &prog, fexit, stack_size)) { 1824 ret = -EINVAL; 1825 goto cleanup; 1826 } 1827 1828 if (flags & BPF_TRAMP_F_RESTORE_REGS) 1829 restore_regs(m, &prog, nr_args, stack_size); 1830 1831 /* This needs to be done regardless. If there were fmod_ret programs, 1832 * the return value is only updated on the stack and still needs to be 1833 * restored to R0. 1834 */ 1835 if (flags & BPF_TRAMP_F_CALL_ORIG) 1836 /* restore original return value back into RAX */ 1837 emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8); 1838 1839 EMIT1(0x5B); /* pop rbx */ 1840 EMIT1(0xC9); /* leave */ 1841 if (flags & BPF_TRAMP_F_SKIP_FRAME) 1842 /* skip our return address and return to parent */ 1843 EMIT4(0x48, 0x83, 0xC4, 8); /* add rsp, 8 */ 1844 EMIT1(0xC3); /* ret */ 1845 /* Make sure the trampoline generation logic doesn't overflow */ 1846 if (WARN_ON_ONCE(prog > (u8 *)image_end - BPF_INSN_SAFETY)) { 1847 ret = -EFAULT; 1848 goto cleanup; 1849 } 1850 ret = prog - (u8 *)image; 1851 1852 cleanup: 1853 kfree(branches); 1854 return ret; 1855 } 1856 1857 static int emit_fallback_jump(u8 **pprog) 1858 { 1859 u8 *prog = *pprog; 1860 int err = 0; 1861 1862 #ifdef CONFIG_RETPOLINE 1863 /* Note that this assumes the the compiler uses external 1864 * thunks for indirect calls. Both clang and GCC use the same 1865 * naming convention for external thunks. 1866 */ 1867 err = emit_jump(&prog, __x86_indirect_thunk_rdx, prog); 1868 #else 1869 int cnt = 0; 1870 1871 EMIT2(0xFF, 0xE2); /* jmp rdx */ 1872 #endif 1873 *pprog = prog; 1874 return err; 1875 } 1876 1877 static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs) 1878 { 1879 u8 *jg_reloc, *prog = *pprog; 1880 int pivot, err, jg_bytes = 1, cnt = 0; 1881 s64 jg_offset; 1882 1883 if (a == b) { 1884 /* Leaf node of recursion, i.e. not a range of indices 1885 * anymore. 
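		 *
		 * (progs[] was sorted by arch_prepare_bpf_dispatcher(), so
		 * this recursion emits a balanced binary search over the
		 * target addresses in rdx: a leaf compares rdx against one
		 * address and either 'je's there or falls through to the
		 * indirect-jump fallback.)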
1886 */ 1887 EMIT1(add_1mod(0x48, BPF_REG_3)); /* cmp rdx,func */ 1888 if (!is_simm32(progs[a])) 1889 return -1; 1890 EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3), 1891 progs[a]); 1892 err = emit_cond_near_jump(&prog, /* je func */ 1893 (void *)progs[a], prog, 1894 X86_JE); 1895 if (err) 1896 return err; 1897 1898 err = emit_fallback_jump(&prog); /* jmp thunk/indirect */ 1899 if (err) 1900 return err; 1901 1902 *pprog = prog; 1903 return 0; 1904 } 1905 1906 /* Not a leaf node, so we pivot, and recursively descend into 1907 * the lower and upper ranges. 1908 */ 1909 pivot = (b - a) / 2; 1910 EMIT1(add_1mod(0x48, BPF_REG_3)); /* cmp rdx,func */ 1911 if (!is_simm32(progs[a + pivot])) 1912 return -1; 1913 EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3), progs[a + pivot]); 1914 1915 if (pivot > 2) { /* jg upper_part */ 1916 /* Require near jump. */ 1917 jg_bytes = 4; 1918 EMIT2_off32(0x0F, X86_JG + 0x10, 0); 1919 } else { 1920 EMIT2(X86_JG, 0); 1921 } 1922 jg_reloc = prog; 1923 1924 err = emit_bpf_dispatcher(&prog, a, a + pivot, /* emit lower_part */ 1925 progs); 1926 if (err) 1927 return err; 1928 1929 /* From Intel 64 and IA-32 Architectures Optimization 1930 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler 1931 * Coding Rule 11: All branch targets should be 16-byte 1932 * aligned. 1933 */ 1934 emit_align(&prog, 16); 1935 jg_offset = prog - jg_reloc; 1936 emit_code(jg_reloc - jg_bytes, jg_offset, jg_bytes); 1937 1938 err = emit_bpf_dispatcher(&prog, a + pivot + 1, /* emit upper_part */ 1939 b, progs); 1940 if (err) 1941 return err; 1942 1943 *pprog = prog; 1944 return 0; 1945 } 1946 1947 static int cmp_ips(const void *a, const void *b) 1948 { 1949 const s64 *ipa = a; 1950 const s64 *ipb = b; 1951 1952 if (*ipa > *ipb) 1953 return 1; 1954 if (*ipa < *ipb) 1955 return -1; 1956 return 0; 1957 } 1958 1959 int arch_prepare_bpf_dispatcher(void *image, s64 *funcs, int num_funcs) 1960 { 1961 u8 *prog = image; 1962 1963 sort(funcs, num_funcs, sizeof(funcs[0]), cmp_ips, NULL); 1964 return emit_bpf_dispatcher(&prog, 0, num_funcs - 1, funcs); 1965 } 1966 1967 struct x64_jit_data { 1968 struct bpf_binary_header *header; 1969 int *addrs; 1970 u8 *image; 1971 int proglen; 1972 struct jit_context ctx; 1973 }; 1974 1975 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) 1976 { 1977 struct bpf_binary_header *header = NULL; 1978 struct bpf_prog *tmp, *orig_prog = prog; 1979 struct x64_jit_data *jit_data; 1980 int proglen, oldproglen = 0; 1981 struct jit_context ctx = {}; 1982 bool tmp_blinded = false; 1983 bool extra_pass = false; 1984 u8 *image = NULL; 1985 int *addrs; 1986 int pass; 1987 int i; 1988 1989 if (!prog->jit_requested) 1990 return orig_prog; 1991 1992 tmp = bpf_jit_blind_constants(prog); 1993 /* 1994 * If blinding was requested and we failed during blinding, 1995 * we must fall back to the interpreter. 
1996 */ 1997 if (IS_ERR(tmp)) 1998 return orig_prog; 1999 if (tmp != prog) { 2000 tmp_blinded = true; 2001 prog = tmp; 2002 } 2003 2004 jit_data = prog->aux->jit_data; 2005 if (!jit_data) { 2006 jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL); 2007 if (!jit_data) { 2008 prog = orig_prog; 2009 goto out; 2010 } 2011 prog->aux->jit_data = jit_data; 2012 } 2013 addrs = jit_data->addrs; 2014 if (addrs) { 2015 ctx = jit_data->ctx; 2016 oldproglen = jit_data->proglen; 2017 image = jit_data->image; 2018 header = jit_data->header; 2019 extra_pass = true; 2020 goto skip_init_addrs; 2021 } 2022 addrs = kmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL); 2023 if (!addrs) { 2024 prog = orig_prog; 2025 goto out_addrs; 2026 } 2027 2028 /* 2029 * Before first pass, make a rough estimation of addrs[] 2030 * each BPF instruction is translated to less than 64 bytes 2031 */ 2032 for (proglen = 0, i = 0; i <= prog->len; i++) { 2033 proglen += 64; 2034 addrs[i] = proglen; 2035 } 2036 ctx.cleanup_addr = proglen; 2037 skip_init_addrs: 2038 2039 /* 2040 * JITed image shrinks with every pass and the loop iterates 2041 * until the image stops shrinking. Very large BPF programs 2042 * may converge on the last pass. In such case do one more 2043 * pass to emit the final image. 2044 */ 2045 for (pass = 0; pass < 20 || image; pass++) { 2046 proglen = do_jit(prog, addrs, image, oldproglen, &ctx); 2047 if (proglen <= 0) { 2048 out_image: 2049 image = NULL; 2050 if (header) 2051 bpf_jit_binary_free(header); 2052 prog = orig_prog; 2053 goto out_addrs; 2054 } 2055 if (image) { 2056 if (proglen != oldproglen) { 2057 pr_err("bpf_jit: proglen=%d != oldproglen=%d\n", 2058 proglen, oldproglen); 2059 goto out_image; 2060 } 2061 break; 2062 } 2063 if (proglen == oldproglen) { 2064 /* 2065 * The number of entries in extable is the number of BPF_LDX 2066 * insns that access kernel memory via "pointer to BTF type". 2067 * The verifier changed their opcode from LDX|MEM|size 2068 * to LDX|PROBE_MEM|size to make JITing easier. 2069 */ 2070 u32 align = __alignof__(struct exception_table_entry); 2071 u32 extable_size = prog->aux->num_exentries * 2072 sizeof(struct exception_table_entry); 2073 2074 /* allocate module memory for x86 insns and extable */ 2075 header = bpf_jit_binary_alloc(roundup(proglen, align) + extable_size, 2076 &image, align, jit_fill_hole); 2077 if (!header) { 2078 prog = orig_prog; 2079 goto out_addrs; 2080 } 2081 prog->aux->extable = (void *) image + roundup(proglen, align); 2082 } 2083 oldproglen = proglen; 2084 cond_resched(); 2085 } 2086 2087 if (bpf_jit_enable > 1) 2088 bpf_jit_dump(prog->len, proglen, pass + 1, image); 2089 2090 if (image) { 2091 if (!prog->is_func || extra_pass) { 2092 bpf_tail_call_direct_fixup(prog); 2093 bpf_jit_binary_lock_ro(header); 2094 } else { 2095 jit_data->addrs = addrs; 2096 jit_data->ctx = ctx; 2097 jit_data->proglen = proglen; 2098 jit_data->image = image; 2099 jit_data->header = header; 2100 } 2101 prog->bpf_func = (void *)image; 2102 prog->jited = 1; 2103 prog->jited_len = proglen; 2104 } else { 2105 prog = orig_prog; 2106 } 2107 2108 if (!image || !prog->is_func || extra_pass) { 2109 if (image) 2110 bpf_prog_fill_jited_linfo(prog, addrs + 1); 2111 out_addrs: 2112 kfree(addrs); 2113 kfree(jit_data); 2114 prog->aux->jit_data = NULL; 2115 } 2116 out: 2117 if (tmp_blinded) 2118 bpf_jit_prog_release_other(prog, prog == orig_prog ? 2119 tmp : orig_prog); 2120 return prog; 2121 } 2122
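/*
 * A note on the pass loop in bpf_int_jit_compile() above: addrs[] starts
 * from a pessimistic 64 bytes per BPF insn, so early passes emit rel32
 * forms for many jumps. As the estimates tighten, a conditional jump can
 * shrink from the 6-byte 0x0f,0x8X rel32 form to the 2-byte 0x7X rel8
 * form, which is why the image is only written out once two consecutive
 * passes produce the same length.
 */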