// SPDX-License-Identifier: GPL-2.0-only
/*
 * BPF JIT compiler
 *
 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
 * Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <linux/memory.h>
#include <linux/sort.h>
#include <linux/init.h>
#include <asm/extable.h>
#include <asm/set_memory.h>
#include <asm/nospec-branch.h>
#include <asm/text-patching.h>

static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
{
	if (len == 1)
		*ptr = bytes;
	else if (len == 2)
		*(u16 *)ptr = bytes;
	else {
		*(u32 *)ptr = bytes;
		barrier();
	}
	return ptr + len;
}

#define EMIT(bytes, len) \
	do { prog = emit_code(prog, bytes, len); } while (0)

#define EMIT1(b1)		EMIT(b1, 1)
#define EMIT2(b1, b2)		EMIT((b1) + ((b2) << 8), 2)
#define EMIT3(b1, b2, b3)	EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
#define EMIT4(b1, b2, b3, b4)	EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)

#define EMIT1_off32(b1, off) \
	do { EMIT1(b1); EMIT(off, 4); } while (0)
#define EMIT2_off32(b1, b2, off) \
	do { EMIT2(b1, b2); EMIT(off, 4); } while (0)
#define EMIT3_off32(b1, b2, b3, off) \
	do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
#define EMIT4_off32(b1, b2, b3, b4, off) \
	do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)

#ifdef CONFIG_X86_KERNEL_IBT
#define EMIT_ENDBR()	EMIT(gen_endbr(), 4)
#else
#define EMIT_ENDBR()
#endif

static bool is_imm8(int value)
{
	return value <= 127 && value >= -128;
}

static bool is_simm32(s64 value)
{
	return value == (s64)(s32)value;
}

static bool is_uimm32(u64 value)
{
	return value == (u64)(u32)value;
}

/* mov dst, src */
#define EMIT_mov(DST, SRC)								 \
	do {										 \
		if (DST != SRC)								 \
			EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
	} while (0)

static int bpf_size_to_x86_bytes(int bpf_size)
{
	if (bpf_size == BPF_W)
		return 4;
	else if (bpf_size == BPF_H)
		return 2;
	else if (bpf_size == BPF_B)
		return 1;
	else if (bpf_size == BPF_DW)
		return 4; /* imm32 */
	else
		return 0;
}

/*
 * List of x86 cond jumps opcodes (. + s8)
 * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
 */
#define X86_JB  0x72
#define X86_JAE 0x73
#define X86_JE  0x74
#define X86_JNE 0x75
#define X86_JBE 0x76
#define X86_JA  0x77
#define X86_JL  0x7C
#define X86_JGE 0x7D
#define X86_JLE 0x7E
#define X86_JG  0x7F

/* Pick a register outside of BPF range for JIT internal work */
#define AUX_REG (MAX_BPF_JIT_REG + 1)
#define X86_REG_R9 (MAX_BPF_JIT_REG + 2)

/*
 * The following table maps BPF registers to x86-64 registers.
 *
 * x86-64 register R12 is unused, since if used as base address
 * register in load/store instructions, it always needs an
 * extra byte of encoding and is callee saved.
 *
 * x86-64 register R9 is not used by BPF programs, but can be used by BPF
 * trampoline. x86-64 register R10 is used for blinding (if enabled).
 */
static const int reg2hex[] = {
	[BPF_REG_0] = 0,  /* RAX */
	[BPF_REG_1] = 7,  /* RDI */
	[BPF_REG_2] = 6,  /* RSI */
	[BPF_REG_3] = 2,  /* RDX */
	[BPF_REG_4] = 1,  /* RCX */
	[BPF_REG_5] = 0,  /* R8  */
	[BPF_REG_6] = 3,  /* RBX callee saved */
	[BPF_REG_7] = 5,  /* R13 callee saved */
	[BPF_REG_8] = 6,  /* R14 callee saved */
	[BPF_REG_9] = 7,  /* R15 callee saved */
	[BPF_REG_FP] = 5, /* RBP readonly */
	[BPF_REG_AX] = 2, /* R10 temp register */
	[AUX_REG] = 3,    /* R11 temp register */
	[X86_REG_R9] = 1, /* R9 register, 6th function argument */
};
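
/*
 * Illustration (not from the original source): reg2hex[] holds only the
 * low 3 bits of each x86-64 register number. The fourth (high) bit of
 * r8..r15 is supplied separately in the REX prefix by add_1mod()/add_2mod()
 * below. E.g. BPF_REG_7 maps to R13: reg2hex[] gives 5, the same low bits
 * as RBP, and is_ereg() makes the REX byte contribute the missing high bit.
 */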

static const int reg2pt_regs[] = {
	[BPF_REG_0] = offsetof(struct pt_regs, ax),
	[BPF_REG_1] = offsetof(struct pt_regs, di),
	[BPF_REG_2] = offsetof(struct pt_regs, si),
	[BPF_REG_3] = offsetof(struct pt_regs, dx),
	[BPF_REG_4] = offsetof(struct pt_regs, cx),
	[BPF_REG_5] = offsetof(struct pt_regs, r8),
	[BPF_REG_6] = offsetof(struct pt_regs, bx),
	[BPF_REG_7] = offsetof(struct pt_regs, r13),
	[BPF_REG_8] = offsetof(struct pt_regs, r14),
	[BPF_REG_9] = offsetof(struct pt_regs, r15),
};

/*
 * is_ereg() == true if BPF register 'reg' maps to x86-64 r8..r15
 * which need extra byte of encoding.
 * rax,rcx,...,rbp have simpler encoding
 */
static bool is_ereg(u32 reg)
{
	return (1 << reg) & (BIT(BPF_REG_5) |
			     BIT(AUX_REG) |
			     BIT(BPF_REG_7) |
			     BIT(BPF_REG_8) |
			     BIT(BPF_REG_9) |
			     BIT(X86_REG_R9) |
			     BIT(BPF_REG_AX));
}

/*
 * is_ereg_8l() == true if BPF register 'reg' is mapped to access x86-64
 * lower 8-bit registers dil,sil,bpl,spl,r8b..r15b, which need extra byte
 * of encoding. al,cl,dl,bl have simpler encoding.
 */
static bool is_ereg_8l(u32 reg)
{
	return is_ereg(reg) ||
	    (1 << reg) & (BIT(BPF_REG_1) |
			  BIT(BPF_REG_2) |
			  BIT(BPF_REG_FP));
}

static bool is_axreg(u32 reg)
{
	return reg == BPF_REG_0;
}

/* Add modifiers if 'reg' maps to x86-64 registers R8..R15 */
static u8 add_1mod(u8 byte, u32 reg)
{
	if (is_ereg(reg))
		byte |= 1;
	return byte;
}

static u8 add_2mod(u8 byte, u32 r1, u32 r2)
{
	if (is_ereg(r1))
		byte |= 1;
	if (is_ereg(r2))
		byte |= 4;
	return byte;
}

/* Encode 'dst_reg' register into x86-64 opcode 'byte' */
static u8 add_1reg(u8 byte, u32 dst_reg)
{
	return byte + reg2hex[dst_reg];
}

/* Encode 'dst_reg' and 'src_reg' registers into x86-64 opcode 'byte' */
static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
{
	return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
}

/* Some 1-byte opcodes for binary ALU operations */
static u8 simple_alu_opcodes[] = {
	[BPF_ADD] = 0x01,
	[BPF_SUB] = 0x29,
	[BPF_AND] = 0x21,
	[BPF_OR] = 0x09,
	[BPF_XOR] = 0x31,
	[BPF_LSH] = 0xE0,
	[BPF_RSH] = 0xE8,
	[BPF_ARSH] = 0xF8,
};

static void jit_fill_hole(void *area, unsigned int size)
{
	/* Fill whole space with INT3 instructions */
	memset(area, 0xcc, size);
}

int bpf_arch_text_invalidate(void *dst, size_t len)
{
	return IS_ERR_OR_NULL(text_poke_set(dst, 0xcc, len));
}

struct jit_context {
	int cleanup_addr; /* Epilogue code offset */

	/*
	 * Program specific offsets of labels in the code; these rely on the
	 * JIT doing at least 2 passes, recording the position on the first
	 * pass, only to generate the correct offset on the second pass.
	 */
	int tail_call_direct_label;
	int tail_call_indirect_label;
};

/* Maximum number of bytes emitted while JITing one eBPF insn */
#define BPF_MAX_INSN_SIZE	128
#define BPF_INSN_SAFETY		64

/* Number of bytes emit_patch() needs to generate instructions */
#define X86_PATCH_SIZE		5
/* Number of bytes that will be skipped on tailcall */
#define X86_TAIL_CALL_OFFSET	(11 + ENDBR_INSN_SIZE)
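
/*
 * Illustration (not from the original source): the 11 bytes skipped on a
 * tail call are exactly what emit_prologue() below places ahead of the
 * second ENDBR:
 *   5 bytes  nop5 / patchable hook      (X86_PATCH_SIZE)
 *   2 bytes  xor eax, eax  or  nop2
 *   1 byte   push rbp
 *   3 bytes  mov rbp, rsp
 * plus ENDBR_INSN_SIZE for the leading ENDBR on IBT builds. The tail call
 * therefore lands on the second ENDBR, which is required on IBT since the
 * jump into the next program is indirect.
 */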

static void push_callee_regs(u8 **pprog, bool *callee_regs_used)
{
	u8 *prog = *pprog;

	if (callee_regs_used[0])
		EMIT1(0x53);         /* push rbx */
	if (callee_regs_used[1])
		EMIT2(0x41, 0x55);   /* push r13 */
	if (callee_regs_used[2])
		EMIT2(0x41, 0x56);   /* push r14 */
	if (callee_regs_used[3])
		EMIT2(0x41, 0x57);   /* push r15 */
	*pprog = prog;
}

static void pop_callee_regs(u8 **pprog, bool *callee_regs_used)
{
	u8 *prog = *pprog;

	if (callee_regs_used[3])
		EMIT2(0x41, 0x5F);   /* pop r15 */
	if (callee_regs_used[2])
		EMIT2(0x41, 0x5E);   /* pop r14 */
	if (callee_regs_used[1])
		EMIT2(0x41, 0x5D);   /* pop r13 */
	if (callee_regs_used[0])
		EMIT1(0x5B);         /* pop rbx */
	*pprog = prog;
}

/*
 * Emit x86-64 prologue code for BPF program.
 * bpf_tail_call helper will skip the first X86_TAIL_CALL_OFFSET bytes
 * while jumping to another program
 */
static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf,
			  bool tail_call_reachable, bool is_subprog)
{
	u8 *prog = *pprog;

	/* BPF trampoline can be made to work without these nops,
	 * but let's waste 5 bytes for now and optimize later
	 */
	EMIT_ENDBR();
	memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
	prog += X86_PATCH_SIZE;
	if (!ebpf_from_cbpf) {
		if (tail_call_reachable && !is_subprog)
			EMIT2(0x31, 0xC0); /* xor eax, eax */
		else
			EMIT2(0x66, 0x90); /* nop2 */
	}
	EMIT1(0x55);             /* push rbp */
	EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */

	/* X86_TAIL_CALL_OFFSET is here */
	EMIT_ENDBR();

	/* sub rsp, rounded_stack_depth */
	if (stack_depth)
		EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth, 8));
	if (tail_call_reachable)
		EMIT1(0x50);         /* push rax */
	*pprog = prog;
}

static int emit_patch(u8 **pprog, void *func, void *ip, u8 opcode)
{
	u8 *prog = *pprog;
	s64 offset;

	offset = func - (ip + X86_PATCH_SIZE);
	if (!is_simm32(offset)) {
		pr_err("Target call %p is out of range\n", func);
		return -ERANGE;
	}
	EMIT1_off32(opcode, offset);
	*pprog = prog;
	return 0;
}

static int emit_call(u8 **pprog, void *func, void *ip)
{
	return emit_patch(pprog, func, ip, 0xE8);
}

static int emit_jump(u8 **pprog, void *func, void *ip)
{
	return emit_patch(pprog, func, ip, 0xE9);
}
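
/*
 * Illustration (not from the original source): emit_patch() produces the
 * 5-byte rel32 forms "call rel32" (0xE8) and "jmp rel32" (0xE9). The
 * displacement is relative to the *end* of the instruction, hence the
 * "ip + X86_PATCH_SIZE" above. E.g. a call at address 0x1000 targeting
 * 0x1234 encodes rel32 = 0x1234 - (0x1000 + 5) = 0x22F, i.e. the bytes
 * E8 2F 02 00 00.
 */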

static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
				void *old_addr, void *new_addr)
{
	const u8 *nop_insn = x86_nops[5];
	u8 old_insn[X86_PATCH_SIZE];
	u8 new_insn[X86_PATCH_SIZE];
	u8 *prog;
	int ret;

	memcpy(old_insn, nop_insn, X86_PATCH_SIZE);
	if (old_addr) {
		prog = old_insn;
		ret = t == BPF_MOD_CALL ?
		      emit_call(&prog, old_addr, ip) :
		      emit_jump(&prog, old_addr, ip);
		if (ret)
			return ret;
	}

	memcpy(new_insn, nop_insn, X86_PATCH_SIZE);
	if (new_addr) {
		prog = new_insn;
		ret = t == BPF_MOD_CALL ?
		      emit_call(&prog, new_addr, ip) :
		      emit_jump(&prog, new_addr, ip);
		if (ret)
			return ret;
	}

	ret = -EBUSY;
	mutex_lock(&text_mutex);
	if (memcmp(ip, old_insn, X86_PATCH_SIZE))
		goto out;
	ret = 1;
	if (memcmp(ip, new_insn, X86_PATCH_SIZE)) {
		text_poke_bp(ip, new_insn, X86_PATCH_SIZE, NULL);
		ret = 0;
	}
out:
	mutex_unlock(&text_mutex);
	return ret;
}

int __init bpf_arch_init_dispatcher_early(void *ip)
{
	const u8 *nop_insn = x86_nops[5];

	if (is_endbr(*(u32 *)ip))
		ip += ENDBR_INSN_SIZE;

	if (memcmp(ip, nop_insn, X86_PATCH_SIZE))
		text_poke_early(ip, nop_insn, X86_PATCH_SIZE);
	return 0;
}

int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
		       void *old_addr, void *new_addr)
{
	if (!is_kernel_text((long)ip) &&
	    !is_bpf_text_address((long)ip))
		/* BPF poking in modules is not supported */
		return -EINVAL;

	/*
	 * See emit_prologue(), for IBT builds the trampoline hook is preceded
	 * with an ENDBR instruction.
	 */
	if (is_endbr(*(u32 *)ip))
		ip += ENDBR_INSN_SIZE;

	return __bpf_arch_text_poke(ip, t, old_addr, new_addr);
}

#define EMIT_LFENCE()	EMIT3(0x0F, 0xAE, 0xE8)

static void emit_indirect_jump(u8 **pprog, int reg, u8 *ip)
{
	u8 *prog = *pprog;

	if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) {
		EMIT_LFENCE();
		EMIT2(0xFF, 0xE0 + reg);
	} else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) {
		OPTIMIZER_HIDE_VAR(reg);
		emit_jump(&prog, &__x86_indirect_thunk_array[reg], ip);
	} else {
		EMIT2(0xFF, 0xE0 + reg);	/* jmp *%\reg */
		if (IS_ENABLED(CONFIG_RETPOLINE) || IS_ENABLED(CONFIG_SLS))
			EMIT1(0xCC);		/* int3 */
	}

	*pprog = prog;
}

static void emit_return(u8 **pprog, u8 *ip)
{
	u8 *prog = *pprog;

	if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) {
		emit_jump(&prog, &__x86_return_thunk, ip);
	} else {
		EMIT1(0xC3);		/* ret */
		if (IS_ENABLED(CONFIG_SLS))
			EMIT1(0xCC);	/* int3 */
	}

	*pprog = prog;
}
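
/*
 * Illustration (not from the original source): in the non-retpoline case
 * emit_indirect_jump() encodes "jmp *%reg" as FF /4, i.e. 0xFF followed by
 * a ModRM byte of 0xE0 + the raw x86 register number. The tail-call path
 * below passes reg = 1 (rcx), producing the two bytes FF E1 = "jmp *%rcx".
 * Under retpoline the same jump is routed through the matching
 * __x86_indirect_thunk_array[] entry instead.
 */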

/*
 * Generate the following code:
 *
 * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ...
 *   if (index >= array->map.max_entries)
 *     goto out;
 *   if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT)
 *     goto out;
 *   prog = array->ptrs[index];
 *   if (prog == NULL)
 *     goto out;
 *   goto *(prog->bpf_func + prologue_size);
 * out:
 */
static void emit_bpf_tail_call_indirect(u8 **pprog, bool *callee_regs_used,
					u32 stack_depth, u8 *ip,
					struct jit_context *ctx)
{
	int tcc_off = -4 - round_up(stack_depth, 8);
	u8 *prog = *pprog, *start = *pprog;
	int offset;

	/*
	 * rdi - pointer to ctx
	 * rsi - pointer to bpf_array
	 * rdx - index in bpf_array
	 */

	/*
	 * if (index >= array->map.max_entries)
	 *	goto out;
	 */
	EMIT2(0x89, 0xD2);                        /* mov edx, edx */
	EMIT3(0x39, 0x56,                         /* cmp dword ptr [rsi + 16], edx */
	      offsetof(struct bpf_array, map.max_entries));

	offset = ctx->tail_call_indirect_label - (prog + 2 - start);
	EMIT2(X86_JBE, offset);                   /* jbe out */

	/*
	 * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT)
	 *	goto out;
	 */
	EMIT2_off32(0x8B, 0x85, tcc_off);         /* mov eax, dword ptr [rbp - tcc_off] */
	EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);     /* cmp eax, MAX_TAIL_CALL_CNT */

	offset = ctx->tail_call_indirect_label - (prog + 2 - start);
	EMIT2(X86_JAE, offset);                   /* jae out */
	EMIT3(0x83, 0xC0, 0x01);                  /* add eax, 1 */
	EMIT2_off32(0x89, 0x85, tcc_off);         /* mov dword ptr [rbp - tcc_off], eax */

	/* prog = array->ptrs[index]; */
	EMIT4_off32(0x48, 0x8B, 0x8C, 0xD6,       /* mov rcx, [rsi + rdx * 8 + offsetof(...)] */
		    offsetof(struct bpf_array, ptrs));

	/*
	 * if (prog == NULL)
	 *	goto out;
	 */
	EMIT3(0x48, 0x85, 0xC9);                  /* test rcx,rcx */

	offset = ctx->tail_call_indirect_label - (prog + 2 - start);
	EMIT2(X86_JE, offset);                    /* je out */

	pop_callee_regs(&prog, callee_regs_used);

	EMIT1(0x58);                              /* pop rax */
	if (stack_depth)
		EMIT3_off32(0x48, 0x81, 0xC4,     /* add rsp, sd */
			    round_up(stack_depth, 8));

	/* goto *(prog->bpf_func + X86_TAIL_CALL_OFFSET); */
	EMIT4(0x48, 0x8B, 0x49,                   /* mov rcx, qword ptr [rcx + 32] */
	      offsetof(struct bpf_prog, bpf_func));
	EMIT4(0x48, 0x83, 0xC1,                   /* add rcx, X86_TAIL_CALL_OFFSET */
	      X86_TAIL_CALL_OFFSET);
	/*
	 * Now we're ready to jump into next BPF program
	 * rdi == ctx (1st arg)
	 * rcx == prog->bpf_func + X86_TAIL_CALL_OFFSET
	 */
	emit_indirect_jump(&prog, 1 /* rcx */, ip + (prog - start));

	/* out: */
	ctx->tail_call_indirect_label = prog - start;
	*pprog = prog;
}
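
/*
 * Illustration (not from the original source): the direct variant below
 * relies on runtime text poking. It emits a nop5 placeholder at
 * poke->tailcall_target plus a bypass jump over it; while the target map
 * slot is empty the bypass is taken and the tail call falls through.
 * bpf_tail_call_direct_fixup() later turns the nop5 into a direct jump to
 * the target program and nops out the bypass, so a populated slot costs a
 * single unconditional jump at runtime, with only the tail-call counter
 * check remaining.
 */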

static void emit_bpf_tail_call_direct(struct bpf_jit_poke_descriptor *poke,
				      u8 **pprog, u8 *ip,
				      bool *callee_regs_used, u32 stack_depth,
				      struct jit_context *ctx)
{
	int tcc_off = -4 - round_up(stack_depth, 8);
	u8 *prog = *pprog, *start = *pprog;
	int offset;

	/*
	 * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT)
	 *	goto out;
	 */
	EMIT2_off32(0x8B, 0x85, tcc_off);             /* mov eax, dword ptr [rbp - tcc_off] */
	EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);         /* cmp eax, MAX_TAIL_CALL_CNT */

	offset = ctx->tail_call_direct_label - (prog + 2 - start);
	EMIT2(X86_JAE, offset);                       /* jae out */
	EMIT3(0x83, 0xC0, 0x01);                      /* add eax, 1 */
	EMIT2_off32(0x89, 0x85, tcc_off);             /* mov dword ptr [rbp - tcc_off], eax */

	poke->tailcall_bypass = ip + (prog - start);
	poke->adj_off = X86_TAIL_CALL_OFFSET;
	poke->tailcall_target = ip + ctx->tail_call_direct_label - X86_PATCH_SIZE;
	poke->bypass_addr = (u8 *)poke->tailcall_target + X86_PATCH_SIZE;

	emit_jump(&prog, (u8 *)poke->tailcall_target + X86_PATCH_SIZE,
		  poke->tailcall_bypass);

	pop_callee_regs(&prog, callee_regs_used);
	EMIT1(0x58);                                  /* pop rax */
	if (stack_depth)
		EMIT3_off32(0x48, 0x81, 0xC4, round_up(stack_depth, 8));

	memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
	prog += X86_PATCH_SIZE;

	/* out: */
	ctx->tail_call_direct_label = prog - start;

	*pprog = prog;
}

static void bpf_tail_call_direct_fixup(struct bpf_prog *prog)
{
	struct bpf_jit_poke_descriptor *poke;
	struct bpf_array *array;
	struct bpf_prog *target;
	int i, ret;

	for (i = 0; i < prog->aux->size_poke_tab; i++) {
		poke = &prog->aux->poke_tab[i];
		if (poke->aux && poke->aux != prog->aux)
			continue;

		WARN_ON_ONCE(READ_ONCE(poke->tailcall_target_stable));

		if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
			continue;

		array = container_of(poke->tail_call.map, struct bpf_array, map);
		mutex_lock(&array->aux->poke_mutex);
		target = array->ptrs[poke->tail_call.key];
		if (target) {
			ret = __bpf_arch_text_poke(poke->tailcall_target,
						   BPF_MOD_JUMP, NULL,
						   (u8 *)target->bpf_func +
						   poke->adj_off);
			BUG_ON(ret < 0);
			ret = __bpf_arch_text_poke(poke->tailcall_bypass,
						   BPF_MOD_JUMP,
						   (u8 *)poke->tailcall_target +
						   X86_PATCH_SIZE, NULL);
			BUG_ON(ret < 0);
		}
		WRITE_ONCE(poke->tailcall_target_stable, true);
		mutex_unlock(&array->aux->poke_mutex);
	}
}

static void emit_mov_imm32(u8 **pprog, bool sign_propagate,
			   u32 dst_reg, const u32 imm32)
{
	u8 *prog = *pprog;
	u8 b1, b2, b3;

	/*
	 * Optimization: if imm32 is positive, use 'mov %eax, imm32'
	 * (which zero-extends imm32) to save 2 bytes.
	 */
	if (sign_propagate && (s32)imm32 < 0) {
		/* 'mov %rax, imm32' sign extends imm32 */
		b1 = add_1mod(0x48, dst_reg);
		b2 = 0xC7;
		b3 = 0xC0;
		EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
		goto done;
	}

	/*
	 * Optimization: if imm32 is zero, use 'xor %eax, %eax'
	 * to save 3 bytes.
	 */
	if (imm32 == 0) {
		if (is_ereg(dst_reg))
			EMIT1(add_2mod(0x40, dst_reg, dst_reg));
		b2 = 0x31; /* xor */
		b3 = 0xC0;
		EMIT2(b2, add_2reg(b3, dst_reg, dst_reg));
		goto done;
	}

	/* mov %eax, imm32 */
	if (is_ereg(dst_reg))
		EMIT1(add_1mod(0x40, dst_reg));
	EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
done:
	*pprog = prog;
}
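
/*
 * Illustration (not from the original source): for dst_reg == BPF_REG_0
 * (rax) the three emit_mov_imm32() shapes cost:
 *   xor eax, eax            31 C0                   2 bytes (imm == 0)
 *   mov eax, 0x11223344     B8 44 33 22 11          5 bytes (zero-extends)
 *   mov rax, -2             48 C7 C0 FE FF FF FF    7 bytes (sign-extends)
 * which is why the zero and non-negative cases avoid the REX.W form.
 */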

static void emit_mov_imm64(u8 **pprog, u32 dst_reg,
			   const u32 imm32_hi, const u32 imm32_lo)
{
	u8 *prog = *pprog;

	if (is_uimm32(((u64)imm32_hi << 32) | (u32)imm32_lo)) {
		/*
		 * For emitting plain u32, where sign bit must not be
		 * propagated LLVM tends to load imm64 over mov32
		 * directly, so save couple of bytes by just doing
		 * 'mov %eax, imm32' instead.
		 */
		emit_mov_imm32(&prog, false, dst_reg, imm32_lo);
	} else {
		/* movabsq rax, imm64 */
		EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
		EMIT(imm32_lo, 4);
		EMIT(imm32_hi, 4);
	}

	*pprog = prog;
}

static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg)
{
	u8 *prog = *pprog;

	if (is64) {
		/* mov dst, src */
		EMIT_mov(dst_reg, src_reg);
	} else {
		/* mov32 dst, src */
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT1(add_2mod(0x40, dst_reg, src_reg));
		EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
	}

	*pprog = prog;
}

/* Emit the suffix (ModR/M etc) for addressing *(ptr_reg + off) and val_reg */
static void emit_insn_suffix(u8 **pprog, u32 ptr_reg, u32 val_reg, int off)
{
	u8 *prog = *pprog;

	if (is_imm8(off)) {
		/* 1-byte signed displacement.
		 *
		 * If off == 0 we could skip this and save one extra byte, but
		 * special case of x86 R13 which always needs an offset is not
		 * worth the hassle
		 */
		EMIT2(add_2reg(0x40, ptr_reg, val_reg), off);
	} else {
		/* 4-byte signed displacement */
		EMIT1_off32(add_2reg(0x80, ptr_reg, val_reg), off);
	}
	*pprog = prog;
}

/*
 * Emit a REX byte if it will be necessary to address these registers
 */
static void maybe_emit_mod(u8 **pprog, u32 dst_reg, u32 src_reg, bool is64)
{
	u8 *prog = *pprog;

	if (is64)
		EMIT1(add_2mod(0x48, dst_reg, src_reg));
	else if (is_ereg(dst_reg) || is_ereg(src_reg))
		EMIT1(add_2mod(0x40, dst_reg, src_reg));
	*pprog = prog;
}

/*
 * Similar version of maybe_emit_mod() for a single register
 */
static void maybe_emit_1mod(u8 **pprog, u32 reg, bool is64)
{
	u8 *prog = *pprog;

	if (is64)
		EMIT1(add_1mod(0x48, reg));
	else if (is_ereg(reg))
		EMIT1(add_1mod(0x40, reg));
	*pprog = prog;
}

/* LDX: dst_reg = *(u8*)(src_reg + off) */
static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
{
	u8 *prog = *pprog;

	switch (size) {
	case BPF_B:
		/* Emit 'movzx rax, byte ptr [rax + off]' */
		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
		break;
	case BPF_H:
		/* Emit 'movzx rax, word ptr [rax + off]' */
		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
		break;
	case BPF_W:
		/* Emit 'mov eax, dword ptr [rax+0x14]' */
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
		else
			EMIT1(0x8B);
		break;
	case BPF_DW:
		/* Emit 'mov rax, qword ptr [rax+0x14]' */
		EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
		break;
	}
	emit_insn_suffix(&prog, src_reg, dst_reg, off);
	*pprog = prog;
}
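
/*
 * Illustration (not from the original source): a BPF_B load such as
 * "r0 = *(u8 *)(r1 + 8)" goes through the BPF_B case plus
 * emit_insn_suffix() and becomes
 *   48 0F B6 47 08    movzx rax, byte ptr [rdi + 8]
 * where 0x47 is ModRM mod=01 (disp8), reg=rax (dst), rm=rdi (src).
 */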

/* STX: *(u8*)(dst_reg + off) = src_reg */
static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
{
	u8 *prog = *pprog;

	switch (size) {
	case BPF_B:
		/* Emit 'mov byte ptr [rax + off], al' */
		if (is_ereg(dst_reg) || is_ereg_8l(src_reg))
			/* Add extra byte for eregs or SIL,DIL,BPL in src_reg */
			EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
		else
			EMIT1(0x88);
		break;
	case BPF_H:
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
		else
			EMIT2(0x66, 0x89);
		break;
	case BPF_W:
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
		else
			EMIT1(0x89);
		break;
	case BPF_DW:
		EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
		break;
	}
	emit_insn_suffix(&prog, dst_reg, src_reg, off);
	*pprog = prog;
}

static int emit_atomic(u8 **pprog, u8 atomic_op,
		       u32 dst_reg, u32 src_reg, s16 off, u8 bpf_size)
{
	u8 *prog = *pprog;

	EMIT1(0xF0); /* lock prefix */

	maybe_emit_mod(&prog, dst_reg, src_reg, bpf_size == BPF_DW);

	/* emit opcode */
	switch (atomic_op) {
	case BPF_ADD:
	case BPF_AND:
	case BPF_OR:
	case BPF_XOR:
		/* lock *(u32/u64*)(dst_reg + off) <op>= src_reg */
		EMIT1(simple_alu_opcodes[atomic_op]);
		break;
	case BPF_ADD | BPF_FETCH:
		/* src_reg = atomic_fetch_add(dst_reg + off, src_reg); */
		EMIT2(0x0F, 0xC1);
		break;
	case BPF_XCHG:
		/* src_reg = atomic_xchg(dst_reg + off, src_reg); */
		EMIT1(0x87);
		break;
	case BPF_CMPXCHG:
		/* r0 = atomic_cmpxchg(dst_reg + off, r0, src_reg); */
		EMIT2(0x0F, 0xB1);
		break;
	default:
		pr_err("bpf_jit: unknown atomic opcode %02x\n", atomic_op);
		return -EFAULT;
	}

	emit_insn_suffix(&prog, dst_reg, src_reg, off);

	*pprog = prog;
	return 0;
}

bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs)
{
	u32 reg = x->fixup >> 8;

	/* jump over faulting load and clear dest register */
	*(unsigned long *)((void *)regs + reg) = 0;
	regs->ip += x->fixup & 0xff;
	return true;
}
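
/*
 * Illustration (not from the original source): ex->fixup is packed in
 * do_jit() as (insn_size | (pt_regs_offset << 8)). For a faulting 5-byte
 * BPF_PROBE_MEM load into BPF_REG_6 (rbx) it would be
 *   5 | (offsetof(struct pt_regs, bx) << 8)
 * so the handler above skips the 5-byte mov and zeroes regs->bx.
 */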

static void detect_reg_usage(struct bpf_insn *insn, int insn_cnt,
			     bool *regs_used, bool *tail_call_seen)
{
	int i;

	for (i = 1; i <= insn_cnt; i++, insn++) {
		if (insn->code == (BPF_JMP | BPF_TAIL_CALL))
			*tail_call_seen = true;
		if (insn->dst_reg == BPF_REG_6 || insn->src_reg == BPF_REG_6)
			regs_used[0] = true;
		if (insn->dst_reg == BPF_REG_7 || insn->src_reg == BPF_REG_7)
			regs_used[1] = true;
		if (insn->dst_reg == BPF_REG_8 || insn->src_reg == BPF_REG_8)
			regs_used[2] = true;
		if (insn->dst_reg == BPF_REG_9 || insn->src_reg == BPF_REG_9)
			regs_used[3] = true;
	}
}

static void emit_nops(u8 **pprog, int len)
{
	u8 *prog = *pprog;
	int i, noplen;

	while (len > 0) {
		noplen = len;

		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;

		for (i = 0; i < noplen; i++)
			EMIT1(x86_nops[noplen][i]);
		len -= noplen;
	}

	*pprog = prog;
}

/* emit the 3-byte VEX prefix
 *
 * r: same as rex.r, extra bit for ModRM reg field
 * x: same as rex.x, extra bit for SIB index field
 * b: same as rex.b, extra bit for ModRM r/m, or SIB base
 * m: opcode map select, encoding escape bytes e.g. 0x0f38
 * w: same as rex.w (32 bit or 64 bit) or opcode specific
 * src_reg2: additional source reg (encoded as BPF reg)
 * l: vector length (128 bit or 256 bit) or reserved
 * pp: opcode prefix (none, 0x66, 0xf2 or 0xf3)
 */
static void emit_3vex(u8 **pprog, bool r, bool x, bool b, u8 m,
		      bool w, u8 src_reg2, bool l, u8 pp)
{
	u8 *prog = *pprog;
	const u8 b0 = 0xc4; /* first byte of 3-byte VEX prefix */
	u8 b1, b2;
	u8 vvvv = reg2hex[src_reg2];

	/* reg2hex gives only the lower 3 bit of vvvv */
	if (is_ereg(src_reg2))
		vvvv |= 1 << 3;

	/*
	 * 2nd byte of 3-byte VEX prefix
	 * ~ means bit inverted encoding
	 *
	 *    7                           0
	 *  +---+---+---+---+---+---+---+---+
	 *  |~R |~X |~B |         m         |
	 *  +---+---+---+---+---+---+---+---+
	 */
	b1 = (!r << 7) | (!x << 6) | (!b << 5) | (m & 0x1f);
	/*
	 * 3rd byte of 3-byte VEX prefix
	 *
	 *    7                           0
	 *  +---+---+---+---+---+---+---+---+
	 *  | W |     ~vvvv     | L |   pp  |
	 *  +---+---+---+---+---+---+---+---+
	 */
	b2 = (w << 7) | ((~vvvv & 0xf) << 3) | (l << 2) | (pp & 3);

	EMIT3(b0, b1, b2);
	*pprog = prog;
}

/* emit BMI2 shift instruction */
static void emit_shiftx(u8 **pprog, u32 dst_reg, u8 src_reg, bool is64, u8 op)
{
	u8 *prog = *pprog;
	bool r = is_ereg(dst_reg);
	u8 m = 2; /* escape code 0f38 */

	emit_3vex(&prog, r, false, r, m, is64, src_reg, false, op);
	EMIT2(0xf7, add_2reg(0xC0, dst_reg, dst_reg));
	*pprog = prog;
}
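
/*
 * Illustration (not from the original source, byte values worked out by
 * hand): for a 64-bit BPF_LSH with dst = BPF_REG_0 (rax) and the shift
 * count in BPF_REG_2 (rsi), emit_shiftx() produces
 *   C4 E2 C9 F7 C0    shlx rax, rax, rsi
 * i.e. VEX.66.0F38.W1 F7 /r with vvvv = ~rsi and ModRM reg = rm = rax.
 */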

#define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp)))

static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image,
		  int oldproglen, struct jit_context *ctx, bool jmp_padding)
{
	bool tail_call_reachable = bpf_prog->aux->tail_call_reachable;
	struct bpf_insn *insn = bpf_prog->insnsi;
	bool callee_regs_used[4] = {};
	int insn_cnt = bpf_prog->len;
	bool tail_call_seen = false;
	bool seen_exit = false;
	u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
	int i, excnt = 0;
	int ilen, proglen = 0;
	u8 *prog = temp;
	int err;

	detect_reg_usage(insn, insn_cnt, callee_regs_used,
			 &tail_call_seen);

	/* tail call's presence in current prog implies it is reachable */
	tail_call_reachable |= tail_call_seen;

	emit_prologue(&prog, bpf_prog->aux->stack_depth,
		      bpf_prog_was_classic(bpf_prog), tail_call_reachable,
		      bpf_prog->aux->func_idx != 0);
	push_callee_regs(&prog, callee_regs_used);

	ilen = prog - temp;
	if (rw_image)
		memcpy(rw_image + proglen, temp, ilen);
	proglen += ilen;
	addrs[0] = proglen;
	prog = temp;

	for (i = 1; i <= insn_cnt; i++, insn++) {
		const s32 imm32 = insn->imm;
		u32 dst_reg = insn->dst_reg;
		u32 src_reg = insn->src_reg;
		u8 b2 = 0, b3 = 0;
		u8 *start_of_ldx;
		s64 jmp_offset;
		u8 jmp_cond;
		u8 *func;
		int nops;

		switch (insn->code) {
			/* ALU */
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU64 | BPF_ADD | BPF_X:
		case BPF_ALU64 | BPF_SUB | BPF_X:
		case BPF_ALU64 | BPF_AND | BPF_X:
		case BPF_ALU64 | BPF_OR | BPF_X:
		case BPF_ALU64 | BPF_XOR | BPF_X:
			maybe_emit_mod(&prog, dst_reg, src_reg,
				       BPF_CLASS(insn->code) == BPF_ALU64);
			b2 = simple_alu_opcodes[BPF_OP(insn->code)];
			EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
			break;

		case BPF_ALU64 | BPF_MOV | BPF_X:
		case BPF_ALU | BPF_MOV | BPF_X:
			emit_mov_reg(&prog,
				     BPF_CLASS(insn->code) == BPF_ALU64,
				     dst_reg, src_reg);
			break;

			/* neg dst */
		case BPF_ALU | BPF_NEG:
		case BPF_ALU64 | BPF_NEG:
			maybe_emit_1mod(&prog, dst_reg,
					BPF_CLASS(insn->code) == BPF_ALU64);
			EMIT2(0xF7, add_1reg(0xD8, dst_reg));
			break;

		case BPF_ALU | BPF_ADD | BPF_K:
		case BPF_ALU | BPF_SUB | BPF_K:
		case BPF_ALU | BPF_AND | BPF_K:
		case BPF_ALU | BPF_OR | BPF_K:
		case BPF_ALU | BPF_XOR | BPF_K:
		case BPF_ALU64 | BPF_ADD | BPF_K:
		case BPF_ALU64 | BPF_SUB | BPF_K:
		case BPF_ALU64 | BPF_AND | BPF_K:
		case BPF_ALU64 | BPF_OR | BPF_K:
		case BPF_ALU64 | BPF_XOR | BPF_K:
			maybe_emit_1mod(&prog, dst_reg,
					BPF_CLASS(insn->code) == BPF_ALU64);

			/*
			 * b3 holds 'normal' opcode, b2 short form only valid
			 * in case dst is eax/rax.
			 */
			switch (BPF_OP(insn->code)) {
			case BPF_ADD:
				b3 = 0xC0;
				b2 = 0x05;
				break;
			case BPF_SUB:
				b3 = 0xE8;
				b2 = 0x2D;
				break;
			case BPF_AND:
				b3 = 0xE0;
				b2 = 0x25;
				break;
			case BPF_OR:
				b3 = 0xC8;
				b2 = 0x0D;
				break;
			case BPF_XOR:
				b3 = 0xF0;
				b2 = 0x35;
				break;
			}

			if (is_imm8(imm32))
				EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
			else if (is_axreg(dst_reg))
				EMIT1_off32(b2, imm32);
			else
				EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
			break;

		case BPF_ALU64 | BPF_MOV | BPF_K:
		case BPF_ALU | BPF_MOV | BPF_K:
			emit_mov_imm32(&prog, BPF_CLASS(insn->code) == BPF_ALU64,
				       dst_reg, imm32);
			break;

		case BPF_LD | BPF_IMM | BPF_DW:
			emit_mov_imm64(&prog, dst_reg, insn[1].imm, insn[0].imm);
			insn++;
			i++;
			break;

			/* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
		case BPF_ALU | BPF_MOD | BPF_X:
		case BPF_ALU | BPF_DIV | BPF_X:
		case BPF_ALU | BPF_MOD | BPF_K:
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU64 | BPF_MOD | BPF_X:
		case BPF_ALU64 | BPF_DIV | BPF_X:
		case BPF_ALU64 | BPF_MOD | BPF_K:
		case BPF_ALU64 | BPF_DIV | BPF_K: {
			bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;

			if (dst_reg != BPF_REG_0)
				EMIT1(0x50); /* push rax */
			if (dst_reg != BPF_REG_3)
				EMIT1(0x52); /* push rdx */

			if (BPF_SRC(insn->code) == BPF_X) {
				if (src_reg == BPF_REG_0 ||
				    src_reg == BPF_REG_3) {
					/* mov r11, src_reg */
					EMIT_mov(AUX_REG, src_reg);
					src_reg = AUX_REG;
				}
			} else {
				/* mov r11, imm32 */
				EMIT3_off32(0x49, 0xC7, 0xC3, imm32);
				src_reg = AUX_REG;
			}

			if (dst_reg != BPF_REG_0)
				/* mov rax, dst_reg */
				emit_mov_reg(&prog, is64, BPF_REG_0, dst_reg);

			/*
			 * xor edx, edx
			 * equivalent to 'xor rdx, rdx', but one byte less
			 */
			EMIT2(0x31, 0xd2);

			/* div src_reg */
			maybe_emit_1mod(&prog, src_reg, is64);
			EMIT2(0xF7, add_1reg(0xF0, src_reg));

			if (BPF_OP(insn->code) == BPF_MOD &&
			    dst_reg != BPF_REG_3)
				/* mov dst_reg, rdx */
				emit_mov_reg(&prog, is64, dst_reg, BPF_REG_3);
			else if (BPF_OP(insn->code) == BPF_DIV &&
				 dst_reg != BPF_REG_0)
				/* mov dst_reg, rax */
				emit_mov_reg(&prog, is64, dst_reg, BPF_REG_0);

			if (dst_reg != BPF_REG_3)
				EMIT1(0x5A); /* pop rdx */
			if (dst_reg != BPF_REG_0)
				EMIT1(0x58); /* pop rax */
			break;
		}
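
		/*
		 * Illustration (not from the original source): x86 div
		 * implicitly uses rdx:rax, so the case above shuffles
		 * registers around it. E.g. "r1 /= 3" (BPF_ALU64) expands to:
		 *   push rax; push rdx      (both live in BPF terms)
		 *   mov r11, 3              (divisor into the temp reg)
		 *   mov rax, rdi            (dst into rax)
		 *   xor edx, edx
		 *   div r11                 (quotient ends up in rax)
		 *   mov rdi, rax
		 *   pop rdx; pop rax
		 */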

		case BPF_ALU | BPF_MUL | BPF_K:
		case BPF_ALU64 | BPF_MUL | BPF_K:
			maybe_emit_mod(&prog, dst_reg, dst_reg,
				       BPF_CLASS(insn->code) == BPF_ALU64);

			if (is_imm8(imm32))
				/* imul dst_reg, dst_reg, imm8 */
				EMIT3(0x6B, add_2reg(0xC0, dst_reg, dst_reg),
				      imm32);
			else
				/* imul dst_reg, dst_reg, imm32 */
				EMIT2_off32(0x69,
					    add_2reg(0xC0, dst_reg, dst_reg),
					    imm32);
			break;

		case BPF_ALU | BPF_MUL | BPF_X:
		case BPF_ALU64 | BPF_MUL | BPF_X:
			maybe_emit_mod(&prog, src_reg, dst_reg,
				       BPF_CLASS(insn->code) == BPF_ALU64);

			/* imul dst_reg, src_reg */
			EMIT3(0x0F, 0xAF, add_2reg(0xC0, src_reg, dst_reg));
			break;

			/* Shifts */
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_K:
		case BPF_ALU | BPF_ARSH | BPF_K:
		case BPF_ALU64 | BPF_LSH | BPF_K:
		case BPF_ALU64 | BPF_RSH | BPF_K:
		case BPF_ALU64 | BPF_ARSH | BPF_K:
			maybe_emit_1mod(&prog, dst_reg,
					BPF_CLASS(insn->code) == BPF_ALU64);

			b3 = simple_alu_opcodes[BPF_OP(insn->code)];
			if (imm32 == 1)
				EMIT2(0xD1, add_1reg(b3, dst_reg));
			else
				EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
			break;

		case BPF_ALU | BPF_LSH | BPF_X:
		case BPF_ALU | BPF_RSH | BPF_X:
		case BPF_ALU | BPF_ARSH | BPF_X:
		case BPF_ALU64 | BPF_LSH | BPF_X:
		case BPF_ALU64 | BPF_RSH | BPF_X:
		case BPF_ALU64 | BPF_ARSH | BPF_X:
			/* BMI2 shifts aren't better when shift count is already in rcx */
			if (boot_cpu_has(X86_FEATURE_BMI2) && src_reg != BPF_REG_4) {
				/* shrx/sarx/shlx dst_reg, dst_reg, src_reg */
				bool w = (BPF_CLASS(insn->code) == BPF_ALU64);
				u8 op;

				switch (BPF_OP(insn->code)) {
				case BPF_LSH:
					op = 1; /* prefix 0x66 */
					break;
				case BPF_RSH:
					op = 3; /* prefix 0xf2 */
					break;
				case BPF_ARSH:
					op = 2; /* prefix 0xf3 */
					break;
				}

				emit_shiftx(&prog, dst_reg, src_reg, w, op);

				break;
			}

			if (src_reg != BPF_REG_4) { /* common case */
				/* Check for bad case when dst_reg == rcx */
				if (dst_reg == BPF_REG_4) {
					/* mov r11, dst_reg */
					EMIT_mov(AUX_REG, dst_reg);
					dst_reg = AUX_REG;
				} else {
					EMIT1(0x51); /* push rcx */
				}
				/* mov rcx, src_reg */
				EMIT_mov(BPF_REG_4, src_reg);
			}

			/* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */
			maybe_emit_1mod(&prog, dst_reg,
					BPF_CLASS(insn->code) == BPF_ALU64);

			b3 = simple_alu_opcodes[BPF_OP(insn->code)];
			EMIT2(0xD3, add_1reg(b3, dst_reg));

			if (src_reg != BPF_REG_4) {
				if (insn->dst_reg == BPF_REG_4)
					/* mov dst_reg, r11 */
					EMIT_mov(insn->dst_reg, AUX_REG);
				else
					EMIT1(0x59); /* pop rcx */
			}

			break;

		case BPF_ALU | BPF_END | BPF_FROM_BE:
			switch (imm32) {
			case 16:
				/* Emit 'ror %ax, 8' to swap lower 2 bytes */
				EMIT1(0x66);
				if (is_ereg(dst_reg))
					EMIT1(0x41);
				EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);

				/* Emit 'movzwl eax, ax' */
				if (is_ereg(dst_reg))
					EMIT3(0x45, 0x0F, 0xB7);
				else
					EMIT2(0x0F, 0xB7);
				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 32:
				/* Emit 'bswap eax' to swap lower 4 bytes */
				if (is_ereg(dst_reg))
					EMIT2(0x41, 0x0F);
				else
					EMIT1(0x0F);
				EMIT1(add_1reg(0xC8, dst_reg));
				break;
			case 64:
				/* Emit 'bswap rax' to swap 8 bytes */
				EMIT3(add_1mod(0x48, dst_reg), 0x0F,
				      add_1reg(0xC8, dst_reg));
				break;
			}
			break;
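
		/*
		 * Illustration (not from the original source): bswap is
		 * encoded as 0F C8+rd, so for dst_reg == BPF_REG_0 the
		 * FROM_BE cases above produce
		 *   0F C8       bswap eax   (imm32 == 32)
		 *   48 0F C8    bswap rax   (imm32 == 64)
		 * while the 16-bit case uses ror/movzx, since bswap has no
		 * defined 16-bit form.
		 */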

		case BPF_ALU | BPF_END | BPF_FROM_LE:
			switch (imm32) {
			case 16:
				/*
				 * Emit 'movzwl eax, ax' to zero extend 16-bit
				 * into 64 bit
				 */
				if (is_ereg(dst_reg))
					EMIT3(0x45, 0x0F, 0xB7);
				else
					EMIT2(0x0F, 0xB7);
				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 32:
				/* Emit 'mov eax, eax' to clear upper 32-bits */
				if (is_ereg(dst_reg))
					EMIT1(0x45);
				EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 64:
				/* nop */
				break;
			}
			break;

			/* speculation barrier */
		case BPF_ST | BPF_NOSPEC:
			EMIT_LFENCE();
			break;

			/* ST: *(u8*)(dst_reg + off) = imm */
		case BPF_ST | BPF_MEM | BPF_B:
			if (is_ereg(dst_reg))
				EMIT2(0x41, 0xC6);
			else
				EMIT1(0xC6);
			goto st;
		case BPF_ST | BPF_MEM | BPF_H:
			if (is_ereg(dst_reg))
				EMIT3(0x66, 0x41, 0xC7);
			else
				EMIT2(0x66, 0xC7);
			goto st;
		case BPF_ST | BPF_MEM | BPF_W:
			if (is_ereg(dst_reg))
				EMIT2(0x41, 0xC7);
			else
				EMIT1(0xC7);
			goto st;
		case BPF_ST | BPF_MEM | BPF_DW:
			EMIT2(add_1mod(0x48, dst_reg), 0xC7);

st:			if (is_imm8(insn->off))
				EMIT2(add_1reg(0x40, dst_reg), insn->off);
			else
				EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);

			EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
			break;

			/* STX: *(u8*)(dst_reg + off) = src_reg */
		case BPF_STX | BPF_MEM | BPF_B:
		case BPF_STX | BPF_MEM | BPF_H:
		case BPF_STX | BPF_MEM | BPF_W:
		case BPF_STX | BPF_MEM | BPF_DW:
			emit_stx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
			break;

			/* LDX: dst_reg = *(u8*)(src_reg + off) */
		case BPF_LDX | BPF_MEM | BPF_B:
		case BPF_LDX | BPF_PROBE_MEM | BPF_B:
		case BPF_LDX | BPF_MEM | BPF_H:
		case BPF_LDX | BPF_PROBE_MEM | BPF_H:
		case BPF_LDX | BPF_MEM | BPF_W:
		case BPF_LDX | BPF_PROBE_MEM | BPF_W:
		case BPF_LDX | BPF_MEM | BPF_DW:
		case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
			if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
				/* Though the verifier prevents negative insn->off in BPF_PROBE_MEM
				 * add abs(insn->off) to the limit to make sure that negative
				 * offset won't be an issue.
				 * insn->off is s16, so it won't affect valid pointers.
				 */
				u64 limit = TASK_SIZE_MAX + PAGE_SIZE + abs(insn->off);
				u8 *end_of_jmp1, *end_of_jmp2;

				/* Conservatively check that src_reg + insn->off is a kernel address:
				 * 1. src_reg + insn->off >= limit
				 * 2. src_reg + insn->off doesn't become small positive.
				 * Cannot do src_reg + insn->off >= limit in one branch,
				 * since it needs two spare registers, but JIT has only one.
				 */

				/* movabsq r11, limit */
				EMIT2(add_1mod(0x48, AUX_REG), add_1reg(0xB8, AUX_REG));
				EMIT((u32)limit, 4);
				EMIT(limit >> 32, 4);
				/* cmp src_reg, r11 */
				maybe_emit_mod(&prog, src_reg, AUX_REG, true);
				EMIT2(0x39, add_2reg(0xC0, src_reg, AUX_REG));
				/* if unsigned '<' goto end_of_jmp2 */
				EMIT2(X86_JB, 0);
				end_of_jmp1 = prog;

				/* mov r11, src_reg */
				emit_mov_reg(&prog, true, AUX_REG, src_reg);
				/* add r11, insn->off */
				maybe_emit_1mod(&prog, AUX_REG, true);
				EMIT2_off32(0x81, add_1reg(0xC0, AUX_REG), insn->off);
				/* jmp if not carry to start_of_ldx
				 * Otherwise ERR_PTR(-EINVAL) + 128 will be the user addr
				 * that has to be rejected.
				 */
				EMIT2(0x73 /* JNC */, 0);
				end_of_jmp2 = prog;

				/* xor dst_reg, dst_reg */
				emit_mov_imm32(&prog, false, dst_reg, 0);
				/* jmp byte_after_ldx */
				EMIT2(0xEB, 0);

				/* populate jmp_offset for JB above to jump to xor dst_reg */
				end_of_jmp1[-1] = end_of_jmp2 - end_of_jmp1;
				/* populate jmp_offset for JNC above to jump to start_of_ldx */
				start_of_ldx = prog;
				end_of_jmp2[-1] = start_of_ldx - end_of_jmp2;
			}
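			/*
			 * Illustration (not from the original source): the
			 * guard above has three outcomes. A user-space
			 * pointer (below the limit) skips the load and
			 * zeroes dst_reg; a kernel pointer whose insn->off
			 * addition wraps is likewise rejected via the carry
			 * check; only a genuine kernel address falls through
			 * to the load below, whose faults are then handled
			 * by the extable entry generated after it.
			 */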
			emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
			if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
				struct exception_table_entry *ex;
				u8 *_insn = image + proglen + (start_of_ldx - temp);
				s64 delta;

				/* populate jmp_offset for JMP above */
				start_of_ldx[-1] = prog - start_of_ldx;

				if (!bpf_prog->aux->extable)
					break;

				if (excnt >= bpf_prog->aux->num_exentries) {
					pr_err("ex gen bug\n");
					return -EFAULT;
				}
				ex = &bpf_prog->aux->extable[excnt++];

				delta = _insn - (u8 *)&ex->insn;
				if (!is_simm32(delta)) {
					pr_err("extable->insn doesn't fit into 32-bit\n");
					return -EFAULT;
				}
				/* switch ex to rw buffer for writes */
				ex = (void *)rw_image + ((void *)ex - (void *)image);

				ex->insn = delta;

				ex->data = EX_TYPE_BPF;

				if (dst_reg > BPF_REG_9) {
					pr_err("verifier error\n");
					return -EFAULT;
				}
				/*
				 * Compute size of x86 insn and its target dest x86 register.
				 * ex_handler_bpf() will use lower 8 bits to adjust
				 * pt_regs->ip to jump over this x86 instruction
				 * and upper bits to figure out which pt_regs to zero out.
				 * End result: x86 insn "mov rbx, qword ptr [rax+0x14]"
				 * of 4 bytes will be ignored and rbx will be zero inited.
				 */
				ex->fixup = (prog - start_of_ldx) | (reg2pt_regs[dst_reg] << 8);
			}
			break;
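
		/*
		 * Illustration (not from the original source): the fetching
		 * atomics below that x86 cannot express in one instruction
		 * (AND/OR/XOR with BPF_FETCH) are lowered to a cmpxchg retry
		 * loop, roughly:
		 *   restart:
		 *     rax = *(dst + off);
		 *     r11 = rax <op> src;
		 *     lock cmpxchg [dst + off], r11;
		 *     jne restart;
		 *     src = rax;   // pre-modification value
		 */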

		case BPF_STX | BPF_ATOMIC | BPF_W:
		case BPF_STX | BPF_ATOMIC | BPF_DW:
			if (insn->imm == (BPF_AND | BPF_FETCH) ||
			    insn->imm == (BPF_OR | BPF_FETCH) ||
			    insn->imm == (BPF_XOR | BPF_FETCH)) {
				bool is64 = BPF_SIZE(insn->code) == BPF_DW;
				u32 real_src_reg = src_reg;
				u32 real_dst_reg = dst_reg;
				u8 *branch_target;

				/*
				 * Can't be implemented with a single x86 insn.
				 * Need to do a CMPXCHG loop.
				 */

				/* Will need RAX as a CMPXCHG operand so save R0 */
				emit_mov_reg(&prog, true, BPF_REG_AX, BPF_REG_0);
				if (src_reg == BPF_REG_0)
					real_src_reg = BPF_REG_AX;
				if (dst_reg == BPF_REG_0)
					real_dst_reg = BPF_REG_AX;

				branch_target = prog;
				/* Load old value */
				emit_ldx(&prog, BPF_SIZE(insn->code),
					 BPF_REG_0, real_dst_reg, insn->off);
				/*
				 * Perform the (commutative) operation locally,
				 * put the result in the AUX_REG.
				 */
				emit_mov_reg(&prog, is64, AUX_REG, BPF_REG_0);
				maybe_emit_mod(&prog, AUX_REG, real_src_reg, is64);
				EMIT2(simple_alu_opcodes[BPF_OP(insn->imm)],
				      add_2reg(0xC0, AUX_REG, real_src_reg));
				/* Attempt to swap in new value */
				err = emit_atomic(&prog, BPF_CMPXCHG,
						  real_dst_reg, AUX_REG,
						  insn->off,
						  BPF_SIZE(insn->code));
				if (WARN_ON(err))
					return err;
				/*
				 * ZF tells us whether we won the race. If it's
				 * cleared we need to try again.
				 */
				EMIT2(X86_JNE, -(prog - branch_target) - 2);
				/* Return the pre-modification value */
				emit_mov_reg(&prog, is64, real_src_reg, BPF_REG_0);
				/* Restore R0 after clobbering RAX */
				emit_mov_reg(&prog, true, BPF_REG_0, BPF_REG_AX);
				break;
			}

			err = emit_atomic(&prog, insn->imm, dst_reg, src_reg,
					  insn->off, BPF_SIZE(insn->code));
			if (err)
				return err;
			break;

			/* call */
		case BPF_JMP | BPF_CALL:
			func = (u8 *) __bpf_call_base + imm32;
			if (tail_call_reachable) {
				/* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */
				EMIT3_off32(0x48, 0x8B, 0x85,
					    -round_up(bpf_prog->aux->stack_depth, 8) - 8);
				if (!imm32 || emit_call(&prog, func, image + addrs[i - 1] + 7))
					return -EINVAL;
			} else {
				if (!imm32 || emit_call(&prog, func, image + addrs[i - 1]))
					return -EINVAL;
			}
			break;

		case BPF_JMP | BPF_TAIL_CALL:
			if (imm32)
				emit_bpf_tail_call_direct(&bpf_prog->aux->poke_tab[imm32 - 1],
							  &prog, image + addrs[i - 1],
							  callee_regs_used,
							  bpf_prog->aux->stack_depth,
							  ctx);
			else
				emit_bpf_tail_call_indirect(&prog,
							    callee_regs_used,
							    bpf_prog->aux->stack_depth,
							    image + addrs[i - 1],
							    ctx);
			break;

			/* cond jump */
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JNE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JLT | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JLE | BPF_X:
		case BPF_JMP | BPF_JSGT | BPF_X:
		case BPF_JMP | BPF_JSLT | BPF_X:
		case BPF_JMP | BPF_JSGE | BPF_X:
		case BPF_JMP | BPF_JSLE | BPF_X:
		case BPF_JMP32 | BPF_JEQ | BPF_X:
		case BPF_JMP32 | BPF_JNE | BPF_X:
		case BPF_JMP32 | BPF_JGT | BPF_X:
		case BPF_JMP32 | BPF_JLT | BPF_X:
		case BPF_JMP32 | BPF_JGE | BPF_X:
		case BPF_JMP32 | BPF_JLE | BPF_X:
		case BPF_JMP32 | BPF_JSGT | BPF_X:
		case BPF_JMP32 | BPF_JSLT | BPF_X:
		case BPF_JMP32 | BPF_JSGE | BPF_X:
		case BPF_JMP32 | BPF_JSLE | BPF_X:
			/* cmp dst_reg, src_reg */
			maybe_emit_mod(&prog, dst_reg, src_reg,
				       BPF_CLASS(insn->code) == BPF_JMP);
			EMIT2(0x39, add_2reg(0xC0, dst_reg, src_reg));
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JSET | BPF_X:
		case BPF_JMP32 | BPF_JSET | BPF_X:
			/* test dst_reg, src_reg */
			maybe_emit_mod(&prog, dst_reg, src_reg,
				       BPF_CLASS(insn->code) == BPF_JMP);
			EMIT2(0x85, add_2reg(0xC0, dst_reg, src_reg));
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP32 | BPF_JSET | BPF_K:
			/* test dst_reg, imm32 */
			maybe_emit_1mod(&prog, dst_reg,
					BPF_CLASS(insn->code) == BPF_JMP);
			EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JNE | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JLT | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JLE | BPF_K:
		case BPF_JMP | BPF_JSGT | BPF_K:
		case BPF_JMP | BPF_JSLT | BPF_K:
		case BPF_JMP | BPF_JSGE | BPF_K:
		case BPF_JMP | BPF_JSLE | BPF_K:
		case BPF_JMP32 | BPF_JEQ | BPF_K:
		case BPF_JMP32 | BPF_JNE | BPF_K:
		case BPF_JMP32 | BPF_JGT | BPF_K:
		case BPF_JMP32 | BPF_JLT | BPF_K:
		case BPF_JMP32 | BPF_JGE | BPF_K:
		case BPF_JMP32 | BPF_JLE | BPF_K:
		case BPF_JMP32 | BPF_JSGT | BPF_K:
		case BPF_JMP32 | BPF_JSLT | BPF_K:
		case BPF_JMP32 | BPF_JSGE | BPF_K:
		case BPF_JMP32 | BPF_JSLE | BPF_K:
			/* test dst_reg, dst_reg to save one extra byte */
			if (imm32 == 0) {
				maybe_emit_mod(&prog, dst_reg, dst_reg,
					       BPF_CLASS(insn->code) == BPF_JMP);
				EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg));
				goto emit_cond_jmp;
			}

			/* cmp dst_reg, imm8/32 */
			maybe_emit_1mod(&prog, dst_reg,
					BPF_CLASS(insn->code) == BPF_JMP);

			if (is_imm8(imm32))
				EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
			else
				EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32);

emit_cond_jmp:		/* Convert BPF opcode to x86 */
			switch (BPF_OP(insn->code)) {
			case BPF_JEQ:
				jmp_cond = X86_JE;
				break;
			case BPF_JSET:
			case BPF_JNE:
				jmp_cond = X86_JNE;
				break;
			case BPF_JGT:
				/* GT is unsigned '>', JA in x86 */
				jmp_cond = X86_JA;
				break;
			case BPF_JLT:
				/* LT is unsigned '<', JB in x86 */
				jmp_cond = X86_JB;
				break;
			case BPF_JGE:
				/* GE is unsigned '>=', JAE in x86 */
				jmp_cond = X86_JAE;
				break;
			case BPF_JLE:
				/* LE is unsigned '<=', JBE in x86 */
				jmp_cond = X86_JBE;
				break;
			case BPF_JSGT:
				/* Signed '>', GT in x86 */
				jmp_cond = X86_JG;
				break;
			case BPF_JSLT:
				/* Signed '<', LT in x86 */
				jmp_cond = X86_JL;
				break;
			case BPF_JSGE:
				/* Signed '>=', GE in x86 */
				jmp_cond = X86_JGE;
				break;
			case BPF_JSLE:
				/* Signed '<=', LE in x86 */
				jmp_cond = X86_JLE;
				break;
			default: /* to silence GCC warning */
				return -EFAULT;
			}
			jmp_offset = addrs[i + insn->off] - addrs[i];
			if (is_imm8(jmp_offset)) {
				if (jmp_padding) {
					/* To keep the jmp_offset valid, the extra bytes are
					 * padded before the jump insn, so we subtract the
					 * 2 bytes of jmp_cond insn from INSN_SZ_DIFF.
					 *
					 * If the previous pass already emits an imm8
					 * jmp_cond, then this BPF insn won't shrink, so
					 * "nops" is 0.
					 *
					 * On the other hand, if the previous pass emits an
					 * imm32 jmp_cond, the extra 4 bytes(*) is padded to
					 * keep the image from shrinking further.
					 *
					 * (*) imm32 jmp_cond is 6 bytes, and imm8 jmp_cond
					 * is 2 bytes, so the size difference is 4 bytes.
					 */
					nops = INSN_SZ_DIFF - 2;
					if (nops != 0 && nops != 4) {
						pr_err("unexpected jmp_cond padding: %d bytes\n",
						       nops);
						return -EFAULT;
					}
					emit_nops(&prog, nops);
				}
				EMIT2(jmp_cond, jmp_offset);
			} else if (is_simm32(jmp_offset)) {
				EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
			} else {
				pr_err("cond_jmp gen bug %llx\n", jmp_offset);
				return -EFAULT;
			}

			break;

		case BPF_JMP | BPF_JA:
			if (insn->off == -1)
				/* -1 jmp instructions will always jump
				 * backwards two bytes. Explicitly handling
				 * this case avoids wasting too many passes
				 * when there are long sequences of replaced
				 * dead code.
				 */
				jmp_offset = -2;
			else
				jmp_offset = addrs[i + insn->off] - addrs[i];

			if (!jmp_offset) {
				/*
				 * If jmp_padding is enabled, the extra nops will
				 * be inserted. Otherwise, optimize out nop jumps.
				 */
				if (jmp_padding) {
					/* There are 3 possible conditions.
					 * (1) This BPF_JA is already optimized out in
					 *     the previous run, so there is no need
					 *     to pad any extra byte (0 byte).
					 * (2) The previous pass emits an imm8 jmp,
					 *     so we pad 2 bytes to match the previous
					 *     insn size.
					 * (3) Similarly, the previous pass emits an
					 *     imm32 jmp, and 5 bytes is padded.
					 */
					nops = INSN_SZ_DIFF;
					if (nops != 0 && nops != 2 && nops != 5) {
						pr_err("unexpected nop jump padding: %d bytes\n",
						       nops);
						return -EFAULT;
					}
					emit_nops(&prog, nops);
				}
				break;
			}
emit_jmp:
			if (is_imm8(jmp_offset)) {
				if (jmp_padding) {
					/* To avoid breaking jmp_offset, the extra bytes
					 * are padded before the actual jmp insn, so
					 * 2 bytes is subtracted from INSN_SZ_DIFF.
					 *
					 * If the previous pass already emits an imm8
					 * jmp, there is nothing to pad (0 byte).
					 *
					 * If it emits an imm32 jmp (5 bytes) previously
					 * and now an imm8 jmp (2 bytes), then we pad
					 * (5 - 2 = 3) bytes to stop the image from
					 * shrinking further.
					 */
					nops = INSN_SZ_DIFF - 2;
					if (nops != 0 && nops != 3) {
						pr_err("unexpected jump padding: %d bytes\n",
						       nops);
						return -EFAULT;
					}
					emit_nops(&prog, INSN_SZ_DIFF - 2);
				}
				EMIT2(0xEB, jmp_offset);
			} else if (is_simm32(jmp_offset)) {
				EMIT1_off32(0xE9, jmp_offset);
			} else {
				pr_err("jmp gen bug %llx\n", jmp_offset);
				return -EFAULT;
			}
			break;

		case BPF_JMP | BPF_EXIT:
			if (seen_exit) {
				jmp_offset = ctx->cleanup_addr - addrs[i];
				goto emit_jmp;
			}
			seen_exit = true;
			/* Update cleanup_addr */
			ctx->cleanup_addr = proglen;
			pop_callee_regs(&prog, callee_regs_used);
			EMIT1(0xC9);         /* leave */
			emit_return(&prog, image + addrs[i - 1] + (prog - temp));
			break;

		default:
			/*
			 * By design x86-64 JIT should support all BPF instructions.
			 * This error will be seen if new instruction was added
			 * to the interpreter, but not to the JIT, or if there is
			 * junk in bpf_prog.
			 */
			pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
			return -EINVAL;
		}

		ilen = prog - temp;
		if (ilen > BPF_MAX_INSN_SIZE) {
			pr_err("bpf_jit: fatal insn size error\n");
			return -EFAULT;
		}

		if (image) {
			/*
			 * When populating the image, assert that:
			 *
			 *  i) We do not write beyond the allocated space, and
			 * ii) addrs[i] did not change from the prior run, in order
			 *     to validate assumptions made for computing branch
			 *     displacements.
			 */
			if (unlikely(proglen + ilen > oldproglen ||
				     proglen + ilen != addrs[i])) {
				pr_err("bpf_jit: fatal error\n");
				return -EFAULT;
			}
			memcpy(rw_image + proglen, temp, ilen);
		}
		proglen += ilen;
		addrs[i] = proglen;
		prog = temp;
	}

	if (image && excnt != bpf_prog->aux->num_exentries) {
		pr_err("extable is not populated\n");
		return -EFAULT;
	}
	return proglen;
}
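
/*
 * Illustration (not from the original source): with BTF_FMODEL_STRUCT_ARG
 * a struct passed by value can span several registers. For
 * m->nr_args == 2 where arg 2 is a 16-byte struct, save_regs() below
 * spills three 8-byte slots: rdi (arg 1), then rsi and rdx (the two
 * halves of the struct), at [rbp - stack_size], [rbp - stack_size + 8]
 * and [rbp - stack_size + 16] respectively.
 */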

static void save_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
		      int stack_size)
{
	int i, j, arg_size, nr_regs;
	/* Store function arguments to stack.
	 * For a function that accepts two pointers the sequence will be:
	 * mov QWORD PTR [rbp-0x10],rdi
	 * mov QWORD PTR [rbp-0x8],rsi
	 */
	for (i = 0, j = 0; i < min(nr_args, 6); i++) {
		if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG) {
			nr_regs = (m->arg_size[i] + 7) / 8;
			arg_size = 8;
		} else {
			nr_regs = 1;
			arg_size = m->arg_size[i];
		}

		while (nr_regs) {
			emit_stx(prog, bytes_to_bpf_size(arg_size),
				 BPF_REG_FP,
				 j == 5 ? X86_REG_R9 : BPF_REG_1 + j,
				 -(stack_size - j * 8));
			nr_regs--;
			j++;
		}
	}
}

static void restore_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
			 int stack_size)
{
	int i, j, arg_size, nr_regs;

	/* Restore function arguments from stack.
	 * For a function that accepts two pointers the sequence will be:
	 * EMIT4(0x48, 0x8B, 0x7D, 0xF0); mov rdi,QWORD PTR [rbp-0x10]
	 * EMIT4(0x48, 0x8B, 0x75, 0xF8); mov rsi,QWORD PTR [rbp-0x8]
	 */
	for (i = 0, j = 0; i < min(nr_args, 6); i++) {
		if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG) {
			nr_regs = (m->arg_size[i] + 7) / 8;
			arg_size = 8;
		} else {
			nr_regs = 1;
			arg_size = m->arg_size[i];
		}

		while (nr_regs) {
			emit_ldx(prog, bytes_to_bpf_size(arg_size),
				 j == 5 ? X86_REG_R9 : BPF_REG_1 + j,
				 BPF_REG_FP,
				 -(stack_size - j * 8));
			nr_regs--;
			j++;
		}
	}
}
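
/*
 * Illustration (not from the original source): per attached program,
 * invoke_bpf_prog() below emits roughly
 *   run_ctx.bpf_cookie = cookie;
 *   start = bpf_trampoline_enter(p)(p, &run_ctx);
 *   if (start)
 *           r0 = p->bpf_func(&args_on_stack, p->insnsi); // optionally saved at [rbp - 8]
 *   bpf_trampoline_exit(p)(p, start, &run_ctx);
 * with the start time kept in rbx across the program call.
 */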

static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
			   struct bpf_tramp_link *l, int stack_size,
			   int run_ctx_off, bool save_ret)
{
	u8 *prog = *pprog;
	u8 *jmp_insn;
	int ctx_cookie_off = offsetof(struct bpf_tramp_run_ctx, bpf_cookie);
	struct bpf_prog *p = l->link.prog;
	u64 cookie = l->cookie;

	/* mov rdi, cookie */
	emit_mov_imm64(&prog, BPF_REG_1, (long) cookie >> 32, (u32) (long) cookie);

	/* Prepare struct bpf_tramp_run_ctx.
	 *
	 * bpf_tramp_run_ctx is already preserved by
	 * arch_prepare_bpf_trampoline().
	 *
	 * mov QWORD PTR [rbp - run_ctx_off + ctx_cookie_off], rdi
	 */
	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_1, -run_ctx_off + ctx_cookie_off);

	/* arg1: mov rdi, progs[i] */
	emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
	/* arg2: lea rsi, [rbp - run_ctx_off] */
	EMIT4(0x48, 0x8D, 0x75, -run_ctx_off);

	if (emit_call(&prog, bpf_trampoline_enter(p), prog))
		return -EINVAL;
	/* remember prog start time returned by __bpf_prog_enter */
	emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);

	/* if (__bpf_prog_enter*(prog) == 0)
	 *	goto skip_exec_of_prog;
	 */
	EMIT3(0x48, 0x85, 0xC0);  /* test rax,rax */
	/* emit 2 nops that will be replaced with JE insn */
	jmp_insn = prog;
	emit_nops(&prog, 2);

	/* arg1: lea rdi, [rbp - stack_size] */
	EMIT4(0x48, 0x8D, 0x7D, -stack_size);
	/* arg2: progs[i]->insnsi for interpreter */
	if (!p->jited)
		emit_mov_imm64(&prog, BPF_REG_2,
			       (long) p->insnsi >> 32,
			       (u32) (long) p->insnsi);
	/* call JITed bpf program or interpreter */
	if (emit_call(&prog, p->bpf_func, prog))
		return -EINVAL;

	/*
	 * BPF_TRAMP_MODIFY_RETURN trampolines can modify the return
	 * of the previous call which is then passed on the stack to
	 * the next BPF program.
	 *
	 * BPF_TRAMP_FENTRY trampoline may need to return the return
	 * value of BPF_PROG_TYPE_STRUCT_OPS prog.
	 */
	if (save_ret)
		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);

	/* replace 2 nops with JE insn, since jmp target is known */
	jmp_insn[0] = X86_JE;
	jmp_insn[1] = prog - jmp_insn - 2;

	/* arg1: mov rdi, progs[i] */
	emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
	/* arg2: mov rsi, rbx <- start time in nsec */
	emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
	/* arg3: lea rdx, [rbp - run_ctx_off] */
	EMIT4(0x48, 0x8D, 0x55, -run_ctx_off);
	if (emit_call(&prog, bpf_trampoline_exit(p), prog))
		return -EINVAL;

	*pprog = prog;
	return 0;
}

static void emit_align(u8 **pprog, u32 align)
{
	u8 *target, *prog = *pprog;

	target = PTR_ALIGN(prog, align);
	if (target != prog)
		emit_nops(&prog, target - prog);

	*pprog = prog;
}

static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond)
{
	u8 *prog = *pprog;
	s64 offset;

	offset = func - (ip + 2 + 4);
	if (!is_simm32(offset)) {
		pr_err("Target %p is out of range\n", func);
		return -EINVAL;
	}
	EMIT2_off32(0x0F, jmp_cond + 0x10, offset);
	*pprog = prog;
	return 0;
}

static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
		      struct bpf_tramp_links *tl, int stack_size,
		      int run_ctx_off, bool save_ret)
{
	int i;
	u8 *prog = *pprog;

	for (i = 0; i < tl->nr_links; i++) {
		if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size,
				    run_ctx_off, save_ret))
			return -EINVAL;
	}
	*pprog = prog;
	return 0;
}
static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond)
{
	u8 *prog = *pprog;
	s64 offset;

	offset = func - (ip + 2 + 4);
	if (!is_simm32(offset)) {
		pr_err("Target %p is out of range\n", func);
		return -EINVAL;
	}
	EMIT2_off32(0x0F, jmp_cond + 0x10, offset);
	*pprog = prog;
	return 0;
}

static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
		      struct bpf_tramp_links *tl, int stack_size,
		      int run_ctx_off, bool save_ret)
{
	int i;
	u8 *prog = *pprog;

	for (i = 0; i < tl->nr_links; i++) {
		if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size,
				    run_ctx_off, save_ret))
			return -EINVAL;
	}
	*pprog = prog;
	return 0;
}

static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
			      struct bpf_tramp_links *tl, int stack_size,
			      int run_ctx_off, u8 **branches)
{
	u8 *prog = *pprog;
	int i;

	/* The first fmod_ret program will receive a garbage return value.
	 * Set this to 0 to avoid confusing the program.
	 */
	emit_mov_imm32(&prog, false, BPF_REG_0, 0);
	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
	for (i = 0; i < tl->nr_links; i++) {
		if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size, run_ctx_off, true))
			return -EINVAL;

		/* mod_ret prog stored return value into [rbp - 8]. Emit:
		 * if (*(u64 *)(rbp - 8) != 0)
		 *	goto do_fexit;
		 */
		/* cmp QWORD PTR [rbp - 0x8], 0x0 */
		EMIT4(0x48, 0x83, 0x7d, 0xf8); EMIT1(0x00);

		/* Save the location of the branch and generate 6 nops
		 * (4 bytes for an offset and 2 bytes for the jump). These
		 * nops are replaced with a conditional jump once do_fexit
		 * (i.e. the start of the fexit invocation) is finalized.
		 */
		branches[i] = prog;
		emit_nops(&prog, 4 + 2);
	}

	*pprog = prog;
	return 0;
}
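
/* A sketch of what the loop above leaves behind per fmod_ret link,
 * once the 6-byte nop pad has been patched (see the do_fexit fixup
 * in arch_prepare_bpf_trampoline() below):
 *
 *	cmp QWORD PTR [rbp-0x8], 0x0
 *	jne do_fexit			// 0F 85 rel32, 6 bytes
 */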
2090 * 2091 * The assembly code when eth_type_trans is called from trampoline: 2092 * 2093 * push rbp 2094 * mov rbp, rsp 2095 * sub rsp, 24 // space for skb, dev, return value 2096 * push rbx // temp regs to pass start time 2097 * mov qword ptr [rbp - 24], rdi // save skb pointer to stack 2098 * mov qword ptr [rbp - 16], rsi // save dev pointer to stack 2099 * call __bpf_prog_enter // rcu_read_lock and preempt_disable 2100 * mov rbx, rax // remember start time if bpf stats are enabled 2101 * lea rdi, [rbp - 24] // R1==ctx of bpf prog 2102 * call addr_of_jited_FENTRY_prog // bpf prog can access skb and dev 2103 * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off 2104 * mov rsi, rbx // prog start time 2105 * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math 2106 * mov rdi, qword ptr [rbp - 24] // restore skb pointer from stack 2107 * mov rsi, qword ptr [rbp - 16] // restore dev pointer from stack 2108 * call eth_type_trans+5 // execute body of eth_type_trans 2109 * mov qword ptr [rbp - 8], rax // save return value 2110 * call __bpf_prog_enter // rcu_read_lock and preempt_disable 2111 * mov rbx, rax // remember start time in bpf stats are enabled 2112 * lea rdi, [rbp - 24] // R1==ctx of bpf prog 2113 * call addr_of_jited_FEXIT_prog // bpf prog can access skb, dev, return value 2114 * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off 2115 * mov rsi, rbx // prog start time 2116 * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math 2117 * mov rax, qword ptr [rbp - 8] // restore eth_type_trans's return value 2118 * pop rbx 2119 * leave 2120 * add rsp, 8 // skip eth_type_trans's frame 2121 * ret // return to its caller 2122 */ 2123 int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end, 2124 const struct btf_func_model *m, u32 flags, 2125 struct bpf_tramp_links *tlinks, 2126 void *func_addr) 2127 { 2128 int ret, i, nr_args = m->nr_args, extra_nregs = 0; 2129 int regs_off, ip_off, args_off, stack_size = nr_args * 8, run_ctx_off; 2130 struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY]; 2131 struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT]; 2132 struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN]; 2133 void *orig_call = func_addr; 2134 u8 **branches = NULL; 2135 u8 *prog; 2136 bool save_ret; 2137 2138 /* x86-64 supports up to 6 arguments. 7+ can be added in the future */ 2139 if (nr_args > 6) 2140 return -ENOTSUPP; 2141 2142 for (i = 0; i < MAX_BPF_FUNC_ARGS; i++) { 2143 if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG) 2144 extra_nregs += (m->arg_size[i] + 7) / 8 - 1; 2145 } 2146 if (nr_args + extra_nregs > 6) 2147 return -ENOTSUPP; 2148 stack_size += extra_nregs * 8; 2149 2150 /* Generated trampoline stack layout: 2151 * 2152 * RBP + 8 [ return address ] 2153 * RBP + 0 [ RBP ] 2154 * 2155 * RBP - 8 [ return value ] BPF_TRAMP_F_CALL_ORIG or 2156 * BPF_TRAMP_F_RET_FENTRY_RET flags 2157 * 2158 * [ reg_argN ] always 2159 * [ ... 
	/* room for return value of orig_call or fentry prog */
	save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET);
	if (save_ret)
		stack_size += 8;

	regs_off = stack_size;

	/* args count */
	stack_size += 8;
	args_off = stack_size;

	if (flags & BPF_TRAMP_F_IP_ARG)
		stack_size += 8; /* room for IP address argument */

	ip_off = stack_size;

	stack_size += (sizeof(struct bpf_tramp_run_ctx) + 7) & ~0x7;
	run_ctx_off = stack_size;

	if (flags & BPF_TRAMP_F_SKIP_FRAME) {
		/* skip patched call instruction and point orig_call to actual
		 * body of the kernel function.
		 */
		if (is_endbr(*(u32 *)orig_call))
			orig_call += ENDBR_INSN_SIZE;
		orig_call += X86_PATCH_SIZE;
	}

	prog = image;

	EMIT_ENDBR();
	EMIT1(0x55);		 /* push rbp */
	EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
	EMIT4(0x48, 0x83, 0xEC, stack_size); /* sub rsp, stack_size */
	EMIT1(0x53);		 /* push rbx */

	/* Store number of argument registers of the traced function:
	 *   mov rax, nr_args + extra_nregs
	 *   mov QWORD PTR [rbp - args_off], rax
	 */
	emit_mov_imm64(&prog, BPF_REG_0, 0, (u32) nr_args + extra_nregs);
	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -args_off);

	if (flags & BPF_TRAMP_F_IP_ARG) {
		/* Store IP address of the traced function:
		 *   movabsq rax, func_addr
		 *   mov QWORD PTR [rbp - ip_off], rax
		 */
		emit_mov_imm64(&prog, BPF_REG_0, (long) func_addr >> 32, (u32) (long) func_addr);
		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -ip_off);
	}

	save_regs(m, &prog, nr_args, regs_off);

	if (flags & BPF_TRAMP_F_CALL_ORIG) {
		/* arg1: mov rdi, im */
		emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
		if (emit_call(&prog, __bpf_tramp_enter, prog)) {
			ret = -EINVAL;
			goto cleanup;
		}
	}

	if (fentry->nr_links)
		if (invoke_bpf(m, &prog, fentry, regs_off, run_ctx_off,
			       flags & BPF_TRAMP_F_RET_FENTRY_RET))
			return -EINVAL;

	if (fmod_ret->nr_links) {
		branches = kcalloc(fmod_ret->nr_links, sizeof(u8 *),
				   GFP_KERNEL);
		if (!branches)
			return -ENOMEM;

		if (invoke_bpf_mod_ret(m, &prog, fmod_ret, regs_off,
				       run_ctx_off, branches)) {
			ret = -EINVAL;
			goto cleanup;
		}
	}

	if (flags & BPF_TRAMP_F_CALL_ORIG) {
		restore_regs(m, &prog, nr_args, regs_off);

		if (flags & BPF_TRAMP_F_ORIG_STACK) {
			emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, 8);
			EMIT2(0xff, 0xd0); /* call *rax */
		} else {
			/* call original function */
			if (emit_call(&prog, orig_call, prog)) {
				ret = -EINVAL;
				goto cleanup;
			}
		}
		/* remember return value on the stack for bpf prog to access */
		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
		im->ip_after_call = prog;
		memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
		prog += X86_PATCH_SIZE;
	}
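
	/* The 5-byte nop emitted above at im->ip_after_call is a patch
	 * site: when the trampoline image is torn down it can be replaced
	 * with a jump to im->ip_epilogue (set below) so that in-flight
	 * callers skip straight to the exit path; see bpf_tramp_image_put().
	 */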
2275 */ 2276 emit_align(&prog, 16); 2277 /* Update the branches saved in invoke_bpf_mod_ret with the 2278 * aligned address of do_fexit. 2279 */ 2280 for (i = 0; i < fmod_ret->nr_links; i++) 2281 emit_cond_near_jump(&branches[i], prog, branches[i], 2282 X86_JNE); 2283 } 2284 2285 if (fexit->nr_links) 2286 if (invoke_bpf(m, &prog, fexit, regs_off, run_ctx_off, false)) { 2287 ret = -EINVAL; 2288 goto cleanup; 2289 } 2290 2291 if (flags & BPF_TRAMP_F_RESTORE_REGS) 2292 restore_regs(m, &prog, nr_args, regs_off); 2293 2294 /* This needs to be done regardless. If there were fmod_ret programs, 2295 * the return value is only updated on the stack and still needs to be 2296 * restored to R0. 2297 */ 2298 if (flags & BPF_TRAMP_F_CALL_ORIG) { 2299 im->ip_epilogue = prog; 2300 /* arg1: mov rdi, im */ 2301 emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im); 2302 if (emit_call(&prog, __bpf_tramp_exit, prog)) { 2303 ret = -EINVAL; 2304 goto cleanup; 2305 } 2306 } 2307 /* restore return value of orig_call or fentry prog back into RAX */ 2308 if (save_ret) 2309 emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8); 2310 2311 EMIT1(0x5B); /* pop rbx */ 2312 EMIT1(0xC9); /* leave */ 2313 if (flags & BPF_TRAMP_F_SKIP_FRAME) 2314 /* skip our return address and return to parent */ 2315 EMIT4(0x48, 0x83, 0xC4, 8); /* add rsp, 8 */ 2316 emit_return(&prog, prog); 2317 /* Make sure the trampoline generation logic doesn't overflow */ 2318 if (WARN_ON_ONCE(prog > (u8 *)image_end - BPF_INSN_SAFETY)) { 2319 ret = -EFAULT; 2320 goto cleanup; 2321 } 2322 ret = prog - (u8 *)image; 2323 2324 cleanup: 2325 kfree(branches); 2326 return ret; 2327 } 2328 2329 static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs, u8 *image, u8 *buf) 2330 { 2331 u8 *jg_reloc, *prog = *pprog; 2332 int pivot, err, jg_bytes = 1; 2333 s64 jg_offset; 2334 2335 if (a == b) { 2336 /* Leaf node of recursion, i.e. not a range of indices 2337 * anymore. 2338 */ 2339 EMIT1(add_1mod(0x48, BPF_REG_3)); /* cmp rdx,func */ 2340 if (!is_simm32(progs[a])) 2341 return -1; 2342 EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3), 2343 progs[a]); 2344 err = emit_cond_near_jump(&prog, /* je func */ 2345 (void *)progs[a], image + (prog - buf), 2346 X86_JE); 2347 if (err) 2348 return err; 2349 2350 emit_indirect_jump(&prog, 2 /* rdx */, image + (prog - buf)); 2351 2352 *pprog = prog; 2353 return 0; 2354 } 2355 2356 /* Not a leaf node, so we pivot, and recursively descend into 2357 * the lower and upper ranges. 2358 */ 2359 pivot = (b - a) / 2; 2360 EMIT1(add_1mod(0x48, BPF_REG_3)); /* cmp rdx,func */ 2361 if (!is_simm32(progs[a + pivot])) 2362 return -1; 2363 EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3), progs[a + pivot]); 2364 2365 if (pivot > 2) { /* jg upper_part */ 2366 /* Require near jump. */ 2367 jg_bytes = 4; 2368 EMIT2_off32(0x0F, X86_JG + 0x10, 0); 2369 } else { 2370 EMIT2(X86_JG, 0); 2371 } 2372 jg_reloc = prog; 2373 2374 err = emit_bpf_dispatcher(&prog, a, a + pivot, /* emit lower_part */ 2375 progs, image, buf); 2376 if (err) 2377 return err; 2378 2379 /* From Intel 64 and IA-32 Architectures Optimization 2380 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler 2381 * Coding Rule 11: All branch targets should be 16-byte 2382 * aligned. 
2383 */ 2384 emit_align(&prog, 16); 2385 jg_offset = prog - jg_reloc; 2386 emit_code(jg_reloc - jg_bytes, jg_offset, jg_bytes); 2387 2388 err = emit_bpf_dispatcher(&prog, a + pivot + 1, /* emit upper_part */ 2389 b, progs, image, buf); 2390 if (err) 2391 return err; 2392 2393 *pprog = prog; 2394 return 0; 2395 } 2396 2397 static int cmp_ips(const void *a, const void *b) 2398 { 2399 const s64 *ipa = a; 2400 const s64 *ipb = b; 2401 2402 if (*ipa > *ipb) 2403 return 1; 2404 if (*ipa < *ipb) 2405 return -1; 2406 return 0; 2407 } 2408 2409 int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_funcs) 2410 { 2411 u8 *prog = buf; 2412 2413 sort(funcs, num_funcs, sizeof(funcs[0]), cmp_ips, NULL); 2414 return emit_bpf_dispatcher(&prog, 0, num_funcs - 1, funcs, image, buf); 2415 } 2416 2417 struct x64_jit_data { 2418 struct bpf_binary_header *rw_header; 2419 struct bpf_binary_header *header; 2420 int *addrs; 2421 u8 *image; 2422 int proglen; 2423 struct jit_context ctx; 2424 }; 2425 2426 #define MAX_PASSES 20 2427 #define PADDING_PASSES (MAX_PASSES - 5) 2428 2429 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) 2430 { 2431 struct bpf_binary_header *rw_header = NULL; 2432 struct bpf_binary_header *header = NULL; 2433 struct bpf_prog *tmp, *orig_prog = prog; 2434 struct x64_jit_data *jit_data; 2435 int proglen, oldproglen = 0; 2436 struct jit_context ctx = {}; 2437 bool tmp_blinded = false; 2438 bool extra_pass = false; 2439 bool padding = false; 2440 u8 *rw_image = NULL; 2441 u8 *image = NULL; 2442 int *addrs; 2443 int pass; 2444 int i; 2445 2446 if (!prog->jit_requested) 2447 return orig_prog; 2448 2449 tmp = bpf_jit_blind_constants(prog); 2450 /* 2451 * If blinding was requested and we failed during blinding, 2452 * we must fall back to the interpreter. 2453 */ 2454 if (IS_ERR(tmp)) 2455 return orig_prog; 2456 if (tmp != prog) { 2457 tmp_blinded = true; 2458 prog = tmp; 2459 } 2460 2461 jit_data = prog->aux->jit_data; 2462 if (!jit_data) { 2463 jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL); 2464 if (!jit_data) { 2465 prog = orig_prog; 2466 goto out; 2467 } 2468 prog->aux->jit_data = jit_data; 2469 } 2470 addrs = jit_data->addrs; 2471 if (addrs) { 2472 ctx = jit_data->ctx; 2473 oldproglen = jit_data->proglen; 2474 image = jit_data->image; 2475 header = jit_data->header; 2476 rw_header = jit_data->rw_header; 2477 rw_image = (void *)rw_header + ((void *)image - (void *)header); 2478 extra_pass = true; 2479 padding = true; 2480 goto skip_init_addrs; 2481 } 2482 addrs = kvmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL); 2483 if (!addrs) { 2484 prog = orig_prog; 2485 goto out_addrs; 2486 } 2487 2488 /* 2489 * Before first pass, make a rough estimation of addrs[] 2490 * each BPF instruction is translated to less than 64 bytes 2491 */ 2492 for (proglen = 0, i = 0; i <= prog->len; i++) { 2493 proglen += 64; 2494 addrs[i] = proglen; 2495 } 2496 ctx.cleanup_addr = proglen; 2497 skip_init_addrs: 2498 2499 /* 2500 * JITed image shrinks with every pass and the loop iterates 2501 * until the image stops shrinking. Very large BPF programs 2502 * may converge on the last pass. In such case do one more 2503 * pass to emit the final image. 
2504 */ 2505 for (pass = 0; pass < MAX_PASSES || image; pass++) { 2506 if (!padding && pass >= PADDING_PASSES) 2507 padding = true; 2508 proglen = do_jit(prog, addrs, image, rw_image, oldproglen, &ctx, padding); 2509 if (proglen <= 0) { 2510 out_image: 2511 image = NULL; 2512 if (header) { 2513 bpf_arch_text_copy(&header->size, &rw_header->size, 2514 sizeof(rw_header->size)); 2515 bpf_jit_binary_pack_free(header, rw_header); 2516 } 2517 /* Fall back to interpreter mode */ 2518 prog = orig_prog; 2519 if (extra_pass) { 2520 prog->bpf_func = NULL; 2521 prog->jited = 0; 2522 prog->jited_len = 0; 2523 } 2524 goto out_addrs; 2525 } 2526 if (image) { 2527 if (proglen != oldproglen) { 2528 pr_err("bpf_jit: proglen=%d != oldproglen=%d\n", 2529 proglen, oldproglen); 2530 goto out_image; 2531 } 2532 break; 2533 } 2534 if (proglen == oldproglen) { 2535 /* 2536 * The number of entries in extable is the number of BPF_LDX 2537 * insns that access kernel memory via "pointer to BTF type". 2538 * The verifier changed their opcode from LDX|MEM|size 2539 * to LDX|PROBE_MEM|size to make JITing easier. 2540 */ 2541 u32 align = __alignof__(struct exception_table_entry); 2542 u32 extable_size = prog->aux->num_exentries * 2543 sizeof(struct exception_table_entry); 2544 2545 /* allocate module memory for x86 insns and extable */ 2546 header = bpf_jit_binary_pack_alloc(roundup(proglen, align) + extable_size, 2547 &image, align, &rw_header, &rw_image, 2548 jit_fill_hole); 2549 if (!header) { 2550 prog = orig_prog; 2551 goto out_addrs; 2552 } 2553 prog->aux->extable = (void *) image + roundup(proglen, align); 2554 } 2555 oldproglen = proglen; 2556 cond_resched(); 2557 } 2558 2559 if (bpf_jit_enable > 1) 2560 bpf_jit_dump(prog->len, proglen, pass + 1, image); 2561 2562 if (image) { 2563 if (!prog->is_func || extra_pass) { 2564 /* 2565 * bpf_jit_binary_pack_finalize fails in two scenarios: 2566 * 1) header is not pointing to proper module memory; 2567 * 2) the arch doesn't support bpf_arch_text_copy(). 2568 * 2569 * Both cases are serious bugs and justify WARN_ON. 2570 */ 2571 if (WARN_ON(bpf_jit_binary_pack_finalize(prog, header, rw_header))) { 2572 /* header has been freed */ 2573 header = NULL; 2574 goto out_image; 2575 } 2576 2577 bpf_tail_call_direct_fixup(prog); 2578 } else { 2579 jit_data->addrs = addrs; 2580 jit_data->ctx = ctx; 2581 jit_data->proglen = proglen; 2582 jit_data->image = image; 2583 jit_data->header = header; 2584 jit_data->rw_header = rw_header; 2585 } 2586 prog->bpf_func = (void *)image; 2587 prog->jited = 1; 2588 prog->jited_len = proglen; 2589 } else { 2590 prog = orig_prog; 2591 } 2592 2593 if (!image || !prog->is_func || extra_pass) { 2594 if (image) 2595 bpf_prog_fill_jited_linfo(prog, addrs + 1); 2596 out_addrs: 2597 kvfree(addrs); 2598 kfree(jit_data); 2599 prog->aux->jit_data = NULL; 2600 } 2601 out: 2602 if (tmp_blinded) 2603 bpf_jit_prog_release_other(prog, prog == orig_prog ? 2604 tmp : orig_prog); 2605 return prog; 2606 } 2607 2608 bool bpf_jit_supports_kfunc_call(void) 2609 { 2610 return true; 2611 } 2612 2613 void *bpf_arch_text_copy(void *dst, void *src, size_t len) 2614 { 2615 if (text_poke_copy(dst, src, len) == NULL) 2616 return ERR_PTR(-EINVAL); 2617 return dst; 2618 } 2619 2620 /* Indicate the JIT backend supports mixing bpf2bpf and tailcalls. 
/* Indicate the JIT backend supports mixing bpf2bpf and tailcalls. */
bool bpf_jit_supports_subprog_tailcalls(void)
{
	return true;
}

void bpf_jit_free(struct bpf_prog *prog)
{
	if (prog->jited) {
		struct x64_jit_data *jit_data = prog->aux->jit_data;
		struct bpf_binary_header *hdr;

		/*
		 * If we fail the final pass of JIT (from jit_subprogs),
		 * the program may not be finalized yet. Call finalize here
		 * before freeing it.
		 */
		if (jit_data) {
			bpf_jit_binary_pack_finalize(prog, jit_data->header,
						     jit_data->rw_header);
			kvfree(jit_data->addrs);
			kfree(jit_data);
		}
		hdr = bpf_jit_binary_pack_hdr(prog);
		bpf_jit_binary_pack_free(hdr, NULL);
		WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(prog));
	}

	bpf_prog_unlock_free(prog);
}