// SPDX-License-Identifier: GPL-2.0-only
/*
 * BPF JIT compiler
 *
 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
 * Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <linux/memory.h>
#include <linux/sort.h>
#include <asm/extable.h>
#include <asm/set_memory.h>
#include <asm/nospec-branch.h>
#include <asm/text-patching.h>

static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
{
	if (len == 1)
		*ptr = bytes;
	else if (len == 2)
		*(u16 *)ptr = bytes;
	else {
		*(u32 *)ptr = bytes;
		barrier();
	}
	return ptr + len;
}

#define EMIT(bytes, len) \
	do { prog = emit_code(prog, bytes, len); } while (0)

#define EMIT1(b1)		EMIT(b1, 1)
#define EMIT2(b1, b2)		EMIT((b1) + ((b2) << 8), 2)
#define EMIT3(b1, b2, b3)	EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
#define EMIT4(b1, b2, b3, b4)	EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)

#define EMIT1_off32(b1, off) \
	do { EMIT1(b1); EMIT(off, 4); } while (0)
#define EMIT2_off32(b1, b2, off) \
	do { EMIT2(b1, b2); EMIT(off, 4); } while (0)
#define EMIT3_off32(b1, b2, b3, off) \
	do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
#define EMIT4_off32(b1, b2, b3, b4, off) \
	do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)

#ifdef CONFIG_X86_KERNEL_IBT
#define EMIT_ENDBR()	EMIT(gen_endbr(), 4)
#else
#define EMIT_ENDBR()
#endif

static bool is_imm8(int value)
{
	return value <= 127 && value >= -128;
}

static bool is_simm32(s64 value)
{
	return value == (s64)(s32)value;
}

static bool is_uimm32(u64 value)
{
	return value == (u64)(u32)value;
}

/* mov dst, src */
#define EMIT_mov(DST, SRC)								 \
	do {										 \
		if (DST != SRC)								 \
			EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
	} while (0)

static int bpf_size_to_x86_bytes(int bpf_size)
{
	if (bpf_size == BPF_W)
		return 4;
	else if (bpf_size == BPF_H)
		return 2;
	else if (bpf_size == BPF_B)
		return 1;
	else if (bpf_size == BPF_DW)
		return 4; /* imm32 */
	else
		return 0;
}

/*
 * List of x86 cond jumps opcodes (. + s8)
 * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
 */
#define X86_JB  0x72
#define X86_JAE 0x73
#define X86_JE  0x74
#define X86_JNE 0x75
#define X86_JBE 0x76
#define X86_JA  0x77
#define X86_JL  0x7C
#define X86_JGE 0x7D
#define X86_JLE 0x7E
#define X86_JG  0x7F
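/*
 * Illustrative example (not bytes the JIT emits as-is): the short form
 * "jb +0x10" encodes as 72 10, while the same jump with a 32-bit
 * displacement is 0F 82 10 00 00 00 -- the opcode above plus 0x10,
 * behind a 0x0F prefix. The "jmp_cond + 0x10" near-jump emissions
 * further down rely on exactly this property.
 */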
/* Pick a register outside of BPF range for JIT internal work */
#define AUX_REG (MAX_BPF_JIT_REG + 1)
#define X86_REG_R9 (MAX_BPF_JIT_REG + 2)

/*
 * The following table maps BPF registers to x86-64 registers.
 *
 * x86-64 register R12 is unused, since if used as base address
 * register in load/store instructions, it always needs an
 * extra byte of encoding and is callee saved.
 *
 * x86-64 register R9 is not used by BPF programs, but can be used by BPF
 * trampoline. x86-64 register R10 is used for blinding (if enabled).
 */
static const int reg2hex[] = {
	[BPF_REG_0] = 0,  /* RAX */
	[BPF_REG_1] = 7,  /* RDI */
	[BPF_REG_2] = 6,  /* RSI */
	[BPF_REG_3] = 2,  /* RDX */
	[BPF_REG_4] = 1,  /* RCX */
	[BPF_REG_5] = 0,  /* R8 */
	[BPF_REG_6] = 3,  /* RBX, callee saved */
	[BPF_REG_7] = 5,  /* R13, callee saved */
	[BPF_REG_8] = 6,  /* R14, callee saved */
	[BPF_REG_9] = 7,  /* R15, callee saved */
	[BPF_REG_FP] = 5, /* RBP readonly */
	[BPF_REG_AX] = 2, /* R10 temp register */
	[AUX_REG] = 3,    /* R11 temp register */
	[X86_REG_R9] = 1, /* R9 register, 6th function argument */
};

static const int reg2pt_regs[] = {
	[BPF_REG_0] = offsetof(struct pt_regs, ax),
	[BPF_REG_1] = offsetof(struct pt_regs, di),
	[BPF_REG_2] = offsetof(struct pt_regs, si),
	[BPF_REG_3] = offsetof(struct pt_regs, dx),
	[BPF_REG_4] = offsetof(struct pt_regs, cx),
	[BPF_REG_5] = offsetof(struct pt_regs, r8),
	[BPF_REG_6] = offsetof(struct pt_regs, bx),
	[BPF_REG_7] = offsetof(struct pt_regs, r13),
	[BPF_REG_8] = offsetof(struct pt_regs, r14),
	[BPF_REG_9] = offsetof(struct pt_regs, r15),
};

/*
 * is_ereg() == true if BPF register 'reg' maps to x86-64 r8..r15
 * which need extra byte of encoding.
 * rax,rcx,...,rbp have simpler encoding
 */
static bool is_ereg(u32 reg)
{
	return (1 << reg) & (BIT(BPF_REG_5) |
			     BIT(AUX_REG) |
			     BIT(BPF_REG_7) |
			     BIT(BPF_REG_8) |
			     BIT(BPF_REG_9) |
			     BIT(X86_REG_R9) |
			     BIT(BPF_REG_AX));
}

/*
 * is_ereg_8l() == true if BPF register 'reg' is mapped to access x86-64
 * lower 8-bit registers dil,sil,bpl,spl,r8b..r15b, which need extra byte
 * of encoding. al,cl,dl,bl have simpler encoding.
 */
static bool is_ereg_8l(u32 reg)
{
	return is_ereg(reg) ||
	    (1 << reg) & (BIT(BPF_REG_1) |
			  BIT(BPF_REG_2) |
			  BIT(BPF_REG_FP));
}

static bool is_axreg(u32 reg)
{
	return reg == BPF_REG_0;
}

/* Add modifiers if 'reg' maps to x86-64 registers R8..R15 */
static u8 add_1mod(u8 byte, u32 reg)
{
	if (is_ereg(reg))
		byte |= 1;
	return byte;
}

static u8 add_2mod(u8 byte, u32 r1, u32 r2)
{
	if (is_ereg(r1))
		byte |= 1;
	if (is_ereg(r2))
		byte |= 4;
	return byte;
}

/* Encode 'dst_reg' register into x86-64 opcode 'byte' */
static u8 add_1reg(u8 byte, u32 dst_reg)
{
	return byte + reg2hex[dst_reg];
}

/* Encode 'dst_reg' and 'src_reg' registers into x86-64 opcode 'byte' */
static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
{
	return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
}
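/*
 * Worked example (illustrative): with dst_reg == BPF_REG_1 (RDI, hex 7)
 * and src_reg == BPF_REG_0 (RAX, hex 0), add_2reg(0xC0, dst, src)
 * returns 0xC7, so EMIT2(0x89, ...) produces 89 C7, i.e. "mov edi, eax".
 * When an R8..R15 register is involved, add_1mod()/add_2mod() supply the
 * REX.B/REX.R bits that extend the 3-bit ModRM register fields.
 */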
/* Some 1-byte opcodes for binary ALU operations */
static u8 simple_alu_opcodes[] = {
	[BPF_ADD] = 0x01,
	[BPF_SUB] = 0x29,
	[BPF_AND] = 0x21,
	[BPF_OR] = 0x09,
	[BPF_XOR] = 0x31,
	[BPF_LSH] = 0xE0,
	[BPF_RSH] = 0xE8,
	[BPF_ARSH] = 0xF8,
};

static void jit_fill_hole(void *area, unsigned int size)
{
	/* Fill whole space with INT3 instructions */
	memset(area, 0xcc, size);
}

int bpf_arch_text_invalidate(void *dst, size_t len)
{
	return IS_ERR_OR_NULL(text_poke_set(dst, 0xcc, len));
}

struct jit_context {
	int cleanup_addr; /* Epilogue code offset */

	/*
	 * Program specific offsets of labels in the code; these rely on the
	 * JIT doing at least 2 passes, recording the position on the first
	 * pass, only to generate the correct offset on the second pass.
	 */
	int tail_call_direct_label;
	int tail_call_indirect_label;
};

/* Maximum number of bytes emitted while JITing one eBPF insn */
#define BPF_MAX_INSN_SIZE	128
#define BPF_INSN_SAFETY		64

/* Number of bytes emit_patch() needs to generate instructions */
#define X86_PATCH_SIZE		5
/* Number of bytes that will be skipped on tailcall */
#define X86_TAIL_CALL_OFFSET	(11 + ENDBR_INSN_SIZE)
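/*
 * The 11 bytes above can be read off emit_prologue() below: 5 bytes of
 * patchable nops, 2 bytes of "xor eax, eax" (or nop2), 1 byte of
 * "push rbp" and 3 bytes of "mov rbp, rsp", plus the leading ENDBR on
 * IBT kernels. A tail call therefore lands on the second ENDBR that the
 * prologue plants at exactly this offset.
 */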
static void push_callee_regs(u8 **pprog, bool *callee_regs_used)
{
	u8 *prog = *pprog;

	if (callee_regs_used[0])
		EMIT1(0x53);         /* push rbx */
	if (callee_regs_used[1])
		EMIT2(0x41, 0x55);   /* push r13 */
	if (callee_regs_used[2])
		EMIT2(0x41, 0x56);   /* push r14 */
	if (callee_regs_used[3])
		EMIT2(0x41, 0x57);   /* push r15 */
	*pprog = prog;
}

static void pop_callee_regs(u8 **pprog, bool *callee_regs_used)
{
	u8 *prog = *pprog;

	if (callee_regs_used[3])
		EMIT2(0x41, 0x5F);   /* pop r15 */
	if (callee_regs_used[2])
		EMIT2(0x41, 0x5E);   /* pop r14 */
	if (callee_regs_used[1])
		EMIT2(0x41, 0x5D);   /* pop r13 */
	if (callee_regs_used[0])
		EMIT1(0x5B);         /* pop rbx */
	*pprog = prog;
}

/*
 * Emit x86-64 prologue code for BPF program.
 * bpf_tail_call helper will skip the first X86_TAIL_CALL_OFFSET bytes
 * while jumping to another program
 */
static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf,
			  bool tail_call_reachable, bool is_subprog)
{
	u8 *prog = *pprog;

	/* BPF trampoline can be made to work without these nops,
	 * but let's waste 5 bytes for now and optimize later
	 */
	EMIT_ENDBR();
	memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
	prog += X86_PATCH_SIZE;
	if (!ebpf_from_cbpf) {
		if (tail_call_reachable && !is_subprog)
			EMIT2(0x31, 0xC0); /* xor eax, eax */
		else
			EMIT2(0x66, 0x90); /* nop2 */
	}
	EMIT1(0x55);		 /* push rbp */
	EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */

	/* X86_TAIL_CALL_OFFSET is here */
	EMIT_ENDBR();

	/* sub rsp, rounded_stack_depth */
	if (stack_depth)
		EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth, 8));
	if (tail_call_reachable)
		EMIT1(0x50);         /* push rax */
	*pprog = prog;
}

static int emit_patch(u8 **pprog, void *func, void *ip, u8 opcode)
{
	u8 *prog = *pprog;
	s64 offset;

	offset = func - (ip + X86_PATCH_SIZE);
	if (!is_simm32(offset)) {
		pr_err("Target call %p is out of range\n", func);
		return -ERANGE;
	}
	EMIT1_off32(opcode, offset);
	*pprog = prog;
	return 0;
}

static int emit_call(u8 **pprog, void *func, void *ip)
{
	return emit_patch(pprog, func, ip, 0xE8);
}

static int emit_jump(u8 **pprog, void *func, void *ip)
{
	return emit_patch(pprog, func, ip, 0xE9);
}

static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
				void *old_addr, void *new_addr)
{
	const u8 *nop_insn = x86_nops[5];
	u8 old_insn[X86_PATCH_SIZE];
	u8 new_insn[X86_PATCH_SIZE];
	u8 *prog;
	int ret;

	memcpy(old_insn, nop_insn, X86_PATCH_SIZE);
	if (old_addr) {
		prog = old_insn;
		ret = t == BPF_MOD_CALL ?
		      emit_call(&prog, old_addr, ip) :
		      emit_jump(&prog, old_addr, ip);
		if (ret)
			return ret;
	}

	memcpy(new_insn, nop_insn, X86_PATCH_SIZE);
	if (new_addr) {
		prog = new_insn;
		ret = t == BPF_MOD_CALL ?
		      emit_call(&prog, new_addr, ip) :
		      emit_jump(&prog, new_addr, ip);
		if (ret)
			return ret;
	}

	ret = -EBUSY;
	mutex_lock(&text_mutex);
	if (memcmp(ip, old_insn, X86_PATCH_SIZE))
		goto out;
	ret = 1;
	if (memcmp(ip, new_insn, X86_PATCH_SIZE)) {
		text_poke_bp(ip, new_insn, X86_PATCH_SIZE, NULL);
		ret = 0;
	}
out:
	mutex_unlock(&text_mutex);
	return ret;
}

int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
		       void *old_addr, void *new_addr)
{
	if (!is_kernel_text((long)ip) &&
	    !is_bpf_text_address((long)ip))
		/* BPF poking in modules is not supported */
		return -EINVAL;

	/*
	 * See emit_prologue(), for IBT builds the trampoline hook is preceded
	 * with an ENDBR instruction.
	 */
	if (is_endbr(*(u32 *)ip))
		ip += ENDBR_INSN_SIZE;

	return __bpf_arch_text_poke(ip, t, old_addr, new_addr);
}

#define EMIT_LFENCE()	EMIT3(0x0F, 0xAE, 0xE8)

static void emit_indirect_jump(u8 **pprog, int reg, u8 *ip)
{
	u8 *prog = *pprog;

	if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) {
		EMIT_LFENCE();
		EMIT2(0xFF, 0xE0 + reg);
	} else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) {
		OPTIMIZER_HIDE_VAR(reg);
		emit_jump(&prog, &__x86_indirect_thunk_array[reg], ip);
	} else {
		EMIT2(0xFF, 0xE0 + reg);
	}

	*pprog = prog;
}
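/*
 * Illustrative encodings for the plain (non-retpoline) branch above:
 * "jmp rcx" is FF E1 and "jmp rdx" is FF E2 -- 0xE0 plus the hardware
 * register number passed in 'reg'. With retpolines enabled the same
 * jump is routed through the matching __x86_indirect_thunk_* entry
 * instead.
 */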
static void emit_return(u8 **pprog, u8 *ip)
{
	u8 *prog = *pprog;

	if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) {
		emit_jump(&prog, &__x86_return_thunk, ip);
	} else {
		EMIT1(0xC3);		/* ret */
		if (IS_ENABLED(CONFIG_SLS))
			EMIT1(0xCC);	/* int3 */
	}

	*pprog = prog;
}

/*
 * Generate the following code:
 *
 * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ...
 *   if (index >= array->map.max_entries)
 *     goto out;
 *   if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT)
 *     goto out;
 *   prog = array->ptrs[index];
 *   if (prog == NULL)
 *     goto out;
 *   goto *(prog->bpf_func + prologue_size);
 * out:
 */
static void emit_bpf_tail_call_indirect(u8 **pprog, bool *callee_regs_used,
					u32 stack_depth, u8 *ip,
					struct jit_context *ctx)
{
	int tcc_off = -4 - round_up(stack_depth, 8);
	u8 *prog = *pprog, *start = *pprog;
	int offset;

	/*
	 * rdi - pointer to ctx
	 * rsi - pointer to bpf_array
	 * rdx - index in bpf_array
	 */

	/*
	 * if (index >= array->map.max_entries)
	 *	goto out;
	 */
	EMIT2(0x89, 0xD2);                        /* mov edx, edx */
	EMIT3(0x39, 0x56,                         /* cmp dword ptr [rsi + 16], edx */
	      offsetof(struct bpf_array, map.max_entries));

	offset = ctx->tail_call_indirect_label - (prog + 2 - start);
	EMIT2(X86_JBE, offset);                   /* jbe out */

	/*
	 * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT)
	 *	goto out;
	 */
	EMIT2_off32(0x8B, 0x85, tcc_off);         /* mov eax, dword ptr [rbp - tcc_off] */
	EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);     /* cmp eax, MAX_TAIL_CALL_CNT */

	offset = ctx->tail_call_indirect_label - (prog + 2 - start);
	EMIT2(X86_JAE, offset);                   /* jae out */
	EMIT3(0x83, 0xC0, 0x01);                  /* add eax, 1 */
	EMIT2_off32(0x89, 0x85, tcc_off);         /* mov dword ptr [rbp - tcc_off], eax */

	/* prog = array->ptrs[index]; */
	EMIT4_off32(0x48, 0x8B, 0x8C, 0xD6,       /* mov rcx, [rsi + rdx * 8 + offsetof(...)] */
		    offsetof(struct bpf_array, ptrs));

	/*
	 * if (prog == NULL)
	 *	goto out;
	 */
	EMIT3(0x48, 0x85, 0xC9);                  /* test rcx,rcx */

	offset = ctx->tail_call_indirect_label - (prog + 2 - start);
	EMIT2(X86_JE, offset);                    /* je out */

	pop_callee_regs(&prog, callee_regs_used);

	EMIT1(0x58);                              /* pop rax */
	if (stack_depth)
		EMIT3_off32(0x48, 0x81, 0xC4,     /* add rsp, sd */
			    round_up(stack_depth, 8));

	/* goto *(prog->bpf_func + X86_TAIL_CALL_OFFSET); */
	EMIT4(0x48, 0x8B, 0x49,                   /* mov rcx, qword ptr [rcx + 32] */
	      offsetof(struct bpf_prog, bpf_func));
	EMIT4(0x48, 0x83, 0xC1,                   /* add rcx, X86_TAIL_CALL_OFFSET */
	      X86_TAIL_CALL_OFFSET);

	/*
	 * Now we're ready to jump into next BPF program
	 * rdi == ctx (1st arg)
	 * rcx == prog->bpf_func + X86_TAIL_CALL_OFFSET
	 */
	emit_indirect_jump(&prog, 1 /* rcx */, ip + (prog - start));

	/* out: */
	ctx->tail_call_indirect_label = prog - start;
	*pprog = prog;
}
static void emit_bpf_tail_call_direct(struct bpf_jit_poke_descriptor *poke,
				      u8 **pprog, u8 *ip,
				      bool *callee_regs_used, u32 stack_depth,
				      struct jit_context *ctx)
{
	int tcc_off = -4 - round_up(stack_depth, 8);
	u8 *prog = *pprog, *start = *pprog;
	int offset;

	/*
	 * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT)
	 *	goto out;
	 */
	EMIT2_off32(0x8B, 0x85, tcc_off);             /* mov eax, dword ptr [rbp - tcc_off] */
	EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);         /* cmp eax, MAX_TAIL_CALL_CNT */

	offset = ctx->tail_call_direct_label - (prog + 2 - start);
	EMIT2(X86_JAE, offset);                       /* jae out */
	EMIT3(0x83, 0xC0, 0x01);                      /* add eax, 1 */
	EMIT2_off32(0x89, 0x85, tcc_off);             /* mov dword ptr [rbp - tcc_off], eax */

	poke->tailcall_bypass = ip + (prog - start);
	poke->adj_off = X86_TAIL_CALL_OFFSET;
	poke->tailcall_target = ip + ctx->tail_call_direct_label - X86_PATCH_SIZE;
	poke->bypass_addr = (u8 *)poke->tailcall_target + X86_PATCH_SIZE;

	emit_jump(&prog, (u8 *)poke->tailcall_target + X86_PATCH_SIZE,
		  poke->tailcall_bypass);

	pop_callee_regs(&prog, callee_regs_used);
	EMIT1(0x58);                                  /* pop rax */
	if (stack_depth)
		EMIT3_off32(0x48, 0x81, 0xC4, round_up(stack_depth, 8));

	memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
	prog += X86_PATCH_SIZE;

	/* out: */
	ctx->tail_call_direct_label = prog - start;

	*pprog = prog;
}

static void bpf_tail_call_direct_fixup(struct bpf_prog *prog)
{
	struct bpf_jit_poke_descriptor *poke;
	struct bpf_array *array;
	struct bpf_prog *target;
	int i, ret;

	for (i = 0; i < prog->aux->size_poke_tab; i++) {
		poke = &prog->aux->poke_tab[i];
		if (poke->aux && poke->aux != prog->aux)
			continue;

		WARN_ON_ONCE(READ_ONCE(poke->tailcall_target_stable));

		if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
			continue;

		array = container_of(poke->tail_call.map, struct bpf_array, map);
		mutex_lock(&array->aux->poke_mutex);
		target = array->ptrs[poke->tail_call.key];
		if (target) {
			ret = __bpf_arch_text_poke(poke->tailcall_target,
						   BPF_MOD_JUMP, NULL,
						   (u8 *)target->bpf_func +
						   poke->adj_off);
			BUG_ON(ret < 0);
			ret = __bpf_arch_text_poke(poke->tailcall_bypass,
						   BPF_MOD_JUMP,
						   (u8 *)poke->tailcall_target +
						   X86_PATCH_SIZE, NULL);
			BUG_ON(ret < 0);
		}
		WRITE_ONCE(poke->tailcall_target_stable, true);
		mutex_unlock(&array->aux->poke_mutex);
	}
}

static void emit_mov_imm32(u8 **pprog, bool sign_propagate,
			   u32 dst_reg, const u32 imm32)
{
	u8 *prog = *pprog;
	u8 b1, b2, b3;

	/*
	 * Optimization: if imm32 is positive, use 'mov %eax, imm32'
	 * (which zero-extends imm32) to save 2 bytes.
	 */
	if (sign_propagate && (s32)imm32 < 0) {
		/* 'mov %rax, imm32' sign extends imm32 */
		b1 = add_1mod(0x48, dst_reg);
		b2 = 0xC7;
		b3 = 0xC0;
		EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
		goto done;
	}

	/*
	 * Optimization: if imm32 is zero, use 'xor %eax, %eax'
	 * to save 3 bytes.
	 */
	if (imm32 == 0) {
		if (is_ereg(dst_reg))
			EMIT1(add_2mod(0x40, dst_reg, dst_reg));
		b2 = 0x31; /* xor */
		b3 = 0xC0;
		EMIT2(b2, add_2reg(b3, dst_reg, dst_reg));
		goto done;
	}

	/* mov %eax, imm32 */
	if (is_ereg(dst_reg))
		EMIT1(add_1mod(0x40, dst_reg));
	EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
done:
	*pprog = prog;
}
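/*
 * Size comparison for the three cases above (illustrative): the
 * sign-extending "mov rax, -1" is 48 C7 C0 FF FF FF FF (7 bytes),
 * "xor eax, eax" is 31 C0 (2 bytes) and "mov eax, 5" is B8 05 00 00 00
 * (5 bytes), while a full movabs imm64 would cost 10 bytes.
 */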
static void emit_mov_imm64(u8 **pprog, u32 dst_reg,
			   const u32 imm32_hi, const u32 imm32_lo)
{
	u8 *prog = *pprog;

	if (is_uimm32(((u64)imm32_hi << 32) | (u32)imm32_lo)) {
		/*
		 * For emitting plain u32, where sign bit must not be
		 * propagated LLVM tends to load imm64 over mov32
		 * directly, so save couple of bytes by just doing
		 * 'mov %eax, imm32' instead.
		 */
		emit_mov_imm32(&prog, false, dst_reg, imm32_lo);
	} else {
		/* movabsq %rax, imm64 */
		EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
		EMIT(imm32_lo, 4);
		EMIT(imm32_hi, 4);
	}

	*pprog = prog;
}

static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg)
{
	u8 *prog = *pprog;

	if (is64) {
		/* mov dst, src */
		EMIT_mov(dst_reg, src_reg);
	} else {
		/* mov32 dst, src */
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT1(add_2mod(0x40, dst_reg, src_reg));
		EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
	}

	*pprog = prog;
}

/* Emit the suffix (ModR/M etc) for addressing *(ptr_reg + off) and val_reg */
static void emit_insn_suffix(u8 **pprog, u32 ptr_reg, u32 val_reg, int off)
{
	u8 *prog = *pprog;

	if (is_imm8(off)) {
		/* 1-byte signed displacement.
		 *
		 * If off == 0 we could skip this and save one extra byte, but
		 * special case of x86 R13 which always needs an offset is not
		 * worth the hassle
		 */
		EMIT2(add_2reg(0x40, ptr_reg, val_reg), off);
	} else {
		/* 4-byte signed displacement */
		EMIT1_off32(add_2reg(0x80, ptr_reg, val_reg), off);
	}
	*pprog = prog;
}

/*
 * Emit a REX byte if it will be necessary to address these registers
 */
static void maybe_emit_mod(u8 **pprog, u32 dst_reg, u32 src_reg, bool is64)
{
	u8 *prog = *pprog;

	if (is64)
		EMIT1(add_2mod(0x48, dst_reg, src_reg));
	else if (is_ereg(dst_reg) || is_ereg(src_reg))
		EMIT1(add_2mod(0x40, dst_reg, src_reg));
	*pprog = prog;
}

/*
 * Similar version of maybe_emit_mod() for a single register
 */
static void maybe_emit_1mod(u8 **pprog, u32 reg, bool is64)
{
	u8 *prog = *pprog;

	if (is64)
		EMIT1(add_1mod(0x48, reg));
	else if (is_ereg(reg))
		EMIT1(add_1mod(0x40, reg));
	*pprog = prog;
}

/* LDX: dst_reg = *(u8*)(src_reg + off) */
static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
{
	u8 *prog = *pprog;

	switch (size) {
	case BPF_B:
		/* Emit 'movzx rax, byte ptr [rax + off]' */
		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
		break;
	case BPF_H:
		/* Emit 'movzx rax, word ptr [rax + off]' */
		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
		break;
	case BPF_W:
		/* Emit 'mov eax, dword ptr [rax+0x14]' */
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
		else
			EMIT1(0x8B);
		break;
	case BPF_DW:
		/* Emit 'mov rax, qword ptr [rax+0x14]' */
		EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
		break;
	}
	emit_insn_suffix(&prog, src_reg, dst_reg, off);
	*pprog = prog;
}
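/*
 * Note on the asymmetry above: BPF_B and BPF_H need an explicit movzx
 * because byte/word moves leave the destination's upper bits intact,
 * whereas any 32-bit mov (the BPF_W case) already zero-extends into the
 * upper 32 bits, so a plain 8B opcode is enough.
 */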
/* STX: *(u8*)(dst_reg + off) = src_reg */
static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
{
	u8 *prog = *pprog;

	switch (size) {
	case BPF_B:
		/* Emit 'mov byte ptr [rax + off], al' */
		if (is_ereg(dst_reg) || is_ereg_8l(src_reg))
			/* Add extra byte for eregs or SIL,DIL,BPL in src_reg */
			EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
		else
			EMIT1(0x88);
		break;
	case BPF_H:
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
		else
			EMIT2(0x66, 0x89);
		break;
	case BPF_W:
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
		else
			EMIT1(0x89);
		break;
	case BPF_DW:
		EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
		break;
	}
	emit_insn_suffix(&prog, dst_reg, src_reg, off);
	*pprog = prog;
}

static int emit_atomic(u8 **pprog, u8 atomic_op,
		       u32 dst_reg, u32 src_reg, s16 off, u8 bpf_size)
{
	u8 *prog = *pprog;

	EMIT1(0xF0); /* lock prefix */

	maybe_emit_mod(&prog, dst_reg, src_reg, bpf_size == BPF_DW);

	/* emit opcode */
	switch (atomic_op) {
	case BPF_ADD:
	case BPF_AND:
	case BPF_OR:
	case BPF_XOR:
		/* lock *(u32/u64*)(dst_reg + off) <op>= src_reg */
		EMIT1(simple_alu_opcodes[atomic_op]);
		break;
	case BPF_ADD | BPF_FETCH:
		/* src_reg = atomic_fetch_add(dst_reg + off, src_reg); */
		EMIT2(0x0F, 0xC1);
		break;
	case BPF_XCHG:
		/* src_reg = atomic_xchg(dst_reg + off, src_reg); */
		EMIT1(0x87);
		break;
	case BPF_CMPXCHG:
		/* r0 = atomic_cmpxchg(dst_reg + off, r0, src_reg); */
		EMIT2(0x0F, 0xB1);
		break;
	default:
		pr_err("bpf_jit: unknown atomic opcode %02x\n", atomic_op);
		return -EFAULT;
	}

	emit_insn_suffix(&prog, dst_reg, src_reg, off);

	*pprog = prog;
	return 0;
}

bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs)
{
	u32 reg = x->fixup >> 8;

	/* jump over faulting load and clear dest register */
	*(unsigned long *)((void *)regs + reg) = 0;
	regs->ip += x->fixup & 0xff;
	return true;
}

static void detect_reg_usage(struct bpf_insn *insn, int insn_cnt,
			     bool *regs_used, bool *tail_call_seen)
{
	int i;

	for (i = 1; i <= insn_cnt; i++, insn++) {
		if (insn->code == (BPF_JMP | BPF_TAIL_CALL))
			*tail_call_seen = true;
		if (insn->dst_reg == BPF_REG_6 || insn->src_reg == BPF_REG_6)
			regs_used[0] = true;
		if (insn->dst_reg == BPF_REG_7 || insn->src_reg == BPF_REG_7)
			regs_used[1] = true;
		if (insn->dst_reg == BPF_REG_8 || insn->src_reg == BPF_REG_8)
			regs_used[2] = true;
		if (insn->dst_reg == BPF_REG_9 || insn->src_reg == BPF_REG_9)
			regs_used[3] = true;
	}
}

static void emit_nops(u8 **pprog, int len)
{
	u8 *prog = *pprog;
	int i, noplen;

	while (len > 0) {
		noplen = len;

		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;

		for (i = 0; i < noplen; i++)
			EMIT1(x86_nops[noplen][i]);
		len -= noplen;
	}

	*pprog = prog;
}

#define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp)))
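/*
 * INSN_SZ_DIFF is how much the current BPF insn shrank relative to the
 * previous pass: addrs[] still holds last pass's end offsets, while
 * (prog - temp) is the length just emitted for the same insn. The
 * jmp_padding logic below turns that difference into nops so the image
 * can converge.
 */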
static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image,
		  int oldproglen, struct jit_context *ctx, bool jmp_padding)
{
	bool tail_call_reachable = bpf_prog->aux->tail_call_reachable;
	struct bpf_insn *insn = bpf_prog->insnsi;
	bool callee_regs_used[4] = {};
	int insn_cnt = bpf_prog->len;
	bool tail_call_seen = false;
	bool seen_exit = false;
	u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
	int i, excnt = 0;
	int ilen, proglen = 0;
	u8 *prog = temp;
	int err;

	detect_reg_usage(insn, insn_cnt, callee_regs_used,
			 &tail_call_seen);

	/* tail call's presence in current prog implies it is reachable */
	tail_call_reachable |= tail_call_seen;

	emit_prologue(&prog, bpf_prog->aux->stack_depth,
		      bpf_prog_was_classic(bpf_prog), tail_call_reachable,
		      bpf_prog->aux->func_idx != 0);
	push_callee_regs(&prog, callee_regs_used);

	ilen = prog - temp;
	if (rw_image)
		memcpy(rw_image + proglen, temp, ilen);
	proglen += ilen;
	addrs[0] = proglen;
	prog = temp;

	for (i = 1; i <= insn_cnt; i++, insn++) {
		const s32 imm32 = insn->imm;
		u32 dst_reg = insn->dst_reg;
		u32 src_reg = insn->src_reg;
		u8 b2 = 0, b3 = 0;
		u8 *start_of_ldx;
		s64 jmp_offset;
		u8 jmp_cond;
		u8 *func;
		int nops;

		switch (insn->code) {
			/* ALU */
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU64 | BPF_ADD | BPF_X:
		case BPF_ALU64 | BPF_SUB | BPF_X:
		case BPF_ALU64 | BPF_AND | BPF_X:
		case BPF_ALU64 | BPF_OR | BPF_X:
		case BPF_ALU64 | BPF_XOR | BPF_X:
			maybe_emit_mod(&prog, dst_reg, src_reg,
				       BPF_CLASS(insn->code) == BPF_ALU64);
			b2 = simple_alu_opcodes[BPF_OP(insn->code)];
			EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
			break;

		case BPF_ALU64 | BPF_MOV | BPF_X:
		case BPF_ALU | BPF_MOV | BPF_X:
			emit_mov_reg(&prog,
				     BPF_CLASS(insn->code) == BPF_ALU64,
				     dst_reg, src_reg);
			break;

			/* neg dst */
		case BPF_ALU | BPF_NEG:
		case BPF_ALU64 | BPF_NEG:
			maybe_emit_1mod(&prog, dst_reg,
					BPF_CLASS(insn->code) == BPF_ALU64);
			EMIT2(0xF7, add_1reg(0xD8, dst_reg));
			break;
		case BPF_ALU | BPF_ADD | BPF_K:
		case BPF_ALU | BPF_SUB | BPF_K:
		case BPF_ALU | BPF_AND | BPF_K:
		case BPF_ALU | BPF_OR | BPF_K:
		case BPF_ALU | BPF_XOR | BPF_K:
		case BPF_ALU64 | BPF_ADD | BPF_K:
		case BPF_ALU64 | BPF_SUB | BPF_K:
		case BPF_ALU64 | BPF_AND | BPF_K:
		case BPF_ALU64 | BPF_OR | BPF_K:
		case BPF_ALU64 | BPF_XOR | BPF_K:
			maybe_emit_1mod(&prog, dst_reg,
					BPF_CLASS(insn->code) == BPF_ALU64);

			/*
			 * b3 holds 'normal' opcode, b2 short form only valid
			 * in case dst is eax/rax.
			 */
			switch (BPF_OP(insn->code)) {
			case BPF_ADD:
				b3 = 0xC0;
				b2 = 0x05;
				break;
			case BPF_SUB:
				b3 = 0xE8;
				b2 = 0x2D;
				break;
			case BPF_AND:
				b3 = 0xE0;
				b2 = 0x25;
				break;
			case BPF_OR:
				b3 = 0xC8;
				b2 = 0x0D;
				break;
			case BPF_XOR:
				b3 = 0xF0;
				b2 = 0x35;
				break;
			}

			if (is_imm8(imm32))
				EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
			else if (is_axreg(dst_reg))
				EMIT1_off32(b2, imm32);
			else
				EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
			break;
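			/*
			 * Example of the three forms above (illustrative):
			 * "add rbx, 1" fits in imm8 and encodes as
			 * 48 83 C3 01; "add rax, 0x1000" takes the rax-only
			 * short form 48 05 00 10 00 00; "add rbx, 0x1000"
			 * needs the generic imm32 form 48 81 C3 00 10 00 00.
			 */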
		case BPF_ALU64 | BPF_MOV | BPF_K:
		case BPF_ALU | BPF_MOV | BPF_K:
			emit_mov_imm32(&prog, BPF_CLASS(insn->code) == BPF_ALU64,
				       dst_reg, imm32);
			break;

		case BPF_LD | BPF_IMM | BPF_DW:
			emit_mov_imm64(&prog, dst_reg, insn[1].imm, insn[0].imm);
			insn++;
			i++;
			break;

			/* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
		case BPF_ALU | BPF_MOD | BPF_X:
		case BPF_ALU | BPF_DIV | BPF_X:
		case BPF_ALU | BPF_MOD | BPF_K:
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU64 | BPF_MOD | BPF_X:
		case BPF_ALU64 | BPF_DIV | BPF_X:
		case BPF_ALU64 | BPF_MOD | BPF_K:
		case BPF_ALU64 | BPF_DIV | BPF_K: {
			bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;

			if (dst_reg != BPF_REG_0)
				EMIT1(0x50); /* push rax */
			if (dst_reg != BPF_REG_3)
				EMIT1(0x52); /* push rdx */

			if (BPF_SRC(insn->code) == BPF_X) {
				if (src_reg == BPF_REG_0 ||
				    src_reg == BPF_REG_3) {
					/* mov r11, src_reg */
					EMIT_mov(AUX_REG, src_reg);
					src_reg = AUX_REG;
				}
			} else {
				/* mov r11, imm32 */
				EMIT3_off32(0x49, 0xC7, 0xC3, imm32);
				src_reg = AUX_REG;
			}

			if (dst_reg != BPF_REG_0)
				/* mov rax, dst_reg */
				emit_mov_reg(&prog, is64, BPF_REG_0, dst_reg);

			/*
			 * xor edx, edx
			 * equivalent to 'xor rdx, rdx', but one byte less
			 */
			EMIT2(0x31, 0xd2);

			/* div src_reg */
			maybe_emit_1mod(&prog, src_reg, is64);
			EMIT2(0xF7, add_1reg(0xF0, src_reg));

			if (BPF_OP(insn->code) == BPF_MOD &&
			    dst_reg != BPF_REG_3)
				/* mov dst_reg, rdx */
				emit_mov_reg(&prog, is64, dst_reg, BPF_REG_3);
			else if (BPF_OP(insn->code) == BPF_DIV &&
				 dst_reg != BPF_REG_0)
				/* mov dst_reg, rax */
				emit_mov_reg(&prog, is64, dst_reg, BPF_REG_0);

			if (dst_reg != BPF_REG_3)
				EMIT1(0x5A); /* pop rdx */
			if (dst_reg != BPF_REG_0)
				EMIT1(0x58); /* pop rax */
			break;
		}

		case BPF_ALU | BPF_MUL | BPF_K:
		case BPF_ALU64 | BPF_MUL | BPF_K:
			maybe_emit_mod(&prog, dst_reg, dst_reg,
				       BPF_CLASS(insn->code) == BPF_ALU64);

			if (is_imm8(imm32))
				/* imul dst_reg, dst_reg, imm8 */
				EMIT3(0x6B, add_2reg(0xC0, dst_reg, dst_reg),
				      imm32);
			else
				/* imul dst_reg, dst_reg, imm32 */
				EMIT2_off32(0x69,
					    add_2reg(0xC0, dst_reg, dst_reg),
					    imm32);
			break;

		case BPF_ALU | BPF_MUL | BPF_X:
		case BPF_ALU64 | BPF_MUL | BPF_X:
			maybe_emit_mod(&prog, src_reg, dst_reg,
				       BPF_CLASS(insn->code) == BPF_ALU64);

			/* imul dst_reg, src_reg */
			EMIT3(0x0F, 0xAF, add_2reg(0xC0, src_reg, dst_reg));
			break;

			/* Shifts */
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_K:
		case BPF_ALU | BPF_ARSH | BPF_K:
		case BPF_ALU64 | BPF_LSH | BPF_K:
		case BPF_ALU64 | BPF_RSH | BPF_K:
		case BPF_ALU64 | BPF_ARSH | BPF_K:
			maybe_emit_1mod(&prog, dst_reg,
					BPF_CLASS(insn->code) == BPF_ALU64);

			b3 = simple_alu_opcodes[BPF_OP(insn->code)];
			if (imm32 == 1)
				EMIT2(0xD1, add_1reg(b3, dst_reg));
			else
				EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
			break;

		case BPF_ALU | BPF_LSH | BPF_X:
		case BPF_ALU | BPF_RSH | BPF_X:
		case BPF_ALU | BPF_ARSH | BPF_X:
		case BPF_ALU64 | BPF_LSH | BPF_X:
		case BPF_ALU64 | BPF_RSH | BPF_X:
		case BPF_ALU64 | BPF_ARSH | BPF_X:

			/* Check for bad case when dst_reg == rcx */
			if (dst_reg == BPF_REG_4) {
				/* mov r11, dst_reg */
				EMIT_mov(AUX_REG, dst_reg);
				dst_reg = AUX_REG;
			}

			if (src_reg != BPF_REG_4) { /* common case */
				EMIT1(0x51); /* push rcx */

				/* mov rcx, src_reg */
				EMIT_mov(BPF_REG_4, src_reg);
			}

			/* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */
			maybe_emit_1mod(&prog, dst_reg,
					BPF_CLASS(insn->code) == BPF_ALU64);

			b3 = simple_alu_opcodes[BPF_OP(insn->code)];
			EMIT2(0xD3, add_1reg(b3, dst_reg));

			if (src_reg != BPF_REG_4)
				EMIT1(0x59); /* pop rcx */

			if (insn->dst_reg == BPF_REG_4)
				/* mov dst_reg, r11 */
				EMIT_mov(insn->dst_reg, AUX_REG);
			break;

		case BPF_ALU | BPF_END | BPF_FROM_BE:
			switch (imm32) {
			case 16:
				/* Emit 'ror %ax, 8' to swap lower 2 bytes */
				EMIT1(0x66);
				if (is_ereg(dst_reg))
					EMIT1(0x41);
				EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);

				/* Emit 'movzwl eax, ax' */
				if (is_ereg(dst_reg))
					EMIT3(0x45, 0x0F, 0xB7);
				else
					EMIT2(0x0F, 0xB7);
				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 32:
				/* Emit 'bswap eax' to swap lower 4 bytes */
				if (is_ereg(dst_reg))
					EMIT2(0x41, 0x0F);
				else
					EMIT1(0x0F);
				EMIT1(add_1reg(0xC8, dst_reg));
				break;
			case 64:
				/* Emit 'bswap rax' to swap 8 bytes */
				EMIT3(add_1mod(0x48, dst_reg), 0x0F,
				      add_1reg(0xC8, dst_reg));
				break;
			}
			break;

		case BPF_ALU | BPF_END | BPF_FROM_LE:
			switch (imm32) {
			case 16:
				/*
				 * Emit 'movzwl eax, ax' to zero extend 16-bit
				 * into 64 bit
				 */
				if (is_ereg(dst_reg))
					EMIT3(0x45, 0x0F, 0xB7);
				else
					EMIT2(0x0F, 0xB7);
				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 32:
				/* Emit 'mov eax, eax' to clear upper 32-bits */
				if (is_ereg(dst_reg))
					EMIT1(0x45);
				EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 64:
				/* nop */
				break;
			}
			break;

			/* speculation barrier */
		case BPF_ST | BPF_NOSPEC:
			if (boot_cpu_has(X86_FEATURE_XMM2))
				EMIT_LFENCE();
			break;

			/* ST: *(u8*)(dst_reg + off) = imm */
		case BPF_ST | BPF_MEM | BPF_B:
			if (is_ereg(dst_reg))
				EMIT2(0x41, 0xC6);
			else
				EMIT1(0xC6);
			goto st;
		case BPF_ST | BPF_MEM | BPF_H:
			if (is_ereg(dst_reg))
				EMIT3(0x66, 0x41, 0xC7);
			else
				EMIT2(0x66, 0xC7);
			goto st;
		case BPF_ST | BPF_MEM | BPF_W:
			if (is_ereg(dst_reg))
				EMIT2(0x41, 0xC7);
			else
				EMIT1(0xC7);
			goto st;
		case BPF_ST | BPF_MEM | BPF_DW:
			EMIT2(add_1mod(0x48, dst_reg), 0xC7);

st:			if (is_imm8(insn->off))
				EMIT2(add_1reg(0x40, dst_reg), insn->off);
			else
				EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);

			EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
			break;
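			/*
			 * Illustrative encoding for the store above:
			 * *(u32 *)(rax + 8) = 42 (BPF_ST | BPF_MEM | BPF_W
			 * with dst_reg == BPF_REG_0) comes out as
			 * C7 40 08 2A 00 00 00 -- opcode C7, ModRM 0x40+reg
			 * with a disp8 of 8, then the 4-byte immediate.
			 */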
			/* STX: *(u8*)(dst_reg + off) = src_reg */
		case BPF_STX | BPF_MEM | BPF_B:
		case BPF_STX | BPF_MEM | BPF_H:
		case BPF_STX | BPF_MEM | BPF_W:
		case BPF_STX | BPF_MEM | BPF_DW:
			emit_stx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
			break;

			/* LDX: dst_reg = *(u8*)(src_reg + off) */
		case BPF_LDX | BPF_MEM | BPF_B:
		case BPF_LDX | BPF_PROBE_MEM | BPF_B:
		case BPF_LDX | BPF_MEM | BPF_H:
		case BPF_LDX | BPF_PROBE_MEM | BPF_H:
		case BPF_LDX | BPF_MEM | BPF_W:
		case BPF_LDX | BPF_PROBE_MEM | BPF_W:
		case BPF_LDX | BPF_MEM | BPF_DW:
		case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
			if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
				/* Though the verifier prevents negative insn->off in BPF_PROBE_MEM
				 * add abs(insn->off) to the limit to make sure that negative
				 * offset won't be an issue.
				 * insn->off is s16, so it won't affect valid pointers.
				 */
				u64 limit = TASK_SIZE_MAX + PAGE_SIZE + abs(insn->off);
				u8 *end_of_jmp1, *end_of_jmp2;

				/* Conservatively check that src_reg + insn->off is a kernel address:
				 * 1. src_reg + insn->off >= limit
				 * 2. src_reg + insn->off doesn't become small positive.
				 * Cannot do src_reg + insn->off >= limit in one branch,
				 * since it needs two spare registers, but JIT has only one.
				 */

				/* movabsq r11, limit */
				EMIT2(add_1mod(0x48, AUX_REG), add_1reg(0xB8, AUX_REG));
				EMIT((u32)limit, 4);
				EMIT(limit >> 32, 4);
				/* cmp src_reg, r11 */
				maybe_emit_mod(&prog, src_reg, AUX_REG, true);
				EMIT2(0x39, add_2reg(0xC0, src_reg, AUX_REG));
				/* if unsigned '<' goto end_of_jmp2 */
				EMIT2(X86_JB, 0);
				end_of_jmp1 = prog;

				/* mov r11, src_reg */
				emit_mov_reg(&prog, true, AUX_REG, src_reg);
				/* add r11, insn->off */
				maybe_emit_1mod(&prog, AUX_REG, true);
				EMIT2_off32(0x81, add_1reg(0xC0, AUX_REG), insn->off);
				/* jmp if not carry to start_of_ldx
				 * Otherwise ERR_PTR(-EINVAL) + 128 will be the user addr
				 * that has to be rejected.
				 */
				EMIT2(0x73 /* JNC */, 0);
				end_of_jmp2 = prog;

				/* xor dst_reg, dst_reg */
				emit_mov_imm32(&prog, false, dst_reg, 0);
				/* jmp byte_after_ldx */
				EMIT2(0xEB, 0);

				/* populate jmp_offset for JB above to jump to xor dst_reg */
				end_of_jmp1[-1] = end_of_jmp2 - end_of_jmp1;
				/* populate jmp_offset for JNC above to jump to start_of_ldx */
				start_of_ldx = prog;
				end_of_jmp2[-1] = start_of_ldx - end_of_jmp2;
			}
			emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
			if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
				struct exception_table_entry *ex;
				u8 *_insn = image + proglen + (start_of_ldx - temp);
				s64 delta;

				/* populate jmp_offset for JMP above */
				start_of_ldx[-1] = prog - start_of_ldx;

				if (!bpf_prog->aux->extable)
					break;

				if (excnt >= bpf_prog->aux->num_exentries) {
					pr_err("ex gen bug\n");
					return -EFAULT;
				}
				ex = &bpf_prog->aux->extable[excnt++];

				delta = _insn - (u8 *)&ex->insn;
				if (!is_simm32(delta)) {
					pr_err("extable->insn doesn't fit into 32-bit\n");
					return -EFAULT;
				}
				/* switch ex to rw buffer for writes */
				ex = (void *)rw_image + ((void *)ex - (void *)image);

				ex->insn = delta;

				ex->data = EX_TYPE_BPF;

				if (dst_reg > BPF_REG_9) {
					pr_err("verifier error\n");
					return -EFAULT;
				}
				/*
				 * Compute size of x86 insn and its target dest x86 register.
				 * ex_handler_bpf() will use lower 8 bits to adjust
				 * pt_regs->ip to jump over this x86 instruction
				 * and upper bits to figure out which pt_regs to zero out.
				 * End result: x86 insn "mov rbx, qword ptr [rax+0x14]"
				 * of 4 bytes will be ignored and rbx will be zero inited.
				 */
				ex->fixup = (prog - start_of_ldx) | (reg2pt_regs[dst_reg] << 8);
			}
			break;
		case BPF_STX | BPF_ATOMIC | BPF_W:
		case BPF_STX | BPF_ATOMIC | BPF_DW:
			if (insn->imm == (BPF_AND | BPF_FETCH) ||
			    insn->imm == (BPF_OR | BPF_FETCH) ||
			    insn->imm == (BPF_XOR | BPF_FETCH)) {
				bool is64 = BPF_SIZE(insn->code) == BPF_DW;
				u32 real_src_reg = src_reg;
				u32 real_dst_reg = dst_reg;
				u8 *branch_target;

				/*
				 * Can't be implemented with a single x86 insn.
				 * Need to do a CMPXCHG loop.
				 */

				/* Will need RAX as a CMPXCHG operand so save R0 */
				emit_mov_reg(&prog, true, BPF_REG_AX, BPF_REG_0);
				if (src_reg == BPF_REG_0)
					real_src_reg = BPF_REG_AX;
				if (dst_reg == BPF_REG_0)
					real_dst_reg = BPF_REG_AX;

				branch_target = prog;
				/* Load old value */
				emit_ldx(&prog, BPF_SIZE(insn->code),
					 BPF_REG_0, real_dst_reg, insn->off);
				/*
				 * Perform the (commutative) operation locally,
				 * put the result in the AUX_REG.
				 */
				emit_mov_reg(&prog, is64, AUX_REG, BPF_REG_0);
				maybe_emit_mod(&prog, AUX_REG, real_src_reg, is64);
				EMIT2(simple_alu_opcodes[BPF_OP(insn->imm)],
				      add_2reg(0xC0, AUX_REG, real_src_reg));
				/* Attempt to swap in new value */
				err = emit_atomic(&prog, BPF_CMPXCHG,
						  real_dst_reg, AUX_REG,
						  insn->off,
						  BPF_SIZE(insn->code));
				if (WARN_ON(err))
					return err;
				/*
				 * ZF tells us whether we won the race. If it's
				 * cleared we need to try again.
				 */
				EMIT2(X86_JNE, -(prog - branch_target) - 2);
				/* Return the pre-modification value */
				emit_mov_reg(&prog, is64, real_src_reg, BPF_REG_0);
				/* Restore R0 after clobbering RAX */
				emit_mov_reg(&prog, true, BPF_REG_0, BPF_REG_AX);
				break;
			}

			err = emit_atomic(&prog, insn->imm, dst_reg, src_reg,
					  insn->off, BPF_SIZE(insn->code));
			if (err)
				return err;
			break;
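			/*
			 * In C terms the fetch loop above is roughly (a
			 * sketch, not literal kernel code):
			 *
			 *	do {
			 *		old = *dst;
			 *		new = old op src;
			 *	} while (cmpxchg(dst, old, new) != old);
			 *	src = old;
			 *
			 * with the retry implemented via the ZF check on
			 * the emitted "lock cmpxchg".
			 */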
			/* call */
		case BPF_JMP | BPF_CALL:
			func = (u8 *) __bpf_call_base + imm32;
			if (tail_call_reachable) {
				/* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */
				EMIT3_off32(0x48, 0x8B, 0x85,
					    -round_up(bpf_prog->aux->stack_depth, 8) - 8);
				if (!imm32 || emit_call(&prog, func, image + addrs[i - 1] + 7))
					return -EINVAL;
			} else {
				if (!imm32 || emit_call(&prog, func, image + addrs[i - 1]))
					return -EINVAL;
			}
			break;

		case BPF_JMP | BPF_TAIL_CALL:
			if (imm32)
				emit_bpf_tail_call_direct(&bpf_prog->aux->poke_tab[imm32 - 1],
							  &prog, image + addrs[i - 1],
							  callee_regs_used,
							  bpf_prog->aux->stack_depth,
							  ctx);
			else
				emit_bpf_tail_call_indirect(&prog,
							    callee_regs_used,
							    bpf_prog->aux->stack_depth,
							    image + addrs[i - 1],
							    ctx);
			break;

			/* cond jump */
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JNE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JLT | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JLE | BPF_X:
		case BPF_JMP | BPF_JSGT | BPF_X:
		case BPF_JMP | BPF_JSLT | BPF_X:
		case BPF_JMP | BPF_JSGE | BPF_X:
		case BPF_JMP | BPF_JSLE | BPF_X:
		case BPF_JMP32 | BPF_JEQ | BPF_X:
		case BPF_JMP32 | BPF_JNE | BPF_X:
		case BPF_JMP32 | BPF_JGT | BPF_X:
		case BPF_JMP32 | BPF_JLT | BPF_X:
		case BPF_JMP32 | BPF_JGE | BPF_X:
		case BPF_JMP32 | BPF_JLE | BPF_X:
		case BPF_JMP32 | BPF_JSGT | BPF_X:
		case BPF_JMP32 | BPF_JSLT | BPF_X:
		case BPF_JMP32 | BPF_JSGE | BPF_X:
		case BPF_JMP32 | BPF_JSLE | BPF_X:
			/* cmp dst_reg, src_reg */
			maybe_emit_mod(&prog, dst_reg, src_reg,
				       BPF_CLASS(insn->code) == BPF_JMP);
			EMIT2(0x39, add_2reg(0xC0, dst_reg, src_reg));
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JSET | BPF_X:
		case BPF_JMP32 | BPF_JSET | BPF_X:
			/* test dst_reg, src_reg */
			maybe_emit_mod(&prog, dst_reg, src_reg,
				       BPF_CLASS(insn->code) == BPF_JMP);
			EMIT2(0x85, add_2reg(0xC0, dst_reg, src_reg));
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP32 | BPF_JSET | BPF_K:
			/* test dst_reg, imm32 */
			maybe_emit_1mod(&prog, dst_reg,
					BPF_CLASS(insn->code) == BPF_JMP);
			EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
			goto emit_cond_jmp;
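			/*
			 * Illustrative: for BPF_JMP the comparisons above are
			 * 64-bit, e.g. "cmp rbx, rsi" == 48 39 F3, while
			 * BPF_JMP32 omits the REX.W prefix so only the lower
			 * halves are compared: "cmp ebx, esi" == 39 F3.
			 */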
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JNE | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JLT | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JLE | BPF_K:
		case BPF_JMP | BPF_JSGT | BPF_K:
		case BPF_JMP | BPF_JSLT | BPF_K:
		case BPF_JMP | BPF_JSGE | BPF_K:
		case BPF_JMP | BPF_JSLE | BPF_K:
		case BPF_JMP32 | BPF_JEQ | BPF_K:
		case BPF_JMP32 | BPF_JNE | BPF_K:
		case BPF_JMP32 | BPF_JGT | BPF_K:
		case BPF_JMP32 | BPF_JLT | BPF_K:
		case BPF_JMP32 | BPF_JGE | BPF_K:
		case BPF_JMP32 | BPF_JLE | BPF_K:
		case BPF_JMP32 | BPF_JSGT | BPF_K:
		case BPF_JMP32 | BPF_JSLT | BPF_K:
		case BPF_JMP32 | BPF_JSGE | BPF_K:
		case BPF_JMP32 | BPF_JSLE | BPF_K:
			/* test dst_reg, dst_reg to save one extra byte */
			if (imm32 == 0) {
				maybe_emit_mod(&prog, dst_reg, dst_reg,
					       BPF_CLASS(insn->code) == BPF_JMP);
				EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg));
				goto emit_cond_jmp;
			}

			/* cmp dst_reg, imm8/32 */
			maybe_emit_1mod(&prog, dst_reg,
					BPF_CLASS(insn->code) == BPF_JMP);

			if (is_imm8(imm32))
				EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
			else
				EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32);

emit_cond_jmp:		/* Convert BPF opcode to x86 */
			switch (BPF_OP(insn->code)) {
			case BPF_JEQ:
				jmp_cond = X86_JE;
				break;
			case BPF_JSET:
			case BPF_JNE:
				jmp_cond = X86_JNE;
				break;
			case BPF_JGT:
				/* GT is unsigned '>', JA in x86 */
				jmp_cond = X86_JA;
				break;
			case BPF_JLT:
				/* LT is unsigned '<', JB in x86 */
				jmp_cond = X86_JB;
				break;
			case BPF_JGE:
				/* GE is unsigned '>=', JAE in x86 */
				jmp_cond = X86_JAE;
				break;
			case BPF_JLE:
				/* LE is unsigned '<=', JBE in x86 */
				jmp_cond = X86_JBE;
				break;
			case BPF_JSGT:
				/* Signed '>', GT in x86 */
				jmp_cond = X86_JG;
				break;
			case BPF_JSLT:
				/* Signed '<', LT in x86 */
				jmp_cond = X86_JL;
				break;
			case BPF_JSGE:
				/* Signed '>=', GE in x86 */
				jmp_cond = X86_JGE;
				break;
			case BPF_JSLE:
				/* Signed '<=', LE in x86 */
				jmp_cond = X86_JLE;
				break;
			default: /* to silence GCC warning */
				return -EFAULT;
			}
			jmp_offset = addrs[i + insn->off] - addrs[i];
			if (is_imm8(jmp_offset)) {
				if (jmp_padding) {
					/* To keep the jmp_offset valid, the extra bytes are
					 * padded before the jump insn, so we subtract the
					 * 2 bytes of jmp_cond insn from INSN_SZ_DIFF.
					 *
					 * If the previous pass already emits an imm8
					 * jmp_cond, then this BPF insn won't shrink, so
					 * "nops" is 0.
					 *
					 * On the other hand, if the previous pass emits an
					 * imm32 jmp_cond, the extra 4 bytes(*) is padded to
					 * keep the image from shrinking further.
					 *
					 * (*) imm32 jmp_cond is 6 bytes, and imm8 jmp_cond
					 *     is 2 bytes, so the size difference is 4 bytes.
					 */
					nops = INSN_SZ_DIFF - 2;
					if (nops != 0 && nops != 4) {
						pr_err("unexpected jmp_cond padding: %d bytes\n",
						       nops);
						return -EFAULT;
					}
					emit_nops(&prog, nops);
				}
				EMIT2(jmp_cond, jmp_offset);
			} else if (is_simm32(jmp_offset)) {
				EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
			} else {
				pr_err("cond_jmp gen bug %llx\n", jmp_offset);
				return -EFAULT;
			}

			break;
		case BPF_JMP | BPF_JA:
			if (insn->off == -1)
				/* -1 jmp instructions will always jump
				 * backwards two bytes. Explicitly handling
				 * this case avoids wasting too many passes
				 * when there are long sequences of replaced
				 * dead code.
				 */
				jmp_offset = -2;
			else
				jmp_offset = addrs[i + insn->off] - addrs[i];

			if (!jmp_offset) {
				/*
				 * If jmp_padding is enabled, the extra nops will
				 * be inserted. Otherwise, optimize out nop jumps.
				 */
				if (jmp_padding) {
					/* There are 3 possible conditions.
					 * (1) This BPF_JA is already optimized out in
					 *     the previous run, so there is no need
					 *     to pad any extra byte (0 byte).
					 * (2) The previous pass emits an imm8 jmp,
					 *     so we pad 2 bytes to match the previous
					 *     insn size.
					 * (3) Similarly, the previous pass emits an
					 *     imm32 jmp, and 5 bytes is padded.
					 */
					nops = INSN_SZ_DIFF;
					if (nops != 0 && nops != 2 && nops != 5) {
						pr_err("unexpected nop jump padding: %d bytes\n",
						       nops);
						return -EFAULT;
					}
					emit_nops(&prog, nops);
				}
				break;
			}
emit_jmp:
			if (is_imm8(jmp_offset)) {
				if (jmp_padding) {
					/* To avoid breaking jmp_offset, the extra bytes
					 * are padded before the actual jmp insn, so
					 * 2 bytes is subtracted from INSN_SZ_DIFF.
					 *
					 * If the previous pass already emits an imm8
					 * jmp, there is nothing to pad (0 byte).
					 *
					 * If it emits an imm32 jmp (5 bytes) previously
					 * and now an imm8 jmp (2 bytes), then we pad
					 * (5 - 2 = 3) bytes to stop the image from
					 * shrinking further.
					 */
					nops = INSN_SZ_DIFF - 2;
					if (nops != 0 && nops != 3) {
						pr_err("unexpected jump padding: %d bytes\n",
						       nops);
						return -EFAULT;
					}
					emit_nops(&prog, INSN_SZ_DIFF - 2);
				}
				EMIT2(0xEB, jmp_offset);
			} else if (is_simm32(jmp_offset)) {
				EMIT1_off32(0xE9, jmp_offset);
			} else {
				pr_err("jmp gen bug %llx\n", jmp_offset);
				return -EFAULT;
			}
			break;

		case BPF_JMP | BPF_EXIT:
			if (seen_exit) {
				jmp_offset = ctx->cleanup_addr - addrs[i];
				goto emit_jmp;
			}
			seen_exit = true;
			/* Update cleanup_addr */
			ctx->cleanup_addr = proglen;
			pop_callee_regs(&prog, callee_regs_used);
			EMIT1(0xC9); /* leave */
			emit_return(&prog, image + addrs[i - 1] + (prog - temp));
			break;

		default:
			/*
			 * By design x86-64 JIT should support all BPF instructions.
			 * This error will be seen if new instruction was added
			 * to the interpreter, but not to the JIT, or if there is
			 * junk in bpf_prog.
			 */
			pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
			return -EINVAL;
		}

		ilen = prog - temp;
		if (ilen > BPF_MAX_INSN_SIZE) {
			pr_err("bpf_jit: fatal insn size error\n");
			return -EFAULT;
		}

		if (image) {
			/*
			 * When populating the image, assert that:
			 *
			 *  i) We do not write beyond the allocated space, and
			 * ii) addrs[i] did not change from the prior run, in order
			 *     to validate assumptions made for computing branch
			 *     displacements.
			 */
			if (unlikely(proglen + ilen > oldproglen ||
				     proglen + ilen != addrs[i])) {
				pr_err("bpf_jit: fatal error\n");
				return -EFAULT;
			}
			memcpy(rw_image + proglen, temp, ilen);
		}
		proglen += ilen;
		addrs[i] = proglen;
		prog = temp;
	}

	if (image && excnt != bpf_prog->aux->num_exentries) {
		pr_err("extable is not populated\n");
		return -EFAULT;
	}
	return proglen;
}
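/*
 * Multi-pass note: do_jit() is invoked repeatedly by the JIT core until
 * the image size converges. Each pass reuses addrs[] from the previous
 * one for branch displacements, and the checks above catch an
 * instruction that unexpectedly changed size between passes.
 */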
static void save_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
		      int stack_size)
{
	int i;
	/* Store function arguments to stack.
	 * For a function that accepts two pointers the sequence will be:
	 * mov QWORD PTR [rbp-0x10],rdi
	 * mov QWORD PTR [rbp-0x8],rsi
	 */
	for (i = 0; i < min(nr_args, 6); i++)
		emit_stx(prog, bytes_to_bpf_size(m->arg_size[i]),
			 BPF_REG_FP,
			 i == 5 ? X86_REG_R9 : BPF_REG_1 + i,
			 -(stack_size - i * 8));
}

static void restore_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
			 int stack_size)
{
	int i;

	/* Restore function arguments from stack.
	 * For a function that accepts two pointers the sequence will be:
	 * EMIT4(0x48, 0x8B, 0x7D, 0xF0); mov rdi,QWORD PTR [rbp-0x10]
	 * EMIT4(0x48, 0x8B, 0x75, 0xF8); mov rsi,QWORD PTR [rbp-0x8]
	 */
	for (i = 0; i < min(nr_args, 6); i++)
		emit_ldx(prog, bytes_to_bpf_size(m->arg_size[i]),
			 i == 5 ? X86_REG_R9 : BPF_REG_1 + i,
			 BPF_REG_FP,
			 -(stack_size - i * 8));
}
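/*
 * The i == 5 special case above exists because the sixth SysV argument
 * register, r9, has no BPF counterpart: BPF_REG_1..BPF_REG_5 map to
 * rdi, rsi, rdx, rcx and r8, so X86_REG_R9 fills in for argument six.
 */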
static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
			   struct bpf_tramp_link *l, int stack_size,
			   int run_ctx_off, bool save_ret)
{
	void (*exit)(struct bpf_prog *prog, u64 start,
		     struct bpf_tramp_run_ctx *run_ctx) = __bpf_prog_exit;
	u64 (*enter)(struct bpf_prog *prog,
		     struct bpf_tramp_run_ctx *run_ctx) = __bpf_prog_enter;
	u8 *prog = *pprog;
	u8 *jmp_insn;
	int ctx_cookie_off = offsetof(struct bpf_tramp_run_ctx, bpf_cookie);
	struct bpf_prog *p = l->link.prog;
	u64 cookie = l->cookie;

	/* mov rdi, cookie */
	emit_mov_imm64(&prog, BPF_REG_1, (long) cookie >> 32, (u32) (long) cookie);

	/* Prepare struct bpf_tramp_run_ctx.
	 *
	 * bpf_tramp_run_ctx is already preserved by
	 * arch_prepare_bpf_trampoline().
	 *
	 * mov QWORD PTR [rbp - run_ctx_off + ctx_cookie_off], rdi
	 */
	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_1, -run_ctx_off + ctx_cookie_off);

	if (p->aux->sleepable) {
		enter = __bpf_prog_enter_sleepable;
		exit = __bpf_prog_exit_sleepable;
	} else if (p->expected_attach_type == BPF_LSM_CGROUP) {
		enter = __bpf_prog_enter_lsm_cgroup;
		exit = __bpf_prog_exit_lsm_cgroup;
	}

	/* arg1: mov rdi, progs[i] */
	emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
	/* arg2: lea rsi, [rbp - run_ctx_off] */
	EMIT4(0x48, 0x8D, 0x75, -run_ctx_off);

	if (emit_call(&prog, enter, prog))
		return -EINVAL;
	/* remember prog start time returned by __bpf_prog_enter */
	emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);

	/* if (__bpf_prog_enter*(prog) == 0)
	 *	goto skip_exec_of_prog;
	 */
	EMIT3(0x48, 0x85, 0xC0); /* test rax,rax */
	/* emit 2 nops that will be replaced with JE insn */
	jmp_insn = prog;
	emit_nops(&prog, 2);

	/* arg1: lea rdi, [rbp - stack_size] */
	EMIT4(0x48, 0x8D, 0x7D, -stack_size);
	/* arg2: progs[i]->insnsi for interpreter */
	if (!p->jited)
		emit_mov_imm64(&prog, BPF_REG_2,
			       (long) p->insnsi >> 32,
			       (u32) (long) p->insnsi);
	/* call JITed bpf program or interpreter */
	if (emit_call(&prog, p->bpf_func, prog))
		return -EINVAL;

	/*
	 * BPF_TRAMP_MODIFY_RETURN trampolines can modify the return
	 * of the previous call which is then passed on the stack to
	 * the next BPF program.
	 *
	 * BPF_TRAMP_FENTRY trampoline may need to return the return
	 * value of BPF_PROG_TYPE_STRUCT_OPS prog.
	 */
	if (save_ret)
		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);

	/* replace 2 nops with JE insn, since jmp target is known */
	jmp_insn[0] = X86_JE;
	jmp_insn[1] = prog - jmp_insn - 2;

	/* arg1: mov rdi, progs[i] */
	emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
	/* arg2: mov rsi, rbx <- start time in nsec */
	emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
	/* arg3: lea rdx, [rbp - run_ctx_off] */
	EMIT4(0x48, 0x8D, 0x55, -run_ctx_off);
	if (emit_call(&prog, exit, prog))
		return -EINVAL;

	*pprog = prog;
	return 0;
}

static void emit_align(u8 **pprog, u32 align)
{
	u8 *target, *prog = *pprog;

	target = PTR_ALIGN(prog, align);
	if (target != prog)
		emit_nops(&prog, target - prog);

	*pprog = prog;
}

static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond)
{
	u8 *prog = *pprog;
	s64 offset;

	offset = func - (ip + 2 + 4);
	if (!is_simm32(offset)) {
		pr_err("Target %p is out of range\n", func);
		return -EINVAL;
	}
	EMIT2_off32(0x0F, jmp_cond + 0x10, offset);
	*pprog = prog;
	return 0;
}

static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
		      struct bpf_tramp_links *tl, int stack_size,
		      int run_ctx_off, bool save_ret)
{
	int i;
	u8 *prog = *pprog;

	for (i = 0; i < tl->nr_links; i++) {
		if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size,
				    run_ctx_off, save_ret))
			return -EINVAL;
	}
	*pprog = prog;
	return 0;
}
static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
			      struct bpf_tramp_links *tl, int stack_size,
			      int run_ctx_off, u8 **branches)
{
	u8 *prog = *pprog;
	int i;

	/* The first fmod_ret program will receive a garbage return value.
	 * Set this to 0 to avoid confusing the program.
	 */
	emit_mov_imm32(&prog, false, BPF_REG_0, 0);
	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
	for (i = 0; i < tl->nr_links; i++) {
		if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size, run_ctx_off, true))
			return -EINVAL;

		/* mod_ret prog stored return value into [rbp - 8]. Emit:
		 * if (*(u64 *)(rbp - 8) != 0)
		 *	goto do_fexit;
		 */
		/* cmp QWORD PTR [rbp - 0x8], 0x0 */
		EMIT4(0x48, 0x83, 0x7d, 0xf8); EMIT1(0x00);

		/* Save the location of the branch and generate 6 nops
		 * (4 bytes for an offset and 2 bytes for the jump). These nops
		 * are replaced with a conditional jump once do_fexit (i.e. the
		 * start of the fexit invocation) is finalized.
		 */
		branches[i] = prog;
		emit_nops(&prog, 4 + 2);
	}

	*pprog = prog;
	return 0;
}
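/*
 * In C terms the sequence invoke_bpf_mod_ret() builds is roughly (a
 * sketch; fmod_ret_prog_i is a stand-in name, not a real symbol):
 *
 *	*(u64 *)(rbp - 8) = 0;
 *	for (i = 0; i < tl->nr_links; i++) {
 *		*(u64 *)(rbp - 8) = fmod_ret_prog_i(ctx);
 *		if (*(u64 *)(rbp - 8) != 0)
 *			goto do_fexit;
 *	}
 */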
/* Example:
 * __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
 * its 'struct btf_func_model' will have nr_args=2
 * The assembly code when eth_type_trans is executing after trampoline:
 *
 * push rbp
 * mov rbp, rsp
 * sub rsp, 16                     // space for skb and dev
 * push rbx                        // temp regs to pass start time
 * mov qword ptr [rbp - 16], rdi   // save skb pointer to stack
 * mov qword ptr [rbp - 8], rsi    // save dev pointer to stack
 * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
 * mov rbx, rax                    // remember start time if bpf stats are enabled
 * lea rdi, [rbp - 16]             // R1==ctx of bpf prog
 * call addr_of_jited_FENTRY_prog
 * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
 * mov rsi, rbx                    // prog start time
 * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
 * mov rdi, qword ptr [rbp - 16]   // restore skb pointer from stack
 * mov rsi, qword ptr [rbp - 8]    // restore dev pointer from stack
 * pop rbx
 * leave
 * ret
 *
 * eth_type_trans has a 5-byte nop at the beginning. These 5 bytes will be
 * replaced with 'call generated_bpf_trampoline'. When it returns
 * eth_type_trans will continue executing with original skb and dev pointers.
 *
 * The assembly code when eth_type_trans is called from trampoline:
 *
 * push rbp
 * mov rbp, rsp
 * sub rsp, 24                     // space for skb, dev, return value
 * push rbx                        // temp regs to pass start time
 * mov qword ptr [rbp - 24], rdi   // save skb pointer to stack
 * mov qword ptr [rbp - 16], rsi   // save dev pointer to stack
 * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
 * mov rbx, rax                    // remember start time if bpf stats are enabled
 * lea rdi, [rbp - 24]             // R1==ctx of bpf prog
 * call addr_of_jited_FENTRY_prog  // bpf prog can access skb and dev
 * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
 * mov rsi, rbx                    // prog start time
 * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
 * mov rdi, qword ptr [rbp - 24]   // restore skb pointer from stack
 * mov rsi, qword ptr [rbp - 16]   // restore dev pointer from stack
 * call eth_type_trans+5           // execute body of eth_type_trans
 * mov qword ptr [rbp - 8], rax    // save return value
 * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
 * mov rbx, rax                    // remember start time if bpf stats are enabled
 * lea rdi, [rbp - 24]             // R1==ctx of bpf prog
 * call addr_of_jited_FEXIT_prog   // bpf prog can access skb, dev, return value
 * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
 * mov rsi, rbx                    // prog start time
 * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
 * mov rax, qword ptr [rbp - 8]    // restore eth_type_trans's return value
 * pop rbx
 * leave
 * add rsp, 8                      // skip eth_type_trans's frame
 * ret                             // return to its caller
 */
int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
				const struct btf_func_model *m, u32 flags,
				struct bpf_tramp_links *tlinks,
				void *orig_call)
{
	int ret, i, nr_args = m->nr_args;
	int regs_off, ip_off, args_off, stack_size = nr_args * 8, run_ctx_off;
	struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY];
	struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
	struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
	u8 **branches = NULL;
	u8 *prog;
	bool save_ret;

	/* x86-64 supports up to 6 arguments. 7+ can be added in the future */
	if (nr_args > 6)
		return -ENOTSUPP;

	/* Generated trampoline stack layout:
	 *
	 * RBP + 8         [ return address  ]
	 * RBP + 0         [ RBP             ]
	 *
	 * RBP - 8         [ return value    ]  BPF_TRAMP_F_CALL_ORIG or
	 *                                      BPF_TRAMP_F_RET_FENTRY_RET flags
	 *
	 *                 [ reg_argN        ]  always
	 *                 [ ...             ]
	 * RBP - regs_off  [ reg_arg1        ]  program's ctx pointer
	 *
	 * RBP - args_off  [ args count      ]  always
	 *
	 * RBP - ip_off    [ traced function ]  BPF_TRAMP_F_IP_ARG flag
	 *
	 * RBP - run_ctx_off [ bpf_tramp_run_ctx ]
	 */

	/* room for return value of orig_call or fentry prog */
	save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET);
	if (save_ret)
		stack_size += 8;

	regs_off = stack_size;

	/* args count */
	stack_size += 8;
	args_off = stack_size;

	if (flags & BPF_TRAMP_F_IP_ARG)
		stack_size += 8; /* room for IP address argument */

	ip_off = stack_size;

	stack_size += (sizeof(struct bpf_tramp_run_ctx) + 7) & ~0x7;
	run_ctx_off = stack_size;
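	/* Worked example (illustrative): for eth_type_trans above, nr_args=2
	 * with BPF_TRAMP_F_CALL_ORIG set and no BPF_TRAMP_F_IP_ARG:
	 *
	 *	stack_size  = 2 * 8 + 8 = 24	// two args + saved return value
	 *	regs_off    = 24		// args at [rbp-24] and [rbp-16]
	 *	args_off    = 32		// args count at [rbp-32]
	 *	ip_off      = 32		// unused without F_IP_ARG
	 *	run_ctx_off = 32 + round_up(sizeof(struct bpf_tramp_run_ctx), 8)
	 */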
	if (flags & BPF_TRAMP_F_SKIP_FRAME) {
		/* skip patched call instruction and point orig_call to actual
		 * body of the kernel function.
		 */
		if (is_endbr(*(u32 *)orig_call))
			orig_call += ENDBR_INSN_SIZE;
		orig_call += X86_PATCH_SIZE;
	}

	prog = image;

	EMIT_ENDBR();
	EMIT1(0x55);		 /* push rbp */
	EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
	EMIT4(0x48, 0x83, 0xEC, stack_size); /* sub rsp, stack_size */
	EMIT1(0x53);		 /* push rbx */

	/* Store number of arguments of the traced function:
	 *   mov rax, nr_args
	 *   mov QWORD PTR [rbp - args_off], rax
	 */
	emit_mov_imm64(&prog, BPF_REG_0, 0, (u32) nr_args);
	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -args_off);

	if (flags & BPF_TRAMP_F_IP_ARG) {
		/* Store IP address of the traced function:
		 *   mov rax, QWORD PTR [rbp + 8]
		 *   sub rax, X86_PATCH_SIZE
		 *   mov QWORD PTR [rbp - ip_off], rax
		 */
		emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, 8);
		EMIT4(0x48, 0x83, 0xe8, X86_PATCH_SIZE);
		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -ip_off);
	}

	save_regs(m, &prog, nr_args, regs_off);

	if (flags & BPF_TRAMP_F_CALL_ORIG) {
		/* arg1: mov rdi, im */
		emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
		if (emit_call(&prog, __bpf_tramp_enter, prog)) {
			ret = -EINVAL;
			goto cleanup;
		}
	}

	if (fentry->nr_links)
		if (invoke_bpf(m, &prog, fentry, regs_off, run_ctx_off,
			       flags & BPF_TRAMP_F_RET_FENTRY_RET))
			return -EINVAL;

	if (fmod_ret->nr_links) {
		branches = kcalloc(fmod_ret->nr_links, sizeof(u8 *),
				   GFP_KERNEL);
		if (!branches)
			return -ENOMEM;

		if (invoke_bpf_mod_ret(m, &prog, fmod_ret, regs_off,
				       run_ctx_off, branches)) {
			ret = -EINVAL;
			goto cleanup;
		}
	}

	if (flags & BPF_TRAMP_F_CALL_ORIG) {
		restore_regs(m, &prog, nr_args, regs_off);

		if (flags & BPF_TRAMP_F_ORIG_STACK) {
			emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, 8);
			EMIT2(0xff, 0xd0); /* call *rax */
		} else {
			/* call original function */
			if (emit_call(&prog, orig_call, prog)) {
				ret = -EINVAL;
				goto cleanup;
			}
		}
		/* remember return value on the stack for bpf prog to access */
		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
		im->ip_after_call = prog;
		memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
		prog += X86_PATCH_SIZE;
	}

	if (fmod_ret->nr_links) {
		/* From Intel 64 and IA-32 Architectures Optimization
		 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
		 * Coding Rule 11: All branch targets should be 16-byte
		 * aligned.
		 */
		emit_align(&prog, 16);
		/* Update the branches saved in invoke_bpf_mod_ret with the
		 * aligned address of do_fexit.
		 */
		for (i = 0; i < fmod_ret->nr_links; i++)
			emit_cond_near_jump(&branches[i], prog, branches[i],
					    X86_JNE);
	}

	if (fexit->nr_links)
		if (invoke_bpf(m, &prog, fexit, regs_off, run_ctx_off, false)) {
			ret = -EINVAL;
			goto cleanup;
		}

	if (flags & BPF_TRAMP_F_RESTORE_REGS)
		restore_regs(m, &prog, nr_args, regs_off);

	/* This needs to be done regardless. If there were fmod_ret programs,
	 * the return value is only updated on the stack and still needs to be
	 * restored to R0.
	 */
	if (flags & BPF_TRAMP_F_CALL_ORIG) {
		im->ip_epilogue = prog;
		/* arg1: mov rdi, im */
		emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
		if (emit_call(&prog, __bpf_tramp_exit, prog)) {
			ret = -EINVAL;
			goto cleanup;
		}
	}
	/* restore return value of orig_call or fentry prog back into RAX */
	if (save_ret)
		emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);

	EMIT1(0x5B); /* pop rbx */
	EMIT1(0xC9); /* leave */
	if (flags & BPF_TRAMP_F_SKIP_FRAME)
		/* skip our return address and return to parent */
		EMIT4(0x48, 0x83, 0xC4, 8); /* add rsp, 8 */
	emit_return(&prog, prog);
	/* Make sure the trampoline generation logic doesn't overflow */
	if (WARN_ON_ONCE(prog > (u8 *)image_end - BPF_INSN_SAFETY)) {
		ret = -EFAULT;
		goto cleanup;
	}
	ret = prog - (u8 *)image;

cleanup:
	kfree(branches);
	return ret;
}

static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs)
{
	u8 *jg_reloc, *prog = *pprog;
	int pivot, err, jg_bytes = 1;
	s64 jg_offset;

	if (a == b) {
		/* Leaf node of recursion, i.e. not a range of indices
		 * anymore.
		 */
		EMIT1(add_1mod(0x48, BPF_REG_3));	/* cmp rdx,func */
		if (!is_simm32(progs[a]))
			return -1;
		EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3),
			    progs[a]);
		err = emit_cond_near_jump(&prog,	/* je func */
					  (void *)progs[a], prog,
					  X86_JE);
		if (err)
			return err;

		emit_indirect_jump(&prog, 2 /* rdx */, prog);

		*pprog = prog;
		return 0;
	}

	/* Not a leaf node, so we pivot, and recursively descend into
	 * the lower and upper ranges.
	 */
	pivot = (b - a) / 2;
	EMIT1(add_1mod(0x48, BPF_REG_3));		/* cmp rdx,func */
	if (!is_simm32(progs[a + pivot]))
		return -1;
	EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3), progs[a + pivot]);

	if (pivot > 2) {				/* jg upper_part */
		/* Require near jump. */
		jg_bytes = 4;
		EMIT2_off32(0x0F, X86_JG + 0x10, 0);
	} else {
		EMIT2(X86_JG, 0);
	}
	jg_reloc = prog;

	err = emit_bpf_dispatcher(&prog, a, a + pivot,	/* emit lower_part */
				  progs);
	if (err)
		return err;

	/* From Intel 64 and IA-32 Architectures Optimization
	 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
	 * Coding Rule 11: All branch targets should be 16-byte
	 * aligned.
	 */
	emit_align(&prog, 16);
	jg_offset = prog - jg_reloc;
	emit_code(jg_reloc - jg_bytes, jg_offset, jg_bytes);

	err = emit_bpf_dispatcher(&prog, a + pivot + 1,	/* emit upper_part */
				  b, progs);
	if (err)
		return err;

	*pprog = prog;
	return 0;
}

static int cmp_ips(const void *a, const void *b)
{
	const s64 *ipa = a;
	const s64 *ipb = b;

	if (*ipa > *ipb)
		return 1;
	if (*ipa < *ipb)
		return -1;
	return 0;
}

int arch_prepare_bpf_dispatcher(void *image, s64 *funcs, int num_funcs)
{
	u8 *prog = image;

	sort(funcs, num_funcs, sizeof(funcs[0]), cmp_ips, NULL);
	return emit_bpf_dispatcher(&prog, 0, num_funcs - 1, funcs);
}
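/*
 * Illustrative shape of the emitted dispatcher (assumed sorted addresses
 * p0 < p1, two programs): emit_bpf_dispatcher(&prog, 0, 1, progs) pivots
 * on index 0 and produces roughly
 *
 *	cmp rdx, p0
 *	jg  .upper		// rdx > p0: try p1
 *	cmp rdx, p0		// leaf for index 0
 *	je  p0
 *	jmp *rdx		// no match: retpoline-aware indirect jump
 * .upper:			// 16-byte aligned by emit_align()
 *	cmp rdx, p1		// leaf for index 1
 *	je  p1
 *	jmp *rdx
 */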
struct x64_jit_data {
	struct bpf_binary_header *rw_header;
	struct bpf_binary_header *header;
	int *addrs;
	u8 *image;
	int proglen;
	struct jit_context ctx;
};

#define MAX_PASSES 20
#define PADDING_PASSES (MAX_PASSES - 5)
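/*
 * Pass budget sketch (values from the defines above): passes 0..14 JIT
 * without padding and normally converge in a handful of iterations. If
 * the image length is still changing by PADDING_PASSES (15), the
 * remaining passes run do_jit() with padding == true, so shrinking
 * branches are nop-padded to a fixed size and the image length is
 * forced to stabilize within the MAX_PASSES (20) budget.
 */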
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
	struct bpf_binary_header *rw_header = NULL;
	struct bpf_binary_header *header = NULL;
	struct bpf_prog *tmp, *orig_prog = prog;
	struct x64_jit_data *jit_data;
	int proglen, oldproglen = 0;
	struct jit_context ctx = {};
	bool tmp_blinded = false;
	bool extra_pass = false;
	bool padding = false;
	u8 *rw_image = NULL;
	u8 *image = NULL;
	int *addrs;
	int pass;
	int i;

	if (!prog->jit_requested)
		return orig_prog;

	tmp = bpf_jit_blind_constants(prog);
	/*
	 * If blinding was requested and we failed during blinding,
	 * we must fall back to the interpreter.
	 */
	if (IS_ERR(tmp))
		return orig_prog;
	if (tmp != prog) {
		tmp_blinded = true;
		prog = tmp;
	}

	jit_data = prog->aux->jit_data;
	if (!jit_data) {
		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
		if (!jit_data) {
			prog = orig_prog;
			goto out;
		}
		prog->aux->jit_data = jit_data;
	}
	addrs = jit_data->addrs;
	if (addrs) {
		ctx = jit_data->ctx;
		oldproglen = jit_data->proglen;
		image = jit_data->image;
		header = jit_data->header;
		rw_header = jit_data->rw_header;
		rw_image = (void *)rw_header + ((void *)image - (void *)header);
		extra_pass = true;
		padding = true;
		goto skip_init_addrs;
	}
	addrs = kvmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL);
	if (!addrs) {
		prog = orig_prog;
		goto out_addrs;
	}

	/*
	 * Before the first pass, make a rough estimation of addrs[]:
	 * each BPF instruction is translated to less than 64 bytes.
	 */
	for (proglen = 0, i = 0; i <= prog->len; i++) {
		proglen += 64;
		addrs[i] = proglen;
	}
	ctx.cleanup_addr = proglen;
skip_init_addrs:

	/*
	 * JITed image shrinks with every pass and the loop iterates
	 * until the image stops shrinking. Very large BPF programs
	 * may converge on the last pass. In such a case do one more
	 * pass to emit the final image.
	 */
	for (pass = 0; pass < MAX_PASSES || image; pass++) {
		if (!padding && pass >= PADDING_PASSES)
			padding = true;
		proglen = do_jit(prog, addrs, image, rw_image, oldproglen, &ctx, padding);
		if (proglen <= 0) {
out_image:
			image = NULL;
			if (header) {
				bpf_arch_text_copy(&header->size, &rw_header->size,
						   sizeof(rw_header->size));
				bpf_jit_binary_pack_free(header, rw_header);
			}
			/* Fall back to interpreter mode */
			prog = orig_prog;
			if (extra_pass) {
				prog->bpf_func = NULL;
				prog->jited = 0;
				prog->jited_len = 0;
			}
			goto out_addrs;
		}
		if (image) {
			if (proglen != oldproglen) {
				pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
				       proglen, oldproglen);
				goto out_image;
			}
			break;
		}
		if (proglen == oldproglen) {
			/*
			 * The number of entries in extable is the number of BPF_LDX
			 * insns that access kernel memory via "pointer to BTF type".
			 * The verifier changed their opcode from LDX|MEM|size
			 * to LDX|PROBE_MEM|size to make JITing easier.
			 */
			u32 align = __alignof__(struct exception_table_entry);
			u32 extable_size = prog->aux->num_exentries *
				sizeof(struct exception_table_entry);

			/* allocate module memory for x86 insns and extable */
			header = bpf_jit_binary_pack_alloc(roundup(proglen, align) + extable_size,
							   &image, align, &rw_header, &rw_image,
							   jit_fill_hole);
			if (!header) {
				prog = orig_prog;
				goto out_addrs;
			}
			prog->aux->extable = (void *) image + roundup(proglen, align);
		}
		oldproglen = proglen;
		cond_resched();
	}

	if (bpf_jit_enable > 1)
		bpf_jit_dump(prog->len, proglen, pass + 1, image);

	if (image) {
		if (!prog->is_func || extra_pass) {
			/*
			 * bpf_jit_binary_pack_finalize fails in two scenarios:
			 *   1) header is not pointing to proper module memory;
			 *   2) the arch doesn't support bpf_arch_text_copy().
			 *
			 * Both cases are serious bugs and justify WARN_ON.
			 */
			if (WARN_ON(bpf_jit_binary_pack_finalize(prog, header, rw_header))) {
				/* header has been freed */
				header = NULL;
				goto out_image;
			}

			bpf_tail_call_direct_fixup(prog);
		} else {
			jit_data->addrs = addrs;
			jit_data->ctx = ctx;
			jit_data->proglen = proglen;
			jit_data->image = image;
			jit_data->header = header;
			jit_data->rw_header = rw_header;
		}
		prog->bpf_func = (void *)image;
		prog->jited = 1;
		prog->jited_len = proglen;
	} else {
		prog = orig_prog;
	}

	if (!image || !prog->is_func || extra_pass) {
		if (image)
			bpf_prog_fill_jited_linfo(prog, addrs + 1);
out_addrs:
		kvfree(addrs);
		kfree(jit_data);
		prog->aux->jit_data = NULL;
	}
out:
	if (tmp_blinded)
		bpf_jit_prog_release_other(prog, prog == orig_prog ?
					   tmp : orig_prog);
	return prog;
}
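/*
 * Flow sketch for subprograms (bpf2bpf calls): on the first invocation
 * with prog->is_func set, the image and addrs are parked in jit_data
 * above instead of being finalized, because call offsets into other
 * subprograms are not known yet. Once every subprogram has been JITed,
 * the verifier invokes bpf_int_jit_compile() again (extra_pass == true,
 * padding == true); do_jit() then rewrites the image with the final
 * call addresses before bpf_jit_binary_pack_finalize() seals it.
 */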
bool bpf_jit_supports_kfunc_call(void)
{
	return true;
}

void *bpf_arch_text_copy(void *dst, void *src, size_t len)
{
	if (text_poke_copy(dst, src, len) == NULL)
		return ERR_PTR(-EINVAL);
	return dst;
}

/* Indicate the JIT backend supports mixing bpf2bpf and tailcalls. */
bool bpf_jit_supports_subprog_tailcalls(void)
{
	return true;
}

void bpf_jit_free(struct bpf_prog *prog)
{
	if (prog->jited) {
		struct x64_jit_data *jit_data = prog->aux->jit_data;
		struct bpf_binary_header *hdr;

		/*
		 * If we fail the final pass of JIT (from jit_subprogs),
		 * the program may not be finalized yet. Call finalize here
		 * before freeing it.
		 */
		if (jit_data) {
			bpf_jit_binary_pack_finalize(prog, jit_data->header,
						     jit_data->rw_header);
			kvfree(jit_data->addrs);
			kfree(jit_data);
		}
		hdr = bpf_jit_binary_pack_hdr(prog);
		bpf_jit_binary_pack_free(hdr, NULL);
		WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(prog));
	}

	bpf_prog_unlock_free(prog);
}