1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * BPF JIT compiler 4 * 5 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com) 6 * Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com 7 */ 8 #include <linux/netdevice.h> 9 #include <linux/filter.h> 10 #include <linux/if_vlan.h> 11 #include <linux/bpf.h> 12 #include <linux/memory.h> 13 #include <linux/sort.h> 14 #include <asm/extable.h> 15 #include <asm/ftrace.h> 16 #include <asm/set_memory.h> 17 #include <asm/nospec-branch.h> 18 #include <asm/text-patching.h> 19 20 static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len) 21 { 22 if (len == 1) 23 *ptr = bytes; 24 else if (len == 2) 25 *(u16 *)ptr = bytes; 26 else { 27 *(u32 *)ptr = bytes; 28 barrier(); 29 } 30 return ptr + len; 31 } 32 33 #define EMIT(bytes, len) \ 34 do { prog = emit_code(prog, bytes, len); } while (0) 35 36 #define EMIT1(b1) EMIT(b1, 1) 37 #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2) 38 #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3) 39 #define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4) 40 41 #define EMIT1_off32(b1, off) \ 42 do { EMIT1(b1); EMIT(off, 4); } while (0) 43 #define EMIT2_off32(b1, b2, off) \ 44 do { EMIT2(b1, b2); EMIT(off, 4); } while (0) 45 #define EMIT3_off32(b1, b2, b3, off) \ 46 do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0) 47 #define EMIT4_off32(b1, b2, b3, b4, off) \ 48 do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0) 49 50 #ifdef CONFIG_X86_KERNEL_IBT 51 #define EMIT_ENDBR() EMIT(gen_endbr(), 4) 52 #else 53 #define EMIT_ENDBR() 54 #endif 55 56 static bool is_imm8(int value) 57 { 58 return value <= 127 && value >= -128; 59 } 60 61 /* 62 * Let us limit the positive offset to be <= 123. 63 * This is to ensure eventual jit convergence For the following patterns: 64 * ... 65 * pass4, final_proglen=4391: 66 * ... 67 * 20e: 48 85 ff test rdi,rdi 68 * 211: 74 7d je 0x290 69 * 213: 48 8b 77 00 mov rsi,QWORD PTR [rdi+0x0] 70 * ... 71 * 289: 48 85 ff test rdi,rdi 72 * 28c: 74 17 je 0x2a5 73 * 28e: e9 7f ff ff ff jmp 0x212 74 * 293: bf 03 00 00 00 mov edi,0x3 75 * Note that insn at 0x211 is 2-byte cond jump insn for offset 0x7d (-125) 76 * and insn at 0x28e is 5-byte jmp insn with offset -129. 77 * 78 * pass5, final_proglen=4392: 79 * ... 80 * 20e: 48 85 ff test rdi,rdi 81 * 211: 0f 84 80 00 00 00 je 0x297 82 * 217: 48 8b 77 00 mov rsi,QWORD PTR [rdi+0x0] 83 * ... 84 * 28d: 48 85 ff test rdi,rdi 85 * 290: 74 1a je 0x2ac 86 * 292: eb 84 jmp 0x218 87 * 294: bf 03 00 00 00 mov edi,0x3 88 * Note that insn at 0x211 is 6-byte cond jump insn now since its offset 89 * becomes 0x80 based on previous round (0x293 - 0x213 = 0x80). 90 * At the same time, insn at 0x292 is a 2-byte insn since its offset is 91 * -124. 92 * 93 * pass6 will repeat the same code as in pass4 and this will prevent 94 * eventual convergence. 95 * 96 * To fix this issue, we need to break je (2->6 bytes) <-> jmp (5->2 bytes) 97 * cycle in the above. In the above example je offset <= 0x7c should work. 98 * 99 * For other cases, je <-> je needs offset <= 0x7b to avoid no convergence 100 * issue. For jmp <-> je and jmp <-> jmp cases, jmp offset <= 0x7c should 101 * avoid no convergence issue. 102 * 103 * Overall, let us limit the positive offset for 8bit cond/uncond jmp insn 104 * to maximum 123 (0x7b). This way, the jit pass can eventually converge. 
105 */ 106 static bool is_imm8_jmp_offset(int value) 107 { 108 return value <= 123 && value >= -128; 109 } 110 111 static bool is_simm32(s64 value) 112 { 113 return value == (s64)(s32)value; 114 } 115 116 static bool is_uimm32(u64 value) 117 { 118 return value == (u64)(u32)value; 119 } 120 121 /* mov dst, src */ 122 #define EMIT_mov(DST, SRC) \ 123 do { \ 124 if (DST != SRC) \ 125 EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \ 126 } while (0) 127 128 static int bpf_size_to_x86_bytes(int bpf_size) 129 { 130 if (bpf_size == BPF_W) 131 return 4; 132 else if (bpf_size == BPF_H) 133 return 2; 134 else if (bpf_size == BPF_B) 135 return 1; 136 else if (bpf_size == BPF_DW) 137 return 4; /* imm32 */ 138 else 139 return 0; 140 } 141 142 /* 143 * List of x86 cond jumps opcodes (. + s8) 144 * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32) 145 */ 146 #define X86_JB 0x72 147 #define X86_JAE 0x73 148 #define X86_JE 0x74 149 #define X86_JNE 0x75 150 #define X86_JBE 0x76 151 #define X86_JA 0x77 152 #define X86_JL 0x7C 153 #define X86_JGE 0x7D 154 #define X86_JLE 0x7E 155 #define X86_JG 0x7F 156 157 /* Pick a register outside of BPF range for JIT internal work */ 158 #define AUX_REG (MAX_BPF_JIT_REG + 1) 159 #define X86_REG_R9 (MAX_BPF_JIT_REG + 2) 160 161 /* 162 * The following table maps BPF registers to x86-64 registers. 163 * 164 * x86-64 register R12 is unused, since if used as base address 165 * register in load/store instructions, it always needs an 166 * extra byte of encoding and is callee saved. 167 * 168 * x86-64 register R9 is not used by BPF programs, but can be used by BPF 169 * trampoline. x86-64 register R10 is used for blinding (if enabled). 170 */ 171 static const int reg2hex[] = { 172 [BPF_REG_0] = 0, /* RAX */ 173 [BPF_REG_1] = 7, /* RDI */ 174 [BPF_REG_2] = 6, /* RSI */ 175 [BPF_REG_3] = 2, /* RDX */ 176 [BPF_REG_4] = 1, /* RCX */ 177 [BPF_REG_5] = 0, /* R8 */ 178 [BPF_REG_6] = 3, /* RBX callee saved */ 179 [BPF_REG_7] = 5, /* R13 callee saved */ 180 [BPF_REG_8] = 6, /* R14 callee saved */ 181 [BPF_REG_9] = 7, /* R15 callee saved */ 182 [BPF_REG_FP] = 5, /* RBP readonly */ 183 [BPF_REG_AX] = 2, /* R10 temp register */ 184 [AUX_REG] = 3, /* R11 temp register */ 185 [X86_REG_R9] = 1, /* R9 register, 6th function argument */ 186 }; 187 188 static const int reg2pt_regs[] = { 189 [BPF_REG_0] = offsetof(struct pt_regs, ax), 190 [BPF_REG_1] = offsetof(struct pt_regs, di), 191 [BPF_REG_2] = offsetof(struct pt_regs, si), 192 [BPF_REG_3] = offsetof(struct pt_regs, dx), 193 [BPF_REG_4] = offsetof(struct pt_regs, cx), 194 [BPF_REG_5] = offsetof(struct pt_regs, r8), 195 [BPF_REG_6] = offsetof(struct pt_regs, bx), 196 [BPF_REG_7] = offsetof(struct pt_regs, r13), 197 [BPF_REG_8] = offsetof(struct pt_regs, r14), 198 [BPF_REG_9] = offsetof(struct pt_regs, r15), 199 }; 200 201 /* 202 * is_ereg() == true if BPF register 'reg' maps to x86-64 r8..r15 203 * which need extra byte of encoding. 204 * rax,rcx,...,rbp have simpler encoding 205 */ 206 static bool is_ereg(u32 reg) 207 { 208 return (1 << reg) & (BIT(BPF_REG_5) | 209 BIT(AUX_REG) | 210 BIT(BPF_REG_7) | 211 BIT(BPF_REG_8) | 212 BIT(BPF_REG_9) | 213 BIT(X86_REG_R9) | 214 BIT(BPF_REG_AX)); 215 } 216 217 /* 218 * is_ereg_8l() == true if BPF register 'reg' is mapped to access x86-64 219 * lower 8-bit registers dil,sil,bpl,spl,r8b..r15b, which need extra byte 220 * of encoding. al,cl,dl,bl have simpler encoding. 
221 */ 222 static bool is_ereg_8l(u32 reg) 223 { 224 return is_ereg(reg) || 225 (1 << reg) & (BIT(BPF_REG_1) | 226 BIT(BPF_REG_2) | 227 BIT(BPF_REG_FP)); 228 } 229 230 static bool is_axreg(u32 reg) 231 { 232 return reg == BPF_REG_0; 233 } 234 235 /* Add modifiers if 'reg' maps to x86-64 registers R8..R15 */ 236 static u8 add_1mod(u8 byte, u32 reg) 237 { 238 if (is_ereg(reg)) 239 byte |= 1; 240 return byte; 241 } 242 243 static u8 add_2mod(u8 byte, u32 r1, u32 r2) 244 { 245 if (is_ereg(r1)) 246 byte |= 1; 247 if (is_ereg(r2)) 248 byte |= 4; 249 return byte; 250 } 251 252 /* Encode 'dst_reg' register into x86-64 opcode 'byte' */ 253 static u8 add_1reg(u8 byte, u32 dst_reg) 254 { 255 return byte + reg2hex[dst_reg]; 256 } 257 258 /* Encode 'dst_reg' and 'src_reg' registers into x86-64 opcode 'byte' */ 259 static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg) 260 { 261 return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3); 262 } 263 264 /* Some 1-byte opcodes for binary ALU operations */ 265 static u8 simple_alu_opcodes[] = { 266 [BPF_ADD] = 0x01, 267 [BPF_SUB] = 0x29, 268 [BPF_AND] = 0x21, 269 [BPF_OR] = 0x09, 270 [BPF_XOR] = 0x31, 271 [BPF_LSH] = 0xE0, 272 [BPF_RSH] = 0xE8, 273 [BPF_ARSH] = 0xF8, 274 }; 275 276 static void jit_fill_hole(void *area, unsigned int size) 277 { 278 /* Fill whole space with INT3 instructions */ 279 memset(area, 0xcc, size); 280 } 281 282 int bpf_arch_text_invalidate(void *dst, size_t len) 283 { 284 return IS_ERR_OR_NULL(text_poke_set(dst, 0xcc, len)); 285 } 286 287 struct jit_context { 288 int cleanup_addr; /* Epilogue code offset */ 289 290 /* 291 * Program specific offsets of labels in the code; these rely on the 292 * JIT doing at least 2 passes, recording the position on the first 293 * pass, only to generate the correct offset on the second pass. 294 */ 295 int tail_call_direct_label; 296 int tail_call_indirect_label; 297 }; 298 299 /* Maximum number of bytes emitted while JITing one eBPF insn */ 300 #define BPF_MAX_INSN_SIZE 128 301 #define BPF_INSN_SAFETY 64 302 303 /* Number of bytes emit_patch() needs to generate instructions */ 304 #define X86_PATCH_SIZE 5 305 /* Number of bytes that will be skipped on tailcall */ 306 #define X86_TAIL_CALL_OFFSET (11 + ENDBR_INSN_SIZE) 307 308 static void push_callee_regs(u8 **pprog, bool *callee_regs_used) 309 { 310 u8 *prog = *pprog; 311 312 if (callee_regs_used[0]) 313 EMIT1(0x53); /* push rbx */ 314 if (callee_regs_used[1]) 315 EMIT2(0x41, 0x55); /* push r13 */ 316 if (callee_regs_used[2]) 317 EMIT2(0x41, 0x56); /* push r14 */ 318 if (callee_regs_used[3]) 319 EMIT2(0x41, 0x57); /* push r15 */ 320 *pprog = prog; 321 } 322 323 static void pop_callee_regs(u8 **pprog, bool *callee_regs_used) 324 { 325 u8 *prog = *pprog; 326 327 if (callee_regs_used[3]) 328 EMIT2(0x41, 0x5F); /* pop r15 */ 329 if (callee_regs_used[2]) 330 EMIT2(0x41, 0x5E); /* pop r14 */ 331 if (callee_regs_used[1]) 332 EMIT2(0x41, 0x5D); /* pop r13 */ 333 if (callee_regs_used[0]) 334 EMIT1(0x5B); /* pop rbx */ 335 *pprog = prog; 336 } 337 338 /* 339 * Emit x86-64 prologue code for BPF program. 
340 * bpf_tail_call helper will skip the first X86_TAIL_CALL_OFFSET bytes 341 * while jumping to another program 342 */ 343 static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf, 344 bool tail_call_reachable, bool is_subprog) 345 { 346 u8 *prog = *pprog; 347 348 /* BPF trampoline can be made to work without these nops, 349 * but let's waste 5 bytes for now and optimize later 350 */ 351 EMIT_ENDBR(); 352 memcpy(prog, x86_nops[5], X86_PATCH_SIZE); 353 prog += X86_PATCH_SIZE; 354 if (!ebpf_from_cbpf) { 355 if (tail_call_reachable && !is_subprog) 356 EMIT2(0x31, 0xC0); /* xor eax, eax */ 357 else 358 EMIT2(0x66, 0x90); /* nop2 */ 359 } 360 EMIT1(0x55); /* push rbp */ 361 EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */ 362 363 /* X86_TAIL_CALL_OFFSET is here */ 364 EMIT_ENDBR(); 365 366 /* sub rsp, rounded_stack_depth */ 367 if (stack_depth) 368 EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth, 8)); 369 if (tail_call_reachable) 370 EMIT1(0x50); /* push rax */ 371 *pprog = prog; 372 } 373 374 static int emit_patch(u8 **pprog, void *func, void *ip, u8 opcode) 375 { 376 u8 *prog = *pprog; 377 s64 offset; 378 379 offset = func - (ip + X86_PATCH_SIZE); 380 if (!is_simm32(offset)) { 381 pr_err("Target call %p is out of range\n", func); 382 return -ERANGE; 383 } 384 EMIT1_off32(opcode, offset); 385 *pprog = prog; 386 return 0; 387 } 388 389 static int emit_call(u8 **pprog, void *func, void *ip) 390 { 391 return emit_patch(pprog, func, ip, 0xE8); 392 } 393 394 static int emit_rsb_call(u8 **pprog, void *func, void *ip) 395 { 396 OPTIMIZER_HIDE_VAR(func); 397 ip += x86_call_depth_emit_accounting(pprog, func); 398 return emit_patch(pprog, func, ip, 0xE8); 399 } 400 401 static int emit_jump(u8 **pprog, void *func, void *ip) 402 { 403 return emit_patch(pprog, func, ip, 0xE9); 404 } 405 406 static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t, 407 void *old_addr, void *new_addr) 408 { 409 const u8 *nop_insn = x86_nops[5]; 410 u8 old_insn[X86_PATCH_SIZE]; 411 u8 new_insn[X86_PATCH_SIZE]; 412 u8 *prog; 413 int ret; 414 415 memcpy(old_insn, nop_insn, X86_PATCH_SIZE); 416 if (old_addr) { 417 prog = old_insn; 418 ret = t == BPF_MOD_CALL ? 419 emit_call(&prog, old_addr, ip) : 420 emit_jump(&prog, old_addr, ip); 421 if (ret) 422 return ret; 423 } 424 425 memcpy(new_insn, nop_insn, X86_PATCH_SIZE); 426 if (new_addr) { 427 prog = new_insn; 428 ret = t == BPF_MOD_CALL ? 429 emit_call(&prog, new_addr, ip) : 430 emit_jump(&prog, new_addr, ip); 431 if (ret) 432 return ret; 433 } 434 435 ret = -EBUSY; 436 mutex_lock(&text_mutex); 437 if (memcmp(ip, old_insn, X86_PATCH_SIZE)) 438 goto out; 439 ret = 1; 440 if (memcmp(ip, new_insn, X86_PATCH_SIZE)) { 441 text_poke_bp(ip, new_insn, X86_PATCH_SIZE, NULL); 442 ret = 0; 443 } 444 out: 445 mutex_unlock(&text_mutex); 446 return ret; 447 } 448 449 int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t, 450 void *old_addr, void *new_addr) 451 { 452 if (!is_kernel_text((long)ip) && 453 !is_bpf_text_address((long)ip)) 454 /* BPF poking in modules is not supported */ 455 return -EINVAL; 456 457 /* 458 * See emit_prologue(), for IBT builds the trampoline hook is preceded 459 * with an ENDBR instruction. 
460 */ 461 if (is_endbr(*(u32 *)ip)) 462 ip += ENDBR_INSN_SIZE; 463 464 return __bpf_arch_text_poke(ip, t, old_addr, new_addr); 465 } 466 467 #define EMIT_LFENCE() EMIT3(0x0F, 0xAE, 0xE8) 468 469 static void emit_indirect_jump(u8 **pprog, int reg, u8 *ip) 470 { 471 u8 *prog = *pprog; 472 473 if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) { 474 EMIT_LFENCE(); 475 EMIT2(0xFF, 0xE0 + reg); 476 } else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) { 477 OPTIMIZER_HIDE_VAR(reg); 478 if (cpu_feature_enabled(X86_FEATURE_CALL_DEPTH)) 479 emit_jump(&prog, &__x86_indirect_jump_thunk_array[reg], ip); 480 else 481 emit_jump(&prog, &__x86_indirect_thunk_array[reg], ip); 482 } else { 483 EMIT2(0xFF, 0xE0 + reg); /* jmp *%\reg */ 484 if (IS_ENABLED(CONFIG_RETPOLINE) || IS_ENABLED(CONFIG_SLS)) 485 EMIT1(0xCC); /* int3 */ 486 } 487 488 *pprog = prog; 489 } 490 491 static void emit_return(u8 **pprog, u8 *ip) 492 { 493 u8 *prog = *pprog; 494 495 if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) { 496 emit_jump(&prog, x86_return_thunk, ip); 497 } else { 498 EMIT1(0xC3); /* ret */ 499 if (IS_ENABLED(CONFIG_SLS)) 500 EMIT1(0xCC); /* int3 */ 501 } 502 503 *pprog = prog; 504 } 505 506 /* 507 * Generate the following code: 508 * 509 * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ... 510 * if (index >= array->map.max_entries) 511 * goto out; 512 * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT) 513 * goto out; 514 * prog = array->ptrs[index]; 515 * if (prog == NULL) 516 * goto out; 517 * goto *(prog->bpf_func + prologue_size); 518 * out: 519 */ 520 static void emit_bpf_tail_call_indirect(u8 **pprog, bool *callee_regs_used, 521 u32 stack_depth, u8 *ip, 522 struct jit_context *ctx) 523 { 524 int tcc_off = -4 - round_up(stack_depth, 8); 525 u8 *prog = *pprog, *start = *pprog; 526 int offset; 527 528 /* 529 * rdi - pointer to ctx 530 * rsi - pointer to bpf_array 531 * rdx - index in bpf_array 532 */ 533 534 /* 535 * if (index >= array->map.max_entries) 536 * goto out; 537 */ 538 EMIT2(0x89, 0xD2); /* mov edx, edx */ 539 EMIT3(0x39, 0x56, /* cmp dword ptr [rsi + 16], edx */ 540 offsetof(struct bpf_array, map.max_entries)); 541 542 offset = ctx->tail_call_indirect_label - (prog + 2 - start); 543 EMIT2(X86_JBE, offset); /* jbe out */ 544 545 /* 546 * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT) 547 * goto out; 548 */ 549 EMIT2_off32(0x8B, 0x85, tcc_off); /* mov eax, dword ptr [rbp - tcc_off] */ 550 EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */ 551 552 offset = ctx->tail_call_indirect_label - (prog + 2 - start); 553 EMIT2(X86_JAE, offset); /* jae out */ 554 EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */ 555 EMIT2_off32(0x89, 0x85, tcc_off); /* mov dword ptr [rbp - tcc_off], eax */ 556 557 /* prog = array->ptrs[index]; */ 558 EMIT4_off32(0x48, 0x8B, 0x8C, 0xD6, /* mov rcx, [rsi + rdx * 8 + offsetof(...)] */ 559 offsetof(struct bpf_array, ptrs)); 560 561 /* 562 * if (prog == NULL) 563 * goto out; 564 */ 565 EMIT3(0x48, 0x85, 0xC9); /* test rcx,rcx */ 566 567 offset = ctx->tail_call_indirect_label - (prog + 2 - start); 568 EMIT2(X86_JE, offset); /* je out */ 569 570 pop_callee_regs(&prog, callee_regs_used); 571 572 EMIT1(0x58); /* pop rax */ 573 if (stack_depth) 574 EMIT3_off32(0x48, 0x81, 0xC4, /* add rsp, sd */ 575 round_up(stack_depth, 8)); 576 577 /* goto *(prog->bpf_func + X86_TAIL_CALL_OFFSET); */ 578 EMIT4(0x48, 0x8B, 0x49, /* mov rcx, qword ptr [rcx + 32] */ 579 offsetof(struct bpf_prog, bpf_func)); 580 EMIT4(0x48, 0x83, 0xC1, /* add rcx, X86_TAIL_CALL_OFFSET */ 581 
X86_TAIL_CALL_OFFSET); 582 /* 583 * Now we're ready to jump into next BPF program 584 * rdi == ctx (1st arg) 585 * rcx == prog->bpf_func + X86_TAIL_CALL_OFFSET 586 */ 587 emit_indirect_jump(&prog, 1 /* rcx */, ip + (prog - start)); 588 589 /* out: */ 590 ctx->tail_call_indirect_label = prog - start; 591 *pprog = prog; 592 } 593 594 static void emit_bpf_tail_call_direct(struct bpf_jit_poke_descriptor *poke, 595 u8 **pprog, u8 *ip, 596 bool *callee_regs_used, u32 stack_depth, 597 struct jit_context *ctx) 598 { 599 int tcc_off = -4 - round_up(stack_depth, 8); 600 u8 *prog = *pprog, *start = *pprog; 601 int offset; 602 603 /* 604 * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT) 605 * goto out; 606 */ 607 EMIT2_off32(0x8B, 0x85, tcc_off); /* mov eax, dword ptr [rbp - tcc_off] */ 608 EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */ 609 610 offset = ctx->tail_call_direct_label - (prog + 2 - start); 611 EMIT2(X86_JAE, offset); /* jae out */ 612 EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */ 613 EMIT2_off32(0x89, 0x85, tcc_off); /* mov dword ptr [rbp - tcc_off], eax */ 614 615 poke->tailcall_bypass = ip + (prog - start); 616 poke->adj_off = X86_TAIL_CALL_OFFSET; 617 poke->tailcall_target = ip + ctx->tail_call_direct_label - X86_PATCH_SIZE; 618 poke->bypass_addr = (u8 *)poke->tailcall_target + X86_PATCH_SIZE; 619 620 emit_jump(&prog, (u8 *)poke->tailcall_target + X86_PATCH_SIZE, 621 poke->tailcall_bypass); 622 623 pop_callee_regs(&prog, callee_regs_used); 624 EMIT1(0x58); /* pop rax */ 625 if (stack_depth) 626 EMIT3_off32(0x48, 0x81, 0xC4, round_up(stack_depth, 8)); 627 628 memcpy(prog, x86_nops[5], X86_PATCH_SIZE); 629 prog += X86_PATCH_SIZE; 630 631 /* out: */ 632 ctx->tail_call_direct_label = prog - start; 633 634 *pprog = prog; 635 } 636 637 static void bpf_tail_call_direct_fixup(struct bpf_prog *prog) 638 { 639 struct bpf_jit_poke_descriptor *poke; 640 struct bpf_array *array; 641 struct bpf_prog *target; 642 int i, ret; 643 644 for (i = 0; i < prog->aux->size_poke_tab; i++) { 645 poke = &prog->aux->poke_tab[i]; 646 if (poke->aux && poke->aux != prog->aux) 647 continue; 648 649 WARN_ON_ONCE(READ_ONCE(poke->tailcall_target_stable)); 650 651 if (poke->reason != BPF_POKE_REASON_TAIL_CALL) 652 continue; 653 654 array = container_of(poke->tail_call.map, struct bpf_array, map); 655 mutex_lock(&array->aux->poke_mutex); 656 target = array->ptrs[poke->tail_call.key]; 657 if (target) { 658 ret = __bpf_arch_text_poke(poke->tailcall_target, 659 BPF_MOD_JUMP, NULL, 660 (u8 *)target->bpf_func + 661 poke->adj_off); 662 BUG_ON(ret < 0); 663 ret = __bpf_arch_text_poke(poke->tailcall_bypass, 664 BPF_MOD_JUMP, 665 (u8 *)poke->tailcall_target + 666 X86_PATCH_SIZE, NULL); 667 BUG_ON(ret < 0); 668 } 669 WRITE_ONCE(poke->tailcall_target_stable, true); 670 mutex_unlock(&array->aux->poke_mutex); 671 } 672 } 673 674 static void emit_mov_imm32(u8 **pprog, bool sign_propagate, 675 u32 dst_reg, const u32 imm32) 676 { 677 u8 *prog = *pprog; 678 u8 b1, b2, b3; 679 680 /* 681 * Optimization: if imm32 is positive, use 'mov %eax, imm32' 682 * (which zero-extends imm32) to save 2 bytes. 683 */ 684 if (sign_propagate && (s32)imm32 < 0) { 685 /* 'mov %rax, imm32' sign extends imm32 */ 686 b1 = add_1mod(0x48, dst_reg); 687 b2 = 0xC7; 688 b3 = 0xC0; 689 EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32); 690 goto done; 691 } 692 693 /* 694 * Optimization: if imm32 is zero, use 'xor %eax, %eax' 695 * to save 3 bytes. 
696 */ 697 if (imm32 == 0) { 698 if (is_ereg(dst_reg)) 699 EMIT1(add_2mod(0x40, dst_reg, dst_reg)); 700 b2 = 0x31; /* xor */ 701 b3 = 0xC0; 702 EMIT2(b2, add_2reg(b3, dst_reg, dst_reg)); 703 goto done; 704 } 705 706 /* mov %eax, imm32 */ 707 if (is_ereg(dst_reg)) 708 EMIT1(add_1mod(0x40, dst_reg)); 709 EMIT1_off32(add_1reg(0xB8, dst_reg), imm32); 710 done: 711 *pprog = prog; 712 } 713 714 static void emit_mov_imm64(u8 **pprog, u32 dst_reg, 715 const u32 imm32_hi, const u32 imm32_lo) 716 { 717 u8 *prog = *pprog; 718 719 if (is_uimm32(((u64)imm32_hi << 32) | (u32)imm32_lo)) { 720 /* 721 * For emitting plain u32, where sign bit must not be 722 * propagated LLVM tends to load imm64 over mov32 723 * directly, so save couple of bytes by just doing 724 * 'mov %eax, imm32' instead. 725 */ 726 emit_mov_imm32(&prog, false, dst_reg, imm32_lo); 727 } else { 728 /* movabsq rax, imm64 */ 729 EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg)); 730 EMIT(imm32_lo, 4); 731 EMIT(imm32_hi, 4); 732 } 733 734 *pprog = prog; 735 } 736 737 static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg) 738 { 739 u8 *prog = *pprog; 740 741 if (is64) { 742 /* mov dst, src */ 743 EMIT_mov(dst_reg, src_reg); 744 } else { 745 /* mov32 dst, src */ 746 if (is_ereg(dst_reg) || is_ereg(src_reg)) 747 EMIT1(add_2mod(0x40, dst_reg, src_reg)); 748 EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg)); 749 } 750 751 *pprog = prog; 752 } 753 754 static void emit_movsx_reg(u8 **pprog, int num_bits, bool is64, u32 dst_reg, 755 u32 src_reg) 756 { 757 u8 *prog = *pprog; 758 759 if (is64) { 760 /* movs[b,w,l]q dst, src */ 761 if (num_bits == 8) 762 EMIT4(add_2mod(0x48, src_reg, dst_reg), 0x0f, 0xbe, 763 add_2reg(0xC0, src_reg, dst_reg)); 764 else if (num_bits == 16) 765 EMIT4(add_2mod(0x48, src_reg, dst_reg), 0x0f, 0xbf, 766 add_2reg(0xC0, src_reg, dst_reg)); 767 else if (num_bits == 32) 768 EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x63, 769 add_2reg(0xC0, src_reg, dst_reg)); 770 } else { 771 /* movs[b,w]l dst, src */ 772 if (num_bits == 8) { 773 EMIT4(add_2mod(0x40, src_reg, dst_reg), 0x0f, 0xbe, 774 add_2reg(0xC0, src_reg, dst_reg)); 775 } else if (num_bits == 16) { 776 if (is_ereg(dst_reg) || is_ereg(src_reg)) 777 EMIT1(add_2mod(0x40, src_reg, dst_reg)); 778 EMIT3(add_2mod(0x0f, src_reg, dst_reg), 0xbf, 779 add_2reg(0xC0, src_reg, dst_reg)); 780 } 781 } 782 783 *pprog = prog; 784 } 785 786 /* Emit the suffix (ModR/M etc) for addressing *(ptr_reg + off) and val_reg */ 787 static void emit_insn_suffix(u8 **pprog, u32 ptr_reg, u32 val_reg, int off) 788 { 789 u8 *prog = *pprog; 790 791 if (is_imm8(off)) { 792 /* 1-byte signed displacement. 
793 * 794 * If off == 0 we could skip this and save one extra byte, but 795 * special case of x86 R13 which always needs an offset is not 796 * worth the hassle 797 */ 798 EMIT2(add_2reg(0x40, ptr_reg, val_reg), off); 799 } else { 800 /* 4-byte signed displacement */ 801 EMIT1_off32(add_2reg(0x80, ptr_reg, val_reg), off); 802 } 803 *pprog = prog; 804 } 805 806 /* 807 * Emit a REX byte if it will be necessary to address these registers 808 */ 809 static void maybe_emit_mod(u8 **pprog, u32 dst_reg, u32 src_reg, bool is64) 810 { 811 u8 *prog = *pprog; 812 813 if (is64) 814 EMIT1(add_2mod(0x48, dst_reg, src_reg)); 815 else if (is_ereg(dst_reg) || is_ereg(src_reg)) 816 EMIT1(add_2mod(0x40, dst_reg, src_reg)); 817 *pprog = prog; 818 } 819 820 /* 821 * Similar version of maybe_emit_mod() for a single register 822 */ 823 static void maybe_emit_1mod(u8 **pprog, u32 reg, bool is64) 824 { 825 u8 *prog = *pprog; 826 827 if (is64) 828 EMIT1(add_1mod(0x48, reg)); 829 else if (is_ereg(reg)) 830 EMIT1(add_1mod(0x40, reg)); 831 *pprog = prog; 832 } 833 834 /* LDX: dst_reg = *(u8*)(src_reg + off) */ 835 static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off) 836 { 837 u8 *prog = *pprog; 838 839 switch (size) { 840 case BPF_B: 841 /* Emit 'movzx rax, byte ptr [rax + off]' */ 842 EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6); 843 break; 844 case BPF_H: 845 /* Emit 'movzx rax, word ptr [rax + off]' */ 846 EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7); 847 break; 848 case BPF_W: 849 /* Emit 'mov eax, dword ptr [rax+0x14]' */ 850 if (is_ereg(dst_reg) || is_ereg(src_reg)) 851 EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B); 852 else 853 EMIT1(0x8B); 854 break; 855 case BPF_DW: 856 /* Emit 'mov rax, qword ptr [rax+0x14]' */ 857 EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B); 858 break; 859 } 860 emit_insn_suffix(&prog, src_reg, dst_reg, off); 861 *pprog = prog; 862 } 863 864 /* LDSX: dst_reg = *(s8*)(src_reg + off) */ 865 static void emit_ldsx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off) 866 { 867 u8 *prog = *pprog; 868 869 switch (size) { 870 case BPF_B: 871 /* Emit 'movsx rax, byte ptr [rax + off]' */ 872 EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xBE); 873 break; 874 case BPF_H: 875 /* Emit 'movsx rax, word ptr [rax + off]' */ 876 EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xBF); 877 break; 878 case BPF_W: 879 /* Emit 'movsx rax, dword ptr [rax+0x14]' */ 880 EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x63); 881 break; 882 } 883 emit_insn_suffix(&prog, src_reg, dst_reg, off); 884 *pprog = prog; 885 } 886 887 /* STX: *(u8*)(dst_reg + off) = src_reg */ 888 static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off) 889 { 890 u8 *prog = *pprog; 891 892 switch (size) { 893 case BPF_B: 894 /* Emit 'mov byte ptr [rax + off], al' */ 895 if (is_ereg(dst_reg) || is_ereg_8l(src_reg)) 896 /* Add extra byte for eregs or SIL,DIL,BPL in src_reg */ 897 EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88); 898 else 899 EMIT1(0x88); 900 break; 901 case BPF_H: 902 if (is_ereg(dst_reg) || is_ereg(src_reg)) 903 EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89); 904 else 905 EMIT2(0x66, 0x89); 906 break; 907 case BPF_W: 908 if (is_ereg(dst_reg) || is_ereg(src_reg)) 909 EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89); 910 else 911 EMIT1(0x89); 912 break; 913 case BPF_DW: 914 EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89); 915 break; 916 } 917 emit_insn_suffix(&prog, dst_reg, src_reg, off); 918 *pprog = prog; 919 } 920 921 static int emit_atomic(u8 **pprog, u8 atomic_op, 922 u32 
dst_reg, u32 src_reg, s16 off, u8 bpf_size) 923 { 924 u8 *prog = *pprog; 925 926 EMIT1(0xF0); /* lock prefix */ 927 928 maybe_emit_mod(&prog, dst_reg, src_reg, bpf_size == BPF_DW); 929 930 /* emit opcode */ 931 switch (atomic_op) { 932 case BPF_ADD: 933 case BPF_AND: 934 case BPF_OR: 935 case BPF_XOR: 936 /* lock *(u32/u64*)(dst_reg + off) <op>= src_reg */ 937 EMIT1(simple_alu_opcodes[atomic_op]); 938 break; 939 case BPF_ADD | BPF_FETCH: 940 /* src_reg = atomic_fetch_add(dst_reg + off, src_reg); */ 941 EMIT2(0x0F, 0xC1); 942 break; 943 case BPF_XCHG: 944 /* src_reg = atomic_xchg(dst_reg + off, src_reg); */ 945 EMIT1(0x87); 946 break; 947 case BPF_CMPXCHG: 948 /* r0 = atomic_cmpxchg(dst_reg + off, r0, src_reg); */ 949 EMIT2(0x0F, 0xB1); 950 break; 951 default: 952 pr_err("bpf_jit: unknown atomic opcode %02x\n", atomic_op); 953 return -EFAULT; 954 } 955 956 emit_insn_suffix(&prog, dst_reg, src_reg, off); 957 958 *pprog = prog; 959 return 0; 960 } 961 962 bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs) 963 { 964 u32 reg = x->fixup >> 8; 965 966 /* jump over faulting load and clear dest register */ 967 *(unsigned long *)((void *)regs + reg) = 0; 968 regs->ip += x->fixup & 0xff; 969 return true; 970 } 971 972 static void detect_reg_usage(struct bpf_insn *insn, int insn_cnt, 973 bool *regs_used, bool *tail_call_seen) 974 { 975 int i; 976 977 for (i = 1; i <= insn_cnt; i++, insn++) { 978 if (insn->code == (BPF_JMP | BPF_TAIL_CALL)) 979 *tail_call_seen = true; 980 if (insn->dst_reg == BPF_REG_6 || insn->src_reg == BPF_REG_6) 981 regs_used[0] = true; 982 if (insn->dst_reg == BPF_REG_7 || insn->src_reg == BPF_REG_7) 983 regs_used[1] = true; 984 if (insn->dst_reg == BPF_REG_8 || insn->src_reg == BPF_REG_8) 985 regs_used[2] = true; 986 if (insn->dst_reg == BPF_REG_9 || insn->src_reg == BPF_REG_9) 987 regs_used[3] = true; 988 } 989 } 990 991 static void emit_nops(u8 **pprog, int len) 992 { 993 u8 *prog = *pprog; 994 int i, noplen; 995 996 while (len > 0) { 997 noplen = len; 998 999 if (noplen > ASM_NOP_MAX) 1000 noplen = ASM_NOP_MAX; 1001 1002 for (i = 0; i < noplen; i++) 1003 EMIT1(x86_nops[noplen][i]); 1004 len -= noplen; 1005 } 1006 1007 *pprog = prog; 1008 } 1009 1010 /* emit the 3-byte VEX prefix 1011 * 1012 * r: same as rex.r, extra bit for ModRM reg field 1013 * x: same as rex.x, extra bit for SIB index field 1014 * b: same as rex.b, extra bit for ModRM r/m, or SIB base 1015 * m: opcode map select, encoding escape bytes e.g. 
0x0f38 1016 * w: same as rex.w (32 bit or 64 bit) or opcode specific 1017 * src_reg2: additional source reg (encoded as BPF reg) 1018 * l: vector length (128 bit or 256 bit) or reserved 1019 * pp: opcode prefix (none, 0x66, 0xf2 or 0xf3) 1020 */ 1021 static void emit_3vex(u8 **pprog, bool r, bool x, bool b, u8 m, 1022 bool w, u8 src_reg2, bool l, u8 pp) 1023 { 1024 u8 *prog = *pprog; 1025 const u8 b0 = 0xc4; /* first byte of 3-byte VEX prefix */ 1026 u8 b1, b2; 1027 u8 vvvv = reg2hex[src_reg2]; 1028 1029 /* reg2hex gives only the lower 3 bit of vvvv */ 1030 if (is_ereg(src_reg2)) 1031 vvvv |= 1 << 3; 1032 1033 /* 1034 * 2nd byte of 3-byte VEX prefix 1035 * ~ means bit inverted encoding 1036 * 1037 * 7 0 1038 * +---+---+---+---+---+---+---+---+ 1039 * |~R |~X |~B | m | 1040 * +---+---+---+---+---+---+---+---+ 1041 */ 1042 b1 = (!r << 7) | (!x << 6) | (!b << 5) | (m & 0x1f); 1043 /* 1044 * 3rd byte of 3-byte VEX prefix 1045 * 1046 * 7 0 1047 * +---+---+---+---+---+---+---+---+ 1048 * | W | ~vvvv | L | pp | 1049 * +---+---+---+---+---+---+---+---+ 1050 */ 1051 b2 = (w << 7) | ((~vvvv & 0xf) << 3) | (l << 2) | (pp & 3); 1052 1053 EMIT3(b0, b1, b2); 1054 *pprog = prog; 1055 } 1056 1057 /* emit BMI2 shift instruction */ 1058 static void emit_shiftx(u8 **pprog, u32 dst_reg, u8 src_reg, bool is64, u8 op) 1059 { 1060 u8 *prog = *pprog; 1061 bool r = is_ereg(dst_reg); 1062 u8 m = 2; /* escape code 0f38 */ 1063 1064 emit_3vex(&prog, r, false, r, m, is64, src_reg, false, op); 1065 EMIT2(0xf7, add_2reg(0xC0, dst_reg, dst_reg)); 1066 *pprog = prog; 1067 } 1068 1069 #define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp))) 1070 1071 /* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */ 1072 #define RESTORE_TAIL_CALL_CNT(stack) \ 1073 EMIT3_off32(0x48, 0x8B, 0x85, -round_up(stack, 8) - 8) 1074 1075 static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image, 1076 int oldproglen, struct jit_context *ctx, bool jmp_padding) 1077 { 1078 bool tail_call_reachable = bpf_prog->aux->tail_call_reachable; 1079 struct bpf_insn *insn = bpf_prog->insnsi; 1080 bool callee_regs_used[4] = {}; 1081 int insn_cnt = bpf_prog->len; 1082 bool tail_call_seen = false; 1083 bool seen_exit = false; 1084 u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY]; 1085 int i, excnt = 0; 1086 int ilen, proglen = 0; 1087 u8 *prog = temp; 1088 int err; 1089 1090 detect_reg_usage(insn, insn_cnt, callee_regs_used, 1091 &tail_call_seen); 1092 1093 /* tail call's presence in current prog implies it is reachable */ 1094 tail_call_reachable |= tail_call_seen; 1095 1096 emit_prologue(&prog, bpf_prog->aux->stack_depth, 1097 bpf_prog_was_classic(bpf_prog), tail_call_reachable, 1098 bpf_prog->aux->func_idx != 0); 1099 push_callee_regs(&prog, callee_regs_used); 1100 1101 ilen = prog - temp; 1102 if (rw_image) 1103 memcpy(rw_image + proglen, temp, ilen); 1104 proglen += ilen; 1105 addrs[0] = proglen; 1106 prog = temp; 1107 1108 for (i = 1; i <= insn_cnt; i++, insn++) { 1109 const s32 imm32 = insn->imm; 1110 u32 dst_reg = insn->dst_reg; 1111 u32 src_reg = insn->src_reg; 1112 u8 b2 = 0, b3 = 0; 1113 u8 *start_of_ldx; 1114 s64 jmp_offset; 1115 s16 insn_off; 1116 u8 jmp_cond; 1117 u8 *func; 1118 int nops; 1119 1120 switch (insn->code) { 1121 /* ALU */ 1122 case BPF_ALU | BPF_ADD | BPF_X: 1123 case BPF_ALU | BPF_SUB | BPF_X: 1124 case BPF_ALU | BPF_AND | BPF_X: 1125 case BPF_ALU | BPF_OR | BPF_X: 1126 case BPF_ALU | BPF_XOR | BPF_X: 1127 case BPF_ALU64 | BPF_ADD | BPF_X: 1128 case BPF_ALU64 | BPF_SUB | BPF_X: 1129 case BPF_ALU64 | 
BPF_AND | BPF_X: 1130 case BPF_ALU64 | BPF_OR | BPF_X: 1131 case BPF_ALU64 | BPF_XOR | BPF_X: 1132 maybe_emit_mod(&prog, dst_reg, src_reg, 1133 BPF_CLASS(insn->code) == BPF_ALU64); 1134 b2 = simple_alu_opcodes[BPF_OP(insn->code)]; 1135 EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg)); 1136 break; 1137 1138 case BPF_ALU64 | BPF_MOV | BPF_X: 1139 case BPF_ALU | BPF_MOV | BPF_X: 1140 if (insn->off == 0) 1141 emit_mov_reg(&prog, 1142 BPF_CLASS(insn->code) == BPF_ALU64, 1143 dst_reg, src_reg); 1144 else 1145 emit_movsx_reg(&prog, insn->off, 1146 BPF_CLASS(insn->code) == BPF_ALU64, 1147 dst_reg, src_reg); 1148 break; 1149 1150 /* neg dst */ 1151 case BPF_ALU | BPF_NEG: 1152 case BPF_ALU64 | BPF_NEG: 1153 maybe_emit_1mod(&prog, dst_reg, 1154 BPF_CLASS(insn->code) == BPF_ALU64); 1155 EMIT2(0xF7, add_1reg(0xD8, dst_reg)); 1156 break; 1157 1158 case BPF_ALU | BPF_ADD | BPF_K: 1159 case BPF_ALU | BPF_SUB | BPF_K: 1160 case BPF_ALU | BPF_AND | BPF_K: 1161 case BPF_ALU | BPF_OR | BPF_K: 1162 case BPF_ALU | BPF_XOR | BPF_K: 1163 case BPF_ALU64 | BPF_ADD | BPF_K: 1164 case BPF_ALU64 | BPF_SUB | BPF_K: 1165 case BPF_ALU64 | BPF_AND | BPF_K: 1166 case BPF_ALU64 | BPF_OR | BPF_K: 1167 case BPF_ALU64 | BPF_XOR | BPF_K: 1168 maybe_emit_1mod(&prog, dst_reg, 1169 BPF_CLASS(insn->code) == BPF_ALU64); 1170 1171 /* 1172 * b3 holds 'normal' opcode, b2 short form only valid 1173 * in case dst is eax/rax. 1174 */ 1175 switch (BPF_OP(insn->code)) { 1176 case BPF_ADD: 1177 b3 = 0xC0; 1178 b2 = 0x05; 1179 break; 1180 case BPF_SUB: 1181 b3 = 0xE8; 1182 b2 = 0x2D; 1183 break; 1184 case BPF_AND: 1185 b3 = 0xE0; 1186 b2 = 0x25; 1187 break; 1188 case BPF_OR: 1189 b3 = 0xC8; 1190 b2 = 0x0D; 1191 break; 1192 case BPF_XOR: 1193 b3 = 0xF0; 1194 b2 = 0x35; 1195 break; 1196 } 1197 1198 if (is_imm8(imm32)) 1199 EMIT3(0x83, add_1reg(b3, dst_reg), imm32); 1200 else if (is_axreg(dst_reg)) 1201 EMIT1_off32(b2, imm32); 1202 else 1203 EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32); 1204 break; 1205 1206 case BPF_ALU64 | BPF_MOV | BPF_K: 1207 case BPF_ALU | BPF_MOV | BPF_K: 1208 emit_mov_imm32(&prog, BPF_CLASS(insn->code) == BPF_ALU64, 1209 dst_reg, imm32); 1210 break; 1211 1212 case BPF_LD | BPF_IMM | BPF_DW: 1213 emit_mov_imm64(&prog, dst_reg, insn[1].imm, insn[0].imm); 1214 insn++; 1215 i++; 1216 break; 1217 1218 /* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */ 1219 case BPF_ALU | BPF_MOD | BPF_X: 1220 case BPF_ALU | BPF_DIV | BPF_X: 1221 case BPF_ALU | BPF_MOD | BPF_K: 1222 case BPF_ALU | BPF_DIV | BPF_K: 1223 case BPF_ALU64 | BPF_MOD | BPF_X: 1224 case BPF_ALU64 | BPF_DIV | BPF_X: 1225 case BPF_ALU64 | BPF_MOD | BPF_K: 1226 case BPF_ALU64 | BPF_DIV | BPF_K: { 1227 bool is64 = BPF_CLASS(insn->code) == BPF_ALU64; 1228 1229 if (dst_reg != BPF_REG_0) 1230 EMIT1(0x50); /* push rax */ 1231 if (dst_reg != BPF_REG_3) 1232 EMIT1(0x52); /* push rdx */ 1233 1234 if (BPF_SRC(insn->code) == BPF_X) { 1235 if (src_reg == BPF_REG_0 || 1236 src_reg == BPF_REG_3) { 1237 /* mov r11, src_reg */ 1238 EMIT_mov(AUX_REG, src_reg); 1239 src_reg = AUX_REG; 1240 } 1241 } else { 1242 /* mov r11, imm32 */ 1243 EMIT3_off32(0x49, 0xC7, 0xC3, imm32); 1244 src_reg = AUX_REG; 1245 } 1246 1247 if (dst_reg != BPF_REG_0) 1248 /* mov rax, dst_reg */ 1249 emit_mov_reg(&prog, is64, BPF_REG_0, dst_reg); 1250 1251 if (insn->off == 0) { 1252 /* 1253 * xor edx, edx 1254 * equivalent to 'xor rdx, rdx', but one byte less 1255 */ 1256 EMIT2(0x31, 0xd2); 1257 1258 /* div src_reg */ 1259 maybe_emit_1mod(&prog, src_reg, is64); 1260 EMIT2(0xF7, add_1reg(0xF0, src_reg)); 1261 } 
else { 1262 if (BPF_CLASS(insn->code) == BPF_ALU) 1263 EMIT1(0x99); /* cdq */ 1264 else 1265 EMIT2(0x48, 0x99); /* cqo */ 1266 1267 /* idiv src_reg */ 1268 maybe_emit_1mod(&prog, src_reg, is64); 1269 EMIT2(0xF7, add_1reg(0xF8, src_reg)); 1270 } 1271 1272 if (BPF_OP(insn->code) == BPF_MOD && 1273 dst_reg != BPF_REG_3) 1274 /* mov dst_reg, rdx */ 1275 emit_mov_reg(&prog, is64, dst_reg, BPF_REG_3); 1276 else if (BPF_OP(insn->code) == BPF_DIV && 1277 dst_reg != BPF_REG_0) 1278 /* mov dst_reg, rax */ 1279 emit_mov_reg(&prog, is64, dst_reg, BPF_REG_0); 1280 1281 if (dst_reg != BPF_REG_3) 1282 EMIT1(0x5A); /* pop rdx */ 1283 if (dst_reg != BPF_REG_0) 1284 EMIT1(0x58); /* pop rax */ 1285 break; 1286 } 1287 1288 case BPF_ALU | BPF_MUL | BPF_K: 1289 case BPF_ALU64 | BPF_MUL | BPF_K: 1290 maybe_emit_mod(&prog, dst_reg, dst_reg, 1291 BPF_CLASS(insn->code) == BPF_ALU64); 1292 1293 if (is_imm8(imm32)) 1294 /* imul dst_reg, dst_reg, imm8 */ 1295 EMIT3(0x6B, add_2reg(0xC0, dst_reg, dst_reg), 1296 imm32); 1297 else 1298 /* imul dst_reg, dst_reg, imm32 */ 1299 EMIT2_off32(0x69, 1300 add_2reg(0xC0, dst_reg, dst_reg), 1301 imm32); 1302 break; 1303 1304 case BPF_ALU | BPF_MUL | BPF_X: 1305 case BPF_ALU64 | BPF_MUL | BPF_X: 1306 maybe_emit_mod(&prog, src_reg, dst_reg, 1307 BPF_CLASS(insn->code) == BPF_ALU64); 1308 1309 /* imul dst_reg, src_reg */ 1310 EMIT3(0x0F, 0xAF, add_2reg(0xC0, src_reg, dst_reg)); 1311 break; 1312 1313 /* Shifts */ 1314 case BPF_ALU | BPF_LSH | BPF_K: 1315 case BPF_ALU | BPF_RSH | BPF_K: 1316 case BPF_ALU | BPF_ARSH | BPF_K: 1317 case BPF_ALU64 | BPF_LSH | BPF_K: 1318 case BPF_ALU64 | BPF_RSH | BPF_K: 1319 case BPF_ALU64 | BPF_ARSH | BPF_K: 1320 maybe_emit_1mod(&prog, dst_reg, 1321 BPF_CLASS(insn->code) == BPF_ALU64); 1322 1323 b3 = simple_alu_opcodes[BPF_OP(insn->code)]; 1324 if (imm32 == 1) 1325 EMIT2(0xD1, add_1reg(b3, dst_reg)); 1326 else 1327 EMIT3(0xC1, add_1reg(b3, dst_reg), imm32); 1328 break; 1329 1330 case BPF_ALU | BPF_LSH | BPF_X: 1331 case BPF_ALU | BPF_RSH | BPF_X: 1332 case BPF_ALU | BPF_ARSH | BPF_X: 1333 case BPF_ALU64 | BPF_LSH | BPF_X: 1334 case BPF_ALU64 | BPF_RSH | BPF_X: 1335 case BPF_ALU64 | BPF_ARSH | BPF_X: 1336 /* BMI2 shifts aren't better when shift count is already in rcx */ 1337 if (boot_cpu_has(X86_FEATURE_BMI2) && src_reg != BPF_REG_4) { 1338 /* shrx/sarx/shlx dst_reg, dst_reg, src_reg */ 1339 bool w = (BPF_CLASS(insn->code) == BPF_ALU64); 1340 u8 op; 1341 1342 switch (BPF_OP(insn->code)) { 1343 case BPF_LSH: 1344 op = 1; /* prefix 0x66 */ 1345 break; 1346 case BPF_RSH: 1347 op = 3; /* prefix 0xf2 */ 1348 break; 1349 case BPF_ARSH: 1350 op = 2; /* prefix 0xf3 */ 1351 break; 1352 } 1353 1354 emit_shiftx(&prog, dst_reg, src_reg, w, op); 1355 1356 break; 1357 } 1358 1359 if (src_reg != BPF_REG_4) { /* common case */ 1360 /* Check for bad case when dst_reg == rcx */ 1361 if (dst_reg == BPF_REG_4) { 1362 /* mov r11, dst_reg */ 1363 EMIT_mov(AUX_REG, dst_reg); 1364 dst_reg = AUX_REG; 1365 } else { 1366 EMIT1(0x51); /* push rcx */ 1367 } 1368 /* mov rcx, src_reg */ 1369 EMIT_mov(BPF_REG_4, src_reg); 1370 } 1371 1372 /* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */ 1373 maybe_emit_1mod(&prog, dst_reg, 1374 BPF_CLASS(insn->code) == BPF_ALU64); 1375 1376 b3 = simple_alu_opcodes[BPF_OP(insn->code)]; 1377 EMIT2(0xD3, add_1reg(b3, dst_reg)); 1378 1379 if (src_reg != BPF_REG_4) { 1380 if (insn->dst_reg == BPF_REG_4) 1381 /* mov dst_reg, r11 */ 1382 EMIT_mov(insn->dst_reg, AUX_REG); 1383 else 1384 EMIT1(0x59); /* pop rcx */ 1385 } 1386 1387 break; 1388 1389 case BPF_ALU 
| BPF_END | BPF_FROM_BE: 1390 case BPF_ALU64 | BPF_END | BPF_FROM_LE: 1391 switch (imm32) { 1392 case 16: 1393 /* Emit 'ror %ax, 8' to swap lower 2 bytes */ 1394 EMIT1(0x66); 1395 if (is_ereg(dst_reg)) 1396 EMIT1(0x41); 1397 EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8); 1398 1399 /* Emit 'movzwl eax, ax' */ 1400 if (is_ereg(dst_reg)) 1401 EMIT3(0x45, 0x0F, 0xB7); 1402 else 1403 EMIT2(0x0F, 0xB7); 1404 EMIT1(add_2reg(0xC0, dst_reg, dst_reg)); 1405 break; 1406 case 32: 1407 /* Emit 'bswap eax' to swap lower 4 bytes */ 1408 if (is_ereg(dst_reg)) 1409 EMIT2(0x41, 0x0F); 1410 else 1411 EMIT1(0x0F); 1412 EMIT1(add_1reg(0xC8, dst_reg)); 1413 break; 1414 case 64: 1415 /* Emit 'bswap rax' to swap 8 bytes */ 1416 EMIT3(add_1mod(0x48, dst_reg), 0x0F, 1417 add_1reg(0xC8, dst_reg)); 1418 break; 1419 } 1420 break; 1421 1422 case BPF_ALU | BPF_END | BPF_FROM_LE: 1423 switch (imm32) { 1424 case 16: 1425 /* 1426 * Emit 'movzwl eax, ax' to zero extend 16-bit 1427 * into 64 bit 1428 */ 1429 if (is_ereg(dst_reg)) 1430 EMIT3(0x45, 0x0F, 0xB7); 1431 else 1432 EMIT2(0x0F, 0xB7); 1433 EMIT1(add_2reg(0xC0, dst_reg, dst_reg)); 1434 break; 1435 case 32: 1436 /* Emit 'mov eax, eax' to clear upper 32-bits */ 1437 if (is_ereg(dst_reg)) 1438 EMIT1(0x45); 1439 EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg)); 1440 break; 1441 case 64: 1442 /* nop */ 1443 break; 1444 } 1445 break; 1446 1447 /* speculation barrier */ 1448 case BPF_ST | BPF_NOSPEC: 1449 EMIT_LFENCE(); 1450 break; 1451 1452 /* ST: *(u8*)(dst_reg + off) = imm */ 1453 case BPF_ST | BPF_MEM | BPF_B: 1454 if (is_ereg(dst_reg)) 1455 EMIT2(0x41, 0xC6); 1456 else 1457 EMIT1(0xC6); 1458 goto st; 1459 case BPF_ST | BPF_MEM | BPF_H: 1460 if (is_ereg(dst_reg)) 1461 EMIT3(0x66, 0x41, 0xC7); 1462 else 1463 EMIT2(0x66, 0xC7); 1464 goto st; 1465 case BPF_ST | BPF_MEM | BPF_W: 1466 if (is_ereg(dst_reg)) 1467 EMIT2(0x41, 0xC7); 1468 else 1469 EMIT1(0xC7); 1470 goto st; 1471 case BPF_ST | BPF_MEM | BPF_DW: 1472 EMIT2(add_1mod(0x48, dst_reg), 0xC7); 1473 1474 st: if (is_imm8(insn->off)) 1475 EMIT2(add_1reg(0x40, dst_reg), insn->off); 1476 else 1477 EMIT1_off32(add_1reg(0x80, dst_reg), insn->off); 1478 1479 EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code))); 1480 break; 1481 1482 /* STX: *(u8*)(dst_reg + off) = src_reg */ 1483 case BPF_STX | BPF_MEM | BPF_B: 1484 case BPF_STX | BPF_MEM | BPF_H: 1485 case BPF_STX | BPF_MEM | BPF_W: 1486 case BPF_STX | BPF_MEM | BPF_DW: 1487 emit_stx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off); 1488 break; 1489 1490 /* LDX: dst_reg = *(u8*)(src_reg + off) */ 1491 case BPF_LDX | BPF_MEM | BPF_B: 1492 case BPF_LDX | BPF_PROBE_MEM | BPF_B: 1493 case BPF_LDX | BPF_MEM | BPF_H: 1494 case BPF_LDX | BPF_PROBE_MEM | BPF_H: 1495 case BPF_LDX | BPF_MEM | BPF_W: 1496 case BPF_LDX | BPF_PROBE_MEM | BPF_W: 1497 case BPF_LDX | BPF_MEM | BPF_DW: 1498 case BPF_LDX | BPF_PROBE_MEM | BPF_DW: 1499 /* LDXS: dst_reg = *(s8*)(src_reg + off) */ 1500 case BPF_LDX | BPF_MEMSX | BPF_B: 1501 case BPF_LDX | BPF_MEMSX | BPF_H: 1502 case BPF_LDX | BPF_MEMSX | BPF_W: 1503 case BPF_LDX | BPF_PROBE_MEMSX | BPF_B: 1504 case BPF_LDX | BPF_PROBE_MEMSX | BPF_H: 1505 case BPF_LDX | BPF_PROBE_MEMSX | BPF_W: 1506 insn_off = insn->off; 1507 1508 if (BPF_MODE(insn->code) == BPF_PROBE_MEM || 1509 BPF_MODE(insn->code) == BPF_PROBE_MEMSX) { 1510 /* Conservatively check that src_reg + insn->off is a kernel address: 1511 * src_reg + insn->off > TASK_SIZE_MAX + PAGE_SIZE 1512 * and 1513 * src_reg + insn->off < VSYSCALL_ADDR 1514 */ 1515 1516 u64 limit = TASK_SIZE_MAX + PAGE_SIZE - 
VSYSCALL_ADDR; 1517 u8 *end_of_jmp; 1518 1519 /* movabsq r10, VSYSCALL_ADDR */ 1520 emit_mov_imm64(&prog, BPF_REG_AX, (long)VSYSCALL_ADDR >> 32, 1521 (u32)(long)VSYSCALL_ADDR); 1522 1523 /* mov src_reg, r11 */ 1524 EMIT_mov(AUX_REG, src_reg); 1525 1526 if (insn->off) { 1527 /* add r11, insn->off */ 1528 maybe_emit_1mod(&prog, AUX_REG, true); 1529 EMIT2_off32(0x81, add_1reg(0xC0, AUX_REG), insn->off); 1530 } 1531 1532 /* sub r11, r10 */ 1533 maybe_emit_mod(&prog, AUX_REG, BPF_REG_AX, true); 1534 EMIT2(0x29, add_2reg(0xC0, AUX_REG, BPF_REG_AX)); 1535 1536 /* movabsq r10, limit */ 1537 emit_mov_imm64(&prog, BPF_REG_AX, (long)limit >> 32, 1538 (u32)(long)limit); 1539 1540 /* cmp r10, r11 */ 1541 maybe_emit_mod(&prog, AUX_REG, BPF_REG_AX, true); 1542 EMIT2(0x39, add_2reg(0xC0, AUX_REG, BPF_REG_AX)); 1543 1544 /* if unsigned '>', goto load */ 1545 EMIT2(X86_JA, 0); 1546 end_of_jmp = prog; 1547 1548 /* xor dst_reg, dst_reg */ 1549 emit_mov_imm32(&prog, false, dst_reg, 0); 1550 /* jmp byte_after_ldx */ 1551 EMIT2(0xEB, 0); 1552 1553 /* populate jmp_offset for JAE above to jump to start_of_ldx */ 1554 start_of_ldx = prog; 1555 end_of_jmp[-1] = start_of_ldx - end_of_jmp; 1556 } 1557 if (BPF_MODE(insn->code) == BPF_PROBE_MEMSX || 1558 BPF_MODE(insn->code) == BPF_MEMSX) 1559 emit_ldsx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn_off); 1560 else 1561 emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn_off); 1562 if (BPF_MODE(insn->code) == BPF_PROBE_MEM || 1563 BPF_MODE(insn->code) == BPF_PROBE_MEMSX) { 1564 struct exception_table_entry *ex; 1565 u8 *_insn = image + proglen + (start_of_ldx - temp); 1566 s64 delta; 1567 1568 /* populate jmp_offset for JMP above */ 1569 start_of_ldx[-1] = prog - start_of_ldx; 1570 1571 if (!bpf_prog->aux->extable) 1572 break; 1573 1574 if (excnt >= bpf_prog->aux->num_exentries) { 1575 pr_err("ex gen bug\n"); 1576 return -EFAULT; 1577 } 1578 ex = &bpf_prog->aux->extable[excnt++]; 1579 1580 delta = _insn - (u8 *)&ex->insn; 1581 if (!is_simm32(delta)) { 1582 pr_err("extable->insn doesn't fit into 32-bit\n"); 1583 return -EFAULT; 1584 } 1585 /* switch ex to rw buffer for writes */ 1586 ex = (void *)rw_image + ((void *)ex - (void *)image); 1587 1588 ex->insn = delta; 1589 1590 ex->data = EX_TYPE_BPF; 1591 1592 if (dst_reg > BPF_REG_9) { 1593 pr_err("verifier error\n"); 1594 return -EFAULT; 1595 } 1596 /* 1597 * Compute size of x86 insn and its target dest x86 register. 1598 * ex_handler_bpf() will use lower 8 bits to adjust 1599 * pt_regs->ip to jump over this x86 instruction 1600 * and upper bits to figure out which pt_regs to zero out. 1601 * End result: x86 insn "mov rbx, qword ptr [rax+0x14]" 1602 * of 4 bytes will be ignored and rbx will be zero inited. 1603 */ 1604 ex->fixup = (prog - start_of_ldx) | (reg2pt_regs[dst_reg] << 8); 1605 } 1606 break; 1607 1608 case BPF_STX | BPF_ATOMIC | BPF_W: 1609 case BPF_STX | BPF_ATOMIC | BPF_DW: 1610 if (insn->imm == (BPF_AND | BPF_FETCH) || 1611 insn->imm == (BPF_OR | BPF_FETCH) || 1612 insn->imm == (BPF_XOR | BPF_FETCH)) { 1613 bool is64 = BPF_SIZE(insn->code) == BPF_DW; 1614 u32 real_src_reg = src_reg; 1615 u32 real_dst_reg = dst_reg; 1616 u8 *branch_target; 1617 1618 /* 1619 * Can't be implemented with a single x86 insn. 1620 * Need to do a CMPXCHG loop. 
1621 */ 1622 1623 /* Will need RAX as a CMPXCHG operand so save R0 */ 1624 emit_mov_reg(&prog, true, BPF_REG_AX, BPF_REG_0); 1625 if (src_reg == BPF_REG_0) 1626 real_src_reg = BPF_REG_AX; 1627 if (dst_reg == BPF_REG_0) 1628 real_dst_reg = BPF_REG_AX; 1629 1630 branch_target = prog; 1631 /* Load old value */ 1632 emit_ldx(&prog, BPF_SIZE(insn->code), 1633 BPF_REG_0, real_dst_reg, insn->off); 1634 /* 1635 * Perform the (commutative) operation locally, 1636 * put the result in the AUX_REG. 1637 */ 1638 emit_mov_reg(&prog, is64, AUX_REG, BPF_REG_0); 1639 maybe_emit_mod(&prog, AUX_REG, real_src_reg, is64); 1640 EMIT2(simple_alu_opcodes[BPF_OP(insn->imm)], 1641 add_2reg(0xC0, AUX_REG, real_src_reg)); 1642 /* Attempt to swap in new value */ 1643 err = emit_atomic(&prog, BPF_CMPXCHG, 1644 real_dst_reg, AUX_REG, 1645 insn->off, 1646 BPF_SIZE(insn->code)); 1647 if (WARN_ON(err)) 1648 return err; 1649 /* 1650 * ZF tells us whether we won the race. If it's 1651 * cleared we need to try again. 1652 */ 1653 EMIT2(X86_JNE, -(prog - branch_target) - 2); 1654 /* Return the pre-modification value */ 1655 emit_mov_reg(&prog, is64, real_src_reg, BPF_REG_0); 1656 /* Restore R0 after clobbering RAX */ 1657 emit_mov_reg(&prog, true, BPF_REG_0, BPF_REG_AX); 1658 break; 1659 } 1660 1661 err = emit_atomic(&prog, insn->imm, dst_reg, src_reg, 1662 insn->off, BPF_SIZE(insn->code)); 1663 if (err) 1664 return err; 1665 break; 1666 1667 /* call */ 1668 case BPF_JMP | BPF_CALL: { 1669 int offs; 1670 1671 func = (u8 *) __bpf_call_base + imm32; 1672 if (tail_call_reachable) { 1673 RESTORE_TAIL_CALL_CNT(bpf_prog->aux->stack_depth); 1674 if (!imm32) 1675 return -EINVAL; 1676 offs = 7 + x86_call_depth_emit_accounting(&prog, func); 1677 } else { 1678 if (!imm32) 1679 return -EINVAL; 1680 offs = x86_call_depth_emit_accounting(&prog, func); 1681 } 1682 if (emit_call(&prog, func, image + addrs[i - 1] + offs)) 1683 return -EINVAL; 1684 break; 1685 } 1686 1687 case BPF_JMP | BPF_TAIL_CALL: 1688 if (imm32) 1689 emit_bpf_tail_call_direct(&bpf_prog->aux->poke_tab[imm32 - 1], 1690 &prog, image + addrs[i - 1], 1691 callee_regs_used, 1692 bpf_prog->aux->stack_depth, 1693 ctx); 1694 else 1695 emit_bpf_tail_call_indirect(&prog, 1696 callee_regs_used, 1697 bpf_prog->aux->stack_depth, 1698 image + addrs[i - 1], 1699 ctx); 1700 break; 1701 1702 /* cond jump */ 1703 case BPF_JMP | BPF_JEQ | BPF_X: 1704 case BPF_JMP | BPF_JNE | BPF_X: 1705 case BPF_JMP | BPF_JGT | BPF_X: 1706 case BPF_JMP | BPF_JLT | BPF_X: 1707 case BPF_JMP | BPF_JGE | BPF_X: 1708 case BPF_JMP | BPF_JLE | BPF_X: 1709 case BPF_JMP | BPF_JSGT | BPF_X: 1710 case BPF_JMP | BPF_JSLT | BPF_X: 1711 case BPF_JMP | BPF_JSGE | BPF_X: 1712 case BPF_JMP | BPF_JSLE | BPF_X: 1713 case BPF_JMP32 | BPF_JEQ | BPF_X: 1714 case BPF_JMP32 | BPF_JNE | BPF_X: 1715 case BPF_JMP32 | BPF_JGT | BPF_X: 1716 case BPF_JMP32 | BPF_JLT | BPF_X: 1717 case BPF_JMP32 | BPF_JGE | BPF_X: 1718 case BPF_JMP32 | BPF_JLE | BPF_X: 1719 case BPF_JMP32 | BPF_JSGT | BPF_X: 1720 case BPF_JMP32 | BPF_JSLT | BPF_X: 1721 case BPF_JMP32 | BPF_JSGE | BPF_X: 1722 case BPF_JMP32 | BPF_JSLE | BPF_X: 1723 /* cmp dst_reg, src_reg */ 1724 maybe_emit_mod(&prog, dst_reg, src_reg, 1725 BPF_CLASS(insn->code) == BPF_JMP); 1726 EMIT2(0x39, add_2reg(0xC0, dst_reg, src_reg)); 1727 goto emit_cond_jmp; 1728 1729 case BPF_JMP | BPF_JSET | BPF_X: 1730 case BPF_JMP32 | BPF_JSET | BPF_X: 1731 /* test dst_reg, src_reg */ 1732 maybe_emit_mod(&prog, dst_reg, src_reg, 1733 BPF_CLASS(insn->code) == BPF_JMP); 1734 EMIT2(0x85, add_2reg(0xC0, dst_reg, 
src_reg)); 1735 goto emit_cond_jmp; 1736 1737 case BPF_JMP | BPF_JSET | BPF_K: 1738 case BPF_JMP32 | BPF_JSET | BPF_K: 1739 /* test dst_reg, imm32 */ 1740 maybe_emit_1mod(&prog, dst_reg, 1741 BPF_CLASS(insn->code) == BPF_JMP); 1742 EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32); 1743 goto emit_cond_jmp; 1744 1745 case BPF_JMP | BPF_JEQ | BPF_K: 1746 case BPF_JMP | BPF_JNE | BPF_K: 1747 case BPF_JMP | BPF_JGT | BPF_K: 1748 case BPF_JMP | BPF_JLT | BPF_K: 1749 case BPF_JMP | BPF_JGE | BPF_K: 1750 case BPF_JMP | BPF_JLE | BPF_K: 1751 case BPF_JMP | BPF_JSGT | BPF_K: 1752 case BPF_JMP | BPF_JSLT | BPF_K: 1753 case BPF_JMP | BPF_JSGE | BPF_K: 1754 case BPF_JMP | BPF_JSLE | BPF_K: 1755 case BPF_JMP32 | BPF_JEQ | BPF_K: 1756 case BPF_JMP32 | BPF_JNE | BPF_K: 1757 case BPF_JMP32 | BPF_JGT | BPF_K: 1758 case BPF_JMP32 | BPF_JLT | BPF_K: 1759 case BPF_JMP32 | BPF_JGE | BPF_K: 1760 case BPF_JMP32 | BPF_JLE | BPF_K: 1761 case BPF_JMP32 | BPF_JSGT | BPF_K: 1762 case BPF_JMP32 | BPF_JSLT | BPF_K: 1763 case BPF_JMP32 | BPF_JSGE | BPF_K: 1764 case BPF_JMP32 | BPF_JSLE | BPF_K: 1765 /* test dst_reg, dst_reg to save one extra byte */ 1766 if (imm32 == 0) { 1767 maybe_emit_mod(&prog, dst_reg, dst_reg, 1768 BPF_CLASS(insn->code) == BPF_JMP); 1769 EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg)); 1770 goto emit_cond_jmp; 1771 } 1772 1773 /* cmp dst_reg, imm8/32 */ 1774 maybe_emit_1mod(&prog, dst_reg, 1775 BPF_CLASS(insn->code) == BPF_JMP); 1776 1777 if (is_imm8(imm32)) 1778 EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32); 1779 else 1780 EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32); 1781 1782 emit_cond_jmp: /* Convert BPF opcode to x86 */ 1783 switch (BPF_OP(insn->code)) { 1784 case BPF_JEQ: 1785 jmp_cond = X86_JE; 1786 break; 1787 case BPF_JSET: 1788 case BPF_JNE: 1789 jmp_cond = X86_JNE; 1790 break; 1791 case BPF_JGT: 1792 /* GT is unsigned '>', JA in x86 */ 1793 jmp_cond = X86_JA; 1794 break; 1795 case BPF_JLT: 1796 /* LT is unsigned '<', JB in x86 */ 1797 jmp_cond = X86_JB; 1798 break; 1799 case BPF_JGE: 1800 /* GE is unsigned '>=', JAE in x86 */ 1801 jmp_cond = X86_JAE; 1802 break; 1803 case BPF_JLE: 1804 /* LE is unsigned '<=', JBE in x86 */ 1805 jmp_cond = X86_JBE; 1806 break; 1807 case BPF_JSGT: 1808 /* Signed '>', GT in x86 */ 1809 jmp_cond = X86_JG; 1810 break; 1811 case BPF_JSLT: 1812 /* Signed '<', LT in x86 */ 1813 jmp_cond = X86_JL; 1814 break; 1815 case BPF_JSGE: 1816 /* Signed '>=', GE in x86 */ 1817 jmp_cond = X86_JGE; 1818 break; 1819 case BPF_JSLE: 1820 /* Signed '<=', LE in x86 */ 1821 jmp_cond = X86_JLE; 1822 break; 1823 default: /* to silence GCC warning */ 1824 return -EFAULT; 1825 } 1826 jmp_offset = addrs[i + insn->off] - addrs[i]; 1827 if (is_imm8_jmp_offset(jmp_offset)) { 1828 if (jmp_padding) { 1829 /* To keep the jmp_offset valid, the extra bytes are 1830 * padded before the jump insn, so we subtract the 1831 * 2 bytes of jmp_cond insn from INSN_SZ_DIFF. 1832 * 1833 * If the previous pass already emits an imm8 1834 * jmp_cond, then this BPF insn won't shrink, so 1835 * "nops" is 0. 1836 * 1837 * On the other hand, if the previous pass emits an 1838 * imm32 jmp_cond, the extra 4 bytes(*) is padded to 1839 * keep the image from shrinking further. 1840 * 1841 * (*) imm32 jmp_cond is 6 bytes, and imm8 jmp_cond 1842 * is 2 bytes, so the size difference is 4 bytes. 
1843 */ 1844 nops = INSN_SZ_DIFF - 2; 1845 if (nops != 0 && nops != 4) { 1846 pr_err("unexpected jmp_cond padding: %d bytes\n", 1847 nops); 1848 return -EFAULT; 1849 } 1850 emit_nops(&prog, nops); 1851 } 1852 EMIT2(jmp_cond, jmp_offset); 1853 } else if (is_simm32(jmp_offset)) { 1854 EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset); 1855 } else { 1856 pr_err("cond_jmp gen bug %llx\n", jmp_offset); 1857 return -EFAULT; 1858 } 1859 1860 break; 1861 1862 case BPF_JMP | BPF_JA: 1863 case BPF_JMP32 | BPF_JA: 1864 if (BPF_CLASS(insn->code) == BPF_JMP) { 1865 if (insn->off == -1) 1866 /* -1 jmp instructions will always jump 1867 * backwards two bytes. Explicitly handling 1868 * this case avoids wasting too many passes 1869 * when there are long sequences of replaced 1870 * dead code. 1871 */ 1872 jmp_offset = -2; 1873 else 1874 jmp_offset = addrs[i + insn->off] - addrs[i]; 1875 } else { 1876 if (insn->imm == -1) 1877 jmp_offset = -2; 1878 else 1879 jmp_offset = addrs[i + insn->imm] - addrs[i]; 1880 } 1881 1882 if (!jmp_offset) { 1883 /* 1884 * If jmp_padding is enabled, the extra nops will 1885 * be inserted. Otherwise, optimize out nop jumps. 1886 */ 1887 if (jmp_padding) { 1888 /* There are 3 possible conditions. 1889 * (1) This BPF_JA is already optimized out in 1890 * the previous run, so there is no need 1891 * to pad any extra byte (0 byte). 1892 * (2) The previous pass emits an imm8 jmp, 1893 * so we pad 2 bytes to match the previous 1894 * insn size. 1895 * (3) Similarly, the previous pass emits an 1896 * imm32 jmp, and 5 bytes is padded. 1897 */ 1898 nops = INSN_SZ_DIFF; 1899 if (nops != 0 && nops != 2 && nops != 5) { 1900 pr_err("unexpected nop jump padding: %d bytes\n", 1901 nops); 1902 return -EFAULT; 1903 } 1904 emit_nops(&prog, nops); 1905 } 1906 break; 1907 } 1908 emit_jmp: 1909 if (is_imm8_jmp_offset(jmp_offset)) { 1910 if (jmp_padding) { 1911 /* To avoid breaking jmp_offset, the extra bytes 1912 * are padded before the actual jmp insn, so 1913 * 2 bytes is subtracted from INSN_SZ_DIFF. 1914 * 1915 * If the previous pass already emits an imm8 1916 * jmp, there is nothing to pad (0 byte). 1917 * 1918 * If it emits an imm32 jmp (5 bytes) previously 1919 * and now an imm8 jmp (2 bytes), then we pad 1920 * (5 - 2 = 3) bytes to stop the image from 1921 * shrinking further. 1922 */ 1923 nops = INSN_SZ_DIFF - 2; 1924 if (nops != 0 && nops != 3) { 1925 pr_err("unexpected jump padding: %d bytes\n", 1926 nops); 1927 return -EFAULT; 1928 } 1929 emit_nops(&prog, INSN_SZ_DIFF - 2); 1930 } 1931 EMIT2(0xEB, jmp_offset); 1932 } else if (is_simm32(jmp_offset)) { 1933 EMIT1_off32(0xE9, jmp_offset); 1934 } else { 1935 pr_err("jmp gen bug %llx\n", jmp_offset); 1936 return -EFAULT; 1937 } 1938 break; 1939 1940 case BPF_JMP | BPF_EXIT: 1941 if (seen_exit) { 1942 jmp_offset = ctx->cleanup_addr - addrs[i]; 1943 goto emit_jmp; 1944 } 1945 seen_exit = true; 1946 /* Update cleanup_addr */ 1947 ctx->cleanup_addr = proglen; 1948 pop_callee_regs(&prog, callee_regs_used); 1949 EMIT1(0xC9); /* leave */ 1950 emit_return(&prog, image + addrs[i - 1] + (prog - temp)); 1951 break; 1952 1953 default: 1954 /* 1955 * By design x86-64 JIT should support all BPF instructions. 1956 * This error will be seen if new instruction was added 1957 * to the interpreter, but not to the JIT, or if there is 1958 * junk in bpf_prog. 
1959 */ 1960 pr_err("bpf_jit: unknown opcode %02x\n", insn->code); 1961 return -EINVAL; 1962 } 1963 1964 ilen = prog - temp; 1965 if (ilen > BPF_MAX_INSN_SIZE) { 1966 pr_err("bpf_jit: fatal insn size error\n"); 1967 return -EFAULT; 1968 } 1969 1970 if (image) { 1971 /* 1972 * When populating the image, assert that: 1973 * 1974 * i) We do not write beyond the allocated space, and 1975 * ii) addrs[i] did not change from the prior run, in order 1976 * to validate assumptions made for computing branch 1977 * displacements. 1978 */ 1979 if (unlikely(proglen + ilen > oldproglen || 1980 proglen + ilen != addrs[i])) { 1981 pr_err("bpf_jit: fatal error\n"); 1982 return -EFAULT; 1983 } 1984 memcpy(rw_image + proglen, temp, ilen); 1985 } 1986 proglen += ilen; 1987 addrs[i] = proglen; 1988 prog = temp; 1989 } 1990 1991 if (image && excnt != bpf_prog->aux->num_exentries) { 1992 pr_err("extable is not populated\n"); 1993 return -EFAULT; 1994 } 1995 return proglen; 1996 } 1997 1998 static void clean_stack_garbage(const struct btf_func_model *m, 1999 u8 **pprog, int nr_stack_slots, 2000 int stack_size) 2001 { 2002 int arg_size, off; 2003 u8 *prog; 2004 2005 /* Generally speaking, the compiler will pass the arguments 2006 * on-stack with "push" instruction, which will take 8-byte 2007 * on the stack. In this case, there won't be garbage values 2008 * while we copy the arguments from origin stack frame to current 2009 * in BPF_DW. 2010 * 2011 * However, sometimes the compiler will only allocate 4-byte on 2012 * the stack for the arguments. For now, this case will only 2013 * happen if there is only one argument on-stack and its size 2014 * not more than 4 byte. In this case, there will be garbage 2015 * values on the upper 4-byte where we store the argument on 2016 * current stack frame. 2017 * 2018 * arguments on origin stack: 2019 * 2020 * stack_arg_1(4-byte) xxx(4-byte) 2021 * 2022 * what we copy: 2023 * 2024 * stack_arg_1(8-byte): stack_arg_1(origin) xxx 2025 * 2026 * and the xxx is the garbage values which we should clean here. 2027 */ 2028 if (nr_stack_slots != 1) 2029 return; 2030 2031 /* the size of the last argument */ 2032 arg_size = m->arg_size[m->nr_args - 1]; 2033 if (arg_size <= 4) { 2034 off = -(stack_size - 4); 2035 prog = *pprog; 2036 /* mov DWORD PTR [rbp + off], 0 */ 2037 if (!is_imm8(off)) 2038 EMIT2_off32(0xC7, 0x85, off); 2039 else 2040 EMIT3(0xC7, 0x45, off); 2041 EMIT(0, 4); 2042 *pprog = prog; 2043 } 2044 } 2045 2046 /* get the count of the regs that are used to pass arguments */ 2047 static int get_nr_used_regs(const struct btf_func_model *m) 2048 { 2049 int i, arg_regs, nr_used_regs = 0; 2050 2051 for (i = 0; i < min_t(int, m->nr_args, MAX_BPF_FUNC_ARGS); i++) { 2052 arg_regs = (m->arg_size[i] + 7) / 8; 2053 if (nr_used_regs + arg_regs <= 6) 2054 nr_used_regs += arg_regs; 2055 2056 if (nr_used_regs >= 6) 2057 break; 2058 } 2059 2060 return nr_used_regs; 2061 } 2062 2063 static void save_args(const struct btf_func_model *m, u8 **prog, 2064 int stack_size, bool for_call_origin) 2065 { 2066 int arg_regs, first_off = 0, nr_regs = 0, nr_stack_slots = 0; 2067 int i, j; 2068 2069 /* Store function arguments to stack. 
2070 * For a function that accepts two pointers the sequence will be: 2071 * mov QWORD PTR [rbp-0x10],rdi 2072 * mov QWORD PTR [rbp-0x8],rsi 2073 */ 2074 for (i = 0; i < min_t(int, m->nr_args, MAX_BPF_FUNC_ARGS); i++) { 2075 arg_regs = (m->arg_size[i] + 7) / 8; 2076 2077 /* According to the research of Yonghong, struct members 2078 * should be all in registers or all on the stack. 2079 * Meanwhile, the compiler will pass the argument in regs 2080 * if the remaining regs can hold the argument. 2081 * 2082 * Disorder of the args can happen. For example: 2083 * 2084 * struct foo_struct { 2085 * long a; 2086 * int b; 2087 * }; 2088 * int foo(char, char, char, char, char, struct foo_struct, 2089 * char); 2090 * 2091 * arg1-5 and arg7 will be passed in regs, and arg6 will 2092 * be passed on the stack. 2093 */ 2094 if (nr_regs + arg_regs > 6) { 2095 /* copy function arguments from the origin stack frame 2096 * into the current stack frame. 2097 * 2098 * The starting address of the arguments on-stack 2099 * is: 2100 * rbp + 8(push rbp) + 2101 * 8(return addr of origin call) + 2102 * 8(return addr of the caller) 2103 * which means: rbp + 24 2104 */ 2105 for (j = 0; j < arg_regs; j++) { 2106 emit_ldx(prog, BPF_DW, BPF_REG_0, BPF_REG_FP, 2107 nr_stack_slots * 8 + 0x18); 2108 emit_stx(prog, BPF_DW, BPF_REG_FP, BPF_REG_0, 2109 -stack_size); 2110 2111 if (!nr_stack_slots) 2112 first_off = stack_size; 2113 stack_size -= 8; 2114 nr_stack_slots++; 2115 } 2116 } else { 2117 /* Only copy the on-stack arguments to the current 2118 * 'stack_size' and ignore the regs; this is used to 2119 * prepare the on-stack arguments for the origin call. 2120 */ 2121 if (for_call_origin) { 2122 nr_regs += arg_regs; 2123 continue; 2124 } 2125 2126 /* copy the arguments from regs onto the stack */ 2127 for (j = 0; j < arg_regs; j++) { 2128 emit_stx(prog, BPF_DW, BPF_REG_FP, 2129 nr_regs == 5 ? X86_REG_R9 : BPF_REG_1 + nr_regs, 2130 -stack_size); 2131 stack_size -= 8; 2132 nr_regs++; 2133 } 2134 } 2135 } 2136 2137 clean_stack_garbage(m, prog, nr_stack_slots, first_off); 2138 } 2139 2140 static void restore_regs(const struct btf_func_model *m, u8 **prog, 2141 int stack_size) 2142 { 2143 int i, j, arg_regs, nr_regs = 0; 2144 2145 /* Restore function arguments from stack. 2146 * For a function that accepts two pointers the sequence will be: 2147 * EMIT4(0x48, 0x8B, 0x7D, 0xF0); mov rdi,QWORD PTR [rbp-0x10] 2148 * EMIT4(0x48, 0x8B, 0x75, 0xF8); mov rsi,QWORD PTR [rbp-0x8] 2149 * 2150 * The logic here is similar to what we do in save_args(). 2151 */ 2152 for (i = 0; i < min_t(int, m->nr_args, MAX_BPF_FUNC_ARGS); i++) { 2153 arg_regs = (m->arg_size[i] + 7) / 8; 2154 if (nr_regs + arg_regs <= 6) { 2155 for (j = 0; j < arg_regs; j++) { 2156 emit_ldx(prog, BPF_DW, 2157 nr_regs == 5 ? X86_REG_R9 : BPF_REG_1 + nr_regs, 2158 BPF_REG_FP, 2159 -stack_size); 2160 stack_size -= 8; 2161 nr_regs++; 2162 } 2163 } else { 2164 stack_size -= 8 * arg_regs; 2165 } 2166 2167 if (nr_regs >= 6) 2168 break; 2169 } 2170 } 2171 2172 static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog, 2173 struct bpf_tramp_link *l, int stack_size, 2174 int run_ctx_off, bool save_ret) 2175 { 2176 u8 *prog = *pprog; 2177 u8 *jmp_insn; 2178 int ctx_cookie_off = offsetof(struct bpf_tramp_run_ctx, bpf_cookie); 2179 struct bpf_prog *p = l->link.prog; 2180 u64 cookie = l->cookie; 2181 2182 /* mov rdi, cookie */ 2183 emit_mov_imm64(&prog, BPF_REG_1, (long) cookie >> 32, (u32) (long) cookie); 2184 2185 /* Prepare struct bpf_tramp_run_ctx.
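* (Editor's note: the cookie written into the run_ctx slot below is what an attached program reads back via the bpf_get_attach_cookie() helper.)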
2186 * 2187 * bpf_tramp_run_ctx is already preserved by 2188 * arch_prepare_bpf_trampoline(). 2189 * 2190 * mov QWORD PTR [rbp - run_ctx_off + ctx_cookie_off], rdi 2191 */ 2192 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_1, -run_ctx_off + ctx_cookie_off); 2193 2194 /* arg1: mov rdi, progs[i] */ 2195 emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p); 2196 /* arg2: lea rsi, [rbp - ctx_cookie_off] */ 2197 if (!is_imm8(-run_ctx_off)) 2198 EMIT3_off32(0x48, 0x8D, 0xB5, -run_ctx_off); 2199 else 2200 EMIT4(0x48, 0x8D, 0x75, -run_ctx_off); 2201 2202 if (emit_rsb_call(&prog, bpf_trampoline_enter(p), prog)) 2203 return -EINVAL; 2204 /* remember prog start time returned by __bpf_prog_enter */ 2205 emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0); 2206 2207 /* if (__bpf_prog_enter*(prog) == 0) 2208 * goto skip_exec_of_prog; 2209 */ 2210 EMIT3(0x48, 0x85, 0xC0); /* test rax,rax */ 2211 /* emit 2 nops that will be replaced with JE insn */ 2212 jmp_insn = prog; 2213 emit_nops(&prog, 2); 2214 2215 /* arg1: lea rdi, [rbp - stack_size] */ 2216 if (!is_imm8(-stack_size)) 2217 EMIT3_off32(0x48, 0x8D, 0xBD, -stack_size); 2218 else 2219 EMIT4(0x48, 0x8D, 0x7D, -stack_size); 2220 /* arg2: progs[i]->insnsi for interpreter */ 2221 if (!p->jited) 2222 emit_mov_imm64(&prog, BPF_REG_2, 2223 (long) p->insnsi >> 32, 2224 (u32) (long) p->insnsi); 2225 /* call JITed bpf program or interpreter */ 2226 if (emit_rsb_call(&prog, p->bpf_func, prog)) 2227 return -EINVAL; 2228 2229 /* 2230 * BPF_TRAMP_MODIFY_RETURN trampolines can modify the return 2231 * of the previous call which is then passed on the stack to 2232 * the next BPF program. 2233 * 2234 * BPF_TRAMP_FENTRY trampoline may need to return the return 2235 * value of BPF_PROG_TYPE_STRUCT_OPS prog. 2236 */ 2237 if (save_ret) 2238 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8); 2239 2240 /* replace 2 nops with JE insn, since jmp target is known */ 2241 jmp_insn[0] = X86_JE; 2242 jmp_insn[1] = prog - jmp_insn - 2; 2243 2244 /* arg1: mov rdi, progs[i] */ 2245 emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p); 2246 /* arg2: mov rsi, rbx <- start time in nsec */ 2247 emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6); 2248 /* arg3: lea rdx, [rbp - run_ctx_off] */ 2249 if (!is_imm8(-run_ctx_off)) 2250 EMIT3_off32(0x48, 0x8D, 0x95, -run_ctx_off); 2251 else 2252 EMIT4(0x48, 0x8D, 0x55, -run_ctx_off); 2253 if (emit_rsb_call(&prog, bpf_trampoline_exit(p), prog)) 2254 return -EINVAL; 2255 2256 *pprog = prog; 2257 return 0; 2258 } 2259 2260 static void emit_align(u8 **pprog, u32 align) 2261 { 2262 u8 *target, *prog = *pprog; 2263 2264 target = PTR_ALIGN(prog, align); 2265 if (target != prog) 2266 emit_nops(&prog, target - prog); 2267 2268 *pprog = prog; 2269 } 2270 2271 static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond) 2272 { 2273 u8 *prog = *pprog; 2274 s64 offset; 2275 2276 offset = func - (ip + 2 + 4); 2277 if (!is_simm32(offset)) { 2278 pr_err("Target %p is out of range\n", func); 2279 return -EINVAL; 2280 } 2281 EMIT2_off32(0x0F, jmp_cond + 0x10, offset); 2282 *pprog = prog; 2283 return 0; 2284 } 2285 2286 static int invoke_bpf(const struct btf_func_model *m, u8 **pprog, 2287 struct bpf_tramp_links *tl, int stack_size, 2288 int run_ctx_off, bool save_ret) 2289 { 2290 int i; 2291 u8 *prog = *pprog; 2292 2293 for (i = 0; i < tl->nr_links; i++) { 2294 if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size, 2295 run_ctx_off, save_ret)) 2296 return -EINVAL; 2297 } 2298 *pprog = prog; 2299 return 0; 2300 } 2301 2302 static 
int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog, 2303 struct bpf_tramp_links *tl, int stack_size, 2304 int run_ctx_off, u8 **branches) 2305 { 2306 u8 *prog = *pprog; 2307 int i; 2308 2309 /* The first fmod_ret program will receive a garbage return value. 2310 * Set this to 0 to avoid confusing the program. 2311 */ 2312 emit_mov_imm32(&prog, false, BPF_REG_0, 0); 2313 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8); 2314 for (i = 0; i < tl->nr_links; i++) { 2315 if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size, run_ctx_off, true)) 2316 return -EINVAL; 2317 2318 /* mod_ret prog stored return value into [rbp - 8]. Emit: 2319 * if (*(u64 *)(rbp - 8) != 0) 2320 * goto do_fexit; 2321 */ 2322 /* cmp QWORD PTR [rbp - 0x8], 0x0 */ 2323 EMIT4(0x48, 0x83, 0x7d, 0xf8); EMIT1(0x00); 2324 2325 /* Save the location of the branch and generate 6 nops 2326 * (4 bytes for an offset and 2 bytes for the jump). These nops 2327 * are replaced with a conditional jump once do_fexit (i.e. the 2328 * start of the fexit invocation) is finalized. 2329 */ 2330 branches[i] = prog; 2331 emit_nops(&prog, 4 + 2); 2332 } 2333 2334 *pprog = prog; 2335 return 0; 2336 } 2337 2338 /* Example: 2339 * __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev); 2340 * its 'struct btf_func_model' will be nr_args=2 2341 * The assembly code when eth_type_trans is executing after the trampoline: 2342 * 2343 * push rbp 2344 * mov rbp, rsp 2345 * sub rsp, 16 // space for skb and dev 2346 * push rbx // temp regs to pass start time 2347 * mov qword ptr [rbp - 16], rdi // save skb pointer to stack 2348 * mov qword ptr [rbp - 8], rsi // save dev pointer to stack 2349 * call __bpf_prog_enter // rcu_read_lock and preempt_disable 2350 * mov rbx, rax // remember start time if bpf stats are enabled 2351 * lea rdi, [rbp - 16] // R1==ctx of bpf prog 2352 * call addr_of_jited_FENTRY_prog 2353 * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off 2354 * mov rsi, rbx // prog start time 2355 * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math 2356 * mov rdi, qword ptr [rbp - 16] // restore skb pointer from stack 2357 * mov rsi, qword ptr [rbp - 8] // restore dev pointer from stack 2358 * pop rbx 2359 * leave 2360 * ret 2361 * 2362 * eth_type_trans has a 5 byte nop at the beginning. These 5 bytes will be 2363 * replaced with 'call generated_bpf_trampoline'. When it returns 2364 * eth_type_trans will continue executing with original skb and dev pointers.
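* (Editor's note: the listing above is the fentry-only case, where the trampoline returns into the traced function. The listing below corresponds to a trampoline built with BPF_TRAMP_F_CALL_ORIG and BPF_TRAMP_F_SKIP_FRAME, which calls the original function itself so that an fexit program can see its return value.)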
2365 * 2366 * The assembly code when eth_type_trans is called from trampoline: 2367 * 2368 * push rbp 2369 * mov rbp, rsp 2370 * sub rsp, 24 // space for skb, dev, return value 2371 * push rbx // temp regs to pass start time 2372 * mov qword ptr [rbp - 24], rdi // save skb pointer to stack 2373 * mov qword ptr [rbp - 16], rsi // save dev pointer to stack 2374 * call __bpf_prog_enter // rcu_read_lock and preempt_disable 2375 * mov rbx, rax // remember start time if bpf stats are enabled 2376 * lea rdi, [rbp - 24] // R1==ctx of bpf prog 2377 * call addr_of_jited_FENTRY_prog // bpf prog can access skb and dev 2378 * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off 2379 * mov rsi, rbx // prog start time 2380 * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math 2381 * mov rdi, qword ptr [rbp - 24] // restore skb pointer from stack 2382 * mov rsi, qword ptr [rbp - 16] // restore dev pointer from stack 2383 * call eth_type_trans+5 // execute body of eth_type_trans 2384 * mov qword ptr [rbp - 8], rax // save return value 2385 * call __bpf_prog_enter // rcu_read_lock and preempt_disable 2386 * mov rbx, rax // remember start time if bpf stats are enabled 2387 * lea rdi, [rbp - 24] // R1==ctx of bpf prog 2388 * call addr_of_jited_FEXIT_prog // bpf prog can access skb, dev, return value 2389 * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off 2390 * mov rsi, rbx // prog start time 2391 * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math 2392 * mov rax, qword ptr [rbp - 8] // restore eth_type_trans's return value 2393 * pop rbx 2394 * leave 2395 * add rsp, 8 // skip eth_type_trans's frame 2396 * ret // return to its caller 2397 */ 2398 int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end, 2399 const struct btf_func_model *m, u32 flags, 2400 struct bpf_tramp_links *tlinks, 2401 void *func_addr) 2402 { 2403 int i, ret, nr_regs = m->nr_args, stack_size = 0; 2404 int regs_off, nregs_off, ip_off, run_ctx_off, arg_stack_off, rbx_off; 2405 struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY]; 2406 struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT]; 2407 struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN]; 2408 void *orig_call = func_addr; 2409 u8 **branches = NULL; 2410 u8 *prog; 2411 bool save_ret; 2412 2413 /* extra registers for struct arguments */ 2414 for (i = 0; i < m->nr_args; i++) 2415 if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG) 2416 nr_regs += (m->arg_size[i] + 7) / 8 - 1; 2417 2418 /* x86-64 supports up to MAX_BPF_FUNC_ARGS arguments. The first 2419 * six are passed in regs, the rest are passed on the stack. 2420 */ 2421 if (nr_regs > MAX_BPF_FUNC_ARGS) 2422 return -ENOTSUPP; 2423 2424 /* Generated trampoline stack layout: 2425 * 2426 * RBP + 8 [ return address ] 2427 * RBP + 0 [ RBP ] 2428 * 2429 * RBP - 8 [ return value ] BPF_TRAMP_F_CALL_ORIG or 2430 * BPF_TRAMP_F_RET_FENTRY_RET flags 2431 * 2432 * [ reg_argN ] always 2433 * [ ... ] 2434 * RBP - regs_off [ reg_arg1 ] program's ctx pointer 2435 * 2436 * RBP - nregs_off [ regs count ] always 2437 * 2438 * RBP - ip_off [ traced function ] BPF_TRAMP_F_IP_ARG flag 2439 * 2440 * RBP - rbx_off [ rbx value ] always 2441 * 2442 * RBP - run_ctx_off [ bpf_tramp_run_ctx ] 2443 * 2444 * [ stack_argN ] BPF_TRAMP_F_CALL_ORIG 2445 * [ ...
] 2446 * [ stack_arg2 ] 2447 * RBP - arg_stack_off [ stack_arg1 ] 2448 * RSP [ tail_call_cnt ] BPF_TRAMP_F_TAIL_CALL_CTX 2449 */ 2450 2451 /* room for return value of orig_call or fentry prog */ 2452 save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET); 2453 if (save_ret) 2454 stack_size += 8; 2455 2456 stack_size += nr_regs * 8; 2457 regs_off = stack_size; 2458 2459 /* regs count */ 2460 stack_size += 8; 2461 nregs_off = stack_size; 2462 2463 if (flags & BPF_TRAMP_F_IP_ARG) 2464 stack_size += 8; /* room for IP address argument */ 2465 2466 ip_off = stack_size; 2467 2468 stack_size += 8; 2469 rbx_off = stack_size; 2470 2471 stack_size += (sizeof(struct bpf_tramp_run_ctx) + 7) & ~0x7; 2472 run_ctx_off = stack_size; 2473 2474 if (nr_regs > 6 && (flags & BPF_TRAMP_F_CALL_ORIG)) { 2475 /* the space used to pass arguments on the stack */ 2476 stack_size += (nr_regs - get_nr_used_regs(m)) * 8; 2477 /* make sure the stack pointer is 16-byte aligned if we 2478 * need to pass arguments on the stack, which means 2479 * [stack_size + 8(rbp) + 8(rip) + 8(origin rip)] 2480 * should be 16-byte aligned. The following code depends 2481 * on stack_size already being 8-byte aligned. 2482 */ 2483 stack_size += (stack_size % 16) ? 0 : 8; 2484 } 2485 2486 arg_stack_off = stack_size; 2487 2488 if (flags & BPF_TRAMP_F_SKIP_FRAME) { 2489 /* skip the patched call instruction and point orig_call to the 2490 * actual body of the kernel function. 2491 */ 2492 if (is_endbr(*(u32 *)orig_call)) 2493 orig_call += ENDBR_INSN_SIZE; 2494 orig_call += X86_PATCH_SIZE; 2495 } 2496 2497 prog = image; 2498 2499 EMIT_ENDBR(); 2500 /* 2501 * This is the direct-call trampoline, as such it needs accounting 2502 * for the __fentry__ call. 2503 */ 2504 x86_call_depth_emit_accounting(&prog, NULL); 2505 EMIT1(0x55); /* push rbp */ 2506 EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */ 2507 if (!is_imm8(stack_size)) 2508 /* sub rsp, stack_size */ 2509 EMIT3_off32(0x48, 0x81, 0xEC, stack_size); 2510 else 2511 /* sub rsp, stack_size */ 2512 EMIT4(0x48, 0x83, 0xEC, stack_size); 2513 if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) 2514 EMIT1(0x50); /* push rax */ 2515 /* mov QWORD PTR [rbp - rbx_off], rbx */ 2516 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_6, -rbx_off); 2517 2518 /* Store number of argument registers of the traced function: 2519 * mov rax, nr_regs 2520 * mov QWORD PTR [rbp - nregs_off], rax 2521 */ 2522 emit_mov_imm64(&prog, BPF_REG_0, 0, (u32) nr_regs); 2523 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -nregs_off); 2524 2525 if (flags & BPF_TRAMP_F_IP_ARG) { 2526 /* Store IP address of the traced function: 2527 * movabsq rax, func_addr 2528 * mov QWORD PTR [rbp - ip_off], rax 2529 */ 2530 emit_mov_imm64(&prog, BPF_REG_0, (long) func_addr >> 32, (u32) (long) func_addr); 2531 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -ip_off); 2532 } 2533 2534 save_args(m, &prog, regs_off, false); 2535 2536 if (flags & BPF_TRAMP_F_CALL_ORIG) { 2537 /* arg1: mov rdi, im */ 2538 emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im); 2539 if (emit_rsb_call(&prog, __bpf_tramp_enter, prog)) { 2540 ret = -EINVAL; 2541 goto cleanup; 2542 } 2543 } 2544 2545 if (fentry->nr_links) 2546 if (invoke_bpf(m, &prog, fentry, regs_off, run_ctx_off, 2547 flags & BPF_TRAMP_F_RET_FENTRY_RET)) 2548 return -EINVAL; 2549 2550 if (fmod_ret->nr_links) { 2551 branches = kcalloc(fmod_ret->nr_links, sizeof(u8 *), 2552 GFP_KERNEL); 2553 if (!branches) 2554 return -ENOMEM; 2555 2556 if (invoke_bpf_mod_ret(m, &prog, fmod_ret, regs_off, 2557 run_ctx_off, branches)) {
2558 ret = -EINVAL; 2559 goto cleanup; 2560 } 2561 } 2562 2563 if (flags & BPF_TRAMP_F_CALL_ORIG) { 2564 restore_regs(m, &prog, regs_off); 2565 save_args(m, &prog, arg_stack_off, true); 2566 2567 if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) 2568 /* Before calling the original function, restore the 2569 * tail_call_cnt from stack to rax. 2570 */ 2571 RESTORE_TAIL_CALL_CNT(stack_size); 2572 2573 if (flags & BPF_TRAMP_F_ORIG_STACK) { 2574 emit_ldx(&prog, BPF_DW, BPF_REG_6, BPF_REG_FP, 8); 2575 EMIT2(0xff, 0xd3); /* call *rbx */ 2576 } else { 2577 /* call original function */ 2578 if (emit_rsb_call(&prog, orig_call, prog)) { 2579 ret = -EINVAL; 2580 goto cleanup; 2581 } 2582 } 2583 /* remember return value in a stack for bpf prog to access */ 2584 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8); 2585 im->ip_after_call = prog; 2586 memcpy(prog, x86_nops[5], X86_PATCH_SIZE); 2587 prog += X86_PATCH_SIZE; 2588 } 2589 2590 if (fmod_ret->nr_links) { 2591 /* From Intel 64 and IA-32 Architectures Optimization 2592 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler 2593 * Coding Rule 11: All branch targets should be 16-byte 2594 * aligned. 2595 */ 2596 emit_align(&prog, 16); 2597 /* Update the branches saved in invoke_bpf_mod_ret with the 2598 * aligned address of do_fexit. 2599 */ 2600 for (i = 0; i < fmod_ret->nr_links; i++) 2601 emit_cond_near_jump(&branches[i], prog, branches[i], 2602 X86_JNE); 2603 } 2604 2605 if (fexit->nr_links) 2606 if (invoke_bpf(m, &prog, fexit, regs_off, run_ctx_off, false)) { 2607 ret = -EINVAL; 2608 goto cleanup; 2609 } 2610 2611 if (flags & BPF_TRAMP_F_RESTORE_REGS) 2612 restore_regs(m, &prog, regs_off); 2613 2614 /* This needs to be done regardless. If there were fmod_ret programs, 2615 * the return value is only updated on the stack and still needs to be 2616 * restored to R0. 2617 */ 2618 if (flags & BPF_TRAMP_F_CALL_ORIG) { 2619 im->ip_epilogue = prog; 2620 /* arg1: mov rdi, im */ 2621 emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im); 2622 if (emit_rsb_call(&prog, __bpf_tramp_exit, prog)) { 2623 ret = -EINVAL; 2624 goto cleanup; 2625 } 2626 } else if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) 2627 /* Before running the original function, restore the 2628 * tail_call_cnt from stack to rax. 2629 */ 2630 RESTORE_TAIL_CALL_CNT(stack_size); 2631 2632 /* restore return value of orig_call or fentry prog back into RAX */ 2633 if (save_ret) 2634 emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8); 2635 2636 emit_ldx(&prog, BPF_DW, BPF_REG_6, BPF_REG_FP, -rbx_off); 2637 EMIT1(0xC9); /* leave */ 2638 if (flags & BPF_TRAMP_F_SKIP_FRAME) 2639 /* skip our return address and return to parent */ 2640 EMIT4(0x48, 0x83, 0xC4, 8); /* add rsp, 8 */ 2641 emit_return(&prog, prog); 2642 /* Make sure the trampoline generation logic doesn't overflow */ 2643 if (WARN_ON_ONCE(prog > (u8 *)image_end - BPF_INSN_SAFETY)) { 2644 ret = -EFAULT; 2645 goto cleanup; 2646 } 2647 ret = prog - (u8 *)image; 2648 2649 cleanup: 2650 kfree(branches); 2651 return ret; 2652 } 2653 2654 static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs, u8 *image, u8 *buf) 2655 { 2656 u8 *jg_reloc, *prog = *pprog; 2657 int pivot, err, jg_bytes = 1; 2658 s64 jg_offset; 2659 2660 if (a == b) { 2661 /* Leaf node of recursion, i.e. not a range of indices 2662 * anymore. 
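* (Editor's note: the leaf emits cmp rdx, progs[a] followed by je progs[a], and otherwise falls through to an indirect jump through rdx; see the illustrative sketch at the end of this file.)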
2663 */ 2664 EMIT1(add_1mod(0x48, BPF_REG_3)); /* cmp rdx,func */ 2665 if (!is_simm32(progs[a])) 2666 return -1; 2667 EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3), 2668 progs[a]); 2669 err = emit_cond_near_jump(&prog, /* je func */ 2670 (void *)progs[a], image + (prog - buf), 2671 X86_JE); 2672 if (err) 2673 return err; 2674 2675 emit_indirect_jump(&prog, 2 /* rdx */, image + (prog - buf)); 2676 2677 *pprog = prog; 2678 return 0; 2679 } 2680 2681 /* Not a leaf node, so we pivot, and recursively descend into 2682 * the lower and upper ranges. 2683 */ 2684 pivot = (b - a) / 2; 2685 EMIT1(add_1mod(0x48, BPF_REG_3)); /* cmp rdx,func */ 2686 if (!is_simm32(progs[a + pivot])) 2687 return -1; 2688 EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3), progs[a + pivot]); 2689 2690 if (pivot > 2) { /* jg upper_part */ 2691 /* Require near jump. */ 2692 jg_bytes = 4; 2693 EMIT2_off32(0x0F, X86_JG + 0x10, 0); 2694 } else { 2695 EMIT2(X86_JG, 0); 2696 } 2697 jg_reloc = prog; 2698 2699 err = emit_bpf_dispatcher(&prog, a, a + pivot, /* emit lower_part */ 2700 progs, image, buf); 2701 if (err) 2702 return err; 2703 2704 /* From Intel 64 and IA-32 Architectures Optimization 2705 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler 2706 * Coding Rule 11: All branch targets should be 16-byte 2707 * aligned. 2708 */ 2709 emit_align(&prog, 16); 2710 jg_offset = prog - jg_reloc; 2711 emit_code(jg_reloc - jg_bytes, jg_offset, jg_bytes); 2712 2713 err = emit_bpf_dispatcher(&prog, a + pivot + 1, /* emit upper_part */ 2714 b, progs, image, buf); 2715 if (err) 2716 return err; 2717 2718 *pprog = prog; 2719 return 0; 2720 } 2721 2722 static int cmp_ips(const void *a, const void *b) 2723 { 2724 const s64 *ipa = a; 2725 const s64 *ipb = b; 2726 2727 if (*ipa > *ipb) 2728 return 1; 2729 if (*ipa < *ipb) 2730 return -1; 2731 return 0; 2732 } 2733 2734 int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_funcs) 2735 { 2736 u8 *prog = buf; 2737 2738 sort(funcs, num_funcs, sizeof(funcs[0]), cmp_ips, NULL); 2739 return emit_bpf_dispatcher(&prog, 0, num_funcs - 1, funcs, image, buf); 2740 } 2741 2742 struct x64_jit_data { 2743 struct bpf_binary_header *rw_header; 2744 struct bpf_binary_header *header; 2745 int *addrs; 2746 u8 *image; 2747 int proglen; 2748 struct jit_context ctx; 2749 }; 2750 2751 #define MAX_PASSES 20 2752 #define PADDING_PASSES (MAX_PASSES - 5) 2753 2754 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) 2755 { 2756 struct bpf_binary_header *rw_header = NULL; 2757 struct bpf_binary_header *header = NULL; 2758 struct bpf_prog *tmp, *orig_prog = prog; 2759 struct x64_jit_data *jit_data; 2760 int proglen, oldproglen = 0; 2761 struct jit_context ctx = {}; 2762 bool tmp_blinded = false; 2763 bool extra_pass = false; 2764 bool padding = false; 2765 u8 *rw_image = NULL; 2766 u8 *image = NULL; 2767 int *addrs; 2768 int pass; 2769 int i; 2770 2771 if (!prog->jit_requested) 2772 return orig_prog; 2773 2774 tmp = bpf_jit_blind_constants(prog); 2775 /* 2776 * If blinding was requested and we failed during blinding, 2777 * we must fall back to the interpreter. 
2778 */ 2779 if (IS_ERR(tmp)) 2780 return orig_prog; 2781 if (tmp != prog) { 2782 tmp_blinded = true; 2783 prog = tmp; 2784 } 2785 2786 jit_data = prog->aux->jit_data; 2787 if (!jit_data) { 2788 jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL); 2789 if (!jit_data) { 2790 prog = orig_prog; 2791 goto out; 2792 } 2793 prog->aux->jit_data = jit_data; 2794 } 2795 addrs = jit_data->addrs; 2796 if (addrs) { 2797 ctx = jit_data->ctx; 2798 oldproglen = jit_data->proglen; 2799 image = jit_data->image; 2800 header = jit_data->header; 2801 rw_header = jit_data->rw_header; 2802 rw_image = (void *)rw_header + ((void *)image - (void *)header); 2803 extra_pass = true; 2804 padding = true; 2805 goto skip_init_addrs; 2806 } 2807 addrs = kvmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL); 2808 if (!addrs) { 2809 prog = orig_prog; 2810 goto out_addrs; 2811 } 2812 2813 /* 2814 * Before first pass, make a rough estimation of addrs[] 2815 * each BPF instruction is translated to less than 64 bytes 2816 */ 2817 for (proglen = 0, i = 0; i <= prog->len; i++) { 2818 proglen += 64; 2819 addrs[i] = proglen; 2820 } 2821 ctx.cleanup_addr = proglen; 2822 skip_init_addrs: 2823 2824 /* 2825 * JITed image shrinks with every pass and the loop iterates 2826 * until the image stops shrinking. Very large BPF programs 2827 * may converge on the last pass. In such case do one more 2828 * pass to emit the final image. 2829 */ 2830 for (pass = 0; pass < MAX_PASSES || image; pass++) { 2831 if (!padding && pass >= PADDING_PASSES) 2832 padding = true; 2833 proglen = do_jit(prog, addrs, image, rw_image, oldproglen, &ctx, padding); 2834 if (proglen <= 0) { 2835 out_image: 2836 image = NULL; 2837 if (header) { 2838 bpf_arch_text_copy(&header->size, &rw_header->size, 2839 sizeof(rw_header->size)); 2840 bpf_jit_binary_pack_free(header, rw_header); 2841 } 2842 /* Fall back to interpreter mode */ 2843 prog = orig_prog; 2844 if (extra_pass) { 2845 prog->bpf_func = NULL; 2846 prog->jited = 0; 2847 prog->jited_len = 0; 2848 } 2849 goto out_addrs; 2850 } 2851 if (image) { 2852 if (proglen != oldproglen) { 2853 pr_err("bpf_jit: proglen=%d != oldproglen=%d\n", 2854 proglen, oldproglen); 2855 goto out_image; 2856 } 2857 break; 2858 } 2859 if (proglen == oldproglen) { 2860 /* 2861 * The number of entries in extable is the number of BPF_LDX 2862 * insns that access kernel memory via "pointer to BTF type". 2863 * The verifier changed their opcode from LDX|MEM|size 2864 * to LDX|PROBE_MEM|size to make JITing easier. 2865 */ 2866 u32 align = __alignof__(struct exception_table_entry); 2867 u32 extable_size = prog->aux->num_exentries * 2868 sizeof(struct exception_table_entry); 2869 2870 /* allocate module memory for x86 insns and extable */ 2871 header = bpf_jit_binary_pack_alloc(roundup(proglen, align) + extable_size, 2872 &image, align, &rw_header, &rw_image, 2873 jit_fill_hole); 2874 if (!header) { 2875 prog = orig_prog; 2876 goto out_addrs; 2877 } 2878 prog->aux->extable = (void *) image + roundup(proglen, align); 2879 } 2880 oldproglen = proglen; 2881 cond_resched(); 2882 } 2883 2884 if (bpf_jit_enable > 1) 2885 bpf_jit_dump(prog->len, proglen, pass + 1, rw_image); 2886 2887 if (image) { 2888 if (!prog->is_func || extra_pass) { 2889 /* 2890 * bpf_jit_binary_pack_finalize fails in two scenarios: 2891 * 1) header is not pointing to proper module memory; 2892 * 2) the arch doesn't support bpf_arch_text_copy(). 2893 * 2894 * Both cases are serious bugs and justify WARN_ON. 
2895 */ 2896 if (WARN_ON(bpf_jit_binary_pack_finalize(prog, header, rw_header))) { 2897 /* header has been freed */ 2898 header = NULL; 2899 goto out_image; 2900 } 2901 2902 bpf_tail_call_direct_fixup(prog); 2903 } else { 2904 jit_data->addrs = addrs; 2905 jit_data->ctx = ctx; 2906 jit_data->proglen = proglen; 2907 jit_data->image = image; 2908 jit_data->header = header; 2909 jit_data->rw_header = rw_header; 2910 } 2911 prog->bpf_func = (void *)image; 2912 prog->jited = 1; 2913 prog->jited_len = proglen; 2914 } else { 2915 prog = orig_prog; 2916 } 2917 2918 if (!image || !prog->is_func || extra_pass) { 2919 if (image) 2920 bpf_prog_fill_jited_linfo(prog, addrs + 1); 2921 out_addrs: 2922 kvfree(addrs); 2923 kfree(jit_data); 2924 prog->aux->jit_data = NULL; 2925 } 2926 out: 2927 if (tmp_blinded) 2928 bpf_jit_prog_release_other(prog, prog == orig_prog ? 2929 tmp : orig_prog); 2930 return prog; 2931 } 2932 2933 bool bpf_jit_supports_kfunc_call(void) 2934 { 2935 return true; 2936 } 2937 2938 void *bpf_arch_text_copy(void *dst, void *src, size_t len) 2939 { 2940 if (text_poke_copy(dst, src, len) == NULL) 2941 return ERR_PTR(-EINVAL); 2942 return dst; 2943 } 2944 2945 /* Indicate the JIT backend supports mixing bpf2bpf and tailcalls. */ 2946 bool bpf_jit_supports_subprog_tailcalls(void) 2947 { 2948 return true; 2949 } 2950 2951 void bpf_jit_free(struct bpf_prog *prog) 2952 { 2953 if (prog->jited) { 2954 struct x64_jit_data *jit_data = prog->aux->jit_data; 2955 struct bpf_binary_header *hdr; 2956 2957 /* 2958 * If we fail the final pass of JIT (from jit_subprogs), 2959 * the program may not be finalized yet. Call finalize here 2960 * before freeing it. 2961 */ 2962 if (jit_data) { 2963 bpf_jit_binary_pack_finalize(prog, jit_data->header, 2964 jit_data->rw_header); 2965 kvfree(jit_data->addrs); 2966 kfree(jit_data); 2967 } 2968 hdr = bpf_jit_binary_pack_hdr(prog); 2969 bpf_jit_binary_pack_free(hdr, NULL); 2970 WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(prog)); 2971 } 2972 2973 bpf_prog_unlock_free(prog); 2974 } 2975 2976 void bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke, 2977 struct bpf_prog *new, struct bpf_prog *old) 2978 { 2979 u8 *old_addr, *new_addr, *old_bypass_addr; 2980 int ret; 2981 2982 old_bypass_addr = old ? NULL : poke->bypass_addr; 2983 old_addr = old ? (u8 *)old->bpf_func + poke->adj_off : NULL; 2984 new_addr = new ? (u8 *)new->bpf_func + poke->adj_off : NULL; 2985 2986 /* 2987 * On program loading or teardown, the program's kallsym entry 2988 * might not be in place, so we use __bpf_arch_text_poke to skip 2989 * the kallsyms check. 2990 */ 2991 if (new) { 2992 ret = __bpf_arch_text_poke(poke->tailcall_target, 2993 BPF_MOD_JUMP, 2994 old_addr, new_addr); 2995 BUG_ON(ret < 0); 2996 if (!old) { 2997 ret = __bpf_arch_text_poke(poke->tailcall_bypass, 2998 BPF_MOD_JUMP, 2999 poke->bypass_addr, 3000 NULL); 3001 BUG_ON(ret < 0); 3002 } 3003 } else { 3004 ret = __bpf_arch_text_poke(poke->tailcall_bypass, 3005 BPF_MOD_JUMP, 3006 old_bypass_addr, 3007 poke->bypass_addr); 3008 BUG_ON(ret < 0); 3009 /* let other CPUs finish the execution of program 3010 * so that it will not possible to expose them 3011 * to invalid nop, stack unwind, nop state 3012 */ 3013 if (!ret) 3014 synchronize_rcu(); 3015 ret = __bpf_arch_text_poke(poke->tailcall_target, 3016 BPF_MOD_JUMP, 3017 old_addr, NULL); 3018 BUG_ON(ret < 0); 3019 } 3020 } 3021