// SPDX-License-Identifier: GPL-2.0-only
/*
 * BPF JIT compiler
 *
 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
 * Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <linux/memory.h>
#include <linux/sort.h>
#include <asm/extable.h>
#include <asm/set_memory.h>
#include <asm/nospec-branch.h>
#include <asm/text-patching.h>

static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
{
	if (len == 1)
		*ptr = bytes;
	else if (len == 2)
		*(u16 *)ptr = bytes;
	else {
		*(u32 *)ptr = bytes;
		barrier();
	}
	return ptr + len;
}

#define EMIT(bytes, len) \
	do { prog = emit_code(prog, bytes, len); } while (0)

#define EMIT1(b1)		EMIT(b1, 1)
#define EMIT2(b1, b2)		EMIT((b1) + ((b2) << 8), 2)
#define EMIT3(b1, b2, b3)	EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
#define EMIT4(b1, b2, b3, b4)	EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)

#define EMIT1_off32(b1, off) \
	do { EMIT1(b1); EMIT(off, 4); } while (0)
#define EMIT2_off32(b1, b2, off) \
	do { EMIT2(b1, b2); EMIT(off, 4); } while (0)
#define EMIT3_off32(b1, b2, b3, off) \
	do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
#define EMIT4_off32(b1, b2, b3, b4, off) \
	do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)

#ifdef CONFIG_X86_KERNEL_IBT
#define EMIT_ENDBR()	EMIT(gen_endbr(), 4)
#else
#define EMIT_ENDBR()
#endif

static bool is_imm8(int value)
{
	return value <= 127 && value >= -128;
}

static bool is_simm32(s64 value)
{
	return value == (s64)(s32)value;
}

static bool is_uimm32(u64 value)
{
	return value == (u64)(u32)value;
}

/* mov dst, src */
#define EMIT_mov(DST, SRC)								 \
	do {										 \
		if (DST != SRC)								 \
			EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
	} while (0)

static int bpf_size_to_x86_bytes(int bpf_size)
{
	if (bpf_size == BPF_W)
		return 4;
	else if (bpf_size == BPF_H)
		return 2;
	else if (bpf_size == BPF_B)
		return 1;
	else if (bpf_size == BPF_DW)
		return 4; /* imm32 */
	else
		return 0;
}

/*
 * List of x86 cond jumps opcodes (. + s8)
 * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
 */
#define X86_JB  0x72
#define X86_JAE 0x73
#define X86_JE  0x74
#define X86_JNE 0x75
#define X86_JBE 0x76
#define X86_JA  0x77
#define X86_JL  0x7C
#define X86_JGE 0x7D
#define X86_JLE 0x7E
#define X86_JG  0x7F

/* Pick a register outside of BPF range for JIT internal work */
#define AUX_REG (MAX_BPF_JIT_REG + 1)
#define X86_REG_R9 (MAX_BPF_JIT_REG + 2)

/*
 * The following table maps BPF registers to x86-64 registers.
 *
 * x86-64 register R12 is unused, since if used as base address
 * register in load/store instructions, it always needs an
 * extra byte of encoding and is callee saved.
 *
 * x86-64 register R9 is not used by BPF programs, but can be used by BPF
 * trampoline. x86-64 register R10 is used for blinding (if enabled).
 */
static const int reg2hex[] = {
	[BPF_REG_0] = 0,  /* RAX */
	[BPF_REG_1] = 7,  /* RDI */
	[BPF_REG_2] = 6,  /* RSI */
	[BPF_REG_3] = 2,  /* RDX */
	[BPF_REG_4] = 1,  /* RCX */
	[BPF_REG_5] = 0,  /* R8  */
	[BPF_REG_6] = 3,  /* RBX callee saved */
	[BPF_REG_7] = 5,  /* R13 callee saved */
	[BPF_REG_8] = 6,  /* R14 callee saved */
	[BPF_REG_9] = 7,  /* R15 callee saved */
	[BPF_REG_FP] = 5, /* RBP readonly */
	[BPF_REG_AX] = 2, /* R10 temp register */
	[AUX_REG] = 3,    /* R11 temp register */
	[X86_REG_R9] = 1, /* R9 register, 6th function argument */
};

static const int reg2pt_regs[] = {
	[BPF_REG_0] = offsetof(struct pt_regs, ax),
	[BPF_REG_1] = offsetof(struct pt_regs, di),
	[BPF_REG_2] = offsetof(struct pt_regs, si),
	[BPF_REG_3] = offsetof(struct pt_regs, dx),
	[BPF_REG_4] = offsetof(struct pt_regs, cx),
	[BPF_REG_5] = offsetof(struct pt_regs, r8),
	[BPF_REG_6] = offsetof(struct pt_regs, bx),
	[BPF_REG_7] = offsetof(struct pt_regs, r13),
	[BPF_REG_8] = offsetof(struct pt_regs, r14),
	[BPF_REG_9] = offsetof(struct pt_regs, r15),
};

/*
 * is_ereg() == true if BPF register 'reg' maps to x86-64 r8..r15
 * which need extra byte of encoding.
 * rax,rcx,...,rbp have simpler encoding
 */
static bool is_ereg(u32 reg)
{
	return (1 << reg) & (BIT(BPF_REG_5) |
			     BIT(AUX_REG) |
			     BIT(BPF_REG_7) |
			     BIT(BPF_REG_8) |
			     BIT(BPF_REG_9) |
			     BIT(X86_REG_R9) |
			     BIT(BPF_REG_AX));
}

/*
 * is_ereg_8l() == true if BPF register 'reg' is mapped to access x86-64
 * lower 8-bit registers dil,sil,bpl,spl,r8b..r15b, which need extra byte
 * of encoding. al,cl,dl,bl have simpler encoding.
 */
static bool is_ereg_8l(u32 reg)
{
	return is_ereg(reg) ||
	    (1 << reg) & (BIT(BPF_REG_1) |
			  BIT(BPF_REG_2) |
			  BIT(BPF_REG_FP));
}

static bool is_axreg(u32 reg)
{
	return reg == BPF_REG_0;
}

/* Add modifiers if 'reg' maps to x86-64 registers R8..R15 */
static u8 add_1mod(u8 byte, u32 reg)
{
	if (is_ereg(reg))
		byte |= 1;
	return byte;
}

static u8 add_2mod(u8 byte, u32 r1, u32 r2)
{
	if (is_ereg(r1))
		byte |= 1;
	if (is_ereg(r2))
		byte |= 4;
	return byte;
}

/* Encode 'dst_reg' register into x86-64 opcode 'byte' */
static u8 add_1reg(u8 byte, u32 dst_reg)
{
	return byte + reg2hex[dst_reg];
}

/* Encode 'dst_reg' and 'src_reg' registers into x86-64 opcode 'byte' */
static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
{
	return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
}
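
/*
 * Worked example (illustrative, not part of the original source):
 * EMIT_mov(BPF_REG_9, BPF_REG_8) expands to
 *   EMIT3(add_2mod(0x48, BPF_REG_9, BPF_REG_8), 0x89,
 *         add_2reg(0xC0, BPF_REG_9, BPF_REG_8))
 * = EMIT3(0x4D, 0x89, 0xF7), i.e. the 3-byte instruction "mov r15, r14".
 * Both registers are extended (r14/r15), so the REX byte picks up the B and R
 * bits (0x48 | 1 | 4 == 0x4D), and the ModRM byte is 0xC0 + 7 + (6 << 3) == 0xF7.
 */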
/* Some 1-byte opcodes for binary ALU operations */
static u8 simple_alu_opcodes[] = {
	[BPF_ADD] = 0x01,
	[BPF_SUB] = 0x29,
	[BPF_AND] = 0x21,
	[BPF_OR] = 0x09,
	[BPF_XOR] = 0x31,
	[BPF_LSH] = 0xE0,
	[BPF_RSH] = 0xE8,
	[BPF_ARSH] = 0xF8,
};

static void jit_fill_hole(void *area, unsigned int size)
{
	/* Fill whole space with INT3 instructions */
	memset(area, 0xcc, size);
}

int bpf_arch_text_invalidate(void *dst, size_t len)
{
	return IS_ERR_OR_NULL(text_poke_set(dst, 0xcc, len));
}

struct jit_context {
	int cleanup_addr; /* Epilogue code offset */

	/*
	 * Program specific offsets of labels in the code; these rely on the
	 * JIT doing at least 2 passes, recording the position on the first
	 * pass, only to generate the correct offset on the second pass.
	 */
	int tail_call_direct_label;
	int tail_call_indirect_label;
};

/* Maximum number of bytes emitted while JITing one eBPF insn */
#define BPF_MAX_INSN_SIZE	128
#define BPF_INSN_SAFETY		64

/* Number of bytes emit_patch() needs to generate instructions */
#define X86_PATCH_SIZE		5
/* Number of bytes that will be skipped on tailcall */
#define X86_TAIL_CALL_OFFSET	(11 + ENDBR_INSN_SIZE)

static void push_callee_regs(u8 **pprog, bool *callee_regs_used)
{
	u8 *prog = *pprog;

	if (callee_regs_used[0])
		EMIT1(0x53);         /* push rbx */
	if (callee_regs_used[1])
		EMIT2(0x41, 0x55);   /* push r13 */
	if (callee_regs_used[2])
		EMIT2(0x41, 0x56);   /* push r14 */
	if (callee_regs_used[3])
		EMIT2(0x41, 0x57);   /* push r15 */
	*pprog = prog;
}

static void pop_callee_regs(u8 **pprog, bool *callee_regs_used)
{
	u8 *prog = *pprog;

	if (callee_regs_used[3])
		EMIT2(0x41, 0x5F);   /* pop r15 */
	if (callee_regs_used[2])
		EMIT2(0x41, 0x5E);   /* pop r14 */
	if (callee_regs_used[1])
		EMIT2(0x41, 0x5D);   /* pop r13 */
	if (callee_regs_used[0])
		EMIT1(0x5B);         /* pop rbx */
	*pprog = prog;
}

/*
 * Emit x86-64 prologue code for BPF program.
 * bpf_tail_call helper will skip the first X86_TAIL_CALL_OFFSET bytes
 * while jumping to another program
 */
static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf,
			  bool tail_call_reachable, bool is_subprog)
{
	u8 *prog = *pprog;

	/* BPF trampoline can be made to work without these nops,
	 * but let's waste 5 bytes for now and optimize later
	 */
	EMIT_ENDBR();
	memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
	prog += X86_PATCH_SIZE;
	if (!ebpf_from_cbpf) {
		if (tail_call_reachable && !is_subprog)
			EMIT2(0x31, 0xC0); /* xor eax, eax */
		else
			EMIT2(0x66, 0x90); /* nop2 */
	}
	EMIT1(0x55);             /* push rbp */
	EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */

	/* X86_TAIL_CALL_OFFSET is here */
	EMIT_ENDBR();

	/* sub rsp, rounded_stack_depth */
	if (stack_depth)
		EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth, 8));
	if (tail_call_reachable)
		EMIT1(0x50);         /* push rax */
	*pprog = prog;
}

static int emit_patch(u8 **pprog, void *func, void *ip, u8 opcode)
{
	u8 *prog = *pprog;
	s64 offset;

	offset = func - (ip + X86_PATCH_SIZE);
	if (!is_simm32(offset)) {
		pr_err("Target call %p is out of range\n", func);
		return -ERANGE;
	}
	EMIT1_off32(opcode, offset);
	*pprog = prog;
	return 0;
}

static int emit_call(u8 **pprog, void *func, void *ip)
{
	return emit_patch(pprog, func, ip, 0xE8);
}

static int emit_jump(u8 **pprog, void *func, void *ip)
{
	return emit_patch(pprog, func, ip, 0xE9);
}
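
/*
 * Illustrative note (not part of the original source): emit_call() emits a
 * 5-byte near call whose rel32 displacement is measured from the end of the
 * call instruction. If func == ip + 0x100, the offset is 0x100 - 5 = 0xfb and
 * the emitted bytes are: e8 fb 00 00 00. emit_jump() does the same with the
 * 0xE9 near-jmp opcode.
 */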

static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
				void *old_addr, void *new_addr)
{
	const u8 *nop_insn = x86_nops[5];
	u8 old_insn[X86_PATCH_SIZE];
	u8 new_insn[X86_PATCH_SIZE];
	u8 *prog;
	int ret;

	memcpy(old_insn, nop_insn, X86_PATCH_SIZE);
	if (old_addr) {
		prog = old_insn;
		ret = t == BPF_MOD_CALL ?
		      emit_call(&prog, old_addr, ip) :
		      emit_jump(&prog, old_addr, ip);
		if (ret)
			return ret;
	}

	memcpy(new_insn, nop_insn, X86_PATCH_SIZE);
	if (new_addr) {
		prog = new_insn;
		ret = t == BPF_MOD_CALL ?
		      emit_call(&prog, new_addr, ip) :
		      emit_jump(&prog, new_addr, ip);
		if (ret)
			return ret;
	}

	ret = -EBUSY;
	mutex_lock(&text_mutex);
	if (memcmp(ip, old_insn, X86_PATCH_SIZE))
		goto out;
	ret = 1;
	if (memcmp(ip, new_insn, X86_PATCH_SIZE)) {
		text_poke_bp(ip, new_insn, X86_PATCH_SIZE, NULL);
		ret = 0;
	}
out:
	mutex_unlock(&text_mutex);
	return ret;
}

int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
		       void *old_addr, void *new_addr)
{
	if (!is_kernel_text((long)ip) &&
	    !is_bpf_text_address((long)ip))
		/* BPF poking in modules is not supported */
		return -EINVAL;

	/*
	 * See emit_prologue(), for IBT builds the trampoline hook is preceded
	 * with an ENDBR instruction.
	 */
	if (is_endbr(*(u32 *)ip))
		ip += ENDBR_INSN_SIZE;

	return __bpf_arch_text_poke(ip, t, old_addr, new_addr);
}

#define EMIT_LFENCE()	EMIT3(0x0F, 0xAE, 0xE8)

static void emit_indirect_jump(u8 **pprog, int reg, u8 *ip)
{
	u8 *prog = *pprog;

	if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) {
		EMIT_LFENCE();
		EMIT2(0xFF, 0xE0 + reg);
	} else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) {
		OPTIMIZER_HIDE_VAR(reg);
		emit_jump(&prog, &__x86_indirect_thunk_array[reg], ip);
	} else {
		EMIT2(0xFF, 0xE0 + reg);	/* jmp *%\reg */
		if (IS_ENABLED(CONFIG_RETPOLINE) || IS_ENABLED(CONFIG_SLS))
			EMIT1(0xCC);		/* int3 */
	}

	*pprog = prog;
}

static void emit_return(u8 **pprog, u8 *ip)
{
	u8 *prog = *pprog;

	if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) {
		emit_jump(&prog, &__x86_return_thunk, ip);
	} else {
		EMIT1(0xC3);		/* ret */
		if (IS_ENABLED(CONFIG_SLS))
			EMIT1(0xCC);	/* int3 */
	}

	*pprog = prog;
}

/*
 * Generate the following code:
 *
 * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ...
 *   if (index >= array->map.max_entries)
 *     goto out;
 *   if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT)
 *     goto out;
 *   prog = array->ptrs[index];
 *   if (prog == NULL)
 *     goto out;
 *   goto *(prog->bpf_func + prologue_size);
 * out:
 */
static void emit_bpf_tail_call_indirect(u8 **pprog, bool *callee_regs_used,
					u32 stack_depth, u8 *ip,
					struct jit_context *ctx)
{
	int tcc_off = -4 - round_up(stack_depth, 8);
	u8 *prog = *pprog, *start = *pprog;
	int offset;

	/*
	 * rdi - pointer to ctx
	 * rsi - pointer to bpf_array
	 * rdx - index in bpf_array
	 */

	/*
	 * if (index >= array->map.max_entries)
	 *	goto out;
	 */
	EMIT2(0x89, 0xD2);                        /* mov edx, edx */
	EMIT3(0x39, 0x56,                         /* cmp dword ptr [rsi + 16], edx */
	      offsetof(struct bpf_array, map.max_entries));

	offset = ctx->tail_call_indirect_label - (prog + 2 - start);
	EMIT2(X86_JBE, offset);                   /* jbe out */

	/*
	 * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT)
	 *	goto out;
	 */
	EMIT2_off32(0x8B, 0x85, tcc_off);         /* mov eax, dword ptr [rbp - tcc_off] */
	EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);     /* cmp eax, MAX_TAIL_CALL_CNT */

	offset = ctx->tail_call_indirect_label - (prog + 2 - start);
	EMIT2(X86_JAE, offset);                   /* jae out */
	EMIT3(0x83, 0xC0, 0x01);                  /* add eax, 1 */
	EMIT2_off32(0x89, 0x85, tcc_off);         /* mov dword ptr [rbp - tcc_off], eax */

	/* prog = array->ptrs[index]; */
	EMIT4_off32(0x48, 0x8B, 0x8C, 0xD6,       /* mov rcx, [rsi + rdx * 8 + offsetof(...)] */
		    offsetof(struct bpf_array, ptrs));

	/*
	 * if (prog == NULL)
	 *	goto out;
	 */
	EMIT3(0x48, 0x85, 0xC9);                  /* test rcx,rcx */

	offset = ctx->tail_call_indirect_label - (prog + 2 - start);
	EMIT2(X86_JE, offset);                    /* je out */

	pop_callee_regs(&prog, callee_regs_used);

	EMIT1(0x58);                              /* pop rax */
	if (stack_depth)
		EMIT3_off32(0x48, 0x81, 0xC4,     /* add rsp, sd */
			    round_up(stack_depth, 8));

	/* goto *(prog->bpf_func + X86_TAIL_CALL_OFFSET); */
	EMIT4(0x48, 0x8B, 0x49,                   /* mov rcx, qword ptr [rcx + 32] */
	      offsetof(struct bpf_prog, bpf_func));
	EMIT4(0x48, 0x83, 0xC1,                   /* add rcx, X86_TAIL_CALL_OFFSET */
	      X86_TAIL_CALL_OFFSET);
	/*
	 * Now we're ready to jump into next BPF program
	 * rdi == ctx (1st arg)
	 * rcx == prog->bpf_func + X86_TAIL_CALL_OFFSET
	 */
	emit_indirect_jump(&prog, 1 /* rcx */, ip + (prog - start));

	/* out: */
	ctx->tail_call_indirect_label = prog - start;
	*pprog = prog;
}

static void emit_bpf_tail_call_direct(struct bpf_jit_poke_descriptor *poke,
				      u8 **pprog, u8 *ip,
				      bool *callee_regs_used, u32 stack_depth,
				      struct jit_context *ctx)
{
	int tcc_off = -4 - round_up(stack_depth, 8);
	u8 *prog = *pprog, *start = *pprog;
	int offset;

	/*
	 * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT)
	 *	goto out;
	 */
	EMIT2_off32(0x8B, 0x85, tcc_off);         /* mov eax, dword ptr [rbp - tcc_off] */
	EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);     /* cmp eax, MAX_TAIL_CALL_CNT */

	offset = ctx->tail_call_direct_label - (prog + 2 - start);
	EMIT2(X86_JAE, offset);                   /* jae out */
	EMIT3(0x83, 0xC0, 0x01);                  /* add eax, 1 */
	EMIT2_off32(0x89, 0x85, tcc_off);         /* mov dword ptr [rbp - tcc_off], eax */

	poke->tailcall_bypass = ip + (prog - start);
	poke->adj_off = X86_TAIL_CALL_OFFSET;
	poke->tailcall_target = ip + ctx->tail_call_direct_label - X86_PATCH_SIZE;
	poke->bypass_addr = (u8 *)poke->tailcall_target + X86_PATCH_SIZE;

	emit_jump(&prog, (u8 *)poke->tailcall_target + X86_PATCH_SIZE,
		  poke->tailcall_bypass);

	pop_callee_regs(&prog, callee_regs_used);
	EMIT1(0x58);                              /* pop rax */
	if (stack_depth)
		EMIT3_off32(0x48, 0x81, 0xC4, round_up(stack_depth, 8));

	memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
	prog += X86_PATCH_SIZE;

	/* out: */
	ctx->tail_call_direct_label = prog - start;

	*pprog = prog;
}

static void bpf_tail_call_direct_fixup(struct bpf_prog *prog)
{
	struct bpf_jit_poke_descriptor *poke;
	struct bpf_array *array;
	struct bpf_prog *target;
	int i, ret;

	for (i = 0; i < prog->aux->size_poke_tab; i++) {
		poke = &prog->aux->poke_tab[i];
		if (poke->aux && poke->aux != prog->aux)
			continue;

		WARN_ON_ONCE(READ_ONCE(poke->tailcall_target_stable));

		if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
			continue;

		array = container_of(poke->tail_call.map, struct bpf_array, map);
		mutex_lock(&array->aux->poke_mutex);
		target = array->ptrs[poke->tail_call.key];
		if (target) {
			ret = __bpf_arch_text_poke(poke->tailcall_target,
						   BPF_MOD_JUMP, NULL,
						   (u8 *)target->bpf_func +
						   poke->adj_off);
			BUG_ON(ret < 0);
			ret = __bpf_arch_text_poke(poke->tailcall_bypass,
						   BPF_MOD_JUMP,
						   (u8 *)poke->tailcall_target +
						   X86_PATCH_SIZE, NULL);
			BUG_ON(ret < 0);
		}
		WRITE_ONCE(poke->tailcall_target_stable, true);
		mutex_unlock(&array->aux->poke_mutex);
	}
}

static void emit_mov_imm32(u8 **pprog, bool sign_propagate,
			   u32 dst_reg, const u32 imm32)
{
	u8 *prog = *pprog;
	u8 b1, b2, b3;

	/*
	 * Optimization: if imm32 is positive, use 'mov %eax, imm32'
	 * (which zero-extends imm32) to save 2 bytes.
	 */
	if (sign_propagate && (s32)imm32 < 0) {
		/* 'mov %rax, imm32' sign extends imm32 */
		b1 = add_1mod(0x48, dst_reg);
		b2 = 0xC7;
		b3 = 0xC0;
		EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
		goto done;
	}

	/*
	 * Optimization: if imm32 is zero, use 'xor %eax, %eax'
	 * to save 3 bytes.
	 */
	if (imm32 == 0) {
		if (is_ereg(dst_reg))
			EMIT1(add_2mod(0x40, dst_reg, dst_reg));
		b2 = 0x31; /* xor */
		b3 = 0xC0;
		EMIT2(b2, add_2reg(b3, dst_reg, dst_reg));
		goto done;
	}

	/* mov %eax, imm32 */
	if (is_ereg(dst_reg))
		EMIT1(add_1mod(0x40, dst_reg));
	EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
done:
	*pprog = prog;
}

static void emit_mov_imm64(u8 **pprog, u32 dst_reg,
			   const u32 imm32_hi, const u32 imm32_lo)
{
	u8 *prog = *pprog;

	if (is_uimm32(((u64)imm32_hi << 32) | (u32)imm32_lo)) {
		/*
		 * For emitting plain u32, where sign bit must not be
		 * propagated LLVM tends to load imm64 over mov32
		 * directly, so save couple of bytes by just doing
		 * 'mov %eax, imm32' instead.
		 */
		emit_mov_imm32(&prog, false, dst_reg, imm32_lo);
	} else {
		/* movabsq rax, imm64 */
		EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
		EMIT(imm32_lo, 4);
		EMIT(imm32_hi, 4);
	}

	*pprog = prog;
}

static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg)
{
	u8 *prog = *pprog;

	if (is64) {
		/* mov dst, src */
		EMIT_mov(dst_reg, src_reg);
	} else {
		/* mov32 dst, src */
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT1(add_2mod(0x40, dst_reg, src_reg));
		EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
	}

	*pprog = prog;
}

/* Emit the suffix (ModR/M etc) for addressing *(ptr_reg + off) and val_reg */
static void emit_insn_suffix(u8 **pprog, u32 ptr_reg, u32 val_reg, int off)
{
	u8 *prog = *pprog;

	if (is_imm8(off)) {
		/* 1-byte signed displacement.
		 *
		 * If off == 0 we could skip this and save one extra byte, but
		 * special case of x86 R13 which always needs an offset is not
		 * worth the hassle
		 */
		EMIT2(add_2reg(0x40, ptr_reg, val_reg), off);
	} else {
		/* 4-byte signed displacement */
		EMIT1_off32(add_2reg(0x80, ptr_reg, val_reg), off);
	}
	*pprog = prog;
}

/*
 * Emit a REX byte if it will be necessary to address these registers
 */
static void maybe_emit_mod(u8 **pprog, u32 dst_reg, u32 src_reg, bool is64)
{
	u8 *prog = *pprog;

	if (is64)
		EMIT1(add_2mod(0x48, dst_reg, src_reg));
	else if (is_ereg(dst_reg) || is_ereg(src_reg))
		EMIT1(add_2mod(0x40, dst_reg, src_reg));
	*pprog = prog;
}

/*
 * Similar version of maybe_emit_mod() for a single register
 */
static void maybe_emit_1mod(u8 **pprog, u32 reg, bool is64)
{
	u8 *prog = *pprog;

	if (is64)
		EMIT1(add_1mod(0x48, reg));
	else if (is_ereg(reg))
		EMIT1(add_1mod(0x40, reg));
	*pprog = prog;
}

/* LDX: dst_reg = *(u8*)(src_reg + off) */
static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
{
	u8 *prog = *pprog;

	switch (size) {
	case BPF_B:
		/* Emit 'movzx rax, byte ptr [rax + off]' */
		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
		break;
	case BPF_H:
		/* Emit 'movzx rax, word ptr [rax + off]' */
		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
		break;
	case BPF_W:
		/* Emit 'mov eax, dword ptr [rax+0x14]' */
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
		else
			EMIT1(0x8B);
		break;
	case BPF_DW:
		/* Emit 'mov rax, qword ptr [rax+0x14]' */
		EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
		break;
	}
	emit_insn_suffix(&prog, src_reg, dst_reg, off);
	*pprog = prog;
}

/* STX: *(u8*)(dst_reg + off) = src_reg */
static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
{
	u8 *prog = *pprog;

	switch (size) {
	case BPF_B:
		/* Emit 'mov byte ptr [rax + off], al' */
		if (is_ereg(dst_reg) || is_ereg_8l(src_reg))
			/* Add extra byte for eregs or SIL,DIL,BPL in src_reg */
			EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
		else
			EMIT1(0x88);
		break;
	case BPF_H:
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
		else
			EMIT2(0x66, 0x89);
		break;
	case BPF_W:
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
		else
			EMIT1(0x89);
		break;
	case BPF_DW:
		EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
		break;
	}
	emit_insn_suffix(&prog, dst_reg, src_reg, off);
	*pprog = prog;
}

static int emit_atomic(u8 **pprog, u8 atomic_op,
		       u32 dst_reg, u32 src_reg, s16 off, u8 bpf_size)
{
	u8 *prog = *pprog;

	EMIT1(0xF0); /* lock prefix */

	maybe_emit_mod(&prog, dst_reg, src_reg, bpf_size == BPF_DW);

	/* emit opcode */
	switch (atomic_op) {
	case BPF_ADD:
	case BPF_AND:
	case BPF_OR:
	case BPF_XOR:
		/* lock *(u32/u64*)(dst_reg + off) <op>= src_reg */
		EMIT1(simple_alu_opcodes[atomic_op]);
		break;
	case BPF_ADD | BPF_FETCH:
		/* src_reg = atomic_fetch_add(dst_reg + off, src_reg); */
		EMIT2(0x0F, 0xC1);
		break;
	case BPF_XCHG:
		/* src_reg = atomic_xchg(dst_reg + off, src_reg); */
		EMIT1(0x87);
		break;
	case BPF_CMPXCHG:
		/* r0 = atomic_cmpxchg(dst_reg + off, r0, src_reg); */
		EMIT2(0x0F, 0xB1);
		break;
	default:
		pr_err("bpf_jit: unknown atomic opcode %02x\n", atomic_op);
		return -EFAULT;
	}

	emit_insn_suffix(&prog, dst_reg, src_reg, off);

	*pprog = prog;
	return 0;
}

bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs)
{
	u32 reg = x->fixup >> 8;

	/* jump over faulting load and clear dest register */
	*(unsigned long *)((void *)regs + reg) = 0;
	regs->ip += x->fixup & 0xff;
	return true;
}
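
/*
 * Illustrative note (not part of the original source): the fixup word consumed
 * above is built in do_jit() as (length of the faulting x86 load insn) |
 * (offsetof(struct pt_regs, <dst>) << 8). E.g. for the 4-byte
 * "mov rbx, qword ptr [rax+0x14]" the handler adds 4 to regs->ip and zeroes
 * regs->bx, so the BPF program observes a zeroed destination register instead
 * of taking the fault.
 */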

static void detect_reg_usage(struct bpf_insn *insn, int insn_cnt,
			     bool *regs_used, bool *tail_call_seen)
{
	int i;

	for (i = 1; i <= insn_cnt; i++, insn++) {
		if (insn->code == (BPF_JMP | BPF_TAIL_CALL))
			*tail_call_seen = true;
		if (insn->dst_reg == BPF_REG_6 || insn->src_reg == BPF_REG_6)
			regs_used[0] = true;
		if (insn->dst_reg == BPF_REG_7 || insn->src_reg == BPF_REG_7)
			regs_used[1] = true;
		if (insn->dst_reg == BPF_REG_8 || insn->src_reg == BPF_REG_8)
			regs_used[2] = true;
		if (insn->dst_reg == BPF_REG_9 || insn->src_reg == BPF_REG_9)
			regs_used[3] = true;
	}
}

static void emit_nops(u8 **pprog, int len)
{
	u8 *prog = *pprog;
	int i, noplen;

	while (len > 0) {
		noplen = len;

		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;

		for (i = 0; i < noplen; i++)
			EMIT1(x86_nops[noplen][i]);
		len -= noplen;
	}

	*pprog = prog;
}

/* emit the 3-byte VEX prefix
 *
 * r: same as rex.r, extra bit for ModRM reg field
 * x: same as rex.x, extra bit for SIB index field
 * b: same as rex.b, extra bit for ModRM r/m, or SIB base
 * m: opcode map select, encoding escape bytes e.g. 0x0f38
 * w: same as rex.w (32 bit or 64 bit) or opcode specific
 * src_reg2: additional source reg (encoded as BPF reg)
 * l: vector length (128 bit or 256 bit) or reserved
 * pp: opcode prefix (none, 0x66, 0xf2 or 0xf3)
 */
static void emit_3vex(u8 **pprog, bool r, bool x, bool b, u8 m,
		      bool w, u8 src_reg2, bool l, u8 pp)
{
	u8 *prog = *pprog;
	const u8 b0 = 0xc4; /* first byte of 3-byte VEX prefix */
	u8 b1, b2;
	u8 vvvv = reg2hex[src_reg2];

	/* reg2hex gives only the lower 3 bit of vvvv */
	if (is_ereg(src_reg2))
		vvvv |= 1 << 3;

	/*
	 * 2nd byte of 3-byte VEX prefix
	 * ~ means bit inverted encoding
	 *
	 *    7                           0
	 *  +---+---+---+---+---+---+---+---+
	 *  |~R |~X |~B |         m         |
	 *  +---+---+---+---+---+---+---+---+
	 */
	b1 = (!r << 7) | (!x << 6) | (!b << 5) | (m & 0x1f);
	/*
	 * 3rd byte of 3-byte VEX prefix
	 *
	 *    7                           0
	 *  +---+---+---+---+---+---+---+---+
	 *  | W |     ~vvvv     | L |   pp  |
	 *  +---+---+---+---+---+---+---+---+
	 */
	b2 = (w << 7) | ((~vvvv & 0xf) << 3) | (l << 2) | (pp & 3);

	EMIT3(b0, b1, b2);
	*pprog = prog;
}

/* emit BMI2 shift instruction */
static void emit_shiftx(u8 **pprog, u32 dst_reg, u8 src_reg, bool is64, u8 op)
{
	u8 *prog = *pprog;
	bool r = is_ereg(dst_reg);
	u8 m = 2; /* escape code 0f38 */

	emit_3vex(&prog, r, false, r, m, is64, src_reg, false, op);
	EMIT2(0xf7, add_2reg(0xC0, dst_reg, dst_reg));
	*pprog = prog;
}
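
/*
 * Worked example (illustrative, not part of the original source): a 64-bit
 * BPF_LSH by src_reg == BPF_REG_2 with dst_reg == BPF_REG_0 calls
 * emit_shiftx(&prog, BPF_REG_0, BPF_REG_2, true, 1). The VEX prefix comes out
 * as c4 e2 c9 and the opcode/ModRM as f7 c0, i.e. "shlx rax, rax, rsi":
 * map 0f38, W=1 and pp=1 select the 0x66 (SHLX) form, and ~vvvv encodes rsi
 * as the shift-count register.
 */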

#define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp)))

static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image,
		  int oldproglen, struct jit_context *ctx, bool jmp_padding)
{
	bool tail_call_reachable = bpf_prog->aux->tail_call_reachable;
	struct bpf_insn *insn = bpf_prog->insnsi;
	bool callee_regs_used[4] = {};
	int insn_cnt = bpf_prog->len;
	bool tail_call_seen = false;
	bool seen_exit = false;
	u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
	int i, excnt = 0;
	int ilen, proglen = 0;
	u8 *prog = temp;
	int err;

	detect_reg_usage(insn, insn_cnt, callee_regs_used,
			 &tail_call_seen);

	/* tail call's presence in current prog implies it is reachable */
	tail_call_reachable |= tail_call_seen;

	emit_prologue(&prog, bpf_prog->aux->stack_depth,
		      bpf_prog_was_classic(bpf_prog), tail_call_reachable,
		      bpf_prog->aux->func_idx != 0);
	push_callee_regs(&prog, callee_regs_used);

	ilen = prog - temp;
	if (rw_image)
		memcpy(rw_image + proglen, temp, ilen);
	proglen += ilen;
	addrs[0] = proglen;
	prog = temp;

	for (i = 1; i <= insn_cnt; i++, insn++) {
		const s32 imm32 = insn->imm;
		u32 dst_reg = insn->dst_reg;
		u32 src_reg = insn->src_reg;
		u8 b2 = 0, b3 = 0;
		u8 *start_of_ldx;
		s64 jmp_offset;
		s16 insn_off;
		u8 jmp_cond;
		u8 *func;
		int nops;

		switch (insn->code) {
			/* ALU */
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU64 | BPF_ADD | BPF_X:
		case BPF_ALU64 | BPF_SUB | BPF_X:
		case BPF_ALU64 | BPF_AND | BPF_X:
		case BPF_ALU64 | BPF_OR | BPF_X:
		case BPF_ALU64 | BPF_XOR | BPF_X:
			maybe_emit_mod(&prog, dst_reg, src_reg,
				       BPF_CLASS(insn->code) == BPF_ALU64);
			b2 = simple_alu_opcodes[BPF_OP(insn->code)];
			EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
			break;

		case BPF_ALU64 | BPF_MOV | BPF_X:
		case BPF_ALU | BPF_MOV | BPF_X:
			emit_mov_reg(&prog,
				     BPF_CLASS(insn->code) == BPF_ALU64,
				     dst_reg, src_reg);
			break;

			/* neg dst */
		case BPF_ALU | BPF_NEG:
		case BPF_ALU64 | BPF_NEG:
			maybe_emit_1mod(&prog, dst_reg,
					BPF_CLASS(insn->code) == BPF_ALU64);
			EMIT2(0xF7, add_1reg(0xD8, dst_reg));
			break;

		case BPF_ALU | BPF_ADD | BPF_K:
		case BPF_ALU | BPF_SUB | BPF_K:
		case BPF_ALU | BPF_AND | BPF_K:
		case BPF_ALU | BPF_OR | BPF_K:
		case BPF_ALU | BPF_XOR | BPF_K:
		case BPF_ALU64 | BPF_ADD | BPF_K:
		case BPF_ALU64 | BPF_SUB | BPF_K:
		case BPF_ALU64 | BPF_AND | BPF_K:
		case BPF_ALU64 | BPF_OR | BPF_K:
		case BPF_ALU64 | BPF_XOR | BPF_K:
			maybe_emit_1mod(&prog, dst_reg,
					BPF_CLASS(insn->code) == BPF_ALU64);

			/*
			 * b3 holds 'normal' opcode, b2 short form only valid
			 * in case dst is eax/rax.
			 */
			switch (BPF_OP(insn->code)) {
			case BPF_ADD:
				b3 = 0xC0;
				b2 = 0x05;
				break;
			case BPF_SUB:
				b3 = 0xE8;
				b2 = 0x2D;
				break;
			case BPF_AND:
				b3 = 0xE0;
				b2 = 0x25;
				break;
			case BPF_OR:
				b3 = 0xC8;
				b2 = 0x0D;
				break;
			case BPF_XOR:
				b3 = 0xF0;
				b2 = 0x35;
				break;
			}

			if (is_imm8(imm32))
				EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
			else if (is_axreg(dst_reg))
				EMIT1_off32(b2, imm32);
			else
				EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
			break;

		case BPF_ALU64 | BPF_MOV | BPF_K:
		case BPF_ALU | BPF_MOV | BPF_K:
			emit_mov_imm32(&prog, BPF_CLASS(insn->code) == BPF_ALU64,
				       dst_reg, imm32);
			break;

		case BPF_LD | BPF_IMM | BPF_DW:
			emit_mov_imm64(&prog, dst_reg, insn[1].imm, insn[0].imm);
			insn++;
			i++;
			break;

			/* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
		case BPF_ALU | BPF_MOD | BPF_X:
		case BPF_ALU | BPF_DIV | BPF_X:
		case BPF_ALU | BPF_MOD | BPF_K:
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU64 | BPF_MOD | BPF_X:
		case BPF_ALU64 | BPF_DIV | BPF_X:
		case BPF_ALU64 | BPF_MOD | BPF_K:
		case BPF_ALU64 | BPF_DIV | BPF_K: {
			bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;

			if (dst_reg != BPF_REG_0)
				EMIT1(0x50); /* push rax */
			if (dst_reg != BPF_REG_3)
				EMIT1(0x52); /* push rdx */

			if (BPF_SRC(insn->code) == BPF_X) {
				if (src_reg == BPF_REG_0 ||
				    src_reg == BPF_REG_3) {
					/* mov r11, src_reg */
					EMIT_mov(AUX_REG, src_reg);
					src_reg = AUX_REG;
				}
			} else {
				/* mov r11, imm32 */
				EMIT3_off32(0x49, 0xC7, 0xC3, imm32);
				src_reg = AUX_REG;
			}

			if (dst_reg != BPF_REG_0)
				/* mov rax, dst_reg */
				emit_mov_reg(&prog, is64, BPF_REG_0, dst_reg);

			/*
			 * xor edx, edx
			 * equivalent to 'xor rdx, rdx', but one byte less
			 */
			EMIT2(0x31, 0xd2);

			/* div src_reg */
			maybe_emit_1mod(&prog, src_reg, is64);
			EMIT2(0xF7, add_1reg(0xF0, src_reg));

			if (BPF_OP(insn->code) == BPF_MOD &&
			    dst_reg != BPF_REG_3)
				/* mov dst_reg, rdx */
				emit_mov_reg(&prog, is64, dst_reg, BPF_REG_3);
			else if (BPF_OP(insn->code) == BPF_DIV &&
				 dst_reg != BPF_REG_0)
				/* mov dst_reg, rax */
				emit_mov_reg(&prog, is64, dst_reg, BPF_REG_0);

			if (dst_reg != BPF_REG_3)
				EMIT1(0x5A); /* pop rdx */
			if (dst_reg != BPF_REG_0)
				EMIT1(0x58); /* pop rax */
			break;
		}

		case BPF_ALU | BPF_MUL | BPF_K:
		case BPF_ALU64 | BPF_MUL | BPF_K:
			maybe_emit_mod(&prog, dst_reg, dst_reg,
				       BPF_CLASS(insn->code) == BPF_ALU64);

			if (is_imm8(imm32))
				/* imul dst_reg, dst_reg, imm8 */
				EMIT3(0x6B, add_2reg(0xC0, dst_reg, dst_reg),
				      imm32);
			else
				/* imul dst_reg, dst_reg, imm32 */
				EMIT2_off32(0x69,
					    add_2reg(0xC0, dst_reg, dst_reg),
					    imm32);
			break;

		case BPF_ALU | BPF_MUL | BPF_X:
		case BPF_ALU64 | BPF_MUL | BPF_X:
			maybe_emit_mod(&prog, src_reg, dst_reg,
				       BPF_CLASS(insn->code) == BPF_ALU64);

			/* imul dst_reg, src_reg */
			EMIT3(0x0F, 0xAF, add_2reg(0xC0, src_reg, dst_reg));
			break;

			/* Shifts */
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_K:
		case BPF_ALU | BPF_ARSH | BPF_K:
		case BPF_ALU64 | BPF_LSH | BPF_K:
		case BPF_ALU64 | BPF_RSH | BPF_K:
		case BPF_ALU64 | BPF_ARSH | BPF_K:
			maybe_emit_1mod(&prog, dst_reg,
					BPF_CLASS(insn->code) == BPF_ALU64);

			b3 = simple_alu_opcodes[BPF_OP(insn->code)];
			if (imm32 == 1)
				EMIT2(0xD1, add_1reg(b3, dst_reg));
			else
				EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
			break;

		case BPF_ALU | BPF_LSH | BPF_X:
		case BPF_ALU | BPF_RSH | BPF_X:
		case BPF_ALU | BPF_ARSH | BPF_X:
		case BPF_ALU64 | BPF_LSH | BPF_X:
		case BPF_ALU64 | BPF_RSH | BPF_X:
		case BPF_ALU64 | BPF_ARSH | BPF_X:
			/* BMI2 shifts aren't better when shift count is already in rcx */
			if (boot_cpu_has(X86_FEATURE_BMI2) && src_reg != BPF_REG_4) {
				/* shrx/sarx/shlx dst_reg, dst_reg, src_reg */
				bool w = (BPF_CLASS(insn->code) == BPF_ALU64);
				u8 op;

				switch (BPF_OP(insn->code)) {
				case BPF_LSH:
					op = 1; /* prefix 0x66 */
					break;
				case BPF_RSH:
					op = 3; /* prefix 0xf2 */
					break;
				case BPF_ARSH:
					op = 2; /* prefix 0xf3 */
					break;
				}

				emit_shiftx(&prog, dst_reg, src_reg, w, op);

				break;
			}

			if (src_reg != BPF_REG_4) { /* common case */
				/* Check for bad case when dst_reg == rcx */
				if (dst_reg == BPF_REG_4) {
					/* mov r11, dst_reg */
					EMIT_mov(AUX_REG, dst_reg);
					dst_reg = AUX_REG;
				} else {
					EMIT1(0x51); /* push rcx */
				}
				/* mov rcx, src_reg */
				EMIT_mov(BPF_REG_4, src_reg);
			}

			/* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */
			maybe_emit_1mod(&prog, dst_reg,
					BPF_CLASS(insn->code) == BPF_ALU64);

			b3 = simple_alu_opcodes[BPF_OP(insn->code)];
			EMIT2(0xD3, add_1reg(b3, dst_reg));

			if (src_reg != BPF_REG_4) {
				if (insn->dst_reg == BPF_REG_4)
					/* mov dst_reg, r11 */
					EMIT_mov(insn->dst_reg, AUX_REG);
				else
					EMIT1(0x59); /* pop rcx */
			}

			break;

		case BPF_ALU | BPF_END | BPF_FROM_BE:
			switch (imm32) {
			case 16:
				/* Emit 'ror %ax, 8' to swap lower 2 bytes */
				EMIT1(0x66);
				if (is_ereg(dst_reg))
					EMIT1(0x41);
				EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);

				/* Emit 'movzwl eax, ax' */
				if (is_ereg(dst_reg))
					EMIT3(0x45, 0x0F, 0xB7);
				else
					EMIT2(0x0F, 0xB7);
				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 32:
				/* Emit 'bswap eax' to swap lower 4 bytes */
				if (is_ereg(dst_reg))
					EMIT2(0x41, 0x0F);
				else
					EMIT1(0x0F);
				EMIT1(add_1reg(0xC8, dst_reg));
				break;
			case 64:
				/* Emit 'bswap rax' to swap 8 bytes */
				EMIT3(add_1mod(0x48, dst_reg), 0x0F,
				      add_1reg(0xC8, dst_reg));
				break;
			}
			break;

		case BPF_ALU | BPF_END | BPF_FROM_LE:
			switch (imm32) {
			case 16:
				/*
				 * Emit 'movzwl eax, ax' to zero extend 16-bit
				 * into 64 bit
				 */
				if (is_ereg(dst_reg))
					EMIT3(0x45, 0x0F, 0xB7);
				else
					EMIT2(0x0F, 0xB7);
				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 32:
				/* Emit 'mov eax, eax' to clear upper 32-bits */
				if (is_ereg(dst_reg))
					EMIT1(0x45);
				EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 64:
				/* nop */
				break;
			}
			break;

			/* speculation barrier */
		case BPF_ST | BPF_NOSPEC:
			EMIT_LFENCE();
			break;

			/* ST: *(u8*)(dst_reg + off) = imm */
		case BPF_ST | BPF_MEM | BPF_B:
			if (is_ereg(dst_reg))
				EMIT2(0x41, 0xC6);
			else
				EMIT1(0xC6);
			goto st;
		case BPF_ST | BPF_MEM | BPF_H:
			if (is_ereg(dst_reg))
				EMIT3(0x66, 0x41, 0xC7);
			else
				EMIT2(0x66, 0xC7);
			goto st;
		case BPF_ST | BPF_MEM | BPF_W:
			if (is_ereg(dst_reg))
				EMIT2(0x41, 0xC7);
			else
				EMIT1(0xC7);
			goto st;
		case BPF_ST | BPF_MEM | BPF_DW:
			EMIT2(add_1mod(0x48, dst_reg), 0xC7);

st:			if (is_imm8(insn->off))
				EMIT2(add_1reg(0x40, dst_reg), insn->off);
			else
				EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);

			EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
			break;

			/* STX: *(u8*)(dst_reg + off) = src_reg */
		case BPF_STX | BPF_MEM | BPF_B:
		case BPF_STX | BPF_MEM | BPF_H:
		case BPF_STX | BPF_MEM | BPF_W:
		case BPF_STX | BPF_MEM | BPF_DW:
			emit_stx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
			break;

			/* LDX: dst_reg = *(u8*)(src_reg + off) */
		case BPF_LDX | BPF_MEM | BPF_B:
		case BPF_LDX | BPF_PROBE_MEM | BPF_B:
		case BPF_LDX | BPF_MEM | BPF_H:
		case BPF_LDX | BPF_PROBE_MEM | BPF_H:
		case BPF_LDX | BPF_MEM | BPF_W:
		case BPF_LDX | BPF_PROBE_MEM | BPF_W:
		case BPF_LDX | BPF_MEM | BPF_DW:
		case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
			insn_off = insn->off;

			if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
				/* Conservatively check that src_reg + insn->off is a kernel address:
				 *   src_reg + insn->off >= TASK_SIZE_MAX + PAGE_SIZE
				 * src_reg is used as scratch for src_reg += insn->off and restored
				 * after emit_ldx if necessary
				 */

				u64 limit = TASK_SIZE_MAX + PAGE_SIZE;
				u8 *end_of_jmp;

				/* At end of these emitted checks, insn->off will have been added
				 * to src_reg, so no need to do relative load with insn->off offset
				 */
				insn_off = 0;

				/* movabsq r11, limit */
				EMIT2(add_1mod(0x48, AUX_REG), add_1reg(0xB8, AUX_REG));
				EMIT((u32)limit, 4);
				EMIT(limit >> 32, 4);

				if (insn->off) {
					/* add src_reg, insn->off */
					maybe_emit_1mod(&prog, src_reg, true);
					EMIT2_off32(0x81, add_1reg(0xC0, src_reg), insn->off);
				}

				/* cmp src_reg, r11 */
				maybe_emit_mod(&prog, src_reg, AUX_REG, true);
				EMIT2(0x39, add_2reg(0xC0, src_reg, AUX_REG));

				/* if unsigned '>=', goto load */
				EMIT2(X86_JAE, 0);
				end_of_jmp = prog;

				/* xor dst_reg, dst_reg */
				emit_mov_imm32(&prog, false, dst_reg, 0);
				/* jmp byte_after_ldx */
				EMIT2(0xEB, 0);

				/* populate jmp_offset for JAE above to jump to start_of_ldx */
				start_of_ldx = prog;
				end_of_jmp[-1] = start_of_ldx - end_of_jmp;
			}
			emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn_off);
			if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
				struct exception_table_entry *ex;
				u8 *_insn = image + proglen + (start_of_ldx - temp);
				s64 delta;

				/* populate jmp_offset for JMP above */
				start_of_ldx[-1] = prog - start_of_ldx;

				if (insn->off && src_reg != dst_reg) {
					/* sub src_reg, insn->off
					 * Restore src_reg after "add src_reg, insn->off" in prev
					 * if statement. But if src_reg == dst_reg, emit_ldx
					 * above already clobbered src_reg, so no need to restore.
					 * If add src_reg, insn->off was unnecessary, no need to
					 * restore either.
					 */
					maybe_emit_1mod(&prog, src_reg, true);
					EMIT2_off32(0x81, add_1reg(0xE8, src_reg), insn->off);
				}

				if (!bpf_prog->aux->extable)
					break;

				if (excnt >= bpf_prog->aux->num_exentries) {
					pr_err("ex gen bug\n");
					return -EFAULT;
				}
				ex = &bpf_prog->aux->extable[excnt++];

				delta = _insn - (u8 *)&ex->insn;
				if (!is_simm32(delta)) {
					pr_err("extable->insn doesn't fit into 32-bit\n");
					return -EFAULT;
				}
				/* switch ex to rw buffer for writes */
				ex = (void *)rw_image + ((void *)ex - (void *)image);

				ex->insn = delta;

				ex->data = EX_TYPE_BPF;

				if (dst_reg > BPF_REG_9) {
					pr_err("verifier error\n");
					return -EFAULT;
				}
				/*
				 * Compute size of x86 insn and its target dest x86 register.
				 * ex_handler_bpf() will use lower 8 bits to adjust
				 * pt_regs->ip to jump over this x86 instruction
				 * and upper bits to figure out which pt_regs to zero out.
				 * End result: x86 insn "mov rbx, qword ptr [rax+0x14]"
				 * of 4 bytes will be ignored and rbx will be zero inited.
				 */
				ex->fixup = (prog - start_of_ldx) | (reg2pt_regs[dst_reg] << 8);
			}
			break;

		case BPF_STX | BPF_ATOMIC | BPF_W:
		case BPF_STX | BPF_ATOMIC | BPF_DW:
			if (insn->imm == (BPF_AND | BPF_FETCH) ||
			    insn->imm == (BPF_OR | BPF_FETCH) ||
			    insn->imm == (BPF_XOR | BPF_FETCH)) {
				bool is64 = BPF_SIZE(insn->code) == BPF_DW;
				u32 real_src_reg = src_reg;
				u32 real_dst_reg = dst_reg;
				u8 *branch_target;

				/*
				 * Can't be implemented with a single x86 insn.
				 * Need to do a CMPXCHG loop.
				 */

				/* Will need RAX as a CMPXCHG operand so save R0 */
				emit_mov_reg(&prog, true, BPF_REG_AX, BPF_REG_0);
				if (src_reg == BPF_REG_0)
					real_src_reg = BPF_REG_AX;
				if (dst_reg == BPF_REG_0)
					real_dst_reg = BPF_REG_AX;

				branch_target = prog;
				/* Load old value */
				emit_ldx(&prog, BPF_SIZE(insn->code),
					 BPF_REG_0, real_dst_reg, insn->off);
				/*
				 * Perform the (commutative) operation locally,
				 * put the result in the AUX_REG.
				 */
				emit_mov_reg(&prog, is64, AUX_REG, BPF_REG_0);
				maybe_emit_mod(&prog, AUX_REG, real_src_reg, is64);
				EMIT2(simple_alu_opcodes[BPF_OP(insn->imm)],
				      add_2reg(0xC0, AUX_REG, real_src_reg));
				/* Attempt to swap in new value */
				err = emit_atomic(&prog, BPF_CMPXCHG,
						  real_dst_reg, AUX_REG,
						  insn->off,
						  BPF_SIZE(insn->code));
				if (WARN_ON(err))
					return err;
				/*
				 * ZF tells us whether we won the race. If it's
				 * cleared we need to try again.
				 */
				EMIT2(X86_JNE, -(prog - branch_target) - 2);
				/* Return the pre-modification value */
				emit_mov_reg(&prog, is64, real_src_reg, BPF_REG_0);
				/* Restore R0 after clobbering RAX */
				emit_mov_reg(&prog, true, BPF_REG_0, BPF_REG_AX);
				break;
			}

			err = emit_atomic(&prog, insn->imm, dst_reg, src_reg,
					  insn->off, BPF_SIZE(insn->code));
			if (err)
				return err;
			break;

			/* call */
		case BPF_JMP | BPF_CALL:
			func = (u8 *) __bpf_call_base + imm32;
			if (tail_call_reachable) {
				/* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */
				EMIT3_off32(0x48, 0x8B, 0x85,
					    -round_up(bpf_prog->aux->stack_depth, 8) - 8);
				if (!imm32 || emit_call(&prog, func, image + addrs[i - 1] + 7))
					return -EINVAL;
			} else {
				if (!imm32 || emit_call(&prog, func, image + addrs[i - 1]))
					return -EINVAL;
			}
			break;

		case BPF_JMP | BPF_TAIL_CALL:
			if (imm32)
				emit_bpf_tail_call_direct(&bpf_prog->aux->poke_tab[imm32 - 1],
							  &prog, image + addrs[i - 1],
							  callee_regs_used,
							  bpf_prog->aux->stack_depth,
							  ctx);
			else
				emit_bpf_tail_call_indirect(&prog,
							    callee_regs_used,
							    bpf_prog->aux->stack_depth,
							    image + addrs[i - 1],
							    ctx);
			break;

			/* cond jump */
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JNE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JLT | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JLE | BPF_X:
		case BPF_JMP | BPF_JSGT | BPF_X:
		case BPF_JMP | BPF_JSLT | BPF_X:
		case BPF_JMP | BPF_JSGE | BPF_X:
		case BPF_JMP | BPF_JSLE | BPF_X:
		case BPF_JMP32 | BPF_JEQ | BPF_X:
		case BPF_JMP32 | BPF_JNE | BPF_X:
		case BPF_JMP32 | BPF_JGT | BPF_X:
		case BPF_JMP32 | BPF_JLT | BPF_X:
		case BPF_JMP32 | BPF_JGE | BPF_X:
		case BPF_JMP32 | BPF_JLE | BPF_X:
		case BPF_JMP32 | BPF_JSGT | BPF_X:
		case BPF_JMP32 | BPF_JSLT | BPF_X:
		case BPF_JMP32 | BPF_JSGE | BPF_X:
		case BPF_JMP32 | BPF_JSLE | BPF_X:
			/* cmp dst_reg, src_reg */
			maybe_emit_mod(&prog, dst_reg, src_reg,
				       BPF_CLASS(insn->code) == BPF_JMP);
			EMIT2(0x39, add_2reg(0xC0, dst_reg, src_reg));
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JSET | BPF_X:
		case BPF_JMP32 | BPF_JSET | BPF_X:
			/* test dst_reg, src_reg */
			maybe_emit_mod(&prog, dst_reg, src_reg,
				       BPF_CLASS(insn->code) == BPF_JMP);
			EMIT2(0x85, add_2reg(0xC0, dst_reg, src_reg));
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP32 | BPF_JSET | BPF_K:
			/* test dst_reg, imm32 */
			maybe_emit_1mod(&prog, dst_reg,
					BPF_CLASS(insn->code) == BPF_JMP);
			EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JNE | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JLT | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JLE | BPF_K:
		case BPF_JMP | BPF_JSGT | BPF_K:
		case BPF_JMP | BPF_JSLT | BPF_K:
		case BPF_JMP | BPF_JSGE | BPF_K:
		case BPF_JMP | BPF_JSLE | BPF_K:
		case BPF_JMP32 | BPF_JEQ | BPF_K:
		case BPF_JMP32 | BPF_JNE | BPF_K:
		case BPF_JMP32 | BPF_JGT | BPF_K:
		case BPF_JMP32 | BPF_JLT | BPF_K:
		case BPF_JMP32 | BPF_JGE | BPF_K:
		case BPF_JMP32 | BPF_JLE | BPF_K:
		case BPF_JMP32 | BPF_JSGT | BPF_K:
		case BPF_JMP32 | BPF_JSLT | BPF_K:
		case BPF_JMP32 | BPF_JSGE | BPF_K:
		case BPF_JMP32 | BPF_JSLE | BPF_K:
			/* test dst_reg, dst_reg to save one extra byte */
			if (imm32 == 0) {
				maybe_emit_mod(&prog, dst_reg, dst_reg,
					       BPF_CLASS(insn->code) == BPF_JMP);
				EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg));
				goto emit_cond_jmp;
			}

			/* cmp dst_reg, imm8/32 */
			maybe_emit_1mod(&prog, dst_reg,
					BPF_CLASS(insn->code) == BPF_JMP);

			if (is_imm8(imm32))
				EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
			else
				EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32);

emit_cond_jmp:		/* Convert BPF opcode to x86 */
			switch (BPF_OP(insn->code)) {
			case BPF_JEQ:
				jmp_cond = X86_JE;
				break;
			case BPF_JSET:
			case BPF_JNE:
				jmp_cond = X86_JNE;
				break;
			case BPF_JGT:
				/* GT is unsigned '>', JA in x86 */
				jmp_cond = X86_JA;
				break;
			case BPF_JLT:
				/* LT is unsigned '<', JB in x86 */
				jmp_cond = X86_JB;
				break;
			case BPF_JGE:
				/* GE is unsigned '>=', JAE in x86 */
				jmp_cond = X86_JAE;
				break;
			case BPF_JLE:
				/* LE is unsigned '<=', JBE in x86 */
				jmp_cond = X86_JBE;
				break;
			case BPF_JSGT:
				/* Signed '>', GT in x86 */
				jmp_cond = X86_JG;
				break;
			case BPF_JSLT:
				/* Signed '<', LT in x86 */
				jmp_cond = X86_JL;
				break;
			case BPF_JSGE:
				/* Signed '>=', GE in x86 */
				jmp_cond = X86_JGE;
				break;
			case BPF_JSLE:
				/* Signed '<=', LE in x86 */
				jmp_cond = X86_JLE;
				break;
			default: /* to silence GCC warning */
				return -EFAULT;
			}
			jmp_offset = addrs[i + insn->off] - addrs[i];
			if (is_imm8(jmp_offset)) {
				if (jmp_padding) {
					/* To keep the jmp_offset valid, the extra bytes are
					 * padded before the jump insn, so we subtract the
					 * 2 bytes of jmp_cond insn from INSN_SZ_DIFF.
					 *
					 * If the previous pass already emits an imm8
					 * jmp_cond, then this BPF insn won't shrink, so
					 * "nops" is 0.
					 *
					 * On the other hand, if the previous pass emits an
					 * imm32 jmp_cond, the extra 4 bytes(*) is padded to
					 * keep the image from shrinking further.
					 *
					 * (*) imm32 jmp_cond is 6 bytes, and imm8 jmp_cond
					 *     is 2 bytes, so the size difference is 4 bytes.
					 */
					nops = INSN_SZ_DIFF - 2;
					if (nops != 0 && nops != 4) {
						pr_err("unexpected jmp_cond padding: %d bytes\n",
						       nops);
						return -EFAULT;
					}
					emit_nops(&prog, nops);
				}
				EMIT2(jmp_cond, jmp_offset);
			} else if (is_simm32(jmp_offset)) {
				EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
			} else {
				pr_err("cond_jmp gen bug %llx\n", jmp_offset);
				return -EFAULT;
			}

			break;

		case BPF_JMP | BPF_JA:
			if (insn->off == -1)
				/* -1 jmp instructions will always jump
				 * backwards two bytes. Explicitly handling
				 * this case avoids wasting too many passes
				 * when there are long sequences of replaced
				 * dead code.
				 */
				jmp_offset = -2;
			else
				jmp_offset = addrs[i + insn->off] - addrs[i];

			if (!jmp_offset) {
				/*
				 * If jmp_padding is enabled, the extra nops will
				 * be inserted. Otherwise, optimize out nop jumps.
				 */
				if (jmp_padding) {
					/* There are 3 possible conditions.
					 * (1) This BPF_JA is already optimized out in
					 *     the previous run, so there is no need
					 *     to pad any extra byte (0 byte).
					 * (2) The previous pass emits an imm8 jmp,
					 *     so we pad 2 bytes to match the previous
					 *     insn size.
					 * (3) Similarly, the previous pass emits an
					 *     imm32 jmp, and 5 bytes is padded.
					 */
					nops = INSN_SZ_DIFF;
					if (nops != 0 && nops != 2 && nops != 5) {
						pr_err("unexpected nop jump padding: %d bytes\n",
						       nops);
						return -EFAULT;
					}
					emit_nops(&prog, nops);
				}
				break;
			}
emit_jmp:
			if (is_imm8(jmp_offset)) {
				if (jmp_padding) {
					/* To avoid breaking jmp_offset, the extra bytes
					 * are padded before the actual jmp insn, so
					 * 2 bytes is subtracted from INSN_SZ_DIFF.
					 *
					 * If the previous pass already emits an imm8
					 * jmp, there is nothing to pad (0 byte).
					 *
					 * If it emits an imm32 jmp (5 bytes) previously
					 * and now an imm8 jmp (2 bytes), then we pad
					 * (5 - 2 = 3) bytes to stop the image from
					 * shrinking further.
					 */
					nops = INSN_SZ_DIFF - 2;
					if (nops != 0 && nops != 3) {
						pr_err("unexpected jump padding: %d bytes\n",
						       nops);
						return -EFAULT;
					}
					emit_nops(&prog, INSN_SZ_DIFF - 2);
				}
				EMIT2(0xEB, jmp_offset);
			} else if (is_simm32(jmp_offset)) {
				EMIT1_off32(0xE9, jmp_offset);
			} else {
				pr_err("jmp gen bug %llx\n", jmp_offset);
				return -EFAULT;
			}
			break;

		case BPF_JMP | BPF_EXIT:
			if (seen_exit) {
				jmp_offset = ctx->cleanup_addr - addrs[i];
				goto emit_jmp;
			}
			seen_exit = true;
			/* Update cleanup_addr */
			ctx->cleanup_addr = proglen;
			pop_callee_regs(&prog, callee_regs_used);
			EMIT1(0xC9);         /* leave */
			emit_return(&prog, image + addrs[i - 1] + (prog - temp));
			break;

		default:
			/*
			 * By design x86-64 JIT should support all BPF instructions.
			 * This error will be seen if new instruction was added
			 * to the interpreter, but not to the JIT, or if there is
			 * junk in bpf_prog.
			 */
			pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
			return -EINVAL;
		}

		ilen = prog - temp;
		if (ilen > BPF_MAX_INSN_SIZE) {
			pr_err("bpf_jit: fatal insn size error\n");
			return -EFAULT;
		}

		if (image) {
			/*
			 * When populating the image, assert that:
			 *
			 *  i) We do not write beyond the allocated space, and
			 * ii) addrs[i] did not change from the prior run, in order
			 *     to validate assumptions made for computing branch
			 *     displacements.
			 */
			if (unlikely(proglen + ilen > oldproglen ||
				     proglen + ilen != addrs[i])) {
				pr_err("bpf_jit: fatal error\n");
				return -EFAULT;
			}
			memcpy(rw_image + proglen, temp, ilen);
		}
		proglen += ilen;
		addrs[i] = proglen;
		prog = temp;
	}

	if (image && excnt != bpf_prog->aux->num_exentries) {
		pr_err("extable is not populated\n");
		return -EFAULT;
	}
	return proglen;
}

static void save_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
		      int stack_size)
{
	int i, j, arg_size, nr_regs;
	/* Store function arguments to stack.
	 * For a function that accepts two pointers the sequence will be:
	 * mov QWORD PTR [rbp-0x10],rdi
	 * mov QWORD PTR [rbp-0x8],rsi
	 */
	for (i = 0, j = 0; i < min(nr_args, 6); i++) {
		if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG) {
			nr_regs = (m->arg_size[i] + 7) / 8;
			arg_size = 8;
		} else {
			nr_regs = 1;
			arg_size = m->arg_size[i];
		}

		while (nr_regs) {
			emit_stx(prog, bytes_to_bpf_size(arg_size),
				 BPF_REG_FP,
				 j == 5 ? X86_REG_R9 : BPF_REG_1 + j,
				 -(stack_size - j * 8));
			nr_regs--;
			j++;
		}
	}
}

static void restore_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
			 int stack_size)
{
	int i, j, arg_size, nr_regs;

	/* Restore function arguments from stack.
	 * For a function that accepts two pointers the sequence will be:
	 * EMIT4(0x48, 0x8B, 0x7D, 0xF0); mov rdi,QWORD PTR [rbp-0x10]
	 * EMIT4(0x48, 0x8B, 0x75, 0xF8); mov rsi,QWORD PTR [rbp-0x8]
	 */
	for (i = 0, j = 0; i < min(nr_args, 6); i++) {
		if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG) {
			nr_regs = (m->arg_size[i] + 7) / 8;
			arg_size = 8;
		} else {
			nr_regs = 1;
			arg_size = m->arg_size[i];
		}

		while (nr_regs) {
			emit_ldx(prog, bytes_to_bpf_size(arg_size),
				 j == 5 ? X86_REG_R9 : BPF_REG_1 + j,
				 BPF_REG_FP,
				 -(stack_size - j * 8));
			nr_regs--;
			j++;
		}
	}
}

static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
			   struct bpf_tramp_link *l, int stack_size,
			   int run_ctx_off, bool save_ret)
{
	u8 *prog = *pprog;
	u8 *jmp_insn;
	int ctx_cookie_off = offsetof(struct bpf_tramp_run_ctx, bpf_cookie);
	struct bpf_prog *p = l->link.prog;
	u64 cookie = l->cookie;

	/* mov rdi, cookie */
	emit_mov_imm64(&prog, BPF_REG_1, (long) cookie >> 32, (u32) (long) cookie);

	/* Prepare struct bpf_tramp_run_ctx.
	 *
	 * bpf_tramp_run_ctx is already preserved by
	 * arch_prepare_bpf_trampoline().
	 *
	 * mov QWORD PTR [rbp - run_ctx_off + ctx_cookie_off], rdi
	 */
	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_1, -run_ctx_off + ctx_cookie_off);

	/* arg1: mov rdi, progs[i] */
	emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
	/* arg2: lea rsi, [rbp - ctx_cookie_off] */
	EMIT4(0x48, 0x8D, 0x75, -run_ctx_off);

	if (emit_call(&prog, bpf_trampoline_enter(p), prog))
		return -EINVAL;
	/* remember prog start time returned by __bpf_prog_enter */
	emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);

	/* if (__bpf_prog_enter*(prog) == 0)
	 *	goto skip_exec_of_prog;
	 */
	EMIT3(0x48, 0x85, 0xC0); /* test rax,rax */
	/* emit 2 nops that will be replaced with JE insn */
	jmp_insn = prog;
	emit_nops(&prog, 2);

	/* arg1: lea rdi, [rbp - stack_size] */
	EMIT4(0x48, 0x8D, 0x7D, -stack_size);
	/* arg2: progs[i]->insnsi for interpreter */
	if (!p->jited)
		emit_mov_imm64(&prog, BPF_REG_2,
			       (long) p->insnsi >> 32,
			       (u32) (long) p->insnsi);
	/* call JITed bpf program or interpreter */
	if (emit_call(&prog, p->bpf_func, prog))
		return -EINVAL;

	/*
	 * BPF_TRAMP_MODIFY_RETURN trampolines can modify the return
	 * of the previous call which is then passed on the stack to
	 * the next BPF program.
	 *
	 * BPF_TRAMP_FENTRY trampoline may need to return the return
	 * value of BPF_PROG_TYPE_STRUCT_OPS prog.
	 */
	if (save_ret)
		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);

	/* replace 2 nops with JE insn, since jmp target is known */
	jmp_insn[0] = X86_JE;
	jmp_insn[1] = prog - jmp_insn - 2;

	/* arg1: mov rdi, progs[i] */
	emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
	/* arg2: mov rsi, rbx <- start time in nsec */
	emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
	/* arg3: lea rdx, [rbp - run_ctx_off] */
	EMIT4(0x48, 0x8D, 0x55, -run_ctx_off);
	if (emit_call(&prog, bpf_trampoline_exit(p), prog))
		return -EINVAL;

	*pprog = prog;
	return 0;
}

static void emit_align(u8 **pprog, u32 align)
{
	u8 *target, *prog = *pprog;

	target = PTR_ALIGN(prog, align);
	if (target != prog)
		emit_nops(&prog, target - prog);

	*pprog = prog;
}

static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond)
{
	u8 *prog = *pprog;
	s64 offset;

	offset = func - (ip + 2 + 4);
	if (!is_simm32(offset)) {
		pr_err("Target %p is out of range\n", func);
		return -EINVAL;
	}
	EMIT2_off32(0x0F, jmp_cond + 0x10, offset);
	*pprog = prog;
	return 0;
}

static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
		      struct bpf_tramp_links *tl, int stack_size,
		      int run_ctx_off, bool save_ret)
{
	int i;
	u8 *prog = *pprog;

	for (i = 0; i < tl->nr_links; i++) {
		if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size,
				    run_ctx_off, save_ret))
			return -EINVAL;
	}
	*pprog = prog;
	return 0;
}

static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
			      struct bpf_tramp_links *tl, int stack_size,
			      int run_ctx_off, u8 **branches)
{
	u8 *prog = *pprog;
	int i;

	/* The first fmod_ret program will receive a garbage return value.
	 * Set this to 0 to avoid confusing the program.
	 */
	emit_mov_imm32(&prog, false, BPF_REG_0, 0);
	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
	for (i = 0; i < tl->nr_links; i++) {
		if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size, run_ctx_off, true))
			return -EINVAL;

		/* mod_ret prog stored return value into [rbp - 8]. Emit:
		 * if (*(u64 *)(rbp - 8) != 0)
		 *	goto do_fexit;
		 */
		/* cmp QWORD PTR [rbp - 0x8], 0x0 */
		EMIT4(0x48, 0x83, 0x7d, 0xf8); EMIT1(0x00);

		/* Save the location of the branch and Generate 6 nops
		 * (4 bytes for an offset and 2 bytes for the jump) These nops
		 * are replaced with a conditional jump once do_fexit (i.e. the
		 * start of the fexit invocation) is finalized.
2049 */
2050 branches[i] = prog;
2051 emit_nops(&prog, 4 + 2);
2052 }
2053
2054 *pprog = prog;
2055 return 0;
2056 }
2057
2058 /* Example:
2059 * __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
2060 * its 'struct btf_func_model' will have nr_args=2
2061 * The assembly code when eth_type_trans is executing after the trampoline:
2062 *
2063 * push rbp
2064 * mov rbp, rsp
2065 * sub rsp, 16 // space for skb and dev
2066 * push rbx // temp reg to pass start time
2067 * mov qword ptr [rbp - 16], rdi // save skb pointer to stack
2068 * mov qword ptr [rbp - 8], rsi // save dev pointer to stack
2069 * call __bpf_prog_enter // rcu_read_lock and preempt_disable
2070 * mov rbx, rax // remember start time if bpf stats are enabled
2071 * lea rdi, [rbp - 16] // R1==ctx of bpf prog
2072 * call addr_of_jited_FENTRY_prog
2073 * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off
2074 * mov rsi, rbx // prog start time
2075 * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math
2076 * mov rdi, qword ptr [rbp - 16] // restore skb pointer from stack
2077 * mov rsi, qword ptr [rbp - 8] // restore dev pointer from stack
2078 * pop rbx
2079 * leave
2080 * ret
2081 *
2082 * eth_type_trans has a 5-byte nop at the beginning. These 5 bytes will be
2083 * replaced with 'call generated_bpf_trampoline'. When it returns
2084 * eth_type_trans will continue executing with original skb and dev pointers.
2085 *
2086 * The assembly code when eth_type_trans is called from the trampoline:
2087 *
2088 * push rbp
2089 * mov rbp, rsp
2090 * sub rsp, 24 // space for skb, dev, return value
2091 * push rbx // temp reg to pass start time
2092 * mov qword ptr [rbp - 24], rdi // save skb pointer to stack
2093 * mov qword ptr [rbp - 16], rsi // save dev pointer to stack
2094 * call __bpf_prog_enter // rcu_read_lock and preempt_disable
2095 * mov rbx, rax // remember start time if bpf stats are enabled
2096 * lea rdi, [rbp - 24] // R1==ctx of bpf prog
2097 * call addr_of_jited_FENTRY_prog // bpf prog can access skb and dev
2098 * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off
2099 * mov rsi, rbx // prog start time
2100 * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math
2101 * mov rdi, qword ptr [rbp - 24] // restore skb pointer from stack
2102 * mov rsi, qword ptr [rbp - 16] // restore dev pointer from stack
2103 * call eth_type_trans+5 // execute body of eth_type_trans
2104 * mov qword ptr [rbp - 8], rax // save return value
2105 * call __bpf_prog_enter // rcu_read_lock and preempt_disable
2106 * mov rbx, rax // remember start time if bpf stats are enabled
2107 * lea rdi, [rbp - 24] // R1==ctx of bpf prog
2108 * call addr_of_jited_FEXIT_prog // bpf prog can access skb, dev, return value
2109 * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off
2110 * mov rsi, rbx // prog start time
2111 * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math
2112 * mov rax, qword ptr [rbp - 8] // restore eth_type_trans's return value
2113 * pop rbx
2114 * leave
2115 * add rsp, 8 // skip eth_type_trans's frame
2116 * ret // return to its caller
2117 */
2118 int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
2119 const struct btf_func_model *m, u32 flags,
2120 struct bpf_tramp_links *tlinks,
2121 void *func_addr)
2122 {
2123 int ret, i, nr_args = m->nr_args, extra_nregs = 0;
2124 int regs_off, ip_off, args_off, stack_size = nr_args * 8, run_ctx_off;
2125 struct bpf_tramp_links
*fentry = &tlinks[BPF_TRAMP_FENTRY]; 2126 struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT]; 2127 struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN]; 2128 void *orig_call = func_addr; 2129 u8 **branches = NULL; 2130 u8 *prog; 2131 bool save_ret; 2132 2133 /* x86-64 supports up to 6 arguments. 7+ can be added in the future */ 2134 if (nr_args > 6) 2135 return -ENOTSUPP; 2136 2137 for (i = 0; i < MAX_BPF_FUNC_ARGS; i++) { 2138 if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG) 2139 extra_nregs += (m->arg_size[i] + 7) / 8 - 1; 2140 } 2141 if (nr_args + extra_nregs > 6) 2142 return -ENOTSUPP; 2143 stack_size += extra_nregs * 8; 2144 2145 /* Generated trampoline stack layout: 2146 * 2147 * RBP + 8 [ return address ] 2148 * RBP + 0 [ RBP ] 2149 * 2150 * RBP - 8 [ return value ] BPF_TRAMP_F_CALL_ORIG or 2151 * BPF_TRAMP_F_RET_FENTRY_RET flags 2152 * 2153 * [ reg_argN ] always 2154 * [ ... ] 2155 * RBP - regs_off [ reg_arg1 ] program's ctx pointer 2156 * 2157 * RBP - args_off [ arg regs count ] always 2158 * 2159 * RBP - ip_off [ traced function ] BPF_TRAMP_F_IP_ARG flag 2160 * 2161 * RBP - run_ctx_off [ bpf_tramp_run_ctx ] 2162 */ 2163 2164 /* room for return value of orig_call or fentry prog */ 2165 save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET); 2166 if (save_ret) 2167 stack_size += 8; 2168 2169 regs_off = stack_size; 2170 2171 /* args count */ 2172 stack_size += 8; 2173 args_off = stack_size; 2174 2175 if (flags & BPF_TRAMP_F_IP_ARG) 2176 stack_size += 8; /* room for IP address argument */ 2177 2178 ip_off = stack_size; 2179 2180 stack_size += (sizeof(struct bpf_tramp_run_ctx) + 7) & ~0x7; 2181 run_ctx_off = stack_size; 2182 2183 if (flags & BPF_TRAMP_F_SKIP_FRAME) { 2184 /* skip patched call instruction and point orig_call to actual 2185 * body of the kernel function. 
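 *
 * As a sketch (instruction sizes are the usual ENDBR_INSN_SIZE and
 * X86_PATCH_SIZE values rather than something read back from this
 * image), a traced function built with CONFIG_X86_KERNEL_IBT looks like:
 *
 *   func+0: endbr64                      // 4 bytes, skipped via ENDBR_INSN_SIZE
 *   func+4: call trampoline / 5-byte nop // 5 bytes, skipped via X86_PATCH_SIZE
 *   func+9: ...                          // where orig_call ends up pointing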
2186 */ 2187 if (is_endbr(*(u32 *)orig_call)) 2188 orig_call += ENDBR_INSN_SIZE; 2189 orig_call += X86_PATCH_SIZE; 2190 } 2191 2192 prog = image; 2193 2194 EMIT_ENDBR(); 2195 EMIT1(0x55); /* push rbp */ 2196 EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */ 2197 EMIT4(0x48, 0x83, 0xEC, stack_size); /* sub rsp, stack_size */ 2198 EMIT1(0x53); /* push rbx */ 2199 2200 /* Store number of argument registers of the traced function: 2201 * mov rax, nr_args + extra_nregs 2202 * mov QWORD PTR [rbp - args_off], rax 2203 */ 2204 emit_mov_imm64(&prog, BPF_REG_0, 0, (u32) nr_args + extra_nregs); 2205 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -args_off); 2206 2207 if (flags & BPF_TRAMP_F_IP_ARG) { 2208 /* Store IP address of the traced function: 2209 * movabsq rax, func_addr 2210 * mov QWORD PTR [rbp - ip_off], rax 2211 */ 2212 emit_mov_imm64(&prog, BPF_REG_0, (long) func_addr >> 32, (u32) (long) func_addr); 2213 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -ip_off); 2214 } 2215 2216 save_regs(m, &prog, nr_args, regs_off); 2217 2218 if (flags & BPF_TRAMP_F_CALL_ORIG) { 2219 /* arg1: mov rdi, im */ 2220 emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im); 2221 if (emit_call(&prog, __bpf_tramp_enter, prog)) { 2222 ret = -EINVAL; 2223 goto cleanup; 2224 } 2225 } 2226 2227 if (fentry->nr_links) 2228 if (invoke_bpf(m, &prog, fentry, regs_off, run_ctx_off, 2229 flags & BPF_TRAMP_F_RET_FENTRY_RET)) 2230 return -EINVAL; 2231 2232 if (fmod_ret->nr_links) { 2233 branches = kcalloc(fmod_ret->nr_links, sizeof(u8 *), 2234 GFP_KERNEL); 2235 if (!branches) 2236 return -ENOMEM; 2237 2238 if (invoke_bpf_mod_ret(m, &prog, fmod_ret, regs_off, 2239 run_ctx_off, branches)) { 2240 ret = -EINVAL; 2241 goto cleanup; 2242 } 2243 } 2244 2245 if (flags & BPF_TRAMP_F_CALL_ORIG) { 2246 restore_regs(m, &prog, nr_args, regs_off); 2247 2248 if (flags & BPF_TRAMP_F_ORIG_STACK) { 2249 emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, 8); 2250 EMIT2(0xff, 0xd0); /* call *rax */ 2251 } else { 2252 /* call original function */ 2253 if (emit_call(&prog, orig_call, prog)) { 2254 ret = -EINVAL; 2255 goto cleanup; 2256 } 2257 } 2258 /* remember return value in a stack for bpf prog to access */ 2259 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8); 2260 im->ip_after_call = prog; 2261 memcpy(prog, x86_nops[5], X86_PATCH_SIZE); 2262 prog += X86_PATCH_SIZE; 2263 } 2264 2265 if (fmod_ret->nr_links) { 2266 /* From Intel 64 and IA-32 Architectures Optimization 2267 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler 2268 * Coding Rule 11: All branch targets should be 16-byte 2269 * aligned. 2270 */ 2271 emit_align(&prog, 16); 2272 /* Update the branches saved in invoke_bpf_mod_ret with the 2273 * aligned address of do_fexit. 2274 */ 2275 for (i = 0; i < fmod_ret->nr_links; i++) 2276 emit_cond_near_jump(&branches[i], prog, branches[i], 2277 X86_JNE); 2278 } 2279 2280 if (fexit->nr_links) 2281 if (invoke_bpf(m, &prog, fexit, regs_off, run_ctx_off, false)) { 2282 ret = -EINVAL; 2283 goto cleanup; 2284 } 2285 2286 if (flags & BPF_TRAMP_F_RESTORE_REGS) 2287 restore_regs(m, &prog, nr_args, regs_off); 2288 2289 /* This needs to be done regardless. If there were fmod_ret programs, 2290 * the return value is only updated on the stack and still needs to be 2291 * restored to R0. 
2292 */ 2293 if (flags & BPF_TRAMP_F_CALL_ORIG) { 2294 im->ip_epilogue = prog; 2295 /* arg1: mov rdi, im */ 2296 emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im); 2297 if (emit_call(&prog, __bpf_tramp_exit, prog)) { 2298 ret = -EINVAL; 2299 goto cleanup; 2300 } 2301 } 2302 /* restore return value of orig_call or fentry prog back into RAX */ 2303 if (save_ret) 2304 emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8); 2305 2306 EMIT1(0x5B); /* pop rbx */ 2307 EMIT1(0xC9); /* leave */ 2308 if (flags & BPF_TRAMP_F_SKIP_FRAME) 2309 /* skip our return address and return to parent */ 2310 EMIT4(0x48, 0x83, 0xC4, 8); /* add rsp, 8 */ 2311 emit_return(&prog, prog); 2312 /* Make sure the trampoline generation logic doesn't overflow */ 2313 if (WARN_ON_ONCE(prog > (u8 *)image_end - BPF_INSN_SAFETY)) { 2314 ret = -EFAULT; 2315 goto cleanup; 2316 } 2317 ret = prog - (u8 *)image; 2318 2319 cleanup: 2320 kfree(branches); 2321 return ret; 2322 } 2323 2324 static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs, u8 *image, u8 *buf) 2325 { 2326 u8 *jg_reloc, *prog = *pprog; 2327 int pivot, err, jg_bytes = 1; 2328 s64 jg_offset; 2329 2330 if (a == b) { 2331 /* Leaf node of recursion, i.e. not a range of indices 2332 * anymore. 2333 */ 2334 EMIT1(add_1mod(0x48, BPF_REG_3)); /* cmp rdx,func */ 2335 if (!is_simm32(progs[a])) 2336 return -1; 2337 EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3), 2338 progs[a]); 2339 err = emit_cond_near_jump(&prog, /* je func */ 2340 (void *)progs[a], image + (prog - buf), 2341 X86_JE); 2342 if (err) 2343 return err; 2344 2345 emit_indirect_jump(&prog, 2 /* rdx */, image + (prog - buf)); 2346 2347 *pprog = prog; 2348 return 0; 2349 } 2350 2351 /* Not a leaf node, so we pivot, and recursively descend into 2352 * the lower and upper ranges. 2353 */ 2354 pivot = (b - a) / 2; 2355 EMIT1(add_1mod(0x48, BPF_REG_3)); /* cmp rdx,func */ 2356 if (!is_simm32(progs[a + pivot])) 2357 return -1; 2358 EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3), progs[a + pivot]); 2359 2360 if (pivot > 2) { /* jg upper_part */ 2361 /* Require near jump. */ 2362 jg_bytes = 4; 2363 EMIT2_off32(0x0F, X86_JG + 0x10, 0); 2364 } else { 2365 EMIT2(X86_JG, 0); 2366 } 2367 jg_reloc = prog; 2368 2369 err = emit_bpf_dispatcher(&prog, a, a + pivot, /* emit lower_part */ 2370 progs, image, buf); 2371 if (err) 2372 return err; 2373 2374 /* From Intel 64 and IA-32 Architectures Optimization 2375 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler 2376 * Coding Rule 11: All branch targets should be 16-byte 2377 * aligned. 
2378 */ 2379 emit_align(&prog, 16); 2380 jg_offset = prog - jg_reloc; 2381 emit_code(jg_reloc - jg_bytes, jg_offset, jg_bytes); 2382 2383 err = emit_bpf_dispatcher(&prog, a + pivot + 1, /* emit upper_part */ 2384 b, progs, image, buf); 2385 if (err) 2386 return err; 2387 2388 *pprog = prog; 2389 return 0; 2390 } 2391 2392 static int cmp_ips(const void *a, const void *b) 2393 { 2394 const s64 *ipa = a; 2395 const s64 *ipb = b; 2396 2397 if (*ipa > *ipb) 2398 return 1; 2399 if (*ipa < *ipb) 2400 return -1; 2401 return 0; 2402 } 2403 2404 int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_funcs) 2405 { 2406 u8 *prog = buf; 2407 2408 sort(funcs, num_funcs, sizeof(funcs[0]), cmp_ips, NULL); 2409 return emit_bpf_dispatcher(&prog, 0, num_funcs - 1, funcs, image, buf); 2410 } 2411 2412 struct x64_jit_data { 2413 struct bpf_binary_header *rw_header; 2414 struct bpf_binary_header *header; 2415 int *addrs; 2416 u8 *image; 2417 int proglen; 2418 struct jit_context ctx; 2419 }; 2420 2421 #define MAX_PASSES 20 2422 #define PADDING_PASSES (MAX_PASSES - 5) 2423 2424 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) 2425 { 2426 struct bpf_binary_header *rw_header = NULL; 2427 struct bpf_binary_header *header = NULL; 2428 struct bpf_prog *tmp, *orig_prog = prog; 2429 struct x64_jit_data *jit_data; 2430 int proglen, oldproglen = 0; 2431 struct jit_context ctx = {}; 2432 bool tmp_blinded = false; 2433 bool extra_pass = false; 2434 bool padding = false; 2435 u8 *rw_image = NULL; 2436 u8 *image = NULL; 2437 int *addrs; 2438 int pass; 2439 int i; 2440 2441 if (!prog->jit_requested) 2442 return orig_prog; 2443 2444 tmp = bpf_jit_blind_constants(prog); 2445 /* 2446 * If blinding was requested and we failed during blinding, 2447 * we must fall back to the interpreter. 2448 */ 2449 if (IS_ERR(tmp)) 2450 return orig_prog; 2451 if (tmp != prog) { 2452 tmp_blinded = true; 2453 prog = tmp; 2454 } 2455 2456 jit_data = prog->aux->jit_data; 2457 if (!jit_data) { 2458 jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL); 2459 if (!jit_data) { 2460 prog = orig_prog; 2461 goto out; 2462 } 2463 prog->aux->jit_data = jit_data; 2464 } 2465 addrs = jit_data->addrs; 2466 if (addrs) { 2467 ctx = jit_data->ctx; 2468 oldproglen = jit_data->proglen; 2469 image = jit_data->image; 2470 header = jit_data->header; 2471 rw_header = jit_data->rw_header; 2472 rw_image = (void *)rw_header + ((void *)image - (void *)header); 2473 extra_pass = true; 2474 padding = true; 2475 goto skip_init_addrs; 2476 } 2477 addrs = kvmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL); 2478 if (!addrs) { 2479 prog = orig_prog; 2480 goto out_addrs; 2481 } 2482 2483 /* 2484 * Before first pass, make a rough estimation of addrs[] 2485 * each BPF instruction is translated to less than 64 bytes 2486 */ 2487 for (proglen = 0, i = 0; i <= prog->len; i++) { 2488 proglen += 64; 2489 addrs[i] = proglen; 2490 } 2491 ctx.cleanup_addr = proglen; 2492 skip_init_addrs: 2493 2494 /* 2495 * JITed image shrinks with every pass and the loop iterates 2496 * until the image stops shrinking. Very large BPF programs 2497 * may converge on the last pass. In such case do one more 2498 * pass to emit the final image. 
2499 */ 2500 for (pass = 0; pass < MAX_PASSES || image; pass++) { 2501 if (!padding && pass >= PADDING_PASSES) 2502 padding = true; 2503 proglen = do_jit(prog, addrs, image, rw_image, oldproglen, &ctx, padding); 2504 if (proglen <= 0) { 2505 out_image: 2506 image = NULL; 2507 if (header) { 2508 bpf_arch_text_copy(&header->size, &rw_header->size, 2509 sizeof(rw_header->size)); 2510 bpf_jit_binary_pack_free(header, rw_header); 2511 } 2512 /* Fall back to interpreter mode */ 2513 prog = orig_prog; 2514 if (extra_pass) { 2515 prog->bpf_func = NULL; 2516 prog->jited = 0; 2517 prog->jited_len = 0; 2518 } 2519 goto out_addrs; 2520 } 2521 if (image) { 2522 if (proglen != oldproglen) { 2523 pr_err("bpf_jit: proglen=%d != oldproglen=%d\n", 2524 proglen, oldproglen); 2525 goto out_image; 2526 } 2527 break; 2528 } 2529 if (proglen == oldproglen) { 2530 /* 2531 * The number of entries in extable is the number of BPF_LDX 2532 * insns that access kernel memory via "pointer to BTF type". 2533 * The verifier changed their opcode from LDX|MEM|size 2534 * to LDX|PROBE_MEM|size to make JITing easier. 2535 */ 2536 u32 align = __alignof__(struct exception_table_entry); 2537 u32 extable_size = prog->aux->num_exentries * 2538 sizeof(struct exception_table_entry); 2539 2540 /* allocate module memory for x86 insns and extable */ 2541 header = bpf_jit_binary_pack_alloc(roundup(proglen, align) + extable_size, 2542 &image, align, &rw_header, &rw_image, 2543 jit_fill_hole); 2544 if (!header) { 2545 prog = orig_prog; 2546 goto out_addrs; 2547 } 2548 prog->aux->extable = (void *) image + roundup(proglen, align); 2549 } 2550 oldproglen = proglen; 2551 cond_resched(); 2552 } 2553 2554 if (bpf_jit_enable > 1) 2555 bpf_jit_dump(prog->len, proglen, pass + 1, image); 2556 2557 if (image) { 2558 if (!prog->is_func || extra_pass) { 2559 /* 2560 * bpf_jit_binary_pack_finalize fails in two scenarios: 2561 * 1) header is not pointing to proper module memory; 2562 * 2) the arch doesn't support bpf_arch_text_copy(). 2563 * 2564 * Both cases are serious bugs and justify WARN_ON. 2565 */ 2566 if (WARN_ON(bpf_jit_binary_pack_finalize(prog, header, rw_header))) { 2567 /* header has been freed */ 2568 header = NULL; 2569 goto out_image; 2570 } 2571 2572 bpf_tail_call_direct_fixup(prog); 2573 } else { 2574 jit_data->addrs = addrs; 2575 jit_data->ctx = ctx; 2576 jit_data->proglen = proglen; 2577 jit_data->image = image; 2578 jit_data->header = header; 2579 jit_data->rw_header = rw_header; 2580 } 2581 prog->bpf_func = (void *)image; 2582 prog->jited = 1; 2583 prog->jited_len = proglen; 2584 } else { 2585 prog = orig_prog; 2586 } 2587 2588 if (!image || !prog->is_func || extra_pass) { 2589 if (image) 2590 bpf_prog_fill_jited_linfo(prog, addrs + 1); 2591 out_addrs: 2592 kvfree(addrs); 2593 kfree(jit_data); 2594 prog->aux->jit_data = NULL; 2595 } 2596 out: 2597 if (tmp_blinded) 2598 bpf_jit_prog_release_other(prog, prog == orig_prog ? 2599 tmp : orig_prog); 2600 return prog; 2601 } 2602 2603 bool bpf_jit_supports_kfunc_call(void) 2604 { 2605 return true; 2606 } 2607 2608 void *bpf_arch_text_copy(void *dst, void *src, size_t len) 2609 { 2610 if (text_poke_copy(dst, src, len) == NULL) 2611 return ERR_PTR(-EINVAL); 2612 return dst; 2613 } 2614 2615 /* Indicate the JIT backend supports mixing bpf2bpf and tailcalls. 
*/ 2616 bool bpf_jit_supports_subprog_tailcalls(void) 2617 { 2618 return true; 2619 } 2620 2621 void bpf_jit_free(struct bpf_prog *prog) 2622 { 2623 if (prog->jited) { 2624 struct x64_jit_data *jit_data = prog->aux->jit_data; 2625 struct bpf_binary_header *hdr; 2626 2627 /* 2628 * If we fail the final pass of JIT (from jit_subprogs), 2629 * the program may not be finalized yet. Call finalize here 2630 * before freeing it. 2631 */ 2632 if (jit_data) { 2633 bpf_jit_binary_pack_finalize(prog, jit_data->header, 2634 jit_data->rw_header); 2635 kvfree(jit_data->addrs); 2636 kfree(jit_data); 2637 } 2638 hdr = bpf_jit_binary_pack_hdr(prog); 2639 bpf_jit_binary_pack_free(hdr, NULL); 2640 WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(prog)); 2641 } 2642 2643 bpf_prog_unlock_free(prog); 2644 } 2645