// SPDX-License-Identifier: GPL-2.0-only
/*
 * BPF JIT compiler
 *
 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
 * Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <linux/memory.h>
#include <linux/sort.h>
#include <asm/extable.h>
#include <asm/set_memory.h>
#include <asm/nospec-branch.h>
#include <asm/text-patching.h>

static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
{
	if (len == 1)
		*ptr = bytes;
	else if (len == 2)
		*(u16 *)ptr = bytes;
	else {
		*(u32 *)ptr = bytes;
		barrier();
	}
	return ptr + len;
}

#define EMIT(bytes, len) \
	do { prog = emit_code(prog, bytes, len); } while (0)

#define EMIT1(b1)		EMIT(b1, 1)
#define EMIT2(b1, b2)		EMIT((b1) + ((b2) << 8), 2)
#define EMIT3(b1, b2, b3)	EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
#define EMIT4(b1, b2, b3, b4)	EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)

#define EMIT1_off32(b1, off) \
	do { EMIT1(b1); EMIT(off, 4); } while (0)
#define EMIT2_off32(b1, b2, off) \
	do { EMIT2(b1, b2); EMIT(off, 4); } while (0)
#define EMIT3_off32(b1, b2, b3, off) \
	do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
#define EMIT4_off32(b1, b2, b3, b4, off) \
	do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)

#ifdef CONFIG_X86_KERNEL_IBT
#define EMIT_ENDBR()	EMIT(gen_endbr(), 4)
#else
#define EMIT_ENDBR()
#endif
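/*
 * Example (for illustration): EMIT3(0x48, 0x89, 0xE5) packs the bytes
 * 48 89 e5 little-endian into a u32 and stores them via emit_code(),
 * producing the x86-64 instruction 'mov rbp, rsp'. The *_off32 variants
 * append a 32-bit immediate or displacement after the opcode bytes.
 */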
static bool is_imm8(int value)
{
	return value <= 127 && value >= -128;
}

static bool is_simm32(s64 value)
{
	return value == (s64)(s32)value;
}

static bool is_uimm32(u64 value)
{
	return value == (u64)(u32)value;
}

/* mov dst, src */
#define EMIT_mov(DST, SRC)								 \
	do {										 \
		if (DST != SRC)								 \
			EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
	} while (0)

static int bpf_size_to_x86_bytes(int bpf_size)
{
	if (bpf_size == BPF_W)
		return 4;
	else if (bpf_size == BPF_H)
		return 2;
	else if (bpf_size == BPF_B)
		return 1;
	else if (bpf_size == BPF_DW)
		return 4; /* imm32 */
	else
		return 0;
}

/*
 * List of x86 cond jumps opcodes (. + s8)
 * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
 */
#define X86_JB  0x72
#define X86_JAE 0x73
#define X86_JE  0x74
#define X86_JNE 0x75
#define X86_JBE 0x76
#define X86_JA  0x77
#define X86_JL  0x7C
#define X86_JGE 0x7D
#define X86_JLE 0x7E
#define X86_JG  0x7F

/* Pick a register outside of BPF range for JIT internal work */
#define AUX_REG (MAX_BPF_JIT_REG + 1)
#define X86_REG_R9 (MAX_BPF_JIT_REG + 2)

/*
 * The following table maps BPF registers to x86-64 registers.
 *
 * x86-64 register R12 is unused, since if used as base address
 * register in load/store instructions, it always needs an
 * extra byte of encoding and is callee saved.
 *
 * x86-64 register R9 is not used by BPF programs, but can be used by BPF
 * trampoline. x86-64 register R10 is used for blinding (if enabled).
 */
static const int reg2hex[] = {
	[BPF_REG_0] = 0,  /* RAX */
	[BPF_REG_1] = 7,  /* RDI */
	[BPF_REG_2] = 6,  /* RSI */
	[BPF_REG_3] = 2,  /* RDX */
	[BPF_REG_4] = 1,  /* RCX */
	[BPF_REG_5] = 0,  /* R8  */
	[BPF_REG_6] = 3,  /* RBX callee saved */
	[BPF_REG_7] = 5,  /* R13 callee saved */
	[BPF_REG_8] = 6,  /* R14 callee saved */
	[BPF_REG_9] = 7,  /* R15 callee saved */
	[BPF_REG_FP] = 5, /* RBP readonly */
	[BPF_REG_AX] = 2, /* R10 temp register */
	[AUX_REG] = 3,    /* R11 temp register */
	[X86_REG_R9] = 1, /* R9 register, 6th function argument */
};

static const int reg2pt_regs[] = {
	[BPF_REG_0] = offsetof(struct pt_regs, ax),
	[BPF_REG_1] = offsetof(struct pt_regs, di),
	[BPF_REG_2] = offsetof(struct pt_regs, si),
	[BPF_REG_3] = offsetof(struct pt_regs, dx),
	[BPF_REG_4] = offsetof(struct pt_regs, cx),
	[BPF_REG_5] = offsetof(struct pt_regs, r8),
	[BPF_REG_6] = offsetof(struct pt_regs, bx),
	[BPF_REG_7] = offsetof(struct pt_regs, r13),
	[BPF_REG_8] = offsetof(struct pt_regs, r14),
	[BPF_REG_9] = offsetof(struct pt_regs, r15),
};

/*
 * is_ereg() == true if BPF register 'reg' maps to x86-64 r8..r15
 * which need extra byte of encoding.
 * rax,rcx,...,rbp have simpler encoding
 */
static bool is_ereg(u32 reg)
{
	return (1 << reg) & (BIT(BPF_REG_5) |
			     BIT(AUX_REG) |
			     BIT(BPF_REG_7) |
			     BIT(BPF_REG_8) |
			     BIT(BPF_REG_9) |
			     BIT(X86_REG_R9) |
			     BIT(BPF_REG_AX));
}

/*
 * is_ereg_8l() == true if BPF register 'reg' is mapped to access x86-64
 * lower 8-bit registers dil,sil,bpl,spl,r8b..r15b, which need extra byte
 * of encoding. al,cl,dl,bl have simpler encoding.
 */
static bool is_ereg_8l(u32 reg)
{
	return is_ereg(reg) ||
	    (1 << reg) & (BIT(BPF_REG_1) |
			  BIT(BPF_REG_2) |
			  BIT(BPF_REG_FP));
}

static bool is_axreg(u32 reg)
{
	return reg == BPF_REG_0;
}

/* Add modifiers if 'reg' maps to x86-64 registers R8..R15 */
static u8 add_1mod(u8 byte, u32 reg)
{
	if (is_ereg(reg))
		byte |= 1;
	return byte;
}

static u8 add_2mod(u8 byte, u32 r1, u32 r2)
{
	if (is_ereg(r1))
		byte |= 1;
	if (is_ereg(r2))
		byte |= 4;
	return byte;
}

/* Encode 'dst_reg' register into x86-64 opcode 'byte' */
static u8 add_1reg(u8 byte, u32 dst_reg)
{
	return byte + reg2hex[dst_reg];
}

/* Encode 'dst_reg' and 'src_reg' registers into x86-64 opcode 'byte' */
static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
{
	return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
}
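/*
 * Worked example (illustrative): EMIT_mov(BPF_REG_6, BPF_REG_7) encodes
 * 'mov rbx, r13'. add_2mod(0x48, ...) sets REX.R for r13
 * (0x48 | 4 = 0x4C), and add_2reg(0xC0, ...) builds the ModRM byte
 * 0xC0 + reg2hex[BPF_REG_6] + (reg2hex[BPF_REG_7] << 3) = 0xEB,
 * yielding the byte sequence 4c 89 eb.
 */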
/* Some 1-byte opcodes for binary ALU operations */
static u8 simple_alu_opcodes[] = {
	[BPF_ADD] = 0x01,
	[BPF_SUB] = 0x29,
	[BPF_AND] = 0x21,
	[BPF_OR] = 0x09,
	[BPF_XOR] = 0x31,
	[BPF_LSH] = 0xE0,
	[BPF_RSH] = 0xE8,
	[BPF_ARSH] = 0xF8,
};

static void jit_fill_hole(void *area, unsigned int size)
{
	/* Fill whole space with INT3 instructions */
	memset(area, 0xcc, size);
}

struct jit_context {
	int cleanup_addr; /* Epilogue code offset */

	/*
	 * Program specific offsets of labels in the code; these rely on the
	 * JIT doing at least 2 passes, recording the position on the first
	 * pass, only to generate the correct offset on the second pass.
	 */
	int tail_call_direct_label;
	int tail_call_indirect_label;
};

/* Maximum number of bytes emitted while JITing one eBPF insn */
#define BPF_MAX_INSN_SIZE	128
#define BPF_INSN_SAFETY		64

/* Number of bytes emit_patch() needs to generate instructions */
#define X86_PATCH_SIZE		5
/* Number of bytes that will be skipped on tailcall */
#define X86_TAIL_CALL_OFFSET	(11 + ENDBR_INSN_SIZE)

static void push_callee_regs(u8 **pprog, bool *callee_regs_used)
{
	u8 *prog = *pprog;

	if (callee_regs_used[0])
		EMIT1(0x53);         /* push rbx */
	if (callee_regs_used[1])
		EMIT2(0x41, 0x55);   /* push r13 */
	if (callee_regs_used[2])
		EMIT2(0x41, 0x56);   /* push r14 */
	if (callee_regs_used[3])
		EMIT2(0x41, 0x57);   /* push r15 */
	*pprog = prog;
}

static void pop_callee_regs(u8 **pprog, bool *callee_regs_used)
{
	u8 *prog = *pprog;

	if (callee_regs_used[3])
		EMIT2(0x41, 0x5F);   /* pop r15 */
	if (callee_regs_used[2])
		EMIT2(0x41, 0x5E);   /* pop r14 */
	if (callee_regs_used[1])
		EMIT2(0x41, 0x5D);   /* pop r13 */
	if (callee_regs_used[0])
		EMIT1(0x5B);         /* pop rbx */
	*pprog = prog;
}

/*
 * Emit x86-64 prologue code for BPF program.
 * bpf_tail_call helper will skip the first X86_TAIL_CALL_OFFSET bytes
 * while jumping to another program
 */
static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf,
			  bool tail_call_reachable, bool is_subprog)
{
	u8 *prog = *pprog;

	/* BPF trampoline can be made to work without these nops,
	 * but let's waste 5 bytes for now and optimize later
	 */
	EMIT_ENDBR();
	memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
	prog += X86_PATCH_SIZE;
	if (!ebpf_from_cbpf) {
		if (tail_call_reachable && !is_subprog)
			EMIT2(0x31, 0xC0); /* xor eax, eax */
		else
			EMIT2(0x66, 0x90); /* nop2 */
	}
	EMIT1(0x55);             /* push rbp */
	EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */

	/* X86_TAIL_CALL_OFFSET is here */
	EMIT_ENDBR();

	/* sub rsp, rounded_stack_depth */
	if (stack_depth)
		EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth, 8));
	if (tail_call_reachable)
		EMIT1(0x50);         /* push rax */
	*pprog = prog;
}

static int emit_patch(u8 **pprog, void *func, void *ip, u8 opcode)
{
	u8 *prog = *pprog;
	s64 offset;

	offset = func - (ip + X86_PATCH_SIZE);
	if (!is_simm32(offset)) {
		pr_err("Target call %p is out of range\n", func);
		return -ERANGE;
	}
	EMIT1_off32(opcode, offset);
	*pprog = prog;
	return 0;
}

static int emit_call(u8 **pprog, void *func, void *ip)
{
	return emit_patch(pprog, func, ip, 0xE8);
}

static int emit_jump(u8 **pprog, void *func, void *ip)
{
	return emit_patch(pprog, func, ip, 0xE9);
}
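/*
 * Note (for illustration): emit_patch() produces the 5-byte near
 * call/jmp forms 'E8 rel32' and 'E9 rel32'. The displacement is
 * relative to the end of the instruction, hence the
 * 'func - (ip + X86_PATCH_SIZE)' computation above.
 */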
static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
				void *old_addr, void *new_addr)
{
	const u8 *nop_insn = x86_nops[5];
	u8 old_insn[X86_PATCH_SIZE];
	u8 new_insn[X86_PATCH_SIZE];
	u8 *prog;
	int ret;

	memcpy(old_insn, nop_insn, X86_PATCH_SIZE);
	if (old_addr) {
		prog = old_insn;
		ret = t == BPF_MOD_CALL ?
		      emit_call(&prog, old_addr, ip) :
		      emit_jump(&prog, old_addr, ip);
		if (ret)
			return ret;
	}

	memcpy(new_insn, nop_insn, X86_PATCH_SIZE);
	if (new_addr) {
		prog = new_insn;
		ret = t == BPF_MOD_CALL ?
		      emit_call(&prog, new_addr, ip) :
		      emit_jump(&prog, new_addr, ip);
		if (ret)
			return ret;
	}

	ret = -EBUSY;
	mutex_lock(&text_mutex);
	if (memcmp(ip, old_insn, X86_PATCH_SIZE))
		goto out;
	ret = 1;
	if (memcmp(ip, new_insn, X86_PATCH_SIZE)) {
		text_poke_bp(ip, new_insn, X86_PATCH_SIZE, NULL);
		ret = 0;
	}
out:
	mutex_unlock(&text_mutex);
	return ret;
}

int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
		       void *old_addr, void *new_addr)
{
	if (!is_kernel_text((long)ip) &&
	    !is_bpf_text_address((long)ip))
		/* BPF poking in modules is not supported */
		return -EINVAL;

	/*
	 * See emit_prologue(), for IBT builds the trampoline hook is preceded
	 * with an ENDBR instruction.
	 */
	if (is_endbr(*(u32 *)ip))
		ip += ENDBR_INSN_SIZE;

	return __bpf_arch_text_poke(ip, t, old_addr, new_addr);
}

#define EMIT_LFENCE()	EMIT3(0x0F, 0xAE, 0xE8)

static void emit_indirect_jump(u8 **pprog, int reg, u8 *ip)
{
	u8 *prog = *pprog;

#ifdef CONFIG_RETPOLINE
	if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) {
		EMIT_LFENCE();
		EMIT2(0xFF, 0xE0 + reg);
	} else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) {
		emit_jump(&prog, &__x86_indirect_thunk_array[reg], ip);
	} else
#endif
	EMIT2(0xFF, 0xE0 + reg);

	*pprog = prog;
}
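/*
 * For illustration: 'FF E0 + reg' is the plain indirect-jump encoding
 * (e.g. FF E1 for reg == 1, i.e. 'jmp *%rcx'). With full retpolines
 * enabled, the indirect branch is replaced by a direct jump into the
 * matching __x86_indirect_thunk_<reg>; with the LFENCE mitigation, a
 * speculation barrier is emitted before the indirect jump instead.
 */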
/*
 * Generate the following code:
 *
 * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ...
 *   if (index >= array->map.max_entries)
 *     goto out;
 *   if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT)
 *     goto out;
 *   prog = array->ptrs[index];
 *   if (prog == NULL)
 *     goto out;
 *   goto *(prog->bpf_func + prologue_size);
 * out:
 */
static void emit_bpf_tail_call_indirect(u8 **pprog, bool *callee_regs_used,
					u32 stack_depth, u8 *ip,
					struct jit_context *ctx)
{
	int tcc_off = -4 - round_up(stack_depth, 8);
	u8 *prog = *pprog, *start = *pprog;
	int offset;

	/*
	 * rdi - pointer to ctx
	 * rsi - pointer to bpf_array
	 * rdx - index in bpf_array
	 */

	/*
	 * if (index >= array->map.max_entries)
	 *	goto out;
	 */
	EMIT2(0x89, 0xD2);                        /* mov edx, edx */
	EMIT3(0x39, 0x56,                         /* cmp dword ptr [rsi + 16], edx */
	      offsetof(struct bpf_array, map.max_entries));

	offset = ctx->tail_call_indirect_label - (prog + 2 - start);
	EMIT2(X86_JBE, offset);                   /* jbe out */

	/*
	 * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT)
	 *	goto out;
	 */
	EMIT2_off32(0x8B, 0x85, tcc_off);         /* mov eax, dword ptr [rbp + tcc_off] */
	EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);     /* cmp eax, MAX_TAIL_CALL_CNT */

	offset = ctx->tail_call_indirect_label - (prog + 2 - start);
	EMIT2(X86_JAE, offset);                   /* jae out */
	EMIT3(0x83, 0xC0, 0x01);                  /* add eax, 1 */
	EMIT2_off32(0x89, 0x85, tcc_off);         /* mov dword ptr [rbp + tcc_off], eax */

	/* prog = array->ptrs[index]; */
	EMIT4_off32(0x48, 0x8B, 0x8C, 0xD6,       /* mov rcx, [rsi + rdx * 8 + offsetof(...)] */
		    offsetof(struct bpf_array, ptrs));

	/*
	 * if (prog == NULL)
	 *	goto out;
	 */
	EMIT3(0x48, 0x85, 0xC9);                  /* test rcx,rcx */

	offset = ctx->tail_call_indirect_label - (prog + 2 - start);
	EMIT2(X86_JE, offset);                    /* je out */

	pop_callee_regs(&prog, callee_regs_used);

	EMIT1(0x58);                              /* pop rax */
	if (stack_depth)
		EMIT3_off32(0x48, 0x81, 0xC4,     /* add rsp, sd */
			    round_up(stack_depth, 8));

	/* goto *(prog->bpf_func + X86_TAIL_CALL_OFFSET); */
	EMIT4(0x48, 0x8B, 0x49,                   /* mov rcx, qword ptr [rcx + 32] */
	      offsetof(struct bpf_prog, bpf_func));
	EMIT4(0x48, 0x83, 0xC1,                   /* add rcx, X86_TAIL_CALL_OFFSET */
	      X86_TAIL_CALL_OFFSET);
	/*
	 * Now we're ready to jump into next BPF program
	 * rdi == ctx (1st arg)
	 * rcx == prog->bpf_func + X86_TAIL_CALL_OFFSET
	 */
	emit_indirect_jump(&prog, 1 /* rcx */, ip + (prog - start));

	/* out: */
	ctx->tail_call_indirect_label = prog - start;
	*pprog = prog;
}

static void emit_bpf_tail_call_direct(struct bpf_jit_poke_descriptor *poke,
				      u8 **pprog, u8 *ip,
				      bool *callee_regs_used, u32 stack_depth,
				      struct jit_context *ctx)
{
	int tcc_off = -4 - round_up(stack_depth, 8);
	u8 *prog = *pprog, *start = *pprog;
	int offset;

	/*
	 * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT)
	 *	goto out;
	 */
	EMIT2_off32(0x8B, 0x85, tcc_off);             /* mov eax, dword ptr [rbp + tcc_off] */
	EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);         /* cmp eax, MAX_TAIL_CALL_CNT */

	offset = ctx->tail_call_direct_label - (prog + 2 - start);
	EMIT2(X86_JAE, offset);                       /* jae out */
	EMIT3(0x83, 0xC0, 0x01);                      /* add eax, 1 */
	EMIT2_off32(0x89, 0x85, tcc_off);             /* mov dword ptr [rbp + tcc_off], eax */

	poke->tailcall_bypass = ip + (prog - start);
	poke->adj_off = X86_TAIL_CALL_OFFSET;
	poke->tailcall_target = ip + ctx->tail_call_direct_label - X86_PATCH_SIZE;
	poke->bypass_addr = (u8 *)poke->tailcall_target + X86_PATCH_SIZE;

	emit_jump(&prog, (u8 *)poke->tailcall_target + X86_PATCH_SIZE,
		  poke->tailcall_bypass);

	pop_callee_regs(&prog, callee_regs_used);
	EMIT1(0x58);                                  /* pop rax */
	if (stack_depth)
		EMIT3_off32(0x48, 0x81, 0xC4, round_up(stack_depth, 8));

	memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
	prog += X86_PATCH_SIZE;

	/* out: */
	ctx->tail_call_direct_label = prog - start;

	*pprog = prog;
}

static void bpf_tail_call_direct_fixup(struct bpf_prog *prog)
{
	struct bpf_jit_poke_descriptor *poke;
	struct bpf_array *array;
	struct bpf_prog *target;
	int i, ret;

	for (i = 0; i < prog->aux->size_poke_tab; i++) {
		poke = &prog->aux->poke_tab[i];
		if (poke->aux && poke->aux != prog->aux)
			continue;

		WARN_ON_ONCE(READ_ONCE(poke->tailcall_target_stable));

		if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
			continue;

		array = container_of(poke->tail_call.map, struct bpf_array, map);
		mutex_lock(&array->aux->poke_mutex);
		target = array->ptrs[poke->tail_call.key];
		if (target) {
			ret = __bpf_arch_text_poke(poke->tailcall_target,
						   BPF_MOD_JUMP, NULL,
						   (u8 *)target->bpf_func +
						   poke->adj_off);
			BUG_ON(ret < 0);
			ret = __bpf_arch_text_poke(poke->tailcall_bypass,
						   BPF_MOD_JUMP,
						   (u8 *)poke->tailcall_target +
						   X86_PATCH_SIZE, NULL);
			BUG_ON(ret < 0);
		}
		WRITE_ONCE(poke->tailcall_target_stable, true);
		mutex_unlock(&array->aux->poke_mutex);
	}
}
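/*
 * Note on the direct tail-call patching scheme above: the JIT leaves a
 * 5-byte nop at poke->tailcall_target, preceded by an unconditional
 * jump (poke->tailcall_bypass) that jumps past it. Once the target
 * program is known, bpf_tail_call_direct_fixup() patches the nop into
 * a direct 'jmp target->bpf_func + adj_off' and then turns the bypass
 * jump into a nop, each via text_poke_bp().
 */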
static void emit_mov_imm32(u8 **pprog, bool sign_propagate,
			   u32 dst_reg, const u32 imm32)
{
	u8 *prog = *pprog;
	u8 b1, b2, b3;

	/*
	 * Optimization: if imm32 is positive, use 'mov %eax, imm32'
	 * (which zero-extends imm32) to save 2 bytes.
	 */
	if (sign_propagate && (s32)imm32 < 0) {
		/* 'mov %rax, imm32' sign extends imm32 */
		b1 = add_1mod(0x48, dst_reg);
		b2 = 0xC7;
		b3 = 0xC0;
		EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
		goto done;
	}

	/*
	 * Optimization: if imm32 is zero, use 'xor %eax, %eax'
	 * to save 3 bytes.
	 */
	if (imm32 == 0) {
		if (is_ereg(dst_reg))
			EMIT1(add_2mod(0x40, dst_reg, dst_reg));
		b2 = 0x31; /* xor */
		b3 = 0xC0;
		EMIT2(b2, add_2reg(b3, dst_reg, dst_reg));
		goto done;
	}

	/* mov %eax, imm32 */
	if (is_ereg(dst_reg))
		EMIT1(add_1mod(0x40, dst_reg));
	EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
done:
	*pprog = prog;
}
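/*
 * Worked example (illustrative), loading into BPF_REG_0 (rax):
 *   imm32 == 0          -> 31 c0                 xor eax, eax (2 bytes)
 *   imm32 == 5          -> b8 05 00 00 00        mov eax, 5   (5 bytes)
 *   imm32 == -1, signed -> 48 c7 c0 ff ff ff ff  mov rax, -1  (7 bytes)
 * The 32-bit forms implicitly zero the upper half of the register,
 * which is why the sign-propagating case needs the REX.W encoding.
 */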
static void emit_mov_imm64(u8 **pprog, u32 dst_reg,
			   const u32 imm32_hi, const u32 imm32_lo)
{
	u8 *prog = *pprog;

	if (is_uimm32(((u64)imm32_hi << 32) | (u32)imm32_lo)) {
		/*
		 * For emitting plain u32, where sign bit must not be
		 * propagated LLVM tends to load imm64 over mov32
		 * directly, so save couple of bytes by just doing
		 * 'mov %eax, imm32' instead.
		 */
		emit_mov_imm32(&prog, false, dst_reg, imm32_lo);
	} else {
		/* movabsq %rax, imm64 */
		EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
		EMIT(imm32_lo, 4);
		EMIT(imm32_hi, 4);
	}

	*pprog = prog;
}

static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg)
{
	u8 *prog = *pprog;

	if (is64) {
		/* mov dst, src */
		EMIT_mov(dst_reg, src_reg);
	} else {
		/* mov32 dst, src */
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT1(add_2mod(0x40, dst_reg, src_reg));
		EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
	}

	*pprog = prog;
}

/* Emit the suffix (ModR/M etc) for addressing *(ptr_reg + off) and val_reg */
static void emit_insn_suffix(u8 **pprog, u32 ptr_reg, u32 val_reg, int off)
{
	u8 *prog = *pprog;

	if (is_imm8(off)) {
		/* 1-byte signed displacement.
		 *
		 * If off == 0 we could skip this and save one extra byte, but
		 * special case of x86 R13 which always needs an offset is not
		 * worth the hassle
		 */
		EMIT2(add_2reg(0x40, ptr_reg, val_reg), off);
	} else {
		/* 4-byte signed displacement */
		EMIT1_off32(add_2reg(0x80, ptr_reg, val_reg), off);
	}
	*pprog = prog;
}

/*
 * Emit a REX byte if it will be necessary to address these registers
 */
static void maybe_emit_mod(u8 **pprog, u32 dst_reg, u32 src_reg, bool is64)
{
	u8 *prog = *pprog;

	if (is64)
		EMIT1(add_2mod(0x48, dst_reg, src_reg));
	else if (is_ereg(dst_reg) || is_ereg(src_reg))
		EMIT1(add_2mod(0x40, dst_reg, src_reg));
	*pprog = prog;
}

/*
 * Similar version of maybe_emit_mod() for a single register
 */
static void maybe_emit_1mod(u8 **pprog, u32 reg, bool is64)
{
	u8 *prog = *pprog;

	if (is64)
		EMIT1(add_1mod(0x48, reg));
	else if (is_ereg(reg))
		EMIT1(add_1mod(0x40, reg));
	*pprog = prog;
}

/* LDX: dst_reg = *(u8*)(src_reg + off) */
static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
{
	u8 *prog = *pprog;

	switch (size) {
	case BPF_B:
		/* Emit 'movzx rax, byte ptr [rax + off]' */
		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
		break;
	case BPF_H:
		/* Emit 'movzx rax, word ptr [rax + off]' */
		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
		break;
	case BPF_W:
		/* Emit 'mov eax, dword ptr [rax+0x14]' */
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
		else
			EMIT1(0x8B);
		break;
	case BPF_DW:
		/* Emit 'mov rax, qword ptr [rax+0x14]' */
		EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
		break;
	}
	emit_insn_suffix(&prog, src_reg, dst_reg, off);
	*pprog = prog;
}

/* STX: *(u8*)(dst_reg + off) = src_reg */
static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
{
	u8 *prog = *pprog;

	switch (size) {
	case BPF_B:
		/* Emit 'mov byte ptr [rax + off], al' */
		if (is_ereg(dst_reg) || is_ereg_8l(src_reg))
			/* Add extra byte for eregs or SIL,DIL,BPL in src_reg */
			EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
		else
			EMIT1(0x88);
		break;
	case BPF_H:
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
		else
			EMIT2(0x66, 0x89);
		break;
	case BPF_W:
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
		else
			EMIT1(0x89);
		break;
	case BPF_DW:
		EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
		break;
	}
	emit_insn_suffix(&prog, dst_reg, src_reg, off);
	*pprog = prog;
}
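/*
 * Worked example (illustrative): emit_ldx() with size BPF_W, dst_reg
 * BPF_REG_0 (rax), src_reg BPF_REG_1 (rdi) and off 12 emits no REX byte
 * (neither register is extended), the opcode 8B, then the disp8 suffix
 * add_2reg(0x40, BPF_REG_1, BPF_REG_0) = 0x47 followed by 0x0C:
 * '8b 47 0c', i.e. 'mov eax, dword ptr [rdi + 12]'.
 */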
static int emit_atomic(u8 **pprog, u8 atomic_op,
		       u32 dst_reg, u32 src_reg, s16 off, u8 bpf_size)
{
	u8 *prog = *pprog;

	EMIT1(0xF0); /* lock prefix */

	maybe_emit_mod(&prog, dst_reg, src_reg, bpf_size == BPF_DW);

	/* emit opcode */
	switch (atomic_op) {
	case BPF_ADD:
	case BPF_AND:
	case BPF_OR:
	case BPF_XOR:
		/* lock *(u32/u64*)(dst_reg + off) <op>= src_reg */
		EMIT1(simple_alu_opcodes[atomic_op]);
		break;
	case BPF_ADD | BPF_FETCH:
		/* src_reg = atomic_fetch_add(dst_reg + off, src_reg); */
		EMIT2(0x0F, 0xC1);
		break;
	case BPF_XCHG:
		/* src_reg = atomic_xchg(dst_reg + off, src_reg); */
		EMIT1(0x87);
		break;
	case BPF_CMPXCHG:
		/* r0 = atomic_cmpxchg(dst_reg + off, r0, src_reg); */
		EMIT2(0x0F, 0xB1);
		break;
	default:
		pr_err("bpf_jit: unknown atomic opcode %02x\n", atomic_op);
		return -EFAULT;
	}

	emit_insn_suffix(&prog, dst_reg, src_reg, off);

	*pprog = prog;
	return 0;
}

bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs)
{
	u32 reg = x->fixup >> 8;

	/* jump over faulting load and clear dest register */
	*(unsigned long *)((void *)regs + reg) = 0;
	regs->ip += x->fixup & 0xff;
	return true;
}

static void detect_reg_usage(struct bpf_insn *insn, int insn_cnt,
			     bool *regs_used, bool *tail_call_seen)
{
	int i;

	for (i = 1; i <= insn_cnt; i++, insn++) {
		if (insn->code == (BPF_JMP | BPF_TAIL_CALL))
			*tail_call_seen = true;
		if (insn->dst_reg == BPF_REG_6 || insn->src_reg == BPF_REG_6)
			regs_used[0] = true;
		if (insn->dst_reg == BPF_REG_7 || insn->src_reg == BPF_REG_7)
			regs_used[1] = true;
		if (insn->dst_reg == BPF_REG_8 || insn->src_reg == BPF_REG_8)
			regs_used[2] = true;
		if (insn->dst_reg == BPF_REG_9 || insn->src_reg == BPF_REG_9)
			regs_used[3] = true;
	}
}

static void emit_nops(u8 **pprog, int len)
{
	u8 *prog = *pprog;
	int i, noplen;

	while (len > 0) {
		noplen = len;

		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;

		for (i = 0; i < noplen; i++)
			EMIT1(x86_nops[noplen][i]);
		len -= noplen;
	}

	*pprog = prog;
}

#define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp)))
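/*
 * INSN_SZ_DIFF (for illustration): addrs[i] - addrs[i - 1] is the size
 * this BPF insn's x86 code occupied in the previous pass, while
 * prog - temp is its size in the current pass. A positive difference
 * means the insn shrank (e.g. a jump offset now fits in imm8), which
 * the jmp_padding logic below compensates for with nops so that
 * already-computed branch displacements stay valid.
 */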
static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image,
		  int oldproglen, struct jit_context *ctx, bool jmp_padding)
{
	bool tail_call_reachable = bpf_prog->aux->tail_call_reachable;
	struct bpf_insn *insn = bpf_prog->insnsi;
	bool callee_regs_used[4] = {};
	int insn_cnt = bpf_prog->len;
	bool tail_call_seen = false;
	bool seen_exit = false;
	u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
	int i, excnt = 0;
	int ilen, proglen = 0;
	u8 *prog = temp;
	int err;

	detect_reg_usage(insn, insn_cnt, callee_regs_used,
			 &tail_call_seen);

	/* tail call's presence in current prog implies it is reachable */
	tail_call_reachable |= tail_call_seen;

	emit_prologue(&prog, bpf_prog->aux->stack_depth,
		      bpf_prog_was_classic(bpf_prog), tail_call_reachable,
		      bpf_prog->aux->func_idx != 0);
	push_callee_regs(&prog, callee_regs_used);

	ilen = prog - temp;
	if (rw_image)
		memcpy(rw_image + proglen, temp, ilen);
	proglen += ilen;
	addrs[0] = proglen;
	prog = temp;

	for (i = 1; i <= insn_cnt; i++, insn++) {
		const s32 imm32 = insn->imm;
		u32 dst_reg = insn->dst_reg;
		u32 src_reg = insn->src_reg;
		u8 b2 = 0, b3 = 0;
		u8 *start_of_ldx;
		s64 jmp_offset;
		u8 jmp_cond;
		u8 *func;
		int nops;

		switch (insn->code) {
			/* ALU */
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU64 | BPF_ADD | BPF_X:
		case BPF_ALU64 | BPF_SUB | BPF_X:
		case BPF_ALU64 | BPF_AND | BPF_X:
		case BPF_ALU64 | BPF_OR | BPF_X:
		case BPF_ALU64 | BPF_XOR | BPF_X:
			maybe_emit_mod(&prog, dst_reg, src_reg,
				       BPF_CLASS(insn->code) == BPF_ALU64);
			b2 = simple_alu_opcodes[BPF_OP(insn->code)];
			EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
			break;

		case BPF_ALU64 | BPF_MOV | BPF_X:
		case BPF_ALU | BPF_MOV | BPF_X:
			emit_mov_reg(&prog,
				     BPF_CLASS(insn->code) == BPF_ALU64,
				     dst_reg, src_reg);
			break;

			/* neg dst */
		case BPF_ALU | BPF_NEG:
		case BPF_ALU64 | BPF_NEG:
			maybe_emit_1mod(&prog, dst_reg,
					BPF_CLASS(insn->code) == BPF_ALU64);
			EMIT2(0xF7, add_1reg(0xD8, dst_reg));
			break;

		case BPF_ALU | BPF_ADD | BPF_K:
		case BPF_ALU | BPF_SUB | BPF_K:
		case BPF_ALU | BPF_AND | BPF_K:
		case BPF_ALU | BPF_OR | BPF_K:
		case BPF_ALU | BPF_XOR | BPF_K:
		case BPF_ALU64 | BPF_ADD | BPF_K:
		case BPF_ALU64 | BPF_SUB | BPF_K:
		case BPF_ALU64 | BPF_AND | BPF_K:
		case BPF_ALU64 | BPF_OR | BPF_K:
		case BPF_ALU64 | BPF_XOR | BPF_K:
			maybe_emit_1mod(&prog, dst_reg,
					BPF_CLASS(insn->code) == BPF_ALU64);

			/*
			 * b3 holds 'normal' opcode, b2 short form only valid
			 * in case dst is eax/rax.
			 */
			switch (BPF_OP(insn->code)) {
			case BPF_ADD:
				b3 = 0xC0;
				b2 = 0x05;
				break;
			case BPF_SUB:
				b3 = 0xE8;
				b2 = 0x2D;
				break;
			case BPF_AND:
				b3 = 0xE0;
				b2 = 0x25;
				break;
			case BPF_OR:
				b3 = 0xC8;
				b2 = 0x0D;
				break;
			case BPF_XOR:
				b3 = 0xF0;
				b2 = 0x35;
				break;
			}

			if (is_imm8(imm32))
				EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
			else if (is_axreg(dst_reg))
				EMIT1_off32(b2, imm32);
			else
				EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
			break;

		case BPF_ALU64 | BPF_MOV | BPF_K:
		case BPF_ALU | BPF_MOV | BPF_K:
			emit_mov_imm32(&prog, BPF_CLASS(insn->code) == BPF_ALU64,
				       dst_reg, imm32);
			break;

		case BPF_LD | BPF_IMM | BPF_DW:
			emit_mov_imm64(&prog, dst_reg, insn[1].imm, insn[0].imm);
			insn++;
			i++;
			break;

			/* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
		case BPF_ALU | BPF_MOD | BPF_X:
		case BPF_ALU | BPF_DIV | BPF_X:
		case BPF_ALU | BPF_MOD | BPF_K:
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU64 | BPF_MOD | BPF_X:
		case BPF_ALU64 | BPF_DIV | BPF_X:
		case BPF_ALU64 | BPF_MOD | BPF_K:
		case BPF_ALU64 | BPF_DIV | BPF_K: {
			bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;

			if (dst_reg != BPF_REG_0)
				EMIT1(0x50); /* push rax */
			if (dst_reg != BPF_REG_3)
				EMIT1(0x52); /* push rdx */

			if (BPF_SRC(insn->code) == BPF_X) {
				if (src_reg == BPF_REG_0 ||
				    src_reg == BPF_REG_3) {
					/* mov r11, src_reg */
					EMIT_mov(AUX_REG, src_reg);
					src_reg = AUX_REG;
				}
			} else {
				/* mov r11, imm32 */
				EMIT3_off32(0x49, 0xC7, 0xC3, imm32);
				src_reg = AUX_REG;
			}

			if (dst_reg != BPF_REG_0)
				/* mov rax, dst_reg */
				emit_mov_reg(&prog, is64, BPF_REG_0, dst_reg);

			/*
			 * xor edx, edx
			 * equivalent to 'xor rdx, rdx', but one byte less
			 */
			EMIT2(0x31, 0xd2);

			/* div src_reg */
			maybe_emit_1mod(&prog, src_reg, is64);
			EMIT2(0xF7, add_1reg(0xF0, src_reg));

			if (BPF_OP(insn->code) == BPF_MOD &&
			    dst_reg != BPF_REG_3)
				/* mov dst_reg, rdx */
				emit_mov_reg(&prog, is64, dst_reg, BPF_REG_3);
			else if (BPF_OP(insn->code) == BPF_DIV &&
				 dst_reg != BPF_REG_0)
				/* mov dst_reg, rax */
				emit_mov_reg(&prog, is64, dst_reg, BPF_REG_0);

			if (dst_reg != BPF_REG_3)
				EMIT1(0x5A); /* pop rdx */
			if (dst_reg != BPF_REG_0)
				EMIT1(0x58); /* pop rax */
			break;
		}
		case BPF_ALU | BPF_MUL | BPF_K:
		case BPF_ALU64 | BPF_MUL | BPF_K:
			maybe_emit_mod(&prog, dst_reg, dst_reg,
				       BPF_CLASS(insn->code) == BPF_ALU64);

			if (is_imm8(imm32))
				/* imul dst_reg, dst_reg, imm8 */
				EMIT3(0x6B, add_2reg(0xC0, dst_reg, dst_reg),
				      imm32);
			else
				/* imul dst_reg, dst_reg, imm32 */
				EMIT2_off32(0x69,
					    add_2reg(0xC0, dst_reg, dst_reg),
					    imm32);
			break;

		case BPF_ALU | BPF_MUL | BPF_X:
		case BPF_ALU64 | BPF_MUL | BPF_X:
			maybe_emit_mod(&prog, src_reg, dst_reg,
				       BPF_CLASS(insn->code) == BPF_ALU64);

			/* imul dst_reg, src_reg */
			EMIT3(0x0F, 0xAF, add_2reg(0xC0, src_reg, dst_reg));
			break;

			/* Shifts */
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_K:
		case BPF_ALU | BPF_ARSH | BPF_K:
		case BPF_ALU64 | BPF_LSH | BPF_K:
		case BPF_ALU64 | BPF_RSH | BPF_K:
		case BPF_ALU64 | BPF_ARSH | BPF_K:
			maybe_emit_1mod(&prog, dst_reg,
					BPF_CLASS(insn->code) == BPF_ALU64);

			b3 = simple_alu_opcodes[BPF_OP(insn->code)];
			if (imm32 == 1)
				EMIT2(0xD1, add_1reg(b3, dst_reg));
			else
				EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
			break;

		case BPF_ALU | BPF_LSH | BPF_X:
		case BPF_ALU | BPF_RSH | BPF_X:
		case BPF_ALU | BPF_ARSH | BPF_X:
		case BPF_ALU64 | BPF_LSH | BPF_X:
		case BPF_ALU64 | BPF_RSH | BPF_X:
		case BPF_ALU64 | BPF_ARSH | BPF_X:

			/* Check for bad case when dst_reg == rcx */
			if (dst_reg == BPF_REG_4) {
				/* mov r11, dst_reg */
				EMIT_mov(AUX_REG, dst_reg);
				dst_reg = AUX_REG;
			}

			if (src_reg != BPF_REG_4) { /* common case */
				EMIT1(0x51); /* push rcx */

				/* mov rcx, src_reg */
				EMIT_mov(BPF_REG_4, src_reg);
			}

			/* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */
			maybe_emit_1mod(&prog, dst_reg,
					BPF_CLASS(insn->code) == BPF_ALU64);

			b3 = simple_alu_opcodes[BPF_OP(insn->code)];
			EMIT2(0xD3, add_1reg(b3, dst_reg));

			if (src_reg != BPF_REG_4)
				EMIT1(0x59); /* pop rcx */

			if (insn->dst_reg == BPF_REG_4)
				/* mov dst_reg, r11 */
				EMIT_mov(insn->dst_reg, AUX_REG);
			break;

		case BPF_ALU | BPF_END | BPF_FROM_BE:
			switch (imm32) {
			case 16:
				/* Emit 'ror %ax, 8' to swap lower 2 bytes */
				EMIT1(0x66);
				if (is_ereg(dst_reg))
					EMIT1(0x41);
				EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);

				/* Emit 'movzwl eax, ax' */
				if (is_ereg(dst_reg))
					EMIT3(0x45, 0x0F, 0xB7);
				else
					EMIT2(0x0F, 0xB7);
				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 32:
				/* Emit 'bswap eax' to swap lower 4 bytes */
				if (is_ereg(dst_reg))
					EMIT2(0x41, 0x0F);
				else
					EMIT1(0x0F);
				EMIT1(add_1reg(0xC8, dst_reg));
				break;
			case 64:
				/* Emit 'bswap rax' to swap 8 bytes */
				EMIT3(add_1mod(0x48, dst_reg), 0x0F,
				      add_1reg(0xC8, dst_reg));
				break;
			}
			break;

		case BPF_ALU | BPF_END | BPF_FROM_LE:
			switch (imm32) {
			case 16:
				/*
				 * Emit 'movzwl eax, ax' to zero extend 16-bit
				 * into 64 bit
				 */
				if (is_ereg(dst_reg))
					EMIT3(0x45, 0x0F, 0xB7);
				else
					EMIT2(0x0F, 0xB7);
				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 32:
				/* Emit 'mov eax, eax' to clear upper 32-bits */
				if (is_ereg(dst_reg))
					EMIT1(0x45);
				EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 64:
				/* nop */
				break;
			}
			break;
			/* speculation barrier */
		case BPF_ST | BPF_NOSPEC:
			if (boot_cpu_has(X86_FEATURE_XMM2))
				EMIT_LFENCE();
			break;

			/* ST: *(u8*)(dst_reg + off) = imm */
		case BPF_ST | BPF_MEM | BPF_B:
			if (is_ereg(dst_reg))
				EMIT2(0x41, 0xC6);
			else
				EMIT1(0xC6);
			goto st;
		case BPF_ST | BPF_MEM | BPF_H:
			if (is_ereg(dst_reg))
				EMIT3(0x66, 0x41, 0xC7);
			else
				EMIT2(0x66, 0xC7);
			goto st;
		case BPF_ST | BPF_MEM | BPF_W:
			if (is_ereg(dst_reg))
				EMIT2(0x41, 0xC7);
			else
				EMIT1(0xC7);
			goto st;
		case BPF_ST | BPF_MEM | BPF_DW:
			EMIT2(add_1mod(0x48, dst_reg), 0xC7);

st:			if (is_imm8(insn->off))
				EMIT2(add_1reg(0x40, dst_reg), insn->off);
			else
				EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);

			EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
			break;

			/* STX: *(u8*)(dst_reg + off) = src_reg */
		case BPF_STX | BPF_MEM | BPF_B:
		case BPF_STX | BPF_MEM | BPF_H:
		case BPF_STX | BPF_MEM | BPF_W:
		case BPF_STX | BPF_MEM | BPF_DW:
			emit_stx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
			break;

			/* LDX: dst_reg = *(u8*)(src_reg + off) */
		case BPF_LDX | BPF_MEM | BPF_B:
		case BPF_LDX | BPF_PROBE_MEM | BPF_B:
		case BPF_LDX | BPF_MEM | BPF_H:
		case BPF_LDX | BPF_PROBE_MEM | BPF_H:
		case BPF_LDX | BPF_MEM | BPF_W:
		case BPF_LDX | BPF_PROBE_MEM | BPF_W:
		case BPF_LDX | BPF_MEM | BPF_DW:
		case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
			if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
				/* Though the verifier prevents negative insn->off in BPF_PROBE_MEM
				 * add abs(insn->off) to the limit to make sure that negative
				 * offset won't be an issue.
				 * insn->off is s16, so it won't affect valid pointers.
				 */
				u64 limit = TASK_SIZE_MAX + PAGE_SIZE + abs(insn->off);
				u8 *end_of_jmp1, *end_of_jmp2;

				/* Conservatively check that src_reg + insn->off is a kernel address:
				 * 1. src_reg + insn->off >= limit
				 * 2. src_reg + insn->off doesn't become small positive.
				 * Cannot do src_reg + insn->off >= limit in one branch,
				 * since it needs two spare registers, but JIT has only one.
				 */

				/* movabsq r11, limit */
				EMIT2(add_1mod(0x48, AUX_REG), add_1reg(0xB8, AUX_REG));
				EMIT((u32)limit, 4);
				EMIT(limit >> 32, 4);
				/* cmp src_reg, r11 */
				maybe_emit_mod(&prog, src_reg, AUX_REG, true);
				EMIT2(0x39, add_2reg(0xC0, src_reg, AUX_REG));
				/* if unsigned '<' goto end_of_jmp2 */
				EMIT2(X86_JB, 0);
				end_of_jmp1 = prog;

				/* mov r11, src_reg */
				emit_mov_reg(&prog, true, AUX_REG, src_reg);
				/* add r11, insn->off */
				maybe_emit_1mod(&prog, AUX_REG, true);
				EMIT2_off32(0x81, add_1reg(0xC0, AUX_REG), insn->off);
				/* jmp if not carry to start_of_ldx
				 * Otherwise ERR_PTR(-EINVAL) + 128 will be the user addr
				 * that has to be rejected.
				 */
				EMIT2(0x73 /* JNC */, 0);
				end_of_jmp2 = prog;

				/* xor dst_reg, dst_reg */
				emit_mov_imm32(&prog, false, dst_reg, 0);
				/* jmp byte_after_ldx */
				EMIT2(0xEB, 0);

				/* populate jmp_offset for JB above to jump to xor dst_reg */
				end_of_jmp1[-1] = end_of_jmp2 - end_of_jmp1;
				/* populate jmp_offset for JNC above to jump to start_of_ldx */
				start_of_ldx = prog;
				end_of_jmp2[-1] = start_of_ldx - end_of_jmp2;
			}
			emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
			if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
				struct exception_table_entry *ex;
				u8 *_insn = image + proglen + (start_of_ldx - temp);
				s64 delta;

				/* populate jmp_offset for JMP above */
				start_of_ldx[-1] = prog - start_of_ldx;

				if (!bpf_prog->aux->extable)
					break;

				if (excnt >= bpf_prog->aux->num_exentries) {
					pr_err("ex gen bug\n");
					return -EFAULT;
				}
				ex = &bpf_prog->aux->extable[excnt++];

				delta = _insn - (u8 *)&ex->insn;
				if (!is_simm32(delta)) {
					pr_err("extable->insn doesn't fit into 32-bit\n");
					return -EFAULT;
				}
				/* switch ex to rw buffer for writes */
				ex = (void *)rw_image + ((void *)ex - (void *)image);

				ex->insn = delta;

				ex->data = EX_TYPE_BPF;

				if (dst_reg > BPF_REG_9) {
					pr_err("verifier error\n");
					return -EFAULT;
				}
				/*
				 * Compute size of x86 insn and its target dest x86 register.
				 * ex_handler_bpf() will use lower 8 bits to adjust
				 * pt_regs->ip to jump over this x86 instruction
				 * and upper bits to figure out which pt_regs to zero out.
				 * End result: x86 insn "mov rbx, qword ptr [rax+0x14]"
				 * of 4 bytes will be ignored and rbx will be zero inited.
				 */
				ex->fixup = (prog - start_of_ldx) | (reg2pt_regs[dst_reg] << 8);
			}
			break;

		case BPF_STX | BPF_ATOMIC | BPF_W:
		case BPF_STX | BPF_ATOMIC | BPF_DW:
			if (insn->imm == (BPF_AND | BPF_FETCH) ||
			    insn->imm == (BPF_OR | BPF_FETCH) ||
			    insn->imm == (BPF_XOR | BPF_FETCH)) {
				bool is64 = BPF_SIZE(insn->code) == BPF_DW;
				u32 real_src_reg = src_reg;
				u32 real_dst_reg = dst_reg;
				u8 *branch_target;

				/*
				 * Can't be implemented with a single x86 insn.
				 * Need to do a CMPXCHG loop.
				 */

				/* Will need RAX as a CMPXCHG operand so save R0 */
				emit_mov_reg(&prog, true, BPF_REG_AX, BPF_REG_0);
				if (src_reg == BPF_REG_0)
					real_src_reg = BPF_REG_AX;
				if (dst_reg == BPF_REG_0)
					real_dst_reg = BPF_REG_AX;

				branch_target = prog;
				/* Load old value */
				emit_ldx(&prog, BPF_SIZE(insn->code),
					 BPF_REG_0, real_dst_reg, insn->off);
				/*
				 * Perform the (commutative) operation locally,
				 * put the result in the AUX_REG.
				 */
				emit_mov_reg(&prog, is64, AUX_REG, BPF_REG_0);
				maybe_emit_mod(&prog, AUX_REG, real_src_reg, is64);
				EMIT2(simple_alu_opcodes[BPF_OP(insn->imm)],
				      add_2reg(0xC0, AUX_REG, real_src_reg));
				/* Attempt to swap in new value */
				err = emit_atomic(&prog, BPF_CMPXCHG,
						  real_dst_reg, AUX_REG,
						  insn->off,
						  BPF_SIZE(insn->code));
				if (WARN_ON(err))
					return err;
				/*
				 * ZF tells us whether we won the race. If it's
				 * cleared we need to try again.
				 */
				EMIT2(X86_JNE, -(prog - branch_target) - 2);
				/* Return the pre-modification value */
				emit_mov_reg(&prog, is64, real_src_reg, BPF_REG_0);
				/* Restore R0 after clobbering RAX */
				emit_mov_reg(&prog, true, BPF_REG_0, BPF_REG_AX);
				break;
			}

			err = emit_atomic(&prog, insn->imm, dst_reg, src_reg,
					  insn->off, BPF_SIZE(insn->code));
			if (err)
				return err;
			break;

			/* call */
		case BPF_JMP | BPF_CALL:
			func = (u8 *) __bpf_call_base + imm32;
			if (tail_call_reachable) {
				/* mov rax, qword ptr [rbp - stack_depth - 8] */
				EMIT3_off32(0x48, 0x8B, 0x85,
					    -(bpf_prog->aux->stack_depth + 8));
				if (!imm32 || emit_call(&prog, func, image + addrs[i - 1] + 7))
					return -EINVAL;
			} else {
				if (!imm32 || emit_call(&prog, func, image + addrs[i - 1]))
					return -EINVAL;
			}
			break;
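			/* Note (for illustration): when tail calls are
			 * reachable, the saved tail_call_cnt is reloaded
			 * into rax before the call, so that called
			 * subprograms can re-push it in their prologue;
			 * addrs[i - 1] + 7 accounts for the 7 bytes of
			 * that reload (REX + 8B + ModRM + disp32) when
			 * computing the call's rel32 target.
			 */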
		case BPF_JMP | BPF_TAIL_CALL:
			if (imm32)
				emit_bpf_tail_call_direct(&bpf_prog->aux->poke_tab[imm32 - 1],
							  &prog, image + addrs[i - 1],
							  callee_regs_used,
							  bpf_prog->aux->stack_depth,
							  ctx);
			else
				emit_bpf_tail_call_indirect(&prog,
							    callee_regs_used,
							    bpf_prog->aux->stack_depth,
							    image + addrs[i - 1],
							    ctx);
			break;

			/* cond jump */
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JNE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JLT | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JLE | BPF_X:
		case BPF_JMP | BPF_JSGT | BPF_X:
		case BPF_JMP | BPF_JSLT | BPF_X:
		case BPF_JMP | BPF_JSGE | BPF_X:
		case BPF_JMP | BPF_JSLE | BPF_X:
		case BPF_JMP32 | BPF_JEQ | BPF_X:
		case BPF_JMP32 | BPF_JNE | BPF_X:
		case BPF_JMP32 | BPF_JGT | BPF_X:
		case BPF_JMP32 | BPF_JLT | BPF_X:
		case BPF_JMP32 | BPF_JGE | BPF_X:
		case BPF_JMP32 | BPF_JLE | BPF_X:
		case BPF_JMP32 | BPF_JSGT | BPF_X:
		case BPF_JMP32 | BPF_JSLT | BPF_X:
		case BPF_JMP32 | BPF_JSGE | BPF_X:
		case BPF_JMP32 | BPF_JSLE | BPF_X:
			/* cmp dst_reg, src_reg */
			maybe_emit_mod(&prog, dst_reg, src_reg,
				       BPF_CLASS(insn->code) == BPF_JMP);
			EMIT2(0x39, add_2reg(0xC0, dst_reg, src_reg));
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JSET | BPF_X:
		case BPF_JMP32 | BPF_JSET | BPF_X:
			/* test dst_reg, src_reg */
			maybe_emit_mod(&prog, dst_reg, src_reg,
				       BPF_CLASS(insn->code) == BPF_JMP);
			EMIT2(0x85, add_2reg(0xC0, dst_reg, src_reg));
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP32 | BPF_JSET | BPF_K:
			/* test dst_reg, imm32 */
			maybe_emit_1mod(&prog, dst_reg,
					BPF_CLASS(insn->code) == BPF_JMP);
			EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JNE | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JLT | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JLE | BPF_K:
		case BPF_JMP | BPF_JSGT | BPF_K:
		case BPF_JMP | BPF_JSLT | BPF_K:
		case BPF_JMP | BPF_JSGE | BPF_K:
		case BPF_JMP | BPF_JSLE | BPF_K:
		case BPF_JMP32 | BPF_JEQ | BPF_K:
		case BPF_JMP32 | BPF_JNE | BPF_K:
		case BPF_JMP32 | BPF_JGT | BPF_K:
		case BPF_JMP32 | BPF_JLT | BPF_K:
		case BPF_JMP32 | BPF_JGE | BPF_K:
		case BPF_JMP32 | BPF_JLE | BPF_K:
		case BPF_JMP32 | BPF_JSGT | BPF_K:
		case BPF_JMP32 | BPF_JSLT | BPF_K:
		case BPF_JMP32 | BPF_JSGE | BPF_K:
		case BPF_JMP32 | BPF_JSLE | BPF_K:
			/* test dst_reg, dst_reg to save one extra byte */
			if (imm32 == 0) {
				maybe_emit_mod(&prog, dst_reg, dst_reg,
					       BPF_CLASS(insn->code) == BPF_JMP);
				EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg));
				goto emit_cond_jmp;
			}

			/* cmp dst_reg, imm8/32 */
			maybe_emit_1mod(&prog, dst_reg,
					BPF_CLASS(insn->code) == BPF_JMP);

			if (is_imm8(imm32))
				EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
			else
				EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32);

emit_cond_jmp:		/* Convert BPF opcode to x86 */
			switch (BPF_OP(insn->code)) {
			case BPF_JEQ:
				jmp_cond = X86_JE;
				break;
			case BPF_JSET:
			case BPF_JNE:
				jmp_cond = X86_JNE;
				break;
			case BPF_JGT:
				/* GT is unsigned '>', JA in x86 */
				jmp_cond = X86_JA;
				break;
			case BPF_JLT:
				/* LT is unsigned '<', JB in x86 */
				jmp_cond = X86_JB;
				break;
			case BPF_JGE:
				/* GE is unsigned '>=', JAE in x86 */
				jmp_cond = X86_JAE;
				break;
			case BPF_JLE:
				/* LE is unsigned '<=', JBE in x86 */
				jmp_cond = X86_JBE;
				break;
			case BPF_JSGT:
				/* Signed '>', GT in x86 */
				jmp_cond = X86_JG;
				break;
			case BPF_JSLT:
				/* Signed '<', LT in x86 */
				jmp_cond = X86_JL;
				break;
			case BPF_JSGE:
				/* Signed '>=', GE in x86 */
				jmp_cond = X86_JGE;
				break;
			case BPF_JSLE:
				/* Signed '<=', LE in x86 */
				jmp_cond = X86_JLE;
				break;
			default: /* to silence GCC warning */
				return -EFAULT;
			}
			jmp_offset = addrs[i + insn->off] - addrs[i];
			if (is_imm8(jmp_offset)) {
				if (jmp_padding) {
					/* To keep the jmp_offset valid, the extra bytes are
					 * padded before the jump insn, so we subtract the
					 * 2 bytes of jmp_cond insn from INSN_SZ_DIFF.
					 *
					 * If the previous pass already emits an imm8
					 * jmp_cond, then this BPF insn won't shrink, so
					 * "nops" is 0.
					 *
					 * On the other hand, if the previous pass emits an
					 * imm32 jmp_cond, the extra 4 bytes(*) is padded to
					 * keep the image from shrinking further.
					 *
					 * (*) imm32 jmp_cond is 6 bytes, and imm8 jmp_cond
					 *     is 2 bytes, so the size difference is 4 bytes.
					 */
					nops = INSN_SZ_DIFF - 2;
					if (nops != 0 && nops != 4) {
						pr_err("unexpected jmp_cond padding: %d bytes\n",
						       nops);
						return -EFAULT;
					}
					emit_nops(&prog, nops);
				}
				EMIT2(jmp_cond, jmp_offset);
			} else if (is_simm32(jmp_offset)) {
				EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
			} else {
				pr_err("cond_jmp gen bug %llx\n", jmp_offset);
				return -EFAULT;
			}

			break;

		case BPF_JMP | BPF_JA:
			if (insn->off == -1)
				/* -1 jmp instructions will always jump
				 * backwards two bytes. Explicitly handling
				 * this case avoids wasting too many passes
				 * when there are long sequences of replaced
				 * dead code.
				 */
				jmp_offset = -2;
			else
				jmp_offset = addrs[i + insn->off] - addrs[i];

			if (!jmp_offset) {
				/*
				 * If jmp_padding is enabled, the extra nops will
				 * be inserted. Otherwise, optimize out nop jumps.
				 */
				if (jmp_padding) {
					/* There are 3 possible conditions.
					 * (1) This BPF_JA is already optimized out in
					 *     the previous run, so there is no need
					 *     to pad any extra byte (0 byte).
					 * (2) The previous pass emits an imm8 jmp,
					 *     so we pad 2 bytes to match the previous
					 *     insn size.
					 * (3) Similarly, the previous pass emits an
					 *     imm32 jmp, and 5 bytes is padded.
					 */
					nops = INSN_SZ_DIFF;
					if (nops != 0 && nops != 2 && nops != 5) {
						pr_err("unexpected nop jump padding: %d bytes\n",
						       nops);
						return -EFAULT;
					}
					emit_nops(&prog, nops);
				}
				break;
			}
emit_jmp:
			if (is_imm8(jmp_offset)) {
				if (jmp_padding) {
					/* To avoid breaking jmp_offset, the extra bytes
					 * are padded before the actual jmp insn, so
					 * 2 bytes is subtracted from INSN_SZ_DIFF.
					 *
					 * If the previous pass already emits an imm8
					 * jmp, there is nothing to pad (0 byte).
					 *
					 * If it emits an imm32 jmp (5 bytes) previously
					 * and now an imm8 jmp (2 bytes), then we pad
					 * (5 - 2 = 3) bytes to stop the image from
					 * shrinking further.
					 */
					nops = INSN_SZ_DIFF - 2;
					if (nops != 0 && nops != 3) {
						pr_err("unexpected jump padding: %d bytes\n",
						       nops);
						return -EFAULT;
					}
					emit_nops(&prog, INSN_SZ_DIFF - 2);
				}
				EMIT2(0xEB, jmp_offset);
			} else if (is_simm32(jmp_offset)) {
				EMIT1_off32(0xE9, jmp_offset);
			} else {
				pr_err("jmp gen bug %llx\n", jmp_offset);
				return -EFAULT;
			}
			break;

		case BPF_JMP | BPF_EXIT:
			if (seen_exit) {
				jmp_offset = ctx->cleanup_addr - addrs[i];
				goto emit_jmp;
			}
			seen_exit = true;
			/* Update cleanup_addr */
			ctx->cleanup_addr = proglen;
			pop_callee_regs(&prog, callee_regs_used);
			EMIT1(0xC9);         /* leave */
			EMIT1(0xC3);         /* ret */
			break;

		default:
			/*
			 * By design x86-64 JIT should support all BPF instructions.
			 * This error will be seen if new instruction was added
			 * to the interpreter, but not to the JIT, or if there is
			 * junk in bpf_prog.
			 */
			pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
			return -EINVAL;
		}

		ilen = prog - temp;
		if (ilen > BPF_MAX_INSN_SIZE) {
			pr_err("bpf_jit: fatal insn size error\n");
			return -EFAULT;
		}

		if (image) {
			/*
			 * When populating the image, assert that:
			 *
			 *  i) We do not write beyond the allocated space, and
			 * ii) addrs[i] did not change from the prior run, in order
			 *     to validate assumptions made for computing branch
			 *     displacements.
			 */
			if (unlikely(proglen + ilen > oldproglen ||
				     proglen + ilen != addrs[i])) {
				pr_err("bpf_jit: fatal error\n");
				return -EFAULT;
			}
			memcpy(rw_image + proglen, temp, ilen);
		}
		proglen += ilen;
		addrs[i] = proglen;
		prog = temp;
	}

	if (image && excnt != bpf_prog->aux->num_exentries) {
		pr_err("extable is not populated\n");
		return -EFAULT;
	}
	return proglen;
}
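/*
 * Note (for illustration): each insn is first JITed into the on-stack
 * 'temp' buffer and only then copied into the writable image, so an
 * oversized insn is caught before it can overrun the image. The caller
 * is expected to run do_jit() repeatedly until addrs[] and proglen
 * converge; the assertions above rely on that.
 */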
static void save_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
		      int stack_size)
{
	int i;
	/* Store function arguments to stack.
	 * For a function that accepts two pointers the sequence will be:
	 * mov QWORD PTR [rbp-0x10],rdi
	 * mov QWORD PTR [rbp-0x8],rsi
	 */
	for (i = 0; i < min(nr_args, 6); i++)
		emit_stx(prog, bytes_to_bpf_size(m->arg_size[i]),
			 BPF_REG_FP,
			 i == 5 ? X86_REG_R9 : BPF_REG_1 + i,
			 -(stack_size - i * 8));
}

static void restore_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
			 int stack_size)
{
	int i;

	/* Restore function arguments from stack.
	 * For a function that accepts two pointers the sequence will be:
	 * EMIT4(0x48, 0x8B, 0x7D, 0xF0); mov rdi,QWORD PTR [rbp-0x10]
	 * EMIT4(0x48, 0x8B, 0x75, 0xF8); mov rsi,QWORD PTR [rbp-0x8]
	 */
	for (i = 0; i < min(nr_args, 6); i++)
		emit_ldx(prog, bytes_to_bpf_size(m->arg_size[i]),
			 i == 5 ? X86_REG_R9 : BPF_REG_1 + i,
			 BPF_REG_FP,
			 -(stack_size - i * 8));
}

static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
			   struct bpf_prog *p, int stack_size, bool save_ret)
{
	u8 *prog = *pprog;
	u8 *jmp_insn;

	/* arg1: mov rdi, progs[i] */
	emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
	if (emit_call(&prog,
		      p->aux->sleepable ? __bpf_prog_enter_sleepable :
		      __bpf_prog_enter, prog))
		return -EINVAL;
	/* remember prog start time returned by __bpf_prog_enter */
	emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);

	/* if (__bpf_prog_enter*(prog) == 0)
	 *	goto skip_exec_of_prog;
	 */
	EMIT3(0x48, 0x85, 0xC0); /* test rax,rax */
	/* emit 2 nops that will be replaced with JE insn */
	jmp_insn = prog;
	emit_nops(&prog, 2);

	/* arg1: lea rdi, [rbp - stack_size] */
	EMIT4(0x48, 0x8D, 0x7D, -stack_size);
	/* arg2: progs[i]->insnsi for interpreter */
	if (!p->jited)
		emit_mov_imm64(&prog, BPF_REG_2,
			       (long) p->insnsi >> 32,
			       (u32) (long) p->insnsi);
	/* call JITed bpf program or interpreter */
	if (emit_call(&prog, p->bpf_func, prog))
		return -EINVAL;

	/*
	 * BPF_TRAMP_MODIFY_RETURN trampolines can modify the return
	 * of the previous call which is then passed on the stack to
	 * the next BPF program.
	 *
	 * BPF_TRAMP_FENTRY trampoline may need to return the return
	 * value of BPF_PROG_TYPE_STRUCT_OPS prog.
	 */
	if (save_ret)
		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);

	/* replace 2 nops with JE insn, since jmp target is known */
	jmp_insn[0] = X86_JE;
	jmp_insn[1] = prog - jmp_insn - 2;

	/* arg1: mov rdi, progs[i] */
	emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
	/* arg2: mov rsi, rbx <- start time in nsec */
	emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
	if (emit_call(&prog,
		      p->aux->sleepable ? __bpf_prog_exit_sleepable :
		      __bpf_prog_exit, prog))
		return -EINVAL;

	*pprog = prog;
	return 0;
}

static void emit_align(u8 **pprog, u32 align)
{
	u8 *target, *prog = *pprog;

	target = PTR_ALIGN(prog, align);
	if (target != prog)
		emit_nops(&prog, target - prog);

	*pprog = prog;
}

static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond)
{
	u8 *prog = *pprog;
	s64 offset;

	offset = func - (ip + 2 + 4);
	if (!is_simm32(offset)) {
		pr_err("Target %p is out of range\n", func);
		return -EINVAL;
	}
	EMIT2_off32(0x0F, jmp_cond + 0x10, offset);
	*pprog = prog;
	return 0;
}

static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
		      struct bpf_tramp_progs *tp, int stack_size,
		      bool save_ret)
{
	int i;
	u8 *prog = *pprog;

	for (i = 0; i < tp->nr_progs; i++) {
		if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size,
				    save_ret))
			return -EINVAL;
	}
	*pprog = prog;
	return 0;
}

static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
			      struct bpf_tramp_progs *tp, int stack_size,
			      u8 **branches)
{
	u8 *prog = *pprog;
	int i;

	/* The first fmod_ret program will receive a garbage return value.
	 * Set this to 0 to avoid confusing the program.
	 */
	emit_mov_imm32(&prog, false, BPF_REG_0, 0);
	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
	for (i = 0; i < tp->nr_progs; i++) {
		if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size, true))
			return -EINVAL;

		/* mod_ret prog stored return value into [rbp - 8]. Emit:
		 * if (*(u64 *)(rbp - 8) != 0)
		 *	goto do_fexit;
		 */
		/* cmp QWORD PTR [rbp - 0x8], 0x0 */
		EMIT4(0x48, 0x83, 0x7d, 0xf8); EMIT1(0x00);

		/* Save the location of the branch and generate 6 nops
		 * (4 bytes for an offset and 2 bytes for the jump). These nops
		 * are replaced with a conditional jump once do_fexit (i.e. the
		 * start of the fexit invocation) is finalized.
		 */
		branches[i] = prog;
		emit_nops(&prog, 4 + 2);
	}

	*pprog = prog;
	return 0;
}

static bool is_valid_bpf_tramp_flags(unsigned int flags)
{
	if ((flags & BPF_TRAMP_F_RESTORE_REGS) &&
	    (flags & BPF_TRAMP_F_SKIP_FRAME))
		return false;

	/*
	 * BPF_TRAMP_F_RET_FENTRY_RET is only used by bpf_struct_ops,
	 * and it must be used alone.
	 */
	if ((flags & BPF_TRAMP_F_RET_FENTRY_RET) &&
	    (flags & ~BPF_TRAMP_F_RET_FENTRY_RET))
		return false;

	return true;
}

/* Example:
 * __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
 * its 'struct btf_func_model' will be nr_args=2
 * The assembly code when eth_type_trans is executing after trampoline:
 *
 * push rbp
 * mov rbp, rsp
 * sub rsp, 16                     // space for skb and dev
 * push rbx                        // temp regs to pass start time
 * mov qword ptr [rbp - 16], rdi   // save skb pointer to stack
 * mov qword ptr [rbp - 8], rsi    // save dev pointer to stack
 * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
 * mov rbx, rax                    // remember start time if bpf stats are enabled
 * lea rdi, [rbp - 16]             // R1==ctx of bpf prog
 * call addr_of_jited_FENTRY_prog
 * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
 * mov rsi, rbx                    // prog start time
 * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
 * mov rdi, qword ptr [rbp - 16]   // restore skb pointer from stack
 * mov rsi, qword ptr [rbp - 8]    // restore dev pointer from stack
 * pop rbx
 * leave
 * ret
 *
 * eth_type_trans has 5 byte nop at the beginning. These 5 bytes will be
 * replaced with 'call generated_bpf_trampoline'. When it returns
 * eth_type_trans will continue executing with original skb and dev pointers.
 *
 * The assembly code when eth_type_trans is called from trampoline:
 *
 * push rbp
 * mov rbp, rsp
 * sub rsp, 24                     // space for skb, dev, return value
 * push rbx                        // temp regs to pass start time
 * mov qword ptr [rbp - 24], rdi   // save skb pointer to stack
 * mov qword ptr [rbp - 16], rsi   // save dev pointer to stack
 * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
 * mov rbx, rax                    // remember start time if bpf stats are enabled
 * lea rdi, [rbp - 24]             // R1==ctx of bpf prog
 * call addr_of_jited_FENTRY_prog  // bpf prog can access skb and dev
 * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
 * mov rsi, rbx                    // prog start time
 * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
 * mov rdi, qword ptr [rbp - 24]   // restore skb pointer from stack
 * mov rsi, qword ptr [rbp - 16]   // restore dev pointer from stack
 * call eth_type_trans+5           // execute body of eth_type_trans
 * mov qword ptr [rbp - 8], rax    // save return value
 * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
 * mov rbx, rax                    // remember start time if bpf stats are enabled
 * lea rdi, [rbp - 24]             // R1==ctx of bpf prog
 * call addr_of_jited_FEXIT_prog   // bpf prog can access skb, dev, return value
 * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
 * mov rsi, rbx                    // prog start time
 * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
 * mov rax, qword ptr [rbp - 8]    // restore eth_type_trans's return value
 * pop rbx
 * leave
 * add rsp, 8                      // skip eth_type_trans's frame
 * ret                             // return to its caller
 */
int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
				const struct btf_func_model *m, u32 flags,
				struct bpf_tramp_progs *tprogs,
				void *orig_call)
{
	int ret, i, nr_args = m->nr_args;
	int regs_off, ip_off, args_off, stack_size = nr_args * 8;
	struct bpf_tramp_progs *fentry = &tprogs[BPF_TRAMP_FENTRY];
	struct bpf_tramp_progs *fexit = &tprogs[BPF_TRAMP_FEXIT];
	struct bpf_tramp_progs *fmod_ret = &tprogs[BPF_TRAMP_MODIFY_RETURN];
	u8 **branches = NULL;
	u8 *prog;
	bool save_ret;

	/* x86-64 supports up to 6 arguments. 7+ can be added in the future */
	if (nr_args > 6)
		return -ENOTSUPP;
	if (flags & BPF_TRAMP_F_SKIP_FRAME) {
		/* skip patched call instruction and point orig_call to actual
		 * body of the kernel function.
		 */
		if (is_endbr(*(u32 *)orig_call))
			orig_call += ENDBR_INSN_SIZE;
		orig_call += X86_PATCH_SIZE;
	}

	prog = image;

	EMIT_ENDBR();
	EMIT1(0x55);		 /* push rbp */
	EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
	EMIT4(0x48, 0x83, 0xEC, stack_size); /* sub rsp, stack_size */
	EMIT1(0x53);		 /* push rbx */

	/* Store number of arguments of the traced function:
	 *   mov rax, nr_args
	 *   mov QWORD PTR [rbp - args_off], rax
	 */
	emit_mov_imm64(&prog, BPF_REG_0, 0, (u32) nr_args);
	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -args_off);

	if (flags & BPF_TRAMP_F_IP_ARG) {
		/* Store IP address of the traced function:
		 *   mov rax, QWORD PTR [rbp + 8]
		 *   sub rax, X86_PATCH_SIZE
		 *   mov QWORD PTR [rbp - ip_off], rax
		 */
		emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, 8);
		EMIT4(0x48, 0x83, 0xe8, X86_PATCH_SIZE);
		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -ip_off);
	}

	save_regs(m, &prog, nr_args, regs_off);
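	/* For illustration (assuming the standard x86-64 calling convention,
	 * argument registers rdi/rsi/rdx/rcx/r8/r9): with nr_args == 2,
	 * save_regs() above spills the register arguments into the ctx area
	 * described by the stack layout comment, roughly:
	 *
	 *	mov QWORD PTR [rbp - regs_off], rdi	// arg1
	 *	mov QWORD PTR [rbp - regs_off + 8], rsi	// arg2
	 */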
	if (flags & BPF_TRAMP_F_CALL_ORIG) {
		/* arg1: mov rdi, im */
		emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
		if (emit_call(&prog, __bpf_tramp_enter, prog)) {
			ret = -EINVAL;
			goto cleanup;
		}
	}

	if (fentry->nr_progs)
		if (invoke_bpf(m, &prog, fentry, regs_off,
			       flags & BPF_TRAMP_F_RET_FENTRY_RET))
			return -EINVAL;

	if (fmod_ret->nr_progs) {
		branches = kcalloc(fmod_ret->nr_progs, sizeof(u8 *),
				   GFP_KERNEL);
		if (!branches)
			return -ENOMEM;

		if (invoke_bpf_mod_ret(m, &prog, fmod_ret, regs_off,
				       branches)) {
			ret = -EINVAL;
			goto cleanup;
		}
	}

	if (flags & BPF_TRAMP_F_CALL_ORIG) {
		restore_regs(m, &prog, nr_args, regs_off);

		/* call original function */
		if (emit_call(&prog, orig_call, prog)) {
			ret = -EINVAL;
			goto cleanup;
		}
		/* remember return value on the stack for the bpf prog to access */
		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
		im->ip_after_call = prog;
		memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
		prog += X86_PATCH_SIZE;
	}

	if (fmod_ret->nr_progs) {
		/* From Intel 64 and IA-32 Architectures Optimization
		 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
		 * Coding Rule 11: All branch targets should be 16-byte
		 * aligned.
		 */
		emit_align(&prog, 16);
		/* Update the branches saved in invoke_bpf_mod_ret with the
		 * aligned address of do_fexit.
		 */
		for (i = 0; i < fmod_ret->nr_progs; i++)
			emit_cond_near_jump(&branches[i], prog, branches[i],
					    X86_JNE);
	}

	if (fexit->nr_progs)
		if (invoke_bpf(m, &prog, fexit, regs_off, false)) {
			ret = -EINVAL;
			goto cleanup;
		}

	if (flags & BPF_TRAMP_F_RESTORE_REGS)
		restore_regs(m, &prog, nr_args, regs_off);

	/* This needs to be done regardless. If there were fmod_ret programs,
	 * the return value is only updated on the stack and still needs to be
	 * restored to R0.
	 */
	if (flags & BPF_TRAMP_F_CALL_ORIG) {
		im->ip_epilogue = prog;
		/* arg1: mov rdi, im */
		emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
		if (emit_call(&prog, __bpf_tramp_exit, prog)) {
			ret = -EINVAL;
			goto cleanup;
		}
	}
	/* restore return value of orig_call or fentry prog back into RAX */
	if (save_ret)
		emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);

	EMIT1(0x5B); /* pop rbx */
	EMIT1(0xC9); /* leave */
	if (flags & BPF_TRAMP_F_SKIP_FRAME)
		/* skip our return address and return to parent */
		EMIT4(0x48, 0x83, 0xC4, 8); /* add rsp, 8 */
	EMIT1(0xC3); /* ret */
	/* Make sure the trampoline generation logic doesn't overflow */
	if (WARN_ON_ONCE(prog > (u8 *)image_end - BPF_INSN_SAFETY)) {
		ret = -EFAULT;
		goto cleanup;
	}
	ret = prog - (u8 *)image;

cleanup:
	kfree(branches);
	return ret;
}
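/* For illustration (a sketch, not literal output): with two sorted targets
 * f0 < f1, emit_bpf_dispatcher() below generates a binary search over the
 * program address held in rdx, roughly:
 *
 *	cmp	rdx, f0		// pivot of range [0, 1]
 *	jg	1f
 *	cmp	rdx, f0		// leaf [0]
 *	je	f0
 *	jmp	rdx		// fall back to indirect jump
 *	<nops to the next 16-byte boundary>
 * 1:	cmp	rdx, f1		// leaf [1]
 *	je	f1
 *	jmp	rdx
 */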
static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs)
{
	u8 *jg_reloc, *prog = *pprog;
	int pivot, err, jg_bytes = 1;
	s64 jg_offset;

	if (a == b) {
		/* Leaf node of recursion, i.e. not a range of indices
		 * anymore.
		 */
		EMIT1(add_1mod(0x48, BPF_REG_3));	/* cmp rdx,func */
		if (!is_simm32(progs[a]))
			return -1;
		EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3),
			    progs[a]);
		err = emit_cond_near_jump(&prog,	/* je func */
					  (void *)progs[a], prog,
					  X86_JE);
		if (err)
			return err;

		emit_indirect_jump(&prog, 2 /* rdx */, prog);

		*pprog = prog;
		return 0;
	}

	/* Not a leaf node, so we pivot, and recursively descend into
	 * the lower and upper ranges.
	 */
	pivot = (b - a) / 2;
	EMIT1(add_1mod(0x48, BPF_REG_3));		/* cmp rdx,func */
	if (!is_simm32(progs[a + pivot]))
		return -1;
	EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3), progs[a + pivot]);

	if (pivot > 2) {				/* jg upper_part */
		/* Require near jump. */
		jg_bytes = 4;
		EMIT2_off32(0x0F, X86_JG + 0x10, 0);
	} else {
		EMIT2(X86_JG, 0);
	}
	jg_reloc = prog;

	err = emit_bpf_dispatcher(&prog, a, a + pivot,	/* emit lower_part */
				  progs);
	if (err)
		return err;

	/* From Intel 64 and IA-32 Architectures Optimization
	 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
	 * Coding Rule 11: All branch targets should be 16-byte
	 * aligned.
	 */
	emit_align(&prog, 16);
	jg_offset = prog - jg_reloc;
	emit_code(jg_reloc - jg_bytes, jg_offset, jg_bytes);

	err = emit_bpf_dispatcher(&prog, a + pivot + 1,	/* emit upper_part */
				  b, progs);
	if (err)
		return err;

	*pprog = prog;
	return 0;
}

static int cmp_ips(const void *a, const void *b)
{
	const s64 *ipa = a;
	const s64 *ipb = b;

	if (*ipa > *ipb)
		return 1;
	if (*ipa < *ipb)
		return -1;
	return 0;
}

int arch_prepare_bpf_dispatcher(void *image, s64 *funcs, int num_funcs)
{
	u8 *prog = image;

	sort(funcs, num_funcs, sizeof(funcs[0]), cmp_ips, NULL);
	return emit_bpf_dispatcher(&prog, 0, num_funcs - 1, funcs);
}
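/* Usage sketch (hypothetical caller, illustrative names only): the generic
 * dispatcher code passes the addresses of the currently attached programs
 * and lets arch_prepare_bpf_dispatcher() sort them and emit the search tree:
 *
 *	s64 ips[] = { (s64)(long)progA->bpf_func, (s64)(long)progB->bpf_func };
 *	int err;
 *
 *	err = arch_prepare_bpf_dispatcher(image, ips, ARRAY_SIZE(ips));
 *
 * progA/progB and err are placeholders; see the dispatcher core in
 * kernel/bpf/dispatcher.c for the real call site.
 */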
struct x64_jit_data {
	struct bpf_binary_header *rw_header;
	struct bpf_binary_header *header;
	int *addrs;
	u8 *image;
	int proglen;
	struct jit_context ctx;
};

#define MAX_PASSES 20
#define PADDING_PASSES (MAX_PASSES - 5)

struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
	struct bpf_binary_header *rw_header = NULL;
	struct bpf_binary_header *header = NULL;
	struct bpf_prog *tmp, *orig_prog = prog;
	struct x64_jit_data *jit_data;
	int proglen, oldproglen = 0;
	struct jit_context ctx = {};
	bool tmp_blinded = false;
	bool extra_pass = false;
	bool padding = false;
	u8 *rw_image = NULL;
	u8 *image = NULL;
	int *addrs;
	int pass;
	int i;

	if (!prog->jit_requested)
		return orig_prog;

	tmp = bpf_jit_blind_constants(prog);
	/*
	 * If blinding was requested and we failed during blinding,
	 * we must fall back to the interpreter.
	 */
	if (IS_ERR(tmp))
		return orig_prog;
	if (tmp != prog) {
		tmp_blinded = true;
		prog = tmp;
	}

	jit_data = prog->aux->jit_data;
	if (!jit_data) {
		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
		if (!jit_data) {
			prog = orig_prog;
			goto out;
		}
		prog->aux->jit_data = jit_data;
	}
	addrs = jit_data->addrs;
	if (addrs) {
		ctx = jit_data->ctx;
		oldproglen = jit_data->proglen;
		image = jit_data->image;
		header = jit_data->header;
		rw_header = jit_data->rw_header;
		rw_image = (void *)rw_header + ((void *)image - (void *)header);
		extra_pass = true;
		padding = true;
		goto skip_init_addrs;
	}
	addrs = kvmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL);
	if (!addrs) {
		prog = orig_prog;
		goto out_addrs;
	}

	/*
	 * Before the first pass, make a rough estimation of addrs[]:
	 * each BPF instruction is translated to less than 64 bytes.
	 */
	for (proglen = 0, i = 0; i <= prog->len; i++) {
		proglen += 64;
		addrs[i] = proglen;
	}
	ctx.cleanup_addr = proglen;
skip_init_addrs:

	/*
	 * The JITed image shrinks with every pass and the loop iterates
	 * until the image stops shrinking. Very large BPF programs
	 * may converge on the last pass. In such a case, do one more
	 * pass to emit the final image.
	 */
	for (pass = 0; pass < MAX_PASSES || image; pass++) {
		if (!padding && pass >= PADDING_PASSES)
			padding = true;
		proglen = do_jit(prog, addrs, image, rw_image, oldproglen, &ctx, padding);
		if (proglen <= 0) {
out_image:
			image = NULL;
			if (header) {
				bpf_arch_text_copy(&header->size, &rw_header->size,
						   sizeof(rw_header->size));
				bpf_jit_binary_pack_free(header, rw_header);
			}
			/* Fall back to interpreter mode */
			prog = orig_prog;
			if (extra_pass) {
				prog->bpf_func = NULL;
				prog->jited = 0;
				prog->jited_len = 0;
			}
			goto out_addrs;
		}
		if (image) {
			if (proglen != oldproglen) {
				pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
				       proglen, oldproglen);
				goto out_image;
			}
			break;
		}
		if (proglen == oldproglen) {
			/*
			 * The number of entries in extable is the number of BPF_LDX
			 * insns that access kernel memory via "pointer to BTF type".
			 * The verifier changed their opcode from LDX|MEM|size
			 * to LDX|PROBE_MEM|size to make JITing easier.
			 */
			u32 align = __alignof__(struct exception_table_entry);
			u32 extable_size = prog->aux->num_exentries *
				sizeof(struct exception_table_entry);

			/* allocate module memory for x86 insns and extable */
			header = bpf_jit_binary_pack_alloc(roundup(proglen, align) + extable_size,
							   &image, align, &rw_header, &rw_image,
							   jit_fill_hole);
			if (!header) {
				prog = orig_prog;
				goto out_addrs;
			}
			prog->aux->extable = (void *) image + roundup(proglen, align);
		}
		oldproglen = proglen;
		cond_resched();
	}

	if (bpf_jit_enable > 1)
		bpf_jit_dump(prog->len, proglen, pass + 1, image);

	if (image) {
		if (!prog->is_func || extra_pass) {
			/*
			 * bpf_jit_binary_pack_finalize fails in two scenarios:
			 *   1) header is not pointing to proper module memory;
			 *   2) the arch doesn't support bpf_arch_text_copy().
			 *
			 * Both cases are serious bugs and justify WARN_ON.
			 */
			if (WARN_ON(bpf_jit_binary_pack_finalize(prog, header, rw_header))) {
				/* header has been freed */
				header = NULL;
				goto out_image;
			}

			bpf_tail_call_direct_fixup(prog);
		} else {
			jit_data->addrs = addrs;
			jit_data->ctx = ctx;
			jit_data->proglen = proglen;
			jit_data->image = image;
			jit_data->header = header;
			jit_data->rw_header = rw_header;
		}
		prog->bpf_func = (void *)image;
		prog->jited = 1;
		prog->jited_len = proglen;
	} else {
		prog = orig_prog;
	}

	if (!image || !prog->is_func || extra_pass) {
		if (image)
			bpf_prog_fill_jited_linfo(prog, addrs + 1);
out_addrs:
		kvfree(addrs);
		kfree(jit_data);
		prog->aux->jit_data = NULL;
	}
out:
	if (tmp_blinded)
		bpf_jit_prog_release_other(prog, prog == orig_prog ?
					   tmp : orig_prog);
	return prog;
}

bool bpf_jit_supports_kfunc_call(void)
{
	return true;
}

void *bpf_arch_text_copy(void *dst, void *src, size_t len)
{
	if (text_poke_copy(dst, src, len) == NULL)
		return ERR_PTR(-EINVAL);
	return dst;
}
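/* Usage sketch (illustrative): since bpf_arch_text_copy() reports failure
 * via ERR_PTR() rather than NULL, callers check the result with the usual
 * kernel error-pointer helpers, e.g.
 *
 *	void *ret = bpf_arch_text_copy(dst, src, len);
 *
 *	if (IS_ERR(ret))
 *		return PTR_ERR(ret);
 */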