1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * BPF JIT compiler 4 * 5 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com) 6 * Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com 7 */ 8 #include <linux/netdevice.h> 9 #include <linux/filter.h> 10 #include <linux/if_vlan.h> 11 #include <linux/bpf.h> 12 #include <linux/memory.h> 13 #include <linux/sort.h> 14 #include <asm/extable.h> 15 #include <asm/set_memory.h> 16 #include <asm/nospec-branch.h> 17 #include <asm/text-patching.h> 18 19 static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len) 20 { 21 if (len == 1) 22 *ptr = bytes; 23 else if (len == 2) 24 *(u16 *)ptr = bytes; 25 else { 26 *(u32 *)ptr = bytes; 27 barrier(); 28 } 29 return ptr + len; 30 } 31 32 #define EMIT(bytes, len) \ 33 do { prog = emit_code(prog, bytes, len); } while (0) 34 35 #define EMIT1(b1) EMIT(b1, 1) 36 #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2) 37 #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3) 38 #define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4) 39 40 #define EMIT1_off32(b1, off) \ 41 do { EMIT1(b1); EMIT(off, 4); } while (0) 42 #define EMIT2_off32(b1, b2, off) \ 43 do { EMIT2(b1, b2); EMIT(off, 4); } while (0) 44 #define EMIT3_off32(b1, b2, b3, off) \ 45 do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0) 46 #define EMIT4_off32(b1, b2, b3, b4, off) \ 47 do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0) 48 49 #ifdef CONFIG_X86_KERNEL_IBT 50 #define EMIT_ENDBR() EMIT(gen_endbr(), 4) 51 #else 52 #define EMIT_ENDBR() 53 #endif 54 55 static bool is_imm8(int value) 56 { 57 return value <= 127 && value >= -128; 58 } 59 60 static bool is_simm32(s64 value) 61 { 62 return value == (s64)(s32)value; 63 } 64 65 static bool is_uimm32(u64 value) 66 { 67 return value == (u64)(u32)value; 68 } 69 70 /* mov dst, src */ 71 #define EMIT_mov(DST, SRC) \ 72 do { \ 73 if (DST != SRC) \ 74 EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \ 75 } while (0) 76 77 static int bpf_size_to_x86_bytes(int bpf_size) 78 { 79 if (bpf_size == BPF_W) 80 return 4; 81 else if (bpf_size == BPF_H) 82 return 2; 83 else if (bpf_size == BPF_B) 84 return 1; 85 else if (bpf_size == BPF_DW) 86 return 4; /* imm32 */ 87 else 88 return 0; 89 } 90 91 /* 92 * List of x86 cond jumps opcodes (. + s8) 93 * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32) 94 */ 95 #define X86_JB 0x72 96 #define X86_JAE 0x73 97 #define X86_JE 0x74 98 #define X86_JNE 0x75 99 #define X86_JBE 0x76 100 #define X86_JA 0x77 101 #define X86_JL 0x7C 102 #define X86_JGE 0x7D 103 #define X86_JLE 0x7E 104 #define X86_JG 0x7F 105 106 /* Pick a register outside of BPF range for JIT internal work */ 107 #define AUX_REG (MAX_BPF_JIT_REG + 1) 108 #define X86_REG_R9 (MAX_BPF_JIT_REG + 2) 109 110 /* 111 * The following table maps BPF registers to x86-64 registers. 112 * 113 * x86-64 register R12 is unused, since if used as base address 114 * register in load/store instructions, it always needs an 115 * extra byte of encoding and is callee saved. 116 * 117 * x86-64 register R9 is not used by BPF programs, but can be used by BPF 118 * trampoline. x86-64 register R10 is used for blinding (if enabled). 119 */ 120 static const int reg2hex[] = { 121 [BPF_REG_0] = 0, /* RAX */ 122 [BPF_REG_1] = 7, /* RDI */ 123 [BPF_REG_2] = 6, /* RSI */ 124 [BPF_REG_3] = 2, /* RDX */ 125 [BPF_REG_4] = 1, /* RCX */ 126 [BPF_REG_5] = 0, /* R8 */ 127 [BPF_REG_6] = 3, /* RBX callee saved */ 128 [BPF_REG_7] = 5, /* R13 callee saved */ 129 [BPF_REG_8] = 6, /* R14 callee saved */ 130 [BPF_REG_9] = 7, /* R15 callee saved */ 131 [BPF_REG_FP] = 5, /* RBP readonly */ 132 [BPF_REG_AX] = 2, /* R10 temp register */ 133 [AUX_REG] = 3, /* R11 temp register */ 134 [X86_REG_R9] = 1, /* R9 register, 6th function argument */ 135 }; 136 137 static const int reg2pt_regs[] = { 138 [BPF_REG_0] = offsetof(struct pt_regs, ax), 139 [BPF_REG_1] = offsetof(struct pt_regs, di), 140 [BPF_REG_2] = offsetof(struct pt_regs, si), 141 [BPF_REG_3] = offsetof(struct pt_regs, dx), 142 [BPF_REG_4] = offsetof(struct pt_regs, cx), 143 [BPF_REG_5] = offsetof(struct pt_regs, r8), 144 [BPF_REG_6] = offsetof(struct pt_regs, bx), 145 [BPF_REG_7] = offsetof(struct pt_regs, r13), 146 [BPF_REG_8] = offsetof(struct pt_regs, r14), 147 [BPF_REG_9] = offsetof(struct pt_regs, r15), 148 }; 149 150 /* 151 * is_ereg() == true if BPF register 'reg' maps to x86-64 r8..r15 152 * which need extra byte of encoding. 153 * rax,rcx,...,rbp have simpler encoding 154 */ 155 static bool is_ereg(u32 reg) 156 { 157 return (1 << reg) & (BIT(BPF_REG_5) | 158 BIT(AUX_REG) | 159 BIT(BPF_REG_7) | 160 BIT(BPF_REG_8) | 161 BIT(BPF_REG_9) | 162 BIT(X86_REG_R9) | 163 BIT(BPF_REG_AX)); 164 } 165 166 /* 167 * is_ereg_8l() == true if BPF register 'reg' is mapped to access x86-64 168 * lower 8-bit registers dil,sil,bpl,spl,r8b..r15b, which need extra byte 169 * of encoding. al,cl,dl,bl have simpler encoding. 170 */ 171 static bool is_ereg_8l(u32 reg) 172 { 173 return is_ereg(reg) || 174 (1 << reg) & (BIT(BPF_REG_1) | 175 BIT(BPF_REG_2) | 176 BIT(BPF_REG_FP)); 177 } 178 179 static bool is_axreg(u32 reg) 180 { 181 return reg == BPF_REG_0; 182 } 183 184 /* Add modifiers if 'reg' maps to x86-64 registers R8..R15 */ 185 static u8 add_1mod(u8 byte, u32 reg) 186 { 187 if (is_ereg(reg)) 188 byte |= 1; 189 return byte; 190 } 191 192 static u8 add_2mod(u8 byte, u32 r1, u32 r2) 193 { 194 if (is_ereg(r1)) 195 byte |= 1; 196 if (is_ereg(r2)) 197 byte |= 4; 198 return byte; 199 } 200 201 /* Encode 'dst_reg' register into x86-64 opcode 'byte' */ 202 static u8 add_1reg(u8 byte, u32 dst_reg) 203 { 204 return byte + reg2hex[dst_reg]; 205 } 206 207 /* Encode 'dst_reg' and 'src_reg' registers into x86-64 opcode 'byte' */ 208 static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg) 209 { 210 return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3); 211 } 212 213 /* Some 1-byte opcodes for binary ALU operations */ 214 static u8 simple_alu_opcodes[] = { 215 [BPF_ADD] = 0x01, 216 [BPF_SUB] = 0x29, 217 [BPF_AND] = 0x21, 218 [BPF_OR] = 0x09, 219 [BPF_XOR] = 0x31, 220 [BPF_LSH] = 0xE0, 221 [BPF_RSH] = 0xE8, 222 [BPF_ARSH] = 0xF8, 223 }; 224 225 static void jit_fill_hole(void *area, unsigned int size) 226 { 227 /* Fill whole space with INT3 instructions */ 228 memset(area, 0xcc, size); 229 } 230 231 struct jit_context { 232 int cleanup_addr; /* Epilogue code offset */ 233 234 /* 235 * Program specific offsets of labels in the code; these rely on the 236 * JIT doing at least 2 passes, recording the position on the first 237 * pass, only to generate the correct offset on the second pass. 238 */ 239 int tail_call_direct_label; 240 int tail_call_indirect_label; 241 }; 242 243 /* Maximum number of bytes emitted while JITing one eBPF insn */ 244 #define BPF_MAX_INSN_SIZE 128 245 #define BPF_INSN_SAFETY 64 246 247 /* Number of bytes emit_patch() needs to generate instructions */ 248 #define X86_PATCH_SIZE 5 249 /* Number of bytes that will be skipped on tailcall */ 250 #define X86_TAIL_CALL_OFFSET (11 + ENDBR_INSN_SIZE) 251 252 static void push_callee_regs(u8 **pprog, bool *callee_regs_used) 253 { 254 u8 *prog = *pprog; 255 256 if (callee_regs_used[0]) 257 EMIT1(0x53); /* push rbx */ 258 if (callee_regs_used[1]) 259 EMIT2(0x41, 0x55); /* push r13 */ 260 if (callee_regs_used[2]) 261 EMIT2(0x41, 0x56); /* push r14 */ 262 if (callee_regs_used[3]) 263 EMIT2(0x41, 0x57); /* push r15 */ 264 *pprog = prog; 265 } 266 267 static void pop_callee_regs(u8 **pprog, bool *callee_regs_used) 268 { 269 u8 *prog = *pprog; 270 271 if (callee_regs_used[3]) 272 EMIT2(0x41, 0x5F); /* pop r15 */ 273 if (callee_regs_used[2]) 274 EMIT2(0x41, 0x5E); /* pop r14 */ 275 if (callee_regs_used[1]) 276 EMIT2(0x41, 0x5D); /* pop r13 */ 277 if (callee_regs_used[0]) 278 EMIT1(0x5B); /* pop rbx */ 279 *pprog = prog; 280 } 281 282 /* 283 * Emit x86-64 prologue code for BPF program. 284 * bpf_tail_call helper will skip the first X86_TAIL_CALL_OFFSET bytes 285 * while jumping to another program 286 */ 287 static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf, 288 bool tail_call_reachable, bool is_subprog) 289 { 290 u8 *prog = *pprog; 291 292 /* BPF trampoline can be made to work without these nops, 293 * but let's waste 5 bytes for now and optimize later 294 */ 295 EMIT_ENDBR(); 296 memcpy(prog, x86_nops[5], X86_PATCH_SIZE); 297 prog += X86_PATCH_SIZE; 298 if (!ebpf_from_cbpf) { 299 if (tail_call_reachable && !is_subprog) 300 EMIT2(0x31, 0xC0); /* xor eax, eax */ 301 else 302 EMIT2(0x66, 0x90); /* nop2 */ 303 } 304 EMIT1(0x55); /* push rbp */ 305 EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */ 306 307 /* X86_TAIL_CALL_OFFSET is here */ 308 EMIT_ENDBR(); 309 310 /* sub rsp, rounded_stack_depth */ 311 if (stack_depth) 312 EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth, 8)); 313 if (tail_call_reachable) 314 EMIT1(0x50); /* push rax */ 315 *pprog = prog; 316 } 317 318 static int emit_patch(u8 **pprog, void *func, void *ip, u8 opcode) 319 { 320 u8 *prog = *pprog; 321 s64 offset; 322 323 offset = func - (ip + X86_PATCH_SIZE); 324 if (!is_simm32(offset)) { 325 pr_err("Target call %p is out of range\n", func); 326 return -ERANGE; 327 } 328 EMIT1_off32(opcode, offset); 329 *pprog = prog; 330 return 0; 331 } 332 333 static int emit_call(u8 **pprog, void *func, void *ip) 334 { 335 return emit_patch(pprog, func, ip, 0xE8); 336 } 337 338 static int emit_jump(u8 **pprog, void *func, void *ip) 339 { 340 return emit_patch(pprog, func, ip, 0xE9); 341 } 342 343 static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t, 344 void *old_addr, void *new_addr) 345 { 346 const u8 *nop_insn = x86_nops[5]; 347 u8 old_insn[X86_PATCH_SIZE]; 348 u8 new_insn[X86_PATCH_SIZE]; 349 u8 *prog; 350 int ret; 351 352 memcpy(old_insn, nop_insn, X86_PATCH_SIZE); 353 if (old_addr) { 354 prog = old_insn; 355 ret = t == BPF_MOD_CALL ? 356 emit_call(&prog, old_addr, ip) : 357 emit_jump(&prog, old_addr, ip); 358 if (ret) 359 return ret; 360 } 361 362 memcpy(new_insn, nop_insn, X86_PATCH_SIZE); 363 if (new_addr) { 364 prog = new_insn; 365 ret = t == BPF_MOD_CALL ? 366 emit_call(&prog, new_addr, ip) : 367 emit_jump(&prog, new_addr, ip); 368 if (ret) 369 return ret; 370 } 371 372 ret = -EBUSY; 373 mutex_lock(&text_mutex); 374 if (memcmp(ip, old_insn, X86_PATCH_SIZE)) 375 goto out; 376 ret = 1; 377 if (memcmp(ip, new_insn, X86_PATCH_SIZE)) { 378 text_poke_bp(ip, new_insn, X86_PATCH_SIZE, NULL); 379 ret = 0; 380 } 381 out: 382 mutex_unlock(&text_mutex); 383 return ret; 384 } 385 386 int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t, 387 void *old_addr, void *new_addr) 388 { 389 if (!is_kernel_text((long)ip) && 390 !is_bpf_text_address((long)ip)) 391 /* BPF poking in modules is not supported */ 392 return -EINVAL; 393 394 /* 395 * See emit_prologue(), for IBT builds the trampoline hook is preceded 396 * with an ENDBR instruction. 397 */ 398 if (is_endbr(*(u32 *)ip)) 399 ip += ENDBR_INSN_SIZE; 400 401 return __bpf_arch_text_poke(ip, t, old_addr, new_addr); 402 } 403 404 #define EMIT_LFENCE() EMIT3(0x0F, 0xAE, 0xE8) 405 406 static void emit_indirect_jump(u8 **pprog, int reg, u8 *ip) 407 { 408 u8 *prog = *pprog; 409 410 #ifdef CONFIG_RETPOLINE 411 if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) { 412 EMIT_LFENCE(); 413 EMIT2(0xFF, 0xE0 + reg); 414 } else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) { 415 OPTIMIZER_HIDE_VAR(reg); 416 emit_jump(&prog, &__x86_indirect_thunk_array[reg], ip); 417 } else 418 #endif 419 EMIT2(0xFF, 0xE0 + reg); 420 421 *pprog = prog; 422 } 423 424 /* 425 * Generate the following code: 426 * 427 * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ... 428 * if (index >= array->map.max_entries) 429 * goto out; 430 * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT) 431 * goto out; 432 * prog = array->ptrs[index]; 433 * if (prog == NULL) 434 * goto out; 435 * goto *(prog->bpf_func + prologue_size); 436 * out: 437 */ 438 static void emit_bpf_tail_call_indirect(u8 **pprog, bool *callee_regs_used, 439 u32 stack_depth, u8 *ip, 440 struct jit_context *ctx) 441 { 442 int tcc_off = -4 - round_up(stack_depth, 8); 443 u8 *prog = *pprog, *start = *pprog; 444 int offset; 445 446 /* 447 * rdi - pointer to ctx 448 * rsi - pointer to bpf_array 449 * rdx - index in bpf_array 450 */ 451 452 /* 453 * if (index >= array->map.max_entries) 454 * goto out; 455 */ 456 EMIT2(0x89, 0xD2); /* mov edx, edx */ 457 EMIT3(0x39, 0x56, /* cmp dword ptr [rsi + 16], edx */ 458 offsetof(struct bpf_array, map.max_entries)); 459 460 offset = ctx->tail_call_indirect_label - (prog + 2 - start); 461 EMIT2(X86_JBE, offset); /* jbe out */ 462 463 /* 464 * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT) 465 * goto out; 466 */ 467 EMIT2_off32(0x8B, 0x85, tcc_off); /* mov eax, dword ptr [rbp - tcc_off] */ 468 EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */ 469 470 offset = ctx->tail_call_indirect_label - (prog + 2 - start); 471 EMIT2(X86_JAE, offset); /* jae out */ 472 EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */ 473 EMIT2_off32(0x89, 0x85, tcc_off); /* mov dword ptr [rbp - tcc_off], eax */ 474 475 /* prog = array->ptrs[index]; */ 476 EMIT4_off32(0x48, 0x8B, 0x8C, 0xD6, /* mov rcx, [rsi + rdx * 8 + offsetof(...)] */ 477 offsetof(struct bpf_array, ptrs)); 478 479 /* 480 * if (prog == NULL) 481 * goto out; 482 */ 483 EMIT3(0x48, 0x85, 0xC9); /* test rcx,rcx */ 484 485 offset = ctx->tail_call_indirect_label - (prog + 2 - start); 486 EMIT2(X86_JE, offset); /* je out */ 487 488 pop_callee_regs(&prog, callee_regs_used); 489 490 EMIT1(0x58); /* pop rax */ 491 if (stack_depth) 492 EMIT3_off32(0x48, 0x81, 0xC4, /* add rsp, sd */ 493 round_up(stack_depth, 8)); 494 495 /* goto *(prog->bpf_func + X86_TAIL_CALL_OFFSET); */ 496 EMIT4(0x48, 0x8B, 0x49, /* mov rcx, qword ptr [rcx + 32] */ 497 offsetof(struct bpf_prog, bpf_func)); 498 EMIT4(0x48, 0x83, 0xC1, /* add rcx, X86_TAIL_CALL_OFFSET */ 499 X86_TAIL_CALL_OFFSET); 500 /* 501 * Now we're ready to jump into next BPF program 502 * rdi == ctx (1st arg) 503 * rcx == prog->bpf_func + X86_TAIL_CALL_OFFSET 504 */ 505 emit_indirect_jump(&prog, 1 /* rcx */, ip + (prog - start)); 506 507 /* out: */ 508 ctx->tail_call_indirect_label = prog - start; 509 *pprog = prog; 510 } 511 512 static void emit_bpf_tail_call_direct(struct bpf_jit_poke_descriptor *poke, 513 u8 **pprog, u8 *ip, 514 bool *callee_regs_used, u32 stack_depth, 515 struct jit_context *ctx) 516 { 517 int tcc_off = -4 - round_up(stack_depth, 8); 518 u8 *prog = *pprog, *start = *pprog; 519 int offset; 520 521 /* 522 * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT) 523 * goto out; 524 */ 525 EMIT2_off32(0x8B, 0x85, tcc_off); /* mov eax, dword ptr [rbp - tcc_off] */ 526 EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */ 527 528 offset = ctx->tail_call_direct_label - (prog + 2 - start); 529 EMIT2(X86_JAE, offset); /* jae out */ 530 EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */ 531 EMIT2_off32(0x89, 0x85, tcc_off); /* mov dword ptr [rbp - tcc_off], eax */ 532 533 poke->tailcall_bypass = ip + (prog - start); 534 poke->adj_off = X86_TAIL_CALL_OFFSET; 535 poke->tailcall_target = ip + ctx->tail_call_direct_label - X86_PATCH_SIZE; 536 poke->bypass_addr = (u8 *)poke->tailcall_target + X86_PATCH_SIZE; 537 538 emit_jump(&prog, (u8 *)poke->tailcall_target + X86_PATCH_SIZE, 539 poke->tailcall_bypass); 540 541 pop_callee_regs(&prog, callee_regs_used); 542 EMIT1(0x58); /* pop rax */ 543 if (stack_depth) 544 EMIT3_off32(0x48, 0x81, 0xC4, round_up(stack_depth, 8)); 545 546 memcpy(prog, x86_nops[5], X86_PATCH_SIZE); 547 prog += X86_PATCH_SIZE; 548 549 /* out: */ 550 ctx->tail_call_direct_label = prog - start; 551 552 *pprog = prog; 553 } 554 555 static void bpf_tail_call_direct_fixup(struct bpf_prog *prog) 556 { 557 struct bpf_jit_poke_descriptor *poke; 558 struct bpf_array *array; 559 struct bpf_prog *target; 560 int i, ret; 561 562 for (i = 0; i < prog->aux->size_poke_tab; i++) { 563 poke = &prog->aux->poke_tab[i]; 564 if (poke->aux && poke->aux != prog->aux) 565 continue; 566 567 WARN_ON_ONCE(READ_ONCE(poke->tailcall_target_stable)); 568 569 if (poke->reason != BPF_POKE_REASON_TAIL_CALL) 570 continue; 571 572 array = container_of(poke->tail_call.map, struct bpf_array, map); 573 mutex_lock(&array->aux->poke_mutex); 574 target = array->ptrs[poke->tail_call.key]; 575 if (target) { 576 ret = __bpf_arch_text_poke(poke->tailcall_target, 577 BPF_MOD_JUMP, NULL, 578 (u8 *)target->bpf_func + 579 poke->adj_off); 580 BUG_ON(ret < 0); 581 ret = __bpf_arch_text_poke(poke->tailcall_bypass, 582 BPF_MOD_JUMP, 583 (u8 *)poke->tailcall_target + 584 X86_PATCH_SIZE, NULL); 585 BUG_ON(ret < 0); 586 } 587 WRITE_ONCE(poke->tailcall_target_stable, true); 588 mutex_unlock(&array->aux->poke_mutex); 589 } 590 } 591 592 static void emit_mov_imm32(u8 **pprog, bool sign_propagate, 593 u32 dst_reg, const u32 imm32) 594 { 595 u8 *prog = *pprog; 596 u8 b1, b2, b3; 597 598 /* 599 * Optimization: if imm32 is positive, use 'mov %eax, imm32' 600 * (which zero-extends imm32) to save 2 bytes. 601 */ 602 if (sign_propagate && (s32)imm32 < 0) { 603 /* 'mov %rax, imm32' sign extends imm32 */ 604 b1 = add_1mod(0x48, dst_reg); 605 b2 = 0xC7; 606 b3 = 0xC0; 607 EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32); 608 goto done; 609 } 610 611 /* 612 * Optimization: if imm32 is zero, use 'xor %eax, %eax' 613 * to save 3 bytes. 614 */ 615 if (imm32 == 0) { 616 if (is_ereg(dst_reg)) 617 EMIT1(add_2mod(0x40, dst_reg, dst_reg)); 618 b2 = 0x31; /* xor */ 619 b3 = 0xC0; 620 EMIT2(b2, add_2reg(b3, dst_reg, dst_reg)); 621 goto done; 622 } 623 624 /* mov %eax, imm32 */ 625 if (is_ereg(dst_reg)) 626 EMIT1(add_1mod(0x40, dst_reg)); 627 EMIT1_off32(add_1reg(0xB8, dst_reg), imm32); 628 done: 629 *pprog = prog; 630 } 631 632 static void emit_mov_imm64(u8 **pprog, u32 dst_reg, 633 const u32 imm32_hi, const u32 imm32_lo) 634 { 635 u8 *prog = *pprog; 636 637 if (is_uimm32(((u64)imm32_hi << 32) | (u32)imm32_lo)) { 638 /* 639 * For emitting plain u32, where sign bit must not be 640 * propagated LLVM tends to load imm64 over mov32 641 * directly, so save couple of bytes by just doing 642 * 'mov %eax, imm32' instead. 643 */ 644 emit_mov_imm32(&prog, false, dst_reg, imm32_lo); 645 } else { 646 /* movabsq %rax, imm64 */ 647 EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg)); 648 EMIT(imm32_lo, 4); 649 EMIT(imm32_hi, 4); 650 } 651 652 *pprog = prog; 653 } 654 655 static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg) 656 { 657 u8 *prog = *pprog; 658 659 if (is64) { 660 /* mov dst, src */ 661 EMIT_mov(dst_reg, src_reg); 662 } else { 663 /* mov32 dst, src */ 664 if (is_ereg(dst_reg) || is_ereg(src_reg)) 665 EMIT1(add_2mod(0x40, dst_reg, src_reg)); 666 EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg)); 667 } 668 669 *pprog = prog; 670 } 671 672 /* Emit the suffix (ModR/M etc) for addressing *(ptr_reg + off) and val_reg */ 673 static void emit_insn_suffix(u8 **pprog, u32 ptr_reg, u32 val_reg, int off) 674 { 675 u8 *prog = *pprog; 676 677 if (is_imm8(off)) { 678 /* 1-byte signed displacement. 679 * 680 * If off == 0 we could skip this and save one extra byte, but 681 * special case of x86 R13 which always needs an offset is not 682 * worth the hassle 683 */ 684 EMIT2(add_2reg(0x40, ptr_reg, val_reg), off); 685 } else { 686 /* 4-byte signed displacement */ 687 EMIT1_off32(add_2reg(0x80, ptr_reg, val_reg), off); 688 } 689 *pprog = prog; 690 } 691 692 /* 693 * Emit a REX byte if it will be necessary to address these registers 694 */ 695 static void maybe_emit_mod(u8 **pprog, u32 dst_reg, u32 src_reg, bool is64) 696 { 697 u8 *prog = *pprog; 698 699 if (is64) 700 EMIT1(add_2mod(0x48, dst_reg, src_reg)); 701 else if (is_ereg(dst_reg) || is_ereg(src_reg)) 702 EMIT1(add_2mod(0x40, dst_reg, src_reg)); 703 *pprog = prog; 704 } 705 706 /* 707 * Similar version of maybe_emit_mod() for a single register 708 */ 709 static void maybe_emit_1mod(u8 **pprog, u32 reg, bool is64) 710 { 711 u8 *prog = *pprog; 712 713 if (is64) 714 EMIT1(add_1mod(0x48, reg)); 715 else if (is_ereg(reg)) 716 EMIT1(add_1mod(0x40, reg)); 717 *pprog = prog; 718 } 719 720 /* LDX: dst_reg = *(u8*)(src_reg + off) */ 721 static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off) 722 { 723 u8 *prog = *pprog; 724 725 switch (size) { 726 case BPF_B: 727 /* Emit 'movzx rax, byte ptr [rax + off]' */ 728 EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6); 729 break; 730 case BPF_H: 731 /* Emit 'movzx rax, word ptr [rax + off]' */ 732 EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7); 733 break; 734 case BPF_W: 735 /* Emit 'mov eax, dword ptr [rax+0x14]' */ 736 if (is_ereg(dst_reg) || is_ereg(src_reg)) 737 EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B); 738 else 739 EMIT1(0x8B); 740 break; 741 case BPF_DW: 742 /* Emit 'mov rax, qword ptr [rax+0x14]' */ 743 EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B); 744 break; 745 } 746 emit_insn_suffix(&prog, src_reg, dst_reg, off); 747 *pprog = prog; 748 } 749 750 /* STX: *(u8*)(dst_reg + off) = src_reg */ 751 static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off) 752 { 753 u8 *prog = *pprog; 754 755 switch (size) { 756 case BPF_B: 757 /* Emit 'mov byte ptr [rax + off], al' */ 758 if (is_ereg(dst_reg) || is_ereg_8l(src_reg)) 759 /* Add extra byte for eregs or SIL,DIL,BPL in src_reg */ 760 EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88); 761 else 762 EMIT1(0x88); 763 break; 764 case BPF_H: 765 if (is_ereg(dst_reg) || is_ereg(src_reg)) 766 EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89); 767 else 768 EMIT2(0x66, 0x89); 769 break; 770 case BPF_W: 771 if (is_ereg(dst_reg) || is_ereg(src_reg)) 772 EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89); 773 else 774 EMIT1(0x89); 775 break; 776 case BPF_DW: 777 EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89); 778 break; 779 } 780 emit_insn_suffix(&prog, dst_reg, src_reg, off); 781 *pprog = prog; 782 } 783 784 static int emit_atomic(u8 **pprog, u8 atomic_op, 785 u32 dst_reg, u32 src_reg, s16 off, u8 bpf_size) 786 { 787 u8 *prog = *pprog; 788 789 EMIT1(0xF0); /* lock prefix */ 790 791 maybe_emit_mod(&prog, dst_reg, src_reg, bpf_size == BPF_DW); 792 793 /* emit opcode */ 794 switch (atomic_op) { 795 case BPF_ADD: 796 case BPF_AND: 797 case BPF_OR: 798 case BPF_XOR: 799 /* lock *(u32/u64*)(dst_reg + off) <op>= src_reg */ 800 EMIT1(simple_alu_opcodes[atomic_op]); 801 break; 802 case BPF_ADD | BPF_FETCH: 803 /* src_reg = atomic_fetch_add(dst_reg + off, src_reg); */ 804 EMIT2(0x0F, 0xC1); 805 break; 806 case BPF_XCHG: 807 /* src_reg = atomic_xchg(dst_reg + off, src_reg); */ 808 EMIT1(0x87); 809 break; 810 case BPF_CMPXCHG: 811 /* r0 = atomic_cmpxchg(dst_reg + off, r0, src_reg); */ 812 EMIT2(0x0F, 0xB1); 813 break; 814 default: 815 pr_err("bpf_jit: unknown atomic opcode %02x\n", atomic_op); 816 return -EFAULT; 817 } 818 819 emit_insn_suffix(&prog, dst_reg, src_reg, off); 820 821 *pprog = prog; 822 return 0; 823 } 824 825 bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs) 826 { 827 u32 reg = x->fixup >> 8; 828 829 /* jump over faulting load and clear dest register */ 830 *(unsigned long *)((void *)regs + reg) = 0; 831 regs->ip += x->fixup & 0xff; 832 return true; 833 } 834 835 static void detect_reg_usage(struct bpf_insn *insn, int insn_cnt, 836 bool *regs_used, bool *tail_call_seen) 837 { 838 int i; 839 840 for (i = 1; i <= insn_cnt; i++, insn++) { 841 if (insn->code == (BPF_JMP | BPF_TAIL_CALL)) 842 *tail_call_seen = true; 843 if (insn->dst_reg == BPF_REG_6 || insn->src_reg == BPF_REG_6) 844 regs_used[0] = true; 845 if (insn->dst_reg == BPF_REG_7 || insn->src_reg == BPF_REG_7) 846 regs_used[1] = true; 847 if (insn->dst_reg == BPF_REG_8 || insn->src_reg == BPF_REG_8) 848 regs_used[2] = true; 849 if (insn->dst_reg == BPF_REG_9 || insn->src_reg == BPF_REG_9) 850 regs_used[3] = true; 851 } 852 } 853 854 static void emit_nops(u8 **pprog, int len) 855 { 856 u8 *prog = *pprog; 857 int i, noplen; 858 859 while (len > 0) { 860 noplen = len; 861 862 if (noplen > ASM_NOP_MAX) 863 noplen = ASM_NOP_MAX; 864 865 for (i = 0; i < noplen; i++) 866 EMIT1(x86_nops[noplen][i]); 867 len -= noplen; 868 } 869 870 *pprog = prog; 871 } 872 873 #define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp))) 874 875 static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image, 876 int oldproglen, struct jit_context *ctx, bool jmp_padding) 877 { 878 bool tail_call_reachable = bpf_prog->aux->tail_call_reachable; 879 struct bpf_insn *insn = bpf_prog->insnsi; 880 bool callee_regs_used[4] = {}; 881 int insn_cnt = bpf_prog->len; 882 bool tail_call_seen = false; 883 bool seen_exit = false; 884 u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY]; 885 int i, excnt = 0; 886 int ilen, proglen = 0; 887 u8 *prog = temp; 888 int err; 889 890 detect_reg_usage(insn, insn_cnt, callee_regs_used, 891 &tail_call_seen); 892 893 /* tail call's presence in current prog implies it is reachable */ 894 tail_call_reachable |= tail_call_seen; 895 896 emit_prologue(&prog, bpf_prog->aux->stack_depth, 897 bpf_prog_was_classic(bpf_prog), tail_call_reachable, 898 bpf_prog->aux->func_idx != 0); 899 push_callee_regs(&prog, callee_regs_used); 900 901 ilen = prog - temp; 902 if (rw_image) 903 memcpy(rw_image + proglen, temp, ilen); 904 proglen += ilen; 905 addrs[0] = proglen; 906 prog = temp; 907 908 for (i = 1; i <= insn_cnt; i++, insn++) { 909 const s32 imm32 = insn->imm; 910 u32 dst_reg = insn->dst_reg; 911 u32 src_reg = insn->src_reg; 912 u8 b2 = 0, b3 = 0; 913 u8 *start_of_ldx; 914 s64 jmp_offset; 915 u8 jmp_cond; 916 u8 *func; 917 int nops; 918 919 switch (insn->code) { 920 /* ALU */ 921 case BPF_ALU | BPF_ADD | BPF_X: 922 case BPF_ALU | BPF_SUB | BPF_X: 923 case BPF_ALU | BPF_AND | BPF_X: 924 case BPF_ALU | BPF_OR | BPF_X: 925 case BPF_ALU | BPF_XOR | BPF_X: 926 case BPF_ALU64 | BPF_ADD | BPF_X: 927 case BPF_ALU64 | BPF_SUB | BPF_X: 928 case BPF_ALU64 | BPF_AND | BPF_X: 929 case BPF_ALU64 | BPF_OR | BPF_X: 930 case BPF_ALU64 | BPF_XOR | BPF_X: 931 maybe_emit_mod(&prog, dst_reg, src_reg, 932 BPF_CLASS(insn->code) == BPF_ALU64); 933 b2 = simple_alu_opcodes[BPF_OP(insn->code)]; 934 EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg)); 935 break; 936 937 case BPF_ALU64 | BPF_MOV | BPF_X: 938 case BPF_ALU | BPF_MOV | BPF_X: 939 emit_mov_reg(&prog, 940 BPF_CLASS(insn->code) == BPF_ALU64, 941 dst_reg, src_reg); 942 break; 943 944 /* neg dst */ 945 case BPF_ALU | BPF_NEG: 946 case BPF_ALU64 | BPF_NEG: 947 maybe_emit_1mod(&prog, dst_reg, 948 BPF_CLASS(insn->code) == BPF_ALU64); 949 EMIT2(0xF7, add_1reg(0xD8, dst_reg)); 950 break; 951 952 case BPF_ALU | BPF_ADD | BPF_K: 953 case BPF_ALU | BPF_SUB | BPF_K: 954 case BPF_ALU | BPF_AND | BPF_K: 955 case BPF_ALU | BPF_OR | BPF_K: 956 case BPF_ALU | BPF_XOR | BPF_K: 957 case BPF_ALU64 | BPF_ADD | BPF_K: 958 case BPF_ALU64 | BPF_SUB | BPF_K: 959 case BPF_ALU64 | BPF_AND | BPF_K: 960 case BPF_ALU64 | BPF_OR | BPF_K: 961 case BPF_ALU64 | BPF_XOR | BPF_K: 962 maybe_emit_1mod(&prog, dst_reg, 963 BPF_CLASS(insn->code) == BPF_ALU64); 964 965 /* 966 * b3 holds 'normal' opcode, b2 short form only valid 967 * in case dst is eax/rax. 968 */ 969 switch (BPF_OP(insn->code)) { 970 case BPF_ADD: 971 b3 = 0xC0; 972 b2 = 0x05; 973 break; 974 case BPF_SUB: 975 b3 = 0xE8; 976 b2 = 0x2D; 977 break; 978 case BPF_AND: 979 b3 = 0xE0; 980 b2 = 0x25; 981 break; 982 case BPF_OR: 983 b3 = 0xC8; 984 b2 = 0x0D; 985 break; 986 case BPF_XOR: 987 b3 = 0xF0; 988 b2 = 0x35; 989 break; 990 } 991 992 if (is_imm8(imm32)) 993 EMIT3(0x83, add_1reg(b3, dst_reg), imm32); 994 else if (is_axreg(dst_reg)) 995 EMIT1_off32(b2, imm32); 996 else 997 EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32); 998 break; 999 1000 case BPF_ALU64 | BPF_MOV | BPF_K: 1001 case BPF_ALU | BPF_MOV | BPF_K: 1002 emit_mov_imm32(&prog, BPF_CLASS(insn->code) == BPF_ALU64, 1003 dst_reg, imm32); 1004 break; 1005 1006 case BPF_LD | BPF_IMM | BPF_DW: 1007 emit_mov_imm64(&prog, dst_reg, insn[1].imm, insn[0].imm); 1008 insn++; 1009 i++; 1010 break; 1011 1012 /* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */ 1013 case BPF_ALU | BPF_MOD | BPF_X: 1014 case BPF_ALU | BPF_DIV | BPF_X: 1015 case BPF_ALU | BPF_MOD | BPF_K: 1016 case BPF_ALU | BPF_DIV | BPF_K: 1017 case BPF_ALU64 | BPF_MOD | BPF_X: 1018 case BPF_ALU64 | BPF_DIV | BPF_X: 1019 case BPF_ALU64 | BPF_MOD | BPF_K: 1020 case BPF_ALU64 | BPF_DIV | BPF_K: { 1021 bool is64 = BPF_CLASS(insn->code) == BPF_ALU64; 1022 1023 if (dst_reg != BPF_REG_0) 1024 EMIT1(0x50); /* push rax */ 1025 if (dst_reg != BPF_REG_3) 1026 EMIT1(0x52); /* push rdx */ 1027 1028 if (BPF_SRC(insn->code) == BPF_X) { 1029 if (src_reg == BPF_REG_0 || 1030 src_reg == BPF_REG_3) { 1031 /* mov r11, src_reg */ 1032 EMIT_mov(AUX_REG, src_reg); 1033 src_reg = AUX_REG; 1034 } 1035 } else { 1036 /* mov r11, imm32 */ 1037 EMIT3_off32(0x49, 0xC7, 0xC3, imm32); 1038 src_reg = AUX_REG; 1039 } 1040 1041 if (dst_reg != BPF_REG_0) 1042 /* mov rax, dst_reg */ 1043 emit_mov_reg(&prog, is64, BPF_REG_0, dst_reg); 1044 1045 /* 1046 * xor edx, edx 1047 * equivalent to 'xor rdx, rdx', but one byte less 1048 */ 1049 EMIT2(0x31, 0xd2); 1050 1051 /* div src_reg */ 1052 maybe_emit_1mod(&prog, src_reg, is64); 1053 EMIT2(0xF7, add_1reg(0xF0, src_reg)); 1054 1055 if (BPF_OP(insn->code) == BPF_MOD && 1056 dst_reg != BPF_REG_3) 1057 /* mov dst_reg, rdx */ 1058 emit_mov_reg(&prog, is64, dst_reg, BPF_REG_3); 1059 else if (BPF_OP(insn->code) == BPF_DIV && 1060 dst_reg != BPF_REG_0) 1061 /* mov dst_reg, rax */ 1062 emit_mov_reg(&prog, is64, dst_reg, BPF_REG_0); 1063 1064 if (dst_reg != BPF_REG_3) 1065 EMIT1(0x5A); /* pop rdx */ 1066 if (dst_reg != BPF_REG_0) 1067 EMIT1(0x58); /* pop rax */ 1068 break; 1069 } 1070 1071 case BPF_ALU | BPF_MUL | BPF_K: 1072 case BPF_ALU64 | BPF_MUL | BPF_K: 1073 maybe_emit_mod(&prog, dst_reg, dst_reg, 1074 BPF_CLASS(insn->code) == BPF_ALU64); 1075 1076 if (is_imm8(imm32)) 1077 /* imul dst_reg, dst_reg, imm8 */ 1078 EMIT3(0x6B, add_2reg(0xC0, dst_reg, dst_reg), 1079 imm32); 1080 else 1081 /* imul dst_reg, dst_reg, imm32 */ 1082 EMIT2_off32(0x69, 1083 add_2reg(0xC0, dst_reg, dst_reg), 1084 imm32); 1085 break; 1086 1087 case BPF_ALU | BPF_MUL | BPF_X: 1088 case BPF_ALU64 | BPF_MUL | BPF_X: 1089 maybe_emit_mod(&prog, src_reg, dst_reg, 1090 BPF_CLASS(insn->code) == BPF_ALU64); 1091 1092 /* imul dst_reg, src_reg */ 1093 EMIT3(0x0F, 0xAF, add_2reg(0xC0, src_reg, dst_reg)); 1094 break; 1095 1096 /* Shifts */ 1097 case BPF_ALU | BPF_LSH | BPF_K: 1098 case BPF_ALU | BPF_RSH | BPF_K: 1099 case BPF_ALU | BPF_ARSH | BPF_K: 1100 case BPF_ALU64 | BPF_LSH | BPF_K: 1101 case BPF_ALU64 | BPF_RSH | BPF_K: 1102 case BPF_ALU64 | BPF_ARSH | BPF_K: 1103 maybe_emit_1mod(&prog, dst_reg, 1104 BPF_CLASS(insn->code) == BPF_ALU64); 1105 1106 b3 = simple_alu_opcodes[BPF_OP(insn->code)]; 1107 if (imm32 == 1) 1108 EMIT2(0xD1, add_1reg(b3, dst_reg)); 1109 else 1110 EMIT3(0xC1, add_1reg(b3, dst_reg), imm32); 1111 break; 1112 1113 case BPF_ALU | BPF_LSH | BPF_X: 1114 case BPF_ALU | BPF_RSH | BPF_X: 1115 case BPF_ALU | BPF_ARSH | BPF_X: 1116 case BPF_ALU64 | BPF_LSH | BPF_X: 1117 case BPF_ALU64 | BPF_RSH | BPF_X: 1118 case BPF_ALU64 | BPF_ARSH | BPF_X: 1119 1120 /* Check for bad case when dst_reg == rcx */ 1121 if (dst_reg == BPF_REG_4) { 1122 /* mov r11, dst_reg */ 1123 EMIT_mov(AUX_REG, dst_reg); 1124 dst_reg = AUX_REG; 1125 } 1126 1127 if (src_reg != BPF_REG_4) { /* common case */ 1128 EMIT1(0x51); /* push rcx */ 1129 1130 /* mov rcx, src_reg */ 1131 EMIT_mov(BPF_REG_4, src_reg); 1132 } 1133 1134 /* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */ 1135 maybe_emit_1mod(&prog, dst_reg, 1136 BPF_CLASS(insn->code) == BPF_ALU64); 1137 1138 b3 = simple_alu_opcodes[BPF_OP(insn->code)]; 1139 EMIT2(0xD3, add_1reg(b3, dst_reg)); 1140 1141 if (src_reg != BPF_REG_4) 1142 EMIT1(0x59); /* pop rcx */ 1143 1144 if (insn->dst_reg == BPF_REG_4) 1145 /* mov dst_reg, r11 */ 1146 EMIT_mov(insn->dst_reg, AUX_REG); 1147 break; 1148 1149 case BPF_ALU | BPF_END | BPF_FROM_BE: 1150 switch (imm32) { 1151 case 16: 1152 /* Emit 'ror %ax, 8' to swap lower 2 bytes */ 1153 EMIT1(0x66); 1154 if (is_ereg(dst_reg)) 1155 EMIT1(0x41); 1156 EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8); 1157 1158 /* Emit 'movzwl eax, ax' */ 1159 if (is_ereg(dst_reg)) 1160 EMIT3(0x45, 0x0F, 0xB7); 1161 else 1162 EMIT2(0x0F, 0xB7); 1163 EMIT1(add_2reg(0xC0, dst_reg, dst_reg)); 1164 break; 1165 case 32: 1166 /* Emit 'bswap eax' to swap lower 4 bytes */ 1167 if (is_ereg(dst_reg)) 1168 EMIT2(0x41, 0x0F); 1169 else 1170 EMIT1(0x0F); 1171 EMIT1(add_1reg(0xC8, dst_reg)); 1172 break; 1173 case 64: 1174 /* Emit 'bswap rax' to swap 8 bytes */ 1175 EMIT3(add_1mod(0x48, dst_reg), 0x0F, 1176 add_1reg(0xC8, dst_reg)); 1177 break; 1178 } 1179 break; 1180 1181 case BPF_ALU | BPF_END | BPF_FROM_LE: 1182 switch (imm32) { 1183 case 16: 1184 /* 1185 * Emit 'movzwl eax, ax' to zero extend 16-bit 1186 * into 64 bit 1187 */ 1188 if (is_ereg(dst_reg)) 1189 EMIT3(0x45, 0x0F, 0xB7); 1190 else 1191 EMIT2(0x0F, 0xB7); 1192 EMIT1(add_2reg(0xC0, dst_reg, dst_reg)); 1193 break; 1194 case 32: 1195 /* Emit 'mov eax, eax' to clear upper 32-bits */ 1196 if (is_ereg(dst_reg)) 1197 EMIT1(0x45); 1198 EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg)); 1199 break; 1200 case 64: 1201 /* nop */ 1202 break; 1203 } 1204 break; 1205 1206 /* speculation barrier */ 1207 case BPF_ST | BPF_NOSPEC: 1208 if (boot_cpu_has(X86_FEATURE_XMM2)) 1209 EMIT_LFENCE(); 1210 break; 1211 1212 /* ST: *(u8*)(dst_reg + off) = imm */ 1213 case BPF_ST | BPF_MEM | BPF_B: 1214 if (is_ereg(dst_reg)) 1215 EMIT2(0x41, 0xC6); 1216 else 1217 EMIT1(0xC6); 1218 goto st; 1219 case BPF_ST | BPF_MEM | BPF_H: 1220 if (is_ereg(dst_reg)) 1221 EMIT3(0x66, 0x41, 0xC7); 1222 else 1223 EMIT2(0x66, 0xC7); 1224 goto st; 1225 case BPF_ST | BPF_MEM | BPF_W: 1226 if (is_ereg(dst_reg)) 1227 EMIT2(0x41, 0xC7); 1228 else 1229 EMIT1(0xC7); 1230 goto st; 1231 case BPF_ST | BPF_MEM | BPF_DW: 1232 EMIT2(add_1mod(0x48, dst_reg), 0xC7); 1233 1234 st: if (is_imm8(insn->off)) 1235 EMIT2(add_1reg(0x40, dst_reg), insn->off); 1236 else 1237 EMIT1_off32(add_1reg(0x80, dst_reg), insn->off); 1238 1239 EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code))); 1240 break; 1241 1242 /* STX: *(u8*)(dst_reg + off) = src_reg */ 1243 case BPF_STX | BPF_MEM | BPF_B: 1244 case BPF_STX | BPF_MEM | BPF_H: 1245 case BPF_STX | BPF_MEM | BPF_W: 1246 case BPF_STX | BPF_MEM | BPF_DW: 1247 emit_stx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off); 1248 break; 1249 1250 /* LDX: dst_reg = *(u8*)(src_reg + off) */ 1251 case BPF_LDX | BPF_MEM | BPF_B: 1252 case BPF_LDX | BPF_PROBE_MEM | BPF_B: 1253 case BPF_LDX | BPF_MEM | BPF_H: 1254 case BPF_LDX | BPF_PROBE_MEM | BPF_H: 1255 case BPF_LDX | BPF_MEM | BPF_W: 1256 case BPF_LDX | BPF_PROBE_MEM | BPF_W: 1257 case BPF_LDX | BPF_MEM | BPF_DW: 1258 case BPF_LDX | BPF_PROBE_MEM | BPF_DW: 1259 if (BPF_MODE(insn->code) == BPF_PROBE_MEM) { 1260 /* Though the verifier prevents negative insn->off in BPF_PROBE_MEM 1261 * add abs(insn->off) to the limit to make sure that negative 1262 * offset won't be an issue. 1263 * insn->off is s16, so it won't affect valid pointers. 1264 */ 1265 u64 limit = TASK_SIZE_MAX + PAGE_SIZE + abs(insn->off); 1266 u8 *end_of_jmp1, *end_of_jmp2; 1267 1268 /* Conservatively check that src_reg + insn->off is a kernel address: 1269 * 1. src_reg + insn->off >= limit 1270 * 2. src_reg + insn->off doesn't become small positive. 1271 * Cannot do src_reg + insn->off >= limit in one branch, 1272 * since it needs two spare registers, but JIT has only one. 1273 */ 1274 1275 /* movabsq r11, limit */ 1276 EMIT2(add_1mod(0x48, AUX_REG), add_1reg(0xB8, AUX_REG)); 1277 EMIT((u32)limit, 4); 1278 EMIT(limit >> 32, 4); 1279 /* cmp src_reg, r11 */ 1280 maybe_emit_mod(&prog, src_reg, AUX_REG, true); 1281 EMIT2(0x39, add_2reg(0xC0, src_reg, AUX_REG)); 1282 /* if unsigned '<' goto end_of_jmp2 */ 1283 EMIT2(X86_JB, 0); 1284 end_of_jmp1 = prog; 1285 1286 /* mov r11, src_reg */ 1287 emit_mov_reg(&prog, true, AUX_REG, src_reg); 1288 /* add r11, insn->off */ 1289 maybe_emit_1mod(&prog, AUX_REG, true); 1290 EMIT2_off32(0x81, add_1reg(0xC0, AUX_REG), insn->off); 1291 /* jmp if not carry to start_of_ldx 1292 * Otherwise ERR_PTR(-EINVAL) + 128 will be the user addr 1293 * that has to be rejected. 1294 */ 1295 EMIT2(0x73 /* JNC */, 0); 1296 end_of_jmp2 = prog; 1297 1298 /* xor dst_reg, dst_reg */ 1299 emit_mov_imm32(&prog, false, dst_reg, 0); 1300 /* jmp byte_after_ldx */ 1301 EMIT2(0xEB, 0); 1302 1303 /* populate jmp_offset for JB above to jump to xor dst_reg */ 1304 end_of_jmp1[-1] = end_of_jmp2 - end_of_jmp1; 1305 /* populate jmp_offset for JNC above to jump to start_of_ldx */ 1306 start_of_ldx = prog; 1307 end_of_jmp2[-1] = start_of_ldx - end_of_jmp2; 1308 } 1309 emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off); 1310 if (BPF_MODE(insn->code) == BPF_PROBE_MEM) { 1311 struct exception_table_entry *ex; 1312 u8 *_insn = image + proglen + (start_of_ldx - temp); 1313 s64 delta; 1314 1315 /* populate jmp_offset for JMP above */ 1316 start_of_ldx[-1] = prog - start_of_ldx; 1317 1318 if (!bpf_prog->aux->extable) 1319 break; 1320 1321 if (excnt >= bpf_prog->aux->num_exentries) { 1322 pr_err("ex gen bug\n"); 1323 return -EFAULT; 1324 } 1325 ex = &bpf_prog->aux->extable[excnt++]; 1326 1327 delta = _insn - (u8 *)&ex->insn; 1328 if (!is_simm32(delta)) { 1329 pr_err("extable->insn doesn't fit into 32-bit\n"); 1330 return -EFAULT; 1331 } 1332 /* switch ex to rw buffer for writes */ 1333 ex = (void *)rw_image + ((void *)ex - (void *)image); 1334 1335 ex->insn = delta; 1336 1337 ex->data = EX_TYPE_BPF; 1338 1339 if (dst_reg > BPF_REG_9) { 1340 pr_err("verifier error\n"); 1341 return -EFAULT; 1342 } 1343 /* 1344 * Compute size of x86 insn and its target dest x86 register. 1345 * ex_handler_bpf() will use lower 8 bits to adjust 1346 * pt_regs->ip to jump over this x86 instruction 1347 * and upper bits to figure out which pt_regs to zero out. 1348 * End result: x86 insn "mov rbx, qword ptr [rax+0x14]" 1349 * of 4 bytes will be ignored and rbx will be zero inited. 1350 */ 1351 ex->fixup = (prog - start_of_ldx) | (reg2pt_regs[dst_reg] << 8); 1352 } 1353 break; 1354 1355 case BPF_STX | BPF_ATOMIC | BPF_W: 1356 case BPF_STX | BPF_ATOMIC | BPF_DW: 1357 if (insn->imm == (BPF_AND | BPF_FETCH) || 1358 insn->imm == (BPF_OR | BPF_FETCH) || 1359 insn->imm == (BPF_XOR | BPF_FETCH)) { 1360 bool is64 = BPF_SIZE(insn->code) == BPF_DW; 1361 u32 real_src_reg = src_reg; 1362 u32 real_dst_reg = dst_reg; 1363 u8 *branch_target; 1364 1365 /* 1366 * Can't be implemented with a single x86 insn. 1367 * Need to do a CMPXCHG loop. 1368 */ 1369 1370 /* Will need RAX as a CMPXCHG operand so save R0 */ 1371 emit_mov_reg(&prog, true, BPF_REG_AX, BPF_REG_0); 1372 if (src_reg == BPF_REG_0) 1373 real_src_reg = BPF_REG_AX; 1374 if (dst_reg == BPF_REG_0) 1375 real_dst_reg = BPF_REG_AX; 1376 1377 branch_target = prog; 1378 /* Load old value */ 1379 emit_ldx(&prog, BPF_SIZE(insn->code), 1380 BPF_REG_0, real_dst_reg, insn->off); 1381 /* 1382 * Perform the (commutative) operation locally, 1383 * put the result in the AUX_REG. 1384 */ 1385 emit_mov_reg(&prog, is64, AUX_REG, BPF_REG_0); 1386 maybe_emit_mod(&prog, AUX_REG, real_src_reg, is64); 1387 EMIT2(simple_alu_opcodes[BPF_OP(insn->imm)], 1388 add_2reg(0xC0, AUX_REG, real_src_reg)); 1389 /* Attempt to swap in new value */ 1390 err = emit_atomic(&prog, BPF_CMPXCHG, 1391 real_dst_reg, AUX_REG, 1392 insn->off, 1393 BPF_SIZE(insn->code)); 1394 if (WARN_ON(err)) 1395 return err; 1396 /* 1397 * ZF tells us whether we won the race. If it's 1398 * cleared we need to try again. 1399 */ 1400 EMIT2(X86_JNE, -(prog - branch_target) - 2); 1401 /* Return the pre-modification value */ 1402 emit_mov_reg(&prog, is64, real_src_reg, BPF_REG_0); 1403 /* Restore R0 after clobbering RAX */ 1404 emit_mov_reg(&prog, true, BPF_REG_0, BPF_REG_AX); 1405 break; 1406 } 1407 1408 err = emit_atomic(&prog, insn->imm, dst_reg, src_reg, 1409 insn->off, BPF_SIZE(insn->code)); 1410 if (err) 1411 return err; 1412 break; 1413 1414 /* call */ 1415 case BPF_JMP | BPF_CALL: 1416 func = (u8 *) __bpf_call_base + imm32; 1417 if (tail_call_reachable) { 1418 EMIT3_off32(0x48, 0x8B, 0x85, 1419 -(bpf_prog->aux->stack_depth + 8)); 1420 if (!imm32 || emit_call(&prog, func, image + addrs[i - 1] + 7)) 1421 return -EINVAL; 1422 } else { 1423 if (!imm32 || emit_call(&prog, func, image + addrs[i - 1])) 1424 return -EINVAL; 1425 } 1426 break; 1427 1428 case BPF_JMP | BPF_TAIL_CALL: 1429 if (imm32) 1430 emit_bpf_tail_call_direct(&bpf_prog->aux->poke_tab[imm32 - 1], 1431 &prog, image + addrs[i - 1], 1432 callee_regs_used, 1433 bpf_prog->aux->stack_depth, 1434 ctx); 1435 else 1436 emit_bpf_tail_call_indirect(&prog, 1437 callee_regs_used, 1438 bpf_prog->aux->stack_depth, 1439 image + addrs[i - 1], 1440 ctx); 1441 break; 1442 1443 /* cond jump */ 1444 case BPF_JMP | BPF_JEQ | BPF_X: 1445 case BPF_JMP | BPF_JNE | BPF_X: 1446 case BPF_JMP | BPF_JGT | BPF_X: 1447 case BPF_JMP | BPF_JLT | BPF_X: 1448 case BPF_JMP | BPF_JGE | BPF_X: 1449 case BPF_JMP | BPF_JLE | BPF_X: 1450 case BPF_JMP | BPF_JSGT | BPF_X: 1451 case BPF_JMP | BPF_JSLT | BPF_X: 1452 case BPF_JMP | BPF_JSGE | BPF_X: 1453 case BPF_JMP | BPF_JSLE | BPF_X: 1454 case BPF_JMP32 | BPF_JEQ | BPF_X: 1455 case BPF_JMP32 | BPF_JNE | BPF_X: 1456 case BPF_JMP32 | BPF_JGT | BPF_X: 1457 case BPF_JMP32 | BPF_JLT | BPF_X: 1458 case BPF_JMP32 | BPF_JGE | BPF_X: 1459 case BPF_JMP32 | BPF_JLE | BPF_X: 1460 case BPF_JMP32 | BPF_JSGT | BPF_X: 1461 case BPF_JMP32 | BPF_JSLT | BPF_X: 1462 case BPF_JMP32 | BPF_JSGE | BPF_X: 1463 case BPF_JMP32 | BPF_JSLE | BPF_X: 1464 /* cmp dst_reg, src_reg */ 1465 maybe_emit_mod(&prog, dst_reg, src_reg, 1466 BPF_CLASS(insn->code) == BPF_JMP); 1467 EMIT2(0x39, add_2reg(0xC0, dst_reg, src_reg)); 1468 goto emit_cond_jmp; 1469 1470 case BPF_JMP | BPF_JSET | BPF_X: 1471 case BPF_JMP32 | BPF_JSET | BPF_X: 1472 /* test dst_reg, src_reg */ 1473 maybe_emit_mod(&prog, dst_reg, src_reg, 1474 BPF_CLASS(insn->code) == BPF_JMP); 1475 EMIT2(0x85, add_2reg(0xC0, dst_reg, src_reg)); 1476 goto emit_cond_jmp; 1477 1478 case BPF_JMP | BPF_JSET | BPF_K: 1479 case BPF_JMP32 | BPF_JSET | BPF_K: 1480 /* test dst_reg, imm32 */ 1481 maybe_emit_1mod(&prog, dst_reg, 1482 BPF_CLASS(insn->code) == BPF_JMP); 1483 EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32); 1484 goto emit_cond_jmp; 1485 1486 case BPF_JMP | BPF_JEQ | BPF_K: 1487 case BPF_JMP | BPF_JNE | BPF_K: 1488 case BPF_JMP | BPF_JGT | BPF_K: 1489 case BPF_JMP | BPF_JLT | BPF_K: 1490 case BPF_JMP | BPF_JGE | BPF_K: 1491 case BPF_JMP | BPF_JLE | BPF_K: 1492 case BPF_JMP | BPF_JSGT | BPF_K: 1493 case BPF_JMP | BPF_JSLT | BPF_K: 1494 case BPF_JMP | BPF_JSGE | BPF_K: 1495 case BPF_JMP | BPF_JSLE | BPF_K: 1496 case BPF_JMP32 | BPF_JEQ | BPF_K: 1497 case BPF_JMP32 | BPF_JNE | BPF_K: 1498 case BPF_JMP32 | BPF_JGT | BPF_K: 1499 case BPF_JMP32 | BPF_JLT | BPF_K: 1500 case BPF_JMP32 | BPF_JGE | BPF_K: 1501 case BPF_JMP32 | BPF_JLE | BPF_K: 1502 case BPF_JMP32 | BPF_JSGT | BPF_K: 1503 case BPF_JMP32 | BPF_JSLT | BPF_K: 1504 case BPF_JMP32 | BPF_JSGE | BPF_K: 1505 case BPF_JMP32 | BPF_JSLE | BPF_K: 1506 /* test dst_reg, dst_reg to save one extra byte */ 1507 if (imm32 == 0) { 1508 maybe_emit_mod(&prog, dst_reg, dst_reg, 1509 BPF_CLASS(insn->code) == BPF_JMP); 1510 EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg)); 1511 goto emit_cond_jmp; 1512 } 1513 1514 /* cmp dst_reg, imm8/32 */ 1515 maybe_emit_1mod(&prog, dst_reg, 1516 BPF_CLASS(insn->code) == BPF_JMP); 1517 1518 if (is_imm8(imm32)) 1519 EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32); 1520 else 1521 EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32); 1522 1523 emit_cond_jmp: /* Convert BPF opcode to x86 */ 1524 switch (BPF_OP(insn->code)) { 1525 case BPF_JEQ: 1526 jmp_cond = X86_JE; 1527 break; 1528 case BPF_JSET: 1529 case BPF_JNE: 1530 jmp_cond = X86_JNE; 1531 break; 1532 case BPF_JGT: 1533 /* GT is unsigned '>', JA in x86 */ 1534 jmp_cond = X86_JA; 1535 break; 1536 case BPF_JLT: 1537 /* LT is unsigned '<', JB in x86 */ 1538 jmp_cond = X86_JB; 1539 break; 1540 case BPF_JGE: 1541 /* GE is unsigned '>=', JAE in x86 */ 1542 jmp_cond = X86_JAE; 1543 break; 1544 case BPF_JLE: 1545 /* LE is unsigned '<=', JBE in x86 */ 1546 jmp_cond = X86_JBE; 1547 break; 1548 case BPF_JSGT: 1549 /* Signed '>', GT in x86 */ 1550 jmp_cond = X86_JG; 1551 break; 1552 case BPF_JSLT: 1553 /* Signed '<', LT in x86 */ 1554 jmp_cond = X86_JL; 1555 break; 1556 case BPF_JSGE: 1557 /* Signed '>=', GE in x86 */ 1558 jmp_cond = X86_JGE; 1559 break; 1560 case BPF_JSLE: 1561 /* Signed '<=', LE in x86 */ 1562 jmp_cond = X86_JLE; 1563 break; 1564 default: /* to silence GCC warning */ 1565 return -EFAULT; 1566 } 1567 jmp_offset = addrs[i + insn->off] - addrs[i]; 1568 if (is_imm8(jmp_offset)) { 1569 if (jmp_padding) { 1570 /* To keep the jmp_offset valid, the extra bytes are 1571 * padded before the jump insn, so we subtract the 1572 * 2 bytes of jmp_cond insn from INSN_SZ_DIFF. 1573 * 1574 * If the previous pass already emits an imm8 1575 * jmp_cond, then this BPF insn won't shrink, so 1576 * "nops" is 0. 1577 * 1578 * On the other hand, if the previous pass emits an 1579 * imm32 jmp_cond, the extra 4 bytes(*) is padded to 1580 * keep the image from shrinking further. 1581 * 1582 * (*) imm32 jmp_cond is 6 bytes, and imm8 jmp_cond 1583 * is 2 bytes, so the size difference is 4 bytes. 1584 */ 1585 nops = INSN_SZ_DIFF - 2; 1586 if (nops != 0 && nops != 4) { 1587 pr_err("unexpected jmp_cond padding: %d bytes\n", 1588 nops); 1589 return -EFAULT; 1590 } 1591 emit_nops(&prog, nops); 1592 } 1593 EMIT2(jmp_cond, jmp_offset); 1594 } else if (is_simm32(jmp_offset)) { 1595 EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset); 1596 } else { 1597 pr_err("cond_jmp gen bug %llx\n", jmp_offset); 1598 return -EFAULT; 1599 } 1600 1601 break; 1602 1603 case BPF_JMP | BPF_JA: 1604 if (insn->off == -1) 1605 /* -1 jmp instructions will always jump 1606 * backwards two bytes. Explicitly handling 1607 * this case avoids wasting too many passes 1608 * when there are long sequences of replaced 1609 * dead code. 1610 */ 1611 jmp_offset = -2; 1612 else 1613 jmp_offset = addrs[i + insn->off] - addrs[i]; 1614 1615 if (!jmp_offset) { 1616 /* 1617 * If jmp_padding is enabled, the extra nops will 1618 * be inserted. Otherwise, optimize out nop jumps. 1619 */ 1620 if (jmp_padding) { 1621 /* There are 3 possible conditions. 1622 * (1) This BPF_JA is already optimized out in 1623 * the previous run, so there is no need 1624 * to pad any extra byte (0 byte). 1625 * (2) The previous pass emits an imm8 jmp, 1626 * so we pad 2 bytes to match the previous 1627 * insn size. 1628 * (3) Similarly, the previous pass emits an 1629 * imm32 jmp, and 5 bytes is padded. 1630 */ 1631 nops = INSN_SZ_DIFF; 1632 if (nops != 0 && nops != 2 && nops != 5) { 1633 pr_err("unexpected nop jump padding: %d bytes\n", 1634 nops); 1635 return -EFAULT; 1636 } 1637 emit_nops(&prog, nops); 1638 } 1639 break; 1640 } 1641 emit_jmp: 1642 if (is_imm8(jmp_offset)) { 1643 if (jmp_padding) { 1644 /* To avoid breaking jmp_offset, the extra bytes 1645 * are padded before the actual jmp insn, so 1646 * 2 bytes is subtracted from INSN_SZ_DIFF. 1647 * 1648 * If the previous pass already emits an imm8 1649 * jmp, there is nothing to pad (0 byte). 1650 * 1651 * If it emits an imm32 jmp (5 bytes) previously 1652 * and now an imm8 jmp (2 bytes), then we pad 1653 * (5 - 2 = 3) bytes to stop the image from 1654 * shrinking further. 1655 */ 1656 nops = INSN_SZ_DIFF - 2; 1657 if (nops != 0 && nops != 3) { 1658 pr_err("unexpected jump padding: %d bytes\n", 1659 nops); 1660 return -EFAULT; 1661 } 1662 emit_nops(&prog, INSN_SZ_DIFF - 2); 1663 } 1664 EMIT2(0xEB, jmp_offset); 1665 } else if (is_simm32(jmp_offset)) { 1666 EMIT1_off32(0xE9, jmp_offset); 1667 } else { 1668 pr_err("jmp gen bug %llx\n", jmp_offset); 1669 return -EFAULT; 1670 } 1671 break; 1672 1673 case BPF_JMP | BPF_EXIT: 1674 if (seen_exit) { 1675 jmp_offset = ctx->cleanup_addr - addrs[i]; 1676 goto emit_jmp; 1677 } 1678 seen_exit = true; 1679 /* Update cleanup_addr */ 1680 ctx->cleanup_addr = proglen; 1681 pop_callee_regs(&prog, callee_regs_used); 1682 EMIT1(0xC9); /* leave */ 1683 EMIT1(0xC3); /* ret */ 1684 break; 1685 1686 default: 1687 /* 1688 * By design x86-64 JIT should support all BPF instructions. 1689 * This error will be seen if new instruction was added 1690 * to the interpreter, but not to the JIT, or if there is 1691 * junk in bpf_prog. 1692 */ 1693 pr_err("bpf_jit: unknown opcode %02x\n", insn->code); 1694 return -EINVAL; 1695 } 1696 1697 ilen = prog - temp; 1698 if (ilen > BPF_MAX_INSN_SIZE) { 1699 pr_err("bpf_jit: fatal insn size error\n"); 1700 return -EFAULT; 1701 } 1702 1703 if (image) { 1704 /* 1705 * When populating the image, assert that: 1706 * 1707 * i) We do not write beyond the allocated space, and 1708 * ii) addrs[i] did not change from the prior run, in order 1709 * to validate assumptions made for computing branch 1710 * displacements. 1711 */ 1712 if (unlikely(proglen + ilen > oldproglen || 1713 proglen + ilen != addrs[i])) { 1714 pr_err("bpf_jit: fatal error\n"); 1715 return -EFAULT; 1716 } 1717 memcpy(rw_image + proglen, temp, ilen); 1718 } 1719 proglen += ilen; 1720 addrs[i] = proglen; 1721 prog = temp; 1722 } 1723 1724 if (image && excnt != bpf_prog->aux->num_exentries) { 1725 pr_err("extable is not populated\n"); 1726 return -EFAULT; 1727 } 1728 return proglen; 1729 } 1730 1731 static void save_regs(const struct btf_func_model *m, u8 **prog, int nr_args, 1732 int stack_size) 1733 { 1734 int i; 1735 /* Store function arguments to stack. 1736 * For a function that accepts two pointers the sequence will be: 1737 * mov QWORD PTR [rbp-0x10],rdi 1738 * mov QWORD PTR [rbp-0x8],rsi 1739 */ 1740 for (i = 0; i < min(nr_args, 6); i++) 1741 emit_stx(prog, bytes_to_bpf_size(m->arg_size[i]), 1742 BPF_REG_FP, 1743 i == 5 ? X86_REG_R9 : BPF_REG_1 + i, 1744 -(stack_size - i * 8)); 1745 } 1746 1747 static void restore_regs(const struct btf_func_model *m, u8 **prog, int nr_args, 1748 int stack_size) 1749 { 1750 int i; 1751 1752 /* Restore function arguments from stack. 1753 * For a function that accepts two pointers the sequence will be: 1754 * EMIT4(0x48, 0x8B, 0x7D, 0xF0); mov rdi,QWORD PTR [rbp-0x10] 1755 * EMIT4(0x48, 0x8B, 0x75, 0xF8); mov rsi,QWORD PTR [rbp-0x8] 1756 */ 1757 for (i = 0; i < min(nr_args, 6); i++) 1758 emit_ldx(prog, bytes_to_bpf_size(m->arg_size[i]), 1759 i == 5 ? X86_REG_R9 : BPF_REG_1 + i, 1760 BPF_REG_FP, 1761 -(stack_size - i * 8)); 1762 } 1763 1764 static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog, 1765 struct bpf_prog *p, int stack_size, bool save_ret) 1766 { 1767 u8 *prog = *pprog; 1768 u8 *jmp_insn; 1769 1770 /* arg1: mov rdi, progs[i] */ 1771 emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p); 1772 if (emit_call(&prog, 1773 p->aux->sleepable ? __bpf_prog_enter_sleepable : 1774 __bpf_prog_enter, prog)) 1775 return -EINVAL; 1776 /* remember prog start time returned by __bpf_prog_enter */ 1777 emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0); 1778 1779 /* if (__bpf_prog_enter*(prog) == 0) 1780 * goto skip_exec_of_prog; 1781 */ 1782 EMIT3(0x48, 0x85, 0xC0); /* test rax,rax */ 1783 /* emit 2 nops that will be replaced with JE insn */ 1784 jmp_insn = prog; 1785 emit_nops(&prog, 2); 1786 1787 /* arg1: lea rdi, [rbp - stack_size] */ 1788 EMIT4(0x48, 0x8D, 0x7D, -stack_size); 1789 /* arg2: progs[i]->insnsi for interpreter */ 1790 if (!p->jited) 1791 emit_mov_imm64(&prog, BPF_REG_2, 1792 (long) p->insnsi >> 32, 1793 (u32) (long) p->insnsi); 1794 /* call JITed bpf program or interpreter */ 1795 if (emit_call(&prog, p->bpf_func, prog)) 1796 return -EINVAL; 1797 1798 /* 1799 * BPF_TRAMP_MODIFY_RETURN trampolines can modify the return 1800 * of the previous call which is then passed on the stack to 1801 * the next BPF program. 1802 * 1803 * BPF_TRAMP_FENTRY trampoline may need to return the return 1804 * value of BPF_PROG_TYPE_STRUCT_OPS prog. 1805 */ 1806 if (save_ret) 1807 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8); 1808 1809 /* replace 2 nops with JE insn, since jmp target is known */ 1810 jmp_insn[0] = X86_JE; 1811 jmp_insn[1] = prog - jmp_insn - 2; 1812 1813 /* arg1: mov rdi, progs[i] */ 1814 emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p); 1815 /* arg2: mov rsi, rbx <- start time in nsec */ 1816 emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6); 1817 if (emit_call(&prog, 1818 p->aux->sleepable ? __bpf_prog_exit_sleepable : 1819 __bpf_prog_exit, prog)) 1820 return -EINVAL; 1821 1822 *pprog = prog; 1823 return 0; 1824 } 1825 1826 static void emit_align(u8 **pprog, u32 align) 1827 { 1828 u8 *target, *prog = *pprog; 1829 1830 target = PTR_ALIGN(prog, align); 1831 if (target != prog) 1832 emit_nops(&prog, target - prog); 1833 1834 *pprog = prog; 1835 } 1836 1837 static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond) 1838 { 1839 u8 *prog = *pprog; 1840 s64 offset; 1841 1842 offset = func - (ip + 2 + 4); 1843 if (!is_simm32(offset)) { 1844 pr_err("Target %p is out of range\n", func); 1845 return -EINVAL; 1846 } 1847 EMIT2_off32(0x0F, jmp_cond + 0x10, offset); 1848 *pprog = prog; 1849 return 0; 1850 } 1851 1852 static int invoke_bpf(const struct btf_func_model *m, u8 **pprog, 1853 struct bpf_tramp_progs *tp, int stack_size, 1854 bool save_ret) 1855 { 1856 int i; 1857 u8 *prog = *pprog; 1858 1859 for (i = 0; i < tp->nr_progs; i++) { 1860 if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size, 1861 save_ret)) 1862 return -EINVAL; 1863 } 1864 *pprog = prog; 1865 return 0; 1866 } 1867 1868 static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog, 1869 struct bpf_tramp_progs *tp, int stack_size, 1870 u8 **branches) 1871 { 1872 u8 *prog = *pprog; 1873 int i; 1874 1875 /* The first fmod_ret program will receive a garbage return value. 1876 * Set this to 0 to avoid confusing the program. 1877 */ 1878 emit_mov_imm32(&prog, false, BPF_REG_0, 0); 1879 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8); 1880 for (i = 0; i < tp->nr_progs; i++) { 1881 if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size, true)) 1882 return -EINVAL; 1883 1884 /* mod_ret prog stored return value into [rbp - 8]. Emit: 1885 * if (*(u64 *)(rbp - 8) != 0) 1886 * goto do_fexit; 1887 */ 1888 /* cmp QWORD PTR [rbp - 0x8], 0x0 */ 1889 EMIT4(0x48, 0x83, 0x7d, 0xf8); EMIT1(0x00); 1890 1891 /* Save the location of the branch and Generate 6 nops 1892 * (4 bytes for an offset and 2 bytes for the jump) These nops 1893 * are replaced with a conditional jump once do_fexit (i.e. the 1894 * start of the fexit invocation) is finalized. 1895 */ 1896 branches[i] = prog; 1897 emit_nops(&prog, 4 + 2); 1898 } 1899 1900 *pprog = prog; 1901 return 0; 1902 } 1903 1904 static bool is_valid_bpf_tramp_flags(unsigned int flags) 1905 { 1906 if ((flags & BPF_TRAMP_F_RESTORE_REGS) && 1907 (flags & BPF_TRAMP_F_SKIP_FRAME)) 1908 return false; 1909 1910 /* 1911 * BPF_TRAMP_F_RET_FENTRY_RET is only used by bpf_struct_ops, 1912 * and it must be used alone. 1913 */ 1914 if ((flags & BPF_TRAMP_F_RET_FENTRY_RET) && 1915 (flags & ~BPF_TRAMP_F_RET_FENTRY_RET)) 1916 return false; 1917 1918 return true; 1919 } 1920 1921 /* Example: 1922 * __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev); 1923 * its 'struct btf_func_model' will be nr_args=2 1924 * The assembly code when eth_type_trans is executing after trampoline: 1925 * 1926 * push rbp 1927 * mov rbp, rsp 1928 * sub rsp, 16 // space for skb and dev 1929 * push rbx // temp regs to pass start time 1930 * mov qword ptr [rbp - 16], rdi // save skb pointer to stack 1931 * mov qword ptr [rbp - 8], rsi // save dev pointer to stack 1932 * call __bpf_prog_enter // rcu_read_lock and preempt_disable 1933 * mov rbx, rax // remember start time in bpf stats are enabled 1934 * lea rdi, [rbp - 16] // R1==ctx of bpf prog 1935 * call addr_of_jited_FENTRY_prog 1936 * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off 1937 * mov rsi, rbx // prog start time 1938 * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math 1939 * mov rdi, qword ptr [rbp - 16] // restore skb pointer from stack 1940 * mov rsi, qword ptr [rbp - 8] // restore dev pointer from stack 1941 * pop rbx 1942 * leave 1943 * ret 1944 * 1945 * eth_type_trans has 5 byte nop at the beginning. These 5 bytes will be 1946 * replaced with 'call generated_bpf_trampoline'. When it returns 1947 * eth_type_trans will continue executing with original skb and dev pointers. 1948 * 1949 * The assembly code when eth_type_trans is called from trampoline: 1950 * 1951 * push rbp 1952 * mov rbp, rsp 1953 * sub rsp, 24 // space for skb, dev, return value 1954 * push rbx // temp regs to pass start time 1955 * mov qword ptr [rbp - 24], rdi // save skb pointer to stack 1956 * mov qword ptr [rbp - 16], rsi // save dev pointer to stack 1957 * call __bpf_prog_enter // rcu_read_lock and preempt_disable 1958 * mov rbx, rax // remember start time if bpf stats are enabled 1959 * lea rdi, [rbp - 24] // R1==ctx of bpf prog 1960 * call addr_of_jited_FENTRY_prog // bpf prog can access skb and dev 1961 * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off 1962 * mov rsi, rbx // prog start time 1963 * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math 1964 * mov rdi, qword ptr [rbp - 24] // restore skb pointer from stack 1965 * mov rsi, qword ptr [rbp - 16] // restore dev pointer from stack 1966 * call eth_type_trans+5 // execute body of eth_type_trans 1967 * mov qword ptr [rbp - 8], rax // save return value 1968 * call __bpf_prog_enter // rcu_read_lock and preempt_disable 1969 * mov rbx, rax // remember start time in bpf stats are enabled 1970 * lea rdi, [rbp - 24] // R1==ctx of bpf prog 1971 * call addr_of_jited_FEXIT_prog // bpf prog can access skb, dev, return value 1972 * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off 1973 * mov rsi, rbx // prog start time 1974 * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math 1975 * mov rax, qword ptr [rbp - 8] // restore eth_type_trans's return value 1976 * pop rbx 1977 * leave 1978 * add rsp, 8 // skip eth_type_trans's frame 1979 * ret // return to its caller 1980 */ 1981 int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end, 1982 const struct btf_func_model *m, u32 flags, 1983 struct bpf_tramp_progs *tprogs, 1984 void *orig_call) 1985 { 1986 int ret, i, nr_args = m->nr_args; 1987 int regs_off, ip_off, args_off, stack_size = nr_args * 8; 1988 struct bpf_tramp_progs *fentry = &tprogs[BPF_TRAMP_FENTRY]; 1989 struct bpf_tramp_progs *fexit = &tprogs[BPF_TRAMP_FEXIT]; 1990 struct bpf_tramp_progs *fmod_ret = &tprogs[BPF_TRAMP_MODIFY_RETURN]; 1991 u8 **branches = NULL; 1992 u8 *prog; 1993 bool save_ret; 1994 1995 /* x86-64 supports up to 6 arguments. 7+ can be added in the future */ 1996 if (nr_args > 6) 1997 return -ENOTSUPP; 1998 1999 if (!is_valid_bpf_tramp_flags(flags)) 2000 return -EINVAL; 2001 2002 /* Generated trampoline stack layout: 2003 * 2004 * RBP + 8 [ return address ] 2005 * RBP + 0 [ RBP ] 2006 * 2007 * RBP - 8 [ return value ] BPF_TRAMP_F_CALL_ORIG or 2008 * BPF_TRAMP_F_RET_FENTRY_RET flags 2009 * 2010 * [ reg_argN ] always 2011 * [ ... ] 2012 * RBP - regs_off [ reg_arg1 ] program's ctx pointer 2013 * 2014 * RBP - args_off [ args count ] always 2015 * 2016 * RBP - ip_off [ traced function ] BPF_TRAMP_F_IP_ARG flag 2017 */ 2018 2019 /* room for return value of orig_call or fentry prog */ 2020 save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET); 2021 if (save_ret) 2022 stack_size += 8; 2023 2024 regs_off = stack_size; 2025 2026 /* args count */ 2027 stack_size += 8; 2028 args_off = stack_size; 2029 2030 if (flags & BPF_TRAMP_F_IP_ARG) 2031 stack_size += 8; /* room for IP address argument */ 2032 2033 ip_off = stack_size; 2034 2035 if (flags & BPF_TRAMP_F_SKIP_FRAME) { 2036 /* skip patched call instruction and point orig_call to actual 2037 * body of the kernel function. 2038 */ 2039 if (is_endbr(*(u32 *)orig_call)) 2040 orig_call += ENDBR_INSN_SIZE; 2041 orig_call += X86_PATCH_SIZE; 2042 } 2043 2044 prog = image; 2045 2046 EMIT_ENDBR(); 2047 EMIT1(0x55); /* push rbp */ 2048 EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */ 2049 EMIT4(0x48, 0x83, 0xEC, stack_size); /* sub rsp, stack_size */ 2050 EMIT1(0x53); /* push rbx */ 2051 2052 /* Store number of arguments of the traced function: 2053 * mov rax, nr_args 2054 * mov QWORD PTR [rbp - args_off], rax 2055 */ 2056 emit_mov_imm64(&prog, BPF_REG_0, 0, (u32) nr_args); 2057 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -args_off); 2058 2059 if (flags & BPF_TRAMP_F_IP_ARG) { 2060 /* Store IP address of the traced function: 2061 * mov rax, QWORD PTR [rbp + 8] 2062 * sub rax, X86_PATCH_SIZE 2063 * mov QWORD PTR [rbp - ip_off], rax 2064 */ 2065 emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, 8); 2066 EMIT4(0x48, 0x83, 0xe8, X86_PATCH_SIZE); 2067 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -ip_off); 2068 } 2069 2070 save_regs(m, &prog, nr_args, regs_off); 2071 2072 if (flags & BPF_TRAMP_F_CALL_ORIG) { 2073 /* arg1: mov rdi, im */ 2074 emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im); 2075 if (emit_call(&prog, __bpf_tramp_enter, prog)) { 2076 ret = -EINVAL; 2077 goto cleanup; 2078 } 2079 } 2080 2081 if (fentry->nr_progs) 2082 if (invoke_bpf(m, &prog, fentry, regs_off, 2083 flags & BPF_TRAMP_F_RET_FENTRY_RET)) 2084 return -EINVAL; 2085 2086 if (fmod_ret->nr_progs) { 2087 branches = kcalloc(fmod_ret->nr_progs, sizeof(u8 *), 2088 GFP_KERNEL); 2089 if (!branches) 2090 return -ENOMEM; 2091 2092 if (invoke_bpf_mod_ret(m, &prog, fmod_ret, regs_off, 2093 branches)) { 2094 ret = -EINVAL; 2095 goto cleanup; 2096 } 2097 } 2098 2099 if (flags & BPF_TRAMP_F_CALL_ORIG) { 2100 restore_regs(m, &prog, nr_args, regs_off); 2101 2102 /* call original function */ 2103 if (emit_call(&prog, orig_call, prog)) { 2104 ret = -EINVAL; 2105 goto cleanup; 2106 } 2107 /* remember return value in a stack for bpf prog to access */ 2108 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8); 2109 im->ip_after_call = prog; 2110 memcpy(prog, x86_nops[5], X86_PATCH_SIZE); 2111 prog += X86_PATCH_SIZE; 2112 } 2113 2114 if (fmod_ret->nr_progs) { 2115 /* From Intel 64 and IA-32 Architectures Optimization 2116 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler 2117 * Coding Rule 11: All branch targets should be 16-byte 2118 * aligned. 2119 */ 2120 emit_align(&prog, 16); 2121 /* Update the branches saved in invoke_bpf_mod_ret with the 2122 * aligned address of do_fexit. 2123 */ 2124 for (i = 0; i < fmod_ret->nr_progs; i++) 2125 emit_cond_near_jump(&branches[i], prog, branches[i], 2126 X86_JNE); 2127 } 2128 2129 if (fexit->nr_progs) 2130 if (invoke_bpf(m, &prog, fexit, regs_off, false)) { 2131 ret = -EINVAL; 2132 goto cleanup; 2133 } 2134 2135 if (flags & BPF_TRAMP_F_RESTORE_REGS) 2136 restore_regs(m, &prog, nr_args, regs_off); 2137 2138 /* This needs to be done regardless. If there were fmod_ret programs, 2139 * the return value is only updated on the stack and still needs to be 2140 * restored to R0. 2141 */ 2142 if (flags & BPF_TRAMP_F_CALL_ORIG) { 2143 im->ip_epilogue = prog; 2144 /* arg1: mov rdi, im */ 2145 emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im); 2146 if (emit_call(&prog, __bpf_tramp_exit, prog)) { 2147 ret = -EINVAL; 2148 goto cleanup; 2149 } 2150 } 2151 /* restore return value of orig_call or fentry prog back into RAX */ 2152 if (save_ret) 2153 emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8); 2154 2155 EMIT1(0x5B); /* pop rbx */ 2156 EMIT1(0xC9); /* leave */ 2157 if (flags & BPF_TRAMP_F_SKIP_FRAME) 2158 /* skip our return address and return to parent */ 2159 EMIT4(0x48, 0x83, 0xC4, 8); /* add rsp, 8 */ 2160 EMIT1(0xC3); /* ret */ 2161 /* Make sure the trampoline generation logic doesn't overflow */ 2162 if (WARN_ON_ONCE(prog > (u8 *)image_end - BPF_INSN_SAFETY)) { 2163 ret = -EFAULT; 2164 goto cleanup; 2165 } 2166 ret = prog - (u8 *)image; 2167 2168 cleanup: 2169 kfree(branches); 2170 return ret; 2171 } 2172 2173 static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs) 2174 { 2175 u8 *jg_reloc, *prog = *pprog; 2176 int pivot, err, jg_bytes = 1; 2177 s64 jg_offset; 2178 2179 if (a == b) { 2180 /* Leaf node of recursion, i.e. not a range of indices 2181 * anymore. 2182 */ 2183 EMIT1(add_1mod(0x48, BPF_REG_3)); /* cmp rdx,func */ 2184 if (!is_simm32(progs[a])) 2185 return -1; 2186 EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3), 2187 progs[a]); 2188 err = emit_cond_near_jump(&prog, /* je func */ 2189 (void *)progs[a], prog, 2190 X86_JE); 2191 if (err) 2192 return err; 2193 2194 emit_indirect_jump(&prog, 2 /* rdx */, prog); 2195 2196 *pprog = prog; 2197 return 0; 2198 } 2199 2200 /* Not a leaf node, so we pivot, and recursively descend into 2201 * the lower and upper ranges. 2202 */ 2203 pivot = (b - a) / 2; 2204 EMIT1(add_1mod(0x48, BPF_REG_3)); /* cmp rdx,func */ 2205 if (!is_simm32(progs[a + pivot])) 2206 return -1; 2207 EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3), progs[a + pivot]); 2208 2209 if (pivot > 2) { /* jg upper_part */ 2210 /* Require near jump. */ 2211 jg_bytes = 4; 2212 EMIT2_off32(0x0F, X86_JG + 0x10, 0); 2213 } else { 2214 EMIT2(X86_JG, 0); 2215 } 2216 jg_reloc = prog; 2217 2218 err = emit_bpf_dispatcher(&prog, a, a + pivot, /* emit lower_part */ 2219 progs); 2220 if (err) 2221 return err; 2222 2223 /* From Intel 64 and IA-32 Architectures Optimization 2224 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler 2225 * Coding Rule 11: All branch targets should be 16-byte 2226 * aligned. 2227 */ 2228 emit_align(&prog, 16); 2229 jg_offset = prog - jg_reloc; 2230 emit_code(jg_reloc - jg_bytes, jg_offset, jg_bytes); 2231 2232 err = emit_bpf_dispatcher(&prog, a + pivot + 1, /* emit upper_part */ 2233 b, progs); 2234 if (err) 2235 return err; 2236 2237 *pprog = prog; 2238 return 0; 2239 } 2240 2241 static int cmp_ips(const void *a, const void *b) 2242 { 2243 const s64 *ipa = a; 2244 const s64 *ipb = b; 2245 2246 if (*ipa > *ipb) 2247 return 1; 2248 if (*ipa < *ipb) 2249 return -1; 2250 return 0; 2251 } 2252 2253 int arch_prepare_bpf_dispatcher(void *image, s64 *funcs, int num_funcs) 2254 { 2255 u8 *prog = image; 2256 2257 sort(funcs, num_funcs, sizeof(funcs[0]), cmp_ips, NULL); 2258 return emit_bpf_dispatcher(&prog, 0, num_funcs - 1, funcs); 2259 } 2260 2261 struct x64_jit_data { 2262 struct bpf_binary_header *rw_header; 2263 struct bpf_binary_header *header; 2264 int *addrs; 2265 u8 *image; 2266 int proglen; 2267 struct jit_context ctx; 2268 }; 2269 2270 #define MAX_PASSES 20 2271 #define PADDING_PASSES (MAX_PASSES - 5) 2272 2273 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) 2274 { 2275 struct bpf_binary_header *rw_header = NULL; 2276 struct bpf_binary_header *header = NULL; 2277 struct bpf_prog *tmp, *orig_prog = prog; 2278 struct x64_jit_data *jit_data; 2279 int proglen, oldproglen = 0; 2280 struct jit_context ctx = {}; 2281 bool tmp_blinded = false; 2282 bool extra_pass = false; 2283 bool padding = false; 2284 u8 *rw_image = NULL; 2285 u8 *image = NULL; 2286 int *addrs; 2287 int pass; 2288 int i; 2289 2290 if (!prog->jit_requested) 2291 return orig_prog; 2292 2293 tmp = bpf_jit_blind_constants(prog); 2294 /* 2295 * If blinding was requested and we failed during blinding, 2296 * we must fall back to the interpreter. 2297 */ 2298 if (IS_ERR(tmp)) 2299 return orig_prog; 2300 if (tmp != prog) { 2301 tmp_blinded = true; 2302 prog = tmp; 2303 } 2304 2305 jit_data = prog->aux->jit_data; 2306 if (!jit_data) { 2307 jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL); 2308 if (!jit_data) { 2309 prog = orig_prog; 2310 goto out; 2311 } 2312 prog->aux->jit_data = jit_data; 2313 } 2314 addrs = jit_data->addrs; 2315 if (addrs) { 2316 ctx = jit_data->ctx; 2317 oldproglen = jit_data->proglen; 2318 image = jit_data->image; 2319 header = jit_data->header; 2320 rw_header = jit_data->rw_header; 2321 rw_image = (void *)rw_header + ((void *)image - (void *)header); 2322 extra_pass = true; 2323 padding = true; 2324 goto skip_init_addrs; 2325 } 2326 addrs = kvmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL); 2327 if (!addrs) { 2328 prog = orig_prog; 2329 goto out_addrs; 2330 } 2331 2332 /* 2333 * Before first pass, make a rough estimation of addrs[] 2334 * each BPF instruction is translated to less than 64 bytes 2335 */ 2336 for (proglen = 0, i = 0; i <= prog->len; i++) { 2337 proglen += 64; 2338 addrs[i] = proglen; 2339 } 2340 ctx.cleanup_addr = proglen; 2341 skip_init_addrs: 2342 2343 /* 2344 * JITed image shrinks with every pass and the loop iterates 2345 * until the image stops shrinking. Very large BPF programs 2346 * may converge on the last pass. In such case do one more 2347 * pass to emit the final image. 2348 */ 2349 for (pass = 0; pass < MAX_PASSES || image; pass++) { 2350 if (!padding && pass >= PADDING_PASSES) 2351 padding = true; 2352 proglen = do_jit(prog, addrs, image, rw_image, oldproglen, &ctx, padding); 2353 if (proglen <= 0) { 2354 out_image: 2355 image = NULL; 2356 if (header) { 2357 bpf_arch_text_copy(&header->size, &rw_header->size, 2358 sizeof(rw_header->size)); 2359 bpf_jit_binary_pack_free(header, rw_header); 2360 } 2361 /* Fall back to interpreter mode */ 2362 prog = orig_prog; 2363 if (extra_pass) { 2364 prog->bpf_func = NULL; 2365 prog->jited = 0; 2366 prog->jited_len = 0; 2367 } 2368 goto out_addrs; 2369 } 2370 if (image) { 2371 if (proglen != oldproglen) { 2372 pr_err("bpf_jit: proglen=%d != oldproglen=%d\n", 2373 proglen, oldproglen); 2374 goto out_image; 2375 } 2376 break; 2377 } 2378 if (proglen == oldproglen) { 2379 /* 2380 * The number of entries in extable is the number of BPF_LDX 2381 * insns that access kernel memory via "pointer to BTF type". 2382 * The verifier changed their opcode from LDX|MEM|size 2383 * to LDX|PROBE_MEM|size to make JITing easier. 2384 */ 2385 u32 align = __alignof__(struct exception_table_entry); 2386 u32 extable_size = prog->aux->num_exentries * 2387 sizeof(struct exception_table_entry); 2388 2389 /* allocate module memory for x86 insns and extable */ 2390 header = bpf_jit_binary_pack_alloc(roundup(proglen, align) + extable_size, 2391 &image, align, &rw_header, &rw_image, 2392 jit_fill_hole); 2393 if (!header) { 2394 prog = orig_prog; 2395 goto out_addrs; 2396 } 2397 prog->aux->extable = (void *) image + roundup(proglen, align); 2398 } 2399 oldproglen = proglen; 2400 cond_resched(); 2401 } 2402 2403 if (bpf_jit_enable > 1) 2404 bpf_jit_dump(prog->len, proglen, pass + 1, image); 2405 2406 if (image) { 2407 if (!prog->is_func || extra_pass) { 2408 /* 2409 * bpf_jit_binary_pack_finalize fails in two scenarios: 2410 * 1) header is not pointing to proper module memory; 2411 * 2) the arch doesn't support bpf_arch_text_copy(). 2412 * 2413 * Both cases are serious bugs and justify WARN_ON. 2414 */ 2415 if (WARN_ON(bpf_jit_binary_pack_finalize(prog, header, rw_header))) { 2416 /* header has been freed */ 2417 header = NULL; 2418 goto out_image; 2419 } 2420 2421 bpf_tail_call_direct_fixup(prog); 2422 } else { 2423 jit_data->addrs = addrs; 2424 jit_data->ctx = ctx; 2425 jit_data->proglen = proglen; 2426 jit_data->image = image; 2427 jit_data->header = header; 2428 jit_data->rw_header = rw_header; 2429 } 2430 prog->bpf_func = (void *)image; 2431 prog->jited = 1; 2432 prog->jited_len = proglen; 2433 } else { 2434 prog = orig_prog; 2435 } 2436 2437 if (!image || !prog->is_func || extra_pass) { 2438 if (image) 2439 bpf_prog_fill_jited_linfo(prog, addrs + 1); 2440 out_addrs: 2441 kvfree(addrs); 2442 kfree(jit_data); 2443 prog->aux->jit_data = NULL; 2444 } 2445 out: 2446 if (tmp_blinded) 2447 bpf_jit_prog_release_other(prog, prog == orig_prog ? 2448 tmp : orig_prog); 2449 return prog; 2450 } 2451 2452 bool bpf_jit_supports_kfunc_call(void) 2453 { 2454 return true; 2455 } 2456 2457 void *bpf_arch_text_copy(void *dst, void *src, size_t len) 2458 { 2459 if (text_poke_copy(dst, src, len) == NULL) 2460 return ERR_PTR(-EINVAL); 2461 return dst; 2462 } 2463