1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * BPF JIT compiler 4 * 5 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com) 6 * Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com 7 */ 8 #include <linux/netdevice.h> 9 #include <linux/filter.h> 10 #include <linux/if_vlan.h> 11 #include <linux/bpf.h> 12 #include <linux/memory.h> 13 #include <linux/sort.h> 14 #include <asm/extable.h> 15 #include <asm/set_memory.h> 16 #include <asm/nospec-branch.h> 17 #include <asm/text-patching.h> 18 19 static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len) 20 { 21 if (len == 1) 22 *ptr = bytes; 23 else if (len == 2) 24 *(u16 *)ptr = bytes; 25 else { 26 *(u32 *)ptr = bytes; 27 barrier(); 28 } 29 return ptr + len; 30 } 31 32 #define EMIT(bytes, len) \ 33 do { prog = emit_code(prog, bytes, len); } while (0) 34 35 #define EMIT1(b1) EMIT(b1, 1) 36 #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2) 37 #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3) 38 #define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4) 39 40 #define EMIT1_off32(b1, off) \ 41 do { EMIT1(b1); EMIT(off, 4); } while (0) 42 #define EMIT2_off32(b1, b2, off) \ 43 do { EMIT2(b1, b2); EMIT(off, 4); } while (0) 44 #define EMIT3_off32(b1, b2, b3, off) \ 45 do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0) 46 #define EMIT4_off32(b1, b2, b3, b4, off) \ 47 do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0) 48 49 static bool is_imm8(int value) 50 { 51 return value <= 127 && value >= -128; 52 } 53 54 static bool is_simm32(s64 value) 55 { 56 return value == (s64)(s32)value; 57 } 58 59 static bool is_uimm32(u64 value) 60 { 61 return value == (u64)(u32)value; 62 } 63 64 /* mov dst, src */ 65 #define EMIT_mov(DST, SRC) \ 66 do { \ 67 if (DST != SRC) \ 68 EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \ 69 } while (0) 70 71 static int bpf_size_to_x86_bytes(int bpf_size) 72 { 73 if (bpf_size == BPF_W) 74 return 4; 75 else if (bpf_size == BPF_H) 76 return 2; 77 else if (bpf_size == BPF_B) 78 return 1; 79 else if (bpf_size == BPF_DW) 80 return 4; /* imm32 */ 81 else 82 return 0; 83 } 84 85 /* 86 * List of x86 cond jumps opcodes (. + s8) 87 * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32) 88 */ 89 #define X86_JB 0x72 90 #define X86_JAE 0x73 91 #define X86_JE 0x74 92 #define X86_JNE 0x75 93 #define X86_JBE 0x76 94 #define X86_JA 0x77 95 #define X86_JL 0x7C 96 #define X86_JGE 0x7D 97 #define X86_JLE 0x7E 98 #define X86_JG 0x7F 99 100 /* Pick a register outside of BPF range for JIT internal work */ 101 #define AUX_REG (MAX_BPF_JIT_REG + 1) 102 #define X86_REG_R9 (MAX_BPF_JIT_REG + 2) 103 104 /* 105 * The following table maps BPF registers to x86-64 registers. 106 * 107 * x86-64 register R12 is unused, since if used as base address 108 * register in load/store instructions, it always needs an 109 * extra byte of encoding and is callee saved. 110 * 111 * x86-64 register R9 is not used by BPF programs, but can be used by BPF 112 * trampoline. x86-64 register R10 is used for blinding (if enabled). 
113 */ 114 static const int reg2hex[] = { 115 [BPF_REG_0] = 0, /* RAX */ 116 [BPF_REG_1] = 7, /* RDI */ 117 [BPF_REG_2] = 6, /* RSI */ 118 [BPF_REG_3] = 2, /* RDX */ 119 [BPF_REG_4] = 1, /* RCX */ 120 [BPF_REG_5] = 0, /* R8 */ 121 [BPF_REG_6] = 3, /* RBX callee saved */ 122 [BPF_REG_7] = 5, /* R13 callee saved */ 123 [BPF_REG_8] = 6, /* R14 callee saved */ 124 [BPF_REG_9] = 7, /* R15 callee saved */ 125 [BPF_REG_FP] = 5, /* RBP readonly */ 126 [BPF_REG_AX] = 2, /* R10 temp register */ 127 [AUX_REG] = 3, /* R11 temp register */ 128 [X86_REG_R9] = 1, /* R9 register, 6th function argument */ 129 }; 130 131 static const int reg2pt_regs[] = { 132 [BPF_REG_0] = offsetof(struct pt_regs, ax), 133 [BPF_REG_1] = offsetof(struct pt_regs, di), 134 [BPF_REG_2] = offsetof(struct pt_regs, si), 135 [BPF_REG_3] = offsetof(struct pt_regs, dx), 136 [BPF_REG_4] = offsetof(struct pt_regs, cx), 137 [BPF_REG_5] = offsetof(struct pt_regs, r8), 138 [BPF_REG_6] = offsetof(struct pt_regs, bx), 139 [BPF_REG_7] = offsetof(struct pt_regs, r13), 140 [BPF_REG_8] = offsetof(struct pt_regs, r14), 141 [BPF_REG_9] = offsetof(struct pt_regs, r15), 142 }; 143 144 /* 145 * is_ereg() == true if BPF register 'reg' maps to x86-64 r8..r15 146 * which need extra byte of encoding. 147 * rax,rcx,...,rbp have simpler encoding 148 */ 149 static bool is_ereg(u32 reg) 150 { 151 return (1 << reg) & (BIT(BPF_REG_5) | 152 BIT(AUX_REG) | 153 BIT(BPF_REG_7) | 154 BIT(BPF_REG_8) | 155 BIT(BPF_REG_9) | 156 BIT(X86_REG_R9) | 157 BIT(BPF_REG_AX)); 158 } 159 160 /* 161 * is_ereg_8l() == true if BPF register 'reg' is mapped to access x86-64 162 * lower 8-bit registers dil,sil,bpl,spl,r8b..r15b, which need extra byte 163 * of encoding. al,cl,dl,bl have simpler encoding. 164 */ 165 static bool is_ereg_8l(u32 reg) 166 { 167 return is_ereg(reg) || 168 (1 << reg) & (BIT(BPF_REG_1) | 169 BIT(BPF_REG_2) | 170 BIT(BPF_REG_FP)); 171 } 172 173 static bool is_axreg(u32 reg) 174 { 175 return reg == BPF_REG_0; 176 } 177 178 /* Add modifiers if 'reg' maps to x86-64 registers R8..R15 */ 179 static u8 add_1mod(u8 byte, u32 reg) 180 { 181 if (is_ereg(reg)) 182 byte |= 1; 183 return byte; 184 } 185 186 static u8 add_2mod(u8 byte, u32 r1, u32 r2) 187 { 188 if (is_ereg(r1)) 189 byte |= 1; 190 if (is_ereg(r2)) 191 byte |= 4; 192 return byte; 193 } 194 195 /* Encode 'dst_reg' register into x86-64 opcode 'byte' */ 196 static u8 add_1reg(u8 byte, u32 dst_reg) 197 { 198 return byte + reg2hex[dst_reg]; 199 } 200 201 /* Encode 'dst_reg' and 'src_reg' registers into x86-64 opcode 'byte' */ 202 static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg) 203 { 204 return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3); 205 } 206 207 /* Some 1-byte opcodes for binary ALU operations */ 208 static u8 simple_alu_opcodes[] = { 209 [BPF_ADD] = 0x01, 210 [BPF_SUB] = 0x29, 211 [BPF_AND] = 0x21, 212 [BPF_OR] = 0x09, 213 [BPF_XOR] = 0x31, 214 [BPF_LSH] = 0xE0, 215 [BPF_RSH] = 0xE8, 216 [BPF_ARSH] = 0xF8, 217 }; 218 219 static void jit_fill_hole(void *area, unsigned int size) 220 { 221 /* Fill whole space with INT3 instructions */ 222 memset(area, 0xcc, size); 223 } 224 225 struct jit_context { 226 int cleanup_addr; /* Epilogue code offset */ 227 228 /* 229 * Program specific offsets of labels in the code; these rely on the 230 * JIT doing at least 2 passes, recording the position on the first 231 * pass, only to generate the correct offset on the second pass. 
232 */ 233 int tail_call_direct_label; 234 int tail_call_indirect_label; 235 }; 236 237 /* Maximum number of bytes emitted while JITing one eBPF insn */ 238 #define BPF_MAX_INSN_SIZE 128 239 #define BPF_INSN_SAFETY 64 240 241 /* Number of bytes emit_patch() needs to generate instructions */ 242 #define X86_PATCH_SIZE 5 243 /* Number of bytes that will be skipped on tailcall */ 244 #define X86_TAIL_CALL_OFFSET 11 245 246 static void push_callee_regs(u8 **pprog, bool *callee_regs_used) 247 { 248 u8 *prog = *pprog; 249 250 if (callee_regs_used[0]) 251 EMIT1(0x53); /* push rbx */ 252 if (callee_regs_used[1]) 253 EMIT2(0x41, 0x55); /* push r13 */ 254 if (callee_regs_used[2]) 255 EMIT2(0x41, 0x56); /* push r14 */ 256 if (callee_regs_used[3]) 257 EMIT2(0x41, 0x57); /* push r15 */ 258 *pprog = prog; 259 } 260 261 static void pop_callee_regs(u8 **pprog, bool *callee_regs_used) 262 { 263 u8 *prog = *pprog; 264 265 if (callee_regs_used[3]) 266 EMIT2(0x41, 0x5F); /* pop r15 */ 267 if (callee_regs_used[2]) 268 EMIT2(0x41, 0x5E); /* pop r14 */ 269 if (callee_regs_used[1]) 270 EMIT2(0x41, 0x5D); /* pop r13 */ 271 if (callee_regs_used[0]) 272 EMIT1(0x5B); /* pop rbx */ 273 *pprog = prog; 274 } 275 276 /* 277 * Emit x86-64 prologue code for BPF program. 278 * bpf_tail_call helper will skip the first X86_TAIL_CALL_OFFSET bytes 279 * while jumping to another program 280 */ 281 static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf, 282 bool tail_call_reachable, bool is_subprog) 283 { 284 u8 *prog = *pprog; 285 286 /* BPF trampoline can be made to work without these nops, 287 * but let's waste 5 bytes for now and optimize later 288 */ 289 memcpy(prog, x86_nops[5], X86_PATCH_SIZE); 290 prog += X86_PATCH_SIZE; 291 if (!ebpf_from_cbpf) { 292 if (tail_call_reachable && !is_subprog) 293 EMIT2(0x31, 0xC0); /* xor eax, eax */ 294 else 295 EMIT2(0x66, 0x90); /* nop2 */ 296 } 297 EMIT1(0x55); /* push rbp */ 298 EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */ 299 /* sub rsp, rounded_stack_depth */ 300 if (stack_depth) 301 EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth, 8)); 302 if (tail_call_reachable) 303 EMIT1(0x50); /* push rax */ 304 *pprog = prog; 305 } 306 307 static int emit_patch(u8 **pprog, void *func, void *ip, u8 opcode) 308 { 309 u8 *prog = *pprog; 310 s64 offset; 311 312 offset = func - (ip + X86_PATCH_SIZE); 313 if (!is_simm32(offset)) { 314 pr_err("Target call %p is out of range\n", func); 315 return -ERANGE; 316 } 317 EMIT1_off32(opcode, offset); 318 *pprog = prog; 319 return 0; 320 } 321 322 static int emit_call(u8 **pprog, void *func, void *ip) 323 { 324 return emit_patch(pprog, func, ip, 0xE8); 325 } 326 327 static int emit_jump(u8 **pprog, void *func, void *ip) 328 { 329 return emit_patch(pprog, func, ip, 0xE9); 330 } 331 332 static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t, 333 void *old_addr, void *new_addr, 334 const bool text_live) 335 { 336 const u8 *nop_insn = x86_nops[5]; 337 u8 old_insn[X86_PATCH_SIZE]; 338 u8 new_insn[X86_PATCH_SIZE]; 339 u8 *prog; 340 int ret; 341 342 memcpy(old_insn, nop_insn, X86_PATCH_SIZE); 343 if (old_addr) { 344 prog = old_insn; 345 ret = t == BPF_MOD_CALL ? 346 emit_call(&prog, old_addr, ip) : 347 emit_jump(&prog, old_addr, ip); 348 if (ret) 349 return ret; 350 } 351 352 memcpy(new_insn, nop_insn, X86_PATCH_SIZE); 353 if (new_addr) { 354 prog = new_insn; 355 ret = t == BPF_MOD_CALL ? 
356 emit_call(&prog, new_addr, ip) : 357 emit_jump(&prog, new_addr, ip); 358 if (ret) 359 return ret; 360 } 361 362 ret = -EBUSY; 363 mutex_lock(&text_mutex); 364 if (memcmp(ip, old_insn, X86_PATCH_SIZE)) 365 goto out; 366 ret = 1; 367 if (memcmp(ip, new_insn, X86_PATCH_SIZE)) { 368 if (text_live) 369 text_poke_bp(ip, new_insn, X86_PATCH_SIZE, NULL); 370 else 371 memcpy(ip, new_insn, X86_PATCH_SIZE); 372 ret = 0; 373 } 374 out: 375 mutex_unlock(&text_mutex); 376 return ret; 377 } 378 379 int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t, 380 void *old_addr, void *new_addr) 381 { 382 if (!is_kernel_text((long)ip) && 383 !is_bpf_text_address((long)ip)) 384 /* BPF poking in modules is not supported */ 385 return -EINVAL; 386 387 return __bpf_arch_text_poke(ip, t, old_addr, new_addr, true); 388 } 389 390 #define EMIT_LFENCE() EMIT3(0x0F, 0xAE, 0xE8) 391 392 static void emit_indirect_jump(u8 **pprog, int reg, u8 *ip) 393 { 394 u8 *prog = *pprog; 395 396 #ifdef CONFIG_RETPOLINE 397 if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_AMD)) { 398 EMIT_LFENCE(); 399 EMIT2(0xFF, 0xE0 + reg); 400 } else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) { 401 emit_jump(&prog, &__x86_indirect_thunk_array[reg], ip); 402 } else 403 #endif 404 EMIT2(0xFF, 0xE0 + reg); 405 406 *pprog = prog; 407 } 408 409 /* 410 * Generate the following code: 411 * 412 * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ... 413 * if (index >= array->map.max_entries) 414 * goto out; 415 * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT) 416 * goto out; 417 * prog = array->ptrs[index]; 418 * if (prog == NULL) 419 * goto out; 420 * goto *(prog->bpf_func + prologue_size); 421 * out: 422 */ 423 static void emit_bpf_tail_call_indirect(u8 **pprog, bool *callee_regs_used, 424 u32 stack_depth, u8 *ip, 425 struct jit_context *ctx) 426 { 427 int tcc_off = -4 - round_up(stack_depth, 8); 428 u8 *prog = *pprog, *start = *pprog; 429 int offset; 430 431 /* 432 * rdi - pointer to ctx 433 * rsi - pointer to bpf_array 434 * rdx - index in bpf_array 435 */ 436 437 /* 438 * if (index >= array->map.max_entries) 439 * goto out; 440 */ 441 EMIT2(0x89, 0xD2); /* mov edx, edx */ 442 EMIT3(0x39, 0x56, /* cmp dword ptr [rsi + 16], edx */ 443 offsetof(struct bpf_array, map.max_entries)); 444 445 offset = ctx->tail_call_indirect_label - (prog + 2 - start); 446 EMIT2(X86_JBE, offset); /* jbe out */ 447 448 /* 449 * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT) 450 * goto out; 451 */ 452 EMIT2_off32(0x8B, 0x85, tcc_off); /* mov eax, dword ptr [rbp - tcc_off] */ 453 EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */ 454 455 offset = ctx->tail_call_indirect_label - (prog + 2 - start); 456 EMIT2(X86_JAE, offset); /* jae out */ 457 EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */ 458 EMIT2_off32(0x89, 0x85, tcc_off); /* mov dword ptr [rbp - tcc_off], eax */ 459 460 /* prog = array->ptrs[index]; */ 461 EMIT4_off32(0x48, 0x8B, 0x8C, 0xD6, /* mov rcx, [rsi + rdx * 8 + offsetof(...)] */ 462 offsetof(struct bpf_array, ptrs)); 463 464 /* 465 * if (prog == NULL) 466 * goto out; 467 */ 468 EMIT3(0x48, 0x85, 0xC9); /* test rcx,rcx */ 469 470 offset = ctx->tail_call_indirect_label - (prog + 2 - start); 471 EMIT2(X86_JE, offset); /* je out */ 472 473 pop_callee_regs(&prog, callee_regs_used); 474 475 EMIT1(0x58); /* pop rax */ 476 if (stack_depth) 477 EMIT3_off32(0x48, 0x81, 0xC4, /* add rsp, sd */ 478 round_up(stack_depth, 8)); 479 480 /* goto *(prog->bpf_func + X86_TAIL_CALL_OFFSET); */ 481 EMIT4(0x48, 0x8B, 0x49, /* mov rcx, qword ptr [rcx + 32] */ 
482 offsetof(struct bpf_prog, bpf_func)); 483 EMIT4(0x48, 0x83, 0xC1, /* add rcx, X86_TAIL_CALL_OFFSET */ 484 X86_TAIL_CALL_OFFSET); 485 /* 486 * Now we're ready to jump into next BPF program 487 * rdi == ctx (1st arg) 488 * rcx == prog->bpf_func + X86_TAIL_CALL_OFFSET 489 */ 490 emit_indirect_jump(&prog, 1 /* rcx */, ip + (prog - start)); 491 492 /* out: */ 493 ctx->tail_call_indirect_label = prog - start; 494 *pprog = prog; 495 } 496 497 static void emit_bpf_tail_call_direct(struct bpf_jit_poke_descriptor *poke, 498 u8 **pprog, u8 *ip, 499 bool *callee_regs_used, u32 stack_depth, 500 struct jit_context *ctx) 501 { 502 int tcc_off = -4 - round_up(stack_depth, 8); 503 u8 *prog = *pprog, *start = *pprog; 504 int offset; 505 506 /* 507 * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT) 508 * goto out; 509 */ 510 EMIT2_off32(0x8B, 0x85, tcc_off); /* mov eax, dword ptr [rbp - tcc_off] */ 511 EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */ 512 513 offset = ctx->tail_call_direct_label - (prog + 2 - start); 514 EMIT2(X86_JAE, offset); /* jae out */ 515 EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */ 516 EMIT2_off32(0x89, 0x85, tcc_off); /* mov dword ptr [rbp - tcc_off], eax */ 517 518 poke->tailcall_bypass = ip + (prog - start); 519 poke->adj_off = X86_TAIL_CALL_OFFSET; 520 poke->tailcall_target = ip + ctx->tail_call_direct_label - X86_PATCH_SIZE; 521 poke->bypass_addr = (u8 *)poke->tailcall_target + X86_PATCH_SIZE; 522 523 emit_jump(&prog, (u8 *)poke->tailcall_target + X86_PATCH_SIZE, 524 poke->tailcall_bypass); 525 526 pop_callee_regs(&prog, callee_regs_used); 527 EMIT1(0x58); /* pop rax */ 528 if (stack_depth) 529 EMIT3_off32(0x48, 0x81, 0xC4, round_up(stack_depth, 8)); 530 531 memcpy(prog, x86_nops[5], X86_PATCH_SIZE); 532 prog += X86_PATCH_SIZE; 533 534 /* out: */ 535 ctx->tail_call_direct_label = prog - start; 536 537 *pprog = prog; 538 } 539 540 static void bpf_tail_call_direct_fixup(struct bpf_prog *prog) 541 { 542 struct bpf_jit_poke_descriptor *poke; 543 struct bpf_array *array; 544 struct bpf_prog *target; 545 int i, ret; 546 547 for (i = 0; i < prog->aux->size_poke_tab; i++) { 548 poke = &prog->aux->poke_tab[i]; 549 if (poke->aux && poke->aux != prog->aux) 550 continue; 551 552 WARN_ON_ONCE(READ_ONCE(poke->tailcall_target_stable)); 553 554 if (poke->reason != BPF_POKE_REASON_TAIL_CALL) 555 continue; 556 557 array = container_of(poke->tail_call.map, struct bpf_array, map); 558 mutex_lock(&array->aux->poke_mutex); 559 target = array->ptrs[poke->tail_call.key]; 560 if (target) { 561 /* Plain memcpy is used when image is not live yet 562 * and still not locked as read-only. Once poke 563 * location is active (poke->tailcall_target_stable), 564 * any parallel bpf_arch_text_poke() might occur 565 * still on the read-write image until we finally 566 * locked it as read-only. Both modifications on 567 * the given image are under text_mutex to avoid 568 * interference. 
569 */ 570 ret = __bpf_arch_text_poke(poke->tailcall_target, 571 BPF_MOD_JUMP, NULL, 572 (u8 *)target->bpf_func + 573 poke->adj_off, false); 574 BUG_ON(ret < 0); 575 ret = __bpf_arch_text_poke(poke->tailcall_bypass, 576 BPF_MOD_JUMP, 577 (u8 *)poke->tailcall_target + 578 X86_PATCH_SIZE, NULL, false); 579 BUG_ON(ret < 0); 580 } 581 WRITE_ONCE(poke->tailcall_target_stable, true); 582 mutex_unlock(&array->aux->poke_mutex); 583 } 584 } 585 586 static void emit_mov_imm32(u8 **pprog, bool sign_propagate, 587 u32 dst_reg, const u32 imm32) 588 { 589 u8 *prog = *pprog; 590 u8 b1, b2, b3; 591 592 /* 593 * Optimization: if imm32 is positive, use 'mov %eax, imm32' 594 * (which zero-extends imm32) to save 2 bytes. 595 */ 596 if (sign_propagate && (s32)imm32 < 0) { 597 /* 'mov %rax, imm32' sign extends imm32 */ 598 b1 = add_1mod(0x48, dst_reg); 599 b2 = 0xC7; 600 b3 = 0xC0; 601 EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32); 602 goto done; 603 } 604 605 /* 606 * Optimization: if imm32 is zero, use 'xor %eax, %eax' 607 * to save 3 bytes. 608 */ 609 if (imm32 == 0) { 610 if (is_ereg(dst_reg)) 611 EMIT1(add_2mod(0x40, dst_reg, dst_reg)); 612 b2 = 0x31; /* xor */ 613 b3 = 0xC0; 614 EMIT2(b2, add_2reg(b3, dst_reg, dst_reg)); 615 goto done; 616 } 617 618 /* mov %eax, imm32 */ 619 if (is_ereg(dst_reg)) 620 EMIT1(add_1mod(0x40, dst_reg)); 621 EMIT1_off32(add_1reg(0xB8, dst_reg), imm32); 622 done: 623 *pprog = prog; 624 } 625 626 static void emit_mov_imm64(u8 **pprog, u32 dst_reg, 627 const u32 imm32_hi, const u32 imm32_lo) 628 { 629 u8 *prog = *pprog; 630 631 if (is_uimm32(((u64)imm32_hi << 32) | (u32)imm32_lo)) { 632 /* 633 * For emitting plain u32, where sign bit must not be 634 * propagated LLVM tends to load imm64 over mov32 635 * directly, so save couple of bytes by just doing 636 * 'mov %eax, imm32' instead. 637 */ 638 emit_mov_imm32(&prog, false, dst_reg, imm32_lo); 639 } else { 640 /* movabsq %rax, imm64 */ 641 EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg)); 642 EMIT(imm32_lo, 4); 643 EMIT(imm32_hi, 4); 644 } 645 646 *pprog = prog; 647 } 648 649 static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg) 650 { 651 u8 *prog = *pprog; 652 653 if (is64) { 654 /* mov dst, src */ 655 EMIT_mov(dst_reg, src_reg); 656 } else { 657 /* mov32 dst, src */ 658 if (is_ereg(dst_reg) || is_ereg(src_reg)) 659 EMIT1(add_2mod(0x40, dst_reg, src_reg)); 660 EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg)); 661 } 662 663 *pprog = prog; 664 } 665 666 /* Emit the suffix (ModR/M etc) for addressing *(ptr_reg + off) and val_reg */ 667 static void emit_insn_suffix(u8 **pprog, u32 ptr_reg, u32 val_reg, int off) 668 { 669 u8 *prog = *pprog; 670 671 if (is_imm8(off)) { 672 /* 1-byte signed displacement. 
673 * 674 * If off == 0 we could skip this and save one extra byte, but 675 * special case of x86 R13 which always needs an offset is not 676 * worth the hassle 677 */ 678 EMIT2(add_2reg(0x40, ptr_reg, val_reg), off); 679 } else { 680 /* 4-byte signed displacement */ 681 EMIT1_off32(add_2reg(0x80, ptr_reg, val_reg), off); 682 } 683 *pprog = prog; 684 } 685 686 /* 687 * Emit a REX byte if it will be necessary to address these registers 688 */ 689 static void maybe_emit_mod(u8 **pprog, u32 dst_reg, u32 src_reg, bool is64) 690 { 691 u8 *prog = *pprog; 692 693 if (is64) 694 EMIT1(add_2mod(0x48, dst_reg, src_reg)); 695 else if (is_ereg(dst_reg) || is_ereg(src_reg)) 696 EMIT1(add_2mod(0x40, dst_reg, src_reg)); 697 *pprog = prog; 698 } 699 700 /* 701 * Similar version of maybe_emit_mod() for a single register 702 */ 703 static void maybe_emit_1mod(u8 **pprog, u32 reg, bool is64) 704 { 705 u8 *prog = *pprog; 706 707 if (is64) 708 EMIT1(add_1mod(0x48, reg)); 709 else if (is_ereg(reg)) 710 EMIT1(add_1mod(0x40, reg)); 711 *pprog = prog; 712 } 713 714 /* LDX: dst_reg = *(u8*)(src_reg + off) */ 715 static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off) 716 { 717 u8 *prog = *pprog; 718 719 switch (size) { 720 case BPF_B: 721 /* Emit 'movzx rax, byte ptr [rax + off]' */ 722 EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6); 723 break; 724 case BPF_H: 725 /* Emit 'movzx rax, word ptr [rax + off]' */ 726 EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7); 727 break; 728 case BPF_W: 729 /* Emit 'mov eax, dword ptr [rax+0x14]' */ 730 if (is_ereg(dst_reg) || is_ereg(src_reg)) 731 EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B); 732 else 733 EMIT1(0x8B); 734 break; 735 case BPF_DW: 736 /* Emit 'mov rax, qword ptr [rax+0x14]' */ 737 EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B); 738 break; 739 } 740 emit_insn_suffix(&prog, src_reg, dst_reg, off); 741 *pprog = prog; 742 } 743 744 /* STX: *(u8*)(dst_reg + off) = src_reg */ 745 static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off) 746 { 747 u8 *prog = *pprog; 748 749 switch (size) { 750 case BPF_B: 751 /* Emit 'mov byte ptr [rax + off], al' */ 752 if (is_ereg(dst_reg) || is_ereg_8l(src_reg)) 753 /* Add extra byte for eregs or SIL,DIL,BPL in src_reg */ 754 EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88); 755 else 756 EMIT1(0x88); 757 break; 758 case BPF_H: 759 if (is_ereg(dst_reg) || is_ereg(src_reg)) 760 EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89); 761 else 762 EMIT2(0x66, 0x89); 763 break; 764 case BPF_W: 765 if (is_ereg(dst_reg) || is_ereg(src_reg)) 766 EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89); 767 else 768 EMIT1(0x89); 769 break; 770 case BPF_DW: 771 EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89); 772 break; 773 } 774 emit_insn_suffix(&prog, dst_reg, src_reg, off); 775 *pprog = prog; 776 } 777 778 static int emit_atomic(u8 **pprog, u8 atomic_op, 779 u32 dst_reg, u32 src_reg, s16 off, u8 bpf_size) 780 { 781 u8 *prog = *pprog; 782 783 EMIT1(0xF0); /* lock prefix */ 784 785 maybe_emit_mod(&prog, dst_reg, src_reg, bpf_size == BPF_DW); 786 787 /* emit opcode */ 788 switch (atomic_op) { 789 case BPF_ADD: 790 case BPF_SUB: 791 case BPF_AND: 792 case BPF_OR: 793 case BPF_XOR: 794 /* lock *(u32/u64*)(dst_reg + off) <op>= src_reg */ 795 EMIT1(simple_alu_opcodes[atomic_op]); 796 break; 797 case BPF_ADD | BPF_FETCH: 798 /* src_reg = atomic_fetch_add(dst_reg + off, src_reg); */ 799 EMIT2(0x0F, 0xC1); 800 break; 801 case BPF_XCHG: 802 /* src_reg = atomic_xchg(dst_reg + off, src_reg); */ 803 EMIT1(0x87); 804 break; 805 
case BPF_CMPXCHG: 806 /* r0 = atomic_cmpxchg(dst_reg + off, r0, src_reg); */ 807 EMIT2(0x0F, 0xB1); 808 break; 809 default: 810 pr_err("bpf_jit: unknown atomic opcode %02x\n", atomic_op); 811 return -EFAULT; 812 } 813 814 emit_insn_suffix(&prog, dst_reg, src_reg, off); 815 816 *pprog = prog; 817 return 0; 818 } 819 820 bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs) 821 { 822 u32 reg = x->fixup >> 8; 823 824 /* jump over faulting load and clear dest register */ 825 *(unsigned long *)((void *)regs + reg) = 0; 826 regs->ip += x->fixup & 0xff; 827 return true; 828 } 829 830 static void detect_reg_usage(struct bpf_insn *insn, int insn_cnt, 831 bool *regs_used, bool *tail_call_seen) 832 { 833 int i; 834 835 for (i = 1; i <= insn_cnt; i++, insn++) { 836 if (insn->code == (BPF_JMP | BPF_TAIL_CALL)) 837 *tail_call_seen = true; 838 if (insn->dst_reg == BPF_REG_6 || insn->src_reg == BPF_REG_6) 839 regs_used[0] = true; 840 if (insn->dst_reg == BPF_REG_7 || insn->src_reg == BPF_REG_7) 841 regs_used[1] = true; 842 if (insn->dst_reg == BPF_REG_8 || insn->src_reg == BPF_REG_8) 843 regs_used[2] = true; 844 if (insn->dst_reg == BPF_REG_9 || insn->src_reg == BPF_REG_9) 845 regs_used[3] = true; 846 } 847 } 848 849 static void emit_nops(u8 **pprog, int len) 850 { 851 u8 *prog = *pprog; 852 int i, noplen; 853 854 while (len > 0) { 855 noplen = len; 856 857 if (noplen > ASM_NOP_MAX) 858 noplen = ASM_NOP_MAX; 859 860 for (i = 0; i < noplen; i++) 861 EMIT1(x86_nops[noplen][i]); 862 len -= noplen; 863 } 864 865 *pprog = prog; 866 } 867 868 #define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp))) 869 870 static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, 871 int oldproglen, struct jit_context *ctx, bool jmp_padding) 872 { 873 bool tail_call_reachable = bpf_prog->aux->tail_call_reachable; 874 struct bpf_insn *insn = bpf_prog->insnsi; 875 bool callee_regs_used[4] = {}; 876 int insn_cnt = bpf_prog->len; 877 bool tail_call_seen = false; 878 bool seen_exit = false; 879 u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY]; 880 int i, excnt = 0; 881 int ilen, proglen = 0; 882 u8 *prog = temp; 883 int err; 884 885 detect_reg_usage(insn, insn_cnt, callee_regs_used, 886 &tail_call_seen); 887 888 /* tail call's presence in current prog implies it is reachable */ 889 tail_call_reachable |= tail_call_seen; 890 891 emit_prologue(&prog, bpf_prog->aux->stack_depth, 892 bpf_prog_was_classic(bpf_prog), tail_call_reachable, 893 bpf_prog->aux->func_idx != 0); 894 push_callee_regs(&prog, callee_regs_used); 895 896 ilen = prog - temp; 897 if (image) 898 memcpy(image + proglen, temp, ilen); 899 proglen += ilen; 900 addrs[0] = proglen; 901 prog = temp; 902 903 for (i = 1; i <= insn_cnt; i++, insn++) { 904 const s32 imm32 = insn->imm; 905 u32 dst_reg = insn->dst_reg; 906 u32 src_reg = insn->src_reg; 907 u8 b2 = 0, b3 = 0; 908 u8 *start_of_ldx; 909 s64 jmp_offset; 910 u8 jmp_cond; 911 u8 *func; 912 int nops; 913 914 switch (insn->code) { 915 /* ALU */ 916 case BPF_ALU | BPF_ADD | BPF_X: 917 case BPF_ALU | BPF_SUB | BPF_X: 918 case BPF_ALU | BPF_AND | BPF_X: 919 case BPF_ALU | BPF_OR | BPF_X: 920 case BPF_ALU | BPF_XOR | BPF_X: 921 case BPF_ALU64 | BPF_ADD | BPF_X: 922 case BPF_ALU64 | BPF_SUB | BPF_X: 923 case BPF_ALU64 | BPF_AND | BPF_X: 924 case BPF_ALU64 | BPF_OR | BPF_X: 925 case BPF_ALU64 | BPF_XOR | BPF_X: 926 maybe_emit_mod(&prog, dst_reg, src_reg, 927 BPF_CLASS(insn->code) == BPF_ALU64); 928 b2 = simple_alu_opcodes[BPF_OP(insn->code)]; 929 EMIT2(b2, add_2reg(0xC0, dst_reg, 
src_reg)); 930 break; 931 932 case BPF_ALU64 | BPF_MOV | BPF_X: 933 case BPF_ALU | BPF_MOV | BPF_X: 934 emit_mov_reg(&prog, 935 BPF_CLASS(insn->code) == BPF_ALU64, 936 dst_reg, src_reg); 937 break; 938 939 /* neg dst */ 940 case BPF_ALU | BPF_NEG: 941 case BPF_ALU64 | BPF_NEG: 942 maybe_emit_1mod(&prog, dst_reg, 943 BPF_CLASS(insn->code) == BPF_ALU64); 944 EMIT2(0xF7, add_1reg(0xD8, dst_reg)); 945 break; 946 947 case BPF_ALU | BPF_ADD | BPF_K: 948 case BPF_ALU | BPF_SUB | BPF_K: 949 case BPF_ALU | BPF_AND | BPF_K: 950 case BPF_ALU | BPF_OR | BPF_K: 951 case BPF_ALU | BPF_XOR | BPF_K: 952 case BPF_ALU64 | BPF_ADD | BPF_K: 953 case BPF_ALU64 | BPF_SUB | BPF_K: 954 case BPF_ALU64 | BPF_AND | BPF_K: 955 case BPF_ALU64 | BPF_OR | BPF_K: 956 case BPF_ALU64 | BPF_XOR | BPF_K: 957 maybe_emit_1mod(&prog, dst_reg, 958 BPF_CLASS(insn->code) == BPF_ALU64); 959 960 /* 961 * b3 holds 'normal' opcode, b2 short form only valid 962 * in case dst is eax/rax. 963 */ 964 switch (BPF_OP(insn->code)) { 965 case BPF_ADD: 966 b3 = 0xC0; 967 b2 = 0x05; 968 break; 969 case BPF_SUB: 970 b3 = 0xE8; 971 b2 = 0x2D; 972 break; 973 case BPF_AND: 974 b3 = 0xE0; 975 b2 = 0x25; 976 break; 977 case BPF_OR: 978 b3 = 0xC8; 979 b2 = 0x0D; 980 break; 981 case BPF_XOR: 982 b3 = 0xF0; 983 b2 = 0x35; 984 break; 985 } 986 987 if (is_imm8(imm32)) 988 EMIT3(0x83, add_1reg(b3, dst_reg), imm32); 989 else if (is_axreg(dst_reg)) 990 EMIT1_off32(b2, imm32); 991 else 992 EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32); 993 break; 994 995 case BPF_ALU64 | BPF_MOV | BPF_K: 996 case BPF_ALU | BPF_MOV | BPF_K: 997 emit_mov_imm32(&prog, BPF_CLASS(insn->code) == BPF_ALU64, 998 dst_reg, imm32); 999 break; 1000 1001 case BPF_LD | BPF_IMM | BPF_DW: 1002 emit_mov_imm64(&prog, dst_reg, insn[1].imm, insn[0].imm); 1003 insn++; 1004 i++; 1005 break; 1006 1007 /* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */ 1008 case BPF_ALU | BPF_MOD | BPF_X: 1009 case BPF_ALU | BPF_DIV | BPF_X: 1010 case BPF_ALU | BPF_MOD | BPF_K: 1011 case BPF_ALU | BPF_DIV | BPF_K: 1012 case BPF_ALU64 | BPF_MOD | BPF_X: 1013 case BPF_ALU64 | BPF_DIV | BPF_X: 1014 case BPF_ALU64 | BPF_MOD | BPF_K: 1015 case BPF_ALU64 | BPF_DIV | BPF_K: { 1016 bool is64 = BPF_CLASS(insn->code) == BPF_ALU64; 1017 1018 if (dst_reg != BPF_REG_0) 1019 EMIT1(0x50); /* push rax */ 1020 if (dst_reg != BPF_REG_3) 1021 EMIT1(0x52); /* push rdx */ 1022 1023 if (BPF_SRC(insn->code) == BPF_X) { 1024 if (src_reg == BPF_REG_0 || 1025 src_reg == BPF_REG_3) { 1026 /* mov r11, src_reg */ 1027 EMIT_mov(AUX_REG, src_reg); 1028 src_reg = AUX_REG; 1029 } 1030 } else { 1031 /* mov r11, imm32 */ 1032 EMIT3_off32(0x49, 0xC7, 0xC3, imm32); 1033 src_reg = AUX_REG; 1034 } 1035 1036 if (dst_reg != BPF_REG_0) 1037 /* mov rax, dst_reg */ 1038 emit_mov_reg(&prog, is64, BPF_REG_0, dst_reg); 1039 1040 /* 1041 * xor edx, edx 1042 * equivalent to 'xor rdx, rdx', but one byte less 1043 */ 1044 EMIT2(0x31, 0xd2); 1045 1046 /* div src_reg */ 1047 maybe_emit_1mod(&prog, src_reg, is64); 1048 EMIT2(0xF7, add_1reg(0xF0, src_reg)); 1049 1050 if (BPF_OP(insn->code) == BPF_MOD && 1051 dst_reg != BPF_REG_3) 1052 /* mov dst_reg, rdx */ 1053 emit_mov_reg(&prog, is64, dst_reg, BPF_REG_3); 1054 else if (BPF_OP(insn->code) == BPF_DIV && 1055 dst_reg != BPF_REG_0) 1056 /* mov dst_reg, rax */ 1057 emit_mov_reg(&prog, is64, dst_reg, BPF_REG_0); 1058 1059 if (dst_reg != BPF_REG_3) 1060 EMIT1(0x5A); /* pop rdx */ 1061 if (dst_reg != BPF_REG_0) 1062 EMIT1(0x58); /* pop rax */ 1063 break; 1064 } 1065 1066 case BPF_ALU | BPF_MUL | BPF_K: 1067 case 
BPF_ALU64 | BPF_MUL | BPF_K: 1068 maybe_emit_mod(&prog, dst_reg, dst_reg, 1069 BPF_CLASS(insn->code) == BPF_ALU64); 1070 1071 if (is_imm8(imm32)) 1072 /* imul dst_reg, dst_reg, imm8 */ 1073 EMIT3(0x6B, add_2reg(0xC0, dst_reg, dst_reg), 1074 imm32); 1075 else 1076 /* imul dst_reg, dst_reg, imm32 */ 1077 EMIT2_off32(0x69, 1078 add_2reg(0xC0, dst_reg, dst_reg), 1079 imm32); 1080 break; 1081 1082 case BPF_ALU | BPF_MUL | BPF_X: 1083 case BPF_ALU64 | BPF_MUL | BPF_X: 1084 maybe_emit_mod(&prog, src_reg, dst_reg, 1085 BPF_CLASS(insn->code) == BPF_ALU64); 1086 1087 /* imul dst_reg, src_reg */ 1088 EMIT3(0x0F, 0xAF, add_2reg(0xC0, src_reg, dst_reg)); 1089 break; 1090 1091 /* Shifts */ 1092 case BPF_ALU | BPF_LSH | BPF_K: 1093 case BPF_ALU | BPF_RSH | BPF_K: 1094 case BPF_ALU | BPF_ARSH | BPF_K: 1095 case BPF_ALU64 | BPF_LSH | BPF_K: 1096 case BPF_ALU64 | BPF_RSH | BPF_K: 1097 case BPF_ALU64 | BPF_ARSH | BPF_K: 1098 maybe_emit_1mod(&prog, dst_reg, 1099 BPF_CLASS(insn->code) == BPF_ALU64); 1100 1101 b3 = simple_alu_opcodes[BPF_OP(insn->code)]; 1102 if (imm32 == 1) 1103 EMIT2(0xD1, add_1reg(b3, dst_reg)); 1104 else 1105 EMIT3(0xC1, add_1reg(b3, dst_reg), imm32); 1106 break; 1107 1108 case BPF_ALU | BPF_LSH | BPF_X: 1109 case BPF_ALU | BPF_RSH | BPF_X: 1110 case BPF_ALU | BPF_ARSH | BPF_X: 1111 case BPF_ALU64 | BPF_LSH | BPF_X: 1112 case BPF_ALU64 | BPF_RSH | BPF_X: 1113 case BPF_ALU64 | BPF_ARSH | BPF_X: 1114 1115 /* Check for bad case when dst_reg == rcx */ 1116 if (dst_reg == BPF_REG_4) { 1117 /* mov r11, dst_reg */ 1118 EMIT_mov(AUX_REG, dst_reg); 1119 dst_reg = AUX_REG; 1120 } 1121 1122 if (src_reg != BPF_REG_4) { /* common case */ 1123 EMIT1(0x51); /* push rcx */ 1124 1125 /* mov rcx, src_reg */ 1126 EMIT_mov(BPF_REG_4, src_reg); 1127 } 1128 1129 /* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */ 1130 maybe_emit_1mod(&prog, dst_reg, 1131 BPF_CLASS(insn->code) == BPF_ALU64); 1132 1133 b3 = simple_alu_opcodes[BPF_OP(insn->code)]; 1134 EMIT2(0xD3, add_1reg(b3, dst_reg)); 1135 1136 if (src_reg != BPF_REG_4) 1137 EMIT1(0x59); /* pop rcx */ 1138 1139 if (insn->dst_reg == BPF_REG_4) 1140 /* mov dst_reg, r11 */ 1141 EMIT_mov(insn->dst_reg, AUX_REG); 1142 break; 1143 1144 case BPF_ALU | BPF_END | BPF_FROM_BE: 1145 switch (imm32) { 1146 case 16: 1147 /* Emit 'ror %ax, 8' to swap lower 2 bytes */ 1148 EMIT1(0x66); 1149 if (is_ereg(dst_reg)) 1150 EMIT1(0x41); 1151 EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8); 1152 1153 /* Emit 'movzwl eax, ax' */ 1154 if (is_ereg(dst_reg)) 1155 EMIT3(0x45, 0x0F, 0xB7); 1156 else 1157 EMIT2(0x0F, 0xB7); 1158 EMIT1(add_2reg(0xC0, dst_reg, dst_reg)); 1159 break; 1160 case 32: 1161 /* Emit 'bswap eax' to swap lower 4 bytes */ 1162 if (is_ereg(dst_reg)) 1163 EMIT2(0x41, 0x0F); 1164 else 1165 EMIT1(0x0F); 1166 EMIT1(add_1reg(0xC8, dst_reg)); 1167 break; 1168 case 64: 1169 /* Emit 'bswap rax' to swap 8 bytes */ 1170 EMIT3(add_1mod(0x48, dst_reg), 0x0F, 1171 add_1reg(0xC8, dst_reg)); 1172 break; 1173 } 1174 break; 1175 1176 case BPF_ALU | BPF_END | BPF_FROM_LE: 1177 switch (imm32) { 1178 case 16: 1179 /* 1180 * Emit 'movzwl eax, ax' to zero extend 16-bit 1181 * into 64 bit 1182 */ 1183 if (is_ereg(dst_reg)) 1184 EMIT3(0x45, 0x0F, 0xB7); 1185 else 1186 EMIT2(0x0F, 0xB7); 1187 EMIT1(add_2reg(0xC0, dst_reg, dst_reg)); 1188 break; 1189 case 32: 1190 /* Emit 'mov eax, eax' to clear upper 32-bits */ 1191 if (is_ereg(dst_reg)) 1192 EMIT1(0x45); 1193 EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg)); 1194 break; 1195 case 64: 1196 /* nop */ 1197 break; 1198 } 1199 break; 1200 1201 /* speculation 
barrier */ 1202 case BPF_ST | BPF_NOSPEC: 1203 if (boot_cpu_has(X86_FEATURE_XMM2)) 1204 EMIT_LFENCE(); 1205 break; 1206 1207 /* ST: *(u8*)(dst_reg + off) = imm */ 1208 case BPF_ST | BPF_MEM | BPF_B: 1209 if (is_ereg(dst_reg)) 1210 EMIT2(0x41, 0xC6); 1211 else 1212 EMIT1(0xC6); 1213 goto st; 1214 case BPF_ST | BPF_MEM | BPF_H: 1215 if (is_ereg(dst_reg)) 1216 EMIT3(0x66, 0x41, 0xC7); 1217 else 1218 EMIT2(0x66, 0xC7); 1219 goto st; 1220 case BPF_ST | BPF_MEM | BPF_W: 1221 if (is_ereg(dst_reg)) 1222 EMIT2(0x41, 0xC7); 1223 else 1224 EMIT1(0xC7); 1225 goto st; 1226 case BPF_ST | BPF_MEM | BPF_DW: 1227 EMIT2(add_1mod(0x48, dst_reg), 0xC7); 1228 1229 st: if (is_imm8(insn->off)) 1230 EMIT2(add_1reg(0x40, dst_reg), insn->off); 1231 else 1232 EMIT1_off32(add_1reg(0x80, dst_reg), insn->off); 1233 1234 EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code))); 1235 break; 1236 1237 /* STX: *(u8*)(dst_reg + off) = src_reg */ 1238 case BPF_STX | BPF_MEM | BPF_B: 1239 case BPF_STX | BPF_MEM | BPF_H: 1240 case BPF_STX | BPF_MEM | BPF_W: 1241 case BPF_STX | BPF_MEM | BPF_DW: 1242 emit_stx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off); 1243 break; 1244 1245 /* LDX: dst_reg = *(u8*)(src_reg + off) */ 1246 case BPF_LDX | BPF_MEM | BPF_B: 1247 case BPF_LDX | BPF_PROBE_MEM | BPF_B: 1248 case BPF_LDX | BPF_MEM | BPF_H: 1249 case BPF_LDX | BPF_PROBE_MEM | BPF_H: 1250 case BPF_LDX | BPF_MEM | BPF_W: 1251 case BPF_LDX | BPF_PROBE_MEM | BPF_W: 1252 case BPF_LDX | BPF_MEM | BPF_DW: 1253 case BPF_LDX | BPF_PROBE_MEM | BPF_DW: 1254 if (BPF_MODE(insn->code) == BPF_PROBE_MEM) { 1255 /* Though the verifier prevents negative insn->off in BPF_PROBE_MEM 1256 * add abs(insn->off) to the limit to make sure that negative 1257 * offset won't be an issue. 1258 * insn->off is s16, so it won't affect valid pointers. 1259 */ 1260 u64 limit = TASK_SIZE_MAX + PAGE_SIZE + abs(insn->off); 1261 u8 *end_of_jmp1, *end_of_jmp2; 1262 1263 /* Conservatively check that src_reg + insn->off is a kernel address: 1264 * 1. src_reg + insn->off >= limit 1265 * 2. src_reg + insn->off doesn't become small positive. 1266 * Cannot do src_reg + insn->off >= limit in one branch, 1267 * since it needs two spare registers, but JIT has only one. 1268 */ 1269 1270 /* movabsq r11, limit */ 1271 EMIT2(add_1mod(0x48, AUX_REG), add_1reg(0xB8, AUX_REG)); 1272 EMIT((u32)limit, 4); 1273 EMIT(limit >> 32, 4); 1274 /* cmp src_reg, r11 */ 1275 maybe_emit_mod(&prog, src_reg, AUX_REG, true); 1276 EMIT2(0x39, add_2reg(0xC0, src_reg, AUX_REG)); 1277 /* if unsigned '<' goto end_of_jmp2 */ 1278 EMIT2(X86_JB, 0); 1279 end_of_jmp1 = prog; 1280 1281 /* mov r11, src_reg */ 1282 emit_mov_reg(&prog, true, AUX_REG, src_reg); 1283 /* add r11, insn->off */ 1284 maybe_emit_1mod(&prog, AUX_REG, true); 1285 EMIT2_off32(0x81, add_1reg(0xC0, AUX_REG), insn->off); 1286 /* jmp if not carry to start_of_ldx 1287 * Otherwise ERR_PTR(-EINVAL) + 128 will be the user addr 1288 * that has to be rejected. 
1289 */ 1290 EMIT2(0x73 /* JNC */, 0); 1291 end_of_jmp2 = prog; 1292 1293 /* xor dst_reg, dst_reg */ 1294 emit_mov_imm32(&prog, false, dst_reg, 0); 1295 /* jmp byte_after_ldx */ 1296 EMIT2(0xEB, 0); 1297 1298 /* populate jmp_offset for JB above to jump to xor dst_reg */ 1299 end_of_jmp1[-1] = end_of_jmp2 - end_of_jmp1; 1300 /* populate jmp_offset for JNC above to jump to start_of_ldx */ 1301 start_of_ldx = prog; 1302 end_of_jmp2[-1] = start_of_ldx - end_of_jmp2; 1303 } 1304 emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off); 1305 if (BPF_MODE(insn->code) == BPF_PROBE_MEM) { 1306 struct exception_table_entry *ex; 1307 u8 *_insn = image + proglen + (start_of_ldx - temp); 1308 s64 delta; 1309 1310 /* populate jmp_offset for JMP above */ 1311 start_of_ldx[-1] = prog - start_of_ldx; 1312 1313 if (!bpf_prog->aux->extable) 1314 break; 1315 1316 if (excnt >= bpf_prog->aux->num_exentries) { 1317 pr_err("ex gen bug\n"); 1318 return -EFAULT; 1319 } 1320 ex = &bpf_prog->aux->extable[excnt++]; 1321 1322 delta = _insn - (u8 *)&ex->insn; 1323 if (!is_simm32(delta)) { 1324 pr_err("extable->insn doesn't fit into 32-bit\n"); 1325 return -EFAULT; 1326 } 1327 ex->insn = delta; 1328 1329 ex->data = EX_TYPE_BPF; 1330 1331 if (dst_reg > BPF_REG_9) { 1332 pr_err("verifier error\n"); 1333 return -EFAULT; 1334 } 1335 /* 1336 * Compute size of x86 insn and its target dest x86 register. 1337 * ex_handler_bpf() will use lower 8 bits to adjust 1338 * pt_regs->ip to jump over this x86 instruction 1339 * and upper bits to figure out which pt_regs to zero out. 1340 * End result: x86 insn "mov rbx, qword ptr [rax+0x14]" 1341 * of 4 bytes will be ignored and rbx will be zero inited. 1342 */ 1343 ex->fixup = (prog - start_of_ldx) | (reg2pt_regs[dst_reg] << 8); 1344 } 1345 break; 1346 1347 case BPF_STX | BPF_ATOMIC | BPF_W: 1348 case BPF_STX | BPF_ATOMIC | BPF_DW: 1349 if (insn->imm == (BPF_AND | BPF_FETCH) || 1350 insn->imm == (BPF_OR | BPF_FETCH) || 1351 insn->imm == (BPF_XOR | BPF_FETCH)) { 1352 bool is64 = BPF_SIZE(insn->code) == BPF_DW; 1353 u32 real_src_reg = src_reg; 1354 u32 real_dst_reg = dst_reg; 1355 u8 *branch_target; 1356 1357 /* 1358 * Can't be implemented with a single x86 insn. 1359 * Need to do a CMPXCHG loop. 1360 */ 1361 1362 /* Will need RAX as a CMPXCHG operand so save R0 */ 1363 emit_mov_reg(&prog, true, BPF_REG_AX, BPF_REG_0); 1364 if (src_reg == BPF_REG_0) 1365 real_src_reg = BPF_REG_AX; 1366 if (dst_reg == BPF_REG_0) 1367 real_dst_reg = BPF_REG_AX; 1368 1369 branch_target = prog; 1370 /* Load old value */ 1371 emit_ldx(&prog, BPF_SIZE(insn->code), 1372 BPF_REG_0, real_dst_reg, insn->off); 1373 /* 1374 * Perform the (commutative) operation locally, 1375 * put the result in the AUX_REG. 1376 */ 1377 emit_mov_reg(&prog, is64, AUX_REG, BPF_REG_0); 1378 maybe_emit_mod(&prog, AUX_REG, real_src_reg, is64); 1379 EMIT2(simple_alu_opcodes[BPF_OP(insn->imm)], 1380 add_2reg(0xC0, AUX_REG, real_src_reg)); 1381 /* Attempt to swap in new value */ 1382 err = emit_atomic(&prog, BPF_CMPXCHG, 1383 real_dst_reg, AUX_REG, 1384 insn->off, 1385 BPF_SIZE(insn->code)); 1386 if (WARN_ON(err)) 1387 return err; 1388 /* 1389 * ZF tells us whether we won the race. If it's 1390 * cleared we need to try again. 
1391 */ 1392 EMIT2(X86_JNE, -(prog - branch_target) - 2); 1393 /* Return the pre-modification value */ 1394 emit_mov_reg(&prog, is64, real_src_reg, BPF_REG_0); 1395 /* Restore R0 after clobbering RAX */ 1396 emit_mov_reg(&prog, true, BPF_REG_0, BPF_REG_AX); 1397 break; 1398 } 1399 1400 err = emit_atomic(&prog, insn->imm, dst_reg, src_reg, 1401 insn->off, BPF_SIZE(insn->code)); 1402 if (err) 1403 return err; 1404 break; 1405 1406 /* call */ 1407 case BPF_JMP | BPF_CALL: 1408 func = (u8 *) __bpf_call_base + imm32; 1409 if (tail_call_reachable) { 1410 EMIT3_off32(0x48, 0x8B, 0x85, 1411 -(bpf_prog->aux->stack_depth + 8)); 1412 if (!imm32 || emit_call(&prog, func, image + addrs[i - 1] + 7)) 1413 return -EINVAL; 1414 } else { 1415 if (!imm32 || emit_call(&prog, func, image + addrs[i - 1])) 1416 return -EINVAL; 1417 } 1418 break; 1419 1420 case BPF_JMP | BPF_TAIL_CALL: 1421 if (imm32) 1422 emit_bpf_tail_call_direct(&bpf_prog->aux->poke_tab[imm32 - 1], 1423 &prog, image + addrs[i - 1], 1424 callee_regs_used, 1425 bpf_prog->aux->stack_depth, 1426 ctx); 1427 else 1428 emit_bpf_tail_call_indirect(&prog, 1429 callee_regs_used, 1430 bpf_prog->aux->stack_depth, 1431 image + addrs[i - 1], 1432 ctx); 1433 break; 1434 1435 /* cond jump */ 1436 case BPF_JMP | BPF_JEQ | BPF_X: 1437 case BPF_JMP | BPF_JNE | BPF_X: 1438 case BPF_JMP | BPF_JGT | BPF_X: 1439 case BPF_JMP | BPF_JLT | BPF_X: 1440 case BPF_JMP | BPF_JGE | BPF_X: 1441 case BPF_JMP | BPF_JLE | BPF_X: 1442 case BPF_JMP | BPF_JSGT | BPF_X: 1443 case BPF_JMP | BPF_JSLT | BPF_X: 1444 case BPF_JMP | BPF_JSGE | BPF_X: 1445 case BPF_JMP | BPF_JSLE | BPF_X: 1446 case BPF_JMP32 | BPF_JEQ | BPF_X: 1447 case BPF_JMP32 | BPF_JNE | BPF_X: 1448 case BPF_JMP32 | BPF_JGT | BPF_X: 1449 case BPF_JMP32 | BPF_JLT | BPF_X: 1450 case BPF_JMP32 | BPF_JGE | BPF_X: 1451 case BPF_JMP32 | BPF_JLE | BPF_X: 1452 case BPF_JMP32 | BPF_JSGT | BPF_X: 1453 case BPF_JMP32 | BPF_JSLT | BPF_X: 1454 case BPF_JMP32 | BPF_JSGE | BPF_X: 1455 case BPF_JMP32 | BPF_JSLE | BPF_X: 1456 /* cmp dst_reg, src_reg */ 1457 maybe_emit_mod(&prog, dst_reg, src_reg, 1458 BPF_CLASS(insn->code) == BPF_JMP); 1459 EMIT2(0x39, add_2reg(0xC0, dst_reg, src_reg)); 1460 goto emit_cond_jmp; 1461 1462 case BPF_JMP | BPF_JSET | BPF_X: 1463 case BPF_JMP32 | BPF_JSET | BPF_X: 1464 /* test dst_reg, src_reg */ 1465 maybe_emit_mod(&prog, dst_reg, src_reg, 1466 BPF_CLASS(insn->code) == BPF_JMP); 1467 EMIT2(0x85, add_2reg(0xC0, dst_reg, src_reg)); 1468 goto emit_cond_jmp; 1469 1470 case BPF_JMP | BPF_JSET | BPF_K: 1471 case BPF_JMP32 | BPF_JSET | BPF_K: 1472 /* test dst_reg, imm32 */ 1473 maybe_emit_1mod(&prog, dst_reg, 1474 BPF_CLASS(insn->code) == BPF_JMP); 1475 EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32); 1476 goto emit_cond_jmp; 1477 1478 case BPF_JMP | BPF_JEQ | BPF_K: 1479 case BPF_JMP | BPF_JNE | BPF_K: 1480 case BPF_JMP | BPF_JGT | BPF_K: 1481 case BPF_JMP | BPF_JLT | BPF_K: 1482 case BPF_JMP | BPF_JGE | BPF_K: 1483 case BPF_JMP | BPF_JLE | BPF_K: 1484 case BPF_JMP | BPF_JSGT | BPF_K: 1485 case BPF_JMP | BPF_JSLT | BPF_K: 1486 case BPF_JMP | BPF_JSGE | BPF_K: 1487 case BPF_JMP | BPF_JSLE | BPF_K: 1488 case BPF_JMP32 | BPF_JEQ | BPF_K: 1489 case BPF_JMP32 | BPF_JNE | BPF_K: 1490 case BPF_JMP32 | BPF_JGT | BPF_K: 1491 case BPF_JMP32 | BPF_JLT | BPF_K: 1492 case BPF_JMP32 | BPF_JGE | BPF_K: 1493 case BPF_JMP32 | BPF_JLE | BPF_K: 1494 case BPF_JMP32 | BPF_JSGT | BPF_K: 1495 case BPF_JMP32 | BPF_JSLT | BPF_K: 1496 case BPF_JMP32 | BPF_JSGE | BPF_K: 1497 case BPF_JMP32 | BPF_JSLE | BPF_K: 1498 /* test dst_reg, 
dst_reg to save one extra byte */ 1499 if (imm32 == 0) { 1500 maybe_emit_mod(&prog, dst_reg, dst_reg, 1501 BPF_CLASS(insn->code) == BPF_JMP); 1502 EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg)); 1503 goto emit_cond_jmp; 1504 } 1505 1506 /* cmp dst_reg, imm8/32 */ 1507 maybe_emit_1mod(&prog, dst_reg, 1508 BPF_CLASS(insn->code) == BPF_JMP); 1509 1510 if (is_imm8(imm32)) 1511 EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32); 1512 else 1513 EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32); 1514 1515 emit_cond_jmp: /* Convert BPF opcode to x86 */ 1516 switch (BPF_OP(insn->code)) { 1517 case BPF_JEQ: 1518 jmp_cond = X86_JE; 1519 break; 1520 case BPF_JSET: 1521 case BPF_JNE: 1522 jmp_cond = X86_JNE; 1523 break; 1524 case BPF_JGT: 1525 /* GT is unsigned '>', JA in x86 */ 1526 jmp_cond = X86_JA; 1527 break; 1528 case BPF_JLT: 1529 /* LT is unsigned '<', JB in x86 */ 1530 jmp_cond = X86_JB; 1531 break; 1532 case BPF_JGE: 1533 /* GE is unsigned '>=', JAE in x86 */ 1534 jmp_cond = X86_JAE; 1535 break; 1536 case BPF_JLE: 1537 /* LE is unsigned '<=', JBE in x86 */ 1538 jmp_cond = X86_JBE; 1539 break; 1540 case BPF_JSGT: 1541 /* Signed '>', GT in x86 */ 1542 jmp_cond = X86_JG; 1543 break; 1544 case BPF_JSLT: 1545 /* Signed '<', LT in x86 */ 1546 jmp_cond = X86_JL; 1547 break; 1548 case BPF_JSGE: 1549 /* Signed '>=', GE in x86 */ 1550 jmp_cond = X86_JGE; 1551 break; 1552 case BPF_JSLE: 1553 /* Signed '<=', LE in x86 */ 1554 jmp_cond = X86_JLE; 1555 break; 1556 default: /* to silence GCC warning */ 1557 return -EFAULT; 1558 } 1559 jmp_offset = addrs[i + insn->off] - addrs[i]; 1560 if (is_imm8(jmp_offset)) { 1561 if (jmp_padding) { 1562 /* To keep the jmp_offset valid, the extra bytes are 1563 * padded before the jump insn, so we subtract the 1564 * 2 bytes of jmp_cond insn from INSN_SZ_DIFF. 1565 * 1566 * If the previous pass already emits an imm8 1567 * jmp_cond, then this BPF insn won't shrink, so 1568 * "nops" is 0. 1569 * 1570 * On the other hand, if the previous pass emits an 1571 * imm32 jmp_cond, the extra 4 bytes(*) is padded to 1572 * keep the image from shrinking further. 1573 * 1574 * (*) imm32 jmp_cond is 6 bytes, and imm8 jmp_cond 1575 * is 2 bytes, so the size difference is 4 bytes. 1576 */ 1577 nops = INSN_SZ_DIFF - 2; 1578 if (nops != 0 && nops != 4) { 1579 pr_err("unexpected jmp_cond padding: %d bytes\n", 1580 nops); 1581 return -EFAULT; 1582 } 1583 emit_nops(&prog, nops); 1584 } 1585 EMIT2(jmp_cond, jmp_offset); 1586 } else if (is_simm32(jmp_offset)) { 1587 EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset); 1588 } else { 1589 pr_err("cond_jmp gen bug %llx\n", jmp_offset); 1590 return -EFAULT; 1591 } 1592 1593 break; 1594 1595 case BPF_JMP | BPF_JA: 1596 if (insn->off == -1) 1597 /* -1 jmp instructions will always jump 1598 * backwards two bytes. Explicitly handling 1599 * this case avoids wasting too many passes 1600 * when there are long sequences of replaced 1601 * dead code. 1602 */ 1603 jmp_offset = -2; 1604 else 1605 jmp_offset = addrs[i + insn->off] - addrs[i]; 1606 1607 if (!jmp_offset) { 1608 /* 1609 * If jmp_padding is enabled, the extra nops will 1610 * be inserted. Otherwise, optimize out nop jumps. 1611 */ 1612 if (jmp_padding) { 1613 /* There are 3 possible conditions. 1614 * (1) This BPF_JA is already optimized out in 1615 * the previous run, so there is no need 1616 * to pad any extra byte (0 byte). 1617 * (2) The previous pass emits an imm8 jmp, 1618 * so we pad 2 bytes to match the previous 1619 * insn size. 
1620 * (3) Similarly, the previous pass emits an 1621 * imm32 jmp, and 5 bytes is padded. 1622 */ 1623 nops = INSN_SZ_DIFF; 1624 if (nops != 0 && nops != 2 && nops != 5) { 1625 pr_err("unexpected nop jump padding: %d bytes\n", 1626 nops); 1627 return -EFAULT; 1628 } 1629 emit_nops(&prog, nops); 1630 } 1631 break; 1632 } 1633 emit_jmp: 1634 if (is_imm8(jmp_offset)) { 1635 if (jmp_padding) { 1636 /* To avoid breaking jmp_offset, the extra bytes 1637 * are padded before the actual jmp insn, so 1638 * 2 bytes is subtracted from INSN_SZ_DIFF. 1639 * 1640 * If the previous pass already emits an imm8 1641 * jmp, there is nothing to pad (0 byte). 1642 * 1643 * If it emits an imm32 jmp (5 bytes) previously 1644 * and now an imm8 jmp (2 bytes), then we pad 1645 * (5 - 2 = 3) bytes to stop the image from 1646 * shrinking further. 1647 */ 1648 nops = INSN_SZ_DIFF - 2; 1649 if (nops != 0 && nops != 3) { 1650 pr_err("unexpected jump padding: %d bytes\n", 1651 nops); 1652 return -EFAULT; 1653 } 1654 emit_nops(&prog, INSN_SZ_DIFF - 2); 1655 } 1656 EMIT2(0xEB, jmp_offset); 1657 } else if (is_simm32(jmp_offset)) { 1658 EMIT1_off32(0xE9, jmp_offset); 1659 } else { 1660 pr_err("jmp gen bug %llx\n", jmp_offset); 1661 return -EFAULT; 1662 } 1663 break; 1664 1665 case BPF_JMP | BPF_EXIT: 1666 if (seen_exit) { 1667 jmp_offset = ctx->cleanup_addr - addrs[i]; 1668 goto emit_jmp; 1669 } 1670 seen_exit = true; 1671 /* Update cleanup_addr */ 1672 ctx->cleanup_addr = proglen; 1673 pop_callee_regs(&prog, callee_regs_used); 1674 EMIT1(0xC9); /* leave */ 1675 EMIT1(0xC3); /* ret */ 1676 break; 1677 1678 default: 1679 /* 1680 * By design x86-64 JIT should support all BPF instructions. 1681 * This error will be seen if new instruction was added 1682 * to the interpreter, but not to the JIT, or if there is 1683 * junk in bpf_prog. 1684 */ 1685 pr_err("bpf_jit: unknown opcode %02x\n", insn->code); 1686 return -EINVAL; 1687 } 1688 1689 ilen = prog - temp; 1690 if (ilen > BPF_MAX_INSN_SIZE) { 1691 pr_err("bpf_jit: fatal insn size error\n"); 1692 return -EFAULT; 1693 } 1694 1695 if (image) { 1696 /* 1697 * When populating the image, assert that: 1698 * 1699 * i) We do not write beyond the allocated space, and 1700 * ii) addrs[i] did not change from the prior run, in order 1701 * to validate assumptions made for computing branch 1702 * displacements. 1703 */ 1704 if (unlikely(proglen + ilen > oldproglen || 1705 proglen + ilen != addrs[i])) { 1706 pr_err("bpf_jit: fatal error\n"); 1707 return -EFAULT; 1708 } 1709 memcpy(image + proglen, temp, ilen); 1710 } 1711 proglen += ilen; 1712 addrs[i] = proglen; 1713 prog = temp; 1714 } 1715 1716 if (image && excnt != bpf_prog->aux->num_exentries) { 1717 pr_err("extable is not populated\n"); 1718 return -EFAULT; 1719 } 1720 return proglen; 1721 } 1722 1723 static void save_regs(const struct btf_func_model *m, u8 **prog, int nr_args, 1724 int stack_size) 1725 { 1726 int i; 1727 /* Store function arguments to stack. 1728 * For a function that accepts two pointers the sequence will be: 1729 * mov QWORD PTR [rbp-0x10],rdi 1730 * mov QWORD PTR [rbp-0x8],rsi 1731 */ 1732 for (i = 0; i < min(nr_args, 6); i++) 1733 emit_stx(prog, bytes_to_bpf_size(m->arg_size[i]), 1734 BPF_REG_FP, 1735 i == 5 ? X86_REG_R9 : BPF_REG_1 + i, 1736 -(stack_size - i * 8)); 1737 } 1738 1739 static void restore_regs(const struct btf_func_model *m, u8 **prog, int nr_args, 1740 int stack_size) 1741 { 1742 int i; 1743 1744 /* Restore function arguments from stack. 
1745 * For a function that accepts two pointers the sequence will be: 1746 * EMIT4(0x48, 0x8B, 0x7D, 0xF0); mov rdi,QWORD PTR [rbp-0x10] 1747 * EMIT4(0x48, 0x8B, 0x75, 0xF8); mov rsi,QWORD PTR [rbp-0x8] 1748 */ 1749 for (i = 0; i < min(nr_args, 6); i++) 1750 emit_ldx(prog, bytes_to_bpf_size(m->arg_size[i]), 1751 i == 5 ? X86_REG_R9 : BPF_REG_1 + i, 1752 BPF_REG_FP, 1753 -(stack_size - i * 8)); 1754 } 1755 1756 static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog, 1757 struct bpf_prog *p, int stack_size, bool save_ret) 1758 { 1759 u8 *prog = *pprog; 1760 u8 *jmp_insn; 1761 1762 /* arg1: mov rdi, progs[i] */ 1763 emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p); 1764 if (emit_call(&prog, 1765 p->aux->sleepable ? __bpf_prog_enter_sleepable : 1766 __bpf_prog_enter, prog)) 1767 return -EINVAL; 1768 /* remember prog start time returned by __bpf_prog_enter */ 1769 emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0); 1770 1771 /* if (__bpf_prog_enter*(prog) == 0) 1772 * goto skip_exec_of_prog; 1773 */ 1774 EMIT3(0x48, 0x85, 0xC0); /* test rax,rax */ 1775 /* emit 2 nops that will be replaced with JE insn */ 1776 jmp_insn = prog; 1777 emit_nops(&prog, 2); 1778 1779 /* arg1: lea rdi, [rbp - stack_size] */ 1780 EMIT4(0x48, 0x8D, 0x7D, -stack_size); 1781 /* arg2: progs[i]->insnsi for interpreter */ 1782 if (!p->jited) 1783 emit_mov_imm64(&prog, BPF_REG_2, 1784 (long) p->insnsi >> 32, 1785 (u32) (long) p->insnsi); 1786 /* call JITed bpf program or interpreter */ 1787 if (emit_call(&prog, p->bpf_func, prog)) 1788 return -EINVAL; 1789 1790 /* 1791 * BPF_TRAMP_MODIFY_RETURN trampolines can modify the return 1792 * of the previous call which is then passed on the stack to 1793 * the next BPF program. 1794 * 1795 * BPF_TRAMP_FENTRY trampoline may need to return the return 1796 * value of BPF_PROG_TYPE_STRUCT_OPS prog. 1797 */ 1798 if (save_ret) 1799 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8); 1800 1801 /* replace 2 nops with JE insn, since jmp target is known */ 1802 jmp_insn[0] = X86_JE; 1803 jmp_insn[1] = prog - jmp_insn - 2; 1804 1805 /* arg1: mov rdi, progs[i] */ 1806 emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p); 1807 /* arg2: mov rsi, rbx <- start time in nsec */ 1808 emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6); 1809 if (emit_call(&prog, 1810 p->aux->sleepable ? 
__bpf_prog_exit_sleepable : 1811 __bpf_prog_exit, prog)) 1812 return -EINVAL; 1813 1814 *pprog = prog; 1815 return 0; 1816 } 1817 1818 static void emit_align(u8 **pprog, u32 align) 1819 { 1820 u8 *target, *prog = *pprog; 1821 1822 target = PTR_ALIGN(prog, align); 1823 if (target != prog) 1824 emit_nops(&prog, target - prog); 1825 1826 *pprog = prog; 1827 } 1828 1829 static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond) 1830 { 1831 u8 *prog = *pprog; 1832 s64 offset; 1833 1834 offset = func - (ip + 2 + 4); 1835 if (!is_simm32(offset)) { 1836 pr_err("Target %p is out of range\n", func); 1837 return -EINVAL; 1838 } 1839 EMIT2_off32(0x0F, jmp_cond + 0x10, offset); 1840 *pprog = prog; 1841 return 0; 1842 } 1843 1844 static int invoke_bpf(const struct btf_func_model *m, u8 **pprog, 1845 struct bpf_tramp_progs *tp, int stack_size, 1846 bool save_ret) 1847 { 1848 int i; 1849 u8 *prog = *pprog; 1850 1851 for (i = 0; i < tp->nr_progs; i++) { 1852 if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size, 1853 save_ret)) 1854 return -EINVAL; 1855 } 1856 *pprog = prog; 1857 return 0; 1858 } 1859 1860 static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog, 1861 struct bpf_tramp_progs *tp, int stack_size, 1862 u8 **branches) 1863 { 1864 u8 *prog = *pprog; 1865 int i; 1866 1867 /* The first fmod_ret program will receive a garbage return value. 1868 * Set this to 0 to avoid confusing the program. 1869 */ 1870 emit_mov_imm32(&prog, false, BPF_REG_0, 0); 1871 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8); 1872 for (i = 0; i < tp->nr_progs; i++) { 1873 if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size, true)) 1874 return -EINVAL; 1875 1876 /* mod_ret prog stored return value into [rbp - 8]. Emit: 1877 * if (*(u64 *)(rbp - 8) != 0) 1878 * goto do_fexit; 1879 */ 1880 /* cmp QWORD PTR [rbp - 0x8], 0x0 */ 1881 EMIT4(0x48, 0x83, 0x7d, 0xf8); EMIT1(0x00); 1882 1883 /* Save the location of the branch and Generate 6 nops 1884 * (4 bytes for an offset and 2 bytes for the jump) These nops 1885 * are replaced with a conditional jump once do_fexit (i.e. the 1886 * start of the fexit invocation) is finalized. 1887 */ 1888 branches[i] = prog; 1889 emit_nops(&prog, 4 + 2); 1890 } 1891 1892 *pprog = prog; 1893 return 0; 1894 } 1895 1896 static bool is_valid_bpf_tramp_flags(unsigned int flags) 1897 { 1898 if ((flags & BPF_TRAMP_F_RESTORE_REGS) && 1899 (flags & BPF_TRAMP_F_SKIP_FRAME)) 1900 return false; 1901 1902 /* 1903 * BPF_TRAMP_F_RET_FENTRY_RET is only used by bpf_struct_ops, 1904 * and it must be used alone. 
1905 */ 1906 if ((flags & BPF_TRAMP_F_RET_FENTRY_RET) && 1907 (flags & ~BPF_TRAMP_F_RET_FENTRY_RET)) 1908 return false; 1909 1910 return true; 1911 } 1912 1913 /* Example: 1914 * __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev); 1915 * its 'struct btf_func_model' will be nr_args=2 1916 * The assembly code when eth_type_trans is executing after trampoline: 1917 * 1918 * push rbp 1919 * mov rbp, rsp 1920 * sub rsp, 16 // space for skb and dev 1921 * push rbx // temp regs to pass start time 1922 * mov qword ptr [rbp - 16], rdi // save skb pointer to stack 1923 * mov qword ptr [rbp - 8], rsi // save dev pointer to stack 1924 * call __bpf_prog_enter // rcu_read_lock and preempt_disable 1925 * mov rbx, rax // remember start time in bpf stats are enabled 1926 * lea rdi, [rbp - 16] // R1==ctx of bpf prog 1927 * call addr_of_jited_FENTRY_prog 1928 * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off 1929 * mov rsi, rbx // prog start time 1930 * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math 1931 * mov rdi, qword ptr [rbp - 16] // restore skb pointer from stack 1932 * mov rsi, qword ptr [rbp - 8] // restore dev pointer from stack 1933 * pop rbx 1934 * leave 1935 * ret 1936 * 1937 * eth_type_trans has 5 byte nop at the beginning. These 5 bytes will be 1938 * replaced with 'call generated_bpf_trampoline'. When it returns 1939 * eth_type_trans will continue executing with original skb and dev pointers. 1940 * 1941 * The assembly code when eth_type_trans is called from trampoline: 1942 * 1943 * push rbp 1944 * mov rbp, rsp 1945 * sub rsp, 24 // space for skb, dev, return value 1946 * push rbx // temp regs to pass start time 1947 * mov qword ptr [rbp - 24], rdi // save skb pointer to stack 1948 * mov qword ptr [rbp - 16], rsi // save dev pointer to stack 1949 * call __bpf_prog_enter // rcu_read_lock and preempt_disable 1950 * mov rbx, rax // remember start time if bpf stats are enabled 1951 * lea rdi, [rbp - 24] // R1==ctx of bpf prog 1952 * call addr_of_jited_FENTRY_prog // bpf prog can access skb and dev 1953 * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off 1954 * mov rsi, rbx // prog start time 1955 * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math 1956 * mov rdi, qword ptr [rbp - 24] // restore skb pointer from stack 1957 * mov rsi, qword ptr [rbp - 16] // restore dev pointer from stack 1958 * call eth_type_trans+5 // execute body of eth_type_trans 1959 * mov qword ptr [rbp - 8], rax // save return value 1960 * call __bpf_prog_enter // rcu_read_lock and preempt_disable 1961 * mov rbx, rax // remember start time in bpf stats are enabled 1962 * lea rdi, [rbp - 24] // R1==ctx of bpf prog 1963 * call addr_of_jited_FEXIT_prog // bpf prog can access skb, dev, return value 1964 * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off 1965 * mov rsi, rbx // prog start time 1966 * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math 1967 * mov rax, qword ptr [rbp - 8] // restore eth_type_trans's return value 1968 * pop rbx 1969 * leave 1970 * add rsp, 8 // skip eth_type_trans's frame 1971 * ret // return to its caller 1972 */ 1973 int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end, 1974 const struct btf_func_model *m, u32 flags, 1975 struct bpf_tramp_progs *tprogs, 1976 void *orig_call) 1977 { 1978 int ret, i, nr_args = m->nr_args; 1979 int regs_off, ip_off, args_off, stack_size = nr_args * 8; 1980 struct bpf_tramp_progs 
int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
				const struct btf_func_model *m, u32 flags,
				struct bpf_tramp_progs *tprogs,
				void *orig_call)
{
	int ret, i, nr_args = m->nr_args;
	int regs_off, ip_off, args_off, stack_size = nr_args * 8;
	struct bpf_tramp_progs *fentry = &tprogs[BPF_TRAMP_FENTRY];
	struct bpf_tramp_progs *fexit = &tprogs[BPF_TRAMP_FEXIT];
	struct bpf_tramp_progs *fmod_ret = &tprogs[BPF_TRAMP_MODIFY_RETURN];
	u8 **branches = NULL;
	u8 *prog;
	bool save_ret;

	/* x86-64 supports up to 6 arguments. 7+ can be added in the future */
	if (nr_args > 6)
		return -ENOTSUPP;

	if (!is_valid_bpf_tramp_flags(flags))
		return -EINVAL;

	/* Generated trampoline stack layout:
	 *
	 * RBP + 8         [ return address  ]
	 * RBP + 0         [ RBP             ]
	 *
	 * RBP - 8         [ return value    ]  BPF_TRAMP_F_CALL_ORIG or
	 *                                      BPF_TRAMP_F_RET_FENTRY_RET flags
	 *
	 *                 [ reg_argN        ]  always
	 *                 [ ...             ]
	 * RBP - regs_off  [ reg_arg1        ]  program's ctx pointer
	 *
	 * RBP - args_off  [ args count      ]  always
	 *
	 * RBP - ip_off    [ traced function ]  BPF_TRAMP_F_IP_ARG flag
	 */

	/* room for return value of orig_call or fentry prog */
	save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET);
	if (save_ret)
		stack_size += 8;

	regs_off = stack_size;

	/* args count */
	stack_size += 8;
	args_off = stack_size;

	if (flags & BPF_TRAMP_F_IP_ARG)
		stack_size += 8; /* room for IP address argument */

	ip_off = stack_size;

	if (flags & BPF_TRAMP_F_SKIP_FRAME)
		/* skip patched call instruction and point orig_call to actual
		 * body of the kernel function.
		 */
		orig_call += X86_PATCH_SIZE;
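	/*
	 * Worked example (illustrative only): for a two-argument function
	 * traced with BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_IP_ARG the math
	 * above gives stack_size = 2 * 8 + 8 (return value) = 24, so
	 * regs_off = 24, then args_off = 32 and ip_off = 40. The prologue
	 * below therefore emits "sub rsp, 40"; the two argument registers
	 * are saved at [rbp - 24] and [rbp - 16], the args count at
	 * [rbp - 32], the traced IP at [rbp - 40], and the return value
	 * ends up at [rbp - 8].
	 */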
	prog = image;

	EMIT1(0x55);		 /* push rbp */
	EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
	EMIT4(0x48, 0x83, 0xEC, stack_size); /* sub rsp, stack_size */
	EMIT1(0x53);		 /* push rbx */

	/* Store number of arguments of the traced function:
	 * mov rax, nr_args
	 * mov QWORD PTR [rbp - args_off], rax
	 */
	emit_mov_imm64(&prog, BPF_REG_0, 0, (u32) nr_args);
	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -args_off);

	if (flags & BPF_TRAMP_F_IP_ARG) {
		/* Store IP address of the traced function:
		 * mov rax, QWORD PTR [rbp + 8]
		 * sub rax, X86_PATCH_SIZE
		 * mov QWORD PTR [rbp - ip_off], rax
		 */
		emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, 8);
		EMIT4(0x48, 0x83, 0xe8, X86_PATCH_SIZE);
		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -ip_off);
	}

	save_regs(m, &prog, nr_args, regs_off);

	if (flags & BPF_TRAMP_F_CALL_ORIG) {
		/* arg1: mov rdi, im */
		emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
		if (emit_call(&prog, __bpf_tramp_enter, prog)) {
			ret = -EINVAL;
			goto cleanup;
		}
	}

	if (fentry->nr_progs)
		if (invoke_bpf(m, &prog, fentry, regs_off,
			       flags & BPF_TRAMP_F_RET_FENTRY_RET))
			return -EINVAL;

	if (fmod_ret->nr_progs) {
		branches = kcalloc(fmod_ret->nr_progs, sizeof(u8 *),
				   GFP_KERNEL);
		if (!branches)
			return -ENOMEM;

		if (invoke_bpf_mod_ret(m, &prog, fmod_ret, regs_off,
				       branches)) {
			ret = -EINVAL;
			goto cleanup;
		}
	}

	if (flags & BPF_TRAMP_F_CALL_ORIG) {
		restore_regs(m, &prog, nr_args, regs_off);

		/* call original function */
		if (emit_call(&prog, orig_call, prog)) {
			ret = -EINVAL;
			goto cleanup;
		}
		/* remember return value on the stack for bpf prog to access */
		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
		im->ip_after_call = prog;
		memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
		prog += X86_PATCH_SIZE;
	}

	if (fmod_ret->nr_progs) {
		/* From Intel 64 and IA-32 Architectures Optimization
		 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
		 * Coding Rule 11: All branch targets should be 16-byte
		 * aligned.
		 */
		emit_align(&prog, 16);
		/* Update the branches saved in invoke_bpf_mod_ret with the
		 * aligned address of do_fexit.
		 */
		for (i = 0; i < fmod_ret->nr_progs; i++)
			emit_cond_near_jump(&branches[i], prog, branches[i],
					    X86_JNE);
	}

	if (fexit->nr_progs)
		if (invoke_bpf(m, &prog, fexit, regs_off, false)) {
			ret = -EINVAL;
			goto cleanup;
		}

	if (flags & BPF_TRAMP_F_RESTORE_REGS)
		restore_regs(m, &prog, nr_args, regs_off);

	/* This needs to be done regardless. If there were fmod_ret programs,
	 * the return value is only updated on the stack and still needs to be
	 * restored to R0.
	 */
	if (flags & BPF_TRAMP_F_CALL_ORIG) {
		im->ip_epilogue = prog;
		/* arg1: mov rdi, im */
		emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
		if (emit_call(&prog, __bpf_tramp_exit, prog)) {
			ret = -EINVAL;
			goto cleanup;
		}
	}
	/* restore return value of orig_call or fentry prog back into RAX */
	if (save_ret)
		emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);

	EMIT1(0x5B); /* pop rbx */
	EMIT1(0xC9); /* leave */
	if (flags & BPF_TRAMP_F_SKIP_FRAME)
		/* skip our return address and return to parent */
		EMIT4(0x48, 0x83, 0xC4, 8); /* add rsp, 8 */
	EMIT1(0xC3); /* ret */
	/* Make sure the trampoline generation logic doesn't overflow */
	if (WARN_ON_ONCE(prog > (u8 *)image_end - BPF_INSN_SAFETY)) {
		ret = -EFAULT;
		goto cleanup;
	}
	ret = prog - (u8 *)image;

cleanup:
	kfree(branches);
	return ret;
}
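/*
 * Illustrative sketch (not part of the kernel build, hence the #if 0):
 * a plain C model of the binary search that emit_bpf_dispatcher() below
 * unrolls into straight-line cmp/jg/je instructions over the sorted
 * 'progs[]' addresses, with a retpoline-safe indirect jump through rdx
 * as the fallback when no address matches. The function name is made up
 * for the sketch.
 */
#if 0
static int example_dispatcher_model(s64 func, const s64 *progs, int a, int b)
{
	while (a < b) {
		int pivot = a + (b - a) / 2;

		if (func > progs[pivot])	/* "jg upper_part" */
			a = pivot + 1;		/* descend into upper range */
		else
			b = pivot;		/* fall through to lower range */
	}
	/* leaf node: "je func" on a match, otherwise "jmp *rdx" */
	return func == progs[a] ? a : -1;
}
#endif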
static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs)
{
	u8 *jg_reloc, *prog = *pprog;
	int pivot, err, jg_bytes = 1;
	s64 jg_offset;

	if (a == b) {
		/* Leaf node of recursion, i.e. not a range of indices
		 * anymore.
		 */
		EMIT1(add_1mod(0x48, BPF_REG_3));	/* cmp rdx,func */
		if (!is_simm32(progs[a]))
			return -1;
		EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3),
			    progs[a]);
		err = emit_cond_near_jump(&prog,	/* je func */
					  (void *)progs[a], prog,
					  X86_JE);
		if (err)
			return err;

		emit_indirect_jump(&prog, 2 /* rdx */, prog);

		*pprog = prog;
		return 0;
	}

	/* Not a leaf node, so we pivot, and recursively descend into
	 * the lower and upper ranges.
	 */
	pivot = (b - a) / 2;
	EMIT1(add_1mod(0x48, BPF_REG_3));		/* cmp rdx,func */
	if (!is_simm32(progs[a + pivot]))
		return -1;
	EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3), progs[a + pivot]);

	if (pivot > 2) {				/* jg upper_part */
		/* Require near jump. */
		jg_bytes = 4;
		EMIT2_off32(0x0F, X86_JG + 0x10, 0);
	} else {
		EMIT2(X86_JG, 0);
	}
	jg_reloc = prog;

	err = emit_bpf_dispatcher(&prog, a, a + pivot,	/* emit lower_part */
				  progs);
	if (err)
		return err;

	/* From Intel 64 and IA-32 Architectures Optimization
	 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
	 * Coding Rule 11: All branch targets should be 16-byte
	 * aligned.
	 */
	emit_align(&prog, 16);
	jg_offset = prog - jg_reloc;
	emit_code(jg_reloc - jg_bytes, jg_offset, jg_bytes);

	err = emit_bpf_dispatcher(&prog, a + pivot + 1,	/* emit upper_part */
				  b, progs);
	if (err)
		return err;

	*pprog = prog;
	return 0;
}

static int cmp_ips(const void *a, const void *b)
{
	const s64 *ipa = a;
	const s64 *ipb = b;

	if (*ipa > *ipb)
		return 1;
	if (*ipa < *ipb)
		return -1;
	return 0;
}

int arch_prepare_bpf_dispatcher(void *image, s64 *funcs, int num_funcs)
{
	u8 *prog = image;

	sort(funcs, num_funcs, sizeof(funcs[0]), cmp_ips, NULL);
	return emit_bpf_dispatcher(&prog, 0, num_funcs - 1, funcs);
}

struct x64_jit_data {
	struct bpf_binary_header *header;
	int *addrs;
	u8 *image;
	int proglen;
	struct jit_context ctx;
};

#define MAX_PASSES 20
#define PADDING_PASSES (MAX_PASSES - 5)

struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
	struct bpf_binary_header *header = NULL;
	struct bpf_prog *tmp, *orig_prog = prog;
	struct x64_jit_data *jit_data;
	int proglen, oldproglen = 0;
	struct jit_context ctx = {};
	bool tmp_blinded = false;
	bool extra_pass = false;
	bool padding = false;
	u8 *image = NULL;
	int *addrs;
	int pass;
	int i;

	if (!prog->jit_requested)
		return orig_prog;

	tmp = bpf_jit_blind_constants(prog);
	/*
	 * If blinding was requested and we failed during blinding,
	 * we must fall back to the interpreter.
	 */
	if (IS_ERR(tmp))
		return orig_prog;
	if (tmp != prog) {
		tmp_blinded = true;
		prog = tmp;
	}

	jit_data = prog->aux->jit_data;
	if (!jit_data) {
		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
		if (!jit_data) {
			prog = orig_prog;
			goto out;
		}
		prog->aux->jit_data = jit_data;
	}
	addrs = jit_data->addrs;
	if (addrs) {
		ctx = jit_data->ctx;
		oldproglen = jit_data->proglen;
		image = jit_data->image;
		header = jit_data->header;
		extra_pass = true;
		padding = true;
		goto skip_init_addrs;
	}
	addrs = kvmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL);
	if (!addrs) {
		prog = orig_prog;
		goto out_addrs;
	}

	/*
	 * Before the first pass, make a rough estimation of addrs[]:
	 * each BPF instruction is translated to less than 64 bytes.
	 */
	for (proglen = 0, i = 0; i <= prog->len; i++) {
		proglen += 64;
		addrs[i] = proglen;
	}
	ctx.cleanup_addr = proglen;
skip_init_addrs:

	/*
	 * JITed image shrinks with every pass and the loop iterates
	 * until the image stops shrinking. Very large BPF programs
	 * may converge on the last pass. In such case do one more
	 * pass to emit the final image.
	 */
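	/*
	 * Worked example (illustrative only): for a three-insn program the
	 * estimation loop above seeds addrs[] with {64, 128, 192, 256} and
	 * cleanup_addr = 256, i.e. a 64-byte worst case per instruction.
	 * Each do_jit() pass then overwrites addrs[] with the offsets it
	 * actually emitted; since the image only shrinks from pass to pass,
	 * the jump targets computed from addrs[] converge, and once proglen
	 * stops changing the binary image is allocated and emitted for real.
	 */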
	for (pass = 0; pass < MAX_PASSES || image; pass++) {
		if (!padding && pass >= PADDING_PASSES)
			padding = true;
		proglen = do_jit(prog, addrs, image, oldproglen, &ctx, padding);
		if (proglen <= 0) {
out_image:
			image = NULL;
			if (header)
				bpf_jit_binary_free(header);
			prog = orig_prog;
			goto out_addrs;
		}
		if (image) {
			if (proglen != oldproglen) {
				pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
				       proglen, oldproglen);
				goto out_image;
			}
			break;
		}
		if (proglen == oldproglen) {
			/*
			 * The number of entries in extable is the number of BPF_LDX
			 * insns that access kernel memory via "pointer to BTF type".
			 * The verifier changed their opcode from LDX|MEM|size
			 * to LDX|PROBE_MEM|size to make JITing easier.
			 */
			u32 align = __alignof__(struct exception_table_entry);
			u32 extable_size = prog->aux->num_exentries *
				sizeof(struct exception_table_entry);

			/* allocate module memory for x86 insns and extable */
			header = bpf_jit_binary_alloc(roundup(proglen, align) + extable_size,
						      &image, align, jit_fill_hole);
			if (!header) {
				prog = orig_prog;
				goto out_addrs;
			}
			prog->aux->extable = (void *) image + roundup(proglen, align);
		}
		oldproglen = proglen;
		cond_resched();
	}

	if (bpf_jit_enable > 1)
		bpf_jit_dump(prog->len, proglen, pass + 1, image);

	if (image) {
		if (!prog->is_func || extra_pass) {
			bpf_tail_call_direct_fixup(prog);
			bpf_jit_binary_lock_ro(header);
		} else {
			jit_data->addrs = addrs;
			jit_data->ctx = ctx;
			jit_data->proglen = proglen;
			jit_data->image = image;
			jit_data->header = header;
		}
		prog->bpf_func = (void *)image;
		prog->jited = 1;
		prog->jited_len = proglen;
	} else {
		prog = orig_prog;
	}

	if (!image || !prog->is_func || extra_pass) {
		if (image)
			bpf_prog_fill_jited_linfo(prog, addrs + 1);
out_addrs:
		kvfree(addrs);
		kfree(jit_data);
		prog->aux->jit_data = NULL;
	}
out:
	if (tmp_blinded)
		bpf_jit_prog_release_other(prog, prog == orig_prog ?
					   tmp : orig_prog);
	return prog;
}

bool bpf_jit_supports_kfunc_call(void)
{
	return true;
}