// SPDX-License-Identifier: GPL-2.0-only
/*
 * BPF JIT compiler
 *
 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
 * Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <linux/memory.h>
#include <linux/sort.h>
#include <asm/extable.h>
#include <asm/set_memory.h>
#include <asm/nospec-branch.h>
#include <asm/text-patching.h>

static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
{
	if (len == 1)
		*ptr = bytes;
	else if (len == 2)
		*(u16 *)ptr = bytes;
	else {
		*(u32 *)ptr = bytes;
		barrier();
	}
	return ptr + len;
}

#define EMIT(bytes, len) \
	do { prog = emit_code(prog, bytes, len); } while (0)

#define EMIT1(b1)		EMIT(b1, 1)
#define EMIT2(b1, b2)		EMIT((b1) + ((b2) << 8), 2)
#define EMIT3(b1, b2, b3)	EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
#define EMIT4(b1, b2, b3, b4)   EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)

#define EMIT1_off32(b1, off) \
	do { EMIT1(b1); EMIT(off, 4); } while (0)
#define EMIT2_off32(b1, b2, off) \
	do { EMIT2(b1, b2); EMIT(off, 4); } while (0)
#define EMIT3_off32(b1, b2, b3, off) \
	do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
#define EMIT4_off32(b1, b2, b3, b4, off) \
	do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)

#ifdef CONFIG_X86_KERNEL_IBT
#define EMIT_ENDBR()	EMIT(gen_endbr(), 4)
#else
#define EMIT_ENDBR()
#endif

static bool is_imm8(int value)
{
	return value <= 127 && value >= -128;
}

static bool is_simm32(s64 value)
{
	return value == (s64)(s32)value;
}

static bool is_uimm32(u64 value)
{
	return value == (u64)(u32)value;
}

/* mov dst, src */
#define EMIT_mov(DST, SRC)								 \
	do {										 \
		if (DST != SRC)								 \
			EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
	} while (0)
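
/*
 * For illustration: with the reg2hex mapping below, EMIT_mov(BPF_REG_1,
 * BPF_REG_2) emits the three bytes 0x48 0x89 0xf7, i.e. "mov rdi, rsi":
 * the REX.W prefix from add_2mod(), opcode 0x89 (MOV r/m64, r64), and
 * ModRM 0xf7 = 0xC0 + rdi(7) + (rsi(6) << 3).
 */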

static int bpf_size_to_x86_bytes(int bpf_size)
{
	if (bpf_size == BPF_W)
		return 4;
	else if (bpf_size == BPF_H)
		return 2;
	else if (bpf_size == BPF_B)
		return 1;
	else if (bpf_size == BPF_DW)
		return 4; /* imm32 */
	else
		return 0;
}

/*
 * List of x86 cond jumps opcodes (. + s8)
 * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
 */
#define X86_JB  0x72
#define X86_JAE 0x73
#define X86_JE  0x74
#define X86_JNE 0x75
#define X86_JBE 0x76
#define X86_JA  0x77
#define X86_JL  0x7C
#define X86_JGE 0x7D
#define X86_JLE 0x7E
#define X86_JG  0x7F

/* Pick a register outside of BPF range for JIT internal work */
#define AUX_REG (MAX_BPF_JIT_REG + 1)
#define X86_REG_R9 (MAX_BPF_JIT_REG + 2)

/*
 * The following table maps BPF registers to x86-64 registers.
 *
 * x86-64 register R12 is unused, since if used as base address
 * register in load/store instructions, it always needs an
 * extra byte of encoding and is callee saved.
 *
 * x86-64 register R9 is not used by BPF programs, but can be used by BPF
 * trampoline. x86-64 register R10 is used for blinding (if enabled).
 */
static const int reg2hex[] = {
	[BPF_REG_0] = 0,  /* RAX */
	[BPF_REG_1] = 7,  /* RDI */
	[BPF_REG_2] = 6,  /* RSI */
	[BPF_REG_3] = 2,  /* RDX */
	[BPF_REG_4] = 1,  /* RCX */
	[BPF_REG_5] = 0,  /* R8  */
	[BPF_REG_6] = 3,  /* RBX callee saved */
	[BPF_REG_7] = 5,  /* R13 callee saved */
	[BPF_REG_8] = 6,  /* R14 callee saved */
	[BPF_REG_9] = 7,  /* R15 callee saved */
	[BPF_REG_FP] = 5, /* RBP readonly */
	[BPF_REG_AX] = 2, /* R10 temp register */
	[AUX_REG] = 3,    /* R11 temp register */
	[X86_REG_R9] = 1, /* R9 register, 6th function argument */
};

static const int reg2pt_regs[] = {
	[BPF_REG_0] = offsetof(struct pt_regs, ax),
	[BPF_REG_1] = offsetof(struct pt_regs, di),
	[BPF_REG_2] = offsetof(struct pt_regs, si),
	[BPF_REG_3] = offsetof(struct pt_regs, dx),
	[BPF_REG_4] = offsetof(struct pt_regs, cx),
	[BPF_REG_5] = offsetof(struct pt_regs, r8),
	[BPF_REG_6] = offsetof(struct pt_regs, bx),
	[BPF_REG_7] = offsetof(struct pt_regs, r13),
	[BPF_REG_8] = offsetof(struct pt_regs, r14),
	[BPF_REG_9] = offsetof(struct pt_regs, r15),
};

/*
 * is_ereg() == true if BPF register 'reg' maps to x86-64 r8..r15
 * which need extra byte of encoding.
 * rax,rcx,...,rbp have simpler encoding
 */
static bool is_ereg(u32 reg)
{
	return (1 << reg) & (BIT(BPF_REG_5) |
			     BIT(AUX_REG) |
			     BIT(BPF_REG_7) |
			     BIT(BPF_REG_8) |
			     BIT(BPF_REG_9) |
			     BIT(X86_REG_R9) |
			     BIT(BPF_REG_AX));
}
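
/*
 * For illustration: is_ereg(BPF_REG_7) is true because BPF_REG_7 maps to
 * R13, which is only reachable through a REX extension bit, while
 * is_ereg(BPF_REG_1) is false because RDI needs no REX prefix for 32/64-bit
 * operand encodings.
 */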

/*
 * is_ereg_8l() == true if BPF register 'reg' is mapped to access x86-64
 * lower 8-bit registers dil,sil,bpl,spl,r8b..r15b, which need extra byte
 * of encoding. al,cl,dl,bl have simpler encoding.
 */
static bool is_ereg_8l(u32 reg)
{
	return is_ereg(reg) ||
	    (1 << reg) & (BIT(BPF_REG_1) |
			  BIT(BPF_REG_2) |
			  BIT(BPF_REG_FP));
}

static bool is_axreg(u32 reg)
{
	return reg == BPF_REG_0;
}

/* Add modifiers if 'reg' maps to x86-64 registers R8..R15 */
static u8 add_1mod(u8 byte, u32 reg)
{
	if (is_ereg(reg))
		byte |= 1;
	return byte;
}

static u8 add_2mod(u8 byte, u32 r1, u32 r2)
{
	if (is_ereg(r1))
		byte |= 1;
	if (is_ereg(r2))
		byte |= 4;
	return byte;
}

/* Encode 'dst_reg' register into x86-64 opcode 'byte' */
static u8 add_1reg(u8 byte, u32 dst_reg)
{
	return byte + reg2hex[dst_reg];
}

/* Encode 'dst_reg' and 'src_reg' registers into x86-64 opcode 'byte' */
static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
{
	return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
}

/* Some 1-byte opcodes for binary ALU operations */
static u8 simple_alu_opcodes[] = {
	[BPF_ADD] = 0x01,
	[BPF_SUB] = 0x29,
	[BPF_AND] = 0x21,
	[BPF_OR] = 0x09,
	[BPF_XOR] = 0x31,
	[BPF_LSH] = 0xE0,
	[BPF_RSH] = 0xE8,
	[BPF_ARSH] = 0xF8,
};

static void jit_fill_hole(void *area, unsigned int size)
{
	/* Fill whole space with INT3 instructions */
	memset(area, 0xcc, size);
}

int bpf_arch_text_invalidate(void *dst, size_t len)
{
	return IS_ERR_OR_NULL(text_poke_set(dst, 0xcc, len));
}
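
/*
 * For illustration: a BPF_ALU64 add with dst_reg == BPF_REG_7 (R13) and
 * src_reg == BPF_REG_6 (RBX) encodes as add_2mod(0x48, dst, src) = 0x49
 * (REX.W + REX.B), simple_alu_opcodes[BPF_ADD] = 0x01, and
 * add_2reg(0xC0, dst, src) = 0xC0 + 5 + (3 << 3) = 0xDD, i.e. the bytes
 * 0x49 0x01 0xdd = "add r13, rbx".
 */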

struct jit_context {
	int cleanup_addr; /* Epilogue code offset */

	/*
	 * Program specific offsets of labels in the code; these rely on the
	 * JIT doing at least 2 passes, recording the position on the first
	 * pass, only to generate the correct offset on the second pass.
	 */
	int tail_call_direct_label;
	int tail_call_indirect_label;
};

/* Maximum number of bytes emitted while JITing one eBPF insn */
#define BPF_MAX_INSN_SIZE	128
#define BPF_INSN_SAFETY		64

/* Number of bytes emit_patch() needs to generate instructions */
#define X86_PATCH_SIZE		5
/* Number of bytes that will be skipped on tailcall */
#define X86_TAIL_CALL_OFFSET	(11 + ENDBR_INSN_SIZE)

static void push_callee_regs(u8 **pprog, bool *callee_regs_used)
{
	u8 *prog = *pprog;

	if (callee_regs_used[0])
		EMIT1(0x53);         /* push rbx */
	if (callee_regs_used[1])
		EMIT2(0x41, 0x55);   /* push r13 */
	if (callee_regs_used[2])
		EMIT2(0x41, 0x56);   /* push r14 */
	if (callee_regs_used[3])
		EMIT2(0x41, 0x57);   /* push r15 */
	*pprog = prog;
}

static void pop_callee_regs(u8 **pprog, bool *callee_regs_used)
{
	u8 *prog = *pprog;

	if (callee_regs_used[3])
		EMIT2(0x41, 0x5F);   /* pop r15 */
	if (callee_regs_used[2])
		EMIT2(0x41, 0x5E);   /* pop r14 */
	if (callee_regs_used[1])
		EMIT2(0x41, 0x5D);   /* pop r13 */
	if (callee_regs_used[0])
		EMIT1(0x5B);         /* pop rbx */
	*pprog = prog;
}
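
/*
 * For illustration, the 11 bytes skipped by X86_TAIL_CALL_OFFSET account
 * for the fixed part of the prologue emitted below: 5 bytes of patchable
 * nops, 2 bytes of "xor eax, eax" (or nop2), 1 byte of "push rbp" and
 * 3 bytes of "mov rbp, rsp"; ENDBR_INSN_SIZE covers the leading ENDBR on
 * CONFIG_X86_KERNEL_IBT builds, so a tail call lands on the second
 * EMIT_ENDBR() in emit_prologue().
 */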

/*
 * Emit x86-64 prologue code for BPF program.
 * bpf_tail_call helper will skip the first X86_TAIL_CALL_OFFSET bytes
 * while jumping to another program
 */
static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf,
			  bool tail_call_reachable, bool is_subprog)
{
	u8 *prog = *pprog;

	/* BPF trampoline can be made to work without these nops,
	 * but let's waste 5 bytes for now and optimize later
	 */
	EMIT_ENDBR();
	memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
	prog += X86_PATCH_SIZE;
	if (!ebpf_from_cbpf) {
		if (tail_call_reachable && !is_subprog)
			EMIT2(0x31, 0xC0); /* xor eax, eax */
		else
			EMIT2(0x66, 0x90); /* nop2 */
	}
	EMIT1(0x55);             /* push rbp */
	EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */

	/* X86_TAIL_CALL_OFFSET is here */
	EMIT_ENDBR();

	/* sub rsp, rounded_stack_depth */
	if (stack_depth)
		EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth, 8));
	if (tail_call_reachable)
		EMIT1(0x50);         /* push rax */
	*pprog = prog;
}

static int emit_patch(u8 **pprog, void *func, void *ip, u8 opcode)
{
	u8 *prog = *pprog;
	s64 offset;

	offset = func - (ip + X86_PATCH_SIZE);
	if (!is_simm32(offset)) {
		pr_err("Target call %p is out of range\n", func);
		return -ERANGE;
	}
	EMIT1_off32(opcode, offset);
	*pprog = prog;
	return 0;
}

static int emit_call(u8 **pprog, void *func, void *ip)
{
	return emit_patch(pprog, func, ip, 0xE8);
}

static int emit_jump(u8 **pprog, void *func, void *ip)
{
	return emit_patch(pprog, func, ip, 0xE9);
}
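
/*
 * For illustration: the displacement is relative to the end of the 5-byte
 * instruction, so patching a call at ip == 0x1000 to func == 0x2000 emits
 * 0xE8 followed by the little-endian rel32 0x2000 - (0x1000 + 5) = 0xffb,
 * i.e. the bytes 0xe8 0xfb 0x0f 0x00 0x00.
 */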

static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
				void *old_addr, void *new_addr)
{
	const u8 *nop_insn = x86_nops[5];
	u8 old_insn[X86_PATCH_SIZE];
	u8 new_insn[X86_PATCH_SIZE];
	u8 *prog;
	int ret;

	memcpy(old_insn, nop_insn, X86_PATCH_SIZE);
	if (old_addr) {
		prog = old_insn;
		ret = t == BPF_MOD_CALL ?
		      emit_call(&prog, old_addr, ip) :
		      emit_jump(&prog, old_addr, ip);
		if (ret)
			return ret;
	}

	memcpy(new_insn, nop_insn, X86_PATCH_SIZE);
	if (new_addr) {
		prog = new_insn;
		ret = t == BPF_MOD_CALL ?
		      emit_call(&prog, new_addr, ip) :
		      emit_jump(&prog, new_addr, ip);
		if (ret)
			return ret;
	}

	ret = -EBUSY;
	mutex_lock(&text_mutex);
	if (memcmp(ip, old_insn, X86_PATCH_SIZE))
		goto out;
	ret = 1;
	if (memcmp(ip, new_insn, X86_PATCH_SIZE)) {
		text_poke_bp(ip, new_insn, X86_PATCH_SIZE, NULL);
		ret = 0;
	}
out:
	mutex_unlock(&text_mutex);
	return ret;
}
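
/*
 * For illustration: a NULL old_addr or new_addr stands for "5-byte nop",
 * so poking with old_addr == NULL installs a call/jump over nops, poking
 * with new_addr == NULL removes one, and -EBUSY is returned when the text
 * at ip does not match the expected old instruction.
 */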

int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
		       void *old_addr, void *new_addr)
{
	if (!is_kernel_text((long)ip) &&
	    !is_bpf_text_address((long)ip))
		/* BPF poking in modules is not supported */
		return -EINVAL;

	/*
	 * See emit_prologue(), for IBT builds the trampoline hook is preceded
	 * with an ENDBR instruction.
	 */
	if (is_endbr(*(u32 *)ip))
		ip += ENDBR_INSN_SIZE;

	return __bpf_arch_text_poke(ip, t, old_addr, new_addr);
}

#define EMIT_LFENCE()	EMIT3(0x0F, 0xAE, 0xE8)

static void emit_indirect_jump(u8 **pprog, int reg, u8 *ip)
{
	u8 *prog = *pprog;

#ifdef CONFIG_RETPOLINE
	if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) {
		EMIT_LFENCE();
		EMIT2(0xFF, 0xE0 + reg);
	} else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) {
		OPTIMIZER_HIDE_VAR(reg);
		emit_jump(&prog, &__x86_indirect_thunk_array[reg], ip);
	} else
#endif
		EMIT2(0xFF, 0xE0 + reg);

	*pprog = prog;
}
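
/*
 * For illustration: with retpolines disabled, emit_indirect_jump() with
 * reg == 1 emits 0xff 0xe1, i.e. "jmp rcx"; with RETPOLINE_LFENCE the same
 * jump is preceded by an lfence, and with full retpolines it becomes a
 * direct jump to __x86_indirect_thunk_rcx.
 */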

/*
 * Generate the following code:
 *
 * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ...
 *   if (index >= array->map.max_entries)
 *     goto out;
 *   if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT)
 *     goto out;
 *   prog = array->ptrs[index];
 *   if (prog == NULL)
 *     goto out;
 *   goto *(prog->bpf_func + prologue_size);
 * out:
 */
static void emit_bpf_tail_call_indirect(u8 **pprog, bool *callee_regs_used,
					u32 stack_depth, u8 *ip,
					struct jit_context *ctx)
{
	int tcc_off = -4 - round_up(stack_depth, 8);
	u8 *prog = *pprog, *start = *pprog;
	int offset;

	/*
	 * rdi - pointer to ctx
	 * rsi - pointer to bpf_array
	 * rdx - index in bpf_array
	 */

	/*
	 * if (index >= array->map.max_entries)
	 *	goto out;
	 */
	EMIT2(0x89, 0xD2);                        /* mov edx, edx */
	EMIT3(0x39, 0x56,                         /* cmp dword ptr [rsi + 16], edx */
	      offsetof(struct bpf_array, map.max_entries));

	offset = ctx->tail_call_indirect_label - (prog + 2 - start);
	EMIT2(X86_JBE, offset);                   /* jbe out */

	/*
	 * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT)
	 *	goto out;
	 */
	EMIT2_off32(0x8B, 0x85, tcc_off);         /* mov eax, dword ptr [rbp - tcc_off] */
	EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);     /* cmp eax, MAX_TAIL_CALL_CNT */

	offset = ctx->tail_call_indirect_label - (prog + 2 - start);
	EMIT2(X86_JAE, offset);                   /* jae out */
	EMIT3(0x83, 0xC0, 0x01);                  /* add eax, 1 */
	EMIT2_off32(0x89, 0x85, tcc_off);         /* mov dword ptr [rbp - tcc_off], eax */

	/* prog = array->ptrs[index]; */
	EMIT4_off32(0x48, 0x8B, 0x8C, 0xD6,       /* mov rcx, [rsi + rdx * 8 + offsetof(...)] */
		    offsetof(struct bpf_array, ptrs));

	/*
	 * if (prog == NULL)
	 *	goto out;
	 */
	EMIT3(0x48, 0x85, 0xC9);                  /* test rcx,rcx */

	offset = ctx->tail_call_indirect_label - (prog + 2 - start);
	EMIT2(X86_JE, offset);                    /* je out */

	pop_callee_regs(&prog, callee_regs_used);

	EMIT1(0x58);                              /* pop rax */
	if (stack_depth)
		EMIT3_off32(0x48, 0x81, 0xC4,     /* add rsp, sd */
			    round_up(stack_depth, 8));

	/* goto *(prog->bpf_func + X86_TAIL_CALL_OFFSET); */
	EMIT4(0x48, 0x8B, 0x49,                   /* mov rcx, qword ptr [rcx + 32] */
	      offsetof(struct bpf_prog, bpf_func));
	EMIT4(0x48, 0x83, 0xC1,                   /* add rcx, X86_TAIL_CALL_OFFSET */
	      X86_TAIL_CALL_OFFSET);
	/*
	 * Now we're ready to jump into next BPF program
	 * rdi == ctx (1st arg)
	 * rcx == prog->bpf_func + X86_TAIL_CALL_OFFSET
	 */
	emit_indirect_jump(&prog, 1 /* rcx */, ip + (prog - start));

	/* out: */
	ctx->tail_call_indirect_label = prog - start;
	*pprog = prog;
}
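
/*
 * For illustration: with stack_depth == 16, tcc_off is -4 - 16 = -20, so
 * the 32-bit tail-call count is addressed as dword ptr [rbp - 20], which
 * falls inside the "push rax" slot the prologue reserves right below the
 * rounded-up BPF stack.
 */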

static void emit_bpf_tail_call_direct(struct bpf_jit_poke_descriptor *poke,
				      u8 **pprog, u8 *ip,
				      bool *callee_regs_used, u32 stack_depth,
				      struct jit_context *ctx)
{
	int tcc_off = -4 - round_up(stack_depth, 8);
	u8 *prog = *pprog, *start = *pprog;
	int offset;

	/*
	 * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT)
	 *	goto out;
	 */
	EMIT2_off32(0x8B, 0x85, tcc_off);             /* mov eax, dword ptr [rbp - tcc_off] */
	EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);         /* cmp eax, MAX_TAIL_CALL_CNT */

	offset = ctx->tail_call_direct_label - (prog + 2 - start);
	EMIT2(X86_JAE, offset);                       /* jae out */
	EMIT3(0x83, 0xC0, 0x01);                      /* add eax, 1 */
	EMIT2_off32(0x89, 0x85, tcc_off);             /* mov dword ptr [rbp - tcc_off], eax */

	poke->tailcall_bypass = ip + (prog - start);
	poke->adj_off = X86_TAIL_CALL_OFFSET;
	poke->tailcall_target = ip + ctx->tail_call_direct_label - X86_PATCH_SIZE;
	poke->bypass_addr = (u8 *)poke->tailcall_target + X86_PATCH_SIZE;

	emit_jump(&prog, (u8 *)poke->tailcall_target + X86_PATCH_SIZE,
		  poke->tailcall_bypass);

	pop_callee_regs(&prog, callee_regs_used);
	EMIT1(0x58);                                  /* pop rax */
	if (stack_depth)
		EMIT3_off32(0x48, 0x81, 0xC4, round_up(stack_depth, 8));

	memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
	prog += X86_PATCH_SIZE;

	/* out: */
	ctx->tail_call_direct_label = prog - start;

	*pprog = prog;
}

static void bpf_tail_call_direct_fixup(struct bpf_prog *prog)
{
	struct bpf_jit_poke_descriptor *poke;
	struct bpf_array *array;
	struct bpf_prog *target;
	int i, ret;

	for (i = 0; i < prog->aux->size_poke_tab; i++) {
		poke = &prog->aux->poke_tab[i];
		if (poke->aux && poke->aux != prog->aux)
			continue;

		WARN_ON_ONCE(READ_ONCE(poke->tailcall_target_stable));

		if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
			continue;

		array = container_of(poke->tail_call.map, struct bpf_array, map);
		mutex_lock(&array->aux->poke_mutex);
		target = array->ptrs[poke->tail_call.key];
		if (target) {
			ret = __bpf_arch_text_poke(poke->tailcall_target,
						   BPF_MOD_JUMP, NULL,
						   (u8 *)target->bpf_func +
						   poke->adj_off);
			BUG_ON(ret < 0);
			ret = __bpf_arch_text_poke(poke->tailcall_bypass,
						   BPF_MOD_JUMP,
						   (u8 *)poke->tailcall_target +
						   X86_PATCH_SIZE, NULL);
			BUG_ON(ret < 0);
		}
		WRITE_ONCE(poke->tailcall_target_stable, true);
		mutex_unlock(&array->aux->poke_mutex);
	}
}

static void emit_mov_imm32(u8 **pprog, bool sign_propagate,
			   u32 dst_reg, const u32 imm32)
{
	u8 *prog = *pprog;
	u8 b1, b2, b3;

	/*
	 * Optimization: if imm32 is positive, use 'mov %eax, imm32'
	 * (which zero-extends imm32) to save 2 bytes.
	 */
	if (sign_propagate && (s32)imm32 < 0) {
		/* 'mov %rax, imm32' sign extends imm32 */
		b1 = add_1mod(0x48, dst_reg);
		b2 = 0xC7;
		b3 = 0xC0;
		EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
		goto done;
	}

	/*
	 * Optimization: if imm32 is zero, use 'xor %eax, %eax'
	 * to save 3 bytes.
	 */
	if (imm32 == 0) {
		if (is_ereg(dst_reg))
			EMIT1(add_2mod(0x40, dst_reg, dst_reg));
		b2 = 0x31; /* xor */
		b3 = 0xC0;
		EMIT2(b2, add_2reg(b3, dst_reg, dst_reg));
		goto done;
	}

	/* mov %eax, imm32 */
	if (is_ereg(dst_reg))
		EMIT1(add_1mod(0x40, dst_reg));
	EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
done:
	*pprog = prog;
}
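
/*
 * For illustration, the three shapes for dst_reg == BPF_REG_0 (rax):
 * imm32 == 0 emits 0x31 0xc0 ("xor eax, eax"), a negative imm32 with
 * sign_propagate emits 0x48 0xc7 0xc0 imm32 ("mov rax, imm32", sign
 * extended), and anything else emits 0xb8 imm32 ("mov eax, imm32",
 * zero extended).
 */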

static void emit_mov_imm64(u8 **pprog, u32 dst_reg,
			   const u32 imm32_hi, const u32 imm32_lo)
{
	u8 *prog = *pprog;

	if (is_uimm32(((u64)imm32_hi << 32) | (u32)imm32_lo)) {
		/*
		 * For emitting plain u32, where sign bit must not be
		 * propagated LLVM tends to load imm64 over mov32
		 * directly, so save couple of bytes by just doing
		 * 'mov %eax, imm32' instead.
		 */
		emit_mov_imm32(&prog, false, dst_reg, imm32_lo);
	} else {
		/* movabsq %rax, imm64 */
		EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
		EMIT(imm32_lo, 4);
		EMIT(imm32_hi, 4);
	}

	*pprog = prog;
}

static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg)
{
	u8 *prog = *pprog;

	if (is64) {
		/* mov dst, src */
		EMIT_mov(dst_reg, src_reg);
	} else {
		/* mov32 dst, src */
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT1(add_2mod(0x40, dst_reg, src_reg));
		EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
	}

	*pprog = prog;
}

/* Emit the suffix (ModR/M etc) for addressing *(ptr_reg + off) and val_reg */
static void emit_insn_suffix(u8 **pprog, u32 ptr_reg, u32 val_reg, int off)
{
	u8 *prog = *pprog;

	if (is_imm8(off)) {
		/* 1-byte signed displacement.
		 *
		 * If off == 0 we could skip this and save one extra byte, but
		 * special case of x86 R13 which always needs an offset is not
		 * worth the hassle
		 */
		EMIT2(add_2reg(0x40, ptr_reg, val_reg), off);
	} else {
		/* 4-byte signed displacement */
		EMIT1_off32(add_2reg(0x80, ptr_reg, val_reg), off);
	}
	*pprog = prog;
}

/*
 * Emit a REX byte if it will be necessary to address these registers
 */
static void maybe_emit_mod(u8 **pprog, u32 dst_reg, u32 src_reg, bool is64)
{
	u8 *prog = *pprog;

	if (is64)
		EMIT1(add_2mod(0x48, dst_reg, src_reg));
	else if (is_ereg(dst_reg) || is_ereg(src_reg))
		EMIT1(add_2mod(0x40, dst_reg, src_reg));
	*pprog = prog;
}
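
/*
 * For illustration: addressing *(ptr + off) with off == 8 emits the ModRM
 * byte add_2reg(0x40, ptr, val) plus a disp8, while off == 4096 needs
 * add_2reg(0x80, ptr, val) plus a little-endian disp32; the mod bits
 * 01 vs 10 select the displacement width.
 */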

/*
 * Similar version of maybe_emit_mod() for a single register
 */
static void maybe_emit_1mod(u8 **pprog, u32 reg, bool is64)
{
	u8 *prog = *pprog;

	if (is64)
		EMIT1(add_1mod(0x48, reg));
	else if (is_ereg(reg))
		EMIT1(add_1mod(0x40, reg));
	*pprog = prog;
}

/* LDX: dst_reg = *(u8*)(src_reg + off) */
static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
{
	u8 *prog = *pprog;

	switch (size) {
	case BPF_B:
		/* Emit 'movzx rax, byte ptr [rax + off]' */
		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
		break;
	case BPF_H:
		/* Emit 'movzx rax, word ptr [rax + off]' */
		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
		break;
	case BPF_W:
		/* Emit 'mov eax, dword ptr [rax+0x14]' */
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
		else
			EMIT1(0x8B);
		break;
	case BPF_DW:
		/* Emit 'mov rax, qword ptr [rax+0x14]' */
		EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
		break;
	}
	emit_insn_suffix(&prog, src_reg, dst_reg, off);
	*pprog = prog;
}
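
/*
 * For illustration: BPF_LDX | BPF_MEM | BPF_B with dst_reg == BPF_REG_0,
 * src_reg == BPF_REG_1 and off == 0 emits 0x48 0x0f 0xb6 0x47 0x00, i.e.
 * "movzx rax, byte ptr [rdi + 0]".
 */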

/* STX: *(u8*)(dst_reg + off) = src_reg */
static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
{
	u8 *prog = *pprog;

	switch (size) {
	case BPF_B:
		/* Emit 'mov byte ptr [rax + off], al' */
		if (is_ereg(dst_reg) || is_ereg_8l(src_reg))
			/* Add extra byte for eregs or SIL,DIL,BPL in src_reg */
			EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
		else
			EMIT1(0x88);
		break;
	case BPF_H:
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
		else
			EMIT2(0x66, 0x89);
		break;
	case BPF_W:
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
		else
			EMIT1(0x89);
		break;
	case BPF_DW:
		EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
		break;
	}
	emit_insn_suffix(&prog, dst_reg, src_reg, off);
	*pprog = prog;
}

static int emit_atomic(u8 **pprog, u8 atomic_op,
		       u32 dst_reg, u32 src_reg, s16 off, u8 bpf_size)
{
	u8 *prog = *pprog;

	EMIT1(0xF0); /* lock prefix */

	maybe_emit_mod(&prog, dst_reg, src_reg, bpf_size == BPF_DW);

	/* emit opcode */
	switch (atomic_op) {
	case BPF_ADD:
	case BPF_AND:
	case BPF_OR:
	case BPF_XOR:
		/* lock *(u32/u64*)(dst_reg + off) <op>= src_reg */
		EMIT1(simple_alu_opcodes[atomic_op]);
		break;
	case BPF_ADD | BPF_FETCH:
		/* src_reg = atomic_fetch_add(dst_reg + off, src_reg); */
		EMIT2(0x0F, 0xC1);
		break;
	case BPF_XCHG:
		/* src_reg = atomic_xchg(dst_reg + off, src_reg); */
		EMIT1(0x87);
		break;
	case BPF_CMPXCHG:
		/* r0 = atomic_cmpxchg(dst_reg + off, r0, src_reg); */
		EMIT2(0x0F, 0xB1);
		break;
	default:
		pr_err("bpf_jit: unknown atomic opcode %02x\n", atomic_op);
		return -EFAULT;
	}

	emit_insn_suffix(&prog, dst_reg, src_reg, off);

	*pprog = prog;
	return 0;
}
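
/*
 * For illustration: atomic_op == BPF_ADD with bpf_size == BPF_DW,
 * dst_reg == BPF_REG_1 (rdi), src_reg == BPF_REG_0 (rax) and off == 0
 * emits 0xf0 0x48 0x01 0x47 0x00, i.e. "lock add qword ptr [rdi + 0], rax".
 */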

bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs)
{
	u32 reg = x->fixup >> 8;

	/* jump over faulting load and clear dest register */
	*(unsigned long *)((void *)regs + reg) = 0;
	regs->ip += x->fixup & 0xff;
	return true;
}

static void detect_reg_usage(struct bpf_insn *insn, int insn_cnt,
			     bool *regs_used, bool *tail_call_seen)
{
	int i;

	for (i = 1; i <= insn_cnt; i++, insn++) {
		if (insn->code == (BPF_JMP | BPF_TAIL_CALL))
			*tail_call_seen = true;
		if (insn->dst_reg == BPF_REG_6 || insn->src_reg == BPF_REG_6)
			regs_used[0] = true;
		if (insn->dst_reg == BPF_REG_7 || insn->src_reg == BPF_REG_7)
			regs_used[1] = true;
		if (insn->dst_reg == BPF_REG_8 || insn->src_reg == BPF_REG_8)
			regs_used[2] = true;
		if (insn->dst_reg == BPF_REG_9 || insn->src_reg == BPF_REG_9)
			regs_used[3] = true;
	}
}

static void emit_nops(u8 **pprog, int len)
{
	u8 *prog = *pprog;
	int i, noplen;

	while (len > 0) {
		noplen = len;

		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;

		for (i = 0; i < noplen; i++)
			EMIT1(x86_nops[noplen][i]);
		len -= noplen;
	}

	*pprog = prog;
}

#define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp)))

static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image,
		  int oldproglen, struct jit_context *ctx, bool jmp_padding)
{
	bool tail_call_reachable = bpf_prog->aux->tail_call_reachable;
	struct bpf_insn *insn = bpf_prog->insnsi;
	bool callee_regs_used[4] = {};
	int insn_cnt = bpf_prog->len;
	bool tail_call_seen = false;
	bool seen_exit = false;
	u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
	int i, excnt = 0;
	int ilen, proglen = 0;
	u8 *prog = temp;
	int err;

	detect_reg_usage(insn, insn_cnt, callee_regs_used,
			 &tail_call_seen);

	/* tail call's presence in current prog implies it is reachable */
	tail_call_reachable |= tail_call_seen;

	emit_prologue(&prog, bpf_prog->aux->stack_depth,
		      bpf_prog_was_classic(bpf_prog), tail_call_reachable,
		      bpf_prog->aux->func_idx != 0);
	push_callee_regs(&prog, callee_regs_used);

	ilen = prog - temp;
	if (rw_image)
		memcpy(rw_image + proglen, temp, ilen);
	proglen += ilen;
	addrs[0] = proglen;
	prog = temp;

	for (i = 1; i <= insn_cnt; i++, insn++) {
		const s32 imm32 = insn->imm;
		u32 dst_reg = insn->dst_reg;
		u32 src_reg = insn->src_reg;
		u8 b2 = 0, b3 = 0;
		u8 *start_of_ldx;
		s64 jmp_offset;
		u8 jmp_cond;
		u8 *func;
		int nops;

		switch (insn->code) {
			/* ALU */
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU64 | BPF_ADD | BPF_X:
		case BPF_ALU64 | BPF_SUB | BPF_X:
		case BPF_ALU64 | BPF_AND | BPF_X:
		case BPF_ALU64 | BPF_OR | BPF_X:
		case BPF_ALU64 | BPF_XOR | BPF_X:
			maybe_emit_mod(&prog, dst_reg, src_reg,
				       BPF_CLASS(insn->code) == BPF_ALU64);
			b2 = simple_alu_opcodes[BPF_OP(insn->code)];
			EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
			break;

		case BPF_ALU64 | BPF_MOV | BPF_X:
		case BPF_ALU | BPF_MOV | BPF_X:
			emit_mov_reg(&prog,
				     BPF_CLASS(insn->code) == BPF_ALU64,
				     dst_reg, src_reg);
			break;

			/* neg dst */
		case BPF_ALU | BPF_NEG:
		case BPF_ALU64 | BPF_NEG:
			maybe_emit_1mod(&prog, dst_reg,
					BPF_CLASS(insn->code) == BPF_ALU64);
			EMIT2(0xF7, add_1reg(0xD8, dst_reg));
			break;

		case BPF_ALU | BPF_ADD | BPF_K:
		case BPF_ALU | BPF_SUB | BPF_K:
		case BPF_ALU | BPF_AND | BPF_K:
		case BPF_ALU | BPF_OR | BPF_K:
		case BPF_ALU | BPF_XOR | BPF_K:
		case BPF_ALU64 | BPF_ADD | BPF_K:
		case BPF_ALU64 | BPF_SUB | BPF_K:
		case BPF_ALU64 | BPF_AND | BPF_K:
		case BPF_ALU64 | BPF_OR | BPF_K:
		case BPF_ALU64 | BPF_XOR | BPF_K:
			maybe_emit_1mod(&prog, dst_reg,
					BPF_CLASS(insn->code) == BPF_ALU64);

			/*
			 * b3 holds 'normal' opcode, b2 short form only valid
			 * in case dst is eax/rax.
			 */
			switch (BPF_OP(insn->code)) {
			case BPF_ADD:
				b3 = 0xC0;
				b2 = 0x05;
				break;
			case BPF_SUB:
				b3 = 0xE8;
				b2 = 0x2D;
				break;
			case BPF_AND:
				b3 = 0xE0;
				b2 = 0x25;
				break;
			case BPF_OR:
				b3 = 0xC8;
				b2 = 0x0D;
				break;
			case BPF_XOR:
				b3 = 0xF0;
				b2 = 0x35;
				break;
			}

			if (is_imm8(imm32))
				EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
			else if (is_axreg(dst_reg))
				EMIT1_off32(b2, imm32);
			else
				EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
			break;

		case BPF_ALU64 | BPF_MOV | BPF_K:
		case BPF_ALU | BPF_MOV | BPF_K:
			emit_mov_imm32(&prog, BPF_CLASS(insn->code) == BPF_ALU64,
				       dst_reg, imm32);
			break;

		case BPF_LD | BPF_IMM | BPF_DW:
			emit_mov_imm64(&prog, dst_reg, insn[1].imm, insn[0].imm);
			insn++;
			i++;
			break;

			/* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
		case BPF_ALU | BPF_MOD | BPF_X:
		case BPF_ALU | BPF_DIV | BPF_X:
		case BPF_ALU | BPF_MOD | BPF_K:
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU64 | BPF_MOD | BPF_X:
		case BPF_ALU64 | BPF_DIV | BPF_X:
		case BPF_ALU64 | BPF_MOD | BPF_K:
		case BPF_ALU64 | BPF_DIV | BPF_K: {
			bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;

			if (dst_reg != BPF_REG_0)
				EMIT1(0x50); /* push rax */
			if (dst_reg != BPF_REG_3)
				EMIT1(0x52); /* push rdx */

			if (BPF_SRC(insn->code) == BPF_X) {
				if (src_reg == BPF_REG_0 ||
				    src_reg == BPF_REG_3) {
					/* mov r11, src_reg */
					EMIT_mov(AUX_REG, src_reg);
					src_reg = AUX_REG;
				}
			} else {
				/* mov r11, imm32 */
				EMIT3_off32(0x49, 0xC7, 0xC3, imm32);
				src_reg = AUX_REG;
			}

			if (dst_reg != BPF_REG_0)
				/* mov rax, dst_reg */
				emit_mov_reg(&prog, is64, BPF_REG_0, dst_reg);

			/*
			 * xor edx, edx
			 * equivalent to 'xor rdx, rdx', but one byte less
			 */
			EMIT2(0x31, 0xd2);

			/* div src_reg */
			maybe_emit_1mod(&prog, src_reg, is64);
			EMIT2(0xF7, add_1reg(0xF0, src_reg));

			if (BPF_OP(insn->code) == BPF_MOD &&
			    dst_reg != BPF_REG_3)
				/* mov dst_reg, rdx */
				emit_mov_reg(&prog, is64, dst_reg, BPF_REG_3);
			else if (BPF_OP(insn->code) == BPF_DIV &&
				 dst_reg != BPF_REG_0)
				/* mov dst_reg, rax */
				emit_mov_reg(&prog, is64, dst_reg, BPF_REG_0);

			if (dst_reg != BPF_REG_3)
				EMIT1(0x5A); /* pop rdx */
			if (dst_reg != BPF_REG_0)
				EMIT1(0x58); /* pop rax */
			break;
		}

		case BPF_ALU | BPF_MUL | BPF_K:
		case BPF_ALU64 | BPF_MUL | BPF_K:
			maybe_emit_mod(&prog, dst_reg, dst_reg,
				       BPF_CLASS(insn->code) == BPF_ALU64);

			if (is_imm8(imm32))
				/* imul dst_reg, dst_reg, imm8 */
				EMIT3(0x6B, add_2reg(0xC0, dst_reg, dst_reg),
				      imm32);
			else
				/* imul dst_reg, dst_reg, imm32 */
				EMIT2_off32(0x69,
					    add_2reg(0xC0, dst_reg, dst_reg),
					    imm32);
			break;

		case BPF_ALU | BPF_MUL | BPF_X:
		case BPF_ALU64 | BPF_MUL | BPF_X:
			maybe_emit_mod(&prog, src_reg, dst_reg,
				       BPF_CLASS(insn->code) == BPF_ALU64);

			/* imul dst_reg, src_reg */
			EMIT3(0x0F, 0xAF, add_2reg(0xC0, src_reg, dst_reg));
			break;

			/* Shifts */
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_K:
		case BPF_ALU | BPF_ARSH | BPF_K:
		case BPF_ALU64 | BPF_LSH | BPF_K:
		case BPF_ALU64 | BPF_RSH | BPF_K:
		case BPF_ALU64 | BPF_ARSH | BPF_K:
			maybe_emit_1mod(&prog, dst_reg,
					BPF_CLASS(insn->code) == BPF_ALU64);

			b3 = simple_alu_opcodes[BPF_OP(insn->code)];
			if (imm32 == 1)
				EMIT2(0xD1, add_1reg(b3, dst_reg));
			else
				EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
			break;
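
		/*
		 * For illustration: BPF_ALU64 | BPF_LSH | BPF_K with
		 * dst_reg == BPF_REG_0 and imm32 == 3 emits
		 * 0x48 0xc1 0xe0 0x03, i.e. "shl rax, 3"; imm32 == 1 uses
		 * the shorter 0xD1 opcode form instead.
		 */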
Starovoitov break; 111762258278SAlexei Starovoitov 111872b603eeSAlexei Starovoitov case BPF_ALU | BPF_LSH | BPF_X: 111972b603eeSAlexei Starovoitov case BPF_ALU | BPF_RSH | BPF_X: 112072b603eeSAlexei Starovoitov case BPF_ALU | BPF_ARSH | BPF_X: 112172b603eeSAlexei Starovoitov case BPF_ALU64 | BPF_LSH | BPF_X: 112272b603eeSAlexei Starovoitov case BPF_ALU64 | BPF_RSH | BPF_X: 112372b603eeSAlexei Starovoitov case BPF_ALU64 | BPF_ARSH | BPF_X: 112472b603eeSAlexei Starovoitov 1125a2c7a983SIngo Molnar /* Check for bad case when dst_reg == rcx */ 112672b603eeSAlexei Starovoitov if (dst_reg == BPF_REG_4) { 112772b603eeSAlexei Starovoitov /* mov r11, dst_reg */ 112872b603eeSAlexei Starovoitov EMIT_mov(AUX_REG, dst_reg); 112972b603eeSAlexei Starovoitov dst_reg = AUX_REG; 113072b603eeSAlexei Starovoitov } 113172b603eeSAlexei Starovoitov 113272b603eeSAlexei Starovoitov if (src_reg != BPF_REG_4) { /* common case */ 113372b603eeSAlexei Starovoitov EMIT1(0x51); /* push rcx */ 113472b603eeSAlexei Starovoitov 113572b603eeSAlexei Starovoitov /* mov rcx, src_reg */ 113672b603eeSAlexei Starovoitov EMIT_mov(BPF_REG_4, src_reg); 113772b603eeSAlexei Starovoitov } 113872b603eeSAlexei Starovoitov 113972b603eeSAlexei Starovoitov /* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */ 11406364d7d7SJie Meng maybe_emit_1mod(&prog, dst_reg, 11416364d7d7SJie Meng BPF_CLASS(insn->code) == BPF_ALU64); 114272b603eeSAlexei Starovoitov 1143e5f02cacSBrendan Jackman b3 = simple_alu_opcodes[BPF_OP(insn->code)]; 114472b603eeSAlexei Starovoitov EMIT2(0xD3, add_1reg(b3, dst_reg)); 114572b603eeSAlexei Starovoitov 114672b603eeSAlexei Starovoitov if (src_reg != BPF_REG_4) 114772b603eeSAlexei Starovoitov EMIT1(0x59); /* pop rcx */ 114872b603eeSAlexei Starovoitov 114972b603eeSAlexei Starovoitov if (insn->dst_reg == BPF_REG_4) 115072b603eeSAlexei Starovoitov /* mov dst_reg, r11 */ 115172b603eeSAlexei Starovoitov EMIT_mov(insn->dst_reg, AUX_REG); 115272b603eeSAlexei Starovoitov break; 115372b603eeSAlexei Starovoitov 115462258278SAlexei Starovoitov case BPF_ALU | BPF_END | BPF_FROM_BE: 1155e430f34eSAlexei Starovoitov switch (imm32) { 115662258278SAlexei Starovoitov case 16: 1157a2c7a983SIngo Molnar /* Emit 'ror %ax, 8' to swap lower 2 bytes */ 115862258278SAlexei Starovoitov EMIT1(0x66); 1159e430f34eSAlexei Starovoitov if (is_ereg(dst_reg)) 116062258278SAlexei Starovoitov EMIT1(0x41); 1161e430f34eSAlexei Starovoitov EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8); 1162343f845bSAlexei Starovoitov 1163a2c7a983SIngo Molnar /* Emit 'movzwl eax, ax' */ 1164343f845bSAlexei Starovoitov if (is_ereg(dst_reg)) 1165343f845bSAlexei Starovoitov EMIT3(0x45, 0x0F, 0xB7); 1166343f845bSAlexei Starovoitov else 1167343f845bSAlexei Starovoitov EMIT2(0x0F, 0xB7); 1168343f845bSAlexei Starovoitov EMIT1(add_2reg(0xC0, dst_reg, dst_reg)); 116962258278SAlexei Starovoitov break; 117062258278SAlexei Starovoitov case 32: 1171a2c7a983SIngo Molnar /* Emit 'bswap eax' to swap lower 4 bytes */ 1172e430f34eSAlexei Starovoitov if (is_ereg(dst_reg)) 117362258278SAlexei Starovoitov EMIT2(0x41, 0x0F); 117462258278SAlexei Starovoitov else 117562258278SAlexei Starovoitov EMIT1(0x0F); 1176e430f34eSAlexei Starovoitov EMIT1(add_1reg(0xC8, dst_reg)); 117762258278SAlexei Starovoitov break; 117862258278SAlexei Starovoitov case 64: 1179a2c7a983SIngo Molnar /* Emit 'bswap rax' to swap 8 bytes */ 1180e430f34eSAlexei Starovoitov EMIT3(add_1mod(0x48, dst_reg), 0x0F, 1181e430f34eSAlexei Starovoitov add_1reg(0xC8, dst_reg)); 118262258278SAlexei Starovoitov break; 118362258278SAlexei Starovoitov } 
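		/* Encoding sketch for the 64-bit case above, assuming
		 * dst_reg maps to rax: add_1mod(0x48, dst_reg) forms the
		 * REX.W prefix, giving the bytes 48 0F C8 ("bswap rax").
		 * An extended register such as r8 additionally sets REX.B,
		 * giving 49 0F C8 ("bswap r8").
		 */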
118462258278SAlexei Starovoitov break; 118562258278SAlexei Starovoitov 118662258278SAlexei Starovoitov case BPF_ALU | BPF_END | BPF_FROM_LE: 1187343f845bSAlexei Starovoitov switch (imm32) { 1188343f845bSAlexei Starovoitov case 16: 1189a2c7a983SIngo Molnar /* 1190a2c7a983SIngo Molnar * Emit 'movzwl eax, ax' to zero extend 16-bit 1191343f845bSAlexei Starovoitov * into 64 bit 1192343f845bSAlexei Starovoitov */ 1193343f845bSAlexei Starovoitov if (is_ereg(dst_reg)) 1194343f845bSAlexei Starovoitov EMIT3(0x45, 0x0F, 0xB7); 1195343f845bSAlexei Starovoitov else 1196343f845bSAlexei Starovoitov EMIT2(0x0F, 0xB7); 1197343f845bSAlexei Starovoitov EMIT1(add_2reg(0xC0, dst_reg, dst_reg)); 1198343f845bSAlexei Starovoitov break; 1199343f845bSAlexei Starovoitov case 32: 1200a2c7a983SIngo Molnar /* Emit 'mov eax, eax' to clear upper 32-bits */ 1201343f845bSAlexei Starovoitov if (is_ereg(dst_reg)) 1202343f845bSAlexei Starovoitov EMIT1(0x45); 1203343f845bSAlexei Starovoitov EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg)); 1204343f845bSAlexei Starovoitov break; 1205343f845bSAlexei Starovoitov case 64: 1206343f845bSAlexei Starovoitov /* nop */ 1207343f845bSAlexei Starovoitov break; 1208343f845bSAlexei Starovoitov } 120962258278SAlexei Starovoitov break; 121062258278SAlexei Starovoitov 1211f5e81d11SDaniel Borkmann /* speculation barrier */ 1212f5e81d11SDaniel Borkmann case BPF_ST | BPF_NOSPEC: 1213f5e81d11SDaniel Borkmann if (boot_cpu_has(X86_FEATURE_XMM2)) 121487c87ecdSPeter Zijlstra EMIT_LFENCE(); 1215f5e81d11SDaniel Borkmann break; 1216f5e81d11SDaniel Borkmann 1217e430f34eSAlexei Starovoitov /* ST: *(u8*)(dst_reg + off) = imm */ 121862258278SAlexei Starovoitov case BPF_ST | BPF_MEM | BPF_B: 1219e430f34eSAlexei Starovoitov if (is_ereg(dst_reg)) 122062258278SAlexei Starovoitov EMIT2(0x41, 0xC6); 122162258278SAlexei Starovoitov else 122262258278SAlexei Starovoitov EMIT1(0xC6); 122362258278SAlexei Starovoitov goto st; 122462258278SAlexei Starovoitov case BPF_ST | BPF_MEM | BPF_H: 1225e430f34eSAlexei Starovoitov if (is_ereg(dst_reg)) 122662258278SAlexei Starovoitov EMIT3(0x66, 0x41, 0xC7); 122762258278SAlexei Starovoitov else 122862258278SAlexei Starovoitov EMIT2(0x66, 0xC7); 122962258278SAlexei Starovoitov goto st; 123062258278SAlexei Starovoitov case BPF_ST | BPF_MEM | BPF_W: 1231e430f34eSAlexei Starovoitov if (is_ereg(dst_reg)) 123262258278SAlexei Starovoitov EMIT2(0x41, 0xC7); 123362258278SAlexei Starovoitov else 123462258278SAlexei Starovoitov EMIT1(0xC7); 123562258278SAlexei Starovoitov goto st; 123662258278SAlexei Starovoitov case BPF_ST | BPF_MEM | BPF_DW: 1237e430f34eSAlexei Starovoitov EMIT2(add_1mod(0x48, dst_reg), 0xC7); 123862258278SAlexei Starovoitov 123962258278SAlexei Starovoitov st: if (is_imm8(insn->off)) 1240e430f34eSAlexei Starovoitov EMIT2(add_1reg(0x40, dst_reg), insn->off); 124162258278SAlexei Starovoitov else 1242e430f34eSAlexei Starovoitov EMIT1_off32(add_1reg(0x80, dst_reg), insn->off); 124362258278SAlexei Starovoitov 1244e430f34eSAlexei Starovoitov EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code))); 124562258278SAlexei Starovoitov break; 124662258278SAlexei Starovoitov 1247e430f34eSAlexei Starovoitov /* STX: *(u8*)(dst_reg + off) = src_reg */ 124862258278SAlexei Starovoitov case BPF_STX | BPF_MEM | BPF_B: 124962258278SAlexei Starovoitov case BPF_STX | BPF_MEM | BPF_H: 125062258278SAlexei Starovoitov case BPF_STX | BPF_MEM | BPF_W: 125162258278SAlexei Starovoitov case BPF_STX | BPF_MEM | BPF_DW: 12523b2744e6SAlexei Starovoitov emit_stx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, 
insn->off); 125362258278SAlexei Starovoitov break; 125462258278SAlexei Starovoitov 1255e430f34eSAlexei Starovoitov /* LDX: dst_reg = *(u8*)(src_reg + off) */ 125662258278SAlexei Starovoitov case BPF_LDX | BPF_MEM | BPF_B: 12573dec541bSAlexei Starovoitov case BPF_LDX | BPF_PROBE_MEM | BPF_B: 125862258278SAlexei Starovoitov case BPF_LDX | BPF_MEM | BPF_H: 12593dec541bSAlexei Starovoitov case BPF_LDX | BPF_PROBE_MEM | BPF_H: 126062258278SAlexei Starovoitov case BPF_LDX | BPF_MEM | BPF_W: 12613dec541bSAlexei Starovoitov case BPF_LDX | BPF_PROBE_MEM | BPF_W: 126262258278SAlexei Starovoitov case BPF_LDX | BPF_MEM | BPF_DW: 12633dec541bSAlexei Starovoitov case BPF_LDX | BPF_PROBE_MEM | BPF_DW: 12644c5de127SAlexei Starovoitov if (BPF_MODE(insn->code) == BPF_PROBE_MEM) { 1265588a25e9SAlexei Starovoitov /* Though the verifier prevents negative insn->off in BPF_PROBE_MEM 1266588a25e9SAlexei Starovoitov * add abs(insn->off) to the limit to make sure that negative 1267588a25e9SAlexei Starovoitov * offset won't be an issue. 1268588a25e9SAlexei Starovoitov * insn->off is s16, so it won't affect valid pointers. 1269588a25e9SAlexei Starovoitov */ 1270588a25e9SAlexei Starovoitov u64 limit = TASK_SIZE_MAX + PAGE_SIZE + abs(insn->off); 1271588a25e9SAlexei Starovoitov u8 *end_of_jmp1, *end_of_jmp2; 1272588a25e9SAlexei Starovoitov 1273588a25e9SAlexei Starovoitov /* Conservatively check that src_reg + insn->off is a kernel address: 1274588a25e9SAlexei Starovoitov * 1. src_reg + insn->off >= limit 1275588a25e9SAlexei Starovoitov * 2. src_reg + insn->off doesn't become small positive. 1276588a25e9SAlexei Starovoitov * Cannot do src_reg + insn->off >= limit in one branch, 1277588a25e9SAlexei Starovoitov * since it needs two spare registers, but JIT has only one. 1278588a25e9SAlexei Starovoitov */ 1279588a25e9SAlexei Starovoitov 1280588a25e9SAlexei Starovoitov /* movabsq r11, limit */ 1281588a25e9SAlexei Starovoitov EMIT2(add_1mod(0x48, AUX_REG), add_1reg(0xB8, AUX_REG)); 1282588a25e9SAlexei Starovoitov EMIT((u32)limit, 4); 1283588a25e9SAlexei Starovoitov EMIT(limit >> 32, 4); 1284588a25e9SAlexei Starovoitov /* cmp src_reg, r11 */ 1285588a25e9SAlexei Starovoitov maybe_emit_mod(&prog, src_reg, AUX_REG, true); 1286588a25e9SAlexei Starovoitov EMIT2(0x39, add_2reg(0xC0, src_reg, AUX_REG)); 1287588a25e9SAlexei Starovoitov /* if unsigned '<' goto end_of_jmp2 */ 1288588a25e9SAlexei Starovoitov EMIT2(X86_JB, 0); 1289588a25e9SAlexei Starovoitov end_of_jmp1 = prog; 1290588a25e9SAlexei Starovoitov 1291588a25e9SAlexei Starovoitov /* mov r11, src_reg */ 1292588a25e9SAlexei Starovoitov emit_mov_reg(&prog, true, AUX_REG, src_reg); 1293588a25e9SAlexei Starovoitov /* add r11, insn->off */ 1294588a25e9SAlexei Starovoitov maybe_emit_1mod(&prog, AUX_REG, true); 1295588a25e9SAlexei Starovoitov EMIT2_off32(0x81, add_1reg(0xC0, AUX_REG), insn->off); 1296588a25e9SAlexei Starovoitov /* jmp if not carry to start_of_ldx 1297588a25e9SAlexei Starovoitov * Otherwise ERR_PTR(-EINVAL) + 128 will be the user addr 1298588a25e9SAlexei Starovoitov * that has to be rejected. 
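			 *
			 * Overall shape of the guard (labels are illustrative,
			 * not present in the emitted code):
			 *
			 *   movabs r11, limit
			 *   cmp    src_reg, r11
			 *   jb     .Lzero            ; below kernel range
			 *   mov    r11, src_reg
			 *   add    r11, insn->off
			 *   jnc    .Lldx             ; no wrap, do the load
			 * .Lzero:
			 *   xor    dst_reg, dst_reg
			 *   jmp    .Lafter_ldx
			 * .Lldx:
			 *   mov    dst_reg, [src_reg + insn->off]
			 * .Lafter_ldx: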
1299588a25e9SAlexei Starovoitov */ 1300588a25e9SAlexei Starovoitov EMIT2(0x73 /* JNC */, 0); 1301588a25e9SAlexei Starovoitov end_of_jmp2 = prog; 1302588a25e9SAlexei Starovoitov 13034c5de127SAlexei Starovoitov /* xor dst_reg, dst_reg */ 13044c5de127SAlexei Starovoitov emit_mov_imm32(&prog, false, dst_reg, 0); 13054c5de127SAlexei Starovoitov /* jmp byte_after_ldx */ 13064c5de127SAlexei Starovoitov EMIT2(0xEB, 0); 13074c5de127SAlexei Starovoitov 1308588a25e9SAlexei Starovoitov /* populate jmp_offset for JB above to jump to xor dst_reg */ 1309588a25e9SAlexei Starovoitov end_of_jmp1[-1] = end_of_jmp2 - end_of_jmp1; 1310588a25e9SAlexei Starovoitov /* populate jmp_offset for JNC above to jump to start_of_ldx */ 13114c5de127SAlexei Starovoitov start_of_ldx = prog; 1312588a25e9SAlexei Starovoitov end_of_jmp2[-1] = start_of_ldx - end_of_jmp2; 13134c5de127SAlexei Starovoitov } 13143b2744e6SAlexei Starovoitov emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off); 13153dec541bSAlexei Starovoitov if (BPF_MODE(insn->code) == BPF_PROBE_MEM) { 13163dec541bSAlexei Starovoitov struct exception_table_entry *ex; 1317328aac5eSRavi Bangoria u8 *_insn = image + proglen + (start_of_ldx - temp); 13183dec541bSAlexei Starovoitov s64 delta; 13193dec541bSAlexei Starovoitov 13204c5de127SAlexei Starovoitov /* populate jmp_offset for JMP above */ 13214c5de127SAlexei Starovoitov start_of_ldx[-1] = prog - start_of_ldx; 13224c5de127SAlexei Starovoitov 13233dec541bSAlexei Starovoitov if (!bpf_prog->aux->extable) 13243dec541bSAlexei Starovoitov break; 13253dec541bSAlexei Starovoitov 13263dec541bSAlexei Starovoitov if (excnt >= bpf_prog->aux->num_exentries) { 13273dec541bSAlexei Starovoitov pr_err("ex gen bug\n"); 13283dec541bSAlexei Starovoitov return -EFAULT; 13293dec541bSAlexei Starovoitov } 13303dec541bSAlexei Starovoitov ex = &bpf_prog->aux->extable[excnt++]; 13313dec541bSAlexei Starovoitov 13323dec541bSAlexei Starovoitov delta = _insn - (u8 *)&ex->insn; 13333dec541bSAlexei Starovoitov if (!is_simm32(delta)) { 13343dec541bSAlexei Starovoitov pr_err("extable->insn doesn't fit into 32-bit\n"); 13353dec541bSAlexei Starovoitov return -EFAULT; 13363dec541bSAlexei Starovoitov } 13371022a549SSong Liu /* switch ex to rw buffer for writes */ 13381022a549SSong Liu ex = (void *)rw_image + ((void *)ex - (void *)image); 13391022a549SSong Liu 13403dec541bSAlexei Starovoitov ex->insn = delta; 13413dec541bSAlexei Starovoitov 13424b5305deSPeter Zijlstra ex->data = EX_TYPE_BPF; 13433dec541bSAlexei Starovoitov 13443dec541bSAlexei Starovoitov if (dst_reg > BPF_REG_9) { 13453dec541bSAlexei Starovoitov pr_err("verifier error\n"); 13463dec541bSAlexei Starovoitov return -EFAULT; 13473dec541bSAlexei Starovoitov } 13483dec541bSAlexei Starovoitov /* 13493dec541bSAlexei Starovoitov * Compute size of x86 insn and its target dest x86 register. 13503dec541bSAlexei Starovoitov * ex_handler_bpf() will use lower 8 bits to adjust 13513dec541bSAlexei Starovoitov * pt_regs->ip to jump over this x86 instruction 13523dec541bSAlexei Starovoitov * and upper bits to figure out which pt_regs to zero out. 13533dec541bSAlexei Starovoitov * End result: x86 insn "mov rbx, qword ptr [rax+0x14]" 13543dec541bSAlexei Starovoitov * of 4 bytes will be ignored and rbx will be zero inited. 
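			 *
			 * E.g. (illustrative values): for that 4-byte load into
			 * rbx, ex->fixup below packs 4 into its low byte and
			 * reg2pt_regs[dst_reg] (the pt_regs offset of rbx) into
			 * the upper bits, so the handler can do regs->ip += 4
			 * and clear that pt_regs slot.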
13553dec541bSAlexei Starovoitov */ 1356433956e9SAlexei Starovoitov ex->fixup = (prog - start_of_ldx) | (reg2pt_regs[dst_reg] << 8); 13573dec541bSAlexei Starovoitov } 135862258278SAlexei Starovoitov break; 135962258278SAlexei Starovoitov 136091c960b0SBrendan Jackman case BPF_STX | BPF_ATOMIC | BPF_W: 136191c960b0SBrendan Jackman case BPF_STX | BPF_ATOMIC | BPF_DW: 1362981f94c3SBrendan Jackman if (insn->imm == (BPF_AND | BPF_FETCH) || 1363981f94c3SBrendan Jackman insn->imm == (BPF_OR | BPF_FETCH) || 1364981f94c3SBrendan Jackman insn->imm == (BPF_XOR | BPF_FETCH)) { 1365981f94c3SBrendan Jackman bool is64 = BPF_SIZE(insn->code) == BPF_DW; 1366b29dd96bSBrendan Jackman u32 real_src_reg = src_reg; 1367ced18582SJohan Almbladh u32 real_dst_reg = dst_reg; 1368ced18582SJohan Almbladh u8 *branch_target; 1369981f94c3SBrendan Jackman 1370981f94c3SBrendan Jackman /* 1371981f94c3SBrendan Jackman * Can't be implemented with a single x86 insn. 1372981f94c3SBrendan Jackman * Need to do a CMPXCHG loop. 1373981f94c3SBrendan Jackman */ 1374981f94c3SBrendan Jackman 1375981f94c3SBrendan Jackman /* Will need RAX as a CMPXCHG operand so save R0 */ 1376981f94c3SBrendan Jackman emit_mov_reg(&prog, true, BPF_REG_AX, BPF_REG_0); 1377b29dd96bSBrendan Jackman if (src_reg == BPF_REG_0) 1378b29dd96bSBrendan Jackman real_src_reg = BPF_REG_AX; 1379ced18582SJohan Almbladh if (dst_reg == BPF_REG_0) 1380ced18582SJohan Almbladh real_dst_reg = BPF_REG_AX; 1381b29dd96bSBrendan Jackman 1382981f94c3SBrendan Jackman branch_target = prog; 1383981f94c3SBrendan Jackman /* Load old value */ 1384981f94c3SBrendan Jackman emit_ldx(&prog, BPF_SIZE(insn->code), 1385ced18582SJohan Almbladh BPF_REG_0, real_dst_reg, insn->off); 1386981f94c3SBrendan Jackman /* 1387981f94c3SBrendan Jackman * Perform the (commutative) operation locally, 1388981f94c3SBrendan Jackman * put the result in the AUX_REG. 1389981f94c3SBrendan Jackman */ 1390981f94c3SBrendan Jackman emit_mov_reg(&prog, is64, AUX_REG, BPF_REG_0); 1391b29dd96bSBrendan Jackman maybe_emit_mod(&prog, AUX_REG, real_src_reg, is64); 1392981f94c3SBrendan Jackman EMIT2(simple_alu_opcodes[BPF_OP(insn->imm)], 1393b29dd96bSBrendan Jackman add_2reg(0xC0, AUX_REG, real_src_reg)); 1394981f94c3SBrendan Jackman /* Attempt to swap in new value */ 1395981f94c3SBrendan Jackman err = emit_atomic(&prog, BPF_CMPXCHG, 1396ced18582SJohan Almbladh real_dst_reg, AUX_REG, 1397ced18582SJohan Almbladh insn->off, 1398981f94c3SBrendan Jackman BPF_SIZE(insn->code)); 1399981f94c3SBrendan Jackman if (WARN_ON(err)) 1400981f94c3SBrendan Jackman return err; 1401981f94c3SBrendan Jackman /* 1402981f94c3SBrendan Jackman * ZF tells us whether we won the race. If it's 1403981f94c3SBrendan Jackman * cleared we need to try again. 
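			 *
			 * Loop shape, using BPF_XOR | BPF_FETCH as the example
			 * (labels illustrative):
			 *
			 * .Lretry:
			 *   mov  rax, [real_dst_reg + off]   ; load old value
			 *   mov  r11, rax
			 *   xor  r11, real_src_reg           ; compute new value
			 *   lock cmpxchg [real_dst_reg + off], r11
			 *   jne  .Lretry                     ; lost the race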
1404981f94c3SBrendan Jackman */ 1405981f94c3SBrendan Jackman EMIT2(X86_JNE, -(prog - branch_target) - 2); 1406981f94c3SBrendan Jackman /* Return the pre-modification value */ 1407b29dd96bSBrendan Jackman emit_mov_reg(&prog, is64, real_src_reg, BPF_REG_0); 1408981f94c3SBrendan Jackman /* Restore R0 after clobbering RAX */ 1409981f94c3SBrendan Jackman emit_mov_reg(&prog, true, BPF_REG_0, BPF_REG_AX); 1410981f94c3SBrendan Jackman break; 1411981f94c3SBrendan Jackman } 1412981f94c3SBrendan Jackman 141391c960b0SBrendan Jackman err = emit_atomic(&prog, insn->imm, dst_reg, src_reg, 141491c960b0SBrendan Jackman insn->off, BPF_SIZE(insn->code)); 141591c960b0SBrendan Jackman if (err) 141691c960b0SBrendan Jackman return err; 141762258278SAlexei Starovoitov break; 141862258278SAlexei Starovoitov 141962258278SAlexei Starovoitov /* call */ 142062258278SAlexei Starovoitov case BPF_JMP | BPF_CALL: 1421e430f34eSAlexei Starovoitov func = (u8 *) __bpf_call_base + imm32; 1422ebf7d1f5SMaciej Fijalkowski if (tail_call_reachable) { 1423ff672c67SJakub Sitnicki /* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */ 1424ebf7d1f5SMaciej Fijalkowski EMIT3_off32(0x48, 0x8B, 0x85, 1425ff672c67SJakub Sitnicki -round_up(bpf_prog->aux->stack_depth, 8) - 8); 1426ebf7d1f5SMaciej Fijalkowski if (!imm32 || emit_call(&prog, func, image + addrs[i - 1] + 7)) 1427ebf7d1f5SMaciej Fijalkowski return -EINVAL; 1428ebf7d1f5SMaciej Fijalkowski } else { 14293b2744e6SAlexei Starovoitov if (!imm32 || emit_call(&prog, func, image + addrs[i - 1])) 1430f3c2af7bSAlexei Starovoitov return -EINVAL; 1431ebf7d1f5SMaciej Fijalkowski } 143262258278SAlexei Starovoitov break; 143362258278SAlexei Starovoitov 143471189fa9SAlexei Starovoitov case BPF_JMP | BPF_TAIL_CALL: 1435428d5df1SDaniel Borkmann if (imm32) 1436428d5df1SDaniel Borkmann emit_bpf_tail_call_direct(&bpf_prog->aux->poke_tab[imm32 - 1], 1437dceba081SPeter Zijlstra &prog, image + addrs[i - 1], 1438ebf7d1f5SMaciej Fijalkowski callee_regs_used, 1439dceba081SPeter Zijlstra bpf_prog->aux->stack_depth, 1440dceba081SPeter Zijlstra ctx); 1441428d5df1SDaniel Borkmann else 1442ebf7d1f5SMaciej Fijalkowski emit_bpf_tail_call_indirect(&prog, 1443ebf7d1f5SMaciej Fijalkowski callee_regs_used, 1444dceba081SPeter Zijlstra bpf_prog->aux->stack_depth, 1445dceba081SPeter Zijlstra image + addrs[i - 1], 1446dceba081SPeter Zijlstra ctx); 1447b52f00e6SAlexei Starovoitov break; 1448b52f00e6SAlexei Starovoitov 144962258278SAlexei Starovoitov /* cond jump */ 145062258278SAlexei Starovoitov case BPF_JMP | BPF_JEQ | BPF_X: 145162258278SAlexei Starovoitov case BPF_JMP | BPF_JNE | BPF_X: 145262258278SAlexei Starovoitov case BPF_JMP | BPF_JGT | BPF_X: 145352afc51eSDaniel Borkmann case BPF_JMP | BPF_JLT | BPF_X: 145462258278SAlexei Starovoitov case BPF_JMP | BPF_JGE | BPF_X: 145552afc51eSDaniel Borkmann case BPF_JMP | BPF_JLE | BPF_X: 145662258278SAlexei Starovoitov case BPF_JMP | BPF_JSGT | BPF_X: 145752afc51eSDaniel Borkmann case BPF_JMP | BPF_JSLT | BPF_X: 145862258278SAlexei Starovoitov case BPF_JMP | BPF_JSGE | BPF_X: 145952afc51eSDaniel Borkmann case BPF_JMP | BPF_JSLE | BPF_X: 14603f5d6525SJiong Wang case BPF_JMP32 | BPF_JEQ | BPF_X: 14613f5d6525SJiong Wang case BPF_JMP32 | BPF_JNE | BPF_X: 14623f5d6525SJiong Wang case BPF_JMP32 | BPF_JGT | BPF_X: 14633f5d6525SJiong Wang case BPF_JMP32 | BPF_JLT | BPF_X: 14643f5d6525SJiong Wang case BPF_JMP32 | BPF_JGE | BPF_X: 14653f5d6525SJiong Wang case BPF_JMP32 | BPF_JLE | BPF_X: 14663f5d6525SJiong Wang case BPF_JMP32 | BPF_JSGT | BPF_X: 14673f5d6525SJiong Wang case BPF_JMP32 
| BPF_JSLT | BPF_X: 14683f5d6525SJiong Wang case BPF_JMP32 | BPF_JSGE | BPF_X: 14693f5d6525SJiong Wang case BPF_JMP32 | BPF_JSLE | BPF_X: 1470e430f34eSAlexei Starovoitov /* cmp dst_reg, src_reg */ 147174007cfcSBrendan Jackman maybe_emit_mod(&prog, dst_reg, src_reg, 147274007cfcSBrendan Jackman BPF_CLASS(insn->code) == BPF_JMP); 14733f5d6525SJiong Wang EMIT2(0x39, add_2reg(0xC0, dst_reg, src_reg)); 147462258278SAlexei Starovoitov goto emit_cond_jmp; 147562258278SAlexei Starovoitov 147662258278SAlexei Starovoitov case BPF_JMP | BPF_JSET | BPF_X: 14773f5d6525SJiong Wang case BPF_JMP32 | BPF_JSET | BPF_X: 1478e430f34eSAlexei Starovoitov /* test dst_reg, src_reg */ 147974007cfcSBrendan Jackman maybe_emit_mod(&prog, dst_reg, src_reg, 148074007cfcSBrendan Jackman BPF_CLASS(insn->code) == BPF_JMP); 14813f5d6525SJiong Wang EMIT2(0x85, add_2reg(0xC0, dst_reg, src_reg)); 148262258278SAlexei Starovoitov goto emit_cond_jmp; 148362258278SAlexei Starovoitov 148462258278SAlexei Starovoitov case BPF_JMP | BPF_JSET | BPF_K: 14853f5d6525SJiong Wang case BPF_JMP32 | BPF_JSET | BPF_K: 1486e430f34eSAlexei Starovoitov /* test dst_reg, imm32 */ 14876364d7d7SJie Meng maybe_emit_1mod(&prog, dst_reg, 14886364d7d7SJie Meng BPF_CLASS(insn->code) == BPF_JMP); 1489e430f34eSAlexei Starovoitov EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32); 149062258278SAlexei Starovoitov goto emit_cond_jmp; 149162258278SAlexei Starovoitov 149262258278SAlexei Starovoitov case BPF_JMP | BPF_JEQ | BPF_K: 149362258278SAlexei Starovoitov case BPF_JMP | BPF_JNE | BPF_K: 149462258278SAlexei Starovoitov case BPF_JMP | BPF_JGT | BPF_K: 149552afc51eSDaniel Borkmann case BPF_JMP | BPF_JLT | BPF_K: 149662258278SAlexei Starovoitov case BPF_JMP | BPF_JGE | BPF_K: 149752afc51eSDaniel Borkmann case BPF_JMP | BPF_JLE | BPF_K: 149862258278SAlexei Starovoitov case BPF_JMP | BPF_JSGT | BPF_K: 149952afc51eSDaniel Borkmann case BPF_JMP | BPF_JSLT | BPF_K: 150062258278SAlexei Starovoitov case BPF_JMP | BPF_JSGE | BPF_K: 150152afc51eSDaniel Borkmann case BPF_JMP | BPF_JSLE | BPF_K: 15023f5d6525SJiong Wang case BPF_JMP32 | BPF_JEQ | BPF_K: 15033f5d6525SJiong Wang case BPF_JMP32 | BPF_JNE | BPF_K: 15043f5d6525SJiong Wang case BPF_JMP32 | BPF_JGT | BPF_K: 15053f5d6525SJiong Wang case BPF_JMP32 | BPF_JLT | BPF_K: 15063f5d6525SJiong Wang case BPF_JMP32 | BPF_JGE | BPF_K: 15073f5d6525SJiong Wang case BPF_JMP32 | BPF_JLE | BPF_K: 15083f5d6525SJiong Wang case BPF_JMP32 | BPF_JSGT | BPF_K: 15093f5d6525SJiong Wang case BPF_JMP32 | BPF_JSLT | BPF_K: 15103f5d6525SJiong Wang case BPF_JMP32 | BPF_JSGE | BPF_K: 15113f5d6525SJiong Wang case BPF_JMP32 | BPF_JSLE | BPF_K: 151238f51c07SDaniel Borkmann /* test dst_reg, dst_reg to save one extra byte */ 151338f51c07SDaniel Borkmann if (imm32 == 0) { 151474007cfcSBrendan Jackman maybe_emit_mod(&prog, dst_reg, dst_reg, 151574007cfcSBrendan Jackman BPF_CLASS(insn->code) == BPF_JMP); 151638f51c07SDaniel Borkmann EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg)); 151738f51c07SDaniel Borkmann goto emit_cond_jmp; 151838f51c07SDaniel Borkmann } 151938f51c07SDaniel Borkmann 1520e430f34eSAlexei Starovoitov /* cmp dst_reg, imm8/32 */ 15216364d7d7SJie Meng maybe_emit_1mod(&prog, dst_reg, 15226364d7d7SJie Meng BPF_CLASS(insn->code) == BPF_JMP); 152362258278SAlexei Starovoitov 1524e430f34eSAlexei Starovoitov if (is_imm8(imm32)) 1525e430f34eSAlexei Starovoitov EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32); 152662258278SAlexei Starovoitov else 1527e430f34eSAlexei Starovoitov EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32); 152862258278SAlexei 
Starovoitov 1529a2c7a983SIngo Molnar emit_cond_jmp: /* Convert BPF opcode to x86 */ 153062258278SAlexei Starovoitov switch (BPF_OP(insn->code)) { 153162258278SAlexei Starovoitov case BPF_JEQ: 153262258278SAlexei Starovoitov jmp_cond = X86_JE; 153362258278SAlexei Starovoitov break; 153462258278SAlexei Starovoitov case BPF_JSET: 153562258278SAlexei Starovoitov case BPF_JNE: 153662258278SAlexei Starovoitov jmp_cond = X86_JNE; 153762258278SAlexei Starovoitov break; 153862258278SAlexei Starovoitov case BPF_JGT: 153962258278SAlexei Starovoitov /* GT is unsigned '>', JA in x86 */ 154062258278SAlexei Starovoitov jmp_cond = X86_JA; 154162258278SAlexei Starovoitov break; 154252afc51eSDaniel Borkmann case BPF_JLT: 154352afc51eSDaniel Borkmann /* LT is unsigned '<', JB in x86 */ 154452afc51eSDaniel Borkmann jmp_cond = X86_JB; 154552afc51eSDaniel Borkmann break; 154662258278SAlexei Starovoitov case BPF_JGE: 154762258278SAlexei Starovoitov /* GE is unsigned '>=', JAE in x86 */ 154862258278SAlexei Starovoitov jmp_cond = X86_JAE; 154962258278SAlexei Starovoitov break; 155052afc51eSDaniel Borkmann case BPF_JLE: 155152afc51eSDaniel Borkmann /* LE is unsigned '<=', JBE in x86 */ 155252afc51eSDaniel Borkmann jmp_cond = X86_JBE; 155352afc51eSDaniel Borkmann break; 155462258278SAlexei Starovoitov case BPF_JSGT: 1555a2c7a983SIngo Molnar /* Signed '>', GT in x86 */ 155662258278SAlexei Starovoitov jmp_cond = X86_JG; 155762258278SAlexei Starovoitov break; 155852afc51eSDaniel Borkmann case BPF_JSLT: 1559a2c7a983SIngo Molnar /* Signed '<', LT in x86 */ 156052afc51eSDaniel Borkmann jmp_cond = X86_JL; 156152afc51eSDaniel Borkmann break; 156262258278SAlexei Starovoitov case BPF_JSGE: 1563a2c7a983SIngo Molnar /* Signed '>=', GE in x86 */ 156462258278SAlexei Starovoitov jmp_cond = X86_JGE; 156562258278SAlexei Starovoitov break; 156652afc51eSDaniel Borkmann case BPF_JSLE: 1567a2c7a983SIngo Molnar /* Signed '<=', LE in x86 */ 156852afc51eSDaniel Borkmann jmp_cond = X86_JLE; 156952afc51eSDaniel Borkmann break; 1570a2c7a983SIngo Molnar default: /* to silence GCC warning */ 157162258278SAlexei Starovoitov return -EFAULT; 157262258278SAlexei Starovoitov } 157362258278SAlexei Starovoitov jmp_offset = addrs[i + insn->off] - addrs[i]; 157462258278SAlexei Starovoitov if (is_imm8(jmp_offset)) { 157593c5aeccSGary Lin if (jmp_padding) { 157693c5aeccSGary Lin /* To keep the jmp_offset valid, the extra bytes are 1577d9f6e12fSIngo Molnar * padded before the jump insn, so we subtract the 157893c5aeccSGary Lin * 2 bytes of jmp_cond insn from INSN_SZ_DIFF. 157993c5aeccSGary Lin * 158093c5aeccSGary Lin * If the previous pass already emits an imm8 158193c5aeccSGary Lin * jmp_cond, then this BPF insn won't shrink, so 158293c5aeccSGary Lin * "nops" is 0. 158393c5aeccSGary Lin * 158493c5aeccSGary Lin * On the other hand, if the previous pass emits an 158593c5aeccSGary Lin * imm32 jmp_cond, the extra 4 bytes(*) is padded to 158693c5aeccSGary Lin * keep the image from shrinking further. 158793c5aeccSGary Lin * 158893c5aeccSGary Lin * (*) imm32 jmp_cond is 6 bytes, and imm8 jmp_cond 158993c5aeccSGary Lin * is 2 bytes, so the size difference is 4 bytes. 
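					 *
					 * Concretely (illustrative): a 6-byte
					 * "0F 8x off32" jcc from the previous
					 * pass that now fits as a 2-byte
					 * "7x off8" jcc is preceded by 4 bytes
					 * of NOPs, keeping addrs[i] unchanged.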
159093c5aeccSGary Lin */ 159193c5aeccSGary Lin nops = INSN_SZ_DIFF - 2; 159293c5aeccSGary Lin if (nops != 0 && nops != 4) { 159393c5aeccSGary Lin pr_err("unexpected jmp_cond padding: %d bytes\n", 159493c5aeccSGary Lin nops); 159593c5aeccSGary Lin return -EFAULT; 159693c5aeccSGary Lin } 1597ced50fc4SJiri Olsa emit_nops(&prog, nops); 159893c5aeccSGary Lin } 159962258278SAlexei Starovoitov EMIT2(jmp_cond, jmp_offset); 160062258278SAlexei Starovoitov } else if (is_simm32(jmp_offset)) { 160162258278SAlexei Starovoitov EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset); 16023b58908aSEric Dumazet } else { 160362258278SAlexei Starovoitov pr_err("cond_jmp gen bug %llx\n", jmp_offset); 160462258278SAlexei Starovoitov return -EFAULT; 16053b58908aSEric Dumazet } 160662258278SAlexei Starovoitov 16073b58908aSEric Dumazet break; 160862258278SAlexei Starovoitov 160962258278SAlexei Starovoitov case BPF_JMP | BPF_JA: 16101612a981SGianluca Borello if (insn->off == -1) 16111612a981SGianluca Borello /* -1 jmp instructions will always jump 16121612a981SGianluca Borello * backwards two bytes. Explicitly handling 16131612a981SGianluca Borello * this case avoids wasting too many passes 16141612a981SGianluca Borello * when there are long sequences of replaced 16151612a981SGianluca Borello * dead code. 16161612a981SGianluca Borello */ 16171612a981SGianluca Borello jmp_offset = -2; 16181612a981SGianluca Borello else 161962258278SAlexei Starovoitov jmp_offset = addrs[i + insn->off] - addrs[i]; 16201612a981SGianluca Borello 162193c5aeccSGary Lin if (!jmp_offset) { 162293c5aeccSGary Lin /* 162393c5aeccSGary Lin * If jmp_padding is enabled, the extra nops will 162493c5aeccSGary Lin * be inserted. Otherwise, optimize out nop jumps. 162593c5aeccSGary Lin */ 162693c5aeccSGary Lin if (jmp_padding) { 162793c5aeccSGary Lin /* There are 3 possible conditions. 162893c5aeccSGary Lin * (1) This BPF_JA is already optimized out in 162993c5aeccSGary Lin * the previous run, so there is no need 163093c5aeccSGary Lin * to pad any extra byte (0 byte). 163193c5aeccSGary Lin * (2) The previous pass emits an imm8 jmp, 163293c5aeccSGary Lin * so we pad 2 bytes to match the previous 163393c5aeccSGary Lin * insn size. 163493c5aeccSGary Lin * (3) Similarly, the previous pass emits an 163593c5aeccSGary Lin * imm32 jmp, and 5 bytes is padded. 163693c5aeccSGary Lin */ 163793c5aeccSGary Lin nops = INSN_SZ_DIFF; 163893c5aeccSGary Lin if (nops != 0 && nops != 2 && nops != 5) { 163993c5aeccSGary Lin pr_err("unexpected nop jump padding: %d bytes\n", 164093c5aeccSGary Lin nops); 164193c5aeccSGary Lin return -EFAULT; 164293c5aeccSGary Lin } 1643ced50fc4SJiri Olsa emit_nops(&prog, nops); 164493c5aeccSGary Lin } 164562258278SAlexei Starovoitov break; 164693c5aeccSGary Lin } 164762258278SAlexei Starovoitov emit_jmp: 164862258278SAlexei Starovoitov if (is_imm8(jmp_offset)) { 164993c5aeccSGary Lin if (jmp_padding) { 165093c5aeccSGary Lin /* To avoid breaking jmp_offset, the extra bytes 165193c5aeccSGary Lin * are padded before the actual jmp insn, so 1652d9f6e12fSIngo Molnar * 2 bytes is subtracted from INSN_SZ_DIFF. 165393c5aeccSGary Lin * 165493c5aeccSGary Lin * If the previous pass already emits an imm8 165593c5aeccSGary Lin * jmp, there is nothing to pad (0 byte). 165693c5aeccSGary Lin * 165793c5aeccSGary Lin * If it emits an imm32 jmp (5 bytes) previously 165893c5aeccSGary Lin * and now an imm8 jmp (2 bytes), then we pad 165993c5aeccSGary Lin * (5 - 2 = 3) bytes to stop the image from 166093c5aeccSGary Lin * shrinking further. 
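					 *
					 * I.e. (illustrative): an "E9 off32"
					 * jmp (5 bytes) that now fits as
					 * "EB off8" (2 bytes) gets 3 bytes of
					 * NOPs in front, so the span keeps its
					 * previous size.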
166193c5aeccSGary Lin */ 166293c5aeccSGary Lin nops = INSN_SZ_DIFF - 2; 166393c5aeccSGary Lin if (nops != 0 && nops != 3) { 166493c5aeccSGary Lin pr_err("unexpected jump padding: %d bytes\n", 166593c5aeccSGary Lin nops); 166693c5aeccSGary Lin return -EFAULT; 166793c5aeccSGary Lin } 1668ced50fc4SJiri Olsa emit_nops(&prog, INSN_SZ_DIFF - 2); 166993c5aeccSGary Lin } 167062258278SAlexei Starovoitov EMIT2(0xEB, jmp_offset); 167162258278SAlexei Starovoitov } else if (is_simm32(jmp_offset)) { 167262258278SAlexei Starovoitov EMIT1_off32(0xE9, jmp_offset); 167362258278SAlexei Starovoitov } else { 167462258278SAlexei Starovoitov pr_err("jmp gen bug %llx\n", jmp_offset); 167562258278SAlexei Starovoitov return -EFAULT; 16763b58908aSEric Dumazet } 167762258278SAlexei Starovoitov break; 167862258278SAlexei Starovoitov 167962258278SAlexei Starovoitov case BPF_JMP | BPF_EXIT: 1680769e0de6SAlexei Starovoitov if (seen_exit) { 168162258278SAlexei Starovoitov jmp_offset = ctx->cleanup_addr - addrs[i]; 168262258278SAlexei Starovoitov goto emit_jmp; 168362258278SAlexei Starovoitov } 1684769e0de6SAlexei Starovoitov seen_exit = true; 1685a2c7a983SIngo Molnar /* Update cleanup_addr */ 168662258278SAlexei Starovoitov ctx->cleanup_addr = proglen; 1687ebf7d1f5SMaciej Fijalkowski pop_callee_regs(&prog, callee_regs_used); 168862258278SAlexei Starovoitov EMIT1(0xC9); /* leave */ 168962258278SAlexei Starovoitov EMIT1(0xC3); /* ret */ 16900a14842fSEric Dumazet break; 16910a14842fSEric Dumazet 16920a14842fSEric Dumazet default: 1693a2c7a983SIngo Molnar /* 1694a2c7a983SIngo Molnar * By design x86-64 JIT should support all BPF instructions. 169562258278SAlexei Starovoitov * This error will be seen if new instruction was added 1696a2c7a983SIngo Molnar * to the interpreter, but not to the JIT, or if there is 1697a2c7a983SIngo Molnar * junk in bpf_prog. 169862258278SAlexei Starovoitov */ 169962258278SAlexei Starovoitov pr_err("bpf_jit: unknown opcode %02x\n", insn->code); 1700f3c2af7bSAlexei Starovoitov return -EINVAL; 17010a14842fSEric Dumazet } 170262258278SAlexei Starovoitov 17030a14842fSEric Dumazet ilen = prog - temp; 1704e0ee9c12SAlexei Starovoitov if (ilen > BPF_MAX_INSN_SIZE) { 17059383191dSDaniel Borkmann pr_err("bpf_jit: fatal insn size error\n"); 1706e0ee9c12SAlexei Starovoitov return -EFAULT; 1707e0ee9c12SAlexei Starovoitov } 1708e0ee9c12SAlexei Starovoitov 17090a14842fSEric Dumazet if (image) { 1710e4d4d456SPiotr Krysiuk /* 1711e4d4d456SPiotr Krysiuk * When populating the image, assert that: 1712e4d4d456SPiotr Krysiuk * 1713e4d4d456SPiotr Krysiuk * i) We do not write beyond the allocated space, and 1714e4d4d456SPiotr Krysiuk * ii) addrs[i] did not change from the prior run, in order 1715e4d4d456SPiotr Krysiuk * to validate assumptions made for computing branch 1716e4d4d456SPiotr Krysiuk * displacements. 
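			 *
			 * Put differently: the pass loop is expected to have
			 * converged by now, so proglen + ilen must equal the
			 * addrs[i] recorded on the previous pass; any mismatch
			 * means an instruction changed size and the already
			 * computed branch displacements would be stale.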
1717e4d4d456SPiotr Krysiuk */ 1718e4d4d456SPiotr Krysiuk if (unlikely(proglen + ilen > oldproglen || 1719e4d4d456SPiotr Krysiuk proglen + ilen != addrs[i])) { 17209383191dSDaniel Borkmann pr_err("bpf_jit: fatal error\n"); 1721f3c2af7bSAlexei Starovoitov return -EFAULT; 17220a14842fSEric Dumazet } 17231022a549SSong Liu memcpy(rw_image + proglen, temp, ilen); 17240a14842fSEric Dumazet } 17250a14842fSEric Dumazet proglen += ilen; 17260a14842fSEric Dumazet addrs[i] = proglen; 17270a14842fSEric Dumazet prog = temp; 17280a14842fSEric Dumazet } 17293dec541bSAlexei Starovoitov 17303dec541bSAlexei Starovoitov if (image && excnt != bpf_prog->aux->num_exentries) { 17313dec541bSAlexei Starovoitov pr_err("extable is not populated\n"); 17323dec541bSAlexei Starovoitov return -EFAULT; 17333dec541bSAlexei Starovoitov } 1734f3c2af7bSAlexei Starovoitov return proglen; 1735f3c2af7bSAlexei Starovoitov } 1736f3c2af7bSAlexei Starovoitov 173785d33df3SMartin KaFai Lau static void save_regs(const struct btf_func_model *m, u8 **prog, int nr_args, 1738fec56f58SAlexei Starovoitov int stack_size) 1739fec56f58SAlexei Starovoitov { 1740fec56f58SAlexei Starovoitov int i; 1741fec56f58SAlexei Starovoitov /* Store function arguments to stack. 1742fec56f58SAlexei Starovoitov * For a function that accepts two pointers the sequence will be: 1743fec56f58SAlexei Starovoitov * mov QWORD PTR [rbp-0x10],rdi 1744fec56f58SAlexei Starovoitov * mov QWORD PTR [rbp-0x8],rsi 1745fec56f58SAlexei Starovoitov */ 1746fec56f58SAlexei Starovoitov for (i = 0; i < min(nr_args, 6); i++) 1747fec56f58SAlexei Starovoitov emit_stx(prog, bytes_to_bpf_size(m->arg_size[i]), 1748fec56f58SAlexei Starovoitov BPF_REG_FP, 1749fec56f58SAlexei Starovoitov i == 5 ? X86_REG_R9 : BPF_REG_1 + i, 1750fec56f58SAlexei Starovoitov -(stack_size - i * 8)); 1751fec56f58SAlexei Starovoitov } 1752fec56f58SAlexei Starovoitov 175385d33df3SMartin KaFai Lau static void restore_regs(const struct btf_func_model *m, u8 **prog, int nr_args, 1754fec56f58SAlexei Starovoitov int stack_size) 1755fec56f58SAlexei Starovoitov { 1756fec56f58SAlexei Starovoitov int i; 1757fec56f58SAlexei Starovoitov 1758fec56f58SAlexei Starovoitov /* Restore function arguments from stack. 1759fec56f58SAlexei Starovoitov * For a function that accepts two pointers the sequence will be: 1760fec56f58SAlexei Starovoitov * EMIT4(0x48, 0x8B, 0x7D, 0xF0); mov rdi,QWORD PTR [rbp-0x10] 1761fec56f58SAlexei Starovoitov * EMIT4(0x48, 0x8B, 0x75, 0xF8); mov rsi,QWORD PTR [rbp-0x8] 1762fec56f58SAlexei Starovoitov */ 1763fec56f58SAlexei Starovoitov for (i = 0; i < min(nr_args, 6); i++) 1764fec56f58SAlexei Starovoitov emit_ldx(prog, bytes_to_bpf_size(m->arg_size[i]), 1765fec56f58SAlexei Starovoitov i == 5 ? 
X86_REG_R9 : BPF_REG_1 + i, 1766fec56f58SAlexei Starovoitov BPF_REG_FP, 1767fec56f58SAlexei Starovoitov -(stack_size - i * 8)); 1768fec56f58SAlexei Starovoitov } 1769fec56f58SAlexei Starovoitov 17707e639208SKP Singh static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog, 1771f7e0beafSKui-Feng Lee struct bpf_tramp_link *l, int stack_size, 1772e384c7b7SKui-Feng Lee int run_ctx_off, bool save_ret) 1773fec56f58SAlexei Starovoitov { 177469fd337aSStanislav Fomichev void (*exit)(struct bpf_prog *prog, u64 start, 177569fd337aSStanislav Fomichev struct bpf_tramp_run_ctx *run_ctx) = __bpf_prog_exit; 177669fd337aSStanislav Fomichev u64 (*enter)(struct bpf_prog *prog, 177769fd337aSStanislav Fomichev struct bpf_tramp_run_ctx *run_ctx) = __bpf_prog_enter; 1778fec56f58SAlexei Starovoitov u8 *prog = *pprog; 1779ca06f55bSAlexei Starovoitov u8 *jmp_insn; 1780e384c7b7SKui-Feng Lee int ctx_cookie_off = offsetof(struct bpf_tramp_run_ctx, bpf_cookie); 1781f7e0beafSKui-Feng Lee struct bpf_prog *p = l->link.prog; 17822fcc8241SKui-Feng Lee u64 cookie = l->cookie; 1783fec56f58SAlexei Starovoitov 17842fcc8241SKui-Feng Lee /* mov rdi, cookie */ 17852fcc8241SKui-Feng Lee emit_mov_imm64(&prog, BPF_REG_1, (long) cookie >> 32, (u32) (long) cookie); 1786e384c7b7SKui-Feng Lee 1787e384c7b7SKui-Feng Lee /* Prepare struct bpf_tramp_run_ctx. 1788e384c7b7SKui-Feng Lee * 1789e384c7b7SKui-Feng Lee * bpf_tramp_run_ctx is already preserved by 1790e384c7b7SKui-Feng Lee * arch_prepare_bpf_trampoline(). 1791e384c7b7SKui-Feng Lee * 1792e384c7b7SKui-Feng Lee * mov QWORD PTR [rbp - run_ctx_off + ctx_cookie_off], rdi 1793e384c7b7SKui-Feng Lee */ 1794e384c7b7SKui-Feng Lee emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_1, -run_ctx_off + ctx_cookie_off); 1795e384c7b7SKui-Feng Lee 179669fd337aSStanislav Fomichev if (p->aux->sleepable) { 179769fd337aSStanislav Fomichev enter = __bpf_prog_enter_sleepable; 179869fd337aSStanislav Fomichev exit = __bpf_prog_exit_sleepable; 179969fd337aSStanislav Fomichev } else if (p->expected_attach_type == BPF_LSM_CGROUP) { 180069fd337aSStanislav Fomichev enter = __bpf_prog_enter_lsm_cgroup; 180169fd337aSStanislav Fomichev exit = __bpf_prog_exit_lsm_cgroup; 180269fd337aSStanislav Fomichev } 180369fd337aSStanislav Fomichev 1804ca06f55bSAlexei Starovoitov /* arg1: mov rdi, progs[i] */ 1805ca06f55bSAlexei Starovoitov emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p); 1806e384c7b7SKui-Feng Lee /* arg2: lea rsi, [rbp - ctx_cookie_off] */ 1807e384c7b7SKui-Feng Lee EMIT4(0x48, 0x8D, 0x75, -run_ctx_off); 1808e384c7b7SKui-Feng Lee 180969fd337aSStanislav Fomichev if (emit_call(&prog, enter, prog)) 1810fec56f58SAlexei Starovoitov return -EINVAL; 1811fec56f58SAlexei Starovoitov /* remember prog start time returned by __bpf_prog_enter */ 1812fec56f58SAlexei Starovoitov emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0); 1813fec56f58SAlexei Starovoitov 1814ca06f55bSAlexei Starovoitov /* if (__bpf_prog_enter*(prog) == 0) 1815ca06f55bSAlexei Starovoitov * goto skip_exec_of_prog; 1816ca06f55bSAlexei Starovoitov */ 1817ca06f55bSAlexei Starovoitov EMIT3(0x48, 0x85, 0xC0); /* test rax,rax */ 1818ca06f55bSAlexei Starovoitov /* emit 2 nops that will be replaced with JE insn */ 1819ca06f55bSAlexei Starovoitov jmp_insn = prog; 1820ca06f55bSAlexei Starovoitov emit_nops(&prog, 2); 1821ca06f55bSAlexei Starovoitov 1822fec56f58SAlexei Starovoitov /* arg1: lea rdi, [rbp - stack_size] */ 1823fec56f58SAlexei Starovoitov EMIT4(0x48, 0x8D, 0x7D, -stack_size); 1824fec56f58SAlexei Starovoitov /* arg2: progs[i]->insnsi for 
interpreter */ 18257e639208SKP Singh if (!p->jited) 1826fec56f58SAlexei Starovoitov emit_mov_imm64(&prog, BPF_REG_2, 18277e639208SKP Singh (long) p->insnsi >> 32, 18287e639208SKP Singh (u32) (long) p->insnsi); 1829fec56f58SAlexei Starovoitov /* call JITed bpf program or interpreter */ 18307e639208SKP Singh if (emit_call(&prog, p->bpf_func, prog)) 1831fec56f58SAlexei Starovoitov return -EINVAL; 1832fec56f58SAlexei Starovoitov 1833356ed649SHou Tao /* 1834356ed649SHou Tao * BPF_TRAMP_MODIFY_RETURN trampolines can modify the return 1835ae240823SKP Singh * of the previous call which is then passed on the stack to 1836ae240823SKP Singh * the next BPF program. 1837356ed649SHou Tao * 1838356ed649SHou Tao * BPF_TRAMP_FENTRY trampoline may need to return the return 1839356ed649SHou Tao * value of BPF_PROG_TYPE_STRUCT_OPS prog. 1840ae240823SKP Singh */ 1841356ed649SHou Tao if (save_ret) 1842ae240823SKP Singh emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8); 1843ae240823SKP Singh 1844ca06f55bSAlexei Starovoitov /* replace 2 nops with JE insn, since jmp target is known */ 1845ca06f55bSAlexei Starovoitov jmp_insn[0] = X86_JE; 1846ca06f55bSAlexei Starovoitov jmp_insn[1] = prog - jmp_insn - 2; 1847ca06f55bSAlexei Starovoitov 1848fec56f58SAlexei Starovoitov /* arg1: mov rdi, progs[i] */ 1849f2dd3b39SAlexei Starovoitov emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p); 1850fec56f58SAlexei Starovoitov /* arg2: mov rsi, rbx <- start time in nsec */ 1851fec56f58SAlexei Starovoitov emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6); 1852e384c7b7SKui-Feng Lee /* arg3: lea rdx, [rbp - run_ctx_off] */ 1853e384c7b7SKui-Feng Lee EMIT4(0x48, 0x8D, 0x55, -run_ctx_off); 185469fd337aSStanislav Fomichev if (emit_call(&prog, exit, prog)) 1855fec56f58SAlexei Starovoitov return -EINVAL; 18567e639208SKP Singh 18577e639208SKP Singh *pprog = prog; 18587e639208SKP Singh return 0; 18597e639208SKP Singh } 18607e639208SKP Singh 18617e639208SKP Singh static void emit_align(u8 **pprog, u32 align) 18627e639208SKP Singh { 18637e639208SKP Singh u8 *target, *prog = *pprog; 18647e639208SKP Singh 18657e639208SKP Singh target = PTR_ALIGN(prog, align); 18667e639208SKP Singh if (target != prog) 18677e639208SKP Singh emit_nops(&prog, target - prog); 18687e639208SKP Singh 18697e639208SKP Singh *pprog = prog; 18707e639208SKP Singh } 18717e639208SKP Singh 18727e639208SKP Singh static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond) 18737e639208SKP Singh { 18747e639208SKP Singh u8 *prog = *pprog; 18757e639208SKP Singh s64 offset; 18767e639208SKP Singh 18777e639208SKP Singh offset = func - (ip + 2 + 4); 18787e639208SKP Singh if (!is_simm32(offset)) { 18797e639208SKP Singh pr_err("Target %p is out of range\n", func); 18807e639208SKP Singh return -EINVAL; 18817e639208SKP Singh } 18827e639208SKP Singh EMIT2_off32(0x0F, jmp_cond + 0x10, offset); 18837e639208SKP Singh *pprog = prog; 18847e639208SKP Singh return 0; 18857e639208SKP Singh } 18867e639208SKP Singh 18877e639208SKP Singh static int invoke_bpf(const struct btf_func_model *m, u8 **pprog, 1888f7e0beafSKui-Feng Lee struct bpf_tramp_links *tl, int stack_size, 1889e384c7b7SKui-Feng Lee int run_ctx_off, bool save_ret) 18907e639208SKP Singh { 18917e639208SKP Singh int i; 18927e639208SKP Singh u8 *prog = *pprog; 18937e639208SKP Singh 1894f7e0beafSKui-Feng Lee for (i = 0; i < tl->nr_links; i++) { 1895f7e0beafSKui-Feng Lee if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size, 1896e384c7b7SKui-Feng Lee run_ctx_off, save_ret)) 18977e639208SKP Singh return -EINVAL; 
1898fec56f58SAlexei Starovoitov 	}
1899fec56f58SAlexei Starovoitov 	*pprog = prog;
1900fec56f58SAlexei Starovoitov 	return 0;
1901fec56f58SAlexei Starovoitov }
1902fec56f58SAlexei Starovoitov 
1903ae240823SKP Singh static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
1904f7e0beafSKui-Feng Lee 			      struct bpf_tramp_links *tl, int stack_size,
1905e384c7b7SKui-Feng Lee 			      int run_ctx_off, u8 **branches)
1906ae240823SKP Singh {
1907ae240823SKP Singh 	u8 *prog = *pprog;
1908ced50fc4SJiri Olsa 	int i;
1909ae240823SKP Singh 
1910ae240823SKP Singh 	/* The first fmod_ret program will receive a garbage return value.
1911ae240823SKP Singh 	 * Set this to 0 to avoid confusing the program.
1912ae240823SKP Singh 	 */
1913ae240823SKP Singh 	emit_mov_imm32(&prog, false, BPF_REG_0, 0);
1914ae240823SKP Singh 	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
1915f7e0beafSKui-Feng Lee 	for (i = 0; i < tl->nr_links; i++) {
1916e384c7b7SKui-Feng Lee 		if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size, run_ctx_off, true))
1917ae240823SKP Singh 			return -EINVAL;
1918ae240823SKP Singh 
191913fac1d8SAlexei Starovoitov 		/* mod_ret prog stored return value into [rbp - 8]. Emit:
192013fac1d8SAlexei Starovoitov 		 * if (*(u64 *)(rbp - 8) != 0)
1921ae240823SKP Singh 		 *	goto do_fexit;
1922ae240823SKP Singh 		 */
192313fac1d8SAlexei Starovoitov 		/* cmp QWORD PTR [rbp - 0x8], 0x0 */
192413fac1d8SAlexei Starovoitov 		EMIT4(0x48, 0x83, 0x7d, 0xf8); EMIT1(0x00);
1925ae240823SKP Singh 
1926ae240823SKP Singh 		/* Save the location of the branch and generate 6 nops
1927ae240823SKP Singh 		 * (4 bytes for an offset and 2 bytes for the jump). These nops
1928ae240823SKP Singh 		 * are replaced with a conditional jump once do_fexit (i.e. the
1929ae240823SKP Singh 		 * start of the fexit invocation) is finalized.
1930ae240823SKP Singh 		 */
1931ae240823SKP Singh 		branches[i] = prog;
1932ae240823SKP Singh 		emit_nops(&prog, 4 + 2);
1933ae240823SKP Singh 	}
1934ae240823SKP Singh 
1935ae240823SKP Singh 	*pprog = prog;
1936ae240823SKP Singh 	return 0;
1937ae240823SKP Singh }
1938ae240823SKP Singh 
1939fec56f58SAlexei Starovoitov /* Example:
1940fec56f58SAlexei Starovoitov  * __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
1941fec56f58SAlexei Starovoitov  * its 'struct btf_func_model' will have nr_args=2
1942fec56f58SAlexei Starovoitov  * The assembly code when eth_type_trans is executing after trampoline:
1943fec56f58SAlexei Starovoitov  *
1944fec56f58SAlexei Starovoitov  * push rbp
1945fec56f58SAlexei Starovoitov  * mov rbp, rsp
1946fec56f58SAlexei Starovoitov  * sub rsp, 16                     // space for skb and dev
1947fec56f58SAlexei Starovoitov  * push rbx                        // temp regs to pass start time
1948fec56f58SAlexei Starovoitov  * mov qword ptr [rbp - 16], rdi   // save skb pointer to stack
1949fec56f58SAlexei Starovoitov  * mov qword ptr [rbp - 8], rsi    // save dev pointer to stack
1950fec56f58SAlexei Starovoitov  * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
1951fec56f58SAlexei Starovoitov  * mov rbx, rax                    // remember start time if bpf stats are enabled
1952fec56f58SAlexei Starovoitov  * lea rdi, [rbp - 16]             // R1==ctx of bpf prog
1953fec56f58SAlexei Starovoitov  * call addr_of_jited_FENTRY_prog
1954fec56f58SAlexei Starovoitov  * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off
1955fec56f58SAlexei Starovoitov  * mov rsi, rbx                    // prog start time
1956fec56f58SAlexei Starovoitov  * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
1957fec56f58SAlexei Starovoitov  * mov rdi, qword ptr [rbp - 16]   // restore skb pointer from stack
1958fec56f58SAlexei Starovoitov  * mov rsi, qword ptr [rbp - 8]    // restore dev pointer from stack
1959fec56f58SAlexei Starovoitov  * pop rbx
1960fec56f58SAlexei Starovoitov  * leave
1961fec56f58SAlexei Starovoitov  * ret
1962fec56f58SAlexei Starovoitov  *
1963fec56f58SAlexei Starovoitov  * eth_type_trans has a 5-byte nop at the beginning. These 5 bytes will be
1964fec56f58SAlexei Starovoitov  * replaced with 'call generated_bpf_trampoline'. When it returns,
1965fec56f58SAlexei Starovoitov  * eth_type_trans will continue executing with original skb and dev pointers.
1966fec56f58SAlexei Starovoitov  *
1967fec56f58SAlexei Starovoitov  * The assembly code when eth_type_trans is called from trampoline:
1968fec56f58SAlexei Starovoitov  *
1969fec56f58SAlexei Starovoitov  * push rbp
1970fec56f58SAlexei Starovoitov  * mov rbp, rsp
1971fec56f58SAlexei Starovoitov  * sub rsp, 24                     // space for skb, dev, return value
1972fec56f58SAlexei Starovoitov  * push rbx                        // temp regs to pass start time
1973fec56f58SAlexei Starovoitov  * mov qword ptr [rbp - 24], rdi   // save skb pointer to stack
1974fec56f58SAlexei Starovoitov  * mov qword ptr [rbp - 16], rsi   // save dev pointer to stack
1975fec56f58SAlexei Starovoitov  * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
1976fec56f58SAlexei Starovoitov  * mov rbx, rax                    // remember start time if bpf stats are enabled
1977fec56f58SAlexei Starovoitov  * lea rdi, [rbp - 24]             // R1==ctx of bpf prog
1978fec56f58SAlexei Starovoitov  * call addr_of_jited_FENTRY_prog  // bpf prog can access skb and dev
1979fec56f58SAlexei Starovoitov  * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off
1980fec56f58SAlexei Starovoitov  * mov rsi, rbx                    // prog start time
1981fec56f58SAlexei Starovoitov  * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
1982fec56f58SAlexei Starovoitov  * mov rdi, qword ptr [rbp - 24]   // restore skb pointer from stack
1983fec56f58SAlexei Starovoitov  * mov rsi, qword ptr [rbp - 16]   // restore dev pointer from stack
1984fec56f58SAlexei Starovoitov  * call eth_type_trans+5           // execute body of eth_type_trans
1985fec56f58SAlexei Starovoitov  * mov qword ptr [rbp - 8], rax    // save return value
1986fec56f58SAlexei Starovoitov  * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
1987fec56f58SAlexei Starovoitov  * mov rbx, rax                    // remember start time if bpf stats are enabled
1988fec56f58SAlexei Starovoitov  * lea rdi, [rbp - 24]             // R1==ctx of bpf prog
1989fec56f58SAlexei Starovoitov  * call addr_of_jited_FEXIT_prog   // bpf prog can access skb, dev, return value
1990fec56f58SAlexei Starovoitov  * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off
1991fec56f58SAlexei Starovoitov  * mov rsi, rbx                    // prog start time
1992fec56f58SAlexei Starovoitov  * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
1993fec56f58SAlexei Starovoitov  * mov rax, qword ptr [rbp - 8]    // restore eth_type_trans's return value
1994fec56f58SAlexei Starovoitov  * pop rbx
1995fec56f58SAlexei Starovoitov  * leave
1996fec56f58SAlexei Starovoitov  * add rsp, 8                      // skip eth_type_trans's frame
1997fec56f58SAlexei Starovoitov  * ret                             // return to its caller
1998fec56f58SAlexei Starovoitov  */
1999e21aa341SAlexei Starovoitov int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
200085d33df3SMartin KaFai Lau 				const struct btf_func_model *m, u32 flags,
2001f7e0beafSKui-Feng Lee 				struct bpf_tramp_links *tlinks,
2002fec56f58SAlexei Starovoitov 				void *orig_call)
2003fec56f58SAlexei Starovoitov {
2004ced50fc4SJiri Olsa 	int
ret, i, nr_args = m->nr_args; 2005e384c7b7SKui-Feng Lee int regs_off, ip_off, args_off, stack_size = nr_args * 8, run_ctx_off; 2006f7e0beafSKui-Feng Lee struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY]; 2007f7e0beafSKui-Feng Lee struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT]; 2008f7e0beafSKui-Feng Lee struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN]; 2009ae240823SKP Singh u8 **branches = NULL; 2010fec56f58SAlexei Starovoitov u8 *prog; 2011356ed649SHou Tao bool save_ret; 2012fec56f58SAlexei Starovoitov 2013fec56f58SAlexei Starovoitov /* x86-64 supports up to 6 arguments. 7+ can be added in the future */ 2014fec56f58SAlexei Starovoitov if (nr_args > 6) 2015fec56f58SAlexei Starovoitov return -ENOTSUPP; 2016fec56f58SAlexei Starovoitov 20175edf6a19SJiri Olsa /* Generated trampoline stack layout: 20185edf6a19SJiri Olsa * 20195edf6a19SJiri Olsa * RBP + 8 [ return address ] 20205edf6a19SJiri Olsa * RBP + 0 [ RBP ] 20215edf6a19SJiri Olsa * 20225edf6a19SJiri Olsa * RBP - 8 [ return value ] BPF_TRAMP_F_CALL_ORIG or 20235edf6a19SJiri Olsa * BPF_TRAMP_F_RET_FENTRY_RET flags 20245edf6a19SJiri Olsa * 20255edf6a19SJiri Olsa * [ reg_argN ] always 20265edf6a19SJiri Olsa * [ ... ] 20275edf6a19SJiri Olsa * RBP - regs_off [ reg_arg1 ] program's ctx pointer 20285edf6a19SJiri Olsa * 2029f92c1e18SJiri Olsa * RBP - args_off [ args count ] always 2030f92c1e18SJiri Olsa * 20315edf6a19SJiri Olsa * RBP - ip_off [ traced function ] BPF_TRAMP_F_IP_ARG flag 2032e384c7b7SKui-Feng Lee * 2033e384c7b7SKui-Feng Lee * RBP - run_ctx_off [ bpf_tramp_run_ctx ] 20345edf6a19SJiri Olsa */ 20355edf6a19SJiri Olsa 2036356ed649SHou Tao /* room for return value of orig_call or fentry prog */ 2037356ed649SHou Tao save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET); 2038356ed649SHou Tao if (save_ret) 2039356ed649SHou Tao stack_size += 8; 2040fec56f58SAlexei Starovoitov 20415edf6a19SJiri Olsa regs_off = stack_size; 20425edf6a19SJiri Olsa 2043f92c1e18SJiri Olsa /* args count */ 2044f92c1e18SJiri Olsa stack_size += 8; 2045f92c1e18SJiri Olsa args_off = stack_size; 2046f92c1e18SJiri Olsa 20477e6f3cd8SJiri Olsa if (flags & BPF_TRAMP_F_IP_ARG) 20487e6f3cd8SJiri Olsa stack_size += 8; /* room for IP address argument */ 20497e6f3cd8SJiri Olsa 20505edf6a19SJiri Olsa ip_off = stack_size; 20515edf6a19SJiri Olsa 2052e384c7b7SKui-Feng Lee stack_size += (sizeof(struct bpf_tramp_run_ctx) + 7) & ~0x7; 2053e384c7b7SKui-Feng Lee run_ctx_off = stack_size; 2054e384c7b7SKui-Feng Lee 205558912710SPeter Zijlstra if (flags & BPF_TRAMP_F_SKIP_FRAME) { 2056fec56f58SAlexei Starovoitov /* skip patched call instruction and point orig_call to actual 2057fec56f58SAlexei Starovoitov * body of the kernel function. 
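	 *
	 * Layout sketch, with the sizes used in this file (5-byte
	 * X86_PATCH_SIZE call site, 4-byte endbr64 when IBT is on):
	 *
	 *   [endbr64]                  // skipped when present
	 *   call generated_trampoline  // the patched 5-byte site
	 *   <function body>            // orig_call points here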
2058fec56f58SAlexei Starovoitov */ 205958912710SPeter Zijlstra if (is_endbr(*(u32 *)orig_call)) 206058912710SPeter Zijlstra orig_call += ENDBR_INSN_SIZE; 20614b3da77bSDaniel Borkmann orig_call += X86_PATCH_SIZE; 206258912710SPeter Zijlstra } 2063fec56f58SAlexei Starovoitov 2064fec56f58SAlexei Starovoitov prog = image; 2065fec56f58SAlexei Starovoitov 206658912710SPeter Zijlstra EMIT_ENDBR(); 2067fec56f58SAlexei Starovoitov EMIT1(0x55); /* push rbp */ 2068fec56f58SAlexei Starovoitov EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */ 2069fec56f58SAlexei Starovoitov EMIT4(0x48, 0x83, 0xEC, stack_size); /* sub rsp, stack_size */ 2070fec56f58SAlexei Starovoitov EMIT1(0x53); /* push rbx */ 2071fec56f58SAlexei Starovoitov 2072f92c1e18SJiri Olsa /* Store number of arguments of the traced function: 2073f92c1e18SJiri Olsa * mov rax, nr_args 2074f92c1e18SJiri Olsa * mov QWORD PTR [rbp - args_off], rax 2075f92c1e18SJiri Olsa */ 2076f92c1e18SJiri Olsa emit_mov_imm64(&prog, BPF_REG_0, 0, (u32) nr_args); 2077f92c1e18SJiri Olsa emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -args_off); 2078f92c1e18SJiri Olsa 20797e6f3cd8SJiri Olsa if (flags & BPF_TRAMP_F_IP_ARG) { 20807e6f3cd8SJiri Olsa /* Store IP address of the traced function: 20817e6f3cd8SJiri Olsa * mov rax, QWORD PTR [rbp + 8] 20827e6f3cd8SJiri Olsa * sub rax, X86_PATCH_SIZE 20835edf6a19SJiri Olsa * mov QWORD PTR [rbp - ip_off], rax 20847e6f3cd8SJiri Olsa */ 20857e6f3cd8SJiri Olsa emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, 8); 20867e6f3cd8SJiri Olsa EMIT4(0x48, 0x83, 0xe8, X86_PATCH_SIZE); 20875edf6a19SJiri Olsa emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -ip_off); 20887e6f3cd8SJiri Olsa } 20897e6f3cd8SJiri Olsa 20905edf6a19SJiri Olsa save_regs(m, &prog, nr_args, regs_off); 2091fec56f58SAlexei Starovoitov 2092e21aa341SAlexei Starovoitov if (flags & BPF_TRAMP_F_CALL_ORIG) { 2093e21aa341SAlexei Starovoitov /* arg1: mov rdi, im */ 2094e21aa341SAlexei Starovoitov emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im); 2095e21aa341SAlexei Starovoitov if (emit_call(&prog, __bpf_tramp_enter, prog)) { 2096e21aa341SAlexei Starovoitov ret = -EINVAL; 2097e21aa341SAlexei Starovoitov goto cleanup; 2098e21aa341SAlexei Starovoitov } 2099e21aa341SAlexei Starovoitov } 2100e21aa341SAlexei Starovoitov 2101f7e0beafSKui-Feng Lee if (fentry->nr_links) 2102e384c7b7SKui-Feng Lee if (invoke_bpf(m, &prog, fentry, regs_off, run_ctx_off, 2103356ed649SHou Tao flags & BPF_TRAMP_F_RET_FENTRY_RET)) 2104fec56f58SAlexei Starovoitov return -EINVAL; 2105fec56f58SAlexei Starovoitov 2106f7e0beafSKui-Feng Lee if (fmod_ret->nr_links) { 2107f7e0beafSKui-Feng Lee branches = kcalloc(fmod_ret->nr_links, sizeof(u8 *), 2108ae240823SKP Singh GFP_KERNEL); 2109ae240823SKP Singh if (!branches) 2110ae240823SKP Singh return -ENOMEM; 2111ae240823SKP Singh 21125edf6a19SJiri Olsa if (invoke_bpf_mod_ret(m, &prog, fmod_ret, regs_off, 2113e384c7b7SKui-Feng Lee run_ctx_off, branches)) { 2114ae240823SKP Singh ret = -EINVAL; 2115ae240823SKP Singh goto cleanup; 2116ae240823SKP Singh } 2117ae240823SKP Singh } 2118ae240823SKP Singh 2119fec56f58SAlexei Starovoitov if (flags & BPF_TRAMP_F_CALL_ORIG) { 21205edf6a19SJiri Olsa restore_regs(m, &prog, nr_args, regs_off); 2121fec56f58SAlexei Starovoitov 2122fec56f58SAlexei Starovoitov /* call original function */ 2123ae240823SKP Singh if (emit_call(&prog, orig_call, prog)) { 2124ae240823SKP Singh ret = -EINVAL; 2125ae240823SKP Singh goto cleanup; 2126ae240823SKP Singh } 2127fec56f58SAlexei Starovoitov /* remember return value in a stack for bpf prog to access 
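		 * (the save_ret slot at [rbp - 8]; it is loaded back into
		 * rax just before the epilogue when save_ret is set)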
*/ 2128fec56f58SAlexei Starovoitov emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8); 2129e21aa341SAlexei Starovoitov im->ip_after_call = prog; 2130b1f480bcSIngo Molnar memcpy(prog, x86_nops[5], X86_PATCH_SIZE); 2131b9082970SStanislav Fomichev prog += X86_PATCH_SIZE; 2132fec56f58SAlexei Starovoitov } 2133fec56f58SAlexei Starovoitov 2134f7e0beafSKui-Feng Lee if (fmod_ret->nr_links) { 2135ae240823SKP Singh /* From Intel 64 and IA-32 Architectures Optimization 2136ae240823SKP Singh * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler 2137ae240823SKP Singh * Coding Rule 11: All branch targets should be 16-byte 2138ae240823SKP Singh * aligned. 2139ae240823SKP Singh */ 2140ae240823SKP Singh emit_align(&prog, 16); 2141ae240823SKP Singh /* Update the branches saved in invoke_bpf_mod_ret with the 2142ae240823SKP Singh * aligned address of do_fexit. 2143ae240823SKP Singh */ 2144f7e0beafSKui-Feng Lee for (i = 0; i < fmod_ret->nr_links; i++) 2145ae240823SKP Singh emit_cond_near_jump(&branches[i], prog, branches[i], 2146ae240823SKP Singh X86_JNE); 2147ae240823SKP Singh } 2148ae240823SKP Singh 2149f7e0beafSKui-Feng Lee if (fexit->nr_links) 2150e384c7b7SKui-Feng Lee if (invoke_bpf(m, &prog, fexit, regs_off, run_ctx_off, false)) { 2151ae240823SKP Singh ret = -EINVAL; 2152ae240823SKP Singh goto cleanup; 2153ae240823SKP Singh } 2154fec56f58SAlexei Starovoitov 2155fec56f58SAlexei Starovoitov if (flags & BPF_TRAMP_F_RESTORE_REGS) 21565edf6a19SJiri Olsa restore_regs(m, &prog, nr_args, regs_off); 2157fec56f58SAlexei Starovoitov 2158ae240823SKP Singh /* This needs to be done regardless. If there were fmod_ret programs, 2159ae240823SKP Singh * the return value is only updated on the stack and still needs to be 2160ae240823SKP Singh * restored to R0. 2161ae240823SKP Singh */ 2162e21aa341SAlexei Starovoitov if (flags & BPF_TRAMP_F_CALL_ORIG) { 2163e21aa341SAlexei Starovoitov im->ip_epilogue = prog; 2164e21aa341SAlexei Starovoitov /* arg1: mov rdi, im */ 2165e21aa341SAlexei Starovoitov emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im); 2166e21aa341SAlexei Starovoitov if (emit_call(&prog, __bpf_tramp_exit, prog)) { 2167e21aa341SAlexei Starovoitov ret = -EINVAL; 2168e21aa341SAlexei Starovoitov goto cleanup; 2169e21aa341SAlexei Starovoitov } 2170e21aa341SAlexei Starovoitov } 2171356ed649SHou Tao /* restore return value of orig_call or fentry prog back into RAX */ 2172356ed649SHou Tao if (save_ret) 2173356ed649SHou Tao emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8); 2174fec56f58SAlexei Starovoitov 2175fec56f58SAlexei Starovoitov EMIT1(0x5B); /* pop rbx */ 2176fec56f58SAlexei Starovoitov EMIT1(0xC9); /* leave */ 2177fec56f58SAlexei Starovoitov if (flags & BPF_TRAMP_F_SKIP_FRAME) 2178fec56f58SAlexei Starovoitov /* skip our return address and return to parent */ 2179fec56f58SAlexei Starovoitov EMIT4(0x48, 0x83, 0xC4, 8); /* add rsp, 8 */ 2180fec56f58SAlexei Starovoitov EMIT1(0xC3); /* ret */ 218185d33df3SMartin KaFai Lau /* Make sure the trampoline generation logic doesn't overflow */ 2182ae240823SKP Singh if (WARN_ON_ONCE(prog > (u8 *)image_end - BPF_INSN_SAFETY)) { 2183ae240823SKP Singh ret = -EFAULT; 2184ae240823SKP Singh goto cleanup; 2185ae240823SKP Singh } 2186ae240823SKP Singh ret = prog - (u8 *)image; 2187ae240823SKP Singh 2188ae240823SKP Singh cleanup: 2189ae240823SKP Singh kfree(branches); 2190ae240823SKP Singh return ret; 2191fec56f58SAlexei Starovoitov } 2192fec56f58SAlexei Starovoitov 219375ccbef6SBjörn Töpel static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs) 
219475ccbef6SBjörn Töpel { 21957e639208SKP Singh u8 *jg_reloc, *prog = *pprog; 2196ced50fc4SJiri Olsa int pivot, err, jg_bytes = 1; 219775ccbef6SBjörn Töpel s64 jg_offset; 219875ccbef6SBjörn Töpel 219975ccbef6SBjörn Töpel if (a == b) { 220075ccbef6SBjörn Töpel /* Leaf node of recursion, i.e. not a range of indices 220175ccbef6SBjörn Töpel * anymore. 220275ccbef6SBjörn Töpel */ 220375ccbef6SBjörn Töpel EMIT1(add_1mod(0x48, BPF_REG_3)); /* cmp rdx,func */ 220475ccbef6SBjörn Töpel if (!is_simm32(progs[a])) 220575ccbef6SBjörn Töpel return -1; 220675ccbef6SBjörn Töpel EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3), 220775ccbef6SBjörn Töpel progs[a]); 220875ccbef6SBjörn Töpel err = emit_cond_near_jump(&prog, /* je func */ 220975ccbef6SBjörn Töpel (void *)progs[a], prog, 221075ccbef6SBjörn Töpel X86_JE); 221175ccbef6SBjörn Töpel if (err) 221275ccbef6SBjörn Töpel return err; 221375ccbef6SBjörn Töpel 221487c87ecdSPeter Zijlstra emit_indirect_jump(&prog, 2 /* rdx */, prog); 221575ccbef6SBjörn Töpel 221675ccbef6SBjörn Töpel *pprog = prog; 221775ccbef6SBjörn Töpel return 0; 221875ccbef6SBjörn Töpel } 221975ccbef6SBjörn Töpel 222075ccbef6SBjörn Töpel /* Not a leaf node, so we pivot, and recursively descend into 222175ccbef6SBjörn Töpel * the lower and upper ranges. 222275ccbef6SBjörn Töpel */ 222375ccbef6SBjörn Töpel pivot = (b - a) / 2; 222475ccbef6SBjörn Töpel EMIT1(add_1mod(0x48, BPF_REG_3)); /* cmp rdx,func */ 222575ccbef6SBjörn Töpel if (!is_simm32(progs[a + pivot])) 222675ccbef6SBjörn Töpel return -1; 222775ccbef6SBjörn Töpel EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3), progs[a + pivot]); 222875ccbef6SBjörn Töpel 222975ccbef6SBjörn Töpel if (pivot > 2) { /* jg upper_part */ 223075ccbef6SBjörn Töpel /* Require near jump. */ 223175ccbef6SBjörn Töpel jg_bytes = 4; 223275ccbef6SBjörn Töpel EMIT2_off32(0x0F, X86_JG + 0x10, 0); 223375ccbef6SBjörn Töpel } else { 223475ccbef6SBjörn Töpel EMIT2(X86_JG, 0); 223575ccbef6SBjörn Töpel } 223675ccbef6SBjörn Töpel jg_reloc = prog; 223775ccbef6SBjörn Töpel 223875ccbef6SBjörn Töpel err = emit_bpf_dispatcher(&prog, a, a + pivot, /* emit lower_part */ 223975ccbef6SBjörn Töpel progs); 224075ccbef6SBjörn Töpel if (err) 224175ccbef6SBjörn Töpel return err; 224275ccbef6SBjörn Töpel 2243116eb788SBjörn Töpel /* From Intel 64 and IA-32 Architectures Optimization 2244116eb788SBjörn Töpel * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler 2245116eb788SBjörn Töpel * Coding Rule 11: All branch targets should be 16-byte 2246116eb788SBjörn Töpel * aligned. 
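	 *
	 * (Editor's hedged illustration of the code this recursion emits
	 * for two sorted targets f0 < f1, with the candidate address in
	 * rdx (BPF_REG_3); under retpolines the indirect jump below goes
	 * through a thunk rather than being a literal "jmp rdx":
	 *
	 *	cmp rdx, f0		; pivot of [f0, f1]
	 *	jg  1f			; greater: search the upper half
	 *	cmp rdx, f0		; leaf, a == b (its own compare)
	 *	je  f0			; direct, predictable jump
	 *	jmp rdx			; fallback indirect jump
	 *	.align 16		; this emit_align() call
	 * 1:	cmp rdx, f1		; leaf, a == b
	 *	je  f1
	 *	jmp rdx
	 * )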
2247116eb788SBjörn Töpel */ 22487e639208SKP Singh emit_align(&prog, 16); 224975ccbef6SBjörn Töpel jg_offset = prog - jg_reloc; 225075ccbef6SBjörn Töpel emit_code(jg_reloc - jg_bytes, jg_offset, jg_bytes); 225175ccbef6SBjörn Töpel 225275ccbef6SBjörn Töpel err = emit_bpf_dispatcher(&prog, a + pivot + 1, /* emit upper_part */ 225375ccbef6SBjörn Töpel b, progs); 225475ccbef6SBjörn Töpel if (err) 225575ccbef6SBjörn Töpel return err; 225675ccbef6SBjörn Töpel 225775ccbef6SBjörn Töpel *pprog = prog; 225875ccbef6SBjörn Töpel return 0; 225975ccbef6SBjörn Töpel } 226075ccbef6SBjörn Töpel 226175ccbef6SBjörn Töpel static int cmp_ips(const void *a, const void *b) 226275ccbef6SBjörn Töpel { 226375ccbef6SBjörn Töpel const s64 *ipa = a; 226475ccbef6SBjörn Töpel const s64 *ipb = b; 226575ccbef6SBjörn Töpel 226675ccbef6SBjörn Töpel if (*ipa > *ipb) 226775ccbef6SBjörn Töpel return 1; 226875ccbef6SBjörn Töpel if (*ipa < *ipb) 226975ccbef6SBjörn Töpel return -1; 227075ccbef6SBjörn Töpel return 0; 227175ccbef6SBjörn Töpel } 227275ccbef6SBjörn Töpel 227375ccbef6SBjörn Töpel int arch_prepare_bpf_dispatcher(void *image, s64 *funcs, int num_funcs) 227475ccbef6SBjörn Töpel { 227575ccbef6SBjörn Töpel u8 *prog = image; 227675ccbef6SBjörn Töpel 227775ccbef6SBjörn Töpel sort(funcs, num_funcs, sizeof(funcs[0]), cmp_ips, NULL); 227875ccbef6SBjörn Töpel return emit_bpf_dispatcher(&prog, 0, num_funcs - 1, funcs); 227975ccbef6SBjörn Töpel } 228075ccbef6SBjörn Töpel 22811c2a088aSAlexei Starovoitov struct x64_jit_data { 22821022a549SSong Liu struct bpf_binary_header *rw_header; 22831c2a088aSAlexei Starovoitov struct bpf_binary_header *header; 22841c2a088aSAlexei Starovoitov int *addrs; 22851c2a088aSAlexei Starovoitov u8 *image; 22861c2a088aSAlexei Starovoitov int proglen; 22871c2a088aSAlexei Starovoitov struct jit_context ctx; 22881c2a088aSAlexei Starovoitov }; 22891c2a088aSAlexei Starovoitov 229093c5aeccSGary Lin #define MAX_PASSES 20 229193c5aeccSGary Lin #define PADDING_PASSES (MAX_PASSES - 5) 229293c5aeccSGary Lin 2293d1c55ab5SDaniel Borkmann struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) 229462258278SAlexei Starovoitov { 22951022a549SSong Liu struct bpf_binary_header *rw_header = NULL; 2296f3c2af7bSAlexei Starovoitov struct bpf_binary_header *header = NULL; 2297959a7579SDaniel Borkmann struct bpf_prog *tmp, *orig_prog = prog; 22981c2a088aSAlexei Starovoitov struct x64_jit_data *jit_data; 2299f3c2af7bSAlexei Starovoitov int proglen, oldproglen = 0; 2300f3c2af7bSAlexei Starovoitov struct jit_context ctx = {}; 2301959a7579SDaniel Borkmann bool tmp_blinded = false; 23021c2a088aSAlexei Starovoitov bool extra_pass = false; 230393c5aeccSGary Lin bool padding = false; 23041022a549SSong Liu u8 *rw_image = NULL; 2305f3c2af7bSAlexei Starovoitov u8 *image = NULL; 2306f3c2af7bSAlexei Starovoitov int *addrs; 2307f3c2af7bSAlexei Starovoitov int pass; 2308f3c2af7bSAlexei Starovoitov int i; 2309f3c2af7bSAlexei Starovoitov 231060b58afcSAlexei Starovoitov if (!prog->jit_requested) 2311959a7579SDaniel Borkmann return orig_prog; 2312959a7579SDaniel Borkmann 2313959a7579SDaniel Borkmann tmp = bpf_jit_blind_constants(prog); 2314a2c7a983SIngo Molnar /* 2315a2c7a983SIngo Molnar * If blinding was requested and we failed during blinding, 2316959a7579SDaniel Borkmann * we must fall back to the interpreter. 
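	 *
	 * (Editor's hedged aside on what blinding means here, based on the
	 * generic bpf_jit_blind_constants() helper rather than anything in
	 * this file: each eligible immediate is rewritten against a
	 * per-program random value rnd, so user-controlled constants never
	 * appear verbatim in the executable image. Roughly:
	 *
	 *	BPF_ALU64_IMM(BPF_ADD, R1, imm)
	 * becomes
	 *	BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, rnd ^ imm)
	 *	BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, rnd)
	 *	BPF_ALU64_REG(BPF_ADD, R1, BPF_REG_AX)
	 * )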
2317959a7579SDaniel Borkmann */ 2318959a7579SDaniel Borkmann if (IS_ERR(tmp)) 2319959a7579SDaniel Borkmann return orig_prog; 2320959a7579SDaniel Borkmann if (tmp != prog) { 2321959a7579SDaniel Borkmann tmp_blinded = true; 2322959a7579SDaniel Borkmann prog = tmp; 2323959a7579SDaniel Borkmann } 2324f3c2af7bSAlexei Starovoitov 23251c2a088aSAlexei Starovoitov jit_data = prog->aux->jit_data; 23261c2a088aSAlexei Starovoitov if (!jit_data) { 23271c2a088aSAlexei Starovoitov jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL); 23281c2a088aSAlexei Starovoitov if (!jit_data) { 23291c2a088aSAlexei Starovoitov prog = orig_prog; 23301c2a088aSAlexei Starovoitov goto out; 23311c2a088aSAlexei Starovoitov } 23321c2a088aSAlexei Starovoitov prog->aux->jit_data = jit_data; 23331c2a088aSAlexei Starovoitov } 23341c2a088aSAlexei Starovoitov addrs = jit_data->addrs; 23351c2a088aSAlexei Starovoitov if (addrs) { 23361c2a088aSAlexei Starovoitov ctx = jit_data->ctx; 23371c2a088aSAlexei Starovoitov oldproglen = jit_data->proglen; 23381c2a088aSAlexei Starovoitov image = jit_data->image; 23391c2a088aSAlexei Starovoitov header = jit_data->header; 23401022a549SSong Liu rw_header = jit_data->rw_header; 23411022a549SSong Liu rw_image = (void *)rw_header + ((void *)image - (void *)header); 23421c2a088aSAlexei Starovoitov extra_pass = true; 234393c5aeccSGary Lin padding = true; 23441c2a088aSAlexei Starovoitov goto skip_init_addrs; 23451c2a088aSAlexei Starovoitov } 2346de920fc6SYonghong Song addrs = kvmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL); 2347959a7579SDaniel Borkmann if (!addrs) { 2348959a7579SDaniel Borkmann prog = orig_prog; 23491c2a088aSAlexei Starovoitov goto out_addrs; 2350959a7579SDaniel Borkmann } 2351f3c2af7bSAlexei Starovoitov 2352a2c7a983SIngo Molnar /* 2353a2c7a983SIngo Molnar * Before first pass, make a rough estimation of addrs[] 2354a2c7a983SIngo Molnar * each BPF instruction is translated to less than 64 bytes 2355f3c2af7bSAlexei Starovoitov */ 23567c2e988fSAlexei Starovoitov for (proglen = 0, i = 0; i <= prog->len; i++) { 2357f3c2af7bSAlexei Starovoitov proglen += 64; 2358f3c2af7bSAlexei Starovoitov addrs[i] = proglen; 2359f3c2af7bSAlexei Starovoitov } 2360f3c2af7bSAlexei Starovoitov ctx.cleanup_addr = proglen; 23611c2a088aSAlexei Starovoitov skip_init_addrs: 2362f3c2af7bSAlexei Starovoitov 2363a2c7a983SIngo Molnar /* 2364a2c7a983SIngo Molnar * JITed image shrinks with every pass and the loop iterates 2365a2c7a983SIngo Molnar * until the image stops shrinking. Very large BPF programs 23663f7352bfSAlexei Starovoitov * may converge on the last pass. In such case do one more 2367a2c7a983SIngo Molnar * pass to emit the final image. 
23683f7352bfSAlexei Starovoitov */ 236993c5aeccSGary Lin for (pass = 0; pass < MAX_PASSES || image; pass++) { 237093c5aeccSGary Lin if (!padding && pass >= PADDING_PASSES) 237193c5aeccSGary Lin padding = true; 23721022a549SSong Liu proglen = do_jit(prog, addrs, image, rw_image, oldproglen, &ctx, padding); 2373f3c2af7bSAlexei Starovoitov if (proglen <= 0) { 23743aab8884SDaniel Borkmann out_image: 2375f3c2af7bSAlexei Starovoitov image = NULL; 2376676b2daaSSong Liu if (header) { 2377676b2daaSSong Liu bpf_arch_text_copy(&header->size, &rw_header->size, 2378676b2daaSSong Liu sizeof(rw_header->size)); 23791022a549SSong Liu bpf_jit_binary_pack_free(header, rw_header); 2380676b2daaSSong Liu } 238173e14451SHou Tao /* Fall back to interpreter mode */ 2382959a7579SDaniel Borkmann prog = orig_prog; 238373e14451SHou Tao if (extra_pass) { 238473e14451SHou Tao prog->bpf_func = NULL; 238573e14451SHou Tao prog->jited = 0; 238673e14451SHou Tao prog->jited_len = 0; 238773e14451SHou Tao } 2388959a7579SDaniel Borkmann goto out_addrs; 2389f3c2af7bSAlexei Starovoitov } 23900a14842fSEric Dumazet if (image) { 2391e0ee9c12SAlexei Starovoitov if (proglen != oldproglen) { 2392f3c2af7bSAlexei Starovoitov pr_err("bpf_jit: proglen=%d != oldproglen=%d\n", 2393f3c2af7bSAlexei Starovoitov proglen, oldproglen); 23943aab8884SDaniel Borkmann goto out_image; 2395e0ee9c12SAlexei Starovoitov } 23960a14842fSEric Dumazet break; 23970a14842fSEric Dumazet } 23980a14842fSEric Dumazet if (proglen == oldproglen) { 23993dec541bSAlexei Starovoitov /* 24003dec541bSAlexei Starovoitov * The number of entries in extable is the number of BPF_LDX 24013dec541bSAlexei Starovoitov * insns that access kernel memory via "pointer to BTF type". 24023dec541bSAlexei Starovoitov * The verifier changed their opcode from LDX|MEM|size 24033dec541bSAlexei Starovoitov * to LDX|PROBE_MEM|size to make JITing easier. 24043dec541bSAlexei Starovoitov */ 24053dec541bSAlexei Starovoitov u32 align = __alignof__(struct exception_table_entry); 24063dec541bSAlexei Starovoitov u32 extable_size = prog->aux->num_exentries * 24073dec541bSAlexei Starovoitov sizeof(struct exception_table_entry); 24083dec541bSAlexei Starovoitov 24093dec541bSAlexei Starovoitov /* allocate module memory for x86 insns and extable */ 24101022a549SSong Liu header = bpf_jit_binary_pack_alloc(roundup(proglen, align) + extable_size, 24111022a549SSong Liu &image, align, &rw_header, &rw_image, 24121022a549SSong Liu jit_fill_hole); 2413959a7579SDaniel Borkmann if (!header) { 2414959a7579SDaniel Borkmann prog = orig_prog; 2415959a7579SDaniel Borkmann goto out_addrs; 2416959a7579SDaniel Borkmann } 24173dec541bSAlexei Starovoitov prog->aux->extable = (void *) image + roundup(proglen, align); 24180a14842fSEric Dumazet } 24190a14842fSEric Dumazet oldproglen = proglen; 24206007b080SDaniel Borkmann cond_resched(); 24210a14842fSEric Dumazet } 242279617801SDaniel Borkmann 24230a14842fSEric Dumazet if (bpf_jit_enable > 1) 2424485d6511SDaniel Borkmann bpf_jit_dump(prog->len, proglen, pass + 1, image); 24250a14842fSEric Dumazet 24260a14842fSEric Dumazet if (image) { 24271c2a088aSAlexei Starovoitov if (!prog->is_func || extra_pass) { 24281022a549SSong Liu /* 24291022a549SSong Liu * bpf_jit_binary_pack_finalize fails in two scenarios: 24301022a549SSong Liu * 1) header is not pointing to proper module memory; 24311022a549SSong Liu * 2) the arch doesn't support bpf_arch_text_copy(). 24321022a549SSong Liu * 2433f95f768fSSong Liu * Both cases are serious bugs and justify WARN_ON. 
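			 *
			 * (Editor's hedged recap of the rw/rox split used here:
			 * bpf_jit_binary_pack_alloc() returns two mappings with
			 * the same layout, rw_header/rw_image in ordinary
			 * writable memory that do_jit() writes into, and
			 * header/image in the read-only+executable region;
			 * finalize copies the former into the latter via
			 * bpf_arch_text_copy() and frees the writable copy.)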
24341022a549SSong Liu */ 2435f95f768fSSong Liu if (WARN_ON(bpf_jit_binary_pack_finalize(prog, header, rw_header))) { 243673e14451SHou Tao /* header has been freed */ 243773e14451SHou Tao header = NULL; 243873e14451SHou Tao goto out_image; 2439f95f768fSSong Liu } 2440f95f768fSSong Liu 2441428d5df1SDaniel Borkmann bpf_tail_call_direct_fixup(prog); 24421c2a088aSAlexei Starovoitov } else { 24431c2a088aSAlexei Starovoitov jit_data->addrs = addrs; 24441c2a088aSAlexei Starovoitov jit_data->ctx = ctx; 24451c2a088aSAlexei Starovoitov jit_data->proglen = proglen; 24461c2a088aSAlexei Starovoitov jit_data->image = image; 24471c2a088aSAlexei Starovoitov jit_data->header = header; 24481022a549SSong Liu jit_data->rw_header = rw_header; 24491c2a088aSAlexei Starovoitov } 2450f3c2af7bSAlexei Starovoitov prog->bpf_func = (void *)image; 2451a91263d5SDaniel Borkmann prog->jited = 1; 2452783d28ddSMartin KaFai Lau prog->jited_len = proglen; 24539d5ecb09SDaniel Borkmann } else { 24549d5ecb09SDaniel Borkmann prog = orig_prog; 24550a14842fSEric Dumazet } 2456959a7579SDaniel Borkmann 245739f56ca9SDaniel Borkmann if (!image || !prog->is_func || extra_pass) { 2458c454a46bSMartin KaFai Lau if (image) 24597c2e988fSAlexei Starovoitov bpf_prog_fill_jited_linfo(prog, addrs + 1); 2460959a7579SDaniel Borkmann out_addrs: 2461de920fc6SYonghong Song kvfree(addrs); 24621c2a088aSAlexei Starovoitov kfree(jit_data); 24631c2a088aSAlexei Starovoitov prog->aux->jit_data = NULL; 24641c2a088aSAlexei Starovoitov } 2465959a7579SDaniel Borkmann out: 2466959a7579SDaniel Borkmann if (tmp_blinded) 2467959a7579SDaniel Borkmann bpf_jit_prog_release_other(prog, prog == orig_prog ? 2468959a7579SDaniel Borkmann tmp : orig_prog); 2469d1c55ab5SDaniel Borkmann return prog; 24700a14842fSEric Dumazet } 2471e6ac2450SMartin KaFai Lau 2472e6ac2450SMartin KaFai Lau bool bpf_jit_supports_kfunc_call(void) 2473e6ac2450SMartin KaFai Lau { 2474e6ac2450SMartin KaFai Lau return true; 2475e6ac2450SMartin KaFai Lau } 2476ebc1415dSSong Liu 2477ebc1415dSSong Liu void *bpf_arch_text_copy(void *dst, void *src, size_t len) 2478ebc1415dSSong Liu { 2479ebc1415dSSong Liu if (text_poke_copy(dst, src, len) == NULL) 2480ebc1415dSSong Liu return ERR_PTR(-EINVAL); 2481ebc1415dSSong Liu return dst; 2482ebc1415dSSong Liu } 248395acd881STony Ambardar 248495acd881STony Ambardar /* Indicate the JIT backend supports mixing bpf2bpf and tailcalls. */ 248595acd881STony Ambardar bool bpf_jit_supports_subprog_tailcalls(void) 248695acd881STony Ambardar { 248795acd881STony Ambardar return true; 248895acd881STony Ambardar } 2489*1d5f82d9SSong Liu 2490*1d5f82d9SSong Liu void bpf_jit_free(struct bpf_prog *prog) 2491*1d5f82d9SSong Liu { 2492*1d5f82d9SSong Liu if (prog->jited) { 2493*1d5f82d9SSong Liu struct x64_jit_data *jit_data = prog->aux->jit_data; 2494*1d5f82d9SSong Liu struct bpf_binary_header *hdr; 2495*1d5f82d9SSong Liu 2496*1d5f82d9SSong Liu /* 2497*1d5f82d9SSong Liu * If we fail the final pass of JIT (from jit_subprogs), 2498*1d5f82d9SSong Liu * the program may not be finalized yet. Call finalize here 2499*1d5f82d9SSong Liu * before freeing it. 
2500*1d5f82d9SSong Liu */ 2501*1d5f82d9SSong Liu if (jit_data) { 2502*1d5f82d9SSong Liu bpf_jit_binary_pack_finalize(prog, jit_data->header, 2503*1d5f82d9SSong Liu jit_data->rw_header); 2504*1d5f82d9SSong Liu kvfree(jit_data->addrs); 2505*1d5f82d9SSong Liu kfree(jit_data); 2506*1d5f82d9SSong Liu } 2507*1d5f82d9SSong Liu hdr = bpf_jit_binary_pack_hdr(prog); 2508*1d5f82d9SSong Liu bpf_jit_binary_pack_free(hdr, NULL); 2509*1d5f82d9SSong Liu WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(prog)); 2510*1d5f82d9SSong Liu } 2511*1d5f82d9SSong Liu 2512*1d5f82d9SSong Liu bpf_prog_unlock_free(prog); 2513*1d5f82d9SSong Liu } 2514