// SPDX-License-Identifier: GPL-2.0-only
/*
 * bpf_jit_comp.c: BPF JIT compiler
 *
 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
 * Internal BPF Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <linux/memory.h>
#include <linux/sort.h>
#include <asm/extable.h>
#include <asm/set_memory.h>
#include <asm/nospec-branch.h>
#include <asm/text-patching.h>

static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
{
        if (len == 1)
                *ptr = bytes;
        else if (len == 2)
                *(u16 *)ptr = bytes;
        else {
                *(u32 *)ptr = bytes;
                barrier();
        }
        return ptr + len;
}

#define EMIT(bytes, len) \
        do { prog = emit_code(prog, bytes, len); } while (0)

#define EMIT1(b1)               EMIT(b1, 1)
#define EMIT2(b1, b2)           EMIT((b1) + ((b2) << 8), 2)
#define EMIT3(b1, b2, b3)       EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
#define EMIT4(b1, b2, b3, b4)   EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)

#define EMIT1_off32(b1, off) \
        do { EMIT1(b1); EMIT(off, 4); } while (0)
#define EMIT2_off32(b1, b2, off) \
        do { EMIT2(b1, b2); EMIT(off, 4); } while (0)
#define EMIT3_off32(b1, b2, b3, off) \
        do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
#define EMIT4_off32(b1, b2, b3, b4, off) \
        do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)

static bool is_imm8(int value)
{
        return value <= 127 && value >= -128;
}

static bool is_simm32(s64 value)
{
        return value == (s64)(s32)value;
}

static bool is_uimm32(u64 value)
{
        return value == (u64)(u32)value;
}

/* mov dst, src */
#define EMIT_mov(DST, SRC)                                                               \
        do {                                                                             \
                if (DST != SRC)                                                          \
                        EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
        } while (0)

static int bpf_size_to_x86_bytes(int bpf_size)
{
        if (bpf_size == BPF_W)
                return 4;
        else if (bpf_size == BPF_H)
                return 2;
        else if (bpf_size == BPF_B)
                return 1;
        else if (bpf_size == BPF_DW)
                return 4; /* imm32 */
        else
                return 0;
}
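
/*
 * Worked example (illustrative): the EMITn() macros pack their arguments
 * into a little-endian integer, and emit_code() stores that integer, so
 * the first macro argument always lands first in the instruction stream.
 * E.g. EMIT3(0x48, 0x89, 0xE5) emits the byte sequence 48 89 E5, which
 * is 'mov rbp, rsp' (REX.W prefix, opcode 0x89, ModRM 0xE5).
 */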

/*
 * List of x86 cond jump opcodes (. + s8)
 * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
 */
#define X86_JB  0x72
#define X86_JAE 0x73
#define X86_JE  0x74
#define X86_JNE 0x75
#define X86_JBE 0x76
#define X86_JA  0x77
#define X86_JL  0x7C
#define X86_JGE 0x7D
#define X86_JLE 0x7E
#define X86_JG  0x7F

/* Pick a register outside of BPF range for JIT internal work */
#define AUX_REG (MAX_BPF_JIT_REG + 1)
#define X86_REG_R9 (MAX_BPF_JIT_REG + 2)

/*
 * The following table maps BPF registers to x86-64 registers.
 *
 * x86-64 register R12 is unused, since if used as base address
 * register in load/store instructions, it always needs an
 * extra byte of encoding and is callee saved.
 *
 * x86-64 register R9 is not used by BPF programs, but can be used by BPF
 * trampoline. x86-64 register R10 is used for blinding (if enabled).
 */
static const int reg2hex[] = {
        [BPF_REG_0] = 0,  /* RAX */
        [BPF_REG_1] = 7,  /* RDI */
        [BPF_REG_2] = 6,  /* RSI */
        [BPF_REG_3] = 2,  /* RDX */
        [BPF_REG_4] = 1,  /* RCX */
        [BPF_REG_5] = 0,  /* R8  */
        [BPF_REG_6] = 3,  /* RBX callee saved */
        [BPF_REG_7] = 5,  /* R13 callee saved */
        [BPF_REG_8] = 6,  /* R14 callee saved */
        [BPF_REG_9] = 7,  /* R15 callee saved */
        [BPF_REG_FP] = 5, /* RBP readonly */
        [BPF_REG_AX] = 2, /* R10 temp register */
        [AUX_REG] = 3,    /* R11 temp register */
        [X86_REG_R9] = 1, /* R9 register, 6th function argument */
};

static const int reg2pt_regs[] = {
        [BPF_REG_0] = offsetof(struct pt_regs, ax),
        [BPF_REG_1] = offsetof(struct pt_regs, di),
        [BPF_REG_2] = offsetof(struct pt_regs, si),
        [BPF_REG_3] = offsetof(struct pt_regs, dx),
        [BPF_REG_4] = offsetof(struct pt_regs, cx),
        [BPF_REG_5] = offsetof(struct pt_regs, r8),
        [BPF_REG_6] = offsetof(struct pt_regs, bx),
        [BPF_REG_7] = offsetof(struct pt_regs, r13),
        [BPF_REG_8] = offsetof(struct pt_regs, r14),
        [BPF_REG_9] = offsetof(struct pt_regs, r15),
};

/*
 * is_ereg() == true if BPF register 'reg' maps to x86-64 r8..r15
 * which need extra byte of encoding.
 * rax,rcx,...,rbp have simpler encoding
 */
static bool is_ereg(u32 reg)
{
        return (1 << reg) & (BIT(BPF_REG_5) |
                             BIT(AUX_REG) |
                             BIT(BPF_REG_7) |
                             BIT(BPF_REG_8) |
                             BIT(BPF_REG_9) |
                             BIT(X86_REG_R9) |
                             BIT(BPF_REG_AX));
}
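
/*
 * Worked example (illustrative): BPF_REG_7 maps to r13, so reg2hex[]
 * holds only the low three bits (5 == 0b101); is_ereg() supplies the
 * fourth register bit via a REX prefix. add_1mod(0x48, BPF_REG_7)
 * therefore yields 0x49 (REX.W + REX.B), while rax..rbp keep 0x48.
 */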

/*
 * is_ereg_8l() == true if BPF register 'reg' is mapped to access x86-64
 * lower 8-bit registers dil,sil,bpl,spl,r8b..r15b, which need extra byte
 * of encoding. al,cl,dl,bl have simpler encoding.
 */
static bool is_ereg_8l(u32 reg)
{
        return is_ereg(reg) ||
            (1 << reg) & (BIT(BPF_REG_1) |
                          BIT(BPF_REG_2) |
                          BIT(BPF_REG_FP));
}

static bool is_axreg(u32 reg)
{
        return reg == BPF_REG_0;
}

/* Add modifiers if 'reg' maps to x86-64 registers R8..R15 */
static u8 add_1mod(u8 byte, u32 reg)
{
        if (is_ereg(reg))
                byte |= 1;
        return byte;
}

static u8 add_2mod(u8 byte, u32 r1, u32 r2)
{
        if (is_ereg(r1))
                byte |= 1;
        if (is_ereg(r2))
                byte |= 4;
        return byte;
}

/* Encode 'dst_reg' register into x86-64 opcode 'byte' */
static u8 add_1reg(u8 byte, u32 dst_reg)
{
        return byte + reg2hex[dst_reg];
}

/* Encode 'dst_reg' and 'src_reg' registers into x86-64 opcode 'byte' */
static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
{
        return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
}

/* Some 1-byte opcodes for binary ALU operations */
static u8 simple_alu_opcodes[] = {
        [BPF_ADD] = 0x01,
        [BPF_SUB] = 0x29,
        [BPF_AND] = 0x21,
        [BPF_OR] = 0x09,
        [BPF_XOR] = 0x31,
        [BPF_LSH] = 0xE0,
        [BPF_RSH] = 0xE8,
        [BPF_ARSH] = 0xF8,
};

static void jit_fill_hole(void *area, unsigned int size)
{
        /* Fill whole space with INT3 instructions */
        memset(area, 0xcc, size);
}
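
/*
 * Worked example (illustrative): EMIT_mov(BPF_REG_6, BPF_REG_7), i.e.
 * 'mov rbx, r13'. add_2mod(0x48, rbx, r13) sets REX.R for r13, giving
 * 0x4C; add_2reg(0xC0, rbx, r13) builds ModRM 0xC0 + 3 + (5 << 3) =
 * 0xEB. The emitted bytes are 4C 89 EB.
 */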

struct jit_context {
        int cleanup_addr; /* Epilogue code offset */

        /*
         * Program specific offsets of labels in the code; these rely on the
         * JIT doing at least 2 passes, recording the position on the first
         * pass, only to generate the correct offset on the second pass.
         */
        int tail_call_direct_label;
        int tail_call_indirect_label;
};

/* Maximum number of bytes emitted while JITing one eBPF insn */
#define BPF_MAX_INSN_SIZE       128
#define BPF_INSN_SAFETY         64

/* Number of bytes emit_patch() needs to generate instructions */
#define X86_PATCH_SIZE          5
/* Number of bytes that will be skipped on tailcall */
#define X86_TAIL_CALL_OFFSET    11

static void push_callee_regs(u8 **pprog, bool *callee_regs_used)
{
        u8 *prog = *pprog;

        if (callee_regs_used[0])
                EMIT1(0x53);         /* push rbx */
        if (callee_regs_used[1])
                EMIT2(0x41, 0x55);   /* push r13 */
        if (callee_regs_used[2])
                EMIT2(0x41, 0x56);   /* push r14 */
        if (callee_regs_used[3])
                EMIT2(0x41, 0x57);   /* push r15 */
        *pprog = prog;
}

static void pop_callee_regs(u8 **pprog, bool *callee_regs_used)
{
        u8 *prog = *pprog;

        if (callee_regs_used[3])
                EMIT2(0x41, 0x5F);   /* pop r15 */
        if (callee_regs_used[2])
                EMIT2(0x41, 0x5E);   /* pop r14 */
        if (callee_regs_used[1])
                EMIT2(0x41, 0x5D);   /* pop r13 */
        if (callee_regs_used[0])
                EMIT1(0x5B);         /* pop rbx */
        *pprog = prog;
}

/*
 * Emit x86-64 prologue code for BPF program.
 * bpf_tail_call helper will skip the first X86_TAIL_CALL_OFFSET bytes
 * while jumping to another program
 */
static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf,
                          bool tail_call_reachable, bool is_subprog)
{
        u8 *prog = *pprog;

        /* BPF trampoline can be made to work without these nops,
         * but let's waste 5 bytes for now and optimize later
         */
        memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
        prog += X86_PATCH_SIZE;
        if (!ebpf_from_cbpf) {
                if (tail_call_reachable && !is_subprog)
                        EMIT2(0x31, 0xC0); /* xor eax, eax */
                else
                        EMIT2(0x66, 0x90); /* nop2 */
        }
        EMIT1(0x55);             /* push rbp */
        EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
        /* sub rsp, rounded_stack_depth */
        if (stack_depth)
                EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth, 8));
        if (tail_call_reachable)
                EMIT1(0x50);     /* push rax */
        *pprog = prog;
}

static int emit_patch(u8 **pprog, void *func, void *ip, u8 opcode)
{
        u8 *prog = *pprog;
        s64 offset;

        offset = func - (ip + X86_PATCH_SIZE);
        if (!is_simm32(offset)) {
                pr_err("Target call %p is out of range\n", func);
                return -ERANGE;
        }
        EMIT1_off32(opcode, offset);
        *pprog = prog;
        return 0;
}

static int emit_call(u8 **pprog, void *func, void *ip)
{
        return emit_patch(pprog, func, ip, 0xE8);
}

static int emit_jump(u8 **pprog, void *func, void *ip)
{
        return emit_patch(pprog, func, ip, 0xE9);
}
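
/*
 * Worked example (illustrative): a call/jump is always the 5-byte rel32
 * form, opcode 0xE8/0xE9 plus a 32-bit displacement relative to the end
 * of the instruction. For a target 0x100 bytes past the patch site,
 * emit_call() emits E8 FB 00 00 00 (0x100 - X86_PATCH_SIZE = 0xFB).
 */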

static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
                                void *old_addr, void *new_addr,
                                const bool text_live)
{
        const u8 *nop_insn = x86_nops[5];
        u8 old_insn[X86_PATCH_SIZE];
        u8 new_insn[X86_PATCH_SIZE];
        u8 *prog;
        int ret;

        memcpy(old_insn, nop_insn, X86_PATCH_SIZE);
        if (old_addr) {
                prog = old_insn;
                ret = t == BPF_MOD_CALL ?
                      emit_call(&prog, old_addr, ip) :
                      emit_jump(&prog, old_addr, ip);
                if (ret)
                        return ret;
        }

        memcpy(new_insn, nop_insn, X86_PATCH_SIZE);
        if (new_addr) {
                prog = new_insn;
                ret = t == BPF_MOD_CALL ?
                      emit_call(&prog, new_addr, ip) :
                      emit_jump(&prog, new_addr, ip);
                if (ret)
                        return ret;
        }

        ret = -EBUSY;
        mutex_lock(&text_mutex);
        if (memcmp(ip, old_insn, X86_PATCH_SIZE))
                goto out;
        ret = 1;
        if (memcmp(ip, new_insn, X86_PATCH_SIZE)) {
                if (text_live)
                        text_poke_bp(ip, new_insn, X86_PATCH_SIZE, NULL);
                else
                        memcpy(ip, new_insn, X86_PATCH_SIZE);
                ret = 0;
        }
out:
        mutex_unlock(&text_mutex);
        return ret;
}

int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
                       void *old_addr, void *new_addr)
{
        if (!is_kernel_text((long)ip) &&
            !is_bpf_text_address((long)ip))
                /* BPF poking in modules is not supported */
                return -EINVAL;

        return __bpf_arch_text_poke(ip, t, old_addr, new_addr, true);
}

#define EMIT_LFENCE()   EMIT3(0x0F, 0xAE, 0xE8)

static void emit_indirect_jump(u8 **pprog, int reg, u8 *ip)
{
        u8 *prog = *pprog;

#ifdef CONFIG_RETPOLINE
        if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_AMD)) {
                EMIT_LFENCE();
                EMIT2(0xFF, 0xE0 + reg);
        } else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) {
                emit_jump(&prog, &__x86_indirect_thunk_array[reg], ip);
        } else
#endif
        EMIT2(0xFF, 0xE0 + reg);

        *pprog = prog;
}
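
/*
 * Worked example (illustrative): with retpolines disabled, the tail
 * call below passes reg == 1 (rcx), so emit_indirect_jump() emits
 * FF E1, i.e. 'jmp rcx' (opcode 0xFF /4, ModRM 0xE0 + 1). Note that
 * 'reg' here is a raw x86 register number, not a BPF register.
 */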

/*
 * Generate the following code:
 *
 * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ...
 *   if (index >= array->map.max_entries)
 *     goto out;
 *   if (++tail_call_cnt > MAX_TAIL_CALL_CNT)
 *     goto out;
 *   prog = array->ptrs[index];
 *   if (prog == NULL)
 *     goto out;
 *   goto *(prog->bpf_func + prologue_size);
 * out:
 */
static void emit_bpf_tail_call_indirect(u8 **pprog, bool *callee_regs_used,
                                        u32 stack_depth, u8 *ip,
                                        struct jit_context *ctx)
{
        int tcc_off = -4 - round_up(stack_depth, 8);
        u8 *prog = *pprog, *start = *pprog;
        int offset;

        /*
         * rdi - pointer to ctx
         * rsi - pointer to bpf_array
         * rdx - index in bpf_array
         */

        /*
         * if (index >= array->map.max_entries)
         *      goto out;
         */
        EMIT2(0x89, 0xD2);                        /* mov edx, edx */
        EMIT3(0x39, 0x56,                         /* cmp dword ptr [rsi + 16], edx */
              offsetof(struct bpf_array, map.max_entries));

        offset = ctx->tail_call_indirect_label - (prog + 2 - start);
        EMIT2(X86_JBE, offset);                   /* jbe out */

        /*
         * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
         *      goto out;
         */
        EMIT2_off32(0x8B, 0x85, tcc_off);         /* mov eax, dword ptr [rbp - tcc_off] */
        EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);     /* cmp eax, MAX_TAIL_CALL_CNT */

        offset = ctx->tail_call_indirect_label - (prog + 2 - start);
        EMIT2(X86_JA, offset);                    /* ja out */
        EMIT3(0x83, 0xC0, 0x01);                  /* add eax, 1 */
        EMIT2_off32(0x89, 0x85, tcc_off);         /* mov dword ptr [rbp - tcc_off], eax */

        /* prog = array->ptrs[index]; */
        EMIT4_off32(0x48, 0x8B, 0x8C, 0xD6,       /* mov rcx, [rsi + rdx * 8 + offsetof(...)] */
                    offsetof(struct bpf_array, ptrs));

        /*
         * if (prog == NULL)
         *      goto out;
         */
        EMIT3(0x48, 0x85, 0xC9);                  /* test rcx,rcx */

        offset = ctx->tail_call_indirect_label - (prog + 2 - start);
        EMIT2(X86_JE, offset);                    /* je out */

        pop_callee_regs(&prog, callee_regs_used);

        EMIT1(0x58);                              /* pop rax */
        if (stack_depth)
                EMIT3_off32(0x48, 0x81, 0xC4,     /* add rsp, sd */
                            round_up(stack_depth, 8));

        /* goto *(prog->bpf_func + X86_TAIL_CALL_OFFSET); */
        EMIT4(0x48, 0x8B, 0x49,                   /* mov rcx, qword ptr [rcx + 32] */
              offsetof(struct bpf_prog, bpf_func));
        EMIT4(0x48, 0x83, 0xC1,                   /* add rcx, X86_TAIL_CALL_OFFSET */
              X86_TAIL_CALL_OFFSET);
        /*
         * Now we're ready to jump into next BPF program
         * rdi == ctx (1st arg)
         * rcx == prog->bpf_func + X86_TAIL_CALL_OFFSET
         */
        emit_indirect_jump(&prog, 1 /* rcx */, ip + (prog - start));

        /* out: */
        ctx->tail_call_indirect_label = prog - start;
        *pprog = prog;
}
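
/*
 * Note (illustrative): the forward 'jbe/ja/je out' branches above use
 * ctx->tail_call_indirect_label, which is only recorded at the end of
 * this function. This works because the JIT runs multiple passes: the
 * first pass records the label position, and later passes emit the
 * final 2-byte jumps with the correct s8 offsets.
 */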

static void emit_bpf_tail_call_direct(struct bpf_jit_poke_descriptor *poke,
                                      u8 **pprog, u8 *ip,
                                      bool *callee_regs_used, u32 stack_depth,
                                      struct jit_context *ctx)
{
        int tcc_off = -4 - round_up(stack_depth, 8);
        u8 *prog = *pprog, *start = *pprog;
        int offset;

        /*
         * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
         *      goto out;
         */
        EMIT2_off32(0x8B, 0x85, tcc_off);         /* mov eax, dword ptr [rbp - tcc_off] */
        EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);     /* cmp eax, MAX_TAIL_CALL_CNT */

        offset = ctx->tail_call_direct_label - (prog + 2 - start);
        EMIT2(X86_JA, offset);                    /* ja out */
        EMIT3(0x83, 0xC0, 0x01);                  /* add eax, 1 */
        EMIT2_off32(0x89, 0x85, tcc_off);         /* mov dword ptr [rbp - tcc_off], eax */

        poke->tailcall_bypass = ip + (prog - start);
        poke->adj_off = X86_TAIL_CALL_OFFSET;
        poke->tailcall_target = ip + ctx->tail_call_direct_label - X86_PATCH_SIZE;
        poke->bypass_addr = (u8 *)poke->tailcall_target + X86_PATCH_SIZE;

        emit_jump(&prog, (u8 *)poke->tailcall_target + X86_PATCH_SIZE,
                  poke->tailcall_bypass);

        pop_callee_regs(&prog, callee_regs_used);
        EMIT1(0x58);                              /* pop rax */
        if (stack_depth)
                EMIT3_off32(0x48, 0x81, 0xC4, round_up(stack_depth, 8));

        memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
        prog += X86_PATCH_SIZE;

        /* out: */
        ctx->tail_call_direct_label = prog - start;

        *pprog = prog;
}

static void bpf_tail_call_direct_fixup(struct bpf_prog *prog)
{
        struct bpf_jit_poke_descriptor *poke;
        struct bpf_array *array;
        struct bpf_prog *target;
        int i, ret;

        for (i = 0; i < prog->aux->size_poke_tab; i++) {
                poke = &prog->aux->poke_tab[i];
                if (poke->aux && poke->aux != prog->aux)
                        continue;

                WARN_ON_ONCE(READ_ONCE(poke->tailcall_target_stable));

                if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
                        continue;

                array = container_of(poke->tail_call.map, struct bpf_array, map);
                mutex_lock(&array->aux->poke_mutex);
                target = array->ptrs[poke->tail_call.key];
                if (target) {
                        /* Plain memcpy is used when image is not live yet
                         * and still not locked as read-only. Once poke
                         * location is active (poke->tailcall_target_stable),
                         * any parallel bpf_arch_text_poke() might occur
                         * still on the read-write image until we finally
                         * locked it as read-only. Both modifications on
                         * the given image are under text_mutex to avoid
                         * interference.
                         */
                        ret = __bpf_arch_text_poke(poke->tailcall_target,
                                                   BPF_MOD_JUMP, NULL,
                                                   (u8 *)target->bpf_func +
                                                   poke->adj_off, false);
                        BUG_ON(ret < 0);
                        ret = __bpf_arch_text_poke(poke->tailcall_bypass,
                                                   BPF_MOD_JUMP,
                                                   (u8 *)poke->tailcall_target +
                                                   X86_PATCH_SIZE, NULL, false);
                        BUG_ON(ret < 0);
                }
                WRITE_ONCE(poke->tailcall_target_stable, true);
                mutex_unlock(&array->aux->poke_mutex);
        }
}
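
/*
 * Note (illustrative): a direct tail call site starts life as the 5-byte
 * nop emitted in emit_bpf_tail_call_direct() above. Once a target program
 * is installed in the array, bpf_tail_call_direct_fixup() uses
 * __bpf_arch_text_poke(..., BPF_MOD_JUMP, NULL, target) to rewrite that
 * nop into 'E9 rel32', a direct jump into the target program.
 */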

static void emit_mov_imm32(u8 **pprog, bool sign_propagate,
                           u32 dst_reg, const u32 imm32)
{
        u8 *prog = *pprog;
        u8 b1, b2, b3;

        /*
         * Optimization: if imm32 is positive, use 'mov %eax, imm32'
         * (which zero-extends imm32) to save 2 bytes.
         */
        if (sign_propagate && (s32)imm32 < 0) {
                /* 'mov %rax, imm32' sign extends imm32 */
                b1 = add_1mod(0x48, dst_reg);
                b2 = 0xC7;
                b3 = 0xC0;
                EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
                goto done;
        }

        /*
         * Optimization: if imm32 is zero, use 'xor %eax, %eax'
         * to save 3 bytes.
         */
        if (imm32 == 0) {
                if (is_ereg(dst_reg))
                        EMIT1(add_2mod(0x40, dst_reg, dst_reg));
                b2 = 0x31; /* xor */
                b3 = 0xC0;
                EMIT2(b2, add_2reg(b3, dst_reg, dst_reg));
                goto done;
        }

        /* mov %eax, imm32 */
        if (is_ereg(dst_reg))
                EMIT1(add_1mod(0x40, dst_reg));
        EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
done:
        *pprog = prog;
}
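
/*
 * Worked example (illustrative) of the size trade-offs above, for rax:
 *   xor eax, eax        -> 31 C0            (2 bytes)
 *   mov eax, imm32      -> B8 imm32         (5 bytes, zero-extends)
 *   mov rax, imm32      -> 48 C7 C0 imm32   (7 bytes, sign-extends)
 *   movabs rax, imm64   -> 48 B8 imm64      (10 bytes)
 */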

static void emit_mov_imm64(u8 **pprog, u32 dst_reg,
                           const u32 imm32_hi, const u32 imm32_lo)
{
        u8 *prog = *pprog;

        if (is_uimm32(((u64)imm32_hi << 32) | (u32)imm32_lo)) {
                /*
                 * For emitting plain u32, where sign bit must not be
                 * propagated, LLVM tends to load imm64 over mov32
                 * directly, so save a couple of bytes by just doing
                 * 'mov %eax, imm32' instead.
                 */
                emit_mov_imm32(&prog, false, dst_reg, imm32_lo);
        } else {
                /* movabsq %rax, imm64 */
                EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
                EMIT(imm32_lo, 4);
                EMIT(imm32_hi, 4);
        }

        *pprog = prog;
}

static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg)
{
        u8 *prog = *pprog;

        if (is64) {
                /* mov dst, src */
                EMIT_mov(dst_reg, src_reg);
        } else {
                /* mov32 dst, src */
                if (is_ereg(dst_reg) || is_ereg(src_reg))
                        EMIT1(add_2mod(0x40, dst_reg, src_reg));
                EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
        }

        *pprog = prog;
}

/* Emit the suffix (ModR/M etc) for addressing *(ptr_reg + off) and val_reg */
static void emit_insn_suffix(u8 **pprog, u32 ptr_reg, u32 val_reg, int off)
{
        u8 *prog = *pprog;

        if (is_imm8(off)) {
                /* 1-byte signed displacement.
                 *
                 * If off == 0 we could skip this and save one extra byte, but
                 * special case of x86 R13 which always needs an offset is not
                 * worth the hassle
                 */
                EMIT2(add_2reg(0x40, ptr_reg, val_reg), off);
        } else {
                /* 4-byte signed displacement */
                EMIT1_off32(add_2reg(0x80, ptr_reg, val_reg), off);
        }
        *pprog = prog;
}

/*
 * Emit a REX byte if it will be necessary to address these registers
 */
static void maybe_emit_mod(u8 **pprog, u32 dst_reg, u32 src_reg, bool is64)
{
        u8 *prog = *pprog;

        if (is64)
                EMIT1(add_2mod(0x48, dst_reg, src_reg));
        else if (is_ereg(dst_reg) || is_ereg(src_reg))
                EMIT1(add_2mod(0x40, dst_reg, src_reg));
        *pprog = prog;
}
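
/*
 * Worked example (illustrative): a 64-bit op always gets REX.W (0x48
 * base), while a 32-bit op only needs a REX byte when an extended
 * register is involved. E.g. emit_mov_reg(&prog, false, BPF_REG_8,
 * BPF_REG_0) emits 41 89 C6, i.e. 'mov r14d, eax'.
 */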

/*
 * Similar version of maybe_emit_mod() for a single register
 */
static void maybe_emit_1mod(u8 **pprog, u32 reg, bool is64)
{
        u8 *prog = *pprog;

        if (is64)
                EMIT1(add_1mod(0x48, reg));
        else if (is_ereg(reg))
                EMIT1(add_1mod(0x40, reg));
        *pprog = prog;
}

/* LDX: dst_reg = *(u8*)(src_reg + off) */
static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
{
        u8 *prog = *pprog;

        switch (size) {
        case BPF_B:
                /* Emit 'movzx rax, byte ptr [rax + off]' */
                EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
                break;
        case BPF_H:
                /* Emit 'movzx rax, word ptr [rax + off]' */
                EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
                break;
        case BPF_W:
                /* Emit 'mov eax, dword ptr [rax+0x14]' */
                if (is_ereg(dst_reg) || is_ereg(src_reg))
                        EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
                else
                        EMIT1(0x8B);
                break;
        case BPF_DW:
                /* Emit 'mov rax, qword ptr [rax+0x14]' */
                EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
                break;
        }
        emit_insn_suffix(&prog, src_reg, dst_reg, off);
        *pprog = prog;
}
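
/*
 * Worked example (illustrative): emit_ldx(&prog, BPF_DW, BPF_REG_0,
 * BPF_REG_1, 8) emits 48 8B 47 08, i.e. 'mov rax, qword ptr [rdi + 8]':
 * REX.W, opcode 0x8B, then emit_insn_suffix() adds ModRM 0x47
 * (mod=01: disp8, reg=rax, r/m=rdi) and the offset byte.
 */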

/* STX: *(u8*)(dst_reg + off) = src_reg */
static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
{
        u8 *prog = *pprog;

        switch (size) {
        case BPF_B:
                /* Emit 'mov byte ptr [rax + off], al' */
                if (is_ereg(dst_reg) || is_ereg_8l(src_reg))
                        /* Add extra byte for eregs or SIL,DIL,BPL in src_reg */
                        EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
                else
                        EMIT1(0x88);
                break;
        case BPF_H:
                if (is_ereg(dst_reg) || is_ereg(src_reg))
                        EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
                else
                        EMIT2(0x66, 0x89);
                break;
        case BPF_W:
                if (is_ereg(dst_reg) || is_ereg(src_reg))
                        EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
                else
                        EMIT1(0x89);
                break;
        case BPF_DW:
                EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
                break;
        }
        emit_insn_suffix(&prog, dst_reg, src_reg, off);
        *pprog = prog;
}

static int emit_atomic(u8 **pprog, u8 atomic_op,
                       u32 dst_reg, u32 src_reg, s16 off, u8 bpf_size)
{
        u8 *prog = *pprog;

        EMIT1(0xF0); /* lock prefix */

        maybe_emit_mod(&prog, dst_reg, src_reg, bpf_size == BPF_DW);

        /* emit opcode */
        switch (atomic_op) {
        case BPF_ADD:
        case BPF_SUB:
        case BPF_AND:
        case BPF_OR:
        case BPF_XOR:
                /* lock *(u32/u64*)(dst_reg + off) <op>= src_reg */
                EMIT1(simple_alu_opcodes[atomic_op]);
                break;
        case BPF_ADD | BPF_FETCH:
                /* src_reg = atomic_fetch_add(dst_reg + off, src_reg); */
                EMIT2(0x0F, 0xC1);
                break;
        case BPF_XCHG:
                /* src_reg = atomic_xchg(dst_reg + off, src_reg); */
                EMIT1(0x87);
                break;
        case BPF_CMPXCHG:
                /* r0 = atomic_cmpxchg(dst_reg + off, r0, src_reg); */
                EMIT2(0x0F, 0xB1);
                break;
        default:
                pr_err("bpf_jit: unknown atomic opcode %02x\n", atomic_op);
                return -EFAULT;
        }

        emit_insn_suffix(&prog, dst_reg, src_reg, off);

        *pprog = prog;
        return 0;
}
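
/*
 * Worked example (illustrative): emit_atomic(&prog, BPF_ADD, BPF_REG_1,
 * BPF_REG_2, 0, BPF_DW) emits F0 48 01 77 00, i.e.
 * 'lock add qword ptr [rdi + 0], rsi': lock prefix, REX.W, opcode 0x01,
 * ModRM 0x77, disp8 0. The disp8 is kept even for off == 0 because
 * emit_insn_suffix() always emits a displacement (see its comment).
 */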

bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs)
{
        u32 reg = x->fixup >> 8;

        /* jump over faulting load and clear dest register */
        *(unsigned long *)((void *)regs + reg) = 0;
        regs->ip += x->fixup & 0xff;
        return true;
}

static void detect_reg_usage(struct bpf_insn *insn, int insn_cnt,
                             bool *regs_used, bool *tail_call_seen)
{
        int i;

        for (i = 1; i <= insn_cnt; i++, insn++) {
                if (insn->code == (BPF_JMP | BPF_TAIL_CALL))
                        *tail_call_seen = true;
                if (insn->dst_reg == BPF_REG_6 || insn->src_reg == BPF_REG_6)
                        regs_used[0] = true;
                if (insn->dst_reg == BPF_REG_7 || insn->src_reg == BPF_REG_7)
                        regs_used[1] = true;
                if (insn->dst_reg == BPF_REG_8 || insn->src_reg == BPF_REG_8)
                        regs_used[2] = true;
                if (insn->dst_reg == BPF_REG_9 || insn->src_reg == BPF_REG_9)
                        regs_used[3] = true;
        }
}

static void emit_nops(u8 **pprog, int len)
{
        u8 *prog = *pprog;
        int i, noplen;

        while (len > 0) {
                noplen = len;

                if (noplen > ASM_NOP_MAX)
                        noplen = ASM_NOP_MAX;

                for (i = 0; i < noplen; i++)
                        EMIT1(x86_nops[noplen][i]);
                len -= noplen;
        }

        *pprog = prog;
}

#define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp)))

static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
                  int oldproglen, struct jit_context *ctx, bool jmp_padding)
{
        bool tail_call_reachable = bpf_prog->aux->tail_call_reachable;
        struct bpf_insn *insn = bpf_prog->insnsi;
        bool callee_regs_used[4] = {};
        int insn_cnt = bpf_prog->len;
        bool tail_call_seen = false;
        bool seen_exit = false;
        u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
        int i, excnt = 0;
        int ilen, proglen = 0;
        u8 *prog = temp;
        int err;

        detect_reg_usage(insn, insn_cnt, callee_regs_used,
                         &tail_call_seen);

        /* tail call's presence in current prog implies it is reachable */
        tail_call_reachable |= tail_call_seen;

        emit_prologue(&prog, bpf_prog->aux->stack_depth,
                      bpf_prog_was_classic(bpf_prog), tail_call_reachable,
                      bpf_prog->aux->func_idx != 0);
        push_callee_regs(&prog, callee_regs_used);

        ilen = prog - temp;
        if (image)
                memcpy(image + proglen, temp, ilen);
        proglen += ilen;
        addrs[0] = proglen;
        prog = temp;

        for (i = 1; i <= insn_cnt; i++, insn++) {
                const s32 imm32 = insn->imm;
                u32 dst_reg = insn->dst_reg;
                u32 src_reg = insn->src_reg;
                u8 b2 = 0, b3 = 0;
                u8 *start_of_ldx;
                s64 jmp_offset;
                u8 jmp_cond;
                u8 *func;
                int nops;

                switch (insn->code) {
                        /* ALU */
                case BPF_ALU | BPF_ADD | BPF_X:
                case BPF_ALU | BPF_SUB | BPF_X:
                case BPF_ALU | BPF_AND | BPF_X:
                case BPF_ALU | BPF_OR | BPF_X:
                case BPF_ALU | BPF_XOR | BPF_X:
                case BPF_ALU64 | BPF_ADD | BPF_X:
                case BPF_ALU64 | BPF_SUB | BPF_X:
                case BPF_ALU64 | BPF_AND | BPF_X:
                case BPF_ALU64 | BPF_OR | BPF_X:
                case BPF_ALU64 | BPF_XOR | BPF_X:
                        maybe_emit_mod(&prog, dst_reg, src_reg,
                                       BPF_CLASS(insn->code) == BPF_ALU64);
                        b2 = simple_alu_opcodes[BPF_OP(insn->code)];
                        EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
                        break;

                case BPF_ALU64 | BPF_MOV | BPF_X:
                case BPF_ALU | BPF_MOV | BPF_X:
                        emit_mov_reg(&prog,
                                     BPF_CLASS(insn->code) == BPF_ALU64,
                                     dst_reg, src_reg);
                        break;

                        /* neg dst */
                case BPF_ALU | BPF_NEG:
                case BPF_ALU64 | BPF_NEG:
                        maybe_emit_1mod(&prog, dst_reg,
                                        BPF_CLASS(insn->code) == BPF_ALU64);
                        EMIT2(0xF7, add_1reg(0xD8, dst_reg));
                        break;

                case BPF_ALU | BPF_ADD | BPF_K:
                case BPF_ALU | BPF_SUB | BPF_K:
                case BPF_ALU | BPF_AND | BPF_K:
                case BPF_ALU | BPF_OR | BPF_K:
                case BPF_ALU | BPF_XOR | BPF_K:
                case BPF_ALU64 | BPF_ADD | BPF_K:
                case BPF_ALU64 | BPF_SUB | BPF_K:
                case BPF_ALU64 | BPF_AND | BPF_K:
                case BPF_ALU64 | BPF_OR | BPF_K:
                case BPF_ALU64 | BPF_XOR | BPF_K:
                        maybe_emit_1mod(&prog, dst_reg,
                                        BPF_CLASS(insn->code) == BPF_ALU64);

                        /*
                         * b3 holds 'normal' opcode, b2 short form only valid
                         * in case dst is eax/rax.
                         */
                        switch (BPF_OP(insn->code)) {
                        case BPF_ADD:
                                b3 = 0xC0;
                                b2 = 0x05;
                                break;
                        case BPF_SUB:
                                b3 = 0xE8;
                                b2 = 0x2D;
                                break;
                        case BPF_AND:
                                b3 = 0xE0;
                                b2 = 0x25;
                                break;
                        case BPF_OR:
                                b3 = 0xC8;
                                b2 = 0x0D;
                                break;
                        case BPF_XOR:
                                b3 = 0xF0;
                                b2 = 0x35;
                                break;
                        }

                        if (is_imm8(imm32))
                                EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
                        else if (is_axreg(dst_reg))
                                EMIT1_off32(b2, imm32);
                        else
                                EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
                        break;

                case BPF_ALU64 | BPF_MOV | BPF_K:
                case BPF_ALU | BPF_MOV | BPF_K:
                        emit_mov_imm32(&prog, BPF_CLASS(insn->code) == BPF_ALU64,
                                       dst_reg, imm32);
                        break;

                case BPF_LD | BPF_IMM | BPF_DW:
                        emit_mov_imm64(&prog, dst_reg, insn[1].imm, insn[0].imm);
                        insn++;
                        i++;
                        break;

                        /* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
                case BPF_ALU | BPF_MOD | BPF_X:
                case BPF_ALU | BPF_DIV | BPF_X:
                case BPF_ALU | BPF_MOD | BPF_K:
                case BPF_ALU | BPF_DIV | BPF_K:
                case BPF_ALU64 | BPF_MOD | BPF_X:
                case BPF_ALU64 | BPF_DIV | BPF_X:
                case BPF_ALU64 | BPF_MOD | BPF_K:
                case BPF_ALU64 | BPF_DIV | BPF_K: {
                        bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;

                        if (dst_reg != BPF_REG_0)
                                EMIT1(0x50); /* push rax */
                        if (dst_reg != BPF_REG_3)
                                EMIT1(0x52); /* push rdx */

                        if (BPF_SRC(insn->code) == BPF_X) {
                                if (src_reg == BPF_REG_0 ||
                                    src_reg == BPF_REG_3) {
                                        /* mov r11, src_reg */
                                        EMIT_mov(AUX_REG, src_reg);
                                        src_reg = AUX_REG;
                                }
                        } else {
                                /* mov r11, imm32 */
                                EMIT3_off32(0x49, 0xC7, 0xC3, imm32);
                                src_reg = AUX_REG;
                        }

                        if (dst_reg != BPF_REG_0)
                                /* mov rax, dst_reg */
                                emit_mov_reg(&prog, is64, BPF_REG_0, dst_reg);

                        /*
                         * xor edx, edx
                         * equivalent to 'xor rdx, rdx', but one byte less
                         */
                        EMIT2(0x31, 0xd2);

                        /* div src_reg */
                        maybe_emit_1mod(&prog, src_reg, is64);
                        EMIT2(0xF7, add_1reg(0xF0, src_reg));

                        if (BPF_OP(insn->code) == BPF_MOD &&
                            dst_reg != BPF_REG_3)
                                /* mov dst_reg, rdx */
                                emit_mov_reg(&prog, is64, dst_reg, BPF_REG_3);
                        else if (BPF_OP(insn->code) == BPF_DIV &&
                                 dst_reg != BPF_REG_0)
                                /* mov dst_reg, rax */
                                emit_mov_reg(&prog, is64, dst_reg, BPF_REG_0);

                        if (dst_reg != BPF_REG_3)
                                EMIT1(0x5A); /* pop rdx */
                        if (dst_reg != BPF_REG_0)
                                EMIT1(0x58); /* pop rax */
                        break;
                }

                case BPF_ALU | BPF_MUL | BPF_K:
                case BPF_ALU64 | BPF_MUL | BPF_K:
                        maybe_emit_mod(&prog, dst_reg, dst_reg,
                                       BPF_CLASS(insn->code) == BPF_ALU64);

                        if (is_imm8(imm32))
                                /* imul dst_reg, dst_reg, imm8 */
                                EMIT3(0x6B, add_2reg(0xC0, dst_reg, dst_reg),
                                      imm32);
                        else
                                /* imul dst_reg, dst_reg, imm32 */
                                EMIT2_off32(0x69,
                                            add_2reg(0xC0, dst_reg, dst_reg),
                                            imm32);
                        break;

                case BPF_ALU | BPF_MUL | BPF_X:
                case BPF_ALU64 | BPF_MUL | BPF_X:
                        maybe_emit_mod(&prog, src_reg, dst_reg,
                                       BPF_CLASS(insn->code) == BPF_ALU64);

                        /* imul dst_reg, src_reg */
                        EMIT3(0x0F, 0xAF, add_2reg(0xC0, src_reg, dst_reg));
                        break;

                        /* Shifts */
                case BPF_ALU | BPF_LSH | BPF_K:
                case BPF_ALU | BPF_RSH | BPF_K:
                case BPF_ALU | BPF_ARSH | BPF_K:
                case BPF_ALU64 | BPF_LSH | BPF_K:
                case BPF_ALU64 | BPF_RSH | BPF_K:
                case BPF_ALU64 | BPF_ARSH | BPF_K:
                        maybe_emit_1mod(&prog, dst_reg,
                                        BPF_CLASS(insn->code) == BPF_ALU64);

                        b3 = simple_alu_opcodes[BPF_OP(insn->code)];
                        if (imm32 == 1)
                                EMIT2(0xD1, add_1reg(b3, dst_reg));
                        else
                                EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
                        break;
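
                        /*
                         * Worked example (illustrative): BPF_ALU64|BPF_LSH|
                         * BPF_K with imm32 == 3 on BPF_REG_0 emits
                         * 48 C1 E0 03, i.e. 'shl rax, 3'; imm32 == 1 takes
                         * the shorter 0xD1 shift-by-one form instead.
                         */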
110762258278SAlexei Starovoitov 110872b603eeSAlexei Starovoitov case BPF_ALU | BPF_LSH | BPF_X: 110972b603eeSAlexei Starovoitov case BPF_ALU | BPF_RSH | BPF_X: 111072b603eeSAlexei Starovoitov case BPF_ALU | BPF_ARSH | BPF_X: 111172b603eeSAlexei Starovoitov case BPF_ALU64 | BPF_LSH | BPF_X: 111272b603eeSAlexei Starovoitov case BPF_ALU64 | BPF_RSH | BPF_X: 111372b603eeSAlexei Starovoitov case BPF_ALU64 | BPF_ARSH | BPF_X: 111472b603eeSAlexei Starovoitov 1115a2c7a983SIngo Molnar /* Check for bad case when dst_reg == rcx */ 111672b603eeSAlexei Starovoitov if (dst_reg == BPF_REG_4) { 111772b603eeSAlexei Starovoitov /* mov r11, dst_reg */ 111872b603eeSAlexei Starovoitov EMIT_mov(AUX_REG, dst_reg); 111972b603eeSAlexei Starovoitov dst_reg = AUX_REG; 112072b603eeSAlexei Starovoitov } 112172b603eeSAlexei Starovoitov 112272b603eeSAlexei Starovoitov if (src_reg != BPF_REG_4) { /* common case */ 112372b603eeSAlexei Starovoitov EMIT1(0x51); /* push rcx */ 112472b603eeSAlexei Starovoitov 112572b603eeSAlexei Starovoitov /* mov rcx, src_reg */ 112672b603eeSAlexei Starovoitov EMIT_mov(BPF_REG_4, src_reg); 112772b603eeSAlexei Starovoitov } 112872b603eeSAlexei Starovoitov 112972b603eeSAlexei Starovoitov /* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */ 11306364d7d7SJie Meng maybe_emit_1mod(&prog, dst_reg, 11316364d7d7SJie Meng BPF_CLASS(insn->code) == BPF_ALU64); 113272b603eeSAlexei Starovoitov 1133e5f02cacSBrendan Jackman b3 = simple_alu_opcodes[BPF_OP(insn->code)]; 113472b603eeSAlexei Starovoitov EMIT2(0xD3, add_1reg(b3, dst_reg)); 113572b603eeSAlexei Starovoitov 113672b603eeSAlexei Starovoitov if (src_reg != BPF_REG_4) 113772b603eeSAlexei Starovoitov EMIT1(0x59); /* pop rcx */ 113872b603eeSAlexei Starovoitov 113972b603eeSAlexei Starovoitov if (insn->dst_reg == BPF_REG_4) 114072b603eeSAlexei Starovoitov /* mov dst_reg, r11 */ 114172b603eeSAlexei Starovoitov EMIT_mov(insn->dst_reg, AUX_REG); 114272b603eeSAlexei Starovoitov break; 114372b603eeSAlexei Starovoitov 114462258278SAlexei Starovoitov case BPF_ALU | BPF_END | BPF_FROM_BE: 1145e430f34eSAlexei Starovoitov switch (imm32) { 114662258278SAlexei Starovoitov case 16: 1147a2c7a983SIngo Molnar /* Emit 'ror %ax, 8' to swap lower 2 bytes */ 114862258278SAlexei Starovoitov EMIT1(0x66); 1149e430f34eSAlexei Starovoitov if (is_ereg(dst_reg)) 115062258278SAlexei Starovoitov EMIT1(0x41); 1151e430f34eSAlexei Starovoitov EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8); 1152343f845bSAlexei Starovoitov 1153a2c7a983SIngo Molnar /* Emit 'movzwl eax, ax' */ 1154343f845bSAlexei Starovoitov if (is_ereg(dst_reg)) 1155343f845bSAlexei Starovoitov EMIT3(0x45, 0x0F, 0xB7); 1156343f845bSAlexei Starovoitov else 1157343f845bSAlexei Starovoitov EMIT2(0x0F, 0xB7); 1158343f845bSAlexei Starovoitov EMIT1(add_2reg(0xC0, dst_reg, dst_reg)); 115962258278SAlexei Starovoitov break; 116062258278SAlexei Starovoitov case 32: 1161a2c7a983SIngo Molnar /* Emit 'bswap eax' to swap lower 4 bytes */ 1162e430f34eSAlexei Starovoitov if (is_ereg(dst_reg)) 116362258278SAlexei Starovoitov EMIT2(0x41, 0x0F); 116462258278SAlexei Starovoitov else 116562258278SAlexei Starovoitov EMIT1(0x0F); 1166e430f34eSAlexei Starovoitov EMIT1(add_1reg(0xC8, dst_reg)); 116762258278SAlexei Starovoitov break; 116862258278SAlexei Starovoitov case 64: 1169a2c7a983SIngo Molnar /* Emit 'bswap rax' to swap 8 bytes */ 1170e430f34eSAlexei Starovoitov EMIT3(add_1mod(0x48, dst_reg), 0x0F, 1171e430f34eSAlexei Starovoitov add_1reg(0xC8, dst_reg)); 117262258278SAlexei Starovoitov break; 117362258278SAlexei Starovoitov } 117462258278SAlexei 
Starovoitov break; 117562258278SAlexei Starovoitov 117662258278SAlexei Starovoitov case BPF_ALU | BPF_END | BPF_FROM_LE: 1177343f845bSAlexei Starovoitov switch (imm32) { 1178343f845bSAlexei Starovoitov case 16: 1179a2c7a983SIngo Molnar /* 1180a2c7a983SIngo Molnar * Emit 'movzwl eax, ax' to zero extend 16-bit 1181343f845bSAlexei Starovoitov * into 64 bit 1182343f845bSAlexei Starovoitov */ 1183343f845bSAlexei Starovoitov if (is_ereg(dst_reg)) 1184343f845bSAlexei Starovoitov EMIT3(0x45, 0x0F, 0xB7); 1185343f845bSAlexei Starovoitov else 1186343f845bSAlexei Starovoitov EMIT2(0x0F, 0xB7); 1187343f845bSAlexei Starovoitov EMIT1(add_2reg(0xC0, dst_reg, dst_reg)); 1188343f845bSAlexei Starovoitov break; 1189343f845bSAlexei Starovoitov case 32: 1190a2c7a983SIngo Molnar /* Emit 'mov eax, eax' to clear upper 32-bits */ 1191343f845bSAlexei Starovoitov if (is_ereg(dst_reg)) 1192343f845bSAlexei Starovoitov EMIT1(0x45); 1193343f845bSAlexei Starovoitov EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg)); 1194343f845bSAlexei Starovoitov break; 1195343f845bSAlexei Starovoitov case 64: 1196343f845bSAlexei Starovoitov /* nop */ 1197343f845bSAlexei Starovoitov break; 1198343f845bSAlexei Starovoitov } 119962258278SAlexei Starovoitov break; 120062258278SAlexei Starovoitov 1201f5e81d11SDaniel Borkmann /* speculation barrier */ 1202f5e81d11SDaniel Borkmann case BPF_ST | BPF_NOSPEC: 1203f5e81d11SDaniel Borkmann if (boot_cpu_has(X86_FEATURE_XMM2)) 120487c87ecdSPeter Zijlstra EMIT_LFENCE(); 1205f5e81d11SDaniel Borkmann break; 1206f5e81d11SDaniel Borkmann 1207e430f34eSAlexei Starovoitov /* ST: *(u8*)(dst_reg + off) = imm */ 120862258278SAlexei Starovoitov case BPF_ST | BPF_MEM | BPF_B: 1209e430f34eSAlexei Starovoitov if (is_ereg(dst_reg)) 121062258278SAlexei Starovoitov EMIT2(0x41, 0xC6); 121162258278SAlexei Starovoitov else 121262258278SAlexei Starovoitov EMIT1(0xC6); 121362258278SAlexei Starovoitov goto st; 121462258278SAlexei Starovoitov case BPF_ST | BPF_MEM | BPF_H: 1215e430f34eSAlexei Starovoitov if (is_ereg(dst_reg)) 121662258278SAlexei Starovoitov EMIT3(0x66, 0x41, 0xC7); 121762258278SAlexei Starovoitov else 121862258278SAlexei Starovoitov EMIT2(0x66, 0xC7); 121962258278SAlexei Starovoitov goto st; 122062258278SAlexei Starovoitov case BPF_ST | BPF_MEM | BPF_W: 1221e430f34eSAlexei Starovoitov if (is_ereg(dst_reg)) 122262258278SAlexei Starovoitov EMIT2(0x41, 0xC7); 122362258278SAlexei Starovoitov else 122462258278SAlexei Starovoitov EMIT1(0xC7); 122562258278SAlexei Starovoitov goto st; 122662258278SAlexei Starovoitov case BPF_ST | BPF_MEM | BPF_DW: 1227e430f34eSAlexei Starovoitov EMIT2(add_1mod(0x48, dst_reg), 0xC7); 122862258278SAlexei Starovoitov 122962258278SAlexei Starovoitov st: if (is_imm8(insn->off)) 1230e430f34eSAlexei Starovoitov EMIT2(add_1reg(0x40, dst_reg), insn->off); 123162258278SAlexei Starovoitov else 1232e430f34eSAlexei Starovoitov EMIT1_off32(add_1reg(0x80, dst_reg), insn->off); 123362258278SAlexei Starovoitov 1234e430f34eSAlexei Starovoitov EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code))); 123562258278SAlexei Starovoitov break; 123662258278SAlexei Starovoitov 1237e430f34eSAlexei Starovoitov /* STX: *(u8*)(dst_reg + off) = src_reg */ 123862258278SAlexei Starovoitov case BPF_STX | BPF_MEM | BPF_B: 123962258278SAlexei Starovoitov case BPF_STX | BPF_MEM | BPF_H: 124062258278SAlexei Starovoitov case BPF_STX | BPF_MEM | BPF_W: 124162258278SAlexei Starovoitov case BPF_STX | BPF_MEM | BPF_DW: 12423b2744e6SAlexei Starovoitov emit_stx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off); 
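/*
 * Editor's sketch (illustrative only): assuming the standard mapping
 * of BPF R1 to rdi and R2 to rsi, a BPF_STX | BPF_MEM | BPF_DW store
 * of R2 at R1+8 comes out of emit_stx() as
 *
 *   48 89 77 08    mov qword ptr [rdi+0x8], rsi
 *
 * i.e. REX.W + 89 /r with a disp8; as in the BPF_ST path above, a
 * 32-bit displacement form is used once insn->off no longer fits in
 * an imm8.
 */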
124362258278SAlexei Starovoitov break; 124462258278SAlexei Starovoitov 1245e430f34eSAlexei Starovoitov /* LDX: dst_reg = *(u8*)(src_reg + off) */ 124662258278SAlexei Starovoitov case BPF_LDX | BPF_MEM | BPF_B: 12473dec541bSAlexei Starovoitov case BPF_LDX | BPF_PROBE_MEM | BPF_B: 124862258278SAlexei Starovoitov case BPF_LDX | BPF_MEM | BPF_H: 12493dec541bSAlexei Starovoitov case BPF_LDX | BPF_PROBE_MEM | BPF_H: 125062258278SAlexei Starovoitov case BPF_LDX | BPF_MEM | BPF_W: 12513dec541bSAlexei Starovoitov case BPF_LDX | BPF_PROBE_MEM | BPF_W: 125262258278SAlexei Starovoitov case BPF_LDX | BPF_MEM | BPF_DW: 12533dec541bSAlexei Starovoitov case BPF_LDX | BPF_PROBE_MEM | BPF_DW: 12544c5de127SAlexei Starovoitov if (BPF_MODE(insn->code) == BPF_PROBE_MEM) { 1255*588a25e9SAlexei Starovoitov /* Though the verifier prevents negative insn->off in BPF_PROBE_MEM 1256*588a25e9SAlexei Starovoitov * add abs(insn->off) to the limit to make sure that negative 1257*588a25e9SAlexei Starovoitov * offset won't be an issue. 1258*588a25e9SAlexei Starovoitov * insn->off is s16, so it won't affect valid pointers. 1259*588a25e9SAlexei Starovoitov */ 1260*588a25e9SAlexei Starovoitov u64 limit = TASK_SIZE_MAX + PAGE_SIZE + abs(insn->off); 1261*588a25e9SAlexei Starovoitov u8 *end_of_jmp1, *end_of_jmp2; 1262*588a25e9SAlexei Starovoitov 1263*588a25e9SAlexei Starovoitov /* Conservatively check that src_reg + insn->off is a kernel address: 1264*588a25e9SAlexei Starovoitov * 1. src_reg + insn->off >= limit 1265*588a25e9SAlexei Starovoitov * 2. src_reg + insn->off doesn't become small positive. 1266*588a25e9SAlexei Starovoitov * Cannot do src_reg + insn->off >= limit in one branch, 1267*588a25e9SAlexei Starovoitov * since it needs two spare registers, but JIT has only one. 1268*588a25e9SAlexei Starovoitov */ 1269*588a25e9SAlexei Starovoitov 1270*588a25e9SAlexei Starovoitov /* movabsq r11, limit */ 1271*588a25e9SAlexei Starovoitov EMIT2(add_1mod(0x48, AUX_REG), add_1reg(0xB8, AUX_REG)); 1272*588a25e9SAlexei Starovoitov EMIT((u32)limit, 4); 1273*588a25e9SAlexei Starovoitov EMIT(limit >> 32, 4); 1274*588a25e9SAlexei Starovoitov /* cmp src_reg, r11 */ 1275*588a25e9SAlexei Starovoitov maybe_emit_mod(&prog, src_reg, AUX_REG, true); 1276*588a25e9SAlexei Starovoitov EMIT2(0x39, add_2reg(0xC0, src_reg, AUX_REG)); 1277*588a25e9SAlexei Starovoitov /* if unsigned '<' goto end_of_jmp2 */ 1278*588a25e9SAlexei Starovoitov EMIT2(X86_JB, 0); 1279*588a25e9SAlexei Starovoitov end_of_jmp1 = prog; 1280*588a25e9SAlexei Starovoitov 1281*588a25e9SAlexei Starovoitov /* mov r11, src_reg */ 1282*588a25e9SAlexei Starovoitov emit_mov_reg(&prog, true, AUX_REG, src_reg); 1283*588a25e9SAlexei Starovoitov /* add r11, insn->off */ 1284*588a25e9SAlexei Starovoitov maybe_emit_1mod(&prog, AUX_REG, true); 1285*588a25e9SAlexei Starovoitov EMIT2_off32(0x81, add_1reg(0xC0, AUX_REG), insn->off); 1286*588a25e9SAlexei Starovoitov /* jmp if not carry to start_of_ldx 1287*588a25e9SAlexei Starovoitov * Otherwise ERR_PTR(-EINVAL) + 128 will be the user addr 1288*588a25e9SAlexei Starovoitov * that has to be rejected. 
1289*588a25e9SAlexei Starovoitov */ 1290*588a25e9SAlexei Starovoitov EMIT2(0x73 /* JNC */, 0); 1291*588a25e9SAlexei Starovoitov end_of_jmp2 = prog; 1292*588a25e9SAlexei Starovoitov 12934c5de127SAlexei Starovoitov /* xor dst_reg, dst_reg */ 12944c5de127SAlexei Starovoitov emit_mov_imm32(&prog, false, dst_reg, 0); 12954c5de127SAlexei Starovoitov /* jmp byte_after_ldx */ 12964c5de127SAlexei Starovoitov EMIT2(0xEB, 0); 12974c5de127SAlexei Starovoitov 1298*588a25e9SAlexei Starovoitov /* populate jmp_offset for JB above to jump to xor dst_reg */ 1299*588a25e9SAlexei Starovoitov end_of_jmp1[-1] = end_of_jmp2 - end_of_jmp1; 1300*588a25e9SAlexei Starovoitov /* populate jmp_offset for JNC above to jump to start_of_ldx */ 13014c5de127SAlexei Starovoitov start_of_ldx = prog; 1302*588a25e9SAlexei Starovoitov end_of_jmp2[-1] = start_of_ldx - end_of_jmp2; 13034c5de127SAlexei Starovoitov } 13043b2744e6SAlexei Starovoitov emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off); 13053dec541bSAlexei Starovoitov if (BPF_MODE(insn->code) == BPF_PROBE_MEM) { 13063dec541bSAlexei Starovoitov struct exception_table_entry *ex; 1307328aac5eSRavi Bangoria u8 *_insn = image + proglen + (start_of_ldx - temp); 13083dec541bSAlexei Starovoitov s64 delta; 13093dec541bSAlexei Starovoitov 13104c5de127SAlexei Starovoitov /* populate jmp_offset for JMP above */ 13114c5de127SAlexei Starovoitov start_of_ldx[-1] = prog - start_of_ldx; 13124c5de127SAlexei Starovoitov 13133dec541bSAlexei Starovoitov if (!bpf_prog->aux->extable) 13143dec541bSAlexei Starovoitov break; 13153dec541bSAlexei Starovoitov 13163dec541bSAlexei Starovoitov if (excnt >= bpf_prog->aux->num_exentries) { 13173dec541bSAlexei Starovoitov pr_err("ex gen bug\n"); 13183dec541bSAlexei Starovoitov return -EFAULT; 13193dec541bSAlexei Starovoitov } 13203dec541bSAlexei Starovoitov ex = &bpf_prog->aux->extable[excnt++]; 13213dec541bSAlexei Starovoitov 13223dec541bSAlexei Starovoitov delta = _insn - (u8 *)&ex->insn; 13233dec541bSAlexei Starovoitov if (!is_simm32(delta)) { 13243dec541bSAlexei Starovoitov pr_err("extable->insn doesn't fit into 32-bit\n"); 13253dec541bSAlexei Starovoitov return -EFAULT; 13263dec541bSAlexei Starovoitov } 13273dec541bSAlexei Starovoitov ex->insn = delta; 13283dec541bSAlexei Starovoitov 132946d28947SThomas Gleixner ex->type = EX_TYPE_BPF; 13303dec541bSAlexei Starovoitov 13313dec541bSAlexei Starovoitov if (dst_reg > BPF_REG_9) { 13323dec541bSAlexei Starovoitov pr_err("verifier error\n"); 13333dec541bSAlexei Starovoitov return -EFAULT; 13343dec541bSAlexei Starovoitov } 13353dec541bSAlexei Starovoitov /* 13363dec541bSAlexei Starovoitov * Compute size of x86 insn and its target dest x86 register. 13373dec541bSAlexei Starovoitov * ex_handler_bpf() will use lower 8 bits to adjust 13383dec541bSAlexei Starovoitov * pt_regs->ip to jump over this x86 instruction 13393dec541bSAlexei Starovoitov * and upper bits to figure out which pt_regs to zero out. 13403dec541bSAlexei Starovoitov * End result: x86 insn "mov rbx, qword ptr [rax+0x14]" 13413dec541bSAlexei Starovoitov * of 4 bytes will be ignored and rbx will be zero inited. 
13423dec541bSAlexei Starovoitov */ 1343433956e9SAlexei Starovoitov ex->fixup = (prog - start_of_ldx) | (reg2pt_regs[dst_reg] << 8); 13443dec541bSAlexei Starovoitov } 134562258278SAlexei Starovoitov break; 134662258278SAlexei Starovoitov 134791c960b0SBrendan Jackman case BPF_STX | BPF_ATOMIC | BPF_W: 134891c960b0SBrendan Jackman case BPF_STX | BPF_ATOMIC | BPF_DW: 1349981f94c3SBrendan Jackman if (insn->imm == (BPF_AND | BPF_FETCH) || 1350981f94c3SBrendan Jackman insn->imm == (BPF_OR | BPF_FETCH) || 1351981f94c3SBrendan Jackman insn->imm == (BPF_XOR | BPF_FETCH)) { 1352981f94c3SBrendan Jackman bool is64 = BPF_SIZE(insn->code) == BPF_DW; 1353b29dd96bSBrendan Jackman u32 real_src_reg = src_reg; 1354ced18582SJohan Almbladh u32 real_dst_reg = dst_reg; 1355ced18582SJohan Almbladh u8 *branch_target; 1356981f94c3SBrendan Jackman 1357981f94c3SBrendan Jackman /* 1358981f94c3SBrendan Jackman * Can't be implemented with a single x86 insn. 1359981f94c3SBrendan Jackman * Need to do a CMPXCHG loop. 1360981f94c3SBrendan Jackman */ 1361981f94c3SBrendan Jackman 1362981f94c3SBrendan Jackman /* Will need RAX as a CMPXCHG operand so save R0 */ 1363981f94c3SBrendan Jackman emit_mov_reg(&prog, true, BPF_REG_AX, BPF_REG_0); 1364b29dd96bSBrendan Jackman if (src_reg == BPF_REG_0) 1365b29dd96bSBrendan Jackman real_src_reg = BPF_REG_AX; 1366ced18582SJohan Almbladh if (dst_reg == BPF_REG_0) 1367ced18582SJohan Almbladh real_dst_reg = BPF_REG_AX; 1368b29dd96bSBrendan Jackman 1369981f94c3SBrendan Jackman branch_target = prog; 1370981f94c3SBrendan Jackman /* Load old value */ 1371981f94c3SBrendan Jackman emit_ldx(&prog, BPF_SIZE(insn->code), 1372ced18582SJohan Almbladh BPF_REG_0, real_dst_reg, insn->off); 1373981f94c3SBrendan Jackman /* 1374981f94c3SBrendan Jackman * Perform the (commutative) operation locally, 1375981f94c3SBrendan Jackman * put the result in the AUX_REG. 1376981f94c3SBrendan Jackman */ 1377981f94c3SBrendan Jackman emit_mov_reg(&prog, is64, AUX_REG, BPF_REG_0); 1378b29dd96bSBrendan Jackman maybe_emit_mod(&prog, AUX_REG, real_src_reg, is64); 1379981f94c3SBrendan Jackman EMIT2(simple_alu_opcodes[BPF_OP(insn->imm)], 1380b29dd96bSBrendan Jackman add_2reg(0xC0, AUX_REG, real_src_reg)); 1381981f94c3SBrendan Jackman /* Attempt to swap in new value */ 1382981f94c3SBrendan Jackman err = emit_atomic(&prog, BPF_CMPXCHG, 1383ced18582SJohan Almbladh real_dst_reg, AUX_REG, 1384ced18582SJohan Almbladh insn->off, 1385981f94c3SBrendan Jackman BPF_SIZE(insn->code)); 1386981f94c3SBrendan Jackman if (WARN_ON(err)) 1387981f94c3SBrendan Jackman return err; 1388981f94c3SBrendan Jackman /* 1389981f94c3SBrendan Jackman * ZF tells us whether we won the race. If it's 1390981f94c3SBrendan Jackman * cleared we need to try again. 
1391981f94c3SBrendan Jackman */ 1392981f94c3SBrendan Jackman EMIT2(X86_JNE, -(prog - branch_target) - 2); 1393981f94c3SBrendan Jackman /* Return the pre-modification value */ 1394b29dd96bSBrendan Jackman emit_mov_reg(&prog, is64, real_src_reg, BPF_REG_0); 1395981f94c3SBrendan Jackman /* Restore R0 after clobbering RAX */ 1396981f94c3SBrendan Jackman emit_mov_reg(&prog, true, BPF_REG_0, BPF_REG_AX); 1397981f94c3SBrendan Jackman break; 1398981f94c3SBrendan Jackman } 1399981f94c3SBrendan Jackman 140091c960b0SBrendan Jackman err = emit_atomic(&prog, insn->imm, dst_reg, src_reg, 140191c960b0SBrendan Jackman insn->off, BPF_SIZE(insn->code)); 140291c960b0SBrendan Jackman if (err) 140391c960b0SBrendan Jackman return err; 140462258278SAlexei Starovoitov break; 140562258278SAlexei Starovoitov 140662258278SAlexei Starovoitov /* call */ 140762258278SAlexei Starovoitov case BPF_JMP | BPF_CALL: 1408e430f34eSAlexei Starovoitov func = (u8 *) __bpf_call_base + imm32; 1409ebf7d1f5SMaciej Fijalkowski if (tail_call_reachable) { 1410ebf7d1f5SMaciej Fijalkowski EMIT3_off32(0x48, 0x8B, 0x85, 1411ebf7d1f5SMaciej Fijalkowski -(bpf_prog->aux->stack_depth + 8)); 1412ebf7d1f5SMaciej Fijalkowski if (!imm32 || emit_call(&prog, func, image + addrs[i - 1] + 7)) 1413ebf7d1f5SMaciej Fijalkowski return -EINVAL; 1414ebf7d1f5SMaciej Fijalkowski } else { 14153b2744e6SAlexei Starovoitov if (!imm32 || emit_call(&prog, func, image + addrs[i - 1])) 1416f3c2af7bSAlexei Starovoitov return -EINVAL; 1417ebf7d1f5SMaciej Fijalkowski } 141862258278SAlexei Starovoitov break; 141962258278SAlexei Starovoitov 142071189fa9SAlexei Starovoitov case BPF_JMP | BPF_TAIL_CALL: 1421428d5df1SDaniel Borkmann if (imm32) 1422428d5df1SDaniel Borkmann emit_bpf_tail_call_direct(&bpf_prog->aux->poke_tab[imm32 - 1], 1423dceba081SPeter Zijlstra &prog, image + addrs[i - 1], 1424ebf7d1f5SMaciej Fijalkowski callee_regs_used, 1425dceba081SPeter Zijlstra bpf_prog->aux->stack_depth, 1426dceba081SPeter Zijlstra ctx); 1427428d5df1SDaniel Borkmann else 1428ebf7d1f5SMaciej Fijalkowski emit_bpf_tail_call_indirect(&prog, 1429ebf7d1f5SMaciej Fijalkowski callee_regs_used, 1430dceba081SPeter Zijlstra bpf_prog->aux->stack_depth, 1431dceba081SPeter Zijlstra image + addrs[i - 1], 1432dceba081SPeter Zijlstra ctx); 1433b52f00e6SAlexei Starovoitov break; 1434b52f00e6SAlexei Starovoitov 143562258278SAlexei Starovoitov /* cond jump */ 143662258278SAlexei Starovoitov case BPF_JMP | BPF_JEQ | BPF_X: 143762258278SAlexei Starovoitov case BPF_JMP | BPF_JNE | BPF_X: 143862258278SAlexei Starovoitov case BPF_JMP | BPF_JGT | BPF_X: 143952afc51eSDaniel Borkmann case BPF_JMP | BPF_JLT | BPF_X: 144062258278SAlexei Starovoitov case BPF_JMP | BPF_JGE | BPF_X: 144152afc51eSDaniel Borkmann case BPF_JMP | BPF_JLE | BPF_X: 144262258278SAlexei Starovoitov case BPF_JMP | BPF_JSGT | BPF_X: 144352afc51eSDaniel Borkmann case BPF_JMP | BPF_JSLT | BPF_X: 144462258278SAlexei Starovoitov case BPF_JMP | BPF_JSGE | BPF_X: 144552afc51eSDaniel Borkmann case BPF_JMP | BPF_JSLE | BPF_X: 14463f5d6525SJiong Wang case BPF_JMP32 | BPF_JEQ | BPF_X: 14473f5d6525SJiong Wang case BPF_JMP32 | BPF_JNE | BPF_X: 14483f5d6525SJiong Wang case BPF_JMP32 | BPF_JGT | BPF_X: 14493f5d6525SJiong Wang case BPF_JMP32 | BPF_JLT | BPF_X: 14503f5d6525SJiong Wang case BPF_JMP32 | BPF_JGE | BPF_X: 14513f5d6525SJiong Wang case BPF_JMP32 | BPF_JLE | BPF_X: 14523f5d6525SJiong Wang case BPF_JMP32 | BPF_JSGT | BPF_X: 14533f5d6525SJiong Wang case BPF_JMP32 | BPF_JSLT | BPF_X: 14543f5d6525SJiong Wang case BPF_JMP32 | BPF_JSGE | BPF_X: 
14553f5d6525SJiong Wang case BPF_JMP32 | BPF_JSLE | BPF_X: 1456e430f34eSAlexei Starovoitov /* cmp dst_reg, src_reg */ 145774007cfcSBrendan Jackman maybe_emit_mod(&prog, dst_reg, src_reg, 145874007cfcSBrendan Jackman BPF_CLASS(insn->code) == BPF_JMP); 14593f5d6525SJiong Wang EMIT2(0x39, add_2reg(0xC0, dst_reg, src_reg)); 146062258278SAlexei Starovoitov goto emit_cond_jmp; 146162258278SAlexei Starovoitov 146262258278SAlexei Starovoitov case BPF_JMP | BPF_JSET | BPF_X: 14633f5d6525SJiong Wang case BPF_JMP32 | BPF_JSET | BPF_X: 1464e430f34eSAlexei Starovoitov /* test dst_reg, src_reg */ 146574007cfcSBrendan Jackman maybe_emit_mod(&prog, dst_reg, src_reg, 146674007cfcSBrendan Jackman BPF_CLASS(insn->code) == BPF_JMP); 14673f5d6525SJiong Wang EMIT2(0x85, add_2reg(0xC0, dst_reg, src_reg)); 146862258278SAlexei Starovoitov goto emit_cond_jmp; 146962258278SAlexei Starovoitov 147062258278SAlexei Starovoitov case BPF_JMP | BPF_JSET | BPF_K: 14713f5d6525SJiong Wang case BPF_JMP32 | BPF_JSET | BPF_K: 1472e430f34eSAlexei Starovoitov /* test dst_reg, imm32 */ 14736364d7d7SJie Meng maybe_emit_1mod(&prog, dst_reg, 14746364d7d7SJie Meng BPF_CLASS(insn->code) == BPF_JMP); 1475e430f34eSAlexei Starovoitov EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32); 147662258278SAlexei Starovoitov goto emit_cond_jmp; 147762258278SAlexei Starovoitov 147862258278SAlexei Starovoitov case BPF_JMP | BPF_JEQ | BPF_K: 147962258278SAlexei Starovoitov case BPF_JMP | BPF_JNE | BPF_K: 148062258278SAlexei Starovoitov case BPF_JMP | BPF_JGT | BPF_K: 148152afc51eSDaniel Borkmann case BPF_JMP | BPF_JLT | BPF_K: 148262258278SAlexei Starovoitov case BPF_JMP | BPF_JGE | BPF_K: 148352afc51eSDaniel Borkmann case BPF_JMP | BPF_JLE | BPF_K: 148462258278SAlexei Starovoitov case BPF_JMP | BPF_JSGT | BPF_K: 148552afc51eSDaniel Borkmann case BPF_JMP | BPF_JSLT | BPF_K: 148662258278SAlexei Starovoitov case BPF_JMP | BPF_JSGE | BPF_K: 148752afc51eSDaniel Borkmann case BPF_JMP | BPF_JSLE | BPF_K: 14883f5d6525SJiong Wang case BPF_JMP32 | BPF_JEQ | BPF_K: 14893f5d6525SJiong Wang case BPF_JMP32 | BPF_JNE | BPF_K: 14903f5d6525SJiong Wang case BPF_JMP32 | BPF_JGT | BPF_K: 14913f5d6525SJiong Wang case BPF_JMP32 | BPF_JLT | BPF_K: 14923f5d6525SJiong Wang case BPF_JMP32 | BPF_JGE | BPF_K: 14933f5d6525SJiong Wang case BPF_JMP32 | BPF_JLE | BPF_K: 14943f5d6525SJiong Wang case BPF_JMP32 | BPF_JSGT | BPF_K: 14953f5d6525SJiong Wang case BPF_JMP32 | BPF_JSLT | BPF_K: 14963f5d6525SJiong Wang case BPF_JMP32 | BPF_JSGE | BPF_K: 14973f5d6525SJiong Wang case BPF_JMP32 | BPF_JSLE | BPF_K: 149838f51c07SDaniel Borkmann /* test dst_reg, dst_reg to save one extra byte */ 149938f51c07SDaniel Borkmann if (imm32 == 0) { 150074007cfcSBrendan Jackman maybe_emit_mod(&prog, dst_reg, dst_reg, 150174007cfcSBrendan Jackman BPF_CLASS(insn->code) == BPF_JMP); 150238f51c07SDaniel Borkmann EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg)); 150338f51c07SDaniel Borkmann goto emit_cond_jmp; 150438f51c07SDaniel Borkmann } 150538f51c07SDaniel Borkmann 1506e430f34eSAlexei Starovoitov /* cmp dst_reg, imm8/32 */ 15076364d7d7SJie Meng maybe_emit_1mod(&prog, dst_reg, 15086364d7d7SJie Meng BPF_CLASS(insn->code) == BPF_JMP); 150962258278SAlexei Starovoitov 1510e430f34eSAlexei Starovoitov if (is_imm8(imm32)) 1511e430f34eSAlexei Starovoitov EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32); 151262258278SAlexei Starovoitov else 1513e430f34eSAlexei Starovoitov EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32); 151462258278SAlexei Starovoitov 1515a2c7a983SIngo Molnar emit_cond_jmp: /* Convert BPF opcode to x86 */ 
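/*
 * Editor's note, worked example (illustrative): for
 * BPF_JMP | BPF_JGT | BPF_K with dst_reg=R1 (rdi) and imm32=5, the
 * compare emitted above is
 *
 *   48 83 FF 05    cmp rdi, 0x5
 *
 * and the switch below then picks the unsigned variant X86_JA (0x77),
 * emitted either as a 2-byte short jump or, via the "add 0x10 and an
 * extra 0x0F prefix" rule, as a 6-byte near jump when jmp_offset does
 * not fit in an imm8.
 */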
151662258278SAlexei Starovoitov switch (BPF_OP(insn->code)) { 151762258278SAlexei Starovoitov case BPF_JEQ: 151862258278SAlexei Starovoitov jmp_cond = X86_JE; 151962258278SAlexei Starovoitov break; 152062258278SAlexei Starovoitov case BPF_JSET: 152162258278SAlexei Starovoitov case BPF_JNE: 152262258278SAlexei Starovoitov jmp_cond = X86_JNE; 152362258278SAlexei Starovoitov break; 152462258278SAlexei Starovoitov case BPF_JGT: 152562258278SAlexei Starovoitov /* GT is unsigned '>', JA in x86 */ 152662258278SAlexei Starovoitov jmp_cond = X86_JA; 152762258278SAlexei Starovoitov break; 152852afc51eSDaniel Borkmann case BPF_JLT: 152952afc51eSDaniel Borkmann /* LT is unsigned '<', JB in x86 */ 153052afc51eSDaniel Borkmann jmp_cond = X86_JB; 153152afc51eSDaniel Borkmann break; 153262258278SAlexei Starovoitov case BPF_JGE: 153362258278SAlexei Starovoitov /* GE is unsigned '>=', JAE in x86 */ 153462258278SAlexei Starovoitov jmp_cond = X86_JAE; 153562258278SAlexei Starovoitov break; 153652afc51eSDaniel Borkmann case BPF_JLE: 153752afc51eSDaniel Borkmann /* LE is unsigned '<=', JBE in x86 */ 153852afc51eSDaniel Borkmann jmp_cond = X86_JBE; 153952afc51eSDaniel Borkmann break; 154062258278SAlexei Starovoitov case BPF_JSGT: 1541a2c7a983SIngo Molnar /* Signed '>', GT in x86 */ 154262258278SAlexei Starovoitov jmp_cond = X86_JG; 154362258278SAlexei Starovoitov break; 154452afc51eSDaniel Borkmann case BPF_JSLT: 1545a2c7a983SIngo Molnar /* Signed '<', LT in x86 */ 154652afc51eSDaniel Borkmann jmp_cond = X86_JL; 154752afc51eSDaniel Borkmann break; 154862258278SAlexei Starovoitov case BPF_JSGE: 1549a2c7a983SIngo Molnar /* Signed '>=', GE in x86 */ 155062258278SAlexei Starovoitov jmp_cond = X86_JGE; 155162258278SAlexei Starovoitov break; 155252afc51eSDaniel Borkmann case BPF_JSLE: 1553a2c7a983SIngo Molnar /* Signed '<=', LE in x86 */ 155452afc51eSDaniel Borkmann jmp_cond = X86_JLE; 155552afc51eSDaniel Borkmann break; 1556a2c7a983SIngo Molnar default: /* to silence GCC warning */ 155762258278SAlexei Starovoitov return -EFAULT; 155862258278SAlexei Starovoitov } 155962258278SAlexei Starovoitov jmp_offset = addrs[i + insn->off] - addrs[i]; 156062258278SAlexei Starovoitov if (is_imm8(jmp_offset)) { 156193c5aeccSGary Lin if (jmp_padding) { 156293c5aeccSGary Lin /* To keep the jmp_offset valid, the extra bytes are 1563d9f6e12fSIngo Molnar * padded before the jump insn, so we subtract the 156493c5aeccSGary Lin * 2 bytes of jmp_cond insn from INSN_SZ_DIFF. 156593c5aeccSGary Lin * 156693c5aeccSGary Lin * If the previous pass already emits an imm8 156793c5aeccSGary Lin * jmp_cond, then this BPF insn won't shrink, so 156893c5aeccSGary Lin * "nops" is 0. 156993c5aeccSGary Lin * 157093c5aeccSGary Lin * On the other hand, if the previous pass emits an 157193c5aeccSGary Lin * imm32 jmp_cond, the extra 4 bytes(*) is padded to 157293c5aeccSGary Lin * keep the image from shrinking further. 157393c5aeccSGary Lin * 157493c5aeccSGary Lin * (*) imm32 jmp_cond is 6 bytes, and imm8 jmp_cond 157593c5aeccSGary Lin * is 2 bytes, so the size difference is 4 bytes. 
157693c5aeccSGary Lin */ 157793c5aeccSGary Lin nops = INSN_SZ_DIFF - 2; 157893c5aeccSGary Lin if (nops != 0 && nops != 4) { 157993c5aeccSGary Lin pr_err("unexpected jmp_cond padding: %d bytes\n", 158093c5aeccSGary Lin nops); 158193c5aeccSGary Lin return -EFAULT; 158293c5aeccSGary Lin } 1583ced50fc4SJiri Olsa emit_nops(&prog, nops); 158493c5aeccSGary Lin } 158562258278SAlexei Starovoitov EMIT2(jmp_cond, jmp_offset); 158662258278SAlexei Starovoitov } else if (is_simm32(jmp_offset)) { 158762258278SAlexei Starovoitov EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset); 15883b58908aSEric Dumazet } else { 158962258278SAlexei Starovoitov pr_err("cond_jmp gen bug %llx\n", jmp_offset); 159062258278SAlexei Starovoitov return -EFAULT; 15913b58908aSEric Dumazet } 159262258278SAlexei Starovoitov 15933b58908aSEric Dumazet break; 159462258278SAlexei Starovoitov 159562258278SAlexei Starovoitov case BPF_JMP | BPF_JA: 15961612a981SGianluca Borello if (insn->off == -1) 15971612a981SGianluca Borello /* -1 jmp instructions will always jump 15981612a981SGianluca Borello * backwards two bytes. Explicitly handling 15991612a981SGianluca Borello * this case avoids wasting too many passes 16001612a981SGianluca Borello * when there are long sequences of replaced 16011612a981SGianluca Borello * dead code. 16021612a981SGianluca Borello */ 16031612a981SGianluca Borello jmp_offset = -2; 16041612a981SGianluca Borello else 160562258278SAlexei Starovoitov jmp_offset = addrs[i + insn->off] - addrs[i]; 16061612a981SGianluca Borello 160793c5aeccSGary Lin if (!jmp_offset) { 160893c5aeccSGary Lin /* 160993c5aeccSGary Lin * If jmp_padding is enabled, the extra nops will 161093c5aeccSGary Lin * be inserted. Otherwise, optimize out nop jumps. 161193c5aeccSGary Lin */ 161293c5aeccSGary Lin if (jmp_padding) { 161393c5aeccSGary Lin /* There are 3 possible conditions. 161493c5aeccSGary Lin * (1) This BPF_JA is already optimized out in 161593c5aeccSGary Lin * the previous run, so there is no need 161693c5aeccSGary Lin * to pad any extra byte (0 byte). 161793c5aeccSGary Lin * (2) The previous pass emits an imm8 jmp, 161893c5aeccSGary Lin * so we pad 2 bytes to match the previous 161993c5aeccSGary Lin * insn size. 162093c5aeccSGary Lin * (3) Similarly, the previous pass emits an 162193c5aeccSGary Lin * imm32 jmp, and 5 bytes is padded. 162293c5aeccSGary Lin */ 162393c5aeccSGary Lin nops = INSN_SZ_DIFF; 162493c5aeccSGary Lin if (nops != 0 && nops != 2 && nops != 5) { 162593c5aeccSGary Lin pr_err("unexpected nop jump padding: %d bytes\n", 162693c5aeccSGary Lin nops); 162793c5aeccSGary Lin return -EFAULT; 162893c5aeccSGary Lin } 1629ced50fc4SJiri Olsa emit_nops(&prog, nops); 163093c5aeccSGary Lin } 163162258278SAlexei Starovoitov break; 163293c5aeccSGary Lin } 163362258278SAlexei Starovoitov emit_jmp: 163462258278SAlexei Starovoitov if (is_imm8(jmp_offset)) { 163593c5aeccSGary Lin if (jmp_padding) { 163693c5aeccSGary Lin /* To avoid breaking jmp_offset, the extra bytes 163793c5aeccSGary Lin * are padded before the actual jmp insn, so 1638d9f6e12fSIngo Molnar * 2 bytes is subtracted from INSN_SZ_DIFF. 163993c5aeccSGary Lin * 164093c5aeccSGary Lin * If the previous pass already emits an imm8 164193c5aeccSGary Lin * jmp, there is nothing to pad (0 byte). 164293c5aeccSGary Lin * 164393c5aeccSGary Lin * If it emits an imm32 jmp (5 bytes) previously 164493c5aeccSGary Lin * and now an imm8 jmp (2 bytes), then we pad 164593c5aeccSGary Lin * (5 - 2 = 3) bytes to stop the image from 164693c5aeccSGary Lin * shrinking further. 
164793c5aeccSGary Lin */ 164893c5aeccSGary Lin nops = INSN_SZ_DIFF - 2; 164993c5aeccSGary Lin if (nops != 0 && nops != 3) { 165093c5aeccSGary Lin pr_err("unexpected jump padding: %d bytes\n", 165193c5aeccSGary Lin nops); 165293c5aeccSGary Lin return -EFAULT; 165393c5aeccSGary Lin } 1654ced50fc4SJiri Olsa emit_nops(&prog, INSN_SZ_DIFF - 2); 165593c5aeccSGary Lin } 165662258278SAlexei Starovoitov EMIT2(0xEB, jmp_offset); 165762258278SAlexei Starovoitov } else if (is_simm32(jmp_offset)) { 165862258278SAlexei Starovoitov EMIT1_off32(0xE9, jmp_offset); 165962258278SAlexei Starovoitov } else { 166062258278SAlexei Starovoitov pr_err("jmp gen bug %llx\n", jmp_offset); 166162258278SAlexei Starovoitov return -EFAULT; 16623b58908aSEric Dumazet } 166362258278SAlexei Starovoitov break; 166462258278SAlexei Starovoitov 166562258278SAlexei Starovoitov case BPF_JMP | BPF_EXIT: 1666769e0de6SAlexei Starovoitov if (seen_exit) { 166762258278SAlexei Starovoitov jmp_offset = ctx->cleanup_addr - addrs[i]; 166862258278SAlexei Starovoitov goto emit_jmp; 166962258278SAlexei Starovoitov } 1670769e0de6SAlexei Starovoitov seen_exit = true; 1671a2c7a983SIngo Molnar /* Update cleanup_addr */ 167262258278SAlexei Starovoitov ctx->cleanup_addr = proglen; 1673ebf7d1f5SMaciej Fijalkowski pop_callee_regs(&prog, callee_regs_used); 167462258278SAlexei Starovoitov EMIT1(0xC9); /* leave */ 167562258278SAlexei Starovoitov EMIT1(0xC3); /* ret */ 16760a14842fSEric Dumazet break; 16770a14842fSEric Dumazet 16780a14842fSEric Dumazet default: 1679a2c7a983SIngo Molnar /* 1680a2c7a983SIngo Molnar * By design x86-64 JIT should support all BPF instructions. 168162258278SAlexei Starovoitov * This error will be seen if new instruction was added 1682a2c7a983SIngo Molnar * to the interpreter, but not to the JIT, or if there is 1683a2c7a983SIngo Molnar * junk in bpf_prog. 168462258278SAlexei Starovoitov */ 168562258278SAlexei Starovoitov pr_err("bpf_jit: unknown opcode %02x\n", insn->code); 1686f3c2af7bSAlexei Starovoitov return -EINVAL; 16870a14842fSEric Dumazet } 168862258278SAlexei Starovoitov 16890a14842fSEric Dumazet ilen = prog - temp; 1690e0ee9c12SAlexei Starovoitov if (ilen > BPF_MAX_INSN_SIZE) { 16919383191dSDaniel Borkmann pr_err("bpf_jit: fatal insn size error\n"); 1692e0ee9c12SAlexei Starovoitov return -EFAULT; 1693e0ee9c12SAlexei Starovoitov } 1694e0ee9c12SAlexei Starovoitov 16950a14842fSEric Dumazet if (image) { 1696e4d4d456SPiotr Krysiuk /* 1697e4d4d456SPiotr Krysiuk * When populating the image, assert that: 1698e4d4d456SPiotr Krysiuk * 1699e4d4d456SPiotr Krysiuk * i) We do not write beyond the allocated space, and 1700e4d4d456SPiotr Krysiuk * ii) addrs[i] did not change from the prior run, in order 1701e4d4d456SPiotr Krysiuk * to validate assumptions made for computing branch 1702e4d4d456SPiotr Krysiuk * displacements. 
1703e4d4d456SPiotr Krysiuk */ 1704e4d4d456SPiotr Krysiuk if (unlikely(proglen + ilen > oldproglen || 1705e4d4d456SPiotr Krysiuk proglen + ilen != addrs[i])) { 17069383191dSDaniel Borkmann pr_err("bpf_jit: fatal error\n"); 1707f3c2af7bSAlexei Starovoitov return -EFAULT; 17080a14842fSEric Dumazet } 17090a14842fSEric Dumazet memcpy(image + proglen, temp, ilen); 17100a14842fSEric Dumazet } 17110a14842fSEric Dumazet proglen += ilen; 17120a14842fSEric Dumazet addrs[i] = proglen; 17130a14842fSEric Dumazet prog = temp; 17140a14842fSEric Dumazet } 17153dec541bSAlexei Starovoitov 17163dec541bSAlexei Starovoitov if (image && excnt != bpf_prog->aux->num_exentries) { 17173dec541bSAlexei Starovoitov pr_err("extable is not populated\n"); 17183dec541bSAlexei Starovoitov return -EFAULT; 17193dec541bSAlexei Starovoitov } 1720f3c2af7bSAlexei Starovoitov return proglen; 1721f3c2af7bSAlexei Starovoitov } 1722f3c2af7bSAlexei Starovoitov 172385d33df3SMartin KaFai Lau static void save_regs(const struct btf_func_model *m, u8 **prog, int nr_args, 1724fec56f58SAlexei Starovoitov int stack_size) 1725fec56f58SAlexei Starovoitov { 1726fec56f58SAlexei Starovoitov int i; 1727fec56f58SAlexei Starovoitov /* Store function arguments to stack. 1728fec56f58SAlexei Starovoitov * For a function that accepts two pointers the sequence will be: 1729fec56f58SAlexei Starovoitov * mov QWORD PTR [rbp-0x10],rdi 1730fec56f58SAlexei Starovoitov * mov QWORD PTR [rbp-0x8],rsi 1731fec56f58SAlexei Starovoitov */ 1732fec56f58SAlexei Starovoitov for (i = 0; i < min(nr_args, 6); i++) 1733fec56f58SAlexei Starovoitov emit_stx(prog, bytes_to_bpf_size(m->arg_size[i]), 1734fec56f58SAlexei Starovoitov BPF_REG_FP, 1735fec56f58SAlexei Starovoitov i == 5 ? X86_REG_R9 : BPF_REG_1 + i, 1736fec56f58SAlexei Starovoitov -(stack_size - i * 8)); 1737fec56f58SAlexei Starovoitov } 1738fec56f58SAlexei Starovoitov 173985d33df3SMartin KaFai Lau static void restore_regs(const struct btf_func_model *m, u8 **prog, int nr_args, 1740fec56f58SAlexei Starovoitov int stack_size) 1741fec56f58SAlexei Starovoitov { 1742fec56f58SAlexei Starovoitov int i; 1743fec56f58SAlexei Starovoitov 1744fec56f58SAlexei Starovoitov /* Restore function arguments from stack. 1745fec56f58SAlexei Starovoitov * For a function that accepts two pointers the sequence will be: 1746fec56f58SAlexei Starovoitov * EMIT4(0x48, 0x8B, 0x7D, 0xF0); mov rdi,QWORD PTR [rbp-0x10] 1747fec56f58SAlexei Starovoitov * EMIT4(0x48, 0x8B, 0x75, 0xF8); mov rsi,QWORD PTR [rbp-0x8] 1748fec56f58SAlexei Starovoitov */ 1749fec56f58SAlexei Starovoitov for (i = 0; i < min(nr_args, 6); i++) 1750fec56f58SAlexei Starovoitov emit_ldx(prog, bytes_to_bpf_size(m->arg_size[i]), 1751fec56f58SAlexei Starovoitov i == 5 ? X86_REG_R9 : BPF_REG_1 + i, 1752fec56f58SAlexei Starovoitov BPF_REG_FP, 1753fec56f58SAlexei Starovoitov -(stack_size - i * 8)); 1754fec56f58SAlexei Starovoitov } 1755fec56f58SAlexei Starovoitov 17567e639208SKP Singh static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog, 1757356ed649SHou Tao struct bpf_prog *p, int stack_size, bool save_ret) 1758fec56f58SAlexei Starovoitov { 1759fec56f58SAlexei Starovoitov u8 *prog = *pprog; 1760ca06f55bSAlexei Starovoitov u8 *jmp_insn; 1761fec56f58SAlexei Starovoitov 1762ca06f55bSAlexei Starovoitov /* arg1: mov rdi, progs[i] */ 1763ca06f55bSAlexei Starovoitov emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p); 1764f2dd3b39SAlexei Starovoitov if (emit_call(&prog, 1765f2dd3b39SAlexei Starovoitov p->aux->sleepable ? 
__bpf_prog_enter_sleepable : 1766f2dd3b39SAlexei Starovoitov __bpf_prog_enter, prog)) 1767fec56f58SAlexei Starovoitov return -EINVAL; 1768fec56f58SAlexei Starovoitov /* remember prog start time returned by __bpf_prog_enter */ 1769fec56f58SAlexei Starovoitov emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0); 1770fec56f58SAlexei Starovoitov 1771ca06f55bSAlexei Starovoitov /* if (__bpf_prog_enter*(prog) == 0) 1772ca06f55bSAlexei Starovoitov * goto skip_exec_of_prog; 1773ca06f55bSAlexei Starovoitov */ 1774ca06f55bSAlexei Starovoitov EMIT3(0x48, 0x85, 0xC0); /* test rax,rax */ 1775ca06f55bSAlexei Starovoitov /* emit 2 nops that will be replaced with JE insn */ 1776ca06f55bSAlexei Starovoitov jmp_insn = prog; 1777ca06f55bSAlexei Starovoitov emit_nops(&prog, 2); 1778ca06f55bSAlexei Starovoitov 1779fec56f58SAlexei Starovoitov /* arg1: lea rdi, [rbp - stack_size] */ 1780fec56f58SAlexei Starovoitov EMIT4(0x48, 0x8D, 0x7D, -stack_size); 1781fec56f58SAlexei Starovoitov /* arg2: progs[i]->insnsi for interpreter */ 17827e639208SKP Singh if (!p->jited) 1783fec56f58SAlexei Starovoitov emit_mov_imm64(&prog, BPF_REG_2, 17847e639208SKP Singh (long) p->insnsi >> 32, 17857e639208SKP Singh (u32) (long) p->insnsi); 1786fec56f58SAlexei Starovoitov /* call JITed bpf program or interpreter */ 17877e639208SKP Singh if (emit_call(&prog, p->bpf_func, prog)) 1788fec56f58SAlexei Starovoitov return -EINVAL; 1789fec56f58SAlexei Starovoitov 1790356ed649SHou Tao /* 1791356ed649SHou Tao * BPF_TRAMP_MODIFY_RETURN trampolines can modify the return 1792ae240823SKP Singh * of the previous call which is then passed on the stack to 1793ae240823SKP Singh * the next BPF program. 1794356ed649SHou Tao * 1795356ed649SHou Tao * BPF_TRAMP_FENTRY trampoline may need to return the return 1796356ed649SHou Tao * value of BPF_PROG_TYPE_STRUCT_OPS prog. 1797ae240823SKP Singh */ 1798356ed649SHou Tao if (save_ret) 1799ae240823SKP Singh emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8); 1800ae240823SKP Singh 1801ca06f55bSAlexei Starovoitov /* replace 2 nops with JE insn, since jmp target is known */ 1802ca06f55bSAlexei Starovoitov jmp_insn[0] = X86_JE; 1803ca06f55bSAlexei Starovoitov jmp_insn[1] = prog - jmp_insn - 2; 1804ca06f55bSAlexei Starovoitov 1805fec56f58SAlexei Starovoitov /* arg1: mov rdi, progs[i] */ 1806f2dd3b39SAlexei Starovoitov emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p); 1807fec56f58SAlexei Starovoitov /* arg2: mov rsi, rbx <- start time in nsec */ 1808fec56f58SAlexei Starovoitov emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6); 1809f2dd3b39SAlexei Starovoitov if (emit_call(&prog, 1810f2dd3b39SAlexei Starovoitov p->aux->sleepable ? 
__bpf_prog_exit_sleepable : 1811f2dd3b39SAlexei Starovoitov __bpf_prog_exit, prog)) 1812fec56f58SAlexei Starovoitov return -EINVAL; 18137e639208SKP Singh 18147e639208SKP Singh *pprog = prog; 18157e639208SKP Singh return 0; 18167e639208SKP Singh } 18177e639208SKP Singh 18187e639208SKP Singh static void emit_align(u8 **pprog, u32 align) 18197e639208SKP Singh { 18207e639208SKP Singh u8 *target, *prog = *pprog; 18217e639208SKP Singh 18227e639208SKP Singh target = PTR_ALIGN(prog, align); 18237e639208SKP Singh if (target != prog) 18247e639208SKP Singh emit_nops(&prog, target - prog); 18257e639208SKP Singh 18267e639208SKP Singh *pprog = prog; 18277e639208SKP Singh } 18287e639208SKP Singh 18297e639208SKP Singh static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond) 18307e639208SKP Singh { 18317e639208SKP Singh u8 *prog = *pprog; 18327e639208SKP Singh s64 offset; 18337e639208SKP Singh 18347e639208SKP Singh offset = func - (ip + 2 + 4); 18357e639208SKP Singh if (!is_simm32(offset)) { 18367e639208SKP Singh pr_err("Target %p is out of range\n", func); 18377e639208SKP Singh return -EINVAL; 18387e639208SKP Singh } 18397e639208SKP Singh EMIT2_off32(0x0F, jmp_cond + 0x10, offset); 18407e639208SKP Singh *pprog = prog; 18417e639208SKP Singh return 0; 18427e639208SKP Singh } 18437e639208SKP Singh 18447e639208SKP Singh static int invoke_bpf(const struct btf_func_model *m, u8 **pprog, 1845356ed649SHou Tao struct bpf_tramp_progs *tp, int stack_size, 1846356ed649SHou Tao bool save_ret) 18477e639208SKP Singh { 18487e639208SKP Singh int i; 18497e639208SKP Singh u8 *prog = *pprog; 18507e639208SKP Singh 18517e639208SKP Singh for (i = 0; i < tp->nr_progs; i++) { 1852356ed649SHou Tao if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size, 1853356ed649SHou Tao save_ret)) 18547e639208SKP Singh return -EINVAL; 1855fec56f58SAlexei Starovoitov } 1856fec56f58SAlexei Starovoitov *pprog = prog; 1857fec56f58SAlexei Starovoitov return 0; 1858fec56f58SAlexei Starovoitov } 1859fec56f58SAlexei Starovoitov 1860ae240823SKP Singh static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog, 1861ae240823SKP Singh struct bpf_tramp_progs *tp, int stack_size, 1862ae240823SKP Singh u8 **branches) 1863ae240823SKP Singh { 1864ae240823SKP Singh u8 *prog = *pprog; 1865ced50fc4SJiri Olsa int i; 1866ae240823SKP Singh 1867ae240823SKP Singh /* The first fmod_ret program will receive a garbage return value. 1868ae240823SKP Singh * Set this to 0 to avoid confusing the program. 1869ae240823SKP Singh */ 1870ae240823SKP Singh emit_mov_imm32(&prog, false, BPF_REG_0, 0); 1871ae240823SKP Singh emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8); 1872ae240823SKP Singh for (i = 0; i < tp->nr_progs; i++) { 1873ae240823SKP Singh if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size, true)) 1874ae240823SKP Singh return -EINVAL; 1875ae240823SKP Singh 187613fac1d8SAlexei Starovoitov /* mod_ret prog stored return value into [rbp - 8]. Emit: 187713fac1d8SAlexei Starovoitov * if (*(u64 *)(rbp - 8) != 0) 1878ae240823SKP Singh * goto do_fexit; 1879ae240823SKP Singh */ 188013fac1d8SAlexei Starovoitov /* cmp QWORD PTR [rbp - 0x8], 0x0 */ 188113fac1d8SAlexei Starovoitov EMIT4(0x48, 0x83, 0x7d, 0xf8); EMIT1(0x00); 1882ae240823SKP Singh 1883ae240823SKP Singh /* Save the location of the branch and Generate 6 nops 1884ae240823SKP Singh * (4 bytes for an offset and 2 bytes for the jump) These nops 1885ae240823SKP Singh * are replaced with a conditional jump once do_fexit (i.e. 
the 1886ae240823SKP Singh * start of the fexit invocation) is finalized. 1887ae240823SKP Singh */ 1888ae240823SKP Singh branches[i] = prog; 1889ae240823SKP Singh emit_nops(&prog, 4 + 2); 1890ae240823SKP Singh } 1891ae240823SKP Singh 1892ae240823SKP Singh *pprog = prog; 1893ae240823SKP Singh return 0; 1894ae240823SKP Singh } 1895ae240823SKP Singh 1896356ed649SHou Tao static bool is_valid_bpf_tramp_flags(unsigned int flags) 1897356ed649SHou Tao { 1898356ed649SHou Tao if ((flags & BPF_TRAMP_F_RESTORE_REGS) && 1899356ed649SHou Tao (flags & BPF_TRAMP_F_SKIP_FRAME)) 1900356ed649SHou Tao return false; 1901356ed649SHou Tao 1902356ed649SHou Tao /* 1903356ed649SHou Tao * BPF_TRAMP_F_RET_FENTRY_RET is only used by bpf_struct_ops, 1904356ed649SHou Tao * and it must be used alone. 1905356ed649SHou Tao */ 1906356ed649SHou Tao if ((flags & BPF_TRAMP_F_RET_FENTRY_RET) && 1907356ed649SHou Tao (flags & ~BPF_TRAMP_F_RET_FENTRY_RET)) 1908356ed649SHou Tao return false; 1909356ed649SHou Tao 1910356ed649SHou Tao return true; 1911356ed649SHou Tao } 1912356ed649SHou Tao 1913fec56f58SAlexei Starovoitov /* Example: 1914fec56f58SAlexei Starovoitov * __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev); 1915fec56f58SAlexei Starovoitov * its 'struct btf_func_model' will be nr_args=2 1916fec56f58SAlexei Starovoitov * The assembly code when eth_type_trans is executing after trampoline: 1917fec56f58SAlexei Starovoitov * 1918fec56f58SAlexei Starovoitov * push rbp 1919fec56f58SAlexei Starovoitov * mov rbp, rsp 1920fec56f58SAlexei Starovoitov * sub rsp, 16 // space for skb and dev 1921fec56f58SAlexei Starovoitov * push rbx // temp regs to pass start time 1922fec56f58SAlexei Starovoitov * mov qword ptr [rbp - 16], rdi // save skb pointer to stack 1923fec56f58SAlexei Starovoitov * mov qword ptr [rbp - 8], rsi // save dev pointer to stack 1924fec56f58SAlexei Starovoitov * call __bpf_prog_enter // rcu_read_lock and preempt_disable 1925fec56f58SAlexei Starovoitov * mov rbx, rax // remember start time if bpf stats are enabled 1926fec56f58SAlexei Starovoitov * lea rdi, [rbp - 16] // R1==ctx of bpf prog 1927fec56f58SAlexei Starovoitov * call addr_of_jited_FENTRY_prog 1928fec56f58SAlexei Starovoitov * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off 1929fec56f58SAlexei Starovoitov * mov rsi, rbx // prog start time 1930fec56f58SAlexei Starovoitov * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math 1931fec56f58SAlexei Starovoitov * mov rdi, qword ptr [rbp - 16] // restore skb pointer from stack 1932fec56f58SAlexei Starovoitov * mov rsi, qword ptr [rbp - 8] // restore dev pointer from stack 1933fec56f58SAlexei Starovoitov * pop rbx 1934fec56f58SAlexei Starovoitov * leave 1935fec56f58SAlexei Starovoitov * ret 1936fec56f58SAlexei Starovoitov * 1937fec56f58SAlexei Starovoitov * eth_type_trans has a 5-byte nop at the beginning. These 5 bytes will be 1938fec56f58SAlexei Starovoitov * replaced with 'call generated_bpf_trampoline'. When it returns 1939fec56f58SAlexei Starovoitov * eth_type_trans will continue executing with original skb and dev pointers.
1940fec56f58SAlexei Starovoitov * 1941fec56f58SAlexei Starovoitov * The assembly code when eth_type_trans is called from trampoline: 1942fec56f58SAlexei Starovoitov * 1943fec56f58SAlexei Starovoitov * push rbp 1944fec56f58SAlexei Starovoitov * mov rbp, rsp 1945fec56f58SAlexei Starovoitov * sub rsp, 24 // space for skb, dev, return value 1946fec56f58SAlexei Starovoitov * push rbx // temp regs to pass start time 1947fec56f58SAlexei Starovoitov * mov qword ptr [rbp - 24], rdi // save skb pointer to stack 1948fec56f58SAlexei Starovoitov * mov qword ptr [rbp - 16], rsi // save dev pointer to stack 1949fec56f58SAlexei Starovoitov * call __bpf_prog_enter // rcu_read_lock and preempt_disable 1950fec56f58SAlexei Starovoitov * mov rbx, rax // remember start time if bpf stats are enabled 1951fec56f58SAlexei Starovoitov * lea rdi, [rbp - 24] // R1==ctx of bpf prog 1952fec56f58SAlexei Starovoitov * call addr_of_jited_FENTRY_prog // bpf prog can access skb and dev 1953fec56f58SAlexei Starovoitov * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off 1954fec56f58SAlexei Starovoitov * mov rsi, rbx // prog start time 1955fec56f58SAlexei Starovoitov * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math 1956fec56f58SAlexei Starovoitov * mov rdi, qword ptr [rbp - 24] // restore skb pointer from stack 1957fec56f58SAlexei Starovoitov * mov rsi, qword ptr [rbp - 16] // restore dev pointer from stack 1958fec56f58SAlexei Starovoitov * call eth_type_trans+5 // execute body of eth_type_trans 1959fec56f58SAlexei Starovoitov * mov qword ptr [rbp - 8], rax // save return value 1960fec56f58SAlexei Starovoitov * call __bpf_prog_enter // rcu_read_lock and preempt_disable 1961fec56f58SAlexei Starovoitov * mov rbx, rax // remember start time if bpf stats are enabled 1962fec56f58SAlexei Starovoitov * lea rdi, [rbp - 24] // R1==ctx of bpf prog 1963fec56f58SAlexei Starovoitov * call addr_of_jited_FEXIT_prog // bpf prog can access skb, dev, return value 1964fec56f58SAlexei Starovoitov * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off 1965fec56f58SAlexei Starovoitov * mov rsi, rbx // prog start time 1966fec56f58SAlexei Starovoitov * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math 1967fec56f58SAlexei Starovoitov * mov rax, qword ptr [rbp - 8] // restore eth_type_trans's return value 1968fec56f58SAlexei Starovoitov * pop rbx 1969fec56f58SAlexei Starovoitov * leave 1970fec56f58SAlexei Starovoitov * add rsp, 8 // skip eth_type_trans's frame 1971fec56f58SAlexei Starovoitov * ret // return to its caller 1972fec56f58SAlexei Starovoitov */ 1973e21aa341SAlexei Starovoitov int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end, 197485d33df3SMartin KaFai Lau const struct btf_func_model *m, u32 flags, 197588fd9e53SKP Singh struct bpf_tramp_progs *tprogs, 1976fec56f58SAlexei Starovoitov void *orig_call) 1977fec56f58SAlexei Starovoitov { 1978ced50fc4SJiri Olsa int ret, i, nr_args = m->nr_args; 1979fec56f58SAlexei Starovoitov int stack_size = nr_args * 8; 198088fd9e53SKP Singh struct bpf_tramp_progs *fentry = &tprogs[BPF_TRAMP_FENTRY]; 198188fd9e53SKP Singh struct bpf_tramp_progs *fexit = &tprogs[BPF_TRAMP_FEXIT]; 1982ae240823SKP Singh struct bpf_tramp_progs *fmod_ret = &tprogs[BPF_TRAMP_MODIFY_RETURN]; 1983ae240823SKP Singh u8 **branches = NULL; 1984fec56f58SAlexei Starovoitov u8 *prog; 1985356ed649SHou Tao bool save_ret; 1986fec56f58SAlexei Starovoitov 1987fec56f58SAlexei Starovoitov /* x86-64 supports up to 6 arguments.
7+ can be added in the future */ 1988fec56f58SAlexei Starovoitov if (nr_args > 6) 1989fec56f58SAlexei Starovoitov return -ENOTSUPP; 1990fec56f58SAlexei Starovoitov 1991356ed649SHou Tao if (!is_valid_bpf_tramp_flags(flags)) 1992fec56f58SAlexei Starovoitov return -EINVAL; 1993fec56f58SAlexei Starovoitov 1994356ed649SHou Tao /* room for return value of orig_call or fentry prog */ 1995356ed649SHou Tao save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET); 1996356ed649SHou Tao if (save_ret) 1997356ed649SHou Tao stack_size += 8; 1998fec56f58SAlexei Starovoitov 19997e6f3cd8SJiri Olsa if (flags & BPF_TRAMP_F_IP_ARG) 20007e6f3cd8SJiri Olsa stack_size += 8; /* room for IP address argument */ 20017e6f3cd8SJiri Olsa 2002fec56f58SAlexei Starovoitov if (flags & BPF_TRAMP_F_SKIP_FRAME) 2003fec56f58SAlexei Starovoitov /* skip patched call instruction and point orig_call to actual 2004fec56f58SAlexei Starovoitov * body of the kernel function. 2005fec56f58SAlexei Starovoitov */ 20064b3da77bSDaniel Borkmann orig_call += X86_PATCH_SIZE; 2007fec56f58SAlexei Starovoitov 2008fec56f58SAlexei Starovoitov prog = image; 2009fec56f58SAlexei Starovoitov 2010fec56f58SAlexei Starovoitov EMIT1(0x55); /* push rbp */ 2011fec56f58SAlexei Starovoitov EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */ 2012fec56f58SAlexei Starovoitov EMIT4(0x48, 0x83, 0xEC, stack_size); /* sub rsp, stack_size */ 2013fec56f58SAlexei Starovoitov EMIT1(0x53); /* push rbx */ 2014fec56f58SAlexei Starovoitov 20157e6f3cd8SJiri Olsa if (flags & BPF_TRAMP_F_IP_ARG) { 20167e6f3cd8SJiri Olsa /* Store IP address of the traced function: 20177e6f3cd8SJiri Olsa * mov rax, QWORD PTR [rbp + 8] 20187e6f3cd8SJiri Olsa * sub rax, X86_PATCH_SIZE 20197e6f3cd8SJiri Olsa * mov QWORD PTR [rbp - stack_size], rax 20207e6f3cd8SJiri Olsa */ 20217e6f3cd8SJiri Olsa emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, 8); 20227e6f3cd8SJiri Olsa EMIT4(0x48, 0x83, 0xe8, X86_PATCH_SIZE); 20237e6f3cd8SJiri Olsa emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -stack_size); 20247e6f3cd8SJiri Olsa 20257e6f3cd8SJiri Olsa /* Continue with stack_size for regs storage, stack will 20267e6f3cd8SJiri Olsa * be correctly restored with 'leave' instruction. 
20277e6f3cd8SJiri Olsa */ 20287e6f3cd8SJiri Olsa stack_size -= 8; 20297e6f3cd8SJiri Olsa } 20307e6f3cd8SJiri Olsa 2031fec56f58SAlexei Starovoitov save_regs(m, &prog, nr_args, stack_size); 2032fec56f58SAlexei Starovoitov 2033e21aa341SAlexei Starovoitov if (flags & BPF_TRAMP_F_CALL_ORIG) { 2034e21aa341SAlexei Starovoitov /* arg1: mov rdi, im */ 2035e21aa341SAlexei Starovoitov emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im); 2036e21aa341SAlexei Starovoitov if (emit_call(&prog, __bpf_tramp_enter, prog)) { 2037e21aa341SAlexei Starovoitov ret = -EINVAL; 2038e21aa341SAlexei Starovoitov goto cleanup; 2039e21aa341SAlexei Starovoitov } 2040e21aa341SAlexei Starovoitov } 2041e21aa341SAlexei Starovoitov 204288fd9e53SKP Singh if (fentry->nr_progs) 2043356ed649SHou Tao if (invoke_bpf(m, &prog, fentry, stack_size, 2044356ed649SHou Tao flags & BPF_TRAMP_F_RET_FENTRY_RET)) 2045fec56f58SAlexei Starovoitov return -EINVAL; 2046fec56f58SAlexei Starovoitov 2047ae240823SKP Singh if (fmod_ret->nr_progs) { 2048ae240823SKP Singh branches = kcalloc(fmod_ret->nr_progs, sizeof(u8 *), 2049ae240823SKP Singh GFP_KERNEL); 2050ae240823SKP Singh if (!branches) 2051ae240823SKP Singh return -ENOMEM; 2052ae240823SKP Singh 2053ae240823SKP Singh if (invoke_bpf_mod_ret(m, &prog, fmod_ret, stack_size, 2054ae240823SKP Singh branches)) { 2055ae240823SKP Singh ret = -EINVAL; 2056ae240823SKP Singh goto cleanup; 2057ae240823SKP Singh } 2058ae240823SKP Singh } 2059ae240823SKP Singh 2060fec56f58SAlexei Starovoitov if (flags & BPF_TRAMP_F_CALL_ORIG) { 2061fec56f58SAlexei Starovoitov restore_regs(m, &prog, nr_args, stack_size); 2062fec56f58SAlexei Starovoitov 2063fec56f58SAlexei Starovoitov /* call original function */ 2064ae240823SKP Singh if (emit_call(&prog, orig_call, prog)) { 2065ae240823SKP Singh ret = -EINVAL; 2066ae240823SKP Singh goto cleanup; 2067ae240823SKP Singh } 2068fec56f58SAlexei Starovoitov /* remember return value in a stack for bpf prog to access */ 2069fec56f58SAlexei Starovoitov emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8); 2070e21aa341SAlexei Starovoitov im->ip_after_call = prog; 2071b1f480bcSIngo Molnar memcpy(prog, x86_nops[5], X86_PATCH_SIZE); 2072b9082970SStanislav Fomichev prog += X86_PATCH_SIZE; 2073fec56f58SAlexei Starovoitov } 2074fec56f58SAlexei Starovoitov 2075ae240823SKP Singh if (fmod_ret->nr_progs) { 2076ae240823SKP Singh /* From Intel 64 and IA-32 Architectures Optimization 2077ae240823SKP Singh * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler 2078ae240823SKP Singh * Coding Rule 11: All branch targets should be 16-byte 2079ae240823SKP Singh * aligned. 2080ae240823SKP Singh */ 2081ae240823SKP Singh emit_align(&prog, 16); 2082ae240823SKP Singh /* Update the branches saved in invoke_bpf_mod_ret with the 2083ae240823SKP Singh * aligned address of do_fexit. 2084ae240823SKP Singh */ 2085ae240823SKP Singh for (i = 0; i < fmod_ret->nr_progs; i++) 2086ae240823SKP Singh emit_cond_near_jump(&branches[i], prog, branches[i], 2087ae240823SKP Singh X86_JNE); 2088ae240823SKP Singh } 2089ae240823SKP Singh 209088fd9e53SKP Singh if (fexit->nr_progs) 2091356ed649SHou Tao if (invoke_bpf(m, &prog, fexit, stack_size, false)) { 2092ae240823SKP Singh ret = -EINVAL; 2093ae240823SKP Singh goto cleanup; 2094ae240823SKP Singh } 2095fec56f58SAlexei Starovoitov 2096fec56f58SAlexei Starovoitov if (flags & BPF_TRAMP_F_RESTORE_REGS) 2097fec56f58SAlexei Starovoitov restore_regs(m, &prog, nr_args, stack_size); 2098fec56f58SAlexei Starovoitov 2099ae240823SKP Singh /* This needs to be done regardless. 
If there were fmod_ret programs, 2100ae240823SKP Singh * the return value is only updated on the stack and still needs to be 2101ae240823SKP Singh * restored to R0. 2102ae240823SKP Singh */ 2103e21aa341SAlexei Starovoitov if (flags & BPF_TRAMP_F_CALL_ORIG) { 2104e21aa341SAlexei Starovoitov im->ip_epilogue = prog; 2105e21aa341SAlexei Starovoitov /* arg1: mov rdi, im */ 2106e21aa341SAlexei Starovoitov emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im); 2107e21aa341SAlexei Starovoitov if (emit_call(&prog, __bpf_tramp_exit, prog)) { 2108e21aa341SAlexei Starovoitov ret = -EINVAL; 2109e21aa341SAlexei Starovoitov goto cleanup; 2110e21aa341SAlexei Starovoitov } 2111e21aa341SAlexei Starovoitov } 2112356ed649SHou Tao /* restore return value of orig_call or fentry prog back into RAX */ 2113356ed649SHou Tao if (save_ret) 2114356ed649SHou Tao emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8); 2115fec56f58SAlexei Starovoitov 2116fec56f58SAlexei Starovoitov EMIT1(0x5B); /* pop rbx */ 2117fec56f58SAlexei Starovoitov EMIT1(0xC9); /* leave */ 2118fec56f58SAlexei Starovoitov if (flags & BPF_TRAMP_F_SKIP_FRAME) 2119fec56f58SAlexei Starovoitov /* skip our return address and return to parent */ 2120fec56f58SAlexei Starovoitov EMIT4(0x48, 0x83, 0xC4, 8); /* add rsp, 8 */ 2121fec56f58SAlexei Starovoitov EMIT1(0xC3); /* ret */ 212285d33df3SMartin KaFai Lau /* Make sure the trampoline generation logic doesn't overflow */ 2123ae240823SKP Singh if (WARN_ON_ONCE(prog > (u8 *)image_end - BPF_INSN_SAFETY)) { 2124ae240823SKP Singh ret = -EFAULT; 2125ae240823SKP Singh goto cleanup; 2126ae240823SKP Singh } 2127ae240823SKP Singh ret = prog - (u8 *)image; 2128ae240823SKP Singh 2129ae240823SKP Singh cleanup: 2130ae240823SKP Singh kfree(branches); 2131ae240823SKP Singh return ret; 2132fec56f58SAlexei Starovoitov } 2133fec56f58SAlexei Starovoitov 213475ccbef6SBjörn Töpel static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs) 213575ccbef6SBjörn Töpel { 21367e639208SKP Singh u8 *jg_reloc, *prog = *pprog; 2137ced50fc4SJiri Olsa int pivot, err, jg_bytes = 1; 213875ccbef6SBjörn Töpel s64 jg_offset; 213975ccbef6SBjörn Töpel 214075ccbef6SBjörn Töpel if (a == b) { 214175ccbef6SBjörn Töpel /* Leaf node of recursion, i.e. not a range of indices 214275ccbef6SBjörn Töpel * anymore. 214375ccbef6SBjörn Töpel */ 214475ccbef6SBjörn Töpel EMIT1(add_1mod(0x48, BPF_REG_3)); /* cmp rdx,func */ 214575ccbef6SBjörn Töpel if (!is_simm32(progs[a])) 214675ccbef6SBjörn Töpel return -1; 214775ccbef6SBjörn Töpel EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3), 214875ccbef6SBjörn Töpel progs[a]); 214975ccbef6SBjörn Töpel err = emit_cond_near_jump(&prog, /* je func */ 215075ccbef6SBjörn Töpel (void *)progs[a], prog, 215175ccbef6SBjörn Töpel X86_JE); 215275ccbef6SBjörn Töpel if (err) 215375ccbef6SBjörn Töpel return err; 215475ccbef6SBjörn Töpel 215587c87ecdSPeter Zijlstra emit_indirect_jump(&prog, 2 /* rdx */, prog); 215675ccbef6SBjörn Töpel 215775ccbef6SBjörn Töpel *pprog = prog; 215875ccbef6SBjörn Töpel return 0; 215975ccbef6SBjörn Töpel } 216075ccbef6SBjörn Töpel 216175ccbef6SBjörn Töpel /* Not a leaf node, so we pivot, and recursively descend into 216275ccbef6SBjörn Töpel * the lower and upper ranges. 
static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs)
{
	u8 *jg_reloc, *prog = *pprog;
	int pivot, err, jg_bytes = 1;
	s64 jg_offset;

	if (a == b) {
		/* Leaf node of recursion, i.e. not a range of indices
		 * anymore.
		 */
		EMIT1(add_1mod(0x48, BPF_REG_3));	/* cmp rdx,func */
		if (!is_simm32(progs[a]))
			return -1;
		EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3),
			    progs[a]);
		err = emit_cond_near_jump(&prog,	/* je func */
					  (void *)progs[a], prog,
					  X86_JE);
		if (err)
			return err;

		emit_indirect_jump(&prog, 2 /* rdx */, prog);

		*pprog = prog;
		return 0;
	}

	/* Not a leaf node, so we pivot, and recursively descend into
	 * the lower and upper ranges.
	 */
	pivot = (b - a) / 2;
	EMIT1(add_1mod(0x48, BPF_REG_3));		/* cmp rdx,func */
	if (!is_simm32(progs[a + pivot]))
		return -1;
	EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3), progs[a + pivot]);

	if (pivot > 2) {				/* jg upper_part */
		/* Require near jump. */
		jg_bytes = 4;
		EMIT2_off32(0x0F, X86_JG + 0x10, 0);
	} else {
		EMIT2(X86_JG, 0);
	}
	jg_reloc = prog;

	err = emit_bpf_dispatcher(&prog, a, a + pivot,	/* emit lower_part */
				  progs);
	if (err)
		return err;

	/* From Intel 64 and IA-32 Architectures Optimization
	 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
	 * Coding Rule 11: All branch targets should be 16-byte
	 * aligned.
	 */
	emit_align(&prog, 16);
	jg_offset = prog - jg_reloc;
	emit_code(jg_reloc - jg_bytes, jg_offset, jg_bytes);

	err = emit_bpf_dispatcher(&prog, a + pivot + 1,	/* emit upper_part */
				  b, progs);
	if (err)
		return err;

	*pprog = prog;
	return 0;
}

static int cmp_ips(const void *a, const void *b)
{
	const s64 *ipa = a;
	const s64 *ipb = b;

	if (*ipa > *ipb)
		return 1;
	if (*ipa < *ipb)
		return -1;
	return 0;
}

int arch_prepare_bpf_dispatcher(void *image, s64 *funcs, int num_funcs)
{
	u8 *prog = image;

	sort(funcs, num_funcs, sizeof(funcs[0]), cmp_ips, NULL);
	return emit_bpf_dispatcher(&prog, 0, num_funcs - 1, funcs);
}

struct x64_jit_data {
	struct bpf_binary_header *header;
	int *addrs;
	u8 *image;
	int proglen;
	struct jit_context ctx;
};

#define MAX_PASSES 20
#define PADDING_PASSES (MAX_PASSES - 5)

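/* A best-effort note on the pass limits above: do_jit() runs with
 * padding == false for the first PADDING_PASSES iterations, letting
 * branches shrink to their short forms as the address estimates in
 * addrs[] tighten. The remaining passes run with padding == true,
 * NOP-padding any instruction that would shrink further, so the image
 * size stabilizes and the compile loop below can converge.
 */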
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
	struct bpf_binary_header *header = NULL;
	struct bpf_prog *tmp, *orig_prog = prog;
	struct x64_jit_data *jit_data;
	int proglen, oldproglen = 0;
	struct jit_context ctx = {};
	bool tmp_blinded = false;
	bool extra_pass = false;
	bool padding = false;
	u8 *image = NULL;
	int *addrs;
	int pass;
	int i;

	if (!prog->jit_requested)
		return orig_prog;

	tmp = bpf_jit_blind_constants(prog);
	/*
	 * If blinding was requested and we failed during blinding,
	 * we must fall back to the interpreter.
	 */
	if (IS_ERR(tmp))
		return orig_prog;
	if (tmp != prog) {
		tmp_blinded = true;
		prog = tmp;
	}

	jit_data = prog->aux->jit_data;
	if (!jit_data) {
		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
		if (!jit_data) {
			prog = orig_prog;
			goto out;
		}
		prog->aux->jit_data = jit_data;
	}
	addrs = jit_data->addrs;
	if (addrs) {
		ctx = jit_data->ctx;
		oldproglen = jit_data->proglen;
		image = jit_data->image;
		header = jit_data->header;
		extra_pass = true;
		padding = true;
		goto skip_init_addrs;
	}
	addrs = kvmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL);
	if (!addrs) {
		prog = orig_prog;
		goto out_addrs;
	}

	/*
	 * Before the first pass, make a rough estimation of addrs[]:
	 * each BPF instruction is translated to less than 64 bytes.
	 */
	for (proglen = 0, i = 0; i <= prog->len; i++) {
		proglen += 64;
		addrs[i] = proglen;
	}
	ctx.cleanup_addr = proglen;
skip_init_addrs:

	/*
	 * JITed image shrinks with every pass and the loop iterates
	 * until the image stops shrinking. Very large BPF programs
	 * may converge on the last pass. In that case do one more
	 * pass to emit the final image.
	 */
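	/* For example (illustrative): a conditional jump whose target is
	 * first estimated beyond the -128..127 byte range is emitted in
	 * its 6-byte near form; once later passes tighten addrs[] enough,
	 * it can use the 2-byte short form instead, shrinking the image
	 * and pulling other jump targets closer in turn.
	 */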
	for (pass = 0; pass < MAX_PASSES || image; pass++) {
		if (!padding && pass >= PADDING_PASSES)
			padding = true;
		proglen = do_jit(prog, addrs, image, oldproglen, &ctx, padding);
		if (proglen <= 0) {
out_image:
			image = NULL;
			if (header)
				bpf_jit_binary_free(header);
			prog = orig_prog;
			goto out_addrs;
		}
		if (image) {
			if (proglen != oldproglen) {
				pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
				       proglen, oldproglen);
				goto out_image;
			}
			break;
		}
		if (proglen == oldproglen) {
			/*
			 * The number of entries in extable is the number of BPF_LDX
			 * insns that access kernel memory via "pointer to BTF type".
			 * The verifier changed their opcode from LDX|MEM|size
			 * to LDX|PROBE_MEM|size to make JITing easier.
			 */
			u32 align = __alignof__(struct exception_table_entry);
			u32 extable_size = prog->aux->num_exentries *
				sizeof(struct exception_table_entry);

			/* allocate module memory for x86 insns and extable */
			header = bpf_jit_binary_alloc(roundup(proglen, align) + extable_size,
						      &image, align, jit_fill_hole);
			if (!header) {
				prog = orig_prog;
				goto out_addrs;
			}
			prog->aux->extable = (void *) image + roundup(proglen, align);
		}
		oldproglen = proglen;
		cond_resched();
	}

	if (bpf_jit_enable > 1)
		bpf_jit_dump(prog->len, proglen, pass + 1, image);

	if (image) {
		if (!prog->is_func || extra_pass) {
			bpf_tail_call_direct_fixup(prog);
			bpf_jit_binary_lock_ro(header);
		} else {
			jit_data->addrs = addrs;
			jit_data->ctx = ctx;
			jit_data->proglen = proglen;
			jit_data->image = image;
			jit_data->header = header;
		}
		prog->bpf_func = (void *)image;
		prog->jited = 1;
		prog->jited_len = proglen;
	} else {
		prog = orig_prog;
	}

	if (!image || !prog->is_func || extra_pass) {
		if (image)
			bpf_prog_fill_jited_linfo(prog, addrs + 1);
out_addrs:
		kvfree(addrs);
		kfree(jit_data);
		prog->aux->jit_data = NULL;
	}
out:
	if (tmp_blinded)
		bpf_jit_prog_release_other(prog, prog == orig_prog ?
					   tmp : orig_prog);
	return prog;
}

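/* Calls from JITed code to kernel functions (kfuncs) can be emitted as
 * ordinary relative calls on x86-64, with the same emit_call() mechanism
 * used above, so this JIT advertises kfunc call support.
 */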
bool bpf_jit_supports_kfunc_call(void)
{
	return true;
}