/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_TEXT_PATCHING_H
#define _ASM_X86_TEXT_PATCHING_H

#include <linux/types.h>
#include <linux/stddef.h>
#include <asm/ptrace.h>

struct paravirt_patch_site;
#ifdef CONFIG_PARAVIRT
void apply_paravirt(struct paravirt_patch_site *start,
		    struct paravirt_patch_site *end);
#else
static inline void apply_paravirt(struct paravirt_patch_site *start,
				  struct paravirt_patch_site *end)
{}
#define __parainstructions	NULL
#define __parainstructions_end	NULL
#endif

/*
 * Currently, the max observed size in the kernel code is
 * JUMP_LABEL_NOP_SIZE/RELATIVEJUMP_SIZE, both of which are 5.
 * Raise it if needed.
 */
#define POKE_MAX_OPCODE_SIZE	5

extern void text_poke_early(void *addr, const void *opcode, size_t len);

/*
 * Clear and restore the kernel write-protection flag on the local CPU.
 * Allows the kernel to edit read-only pages.
 * Side effect: any interrupt handler running between save and restore will
 * be able to write to read-only pages.
 *
 * Warning:
 * Code patching in the UP case is safe if NMIs and MCE handlers are stopped
 * and no thread can be preempted in the instructions being modified (no iret
 * to an invalid instruction possible), or if the instructions are changed
 * from one consistent state to another consistent state atomically.
 * On the local CPU you need to be protected against NMI or MCE handlers
 * seeing an inconsistent instruction while you patch.
 */
extern void *text_poke(void *addr, const void *opcode, size_t len);
extern void text_poke_sync(void);
extern void *text_poke_kgdb(void *addr, const void *opcode, size_t len);
extern void *text_poke_copy(void *addr, const void *opcode, size_t len);
extern int poke_int3_handler(struct pt_regs *regs);
extern void text_poke_bp(void *addr, const void *opcode, size_t len, const void *emulate);

extern void text_poke_queue(void *addr, const void *opcode, size_t len, const void *emulate);
extern void text_poke_finish(void);

#define INT3_INSN_SIZE		1
#define INT3_INSN_OPCODE	0xCC

#define RET_INSN_SIZE		1
#define RET_INSN_OPCODE		0xC3

#define CALL_INSN_SIZE		5
#define CALL_INSN_OPCODE	0xE8

#define JMP32_INSN_SIZE		5
#define JMP32_INSN_OPCODE	0xE9

#define JMP8_INSN_SIZE		2
#define JMP8_INSN_OPCODE	0xEB

#define DISP32_SIZE		4

static __always_inline int text_opcode_size(u8 opcode)
{
	int size = 0;

#define __CASE(insn)							\
	case insn##_INSN_OPCODE: size = insn##_INSN_SIZE; break

	switch (opcode) {
	__CASE(INT3);
	__CASE(RET);
	__CASE(CALL);
	__CASE(JMP32);
	__CASE(JMP8);
	}

#undef __CASE

	return size;
}

union text_poke_insn {
	u8 text[POKE_MAX_OPCODE_SIZE];
	struct {
		u8 opcode;
		s32 disp;
	} __attribute__((packed));
};

static __always_inline
void __text_gen_insn(void *buf, u8 opcode, const void *addr, const void *dest, int size)
{
	union text_poke_insn *insn = buf;

	BUG_ON(size < text_opcode_size(opcode));

	/*
	 * Hide the addresses to avoid the compiler folding in constants when
	 * referencing code; these can mess up annotations like
	 * ANNOTATE_NOENDBR.
	 */
	OPTIMIZER_HIDE_VAR(insn);
	OPTIMIZER_HIDE_VAR(addr);
	OPTIMIZER_HIDE_VAR(dest);

	insn->opcode = opcode;

	if (size > 1) {
		insn->disp = (long)dest - (long)(addr + size);
		if (size == 2) {
			/*
			 * Ensure that for JMP8 the displacement
			 * actually fits the signed byte.
			 */
			BUG_ON((insn->disp >> 31) != (insn->disp >> 7));
		}
	}
}

static __always_inline
void *text_gen_insn(u8 opcode, const void *addr, const void *dest)
{
	static union text_poke_insn insn; /* per instance */
	__text_gen_insn(&insn, opcode, addr, dest, text_opcode_size(opcode));
	return &insn.text;
}
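
/*
 * Illustrative sketch, not part of this header's API: @site and
 * @new_target are made-up names. Generate a 5-byte relative CALL to
 * @new_target and install it at @site (assumed to hold an existing
 * CALL_INSN_SIZE instruction) via the INT3 breakpoint machinery:
 *
 *	void *code = text_gen_insn(CALL_INSN_OPCODE, site, new_target);
 *	text_poke_bp(site, code, CALL_INSN_SIZE, NULL);
 *
 * A NULL @emulate argument tells text_poke_bp() to emulate the new
 * instruction bytes themselves while the patch is in flight.
 */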

extern int after_bootmem;
extern __ro_after_init struct mm_struct *poking_mm;
extern __ro_after_init unsigned long poking_addr;

#ifndef CONFIG_UML_X86
static __always_inline
void int3_emulate_jmp(struct pt_regs *regs, unsigned long ip)
{
	regs->ip = ip;
}

static __always_inline
void int3_emulate_push(struct pt_regs *regs, unsigned long val)
{
	/*
	 * The int3 handler in entry_64.S adds a gap between the stack
	 * where the breakpoint happened and the saved pt_regs. We can
	 * extend the original stack because of this gap. See the
	 * idtentry macro's create_gap option.
	 *
	 * Similarly, entry_32.S will have a gap on the stack for (any)
	 * hardware exception and pt_regs; see FIXUP_FRAME.
	 */
	regs->sp -= sizeof(unsigned long);
	*(unsigned long *)regs->sp = val;
}

static __always_inline
unsigned long int3_emulate_pop(struct pt_regs *regs)
{
	unsigned long val = *(unsigned long *)regs->sp;
	regs->sp += sizeof(unsigned long);
	return val;
}

static __always_inline
void int3_emulate_call(struct pt_regs *regs, unsigned long func)
{
	int3_emulate_push(regs, regs->ip - INT3_INSN_SIZE + CALL_INSN_SIZE);
	int3_emulate_jmp(regs, func);
}

static __always_inline
void int3_emulate_ret(struct pt_regs *regs)
{
	unsigned long ip = int3_emulate_pop(regs);
	int3_emulate_jmp(regs, ip);
}
#endif /* !CONFIG_UML_X86 */

#endif /* _ASM_X86_TEXT_PATCHING_H */
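
/*
 * Illustrative sketch, assumed flow rather than code from this header:
 * once the INT3 byte of an in-flight patch traps, poke_int3_handler()
 * can use the int3_emulate_*() helpers above to make the half-written
 * instruction behave as the finished one. For a CALL being installed
 * at @site with target @func (hypothetical names), where regs->ip
 * points just past the INT3 byte (@site + INT3_INSN_SIZE):
 *
 *	int3_emulate_call(regs, (unsigned long)func);
 *
 * pushes @site + CALL_INSN_SIZE as the return address and transfers
 * control to @func.
 */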