/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_TEXT_PATCHING_H
#define _ASM_X86_TEXT_PATCHING_H

#include <linux/types.h>
#include <linux/stddef.h>
#include <asm/ptrace.h>

struct paravirt_patch_site;
#ifdef CONFIG_PARAVIRT
void apply_paravirt(struct paravirt_patch_site *start,
		    struct paravirt_patch_site *end);
#else
static inline void apply_paravirt(struct paravirt_patch_site *start,
				  struct paravirt_patch_site *end)
{}
#define __parainstructions	NULL
#define __parainstructions_end	NULL
#endif

/*
 * Currently, the max observed size in the kernel code is
 * JUMP_LABEL_NOP_SIZE/RELATIVEJUMP_SIZE, which are 5.
 * Raise it if needed.
 */
#define POKE_MAX_OPCODE_SIZE	5

extern void text_poke_early(void *addr, const void *opcode, size_t len);

/*
 * Clear and restore the kernel write-protection flag on the local CPU.
 * Allows the kernel to edit read-only pages.
 * Side-effect: any interrupt handler running between save and restore will have
 * the ability to write to read-only pages.
 *
 * Warning:
 * Code patching in the UP case is safe if NMIs and MCE handlers are stopped and
 * no thread can be preempted in the instructions being modified (no iret to an
 * invalid instruction possible) or if the instructions are changed from a
 * consistent state to another consistent state atomically.
 * On the local CPU you need to be protected against NMI or MCE handlers seeing
 * an inconsistent instruction while you patch.
 */
extern void *text_poke(void *addr, const void *opcode, size_t len);
extern void *text_poke_kgdb(void *addr, const void *opcode, size_t len);
extern int poke_int3_handler(struct pt_regs *regs);
extern void text_poke_bp(void *addr, const void *opcode, size_t len, const void *emulate);

extern void text_poke_queue(void *addr, const void *opcode, size_t len, const void *emulate);
extern void text_poke_finish(void);
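
/*
 * Illustrative sketch only -- this helper is hypothetical and not part
 * of this header's API. It shows the minimal use of text_poke():
 * overwrite a single byte of kernel text with an INT3 breakpoint
 * (0xCC, see INT3_INSN_OPCODE below), which is also the first step the
 * INT3-based patching protocol in alternative.c performs. The caller
 * is assumed to hold text_mutex, per the warning above.
 */
static inline void __example_poke_int3(void *addr)
{
	const u8 int3 = 0xCC;	/* INT3_INSN_OPCODE */

	text_poke(addr, &int3, sizeof(int3));
}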
#define INT3_INSN_SIZE		1
#define INT3_INSN_OPCODE	0xCC

#define CALL_INSN_SIZE		5
#define CALL_INSN_OPCODE	0xE8

#define JMP32_INSN_SIZE		5
#define JMP32_INSN_OPCODE	0xE9

#define JMP8_INSN_SIZE		2
#define JMP8_INSN_OPCODE	0xEB

static inline int text_opcode_size(u8 opcode)
{
	int size = 0;

#define __CASE(insn)	\
	case insn##_INSN_OPCODE: size = insn##_INSN_SIZE; break

	switch(opcode) {
	__CASE(INT3);
	__CASE(CALL);
	__CASE(JMP32);
	__CASE(JMP8);
	}

#undef __CASE

	return size;
}

union text_poke_insn {
	u8 text[POKE_MAX_OPCODE_SIZE];
	struct {
		u8 opcode;
		s32 disp;
	} __attribute__((packed));
};

static __always_inline
void *text_gen_insn(u8 opcode, const void *addr, const void *dest)
{
	static union text_poke_insn insn; /* per instance */
	int size = text_opcode_size(opcode);

	insn.opcode = opcode;

	if (size > 1) {
		insn.disp = (long)dest - (long)(addr + size);
		if (size == 2) {
			/*
			 * Ensure that for JMP8 the displacement
			 * actually fits the signed byte.
			 */
			BUG_ON((insn.disp >> 31) != (insn.disp >> 7));
		}
	}

	return &insn.text;
}

extern int after_bootmem;
extern __ro_after_init struct mm_struct *poking_mm;
extern __ro_after_init unsigned long poking_addr;

#ifndef CONFIG_UML_X86
static inline void int3_emulate_jmp(struct pt_regs *regs, unsigned long ip)
{
	regs->ip = ip;
}

static inline void int3_emulate_push(struct pt_regs *regs, unsigned long val)
{
	/*
	 * The int3 handler in entry_64.S adds a gap between the
	 * stack where the break point happened, and the saving of
	 * pt_regs. We can extend the original stack because of
	 * this gap. See the idtentry macro's create_gap option.
	 *
	 * Similarly entry_32.S will have a gap on the stack for (any) hardware
	 * exception and pt_regs; see FIXUP_FRAME.
	 */
	regs->sp -= sizeof(unsigned long);
	*(unsigned long *)regs->sp = val;
}

static inline void int3_emulate_call(struct pt_regs *regs, unsigned long func)
{
	int3_emulate_push(regs, regs->ip - INT3_INSN_SIZE + CALL_INSN_SIZE);
	int3_emulate_jmp(regs, func);
}
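
/*
 * Usage sketches; the two helpers below are hypothetical and exist
 * only to illustrate the declarations above.
 *
 * First: retarget the CALL instruction at @site so it calls @func,
 * roughly what the jump-label/ftrace style users in arch/x86/kernel/
 * do. text_gen_insn() assembles the 5-byte E8 instruction and
 * text_poke_bp() installs it via the INT3 protocol; a NULL @emulate
 * means the new instruction itself is emulated while the patch is in
 * flight. The caller is assumed to hold text_mutex.
 */
static inline void __example_retarget_call(void *site, void *func)
{
	void *insn = text_gen_insn(CALL_INSN_OPCODE, site, func);

	text_poke_bp(site, insn, CALL_INSN_SIZE, NULL);
}

/*
 * Second: how a #BP handler uses the emulation helpers above while a
 * site is mid-patch (a simplified sketch of the dispatch done by
 * poke_int3_handler()). On entry regs->ip points just past the INT3
 * byte that temporarily replaced the first opcode byte; @opcode and
 * @dest describe the instruction being patched in.
 */
static inline void __example_emulate(struct pt_regs *regs, u8 opcode, void *dest)
{
	switch (opcode) {
	case INT3_INSN_OPCODE:
		break;		/* resume right after the INT3 itself */
	case CALL_INSN_OPCODE:
		int3_emulate_call(regs, (unsigned long)dest);
		break;
	case JMP32_INSN_OPCODE:
	case JMP8_INSN_OPCODE:
		int3_emulate_jmp(regs, (unsigned long)dest);
		break;
	}
}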
#endif /* !CONFIG_UML_X86 */

#endif /* _ASM_X86_TEXT_PATCHING_H */