1b2441318SGreg Kroah-Hartman /* SPDX-License-Identifier: GPL-2.0 */ 235de5b06SAndy Lutomirski #ifndef _ASM_X86_TEXT_PATCHING_H 335de5b06SAndy Lutomirski #define _ASM_X86_TEXT_PATCHING_H 435de5b06SAndy Lutomirski 535de5b06SAndy Lutomirski #include <linux/types.h> 635de5b06SAndy Lutomirski #include <linux/stddef.h> 735de5b06SAndy Lutomirski #include <asm/ptrace.h> 835de5b06SAndy Lutomirski 935de5b06SAndy Lutomirski struct paravirt_patch_site; 1035de5b06SAndy Lutomirski #ifdef CONFIG_PARAVIRT 1135de5b06SAndy Lutomirski void apply_paravirt(struct paravirt_patch_site *start, 1235de5b06SAndy Lutomirski struct paravirt_patch_site *end); 1335de5b06SAndy Lutomirski #else 1435de5b06SAndy Lutomirski static inline void apply_paravirt(struct paravirt_patch_site *start, 1535de5b06SAndy Lutomirski struct paravirt_patch_site *end) 1635de5b06SAndy Lutomirski {} 1735de5b06SAndy Lutomirski #define __parainstructions NULL 1835de5b06SAndy Lutomirski #define __parainstructions_end NULL 1935de5b06SAndy Lutomirski #endif 2035de5b06SAndy Lutomirski 2135de5b06SAndy Lutomirski extern void *text_poke_early(void *addr, const void *opcode, size_t len); 2235de5b06SAndy Lutomirski 2335de5b06SAndy Lutomirski /* 2435de5b06SAndy Lutomirski * Clear and restore the kernel write-protection flag on the local CPU. 2535de5b06SAndy Lutomirski * Allows the kernel to edit read-only pages. 2635de5b06SAndy Lutomirski * Side-effect: any interrupt handler running between save and restore will have 2735de5b06SAndy Lutomirski * the ability to write to read-only pages. 2835de5b06SAndy Lutomirski * 2935de5b06SAndy Lutomirski * Warning: 3035de5b06SAndy Lutomirski * Code patching in the UP case is safe if NMIs and MCE handlers are stopped and 3135de5b06SAndy Lutomirski * no thread can be preempted in the instructions being modified (no iret to an 3235de5b06SAndy Lutomirski * invalid instruction possible) or if the instructions are changed from a 3335de5b06SAndy Lutomirski * consistent state to another consistent state atomically. 3435de5b06SAndy Lutomirski * On the local CPU you need to be protected again NMI or MCE handlers seeing an 3535de5b06SAndy Lutomirski * inconsistent instruction while you patch. 3635de5b06SAndy Lutomirski */ 3735de5b06SAndy Lutomirski extern void *text_poke(void *addr, const void *opcode, size_t len); 3835de5b06SAndy Lutomirski extern int poke_int3_handler(struct pt_regs *regs); 3935de5b06SAndy Lutomirski extern void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler); 406fffacb3SPavel Tatashin extern int after_bootmem; 4135de5b06SAndy Lutomirski 42*693713cbSSteven Rostedt (VMware) #ifndef CONFIG_UML_X86 434b33dadfSPeter Zijlstra static inline void int3_emulate_jmp(struct pt_regs *regs, unsigned long ip) 444b33dadfSPeter Zijlstra { 454b33dadfSPeter Zijlstra regs->ip = ip; 464b33dadfSPeter Zijlstra } 474b33dadfSPeter Zijlstra 484b33dadfSPeter Zijlstra #define INT3_INSN_SIZE 1 494b33dadfSPeter Zijlstra #define CALL_INSN_SIZE 5 504b33dadfSPeter Zijlstra 514b33dadfSPeter Zijlstra #ifdef CONFIG_X86_64 524b33dadfSPeter Zijlstra static inline void int3_emulate_push(struct pt_regs *regs, unsigned long val) 534b33dadfSPeter Zijlstra { 544b33dadfSPeter Zijlstra /* 554b33dadfSPeter Zijlstra * The int3 handler in entry_64.S adds a gap between the 564b33dadfSPeter Zijlstra * stack where the break point happened, and the saving of 574b33dadfSPeter Zijlstra * pt_regs. We can extend the original stack because of 584b33dadfSPeter Zijlstra * this gap. See the idtentry macro's create_gap option. 594b33dadfSPeter Zijlstra */ 604b33dadfSPeter Zijlstra regs->sp -= sizeof(unsigned long); 614b33dadfSPeter Zijlstra *(unsigned long *)regs->sp = val; 624b33dadfSPeter Zijlstra } 634b33dadfSPeter Zijlstra 644b33dadfSPeter Zijlstra static inline void int3_emulate_call(struct pt_regs *regs, unsigned long func) 654b33dadfSPeter Zijlstra { 664b33dadfSPeter Zijlstra int3_emulate_push(regs, regs->ip - INT3_INSN_SIZE + CALL_INSN_SIZE); 674b33dadfSPeter Zijlstra int3_emulate_jmp(regs, func); 684b33dadfSPeter Zijlstra } 69*693713cbSSteven Rostedt (VMware) #endif /* CONFIG_X86_64 */ 70*693713cbSSteven Rostedt (VMware) #endif /* !CONFIG_UML_X86 */ 714b33dadfSPeter Zijlstra 7235de5b06SAndy Lutomirski #endif /* _ASM_X86_TEXT_PATCHING_H */ 73