/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_TEXT_PATCHING_H
#define _ASM_X86_TEXT_PATCHING_H

#include <linux/types.h>
#include <linux/stddef.h>
#include <asm/ptrace.h>

struct paravirt_patch_site;
#ifdef CONFIG_PARAVIRT
void apply_paravirt(struct paravirt_patch_site *start,
		    struct paravirt_patch_site *end);
#else
static inline void apply_paravirt(struct paravirt_patch_site *start,
				  struct paravirt_patch_site *end)
{}
#define __parainstructions	NULL
#define __parainstructions_end	NULL
#endif

/*
 * Currently, the max observed size in the kernel code is
 * JUMP_LABEL_NOP_SIZE/RELATIVEJUMP_SIZE, which are both 5.
 * Raise it if needed.
 */
#define POKE_MAX_OPCODE_SIZE	5

extern void text_poke_early(void *addr, const void *opcode, size_t len);

/*
 * Clear and restore the kernel write-protection flag on the local CPU.
 * Allows the kernel to edit read-only pages.
 * Side-effect: any interrupt handler running between save and restore will
 * be able to write to read-only pages.
 *
 * Warning:
 * Code patching in the UP case is safe if NMIs and MCE handlers are stopped
 * and no thread can be preempted in the instructions being modified (no iret
 * to an invalid instruction possible), or if the instructions are changed
 * atomically from one consistent state to another.
 * On the local CPU you need to be protected against NMI or MCE handlers
 * seeing an inconsistent instruction while you patch.
 */
extern void *text_poke(void *addr, const void *opcode, size_t len);
extern void *text_poke_kgdb(void *addr, const void *opcode, size_t len);
extern int poke_int3_handler(struct pt_regs *regs);
extern void text_poke_bp(void *addr, const void *opcode, size_t len, const void *emulate);

extern void text_poke_queue(void *addr, const void *opcode, size_t len, const void *emulate);
extern void text_poke_finish(void);
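/*
 * Illustrative sketch (not kernel API, names are assumptions): a caller
 * turning a 5-byte NOP at @addr into a near call to @target with
 * text_poke_bp(). Passing the new instruction as the @emulate argument
 * lets poke_int3_handler() emulate it for any CPU that executes the
 * temporary INT3 while the patch is in flight. Real users such as jump
 * labels and ftrace build their opcode buffers along these lines.
 *
 *	u8 insn[CALL_INSN_SIZE] = { CALL_INSN_OPCODE, };
 *	s32 disp = (s32)(target - (addr + CALL_INSN_SIZE));
 *
 *	memcpy(insn + 1, &disp, sizeof(disp));
 *	text_poke_bp((void *)addr, insn, CALL_INSN_SIZE, insn);
 */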
extern int after_bootmem;
extern __ro_after_init struct mm_struct *poking_mm;
extern __ro_after_init unsigned long poking_addr;

#ifndef CONFIG_UML_X86
static inline void int3_emulate_jmp(struct pt_regs *regs, unsigned long ip)
{
	regs->ip = ip;
}

#define INT3_INSN_SIZE		1
#define INT3_INSN_OPCODE	0xCC

#define CALL_INSN_SIZE		5
#define CALL_INSN_OPCODE	0xE8

#define JMP32_INSN_SIZE		5
#define JMP32_INSN_OPCODE	0xE9

#define JMP8_INSN_SIZE		2
#define JMP8_INSN_OPCODE	0xEB

static inline void int3_emulate_push(struct pt_regs *regs, unsigned long val)
{
	/*
	 * The int3 handler in entry_64.S adds a gap between the
	 * stack where the break point happened, and the saving of
	 * pt_regs. We can extend the original stack because of
	 * this gap. See the idtentry macro's create_gap option.
	 *
	 * Similarly entry_32.S will have a gap on the stack for (any) hardware
	 * exception and pt_regs; see FIXUP_FRAME.
	 */
	regs->sp -= sizeof(unsigned long);
	*(unsigned long *)regs->sp = val;
}

static inline void int3_emulate_call(struct pt_regs *regs, unsigned long func)
{
	int3_emulate_push(regs, regs->ip - INT3_INSN_SIZE + CALL_INSN_SIZE);
	int3_emulate_jmp(regs, func);
}
#endif /* !CONFIG_UML_X86 */

#endif /* _ASM_X86_TEXT_PATCHING_H */
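/*
 * Worked example (illustrative, with an assumed address A): while
 * text_poke_bp() has a CALL at address A temporarily replaced by INT3,
 * a CPU executing it traps with regs->ip == A + INT3_INSN_SIZE.
 * poke_int3_handler() then uses int3_emulate_call(), which pushes
 * regs->ip - INT3_INSN_SIZE + CALL_INSN_SIZE == A + 5, the address of
 * the instruction after the 5-byte CALL, as the return address and
 * resumes at the call target, exactly as the finished CALL would.
 */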