/*
 * x86 single-step support code, common to 32-bit and 64-bit.
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/ptrace.h>
#include <asm/desc.h>
#include <asm/mmu_context.h>

unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs)
{
	unsigned long addr, seg;

	addr = regs->ip;
	seg = regs->cs & 0xffff;
	if (v8086_mode(regs)) {
		addr = (addr & 0xffff) + (seg << 4);
		return addr;
	}

	/*
	 * We'll assume that the code segments in the GDT
	 * are all zero-based. That is largely true: the
	 * TLS segments are used for data, and the PNPBIOS
	 * and APM BIOS ones we just ignore here.
	 */
	if ((seg & SEGMENT_TI_MASK) == SEGMENT_LDT) {
		struct desc_struct *desc;
		unsigned long base;

		/* Convert the selector into a descriptor-table index. */
		seg >>= 3;

		mutex_lock(&child->mm->context.lock);
		if (unlikely(!child->mm->context.ldt ||
			     seg >= child->mm->context.ldt->size))
			addr = -1L; /* bogus selector, access would fault */
		else {
			desc = &child->mm->context.ldt->entries[seg];
			base = get_desc_base(desc);

			/* 16-bit code segment? */
			if (!desc->d)
				addr &= 0xffff;
			addr += base;
		}
		mutex_unlock(&child->mm->context.lock);
	}

	return addr;
}

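/*
 * Example: an LDT code selector of 0x0f has RPL = 3 (bits 1:0), TI = 1
 * (bit 2, i.e. the LDT rather than the GDT) and descriptor index
 * 0x0f >> 3 = 1, so the conversion above returns the base of LDT
 * entry 1 plus regs->ip, with the offset truncated to 16 bits first
 * if that descriptor's D bit is clear (a 16-bit code segment).
 */
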
static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
{
	int i, copied;
	unsigned char opcode[15];
	unsigned long addr = convert_ip_to_linear(child, regs);

	copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
	for (i = 0; i < copied; i++) {
		switch (opcode[i]) {
		/* popf and iret */
		case 0x9d: case 0xcf:
			return 1;

			/* CHECKME: 64 65 */

		/* operand and address size prefixes */
		case 0x66: case 0x67:
			continue;
		/* irrelevant prefixes (segment overrides and repeats) */
		case 0x26: case 0x2e:
		case 0x36: case 0x3e:
		case 0x64: case 0x65:
		case 0xf0: case 0xf2: case 0xf3:
			continue;

#ifdef CONFIG_X86_64
		case 0x40 ... 0x4f:
			if (!user_64bit_mode(regs))
				/* 32-bit mode: register increment */
				return 0;
			/* 64-bit mode: REX prefix */
			continue;
#endif

			/* CHECKME: f2, f3 */

		/*
		 * pushf: NOTE! We should probably not let
		 * the user see the TF bit being set. But
		 * it's more pain than it's worth to avoid
		 * it, and a debugger could emulate this
		 * all in user space if it _really_ cares.
		 */
		case 0x9c:
		default:
			return 0;
		}
	}
	return 0;
}

/*
 * Enable single-stepping. Return nonzero if user mode is not using TF itself.
 */
static int enable_single_step(struct task_struct *child)
{
	struct pt_regs *regs = task_pt_regs(child);
	unsigned long oflags;

	/*
	 * If we stepped into a sysenter/syscall insn, it trapped in
	 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
	 * If user-mode had set TF itself, then it's still clear from
	 * do_debug() and we need to set it again to restore the user
	 * state so we don't wrongly set TIF_FORCED_TF below.
	 * If enable_single_step() was used last and that is what
	 * set TIF_SINGLESTEP, then both TF and TIF_FORCED_TF are
	 * already set and our bookkeeping is fine.
	 */
	if (unlikely(test_tsk_thread_flag(child, TIF_SINGLESTEP)))
		regs->flags |= X86_EFLAGS_TF;

	/*
	 * Always set TIF_SINGLESTEP - this guarantees that
	 * we single-step system calls etc.. This will also
	 * cause us to set TF when returning to user mode.
	 */
	set_tsk_thread_flag(child, TIF_SINGLESTEP);

	oflags = regs->flags;

	/* Set TF on the kernel stack.. */
	regs->flags |= X86_EFLAGS_TF;

	/*
	 * ..but if TF is changed by the instruction we will trace,
	 * don't mark it as being "us" that set it, so that we
	 * won't clear it by hand later.
	 *
	 * Note that if we don't actually execute the popf because
	 * of a signal arriving right now or suchlike, we will lose
	 * track of the fact that it really was "us" that set it.
	 */
	if (is_setting_trap_flag(child, regs)) {
		clear_tsk_thread_flag(child, TIF_FORCED_TF);
		return 0;
	}

	/*
	 * If TF was already set, check whether it was us who set it.
	 * If not, we should never attempt a block step.
	 */
	if (oflags & X86_EFLAGS_TF)
		return test_tsk_thread_flag(child, TIF_FORCED_TF);

	set_tsk_thread_flag(child, TIF_FORCED_TF);

	return 1;
}

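/*
 * Block stepping relies on the BTF bit in the IA32_DEBUGCTL MSR: with
 * both EFLAGS.TF and DEBUGCTLMSR_BTF set, the CPU delivers the
 * single-step trap only on taken branches rather than after every
 * instruction.
 */
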
void set_task_blockstep(struct task_struct *task, bool on)
{
	unsigned long debugctl;

	/*
	 * Ensure irq/preemption can't change debugctl in between.
	 * Note also that both TIF_BLOCKSTEP and debugctl should
	 * be changed atomically wrt preemption.
	 *
	 * NOTE: this means that set/clear TIF_BLOCKSTEP is only safe if
	 * task is current or it can't be running, otherwise we can race
	 * with __switch_to_xtra(). We rely on ptrace_freeze_traced() but
	 * PTRACE_KILL is not safe.
	 */
	local_irq_disable();
	debugctl = get_debugctlmsr();
	if (on) {
		debugctl |= DEBUGCTLMSR_BTF;
		set_tsk_thread_flag(task, TIF_BLOCKSTEP);
	} else {
		debugctl &= ~DEBUGCTLMSR_BTF;
		clear_tsk_thread_flag(task, TIF_BLOCKSTEP);
	}
	if (task == current)
		update_debugctlmsr(debugctl);
	local_irq_enable();
}

/*
 * Enable single or block step.
 */
static void enable_step(struct task_struct *child, bool block)
{
	/*
	 * Make sure block stepping (BTF) is not enabled unless it should be.
	 * Note that we don't try to worry about any is_setting_trap_flag()
	 * instructions after the first when using block stepping.
	 * So no one should try to use debugger block stepping in a program
	 * that uses user-mode single stepping itself.
	 */
	if (enable_single_step(child) && block)
		set_task_blockstep(child, true);
	else if (test_tsk_thread_flag(child, TIF_BLOCKSTEP))
		set_task_blockstep(child, false);
}

void user_enable_single_step(struct task_struct *child)
{
	enable_step(child, 0);
}

void user_enable_block_step(struct task_struct *child)
{
	enable_step(child, 1);
}

void user_disable_single_step(struct task_struct *child)
{
	/*
	 * Make sure block stepping (BTF) is disabled.
	 */
	if (test_tsk_thread_flag(child, TIF_BLOCKSTEP))
		set_task_blockstep(child, false);

	/* Always clear TIF_SINGLESTEP... */
	clear_tsk_thread_flag(child, TIF_SINGLESTEP);

	/* But touch TF only if it was set by us.. */
	if (test_and_clear_tsk_thread_flag(child, TIF_FORCED_TF))
		task_pt_regs(child)->flags &= ~X86_EFLAGS_TF;
}
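
/*
 * Usage sketch (illustrative): the entry points above are reached from
 * the generic ptrace resume path once the tracee is stopped, e.g.
 *
 *	ptrace(PTRACE_SINGLESTEP, pid, 0, 0);	-> user_enable_single_step()
 *	ptrace(PTRACE_SINGLEBLOCK, pid, 0, 0);	-> user_enable_block_step()
 *	ptrace(PTRACE_CONT, pid, 0, 0);		-> user_disable_single_step()
 */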