#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_event.h>
#include <linux/bug.h>
#include <linux/stddef.h>
#include <asm/perf_regs.h>
#include <asm/ptrace.h>

#ifdef CONFIG_X86_32
#define PERF_REG_X86_MAX PERF_REG_X86_32_MAX
#else
#define PERF_REG_X86_MAX PERF_REG_X86_64_MAX
#endif

#define PT_REGS_OFFSET(id, r) [id] = offsetof(struct pt_regs, r)

static unsigned int pt_regs_offset[PERF_REG_X86_MAX] = {
	PT_REGS_OFFSET(PERF_REG_X86_AX, ax),
	PT_REGS_OFFSET(PERF_REG_X86_BX, bx),
	PT_REGS_OFFSET(PERF_REG_X86_CX, cx),
	PT_REGS_OFFSET(PERF_REG_X86_DX, dx),
	PT_REGS_OFFSET(PERF_REG_X86_SI, si),
	PT_REGS_OFFSET(PERF_REG_X86_DI, di),
	PT_REGS_OFFSET(PERF_REG_X86_BP, bp),
	PT_REGS_OFFSET(PERF_REG_X86_SP, sp),
	PT_REGS_OFFSET(PERF_REG_X86_IP, ip),
	PT_REGS_OFFSET(PERF_REG_X86_FLAGS, flags),
	PT_REGS_OFFSET(PERF_REG_X86_CS, cs),
	PT_REGS_OFFSET(PERF_REG_X86_SS, ss),
#ifdef CONFIG_X86_32
	PT_REGS_OFFSET(PERF_REG_X86_DS, ds),
	PT_REGS_OFFSET(PERF_REG_X86_ES, es),
	PT_REGS_OFFSET(PERF_REG_X86_FS, fs),
	PT_REGS_OFFSET(PERF_REG_X86_GS, gs),
#else
	/*
	 * The pt_regs struct does not store
	 * ds, es, fs, gs in 64 bit mode.
	 */
	(unsigned int) -1,
	(unsigned int) -1,
	(unsigned int) -1,
	(unsigned int) -1,
#endif
#ifdef CONFIG_X86_64
	PT_REGS_OFFSET(PERF_REG_X86_R8, r8),
	PT_REGS_OFFSET(PERF_REG_X86_R9, r9),
	PT_REGS_OFFSET(PERF_REG_X86_R10, r10),
	PT_REGS_OFFSET(PERF_REG_X86_R11, r11),
	PT_REGS_OFFSET(PERF_REG_X86_R12, r12),
	PT_REGS_OFFSET(PERF_REG_X86_R13, r13),
	PT_REGS_OFFSET(PERF_REG_X86_R14, r14),
	PT_REGS_OFFSET(PERF_REG_X86_R15, r15),
#endif
};

u64 perf_reg_value(struct pt_regs *regs, int idx)
{
	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(pt_regs_offset)))
		return 0;

	return regs_get_register(regs, pt_regs_offset[idx]);
}

#define REG_RESERVED (~((1ULL << PERF_REG_X86_MAX) - 1ULL))

#ifdef CONFIG_X86_32
int perf_reg_validate(u64 mask)
{
	if (!mask || mask & REG_RESERVED)
		return -EINVAL;

	return 0;
}

u64 perf_reg_abi(struct task_struct *task)
{
	return PERF_SAMPLE_REGS_ABI_32;
}

void perf_get_regs_user(struct perf_regs *regs_user,
			struct pt_regs *regs,
			struct pt_regs *regs_user_copy)
{
	regs_user->regs = task_pt_regs(current);
	regs_user->abi = perf_reg_abi(current);
}
#else /* CONFIG_X86_64 */
#define REG_NOSUPPORT ((1ULL << PERF_REG_X86_DS) | \
		       (1ULL << PERF_REG_X86_ES) | \
		       (1ULL << PERF_REG_X86_FS) | \
		       (1ULL << PERF_REG_X86_GS))

int perf_reg_validate(u64 mask)
{
	if (!mask || mask & REG_RESERVED)
		return -EINVAL;

	if (mask & REG_NOSUPPORT)
		return -EINVAL;

	return 0;
}

u64 perf_reg_abi(struct task_struct *task)
{
	if (test_tsk_thread_flag(task, TIF_IA32))
		return PERF_SAMPLE_REGS_ABI_32;
	else
		return PERF_SAMPLE_REGS_ABI_64;
}

void perf_get_regs_user(struct perf_regs *regs_user,
			struct pt_regs *regs,
			struct pt_regs *regs_user_copy)
{
	struct pt_regs *user_regs = task_pt_regs(current);

	/*
	 * If we're in an NMI that interrupted task_pt_regs setup, then
	 * we can't sample user regs at all.  This check isn't really
	 * sufficient, though, as we could be in an NMI inside an interrupt
	 * that happened during task_pt_regs setup.
	 */
	if (regs->sp > (unsigned long)&user_regs->r11 &&
	    regs->sp <= (unsigned long)(user_regs + 1)) {
		regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE;
		regs_user->regs = NULL;
		return;
	}

	/*
	 * RIP, flags, and the argument registers are usually saved.
	 * orig_ax is probably okay, too.
	 */
	regs_user_copy->ip = user_regs->ip;
	regs_user_copy->cx = user_regs->cx;
	regs_user_copy->dx = user_regs->dx;
	regs_user_copy->si = user_regs->si;
	regs_user_copy->di = user_regs->di;
	regs_user_copy->r8 = user_regs->r8;
	regs_user_copy->r9 = user_regs->r9;
	regs_user_copy->r10 = user_regs->r10;
	regs_user_copy->r11 = user_regs->r11;
	regs_user_copy->orig_ax = user_regs->orig_ax;
	regs_user_copy->flags = user_regs->flags;

	/*
	 * Don't even try to report the "rest" regs.
	 */
	regs_user_copy->bx = -1;
	regs_user_copy->bp = -1;
	regs_user_copy->r12 = -1;
	regs_user_copy->r13 = -1;
	regs_user_copy->r14 = -1;
	regs_user_copy->r15 = -1;

	/*
	 * For this to be at all useful, we need a reasonable guess for
	 * sp and the ABI.  Be careful: we're in NMI context, and we're
	 * considering current to be the current task, so we should
	 * be careful not to look at any other percpu variables that might
	 * change during context switches.
	 */
	if (IS_ENABLED(CONFIG_IA32_EMULATION) &&
	    task_thread_info(current)->status & TS_COMPAT) {
		/* Easy case: we're in a compat syscall. */
		regs_user->abi = PERF_SAMPLE_REGS_ABI_32;
		regs_user_copy->sp = user_regs->sp;
		regs_user_copy->cs = user_regs->cs;
		regs_user_copy->ss = user_regs->ss;
	} else if (user_regs->orig_ax != -1) {
		/*
		 * We're probably in a 64-bit syscall.
		 * Warning: this code is severely racy.  At least it's better
		 * than just blindly copying user_regs.
		 */
		regs_user->abi = PERF_SAMPLE_REGS_ABI_64;
		regs_user_copy->sp = this_cpu_read(old_rsp);
		regs_user_copy->cs = __USER_CS;
		regs_user_copy->ss = __USER_DS;
		regs_user_copy->cx = -1; /* usually contains garbage */
	} else {
		/* We're probably in an interrupt or exception. */
		regs_user->abi = user_64bit_mode(user_regs) ?
			PERF_SAMPLE_REGS_ABI_64 : PERF_SAMPLE_REGS_ABI_32;
		regs_user_copy->sp = user_regs->sp;
		regs_user_copy->cs = user_regs->cs;
		regs_user_copy->ss = user_regs->ss;
	}

	regs_user->regs = regs_user_copy;
}
#endif /* CONFIG_X86_32 */
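
/*
 * Illustrative sketch (not part of this file or the kernel build): roughly
 * how user space is expected to request the registers handled above.  The
 * perf_event_attr fields and the PERF_SAMPLE_REGS_USER / PERF_REG_X86_*
 * names are the upstream perf ABI; the specific event choice and register
 * mask below are only assumptions for illustration.  The mask is what
 * perf_reg_validate() checks at event creation, and perf_reg_value() later
 * reads each requested register when a sample is written out.
 *
 *	struct perf_event_attr attr = {
 *		.type		  = PERF_TYPE_HARDWARE,
 *		.config		  = PERF_COUNT_HW_CPU_CYCLES,
 *		.sample_period	  = 100000,
 *		.sample_type	  = PERF_SAMPLE_IP | PERF_SAMPLE_REGS_USER,
 *		.sample_regs_user = (1ULL << PERF_REG_X86_IP) |
 *				    (1ULL << PERF_REG_X86_SP) |
 *				    (1ULL << PERF_REG_X86_BP),
 *	};
 *
 * A mask with a bit at or above PERF_REG_X86_MAX (or, on 64-bit kernels,
 * any of the DS/ES/FS/GS bits) is rejected with -EINVAL by
 * perf_reg_validate().
 */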