/*
 * Xtensa hardware breakpoints/watchpoints handling functions
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2016 Cadence Design Systems Inc.
 */

#include <linux/hw_breakpoint.h>
#include <linux/log2.h>
#include <linux/percpu.h>
#include <linux/perf_event.h>
#include <variant/core.h>

/* Breakpoint currently in use for each IBREAKA. */
static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[XCHAL_NUM_IBREAK]);

/* Watchpoint currently in use for each DBREAKA. */
static DEFINE_PER_CPU(struct perf_event *, wp_on_reg[XCHAL_NUM_DBREAK]);

int hw_breakpoint_slots(int type)
{
	switch (type) {
	case TYPE_INST:
		return XCHAL_NUM_IBREAK;
	case TYPE_DATA:
		return XCHAL_NUM_DBREAK;
	default:
		pr_warn("unknown slot type: %d\n", type);
		return 0;
	}
}

/*
 * Check whether the breakpoint/watchpoint range lies entirely in
 * kernel space.
 */
int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
{
	unsigned int len;
	unsigned long va;

	va = hw->address;
	len = hw->len;

	return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}

/*
 * Construct an arch_hw_breakpoint from a perf_event.
 * (An illustrative user-space sketch of the attr fields parsed here can be
 * found at the end of this file.)
 */
int hw_breakpoint_arch_parse(struct perf_event *bp,
			     const struct perf_event_attr *attr,
			     struct arch_hw_breakpoint *hw)
{
	/* Type */
	switch (attr->bp_type) {
	case HW_BREAKPOINT_X:
		hw->type = XTENSA_BREAKPOINT_EXECUTE;
		break;
	case HW_BREAKPOINT_R:
		hw->type = XTENSA_BREAKPOINT_LOAD;
		break;
	case HW_BREAKPOINT_W:
		hw->type = XTENSA_BREAKPOINT_STORE;
		break;
	case HW_BREAKPOINT_RW:
		hw->type = XTENSA_BREAKPOINT_LOAD | XTENSA_BREAKPOINT_STORE;
		break;
	default:
		return -EINVAL;
	}

	/* Len: must be a power of two between 1 and 64 bytes. */
	hw->len = attr->bp_len;
	if (hw->len < 1 || hw->len > 64 || !is_power_of_2(hw->len))
		return -EINVAL;

	/* Address: must be aligned to the requested length. */
	hw->address = attr->bp_addr;
	if (hw->address & (hw->len - 1))
		return -EINVAL;

	return 0;
}

int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
				    unsigned long val, void *data)
{
	return NOTIFY_DONE;
}

static void xtensa_wsr(unsigned long v, u8 sr)
{
	/* We don't have an indexed wsr, and creating the instruction
	 * dynamically doesn't seem worth it given how small XCHAL_NUM_IBREAK
	 * and XCHAL_NUM_DBREAK are. Thus the switch. If the build breaks
	 * here, the switch below needs to be extended.
	 */
	BUILD_BUG_ON(XCHAL_NUM_IBREAK > 2);
	BUILD_BUG_ON(XCHAL_NUM_DBREAK > 2);

	switch (sr) {
#if XCHAL_NUM_IBREAK > 0
	case SREG_IBREAKA + 0:
		WSR(v, SREG_IBREAKA + 0);
		break;
#endif
#if XCHAL_NUM_IBREAK > 1
	case SREG_IBREAKA + 1:
		WSR(v, SREG_IBREAKA + 1);
		break;
#endif

#if XCHAL_NUM_DBREAK > 0
	case SREG_DBREAKA + 0:
		WSR(v, SREG_DBREAKA + 0);
		break;
	case SREG_DBREAKC + 0:
		WSR(v, SREG_DBREAKC + 0);
		break;
#endif
#if XCHAL_NUM_DBREAK > 1
	case SREG_DBREAKA + 1:
		WSR(v, SREG_DBREAKA + 1);
		break;

	case SREG_DBREAKC + 1:
		WSR(v, SREG_DBREAKC + 1);
		break;
#endif
	}
}

/* Claim the first free slot for @bp; return its index or -EBUSY. */
static int alloc_slot(struct perf_event **slot, size_t n,
		      struct perf_event *bp)
{
	size_t i;

	for (i = 0; i < n; ++i) {
		if (!slot[i]) {
			slot[i] = bp;
			return i;
		}
	}
	return -EBUSY;
}

/* Program IBREAKA and set the corresponding enable bit in IBREAKENABLE. */
static void set_ibreak_regs(int reg, struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	unsigned long ibreakenable;

	xtensa_wsr(info->address, SREG_IBREAKA + reg);
	RSR(ibreakenable, SREG_IBREAKENABLE);
	WSR(ibreakenable | (1 << reg), SREG_IBREAKENABLE);
}

/* Program DBREAKA/DBREAKC with the address, length mask and access type. */
static void set_dbreak_regs(int reg, struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	unsigned long dbreakc = DBREAKC_MASK_MASK & -info->len;

	if (info->type & XTENSA_BREAKPOINT_LOAD)
		dbreakc |= DBREAKC_LOAD_MASK;
	if (info->type & XTENSA_BREAKPOINT_STORE)
		dbreakc |= DBREAKC_STOR_MASK;

	xtensa_wsr(info->address, SREG_DBREAKA + reg);
	xtensa_wsr(dbreakc, SREG_DBREAKC + reg);
}

int arch_install_hw_breakpoint(struct perf_event *bp)
{
	int i;

	if (counter_arch_bp(bp)->type == XTENSA_BREAKPOINT_EXECUTE) {
		/* Breakpoint */
		i = alloc_slot(this_cpu_ptr(bp_on_reg), XCHAL_NUM_IBREAK, bp);
		if (i < 0)
			return i;
		set_ibreak_regs(i, bp);

	} else {
		/* Watchpoint */
		i = alloc_slot(this_cpu_ptr(wp_on_reg), XCHAL_NUM_DBREAK, bp);
		if (i < 0)
			return i;
		set_dbreak_regs(i, bp);
	}
	return 0;
}

/* Release the slot occupied by @bp; return its index or -EBUSY. */
static int free_slot(struct perf_event **slot, size_t n,
		     struct perf_event *bp)
{
	size_t i;

	for (i = 0; i < n; ++i) {
		if (slot[i] == bp) {
			slot[i] = NULL;
			return i;
		}
	}
	return -EBUSY;
}

void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	int i;

	if (info->type == XTENSA_BREAKPOINT_EXECUTE) {
		unsigned long ibreakenable;

		/* Breakpoint */
		i = free_slot(this_cpu_ptr(bp_on_reg), XCHAL_NUM_IBREAK, bp);
		if (i >= 0) {
			RSR(ibreakenable, SREG_IBREAKENABLE);
			WSR(ibreakenable & ~(1 << i), SREG_IBREAKENABLE);
		}
	} else {
		/* Watchpoint */
		i = free_slot(this_cpu_ptr(wp_on_reg), XCHAL_NUM_DBREAK, bp);
		if (i >= 0)
			xtensa_wsr(0, SREG_DBREAKC + i);
	}
}

void hw_breakpoint_pmu_read(struct perf_event *bp)
{
}

void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < XCHAL_NUM_IBREAK; ++i) {
		if (t->ptrace_bp[i]) {
			unregister_hw_breakpoint(t->ptrace_bp[i]);
			t->ptrace_bp[i] = NULL;
		}
	}
	for (i = 0; i < XCHAL_NUM_DBREAK; ++i) {
		if (t->ptrace_wp[i]) {
			unregister_hw_breakpoint(t->ptrace_wp[i]);
			t->ptrace_wp[i] = NULL;
		}
	}
}

/*
 * Set ptrace breakpoint pointers to zero for this task.
 * This is required in order to prevent child processes from unregistering
 * breakpoints held by their parent.
 */
void clear_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	memset(tsk->thread.ptrace_bp, 0, sizeof(tsk->thread.ptrace_bp));
	memset(tsk->thread.ptrace_wp, 0, sizeof(tsk->thread.ptrace_wp));
}

/*
 * Re-program DBREAKA/DBREAKC for every watchpoint installed on this CPU
 * and clear TIF_DB_DISABLED.
 */
void restore_dbreak(void)
{
	int i;

	for (i = 0; i < XCHAL_NUM_DBREAK; ++i) {
		struct perf_event *bp = this_cpu_ptr(wp_on_reg)[i];

		if (bp)
			set_dbreak_regs(i, bp);
	}
	clear_thread_flag(TIF_DB_DISABLED);
}

/*
 * Handle a debug exception caused by a hardware breakpoint or watchpoint,
 * as reported in DEBUGCAUSE. Returns 0 if the exception was recognized,
 * -ENOENT otherwise.
 */
int check_hw_breakpoint(struct pt_regs *regs)
{
	if (regs->debugcause & BIT(DEBUGCAUSE_IBREAK_BIT)) {
		int i;
		struct perf_event **bp = this_cpu_ptr(bp_on_reg);

		for (i = 0; i < XCHAL_NUM_IBREAK; ++i) {
			if (bp[i] && !bp[i]->attr.disabled &&
			    regs->pc == bp[i]->attr.bp_addr)
				perf_bp_event(bp[i], regs);
		}
		return 0;
	} else if (regs->debugcause & BIT(DEBUGCAUSE_DBREAK_BIT)) {
		struct perf_event **bp = this_cpu_ptr(wp_on_reg);
		int dbnum = (regs->debugcause & DEBUGCAUSE_DBNUM_MASK) >>
			DEBUGCAUSE_DBNUM_SHIFT;

		if (dbnum < XCHAL_NUM_DBREAK && bp[dbnum]) {
			if (user_mode(regs)) {
				perf_bp_event(bp[dbnum], regs);
			} else {
				/*
				 * Hit in kernel mode: disable this watchpoint
				 * for now; restore_dbreak() re-arms it.
				 */
				set_thread_flag(TIF_DB_DISABLED);
				xtensa_wsr(0, SREG_DBREAKC + dbnum);
			}
		} else {
			WARN_ONCE(1,
				  "Wrong/unconfigured DBNUM reported in DEBUGCAUSE: %d\n",
				  dbnum);
		}
		return 0;
	}
	return -ENOENT;
}
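
/*
 * Illustrative sketch (not part of this file's code): how user space could
 * request a 4-byte write watchpoint through the generic perf_event_open()
 * breakpoint interface, which is what eventually reaches
 * hw_breakpoint_arch_parse() above. The variable name "watch_var" is
 * hypothetical.
 *
 *	struct perf_event_attr attr = { };
 *	int fd;
 *
 *	attr.type = PERF_TYPE_BREAKPOINT;
 *	attr.size = sizeof(attr);
 *	attr.bp_type = HW_BREAKPOINT_W;			(maps to XTENSA_BREAKPOINT_STORE)
 *	attr.bp_addr = (__u64)(uintptr_t)&watch_var;	(must be bp_len-aligned)
 *	attr.bp_len = HW_BREAKPOINT_LEN_4;		(power of two, at most 64 bytes)
 *	attr.exclude_kernel = 1;
 *
 *	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 */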