/*
 * SuperH process tracing
 *
 * Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
 * Copyright (C) 2002 - 2009 Paul Mundt
 *
 * Audit support by Yuichi Nakamura <ynakam@hitachisoft.jp>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/io.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/tracehook.h>
#include <linux/elf.h>
#include <linux/regset.h>
#include <linux/hw_breakpoint.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/syscalls.h>
#include <asm/fpu.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

/*
 * This routine will get a word off of the process kernel stack.
 */
static inline int get_stack_long(struct task_struct *task, int offset)
{
	unsigned char *stack;

	stack = (unsigned char *)task_pt_regs(task);
	stack += offset;
	return (*((int *)stack));
}

/*
 * This routine will put a word on the process kernel stack.
 */
static inline int put_stack_long(struct task_struct *task, int offset,
				 unsigned long data)
{
	unsigned char *stack;

	stack = (unsigned char *)task_pt_regs(task);
	stack += offset;
	*(unsigned long *) stack = data;
	return 0;
}

void ptrace_triggered(struct perf_event *bp,
		      struct perf_sample_data *data, struct pt_regs *regs)
{
	struct perf_event_attr attr;

	/*
	 * Disable the breakpoint request here since ptrace has defined a
	 * one-shot behaviour for breakpoint exceptions.
	 */
	attr = bp->attr;
	attr.disabled = true;
	modify_user_hw_breakpoint(bp, &attr);
}

static int set_single_step(struct task_struct *tsk, unsigned long addr)
{
	struct thread_struct *thread = &tsk->thread;
	struct perf_event *bp;
	struct perf_event_attr attr;

	bp = thread->ptrace_bps[0];
	if (!bp) {
		ptrace_breakpoint_init(&attr);

		attr.bp_addr = addr;
		attr.bp_len = HW_BREAKPOINT_LEN_2;
		attr.bp_type = HW_BREAKPOINT_R;

		bp = register_user_hw_breakpoint(&attr, ptrace_triggered,
						 NULL, tsk);
		if (IS_ERR(bp))
			return PTR_ERR(bp);

		thread->ptrace_bps[0] = bp;
	} else {
		int err;

		attr = bp->attr;
		attr.bp_addr = addr;
		/* reenable breakpoint */
		attr.disabled = false;
		err = modify_user_hw_breakpoint(bp, &attr);
		if (unlikely(err))
			return err;
	}

	return 0;
}

void user_enable_single_step(struct task_struct *child)
{
	unsigned long pc = get_stack_long(child, offsetof(struct pt_regs, pc));

	set_tsk_thread_flag(child, TIF_SINGLESTEP);

	set_single_step(child, pc);
}

void user_disable_single_step(struct task_struct *child)
{
	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
}

/*
 * Called by kernel/ptrace.c when detaching.
 *
 * Make sure single step bits etc are not set.
 */
void ptrace_disable(struct task_struct *child)
{
	user_disable_single_step(child);
}

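/*
 * Generic register set accessors. The layout is r0..r15 followed by
 * pc, pr, sr, gbr, mach, macl and tra; anything requested beyond
 * struct pt_regs reads back as zero and is ignored on write.
 */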
static int genregs_get(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       void *kbuf, void __user *ubuf)
{
	const struct pt_regs *regs = task_pt_regs(target);
	int ret;

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  regs->regs,
				  0, 16 * sizeof(unsigned long));
	if (!ret)
		/* PC, PR, SR, GBR, MACH, MACL, TRA */
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &regs->pc,
					  offsetof(struct pt_regs, pc),
					  sizeof(struct pt_regs));
	if (!ret)
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       sizeof(struct pt_regs), -1);

	return ret;
}

static int genregs_set(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       const void *kbuf, const void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	int ret;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 regs->regs,
				 0, 16 * sizeof(unsigned long));
	if (!ret && count > 0)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &regs->pc,
					 offsetof(struct pt_regs, pc),
					 sizeof(struct pt_regs));
	if (!ret)
		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						sizeof(struct pt_regs), -1);

	return ret;
}

#ifdef CONFIG_SH_FPU
int fpregs_get(struct task_struct *target,
	       const struct user_regset *regset,
	       unsigned int pos, unsigned int count,
	       void *kbuf, void __user *ubuf)
{
	int ret;

	ret = init_fpu(target);
	if (ret)
		return ret;

	if ((boot_cpu_data.flags & CPU_HAS_FPU))
		return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					   &target->thread.xstate->hardfpu, 0, -1);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &target->thread.xstate->softfpu, 0, -1);
}

static int fpregs_set(struct task_struct *target,
		      const struct user_regset *regset,
		      unsigned int pos, unsigned int count,
		      const void *kbuf, const void __user *ubuf)
{
	int ret;

	ret = init_fpu(target);
	if (ret)
		return ret;

	set_stopped_child_used_math(target);

	if ((boot_cpu_data.flags & CPU_HAS_FPU))
		return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					  &target->thread.xstate->hardfpu, 0, -1);

	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.xstate->softfpu, 0, -1);
}

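/*
 * Only report the FPU regset as active once the target task has
 * actually used its FPU (or soft-FPU) state.
 */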
static int fpregs_active(struct task_struct *target,
			 const struct user_regset *regset)
{
	return tsk_used_math(target) ? regset->n : 0;
}
#endif

#ifdef CONFIG_SH_DSP
static int dspregs_get(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       void *kbuf, void __user *ubuf)
{
	const struct pt_dspregs *regs =
		(struct pt_dspregs *)&target->thread.dsp_status.dsp_regs;
	int ret;

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, regs,
				  0, sizeof(struct pt_dspregs));
	if (!ret)
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       sizeof(struct pt_dspregs), -1);

	return ret;
}

static int dspregs_set(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       const void *kbuf, const void __user *ubuf)
{
	struct pt_dspregs *regs =
		(struct pt_dspregs *)&target->thread.dsp_status.dsp_regs;
	int ret;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, regs,
				 0, sizeof(struct pt_dspregs));
	if (!ret)
		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						sizeof(struct pt_dspregs), -1);

	return ret;
}

static int dspregs_active(struct task_struct *target,
			  const struct user_regset *regset)
{
	struct pt_regs *regs = task_pt_regs(target);

	return regs->sr & SR_DSP ? regset->n : 0;
}
#endif

const struct pt_regs_offset regoffset_table[] = {
	REGS_OFFSET_NAME(0),
	REGS_OFFSET_NAME(1),
	REGS_OFFSET_NAME(2),
	REGS_OFFSET_NAME(3),
	REGS_OFFSET_NAME(4),
	REGS_OFFSET_NAME(5),
	REGS_OFFSET_NAME(6),
	REGS_OFFSET_NAME(7),
	REGS_OFFSET_NAME(8),
	REGS_OFFSET_NAME(9),
	REGS_OFFSET_NAME(10),
	REGS_OFFSET_NAME(11),
	REGS_OFFSET_NAME(12),
	REGS_OFFSET_NAME(13),
	REGS_OFFSET_NAME(14),
	REGS_OFFSET_NAME(15),
	REG_OFFSET_NAME(pc),
	REG_OFFSET_NAME(pr),
	REG_OFFSET_NAME(sr),
	REG_OFFSET_NAME(gbr),
	REG_OFFSET_NAME(mach),
	REG_OFFSET_NAME(macl),
	REG_OFFSET_NAME(tra),
	REG_OFFSET_END,
};

/*
 * These are our native regset flavours.
 */
enum sh_regset {
	REGSET_GENERAL,
#ifdef CONFIG_SH_FPU
	REGSET_FPU,
#endif
#ifdef CONFIG_SH_DSP
	REGSET_DSP,
#endif
};

static const struct user_regset sh_regsets[] = {
	/*
	 * Format is:
	 *	R0 --> R15
	 *	PC, PR, SR, GBR, MACH, MACL, TRA
	 */
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = ELF_NGREG,
		.size = sizeof(long),
		.align = sizeof(long),
		.get = genregs_get,
		.set = genregs_set,
	},

#ifdef CONFIG_SH_FPU
	[REGSET_FPU] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_fpu_struct) / sizeof(long),
		.size = sizeof(long),
		.align = sizeof(long),
		.get = fpregs_get,
		.set = fpregs_set,
		.active = fpregs_active,
	},
#endif

#ifdef CONFIG_SH_DSP
	[REGSET_DSP] = {
		.n = sizeof(struct pt_dspregs) / sizeof(long),
		.size = sizeof(long),
		.align = sizeof(long),
		.get = dspregs_get,
		.set = dspregs_set,
		.active = dspregs_active,
	},
#endif
};

static const struct user_regset_view user_sh_native_view = {
	.name = "sh",
	.e_machine = EM_SH,
	.regsets = sh_regsets,
	.n = ARRAY_SIZE(sh_regsets),
};

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
	return &user_sh_native_view;
}

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	unsigned long __user *datap = (unsigned long __user *)data;
	int ret;

	switch (request) {
	/* read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		unsigned long tmp;

		ret = -EIO;
		if ((addr & 3) || addr < 0 ||
		    addr > sizeof(struct user) - 3)
			break;

		if (addr < sizeof(struct pt_regs))
			tmp = get_stack_long(child, addr);
		else if (addr >= offsetof(struct user, fpu) &&
			 addr < offsetof(struct user, u_fpvalid)) {
			if (!tsk_used_math(child)) {
				if (addr == offsetof(struct user, fpu.fpscr))
					tmp = FPSCR_INIT;
				else
					tmp = 0;
			} else {
				unsigned long index;
				ret = init_fpu(child);
				if (ret)
					break;
				index = addr - offsetof(struct user, fpu);
				tmp = ((unsigned long *)child->thread.xstate)
					[index >> 2];
			}
		} else if (addr == offsetof(struct user, u_fpvalid))
			tmp = !!tsk_used_math(child);
		else if (addr == PT_TEXT_ADDR)
			tmp = child->mm->start_code;
		else if (addr == PT_DATA_ADDR)
			tmp = child->mm->start_data;
		else if (addr == PT_TEXT_END_ADDR)
			tmp = child->mm->end_code;
		else if (addr == PT_TEXT_LEN)
			tmp = child->mm->end_code - child->mm->start_code;
		else
			tmp = 0;
		ret = put_user(tmp, datap);
		break;
	}

	case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
		ret = -EIO;
		if ((addr & 3) || addr < 0 ||
		    addr > sizeof(struct user) - 3)
			break;

		if (addr < sizeof(struct pt_regs))
			ret = put_stack_long(child, addr, data);
		else if (addr >= offsetof(struct user, fpu) &&
			 addr < offsetof(struct user, u_fpvalid)) {
			unsigned long index;
			ret = init_fpu(child);
			if (ret)
				break;
			index = addr - offsetof(struct user, fpu);
			set_stopped_child_used_math(child);
			((unsigned long *)child->thread.xstate)
				[index >> 2] = data;
			ret = 0;
		} else if (addr == offsetof(struct user, u_fpvalid)) {
			conditional_stopped_child_used_math(data, child);
			ret = 0;
		}
		break;

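	/*
	 * Whole register set transfers are handled through the native
	 * regsets defined above.
	 */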
	case PTRACE_GETREGS:
		return copy_regset_to_user(child, &user_sh_native_view,
					   REGSET_GENERAL,
					   0, sizeof(struct pt_regs),
					   datap);
	case PTRACE_SETREGS:
		return copy_regset_from_user(child, &user_sh_native_view,
					     REGSET_GENERAL,
					     0, sizeof(struct pt_regs),
					     datap);
#ifdef CONFIG_SH_FPU
	case PTRACE_GETFPREGS:
		return copy_regset_to_user(child, &user_sh_native_view,
					   REGSET_FPU,
					   0, sizeof(struct user_fpu_struct),
					   datap);
	case PTRACE_SETFPREGS:
		return copy_regset_from_user(child, &user_sh_native_view,
					     REGSET_FPU,
					     0, sizeof(struct user_fpu_struct),
					     datap);
#endif
#ifdef CONFIG_SH_DSP
	case PTRACE_GETDSPREGS:
		return copy_regset_to_user(child, &user_sh_native_view,
					   REGSET_DSP,
					   0, sizeof(struct pt_dspregs),
					   datap);
	case PTRACE_SETDSPREGS:
		return copy_regset_from_user(child, &user_sh_native_view,
					     REGSET_DSP,
					     0, sizeof(struct pt_dspregs),
					     datap);
#endif
	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}

asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
{
	long ret = 0;

	secure_computing_strict(regs->regs[0]);

	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
	    tracehook_report_syscall_entry(regs))
		/*
		 * Tracing decided this syscall should not happen.
		 * We'll return a bogus call number to get an ENOSYS
		 * error, but leave the original number in regs->regs[0].
		 */
		ret = -1L;

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->regs[0]);

	audit_syscall_entry(regs->regs[3], regs->regs[4], regs->regs[5],
			    regs->regs[6], regs->regs[7]);

	return ret ?: regs->regs[0];
}

asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
{
	int step;

	audit_syscall_exit(regs);

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_exit(regs, regs->regs[0]);

	step = test_thread_flag(TIF_SINGLESTEP);
	if (step || test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, step);
}