/*
 * SuperH process tracing
 *
 * Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
 * Copyright (C) 2002 - 2009 Paul Mundt
 *
 * Audit support by Yuichi Nakamura <ynakam@hitachisoft.jp>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/io.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/tracehook.h>
#include <linux/elf.h>
#include <linux/regset.h>
#include <linux/hw_breakpoint.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/syscalls.h>
#include <asm/fpu.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

/*
 * This routine will get a word off of the process kernel stack.
 */
static inline int get_stack_long(struct task_struct *task, int offset)
{
	unsigned char *stack;

	stack = (unsigned char *)task_pt_regs(task);
	stack += offset;
	return (*((int *)stack));
}

/*
 * This routine will put a word on the process kernel stack.
 */
static inline int put_stack_long(struct task_struct *task, int offset,
				 unsigned long data)
{
	unsigned char *stack;

	stack = (unsigned char *)task_pt_regs(task);
	stack += offset;
	*(unsigned long *) stack = data;
	return 0;
}

void ptrace_triggered(struct perf_event *bp,
		      struct perf_sample_data *data, struct pt_regs *regs)
{
	struct perf_event_attr attr;

	/*
	 * Disable the breakpoint request here since ptrace has defined a
	 * one-shot behaviour for breakpoint exceptions.
	 */
	attr = bp->attr;
	attr.disabled = true;
	modify_user_hw_breakpoint(bp, &attr);
}

static int set_single_step(struct task_struct *tsk, unsigned long addr)
{
	struct thread_struct *thread = &tsk->thread;
	struct perf_event *bp;
	struct perf_event_attr attr;

	bp = thread->ptrace_bps[0];
	if (!bp) {
		ptrace_breakpoint_init(&attr);

		attr.bp_addr = addr;
		attr.bp_len = HW_BREAKPOINT_LEN_2;
		attr.bp_type = HW_BREAKPOINT_R;

		bp = register_user_hw_breakpoint(&attr, ptrace_triggered,
						 NULL, tsk);
		if (IS_ERR(bp))
			return PTR_ERR(bp);

		thread->ptrace_bps[0] = bp;
	} else {
		int err;

		attr = bp->attr;
		attr.bp_addr = addr;
		/* reenable breakpoint */
		attr.disabled = false;
		err = modify_user_hw_breakpoint(bp, &attr);
		if (unlikely(err))
			return err;
	}

	return 0;
}

void user_enable_single_step(struct task_struct *child)
{
	unsigned long pc = get_stack_long(child, offsetof(struct pt_regs, pc));

	set_tsk_thread_flag(child, TIF_SINGLESTEP);

	set_single_step(child, pc);
}

void user_disable_single_step(struct task_struct *child)
{
	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
}
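
/*
 * Note: single-step for ptrace is emulated above with a one-shot user
 * hardware breakpoint armed at the task's current PC; ptrace_triggered()
 * disables it again as soon as it fires.
 */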

/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure single step bits etc are not set.
 */
void ptrace_disable(struct task_struct *child)
{
	user_disable_single_step(child);
}

static int genregs_get(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       void *kbuf, void __user *ubuf)
{
	const struct pt_regs *regs = task_pt_regs(target);
	int ret;

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  regs->regs,
				  0, 16 * sizeof(unsigned long));
	if (!ret)
		/* PC, PR, SR, GBR, MACH, MACL, TRA */
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &regs->pc,
					  offsetof(struct pt_regs, pc),
					  sizeof(struct pt_regs));
	if (!ret)
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       sizeof(struct pt_regs), -1);

	return ret;
}

static int genregs_set(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       const void *kbuf, const void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	int ret;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 regs->regs,
				 0, 16 * sizeof(unsigned long));
	if (!ret && count > 0)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &regs->pc,
					 offsetof(struct pt_regs, pc),
					 sizeof(struct pt_regs));
	if (!ret)
		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						sizeof(struct pt_regs), -1);

	return ret;
}

#ifdef CONFIG_SH_FPU
int fpregs_get(struct task_struct *target,
	       const struct user_regset *regset,
	       unsigned int pos, unsigned int count,
	       void *kbuf, void __user *ubuf)
{
	int ret;

	ret = init_fpu(target);
	if (ret)
		return ret;

	if ((boot_cpu_data.flags & CPU_HAS_FPU))
		return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					   &target->thread.xstate->hardfpu, 0, -1);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &target->thread.xstate->softfpu, 0, -1);
}

static int fpregs_set(struct task_struct *target,
		      const struct user_regset *regset,
		      unsigned int pos, unsigned int count,
		      const void *kbuf, const void __user *ubuf)
{
	int ret;

	ret = init_fpu(target);
	if (ret)
		return ret;

	set_stopped_child_used_math(target);

	if ((boot_cpu_data.flags & CPU_HAS_FPU))
		return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					  &target->thread.xstate->hardfpu, 0, -1);

	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.xstate->softfpu, 0, -1);
}
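
/*
 * The ->active hook below tells the regset core whether this regset
 * carries any state for the task: the FPU regset is only reported for
 * tasks that have actually used math.
 */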
static int fpregs_active(struct task_struct *target,
			 const struct user_regset *regset)
{
	return tsk_used_math(target) ? regset->n : 0;
}
#endif

#ifdef CONFIG_SH_DSP
static int dspregs_get(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       void *kbuf, void __user *ubuf)
{
	const struct pt_dspregs *regs =
		(struct pt_dspregs *)&target->thread.dsp_status.dsp_regs;
	int ret;

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, regs,
				  0, sizeof(struct pt_dspregs));
	if (!ret)
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       sizeof(struct pt_dspregs), -1);

	return ret;
}

static int dspregs_set(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       const void *kbuf, const void __user *ubuf)
{
	struct pt_dspregs *regs =
		(struct pt_dspregs *)&target->thread.dsp_status.dsp_regs;
	int ret;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, regs,
				 0, sizeof(struct pt_dspregs));
	if (!ret)
		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						sizeof(struct pt_dspregs), -1);

	return ret;
}

static int dspregs_active(struct task_struct *target,
			  const struct user_regset *regset)
{
	struct pt_regs *regs = task_pt_regs(target);

	return regs->sr & SR_DSP ? regset->n : 0;
}
#endif

const struct pt_regs_offset regoffset_table[] = {
	REGS_OFFSET_NAME(0),
	REGS_OFFSET_NAME(1),
	REGS_OFFSET_NAME(2),
	REGS_OFFSET_NAME(3),
	REGS_OFFSET_NAME(4),
	REGS_OFFSET_NAME(5),
	REGS_OFFSET_NAME(6),
	REGS_OFFSET_NAME(7),
	REGS_OFFSET_NAME(8),
	REGS_OFFSET_NAME(9),
	REGS_OFFSET_NAME(10),
	REGS_OFFSET_NAME(11),
	REGS_OFFSET_NAME(12),
	REGS_OFFSET_NAME(13),
	REGS_OFFSET_NAME(14),
	REGS_OFFSET_NAME(15),
	REG_OFFSET_NAME(pc),
	REG_OFFSET_NAME(pr),
	REG_OFFSET_NAME(sr),
	REG_OFFSET_NAME(gbr),
	REG_OFFSET_NAME(mach),
	REG_OFFSET_NAME(macl),
	REG_OFFSET_NAME(tra),
	REG_OFFSET_END,
};
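
/*
 * The offset table above maps register names to their byte offsets in
 * struct pt_regs so that generic helpers (e.g. regs_query_register_offset(),
 * as used by kprobe-based event tracing) can look registers up by name.
 */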

/*
 * These are our native regset flavours.
 */
enum sh_regset {
	REGSET_GENERAL,
#ifdef CONFIG_SH_FPU
	REGSET_FPU,
#endif
#ifdef CONFIG_SH_DSP
	REGSET_DSP,
#endif
};

static const struct user_regset sh_regsets[] = {
	/*
	 * Format is:
	 *	R0 --> R15
	 *	PC, PR, SR, GBR, MACH, MACL, TRA
	 */
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = ELF_NGREG,
		.size = sizeof(long),
		.align = sizeof(long),
		.get = genregs_get,
		.set = genregs_set,
	},

#ifdef CONFIG_SH_FPU
	[REGSET_FPU] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_fpu_struct) / sizeof(long),
		.size = sizeof(long),
		.align = sizeof(long),
		.get = fpregs_get,
		.set = fpregs_set,
		.active = fpregs_active,
	},
#endif

#ifdef CONFIG_SH_DSP
	[REGSET_DSP] = {
		.n = sizeof(struct pt_dspregs) / sizeof(long),
		.size = sizeof(long),
		.align = sizeof(long),
		.get = dspregs_get,
		.set = dspregs_set,
		.active = dspregs_active,
	},
#endif
};

static const struct user_regset_view user_sh_native_view = {
	.name = "sh",
	.e_machine = EM_SH,
	.regsets = sh_regsets,
	.n = ARRAY_SIZE(sh_regsets),
};

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
	return &user_sh_native_view;
}

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	unsigned long __user *datap = (unsigned long __user *)data;
	int ret;

	switch (request) {
	/* read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		unsigned long tmp;

		ret = -EIO;
		if ((addr & 3) || addr < 0 ||
		    addr > sizeof(struct user) - 3)
			break;

		if (addr < sizeof(struct pt_regs))
			tmp = get_stack_long(child, addr);
		else if (addr >= offsetof(struct user, fpu) &&
			 addr < offsetof(struct user, u_fpvalid)) {
			if (!tsk_used_math(child)) {
				if (addr == offsetof(struct user, fpu.fpscr))
					tmp = FPSCR_INIT;
				else
					tmp = 0;
			} else {
				unsigned long index;
				ret = init_fpu(child);
				if (ret)
					break;
				index = addr - offsetof(struct user, fpu);
				tmp = ((unsigned long *)child->thread.xstate)
					[index >> 2];
			}
		} else if (addr == offsetof(struct user, u_fpvalid))
			tmp = !!tsk_used_math(child);
		else if (addr == PT_TEXT_ADDR)
			tmp = child->mm->start_code;
		else if (addr == PT_DATA_ADDR)
			tmp = child->mm->start_data;
		else if (addr == PT_TEXT_END_ADDR)
			tmp = child->mm->end_code;
		else if (addr == PT_TEXT_LEN)
			tmp = child->mm->end_code - child->mm->start_code;
		else
			tmp = 0;
		ret = put_user(tmp, datap);
		break;
	}

	case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
		ret = -EIO;
		if ((addr & 3) || addr < 0 ||
		    addr > sizeof(struct user) - 3)
			break;

		if (addr < sizeof(struct pt_regs))
			ret = put_stack_long(child, addr, data);
		else if (addr >= offsetof(struct user, fpu) &&
			 addr < offsetof(struct user, u_fpvalid)) {
			unsigned long index;
			ret = init_fpu(child);
			if (ret)
				break;
			index = addr - offsetof(struct user, fpu);
			set_stopped_child_used_math(child);
			((unsigned long *)child->thread.xstate)
				[index >> 2] = data;
			ret = 0;
		} else if (addr == offsetof(struct user, u_fpvalid)) {
			conditional_stopped_child_used_math(data, child);
			ret = 0;
		}
		break;
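
	/*
	 * The register-block requests below are thin wrappers around the
	 * regset views defined above; PTRACE_GETREGSET and PTRACE_SETREGSET
	 * reach the same regsets through the generic ptrace code.
	 */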
	case PTRACE_GETREGS:
		return copy_regset_to_user(child, &user_sh_native_view,
					   REGSET_GENERAL,
					   0, sizeof(struct pt_regs),
					   datap);
	case PTRACE_SETREGS:
		return copy_regset_from_user(child, &user_sh_native_view,
					     REGSET_GENERAL,
					     0, sizeof(struct pt_regs),
					     datap);
#ifdef CONFIG_SH_FPU
	case PTRACE_GETFPREGS:
		return copy_regset_to_user(child, &user_sh_native_view,
					   REGSET_FPU,
					   0, sizeof(struct user_fpu_struct),
					   datap);
	case PTRACE_SETFPREGS:
		return copy_regset_from_user(child, &user_sh_native_view,
					     REGSET_FPU,
					     0, sizeof(struct user_fpu_struct),
					     datap);
#endif
#ifdef CONFIG_SH_DSP
	case PTRACE_GETDSPREGS:
		return copy_regset_to_user(child, &user_sh_native_view,
					   REGSET_DSP,
					   0, sizeof(struct pt_dspregs),
					   datap);
	case PTRACE_SETDSPREGS:
		return copy_regset_from_user(child, &user_sh_native_view,
					     REGSET_DSP,
					     0, sizeof(struct pt_dspregs),
					     datap);
#endif
	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}

asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
{
	long ret = 0;

	secure_computing_strict(regs->regs[0]);

	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
	    tracehook_report_syscall_entry(regs))
		/*
		 * Tracing decided this syscall should not happen.
		 * We'll return a bogus call number to get an ENOSYS
		 * error, but leave the original number in regs->regs[0].
		 */
		ret = -1L;

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->regs[0]);

	audit_syscall_entry(regs->regs[3], regs->regs[4], regs->regs[5],
			    regs->regs[6], regs->regs[7]);

	return ret ?: regs->regs[0];
}

asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
{
	int step;

	audit_syscall_exit(regs);

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_exit(regs, regs->regs[0]);

	step = test_thread_flag(TIF_SINGLESTEP);
	if (step || test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, step);
}