// SPDX-License-Identifier: GPL-2.0
/*
 * SuperH process tracing
 *
 * Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
 * Copyright (C) 2002 - 2009 Paul Mundt
 *
 * Audit support by Yuichi Nakamura <ynakam@hitachisoft.jp>
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/io.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/tracehook.h>
#include <linux/elf.h>
#include <linux/regset.h>
#include <linux/hw_breakpoint.h>
#include <linux/uaccess.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/syscalls.h>
#include <asm/fpu.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

/*
 * This routine will get a word off of the process kernel stack.
 */
static inline int get_stack_long(struct task_struct *task, int offset)
{
	unsigned char *stack;

	stack = (unsigned char *)task_pt_regs(task);
	stack += offset;
	return (*((int *)stack));
}

/*
 * This routine will put a word on the process kernel stack.
 */
static inline int put_stack_long(struct task_struct *task, int offset,
				 unsigned long data)
{
	unsigned char *stack;

	stack = (unsigned char *)task_pt_regs(task);
	stack += offset;
	*(unsigned long *) stack = data;
	return 0;
}

void ptrace_triggered(struct perf_event *bp,
		      struct perf_sample_data *data, struct pt_regs *regs)
{
	struct perf_event_attr attr;

	/*
	 * Disable the breakpoint request here since ptrace has defined a
	 * one-shot behaviour for breakpoint exceptions.
	 */
	attr = bp->attr;
	attr.disabled = true;
	modify_user_hw_breakpoint(bp, &attr);
}

static int set_single_step(struct task_struct *tsk, unsigned long addr)
{
	struct thread_struct *thread = &tsk->thread;
	struct perf_event *bp;
	struct perf_event_attr attr;

	bp = thread->ptrace_bps[0];
	if (!bp) {
		ptrace_breakpoint_init(&attr);

		attr.bp_addr = addr;
		attr.bp_len = HW_BREAKPOINT_LEN_2;
		attr.bp_type = HW_BREAKPOINT_R;

		bp = register_user_hw_breakpoint(&attr, ptrace_triggered,
						 NULL, tsk);
		if (IS_ERR(bp))
			return PTR_ERR(bp);

		thread->ptrace_bps[0] = bp;
	} else {
		int err;

		attr = bp->attr;
		attr.bp_addr = addr;
		/* reenable breakpoint */
		attr.disabled = false;
		err = modify_user_hw_breakpoint(bp, &attr);
		if (unlikely(err))
			return err;
	}

	return 0;
}

void user_enable_single_step(struct task_struct *child)
{
	unsigned long pc = get_stack_long(child, offsetof(struct pt_regs, pc));

	set_tsk_thread_flag(child, TIF_SINGLESTEP);

	set_single_step(child, pc);
}

void user_disable_single_step(struct task_struct *child)
{
	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
}

/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure single step bits etc are not set.
 */
void ptrace_disable(struct task_struct *child)
{
	user_disable_single_step(child);
}

static int genregs_get(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       void *kbuf, void __user *ubuf)
{
	const struct pt_regs *regs = task_pt_regs(target);
	int ret;

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  regs->regs,
				  0, 16 * sizeof(unsigned long));
	if (!ret)
		/* PC, PR, SR, GBR, MACH, MACL, TRA */
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &regs->pc,
					  offsetof(struct pt_regs, pc),
					  sizeof(struct pt_regs));
	if (!ret)
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       sizeof(struct pt_regs), -1);

	return ret;
}

static int genregs_set(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       const void *kbuf, const void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	int ret;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 regs->regs,
				 0, 16 * sizeof(unsigned long));
	if (!ret && count > 0)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &regs->pc,
					 offsetof(struct pt_regs, pc),
					 sizeof(struct pt_regs));
	if (!ret)
		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						sizeof(struct pt_regs), -1);

	return ret;
}

#ifdef CONFIG_SH_FPU
int fpregs_get(struct task_struct *target,
	       const struct user_regset *regset,
	       unsigned int pos, unsigned int count,
	       void *kbuf, void __user *ubuf)
{
	int ret;

	ret = init_fpu(target);
	if (ret)
		return ret;

	if ((boot_cpu_data.flags & CPU_HAS_FPU))
		return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					   &target->thread.xstate->hardfpu, 0, -1);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &target->thread.xstate->softfpu, 0, -1);
}

static int fpregs_set(struct task_struct *target,
		      const struct user_regset *regset,
		      unsigned int pos, unsigned int count,
		      const void *kbuf, const void __user *ubuf)
{
	int ret;

	ret = init_fpu(target);
	if (ret)
		return ret;

	set_stopped_child_used_math(target);

	if ((boot_cpu_data.flags & CPU_HAS_FPU))
		return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					  &target->thread.xstate->hardfpu, 0, -1);

	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.xstate->softfpu, 0, -1);
}

static int fpregs_active(struct task_struct *target,
			 const struct user_regset *regset)
{
	return tsk_used_math(target) ? regset->n : 0;
}
#endif

#ifdef CONFIG_SH_DSP
static int dspregs_get(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       void *kbuf, void __user *ubuf)
{
	const struct pt_dspregs *regs =
		(struct pt_dspregs *)&target->thread.dsp_status.dsp_regs;
	int ret;

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, regs,
				  0, sizeof(struct pt_dspregs));
	if (!ret)
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       sizeof(struct pt_dspregs), -1);

	return ret;
}

static int dspregs_set(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       const void *kbuf, const void __user *ubuf)
{
	struct pt_dspregs *regs =
		(struct pt_dspregs *)&target->thread.dsp_status.dsp_regs;
	int ret;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, regs,
				 0, sizeof(struct pt_dspregs));
	if (!ret)
		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						sizeof(struct pt_dspregs), -1);

	return ret;
}

static int dspregs_active(struct task_struct *target,
			  const struct user_regset *regset)
{
	struct pt_regs *regs = task_pt_regs(target);

	return regs->sr & SR_DSP ? regset->n : 0;
}
#endif

const struct pt_regs_offset regoffset_table[] = {
	REGS_OFFSET_NAME(0),
	REGS_OFFSET_NAME(1),
	REGS_OFFSET_NAME(2),
	REGS_OFFSET_NAME(3),
	REGS_OFFSET_NAME(4),
	REGS_OFFSET_NAME(5),
	REGS_OFFSET_NAME(6),
	REGS_OFFSET_NAME(7),
	REGS_OFFSET_NAME(8),
	REGS_OFFSET_NAME(9),
	REGS_OFFSET_NAME(10),
	REGS_OFFSET_NAME(11),
	REGS_OFFSET_NAME(12),
	REGS_OFFSET_NAME(13),
	REGS_OFFSET_NAME(14),
	REGS_OFFSET_NAME(15),
	REG_OFFSET_NAME(pc),
	REG_OFFSET_NAME(pr),
	REG_OFFSET_NAME(sr),
	REG_OFFSET_NAME(gbr),
	REG_OFFSET_NAME(mach),
	REG_OFFSET_NAME(macl),
	REG_OFFSET_NAME(tra),
	REG_OFFSET_END,
};

/*
 * These are our native regset flavours.
 */
enum sh_regset {
	REGSET_GENERAL,
#ifdef CONFIG_SH_FPU
	REGSET_FPU,
#endif
#ifdef CONFIG_SH_DSP
	REGSET_DSP,
#endif
};

static const struct user_regset sh_regsets[] = {
	/*
	 * Format is:
	 *	R0 --> R15
	 *	PC, PR, SR, GBR, MACH, MACL, TRA
	 */
	[REGSET_GENERAL] = {
		.core_note_type	= NT_PRSTATUS,
		.n		= ELF_NGREG,
		.size		= sizeof(long),
		.align		= sizeof(long),
		.get		= genregs_get,
		.set		= genregs_set,
	},

#ifdef CONFIG_SH_FPU
	[REGSET_FPU] = {
		.core_note_type	= NT_PRFPREG,
		.n		= sizeof(struct user_fpu_struct) / sizeof(long),
		.size		= sizeof(long),
		.align		= sizeof(long),
		.get		= fpregs_get,
		.set		= fpregs_set,
		.active		= fpregs_active,
	},
#endif

#ifdef CONFIG_SH_DSP
	[REGSET_DSP] = {
		.n		= sizeof(struct pt_dspregs) / sizeof(long),
		.size		= sizeof(long),
		.align		= sizeof(long),
		.get		= dspregs_get,
		.set		= dspregs_set,
		.active		= dspregs_active,
	},
#endif
};

static const struct user_regset_view user_sh_native_view = {
	.name		= "sh",
	.e_machine	= EM_SH,
	.regsets	= sh_regsets,
	.n		= ARRAY_SIZE(sh_regsets),
};

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
	return &user_sh_native_view;
}

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	unsigned long __user *datap = (unsigned long __user *)data;
	int ret;

	switch (request) {
	/* read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		unsigned long tmp;

		ret = -EIO;
		if ((addr & 3) || addr < 0 ||
		    addr > sizeof(struct user) - 3)
			break;

		if (addr < sizeof(struct pt_regs))
			tmp = get_stack_long(child, addr);
		else if (addr >= offsetof(struct user, fpu) &&
			 addr < offsetof(struct user, u_fpvalid)) {
			if (!tsk_used_math(child)) {
				if (addr == offsetof(struct user, fpu.fpscr))
					tmp = FPSCR_INIT;
				else
					tmp = 0;
			} else {
				unsigned long index;
				ret = init_fpu(child);
				if (ret)
					break;
				index = addr - offsetof(struct user, fpu);
				tmp = ((unsigned long *)child->thread.xstate)
					[index >> 2];
			}
		} else if (addr == offsetof(struct user, u_fpvalid))
			tmp = !!tsk_used_math(child);
		else if (addr == PT_TEXT_ADDR)
			tmp = child->mm->start_code;
		else if (addr == PT_DATA_ADDR)
			tmp = child->mm->start_data;
		else if (addr == PT_TEXT_END_ADDR)
			tmp = child->mm->end_code;
		else if (addr == PT_TEXT_LEN)
			tmp = child->mm->end_code - child->mm->start_code;
		else
			tmp = 0;
		ret = put_user(tmp, datap);
		break;
	}

	case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
		ret = -EIO;
		if ((addr & 3) || addr < 0 ||
		    addr > sizeof(struct user) - 3)
			break;

		if (addr < sizeof(struct pt_regs))
			ret = put_stack_long(child, addr, data);
		else if (addr >= offsetof(struct user, fpu) &&
			 addr < offsetof(struct user, u_fpvalid)) {
			unsigned long index;
			ret = init_fpu(child);
			if (ret)
				break;
			index = addr - offsetof(struct user, fpu);
			set_stopped_child_used_math(child);
			((unsigned long *)child->thread.xstate)
				[index >> 2] = data;
			ret = 0;
		} else if (addr == offsetof(struct user, u_fpvalid)) {
			conditional_stopped_child_used_math(data, child);
			ret = 0;
		}
		break;

	case PTRACE_GETREGS:
		return copy_regset_to_user(child, &user_sh_native_view,
					   REGSET_GENERAL,
					   0, sizeof(struct pt_regs),
					   datap);
	case PTRACE_SETREGS:
		return copy_regset_from_user(child, &user_sh_native_view,
					     REGSET_GENERAL,
					     0, sizeof(struct pt_regs),
					     datap);
#ifdef CONFIG_SH_FPU
	case PTRACE_GETFPREGS:
		return copy_regset_to_user(child, &user_sh_native_view,
					   REGSET_FPU,
					   0, sizeof(struct user_fpu_struct),
					   datap);
	case PTRACE_SETFPREGS:
		return copy_regset_from_user(child, &user_sh_native_view,
					     REGSET_FPU,
					     0, sizeof(struct user_fpu_struct),
					     datap);
#endif
#ifdef CONFIG_SH_DSP
	case PTRACE_GETDSPREGS:
		return copy_regset_to_user(child, &user_sh_native_view,
					   REGSET_DSP,
					   0, sizeof(struct pt_dspregs),
					   datap);
	case PTRACE_SETDSPREGS:
		return copy_regset_from_user(child, &user_sh_native_view,
					     REGSET_DSP,
					     0, sizeof(struct pt_dspregs),
					     datap);
#endif
	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}

asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
{
	long ret = 0;

	secure_computing_strict(regs->regs[0]);

	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
	    tracehook_report_syscall_entry(regs))
		/*
		 * Tracing decided this syscall should not happen.
		 * We'll return a bogus call number to get an ENOSYS
		 * error, but leave the original number in regs->regs[0].
		 */
		ret = -1L;

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->regs[0]);

	audit_syscall_entry(regs->regs[3], regs->regs[4], regs->regs[5],
			    regs->regs[6], regs->regs[7]);

	return ret ?: regs->regs[0];
}

asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
{
	int step;

	audit_syscall_exit(regs);

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_exit(regs, regs->regs[0]);

	step = test_thread_flag(TIF_SINGLESTEP);
	if (step || test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, step);
}