/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1992 Ross Biro
 * Copyright (C) Linus Torvalds
 * Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle
 * Copyright (C) 1996 David S. Miller
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 1999 MIPS Technologies, Inc.
 * Copyright (C) 2000 Ulf Carlsson
 *
 * At this time Linux/MIPS64 only supports syscall tracing, even for 32-bit
 * binaries.
 */
#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/elf.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/regset.h>
#include <linux/smp.h>
#include <linux/security.h>
#include <linux/stddef.h>
#include <linux/tracehook.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/ftrace.h>

#include <asm/byteorder.h>
#include <asm/cpu.h>
#include <asm/cpu-info.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/syscall.h>
#include <linux/uaccess.h>
#include <asm/bootinfo.h>
#include <asm/reg.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

static void init_fp_ctx(struct task_struct *target)
{
	/* If FP has been used then the target already has context */
	if (tsk_used_math(target))
		return;

	/* Begin with data registers set to all 1s... */
	memset(&target->thread.fpu.fpr, ~0, sizeof(target->thread.fpu.fpr));

	/* FCSR has been preset by `mips_set_personality_nan'. */

	/*
	 * Record that the target has "used" math, such that the context
	 * just initialised, and any modifications made by the caller,
	 * aren't discarded.
	 */
	set_stopped_child_used_math(target);
}

/*
 * Called by kernel/ptrace.c when detaching.
 *
 * Make sure single step bits etc are not set.
 */
void ptrace_disable(struct task_struct *child)
{
	/* Don't load the watchpoint registers for the ex-child. */
	clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
}

/*
 * Poke at FCSR according to its mask. Set the Cause bits even
 * if a corresponding Enable bit is set. This will be noticed at
 * the time the thread is switched to and SIGFPE thrown accordingly.
 */
static void ptrace_setfcr31(struct task_struct *child, u32 value)
{
	u32 fcr31;
	u32 mask;

	fcr31 = child->thread.fpu.fcr31;
	mask = boot_cpu_data.fpu_msk31;
	child->thread.fpu.fcr31 = (value & ~mask) | (fcr31 & mask);
}

/*
 * Read a general register set. We always use the 64-bit format, even
 * for 32-bit kernels and for 32-bit processes on a 64-bit kernel.
 * Registers are sign extended to fill the available space.
 */
int ptrace_getregs(struct task_struct *child, struct user_pt_regs __user *data)
{
	struct pt_regs *regs;
	int i;

	if (!access_ok(VERIFY_WRITE, data, 38 * 8))
		return -EIO;

	regs = task_pt_regs(child);

	for (i = 0; i < 32; i++)
		__put_user((long)regs->regs[i], (__s64 __user *)&data->regs[i]);
	__put_user((long)regs->lo, (__s64 __user *)&data->lo);
	__put_user((long)regs->hi, (__s64 __user *)&data->hi);
	__put_user((long)regs->cp0_epc, (__s64 __user *)&data->cp0_epc);
	__put_user((long)regs->cp0_badvaddr, (__s64 __user *)&data->cp0_badvaddr);
	__put_user((long)regs->cp0_status, (__s64 __user *)&data->cp0_status);
	__put_user((long)regs->cp0_cause, (__s64 __user *)&data->cp0_cause);

	return 0;
}

/*
 * Write a general register set. As for PTRACE_GETREGS, we always use
 * the 64-bit format. On a 32-bit kernel only the lower order half
 * (according to endianness) will be used.
 */
int ptrace_setregs(struct task_struct *child, struct user_pt_regs __user *data)
{
	struct pt_regs *regs;
	int i;

	if (!access_ok(VERIFY_READ, data, 38 * 8))
		return -EIO;

	regs = task_pt_regs(child);

	for (i = 0; i < 32; i++)
		__get_user(regs->regs[i], (__s64 __user *)&data->regs[i]);
	__get_user(regs->lo, (__s64 __user *)&data->lo);
	__get_user(regs->hi, (__s64 __user *)&data->hi);
	__get_user(regs->cp0_epc, (__s64 __user *)&data->cp0_epc);

	/* badvaddr, status, and cause may not be written. */

	/* System call number may have been changed */
	mips_syscall_update_nr(child, regs);

	return 0;
}
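
/*
 * Example (illustrative sketch only): a tracer reading the tracee's GP
 * registers with PTRACE_GETREGS. Per the comment above, the buffer is
 * 38 slots of 8 bytes each regardless of ABI: regs[0..31], lo, hi,
 * cp0_epc, cp0_badvaddr, cp0_status, cp0_cause. Assumes "pid" names an
 * already-attached, stopped tracee.
 *
 *	#include <sys/ptrace.h>
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	uint64_t gp[38];
 *	if (ptrace(PTRACE_GETREGS, pid, NULL, gp) == 0)
 *		printf("epc = %#llx\n",
 *		       (unsigned long long)gp[34]); // cp0_epc in this layout
 */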

int ptrace_getfpregs(struct task_struct *child, __u32 __user *data)
{
	int i;

	if (!access_ok(VERIFY_WRITE, data, 33 * 8))
		return -EIO;

	if (tsk_used_math(child)) {
		union fpureg *fregs = get_fpu_regs(child);
		for (i = 0; i < 32; i++)
			__put_user(get_fpr64(&fregs[i], 0),
				   i + (__u64 __user *)data);
	} else {
		for (i = 0; i < 32; i++)
			__put_user((__u64) -1, i + (__u64 __user *) data);
	}

	__put_user(child->thread.fpu.fcr31, data + 64);
	__put_user(boot_cpu_data.fpu_id, data + 65);

	return 0;
}

int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
{
	union fpureg *fregs;
	u64 fpr_val;
	u32 value;
	int i;

	if (!access_ok(VERIFY_READ, data, 33 * 8))
		return -EIO;

	init_fp_ctx(child);
	fregs = get_fpu_regs(child);

	for (i = 0; i < 32; i++) {
		__get_user(fpr_val, i + (__u64 __user *)data);
		set_fpr64(&fregs[i], 0, fpr_val);
	}

	__get_user(value, data + 64);
	ptrace_setfcr31(child, value);

	/* FIR may not be written. */

	return 0;
}

int ptrace_get_watch_regs(struct task_struct *child,
			  struct pt_watch_regs __user *addr)
{
	enum pt_watch_style style;
	int i;

	if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
		return -EIO;
	if (!access_ok(VERIFY_WRITE, addr, sizeof(struct pt_watch_regs)))
		return -EIO;

#ifdef CONFIG_32BIT
	style = pt_watch_style_mips32;
#define WATCH_STYLE mips32
#else
	style = pt_watch_style_mips64;
#define WATCH_STYLE mips64
#endif

	__put_user(style, &addr->style);
	__put_user(boot_cpu_data.watch_reg_use_cnt,
		   &addr->WATCH_STYLE.num_valid);
	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
		__put_user(child->thread.watch.mips3264.watchlo[i],
			   &addr->WATCH_STYLE.watchlo[i]);
		__put_user(child->thread.watch.mips3264.watchhi[i] &
				(MIPS_WATCHHI_MASK | MIPS_WATCHHI_IRW),
			   &addr->WATCH_STYLE.watchhi[i]);
		__put_user(boot_cpu_data.watch_reg_masks[i],
			   &addr->WATCH_STYLE.watch_masks[i]);
	}
	for (; i < 8; i++) {
		__put_user(0, &addr->WATCH_STYLE.watchlo[i]);
		__put_user(0, &addr->WATCH_STYLE.watchhi[i]);
		__put_user(0, &addr->WATCH_STYLE.watch_masks[i]);
	}

	return 0;
}

int ptrace_set_watch_regs(struct task_struct *child,
			  struct pt_watch_regs __user *addr)
{
	int i;
	int watch_active = 0;
	unsigned long lt[NUM_WATCH_REGS];
	u16 ht[NUM_WATCH_REGS];

	if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
		return -EIO;
	if (!access_ok(VERIFY_READ, addr, sizeof(struct pt_watch_regs)))
		return -EIO;
	/* Check the values. */
	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
		__get_user(lt[i], &addr->WATCH_STYLE.watchlo[i]);
#ifdef CONFIG_32BIT
		if (lt[i] & __UA_LIMIT)
			return -EINVAL;
#else
		if (test_tsk_thread_flag(child, TIF_32BIT_ADDR)) {
			if (lt[i] & 0xffffffff80000000UL)
				return -EINVAL;
		} else {
			if (lt[i] & __UA_LIMIT)
				return -EINVAL;
		}
#endif
		__get_user(ht[i], &addr->WATCH_STYLE.watchhi[i]);
		if (ht[i] & ~MIPS_WATCHHI_MASK)
			return -EINVAL;
	}
	/* Install them. */
	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
		if (lt[i] & MIPS_WATCHLO_IRW)
			watch_active = 1;
		child->thread.watch.mips3264.watchlo[i] = lt[i];
		/* Set the G bit. */
		child->thread.watch.mips3264.watchhi[i] = ht[i];
	}

	if (watch_active)
		set_tsk_thread_flag(child, TIF_LOAD_WATCH);
	else
		clear_tsk_thread_flag(child, TIF_LOAD_WATCH);

	return 0;
}
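
/*
 * Example (illustrative sketch only): a tracer arming a read/write
 * hardware watchpoint via the two requests above. The struct is passed
 * as the ptrace "addr" argument. The low bits of watchlo are the I/R/W
 * enable bits; the 0x3 (R|W) value below is an assumption, check the
 * WatchLo register description and <asm/ptrace.h> for the exact bits,
 * and use the mips32 view instead of mips64 for a 32-bit layout.
 *
 *	struct pt_watch_regs wr;
 *
 *	ptrace(PTRACE_GET_WATCH_REGS, pid, &wr, NULL);
 *	if (wr.mips64.num_valid > 0) {
 *		wr.mips64.watchlo[0] = (watched_addr & ~7UL) | 0x3; // R | W
 *		wr.mips64.watchhi[0] = 0;
 *		ptrace(PTRACE_SET_WATCH_REGS, pid, &wr, NULL);
 *	}
 */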

/* regset get/set implementations */

#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)

static int gpr32_get(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     void *kbuf, void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	u32 uregs[ELF_NGREG] = {};

	mips_dump_regs32(uregs, regs);
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0,
				   sizeof(uregs));
}

static int gpr32_set(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	u32 uregs[ELF_NGREG];
	unsigned start, num_regs, i;
	int err;

	start = pos / sizeof(u32);
	num_regs = count / sizeof(u32);

	if (start + num_regs > ELF_NGREG)
		return -EIO;

	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
				 sizeof(uregs));
	if (err)
		return err;

	for (i = start; i < num_regs; i++) {
		/*
		 * Cast all values to signed here so that if this is a 64-bit
		 * kernel, the supplied 32-bit values will be sign extended.
		 */
		switch (i) {
		case MIPS32_EF_R1 ... MIPS32_EF_R25:
			/* k0/k1 are ignored. */
		case MIPS32_EF_R28 ... MIPS32_EF_R31:
			regs->regs[i - MIPS32_EF_R0] = (s32)uregs[i];
			break;
		case MIPS32_EF_LO:
			regs->lo = (s32)uregs[i];
			break;
		case MIPS32_EF_HI:
			regs->hi = (s32)uregs[i];
			break;
		case MIPS32_EF_CP0_EPC:
			regs->cp0_epc = (s32)uregs[i];
			break;
		}
	}

	/* System call number may have been changed */
	mips_syscall_update_nr(target, regs);

	return 0;
}

#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */

#ifdef CONFIG_64BIT

static int gpr64_get(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     void *kbuf, void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	u64 uregs[ELF_NGREG] = {};

	mips_dump_regs64(uregs, regs);
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0,
				   sizeof(uregs));
}

static int gpr64_set(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	u64 uregs[ELF_NGREG];
	unsigned start, num_regs, i;
	int err;

	start = pos / sizeof(u64);
	num_regs = count / sizeof(u64);

	if (start + num_regs > ELF_NGREG)
		return -EIO;

	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
				 sizeof(uregs));
	if (err)
		return err;

	for (i = start; i < num_regs; i++) {
		switch (i) {
		case MIPS64_EF_R1 ... MIPS64_EF_R25:
			/* k0/k1 are ignored. */
		case MIPS64_EF_R28 ... MIPS64_EF_R31:
			regs->regs[i - MIPS64_EF_R0] = uregs[i];
			break;
		case MIPS64_EF_LO:
			regs->lo = uregs[i];
			break;
		case MIPS64_EF_HI:
			regs->hi = uregs[i];
			break;
		case MIPS64_EF_CP0_EPC:
			regs->cp0_epc = uregs[i];
			break;
		}
	}

	/* System call number may have been changed */
	mips_syscall_update_nr(target, regs);

	return 0;
}

#endif /* CONFIG_64BIT */

static int fpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	unsigned i;
	int err;
	u64 fpr_val;

	/* XXX fcr31 */

	if (sizeof(target->thread.fpu.fpr[i]) == sizeof(elf_fpreg_t))
		return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					   &target->thread.fpu,
					   0, sizeof(elf_fpregset_t));

	for (i = 0; i < NUM_FPU_REGS; i++) {
		fpr_val = get_fpr64(&target->thread.fpu.fpr[i], 0);
		err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &fpr_val, i * sizeof(elf_fpreg_t),
					  (i + 1) * sizeof(elf_fpreg_t));
		if (err)
			return err;
	}

	return 0;
}

static int fpr_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	unsigned i;
	int err;
	u64 fpr_val;

	/* XXX fcr31 */

	init_fp_ctx(target);

	if (sizeof(target->thread.fpu.fpr[i]) == sizeof(elf_fpreg_t))
		return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					  &target->thread.fpu,
					  0, sizeof(elf_fpregset_t));

	BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
	for (i = 0; i < NUM_FPU_REGS && count >= sizeof(elf_fpreg_t); i++) {
		err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &fpr_val, i * sizeof(elf_fpreg_t),
					 (i + 1) * sizeof(elf_fpreg_t));
		if (err)
			return err;
		set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val);
	}

	return 0;
}

enum mips_regset {
	REGSET_GPR,
	REGSET_FPR,
};

struct pt_regs_offset {
	const char *name;
	int offset;
};

#define REG_OFFSET_NAME(reg, r) {				\
	.name = #reg,						\
	.offset = offsetof(struct pt_regs, r)			\
}

#define REG_OFFSET_END {					\
	.name = NULL,						\
	.offset = 0						\
}

static const struct pt_regs_offset regoffset_table[] = {
	REG_OFFSET_NAME(r0, regs[0]),
	REG_OFFSET_NAME(r1, regs[1]),
	REG_OFFSET_NAME(r2, regs[2]),
	REG_OFFSET_NAME(r3, regs[3]),
	REG_OFFSET_NAME(r4, regs[4]),
	REG_OFFSET_NAME(r5, regs[5]),
	REG_OFFSET_NAME(r6, regs[6]),
	REG_OFFSET_NAME(r7, regs[7]),
	REG_OFFSET_NAME(r8, regs[8]),
	REG_OFFSET_NAME(r9, regs[9]),
	REG_OFFSET_NAME(r10, regs[10]),
	REG_OFFSET_NAME(r11, regs[11]),
	REG_OFFSET_NAME(r12, regs[12]),
	REG_OFFSET_NAME(r13, regs[13]),
	REG_OFFSET_NAME(r14, regs[14]),
	REG_OFFSET_NAME(r15, regs[15]),
	REG_OFFSET_NAME(r16, regs[16]),
	REG_OFFSET_NAME(r17, regs[17]),
	REG_OFFSET_NAME(r18, regs[18]),
	REG_OFFSET_NAME(r19, regs[19]),
	REG_OFFSET_NAME(r20, regs[20]),
	REG_OFFSET_NAME(r21, regs[21]),
	REG_OFFSET_NAME(r22, regs[22]),
	REG_OFFSET_NAME(r23, regs[23]),
	REG_OFFSET_NAME(r24, regs[24]),
	REG_OFFSET_NAME(r25, regs[25]),
	REG_OFFSET_NAME(r26, regs[26]),
	REG_OFFSET_NAME(r27, regs[27]),
	REG_OFFSET_NAME(r28, regs[28]),
	REG_OFFSET_NAME(r29, regs[29]),
	REG_OFFSET_NAME(r30, regs[30]),
	REG_OFFSET_NAME(r31, regs[31]),
	REG_OFFSET_NAME(c0_status, cp0_status),
	REG_OFFSET_NAME(hi, hi),
	REG_OFFSET_NAME(lo, lo),
#ifdef CONFIG_CPU_HAS_SMARTMIPS
	REG_OFFSET_NAME(acx, acx),
#endif
	REG_OFFSET_NAME(c0_badvaddr, cp0_badvaddr),
	REG_OFFSET_NAME(c0_cause, cp0_cause),
	REG_OFFSET_NAME(c0_epc, cp0_epc),
#ifdef CONFIG_CPU_CAVIUM_OCTEON
	REG_OFFSET_NAME(mpl0, mpl[0]),
	REG_OFFSET_NAME(mpl1, mpl[1]),
	REG_OFFSET_NAME(mpl2, mpl[2]),
	REG_OFFSET_NAME(mtp0, mtp[0]),
	REG_OFFSET_NAME(mtp1, mtp[1]),
	REG_OFFSET_NAME(mtp2, mtp[2]),
#endif
	REG_OFFSET_END,
};

/**
 * regs_query_register_offset() - query register offset from its name
 * @name: the name of a register
 *
 * regs_query_register_offset() returns the offset of a register in struct
 * pt_regs from its name. If the name is invalid, this returns -EINVAL.
 */
int regs_query_register_offset(const char *name)
{
	const struct pt_regs_offset *roff;
	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (!strcmp(roff->name, name))
			return roff->offset;
	return -EINVAL;
}
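
/*
 * Example (illustrative sketch only): kprobes-style consumers resolve a
 * register by name and then read it from a saved pt_regs, typically via
 * regs_get_register() where the architecture provides the regs-and-stack
 * access API (an assumption here, see asm/ptrace.h):
 *
 *	int off = regs_query_register_offset("c0_epc");
 *
 *	if (off >= 0)
 *		epc = regs_get_register(regs, off);
 */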

#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)

static const struct user_regset mips_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = ELF_NGREG,
		.size = sizeof(unsigned int),
		.align = sizeof(unsigned int),
		.get = gpr32_get,
		.set = gpr32_set,
	},
	[REGSET_FPR] = {
		.core_note_type = NT_PRFPREG,
		.n = ELF_NFPREG,
		.size = sizeof(elf_fpreg_t),
		.align = sizeof(elf_fpreg_t),
		.get = fpr_get,
		.set = fpr_set,
	},
};

static const struct user_regset_view user_mips_view = {
	.name = "mips",
	.e_machine = ELF_ARCH,
	.ei_osabi = ELF_OSABI,
	.regsets = mips_regsets,
	.n = ARRAY_SIZE(mips_regsets),
};

#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */

#ifdef CONFIG_64BIT

static const struct user_regset mips64_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = ELF_NGREG,
		.size = sizeof(unsigned long),
		.align = sizeof(unsigned long),
		.get = gpr64_get,
		.set = gpr64_set,
	},
	[REGSET_FPR] = {
		.core_note_type = NT_PRFPREG,
		.n = ELF_NFPREG,
		.size = sizeof(elf_fpreg_t),
		.align = sizeof(elf_fpreg_t),
		.get = fpr_get,
		.set = fpr_set,
	},
};

static const struct user_regset_view user_mips64_view = {
	.name = "mips64",
	.e_machine = ELF_ARCH,
	.ei_osabi = ELF_OSABI,
	.regsets = mips64_regsets,
	.n = ARRAY_SIZE(mips64_regsets),
};

#ifdef CONFIG_MIPS32_N32

static const struct user_regset_view user_mipsn32_view = {
	.name = "mipsn32",
	.e_flags = EF_MIPS_ABI2,
	.e_machine = ELF_ARCH,
	.ei_osabi = ELF_OSABI,
	.regsets = mips64_regsets,
	.n = ARRAY_SIZE(mips64_regsets),
};

#endif /* CONFIG_MIPS32_N32 */

#endif /* CONFIG_64BIT */

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_32BIT
	return &user_mips_view;
#else
#ifdef CONFIG_MIPS32_O32
	if (test_tsk_thread_flag(task, TIF_32BIT_REGS))
		return &user_mips_view;
#endif
#ifdef CONFIG_MIPS32_N32
	if (test_tsk_thread_flag(task, TIF_32BIT_ADDR))
		return &user_mipsn32_view;
#endif
	return &user_mips64_view;
#endif
}
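
/*
 * Example (illustrative sketch only): the regsets above also back the
 * generic PTRACE_GETREGSET/PTRACE_SETREGSET requests, so a tracer can
 * fetch the GP registers in the view matching the tracee's ABI. Assumes
 * glibc's elf_gregset_t from <sys/procfs.h>:
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/procfs.h>
 *	#include <sys/uio.h>
 *	#include <elf.h>
 *
 *	elf_gregset_t gprs;
 *	struct iovec iov = { .iov_base = &gprs, .iov_len = sizeof(gprs) };
 *
 *	ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, &iov);
 *	// On return, iov.iov_len holds the amount of data actually written.
 */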

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	int ret;
	void __user *addrp = (void __user *) addr;
	void __user *datavp = (void __user *) data;
	unsigned long __user *datalp = (void __user *) data;

	switch (request) {
	/* when I and D space are separate, these will need to be fixed. */
	case PTRACE_PEEKTEXT: /* read word at location addr. */
	case PTRACE_PEEKDATA:
		ret = generic_ptrace_peekdata(child, addr, data);
		break;

	/* Read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		struct pt_regs *regs;
		union fpureg *fregs;
		unsigned long tmp = 0;

		regs = task_pt_regs(child);
		ret = 0;  /* Default return value. */

		switch (addr) {
		case 0 ... 31:
			tmp = regs->regs[addr];
			break;
		case FPR_BASE ... FPR_BASE + 31:
			if (!tsk_used_math(child)) {
				/* FP not yet used */
				tmp = -1;
				break;
			}
			fregs = get_fpu_regs(child);

#ifdef CONFIG_32BIT
			if (test_thread_flag(TIF_32BIT_FPREGS)) {
				/*
				 * The odd registers are actually the high
				 * order bits of the values stored in the even
				 * registers - unless we're using r2k_switch.S.
				 */
				tmp = get_fpr32(&fregs[(addr & ~1) - FPR_BASE],
						addr & 1);
				break;
			}
#endif
			tmp = get_fpr32(&fregs[addr - FPR_BASE], 0);
			break;
		case PC:
			tmp = regs->cp0_epc;
			break;
		case CAUSE:
			tmp = regs->cp0_cause;
			break;
		case BADVADDR:
			tmp = regs->cp0_badvaddr;
			break;
		case MMHI:
			tmp = regs->hi;
			break;
		case MMLO:
			tmp = regs->lo;
			break;
#ifdef CONFIG_CPU_HAS_SMARTMIPS
		case ACX:
			tmp = regs->acx;
			break;
#endif
		case FPC_CSR:
			tmp = child->thread.fpu.fcr31;
			break;
		case FPC_EIR:
			/* implementation / version register */
			tmp = boot_cpu_data.fpu_id;
			break;
		case DSP_BASE ... DSP_BASE + 5: {
			dspreg_t *dregs;

			if (!cpu_has_dsp) {
				tmp = 0;
				ret = -EIO;
				goto out;
			}
			dregs = __get_dsp_regs(child);
			tmp = (unsigned long) (dregs[addr - DSP_BASE]);
			break;
		}
		case DSP_CONTROL:
			if (!cpu_has_dsp) {
				tmp = 0;
				ret = -EIO;
				goto out;
			}
			tmp = child->thread.dsp.dspcontrol;
			break;
		default:
			tmp = 0;
			ret = -EIO;
			goto out;
		}
		ret = put_user(tmp, datalp);
		break;
	}

	/* when I and D space are separate, this will have to be fixed. */
	case PTRACE_POKETEXT: /* write the word at location addr. */
	case PTRACE_POKEDATA:
		ret = generic_ptrace_pokedata(child, addr, data);
		break;

	case PTRACE_POKEUSR: {
		struct pt_regs *regs;
		ret = 0;
		regs = task_pt_regs(child);

		switch (addr) {
		case 0 ... 31:
			regs->regs[addr] = data;
			/* System call number may have been changed */
			if (addr == 2)
				mips_syscall_update_nr(child, regs);
			else if (addr == 4 &&
				 mips_syscall_is_indirect(child, regs))
				mips_syscall_update_nr(child, regs);
			break;
		case FPR_BASE ... FPR_BASE + 31: {
			union fpureg *fregs = get_fpu_regs(child);

			init_fp_ctx(child);
#ifdef CONFIG_32BIT
			if (test_thread_flag(TIF_32BIT_FPREGS)) {
				/*
				 * The odd registers are actually the high
				 * order bits of the values stored in the even
				 * registers - unless we're using r2k_switch.S.
				 */
				set_fpr32(&fregs[(addr & ~1) - FPR_BASE],
					  addr & 1, data);
				break;
			}
#endif
			set_fpr64(&fregs[addr - FPR_BASE], 0, data);
			break;
		}
		case PC:
			regs->cp0_epc = data;
			break;
		case MMHI:
			regs->hi = data;
			break;
		case MMLO:
			regs->lo = data;
			break;
#ifdef CONFIG_CPU_HAS_SMARTMIPS
		case ACX:
			regs->acx = data;
			break;
#endif
		case FPC_CSR:
			init_fp_ctx(child);
			ptrace_setfcr31(child, data);
			break;
		case DSP_BASE ... DSP_BASE + 5: {
			dspreg_t *dregs;

			if (!cpu_has_dsp) {
				ret = -EIO;
				break;
			}

			dregs = __get_dsp_regs(child);
			dregs[addr - DSP_BASE] = data;
			break;
		}
		case DSP_CONTROL:
			if (!cpu_has_dsp) {
				ret = -EIO;
				break;
			}
			child->thread.dsp.dspcontrol = data;
			break;
		default:
			/* The rest are not allowed. */
			ret = -EIO;
			break;
		}
		break;
	}

	case PTRACE_GETREGS:
		ret = ptrace_getregs(child, datavp);
		break;

	case PTRACE_SETREGS:
		ret = ptrace_setregs(child, datavp);
		break;

	case PTRACE_GETFPREGS:
		ret = ptrace_getfpregs(child, datavp);
		break;

	case PTRACE_SETFPREGS:
		ret = ptrace_setfpregs(child, datavp);
		break;

	case PTRACE_GET_THREAD_AREA:
		ret = put_user(task_thread_info(child)->tp_value, datalp);
		break;

	case PTRACE_GET_WATCH_REGS:
		ret = ptrace_get_watch_regs(child, addrp);
		break;

	case PTRACE_SET_WATCH_REGS:
		ret = ptrace_set_watch_regs(child, addrp);
		break;

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}
out:
	return ret;
}
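
/*
 * Example (illustrative sketch only): PTRACE_PEEKUSR addresses 0-31 name
 * the GP registers directly, so a tracer can read $v0 (register 2, which
 * carries the syscall number on entry and the return value on exit)
 * without a full GETREGS. Assumes "pid" names a stopped tracee; glibc
 * spells the request PTRACE_PEEKUSER.
 *
 *	errno = 0;
 *	long v0 = ptrace(PTRACE_PEEKUSER, pid, (void *)2, NULL);
 *	if (v0 == -1 && errno)
 *		perror("ptrace");
 */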

/*
 * Notification of system call entry/exit
 * - triggered by current->work.syscall_trace
 */
asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
{
	user_exit();

	current_thread_info()->syscall = syscall;

	if (test_thread_flag(TIF_SYSCALL_TRACE)) {
		if (tracehook_report_syscall_entry(regs))
			return -1;
		syscall = current_thread_info()->syscall;
	}

#ifdef CONFIG_SECCOMP
	if (unlikely(test_thread_flag(TIF_SECCOMP))) {
		int ret, i;
		struct seccomp_data sd;
		unsigned long args[6];

		sd.nr = syscall;
		sd.arch = syscall_get_arch();
		syscall_get_arguments(current, regs, 0, 6, args);
		for (i = 0; i < 6; i++)
			sd.args[i] = args[i];
		sd.instruction_pointer = KSTK_EIP(current);

		ret = __secure_computing(&sd);
		if (ret == -1)
			return ret;
		syscall = current_thread_info()->syscall;
	}
#endif

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->regs[2]);

	audit_syscall_entry(syscall, regs->regs[4], regs->regs[5],
			    regs->regs[6], regs->regs[7]);

	/*
	 * Negative syscall numbers are mistaken for rejected syscalls, but
	 * won't have had the return value set appropriately, so we do so now.
	 */
	if (syscall < 0)
		syscall_set_return_value(current, regs, -ENOSYS, 0);
	return syscall;
}

/*
 * Notification of system call entry/exit
 * - triggered by current->work.syscall_trace
 */
asmlinkage void syscall_trace_leave(struct pt_regs *regs)
{
	/*
	 * We may come here right after calling schedule_user()
	 * or do_notify_resume(), in which case we can be in RCU
	 * user mode.
	 */
	user_exit();

	audit_syscall_exit(regs);

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_exit(regs, regs_return_value(regs));

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, 0);

	user_enter();
}
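
/*
 * Example (illustrative sketch only): syscall_trace_enter/leave above are
 * what a PTRACE_SYSCALL tracer observes as syscall-entry and syscall-exit
 * stops. A minimal tracer loop over an already-attached, stopped child:
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/wait.h>
 *
 *	int status;
 *	for (;;) {
 *		ptrace(PTRACE_SYSCALL, pid, NULL, NULL); // run to next stop
 *		waitpid(pid, &status, 0);
 *		if (WIFEXITED(status))
 *			break;
 *		// inspect registers here, e.g. with PTRACE_GETREGS
 *	}
 */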