/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1992 Ross Biro
 * Copyright (C) Linus Torvalds
 * Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle
 * Copyright (C) 1996 David S. Miller
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 1999 MIPS Technologies, Inc.
 * Copyright (C) 2000 Ulf Carlsson
 *
 * At this time Linux/MIPS64 only supports syscall tracing, even for 32-bit
 * binaries.
 */
#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/elf.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/regset.h>
#include <linux/smp.h>
#include <linux/security.h>
#include <linux/tracehook.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/ftrace.h>

#include <asm/byteorder.h>
#include <asm/cpu.h>
#include <asm/cpu-info.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/syscall.h>
#include <asm/uaccess.h>
#include <asm/bootinfo.h>
#include <asm/reg.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

static void init_fp_ctx(struct task_struct *target)
{
	/* If FP has been used then the target already has context */
	if (tsk_used_math(target))
		return;

	/* Begin with data registers set to all 1s... */
	memset(&target->thread.fpu.fpr, ~0, sizeof(target->thread.fpu.fpr));

	/* ...and FCSR zeroed */
	target->thread.fpu.fcr31 = 0;

	/*
	 * Record that the target has "used" math, such that the context
	 * just initialised, and any modifications made by the caller,
	 * aren't discarded.
	 */
	set_stopped_child_used_math(target);
}

/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure single step bits etc are not set.
 */
void ptrace_disable(struct task_struct *child)
{
	/* Don't load the watchpoint registers for the ex-child. */
	clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
}

/*
 * Read a general register set. We always use the 64-bit format, even
 * for 32-bit kernels and for 32-bit processes on a 64-bit kernel.
 * Registers are sign extended to fill the available space.
 */
int ptrace_getregs(struct task_struct *child, struct user_pt_regs __user *data)
{
	struct pt_regs *regs;
	int i;

	if (!access_ok(VERIFY_WRITE, data, 38 * 8))
		return -EIO;

	regs = task_pt_regs(child);

	for (i = 0; i < 32; i++)
		__put_user((long)regs->regs[i],
			   (__s64 __user *)&data->regs[i]);
	__put_user((long)regs->lo, (__s64 __user *)&data->lo);
	__put_user((long)regs->hi, (__s64 __user *)&data->hi);
	__put_user((long)regs->cp0_epc, (__s64 __user *)&data->cp0_epc);
	__put_user((long)regs->cp0_badvaddr,
		   (__s64 __user *)&data->cp0_badvaddr);
	__put_user((long)regs->cp0_status, (__s64 __user *)&data->cp0_status);
	__put_user((long)regs->cp0_cause, (__s64 __user *)&data->cp0_cause);

	return 0;
}
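
/*
 * Illustrative sketch only (not used by the kernel itself): a tracer
 * reaching ptrace_getregs() above via PTRACE_GETREGS passes a buffer of
 * 38 doublewords as the ptrace data argument and reads the sign-extended
 * values back, e.g.:
 *
 *	__u64 uregs[38];
 *	ptrace(PTRACE_GETREGS, pid, NULL, uregs);
 *	// uregs[0..31] are the GPRs, followed by lo, hi, cp0_epc,
 *	// cp0_badvaddr, cp0_status and cp0_cause, matching the
 *	// struct user_pt_regs layout consumed above.
 */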

/*
 * Write a general register set. As for PTRACE_GETREGS, we always use
 * the 64-bit format. On a 32-bit kernel only the lower order half
 * (according to endianness) will be used.
 */
int ptrace_setregs(struct task_struct *child, struct user_pt_regs __user *data)
{
	struct pt_regs *regs;
	int i;

	if (!access_ok(VERIFY_READ, data, 38 * 8))
		return -EIO;

	regs = task_pt_regs(child);

	for (i = 0; i < 32; i++)
		__get_user(regs->regs[i], (__s64 __user *)&data->regs[i]);
	__get_user(regs->lo, (__s64 __user *)&data->lo);
	__get_user(regs->hi, (__s64 __user *)&data->hi);
	__get_user(regs->cp0_epc, (__s64 __user *)&data->cp0_epc);

	/* badvaddr, status, and cause may not be written. */

	return 0;
}

int ptrace_getfpregs(struct task_struct *child, __u32 __user *data)
{
	int i;

	if (!access_ok(VERIFY_WRITE, data, 33 * 8))
		return -EIO;

	if (tsk_used_math(child)) {
		union fpureg *fregs = get_fpu_regs(child);
		for (i = 0; i < 32; i++)
			__put_user(get_fpr64(&fregs[i], 0),
				   i + (__u64 __user *)data);
	} else {
		for (i = 0; i < 32; i++)
			__put_user((__u64) -1, i + (__u64 __user *) data);
	}

	__put_user(child->thread.fpu.fcr31, data + 64);
	__put_user(boot_cpu_data.fpu_id, data + 65);

	return 0;
}

int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
{
	union fpureg *fregs;
	u64 fpr_val;
	u32 fcr31;
	u32 value;
	u32 mask;
	int i;

	if (!access_ok(VERIFY_READ, data, 33 * 8))
		return -EIO;

	init_fp_ctx(child);
	fregs = get_fpu_regs(child);

	for (i = 0; i < 32; i++) {
		__get_user(fpr_val, i + (__u64 __user *)data);
		set_fpr64(&fregs[i], 0, fpr_val);
	}

	__get_user(value, data + 64);
	fcr31 = child->thread.fpu.fcr31;
	mask = boot_cpu_data.fpu_msk31;
	child->thread.fpu.fcr31 = (value & ~mask) | (fcr31 & mask);

	/* FIR may not be written. */

	return 0;
}
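
/*
 * Illustrative sketch only: the buffer used by PTRACE_GETFPREGS and
 * PTRACE_SETFPREGS above is 33 doublewords - fp0..fp31 followed by FCSR
 * at 32-bit word index 64 and the FPU implementation register (FIR) at
 * word index 65. A hypothetical tracer clearing the FCSR cause bits
 * might do:
 *
 *	__u64 fpregs[33];
 *	ptrace(PTRACE_GETFPREGS, pid, NULL, fpregs);
 *	((__u32 *)fpregs)[64] &= ~FPU_CSR_ALL_X;
 *	ptrace(PTRACE_SETFPREGS, pid, NULL, fpregs);
 *
 * On writes, FCSR bits covered by boot_cpu_data.fpu_msk31 (the read-only
 * bits of this FPU) keep their previous values, and FIR is never written.
 */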

int ptrace_get_watch_regs(struct task_struct *child,
			  struct pt_watch_regs __user *addr)
{
	enum pt_watch_style style;
	int i;

	if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
		return -EIO;
	if (!access_ok(VERIFY_WRITE, addr, sizeof(struct pt_watch_regs)))
		return -EIO;

#ifdef CONFIG_32BIT
	style = pt_watch_style_mips32;
#define WATCH_STYLE mips32
#else
	style = pt_watch_style_mips64;
#define WATCH_STYLE mips64
#endif

	__put_user(style, &addr->style);
	__put_user(boot_cpu_data.watch_reg_use_cnt,
		   &addr->WATCH_STYLE.num_valid);
	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
		__put_user(child->thread.watch.mips3264.watchlo[i],
			   &addr->WATCH_STYLE.watchlo[i]);
		__put_user(child->thread.watch.mips3264.watchhi[i] & 0xfff,
			   &addr->WATCH_STYLE.watchhi[i]);
		__put_user(boot_cpu_data.watch_reg_masks[i],
			   &addr->WATCH_STYLE.watch_masks[i]);
	}
	for (; i < 8; i++) {
		__put_user(0, &addr->WATCH_STYLE.watchlo[i]);
		__put_user(0, &addr->WATCH_STYLE.watchhi[i]);
		__put_user(0, &addr->WATCH_STYLE.watch_masks[i]);
	}

	return 0;
}

int ptrace_set_watch_regs(struct task_struct *child,
			  struct pt_watch_regs __user *addr)
{
	int i;
	int watch_active = 0;
	unsigned long lt[NUM_WATCH_REGS];
	u16 ht[NUM_WATCH_REGS];

	if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
		return -EIO;
	if (!access_ok(VERIFY_READ, addr, sizeof(struct pt_watch_regs)))
		return -EIO;
	/* Check the values. */
	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
		__get_user(lt[i], &addr->WATCH_STYLE.watchlo[i]);
#ifdef CONFIG_32BIT
		if (lt[i] & __UA_LIMIT)
			return -EINVAL;
#else
		if (test_tsk_thread_flag(child, TIF_32BIT_ADDR)) {
			if (lt[i] & 0xffffffff80000000UL)
				return -EINVAL;
		} else {
			if (lt[i] & __UA_LIMIT)
				return -EINVAL;
		}
#endif
		__get_user(ht[i], &addr->WATCH_STYLE.watchhi[i]);
		if (ht[i] & ~0xff8)
			return -EINVAL;
	}
	/* Install them. */
	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
		if (lt[i] & 7)
			watch_active = 1;
		child->thread.watch.mips3264.watchlo[i] = lt[i];
		/* Set the G bit. */
		child->thread.watch.mips3264.watchhi[i] = ht[i];
	}

	if (watch_active)
		set_tsk_thread_flag(child, TIF_LOAD_WATCH);
	else
		clear_tsk_thread_flag(child, TIF_LOAD_WATCH);

	return 0;
}
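
/*
 * Illustrative sketch only: a tracer would drive the two watch-register
 * requests above roughly as follows (struct pt_watch_regs is the uapi
 * layout; pid and vaddr are made up):
 *
 *	struct pt_watch_regs wr;
 *	ptrace(PTRACE_GET_WATCH_REGS, pid, &wr, NULL);
 *	wr.mips64.watchlo[0] = (vaddr & ~7ul) | 1;	// bit 0 (W): trap on stores
 *	wr.mips64.watchhi[0] = 0;			// no extra byte mask
 *	ptrace(PTRACE_SET_WATCH_REGS, pid, &wr, NULL);
 *
 * (Use wr.mips32 instead when wr.style is pt_watch_style_mips32.)  The
 * watchlo address must be a user virtual address, its low three bits
 * select store/load/ifetch traps (W/R/I), and only bits 3..11 of watchhi
 * (the byte mask) may be set; anything else is rejected with -EINVAL.
 */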

/* regset get/set implementations */

#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)

static int gpr32_get(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     void *kbuf, void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	u32 uregs[ELF_NGREG] = {};
	unsigned i;

	for (i = MIPS32_EF_R1; i <= MIPS32_EF_R31; i++) {
		/* k0/k1 are copied as zero. */
		if (i == MIPS32_EF_R26 || i == MIPS32_EF_R27)
			continue;

		uregs[i] = regs->regs[i - MIPS32_EF_R0];
	}

	uregs[MIPS32_EF_LO] = regs->lo;
	uregs[MIPS32_EF_HI] = regs->hi;
	uregs[MIPS32_EF_CP0_EPC] = regs->cp0_epc;
	uregs[MIPS32_EF_CP0_BADVADDR] = regs->cp0_badvaddr;
	uregs[MIPS32_EF_CP0_STATUS] = regs->cp0_status;
	uregs[MIPS32_EF_CP0_CAUSE] = regs->cp0_cause;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0,
				   sizeof(uregs));
}

static int gpr32_set(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	u32 uregs[ELF_NGREG];
	unsigned start, num_regs, i;
	int err;

	start = pos / sizeof(u32);
	num_regs = count / sizeof(u32);

	if (start + num_regs > ELF_NGREG)
		return -EIO;

	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
				 sizeof(uregs));
	if (err)
		return err;

	for (i = start; i < num_regs; i++) {
		/*
		 * Cast all values to signed here so that if this is a 64-bit
		 * kernel, the supplied 32-bit values will be sign extended.
		 */
		switch (i) {
		case MIPS32_EF_R1 ... MIPS32_EF_R25:
			/* k0/k1 are ignored. */
		case MIPS32_EF_R28 ... MIPS32_EF_R31:
			regs->regs[i - MIPS32_EF_R0] = (s32)uregs[i];
			break;
		case MIPS32_EF_LO:
			regs->lo = (s32)uregs[i];
			break;
		case MIPS32_EF_HI:
			regs->hi = (s32)uregs[i];
			break;
		case MIPS32_EF_CP0_EPC:
			regs->cp0_epc = (s32)uregs[i];
			break;
		}
	}

	return 0;
}

#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */

#ifdef CONFIG_64BIT

static int gpr64_get(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     void *kbuf, void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	u64 uregs[ELF_NGREG] = {};
	unsigned i;

	for (i = MIPS64_EF_R1; i <= MIPS64_EF_R31; i++) {
		/* k0/k1 are copied as zero. */
		if (i == MIPS64_EF_R26 || i == MIPS64_EF_R27)
			continue;

		uregs[i] = regs->regs[i - MIPS64_EF_R0];
	}

	uregs[MIPS64_EF_LO] = regs->lo;
	uregs[MIPS64_EF_HI] = regs->hi;
	uregs[MIPS64_EF_CP0_EPC] = regs->cp0_epc;
	uregs[MIPS64_EF_CP0_BADVADDR] = regs->cp0_badvaddr;
	uregs[MIPS64_EF_CP0_STATUS] = regs->cp0_status;
	uregs[MIPS64_EF_CP0_CAUSE] = regs->cp0_cause;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0,
				   sizeof(uregs));
}

static int gpr64_set(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	u64 uregs[ELF_NGREG];
	unsigned start, num_regs, i;
	int err;

	start = pos / sizeof(u64);
	num_regs = count / sizeof(u64);

	if (start + num_regs > ELF_NGREG)
		return -EIO;

	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
				 sizeof(uregs));
	if (err)
		return err;

	for (i = start; i < num_regs; i++) {
		switch (i) {
		case MIPS64_EF_R1 ... MIPS64_EF_R25:
			/* k0/k1 are ignored. */
		case MIPS64_EF_R28 ... MIPS64_EF_R31:
			regs->regs[i - MIPS64_EF_R0] = uregs[i];
			break;
		case MIPS64_EF_LO:
			regs->lo = uregs[i];
			break;
		case MIPS64_EF_HI:
			regs->hi = uregs[i];
			break;
		case MIPS64_EF_CP0_EPC:
			regs->cp0_epc = uregs[i];
			break;
		}
	}

	return 0;
}

#endif /* CONFIG_64BIT */

static int fpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	unsigned i;
	int err;
	u64 fpr_val;

	/* XXX fcr31 */

	if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
		return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					   &target->thread.fpu,
					   0, sizeof(elf_fpregset_t));

	for (i = 0; i < NUM_FPU_REGS; i++) {
		fpr_val = get_fpr64(&target->thread.fpu.fpr[i], 0);
		err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &fpr_val, i * sizeof(elf_fpreg_t),
					  (i + 1) * sizeof(elf_fpreg_t));
		if (err)
			return err;
	}

	return 0;
}

static int fpr_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	unsigned i;
	int err;
	u64 fpr_val;

	/* XXX fcr31 */

	init_fp_ctx(target);

	if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
		return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					  &target->thread.fpu,
					  0, sizeof(elf_fpregset_t));

	for (i = 0; i < NUM_FPU_REGS; i++) {
		err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &fpr_val, i * sizeof(elf_fpreg_t),
					 (i + 1) * sizeof(elf_fpreg_t));
		if (err)
			return err;
		set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val);
	}

	return 0;
}
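
/*
 * Illustrative note: the regset get/set callbacks above (wired up in the
 * tables below) are reached both by the ELF core dump code (as
 * NT_PRSTATUS/NT_PRFPREG notes) and by the PTRACE_GETREGSET/
 * PTRACE_SETREGSET requests handled in the generic ptrace_request(), e.g.:
 *
 *	elf_gregset_t gregs;
 *	struct iovec iov = { .iov_base = &gregs, .iov_len = sizeof(gregs) };
 *	ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);
 */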

enum mips_regset {
	REGSET_GPR,
	REGSET_FPR,
};

#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)

static const struct user_regset mips_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type	= NT_PRSTATUS,
		.n		= ELF_NGREG,
		.size		= sizeof(unsigned int),
		.align		= sizeof(unsigned int),
		.get		= gpr32_get,
		.set		= gpr32_set,
	},
	[REGSET_FPR] = {
		.core_note_type	= NT_PRFPREG,
		.n		= ELF_NFPREG,
		.size		= sizeof(elf_fpreg_t),
		.align		= sizeof(elf_fpreg_t),
		.get		= fpr_get,
		.set		= fpr_set,
	},
};

static const struct user_regset_view user_mips_view = {
	.name		= "mips",
	.e_machine	= ELF_ARCH,
	.ei_osabi	= ELF_OSABI,
	.regsets	= mips_regsets,
	.n		= ARRAY_SIZE(mips_regsets),
};

#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */

#ifdef CONFIG_64BIT

static const struct user_regset mips64_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type	= NT_PRSTATUS,
		.n		= ELF_NGREG,
		.size		= sizeof(unsigned long),
		.align		= sizeof(unsigned long),
		.get		= gpr64_get,
		.set		= gpr64_set,
	},
	[REGSET_FPR] = {
		.core_note_type	= NT_PRFPREG,
		.n		= ELF_NFPREG,
		.size		= sizeof(elf_fpreg_t),
		.align		= sizeof(elf_fpreg_t),
		.get		= fpr_get,
		.set		= fpr_set,
	},
};

static const struct user_regset_view user_mips64_view = {
	.name		= "mips64",
	.e_machine	= ELF_ARCH,
	.ei_osabi	= ELF_OSABI,
	.regsets	= mips64_regsets,
	.n		= ARRAY_SIZE(mips64_regsets),
};

#endif /* CONFIG_64BIT */

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_32BIT
	return &user_mips_view;
#else
#ifdef CONFIG_MIPS32_O32
	if (test_tsk_thread_flag(task, TIF_32BIT_REGS))
		return &user_mips_view;
#endif
	return &user_mips64_view;
#endif
}
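
/*
 * Illustrative note: the addr values accepted by PTRACE_PEEKUSR and
 * PTRACE_POKEUSR below are the MIPS ptrace pseudo register numbers:
 * 0..31 for the GPRs, FPR_BASE..FPR_BASE+31 for the FP registers, then
 * PC, CAUSE, BADVADDR, MMHI, MMLO, FPC_CSR, FPC_EIR and, on CPUs with
 * the DSP ASE, DSP_BASE..DSP_BASE+5 and DSP_CONTROL.
 */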

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	int ret;
	void __user *addrp = (void __user *) addr;
	void __user *datavp = (void __user *) data;
	unsigned long __user *datalp = (void __user *) data;

	switch (request) {
	/* when I and D space are separate, these will need to be fixed. */
	case PTRACE_PEEKTEXT: /* read word at location addr. */
	case PTRACE_PEEKDATA:
		ret = generic_ptrace_peekdata(child, addr, data);
		break;

	/* Read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		struct pt_regs *regs;
		union fpureg *fregs;
		unsigned long tmp = 0;

		regs = task_pt_regs(child);
		ret = 0;  /* Default return value. */

		switch (addr) {
		case 0 ... 31:
			tmp = regs->regs[addr];
			break;
		case FPR_BASE ... FPR_BASE + 31:
			if (!tsk_used_math(child)) {
				/* FP not yet used */
				tmp = -1;
				break;
			}
			fregs = get_fpu_regs(child);

#ifdef CONFIG_32BIT
			if (test_thread_flag(TIF_32BIT_FPREGS)) {
				/*
				 * The odd registers are actually the high
				 * order bits of the values stored in the even
				 * registers - unless we're using r2k_switch.S.
				 */
				tmp = get_fpr32(&fregs[(addr & ~1) - FPR_BASE],
						addr & 1);
				break;
			}
#endif
			tmp = get_fpr32(&fregs[addr - FPR_BASE], 0);
			break;
		case PC:
			tmp = regs->cp0_epc;
			break;
		case CAUSE:
			tmp = regs->cp0_cause;
			break;
		case BADVADDR:
			tmp = regs->cp0_badvaddr;
			break;
		case MMHI:
			tmp = regs->hi;
			break;
		case MMLO:
			tmp = regs->lo;
			break;
#ifdef CONFIG_CPU_HAS_SMARTMIPS
		case ACX:
			tmp = regs->acx;
			break;
#endif
		case FPC_CSR:
			tmp = child->thread.fpu.fcr31;
			break;
		case FPC_EIR:
			/* implementation / version register */
			tmp = boot_cpu_data.fpu_id;
			break;
		case DSP_BASE ... DSP_BASE + 5: {
			dspreg_t *dregs;

			if (!cpu_has_dsp) {
				tmp = 0;
				ret = -EIO;
				goto out;
			}
			dregs = __get_dsp_regs(child);
			tmp = (unsigned long) (dregs[addr - DSP_BASE]);
			break;
		}
		case DSP_CONTROL:
			if (!cpu_has_dsp) {
				tmp = 0;
				ret = -EIO;
				goto out;
			}
			tmp = child->thread.dsp.dspcontrol;
			break;
		default:
			tmp = 0;
			ret = -EIO;
			goto out;
		}
		ret = put_user(tmp, datalp);
		break;
	}

	/* when I and D space are separate, this will have to be fixed. */
	case PTRACE_POKETEXT: /* write the word at location addr. */
	case PTRACE_POKEDATA:
		ret = generic_ptrace_pokedata(child, addr, data);
		break;

	case PTRACE_POKEUSR: {
		struct pt_regs *regs;
		ret = 0;
		regs = task_pt_regs(child);

		switch (addr) {
		case 0 ... 31:
			regs->regs[addr] = data;
			break;
		case FPR_BASE ... FPR_BASE + 31: {
			union fpureg *fregs = get_fpu_regs(child);

			init_fp_ctx(child);
#ifdef CONFIG_32BIT
			if (test_thread_flag(TIF_32BIT_FPREGS)) {
				/*
				 * The odd registers are actually the high
				 * order bits of the values stored in the even
				 * registers - unless we're using r2k_switch.S.
				 */
				set_fpr32(&fregs[(addr & ~1) - FPR_BASE],
					  addr & 1, data);
				break;
			}
#endif
			set_fpr64(&fregs[addr - FPR_BASE], 0, data);
			break;
		}
		case PC:
			regs->cp0_epc = data;
			break;
		case MMHI:
			regs->hi = data;
			break;
		case MMLO:
			regs->lo = data;
			break;
#ifdef CONFIG_CPU_HAS_SMARTMIPS
		case ACX:
			regs->acx = data;
			break;
#endif
		case FPC_CSR:
			child->thread.fpu.fcr31 = data & ~FPU_CSR_ALL_X;
			break;
		case DSP_BASE ... DSP_BASE + 5: {
			dspreg_t *dregs;

			if (!cpu_has_dsp) {
				ret = -EIO;
				break;
			}

			dregs = __get_dsp_regs(child);
			dregs[addr - DSP_BASE] = data;
			break;
		}
		case DSP_CONTROL:
			if (!cpu_has_dsp) {
				ret = -EIO;
				break;
			}
			child->thread.dsp.dspcontrol = data;
			break;
		default:
			/* The rest are not allowed. */
			ret = -EIO;
			break;
		}
		break;
	}

	case PTRACE_GETREGS:
		ret = ptrace_getregs(child, datavp);
		break;

	case PTRACE_SETREGS:
		ret = ptrace_setregs(child, datavp);
		break;

	case PTRACE_GETFPREGS:
		ret = ptrace_getfpregs(child, datavp);
		break;

	case PTRACE_SETFPREGS:
		ret = ptrace_setfpregs(child, datavp);
		break;

	case PTRACE_GET_THREAD_AREA:
		ret = put_user(task_thread_info(child)->tp_value, datalp);
		break;

	case PTRACE_GET_WATCH_REGS:
		ret = ptrace_get_watch_regs(child, addrp);
		break;

	case PTRACE_SET_WATCH_REGS:
		ret = ptrace_set_watch_regs(child, addrp);
		break;

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}
out:
	return ret;
}

/*
 * Notification of system call entry/exit
 * - triggered by current->work.syscall_trace
 */
asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
{
	long ret = 0;
	user_exit();

	current_thread_info()->syscall = syscall;

	if (secure_computing() == -1)
		return -1;

	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
	    tracehook_report_syscall_entry(regs))
		ret = -1;

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->regs[2]);

	audit_syscall_entry(syscall, regs->regs[4], regs->regs[5],
			    regs->regs[6], regs->regs[7]);
	return syscall;
}

/*
 * Notification of system call entry/exit
 * - triggered by current->work.syscall_trace
 */
asmlinkage void syscall_trace_leave(struct pt_regs *regs)
{
	/*
	 * We may come here right after calling schedule_user()
	 * or do_notify_resume(), in which case we can be in RCU
	 * user mode.
	 */
	user_exit();

	audit_syscall_exit(regs);

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_exit(regs, regs->regs[2]);

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, 0);

	user_enter();
}