1 /* 2 * This file is subject to the terms and conditions of the GNU General Public 3 * License. See the file "COPYING" in the main directory of this archive 4 * for more details. 5 * 6 * Copyright (C) 1992 Ross Biro 7 * Copyright (C) Linus Torvalds 8 * Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle 9 * Copyright (C) 1996 David S. Miller 10 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com 11 * Copyright (C) 1999 MIPS Technologies, Inc. 12 * Copyright (C) 2000 Ulf Carlsson 13 * 14 * At this time Linux/MIPS64 only supports syscall tracing, even for 32-bit 15 * binaries. 16 */ 17 #include <linux/compiler.h> 18 #include <linux/context_tracking.h> 19 #include <linux/elf.h> 20 #include <linux/kernel.h> 21 #include <linux/sched.h> 22 #include <linux/sched/task_stack.h> 23 #include <linux/mm.h> 24 #include <linux/errno.h> 25 #include <linux/ptrace.h> 26 #include <linux/regset.h> 27 #include <linux/smp.h> 28 #include <linux/security.h> 29 #include <linux/stddef.h> 30 #include <linux/tracehook.h> 31 #include <linux/audit.h> 32 #include <linux/seccomp.h> 33 #include <linux/ftrace.h> 34 35 #include <asm/byteorder.h> 36 #include <asm/cpu.h> 37 #include <asm/cpu-info.h> 38 #include <asm/dsp.h> 39 #include <asm/fpu.h> 40 #include <asm/mipsregs.h> 41 #include <asm/mipsmtregs.h> 42 #include <asm/pgtable.h> 43 #include <asm/page.h> 44 #include <asm/syscall.h> 45 #include <linux/uaccess.h> 46 #include <asm/bootinfo.h> 47 #include <asm/reg.h> 48 49 #define CREATE_TRACE_POINTS 50 #include <trace/events/syscalls.h> 51 52 static void init_fp_ctx(struct task_struct *target) 53 { 54 /* If FP has been used then the target already has context */ 55 if (tsk_used_math(target)) 56 return; 57 58 /* Begin with data registers set to all 1s... */ 59 memset(&target->thread.fpu.fpr, ~0, sizeof(target->thread.fpu.fpr)); 60 61 /* FCSR has been preset by `mips_set_personality_nan'. */ 62 63 /* 64 * Record that the target has "used" math, such that the context 65 * just initialised, and any modifications made by the caller, 66 * aren't discarded. 67 */ 68 set_stopped_child_used_math(target); 69 } 70 71 /* 72 * Called by kernel/ptrace.c when detaching.. 73 * 74 * Make sure single step bits etc are not set. 75 */ 76 void ptrace_disable(struct task_struct *child) 77 { 78 /* Don't load the watchpoint registers for the ex-child. */ 79 clear_tsk_thread_flag(child, TIF_LOAD_WATCH); 80 } 81 82 /* 83 * Poke at FCSR according to its mask. Set the Cause bits even 84 * if a corresponding Enable bit is set. This will be noticed at 85 * the time the thread is switched to and SIGFPE thrown accordingly. 86 */ 87 static void ptrace_setfcr31(struct task_struct *child, u32 value) 88 { 89 u32 fcr31; 90 u32 mask; 91 92 fcr31 = child->thread.fpu.fcr31; 93 mask = boot_cpu_data.fpu_msk31; 94 child->thread.fpu.fcr31 = (value & ~mask) | (fcr31 & mask); 95 } 96 97 /* 98 * Read a general register set. We always use the 64-bit format, even 99 * for 32-bit kernels and for 32-bit processes on a 64-bit kernel. 100 * Registers are sign extended to fill the available space. 101 */ 102 int ptrace_getregs(struct task_struct *child, struct user_pt_regs __user *data) 103 { 104 struct pt_regs *regs; 105 int i; 106 107 if (!access_ok(VERIFY_WRITE, data, 38 * 8)) 108 return -EIO; 109 110 regs = task_pt_regs(child); 111 112 for (i = 0; i < 32; i++) 113 __put_user((long)regs->regs[i], (__s64 __user *)&data->regs[i]); 114 __put_user((long)regs->lo, (__s64 __user *)&data->lo); 115 __put_user((long)regs->hi, (__s64 __user *)&data->hi); 116 __put_user((long)regs->cp0_epc, (__s64 __user *)&data->cp0_epc); 117 __put_user((long)regs->cp0_badvaddr, (__s64 __user *)&data->cp0_badvaddr); 118 __put_user((long)regs->cp0_status, (__s64 __user *)&data->cp0_status); 119 __put_user((long)regs->cp0_cause, (__s64 __user *)&data->cp0_cause); 120 121 return 0; 122 } 123 124 /* 125 * Write a general register set. As for PTRACE_GETREGS, we always use 126 * the 64-bit format. On a 32-bit kernel only the lower order half 127 * (according to endianness) will be used. 128 */ 129 int ptrace_setregs(struct task_struct *child, struct user_pt_regs __user *data) 130 { 131 struct pt_regs *regs; 132 int i; 133 134 if (!access_ok(VERIFY_READ, data, 38 * 8)) 135 return -EIO; 136 137 regs = task_pt_regs(child); 138 139 for (i = 0; i < 32; i++) 140 __get_user(regs->regs[i], (__s64 __user *)&data->regs[i]); 141 __get_user(regs->lo, (__s64 __user *)&data->lo); 142 __get_user(regs->hi, (__s64 __user *)&data->hi); 143 __get_user(regs->cp0_epc, (__s64 __user *)&data->cp0_epc); 144 145 /* badvaddr, status, and cause may not be written. */ 146 147 /* System call number may have been changed */ 148 mips_syscall_update_nr(child, regs); 149 150 return 0; 151 } 152 153 int ptrace_getfpregs(struct task_struct *child, __u32 __user *data) 154 { 155 int i; 156 157 if (!access_ok(VERIFY_WRITE, data, 33 * 8)) 158 return -EIO; 159 160 if (tsk_used_math(child)) { 161 union fpureg *fregs = get_fpu_regs(child); 162 for (i = 0; i < 32; i++) 163 __put_user(get_fpr64(&fregs[i], 0), 164 i + (__u64 __user *)data); 165 } else { 166 for (i = 0; i < 32; i++) 167 __put_user((__u64) -1, i + (__u64 __user *) data); 168 } 169 170 __put_user(child->thread.fpu.fcr31, data + 64); 171 __put_user(boot_cpu_data.fpu_id, data + 65); 172 173 return 0; 174 } 175 176 int ptrace_setfpregs(struct task_struct *child, __u32 __user *data) 177 { 178 union fpureg *fregs; 179 u64 fpr_val; 180 u32 value; 181 int i; 182 183 if (!access_ok(VERIFY_READ, data, 33 * 8)) 184 return -EIO; 185 186 init_fp_ctx(child); 187 fregs = get_fpu_regs(child); 188 189 for (i = 0; i < 32; i++) { 190 __get_user(fpr_val, i + (__u64 __user *)data); 191 set_fpr64(&fregs[i], 0, fpr_val); 192 } 193 194 __get_user(value, data + 64); 195 ptrace_setfcr31(child, value); 196 197 /* FIR may not be written. */ 198 199 return 0; 200 } 201 202 int ptrace_get_watch_regs(struct task_struct *child, 203 struct pt_watch_regs __user *addr) 204 { 205 enum pt_watch_style style; 206 int i; 207 208 if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0) 209 return -EIO; 210 if (!access_ok(VERIFY_WRITE, addr, sizeof(struct pt_watch_regs))) 211 return -EIO; 212 213 #ifdef CONFIG_32BIT 214 style = pt_watch_style_mips32; 215 #define WATCH_STYLE mips32 216 #else 217 style = pt_watch_style_mips64; 218 #define WATCH_STYLE mips64 219 #endif 220 221 __put_user(style, &addr->style); 222 __put_user(boot_cpu_data.watch_reg_use_cnt, 223 &addr->WATCH_STYLE.num_valid); 224 for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) { 225 __put_user(child->thread.watch.mips3264.watchlo[i], 226 &addr->WATCH_STYLE.watchlo[i]); 227 __put_user(child->thread.watch.mips3264.watchhi[i] & 228 (MIPS_WATCHHI_MASK | MIPS_WATCHHI_IRW), 229 &addr->WATCH_STYLE.watchhi[i]); 230 __put_user(boot_cpu_data.watch_reg_masks[i], 231 &addr->WATCH_STYLE.watch_masks[i]); 232 } 233 for (; i < 8; i++) { 234 __put_user(0, &addr->WATCH_STYLE.watchlo[i]); 235 __put_user(0, &addr->WATCH_STYLE.watchhi[i]); 236 __put_user(0, &addr->WATCH_STYLE.watch_masks[i]); 237 } 238 239 return 0; 240 } 241 242 int ptrace_set_watch_regs(struct task_struct *child, 243 struct pt_watch_regs __user *addr) 244 { 245 int i; 246 int watch_active = 0; 247 unsigned long lt[NUM_WATCH_REGS]; 248 u16 ht[NUM_WATCH_REGS]; 249 250 if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0) 251 return -EIO; 252 if (!access_ok(VERIFY_READ, addr, sizeof(struct pt_watch_regs))) 253 return -EIO; 254 /* Check the values. */ 255 for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) { 256 __get_user(lt[i], &addr->WATCH_STYLE.watchlo[i]); 257 #ifdef CONFIG_32BIT 258 if (lt[i] & __UA_LIMIT) 259 return -EINVAL; 260 #else 261 if (test_tsk_thread_flag(child, TIF_32BIT_ADDR)) { 262 if (lt[i] & 0xffffffff80000000UL) 263 return -EINVAL; 264 } else { 265 if (lt[i] & __UA_LIMIT) 266 return -EINVAL; 267 } 268 #endif 269 __get_user(ht[i], &addr->WATCH_STYLE.watchhi[i]); 270 if (ht[i] & ~MIPS_WATCHHI_MASK) 271 return -EINVAL; 272 } 273 /* Install them. */ 274 for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) { 275 if (lt[i] & MIPS_WATCHLO_IRW) 276 watch_active = 1; 277 child->thread.watch.mips3264.watchlo[i] = lt[i]; 278 /* Set the G bit. */ 279 child->thread.watch.mips3264.watchhi[i] = ht[i]; 280 } 281 282 if (watch_active) 283 set_tsk_thread_flag(child, TIF_LOAD_WATCH); 284 else 285 clear_tsk_thread_flag(child, TIF_LOAD_WATCH); 286 287 return 0; 288 } 289 290 /* regset get/set implementations */ 291 292 #if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32) 293 294 static int gpr32_get(struct task_struct *target, 295 const struct user_regset *regset, 296 unsigned int pos, unsigned int count, 297 void *kbuf, void __user *ubuf) 298 { 299 struct pt_regs *regs = task_pt_regs(target); 300 u32 uregs[ELF_NGREG] = {}; 301 302 mips_dump_regs32(uregs, regs); 303 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, 304 sizeof(uregs)); 305 } 306 307 static int gpr32_set(struct task_struct *target, 308 const struct user_regset *regset, 309 unsigned int pos, unsigned int count, 310 const void *kbuf, const void __user *ubuf) 311 { 312 struct pt_regs *regs = task_pt_regs(target); 313 u32 uregs[ELF_NGREG]; 314 unsigned start, num_regs, i; 315 int err; 316 317 start = pos / sizeof(u32); 318 num_regs = count / sizeof(u32); 319 320 if (start + num_regs > ELF_NGREG) 321 return -EIO; 322 323 err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0, 324 sizeof(uregs)); 325 if (err) 326 return err; 327 328 for (i = start; i < num_regs; i++) { 329 /* 330 * Cast all values to signed here so that if this is a 64-bit 331 * kernel, the supplied 32-bit values will be sign extended. 332 */ 333 switch (i) { 334 case MIPS32_EF_R1 ... MIPS32_EF_R25: 335 /* k0/k1 are ignored. */ 336 case MIPS32_EF_R28 ... MIPS32_EF_R31: 337 regs->regs[i - MIPS32_EF_R0] = (s32)uregs[i]; 338 break; 339 case MIPS32_EF_LO: 340 regs->lo = (s32)uregs[i]; 341 break; 342 case MIPS32_EF_HI: 343 regs->hi = (s32)uregs[i]; 344 break; 345 case MIPS32_EF_CP0_EPC: 346 regs->cp0_epc = (s32)uregs[i]; 347 break; 348 } 349 } 350 351 /* System call number may have been changed */ 352 mips_syscall_update_nr(target, regs); 353 354 return 0; 355 } 356 357 #endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */ 358 359 #ifdef CONFIG_64BIT 360 361 static int gpr64_get(struct task_struct *target, 362 const struct user_regset *regset, 363 unsigned int pos, unsigned int count, 364 void *kbuf, void __user *ubuf) 365 { 366 struct pt_regs *regs = task_pt_regs(target); 367 u64 uregs[ELF_NGREG] = {}; 368 369 mips_dump_regs64(uregs, regs); 370 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, 371 sizeof(uregs)); 372 } 373 374 static int gpr64_set(struct task_struct *target, 375 const struct user_regset *regset, 376 unsigned int pos, unsigned int count, 377 const void *kbuf, const void __user *ubuf) 378 { 379 struct pt_regs *regs = task_pt_regs(target); 380 u64 uregs[ELF_NGREG]; 381 unsigned start, num_regs, i; 382 int err; 383 384 start = pos / sizeof(u64); 385 num_regs = count / sizeof(u64); 386 387 if (start + num_regs > ELF_NGREG) 388 return -EIO; 389 390 err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0, 391 sizeof(uregs)); 392 if (err) 393 return err; 394 395 for (i = start; i < num_regs; i++) { 396 switch (i) { 397 case MIPS64_EF_R1 ... MIPS64_EF_R25: 398 /* k0/k1 are ignored. */ 399 case MIPS64_EF_R28 ... MIPS64_EF_R31: 400 regs->regs[i - MIPS64_EF_R0] = uregs[i]; 401 break; 402 case MIPS64_EF_LO: 403 regs->lo = uregs[i]; 404 break; 405 case MIPS64_EF_HI: 406 regs->hi = uregs[i]; 407 break; 408 case MIPS64_EF_CP0_EPC: 409 regs->cp0_epc = uregs[i]; 410 break; 411 } 412 } 413 414 /* System call number may have been changed */ 415 mips_syscall_update_nr(target, regs); 416 417 return 0; 418 } 419 420 #endif /* CONFIG_64BIT */ 421 422 /* 423 * Copy the floating-point context to the supplied NT_PRFPREG buffer, 424 * !CONFIG_CPU_HAS_MSA variant. FP context's general register slots 425 * correspond 1:1 to buffer slots. Only general registers are copied. 426 */ 427 static int fpr_get_fpa(struct task_struct *target, 428 unsigned int *pos, unsigned int *count, 429 void **kbuf, void __user **ubuf) 430 { 431 return user_regset_copyout(pos, count, kbuf, ubuf, 432 &target->thread.fpu, 433 0, NUM_FPU_REGS * sizeof(elf_fpreg_t)); 434 } 435 436 /* 437 * Copy the floating-point context to the supplied NT_PRFPREG buffer, 438 * CONFIG_CPU_HAS_MSA variant. Only lower 64 bits of FP context's 439 * general register slots are copied to buffer slots. Only general 440 * registers are copied. 441 */ 442 static int fpr_get_msa(struct task_struct *target, 443 unsigned int *pos, unsigned int *count, 444 void **kbuf, void __user **ubuf) 445 { 446 unsigned int i; 447 u64 fpr_val; 448 int err; 449 450 BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t)); 451 for (i = 0; i < NUM_FPU_REGS; i++) { 452 fpr_val = get_fpr64(&target->thread.fpu.fpr[i], 0); 453 err = user_regset_copyout(pos, count, kbuf, ubuf, 454 &fpr_val, i * sizeof(elf_fpreg_t), 455 (i + 1) * sizeof(elf_fpreg_t)); 456 if (err) 457 return err; 458 } 459 460 return 0; 461 } 462 463 /* 464 * Copy the floating-point context to the supplied NT_PRFPREG buffer. 465 * Choose the appropriate helper for general registers, and then copy 466 * the FCSR register separately. 467 */ 468 static int fpr_get(struct task_struct *target, 469 const struct user_regset *regset, 470 unsigned int pos, unsigned int count, 471 void *kbuf, void __user *ubuf) 472 { 473 const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t); 474 int err; 475 476 if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t)) 477 err = fpr_get_fpa(target, &pos, &count, &kbuf, &ubuf); 478 else 479 err = fpr_get_msa(target, &pos, &count, &kbuf, &ubuf); 480 if (err) 481 return err; 482 483 err = user_regset_copyout(&pos, &count, &kbuf, &ubuf, 484 &target->thread.fpu.fcr31, 485 fcr31_pos, fcr31_pos + sizeof(u32)); 486 487 return err; 488 } 489 490 /* 491 * Copy the supplied NT_PRFPREG buffer to the floating-point context, 492 * !CONFIG_CPU_HAS_MSA variant. Buffer slots correspond 1:1 to FP 493 * context's general register slots. Only general registers are copied. 494 */ 495 static int fpr_set_fpa(struct task_struct *target, 496 unsigned int *pos, unsigned int *count, 497 const void **kbuf, const void __user **ubuf) 498 { 499 return user_regset_copyin(pos, count, kbuf, ubuf, 500 &target->thread.fpu, 501 0, NUM_FPU_REGS * sizeof(elf_fpreg_t)); 502 } 503 504 /* 505 * Copy the supplied NT_PRFPREG buffer to the floating-point context, 506 * CONFIG_CPU_HAS_MSA variant. Buffer slots are copied to lower 64 507 * bits only of FP context's general register slots. Only general 508 * registers are copied. 509 */ 510 static int fpr_set_msa(struct task_struct *target, 511 unsigned int *pos, unsigned int *count, 512 const void **kbuf, const void __user **ubuf) 513 { 514 unsigned int i; 515 u64 fpr_val; 516 int err; 517 518 BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t)); 519 for (i = 0; i < NUM_FPU_REGS && *count > 0; i++) { 520 err = user_regset_copyin(pos, count, kbuf, ubuf, 521 &fpr_val, i * sizeof(elf_fpreg_t), 522 (i + 1) * sizeof(elf_fpreg_t)); 523 if (err) 524 return err; 525 set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val); 526 } 527 528 return 0; 529 } 530 531 /* 532 * Copy the supplied NT_PRFPREG buffer to the floating-point context. 533 * Choose the appropriate helper for general registers, and then copy 534 * the FCSR register separately. 535 * 536 * We optimize for the case where `count % sizeof(elf_fpreg_t) == 0', 537 * which is supposed to have been guaranteed by the kernel before 538 * calling us, e.g. in `ptrace_regset'. We enforce that requirement, 539 * so that we can safely avoid preinitializing temporaries for 540 * partial register writes. 541 */ 542 static int fpr_set(struct task_struct *target, 543 const struct user_regset *regset, 544 unsigned int pos, unsigned int count, 545 const void *kbuf, const void __user *ubuf) 546 { 547 const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t); 548 u32 fcr31; 549 int err; 550 551 BUG_ON(count % sizeof(elf_fpreg_t)); 552 553 if (pos + count > sizeof(elf_fpregset_t)) 554 return -EIO; 555 556 init_fp_ctx(target); 557 558 if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t)) 559 err = fpr_set_fpa(target, &pos, &count, &kbuf, &ubuf); 560 else 561 err = fpr_set_msa(target, &pos, &count, &kbuf, &ubuf); 562 if (err) 563 return err; 564 565 if (count > 0) { 566 err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, 567 &fcr31, 568 fcr31_pos, fcr31_pos + sizeof(u32)); 569 if (err) 570 return err; 571 572 ptrace_setfcr31(target, fcr31); 573 } 574 575 return err; 576 } 577 578 enum mips_regset { 579 REGSET_GPR, 580 REGSET_FPR, 581 }; 582 583 struct pt_regs_offset { 584 const char *name; 585 int offset; 586 }; 587 588 #define REG_OFFSET_NAME(reg, r) { \ 589 .name = #reg, \ 590 .offset = offsetof(struct pt_regs, r) \ 591 } 592 593 #define REG_OFFSET_END { \ 594 .name = NULL, \ 595 .offset = 0 \ 596 } 597 598 static const struct pt_regs_offset regoffset_table[] = { 599 REG_OFFSET_NAME(r0, regs[0]), 600 REG_OFFSET_NAME(r1, regs[1]), 601 REG_OFFSET_NAME(r2, regs[2]), 602 REG_OFFSET_NAME(r3, regs[3]), 603 REG_OFFSET_NAME(r4, regs[4]), 604 REG_OFFSET_NAME(r5, regs[5]), 605 REG_OFFSET_NAME(r6, regs[6]), 606 REG_OFFSET_NAME(r7, regs[7]), 607 REG_OFFSET_NAME(r8, regs[8]), 608 REG_OFFSET_NAME(r9, regs[9]), 609 REG_OFFSET_NAME(r10, regs[10]), 610 REG_OFFSET_NAME(r11, regs[11]), 611 REG_OFFSET_NAME(r12, regs[12]), 612 REG_OFFSET_NAME(r13, regs[13]), 613 REG_OFFSET_NAME(r14, regs[14]), 614 REG_OFFSET_NAME(r15, regs[15]), 615 REG_OFFSET_NAME(r16, regs[16]), 616 REG_OFFSET_NAME(r17, regs[17]), 617 REG_OFFSET_NAME(r18, regs[18]), 618 REG_OFFSET_NAME(r19, regs[19]), 619 REG_OFFSET_NAME(r20, regs[20]), 620 REG_OFFSET_NAME(r21, regs[21]), 621 REG_OFFSET_NAME(r22, regs[22]), 622 REG_OFFSET_NAME(r23, regs[23]), 623 REG_OFFSET_NAME(r24, regs[24]), 624 REG_OFFSET_NAME(r25, regs[25]), 625 REG_OFFSET_NAME(r26, regs[26]), 626 REG_OFFSET_NAME(r27, regs[27]), 627 REG_OFFSET_NAME(r28, regs[28]), 628 REG_OFFSET_NAME(r29, regs[29]), 629 REG_OFFSET_NAME(r30, regs[30]), 630 REG_OFFSET_NAME(r31, regs[31]), 631 REG_OFFSET_NAME(c0_status, cp0_status), 632 REG_OFFSET_NAME(hi, hi), 633 REG_OFFSET_NAME(lo, lo), 634 #ifdef CONFIG_CPU_HAS_SMARTMIPS 635 REG_OFFSET_NAME(acx, acx), 636 #endif 637 REG_OFFSET_NAME(c0_badvaddr, cp0_badvaddr), 638 REG_OFFSET_NAME(c0_cause, cp0_cause), 639 REG_OFFSET_NAME(c0_epc, cp0_epc), 640 #ifdef CONFIG_CPU_CAVIUM_OCTEON 641 REG_OFFSET_NAME(mpl0, mpl[0]), 642 REG_OFFSET_NAME(mpl1, mpl[1]), 643 REG_OFFSET_NAME(mpl2, mpl[2]), 644 REG_OFFSET_NAME(mtp0, mtp[0]), 645 REG_OFFSET_NAME(mtp1, mtp[1]), 646 REG_OFFSET_NAME(mtp2, mtp[2]), 647 #endif 648 REG_OFFSET_END, 649 }; 650 651 /** 652 * regs_query_register_offset() - query register offset from its name 653 * @name: the name of a register 654 * 655 * regs_query_register_offset() returns the offset of a register in struct 656 * pt_regs from its name. If the name is invalid, this returns -EINVAL; 657 */ 658 int regs_query_register_offset(const char *name) 659 { 660 const struct pt_regs_offset *roff; 661 for (roff = regoffset_table; roff->name != NULL; roff++) 662 if (!strcmp(roff->name, name)) 663 return roff->offset; 664 return -EINVAL; 665 } 666 667 #if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32) 668 669 static const struct user_regset mips_regsets[] = { 670 [REGSET_GPR] = { 671 .core_note_type = NT_PRSTATUS, 672 .n = ELF_NGREG, 673 .size = sizeof(unsigned int), 674 .align = sizeof(unsigned int), 675 .get = gpr32_get, 676 .set = gpr32_set, 677 }, 678 [REGSET_FPR] = { 679 .core_note_type = NT_PRFPREG, 680 .n = ELF_NFPREG, 681 .size = sizeof(elf_fpreg_t), 682 .align = sizeof(elf_fpreg_t), 683 .get = fpr_get, 684 .set = fpr_set, 685 }, 686 }; 687 688 static const struct user_regset_view user_mips_view = { 689 .name = "mips", 690 .e_machine = ELF_ARCH, 691 .ei_osabi = ELF_OSABI, 692 .regsets = mips_regsets, 693 .n = ARRAY_SIZE(mips_regsets), 694 }; 695 696 #endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */ 697 698 #ifdef CONFIG_64BIT 699 700 static const struct user_regset mips64_regsets[] = { 701 [REGSET_GPR] = { 702 .core_note_type = NT_PRSTATUS, 703 .n = ELF_NGREG, 704 .size = sizeof(unsigned long), 705 .align = sizeof(unsigned long), 706 .get = gpr64_get, 707 .set = gpr64_set, 708 }, 709 [REGSET_FPR] = { 710 .core_note_type = NT_PRFPREG, 711 .n = ELF_NFPREG, 712 .size = sizeof(elf_fpreg_t), 713 .align = sizeof(elf_fpreg_t), 714 .get = fpr_get, 715 .set = fpr_set, 716 }, 717 }; 718 719 static const struct user_regset_view user_mips64_view = { 720 .name = "mips64", 721 .e_machine = ELF_ARCH, 722 .ei_osabi = ELF_OSABI, 723 .regsets = mips64_regsets, 724 .n = ARRAY_SIZE(mips64_regsets), 725 }; 726 727 #ifdef CONFIG_MIPS32_N32 728 729 static const struct user_regset_view user_mipsn32_view = { 730 .name = "mipsn32", 731 .e_flags = EF_MIPS_ABI2, 732 .e_machine = ELF_ARCH, 733 .ei_osabi = ELF_OSABI, 734 .regsets = mips64_regsets, 735 .n = ARRAY_SIZE(mips64_regsets), 736 }; 737 738 #endif /* CONFIG_MIPS32_N32 */ 739 740 #endif /* CONFIG_64BIT */ 741 742 const struct user_regset_view *task_user_regset_view(struct task_struct *task) 743 { 744 #ifdef CONFIG_32BIT 745 return &user_mips_view; 746 #else 747 #ifdef CONFIG_MIPS32_O32 748 if (test_tsk_thread_flag(task, TIF_32BIT_REGS)) 749 return &user_mips_view; 750 #endif 751 #ifdef CONFIG_MIPS32_N32 752 if (test_tsk_thread_flag(task, TIF_32BIT_ADDR)) 753 return &user_mipsn32_view; 754 #endif 755 return &user_mips64_view; 756 #endif 757 } 758 759 long arch_ptrace(struct task_struct *child, long request, 760 unsigned long addr, unsigned long data) 761 { 762 int ret; 763 void __user *addrp = (void __user *) addr; 764 void __user *datavp = (void __user *) data; 765 unsigned long __user *datalp = (void __user *) data; 766 767 switch (request) { 768 /* when I and D space are separate, these will need to be fixed. */ 769 case PTRACE_PEEKTEXT: /* read word at location addr. */ 770 case PTRACE_PEEKDATA: 771 ret = generic_ptrace_peekdata(child, addr, data); 772 break; 773 774 /* Read the word at location addr in the USER area. */ 775 case PTRACE_PEEKUSR: { 776 struct pt_regs *regs; 777 union fpureg *fregs; 778 unsigned long tmp = 0; 779 780 regs = task_pt_regs(child); 781 ret = 0; /* Default return value. */ 782 783 switch (addr) { 784 case 0 ... 31: 785 tmp = regs->regs[addr]; 786 break; 787 case FPR_BASE ... FPR_BASE + 31: 788 if (!tsk_used_math(child)) { 789 /* FP not yet used */ 790 tmp = -1; 791 break; 792 } 793 fregs = get_fpu_regs(child); 794 795 #ifdef CONFIG_32BIT 796 if (test_thread_flag(TIF_32BIT_FPREGS)) { 797 /* 798 * The odd registers are actually the high 799 * order bits of the values stored in the even 800 * registers - unless we're using r2k_switch.S. 801 */ 802 tmp = get_fpr32(&fregs[(addr & ~1) - FPR_BASE], 803 addr & 1); 804 break; 805 } 806 #endif 807 tmp = get_fpr32(&fregs[addr - FPR_BASE], 0); 808 break; 809 case PC: 810 tmp = regs->cp0_epc; 811 break; 812 case CAUSE: 813 tmp = regs->cp0_cause; 814 break; 815 case BADVADDR: 816 tmp = regs->cp0_badvaddr; 817 break; 818 case MMHI: 819 tmp = regs->hi; 820 break; 821 case MMLO: 822 tmp = regs->lo; 823 break; 824 #ifdef CONFIG_CPU_HAS_SMARTMIPS 825 case ACX: 826 tmp = regs->acx; 827 break; 828 #endif 829 case FPC_CSR: 830 tmp = child->thread.fpu.fcr31; 831 break; 832 case FPC_EIR: 833 /* implementation / version register */ 834 tmp = boot_cpu_data.fpu_id; 835 break; 836 case DSP_BASE ... DSP_BASE + 5: { 837 dspreg_t *dregs; 838 839 if (!cpu_has_dsp) { 840 tmp = 0; 841 ret = -EIO; 842 goto out; 843 } 844 dregs = __get_dsp_regs(child); 845 tmp = (unsigned long) (dregs[addr - DSP_BASE]); 846 break; 847 } 848 case DSP_CONTROL: 849 if (!cpu_has_dsp) { 850 tmp = 0; 851 ret = -EIO; 852 goto out; 853 } 854 tmp = child->thread.dsp.dspcontrol; 855 break; 856 default: 857 tmp = 0; 858 ret = -EIO; 859 goto out; 860 } 861 ret = put_user(tmp, datalp); 862 break; 863 } 864 865 /* when I and D space are separate, this will have to be fixed. */ 866 case PTRACE_POKETEXT: /* write the word at location addr. */ 867 case PTRACE_POKEDATA: 868 ret = generic_ptrace_pokedata(child, addr, data); 869 break; 870 871 case PTRACE_POKEUSR: { 872 struct pt_regs *regs; 873 ret = 0; 874 regs = task_pt_regs(child); 875 876 switch (addr) { 877 case 0 ... 31: 878 regs->regs[addr] = data; 879 /* System call number may have been changed */ 880 if (addr == 2) 881 mips_syscall_update_nr(child, regs); 882 else if (addr == 4 && 883 mips_syscall_is_indirect(child, regs)) 884 mips_syscall_update_nr(child, regs); 885 break; 886 case FPR_BASE ... FPR_BASE + 31: { 887 union fpureg *fregs = get_fpu_regs(child); 888 889 init_fp_ctx(child); 890 #ifdef CONFIG_32BIT 891 if (test_thread_flag(TIF_32BIT_FPREGS)) { 892 /* 893 * The odd registers are actually the high 894 * order bits of the values stored in the even 895 * registers - unless we're using r2k_switch.S. 896 */ 897 set_fpr32(&fregs[(addr & ~1) - FPR_BASE], 898 addr & 1, data); 899 break; 900 } 901 #endif 902 set_fpr64(&fregs[addr - FPR_BASE], 0, data); 903 break; 904 } 905 case PC: 906 regs->cp0_epc = data; 907 break; 908 case MMHI: 909 regs->hi = data; 910 break; 911 case MMLO: 912 regs->lo = data; 913 break; 914 #ifdef CONFIG_CPU_HAS_SMARTMIPS 915 case ACX: 916 regs->acx = data; 917 break; 918 #endif 919 case FPC_CSR: 920 init_fp_ctx(child); 921 ptrace_setfcr31(child, data); 922 break; 923 case DSP_BASE ... DSP_BASE + 5: { 924 dspreg_t *dregs; 925 926 if (!cpu_has_dsp) { 927 ret = -EIO; 928 break; 929 } 930 931 dregs = __get_dsp_regs(child); 932 dregs[addr - DSP_BASE] = data; 933 break; 934 } 935 case DSP_CONTROL: 936 if (!cpu_has_dsp) { 937 ret = -EIO; 938 break; 939 } 940 child->thread.dsp.dspcontrol = data; 941 break; 942 default: 943 /* The rest are not allowed. */ 944 ret = -EIO; 945 break; 946 } 947 break; 948 } 949 950 case PTRACE_GETREGS: 951 ret = ptrace_getregs(child, datavp); 952 break; 953 954 case PTRACE_SETREGS: 955 ret = ptrace_setregs(child, datavp); 956 break; 957 958 case PTRACE_GETFPREGS: 959 ret = ptrace_getfpregs(child, datavp); 960 break; 961 962 case PTRACE_SETFPREGS: 963 ret = ptrace_setfpregs(child, datavp); 964 break; 965 966 case PTRACE_GET_THREAD_AREA: 967 ret = put_user(task_thread_info(child)->tp_value, datalp); 968 break; 969 970 case PTRACE_GET_WATCH_REGS: 971 ret = ptrace_get_watch_regs(child, addrp); 972 break; 973 974 case PTRACE_SET_WATCH_REGS: 975 ret = ptrace_set_watch_regs(child, addrp); 976 break; 977 978 default: 979 ret = ptrace_request(child, request, addr, data); 980 break; 981 } 982 out: 983 return ret; 984 } 985 986 /* 987 * Notification of system call entry/exit 988 * - triggered by current->work.syscall_trace 989 */ 990 asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall) 991 { 992 user_exit(); 993 994 current_thread_info()->syscall = syscall; 995 996 if (test_thread_flag(TIF_SYSCALL_TRACE)) { 997 if (tracehook_report_syscall_entry(regs)) 998 return -1; 999 syscall = current_thread_info()->syscall; 1000 } 1001 1002 #ifdef CONFIG_SECCOMP 1003 if (unlikely(test_thread_flag(TIF_SECCOMP))) { 1004 int ret, i; 1005 struct seccomp_data sd; 1006 unsigned long args[6]; 1007 1008 sd.nr = syscall; 1009 sd.arch = syscall_get_arch(); 1010 syscall_get_arguments(current, regs, 0, 6, args); 1011 for (i = 0; i < 6; i++) 1012 sd.args[i] = args[i]; 1013 sd.instruction_pointer = KSTK_EIP(current); 1014 1015 ret = __secure_computing(&sd); 1016 if (ret == -1) 1017 return ret; 1018 syscall = current_thread_info()->syscall; 1019 } 1020 #endif 1021 1022 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) 1023 trace_sys_enter(regs, regs->regs[2]); 1024 1025 audit_syscall_entry(syscall, regs->regs[4], regs->regs[5], 1026 regs->regs[6], regs->regs[7]); 1027 1028 /* 1029 * Negative syscall numbers are mistaken for rejected syscalls, but 1030 * won't have had the return value set appropriately, so we do so now. 1031 */ 1032 if (syscall < 0) 1033 syscall_set_return_value(current, regs, -ENOSYS, 0); 1034 return syscall; 1035 } 1036 1037 /* 1038 * Notification of system call entry/exit 1039 * - triggered by current->work.syscall_trace 1040 */ 1041 asmlinkage void syscall_trace_leave(struct pt_regs *regs) 1042 { 1043 /* 1044 * We may come here right after calling schedule_user() 1045 * or do_notify_resume(), in which case we can be in RCU 1046 * user mode. 1047 */ 1048 user_exit(); 1049 1050 audit_syscall_exit(regs); 1051 1052 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) 1053 trace_sys_exit(regs, regs_return_value(regs)); 1054 1055 if (test_thread_flag(TIF_SYSCALL_TRACE)) 1056 tracehook_report_syscall_exit(regs, 0); 1057 1058 user_enter(); 1059 } 1060