/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1992 Ross Biro
 * Copyright (C) Linus Torvalds
 * Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle
 * Copyright (C) 1996 David S. Miller
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 1999 MIPS Technologies, Inc.
 * Copyright (C) 2000 Ulf Carlsson
 *
 * At this time Linux/MIPS64 only supports syscall tracing, even for 32-bit
 * binaries.
 */
#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/smp.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/seccomp.h>

#include <asm/byteorder.h>
#include <asm/cpu.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/uaccess.h>
#include <asm/bootinfo.h>
#include <asm/reg.h>

/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure single step bits etc are not set.
 */
void ptrace_disable(struct task_struct *child)
{
	/*
	 * Don't load the watchpoint registers for the ex-child; the flag
	 * is what ptrace_set_watch_regs() sets when a watchpoint is armed.
	 */
	clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
}

/*
 * Read a general register set.  We always use the 64-bit format, even
 * for 32-bit kernels and for 32-bit processes on a 64-bit kernel.
 * Registers are sign extended to fill the available space.
 */
int ptrace_getregs(struct task_struct *child, __s64 __user *data)
{
	struct pt_regs *regs;
	int i;

	/*
	 * Layout: 32 GPRs followed by lo, hi, epc, badvaddr, status and
	 * cause — 38 sign-extended 64-bit slots in total, hence 38 * 8.
	 */
	if (!access_ok(VERIFY_WRITE, data, 38 * 8))
		return -EIO;

	regs = task_pt_regs(child);

	/*
	 * NOTE(review): the individual __put_user() return values are
	 * ignored; a fault inside the access_ok()-checked window would go
	 * unreported — TODO confirm this is intentional.
	 */
	for (i = 0; i < 32; i++)
		__put_user((long)regs->regs[i], data + i);
	/* The EF_* constants index the 64-bit layout relative to EF_R0. */
	__put_user((long)regs->lo, data + EF_LO - EF_R0);
	__put_user((long)regs->hi, data + EF_HI - EF_R0);
	__put_user((long)regs->cp0_epc, data + EF_CP0_EPC - EF_R0);
	__put_user((long)regs->cp0_badvaddr, data + EF_CP0_BADVADDR - EF_R0);
	__put_user((long)regs->cp0_status, data + EF_CP0_STATUS - EF_R0);
	__put_user((long)regs->cp0_cause, data + EF_CP0_CAUSE - EF_R0);

	return 0;
}

/*
 * Write a general register set.  As for PTRACE_GETREGS, we always use
 * the 64-bit format.  On a 32-bit kernel only the lower order half
 * (according to endianness) will be used.
 */
int ptrace_setregs(struct task_struct *child, __s64 __user *data)
{
	struct pt_regs *regs;
	int i;

	/* Same 38-slot layout as ptrace_getregs(). */
	if (!access_ok(VERIFY_READ, data, 38 * 8))
		return -EIO;

	regs = task_pt_regs(child);

	for (i = 0; i < 32; i++)
		__get_user(regs->regs[i], data + i);
	__get_user(regs->lo, data + EF_LO - EF_R0);
	__get_user(regs->hi, data + EF_HI - EF_R0);
	/* epc is the only privileged CP0 register the tracer may set. */
	__get_user(regs->cp0_epc, data + EF_CP0_EPC - EF_R0);

	/* badvaddr, status, and cause may not be written.
 */

	return 0;
}

/*
 * Copy the child's FPU state to userspace: 32 FP registers as 64-bit
 * doublewords, then fcr31 (FCSR) and the FP implementation register.
 * 33 * 8 bytes covers the whole dump (fcr31/FIR share the last slot).
 */
int ptrace_getfpregs(struct task_struct *child, __u32 __user *data)
{
	int i;
	unsigned int tmp;

	if (!access_ok(VERIFY_WRITE, data, 33 * 8))
		return -EIO;

	if (tsk_used_math(child)) {
		fpureg_t *fregs = get_fpu_regs(child);
		for (i = 0; i < 32; i++)
			__put_user(fregs[i], i + (__u64 __user *) data);
	} else {
		/* FP never used: report all-ones in every register. */
		for (i = 0; i < 32; i++)
			__put_user((__u64) -1, i + (__u64 __user *) data);
	}

	/* data is __u32 *, so data + 64 is byte offset 256: right after
	   the 32 doublewords above. */
	__put_user(child->thread.fpu.fcr31, data + 64);

	/*
	 * Read CP1 control register 0 from the hardware (presumably the
	 * FIR / implementation register — see the FPC_EIR case below).
	 * Status is saved/restored around __enable_fpu(), and on MT cores
	 * all other VPEs are stopped (dvpe/evpe) while we touch it.
	 */
	preempt_disable();
	if (cpu_has_fpu) {
		unsigned int flags;

		if (cpu_has_mipsmt) {
			unsigned int vpflags = dvpe();
			flags = read_c0_status();
			__enable_fpu();
			__asm__ __volatile__("cfc1\t%0,$0" : "=r" (tmp));
			write_c0_status(flags);
			evpe(vpflags);
		} else {
			flags = read_c0_status();
			__enable_fpu();
			__asm__ __volatile__("cfc1\t%0,$0" : "=r" (tmp));
			write_c0_status(flags);
		}
	} else {
		tmp = 0;	/* No FPU: report a zero FIR. */
	}
	preempt_enable();
	__put_user(tmp, data + 65);

	return 0;
}

/*
 * Write the child's FPU state from userspace; mirror image of
 * ptrace_getfpregs() above.
 */
int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
{
	fpureg_t *fregs;
	int i;

	if (!access_ok(VERIFY_READ, data, 33 * 8))
		return -EIO;

	/*
	 * NOTE(review): unlike the read side there is no tsk_used_math()
	 * check here — TODO confirm get_fpu_regs() is safe for a child
	 * that has never touched the FPU.
	 */
	fregs = get_fpu_regs(child);

	for (i = 0; i < 32; i++)
		__get_user(fregs[i], i + (__u64 __user *) data);

	__get_user(child->thread.fpu.fcr31, data + 64);

	/* FIR may not be written.
 */

	return 0;
}

/*
 * Copy the child's watchpoint registers to userspace, tagged with the
 * register width style (mips32 vs mips64) of this kernel.
 */
int ptrace_get_watch_regs(struct task_struct *child,
			  struct pt_watch_regs __user *addr)
{
	enum pt_watch_style style;
	int i;

	if (!cpu_has_watch || current_cpu_data.watch_reg_use_cnt == 0)
		return -EIO;
	if (!access_ok(VERIFY_WRITE, addr, sizeof(struct pt_watch_regs)))
		return -EIO;

	/*
	 * WATCH_STYLE selects the matching union member of
	 * struct pt_watch_regs for the rest of this file.
	 */
#ifdef CONFIG_32BIT
	style = pt_watch_style_mips32;
#define WATCH_STYLE mips32
#else
	style = pt_watch_style_mips64;
#define WATCH_STYLE mips64
#endif

	__put_user(style, &addr->style);
	__put_user(current_cpu_data.watch_reg_use_cnt,
		   &addr->WATCH_STYLE.num_valid);
	for (i = 0; i < current_cpu_data.watch_reg_use_cnt; i++) {
		__put_user(child->thread.watch.mips3264.watchlo[i],
			   &addr->WATCH_STYLE.watchlo[i]);
		/* Only the low 12 bits of watchhi are reported. */
		__put_user(child->thread.watch.mips3264.watchhi[i] & 0xfff,
			   &addr->WATCH_STYLE.watchhi[i]);
		__put_user(current_cpu_data.watch_reg_masks[i],
			   &addr->WATCH_STYLE.watch_masks[i]);
	}
	/* Zero out the slots beyond what this CPU implements (max 8). */
	for (; i < 8; i++) {
		__put_user(0, &addr->WATCH_STYLE.watchlo[i]);
		__put_user(0, &addr->WATCH_STYLE.watchhi[i]);
		__put_user(0, &addr->WATCH_STYLE.watch_masks[i]);
	}

	return 0;
}

/*
 * Install watchpoint registers from userspace.  All values are
 * validated before any of them is written to the child, so a bad
 * request leaves the child's state untouched.
 */
int ptrace_set_watch_regs(struct task_struct *child,
			  struct pt_watch_regs __user *addr)
{
	int i;
	int watch_active = 0;
	unsigned long lt[NUM_WATCH_REGS];
	u16 ht[NUM_WATCH_REGS];

	if (!cpu_has_watch || current_cpu_data.watch_reg_use_cnt == 0)
		return -EIO;
	if (!access_ok(VERIFY_READ, addr, sizeof(struct pt_watch_regs)))
		return -EIO;
	/* Check the values.
 */
	for (i = 0; i < current_cpu_data.watch_reg_use_cnt; i++) {
		__get_user(lt[i], &addr->WATCH_STYLE.watchlo[i]);
		/*
		 * Reject watch addresses outside the user address range:
		 * __UA_LIMIT masks kernel-space bits, and a 32-bit task on
		 * a 64-bit kernel is further confined below 2 GiB.
		 */
#ifdef CONFIG_32BIT
		if (lt[i] & __UA_LIMIT)
			return -EINVAL;
#else
		if (test_tsk_thread_flag(child, TIF_32BIT_ADDR)) {
			if (lt[i] & 0xffffffff80000000UL)
				return -EINVAL;
		} else {
			if (lt[i] & __UA_LIMIT)
				return -EINVAL;
		}
#endif
		__get_user(ht[i], &addr->WATCH_STYLE.watchhi[i]);
		/* Only bits 3..11 of watchhi may be set by the tracer. */
		if (ht[i] & ~0xff8)
			return -EINVAL;
	}
	/* Install them. */
	for (i = 0; i < current_cpu_data.watch_reg_use_cnt; i++) {
		/*
		 * Low three bits of watchlo are presumably the W/R/I
		 * enable bits — any of them set means this watchpoint is
		 * live (TODO confirm against the WatchLo layout).
		 */
		if (lt[i] & 7)
			watch_active = 1;
		child->thread.watch.mips3264.watchlo[i] = lt[i];
		/* Set the G bit. */
		/* NOTE(review): ht[i] is stored as-is here; the G bit is
		   presumably applied when loaded into hardware — confirm. */
		child->thread.watch.mips3264.watchhi[i] = ht[i];
	}

	/* Tell the context switch code whether to load watch registers. */
	if (watch_active)
		set_tsk_thread_flag(child, TIF_LOAD_WATCH);
	else
		clear_tsk_thread_flag(child, TIF_LOAD_WATCH);

	return 0;
}

/*
 * Top-level dispatcher for MIPS-specific ptrace requests; anything not
 * handled here falls through to the generic ptrace_request().
 */
long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	int ret;
	/* The same user value is viewed three ways by different requests. */
	void __user *addrp = (void __user *) addr;
	void __user *datavp = (void __user *) data;
	unsigned long __user *datalp = (void __user *) data;

	switch (request) {
	/* when I and D space are separate, these will need to be fixed. */
	case PTRACE_PEEKTEXT: /* read word at location addr. */
	case PTRACE_PEEKDATA:
		ret = generic_ptrace_peekdata(child, addr, data);
		break;

	/* Read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		struct pt_regs *regs;
		unsigned long tmp = 0;

		regs = task_pt_regs(child);
		ret = 0;  /* Default return value. */

		/* addr is a pseudo-register index here, not an address. */
		switch (addr) {
		case 0 ... 31:
			tmp = regs->regs[addr];
			break;
		case FPR_BASE ...
		     FPR_BASE + 31:
			if (tsk_used_math(child)) {
				fpureg_t *fregs = get_fpu_regs(child);

#ifdef CONFIG_32BIT
				/*
				 * The odd registers are actually the high
				 * order bits of the values stored in the even
				 * registers - unless we're using r2k_switch.S.
				 */
				if (addr & 1)
					tmp = (unsigned long) (fregs[((addr & ~1) - 32)] >> 32);
				else
					tmp = (unsigned long) (fregs[(addr - 32)] & 0xffffffff);
#endif
#ifdef CONFIG_64BIT
				tmp = fregs[addr - FPR_BASE];
#endif
			} else {
				tmp = -1;	/* FP not yet used  */
			}
			break;
		case PC:
			tmp = regs->cp0_epc;
			break;
		case CAUSE:
			tmp = regs->cp0_cause;
			break;
		case BADVADDR:
			tmp = regs->cp0_badvaddr;
			break;
		case MMHI:
			tmp = regs->hi;
			break;
		case MMLO:
			tmp = regs->lo;
			break;
#ifdef CONFIG_CPU_HAS_SMARTMIPS
		case ACX:
			tmp = regs->acx;
			break;
#endif
		case FPC_CSR:
			tmp = child->thread.fpu.fcr31;
			break;
		case FPC_EIR: {	/* implementation / version register */
			unsigned int flags;
#ifdef CONFIG_MIPS_MT_SMTC
			unsigned long irqflags;
			unsigned int mtflags;
#endif /* CONFIG_MIPS_MT_SMTC */

			/*
			 * Read CP1 control register 0 from the live FPU.
			 * Same sequence as in ptrace_getfpregs(): save
			 * Status, enable the FPU, cfc1, restore Status;
			 * MT cores additionally stop other VPEs/TCs so
			 * the Status read-modify-write is not disturbed.
			 */
			preempt_disable();
			if (!cpu_has_fpu) {
				/* No FPU: leave tmp at its default of 0. */
				preempt_enable();
				break;
			}

#ifdef CONFIG_MIPS_MT_SMTC
			/* Read-modify-write of Status must be atomic */
			local_irq_save(irqflags);
			mtflags = dmt();
#endif /* CONFIG_MIPS_MT_SMTC */
			if (cpu_has_mipsmt) {
				unsigned int vpflags = dvpe();
				flags = read_c0_status();
				__enable_fpu();
				__asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
				write_c0_status(flags);
				evpe(vpflags);
			} else {
				flags = read_c0_status();
				__enable_fpu();
				__asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
				write_c0_status(flags);
			}
#ifdef CONFIG_MIPS_MT_SMTC
			emt(mtflags);
			local_irq_restore(irqflags);
#endif /* CONFIG_MIPS_MT_SMTC */
			preempt_enable();
			break;
		}
		case DSP_BASE ...
			  DSP_BASE + 5: {
			dspreg_t *dregs;

			if (!cpu_has_dsp) {
				tmp = 0;
				ret = -EIO;
				goto out;
			}
			dregs = __get_dsp_regs(child);
			tmp = (unsigned long) (dregs[addr - DSP_BASE]);
			break;
		}
		case DSP_CONTROL:
			if (!cpu_has_dsp) {
				tmp = 0;
				ret = -EIO;
				goto out;
			}
			tmp = child->thread.dsp.dspcontrol;
			break;
		default:
			/* Unknown pseudo-register index. */
			tmp = 0;
			ret = -EIO;
			goto out;
		}
		/* Deliver the fetched value; put_user() supplies the
		   access check that the switch above did not need. */
		ret = put_user(tmp, datalp);
		break;
	}

	/* when I and D space are separate, this will have to be fixed. */
	case PTRACE_POKETEXT: /* write the word at location addr. */
	case PTRACE_POKEDATA:
		ret = generic_ptrace_pokedata(child, addr, data);
		break;

	/* Write data into the pseudo-register addr of the USER area. */
	case PTRACE_POKEUSR: {
		struct pt_regs *regs;
		ret = 0;
		regs = task_pt_regs(child);

		switch (addr) {
		case 0 ... 31:
			regs->regs[addr] = data;
			break;
		case FPR_BASE ... FPR_BASE + 31: {
			fpureg_t *fregs = get_fpu_regs(child);

			if (!tsk_used_math(child)) {
				/* FP not yet used: start from a defined
				   state (all-ones regs, zero FCSR). */
				memset(&child->thread.fpu, ~0,
				       sizeof(child->thread.fpu));
				child->thread.fpu.fcr31 = 0;
			}
#ifdef CONFIG_32BIT
			/*
			 * The odd registers are actually the high order bits
			 * of the values stored in the even registers - unless
			 * we're using r2k_switch.S.
			 */
			if (addr & 1) {
				fregs[(addr & ~1) - FPR_BASE] &= 0xffffffff;
				fregs[(addr & ~1) - FPR_BASE] |= ((unsigned long long) data) << 32;
			} else {
				fregs[addr - FPR_BASE] &= ~0xffffffffLL;
				fregs[addr - FPR_BASE] |= data;
			}
#endif
#ifdef CONFIG_64BIT
			fregs[addr - FPR_BASE] = data;
#endif
			break;
		}
		case PC:
			regs->cp0_epc = data;
			break;
		case MMHI:
			regs->hi = data;
			break;
		case MMLO:
			regs->lo = data;
			break;
#ifdef CONFIG_CPU_HAS_SMARTMIPS
		case ACX:
			regs->acx = data;
			break;
#endif
		case FPC_CSR:
			child->thread.fpu.fcr31 = data;
			break;
		case DSP_BASE ...
			  DSP_BASE + 5: {
			dspreg_t *dregs;

			if (!cpu_has_dsp) {
				ret = -EIO;
				break;
			}

			dregs = __get_dsp_regs(child);
			dregs[addr - DSP_BASE] = data;
			break;
		}
		case DSP_CONTROL:
			if (!cpu_has_dsp) {
				ret = -EIO;
				break;
			}
			child->thread.dsp.dspcontrol = data;
			break;
		default:
			/* The rest are not allowed. */
			ret = -EIO;
			break;
		}
		break;
	}

	case PTRACE_GETREGS:
		ret = ptrace_getregs(child, datavp);
		break;

	case PTRACE_SETREGS:
		ret = ptrace_setregs(child, datavp);
		break;

	case PTRACE_GETFPREGS:
		ret = ptrace_getfpregs(child, datavp);
		break;

	case PTRACE_SETFPREGS:
		ret = ptrace_setfpregs(child, datavp);
		break;

	case PTRACE_GET_THREAD_AREA:
		ret = put_user(task_thread_info(child)->tp_value, datalp);
		break;

	case PTRACE_GET_WATCH_REGS:
		ret = ptrace_get_watch_regs(child, addrp);
		break;

	case PTRACE_SET_WATCH_REGS:
		ret = ptrace_set_watch_regs(child, addrp);
		break;

	default:
		/* Everything else is handled by the generic code. */
		ret = ptrace_request(child, request, addr, data);
		break;
	}
	/* Reached via goto from the PEEKUSR error paths above. */
 out:
	return ret;
}

/* Compose the AUDIT_ARCH_* value for this kernel's word size/endianness. */
static inline int audit_arch(void)
{
	int arch = EM_MIPS;
#ifdef CONFIG_64BIT
	arch |= __AUDIT_ARCH_64BIT;
#endif
#if defined(__LITTLE_ENDIAN)
	arch |= __AUDIT_ARCH_LE;
#endif
	return arch;
}

/*
 * Notification of system call entry/exit
 * - triggered by current->work.syscall_trace
 */
asmlinkage void syscall_trace_enter(struct pt_regs *regs)
{
	/* Leave RCU user/context-tracking mode before touching the task. */
	user_exit();

	/* do the secure computing check first; $2 holds the syscall nr */
	secure_computing_strict(regs->regs[2]);

	if (!(current->ptrace & PT_PTRACED))
		goto out;

	if (!test_thread_flag(TIF_SYSCALL_TRACE))
		goto out;

	/* The 0x80 provides a way for the tracing parent to distinguish
	   between a syscall stop and SIGTRAP delivery */
	ptrace_notify(SIGTRAP | ((current->ptrace &
				  PT_TRACESYSGOOD) ?
				 0x80 : 0));

	/*
	 * this isn't the same as continuing with a signal, but it will do
	 * for normal use.  strace only continues with a signal if the
	 * stopping signal is not SIGTRAP.  -brl
	 */
	if (current->exit_code) {
		send_sig(current->exit_code, current, 1);
		current->exit_code = 0;
	}

out:
	/* Audit always runs, traced or not; args are in $4..$7. */
	audit_syscall_entry(audit_arch(), regs->regs[2],
			    regs->regs[4], regs->regs[5],
			    regs->regs[6], regs->regs[7]);
}

/*
 * Notification of system call entry/exit
 * - triggered by current->work.syscall_trace
 */
asmlinkage void syscall_trace_leave(struct pt_regs *regs)
{
	/*
	 * We may come here right after calling schedule_user()
	 * or do_notify_resume(), in which case we can be in RCU
	 * user mode.
	 */
	user_exit();

	audit_syscall_exit(regs);

	if (!(current->ptrace & PT_PTRACED))
		return;

	if (!test_thread_flag(TIF_SYSCALL_TRACE))
		return;

	/* The 0x80 provides a way for the tracing parent to distinguish
	   between a syscall stop and SIGTRAP delivery */
	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) ?
				 0x80 : 0));

	/*
	 * this isn't the same as continuing with a signal, but it will do
	 * for normal use.  strace only continues with a signal if the
	 * stopping signal is not SIGTRAP.  -brl
	 */
	if (current->exit_code) {
		send_sig(current->exit_code, current, 1);
		current->exit_code = 0;
	}

	/* Re-enter RCU user mode; pairs with the user_exit() above. */
	user_enter();
}