/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1992 Ross Biro
 * Copyright (C) Linus Torvalds
 * Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle
 * Copyright (C) 1996 David S. Miller
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 1999 MIPS Technologies, Inc.
 * Copyright (C) 2000 Ulf Carlsson
 *
 * At this time Linux/MIPS64 only supports syscall tracing, even for 32-bit
 * binaries.
 */
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/smp.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/seccomp.h>

#include <asm/byteorder.h>
#include <asm/cpu.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/uaccess.h>
#include <asm/bootinfo.h>
#include <asm/reg.h>

/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure single step bits etc are not set.
 */
void ptrace_disable(struct task_struct *child)
{
	/* Don't load the watchpoint registers for the ex-child. */
	clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
}

/*
 * Read a general register set. We always use the 64-bit format, even
 * for 32-bit kernels and for 32-bit processes on a 64-bit kernel.
 * Registers are sign extended to fill the available space.
 */
int ptrace_getregs(struct task_struct *child, __s64 __user *data)
{
	struct pt_regs *regs;
	int i;

	if (!access_ok(VERIFY_WRITE, data, 38 * 8))
		return -EIO;

	regs = task_pt_regs(child);

	for (i = 0; i < 32; i++)
		__put_user((long)regs->regs[i], data + i);
	__put_user((long)regs->lo, data + EF_LO - EF_R0);
	__put_user((long)regs->hi, data + EF_HI - EF_R0);
	__put_user((long)regs->cp0_epc, data + EF_CP0_EPC - EF_R0);
	__put_user((long)regs->cp0_badvaddr, data + EF_CP0_BADVADDR - EF_R0);
	__put_user((long)regs->cp0_status, data + EF_CP0_STATUS - EF_R0);
	__put_user((long)regs->cp0_cause, data + EF_CP0_CAUSE - EF_R0);

	return 0;
}

/*
 * Write a general register set. As for PTRACE_GETREGS, we always use
 * the 64-bit format. On a 32-bit kernel only the lower order half
 * (according to endianness) will be used.
 */
int ptrace_setregs(struct task_struct *child, __s64 __user *data)
{
	struct pt_regs *regs;
	int i;

	if (!access_ok(VERIFY_READ, data, 38 * 8))
		return -EIO;

	regs = task_pt_regs(child);

	for (i = 0; i < 32; i++)
		__get_user(regs->regs[i], data + i);
	__get_user(regs->lo, data + EF_LO - EF_R0);
	__get_user(regs->hi, data + EF_HI - EF_R0);
	__get_user(regs->cp0_epc, data + EF_CP0_EPC - EF_R0);

	/* badvaddr, status, and cause may not be written. */

	return 0;
}

/*
 * Read the FP register set: the 32 floating-point registers are stored
 * as 64-bit values, followed by the FCSR and FIR control words. If the
 * child has never used the FPU, the register values read back as all
 * ones.
 */
int ptrace_getfpregs(struct task_struct *child, __u32 __user *data)
{
	int i;
	unsigned int tmp;

	if (!access_ok(VERIFY_WRITE, data, 33 * 8))
		return -EIO;

	if (tsk_used_math(child)) {
		fpureg_t *fregs = get_fpu_regs(child);
		for (i = 0; i < 32; i++)
			__put_user(fregs[i], i + (__u64 __user *) data);
	} else {
		for (i = 0; i < 32; i++)
			__put_user((__u64) -1, i + (__u64 __user *) data);
	}

	__put_user(child->thread.fpu.fcr31, data + 64);

	preempt_disable();
	if (cpu_has_fpu) {
		unsigned int flags;

		if (cpu_has_mipsmt) {
			unsigned int vpflags = dvpe();
			flags = read_c0_status();
			__enable_fpu();
			__asm__ __volatile__("cfc1\t%0,$0" : "=r" (tmp));
			write_c0_status(flags);
			evpe(vpflags);
		} else {
			flags = read_c0_status();
			__enable_fpu();
			__asm__ __volatile__("cfc1\t%0,$0" : "=r" (tmp));
			write_c0_status(flags);
		}
	} else {
		tmp = 0;
	}
	preempt_enable();
	__put_user(tmp, data + 65);

	return 0;
}

/*
 * Write the FP register set. The layout matches PTRACE_GETFPREGS.
 */
int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
{
	fpureg_t *fregs;
	int i;

	if (!access_ok(VERIFY_READ, data, 33 * 8))
		return -EIO;

	fregs = get_fpu_regs(child);

	for (i = 0; i < 32; i++)
		__get_user(fregs[i], i + (__u64 __user *) data);

	__get_user(child->thread.fpu.fcr31, data + 64);

	/* FIR may not be written. */

	return 0;
}

/*
 * Read the hardware watchpoint state into the user's pt_watch_regs
 * structure. Watch register slots beyond those implemented by the CPU
 * are reported as zero.
 */
int ptrace_get_watch_regs(struct task_struct *child,
			  struct pt_watch_regs __user *addr)
{
	enum pt_watch_style style;
	int i;

	if (!cpu_has_watch || current_cpu_data.watch_reg_use_cnt == 0)
		return -EIO;
	if (!access_ok(VERIFY_WRITE, addr, sizeof(struct pt_watch_regs)))
		return -EIO;

#ifdef CONFIG_32BIT
	style = pt_watch_style_mips32;
#define WATCH_STYLE mips32
#else
	style = pt_watch_style_mips64;
#define WATCH_STYLE mips64
#endif

	__put_user(style, &addr->style);
	__put_user(current_cpu_data.watch_reg_use_cnt,
		   &addr->WATCH_STYLE.num_valid);
	for (i = 0; i < current_cpu_data.watch_reg_use_cnt; i++) {
		__put_user(child->thread.watch.mips3264.watchlo[i],
			   &addr->WATCH_STYLE.watchlo[i]);
		__put_user(child->thread.watch.mips3264.watchhi[i] & 0xfff,
			   &addr->WATCH_STYLE.watchhi[i]);
		__put_user(current_cpu_data.watch_reg_masks[i],
			   &addr->WATCH_STYLE.watch_masks[i]);
	}
	for (; i < 8; i++) {
		__put_user(0, &addr->WATCH_STYLE.watchlo[i]);
		__put_user(0, &addr->WATCH_STYLE.watchhi[i]);
		__put_user(0, &addr->WATCH_STYLE.watch_masks[i]);
	}

	return 0;
}

/*
 * Validate and install user-supplied hardware watchpoint values for
 * the child. TIF_LOAD_WATCH is set only if at least one watchpoint is
 * enabled.
 */
int ptrace_set_watch_regs(struct task_struct *child,
			  struct pt_watch_regs __user *addr)
{
	int i;
	int watch_active = 0;
	unsigned long lt[NUM_WATCH_REGS];
	u16 ht[NUM_WATCH_REGS];

	if (!cpu_has_watch || current_cpu_data.watch_reg_use_cnt == 0)
		return -EIO;
	if (!access_ok(VERIFY_READ, addr, sizeof(struct pt_watch_regs)))
		return -EIO;
	/* Check the values. */
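	/*
	 * Each watchlo value must be a user-space address for the child's
	 * address-space model, and only the mask bits (0xff8) of watchhi
	 * may be set by the tracer.
	 */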
	for (i = 0; i < current_cpu_data.watch_reg_use_cnt; i++) {
		__get_user(lt[i], &addr->WATCH_STYLE.watchlo[i]);
#ifdef CONFIG_32BIT
		if (lt[i] & __UA_LIMIT)
			return -EINVAL;
#else
		if (test_tsk_thread_flag(child, TIF_32BIT_ADDR)) {
			if (lt[i] & 0xffffffff80000000UL)
				return -EINVAL;
		} else {
			if (lt[i] & __UA_LIMIT)
				return -EINVAL;
		}
#endif
		__get_user(ht[i], &addr->WATCH_STYLE.watchhi[i]);
		if (ht[i] & ~0xff8)
			return -EINVAL;
	}
	/* Install them. */
	for (i = 0; i < current_cpu_data.watch_reg_use_cnt; i++) {
		if (lt[i] & 7)
			watch_active = 1;
		child->thread.watch.mips3264.watchlo[i] = lt[i];
		/* Set the G bit. */
		child->thread.watch.mips3264.watchhi[i] = ht[i];
	}

	if (watch_active)
		set_tsk_thread_flag(child, TIF_LOAD_WATCH);
	else
		clear_tsk_thread_flag(child, TIF_LOAD_WATCH);

	return 0;
}

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	int ret;
	void __user *addrp = (void __user *) addr;
	void __user *datavp = (void __user *) data;
	unsigned long __user *datalp = (void __user *) data;

	switch (request) {
	/* when I and D space are separate, these will need to be fixed. */
	case PTRACE_PEEKTEXT: /* read word at location addr. */
	case PTRACE_PEEKDATA:
		ret = generic_ptrace_peekdata(child, addr, data);
		break;

	/* Read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		struct pt_regs *regs;
		unsigned long tmp = 0;

		regs = task_pt_regs(child);
		ret = 0; /* Default return value. */

		switch (addr) {
		case 0 ... 31:
			tmp = regs->regs[addr];
			break;
		case FPR_BASE ... FPR_BASE + 31:
			if (tsk_used_math(child)) {
				fpureg_t *fregs = get_fpu_regs(child);

#ifdef CONFIG_32BIT
				/*
				 * The odd registers are actually the high
				 * order bits of the values stored in the even
				 * registers - unless we're using r2k_switch.S.
				 */
				if (addr & 1)
					tmp = (unsigned long) (fregs[((addr & ~1) - 32)] >> 32);
				else
					tmp = (unsigned long) (fregs[(addr - 32)] & 0xffffffff);
#endif
#ifdef CONFIG_64BIT
				tmp = fregs[addr - FPR_BASE];
#endif
			} else {
				tmp = -1;	/* FP not yet used */
			}
			break;
		case PC:
			tmp = regs->cp0_epc;
			break;
		case CAUSE:
			tmp = regs->cp0_cause;
			break;
		case BADVADDR:
			tmp = regs->cp0_badvaddr;
			break;
		case MMHI:
			tmp = regs->hi;
			break;
		case MMLO:
			tmp = regs->lo;
			break;
#ifdef CONFIG_CPU_HAS_SMARTMIPS
		case ACX:
			tmp = regs->acx;
			break;
#endif
		case FPC_CSR:
			tmp = child->thread.fpu.fcr31;
			break;
		case FPC_EIR: {	/* implementation / version register */
			unsigned int flags;
#ifdef CONFIG_MIPS_MT_SMTC
			unsigned long irqflags;
			unsigned int mtflags;
#endif /* CONFIG_MIPS_MT_SMTC */

			preempt_disable();
			if (!cpu_has_fpu) {
				preempt_enable();
				break;
			}

#ifdef CONFIG_MIPS_MT_SMTC
			/* Read-modify-write of Status must be atomic */
			local_irq_save(irqflags);
			mtflags = dmt();
#endif /* CONFIG_MIPS_MT_SMTC */
			if (cpu_has_mipsmt) {
				unsigned int vpflags = dvpe();
				flags = read_c0_status();
				__enable_fpu();
				__asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
				write_c0_status(flags);
				evpe(vpflags);
			} else {
				flags = read_c0_status();
				__enable_fpu();
				__asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
				write_c0_status(flags);
			}
#ifdef CONFIG_MIPS_MT_SMTC
			emt(mtflags);
			local_irq_restore(irqflags);
#endif /* CONFIG_MIPS_MT_SMTC */
			preempt_enable();
			break;
		}
		case DSP_BASE ... DSP_BASE + 5: {
			dspreg_t *dregs;

			if (!cpu_has_dsp) {
				tmp = 0;
				ret = -EIO;
				goto out;
			}
			dregs = __get_dsp_regs(child);
			tmp = (unsigned long) (dregs[addr - DSP_BASE]);
			break;
		}
		case DSP_CONTROL:
			if (!cpu_has_dsp) {
				tmp = 0;
				ret = -EIO;
				goto out;
			}
			tmp = child->thread.dsp.dspcontrol;
			break;
		default:
			tmp = 0;
			ret = -EIO;
			goto out;
		}
		ret = put_user(tmp, datalp);
		break;
	}

	/* when I and D space are separate, this will have to be fixed. */
	case PTRACE_POKETEXT: /* write the word at location addr. */
	case PTRACE_POKEDATA:
		ret = generic_ptrace_pokedata(child, addr, data);
		break;

	case PTRACE_POKEUSR: {
		struct pt_regs *regs;
		ret = 0;
		regs = task_pt_regs(child);

		switch (addr) {
		case 0 ... 31:
			regs->regs[addr] = data;
			break;
		case FPR_BASE ... FPR_BASE + 31: {
			fpureg_t *fregs = get_fpu_regs(child);

			if (!tsk_used_math(child)) {
				/* FP not yet used */
				memset(&child->thread.fpu, ~0,
				       sizeof(child->thread.fpu));
				child->thread.fpu.fcr31 = 0;
			}
#ifdef CONFIG_32BIT
			/*
			 * The odd registers are actually the high order bits
			 * of the values stored in the even registers - unless
			 * we're using r2k_switch.S.
			 */
			if (addr & 1) {
				fregs[(addr & ~1) - FPR_BASE] &= 0xffffffff;
				fregs[(addr & ~1) - FPR_BASE] |= ((unsigned long long) data) << 32;
			} else {
				fregs[addr - FPR_BASE] &= ~0xffffffffLL;
				fregs[addr - FPR_BASE] |= data;
			}
#endif
#ifdef CONFIG_64BIT
			fregs[addr - FPR_BASE] = data;
#endif
			break;
		}
		case PC:
			regs->cp0_epc = data;
			break;
		case MMHI:
			regs->hi = data;
			break;
		case MMLO:
			regs->lo = data;
			break;
#ifdef CONFIG_CPU_HAS_SMARTMIPS
		case ACX:
			regs->acx = data;
			break;
#endif
		case FPC_CSR:
			child->thread.fpu.fcr31 = data;
			break;
		case DSP_BASE ... DSP_BASE + 5: {
			dspreg_t *dregs;

			if (!cpu_has_dsp) {
				ret = -EIO;
				break;
			}

			dregs = __get_dsp_regs(child);
			dregs[addr - DSP_BASE] = data;
			break;
		}
		case DSP_CONTROL:
			if (!cpu_has_dsp) {
				ret = -EIO;
				break;
			}
			child->thread.dsp.dspcontrol = data;
			break;
		default:
			/* The rest are not allowed. */
			ret = -EIO;
			break;
		}
		break;
	}

	case PTRACE_GETREGS:
		ret = ptrace_getregs(child, datavp);
		break;

	case PTRACE_SETREGS:
		ret = ptrace_setregs(child, datavp);
		break;

	case PTRACE_GETFPREGS:
		ret = ptrace_getfpregs(child, datavp);
		break;

	case PTRACE_SETFPREGS:
		ret = ptrace_setfpregs(child, datavp);
		break;

	case PTRACE_GET_THREAD_AREA:
		ret = put_user(task_thread_info(child)->tp_value, datalp);
		break;

	case PTRACE_GET_WATCH_REGS:
		ret = ptrace_get_watch_regs(child, addrp);
		break;

	case PTRACE_SET_WATCH_REGS:
		ret = ptrace_set_watch_regs(child, addrp);
		break;

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}
out:
	return ret;
}

static inline int audit_arch(void)
{
	int arch = EM_MIPS;
#ifdef CONFIG_64BIT
	arch |= __AUDIT_ARCH_64BIT;
#endif
#if defined(__LITTLE_ENDIAN)
	arch |= __AUDIT_ARCH_LE;
#endif
	return arch;
}

/*
 * Notification of system call entry/exit
 * - triggered by current->work.syscall_trace
 */
asmlinkage void syscall_trace_enter(struct pt_regs *regs)
{
	/* do the secure computing check first */
	secure_computing_strict(regs->regs[2]);

	if (!(current->ptrace & PT_PTRACED))
		goto out;

	if (!test_thread_flag(TIF_SYSCALL_TRACE))
		goto out;

	/* The 0x80 provides a way for the tracing parent to distinguish
	   between a syscall stop and SIGTRAP delivery */
	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) ?
				 0x80 : 0));

	/*
	 * this isn't the same as continuing with a signal, but it will do
	 * for normal use. strace only continues with a signal if the
	 * stopping signal is not SIGTRAP. -brl
	 */
	if (current->exit_code) {
		send_sig(current->exit_code, current, 1);
		current->exit_code = 0;
	}

out:
	audit_syscall_entry(audit_arch(), regs->regs[2],
			    regs->regs[4], regs->regs[5],
			    regs->regs[6], regs->regs[7]);
}

/*
 * Notification of system call entry/exit
 * - triggered by current->work.syscall_trace
 */
asmlinkage void syscall_trace_leave(struct pt_regs *regs)
{
	audit_syscall_exit(regs);

	if (!(current->ptrace & PT_PTRACED))
		return;

	if (!test_thread_flag(TIF_SYSCALL_TRACE))
		return;

	/* The 0x80 provides a way for the tracing parent to distinguish
	   between a syscall stop and SIGTRAP delivery */
	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) ?
				 0x80 : 0));

	/*
	 * this isn't the same as continuing with a signal, but it will do
	 * for normal use. strace only continues with a signal if the
	 * stopping signal is not SIGTRAP. -brl
	 */
	if (current->exit_code) {
		send_sig(current->exit_code, current, 1);
		current->exit_code = 0;
	}
}