/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2007 Tensilica Inc.
 *
 * Joe Taylor	<joe@tensilica.com, joetylr@yahoo.com>
 * Chris Zankel <chris@zankel.net>
 * Scott Foehner<sfoehner@yahoo.com>,
 * Kevin Chea
 * Marc Gauthier<marc@tensilica.com> <marc@alumni.uwaterloo.ca>
 */

#include <linux/audit.h>
#include <linux/errno.h>
#include <linux/hw_breakpoint.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/perf_event.h>
#include <linux/ptrace.h>
#include <linux/regset.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/seccomp.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/tracehook.h>
#include <linux/uaccess.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

#include <asm/coprocessor.h>
#include <asm/elf.h>
#include <asm/page.h>
#include <asm/ptrace.h>

static int gpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	struct pt_regs *regs = task_pt_regs(target);
	struct user_pt_regs newregs = {
		.pc = regs->pc,
		.ps = regs->ps & ~(1 << PS_EXCM_BIT),
		.lbeg = regs->lbeg,
		.lend = regs->lend,
		.lcount = regs->lcount,
		.sar = regs->sar,
		.threadptr = regs->threadptr,
		.windowbase = regs->windowbase,
		.windowstart = regs->windowstart,
		.syscall = regs->syscall,
	};

	memcpy(newregs.a,
	       regs->areg + XCHAL_NUM_AREGS - regs->windowbase * 4,
	       regs->windowbase * 16);
	memcpy(newregs.a + regs->windowbase * 4,
	       regs->areg,
	       (WSBITS - regs->windowbase) * 16);

	return membuf_write(&to, &newregs, sizeof(newregs));
}
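
/*
 * Illustrative sketch, not part of this file's build: a ptracer reaches
 * gpr_get()/gpr_set() through the generic regset interface (userspace code,
 * roughly, using <sys/ptrace.h>, <sys/uio.h> and <elf.h>; 'pid' and 'new_pc'
 * are placeholders and the tracee is assumed to be stopped):
 *
 *	struct user_pt_regs uregs;
 *	struct iovec iov = { .iov_base = &uregs, .iov_len = sizeof(uregs) };
 *
 *	ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);
 *	uregs.pc = new_pc;
 *	ptrace(PTRACE_SETREGSET, pid, NT_PRSTATUS, &iov);
 *
 * uregs.a[] holds the address-register file in the layout produced by the
 * memcpy() pairs in gpr_get()/gpr_set(), i.e. pt_regs->areg rotated by
 * windowbase.
 */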

static int gpr_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_pt_regs newregs = {0};
	struct pt_regs *regs;
	const u32 ps_mask = PS_CALLINC_MASK | PS_OWB_MASK;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
	if (ret)
		return ret;

	if (newregs.windowbase >= XCHAL_NUM_AREGS / 4)
		return -EINVAL;

	regs = task_pt_regs(target);
	regs->pc = newregs.pc;
	regs->ps = (regs->ps & ~ps_mask) | (newregs.ps & ps_mask);
	regs->lbeg = newregs.lbeg;
	regs->lend = newregs.lend;
	regs->lcount = newregs.lcount;
	regs->sar = newregs.sar;
	regs->threadptr = newregs.threadptr;

	if (newregs.syscall)
		regs->syscall = newregs.syscall;

	if (newregs.windowbase != regs->windowbase ||
	    newregs.windowstart != regs->windowstart) {
		u32 rotws, wmask;

		rotws = (((newregs.windowstart |
			   (newregs.windowstart << WSBITS)) >>
			  newregs.windowbase) &
			 ((1 << WSBITS) - 1)) & ~1;
		wmask = ((rotws ? WSBITS + 1 - ffs(rotws) : 0) << 4) |
			(rotws & 0xF) | 1;
		regs->windowbase = newregs.windowbase;
		regs->windowstart = newregs.windowstart;
		regs->wmask = wmask;
	}

	memcpy(regs->areg + XCHAL_NUM_AREGS - newregs.windowbase * 4,
	       newregs.a, newregs.windowbase * 16);
	memcpy(regs->areg, newregs.a + newregs.windowbase * 4,
	       (WSBITS - newregs.windowbase) * 16);

	return 0;
}

static int tie_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	int ret;
	struct pt_regs *regs = task_pt_regs(target);
	struct thread_info *ti = task_thread_info(target);
	elf_xtregs_t *newregs = kzalloc(sizeof(elf_xtregs_t), GFP_KERNEL);

	if (!newregs)
		return -ENOMEM;

	newregs->opt = regs->xtregs_opt;
	newregs->user = ti->xtregs_user;

#if XTENSA_HAVE_COPROCESSORS
	/* Flush all coprocessor registers to memory. */
	coprocessor_flush_all(ti);
	newregs->cp0 = ti->xtregs_cp.cp0;
	newregs->cp1 = ti->xtregs_cp.cp1;
	newregs->cp2 = ti->xtregs_cp.cp2;
	newregs->cp3 = ti->xtregs_cp.cp3;
	newregs->cp4 = ti->xtregs_cp.cp4;
	newregs->cp5 = ti->xtregs_cp.cp5;
	newregs->cp6 = ti->xtregs_cp.cp6;
	newregs->cp7 = ti->xtregs_cp.cp7;
#endif
	ret = membuf_write(&to, newregs, sizeof(*newregs));
	kfree(newregs);
	return ret;
}

static int tie_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct pt_regs *regs = task_pt_regs(target);
	struct thread_info *ti = task_thread_info(target);
	elf_xtregs_t *newregs = kzalloc(sizeof(elf_xtregs_t), GFP_KERNEL);

	if (!newregs)
		return -ENOMEM;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 newregs, 0, -1);

	if (ret)
		goto exit;
	regs->xtregs_opt = newregs->opt;
	ti->xtregs_user = newregs->user;

#if XTENSA_HAVE_COPROCESSORS
	/* Flush all coprocessors before we overwrite them. */
	coprocessor_flush_all(ti);
	coprocessor_release_all(ti);
	ti->xtregs_cp.cp0 = newregs->cp0;
	ti->xtregs_cp.cp1 = newregs->cp1;
	ti->xtregs_cp.cp2 = newregs->cp2;
	ti->xtregs_cp.cp3 = newregs->cp3;
	ti->xtregs_cp.cp4 = newregs->cp4;
	ti->xtregs_cp.cp5 = newregs->cp5;
	ti->xtregs_cp.cp6 = newregs->cp6;
	ti->xtregs_cp.cp7 = newregs->cp7;
#endif
exit:
	kfree(newregs);
	return ret;
}

enum xtensa_regset {
	REGSET_GPR,
	REGSET_TIE,
};

static const struct user_regset xtensa_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_pt_regs) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = gpr_get,
		.set = gpr_set,
	},
	[REGSET_TIE] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(elf_xtregs_t) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = tie_get,
		.set = tie_set,
	},
};

static const struct user_regset_view user_xtensa_view = {
	.name = "xtensa",
	.e_machine = EM_XTENSA,
	.regsets = xtensa_regsets,
	.n = ARRAY_SIZE(xtensa_regsets)
};

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
	return &user_xtensa_view;
}

void user_enable_single_step(struct task_struct *child)
{
	child->ptrace |= PT_SINGLESTEP;
}

void user_disable_single_step(struct task_struct *child)
{
	child->ptrace &= ~PT_SINGLESTEP;
}

/*
 * Called by kernel/ptrace.c when detaching to disable single stepping.
 */

void ptrace_disable(struct task_struct *child)
{
	/* Nothing to do.. */
}

static int ptrace_getregs(struct task_struct *child, void __user *uregs)
{
	return copy_regset_to_user(child, &user_xtensa_view, REGSET_GPR,
				   0, sizeof(xtensa_gregset_t), uregs);
}

static int ptrace_setregs(struct task_struct *child, void __user *uregs)
{
	return copy_regset_from_user(child, &user_xtensa_view, REGSET_GPR,
				     0, sizeof(xtensa_gregset_t), uregs);
}

static int ptrace_getxregs(struct task_struct *child, void __user *uregs)
{
	return copy_regset_to_user(child, &user_xtensa_view, REGSET_TIE,
				   0, sizeof(elf_xtregs_t), uregs);
}

static int ptrace_setxregs(struct task_struct *child, void __user *uregs)
{
	return copy_regset_from_user(child, &user_xtensa_view, REGSET_TIE,
				     0, sizeof(elf_xtregs_t), uregs);
}
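
/*
 * Illustrative sketch, not part of this file's build: ptrace_peekusr() and
 * ptrace_pokeusr() below implement PTRACE_PEEKUSR/PTRACE_POKEUSR for the
 * xtensa register numbers from <asm/ptrace.h> (userspace, through the glibc
 * ptrace() wrapper; 'pid' and 'new_pc' are placeholders and the tracee is
 * assumed to be stopped):
 *
 *	long pc = ptrace(PTRACE_PEEKUSER, pid, REG_PC, 0);
 *	long a2 = ptrace(PTRACE_PEEKUSER, pid, REG_A_BASE + 2, 0);
 *
 *	ptrace(PTRACE_POKEUSER, pid, REG_PC, new_pc);
 */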

static int ptrace_peekusr(struct task_struct *child, long regno,
			  long __user *ret)
{
	struct pt_regs *regs;
	unsigned long tmp;

	regs = task_pt_regs(child);
	tmp = 0;	/* Default return value. */

	switch (regno) {
	case REG_AR_BASE ... REG_AR_BASE + XCHAL_NUM_AREGS - 1:
		tmp = regs->areg[regno - REG_AR_BASE];
		break;

	case REG_A_BASE ... REG_A_BASE + 15:
		tmp = regs->areg[regno - REG_A_BASE];
		break;

	case REG_PC:
		tmp = regs->pc;
		break;

	case REG_PS:
		/* Note: PS.EXCM is not set while user task is running;
		 * its being set in regs is for exception handling
		 * convenience.
		 */
		tmp = (regs->ps & ~(1 << PS_EXCM_BIT));
		break;

	case REG_WB:
		break;		/* tmp = 0 */

	case REG_WS:
	{
		unsigned long wb = regs->windowbase;
		unsigned long ws = regs->windowstart;
		tmp = ((ws >> wb) | (ws << (WSBITS - wb))) &
			((1 << WSBITS) - 1);
		break;
	}
	case REG_LBEG:
		tmp = regs->lbeg;
		break;

	case REG_LEND:
		tmp = regs->lend;
		break;

	case REG_LCOUNT:
		tmp = regs->lcount;
		break;

	case REG_SAR:
		tmp = regs->sar;
		break;

	case SYSCALL_NR:
		tmp = regs->syscall;
		break;

	default:
		return -EIO;
	}
	return put_user(tmp, ret);
}

static int ptrace_pokeusr(struct task_struct *child, long regno, long val)
{
	struct pt_regs *regs;
	regs = task_pt_regs(child);

	switch (regno) {
	case REG_AR_BASE ... REG_AR_BASE + XCHAL_NUM_AREGS - 1:
		regs->areg[regno - REG_AR_BASE] = val;
		break;

	case REG_A_BASE ... REG_A_BASE + 15:
		regs->areg[regno - REG_A_BASE] = val;
		break;

	case REG_PC:
		regs->pc = val;
		break;

	case SYSCALL_NR:
		regs->syscall = val;
		break;

	default:
		return -EIO;
	}
	return 0;
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT
static void ptrace_hbptriggered(struct perf_event *bp,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	int i;
	struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);

	if (bp->attr.bp_type & HW_BREAKPOINT_X) {
		for (i = 0; i < XCHAL_NUM_IBREAK; ++i)
			if (current->thread.ptrace_bp[i] == bp)
				break;
		i <<= 1;
	} else {
		for (i = 0; i < XCHAL_NUM_DBREAK; ++i)
			if (current->thread.ptrace_wp[i] == bp)
				break;
		i = (i << 1) | 1;
	}

	force_sig_ptrace_errno_trap(i, (void __user *)bkpt->address);
}

static struct perf_event *ptrace_hbp_create(struct task_struct *tsk, int type)
{
	struct perf_event_attr attr;

	ptrace_breakpoint_init(&attr);

	/* Initialise fields to sane defaults. */
	attr.bp_addr = 0;
	attr.bp_len = 1;
	attr.bp_type = type;
	attr.disabled = 1;

	return register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL,
					   tsk);
}

/*
 * Address bit 0 chooses the instruction (0) or data (1) break register; bits
 * 31..1 are the register number.
 * Both PTRACE_GETHBPREGS and PTRACE_SETHBPREGS transfer two 32-bit words:
 * address (0) and control (1).
 * The instruction breakpoint control word is 0 to clear a breakpoint and 1 to
 * set one.
 * The data breakpoint control word bit 31 is 'trigger on store', bit 30 is
 * 'trigger on load', and bits 29..0 are the length. Length 0 is used to clear
 * a breakpoint. To set a breakpoint, the length must be a power of 2 in the
 * range 1..64 and the address must be length-aligned.
 */

static long ptrace_gethbpregs(struct task_struct *child, long addr,
			      long __user *datap)
{
	struct perf_event *bp;
	u32 user_data[2] = {0};
	bool dbreak = addr & 1;
	unsigned idx = addr >> 1;

	if ((!dbreak && idx >= XCHAL_NUM_IBREAK) ||
	    (dbreak && idx >= XCHAL_NUM_DBREAK))
		return -EINVAL;

	if (dbreak)
		bp = child->thread.ptrace_wp[idx];
	else
		bp = child->thread.ptrace_bp[idx];

	if (bp) {
		user_data[0] = bp->attr.bp_addr;
		user_data[1] = bp->attr.disabled ? 0 : bp->attr.bp_len;
		if (dbreak) {
			if (bp->attr.bp_type & HW_BREAKPOINT_R)
				user_data[1] |= DBREAKC_LOAD_MASK;
			if (bp->attr.bp_type & HW_BREAKPOINT_W)
				user_data[1] |= DBREAKC_STOR_MASK;
		}
	}

	if (copy_to_user(datap, user_data, sizeof(user_data)))
		return -EFAULT;

	return 0;
}

static long ptrace_sethbpregs(struct task_struct *child, long addr,
			      long __user *datap)
{
	struct perf_event *bp;
	struct perf_event_attr attr;
	u32 user_data[2];
	bool dbreak = addr & 1;
	unsigned idx = addr >> 1;
	int bp_type = 0;

	if ((!dbreak && idx >= XCHAL_NUM_IBREAK) ||
	    (dbreak && idx >= XCHAL_NUM_DBREAK))
		return -EINVAL;

	if (copy_from_user(user_data, datap, sizeof(user_data)))
		return -EFAULT;

	if (dbreak) {
		bp = child->thread.ptrace_wp[idx];
		if (user_data[1] & DBREAKC_LOAD_MASK)
			bp_type |= HW_BREAKPOINT_R;
		if (user_data[1] & DBREAKC_STOR_MASK)
			bp_type |= HW_BREAKPOINT_W;
	} else {
		bp = child->thread.ptrace_bp[idx];
		bp_type = HW_BREAKPOINT_X;
	}

	if (!bp) {
		bp = ptrace_hbp_create(child,
				       bp_type ? bp_type : HW_BREAKPOINT_RW);
		if (IS_ERR(bp))
			return PTR_ERR(bp);
		if (dbreak)
			child->thread.ptrace_wp[idx] = bp;
		else
			child->thread.ptrace_bp[idx] = bp;
	}

	attr = bp->attr;
	attr.bp_addr = user_data[0];
	attr.bp_len = user_data[1] & ~(DBREAKC_LOAD_MASK | DBREAKC_STOR_MASK);
	attr.bp_type = bp_type;
	attr.disabled = !attr.bp_len;

	return modify_user_hw_breakpoint(bp, &attr);
}
#endif
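
/*
 * Illustrative sketch, not part of this file's build: using the word layout
 * described in the comment above ptrace_gethbpregs(), a debugger could arm
 * data break (watchpoint) register 0 to trigger on 4-byte stores to 'addr'
 * and later clear it (userspace; 'pid' and 'addr' are placeholders,
 * PTRACE_SETHBPREGS comes from <asm/ptrace.h>):
 *
 *	uint32_t wp[2];
 *
 *	wp[0] = addr;			// must be aligned to the length (4)
 *	wp[1] = (1u << 31) | 4;		// bit 31: trigger on store, length 4
 *	ptrace(PTRACE_SETHBPREGS, pid, (0 << 1) | 1, wp);
 *
 *	wp[1] = 0;			// length 0 clears the breakpoint
 *	ptrace(PTRACE_SETHBPREGS, pid, (0 << 1) | 1, wp);
 */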

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	int ret = -EPERM;
	void __user *datap = (void __user *) data;

	switch (request) {
	case PTRACE_PEEKUSR:	/* read register specified by addr. */
		ret = ptrace_peekusr(child, addr, datap);
		break;

	case PTRACE_POKEUSR:	/* write register specified by addr. */
		ret = ptrace_pokeusr(child, addr, data);
		break;

	case PTRACE_GETREGS:
		ret = ptrace_getregs(child, datap);
		break;

	case PTRACE_SETREGS:
		ret = ptrace_setregs(child, datap);
		break;

	case PTRACE_GETXTREGS:
		ret = ptrace_getxregs(child, datap);
		break;

	case PTRACE_SETXTREGS:
		ret = ptrace_setxregs(child, datap);
		break;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	case PTRACE_GETHBPREGS:
		ret = ptrace_gethbpregs(child, addr, datap);
		break;

	case PTRACE_SETHBPREGS:
		ret = ptrace_sethbpregs(child, addr, datap);
		break;
#endif
	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}

void do_syscall_trace_leave(struct pt_regs *regs);
int do_syscall_trace_enter(struct pt_regs *regs)
{
	if (regs->syscall == NO_SYSCALL)
		regs->areg[2] = -ENOSYS;

	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
	    tracehook_report_syscall_entry(regs)) {
		regs->areg[2] = -ENOSYS;
		regs->syscall = NO_SYSCALL;
		return 0;
	}

	if (regs->syscall == NO_SYSCALL ||
	    secure_computing() == -1) {
		do_syscall_trace_leave(regs);
		return 0;
	}

	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_enter(regs, syscall_get_nr(current, regs));

	audit_syscall_entry(regs->syscall, regs->areg[6],
			    regs->areg[3], regs->areg[4],
			    regs->areg[5]);
	return 1;
}

void do_syscall_trace_leave(struct pt_regs *regs)
{
	int step;

	audit_syscall_exit(regs);

	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_exit(regs, regs_return_value(regs));

	step = test_thread_flag(TIF_SINGLESTEP);

	if (step || test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, step);
}