/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1992 Ross Biro
 * Copyright (C) Linus Torvalds
 * Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle
 * Copyright (C) 1996 David S. Miller
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 1999 MIPS Technologies, Inc.
 * Copyright (C) 2000 Ulf Carlsson
 *
 * At this time Linux/MIPS64 only supports syscall tracing, even for 32-bit
 * binaries.
 */
#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/elf.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/regset.h>
#include <linux/smp.h>
#include <linux/security.h>
#include <linux/stddef.h>
#include <linux/tracehook.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/ftrace.h>

#include <asm/byteorder.h>
#include <asm/cpu.h>
#include <asm/cpu-info.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/syscall.h>
#include <linux/uaccess.h>
#include <asm/bootinfo.h>
#include <asm/reg.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure single step bits etc are not set.
 */
void ptrace_disable(struct task_struct *child)
{
	/* Don't load the watchpoint registers for the ex-child. */
	clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
}

/*
 * Read a general register set. We always use the 64-bit format, even
 * for 32-bit kernels and for 32-bit processes on a 64-bit kernel.
 * Registers are sign extended to fill the available space.
 */
int ptrace_getregs(struct task_struct *child, struct user_pt_regs __user *data)
{
	struct pt_regs *regs;
	int i;

	if (!access_ok(data, 38 * 8))
		return -EIO;

	regs = task_pt_regs(child);

	for (i = 0; i < 32; i++)
		__put_user((long)regs->regs[i], (__s64 __user *)&data->regs[i]);
	__put_user((long)regs->lo, (__s64 __user *)&data->lo);
	__put_user((long)regs->hi, (__s64 __user *)&data->hi);
	__put_user((long)regs->cp0_epc, (__s64 __user *)&data->cp0_epc);
	__put_user((long)regs->cp0_badvaddr, (__s64 __user *)&data->cp0_badvaddr);
	__put_user((long)regs->cp0_status, (__s64 __user *)&data->cp0_status);
	__put_user((long)regs->cp0_cause, (__s64 __user *)&data->cp0_cause);

	return 0;
}

/*
 * Write a general register set. As for PTRACE_GETREGS, we always use
 * the 64-bit format. On a 32-bit kernel only the lower order half
 * (according to endianness) will be used.
 */
int ptrace_setregs(struct task_struct *child, struct user_pt_regs __user *data)
{
	struct pt_regs *regs;
	int i;

	if (!access_ok(data, 38 * 8))
		return -EIO;

	regs = task_pt_regs(child);

	for (i = 0; i < 32; i++)
		__get_user(regs->regs[i], (__s64 __user *)&data->regs[i]);
	__get_user(regs->lo, (__s64 __user *)&data->lo);
	__get_user(regs->hi, (__s64 __user *)&data->hi);
	__get_user(regs->cp0_epc, (__s64 __user *)&data->cp0_epc);

	/* badvaddr, status, and cause may not be written. */

	/* System call number may have been changed */
	mips_syscall_update_nr(child, regs);

	return 0;
}

int ptrace_get_watch_regs(struct task_struct *child,
			  struct pt_watch_regs __user *addr)
{
	enum pt_watch_style style;
	int i;

	if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
		return -EIO;
	if (!access_ok(addr, sizeof(struct pt_watch_regs)))
		return -EIO;

#ifdef CONFIG_32BIT
	style = pt_watch_style_mips32;
#define WATCH_STYLE mips32
#else
	style = pt_watch_style_mips64;
#define WATCH_STYLE mips64
#endif

	__put_user(style, &addr->style);
	__put_user(boot_cpu_data.watch_reg_use_cnt,
		   &addr->WATCH_STYLE.num_valid);
	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
		__put_user(child->thread.watch.mips3264.watchlo[i],
			   &addr->WATCH_STYLE.watchlo[i]);
		__put_user(child->thread.watch.mips3264.watchhi[i] &
				(MIPS_WATCHHI_MASK | MIPS_WATCHHI_IRW),
			   &addr->WATCH_STYLE.watchhi[i]);
		__put_user(boot_cpu_data.watch_reg_masks[i],
			   &addr->WATCH_STYLE.watch_masks[i]);
	}
	for (; i < 8; i++) {
		__put_user(0, &addr->WATCH_STYLE.watchlo[i]);
		__put_user(0, &addr->WATCH_STYLE.watchhi[i]);
		__put_user(0, &addr->WATCH_STYLE.watch_masks[i]);
	}

	return 0;
}

int ptrace_set_watch_regs(struct task_struct *child,
			  struct pt_watch_regs __user *addr)
{
	int i;
	int watch_active = 0;
	unsigned long lt[NUM_WATCH_REGS];
	u16 ht[NUM_WATCH_REGS];

	if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
		return -EIO;
	if (!access_ok(addr, sizeof(struct pt_watch_regs)))
		return -EIO;
	/* Check the values. */
	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
		__get_user(lt[i], &addr->WATCH_STYLE.watchlo[i]);
#ifdef CONFIG_32BIT
		if (lt[i] & __UA_LIMIT)
			return -EINVAL;
#else
		if (test_tsk_thread_flag(child, TIF_32BIT_ADDR)) {
			if (lt[i] & 0xffffffff80000000UL)
				return -EINVAL;
		} else {
			if (lt[i] & __UA_LIMIT)
				return -EINVAL;
		}
#endif
		__get_user(ht[i], &addr->WATCH_STYLE.watchhi[i]);
		if (ht[i] & ~MIPS_WATCHHI_MASK)
			return -EINVAL;
	}
	/* Install them. */
	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
		if (lt[i] & MIPS_WATCHLO_IRW)
			watch_active = 1;
		child->thread.watch.mips3264.watchlo[i] = lt[i];
		/* Set the G bit. */
		child->thread.watch.mips3264.watchhi[i] = ht[i];
	}

	if (watch_active)
		set_tsk_thread_flag(child, TIF_LOAD_WATCH);
	else
		clear_tsk_thread_flag(child, TIF_LOAD_WATCH);

	return 0;
}

/* regset get/set implementations */

#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)

static int gpr32_get(struct task_struct *target,
		     const struct user_regset *regset,
		     struct membuf to)
{
	struct pt_regs *regs = task_pt_regs(target);
	u32 uregs[ELF_NGREG] = {};

	mips_dump_regs32(uregs, regs);
	return membuf_write(&to, uregs, sizeof(uregs));
}

static int gpr32_set(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	u32 uregs[ELF_NGREG];
	unsigned start, num_regs, i;
	int err;

	start = pos / sizeof(u32);
	num_regs = count / sizeof(u32);

	if (start + num_regs > ELF_NGREG)
		return -EIO;

	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
				 sizeof(uregs));
	if (err)
		return err;

	for (i = start; i < num_regs; i++) {
		/*
		 * Cast all values to signed here so that if this is a 64-bit
		 * kernel, the supplied 32-bit values will be sign extended.
		 */
		switch (i) {
		case MIPS32_EF_R1 ... MIPS32_EF_R25:
			/* k0/k1 are ignored. */
		case MIPS32_EF_R28 ... MIPS32_EF_R31:
			regs->regs[i - MIPS32_EF_R0] = (s32)uregs[i];
			break;
		case MIPS32_EF_LO:
			regs->lo = (s32)uregs[i];
			break;
		case MIPS32_EF_HI:
			regs->hi = (s32)uregs[i];
			break;
		case MIPS32_EF_CP0_EPC:
			regs->cp0_epc = (s32)uregs[i];
			break;
		}
	}

	/* System call number may have been changed */
	mips_syscall_update_nr(target, regs);

	return 0;
}

#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */

#ifdef CONFIG_64BIT

static int gpr64_get(struct task_struct *target,
		     const struct user_regset *regset,
		     struct membuf to)
{
	struct pt_regs *regs = task_pt_regs(target);
	u64 uregs[ELF_NGREG] = {};

	mips_dump_regs64(uregs, regs);
	return membuf_write(&to, uregs, sizeof(uregs));
}

static int gpr64_set(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	u64 uregs[ELF_NGREG];
	unsigned start, num_regs, i;
	int err;

	start = pos / sizeof(u64);
	num_regs = count / sizeof(u64);

	if (start + num_regs > ELF_NGREG)
		return -EIO;

	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
				 sizeof(uregs));
	if (err)
		return err;

	for (i = start; i < num_regs; i++) {
		switch (i) {
		case MIPS64_EF_R1 ... MIPS64_EF_R25:
			/* k0/k1 are ignored. */
		case MIPS64_EF_R28 ... MIPS64_EF_R31:
			regs->regs[i - MIPS64_EF_R0] = uregs[i];
			break;
		case MIPS64_EF_LO:
			regs->lo = uregs[i];
			break;
		case MIPS64_EF_HI:
			regs->hi = uregs[i];
			break;
		case MIPS64_EF_CP0_EPC:
			regs->cp0_epc = uregs[i];
			break;
		}
	}

	/* System call number may have been changed */
	mips_syscall_update_nr(target, regs);

	return 0;
}

#endif /* CONFIG_64BIT */


#ifdef CONFIG_MIPS_FP_SUPPORT

/*
 * Poke at FCSR according to its mask. Set the Cause bits even
 * if a corresponding Enable bit is set. This will be noticed at
 * the time the thread is switched to and SIGFPE thrown accordingly.
 */
static void ptrace_setfcr31(struct task_struct *child, u32 value)
{
	u32 fcr31;
	u32 mask;

	fcr31 = child->thread.fpu.fcr31;
	mask = boot_cpu_data.fpu_msk31;
	child->thread.fpu.fcr31 = (value & ~mask) | (fcr31 & mask);
}

int ptrace_getfpregs(struct task_struct *child, __u32 __user *data)
{
	int i;

	if (!access_ok(data, 33 * 8))
		return -EIO;

	if (tsk_used_math(child)) {
		union fpureg *fregs = get_fpu_regs(child);
		for (i = 0; i < 32; i++)
			__put_user(get_fpr64(&fregs[i], 0),
				   i + (__u64 __user *)data);
	} else {
		for (i = 0; i < 32; i++)
			__put_user((__u64) -1, i + (__u64 __user *) data);
	}

	__put_user(child->thread.fpu.fcr31, data + 64);
	__put_user(boot_cpu_data.fpu_id, data + 65);

	return 0;
}

int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
{
	union fpureg *fregs;
	u64 fpr_val;
	u32 value;
	int i;

	if (!access_ok(data, 33 * 8))
		return -EIO;

	init_fp_ctx(child);
	fregs = get_fpu_regs(child);

	for (i = 0; i < 32; i++) {
		__get_user(fpr_val, i + (__u64 __user *)data);
		set_fpr64(&fregs[i], 0, fpr_val);
	}

	__get_user(value, data + 64);
	ptrace_setfcr31(child, value);

	/* FIR may not be written. */

	return 0;
}

/*
 * Copy the floating-point context to the supplied NT_PRFPREG buffer,
 * !CONFIG_CPU_HAS_MSA variant. FP context's general register slots
 * correspond 1:1 to buffer slots. Only general registers are copied.
 */
static void fpr_get_fpa(struct task_struct *target,
			struct membuf *to)
{
	membuf_write(to, &target->thread.fpu,
		     NUM_FPU_REGS * sizeof(elf_fpreg_t));
}

/*
 * Copy the floating-point context to the supplied NT_PRFPREG buffer,
 * CONFIG_CPU_HAS_MSA variant. Only lower 64 bits of FP context's
 * general register slots are copied to buffer slots. Only general
 * registers are copied.
 */
static void fpr_get_msa(struct task_struct *target, struct membuf *to)
{
	unsigned int i;

	BUILD_BUG_ON(sizeof(u64) != sizeof(elf_fpreg_t));
	for (i = 0; i < NUM_FPU_REGS; i++)
		membuf_store(to, get_fpr64(&target->thread.fpu.fpr[i], 0));
}

/*
 * Copy the floating-point context to the supplied NT_PRFPREG buffer.
 * Choose the appropriate helper for general registers, and then copy
 * the FCSR and FIR registers separately.
 */
static int fpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
		fpr_get_fpa(target, &to);
	else
		fpr_get_msa(target, &to);

	membuf_write(&to, &target->thread.fpu.fcr31, sizeof(u32));
	membuf_write(&to, &boot_cpu_data.fpu_id, sizeof(u32));
	return 0;
}

/*
 * Copy the supplied NT_PRFPREG buffer to the floating-point context,
 * !CONFIG_CPU_HAS_MSA variant. Buffer slots correspond 1:1 to FP
 * context's general register slots. Only general registers are copied.
 */
static int fpr_set_fpa(struct task_struct *target,
		       unsigned int *pos, unsigned int *count,
		       const void **kbuf, const void __user **ubuf)
{
	return user_regset_copyin(pos, count, kbuf, ubuf,
				  &target->thread.fpu,
				  0, NUM_FPU_REGS * sizeof(elf_fpreg_t));
}

/*
 * Copy the supplied NT_PRFPREG buffer to the floating-point context,
 * CONFIG_CPU_HAS_MSA variant. Buffer slots are copied to lower 64
 * bits only of FP context's general register slots. Only general
 * registers are copied.
 */
static int fpr_set_msa(struct task_struct *target,
		       unsigned int *pos, unsigned int *count,
		       const void **kbuf, const void __user **ubuf)
{
	unsigned int i;
	u64 fpr_val;
	int err;

	BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
	for (i = 0; i < NUM_FPU_REGS && *count > 0; i++) {
		err = user_regset_copyin(pos, count, kbuf, ubuf,
					 &fpr_val, i * sizeof(elf_fpreg_t),
					 (i + 1) * sizeof(elf_fpreg_t));
		if (err)
			return err;
		set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val);
	}

	return 0;
}

/*
 * Copy the supplied NT_PRFPREG buffer to the floating-point context.
 * Choose the appropriate helper for general registers, and then copy
 * the FCSR register separately. Ignore the incoming FIR register
 * contents though, as the register is read-only.
 *
 * We optimize for the case where `count % sizeof(elf_fpreg_t) == 0',
 * which is supposed to have been guaranteed by the kernel before
 * calling us, e.g. in `ptrace_regset'. We enforce that requirement,
 * so that we can safely avoid preinitializing temporaries for
 * partial register writes.
 */
static int fpr_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
	const int fir_pos = fcr31_pos + sizeof(u32);
	u32 fcr31;
	int err;

	BUG_ON(count % sizeof(elf_fpreg_t));

	if (pos + count > sizeof(elf_fpregset_t))
		return -EIO;

	init_fp_ctx(target);

	if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
		err = fpr_set_fpa(target, &pos, &count, &kbuf, &ubuf);
	else
		err = fpr_set_msa(target, &pos, &count, &kbuf, &ubuf);
	if (err)
		return err;

	if (count > 0) {
		err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &fcr31,
					 fcr31_pos, fcr31_pos + sizeof(u32));
		if (err)
			return err;

		ptrace_setfcr31(target, fcr31);
	}

	if (count > 0)
		err = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						fir_pos,
						fir_pos + sizeof(u32));

	return err;
}

/* Copy the FP mode setting to the supplied NT_MIPS_FP_MODE buffer. */
static int fp_mode_get(struct task_struct *target,
		       const struct user_regset *regset,
		       struct membuf to)
{
	return membuf_store(&to, (int)mips_get_process_fp_mode(target));
}

/*
 * Copy the supplied NT_MIPS_FP_MODE buffer to the FP mode setting.
 *
 * We optimize for the case where `count % sizeof(int) == 0', which
 * is supposed to have been guaranteed by the kernel before calling
 * us, e.g. in `ptrace_regset'. We enforce that requirement, so
 * that we can safely avoid preinitializing temporaries for partial
 * mode writes.
 */
static int fp_mode_set(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       const void *kbuf, const void __user *ubuf)
{
	int fp_mode;
	int err;

	BUG_ON(count % sizeof(int));

	if (pos + count > sizeof(fp_mode))
		return -EIO;

	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fp_mode, 0,
				 sizeof(fp_mode));
	if (err)
		return err;

	if (count > 0)
		err = mips_set_process_fp_mode(target, fp_mode);

	return err;
}

#endif /* CONFIG_MIPS_FP_SUPPORT */

#ifdef CONFIG_CPU_HAS_MSA

struct msa_control_regs {
	unsigned int fir;
	unsigned int fcsr;
	unsigned int msair;
	unsigned int msacsr;
};

static void copy_pad_fprs(struct task_struct *target,
			  const struct user_regset *regset,
			  struct membuf *to,
			  unsigned int live_sz)
{
	int i, j;
	unsigned long long fill = ~0ull;
	unsigned int cp_sz, pad_sz;

	cp_sz = min(regset->size, live_sz);
	pad_sz = regset->size - cp_sz;
	WARN_ON(pad_sz % sizeof(fill));

	for (i = 0; i < NUM_FPU_REGS; i++) {
		membuf_write(to, &target->thread.fpu.fpr[i], cp_sz);
		for (j = 0; j < (pad_sz / sizeof(fill)); j++)
			membuf_store(to, fill);
	}
}

static int msa_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	const unsigned int wr_size = NUM_FPU_REGS * regset->size;
	const struct msa_control_regs ctrl_regs = {
		.fir = boot_cpu_data.fpu_id,
		.fcsr = target->thread.fpu.fcr31,
		.msair = boot_cpu_data.msa_id,
		.msacsr = target->thread.fpu.msacsr,
	};

	if (!tsk_used_math(target)) {
		/* The task hasn't used FP or MSA, fill with 0xff */
		copy_pad_fprs(target, regset, &to, 0);
	} else if (!test_tsk_thread_flag(target, TIF_MSA_CTX_LIVE)) {
		/* Copy scalar FP context, fill the rest with 0xff */
		copy_pad_fprs(target, regset, &to, 8);
	} else if (sizeof(target->thread.fpu.fpr[0]) == regset->size) {
		/* Trivially copy the vector registers */
		membuf_write(&to, &target->thread.fpu.fpr, wr_size);
	} else {
		/* Copy as much context as possible, fill the rest with 0xff */
		copy_pad_fprs(target, regset, &to,
			      sizeof(target->thread.fpu.fpr[0]));
	}

	return membuf_write(&to, &ctrl_regs, sizeof(ctrl_regs));
}

static int msa_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	const unsigned int wr_size = NUM_FPU_REGS * regset->size;
	struct msa_control_regs ctrl_regs;
	unsigned int cp_sz;
	int i, err, start;

	init_fp_ctx(target);

	if (sizeof(target->thread.fpu.fpr[0]) == regset->size) {
		/* Trivially copy the vector registers */
		err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &target->thread.fpu.fpr,
					 0, wr_size);
	} else {
		/* Copy as much context as possible */
		cp_sz = min_t(unsigned int, regset->size,
			      sizeof(target->thread.fpu.fpr[0]));

		i = start = err = 0;
		for (; i < NUM_FPU_REGS; i++, start += regset->size) {
			err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
						  &target->thread.fpu.fpr[i],
						  start, start + cp_sz);
		}
	}

	if (!err)
		err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl_regs,
					 wr_size, wr_size + sizeof(ctrl_regs));
	if (!err) {
		target->thread.fpu.fcr31 = ctrl_regs.fcsr & ~FPU_CSR_ALL_X;
		target->thread.fpu.msacsr = ctrl_regs.msacsr & ~MSA_CSR_CAUSEF;
	}

	return err;
}

#endif /* CONFIG_CPU_HAS_MSA */

#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)

/*
 * Copy the DSP context to the supplied 32-bit NT_MIPS_DSP buffer.
 */
static int dsp32_get(struct task_struct *target,
		     const struct user_regset *regset,
		     struct membuf to)
{
	u32 dspregs[NUM_DSP_REGS + 1];
	unsigned int i;

	BUG_ON(to.left % sizeof(u32));

	if (!cpu_has_dsp)
		return -EIO;

	for (i = 0; i < NUM_DSP_REGS; i++)
		dspregs[i] = target->thread.dsp.dspr[i];
	dspregs[NUM_DSP_REGS] = target->thread.dsp.dspcontrol;
	return membuf_write(&to, dspregs, sizeof(dspregs));
}

/*
 * Copy the supplied 32-bit NT_MIPS_DSP buffer to the DSP context.
 */
static int dsp32_set(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf)
{
	unsigned int start, num_regs, i;
	u32 dspregs[NUM_DSP_REGS + 1];
	int err;

	BUG_ON(count % sizeof(u32));

	if (!cpu_has_dsp)
		return -EIO;

	start = pos / sizeof(u32);
	num_regs = count / sizeof(u32);

	if (start + num_regs > NUM_DSP_REGS + 1)
		return -EIO;

	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, dspregs, 0,
				 sizeof(dspregs));
	if (err)
		return err;

	for (i = start; i < num_regs; i++)
		switch (i) {
		case 0 ... NUM_DSP_REGS - 1:
			target->thread.dsp.dspr[i] = (s32)dspregs[i];
			break;
		case NUM_DSP_REGS:
			target->thread.dsp.dspcontrol = (s32)dspregs[i];
			break;
		}

	return 0;
}

#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */

#ifdef CONFIG_64BIT

/*
 * Copy the DSP context to the supplied 64-bit NT_MIPS_DSP buffer.
 */
static int dsp64_get(struct task_struct *target,
		     const struct user_regset *regset,
		     struct membuf to)
{
	u64 dspregs[NUM_DSP_REGS + 1];
	unsigned int i;

	BUG_ON(to.left % sizeof(u64));

	if (!cpu_has_dsp)
		return -EIO;

	for (i = 0; i < NUM_DSP_REGS; i++)
		dspregs[i] = target->thread.dsp.dspr[i];
	dspregs[NUM_DSP_REGS] = target->thread.dsp.dspcontrol;
	return membuf_write(&to, dspregs, sizeof(dspregs));
}

/*
 * Copy the supplied 64-bit NT_MIPS_DSP buffer to the DSP context.
 */
static int dsp64_set(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf)
{
	unsigned int start, num_regs, i;
	u64 dspregs[NUM_DSP_REGS + 1];
	int err;

	BUG_ON(count % sizeof(u64));

	if (!cpu_has_dsp)
		return -EIO;

	start = pos / sizeof(u64);
	num_regs = count / sizeof(u64);

	if (start + num_regs > NUM_DSP_REGS + 1)
		return -EIO;

	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, dspregs, 0,
				 sizeof(dspregs));
	if (err)
		return err;

	for (i = start; i < num_regs; i++)
		switch (i) {
		case 0 ... NUM_DSP_REGS - 1:
			target->thread.dsp.dspr[i] = dspregs[i];
			break;
		case NUM_DSP_REGS:
			target->thread.dsp.dspcontrol = dspregs[i];
			break;
		}

	return 0;
}

#endif /* CONFIG_64BIT */

/*
 * Determine whether the DSP context is present.
 */
static int dsp_active(struct task_struct *target,
		      const struct user_regset *regset)
{
	return cpu_has_dsp ? NUM_DSP_REGS + 1 : -ENODEV;
}

enum mips_regset {
	REGSET_GPR,
	REGSET_DSP,
#ifdef CONFIG_MIPS_FP_SUPPORT
	REGSET_FPR,
	REGSET_FP_MODE,
#endif
#ifdef CONFIG_CPU_HAS_MSA
	REGSET_MSA,
#endif
};

struct pt_regs_offset {
	const char *name;
	int offset;
};

#define REG_OFFSET_NAME(reg, r) {				\
	.name = #reg,						\
	.offset = offsetof(struct pt_regs, r)			\
}

#define REG_OFFSET_END {					\
	.name = NULL,						\
	.offset = 0						\
}

static const struct pt_regs_offset regoffset_table[] = {
	REG_OFFSET_NAME(r0, regs[0]),
	REG_OFFSET_NAME(r1, regs[1]),
	REG_OFFSET_NAME(r2, regs[2]),
	REG_OFFSET_NAME(r3, regs[3]),
	REG_OFFSET_NAME(r4, regs[4]),
	REG_OFFSET_NAME(r5, regs[5]),
	REG_OFFSET_NAME(r6, regs[6]),
	REG_OFFSET_NAME(r7, regs[7]),
	REG_OFFSET_NAME(r8, regs[8]),
	REG_OFFSET_NAME(r9, regs[9]),
	REG_OFFSET_NAME(r10, regs[10]),
	REG_OFFSET_NAME(r11, regs[11]),
	REG_OFFSET_NAME(r12, regs[12]),
	REG_OFFSET_NAME(r13, regs[13]),
	REG_OFFSET_NAME(r14, regs[14]),
	REG_OFFSET_NAME(r15, regs[15]),
	REG_OFFSET_NAME(r16, regs[16]),
	REG_OFFSET_NAME(r17, regs[17]),
	REG_OFFSET_NAME(r18, regs[18]),
	REG_OFFSET_NAME(r19, regs[19]),
	REG_OFFSET_NAME(r20, regs[20]),
	REG_OFFSET_NAME(r21, regs[21]),
	REG_OFFSET_NAME(r22, regs[22]),
	REG_OFFSET_NAME(r23, regs[23]),
	REG_OFFSET_NAME(r24, regs[24]),
	REG_OFFSET_NAME(r25, regs[25]),
	REG_OFFSET_NAME(r26, regs[26]),
	REG_OFFSET_NAME(r27, regs[27]),
	REG_OFFSET_NAME(r28, regs[28]),
	REG_OFFSET_NAME(r29, regs[29]),
	REG_OFFSET_NAME(r30, regs[30]),
	REG_OFFSET_NAME(r31, regs[31]),
	REG_OFFSET_NAME(c0_status, cp0_status),
	REG_OFFSET_NAME(hi, hi),
	REG_OFFSET_NAME(lo, lo),
#ifdef CONFIG_CPU_HAS_SMARTMIPS
	REG_OFFSET_NAME(acx, acx),
#endif
	REG_OFFSET_NAME(c0_badvaddr, cp0_badvaddr),
	REG_OFFSET_NAME(c0_cause, cp0_cause),
	REG_OFFSET_NAME(c0_epc, cp0_epc),
#ifdef CONFIG_CPU_CAVIUM_OCTEON
	REG_OFFSET_NAME(mpl0, mpl[0]),
	REG_OFFSET_NAME(mpl1, mpl[1]),
	REG_OFFSET_NAME(mpl2, mpl[2]),
	REG_OFFSET_NAME(mtp0, mtp[0]),
	REG_OFFSET_NAME(mtp1, mtp[1]),
	REG_OFFSET_NAME(mtp2, mtp[2]),
#endif
	REG_OFFSET_END,
};

/**
 * regs_query_register_offset() - query register offset from its name
 * @name: the name of a register
 *
 * regs_query_register_offset() returns the offset of a register in struct
 * pt_regs from its name. If the name is invalid, this returns -EINVAL;
 */
int regs_query_register_offset(const char *name)
{
	const struct pt_regs_offset *roff;
	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (!strcmp(roff->name, name))
			return roff->offset;
	return -EINVAL;
}

#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)

static const struct user_regset mips_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = ELF_NGREG,
		.size = sizeof(unsigned int),
		.align = sizeof(unsigned int),
		.regset_get = gpr32_get,
		.set = gpr32_set,
	},
	[REGSET_DSP] = {
		.core_note_type = NT_MIPS_DSP,
		.n = NUM_DSP_REGS + 1,
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = dsp32_get,
		.set = dsp32_set,
		.active = dsp_active,
	},
#ifdef CONFIG_MIPS_FP_SUPPORT
	[REGSET_FPR] = {
		.core_note_type = NT_PRFPREG,
		.n = ELF_NFPREG,
		.size = sizeof(elf_fpreg_t),
		.align = sizeof(elf_fpreg_t),
		.regset_get = fpr_get,
		.set = fpr_set,
	},
	[REGSET_FP_MODE] = {
		.core_note_type = NT_MIPS_FP_MODE,
		.n = 1,
		.size = sizeof(int),
		.align = sizeof(int),
		.regset_get = fp_mode_get,
		.set = fp_mode_set,
	},
#endif
#ifdef CONFIG_CPU_HAS_MSA
	[REGSET_MSA] = {
		.core_note_type = NT_MIPS_MSA,
		.n = NUM_FPU_REGS + 1,
		.size = 16,
		.align = 16,
		.regset_get = msa_get,
		.set = msa_set,
	},
#endif
};

static const struct user_regset_view user_mips_view = {
	.name = "mips",
	.e_machine = ELF_ARCH,
	.ei_osabi = ELF_OSABI,
	.regsets = mips_regsets,
	.n = ARRAY_SIZE(mips_regsets),
};

#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */

#ifdef CONFIG_64BIT

static const struct user_regset mips64_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = ELF_NGREG,
		.size = sizeof(unsigned long),
		.align = sizeof(unsigned long),
		.regset_get = gpr64_get,
		.set = gpr64_set,
	},
	[REGSET_DSP] = {
		.core_note_type = NT_MIPS_DSP,
		.n = NUM_DSP_REGS + 1,
		.size = sizeof(u64),
		.align = sizeof(u64),
		.regset_get = dsp64_get,
		.set = dsp64_set,
		.active = dsp_active,
	},
#ifdef CONFIG_MIPS_FP_SUPPORT
	[REGSET_FP_MODE] = {
		.core_note_type = NT_MIPS_FP_MODE,
		.n = 1,
		.size = sizeof(int),
		.align = sizeof(int),
		.regset_get = fp_mode_get,
		.set = fp_mode_set,
	},
	[REGSET_FPR] = {
		.core_note_type = NT_PRFPREG,
		.n = ELF_NFPREG,
		.size = sizeof(elf_fpreg_t),
		.align = sizeof(elf_fpreg_t),
		.regset_get = fpr_get,
		.set = fpr_set,
	},
#endif
#ifdef CONFIG_CPU_HAS_MSA
	[REGSET_MSA] = {
		.core_note_type = NT_MIPS_MSA,
		.n = NUM_FPU_REGS + 1,
		.size = 16,
		.align = 16,
		.regset_get = msa_get,
		.set = msa_set,
	},
#endif
};

static const struct user_regset_view user_mips64_view = {
	.name = "mips64",
	.e_machine = ELF_ARCH,
	.ei_osabi = ELF_OSABI,
	.regsets = mips64_regsets,
	.n = ARRAY_SIZE(mips64_regsets),
};

#ifdef CONFIG_MIPS32_N32

static const struct user_regset_view user_mipsn32_view = {
	.name = "mipsn32",
	.e_flags = EF_MIPS_ABI2,
	.e_machine = ELF_ARCH,
	.ei_osabi = ELF_OSABI,
	.regsets = mips64_regsets,
	.n = ARRAY_SIZE(mips64_regsets),
};

#endif /* CONFIG_MIPS32_N32 */

#endif /* CONFIG_64BIT */

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_32BIT
	return &user_mips_view;
#else
#ifdef CONFIG_MIPS32_O32
	if (test_tsk_thread_flag(task, TIF_32BIT_REGS))
		return &user_mips_view;
#endif
#ifdef CONFIG_MIPS32_N32
	if (test_tsk_thread_flag(task, TIF_32BIT_ADDR))
		return &user_mipsn32_view;
#endif
	return &user_mips64_view;
#endif
}

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	int ret;
	void __user *addrp = (void __user *) addr;
	void __user *datavp = (void __user *) data;
	unsigned long __user *datalp = (void __user *) data;

	switch (request) {
	/* when I and D space are separate, these will need to be fixed. */
	case PTRACE_PEEKTEXT: /* read word at location addr. */
	case PTRACE_PEEKDATA:
		ret = generic_ptrace_peekdata(child, addr, data);
		break;

	/* Read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		struct pt_regs *regs;
		unsigned long tmp = 0;

		regs = task_pt_regs(child);
		ret = 0;  /* Default return value. */

		switch (addr) {
		case 0 ... 31:
			tmp = regs->regs[addr];
			break;
#ifdef CONFIG_MIPS_FP_SUPPORT
		case FPR_BASE ... FPR_BASE + 31: {
			union fpureg *fregs;

			if (!tsk_used_math(child)) {
				/* FP not yet used */
				tmp = -1;
				break;
			}
			fregs = get_fpu_regs(child);

#ifdef CONFIG_32BIT
			if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
				/*
				 * The odd registers are actually the high
				 * order bits of the values stored in the even
				 * registers.
				 */
				tmp = get_fpr32(&fregs[(addr & ~1) - FPR_BASE],
						addr & 1);
				break;
			}
#endif
			tmp = get_fpr64(&fregs[addr - FPR_BASE], 0);
			break;
		}
		case FPC_CSR:
			tmp = child->thread.fpu.fcr31;
			break;
		case FPC_EIR:
			/* implementation / version register */
			tmp = boot_cpu_data.fpu_id;
			break;
#endif
		case PC:
			tmp = regs->cp0_epc;
			break;
		case CAUSE:
			tmp = regs->cp0_cause;
			break;
		case BADVADDR:
			tmp = regs->cp0_badvaddr;
			break;
		case MMHI:
			tmp = regs->hi;
			break;
		case MMLO:
			tmp = regs->lo;
			break;
#ifdef CONFIG_CPU_HAS_SMARTMIPS
		case ACX:
			tmp = regs->acx;
			break;
#endif
		case DSP_BASE ... DSP_BASE + 5: {
			dspreg_t *dregs;

			if (!cpu_has_dsp) {
				tmp = 0;
				ret = -EIO;
				goto out;
			}
			dregs = __get_dsp_regs(child);
			tmp = dregs[addr - DSP_BASE];
			break;
		}
		case DSP_CONTROL:
			if (!cpu_has_dsp) {
				tmp = 0;
				ret = -EIO;
				goto out;
			}
			tmp = child->thread.dsp.dspcontrol;
			break;
		default:
			tmp = 0;
			ret = -EIO;
			goto out;
		}
		ret = put_user(tmp, datalp);
		break;
	}

	/* when I and D space are separate, this will have to be fixed. */
	case PTRACE_POKETEXT: /* write the word at location addr. */
	case PTRACE_POKEDATA:
		ret = generic_ptrace_pokedata(child, addr, data);
		break;

	case PTRACE_POKEUSR: {
		struct pt_regs *regs;
		ret = 0;
		regs = task_pt_regs(child);

		switch (addr) {
		case 0 ... 31:
			regs->regs[addr] = data;
			/* System call number may have been changed */
			if (addr == 2)
				mips_syscall_update_nr(child, regs);
			else if (addr == 4 &&
				 mips_syscall_is_indirect(child, regs))
				mips_syscall_update_nr(child, regs);
			break;
#ifdef CONFIG_MIPS_FP_SUPPORT
		case FPR_BASE ... FPR_BASE + 31: {
			union fpureg *fregs = get_fpu_regs(child);

			init_fp_ctx(child);
#ifdef CONFIG_32BIT
			if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
				/*
				 * The odd registers are actually the high
				 * order bits of the values stored in the even
				 * registers.
				 */
				set_fpr32(&fregs[(addr & ~1) - FPR_BASE],
					  addr & 1, data);
				break;
			}
#endif
			set_fpr64(&fregs[addr - FPR_BASE], 0, data);
			break;
		}
		case FPC_CSR:
			init_fp_ctx(child);
			ptrace_setfcr31(child, data);
			break;
#endif
		case PC:
			regs->cp0_epc = data;
			break;
		case MMHI:
			regs->hi = data;
			break;
		case MMLO:
			regs->lo = data;
			break;
#ifdef CONFIG_CPU_HAS_SMARTMIPS
		case ACX:
			regs->acx = data;
			break;
#endif
		case DSP_BASE ... DSP_BASE + 5: {
			dspreg_t *dregs;

			if (!cpu_has_dsp) {
				ret = -EIO;
				break;
			}

			dregs = __get_dsp_regs(child);
			dregs[addr - DSP_BASE] = data;
			break;
		}
		case DSP_CONTROL:
			if (!cpu_has_dsp) {
				ret = -EIO;
				break;
			}
			child->thread.dsp.dspcontrol = data;
			break;
		default:
			/* The rest are not allowed. */
			ret = -EIO;
			break;
		}
		break;
	}

	case PTRACE_GETREGS:
		ret = ptrace_getregs(child, datavp);
		break;

	case PTRACE_SETREGS:
		ret = ptrace_setregs(child, datavp);
		break;

#ifdef CONFIG_MIPS_FP_SUPPORT
	case PTRACE_GETFPREGS:
		ret = ptrace_getfpregs(child, datavp);
		break;

	case PTRACE_SETFPREGS:
		ret = ptrace_setfpregs(child, datavp);
		break;
#endif
	case PTRACE_GET_THREAD_AREA:
		ret = put_user(task_thread_info(child)->tp_value, datalp);
		break;

	case PTRACE_GET_WATCH_REGS:
		ret = ptrace_get_watch_regs(child, addrp);
		break;

	case PTRACE_SET_WATCH_REGS:
		ret = ptrace_set_watch_regs(child, addrp);
		break;

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}
 out:
	return ret;
}

/*
 * Notification of system call entry/exit
 * - triggered by current->work.syscall_trace
 */
asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
{
	user_exit();

	current_thread_info()->syscall = syscall;

	if (test_thread_flag(TIF_SYSCALL_TRACE)) {
		if (tracehook_report_syscall_entry(regs))
			return -1;
		syscall = current_thread_info()->syscall;
	}

#ifdef CONFIG_SECCOMP
	if (unlikely(test_thread_flag(TIF_SECCOMP))) {
		int ret, i;
		struct seccomp_data sd;
		unsigned long args[6];

		sd.nr = syscall;
		sd.arch = syscall_get_arch(current);
		syscall_get_arguments(current, regs, args);
		for (i = 0; i < 6; i++)
			sd.args[i] = args[i];
		sd.instruction_pointer = KSTK_EIP(current);

		ret = __secure_computing(&sd);
		if (ret == -1)
			return ret;
		syscall = current_thread_info()->syscall;
	}
#endif

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->regs[2]);

	audit_syscall_entry(syscall, regs->regs[4], regs->regs[5],
			    regs->regs[6], regs->regs[7]);

	/*
	 * Negative syscall numbers are mistaken for rejected syscalls, but
	 * won't have had the return value set appropriately, so we do so now.
	 */
	if (syscall < 0)
		syscall_set_return_value(current, regs, -ENOSYS, 0);
	return syscall;
}

/*
 * Notification of system call entry/exit
 * - triggered by current->work.syscall_trace
 */
asmlinkage void syscall_trace_leave(struct pt_regs *regs)
{
	/*
	 * We may come here right after calling schedule_user()
	 * or do_notify_resume(), in which case we can be in RCU
	 * user mode.
	 */
	user_exit();

	audit_syscall_exit(regs);

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_exit(regs, regs_return_value(regs));

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, 0);

	user_enter();
}