// SPDX-License-Identifier: GPL-2.0
/*
 * Author: Hanlu Li <lihanlu@loongson.cn>
 *         Huacai Chen <chenhuacai@loongson.cn>
 *
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 *
 * Derived from MIPS:
 * Copyright (C) 1992 Ross Biro
 * Copyright (C) Linus Torvalds
 * Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle
 * Copyright (C) 1996 David S. Miller
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 1999 MIPS Technologies, Inc.
 * Copyright (C) 2000 Ulf Carlsson
 */
#include <linux/kernel.h>
#include <linux/audit.h>
#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/elf.h>
#include <linux/errno.h>
#include <linux/hw_breakpoint.h>
#include <linux/mm.h>
#include <linux/nospec.h>
#include <linux/ptrace.h>
#include <linux/regset.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/security.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/seccomp.h>
#include <linux/thread_info.h>
#include <linux/uaccess.h>

#include <asm/byteorder.h>
#include <asm/cpu.h>
#include <asm/cpu-info.h>
#include <asm/fpu.h>
#include <asm/loongarch.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/reg.h>
#include <asm/syscall.h>

static void init_fp_ctx(struct task_struct *target)
{
	/* The target already has context */
	if (tsk_used_math(target))
		return;

	/* Begin with data registers set to all 1s... */
	memset(&target->thread.fpu.fpr, ~0, sizeof(target->thread.fpu.fpr));
	set_stopped_child_used_math(target);
}

/*
 * Called by kernel/ptrace.c when detaching.
 *
 * Make sure single step bits etc are not set.
 */
void ptrace_disable(struct task_struct *child)
{
	/* Don't load the watchpoint registers for the ex-child. */
	clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
}

/* regset get/set implementations */

/* The copied-out layout: 32 GPRs, then orig_a0, csr_era and csr_badvaddr */
static int gpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	int r;
	struct pt_regs *regs = task_pt_regs(target);

	r = membuf_write(&to, &regs->regs, sizeof(u64) * GPR_NUM);
	r = membuf_write(&to, &regs->orig_a0, sizeof(u64));
	r = membuf_write(&to, &regs->csr_era, sizeof(u64));
	r = membuf_write(&to, &regs->csr_badvaddr, sizeof(u64));

	return r;
}

static int gpr_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int err;
	int a0_start = sizeof(u64) * GPR_NUM;
	int era_start = a0_start + sizeof(u64);
	int badvaddr_start = era_start + sizeof(u64);
	struct pt_regs *regs = task_pt_regs(target);

	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &regs->regs,
				 0, a0_start);
	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &regs->orig_a0,
				  a0_start, a0_start + sizeof(u64));
	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &regs->csr_era,
				  era_start, era_start + sizeof(u64));
	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &regs->csr_badvaddr,
				  badvaddr_start, badvaddr_start + sizeof(u64));

	return err;
}

/*
 * Get the general floating-point registers.
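 *
 * When each FPR is no wider than an elf_fpreg_t, the whole array can be
 * copied out in one shot; on SIMD-capable builds, where each fpr slot
 * also holds vector state, only the low 64 bits of each register are
 * extracted, one register at a time.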
 */
static int gfpr_get(struct task_struct *target, struct membuf *to)
{
	return membuf_write(to, &target->thread.fpu.fpr,
			    sizeof(elf_fpreg_t) * NUM_FPU_REGS);
}

static int gfpr_get_simd(struct task_struct *target, struct membuf *to)
{
	int i, r;
	u64 fpr_val;

	BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
	for (i = 0; i < NUM_FPU_REGS; i++) {
		fpr_val = get_fpr64(&target->thread.fpu.fpr[i], 0);
		r = membuf_write(to, &fpr_val, sizeof(elf_fpreg_t));
	}

	return r;
}

/*
 * Choose the appropriate helper for general registers, and then copy
 * the FCC and FCSR registers separately.
 */
static int fpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	int r;

	if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
		r = gfpr_get(target, &to);
	else
		r = gfpr_get_simd(target, &to);

	r = membuf_write(&to, &target->thread.fpu.fcc, sizeof(target->thread.fpu.fcc));
	r = membuf_write(&to, &target->thread.fpu.fcsr, sizeof(target->thread.fpu.fcsr));

	return r;
}

static int gfpr_set(struct task_struct *target,
		    unsigned int *pos, unsigned int *count,
		    const void **kbuf, const void __user **ubuf)
{
	return user_regset_copyin(pos, count, kbuf, ubuf,
				  &target->thread.fpu.fpr,
				  0, NUM_FPU_REGS * sizeof(elf_fpreg_t));
}

static int gfpr_set_simd(struct task_struct *target,
			 unsigned int *pos, unsigned int *count,
			 const void **kbuf, const void __user **ubuf)
{
	int i, err;
	u64 fpr_val;

	BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
	for (i = 0; i < NUM_FPU_REGS && *count > 0; i++) {
		err = user_regset_copyin(pos, count, kbuf, ubuf,
					 &fpr_val, i * sizeof(elf_fpreg_t),
					 (i + 1) * sizeof(elf_fpreg_t));
		if (err)
			return err;
		set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val);
	}

	return 0;
}

/*
 * Choose the appropriate helper for general registers, and then copy
 * the FCC and FCSR registers separately.
 */
static int fpr_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	const int fcc_start = NUM_FPU_REGS * sizeof(elf_fpreg_t);
	const int fcsr_start = fcc_start + sizeof(u64);
	int err;

	BUG_ON(count % sizeof(elf_fpreg_t));
	if (pos + count > sizeof(elf_fpregset_t))
		return -EIO;

	init_fp_ctx(target);

	if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
		err = gfpr_set(target, &pos, &count, &kbuf, &ubuf);
	else
		err = gfpr_set_simd(target, &pos, &count, &kbuf, &ubuf);
	if (err)
		return err;

	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.fpu.fcc, fcc_start,
				  fcc_start + sizeof(u64));
	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.fpu.fcsr, fcsr_start,
				  fcsr_start + sizeof(u32));

	return err;
}

static int cfg_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	int i = 0, r = 0;
	u32 cfg_val;

	while (to.left > 0) {
		cfg_val = read_cpucfg(i++);
		r = membuf_write(&to, &cfg_val, sizeof(u32));
	}

	return r;
}

/*
 * CFG registers are read-only.
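 *
 * Writes are accepted but discarded: cfg_set() reports success without
 * modifying anything, and reads always come from the hardware via
 * read_cpucfg().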
 */
static int cfg_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	return 0;
}

#ifdef CONFIG_CPU_HAS_LSX

static void copy_pad_fprs(struct task_struct *target,
			  const struct user_regset *regset,
			  struct membuf *to, unsigned int live_sz)
{
	int i, j;
	unsigned long long fill = ~0ull;
	unsigned int cp_sz, pad_sz;

	cp_sz = min(regset->size, live_sz);
	pad_sz = regset->size - cp_sz;
	WARN_ON(pad_sz % sizeof(fill));

	for (i = 0; i < NUM_FPU_REGS; i++) {
		membuf_write(to, &target->thread.fpu.fpr[i], cp_sz);
		for (j = 0; j < (pad_sz / sizeof(fill)); j++)
			membuf_store(to, fill);
	}
}

static int simd_get(struct task_struct *target,
		    const struct user_regset *regset,
		    struct membuf to)
{
	const unsigned int wr_size = NUM_FPU_REGS * regset->size;

	if (!tsk_used_math(target)) {
		/* The task hasn't used FP or LSX, fill with 0xff */
		copy_pad_fprs(target, regset, &to, 0);
	} else if (!test_tsk_thread_flag(target, TIF_LSX_CTX_LIVE)) {
		/* Copy scalar FP context, fill the rest with 0xff */
		copy_pad_fprs(target, regset, &to, 8);
#ifdef CONFIG_CPU_HAS_LASX
	} else if (!test_tsk_thread_flag(target, TIF_LASX_CTX_LIVE)) {
		/* Copy LSX 128 Bit context, fill the rest with 0xff */
		copy_pad_fprs(target, regset, &to, 16);
#endif
	} else if (sizeof(target->thread.fpu.fpr[0]) == regset->size) {
		/* Trivially copy the vector registers */
		membuf_write(&to, &target->thread.fpu.fpr, wr_size);
	} else {
		/* Copy as much context as possible, fill the rest with 0xff */
		copy_pad_fprs(target, regset, &to, sizeof(target->thread.fpu.fpr[0]));
	}

	return 0;
}

static int simd_set(struct task_struct *target,
		    const struct user_regset *regset,
		    unsigned int pos, unsigned int count,
		    const void *kbuf, const void __user *ubuf)
{
	const unsigned int wr_size = NUM_FPU_REGS * regset->size;
	unsigned int cp_sz;
	int i, err, start;

	init_fp_ctx(target);

	if (sizeof(target->thread.fpu.fpr[0]) == regset->size) {
		/* Trivially copy the vector registers */
		err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &target->thread.fpu.fpr,
					 0, wr_size);
	} else {
		/* Copy as much context as possible */
		cp_sz = min_t(unsigned int, regset->size,
			      sizeof(target->thread.fpu.fpr[0]));

		i = start = err = 0;
		for (; i < NUM_FPU_REGS; i++, start += regset->size) {
			err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
						  &target->thread.fpu.fpr[i],
						  start, start + cp_sz);
		}
	}

	return err;
}

#endif /* CONFIG_CPU_HAS_LSX */

#ifdef CONFIG_HAVE_HW_BREAKPOINT

/*
 * Handle hitting a HW-breakpoint.
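 *
 * Scan the breakpoint then watchpoint slots for the perf event that
 * fired, and report the slot index to the tracer via a SIGTRAP whose
 * si_errno carries the index and whose si_addr carries the breakpoint
 * address.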
 */
static void ptrace_hbptriggered(struct perf_event *bp,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	int i;
	struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);

	for (i = 0; i < LOONGARCH_MAX_BRP; ++i)
		if (current->thread.hbp_break[i] == bp)
			break;

	for (i = 0; i < LOONGARCH_MAX_WRP; ++i)
		if (current->thread.hbp_watch[i] == bp)
			break;

	force_sig_ptrace_errno_trap(i, (void __user *)bkpt->address);
}

static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
					       struct task_struct *tsk,
					       unsigned long idx)
{
	struct perf_event *bp;

	switch (note_type) {
	case NT_LOONGARCH_HW_BREAK:
		if (idx >= LOONGARCH_MAX_BRP)
			return ERR_PTR(-EINVAL);
		idx = array_index_nospec(idx, LOONGARCH_MAX_BRP);
		bp = tsk->thread.hbp_break[idx];
		break;
	case NT_LOONGARCH_HW_WATCH:
		if (idx >= LOONGARCH_MAX_WRP)
			return ERR_PTR(-EINVAL);
		idx = array_index_nospec(idx, LOONGARCH_MAX_WRP);
		bp = tsk->thread.hbp_watch[idx];
		break;
	default:
		/* Don't return an uninitialized bp for an unknown note type */
		return ERR_PTR(-EINVAL);
	}

	return bp;
}

static int ptrace_hbp_set_event(unsigned int note_type,
				struct task_struct *tsk,
				unsigned long idx,
				struct perf_event *bp)
{
	switch (note_type) {
	case NT_LOONGARCH_HW_BREAK:
		if (idx >= LOONGARCH_MAX_BRP)
			return -EINVAL;
		idx = array_index_nospec(idx, LOONGARCH_MAX_BRP);
		tsk->thread.hbp_break[idx] = bp;
		break;
	case NT_LOONGARCH_HW_WATCH:
		if (idx >= LOONGARCH_MAX_WRP)
			return -EINVAL;
		idx = array_index_nospec(idx, LOONGARCH_MAX_WRP);
		tsk->thread.hbp_watch[idx] = bp;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static struct perf_event *ptrace_hbp_create(unsigned int note_type,
					    struct task_struct *tsk,
					    unsigned long idx)
{
	int err, type;
	struct perf_event *bp;
	struct perf_event_attr attr;

	switch (note_type) {
	case NT_LOONGARCH_HW_BREAK:
		type = HW_BREAKPOINT_X;
		break;
	case NT_LOONGARCH_HW_WATCH:
		type = HW_BREAKPOINT_RW;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	ptrace_breakpoint_init(&attr);

	/*
	 * Initialise fields to sane defaults
	 * (i.e. values that will pass validation).
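	 *
	 * The event starts out disabled at address 0 with a 4-byte length;
	 * the tracer is expected to program the real address and control
	 * bits later via ptrace_hbp_set_addr() and ptrace_hbp_set_ctrl().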
	 */
	attr.bp_addr = 0;
	attr.bp_len = HW_BREAKPOINT_LEN_4;
	attr.bp_type = type;
	attr.disabled = 1;

	bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk);
	if (IS_ERR(bp))
		return bp;

	err = ptrace_hbp_set_event(note_type, tsk, idx, bp);
	if (err)
		return ERR_PTR(err);

	return bp;
}

static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
				     struct arch_hw_breakpoint_ctrl ctrl,
				     struct perf_event_attr *attr)
{
	int err, len, type, offset;

	err = arch_bp_generic_fields(ctrl, &len, &type, &offset);
	if (err)
		return err;

	switch (note_type) {
	case NT_LOONGARCH_HW_BREAK:
		if ((type & HW_BREAKPOINT_X) != type)
			return -EINVAL;
		break;
	case NT_LOONGARCH_HW_WATCH:
		if ((type & HW_BREAKPOINT_RW) != type)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	attr->bp_len = len;
	attr->bp_type = type;
	attr->bp_addr += offset;

	return 0;
}

static int ptrace_hbp_get_resource_info(unsigned int note_type, u64 *info)
{
	u8 num;
	u64 reg = 0;

	switch (note_type) {
	case NT_LOONGARCH_HW_BREAK:
		num = hw_breakpoint_slots(TYPE_INST);
		break;
	case NT_LOONGARCH_HW_WATCH:
		num = hw_breakpoint_slots(TYPE_DATA);
		break;
	default:
		return -EINVAL;
	}

	*info = reg | num;

	return 0;
}

static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
							struct task_struct *tsk,
							unsigned long idx)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (!bp)
		bp = ptrace_hbp_create(note_type, tsk, idx);

	return bp;
}

static int ptrace_hbp_get_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx, u32 *ctrl)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0;

	return 0;
}

static int ptrace_hbp_get_mask(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx, u64 *mask)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*mask = bp ? counter_arch_bp(bp)->mask : 0;

	return 0;
}
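
/*
 * Like the ctrl and mask getters above, reading the address of a slot
 * with no breakpoint registered yields 0 rather than an error;
 * hw_break_get() relies on this when it walks every slot.
 */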
static int ptrace_hbp_get_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx, u64 *addr)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*addr = bp ? counter_arch_bp(bp)->address : 0;

	return 0;
}

static int ptrace_hbp_set_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx, u32 uctrl)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;
	struct arch_hw_breakpoint_ctrl ctrl;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp))
		return PTR_ERR(bp);

	attr = bp->attr;
	decode_ctrl_reg(uctrl, &ctrl);
	err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
	if (err)
		return err;

	return modify_user_hw_breakpoint(bp, &attr);
}

static int ptrace_hbp_set_mask(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx, u64 mask)
{
	struct perf_event *bp;
	struct perf_event_attr attr;
	struct arch_hw_breakpoint *info;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp))
		return PTR_ERR(bp);

	attr = bp->attr;
	info = counter_arch_bp(bp);
	info->mask = mask;

	return modify_user_hw_breakpoint(bp, &attr);
}

static int ptrace_hbp_set_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx, u64 addr)
{
	struct perf_event *bp;
	struct perf_event_attr attr;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp))
		return PTR_ERR(bp);

	attr = bp->attr;
	attr.bp_addr = addr;

	return modify_user_hw_breakpoint(bp, &attr);
}

#define PTRACE_HBP_ADDR_SZ	sizeof(u64)
#define PTRACE_HBP_MASK_SZ	sizeof(u64)
#define PTRACE_HBP_CTRL_SZ	sizeof(u32)
#define PTRACE_HBP_PAD_SZ	sizeof(u32)

static int hw_break_get(struct task_struct *target,
			const struct user_regset *regset,
			struct membuf to)
{
	u64 info;
	u32 ctrl;
	u64 addr, mask;
	int ret, idx = 0;
	unsigned int note_type = regset->core_note_type;

	/* Resource info */
	ret = ptrace_hbp_get_resource_info(note_type, &info);
	if (ret)
		return ret;

	membuf_write(&to, &info, sizeof(info));

	/* (address, mask, ctrl) registers */
	while (to.left) {
		ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
		if (ret)
			return ret;

		ret = ptrace_hbp_get_mask(note_type, target, idx, &mask);
		if (ret)
			return ret;

		ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl);
		if (ret)
			return ret;

		membuf_store(&to, addr);
		membuf_store(&to, mask);
		membuf_store(&to, ctrl);
		membuf_zero(&to, sizeof(u32));
		idx++;
	}

	return 0;
}
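
/*
 * The userspace view mirrors struct user_watch_state: one u64 of
 * read-only resource info (dbg_info), followed by one {u64 addr;
 * u64 mask; u32 ctrl; u32 pad} record per slot. hw_break_set() skips
 * over the resource info and applies each record field by field.
 */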
static int hw_break_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	u32 ctrl;
	u64 addr, mask;
	int ret, idx = 0, offset, limit;
	unsigned int note_type = regset->core_note_type;

	/* Resource info */
	offset = offsetof(struct user_watch_state, dbg_regs);
	user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);

	/* (address, mask, ctrl) registers */
	limit = regset->n * regset->size;
	while (count && offset < limit) {
		if (count < PTRACE_HBP_ADDR_SZ)
			return -EINVAL;

		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
					 offset, offset + PTRACE_HBP_ADDR_SZ);
		if (ret)
			return ret;

		ret = ptrace_hbp_set_addr(note_type, target, idx, addr);
		if (ret)
			return ret;
		offset += PTRACE_HBP_ADDR_SZ;

		if (!count)
			break;

		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &mask,
					 offset, offset + PTRACE_HBP_MASK_SZ);
		if (ret)
			return ret;

		ret = ptrace_hbp_set_mask(note_type, target, idx, mask);
		if (ret)
			return ret;
		offset += PTRACE_HBP_MASK_SZ;

		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
					 offset, offset + PTRACE_HBP_CTRL_SZ);
		if (ret)
			return ret;

		ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl);
		if (ret)
			return ret;
		offset += PTRACE_HBP_CTRL_SZ;

		user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					  offset, offset + PTRACE_HBP_PAD_SZ);
		offset += PTRACE_HBP_PAD_SZ;

		idx++;
	}

	return 0;
}

#endif /* CONFIG_HAVE_HW_BREAKPOINT */

struct pt_regs_offset {
	const char *name;
	int offset;
};

#define REG_OFFSET_NAME(n, r) {.name = #n, .offset = offsetof(struct pt_regs, r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}

static const struct pt_regs_offset regoffset_table[] = {
	REG_OFFSET_NAME(r0, regs[0]),
	REG_OFFSET_NAME(r1, regs[1]),
	REG_OFFSET_NAME(r2, regs[2]),
	REG_OFFSET_NAME(r3, regs[3]),
	REG_OFFSET_NAME(r4, regs[4]),
	REG_OFFSET_NAME(r5, regs[5]),
	REG_OFFSET_NAME(r6, regs[6]),
	REG_OFFSET_NAME(r7, regs[7]),
	REG_OFFSET_NAME(r8, regs[8]),
	REG_OFFSET_NAME(r9, regs[9]),
	REG_OFFSET_NAME(r10, regs[10]),
	REG_OFFSET_NAME(r11, regs[11]),
	REG_OFFSET_NAME(r12, regs[12]),
	REG_OFFSET_NAME(r13, regs[13]),
	REG_OFFSET_NAME(r14, regs[14]),
	REG_OFFSET_NAME(r15, regs[15]),
	REG_OFFSET_NAME(r16, regs[16]),
	REG_OFFSET_NAME(r17, regs[17]),
	REG_OFFSET_NAME(r18, regs[18]),
	REG_OFFSET_NAME(r19, regs[19]),
	REG_OFFSET_NAME(r20, regs[20]),
	REG_OFFSET_NAME(r21, regs[21]),
	REG_OFFSET_NAME(r22, regs[22]),
	REG_OFFSET_NAME(r23, regs[23]),
	REG_OFFSET_NAME(r24, regs[24]),
	REG_OFFSET_NAME(r25, regs[25]),
	REG_OFFSET_NAME(r26, regs[26]),
	REG_OFFSET_NAME(r27, regs[27]),
	REG_OFFSET_NAME(r28, regs[28]),
	REG_OFFSET_NAME(r29, regs[29]),
	REG_OFFSET_NAME(r30, regs[30]),
	REG_OFFSET_NAME(r31, regs[31]),
	REG_OFFSET_NAME(orig_a0, orig_a0),
	REG_OFFSET_NAME(csr_era, csr_era),
	REG_OFFSET_NAME(csr_badvaddr, csr_badvaddr),
	REG_OFFSET_NAME(csr_crmd, csr_crmd),
	REG_OFFSET_NAME(csr_prmd, csr_prmd),
	REG_OFFSET_NAME(csr_euen, csr_euen),
	REG_OFFSET_NAME(csr_ecfg, csr_ecfg),
	REG_OFFSET_NAME(csr_estat, csr_estat),
	REG_OFFSET_END,
};

/**
 * regs_query_register_offset() - query register offset from its name
 * @name:	the name of a register
 *
 * regs_query_register_offset() returns the offset of a register in struct
 * pt_regs from its name. If the name is invalid, this returns -EINVAL.
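 *
 * For example, regs_query_register_offset("csr_era") yields
 * offsetof(struct pt_regs, csr_era).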
 */
int regs_query_register_offset(const char *name)
{
	const struct pt_regs_offset *roff;

	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (!strcmp(roff->name, name))
			return roff->offset;
	return -EINVAL;
}

enum loongarch_regset {
	REGSET_GPR,
	REGSET_FPR,
	REGSET_CPUCFG,
#ifdef CONFIG_CPU_HAS_LSX
	REGSET_LSX,
#endif
#ifdef CONFIG_CPU_HAS_LASX
	REGSET_LASX,
#endif
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	REGSET_HW_BREAK,
	REGSET_HW_WATCH,
#endif
};

static const struct user_regset loongarch64_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = ELF_NGREG,
		.size = sizeof(elf_greg_t),
		.align = sizeof(elf_greg_t),
		.regset_get = gpr_get,
		.set = gpr_set,
	},
	[REGSET_FPR] = {
		.core_note_type = NT_PRFPREG,
		.n = ELF_NFPREG,
		.size = sizeof(elf_fpreg_t),
		.align = sizeof(elf_fpreg_t),
		.regset_get = fpr_get,
		.set = fpr_set,
	},
	[REGSET_CPUCFG] = {
		.core_note_type = NT_LOONGARCH_CPUCFG,
		.n = 64,
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = cfg_get,
		.set = cfg_set,
	},
#ifdef CONFIG_CPU_HAS_LSX
	[REGSET_LSX] = {
		.core_note_type = NT_LOONGARCH_LSX,
		.n = NUM_FPU_REGS,
		.size = 16,
		.align = 16,
		.regset_get = simd_get,
		.set = simd_set,
	},
#endif
#ifdef CONFIG_CPU_HAS_LASX
	[REGSET_LASX] = {
		.core_note_type = NT_LOONGARCH_LASX,
		.n = NUM_FPU_REGS,
		.size = 32,
		.align = 32,
		.regset_get = simd_get,
		.set = simd_set,
	},
#endif
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		.core_note_type = NT_LOONGARCH_HW_BREAK,
		.n = sizeof(struct user_watch_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = hw_break_get,
		.set = hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		.core_note_type = NT_LOONGARCH_HW_WATCH,
		.n = sizeof(struct user_watch_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = hw_break_get,
		.set = hw_break_set,
	},
#endif
};

static const struct user_regset_view user_loongarch64_view = {
	.name = "loongarch64",
	.e_machine = ELF_ARCH,
	.regsets = loongarch64_regsets,
	.n = ARRAY_SIZE(loongarch64_regsets),
};

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
	return &user_loongarch64_view;
}

static inline int read_user(struct task_struct *target, unsigned long addr,
			    unsigned long __user *data)
{
	unsigned long tmp = 0;

	switch (addr) {
	case 0 ... 31:
		tmp = task_pt_regs(target)->regs[addr];
		break;
	case ARG0:
		tmp = task_pt_regs(target)->orig_a0;
		break;
	case PC:
		tmp = task_pt_regs(target)->csr_era;
		break;
	case BADVADDR:
		tmp = task_pt_regs(target)->csr_badvaddr;
		break;
	default:
		return -EIO;
	}

	return put_user(tmp, data);
}
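
/*
 * write_user() is the PTRACE_POKEUSR counterpart of read_user(), using
 * the same index space: 0-31 for the GPRs, plus ARG0, PC and BADVADDR.
 * A tracer would typically redirect the tracee with something like
 * ptrace(PTRACE_POKEUSR, pid, PC, new_pc), where new_pc is the address
 * it wants csr_era to hold on resume.
 */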
static inline int write_user(struct task_struct *target, unsigned long addr,
			     unsigned long data)
{
	switch (addr) {
	case 0 ... 31:
		task_pt_regs(target)->regs[addr] = data;
		break;
	case ARG0:
		task_pt_regs(target)->orig_a0 = data;
		break;
	case PC:
		task_pt_regs(target)->csr_era = data;
		break;
	case BADVADDR:
		task_pt_regs(target)->csr_badvaddr = data;
		break;
	default:
		return -EIO;
	}

	return 0;
}

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	int ret;
	unsigned long __user *datap = (void __user *) data;

	switch (request) {
	case PTRACE_PEEKUSR:
		ret = read_user(child, addr, datap);
		break;

	case PTRACE_POKEUSR:
		ret = write_user(child, addr, data);
		break;

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT
static void ptrace_triggered(struct perf_event *bp,
			     struct perf_sample_data *data, struct pt_regs *regs)
{
	struct perf_event_attr attr;

	attr = bp->attr;
	attr.disabled = true;
	modify_user_hw_breakpoint(bp, &attr);
}

static int set_single_step(struct task_struct *tsk, unsigned long addr)
{
	struct perf_event *bp;
	struct perf_event_attr attr;
	struct arch_hw_breakpoint *info;
	struct thread_struct *thread = &tsk->thread;

	bp = thread->hbp_break[0];
	if (!bp) {
		ptrace_breakpoint_init(&attr);

		attr.bp_addr = addr;
		attr.bp_len = HW_BREAKPOINT_LEN_8;
		attr.bp_type = HW_BREAKPOINT_X;

		bp = register_user_hw_breakpoint(&attr, ptrace_triggered,
						 NULL, tsk);
		if (IS_ERR(bp))
			return PTR_ERR(bp);

		thread->hbp_break[0] = bp;
	} else {
		int err;

		attr = bp->attr;
		attr.bp_addr = addr;

		/* Reenable breakpoint */
		attr.disabled = false;
		err = modify_user_hw_breakpoint(bp, &attr);
		if (unlikely(err))
			return err;

		csr_write64(attr.bp_addr, LOONGARCH_CSR_IB0ADDR);
	}
	info = counter_arch_bp(bp);
	info->mask = TASK_SIZE - 1;

	return 0;
}

/* ptrace API */
void user_enable_single_step(struct task_struct *task)
{
	struct thread_info *ti = task_thread_info(task);

	set_single_step(task, task_pt_regs(task)->csr_era);
	task->thread.single_step = task_pt_regs(task)->csr_era;
	set_ti_thread_flag(ti, TIF_SINGLESTEP);
}

void user_disable_single_step(struct task_struct *task)
{
	clear_tsk_thread_flag(task, TIF_SINGLESTEP);
}
#endif /* CONFIG_HAVE_HW_BREAKPOINT */