xref: /openbmc/linux/arch/loongarch/kernel/ptrace.c (revision 0f613bfa)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Author: Hanlu Li <lihanlu@loongson.cn>
4  *         Huacai Chen <chenhuacai@loongson.cn>
5  *
6  * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
7  *
8  * Derived from MIPS:
9  * Copyright (C) 1992 Ross Biro
10  * Copyright (C) Linus Torvalds
11  * Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle
12  * Copyright (C) 1996 David S. Miller
13  * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
14  * Copyright (C) 1999 MIPS Technologies, Inc.
15  * Copyright (C) 2000 Ulf Carlsson
16  */
17 #include <linux/kernel.h>
18 #include <linux/audit.h>
19 #include <linux/compiler.h>
20 #include <linux/context_tracking.h>
21 #include <linux/elf.h>
22 #include <linux/errno.h>
23 #include <linux/hw_breakpoint.h>
24 #include <linux/mm.h>
25 #include <linux/nospec.h>
26 #include <linux/ptrace.h>
27 #include <linux/regset.h>
28 #include <linux/sched.h>
29 #include <linux/sched/task_stack.h>
30 #include <linux/security.h>
31 #include <linux/smp.h>
32 #include <linux/stddef.h>
33 #include <linux/seccomp.h>
34 #include <linux/thread_info.h>
35 #include <linux/uaccess.h>
36 
37 #include <asm/byteorder.h>
38 #include <asm/cpu.h>
39 #include <asm/cpu-info.h>
40 #include <asm/fpu.h>
41 #include <asm/loongarch.h>
42 #include <asm/page.h>
43 #include <asm/pgtable.h>
44 #include <asm/processor.h>
45 #include <asm/ptrace.h>
46 #include <asm/reg.h>
47 #include <asm/syscall.h>
48 
49 static void init_fp_ctx(struct task_struct *target)
50 {
51 	/* The target already has context */
52 	if (tsk_used_math(target))
53 		return;
54 
55 	/* Begin with data registers set to all 1s... */
56 	memset(&target->thread.fpu.fpr, ~0, sizeof(target->thread.fpu.fpr));
57 	set_stopped_child_used_math(target);
58 }
59 
60 /*
61  * Called by kernel/ptrace.c when detaching..
62  *
63  * Make sure single step bits etc are not set.
64  */
65 void ptrace_disable(struct task_struct *child)
66 {
67 	/* Don't load the watchpoint registers for the ex-child. */
68 	clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
69 	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
70 }
71 
72 /* regset get/set implementations */
73 
74 static int gpr_get(struct task_struct *target,
75 		   const struct user_regset *regset,
76 		   struct membuf to)
77 {
78 	int r;
79 	struct pt_regs *regs = task_pt_regs(target);
80 
81 	r = membuf_write(&to, &regs->regs, sizeof(u64) * GPR_NUM);
82 	r = membuf_write(&to, &regs->orig_a0, sizeof(u64));
83 	r = membuf_write(&to, &regs->csr_era, sizeof(u64));
84 	r = membuf_write(&to, &regs->csr_badvaddr, sizeof(u64));
85 
86 	return r;
87 }
88 
89 static int gpr_set(struct task_struct *target,
90 		   const struct user_regset *regset,
91 		   unsigned int pos, unsigned int count,
92 		   const void *kbuf, const void __user *ubuf)
93 {
94 	int err;
95 	int a0_start = sizeof(u64) * GPR_NUM;
96 	int era_start = a0_start + sizeof(u64);
97 	int badvaddr_start = era_start + sizeof(u64);
98 	struct pt_regs *regs = task_pt_regs(target);
99 
100 	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
101 				 &regs->regs,
102 				 0, a0_start);
103 	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
104 				 &regs->orig_a0,
105 				 a0_start, a0_start + sizeof(u64));
106 	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
107 				 &regs->csr_era,
108 				 era_start, era_start + sizeof(u64));
109 	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
110 				 &regs->csr_badvaddr,
111 				 badvaddr_start, badvaddr_start + sizeof(u64));
112 
113 	return err;
114 }
115 
116 
117 /*
118  * Get the general floating-point registers.
119  */
120 static int gfpr_get(struct task_struct *target, struct membuf *to)
121 {
122 	return membuf_write(to, &target->thread.fpu.fpr,
123 			    sizeof(elf_fpreg_t) * NUM_FPU_REGS);
124 }
125 
126 static int gfpr_get_simd(struct task_struct *target, struct membuf *to)
127 {
128 	int i, r;
129 	u64 fpr_val;
130 
131 	BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
132 	for (i = 0; i < NUM_FPU_REGS; i++) {
133 		fpr_val = get_fpr64(&target->thread.fpu.fpr[i], 0);
134 		r = membuf_write(to, &fpr_val, sizeof(elf_fpreg_t));
135 	}
136 
137 	return r;
138 }
139 
140 /*
141  * Choose the appropriate helper for general registers, and then copy
142  * the FCC and FCSR registers separately.
143  */
144 static int fpr_get(struct task_struct *target,
145 		   const struct user_regset *regset,
146 		   struct membuf to)
147 {
148 	int r;
149 
150 	if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
151 		r = gfpr_get(target, &to);
152 	else
153 		r = gfpr_get_simd(target, &to);
154 
155 	r = membuf_write(&to, &target->thread.fpu.fcc, sizeof(target->thread.fpu.fcc));
156 	r = membuf_write(&to, &target->thread.fpu.fcsr, sizeof(target->thread.fpu.fcsr));
157 
158 	return r;
159 }
160 
161 static int gfpr_set(struct task_struct *target,
162 		    unsigned int *pos, unsigned int *count,
163 		    const void **kbuf, const void __user **ubuf)
164 {
165 	return user_regset_copyin(pos, count, kbuf, ubuf,
166 				  &target->thread.fpu.fpr,
167 				  0, NUM_FPU_REGS * sizeof(elf_fpreg_t));
168 }
169 
170 static int gfpr_set_simd(struct task_struct *target,
171 		       unsigned int *pos, unsigned int *count,
172 		       const void **kbuf, const void __user **ubuf)
173 {
174 	int i, err;
175 	u64 fpr_val;
176 
177 	BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
178 	for (i = 0; i < NUM_FPU_REGS && *count > 0; i++) {
179 		err = user_regset_copyin(pos, count, kbuf, ubuf,
180 					 &fpr_val, i * sizeof(elf_fpreg_t),
181 					 (i + 1) * sizeof(elf_fpreg_t));
182 		if (err)
183 			return err;
184 		set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val);
185 	}
186 
187 	return 0;
188 }
189 
/*
 * Choose the appropriate helper for the general FP registers, and then
 * copy the FCC and FCSR registers separately.
 */
static int fpr_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	/* Regset layout: 32 FPRs, then 64-bit FCC, then 32-bit FCSR */
	const int fcc_start = NUM_FPU_REGS * sizeof(elf_fpreg_t);
	const int fcsr_start = fcc_start + sizeof(u64);
	int err;

	/* Writes must cover a whole number of FP registers */
	BUG_ON(count % sizeof(elf_fpreg_t));
	if (pos + count > sizeof(elf_fpregset_t))
		return -EIO;

	/* Make sure the target owns an FP context before writing into it */
	init_fp_ctx(target);

	/* Pick the copy strategy matching the in-kernel FPR storage width */
	if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
		err = gfpr_set(target, &pos, &count, &kbuf, &ubuf);
	else
		err = gfpr_set_simd(target, &pos, &count, &kbuf, &ubuf);
	if (err)
		return err;

	/* err is 0 here; |= simply accumulates the remaining two steps */
	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.fpu.fcc, fcc_start,
				  fcc_start + sizeof(u64));
	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.fpu.fcsr, fcsr_start,
				  fcsr_start + sizeof(u32));

	return err;
}
225 
226 static int cfg_get(struct task_struct *target,
227 		   const struct user_regset *regset,
228 		   struct membuf to)
229 {
230 	int i, r;
231 	u32 cfg_val;
232 
233 	i = 0;
234 	while (to.left > 0) {
235 		cfg_val = read_cpucfg(i++);
236 		r = membuf_write(&to, &cfg_val, sizeof(u32));
237 	}
238 
239 	return r;
240 }
241 
/*
 * CFG registers are read-only: accept and discard any write so regset
 * users don't see a spurious error.
 */
static int cfg_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	return 0;
}
252 
253 #ifdef CONFIG_HAVE_HW_BREAKPOINT
254 
255 /*
256  * Handle hitting a HW-breakpoint.
257  */
258 static void ptrace_hbptriggered(struct perf_event *bp,
259 				struct perf_sample_data *data,
260 				struct pt_regs *regs)
261 {
262 	int i;
263 	struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
264 
265 	for (i = 0; i < LOONGARCH_MAX_BRP; ++i)
266 		if (current->thread.hbp_break[i] == bp)
267 			break;
268 
269 	for (i = 0; i < LOONGARCH_MAX_WRP; ++i)
270 		if (current->thread.hbp_watch[i] == bp)
271 			break;
272 
273 	force_sig_ptrace_errno_trap(i, (void __user *)bkpt->address);
274 }
275 
276 static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
277 					       struct task_struct *tsk,
278 					       unsigned long idx)
279 {
280 	struct perf_event *bp;
281 
282 	switch (note_type) {
283 	case NT_LOONGARCH_HW_BREAK:
284 		if (idx >= LOONGARCH_MAX_BRP)
285 			return ERR_PTR(-EINVAL);
286 		idx = array_index_nospec(idx, LOONGARCH_MAX_BRP);
287 		bp = tsk->thread.hbp_break[idx];
288 		break;
289 	case NT_LOONGARCH_HW_WATCH:
290 		if (idx >= LOONGARCH_MAX_WRP)
291 			return ERR_PTR(-EINVAL);
292 		idx = array_index_nospec(idx, LOONGARCH_MAX_WRP);
293 		bp = tsk->thread.hbp_watch[idx];
294 		break;
295 	}
296 
297 	return bp;
298 }
299 
300 static int ptrace_hbp_set_event(unsigned int note_type,
301 				struct task_struct *tsk,
302 				unsigned long idx,
303 				struct perf_event *bp)
304 {
305 	switch (note_type) {
306 	case NT_LOONGARCH_HW_BREAK:
307 		if (idx >= LOONGARCH_MAX_BRP)
308 			return -EINVAL;
309 		idx = array_index_nospec(idx, LOONGARCH_MAX_BRP);
310 		tsk->thread.hbp_break[idx] = bp;
311 		break;
312 	case NT_LOONGARCH_HW_WATCH:
313 		if (idx >= LOONGARCH_MAX_WRP)
314 			return -EINVAL;
315 		idx = array_index_nospec(idx, LOONGARCH_MAX_WRP);
316 		tsk->thread.hbp_watch[idx] = bp;
317 		break;
318 	}
319 
320 	return 0;
321 }
322 
323 static struct perf_event *ptrace_hbp_create(unsigned int note_type,
324 					    struct task_struct *tsk,
325 					    unsigned long idx)
326 {
327 	int err, type;
328 	struct perf_event *bp;
329 	struct perf_event_attr attr;
330 
331 	switch (note_type) {
332 	case NT_LOONGARCH_HW_BREAK:
333 		type = HW_BREAKPOINT_X;
334 		break;
335 	case NT_LOONGARCH_HW_WATCH:
336 		type = HW_BREAKPOINT_RW;
337 		break;
338 	default:
339 		return ERR_PTR(-EINVAL);
340 	}
341 
342 	ptrace_breakpoint_init(&attr);
343 
344 	/*
345 	 * Initialise fields to sane defaults
346 	 * (i.e. values that will pass validation).
347 	 */
348 	attr.bp_addr	= 0;
349 	attr.bp_len	= HW_BREAKPOINT_LEN_4;
350 	attr.bp_type	= type;
351 	attr.disabled	= 1;
352 
353 	bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk);
354 	if (IS_ERR(bp))
355 		return bp;
356 
357 	err = ptrace_hbp_set_event(note_type, tsk, idx, bp);
358 	if (err)
359 		return ERR_PTR(err);
360 
361 	return bp;
362 }
363 
364 static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
365 				     struct arch_hw_breakpoint_ctrl ctrl,
366 				     struct perf_event_attr *attr)
367 {
368 	int err, len, type, offset;
369 
370 	err = arch_bp_generic_fields(ctrl, &len, &type, &offset);
371 	if (err)
372 		return err;
373 
374 	switch (note_type) {
375 	case NT_LOONGARCH_HW_BREAK:
376 		if ((type & HW_BREAKPOINT_X) != type)
377 			return -EINVAL;
378 		break;
379 	case NT_LOONGARCH_HW_WATCH:
380 		if ((type & HW_BREAKPOINT_RW) != type)
381 			return -EINVAL;
382 		break;
383 	default:
384 		return -EINVAL;
385 	}
386 
387 	attr->bp_len	= len;
388 	attr->bp_type	= type;
389 	attr->bp_addr	+= offset;
390 
391 	return 0;
392 }
393 
394 static int ptrace_hbp_get_resource_info(unsigned int note_type, u64 *info)
395 {
396 	u8 num;
397 	u64 reg = 0;
398 
399 	switch (note_type) {
400 	case NT_LOONGARCH_HW_BREAK:
401 		num = hw_breakpoint_slots(TYPE_INST);
402 		break;
403 	case NT_LOONGARCH_HW_WATCH:
404 		num = hw_breakpoint_slots(TYPE_DATA);
405 		break;
406 	default:
407 		return -EINVAL;
408 	}
409 
410 	*info = reg | num;
411 
412 	return 0;
413 }
414 
/* Fetch the perf event for slot @idx, creating a placeholder if empty. */
static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
							struct task_struct *tsk,
							unsigned long idx)
{
	struct perf_event *bp;

	bp = ptrace_hbp_get_event(note_type, tsk, idx);
	if (!bp)
		bp = ptrace_hbp_create(note_type, tsk, idx);

	return bp;
}
426 
427 static int ptrace_hbp_get_ctrl(unsigned int note_type,
428 			       struct task_struct *tsk,
429 			       unsigned long idx, u32 *ctrl)
430 {
431 	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);
432 
433 	if (IS_ERR(bp))
434 		return PTR_ERR(bp);
435 
436 	*ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0;
437 
438 	return 0;
439 }
440 
441 static int ptrace_hbp_get_mask(unsigned int note_type,
442 			       struct task_struct *tsk,
443 			       unsigned long idx, u64 *mask)
444 {
445 	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);
446 
447 	if (IS_ERR(bp))
448 		return PTR_ERR(bp);
449 
450 	*mask = bp ? counter_arch_bp(bp)->mask : 0;
451 
452 	return 0;
453 }
454 
455 static int ptrace_hbp_get_addr(unsigned int note_type,
456 			       struct task_struct *tsk,
457 			       unsigned long idx, u64 *addr)
458 {
459 	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);
460 
461 	if (IS_ERR(bp))
462 		return PTR_ERR(bp);
463 
464 	*addr = bp ? counter_arch_bp(bp)->address : 0;
465 
466 	return 0;
467 }
468 
469 static int ptrace_hbp_set_ctrl(unsigned int note_type,
470 			       struct task_struct *tsk,
471 			       unsigned long idx, u32 uctrl)
472 {
473 	int err;
474 	struct perf_event *bp;
475 	struct perf_event_attr attr;
476 	struct arch_hw_breakpoint_ctrl ctrl;
477 
478 	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
479 	if (IS_ERR(bp))
480 		return PTR_ERR(bp);
481 
482 	attr = bp->attr;
483 	decode_ctrl_reg(uctrl, &ctrl);
484 	err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
485 	if (err)
486 		return err;
487 
488 	return modify_user_hw_breakpoint(bp, &attr);
489 }
490 
491 static int ptrace_hbp_set_mask(unsigned int note_type,
492 			       struct task_struct *tsk,
493 			       unsigned long idx, u64 mask)
494 {
495 	struct perf_event *bp;
496 	struct perf_event_attr attr;
497 	struct arch_hw_breakpoint *info;
498 
499 	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
500 	if (IS_ERR(bp))
501 		return PTR_ERR(bp);
502 
503 	attr = bp->attr;
504 	info = counter_arch_bp(bp);
505 	info->mask = mask;
506 
507 	return modify_user_hw_breakpoint(bp, &attr);
508 }
509 
510 static int ptrace_hbp_set_addr(unsigned int note_type,
511 			       struct task_struct *tsk,
512 			       unsigned long idx, u64 addr)
513 {
514 	struct perf_event *bp;
515 	struct perf_event_attr attr;
516 
517 	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
518 	if (IS_ERR(bp))
519 		return PTR_ERR(bp);
520 
521 	attr = bp->attr;
522 	attr.bp_addr = addr;
523 
524 	return modify_user_hw_breakpoint(bp, &attr);
525 }
526 
/* Per-slot field sizes in struct user_watch_state: addr, mask, ctrl, pad */
#define PTRACE_HBP_ADDR_SZ	sizeof(u64)
#define PTRACE_HBP_MASK_SZ	sizeof(u64)
#define PTRACE_HBP_CTRL_SZ	sizeof(u32)
#define PTRACE_HBP_PAD_SZ	sizeof(u32)
531 
/*
 * Regset read for NT_LOONGARCH_HW_BREAK/HW_WATCH: a resource-info word
 * followed by (addr, mask, ctrl, pad) tuples for each slot, as many as
 * fit in the caller's buffer.
 */
static int hw_break_get(struct task_struct *target,
			const struct user_regset *regset,
			struct membuf to)
{
	u64 info;
	u32 ctrl;
	u64 addr, mask;
	int ret, idx = 0;
	unsigned int note_type = regset->core_note_type;

	/* Resource info */
	ret = ptrace_hbp_get_resource_info(note_type, &info);
	if (ret)
		return ret;

	/* return value ignored: membuf tracks remaining space itself */
	membuf_write(&to, &info, sizeof(info));

	/* (address, mask, ctrl) registers */
	while (to.left) {
		ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
		if (ret)
			return ret;

		ret = ptrace_hbp_get_mask(note_type, target, idx, &mask);
		if (ret)
			return ret;

		ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl);
		if (ret)
			return ret;

		membuf_store(&to, addr);
		membuf_store(&to, mask);
		membuf_store(&to, ctrl);
		membuf_zero(&to, sizeof(u32));	/* pad */
		idx++;
	}

	return 0;
}
572 
573 static int hw_break_set(struct task_struct *target,
574 			const struct user_regset *regset,
575 			unsigned int pos, unsigned int count,
576 			const void *kbuf, const void __user *ubuf)
577 {
578 	u32 ctrl;
579 	u64 addr, mask;
580 	int ret, idx = 0, offset, limit;
581 	unsigned int note_type = regset->core_note_type;
582 
583 	/* Resource info */
584 	offset = offsetof(struct user_watch_state, dbg_regs);
585 	user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);
586 
587 	/* (address, mask, ctrl) registers */
588 	limit = regset->n * regset->size;
589 	while (count && offset < limit) {
590 		if (count < PTRACE_HBP_ADDR_SZ)
591 			return -EINVAL;
592 
593 		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
594 					 offset, offset + PTRACE_HBP_ADDR_SZ);
595 		if (ret)
596 			return ret;
597 
598 		ret = ptrace_hbp_set_addr(note_type, target, idx, addr);
599 		if (ret)
600 			return ret;
601 		offset += PTRACE_HBP_ADDR_SZ;
602 
603 		if (!count)
604 			break;
605 
606 		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &mask,
607 					 offset, offset + PTRACE_HBP_MASK_SZ);
608 		if (ret)
609 			return ret;
610 
611 		ret = ptrace_hbp_set_mask(note_type, target, idx, mask);
612 		if (ret)
613 			return ret;
614 		offset += PTRACE_HBP_MASK_SZ;
615 
616 		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
617 					 offset, offset + PTRACE_HBP_CTRL_SZ);
618 		if (ret)
619 			return ret;
620 
621 		ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl);
622 		if (ret)
623 			return ret;
624 		offset += PTRACE_HBP_CTRL_SZ;
625 
626 		user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
627 					  offset, offset + PTRACE_HBP_PAD_SZ);
628 		offset += PTRACE_HBP_PAD_SZ;
629 
630 		idx++;
631 	}
632 
633 	return 0;
634 }
635 
636 #endif
637 
/* Maps a user-visible register name to its byte offset in struct pt_regs. */
struct pt_regs_offset {
	const char *name;
	int offset;
};

#define REG_OFFSET_NAME(n, r) {.name = #n, .offset = offsetof(struct pt_regs, r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}
645 
/* Name/offset table for every register exposed via PEEKUSR-style lookup;
 * terminated by REG_OFFSET_END (NULL name). */
static const struct pt_regs_offset regoffset_table[] = {
	REG_OFFSET_NAME(r0, regs[0]),
	REG_OFFSET_NAME(r1, regs[1]),
	REG_OFFSET_NAME(r2, regs[2]),
	REG_OFFSET_NAME(r3, regs[3]),
	REG_OFFSET_NAME(r4, regs[4]),
	REG_OFFSET_NAME(r5, regs[5]),
	REG_OFFSET_NAME(r6, regs[6]),
	REG_OFFSET_NAME(r7, regs[7]),
	REG_OFFSET_NAME(r8, regs[8]),
	REG_OFFSET_NAME(r9, regs[9]),
	REG_OFFSET_NAME(r10, regs[10]),
	REG_OFFSET_NAME(r11, regs[11]),
	REG_OFFSET_NAME(r12, regs[12]),
	REG_OFFSET_NAME(r13, regs[13]),
	REG_OFFSET_NAME(r14, regs[14]),
	REG_OFFSET_NAME(r15, regs[15]),
	REG_OFFSET_NAME(r16, regs[16]),
	REG_OFFSET_NAME(r17, regs[17]),
	REG_OFFSET_NAME(r18, regs[18]),
	REG_OFFSET_NAME(r19, regs[19]),
	REG_OFFSET_NAME(r20, regs[20]),
	REG_OFFSET_NAME(r21, regs[21]),
	REG_OFFSET_NAME(r22, regs[22]),
	REG_OFFSET_NAME(r23, regs[23]),
	REG_OFFSET_NAME(r24, regs[24]),
	REG_OFFSET_NAME(r25, regs[25]),
	REG_OFFSET_NAME(r26, regs[26]),
	REG_OFFSET_NAME(r27, regs[27]),
	REG_OFFSET_NAME(r28, regs[28]),
	REG_OFFSET_NAME(r29, regs[29]),
	REG_OFFSET_NAME(r30, regs[30]),
	REG_OFFSET_NAME(r31, regs[31]),
	REG_OFFSET_NAME(orig_a0, orig_a0),
	REG_OFFSET_NAME(csr_era, csr_era),
	REG_OFFSET_NAME(csr_badvaddr, csr_badvaddr),
	REG_OFFSET_NAME(csr_crmd, csr_crmd),
	REG_OFFSET_NAME(csr_prmd, csr_prmd),
	REG_OFFSET_NAME(csr_euen, csr_euen),
	REG_OFFSET_NAME(csr_ecfg, csr_ecfg),
	REG_OFFSET_NAME(csr_estat, csr_estat),
	REG_OFFSET_END,
};
689 
690 /**
691  * regs_query_register_offset() - query register offset from its name
692  * @name:       the name of a register
693  *
694  * regs_query_register_offset() returns the offset of a register in struct
695  * pt_regs from its name. If the name is invalid, this returns -EINVAL;
696  */
697 int regs_query_register_offset(const char *name)
698 {
699 	const struct pt_regs_offset *roff;
700 
701 	for (roff = regoffset_table; roff->name != NULL; roff++)
702 		if (!strcmp(roff->name, name))
703 			return roff->offset;
704 	return -EINVAL;
705 }
706 
/* Indices into loongarch64_regsets[] below. */
enum loongarch_regset {
	REGSET_GPR,
	REGSET_FPR,
	REGSET_CPUCFG,
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	REGSET_HW_BREAK,
	REGSET_HW_WATCH,
#endif
};
716 
/* Regset descriptions exported to ptrace/core-dump via the view below. */
static const struct user_regset loongarch64_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type	= NT_PRSTATUS,
		.n		= ELF_NGREG,
		.size		= sizeof(elf_greg_t),
		.align		= sizeof(elf_greg_t),
		.regset_get	= gpr_get,
		.set		= gpr_set,
	},
	[REGSET_FPR] = {
		.core_note_type	= NT_PRFPREG,
		.n		= ELF_NFPREG,
		.size		= sizeof(elf_fpreg_t),
		.align		= sizeof(elf_fpreg_t),
		.regset_get	= fpr_get,
		.set		= fpr_set,
	},
	[REGSET_CPUCFG] = {
		.core_note_type	= NT_LOONGARCH_CPUCFG,
		.n		= 64,	/* 64 CPUCFG words */
		.size		= sizeof(u32),
		.align		= sizeof(u32),
		.regset_get	= cfg_get,
		.set		= cfg_set,
	},
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		.core_note_type = NT_LOONGARCH_HW_BREAK,
		.n = sizeof(struct user_watch_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = hw_break_get,
		.set = hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		.core_note_type = NT_LOONGARCH_HW_WATCH,
		.n = sizeof(struct user_watch_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = hw_break_get,
		.set = hw_break_set,
	},
#endif
};
761 
/* The single regset view exposed for all LoongArch64 tasks. */
static const struct user_regset_view user_loongarch64_view = {
	.name		= "loongarch64",
	.e_machine	= ELF_ARCH,
	.regsets	= loongarch64_regsets,
	.n		= ARRAY_SIZE(loongarch64_regsets),
};
768 
769 
/* All tasks share the one loongarch64 view; @task is unused. */
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
	return &user_loongarch64_view;
}
774 
775 static inline int read_user(struct task_struct *target, unsigned long addr,
776 			    unsigned long __user *data)
777 {
778 	unsigned long tmp = 0;
779 
780 	switch (addr) {
781 	case 0 ... 31:
782 		tmp = task_pt_regs(target)->regs[addr];
783 		break;
784 	case ARG0:
785 		tmp = task_pt_regs(target)->orig_a0;
786 		break;
787 	case PC:
788 		tmp = task_pt_regs(target)->csr_era;
789 		break;
790 	case BADVADDR:
791 		tmp = task_pt_regs(target)->csr_badvaddr;
792 		break;
793 	default:
794 		return -EIO;
795 	}
796 
797 	return put_user(tmp, data);
798 }
799 
800 static inline int write_user(struct task_struct *target, unsigned long addr,
801 			    unsigned long data)
802 {
803 	switch (addr) {
804 	case 0 ... 31:
805 		task_pt_regs(target)->regs[addr] = data;
806 		break;
807 	case ARG0:
808 		task_pt_regs(target)->orig_a0 = data;
809 		break;
810 	case PC:
811 		task_pt_regs(target)->csr_era = data;
812 		break;
813 	case BADVADDR:
814 		task_pt_regs(target)->csr_badvaddr = data;
815 		break;
816 	default:
817 		return -EIO;
818 	}
819 
820 	return 0;
821 }
822 
823 long arch_ptrace(struct task_struct *child, long request,
824 		 unsigned long addr, unsigned long data)
825 {
826 	int ret;
827 	unsigned long __user *datap = (void __user *) data;
828 
829 	switch (request) {
830 	case PTRACE_PEEKUSR:
831 		ret = read_user(child, addr, datap);
832 		break;
833 
834 	case PTRACE_POKEUSR:
835 		ret = write_user(child, addr, data);
836 		break;
837 
838 	default:
839 		ret = ptrace_request(child, request, addr, data);
840 		break;
841 	}
842 
843 	return ret;
844 }
845 
846 #ifdef CONFIG_HAVE_HW_BREAKPOINT
847 static void ptrace_triggered(struct perf_event *bp,
848 		      struct perf_sample_data *data, struct pt_regs *regs)
849 {
850 	struct perf_event_attr attr;
851 
852 	attr = bp->attr;
853 	attr.disabled = true;
854 	modify_user_hw_breakpoint(bp, &attr);
855 }
856 
/*
 * Arm a one-shot instruction breakpoint at @addr to emulate single-step.
 * Reuses (and re-enables) hbp_break[0] if it already exists, otherwise
 * registers a fresh disabled-by-default breakpoint there.
 */
static int set_single_step(struct task_struct *tsk, unsigned long addr)
{
	struct perf_event *bp;
	struct perf_event_attr attr;
	struct arch_hw_breakpoint *info;
	struct thread_struct *thread = &tsk->thread;

	bp = thread->hbp_break[0];
	if (!bp) {
		ptrace_breakpoint_init(&attr);

		attr.bp_addr = addr;
		attr.bp_len = HW_BREAKPOINT_LEN_8;
		attr.bp_type = HW_BREAKPOINT_X;

		bp = register_user_hw_breakpoint(&attr, ptrace_triggered,
						 NULL, tsk);
		if (IS_ERR(bp))
			return PTR_ERR(bp);

		thread->hbp_break[0] = bp;
	} else {
		int err;

		attr = bp->attr;
		attr.bp_addr = addr;

		/* Reenable breakpoint */
		attr.disabled = false;
		err = modify_user_hw_breakpoint(bp, &attr);
		if (unlikely(err))
			return err;

		/*
		 * Write the address into CSR IB0ADDR directly on the reuse
		 * path only; on first registration the perf layer installs
		 * it.  NOTE(review): looks intentional but confirm against
		 * the hw_breakpoint install path.
		 */
		csr_write64(attr.bp_addr, LOONGARCH_CSR_IB0ADDR);
	}
	/* Match any address bits: step triggers on the next instruction */
	info = counter_arch_bp(bp);
	info->mask = TASK_SIZE - 1;

	return 0;
}
897 
898 /* ptrace API */
899 void user_enable_single_step(struct task_struct *task)
900 {
901 	struct thread_info *ti = task_thread_info(task);
902 
903 	set_single_step(task, task_pt_regs(task)->csr_era);
904 	task->thread.single_step = task_pt_regs(task)->csr_era;
905 	set_ti_thread_flag(ti, TIF_SINGLESTEP);
906 }
907 
/* ptrace API: drop the single-step flag; the bp self-disables on trigger. */
void user_disable_single_step(struct task_struct *task)
{
	clear_tsk_thread_flag(task, TIF_SINGLESTEP);
}
912 #endif
913