// SPDX-License-Identifier: GPL-2.0
/*
 * Author: Hanlu Li <lihanlu@loongson.cn>
 *         Huacai Chen <chenhuacai@loongson.cn>
 *
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 *
 * Derived from MIPS:
 * Copyright (C) 1992 Ross Biro
 * Copyright (C) Linus Torvalds
 * Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle
 * Copyright (C) 1996 David S. Miller
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 1999 MIPS Technologies, Inc.
 * Copyright (C) 2000 Ulf Carlsson
 */
#include <linux/kernel.h>
#include <linux/audit.h>
#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/elf.h>
#include <linux/errno.h>
#include <linux/hw_breakpoint.h>
#include <linux/mm.h>
#include <linux/nospec.h>
#include <linux/ptrace.h>
#include <linux/regset.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/security.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/seccomp.h>
#include <linux/thread_info.h>
#include <linux/uaccess.h>

#include <asm/byteorder.h>
#include <asm/cpu.h>
#include <asm/cpu-info.h>
#include <asm/fpu.h>
#include <asm/lbt.h>
#include <asm/loongarch.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/reg.h>
#include <asm/syscall.h>

static void init_fp_ctx(struct task_struct *target)
{
	/* The target already has context */
	if (tsk_used_math(target))
		return;

	/* Begin with data registers set to all 1s... */
	memset(&target->thread.fpu.fpr, ~0, sizeof(target->thread.fpu.fpr));
	set_stopped_child_used_math(target);
}

/*
 * Called by kernel/ptrace.c when detaching.
 *
 * Make sure single step bits etc. are not set.
 */
void ptrace_disable(struct task_struct *child)
{
	/* Don't load the watchpoint registers for the ex-child. */
	clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
}

/* regset get/set implementations */

static int gpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	int r;
	struct pt_regs *regs = task_pt_regs(target);

	r = membuf_write(&to, &regs->regs, sizeof(u64) * GPR_NUM);
	r = membuf_write(&to, &regs->orig_a0, sizeof(u64));
	r = membuf_write(&to, &regs->csr_era, sizeof(u64));
	r = membuf_write(&to, &regs->csr_badvaddr, sizeof(u64));

	return r;
}

static int gpr_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int err;
	int a0_start = sizeof(u64) * GPR_NUM;
	int era_start = a0_start + sizeof(u64);
	int badvaddr_start = era_start + sizeof(u64);
	struct pt_regs *regs = task_pt_regs(target);

	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &regs->regs,
				 0, a0_start);
	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &regs->orig_a0,
				 a0_start, a0_start + sizeof(u64));
	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &regs->csr_era,
				 era_start, era_start + sizeof(u64));
	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &regs->csr_badvaddr,
				 badvaddr_start, badvaddr_start + sizeof(u64));

	return err;
}
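
/*
 * Illustrative only, not part of the kernel build: a tracer reads the
 * regset above from userspace with PTRACE_GETREGSET and NT_PRSTATUS.
 * A minimal sketch, assuming <sys/ptrace.h>, <sys/uio.h>, <elf.h> and
 * struct user_pt_regs from the UAPI <asm/ptrace.h>, with the tracee
 * already stopped:
 *
 *	struct user_pt_regs uregs;
 *	struct iovec iov = { .iov_base = &uregs, .iov_len = sizeof(uregs) };
 *
 *	if (ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov) == 0)
 *		printf("pc = %#llx\n", (unsigned long long)uregs.csr_era);
 *
 * The data written by gpr_get() is regs[0..31], then orig_a0, csr_era
 * and csr_badvaddr, matching that structure's layout.
 */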

/*
 * Get the general floating-point registers.
 */
static int gfpr_get(struct task_struct *target, struct membuf *to)
{
	return membuf_write(to, &target->thread.fpu.fpr,
			    sizeof(elf_fpreg_t) * NUM_FPU_REGS);
}

static int gfpr_get_simd(struct task_struct *target, struct membuf *to)
{
	int i, r;
	u64 fpr_val;

	BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
	for (i = 0; i < NUM_FPU_REGS; i++) {
		fpr_val = get_fpr64(&target->thread.fpu.fpr[i], 0);
		r = membuf_write(to, &fpr_val, sizeof(elf_fpreg_t));
	}

	return r;
}

/*
 * Choose the appropriate helper for general registers, and then copy
 * the FCC and FCSR registers separately.
 */
static int fpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	int r;

	save_fpu_regs(target);

	if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
		r = gfpr_get(target, &to);
	else
		r = gfpr_get_simd(target, &to);

	r = membuf_write(&to, &target->thread.fpu.fcc, sizeof(target->thread.fpu.fcc));
	r = membuf_write(&to, &target->thread.fpu.fcsr, sizeof(target->thread.fpu.fcsr));

	return r;
}
static int gfpr_set(struct task_struct *target,
		    unsigned int *pos, unsigned int *count,
		    const void **kbuf, const void __user **ubuf)
{
	return user_regset_copyin(pos, count, kbuf, ubuf,
				  &target->thread.fpu.fpr,
				  0, NUM_FPU_REGS * sizeof(elf_fpreg_t));
}

static int gfpr_set_simd(struct task_struct *target,
		       unsigned int *pos, unsigned int *count,
		       const void **kbuf, const void __user **ubuf)
{
	int i, err;
	u64 fpr_val;

	BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
	for (i = 0; i < NUM_FPU_REGS && *count > 0; i++) {
		err = user_regset_copyin(pos, count, kbuf, ubuf,
					 &fpr_val, i * sizeof(elf_fpreg_t),
					 (i + 1) * sizeof(elf_fpreg_t));
		if (err)
			return err;
		set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val);
	}

	return 0;
}

/*
 * Choose the appropriate helper for general registers, and then copy
 * the FCC and FCSR registers separately.
 */
static int fpr_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	const int fcc_start = NUM_FPU_REGS * sizeof(elf_fpreg_t);
	const int fcsr_start = fcc_start + sizeof(u64);
	int err;

	BUG_ON(count % sizeof(elf_fpreg_t));
	if (pos + count > sizeof(elf_fpregset_t))
		return -EIO;

	init_fp_ctx(target);

	if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
		err = gfpr_set(target, &pos, &count, &kbuf, &ubuf);
	else
		err = gfpr_set_simd(target, &pos, &count, &kbuf, &ubuf);
	if (err)
		return err;

	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.fpu.fcc, fcc_start,
				  fcc_start + sizeof(u64));
	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.fpu.fcsr, fcsr_start,
				  fcsr_start + sizeof(u32));

	return err;
}
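
/*
 * Illustrative only: the matching userspace view of this regset. A minimal
 * sketch, assuming struct user_fp_state from the UAPI <asm/ptrace.h>
 * (32 64-bit FPRs, then fcc and fcsr, in the order fpr_get() and
 * fpr_set() use above):
 *
 *	struct user_fp_state fpregs;
 *	struct iovec iov = { .iov_base = &fpregs, .iov_len = sizeof(fpregs) };
 *
 *	ptrace(PTRACE_GETREGSET, pid, NT_PRFPREG, &iov);
 *	fpregs.fcsr = 0;	// e.g. reset the FP control/status word
 *	ptrace(PTRACE_SETREGSET, pid, NT_PRFPREG, &iov);
 */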

static int cfg_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	int i, r;
	u32 cfg_val;

	i = 0;
	while (to.left > 0) {
		cfg_val = read_cpucfg(i++);
		r = membuf_write(&to, &cfg_val, sizeof(u32));
	}

	return r;
}

/*
 * CFG registers are read-only.
 */
static int cfg_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	return 0;
}
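
/*
 * Illustrative only: userspace can dump CPUCFG words through this regset
 * instead of issuing cpucfg instructions itself. A minimal sketch,
 * assuming NT_LOONGARCH_CPUCFG from <linux/elf.h>:
 *
 *	uint32_t cfg[64];	// matches the regset's .n below
 *	struct iovec iov = { .iov_base = cfg, .iov_len = sizeof(cfg) };
 *
 *	if (ptrace(PTRACE_GETREGSET, pid, NT_LOONGARCH_CPUCFG, &iov) == 0)
 *		printf("CPUCFG0 (PRID) = %#x\n", cfg[0]);
 */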

#ifdef CONFIG_CPU_HAS_LSX

static void copy_pad_fprs(struct task_struct *target,
			 const struct user_regset *regset,
			 struct membuf *to, unsigned int live_sz)
{
	int i, j;
	unsigned long long fill = ~0ull;
	unsigned int cp_sz, pad_sz;

	cp_sz = min(regset->size, live_sz);
	pad_sz = regset->size - cp_sz;
	WARN_ON(pad_sz % sizeof(fill));

	for (i = 0; i < NUM_FPU_REGS; i++) {
		membuf_write(to, &target->thread.fpu.fpr[i], cp_sz);
		for (j = 0; j < (pad_sz / sizeof(fill)); j++) {
			membuf_store(to, fill);
		}
	}
}

static int simd_get(struct task_struct *target,
		    const struct user_regset *regset,
		    struct membuf to)
{
	const unsigned int wr_size = NUM_FPU_REGS * regset->size;

	save_fpu_regs(target);

	if (!tsk_used_math(target)) {
		/* The task hasn't used FP or LSX, fill with 0xff */
		copy_pad_fprs(target, regset, &to, 0);
	} else if (!test_tsk_thread_flag(target, TIF_LSX_CTX_LIVE)) {
		/* Copy scalar FP context, fill the rest with 0xff */
		copy_pad_fprs(target, regset, &to, 8);
#ifdef CONFIG_CPU_HAS_LASX
	} else if (!test_tsk_thread_flag(target, TIF_LASX_CTX_LIVE)) {
		/* Copy LSX 128 Bit context, fill the rest with 0xff */
		copy_pad_fprs(target, regset, &to, 16);
#endif
	} else if (sizeof(target->thread.fpu.fpr[0]) == regset->size) {
		/* Trivially copy the vector registers */
		membuf_write(&to, &target->thread.fpu.fpr, wr_size);
	} else {
		/* Copy as much context as possible, fill the rest with 0xff */
		copy_pad_fprs(target, regset, &to, sizeof(target->thread.fpu.fpr[0]));
	}

	return 0;
}

static int simd_set(struct task_struct *target,
		    const struct user_regset *regset,
		    unsigned int pos, unsigned int count,
		    const void *kbuf, const void __user *ubuf)
{
	const unsigned int wr_size = NUM_FPU_REGS * regset->size;
	unsigned int cp_sz;
	int i, err, start;

	init_fp_ctx(target);

	if (sizeof(target->thread.fpu.fpr[0]) == regset->size) {
		/* Trivially copy the vector registers */
		err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &target->thread.fpu.fpr,
					 0, wr_size);
	} else {
		/* Copy as much context as possible */
		cp_sz = min_t(unsigned int, regset->size,
			      sizeof(target->thread.fpu.fpr[0]));

		i = start = err = 0;
		for (; i < NUM_FPU_REGS; i++, start += regset->size) {
			err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
						  &target->thread.fpu.fpr[i],
						  start, start + cp_sz);
		}
	}

	return err;
}

#endif /* CONFIG_CPU_HAS_LSX */
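
/*
 * Illustrative only: reading the LSX state from userspace. Each of the 32
 * vector registers occupies regset->size (16) bytes; lanes the task has
 * never touched read back as 0xff filler, as simd_get() arranges above.
 * A minimal sketch, assuming NT_LOONGARCH_LSX from <linux/elf.h>:
 *
 *	unsigned char vr[32][16];
 *	struct iovec iov = { .iov_base = vr, .iov_len = sizeof(vr) };
 *
 *	ptrace(PTRACE_GETREGSET, pid, NT_LOONGARCH_LSX, &iov);
 */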

#ifdef CONFIG_CPU_HAS_LBT
static int lbt_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	int r;

	r = membuf_write(&to, &target->thread.lbt.scr0, sizeof(target->thread.lbt.scr0));
	r = membuf_write(&to, &target->thread.lbt.scr1, sizeof(target->thread.lbt.scr1));
	r = membuf_write(&to, &target->thread.lbt.scr2, sizeof(target->thread.lbt.scr2));
	r = membuf_write(&to, &target->thread.lbt.scr3, sizeof(target->thread.lbt.scr3));
	r = membuf_write(&to, &target->thread.lbt.eflags, sizeof(u32));
	r = membuf_write(&to, &target->thread.fpu.ftop, sizeof(u32));

	return r;
}

static int lbt_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int err = 0;
	const int eflags_start = 4 * sizeof(target->thread.lbt.scr0);
	const int ftop_start = eflags_start + sizeof(u32);

	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.lbt.scr0,
				  0, 4 * sizeof(target->thread.lbt.scr0));
	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.lbt.eflags,
				  eflags_start, ftop_start);
	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.fpu.ftop,
				  ftop_start, ftop_start + sizeof(u32));

	return err;
}
#endif /* CONFIG_CPU_HAS_LBT */
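
/*
 * Illustrative only: the LBT note payload is scr0..scr3, then the 32-bit
 * eflags and the 32-bit ftop value, 40 bytes in total, as laid out by
 * lbt_get()/lbt_set() above. A minimal sketch, assuming NT_LOONGARCH_LBT
 * from <linux/elf.h>:
 *
 *	uint64_t lbt[5];	// scr0..scr3 + packed {eflags, ftop}
 *	struct iovec iov = { .iov_base = lbt, .iov_len = sizeof(lbt) };
 *
 *	ptrace(PTRACE_GETREGSET, pid, NT_LOONGARCH_LBT, &iov);
 */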

#ifdef CONFIG_HAVE_HW_BREAKPOINT

/*
 * Handle hitting a HW-breakpoint.
 */
static void ptrace_hbptriggered(struct perf_event *bp,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	int i;
	struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);

	for (i = 0; i < LOONGARCH_MAX_BRP; ++i)
		if (current->thread.hbp_break[i] == bp)
			break;

	for (i = 0; i < LOONGARCH_MAX_WRP; ++i)
		if (current->thread.hbp_watch[i] == bp)
			break;

	force_sig_ptrace_errno_trap(i, (void __user *)bkpt->address);
}

static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
					       struct task_struct *tsk,
					       unsigned long idx)
{
	struct perf_event *bp;

	switch (note_type) {
	case NT_LOONGARCH_HW_BREAK:
		if (idx >= LOONGARCH_MAX_BRP)
			return ERR_PTR(-EINVAL);
		idx = array_index_nospec(idx, LOONGARCH_MAX_BRP);
		bp = tsk->thread.hbp_break[idx];
		break;
	case NT_LOONGARCH_HW_WATCH:
		if (idx >= LOONGARCH_MAX_WRP)
			return ERR_PTR(-EINVAL);
		idx = array_index_nospec(idx, LOONGARCH_MAX_WRP);
		bp = tsk->thread.hbp_watch[idx];
		break;
	}

	return bp;
}

static int ptrace_hbp_set_event(unsigned int note_type,
				struct task_struct *tsk,
				unsigned long idx,
				struct perf_event *bp)
{
	switch (note_type) {
	case NT_LOONGARCH_HW_BREAK:
		if (idx >= LOONGARCH_MAX_BRP)
			return -EINVAL;
		idx = array_index_nospec(idx, LOONGARCH_MAX_BRP);
		tsk->thread.hbp_break[idx] = bp;
		break;
	case NT_LOONGARCH_HW_WATCH:
		if (idx >= LOONGARCH_MAX_WRP)
			return -EINVAL;
		idx = array_index_nospec(idx, LOONGARCH_MAX_WRP);
		tsk->thread.hbp_watch[idx] = bp;
		break;
	}

	return 0;
}

static struct perf_event *ptrace_hbp_create(unsigned int note_type,
					    struct task_struct *tsk,
					    unsigned long idx)
{
	int err, type;
	struct perf_event *bp;
	struct perf_event_attr attr;

	switch (note_type) {
	case NT_LOONGARCH_HW_BREAK:
		type = HW_BREAKPOINT_X;
		break;
	case NT_LOONGARCH_HW_WATCH:
		type = HW_BREAKPOINT_RW;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	ptrace_breakpoint_init(&attr);

	/*
	 * Initialise fields to sane defaults
	 * (i.e. values that will pass validation).
	 */
	attr.bp_addr	= 0;
	attr.bp_len	= HW_BREAKPOINT_LEN_4;
	attr.bp_type	= type;
	attr.disabled	= 1;

	bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk);
	if (IS_ERR(bp))
		return bp;

	err = ptrace_hbp_set_event(note_type, tsk, idx, bp);
	if (err)
		return ERR_PTR(err);

	return bp;
}

static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
				     struct arch_hw_breakpoint_ctrl ctrl,
				     struct perf_event_attr *attr)
{
	int err, len, type;

	err = arch_bp_generic_fields(ctrl, &len, &type);
	if (err)
		return err;

	attr->bp_len	= len;
	attr->bp_type	= type;

	return 0;
}

static int ptrace_hbp_get_resource_info(unsigned int note_type, u64 *info)
{
	u8 num;
	u64 reg = 0;

	switch (note_type) {
	case NT_LOONGARCH_HW_BREAK:
		num = hw_breakpoint_slots(TYPE_INST);
		break;
	case NT_LOONGARCH_HW_WATCH:
		num = hw_breakpoint_slots(TYPE_DATA);
		break;
	default:
		return -EINVAL;
	}

	*info = reg | num;

	return 0;
}

static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
							struct task_struct *tsk,
							unsigned long idx)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (!bp)
		bp = ptrace_hbp_create(note_type, tsk, idx);

	return bp;
}

static int ptrace_hbp_get_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx, u32 *ctrl)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0;

	return 0;
}

static int ptrace_hbp_get_mask(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx, u64 *mask)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*mask = bp ? counter_arch_bp(bp)->mask : 0;

	return 0;
}

static int ptrace_hbp_get_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx, u64 *addr)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*addr = bp ? counter_arch_bp(bp)->address : 0;

	return 0;
}

static int ptrace_hbp_set_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx, u32 uctrl)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;
	struct arch_hw_breakpoint_ctrl ctrl;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp))
		return PTR_ERR(bp);

	attr = bp->attr;

	switch (note_type) {
	case NT_LOONGARCH_HW_BREAK:
		ctrl.type = LOONGARCH_BREAKPOINT_EXECUTE;
		ctrl.len = LOONGARCH_BREAKPOINT_LEN_4;
		break;
	case NT_LOONGARCH_HW_WATCH:
		decode_ctrl_reg(uctrl, &ctrl);
		break;
	default:
		return -EINVAL;
	}

	if (uctrl & CTRL_PLV_ENABLE) {
		err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
		if (err)
			return err;
		attr.disabled = 0;
	} else {
		attr.disabled = 1;
	}

	return modify_user_hw_breakpoint(bp, &attr);
}

static int ptrace_hbp_set_mask(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx, u64 mask)
{
	struct perf_event *bp;
	struct perf_event_attr attr;
	struct arch_hw_breakpoint *info;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp))
		return PTR_ERR(bp);

	attr = bp->attr;
	info = counter_arch_bp(bp);
	info->mask = mask;

	return modify_user_hw_breakpoint(bp, &attr);
}

static int ptrace_hbp_set_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx, u64 addr)
{
	struct perf_event *bp;
	struct perf_event_attr attr;

	/* Kernel-space address cannot be monitored by user-space */
	if ((unsigned long)addr >= XKPRANGE)
		return -EINVAL;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp))
		return PTR_ERR(bp);

	attr = bp->attr;
	attr.bp_addr = addr;

	return modify_user_hw_breakpoint(bp, &attr);
}

#define PTRACE_HBP_ADDR_SZ	sizeof(u64)
#define PTRACE_HBP_MASK_SZ	sizeof(u64)
#define PTRACE_HBP_CTRL_SZ	sizeof(u32)
#define PTRACE_HBP_PAD_SZ	sizeof(u32)
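
/*
 * Both hw_break regsets use the layout of struct user_watch_state from the
 * UAPI <asm/ptrace.h>: a leading u64 of resource info (whose low byte is
 * the register count reported by ptrace_hbp_get_resource_info()), followed
 * by one { u64 addr; u64 mask; u32 ctrl; u32 pad; } record per hardware
 * breakpoint/watchpoint, which is what the sizes above describe.
 */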

static int hw_break_get(struct task_struct *target,
			const struct user_regset *regset,
			struct membuf to)
{
	u64 info;
	u32 ctrl;
	u64 addr, mask;
	int ret, idx = 0;
	unsigned int note_type = regset->core_note_type;

	/* Resource info */
	ret = ptrace_hbp_get_resource_info(note_type, &info);
	if (ret)
		return ret;

	membuf_write(&to, &info, sizeof(info));

	/* (address, mask, ctrl) registers */
	while (to.left) {
		ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
		if (ret)
			return ret;

		ret = ptrace_hbp_get_mask(note_type, target, idx, &mask);
		if (ret)
			return ret;

		ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl);
		if (ret)
			return ret;

		membuf_store(&to, addr);
		membuf_store(&to, mask);
		membuf_store(&to, ctrl);
		membuf_zero(&to, sizeof(u32));
		idx++;
	}

	return 0;
}

static int hw_break_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	u32 ctrl;
	u64 addr, mask;
	int ret, idx = 0, offset, limit;
	unsigned int note_type = regset->core_note_type;

	/* Resource info */
	offset = offsetof(struct user_watch_state, dbg_regs);
	user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);

	/* (address, mask, ctrl) registers */
	limit = regset->n * regset->size;
	while (count && offset < limit) {
		if (count < PTRACE_HBP_ADDR_SZ)
			return -EINVAL;

		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
					 offset, offset + PTRACE_HBP_ADDR_SZ);
		if (ret)
			return ret;

		ret = ptrace_hbp_set_addr(note_type, target, idx, addr);
		if (ret)
			return ret;
		offset += PTRACE_HBP_ADDR_SZ;

		if (!count)
			break;

		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &mask,
					 offset, offset + PTRACE_HBP_MASK_SZ);
		if (ret)
			return ret;

		ret = ptrace_hbp_set_mask(note_type, target, idx, mask);
		if (ret)
			return ret;
		offset += PTRACE_HBP_MASK_SZ;

		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
					 offset, offset + PTRACE_HBP_CTRL_SZ);
		if (ret)
			return ret;

		ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl);
		if (ret)
			return ret;
		offset += PTRACE_HBP_CTRL_SZ;

		user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					  offset, offset + PTRACE_HBP_PAD_SZ);
		offset += PTRACE_HBP_PAD_SZ;

		idx++;
	}

	return 0;
}

#endif
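
/*
 * Illustrative only: arming a data watchpoint from a tracer. A minimal
 * sketch, assuming struct user_watch_state from the UAPI <asm/ptrace.h>
 * and a ctrl word carrying the encoded type/length plus the PLV enable
 * bits that ptrace_hbp_set_ctrl() checks:
 *
 *	struct user_watch_state ws = {0};
 *	struct iovec iov = { .iov_base = &ws, .iov_len = sizeof(ws) };
 *
 *	ptrace(PTRACE_GETREGSET, pid, NT_LOONGARCH_HW_WATCH, &iov);
 *	ws.dbg_regs[0].addr = watch_addr;	// hypothetical address
 *	ws.dbg_regs[0].ctrl = watch_ctrl;	// hypothetical encoded ctrl
 *	ptrace(PTRACE_SETREGSET, pid, NT_LOONGARCH_HW_WATCH, &iov);
 */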

struct pt_regs_offset {
	const char *name;
	int offset;
};

#define REG_OFFSET_NAME(n, r) {.name = #n, .offset = offsetof(struct pt_regs, r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}

static const struct pt_regs_offset regoffset_table[] = {
	REG_OFFSET_NAME(r0, regs[0]),
	REG_OFFSET_NAME(r1, regs[1]),
	REG_OFFSET_NAME(r2, regs[2]),
	REG_OFFSET_NAME(r3, regs[3]),
	REG_OFFSET_NAME(r4, regs[4]),
	REG_OFFSET_NAME(r5, regs[5]),
	REG_OFFSET_NAME(r6, regs[6]),
	REG_OFFSET_NAME(r7, regs[7]),
	REG_OFFSET_NAME(r8, regs[8]),
	REG_OFFSET_NAME(r9, regs[9]),
	REG_OFFSET_NAME(r10, regs[10]),
	REG_OFFSET_NAME(r11, regs[11]),
	REG_OFFSET_NAME(r12, regs[12]),
	REG_OFFSET_NAME(r13, regs[13]),
	REG_OFFSET_NAME(r14, regs[14]),
	REG_OFFSET_NAME(r15, regs[15]),
	REG_OFFSET_NAME(r16, regs[16]),
	REG_OFFSET_NAME(r17, regs[17]),
	REG_OFFSET_NAME(r18, regs[18]),
	REG_OFFSET_NAME(r19, regs[19]),
	REG_OFFSET_NAME(r20, regs[20]),
	REG_OFFSET_NAME(r21, regs[21]),
	REG_OFFSET_NAME(r22, regs[22]),
	REG_OFFSET_NAME(r23, regs[23]),
	REG_OFFSET_NAME(r24, regs[24]),
	REG_OFFSET_NAME(r25, regs[25]),
	REG_OFFSET_NAME(r26, regs[26]),
	REG_OFFSET_NAME(r27, regs[27]),
	REG_OFFSET_NAME(r28, regs[28]),
	REG_OFFSET_NAME(r29, regs[29]),
	REG_OFFSET_NAME(r30, regs[30]),
	REG_OFFSET_NAME(r31, regs[31]),
	REG_OFFSET_NAME(orig_a0, orig_a0),
	REG_OFFSET_NAME(csr_era, csr_era),
	REG_OFFSET_NAME(csr_badvaddr, csr_badvaddr),
	REG_OFFSET_NAME(csr_crmd, csr_crmd),
	REG_OFFSET_NAME(csr_prmd, csr_prmd),
	REG_OFFSET_NAME(csr_euen, csr_euen),
	REG_OFFSET_NAME(csr_ecfg, csr_ecfg),
	REG_OFFSET_NAME(csr_estat, csr_estat),
	REG_OFFSET_END,
};

/**
 * regs_query_register_offset() - query register offset from its name
 * @name:       the name of a register
 *
 * regs_query_register_offset() returns the offset of a register in struct
 * pt_regs from its name. If the name is invalid, this returns -EINVAL.
 */
int regs_query_register_offset(const char *name)
{
	const struct pt_regs_offset *roff;

	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (!strcmp(roff->name, name))
			return roff->offset;
	return -EINVAL;
}
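
/*
 * Illustrative only: callers such as the kprobes argument-fetching code
 * turn a register name into a byte offset and then read the value out of
 * a saved pt_regs. A minimal sketch, given a struct pt_regs *regs (e.g.
 * from a probe handler):
 *
 *	int off = regs_query_register_offset("r4");	// first argument register
 *	unsigned long val = (off >= 0) ? regs_get_register(regs, off) : 0;
 */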

enum loongarch_regset {
	REGSET_GPR,
	REGSET_FPR,
	REGSET_CPUCFG,
#ifdef CONFIG_CPU_HAS_LSX
	REGSET_LSX,
#endif
#ifdef CONFIG_CPU_HAS_LASX
	REGSET_LASX,
#endif
#ifdef CONFIG_CPU_HAS_LBT
	REGSET_LBT,
#endif
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	REGSET_HW_BREAK,
	REGSET_HW_WATCH,
#endif
};

static const struct user_regset loongarch64_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type	= NT_PRSTATUS,
		.n		= ELF_NGREG,
		.size		= sizeof(elf_greg_t),
		.align		= sizeof(elf_greg_t),
		.regset_get	= gpr_get,
		.set		= gpr_set,
	},
	[REGSET_FPR] = {
		.core_note_type	= NT_PRFPREG,
		.n		= ELF_NFPREG,
		.size		= sizeof(elf_fpreg_t),
		.align		= sizeof(elf_fpreg_t),
		.regset_get	= fpr_get,
		.set		= fpr_set,
	},
	[REGSET_CPUCFG] = {
		.core_note_type	= NT_LOONGARCH_CPUCFG,
		.n		= 64,
		.size		= sizeof(u32),
		.align		= sizeof(u32),
		.regset_get	= cfg_get,
		.set		= cfg_set,
	},
#ifdef CONFIG_CPU_HAS_LSX
	[REGSET_LSX] = {
		.core_note_type	= NT_LOONGARCH_LSX,
		.n		= NUM_FPU_REGS,
		.size		= 16,
		.align		= 16,
		.regset_get	= simd_get,
		.set		= simd_set,
	},
#endif
#ifdef CONFIG_CPU_HAS_LASX
	[REGSET_LASX] = {
		.core_note_type	= NT_LOONGARCH_LASX,
		.n		= NUM_FPU_REGS,
		.size		= 32,
		.align		= 32,
		.regset_get	= simd_get,
		.set		= simd_set,
	},
#endif
#ifdef CONFIG_CPU_HAS_LBT
	[REGSET_LBT] = {
		.core_note_type	= NT_LOONGARCH_LBT,
		.n		= 5,
		.size		= sizeof(u64),
		.align		= sizeof(u64),
		.regset_get	= lbt_get,
		.set		= lbt_set,
	},
#endif
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		.core_note_type = NT_LOONGARCH_HW_BREAK,
		.n = sizeof(struct user_watch_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = hw_break_get,
		.set = hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		.core_note_type = NT_LOONGARCH_HW_WATCH,
		.n = sizeof(struct user_watch_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = hw_break_get,
		.set = hw_break_set,
	},
#endif
};

static const struct user_regset_view user_loongarch64_view = {
	.name		= "loongarch64",
	.e_machine	= ELF_ARCH,
	.regsets	= loongarch64_regsets,
	.n		= ARRAY_SIZE(loongarch64_regsets),
};


const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
	return &user_loongarch64_view;
}

static inline int read_user(struct task_struct *target, unsigned long addr,
			    unsigned long __user *data)
{
	unsigned long tmp = 0;

	switch (addr) {
	case 0 ... 31:
		tmp = task_pt_regs(target)->regs[addr];
		break;
	case ARG0:
		tmp = task_pt_regs(target)->orig_a0;
		break;
	case PC:
		tmp = task_pt_regs(target)->csr_era;
		break;
	case BADVADDR:
		tmp = task_pt_regs(target)->csr_badvaddr;
		break;
	default:
		return -EIO;
	}

	return put_user(tmp, data);
}

static inline int write_user(struct task_struct *target, unsigned long addr,
			    unsigned long data)
{
	switch (addr) {
	case 0 ... 31:
		task_pt_regs(target)->regs[addr] = data;
		break;
	case ARG0:
		task_pt_regs(target)->orig_a0 = data;
		break;
	case PC:
		task_pt_regs(target)->csr_era = data;
		break;
	case BADVADDR:
		task_pt_regs(target)->csr_badvaddr = data;
		break;
	default:
		return -EIO;
	}

	return 0;
}

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	int ret;
	unsigned long __user *datap = (void __user *) data;

	switch (request) {
	case PTRACE_PEEKUSR:
		ret = read_user(child, addr, datap);
		break;

	case PTRACE_POKEUSR:
		ret = write_user(child, addr, data);
		break;

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}
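
/*
 * Illustrative only: the legacy PTRACE_PEEKUSR/PTRACE_POKEUSR interface
 * addresses registers by index: 0-31 for the GPRs, then the ARG0, PC and
 * BADVADDR indices from the UAPI <asm/reg.h> handled by read_user() and
 * write_user() above. A minimal sketch through the glibc wrapper:
 *
 *	errno = 0;
 *	long pc = ptrace(PTRACE_PEEKUSER, pid, (void *)PC, NULL);
 *	if (errno == 0)
 *		printf("pc = %#lx\n", pc);
 */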

#ifdef CONFIG_HAVE_HW_BREAKPOINT
static void ptrace_triggered(struct perf_event *bp,
		      struct perf_sample_data *data, struct pt_regs *regs)
{
	struct perf_event_attr attr;

	attr = bp->attr;
	attr.disabled = true;
	modify_user_hw_breakpoint(bp, &attr);
}

static int set_single_step(struct task_struct *tsk, unsigned long addr)
{
	struct perf_event *bp;
	struct perf_event_attr attr;
	struct arch_hw_breakpoint *info;
	struct thread_struct *thread = &tsk->thread;

	bp = thread->hbp_break[0];
	if (!bp) {
		ptrace_breakpoint_init(&attr);

		attr.bp_addr = addr;
		attr.bp_len = HW_BREAKPOINT_LEN_8;
		attr.bp_type = HW_BREAKPOINT_X;

		bp = register_user_hw_breakpoint(&attr, ptrace_triggered,
						 NULL, tsk);
		if (IS_ERR(bp))
			return PTR_ERR(bp);

		thread->hbp_break[0] = bp;
	} else {
		int err;

		attr = bp->attr;
		attr.bp_addr = addr;

		/* Reenable breakpoint */
		attr.disabled = false;
		err = modify_user_hw_breakpoint(bp, &attr);
		if (unlikely(err))
			return err;

		csr_write64(attr.bp_addr, LOONGARCH_CSR_IB0ADDR);
	}
	info = counter_arch_bp(bp);
	info->mask = TASK_SIZE - 1;

	return 0;
}

/* ptrace API */
void user_enable_single_step(struct task_struct *task)
{
	struct thread_info *ti = task_thread_info(task);

	set_single_step(task, task_pt_regs(task)->csr_era);
	task->thread.single_step = task_pt_regs(task)->csr_era;
	set_ti_thread_flag(ti, TIF_SINGLESTEP);
}

void user_disable_single_step(struct task_struct *task)
{
	clear_tsk_thread_flag(task, TIF_SINGLESTEP);
}
#endif