// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/ptrace.c
 *
 * By Ross Biro 1/23/92
 * edited by Linus Torvalds
 * ARM modifications Copyright (C) 2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/audit.h>
#include <linux/compat.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/nospec.h>
#include <linux/smp.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/seccomp.h>
#include <linux/security.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <linux/elf.h>

#include <asm/compat.h>
#include <asm/cpufeature.h>
#include <asm/debug-monitors.h>
#include <asm/fpsimd.h>
#include <asm/pointer_auth.h>
#include <asm/stacktrace.h>
#include <asm/syscall.h>
#include <asm/traps.h>
#include <asm/system_misc.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

struct pt_regs_offset {
	const char *name;
	int offset;
};

#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}
#define GPR_OFFSET_NAME(r) \
	{.name = "x" #r, .offset = offsetof(struct pt_regs, regs[r])}

static const struct pt_regs_offset regoffset_table[] = {
	GPR_OFFSET_NAME(0),
	GPR_OFFSET_NAME(1),
	GPR_OFFSET_NAME(2),
	GPR_OFFSET_NAME(3),
	GPR_OFFSET_NAME(4),
	GPR_OFFSET_NAME(5),
	GPR_OFFSET_NAME(6),
	GPR_OFFSET_NAME(7),
	GPR_OFFSET_NAME(8),
	GPR_OFFSET_NAME(9),
	GPR_OFFSET_NAME(10),
	GPR_OFFSET_NAME(11),
	GPR_OFFSET_NAME(12),
	GPR_OFFSET_NAME(13),
	GPR_OFFSET_NAME(14),
	GPR_OFFSET_NAME(15),
	GPR_OFFSET_NAME(16),
	GPR_OFFSET_NAME(17),
	GPR_OFFSET_NAME(18),
	GPR_OFFSET_NAME(19),
	GPR_OFFSET_NAME(20),
	GPR_OFFSET_NAME(21),
	GPR_OFFSET_NAME(22),
	GPR_OFFSET_NAME(23),
	GPR_OFFSET_NAME(24),
	GPR_OFFSET_NAME(25),
	GPR_OFFSET_NAME(26),
	GPR_OFFSET_NAME(27),
	GPR_OFFSET_NAME(28),
	GPR_OFFSET_NAME(29),
	GPR_OFFSET_NAME(30),
	{.name = "lr", .offset = offsetof(struct pt_regs, regs[30])},
	REG_OFFSET_NAME(sp),
	REG_OFFSET_NAME(pc),
	REG_OFFSET_NAME(pstate),
	REG_OFFSET_END,
};

/**
 * regs_query_register_offset() - query register offset from its name
 * @name:	the name of a register
 *
 * regs_query_register_offset() returns the offset of a register in struct
 * pt_regs from its name. If the name is invalid, this returns -EINVAL.
 */
int regs_query_register_offset(const char *name)
{
	const struct pt_regs_offset *roff;

	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (!strcmp(roff->name, name))
			return roff->offset;
	return -EINVAL;
}
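
/*
 * Example usage (illustrative sketch, not from the original source): callers
 * such as the kprobes fetch-argument code resolve a symbolic name and then
 * read the value back via regs_get_register() from <asm/ptrace.h>:
 *
 *	int off = regs_query_register_offset("x2");
 *
 *	if (off >= 0)
 *		val = regs_get_register(regs, off);
 */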

/**
 * regs_within_kernel_stack() - check the address in the stack
 * @regs:      pt_regs which contains kernel stack pointer.
 * @addr:      address which is checked.
 *
 * regs_within_kernel_stack() checks @addr is within the kernel stack page(s).
 * If @addr is within the kernel stack, it returns true. If not, returns false.
 */
static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
	return ((addr & ~(THREAD_SIZE - 1)) ==
		(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))) ||
		on_irq_stack(addr, NULL);
}

/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @n:		stack entry number.
 *
 * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
 * is specified by @regs. If the @n th entry is NOT in the kernel stack,
 * this returns 0.
 */
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);

	addr += n;
	if (regs_within_kernel_stack(regs, (unsigned long)addr))
		return *addr;
	else
		return 0;
}
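
/*
 * Example usage (illustrative sketch, not from the original source): the
 * kprobe event fetch argument "$stackN" is serviced by this helper, so a
 * probe definition such as
 *
 *	p:myprobe do_sys_open arg=$stack2
 *
 * reads the second entry above the stack pointer via
 * regs_get_kernel_stack_nth(regs, 2).
 */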

/*
 * TODO: does not yet catch signals sent when the child dies
 * in exit.c or in signal.c.
 */

/*
 * Called by kernel/ptrace.c when detaching.
 */
void ptrace_disable(struct task_struct *child)
{
	/*
	 * This would be better off in core code, but PTRACE_DETACH has
	 * grown its fair share of arch-specific warts and changing it
	 * is likely to cause regressions on obscure architectures.
	 */
	user_disable_single_step(child);
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT
/*
 * Handle hitting a HW-breakpoint.
 */
static void ptrace_hbptriggered(struct perf_event *bp,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
	const char *desc = "Hardware breakpoint trap (ptrace)";

#ifdef CONFIG_COMPAT
	if (is_compat_task()) {
		int si_errno = 0;
		int i;

		for (i = 0; i < ARM_MAX_BRP; ++i) {
			if (current->thread.debug.hbp_break[i] == bp) {
				si_errno = (i << 1) + 1;
				break;
			}
		}

		for (i = 0; i < ARM_MAX_WRP; ++i) {
			if (current->thread.debug.hbp_watch[i] == bp) {
				si_errno = -((i << 1) + 1);
				break;
			}
		}
		arm64_force_sig_ptrace_errno_trap(si_errno,
						  (void __user *)bkpt->trigger,
						  desc);
		/*
		 * Return here so that a compat task is not also sent the
		 * native SIGTRAP fault below.
		 */
		return;
	}
#endif
	arm64_force_sig_fault(SIGTRAP, TRAP_HWBKPT,
			      (void __user *)(bkpt->trigger),
			      desc);
}

/*
 * Unregister breakpoints from this task and reset the pointers in
 * the thread_struct.
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < ARM_MAX_BRP; i++) {
		if (t->debug.hbp_break[i]) {
			unregister_hw_breakpoint(t->debug.hbp_break[i]);
			t->debug.hbp_break[i] = NULL;
		}
	}

	for (i = 0; i < ARM_MAX_WRP; i++) {
		if (t->debug.hbp_watch[i]) {
			unregister_hw_breakpoint(t->debug.hbp_watch[i]);
			t->debug.hbp_watch[i] = NULL;
		}
	}
}

void ptrace_hw_copy_thread(struct task_struct *tsk)
{
	memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
}

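/*
 * Look up the perf_event backing a ptrace breakpoint slot: returns
 * ERR_PTR(-EINVAL) for an unknown note type or an out-of-range index, NULL
 * for a valid slot with no breakpoint installed yet, and the installed
 * perf_event otherwise.
 */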
static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
					       struct task_struct *tsk,
					       unsigned long idx)
{
	struct perf_event *bp = ERR_PTR(-EINVAL);

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if (idx >= ARM_MAX_BRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_BRP);
		bp = tsk->thread.debug.hbp_break[idx];
		break;
	case NT_ARM_HW_WATCH:
		if (idx >= ARM_MAX_WRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_WRP);
		bp = tsk->thread.debug.hbp_watch[idx];
		break;
	}

out:
	return bp;
}

static int ptrace_hbp_set_event(unsigned int note_type,
				struct task_struct *tsk,
				unsigned long idx,
				struct perf_event *bp)
{
	int err = -EINVAL;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if (idx >= ARM_MAX_BRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_BRP);
		tsk->thread.debug.hbp_break[idx] = bp;
		err = 0;
		break;
	case NT_ARM_HW_WATCH:
		if (idx >= ARM_MAX_WRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_WRP);
		tsk->thread.debug.hbp_watch[idx] = bp;
		err = 0;
		break;
	}

out:
	return err;
}

static struct perf_event *ptrace_hbp_create(unsigned int note_type,
					    struct task_struct *tsk,
					    unsigned long idx)
{
	struct perf_event *bp;
	struct perf_event_attr attr;
	int err, type;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		type = HW_BREAKPOINT_X;
		break;
	case NT_ARM_HW_WATCH:
		type = HW_BREAKPOINT_RW;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	ptrace_breakpoint_init(&attr);

	/*
	 * Initialise fields to sane defaults
	 * (i.e. values that will pass validation).
	 */
	attr.bp_addr	= 0;
	attr.bp_len	= HW_BREAKPOINT_LEN_4;
	attr.bp_type	= type;
	attr.disabled	= 1;

	bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk);
	if (IS_ERR(bp))
		return bp;

	err = ptrace_hbp_set_event(note_type, tsk, idx, bp);
	if (err)
		return ERR_PTR(err);

	return bp;
}

static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
				     struct arch_hw_breakpoint_ctrl ctrl,
				     struct perf_event_attr *attr)
{
	int err, len, type, offset, disabled = !ctrl.enabled;

	attr->disabled = disabled;
	if (disabled)
		return 0;

	err = arch_bp_generic_fields(ctrl, &len, &type, &offset);
	if (err)
		return err;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if ((type & HW_BREAKPOINT_X) != type)
			return -EINVAL;
		break;
	case NT_ARM_HW_WATCH:
		if ((type & HW_BREAKPOINT_RW) != type)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	attr->bp_len	= len;
	attr->bp_type	= type;
	attr->bp_addr	+= offset;

	return 0;
}

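/*
 * Build the resource info word reported as the first element of an
 * NT_ARM_HW_BREAK/NT_ARM_HW_WATCH regset: bits [15:8] hold the debug
 * architecture version and bits [7:0] the number of available slots.
 */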
static int ptrace_hbp_get_resource_info(unsigned int note_type, u32 *info)
{
	u8 num;
	u32 reg = 0;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		num = hw_breakpoint_slots(TYPE_INST);
		break;
	case NT_ARM_HW_WATCH:
		num = hw_breakpoint_slots(TYPE_DATA);
		break;
	default:
		return -EINVAL;
	}

	reg |= debug_monitors_arch();
	reg <<= 8;
	reg |= num;

	*info = reg;
	return 0;
}

static int ptrace_hbp_get_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u32 *ctrl)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0;
	return 0;
}

static int ptrace_hbp_get_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u64 *addr)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*addr = bp ? counter_arch_bp(bp)->address : 0;
	return 0;
}

static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
							struct task_struct *tsk,
							unsigned long idx)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (!bp)
		bp = ptrace_hbp_create(note_type, tsk, idx);

	return bp;
}

static int ptrace_hbp_set_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u32 uctrl)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;
	struct arch_hw_breakpoint_ctrl ctrl;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp)) {
		err = PTR_ERR(bp);
		return err;
	}

	attr = bp->attr;
	decode_ctrl_reg(uctrl, &ctrl);
	err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
	if (err)
		return err;

	return modify_user_hw_breakpoint(bp, &attr);
}

static int ptrace_hbp_set_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u64 addr)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp)) {
		err = PTR_ERR(bp);
		return err;
	}

	attr = bp->attr;
	attr.bp_addr = addr;
	err = modify_user_hw_breakpoint(bp, &attr);
	return err;
}

#define PTRACE_HBP_ADDR_SZ	sizeof(u64)
#define PTRACE_HBP_CTRL_SZ	sizeof(u32)
#define PTRACE_HBP_PAD_SZ	sizeof(u32)

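/*
 * The constants above mirror the layout of the exported struct
 * user_hwdebug_state: a u32 resource info word plus a u32 of padding,
 * followed by one (u64 addr, u32 ctrl, u32 pad) tuple per slot.
 */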
static int hw_break_get(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			void *kbuf, void __user *ubuf)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0, offset, limit;
	u32 info, ctrl;
	u64 addr;

	/* Resource info */
	ret = ptrace_hbp_get_resource_info(note_type, &info);
	if (ret)
		return ret;

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &info, 0,
				  sizeof(info));
	if (ret)
		return ret;

	/* Pad */
	offset = offsetof(struct user_hwdebug_state, pad);
	ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, offset,
				       offset + PTRACE_HBP_PAD_SZ);
	if (ret)
		return ret;

	/* (address, ctrl) registers */
	offset = offsetof(struct user_hwdebug_state, dbg_regs);
	limit = regset->n * regset->size;
	while (count && offset < limit) {
		ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
		if (ret)
			return ret;
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &addr,
					  offset, offset + PTRACE_HBP_ADDR_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_ADDR_SZ;

		ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl);
		if (ret)
			return ret;
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &ctrl,
					  offset, offset + PTRACE_HBP_CTRL_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_CTRL_SZ;

		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       offset,
					       offset + PTRACE_HBP_PAD_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_PAD_SZ;
		idx++;
	}

	return 0;
}

static int hw_break_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0, offset, limit;
	u32 ctrl;
	u64 addr;

	/* Resource info and pad */
	offset = offsetof(struct user_hwdebug_state, dbg_regs);
	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);
	if (ret)
		return ret;

	/* (address, ctrl) registers */
	limit = regset->n * regset->size;
	while (count && offset < limit) {
		if (count < PTRACE_HBP_ADDR_SZ)
			return -EINVAL;
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
					 offset, offset + PTRACE_HBP_ADDR_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_addr(note_type, target, idx, addr);
		if (ret)
			return ret;
		offset += PTRACE_HBP_ADDR_SZ;

		if (!count)
			break;
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
					 offset, offset + PTRACE_HBP_CTRL_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl);
		if (ret)
			return ret;
		offset += PTRACE_HBP_CTRL_SZ;

		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						offset,
						offset + PTRACE_HBP_PAD_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_PAD_SZ;
		idx++;
	}

	return 0;
}
#endif	/* CONFIG_HAVE_HW_BREAKPOINT */

static int gpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	struct user_pt_regs *uregs = &task_pt_regs(target)->user_regs;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, -1);
}

static int gpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_pt_regs newregs = task_pt_regs(target)->user_regs;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
	if (ret)
		return ret;

	if (!valid_user_regs(&newregs, target))
		return -EINVAL;

	task_pt_regs(target)->user_regs = newregs;
	return 0;
}

static int fpr_active(struct task_struct *target, const struct user_regset *regset)
{
	if (!system_supports_fpsimd())
		return -ENODEV;
	return regset->n;
}

/*
 * TODO: update fp accessors for lazy context switching (sync/flush hwstate)
 */
static int __fpr_get(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     void *kbuf, void __user *ubuf, unsigned int start_pos)
{
	struct user_fpsimd_state *uregs;

	sve_sync_to_fpsimd(target);

	uregs = &target->thread.uw.fpsimd_state;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs,
				   start_pos, start_pos + sizeof(*uregs));
}

static int fpr_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	if (!system_supports_fpsimd())
		return -EINVAL;

	if (target == current)
		fpsimd_preserve_current_state();

	return __fpr_get(target, regset, pos, count, kbuf, ubuf, 0);
}

static int __fpr_set(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf,
		     unsigned int start_pos)
{
	int ret;
	struct user_fpsimd_state newstate;

	/*
	 * Ensure target->thread.uw.fpsimd_state is up to date, so that a
	 * short copyin can't resurrect stale data.
	 */
	sve_sync_to_fpsimd(target);

	newstate = target->thread.uw.fpsimd_state;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate,
				 start_pos, start_pos + sizeof(newstate));
	if (ret)
		return ret;

	target->thread.uw.fpsimd_state = newstate;

	return ret;
}

static int fpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;

	if (!system_supports_fpsimd())
		return -EINVAL;

	ret = __fpr_set(target, regset, pos, count, kbuf, ubuf, 0);
	if (ret)
		return ret;

	sve_sync_from_fpsimd_zeropad(target);
	fpsimd_flush_task_state(target);

	return ret;
}

static int tls_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	unsigned long *tls = &target->thread.uw.tp_value;

	if (target == current)
		tls_preserve_current_state();

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, tls, 0, -1);
}

static int tls_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	unsigned long tls = target->thread.uw.tp_value;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
	if (ret)
		return ret;

	target->thread.uw.tp_value = tls;
	return ret;
}

static int system_call_get(struct task_struct *target,
			   const struct user_regset *regset,
			   unsigned int pos, unsigned int count,
			   void *kbuf, void __user *ubuf)
{
	int syscallno = task_pt_regs(target)->syscallno;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &syscallno, 0, -1);
}

static int system_call_set(struct task_struct *target,
			   const struct user_regset *regset,
			   unsigned int pos, unsigned int count,
			   const void *kbuf, const void __user *ubuf)
{
	int syscallno = task_pt_regs(target)->syscallno;
	int ret;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &syscallno, 0, -1);
	if (ret)
		return ret;

	task_pt_regs(target)->syscallno = syscallno;
	return ret;
}

#ifdef CONFIG_ARM64_SVE

static void sve_init_header_from_task(struct user_sve_header *header,
				      struct task_struct *target)
{
	unsigned int vq;

	memset(header, 0, sizeof(*header));

	header->flags = test_tsk_thread_flag(target, TIF_SVE) ?
		SVE_PT_REGS_SVE : SVE_PT_REGS_FPSIMD;
	if (test_tsk_thread_flag(target, TIF_SVE_VL_INHERIT))
		header->flags |= SVE_PT_VL_INHERIT;

	header->vl = target->thread.sve_vl;
	vq = sve_vq_from_vl(header->vl);

	header->max_vl = sve_max_vl;
	header->size = SVE_PT_SIZE(vq, header->flags);
	header->max_size = SVE_PT_SIZE(sve_vq_from_vl(header->max_vl),
				      SVE_PT_REGS_SVE);
}

static unsigned int sve_size_from_header(struct user_sve_header const *header)
{
	return ALIGN(header->size, SVE_VQ_BYTES);
}

static unsigned int sve_get_size(struct task_struct *target,
				 const struct user_regset *regset)
{
	struct user_sve_header header;

	if (!system_supports_sve())
		return 0;

	sve_init_header_from_task(&header, target);
	return sve_size_from_header(&header);
}

static int sve_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	int ret;
	struct user_sve_header header;
	unsigned int vq;
	unsigned long start, end;

	if (!system_supports_sve())
		return -EINVAL;

	/* Header */
	sve_init_header_from_task(&header, target);
	vq = sve_vq_from_vl(header.vl);

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &header,
				  0, sizeof(header));
	if (ret)
		return ret;

	if (target == current)
		fpsimd_preserve_current_state();

	/* Registers: FPSIMD-only case */

	BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
	if ((header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD)
		return __fpr_get(target, regset, pos, count, kbuf, ubuf,
				 SVE_PT_FPSIMD_OFFSET);

	/* Otherwise: full SVE case */

	BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));
	start = SVE_PT_SVE_OFFSET;
	end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  target->thread.sve_state,
				  start, end);
	if (ret)
		return ret;

	start = end;
	end = SVE_PT_SVE_FPSR_OFFSET(vq);
	ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
				       start, end);
	if (ret)
		return ret;

	/*
	 * Copy fpsr and fpcr, which must follow contiguously in
	 * struct fpsimd_state:
	 */
	start = end;
	end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &target->thread.uw.fpsimd_state.fpsr,
				  start, end);
	if (ret)
		return ret;

	start = end;
	end = sve_size_from_header(&header);
	return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					start, end);
}
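
/*
 * Illustrative userspace usage (a sketch, assuming a stopped tracee 'pid'):
 * read the header first to learn the payload size and layout, then fetch
 * the whole regset:
 *
 *	struct user_sve_header hdr;
 *	struct iovec iov = { .iov_base = &hdr, .iov_len = sizeof(hdr) };
 *
 *	ptrace(PTRACE_GETREGSET, pid, NT_ARM_SVE, &iov);
 *	(then repeat with iov_len = hdr.size and a suitably sized buffer)
 */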

static int sve_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_sve_header header;
	unsigned int vq;
	unsigned long start, end;

	if (!system_supports_sve())
		return -EINVAL;

	/* Header */
	if (count < sizeof(header))
		return -EINVAL;
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &header,
				 0, sizeof(header));
	if (ret)
		goto out;

	/*
	 * Apart from SVE_PT_REGS_MASK, all SVE_PT_* flags are consumed by
	 * sve_set_vector_length(), which will also validate them for us:
	 */
	ret = sve_set_vector_length(target, header.vl,
		((unsigned long)header.flags & ~SVE_PT_REGS_MASK) << 16);
	if (ret)
		goto out;

	/* Actual VL set may be less than the user asked for: */
	vq = sve_vq_from_vl(target->thread.sve_vl);

	/* Registers: FPSIMD-only case */

	BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
	if ((header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD) {
		ret = __fpr_set(target, regset, pos, count, kbuf, ubuf,
				SVE_PT_FPSIMD_OFFSET);
		clear_tsk_thread_flag(target, TIF_SVE);
		goto out;
	}

	/* Otherwise: full SVE case */

	/*
	 * If setting a different VL from the requested VL and there is
	 * register data, the data layout will be wrong: don't even
	 * try to set the registers in this case.
	 */
	if (count && vq != sve_vq_from_vl(header.vl)) {
		ret = -EIO;
		goto out;
	}

	sve_alloc(target);

	/*
	 * Ensure target->thread.sve_state is up to date with target's
	 * FPSIMD regs, so that a short copyin leaves trailing registers
	 * unmodified.
	 */
	fpsimd_sync_to_sve(target);
	set_tsk_thread_flag(target, TIF_SVE);

	BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));
	start = SVE_PT_SVE_OFFSET;
	end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 target->thread.sve_state,
				 start, end);
	if (ret)
		goto out;

	start = end;
	end = SVE_PT_SVE_FPSR_OFFSET(vq);
	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					start, end);
	if (ret)
		goto out;

	/*
	 * Copy fpsr and fpcr, which must follow contiguously in
	 * struct fpsimd_state:
	 */
	start = end;
	end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.uw.fpsimd_state.fpsr,
				 start, end);

out:
	fpsimd_flush_task_state(target);
	return ret;
}

#endif /* CONFIG_ARM64_SVE */

#ifdef CONFIG_ARM64_PTR_AUTH
static int pac_mask_get(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			void *kbuf, void __user *ubuf)
{
	/*
	 * The PAC bits can differ across data and instruction pointers
	 * depending on TCR_EL1.TBID*, which we may make use of in future, so
	 * we expose separate masks.
	 */
	unsigned long mask = ptrauth_user_pac_mask();
	struct user_pac_mask uregs = {
		.data_mask = mask,
		.insn_mask = mask,
	};

	if (!system_supports_address_auth())
		return -EINVAL;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &uregs, 0, -1);
}

#ifdef CONFIG_CHECKPOINT_RESTORE
static __uint128_t pac_key_to_user(const struct ptrauth_key *key)
{
	return (__uint128_t)key->hi << 64 | key->lo;
}

static struct ptrauth_key pac_key_from_user(__uint128_t ukey)
{
	struct ptrauth_key key = {
		.lo = (unsigned long)ukey,
		.hi = (unsigned long)(ukey >> 64),
	};

	return key;
}

static void pac_address_keys_to_user(struct user_pac_address_keys *ukeys,
				     const struct ptrauth_keys_user *keys)
{
	ukeys->apiakey = pac_key_to_user(&keys->apia);
	ukeys->apibkey = pac_key_to_user(&keys->apib);
	ukeys->apdakey = pac_key_to_user(&keys->apda);
	ukeys->apdbkey = pac_key_to_user(&keys->apdb);
}

static void pac_address_keys_from_user(struct ptrauth_keys_user *keys,
				       const struct user_pac_address_keys *ukeys)
{
	keys->apia = pac_key_from_user(ukeys->apiakey);
	keys->apib = pac_key_from_user(ukeys->apibkey);
	keys->apda = pac_key_from_user(ukeys->apdakey);
	keys->apdb = pac_key_from_user(ukeys->apdbkey);
}

static int pac_address_keys_get(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				void *kbuf, void __user *ubuf)
{
	struct ptrauth_keys_user *keys = &target->thread.keys_user;
	struct user_pac_address_keys user_keys;

	if (!system_supports_address_auth())
		return -EINVAL;

	pac_address_keys_to_user(&user_keys, keys);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &user_keys, 0, -1);
}

static int pac_address_keys_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	struct ptrauth_keys_user *keys = &target->thread.keys_user;
	struct user_pac_address_keys user_keys;
	int ret;

	if (!system_supports_address_auth())
		return -EINVAL;

	pac_address_keys_to_user(&user_keys, keys);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &user_keys, 0, -1);
	if (ret)
		return ret;
	pac_address_keys_from_user(keys, &user_keys);

	return 0;
}

static void pac_generic_keys_to_user(struct user_pac_generic_keys *ukeys,
				     const struct ptrauth_keys_user *keys)
{
	ukeys->apgakey = pac_key_to_user(&keys->apga);
}

static void pac_generic_keys_from_user(struct ptrauth_keys_user *keys,
				       const struct user_pac_generic_keys *ukeys)
{
	keys->apga = pac_key_from_user(ukeys->apgakey);
}

static int pac_generic_keys_get(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				void *kbuf, void __user *ubuf)
{
	struct ptrauth_keys_user *keys = &target->thread.keys_user;
	struct user_pac_generic_keys user_keys;

	if (!system_supports_generic_auth())
		return -EINVAL;

	pac_generic_keys_to_user(&user_keys, keys);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &user_keys, 0, -1);
}

static int pac_generic_keys_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	struct ptrauth_keys_user *keys = &target->thread.keys_user;
	struct user_pac_generic_keys user_keys;
	int ret;

	if (!system_supports_generic_auth())
		return -EINVAL;

	pac_generic_keys_to_user(&user_keys, keys);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &user_keys, 0, -1);
	if (ret)
		return ret;
	pac_generic_keys_from_user(keys, &user_keys);

	return 0;
}
#endif /* CONFIG_CHECKPOINT_RESTORE */
#endif /* CONFIG_ARM64_PTR_AUTH */

enum aarch64_regset {
	REGSET_GPR,
	REGSET_FPR,
	REGSET_TLS,
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	REGSET_HW_BREAK,
	REGSET_HW_WATCH,
#endif
	REGSET_SYSTEM_CALL,
#ifdef CONFIG_ARM64_SVE
	REGSET_SVE,
#endif
#ifdef CONFIG_ARM64_PTR_AUTH
	REGSET_PAC_MASK,
#ifdef CONFIG_CHECKPOINT_RESTORE
	REGSET_PACA_KEYS,
	REGSET_PACG_KEYS,
#endif
#endif
};

static const struct user_regset aarch64_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_pt_regs) / sizeof(u64),
		.size = sizeof(u64),
		.align = sizeof(u64),
		.get = gpr_get,
		.set = gpr_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_fpsimd_state) / sizeof(u32),
		/*
		 * We pretend we have 32-bit registers because the fpsr and
		 * fpcr are 32 bits wide.
		 */
		.size = sizeof(u32),
		.align = sizeof(u32),
		.active = fpr_active,
		.get = fpr_get,
		.set = fpr_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_ARM_TLS,
		.n = 1,
		.size = sizeof(void *),
		.align = sizeof(void *),
		.get = tls_get,
		.set = tls_set,
	},
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		.core_note_type = NT_ARM_HW_BREAK,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		.core_note_type = NT_ARM_HW_WATCH,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
#endif
	[REGSET_SYSTEM_CALL] = {
		.core_note_type = NT_ARM_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(int),
		.align = sizeof(int),
		.get = system_call_get,
		.set = system_call_set,
	},
#ifdef CONFIG_ARM64_SVE
	[REGSET_SVE] = { /* Scalable Vector Extension */
		.core_note_type = NT_ARM_SVE,
		.n = DIV_ROUND_UP(SVE_PT_SIZE(SVE_VQ_MAX, SVE_PT_REGS_SVE),
				  SVE_VQ_BYTES),
		.size = SVE_VQ_BYTES,
		.align = SVE_VQ_BYTES,
		.get = sve_get,
		.set = sve_set,
		.get_size = sve_get_size,
	},
#endif
#ifdef CONFIG_ARM64_PTR_AUTH
	[REGSET_PAC_MASK] = {
		.core_note_type = NT_ARM_PAC_MASK,
		.n = sizeof(struct user_pac_mask) / sizeof(u64),
		.size = sizeof(u64),
		.align = sizeof(u64),
		.get = pac_mask_get,
		/* this cannot be set dynamically */
	},
#ifdef CONFIG_CHECKPOINT_RESTORE
	[REGSET_PACA_KEYS] = {
		.core_note_type = NT_ARM_PACA_KEYS,
		.n = sizeof(struct user_pac_address_keys) / sizeof(__uint128_t),
		.size = sizeof(__uint128_t),
		.align = sizeof(__uint128_t),
		.get = pac_address_keys_get,
		.set = pac_address_keys_set,
	},
	[REGSET_PACG_KEYS] = {
		.core_note_type = NT_ARM_PACG_KEYS,
		.n = sizeof(struct user_pac_generic_keys) / sizeof(__uint128_t),
		.size = sizeof(__uint128_t),
		.align = sizeof(__uint128_t),
		.get = pac_generic_keys_get,
		.set = pac_generic_keys_set,
	},
#endif
#endif
};

static const struct user_regset_view user_aarch64_view = {
	.name = "aarch64", .e_machine = EM_AARCH64,
	.regsets = aarch64_regsets, .n = ARRAY_SIZE(aarch64_regsets)
};
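
/*
 * Illustrative userspace usage (a sketch, assuming a stopped tracee 'pid'):
 * each regset above is addressed by its core note type through
 * PTRACE_GETREGSET/PTRACE_SETREGSET and an iovec, e.g. for the GPRs:
 *
 *	struct user_pt_regs gregs;
 *	struct iovec iov = { .iov_base = &gregs, .iov_len = sizeof(gregs) };
 *
 *	ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);
 */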

#ifdef CONFIG_COMPAT
enum compat_regset {
	REGSET_COMPAT_GPR,
	REGSET_COMPAT_VFP,
};

static int compat_gpr_get(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  void *kbuf, void __user *ubuf)
{
	int ret = 0;
	unsigned int i, start, num_regs;

	/* Calculate the number of AArch32 registers contained in count */
	num_regs = count / regset->size;

	/* Convert pos into a register number */
	start = pos / regset->size;

	if (start + num_regs > regset->n)
		return -EIO;

	for (i = 0; i < num_regs; ++i) {
		unsigned int idx = start + i;
		compat_ulong_t reg;

		switch (idx) {
		case 15:
			reg = task_pt_regs(target)->pc;
			break;
		case 16:
			reg = task_pt_regs(target)->pstate;
			reg = pstate_to_compat_psr(reg);
			break;
		case 17:
			reg = task_pt_regs(target)->orig_x0;
			break;
		default:
			reg = task_pt_regs(target)->regs[idx];
		}

		if (kbuf) {
			memcpy(kbuf, &reg, sizeof(reg));
			kbuf += sizeof(reg);
		} else {
			ret = copy_to_user(ubuf, &reg, sizeof(reg));
			if (ret) {
				ret = -EFAULT;
				break;
			}

			ubuf += sizeof(reg);
		}
	}

	return ret;
}

static int compat_gpr_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct pt_regs newregs;
	int ret = 0;
	unsigned int i, start, num_regs;

	/* Calculate the number of AArch32 registers contained in count */
	num_regs = count / regset->size;

	/* Convert pos into a register number */
	start = pos / regset->size;

	if (start + num_regs > regset->n)
		return -EIO;

	newregs = *task_pt_regs(target);

	for (i = 0; i < num_regs; ++i) {
		unsigned int idx = start + i;
		compat_ulong_t reg;

		if (kbuf) {
			memcpy(&reg, kbuf, sizeof(reg));
			kbuf += sizeof(reg);
		} else {
			ret = copy_from_user(&reg, ubuf, sizeof(reg));
			if (ret) {
				ret = -EFAULT;
				break;
			}

			ubuf += sizeof(reg);
		}

		switch (idx) {
		case 15:
			newregs.pc = reg;
			break;
		case 16:
			reg = compat_psr_to_pstate(reg);
			newregs.pstate = reg;
			break;
		case 17:
			newregs.orig_x0 = reg;
			break;
		default:
			newregs.regs[idx] = reg;
		}
	}

	if (valid_user_regs(&newregs.user_regs, target))
		*task_pt_regs(target) = newregs;
	else
		ret = -EINVAL;

	return ret;
}

static int compat_vfp_get(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  void *kbuf, void __user *ubuf)
{
	struct user_fpsimd_state *uregs;
	compat_ulong_t fpscr;
	int ret, vregs_end_pos;

	if (!system_supports_fpsimd())
		return -EINVAL;

	uregs = &target->thread.uw.fpsimd_state;

	if (target == current)
		fpsimd_preserve_current_state();

	/*
	 * The VFP registers are packed into the fpsimd_state, so they all sit
	 * nicely together for us. We just need to create the fpscr separately.
	 */
	vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t);
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs,
				  0, vregs_end_pos);

	if (count && !ret) {
		fpscr = (uregs->fpsr & VFP_FPSCR_STAT_MASK) |
			(uregs->fpcr & VFP_FPSCR_CTRL_MASK);

		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &fpscr,
					  vregs_end_pos, VFP_STATE_SIZE);
	}

	return ret;
}

static int compat_vfp_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct user_fpsimd_state *uregs;
	compat_ulong_t fpscr;
	int ret, vregs_end_pos;

	if (!system_supports_fpsimd())
		return -EINVAL;

	uregs = &target->thread.uw.fpsimd_state;

	vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
				 vregs_end_pos);

	if (count && !ret) {
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpscr,
					 vregs_end_pos, VFP_STATE_SIZE);
		if (!ret) {
			uregs->fpsr = fpscr & VFP_FPSCR_STAT_MASK;
			uregs->fpcr = fpscr & VFP_FPSCR_CTRL_MASK;
		}
	}

	fpsimd_flush_task_state(target);
	return ret;
}

static int compat_tls_get(struct task_struct *target,
			  const struct user_regset *regset, unsigned int pos,
			  unsigned int count, void *kbuf, void __user *ubuf)
{
	compat_ulong_t tls = (compat_ulong_t)target->thread.uw.tp_value;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
}

static int compat_tls_set(struct task_struct *target,
			  const struct user_regset *regset, unsigned int pos,
			  unsigned int count, const void *kbuf,
			  const void __user *ubuf)
{
	int ret;
	compat_ulong_t tls = target->thread.uw.tp_value;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
	if (ret)
		return ret;

	target->thread.uw.tp_value = tls;
	return ret;
}

static const struct user_regset aarch32_regsets[] = {
	[REGSET_COMPAT_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = COMPAT_ELF_NGREG,
		.size = sizeof(compat_elf_greg_t),
		.align = sizeof(compat_elf_greg_t),
		.get = compat_gpr_get,
		.set = compat_gpr_set
	},
	[REGSET_COMPAT_VFP] = {
		.core_note_type = NT_ARM_VFP,
		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.active = fpr_active,
		.get = compat_vfp_get,
		.set = compat_vfp_set
	},
};

static const struct user_regset_view user_aarch32_view = {
	.name = "aarch32", .e_machine = EM_ARM,
	.regsets = aarch32_regsets, .n = ARRAY_SIZE(aarch32_regsets)
};

static const struct user_regset aarch32_ptrace_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = COMPAT_ELF_NGREG,
		.size = sizeof(compat_elf_greg_t),
		.align = sizeof(compat_elf_greg_t),
		.get = compat_gpr_get,
		.set = compat_gpr_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_ARM_VFP,
		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.get = compat_vfp_get,
		.set = compat_vfp_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_ARM_TLS,
		.n = 1,
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.get = compat_tls_get,
		.set = compat_tls_set,
	},
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		.core_note_type = NT_ARM_HW_BREAK,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		.core_note_type = NT_ARM_HW_WATCH,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
#endif
	[REGSET_SYSTEM_CALL] = {
		.core_note_type = NT_ARM_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(int),
		.align = sizeof(int),
		.get = system_call_get,
		.set = system_call_set,
	},
};

static const struct user_regset_view user_aarch32_ptrace_view = {
	.name = "aarch32", .e_machine = EM_ARM,
	.regsets = aarch32_ptrace_regsets, .n = ARRAY_SIZE(aarch32_ptrace_regsets)
};

static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off,
				   compat_ulong_t __user *ret)
{
	compat_ulong_t tmp;

	if (off & 3)
		return -EIO;

	if (off == COMPAT_PT_TEXT_ADDR)
		tmp = tsk->mm->start_code;
	else if (off == COMPAT_PT_DATA_ADDR)
		tmp = tsk->mm->start_data;
	else if (off == COMPAT_PT_TEXT_END_ADDR)
		tmp = tsk->mm->end_code;
	else if (off < sizeof(compat_elf_gregset_t))
		return copy_regset_to_user(tsk, &user_aarch32_view,
					   REGSET_COMPAT_GPR, off,
					   sizeof(compat_ulong_t), ret);
	else if (off >= COMPAT_USER_SZ)
		return -EIO;
	else
		tmp = 0;

	return put_user(tmp, ret);
}

static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
				    compat_ulong_t val)
{
	int ret;
	mm_segment_t old_fs = get_fs();

	if (off & 3 || off >= COMPAT_USER_SZ)
		return -EIO;

	if (off >= sizeof(compat_elf_gregset_t))
		return 0;

	set_fs(KERNEL_DS);
	ret = copy_regset_from_user(tsk, &user_aarch32_view,
				    REGSET_COMPAT_GPR, off,
				    sizeof(compat_ulong_t),
				    &val);
	set_fs(old_fs);

	return ret;
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT

/*
 * Convert a virtual register number into an index for a thread_info
 * breakpoint array. Breakpoints are identified using positive numbers
 * whilst watchpoints are negative. The registers are laid out as pairs
 * of (address, control), each pair mapping to a unique hw_breakpoint struct.
 * Register 0 is reserved for describing resource information.
 */
static int compat_ptrace_hbp_num_to_idx(compat_long_t num)
{
	return (abs(num) - 1) >> 1;
}
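
/*
 * For example: register numbers 1 (address) and 2 (control) both describe
 * breakpoint 0 and map to idx 0, while -1 and -2 describe watchpoint 0.
 */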

static int compat_ptrace_hbp_get_resource_info(u32 *kdata)
{
	u8 num_brps, num_wrps, debug_arch, wp_len;
	u32 reg = 0;

	num_brps	= hw_breakpoint_slots(TYPE_INST);
	num_wrps	= hw_breakpoint_slots(TYPE_DATA);

	debug_arch	= debug_monitors_arch();
	wp_len		= 8;
	reg		|= debug_arch;
	reg		<<= 8;
	reg		|= wp_len;
	reg		<<= 8;
	reg		|= num_wrps;
	reg		<<= 8;
	reg		|= num_brps;

	*kdata = reg;
	return 0;
}
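
/*
 * The word built above packs, from most to least significant byte: the
 * debug architecture version, the watchpoint length (8 bytes), the number
 * of watchpoint slots and the number of breakpoint slots.
 */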

static int compat_ptrace_hbp_get(unsigned int note_type,
				 struct task_struct *tsk,
				 compat_long_t num,
				 u32 *kdata)
{
	u64 addr = 0;
	u32 ctrl = 0;

	int err, idx = compat_ptrace_hbp_num_to_idx(num);

	if (num & 1) {
		err = ptrace_hbp_get_addr(note_type, tsk, idx, &addr);
		*kdata = (u32)addr;
	} else {
		err = ptrace_hbp_get_ctrl(note_type, tsk, idx, &ctrl);
		*kdata = ctrl;
	}

	return err;
}

static int compat_ptrace_hbp_set(unsigned int note_type,
				 struct task_struct *tsk,
				 compat_long_t num,
				 u32 *kdata)
{
	u64 addr;
	u32 ctrl;

	int err, idx = compat_ptrace_hbp_num_to_idx(num);

	if (num & 1) {
		addr = *kdata;
		err = ptrace_hbp_set_addr(note_type, tsk, idx, addr);
	} else {
		ctrl = *kdata;
		err = ptrace_hbp_set_ctrl(note_type, tsk, idx, ctrl);
	}

	return err;
}

static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num,
				    compat_ulong_t __user *data)
{
	int ret;
	u32 kdata;

	/* Watchpoint */
	if (num < 0) {
		ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata);
	/* Resource info */
	} else if (num == 0) {
		ret = compat_ptrace_hbp_get_resource_info(&kdata);
	/* Breakpoint */
	} else {
		ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata);
	}

	if (!ret)
		ret = put_user(kdata, data);

	return ret;
}

static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num,
				    compat_ulong_t __user *data)
{
	int ret;
	u32 kdata = 0;

	if (num == 0)
		return 0;

	ret = get_user(kdata, data);
	if (ret)
		return ret;

	if (num < 0)
		ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata);
	else
		ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata);

	return ret;
}
#endif	/* CONFIG_HAVE_HW_BREAKPOINT */

long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	void __user *datap = compat_ptr(data);
	int ret;

	switch (request) {
		case PTRACE_PEEKUSR:
			ret = compat_ptrace_read_user(child, addr, datap);
			break;

		case PTRACE_POKEUSR:
			ret = compat_ptrace_write_user(child, addr, data);
			break;

		case COMPAT_PTRACE_GETREGS:
			ret = copy_regset_to_user(child,
						  &user_aarch32_view,
						  REGSET_COMPAT_GPR,
						  0, sizeof(compat_elf_gregset_t),
						  datap);
			break;

		case COMPAT_PTRACE_SETREGS:
			ret = copy_regset_from_user(child,
						    &user_aarch32_view,
						    REGSET_COMPAT_GPR,
						    0, sizeof(compat_elf_gregset_t),
						    datap);
			break;

		case COMPAT_PTRACE_GET_THREAD_AREA:
			ret = put_user((compat_ulong_t)child->thread.uw.tp_value,
				       (compat_ulong_t __user *)datap);
			break;

		case COMPAT_PTRACE_SET_SYSCALL:
			task_pt_regs(child)->syscallno = data;
			ret = 0;
			break;

		case COMPAT_PTRACE_GETVFPREGS:
			ret = copy_regset_to_user(child,
						  &user_aarch32_view,
						  REGSET_COMPAT_VFP,
						  0, VFP_STATE_SIZE,
						  datap);
			break;

		case COMPAT_PTRACE_SETVFPREGS:
			ret = copy_regset_from_user(child,
						    &user_aarch32_view,
						    REGSET_COMPAT_VFP,
						    0, VFP_STATE_SIZE,
						    datap);
			break;

#ifdef CONFIG_HAVE_HW_BREAKPOINT
		case COMPAT_PTRACE_GETHBPREGS:
			ret = compat_ptrace_gethbpregs(child, addr, datap);
			break;

		case COMPAT_PTRACE_SETHBPREGS:
			ret = compat_ptrace_sethbpregs(child, addr, datap);
			break;
#endif

		default:
			ret = compat_ptrace_request(child, request, addr,
						    data);
			break;
	}

	return ret;
}
#endif /* CONFIG_COMPAT */

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
	/*
	 * Core dumping of 32-bit tasks or compat ptrace requests must use the
	 * user_aarch32_view compatible with arm32. Native ptrace requests on
	 * 32-bit children use an extended user_aarch32_ptrace_view to allow
	 * access to the TLS register.
	 */
	if (is_compat_task())
		return &user_aarch32_view;
	else if (is_compat_thread(task_thread_info(task)))
		return &user_aarch32_ptrace_view;
#endif
	return &user_aarch64_view;
}

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	return ptrace_request(child, request, addr, data);
}

enum ptrace_syscall_dir {
	PTRACE_SYSCALL_ENTER = 0,
	PTRACE_SYSCALL_EXIT,
};

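/*
 * The tracer distinguishes a syscall-entry stop from a syscall-exit stop by
 * the value temporarily written to the scratch register below: 0 on entry
 * (PTRACE_SYSCALL_ENTER) and 1 on exit (PTRACE_SYSCALL_EXIT). The original
 * register value is restored afterwards, so the tracee never observes it.
 */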
static void tracehook_report_syscall(struct pt_regs *regs,
				     enum ptrace_syscall_dir dir)
{
	int regno;
	unsigned long saved_reg;

	/*
	 * A scratch register (ip(r12) on AArch32, x7 on AArch64) is
	 * used to denote syscall entry/exit:
	 */
	regno = (is_compat_task() ? 12 : 7);
	saved_reg = regs->regs[regno];
	regs->regs[regno] = dir;

	if (dir == PTRACE_SYSCALL_EXIT)
		tracehook_report_syscall_exit(regs, 0);
	else if (tracehook_report_syscall_entry(regs))
		forget_syscall(regs);

	regs->regs[regno] = saved_reg;
}

int syscall_trace_enter(struct pt_regs *regs)
{
	unsigned long flags = READ_ONCE(current_thread_info()->flags);

	if (flags & (_TIF_SYSCALL_EMU | _TIF_SYSCALL_TRACE)) {
		tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
		if (!in_syscall(regs) || (flags & _TIF_SYSCALL_EMU))
			return -1;
	}

	/* Do the secure computing after ptrace; failures should be fast. */
	if (secure_computing() == -1)
		return -1;

	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_enter(regs, regs->syscallno);

	audit_syscall_entry(regs->syscallno, regs->orig_x0, regs->regs[1],
			    regs->regs[2], regs->regs[3]);

	return regs->syscallno;
}

void syscall_trace_exit(struct pt_regs *regs)
{
	audit_syscall_exit(regs);

	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_exit(regs, regs_return_value(regs));

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);

	rseq_syscall(regs);
}

/*
 * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487D.a.
 * We permit userspace to set SSBS (AArch64 bit 12, AArch32 bit 23) which is
 * not described in ARM DDI 0487D.a.
 * We treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may
 * be allocated an EL0 meaning in future.
 * Userspace cannot use these until they have an architectural meaning.
 * Note that this follows the SPSR_ELx format, not the AArch32 PSR format.
 * We also reserve IL for the kernel; SS is handled dynamically.
 */
#define SPSR_EL1_AARCH64_RES0_BITS \
	(GENMASK_ULL(63, 32) | GENMASK_ULL(27, 25) | GENMASK_ULL(23, 22) | \
	 GENMASK_ULL(20, 13) | GENMASK_ULL(5, 5))
#define SPSR_EL1_AARCH32_RES0_BITS \
	(GENMASK_ULL(63, 32) | GENMASK_ULL(22, 22) | GENMASK_ULL(20, 20))

static int valid_compat_regs(struct user_pt_regs *regs)
{
	regs->pstate &= ~SPSR_EL1_AARCH32_RES0_BITS;

	if (!system_supports_mixed_endian_el0()) {
		if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
			regs->pstate |= PSR_AA32_E_BIT;
		else
			regs->pstate &= ~PSR_AA32_E_BIT;
	}

	if (user_mode(regs) && (regs->pstate & PSR_MODE32_BIT) &&
	    (regs->pstate & PSR_AA32_A_BIT) == 0 &&
	    (regs->pstate & PSR_AA32_I_BIT) == 0 &&
	    (regs->pstate & PSR_AA32_F_BIT) == 0) {
		return 1;
	}

	/*
	 * Force PSR to a valid 32-bit EL0t, preserving the same bits as
	 * arch/arm.
	 */
	regs->pstate &= PSR_AA32_N_BIT | PSR_AA32_Z_BIT |
			PSR_AA32_C_BIT | PSR_AA32_V_BIT |
			PSR_AA32_Q_BIT | PSR_AA32_IT_MASK |
			PSR_AA32_GE_MASK | PSR_AA32_E_BIT |
			PSR_AA32_T_BIT;
	regs->pstate |= PSR_MODE32_BIT;

	return 0;
}

static int valid_native_regs(struct user_pt_regs *regs)
{
	regs->pstate &= ~SPSR_EL1_AARCH64_RES0_BITS;

	if (user_mode(regs) && !(regs->pstate & PSR_MODE32_BIT) &&
	    (regs->pstate & PSR_D_BIT) == 0 &&
	    (regs->pstate & PSR_A_BIT) == 0 &&
	    (regs->pstate & PSR_I_BIT) == 0 &&
	    (regs->pstate & PSR_F_BIT) == 0) {
		return 1;
	}

	/* Force PSR to a valid 64-bit EL0t */
	regs->pstate &= PSR_N_BIT | PSR_Z_BIT | PSR_C_BIT | PSR_V_BIT;

	return 0;
}

/*
 * Are the current registers suitable for user mode? (used to maintain
 * security in signal handlers)
 */
int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task)
{
	if (!test_tsk_thread_flag(task, TIF_SINGLESTEP))
		regs->pstate &= ~DBG_SPSR_SS;

	if (is_compat_thread(task_thread_info(task)))
		return valid_compat_regs(regs);
	else
		return valid_native_regs(regs);
}
1945