// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/ptrace.c
 *
 * By Ross Biro 1/23/92
 * edited by Linus Torvalds
 * ARM modifications Copyright (C) 2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/audit.h>
#include <linux/compat.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/nospec.h>
#include <linux/smp.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/seccomp.h>
#include <linux/security.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/regset.h>
#include <linux/elf.h>

#include <asm/compat.h>
#include <asm/cpufeature.h>
#include <asm/debug-monitors.h>
#include <asm/fpsimd.h>
#include <asm/mte.h>
#include <asm/pointer_auth.h>
#include <asm/stacktrace.h>
#include <asm/syscall.h>
#include <asm/traps.h>
#include <asm/system_misc.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

struct pt_regs_offset {
	const char *name;
	int offset;
};

#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}
#define GPR_OFFSET_NAME(r) \
	{.name = "x" #r, .offset = offsetof(struct pt_regs, regs[r])}

static const struct pt_regs_offset regoffset_table[] = {
	GPR_OFFSET_NAME(0),
	GPR_OFFSET_NAME(1),
	GPR_OFFSET_NAME(2),
	GPR_OFFSET_NAME(3),
	GPR_OFFSET_NAME(4),
	GPR_OFFSET_NAME(5),
	GPR_OFFSET_NAME(6),
	GPR_OFFSET_NAME(7),
	GPR_OFFSET_NAME(8),
	GPR_OFFSET_NAME(9),
	GPR_OFFSET_NAME(10),
	GPR_OFFSET_NAME(11),
	GPR_OFFSET_NAME(12),
	GPR_OFFSET_NAME(13),
	GPR_OFFSET_NAME(14),
	GPR_OFFSET_NAME(15),
	GPR_OFFSET_NAME(16),
	GPR_OFFSET_NAME(17),
	GPR_OFFSET_NAME(18),
	GPR_OFFSET_NAME(19),
	GPR_OFFSET_NAME(20),
	GPR_OFFSET_NAME(21),
	GPR_OFFSET_NAME(22),
	GPR_OFFSET_NAME(23),
	GPR_OFFSET_NAME(24),
	GPR_OFFSET_NAME(25),
	GPR_OFFSET_NAME(26),
	GPR_OFFSET_NAME(27),
	GPR_OFFSET_NAME(28),
	GPR_OFFSET_NAME(29),
	GPR_OFFSET_NAME(30),
	{.name = "lr", .offset = offsetof(struct pt_regs, regs[30])},
	REG_OFFSET_NAME(sp),
	REG_OFFSET_NAME(pc),
	REG_OFFSET_NAME(pstate),
	REG_OFFSET_END,
};

/**
 * regs_query_register_offset() - query register offset from its name
 * @name:	the name of a register
 *
 * regs_query_register_offset() returns the offset of a register in struct
 * pt_regs from its name. If the name is invalid, this returns -EINVAL.
 */
int regs_query_register_offset(const char *name)
{
	const struct pt_regs_offset *roff;

	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (!strcmp(roff->name, name))
			return roff->offset;
	return -EINVAL;
}
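
/*
 * Example (illustrative sketch, not a definitive user of this API): a
 * kprobes handler could pair regs_query_register_offset() with
 * regs_get_register() from <asm/ptrace.h> to read a register by name:
 *
 *	int off = regs_query_register_offset("x0");
 *
 *	if (off >= 0)
 *		pr_info("x0 = 0x%llx\n", regs_get_register(regs, off));
 */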

/**
 * regs_within_kernel_stack() - check the address in the stack
 * @regs:      pt_regs which contains kernel stack pointer.
 * @addr:      address which is checked.
 *
 * regs_within_kernel_stack() checks @addr is within the kernel stack page(s).
 * If @addr is within the kernel stack, it returns true. If not, returns false.
 */
static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
	return ((addr & ~(THREAD_SIZE - 1)) ==
		(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))) ||
		on_irq_stack(addr, sizeof(unsigned long));
}

/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @n:		stack entry number.
 *
 * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
 * is specified by @regs. If the @n th entry is NOT in the kernel stack,
 * this returns 0.
 */
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);

	addr += n;
	if (regs_within_kernel_stack(regs, (unsigned long)addr))
		return *addr;
	else
		return 0;
}
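
/*
 * Example (illustrative sketch): a probe handler could fetch stack slots
 * relative to the task's kernel SP; a slot outside the task and IRQ
 * stacks simply reads back as 0:
 *
 *	unsigned long slot0 = regs_get_kernel_stack_nth(regs, 0);
 *	unsigned long slot1 = regs_get_kernel_stack_nth(regs, 1);
 */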

/*
 * TODO: does not yet catch signals sent when the child dies,
 * in exit.c or in signal.c.
 */

/*
 * Called by kernel/ptrace.c when detaching.
 */
void ptrace_disable(struct task_struct *child)
{
	/*
	 * This would be better off in core code, but PTRACE_DETACH has
	 * grown its fair share of arch-specific warts and changing it
	 * is likely to cause regressions on obscure architectures.
	 */
	user_disable_single_step(child);
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT
/*
 * Handle hitting a HW-breakpoint.
 */
static void ptrace_hbptriggered(struct perf_event *bp,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
	const char *desc = "Hardware breakpoint trap (ptrace)";

#ifdef CONFIG_COMPAT
	if (is_compat_task()) {
		int si_errno = 0;
		int i;

		for (i = 0; i < ARM_MAX_BRP; ++i) {
			if (current->thread.debug.hbp_break[i] == bp) {
				si_errno = (i << 1) + 1;
				break;
			}
		}

		for (i = 0; i < ARM_MAX_WRP; ++i) {
			if (current->thread.debug.hbp_watch[i] == bp) {
				si_errno = -((i << 1) + 1);
				break;
			}
		}
		arm64_force_sig_ptrace_errno_trap(si_errno, bkpt->trigger,
						  desc);
		return;
	}
#endif
	arm64_force_sig_fault(SIGTRAP, TRAP_HWBKPT, bkpt->trigger, desc);
}
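
/*
 * Tracer-side view (illustrative sketch): for a compat tracee the signal
 * above carries the slot in si_errno, (idx << 1) + 1 for breakpoints and
 * the negated value for watchpoints, so a tracer can decode it as:
 *
 *	if (si.si_errno > 0)
 *		bp_idx = (si.si_errno - 1) >> 1;	(hbp_break slot)
 *	else if (si.si_errno < 0)
 *		wp_idx = (-si.si_errno - 1) >> 1;	(hbp_watch slot)
 */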

/*
 * Unregister breakpoints from this task and reset the pointers in
 * the thread_struct.
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < ARM_MAX_BRP; i++) {
		if (t->debug.hbp_break[i]) {
			unregister_hw_breakpoint(t->debug.hbp_break[i]);
			t->debug.hbp_break[i] = NULL;
		}
	}

	for (i = 0; i < ARM_MAX_WRP; i++) {
		if (t->debug.hbp_watch[i]) {
			unregister_hw_breakpoint(t->debug.hbp_watch[i]);
			t->debug.hbp_watch[i] = NULL;
		}
	}
}

void ptrace_hw_copy_thread(struct task_struct *tsk)
{
	memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
}

static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
					       struct task_struct *tsk,
					       unsigned long idx)
{
	struct perf_event *bp = ERR_PTR(-EINVAL);

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if (idx >= ARM_MAX_BRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_BRP);
		bp = tsk->thread.debug.hbp_break[idx];
		break;
	case NT_ARM_HW_WATCH:
		if (idx >= ARM_MAX_WRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_WRP);
		bp = tsk->thread.debug.hbp_watch[idx];
		break;
	}

out:
	return bp;
}

static int ptrace_hbp_set_event(unsigned int note_type,
				struct task_struct *tsk,
				unsigned long idx,
				struct perf_event *bp)
{
	int err = -EINVAL;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if (idx >= ARM_MAX_BRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_BRP);
		tsk->thread.debug.hbp_break[idx] = bp;
		err = 0;
		break;
	case NT_ARM_HW_WATCH:
		if (idx >= ARM_MAX_WRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_WRP);
		tsk->thread.debug.hbp_watch[idx] = bp;
		err = 0;
		break;
	}

out:
	return err;
}

static struct perf_event *ptrace_hbp_create(unsigned int note_type,
					    struct task_struct *tsk,
					    unsigned long idx)
{
	struct perf_event *bp;
	struct perf_event_attr attr;
	int err, type;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		type = HW_BREAKPOINT_X;
		break;
	case NT_ARM_HW_WATCH:
		type = HW_BREAKPOINT_RW;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	ptrace_breakpoint_init(&attr);

	/*
	 * Initialise fields to sane defaults
	 * (i.e. values that will pass validation).
	 */
	attr.bp_addr	= 0;
	attr.bp_len	= HW_BREAKPOINT_LEN_4;
	attr.bp_type	= type;
	attr.disabled	= 1;

	bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk);
	if (IS_ERR(bp))
		return bp;

	err = ptrace_hbp_set_event(note_type, tsk, idx, bp);
	if (err)
		return ERR_PTR(err);

	return bp;
}

static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
				     struct arch_hw_breakpoint_ctrl ctrl,
				     struct perf_event_attr *attr)
{
	int err, len, type, offset, disabled = !ctrl.enabled;

	attr->disabled = disabled;
	if (disabled)
		return 0;

	err = arch_bp_generic_fields(ctrl, &len, &type, &offset);
	if (err)
		return err;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if ((type & HW_BREAKPOINT_X) != type)
			return -EINVAL;
		break;
	case NT_ARM_HW_WATCH:
		if ((type & HW_BREAKPOINT_RW) != type)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	attr->bp_len	= len;
	attr->bp_type	= type;
	attr->bp_addr	+= offset;

	return 0;
}

static int ptrace_hbp_get_resource_info(unsigned int note_type, u32 *info)
{
	u8 num;
	u32 reg = 0;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		num = hw_breakpoint_slots(TYPE_INST);
		break;
	case NT_ARM_HW_WATCH:
		num = hw_breakpoint_slots(TYPE_DATA);
		break;
	default:
		return -EINVAL;
	}

	reg |= debug_monitors_arch();
	reg <<= 8;
	reg |= num;

	*info = reg;
	return 0;
}

static int ptrace_hbp_get_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u32 *ctrl)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0;
	return 0;
}

static int ptrace_hbp_get_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u64 *addr)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*addr = bp ? counter_arch_bp(bp)->address : 0;
	return 0;
}

static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
							struct task_struct *tsk,
							unsigned long idx)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (!bp)
		bp = ptrace_hbp_create(note_type, tsk, idx);

	return bp;
}

static int ptrace_hbp_set_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u32 uctrl)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;
	struct arch_hw_breakpoint_ctrl ctrl;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp)) {
		err = PTR_ERR(bp);
		return err;
	}

	attr = bp->attr;
	decode_ctrl_reg(uctrl, &ctrl);
	err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
	if (err)
		return err;

	return modify_user_hw_breakpoint(bp, &attr);
}

static int ptrace_hbp_set_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u64 addr)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp)) {
		err = PTR_ERR(bp);
		return err;
	}

	attr = bp->attr;
	attr.bp_addr = addr;
	err = modify_user_hw_breakpoint(bp, &attr);
	return err;
}

#define PTRACE_HBP_ADDR_SZ	sizeof(u64)
#define PTRACE_HBP_CTRL_SZ	sizeof(u32)
#define PTRACE_HBP_PAD_SZ	sizeof(u32)

static int hw_break_get(struct task_struct *target,
			const struct user_regset *regset,
			struct membuf to)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0;
	u32 info, ctrl;
	u64 addr;

	/* Resource info */
	ret = ptrace_hbp_get_resource_info(note_type, &info);
	if (ret)
		return ret;

	membuf_write(&to, &info, sizeof(info));
	membuf_zero(&to, sizeof(u32));
	/* (address, ctrl) registers */
	while (to.left) {
		ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
		if (ret)
			return ret;
		ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl);
		if (ret)
			return ret;
		membuf_store(&to, addr);
		membuf_store(&to, ctrl);
		membuf_zero(&to, sizeof(u32));
		idx++;
	}
	return 0;
}
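
/*
 * Userspace view (illustrative sketch, assumes a ptrace-stopped tracee):
 * this regset reads back as a struct user_hwdebug_state, whose dbg_info
 * word is laid out by ptrace_hbp_get_resource_info() above:
 *
 *	struct user_hwdebug_state hws;
 *	struct iovec iov = { .iov_base = &hws, .iov_len = sizeof(hws) };
 *
 *	ptrace(PTRACE_GETREGSET, pid, NT_ARM_HW_BREAK, &iov);
 *	slots = hws.dbg_info & 0xff;	(debug arch is in bits [15:8])
 */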

static int hw_break_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0, offset, limit;
	u32 ctrl;
	u64 addr;

	/* Resource info and pad */
	offset = offsetof(struct user_hwdebug_state, dbg_regs);
	user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);

	/* (address, ctrl) registers */
	limit = regset->n * regset->size;
	while (count && offset < limit) {
		if (count < PTRACE_HBP_ADDR_SZ)
			return -EINVAL;
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
					 offset, offset + PTRACE_HBP_ADDR_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_addr(note_type, target, idx, addr);
		if (ret)
			return ret;
		offset += PTRACE_HBP_ADDR_SZ;

		if (!count)
			break;
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
					 offset, offset + PTRACE_HBP_CTRL_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl);
		if (ret)
			return ret;
		offset += PTRACE_HBP_CTRL_SZ;

		user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					  offset, offset + PTRACE_HBP_PAD_SZ);
		offset += PTRACE_HBP_PAD_SZ;
		idx++;
	}

	return 0;
}
#endif	/* CONFIG_HAVE_HW_BREAKPOINT */

static int gpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	struct user_pt_regs *uregs = &task_pt_regs(target)->user_regs;
	return membuf_write(&to, uregs, sizeof(*uregs));
}

static int gpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_pt_regs newregs = task_pt_regs(target)->user_regs;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
	if (ret)
		return ret;

	if (!valid_user_regs(&newregs, target))
		return -EINVAL;

	task_pt_regs(target)->user_regs = newregs;
	return 0;
}

static int fpr_active(struct task_struct *target, const struct user_regset *regset)
{
	if (!system_supports_fpsimd())
		return -ENODEV;
	return regset->n;
}

/*
 * TODO: update fp accessors for lazy context switching (sync/flush hwstate)
 */
static int __fpr_get(struct task_struct *target,
		     const struct user_regset *regset,
		     struct membuf to)
{
	struct user_fpsimd_state *uregs;

	sve_sync_to_fpsimd(target);

	uregs = &target->thread.uw.fpsimd_state;

	return membuf_write(&to, uregs, sizeof(*uregs));
}

static int fpr_get(struct task_struct *target, const struct user_regset *regset,
		   struct membuf to)
{
	if (!system_supports_fpsimd())
		return -EINVAL;

	if (target == current)
		fpsimd_preserve_current_state();

	return __fpr_get(target, regset, to);
}

static int __fpr_set(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf,
		     unsigned int start_pos)
{
	int ret;
	struct user_fpsimd_state newstate;

	/*
	 * Ensure target->thread.uw.fpsimd_state is up to date, so that a
	 * short copyin can't resurrect stale data.
	 */
	sve_sync_to_fpsimd(target);

	newstate = target->thread.uw.fpsimd_state;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate,
				 start_pos, start_pos + sizeof(newstate));
	if (ret)
		return ret;

	target->thread.uw.fpsimd_state = newstate;

	return ret;
}

static int fpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;

	if (!system_supports_fpsimd())
		return -EINVAL;

	ret = __fpr_set(target, regset, pos, count, kbuf, ubuf, 0);
	if (ret)
		return ret;

	sve_sync_from_fpsimd_zeropad(target);
	fpsimd_flush_task_state(target);

	return ret;
}

static int tls_get(struct task_struct *target, const struct user_regset *regset,
		   struct membuf to)
{
	int ret;

	if (target == current)
		tls_preserve_current_state();

	ret = membuf_store(&to, target->thread.uw.tp_value);
	if (system_supports_tpidr2())
		ret = membuf_store(&to, target->thread.tpidr2_el0);
	else
		ret = membuf_zero(&to, sizeof(u64));

	return ret;
}
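
/*
 * Layout note (illustrative sketch): NT_ARM_TLS exports two u64 slots,
 * tpidr_el0 followed by tpidr2_el0 (the latter reads as zero without
 * TPIDR2 support), so a tracer can fetch both at once:
 *
 *	uint64_t tls[2];
 *	struct iovec iov = { .iov_base = tls, .iov_len = sizeof(tls) };
 *
 *	ptrace(PTRACE_GETREGSET, pid, NT_ARM_TLS, &iov);
 */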

static int tls_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	unsigned long tls[2];

	tls[0] = target->thread.uw.tp_value;
	if (system_supports_tpidr2())
		tls[1] = target->thread.tpidr2_el0;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, tls, 0, count);
	if (ret)
		return ret;

	target->thread.uw.tp_value = tls[0];
	if (system_supports_tpidr2())
		target->thread.tpidr2_el0 = tls[1];

	return ret;
}

static int system_call_get(struct task_struct *target,
			   const struct user_regset *regset,
			   struct membuf to)
{
	return membuf_store(&to, task_pt_regs(target)->syscallno);
}

static int system_call_set(struct task_struct *target,
			   const struct user_regset *regset,
			   unsigned int pos, unsigned int count,
			   const void *kbuf, const void __user *ubuf)
{
	int syscallno = task_pt_regs(target)->syscallno;
	int ret;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &syscallno, 0, -1);
	if (ret)
		return ret;

	task_pt_regs(target)->syscallno = syscallno;
	return ret;
}
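
/*
 * Tracer-side sketch (illustrative): rewriting the syscall number at a
 * syscall-entry stop via this regset; a value of -1 is commonly used to
 * make the kernel skip the syscall altogether:
 *
 *	int nr = -1;
 *	struct iovec iov = { .iov_base = &nr, .iov_len = sizeof(nr) };
 *
 *	ptrace(PTRACE_SETREGSET, pid, NT_ARM_SYSTEM_CALL, &iov);
 */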

#ifdef CONFIG_ARM64_SVE

static void sve_init_header_from_task(struct user_sve_header *header,
				      struct task_struct *target,
				      enum vec_type type)
{
	unsigned int vq;
	bool active;
	enum vec_type task_type;

	memset(header, 0, sizeof(*header));

	/* Check if the requested registers are active for the task */
	if (thread_sm_enabled(&target->thread))
		task_type = ARM64_VEC_SME;
	else
		task_type = ARM64_VEC_SVE;
	active = (task_type == type);

	switch (type) {
	case ARM64_VEC_SVE:
		if (test_tsk_thread_flag(target, TIF_SVE_VL_INHERIT))
			header->flags |= SVE_PT_VL_INHERIT;
		break;
	case ARM64_VEC_SME:
		if (test_tsk_thread_flag(target, TIF_SME_VL_INHERIT))
			header->flags |= SVE_PT_VL_INHERIT;
		break;
	default:
		WARN_ON_ONCE(1);
		return;
	}

	if (active) {
		if (target->thread.fp_type == FP_STATE_FPSIMD) {
			header->flags |= SVE_PT_REGS_FPSIMD;
		} else {
			header->flags |= SVE_PT_REGS_SVE;
		}
	}

	header->vl = task_get_vl(target, type);
	vq = sve_vq_from_vl(header->vl);

	header->max_vl = vec_max_vl(type);
	header->size = SVE_PT_SIZE(vq, header->flags);
	header->max_size = SVE_PT_SIZE(sve_vq_from_vl(header->max_vl),
				      SVE_PT_REGS_SVE);
}

static unsigned int sve_size_from_header(struct user_sve_header const *header)
{
	return ALIGN(header->size, SVE_VQ_BYTES);
}

static int sve_get_common(struct task_struct *target,
			  const struct user_regset *regset,
			  struct membuf to,
			  enum vec_type type)
{
	struct user_sve_header header;
	unsigned int vq;
	unsigned long start, end;

	/* Header */
	sve_init_header_from_task(&header, target, type);
	vq = sve_vq_from_vl(header.vl);

	membuf_write(&to, &header, sizeof(header));

	if (target == current)
		fpsimd_preserve_current_state();

	BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
	BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));

	switch ((header.flags & SVE_PT_REGS_MASK)) {
	case SVE_PT_REGS_FPSIMD:
		return __fpr_get(target, regset, to);

	case SVE_PT_REGS_SVE:
		start = SVE_PT_SVE_OFFSET;
		end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
		membuf_write(&to, target->thread.sve_state, end - start);

		start = end;
		end = SVE_PT_SVE_FPSR_OFFSET(vq);
		membuf_zero(&to, end - start);

		/*
		 * Copy fpsr, and fpcr which must follow contiguously in
		 * struct fpsimd_state:
		 */
		start = end;
		end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
		membuf_write(&to, &target->thread.uw.fpsimd_state.fpsr,
			     end - start);

		start = end;
		end = sve_size_from_header(&header);
		return membuf_zero(&to, end - start);

	default:
		return 0;
	}
}

static int sve_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	if (!system_supports_sve())
		return -EINVAL;

	return sve_get_common(target, regset, to, ARM64_VEC_SVE);
}
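
/*
 * Userspace sketch (illustrative): the dump is variably sized, so a
 * tracer typically reads the fixed struct user_sve_header first and then
 * re-reads with iov_len set to header.size to get the full payload:
 *
 *	struct user_sve_header hdr;
 *	struct iovec iov = { .iov_base = &hdr, .iov_len = sizeof(hdr) };
 *
 *	ptrace(PTRACE_GETREGSET, pid, NT_ARM_SVE, &iov);
 */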

static int sve_set_common(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf,
			  enum vec_type type)
{
	int ret;
	struct user_sve_header header;
	unsigned int vq;
	unsigned long start, end;

	/* Header */
	if (count < sizeof(header))
		return -EINVAL;
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &header,
				 0, sizeof(header));
	if (ret)
		goto out;

	/*
	 * Apart from SVE_PT_REGS_MASK, all SVE_PT_* flags are consumed by
	 * vec_set_vector_length(), which will also validate them for us:
	 */
	ret = vec_set_vector_length(target, type, header.vl,
		((unsigned long)header.flags & ~SVE_PT_REGS_MASK) << 16);
	if (ret)
		goto out;

	/* Actual VL set may be less than the user asked for: */
	vq = sve_vq_from_vl(task_get_vl(target, type));

	/* Enter/exit streaming mode */
	if (system_supports_sme()) {
		u64 old_svcr = target->thread.svcr;

		switch (type) {
		case ARM64_VEC_SVE:
			target->thread.svcr &= ~SVCR_SM_MASK;
			break;
		case ARM64_VEC_SME:
			target->thread.svcr |= SVCR_SM_MASK;

			/*
			 * Disable traps and ensure there is SME storage but
			 * preserve any currently set values in ZA/ZT.
			 */
			sme_alloc(target, false);
			set_tsk_thread_flag(target, TIF_SME);
			break;
		default:
			WARN_ON_ONCE(1);
			ret = -EINVAL;
			goto out;
		}

		/*
		 * If we switched then invalidate any existing SVE
		 * state and ensure there's storage.
		 */
		if (target->thread.svcr != old_svcr)
			sve_alloc(target, true);
	}

	/* Registers: FPSIMD-only case */

	BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
	if ((header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD) {
		ret = __fpr_set(target, regset, pos, count, kbuf, ubuf,
				SVE_PT_FPSIMD_OFFSET);
		clear_tsk_thread_flag(target, TIF_SVE);
		target->thread.fp_type = FP_STATE_FPSIMD;
		goto out;
	}

	/*
	 * Otherwise: no registers or full SVE case.  For backwards
	 * compatibility reasons we treat empty flags as SVE registers.
	 */

	/*
	 * If setting a different VL from the requested VL and there is
	 * register data, the data layout will be wrong: don't even
	 * try to set the registers in this case.
	 */
	if (count && vq != sve_vq_from_vl(header.vl)) {
		ret = -EIO;
		goto out;
	}

	sve_alloc(target, true);
	if (!target->thread.sve_state) {
		ret = -ENOMEM;
		clear_tsk_thread_flag(target, TIF_SVE);
		target->thread.fp_type = FP_STATE_FPSIMD;
		goto out;
	}

	/*
	 * Ensure target->thread.sve_state is up to date with target's
	 * FPSIMD regs, so that a short copyin leaves trailing
	 * registers unmodified.  Only enable SVE if we are
	 * configuring normal SVE, a system with streaming SVE may not
	 * have normal SVE.
	 */
	fpsimd_sync_to_sve(target);
	if (type == ARM64_VEC_SVE)
		set_tsk_thread_flag(target, TIF_SVE);
	target->thread.fp_type = FP_STATE_SVE;

	BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));
	start = SVE_PT_SVE_OFFSET;
	end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 target->thread.sve_state,
				 start, end);
	if (ret)
		goto out;

	start = end;
	end = SVE_PT_SVE_FPSR_OFFSET(vq);
	user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, start, end);

	/*
	 * Copy fpsr, and fpcr which must follow contiguously in
	 * struct fpsimd_state:
	 */
	start = end;
	end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.uw.fpsimd_state.fpsr,
				 start, end);

out:
	fpsimd_flush_task_state(target);
	return ret;
}

static int sve_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	if (!system_supports_sve())
		return -EINVAL;

	return sve_set_common(target, regset, pos, count, kbuf, ubuf,
			      ARM64_VEC_SVE);
}

#endif /* CONFIG_ARM64_SVE */

#ifdef CONFIG_ARM64_SME

static int ssve_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	if (!system_supports_sme())
		return -EINVAL;

	return sve_get_common(target, regset, to, ARM64_VEC_SME);
}

static int ssve_set(struct task_struct *target,
		    const struct user_regset *regset,
		    unsigned int pos, unsigned int count,
		    const void *kbuf, const void __user *ubuf)
{
	if (!system_supports_sme())
		return -EINVAL;

	return sve_set_common(target, regset, pos, count, kbuf, ubuf,
			      ARM64_VEC_SME);
}

static int za_get(struct task_struct *target,
		  const struct user_regset *regset,
		  struct membuf to)
{
	struct user_za_header header;
	unsigned int vq;
	unsigned long start, end;

	if (!system_supports_sme())
		return -EINVAL;

	/* Header */
	memset(&header, 0, sizeof(header));

	if (test_tsk_thread_flag(target, TIF_SME_VL_INHERIT))
		header.flags |= ZA_PT_VL_INHERIT;

	header.vl = task_get_sme_vl(target);
	vq = sve_vq_from_vl(header.vl);
	header.max_vl = sme_max_vl();
	header.max_size = ZA_PT_SIZE(vq);

	/* If ZA is not active there is only the header */
	if (thread_za_enabled(&target->thread))
		header.size = ZA_PT_SIZE(vq);
	else
		header.size = ZA_PT_ZA_OFFSET;

	membuf_write(&to, &header, sizeof(header));

	BUILD_BUG_ON(ZA_PT_ZA_OFFSET != sizeof(header));
	end = ZA_PT_ZA_OFFSET;

	if (target == current)
		fpsimd_preserve_current_state();

	/* Any register data to include? */
	if (thread_za_enabled(&target->thread)) {
		start = end;
		end = ZA_PT_SIZE(vq);
		membuf_write(&to, target->thread.sme_state, end - start);
	}

	/* Zero any trailing padding */
	start = end;
	end = ALIGN(header.size, SVE_VQ_BYTES);
	return membuf_zero(&to, end - start);
}
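
/*
 * Userspace sketch (illustrative): a header-sized read always succeeds;
 * ZA data follows only when header.size > ZA_PT_ZA_OFFSET, i.e. when
 * PSTATE.ZA is set for the target:
 *
 *	struct user_za_header hdr;
 *	struct iovec iov = { .iov_base = &hdr, .iov_len = sizeof(hdr) };
 *
 *	ptrace(PTRACE_GETREGSET, pid, NT_ARM_ZA, &iov);
 *	za_active = hdr.size > ZA_PT_ZA_OFFSET;
 */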

static int za_set(struct task_struct *target,
		  const struct user_regset *regset,
		  unsigned int pos, unsigned int count,
		  const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_za_header header;
	unsigned int vq;
	unsigned long start, end;

	if (!system_supports_sme())
		return -EINVAL;

	/* Header */
	if (count < sizeof(header))
		return -EINVAL;
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &header,
				 0, sizeof(header));
	if (ret)
		goto out;

	/*
	 * All current ZA_PT_* flags are consumed by
	 * vec_set_vector_length(), which will also validate them for
	 * us:
	 */
	ret = vec_set_vector_length(target, ARM64_VEC_SME, header.vl,
		((unsigned long)header.flags) << 16);
	if (ret)
		goto out;

	/* Actual VL set may be less than the user asked for: */
	vq = sve_vq_from_vl(task_get_sme_vl(target));

	/* Ensure there is some SVE storage for streaming mode */
	if (!target->thread.sve_state) {
		sve_alloc(target, false);
		if (!target->thread.sve_state) {
			ret = -ENOMEM;
			goto out;
		}
	}

	/*
	 * Only flush the storage if PSTATE.ZA was not already set,
	 * otherwise preserve any existing data.
	 */
	sme_alloc(target, !thread_za_enabled(&target->thread));
	if (!target->thread.sme_state)
		return -ENOMEM;

	/* If there is no data then disable ZA */
	if (!count) {
		target->thread.svcr &= ~SVCR_ZA_MASK;
		goto out;
	}

	/*
	 * If setting a different VL from the requested VL and there is
	 * register data, the data layout will be wrong: don't even
	 * try to set the registers in this case.
	 */
	if (vq != sve_vq_from_vl(header.vl)) {
		ret = -EIO;
		goto out;
	}

	BUILD_BUG_ON(ZA_PT_ZA_OFFSET != sizeof(header));
	start = ZA_PT_ZA_OFFSET;
	end = ZA_PT_SIZE(vq);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 target->thread.sme_state,
				 start, end);
	if (ret)
		goto out;

	/* Mark ZA as active and let userspace use it */
	set_tsk_thread_flag(target, TIF_SME);
	target->thread.svcr |= SVCR_ZA_MASK;

out:
	fpsimd_flush_task_state(target);
	return ret;
}

static int zt_get(struct task_struct *target,
		  const struct user_regset *regset,
		  struct membuf to)
{
	if (!system_supports_sme2())
		return -EINVAL;

	/*
	 * If PSTATE.ZA is not set then ZT will be zeroed when it is
	 * enabled so report the current register value as zero.
	 */
	if (thread_za_enabled(&target->thread))
		membuf_write(&to, thread_zt_state(&target->thread),
			     ZT_SIG_REG_BYTES);
	else
		membuf_zero(&to, ZT_SIG_REG_BYTES);

	return 0;
}

static int zt_set(struct task_struct *target,
		  const struct user_regset *regset,
		  unsigned int pos, unsigned int count,
		  const void *kbuf, const void __user *ubuf)
{
	int ret;

	if (!system_supports_sme2())
		return -EINVAL;

	/* Ensure SVE storage in case this is first use of SME */
	sve_alloc(target, false);
	if (!target->thread.sve_state)
		return -ENOMEM;

	if (!thread_za_enabled(&target->thread)) {
		sme_alloc(target, true);
		if (!target->thread.sme_state)
			return -ENOMEM;
	}

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 thread_zt_state(&target->thread),
				 0, ZT_SIG_REG_BYTES);
	if (ret == 0) {
		target->thread.svcr |= SVCR_ZA_MASK;
		set_tsk_thread_flag(target, TIF_SME);
	}

	fpsimd_flush_task_state(target);

	return ret;
}

#endif /* CONFIG_ARM64_SME */

#ifdef CONFIG_ARM64_PTR_AUTH
static int pac_mask_get(struct task_struct *target,
			const struct user_regset *regset,
			struct membuf to)
{
	/*
	 * The PAC bits can differ across data and instruction pointers
	 * depending on TCR_EL1.TBID*, which we may make use of in future, so
	 * we expose separate masks.
	 */
	unsigned long mask = ptrauth_user_pac_mask();
	struct user_pac_mask uregs = {
		.data_mask = mask,
		.insn_mask = mask,
	};

	if (!system_supports_address_auth())
		return -EINVAL;

	return membuf_write(&to, &uregs, sizeof(uregs));
}

static int pac_enabled_keys_get(struct task_struct *target,
				const struct user_regset *regset,
				struct membuf to)
{
	long enabled_keys = ptrauth_get_enabled_keys(target);

	if (IS_ERR_VALUE(enabled_keys))
		return enabled_keys;

	return membuf_write(&to, &enabled_keys, sizeof(enabled_keys));
}

static int pac_enabled_keys_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	int ret;
	long enabled_keys = ptrauth_get_enabled_keys(target);

	if (IS_ERR_VALUE(enabled_keys))
		return enabled_keys;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &enabled_keys, 0,
				 sizeof(long));
	if (ret)
		return ret;

	return ptrauth_set_enabled_keys(target, PR_PAC_ENABLED_KEYS_MASK,
					enabled_keys);
}

#ifdef CONFIG_CHECKPOINT_RESTORE
static __uint128_t pac_key_to_user(const struct ptrauth_key *key)
{
	return (__uint128_t)key->hi << 64 | key->lo;
}

static struct ptrauth_key pac_key_from_user(__uint128_t ukey)
{
	struct ptrauth_key key = {
		.lo = (unsigned long)ukey,
		.hi = (unsigned long)(ukey >> 64),
	};

	return key;
}
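
/*
 * Round-trip note (illustrative): the user-facing __uint128_t packs the
 * two 64-bit halves as (hi << 64) | lo, so the conversions above are
 * lossless in both directions:
 *
 *	struct ptrauth_key k = { .lo = 0x1111, .hi = 0x2222 };
 *	__uint128_t u = pac_key_to_user(&k);	(u == (0x2222 << 64) | 0x1111)
 *	k = pac_key_from_user(u);		(k.lo == 0x1111, k.hi == 0x2222)
 */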

static void pac_address_keys_to_user(struct user_pac_address_keys *ukeys,
				     const struct ptrauth_keys_user *keys)
{
	ukeys->apiakey = pac_key_to_user(&keys->apia);
	ukeys->apibkey = pac_key_to_user(&keys->apib);
	ukeys->apdakey = pac_key_to_user(&keys->apda);
	ukeys->apdbkey = pac_key_to_user(&keys->apdb);
}

static void pac_address_keys_from_user(struct ptrauth_keys_user *keys,
				       const struct user_pac_address_keys *ukeys)
{
	keys->apia = pac_key_from_user(ukeys->apiakey);
	keys->apib = pac_key_from_user(ukeys->apibkey);
	keys->apda = pac_key_from_user(ukeys->apdakey);
	keys->apdb = pac_key_from_user(ukeys->apdbkey);
}

static int pac_address_keys_get(struct task_struct *target,
				const struct user_regset *regset,
				struct membuf to)
{
	struct ptrauth_keys_user *keys = &target->thread.keys_user;
	struct user_pac_address_keys user_keys;

	if (!system_supports_address_auth())
		return -EINVAL;

	pac_address_keys_to_user(&user_keys, keys);

	return membuf_write(&to, &user_keys, sizeof(user_keys));
}

static int pac_address_keys_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	struct ptrauth_keys_user *keys = &target->thread.keys_user;
	struct user_pac_address_keys user_keys;
	int ret;

	if (!system_supports_address_auth())
		return -EINVAL;

	pac_address_keys_to_user(&user_keys, keys);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &user_keys, 0, -1);
	if (ret)
		return ret;
	pac_address_keys_from_user(keys, &user_keys);

	return 0;
}

static void pac_generic_keys_to_user(struct user_pac_generic_keys *ukeys,
				     const struct ptrauth_keys_user *keys)
{
	ukeys->apgakey = pac_key_to_user(&keys->apga);
}

static void pac_generic_keys_from_user(struct ptrauth_keys_user *keys,
				       const struct user_pac_generic_keys *ukeys)
{
	keys->apga = pac_key_from_user(ukeys->apgakey);
}

static int pac_generic_keys_get(struct task_struct *target,
				const struct user_regset *regset,
				struct membuf to)
{
	struct ptrauth_keys_user *keys = &target->thread.keys_user;
	struct user_pac_generic_keys user_keys;

	if (!system_supports_generic_auth())
		return -EINVAL;

	pac_generic_keys_to_user(&user_keys, keys);

	return membuf_write(&to, &user_keys, sizeof(user_keys));
}

static int pac_generic_keys_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	struct ptrauth_keys_user *keys = &target->thread.keys_user;
	struct user_pac_generic_keys user_keys;
	int ret;

	if (!system_supports_generic_auth())
		return -EINVAL;

	pac_generic_keys_to_user(&user_keys, keys);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &user_keys, 0, -1);
	if (ret)
		return ret;
	pac_generic_keys_from_user(keys, &user_keys);

	return 0;
}
#endif /* CONFIG_CHECKPOINT_RESTORE */
#endif /* CONFIG_ARM64_PTR_AUTH */

#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
static int tagged_addr_ctrl_get(struct task_struct *target,
				const struct user_regset *regset,
				struct membuf to)
{
	long ctrl = get_tagged_addr_ctrl(target);

	if (WARN_ON_ONCE(IS_ERR_VALUE(ctrl)))
		return ctrl;

	return membuf_write(&to, &ctrl, sizeof(ctrl));
}

static int tagged_addr_ctrl_set(struct task_struct *target, const struct
				user_regset *regset, unsigned int pos,
				unsigned int count, const void *kbuf, const
				void __user *ubuf)
{
	int ret;
	long ctrl;

	ctrl = get_tagged_addr_ctrl(target);
	if (WARN_ON_ONCE(IS_ERR_VALUE(ctrl)))
		return ctrl;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl, 0, -1);
	if (ret)
		return ret;

	return set_tagged_addr_ctrl(target, ctrl);
}
#endif

enum aarch64_regset {
	REGSET_GPR,
	REGSET_FPR,
	REGSET_TLS,
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	REGSET_HW_BREAK,
	REGSET_HW_WATCH,
#endif
	REGSET_SYSTEM_CALL,
#ifdef CONFIG_ARM64_SVE
	REGSET_SVE,
#endif
#ifdef CONFIG_ARM64_SME
	REGSET_SSVE,
	REGSET_ZA,
	REGSET_ZT,
#endif
#ifdef CONFIG_ARM64_PTR_AUTH
	REGSET_PAC_MASK,
	REGSET_PAC_ENABLED_KEYS,
#ifdef CONFIG_CHECKPOINT_RESTORE
	REGSET_PACA_KEYS,
	REGSET_PACG_KEYS,
#endif
#endif
#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
	REGSET_TAGGED_ADDR_CTRL,
#endif
};

static const struct user_regset aarch64_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_pt_regs) / sizeof(u64),
		.size = sizeof(u64),
		.align = sizeof(u64),
		.regset_get = gpr_get,
		.set = gpr_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_fpsimd_state) / sizeof(u32),
		/*
		 * We pretend we have 32-bit registers because the fpsr and
		 * fpcr are 32-bits wide.
		 */
		.size = sizeof(u32),
		.align = sizeof(u32),
		.active = fpr_active,
		.regset_get = fpr_get,
		.set = fpr_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_ARM_TLS,
		.n = 2,
		.size = sizeof(void *),
		.align = sizeof(void *),
		.regset_get = tls_get,
		.set = tls_set,
	},
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		.core_note_type = NT_ARM_HW_BREAK,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = hw_break_get,
		.set = hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		.core_note_type = NT_ARM_HW_WATCH,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = hw_break_get,
		.set = hw_break_set,
	},
#endif
	[REGSET_SYSTEM_CALL] = {
		.core_note_type = NT_ARM_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(int),
		.align = sizeof(int),
		.regset_get = system_call_get,
		.set = system_call_set,
	},
#ifdef CONFIG_ARM64_SVE
	[REGSET_SVE] = { /* Scalable Vector Extension */
		.core_note_type = NT_ARM_SVE,
		.n = DIV_ROUND_UP(SVE_PT_SIZE(ARCH_SVE_VQ_MAX,
					      SVE_PT_REGS_SVE),
				  SVE_VQ_BYTES),
		.size = SVE_VQ_BYTES,
		.align = SVE_VQ_BYTES,
		.regset_get = sve_get,
		.set = sve_set,
	},
#endif
#ifdef CONFIG_ARM64_SME
	[REGSET_SSVE] = { /* Streaming mode SVE */
		.core_note_type = NT_ARM_SSVE,
		.n = DIV_ROUND_UP(SVE_PT_SIZE(SME_VQ_MAX, SVE_PT_REGS_SVE),
				  SVE_VQ_BYTES),
		.size = SVE_VQ_BYTES,
		.align = SVE_VQ_BYTES,
		.regset_get = ssve_get,
		.set = ssve_set,
	},
	[REGSET_ZA] = { /* SME ZA */
		.core_note_type = NT_ARM_ZA,
		/*
		 * ZA is a single register but it's variably sized and
		 * the ptrace core requires that the size of any data
		 * be an exact multiple of the configured register
		 * size so report as though we had SVE_VQ_BYTES
		 * registers. These values aren't exposed to
		 * userspace.
		 */
		.n = DIV_ROUND_UP(ZA_PT_SIZE(SME_VQ_MAX), SVE_VQ_BYTES),
		.size = SVE_VQ_BYTES,
		.align = SVE_VQ_BYTES,
		.regset_get = za_get,
		.set = za_set,
	},
	[REGSET_ZT] = { /* SME ZT */
		.core_note_type = NT_ARM_ZT,
		.n = 1,
		.size = ZT_SIG_REG_BYTES,
		.align = sizeof(u64),
		.regset_get = zt_get,
		.set = zt_set,
	},
#endif
#ifdef CONFIG_ARM64_PTR_AUTH
	[REGSET_PAC_MASK] = {
		.core_note_type = NT_ARM_PAC_MASK,
		.n = sizeof(struct user_pac_mask) / sizeof(u64),
		.size = sizeof(u64),
		.align = sizeof(u64),
		.regset_get = pac_mask_get,
		/* this cannot be set dynamically */
	},
	[REGSET_PAC_ENABLED_KEYS] = {
		.core_note_type = NT_ARM_PAC_ENABLED_KEYS,
		.n = 1,
		.size = sizeof(long),
		.align = sizeof(long),
		.regset_get = pac_enabled_keys_get,
		.set = pac_enabled_keys_set,
	},
#ifdef CONFIG_CHECKPOINT_RESTORE
	[REGSET_PACA_KEYS] = {
		.core_note_type = NT_ARM_PACA_KEYS,
		.n = sizeof(struct user_pac_address_keys) / sizeof(__uint128_t),
		.size = sizeof(__uint128_t),
		.align = sizeof(__uint128_t),
		.regset_get = pac_address_keys_get,
		.set = pac_address_keys_set,
	},
	[REGSET_PACG_KEYS] = {
		.core_note_type = NT_ARM_PACG_KEYS,
		.n = sizeof(struct user_pac_generic_keys) / sizeof(__uint128_t),
		.size = sizeof(__uint128_t),
		.align = sizeof(__uint128_t),
		.regset_get = pac_generic_keys_get,
		.set = pac_generic_keys_set,
	},
#endif
#endif
#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
	[REGSET_TAGGED_ADDR_CTRL] = {
		.core_note_type = NT_ARM_TAGGED_ADDR_CTRL,
		.n = 1,
		.size = sizeof(long),
		.align = sizeof(long),
		.regset_get = tagged_addr_ctrl_get,
		.set = tagged_addr_ctrl_set,
	},
#endif
};

static const struct user_regset_view user_aarch64_view = {
	.name = "aarch64", .e_machine = EM_AARCH64,
	.regsets = aarch64_regsets, .n = ARRAY_SIZE(aarch64_regsets)
};

#ifdef CONFIG_COMPAT
enum compat_regset {
	REGSET_COMPAT_GPR,
	REGSET_COMPAT_VFP,
};

static inline compat_ulong_t compat_get_user_reg(struct task_struct *task, int idx)
{
	struct pt_regs *regs = task_pt_regs(task);

	switch (idx) {
	case 15:
		return regs->pc;
	case 16:
		return pstate_to_compat_psr(regs->pstate);
	case 17:
		return regs->orig_x0;
	default:
		return regs->regs[idx];
	}
}

static int compat_gpr_get(struct task_struct *target,
			  const struct user_regset *regset,
			  struct membuf to)
{
	int i = 0;

	while (to.left)
		membuf_store(&to, compat_get_user_reg(target, i++));
	return 0;
}

static int compat_gpr_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct pt_regs newregs;
	int ret = 0;
	unsigned int i, start, num_regs;

	/* Calculate the number of AArch32 registers contained in count */
	num_regs = count / regset->size;

	/* Convert pos into a register number */
1646  	start = pos / regset->size;
1647  
1648  	if (start + num_regs > regset->n)
1649  		return -EIO;
1650  
1651  	newregs = *task_pt_regs(target);
1652  
1653  	for (i = 0; i < num_regs; ++i) {
1654  		unsigned int idx = start + i;
1655  		compat_ulong_t reg;
1656  
1657  		if (kbuf) {
1658  			memcpy(&reg, kbuf, sizeof(reg));
1659  			kbuf += sizeof(reg);
1660  		} else {
1661  			ret = copy_from_user(&reg, ubuf, sizeof(reg));
1662  			if (ret) {
1663  				ret = -EFAULT;
1664  				break;
1665  			}
1666  
1667  			ubuf += sizeof(reg);
1668  		}
1669  
1670  		switch (idx) {
1671  		case 15:
1672  			newregs.pc = reg;
1673  			break;
1674  		case 16:
1675  			reg = compat_psr_to_pstate(reg);
1676  			newregs.pstate = reg;
1677  			break;
1678  		case 17:
1679  			newregs.orig_x0 = reg;
1680  			break;
1681  		default:
1682  			newregs.regs[idx] = reg;
1683  		}
1684  
1685  	}
1686  
1687  	if (valid_user_regs(&newregs.user_regs, target))
1688  		*task_pt_regs(target) = newregs;
1689  	else
1690  		ret = -EINVAL;
1691  
1692  	return ret;
1693  }
1694  
compat_vfp_get(struct task_struct * target,const struct user_regset * regset,struct membuf to)1695  static int compat_vfp_get(struct task_struct *target,
1696  			  const struct user_regset *regset,
1697  			  struct membuf to)
1698  {
1699  	struct user_fpsimd_state *uregs;
1700  	compat_ulong_t fpscr;
1701  
1702  	if (!system_supports_fpsimd())
1703  		return -EINVAL;
1704  
1705  	uregs = &target->thread.uw.fpsimd_state;
1706  
1707  	if (target == current)
1708  		fpsimd_preserve_current_state();
1709  
1710  	/*
1711  	 * The VFP registers are packed into the fpsimd_state, so they all sit
1712  	 * nicely together for us. We just need to create the fpscr separately.
1713  	 */
1714  	membuf_write(&to, uregs, VFP_STATE_SIZE - sizeof(compat_ulong_t));
1715  	fpscr = (uregs->fpsr & VFP_FPSCR_STAT_MASK) |
1716  		(uregs->fpcr & VFP_FPSCR_CTRL_MASK);
1717  	return membuf_store(&to, fpscr);
1718  }
1719  
compat_vfp_set(struct task_struct * target,const struct user_regset * regset,unsigned int pos,unsigned int count,const void * kbuf,const void __user * ubuf)1720  static int compat_vfp_set(struct task_struct *target,
1721  			  const struct user_regset *regset,
1722  			  unsigned int pos, unsigned int count,
1723  			  const void *kbuf, const void __user *ubuf)
1724  {
1725  	struct user_fpsimd_state *uregs;
1726  	compat_ulong_t fpscr;
1727  	int ret, vregs_end_pos;
1728  
1729  	if (!system_supports_fpsimd())
1730  		return -EINVAL;
1731  
1732  	uregs = &target->thread.uw.fpsimd_state;
1733  
1734  	vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t);
1735  	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
1736  				 vregs_end_pos);
1737  
1738  	if (count && !ret) {
1739  		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpscr,
1740  					 vregs_end_pos, VFP_STATE_SIZE);
1741  		if (!ret) {
1742  			uregs->fpsr = fpscr & VFP_FPSCR_STAT_MASK;
1743  			uregs->fpcr = fpscr & VFP_FPSCR_CTRL_MASK;
1744  		}
1745  	}
1746  
1747  	fpsimd_flush_task_state(target);
1748  	return ret;
1749  }
1750  
1751  static int compat_tls_get(struct task_struct *target,
1752  			  const struct user_regset *regset,
1753  			  struct membuf to)
1754  {
1755  	return membuf_store(&to, (compat_ulong_t)target->thread.uw.tp_value);
1756  }
1757  
1758  static int compat_tls_set(struct task_struct *target,
1759  			  const struct user_regset *regset, unsigned int pos,
1760  			  unsigned int count, const void *kbuf,
1761  			  const void __user *ubuf)
1762  {
1763  	int ret;
1764  	compat_ulong_t tls = target->thread.uw.tp_value;
1765  
1766  	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
1767  	if (ret)
1768  		return ret;
1769  
1770  	target->thread.uw.tp_value = tls;
1771  	return ret;
1772  }
1773  
1774  static const struct user_regset aarch32_regsets[] = {
1775  	[REGSET_COMPAT_GPR] = {
1776  		.core_note_type = NT_PRSTATUS,
1777  		.n = COMPAT_ELF_NGREG,
1778  		.size = sizeof(compat_elf_greg_t),
1779  		.align = sizeof(compat_elf_greg_t),
1780  		.regset_get = compat_gpr_get,
1781  		.set = compat_gpr_set
1782  	},
1783  	[REGSET_COMPAT_VFP] = {
1784  		.core_note_type = NT_ARM_VFP,
1785  		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
1786  		.size = sizeof(compat_ulong_t),
1787  		.align = sizeof(compat_ulong_t),
1788  		.active = fpr_active,
1789  		.regset_get = compat_vfp_get,
1790  		.set = compat_vfp_set
1791  	},
1792  };
1793  
1794  static const struct user_regset_view user_aarch32_view = {
1795  	.name = "aarch32", .e_machine = EM_ARM,
1796  	.regsets = aarch32_regsets, .n = ARRAY_SIZE(aarch32_regsets)
1797  };
1798  
1799  static const struct user_regset aarch32_ptrace_regsets[] = {
1800  	[REGSET_GPR] = {
1801  		.core_note_type = NT_PRSTATUS,
1802  		.n = COMPAT_ELF_NGREG,
1803  		.size = sizeof(compat_elf_greg_t),
1804  		.align = sizeof(compat_elf_greg_t),
1805  		.regset_get = compat_gpr_get,
1806  		.set = compat_gpr_set
1807  	},
1808  	[REGSET_FPR] = {
1809  		.core_note_type = NT_ARM_VFP,
1810  		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
1811  		.size = sizeof(compat_ulong_t),
1812  		.align = sizeof(compat_ulong_t),
1813  		.regset_get = compat_vfp_get,
1814  		.set = compat_vfp_set
1815  	},
1816  	[REGSET_TLS] = {
1817  		.core_note_type = NT_ARM_TLS,
1818  		.n = 1,
1819  		.size = sizeof(compat_ulong_t),
1820  		.align = sizeof(compat_ulong_t),
1821  		.regset_get = compat_tls_get,
1822  		.set = compat_tls_set,
1823  	},
1824  #ifdef CONFIG_HAVE_HW_BREAKPOINT
1825  	[REGSET_HW_BREAK] = {
1826  		.core_note_type = NT_ARM_HW_BREAK,
1827  		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
1828  		.size = sizeof(u32),
1829  		.align = sizeof(u32),
1830  		.regset_get = hw_break_get,
1831  		.set = hw_break_set,
1832  	},
1833  	[REGSET_HW_WATCH] = {
1834  		.core_note_type = NT_ARM_HW_WATCH,
1835  		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
1836  		.size = sizeof(u32),
1837  		.align = sizeof(u32),
1838  		.regset_get = hw_break_get,
1839  		.set = hw_break_set,
1840  	},
1841  #endif
1842  	[REGSET_SYSTEM_CALL] = {
1843  		.core_note_type = NT_ARM_SYSTEM_CALL,
1844  		.n = 1,
1845  		.size = sizeof(int),
1846  		.align = sizeof(int),
1847  		.regset_get = system_call_get,
1848  		.set = system_call_set,
1849  	},
1850  };
1851  
1852  static const struct user_regset_view user_aarch32_ptrace_view = {
1853  	.name = "aarch32", .e_machine = EM_ARM,
1854  	.regsets = aarch32_ptrace_regsets, .n = ARRAY_SIZE(aarch32_ptrace_regsets)
1855  };
1856  
1857  static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off,
1858  				   compat_ulong_t __user *ret)
1859  {
1860  	compat_ulong_t tmp;
1861  
1862  	if (off & 3)
1863  		return -EIO;
1864  
1865  	if (off == COMPAT_PT_TEXT_ADDR)
1866  		tmp = tsk->mm->start_code;
1867  	else if (off == COMPAT_PT_DATA_ADDR)
1868  		tmp = tsk->mm->start_data;
1869  	else if (off == COMPAT_PT_TEXT_END_ADDR)
1870  		tmp = tsk->mm->end_code;
1871  	else if (off < sizeof(compat_elf_gregset_t))
1872  		tmp = compat_get_user_reg(tsk, off >> 2);
1873  	else if (off >= COMPAT_USER_SZ)
1874  		return -EIO;
1875  	else
1876  		tmp = 0;
1877  
1878  	return put_user(tmp, ret);
1879  }
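
/*
 * Illustrative sketch, userspace rather than kernel code: a 32-bit tracer
 * reads single words of the emulated "user area" with PTRACE_PEEKUSER
 * (glibc returns the word directly). Offsets below
 * sizeof(compat_elf_gregset_t) map onto compat_get_user_reg(); the
 * COMPAT_PT_* offsets report segment boundaries instead:
 *
 *	errno = 0;
 *	long pc = ptrace(PTRACE_PEEKUSER, pid, (void *)(15 * 4), NULL);
 *	if (pc == -1 && errno)
 *		perror("PTRACE_PEEKUSER");
 */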
1880  
1881  static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
1882  				    compat_ulong_t val)
1883  {
1884  	struct pt_regs newregs = *task_pt_regs(tsk);
1885  	unsigned int idx = off / 4;
1886  
1887  	if (off & 3 || off >= COMPAT_USER_SZ)
1888  		return -EIO;
1889  
1890  	if (off >= sizeof(compat_elf_gregset_t))
1891  		return 0;
1892  
1893  	switch (idx) {
1894  	case 15:
1895  		newregs.pc = val;
1896  		break;
1897  	case 16:
1898  		newregs.pstate = compat_psr_to_pstate(val);
1899  		break;
1900  	case 17:
1901  		newregs.orig_x0 = val;
1902  		break;
1903  	default:
1904  		newregs.regs[idx] = val;
1905  	}
1906  
1907  	if (!valid_user_regs(&newregs.user_regs, tsk))
1908  		return -EINVAL;
1909  
1910  	*task_pt_regs(tsk) = newregs;
1911  	return 0;
1912  }
1913  
1914  #ifdef CONFIG_HAVE_HW_BREAKPOINT
1915  
1916  /*
1917   * Convert a virtual register number into an index for a thread_info
1918   * breakpoint array. Breakpoints are identified using positive numbers
1919   * whilst watchpoints are negative. The registers are laid out as pairs
1920   * of (address, control), each pair mapping to a unique hw_breakpoint struct.
1921   * Register 0 is reserved for describing resource information.
1922   */
1923  static int compat_ptrace_hbp_num_to_idx(compat_long_t num)
1924  {
1925  	return (abs(num) - 1) >> 1;
1926  }
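
/*
 * For example:
 *
 *	num  1 (bkpt 0 addr) / num  2 (bkpt 0 ctrl)	-> idx 0
 *	num  3 (bkpt 1 addr) / num  4 (bkpt 1 ctrl)	-> idx 1
 *	num -1 (wp 0 addr)   / num -2 (wp 0 ctrl)	-> idx 0
 *
 * Odd register numbers carry addresses and even ones carry control words,
 * which is what the (num & 1) tests below rely on.
 */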
1927  
1928  static int compat_ptrace_hbp_get_resource_info(u32 *kdata)
1929  {
1930  	u8 num_brps, num_wrps, debug_arch, wp_len;
1931  	u32 reg = 0;
1932  
1933  	num_brps	= hw_breakpoint_slots(TYPE_INST);
1934  	num_wrps	= hw_breakpoint_slots(TYPE_DATA);
1935  
1936  	debug_arch	= debug_monitors_arch();
1937  	wp_len		= 8;
1938  	reg		|= debug_arch;
1939  	reg		<<= 8;
1940  	reg		|= wp_len;
1941  	reg		<<= 8;
1942  	reg		|= num_wrps;
1943  	reg		<<= 8;
1944  	reg		|= num_brps;
1945  
1946  	*kdata = reg;
1947  	return 0;
1948  }
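
/*
 * The resulting packing, as read back through register 0 of the
 * COMPAT_PTRACE_GETHBPREGS request, is:
 *
 *	reg[31:24]	debug architecture version
 *	reg[23:16]	maximum watchpoint length (always 8 bytes here)
 *	reg[15:8]	number of watchpoint slots
 *	reg[7:0]	number of breakpoint slots
 */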
1949  
1950  static int compat_ptrace_hbp_get(unsigned int note_type,
1951  				 struct task_struct *tsk,
1952  				 compat_long_t num,
1953  				 u32 *kdata)
1954  {
1955  	u64 addr = 0;
1956  	u32 ctrl = 0;
1957  
1958  	int err, idx = compat_ptrace_hbp_num_to_idx(num);
1959  
1960  	if (num & 1) {
1961  		err = ptrace_hbp_get_addr(note_type, tsk, idx, &addr);
1962  		*kdata = (u32)addr;
1963  	} else {
1964  		err = ptrace_hbp_get_ctrl(note_type, tsk, idx, &ctrl);
1965  		*kdata = ctrl;
1966  	}
1967  
1968  	return err;
1969  }
1970  
1971  static int compat_ptrace_hbp_set(unsigned int note_type,
1972  				 struct task_struct *tsk,
1973  				 compat_long_t num,
1974  				 u32 *kdata)
1975  {
1976  	u64 addr;
1977  	u32 ctrl;
1978  
1979  	int err, idx = compat_ptrace_hbp_num_to_idx(num);
1980  
1981  	if (num & 1) {
1982  		addr = *kdata;
1983  		err = ptrace_hbp_set_addr(note_type, tsk, idx, addr);
1984  	} else {
1985  		ctrl = *kdata;
1986  		err = ptrace_hbp_set_ctrl(note_type, tsk, idx, ctrl);
1987  	}
1988  
1989  	return err;
1990  }
1991  
1992  static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num,
1993  				    compat_ulong_t __user *data)
1994  {
1995  	int ret;
1996  	u32 kdata;
1997  
1998  	/* Watchpoint */
1999  	if (num < 0) {
2000  		ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata);
2001  	/* Resource info */
2002  	} else if (num == 0) {
2003  		ret = compat_ptrace_hbp_get_resource_info(&kdata);
2004  	/* Breakpoint */
2005  	} else {
2006  		ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata);
2007  	}
2008  
2009  	if (!ret)
2010  		ret = put_user(kdata, data);
2011  
2012  	return ret;
2013  }
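
/*
 * Illustrative sketch, userspace rather than kernel code: querying the
 * resource word with arm's PTRACE_GETHBPREGS request (handled here as
 * COMPAT_PTRACE_GETHBPREGS), register number 0:
 *
 *	__u32 info;
 *	if (ptrace(PTRACE_GETHBPREGS, pid, (void *)0, &info) == 0)
 *		printf("%u breakpoints, %u watchpoints\n",
 *		       info & 0xff, (info >> 8) & 0xff);
 */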
2014  
2015  static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num,
2016  				    compat_ulong_t __user *data)
2017  {
2018  	int ret;
2019  	u32 kdata = 0;
2020  
2021  	if (num == 0)
2022  		return 0;
2023  
2024  	ret = get_user(kdata, data);
2025  	if (ret)
2026  		return ret;
2027  
2028  	if (num < 0)
2029  		ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata);
2030  	else
2031  		ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata);
2032  
2033  	return ret;
2034  }
2035  #endif	/* CONFIG_HAVE_HW_BREAKPOINT */
2036  
2037  long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
2038  			compat_ulong_t caddr, compat_ulong_t cdata)
2039  {
2040  	unsigned long addr = caddr;
2041  	unsigned long data = cdata;
2042  	void __user *datap = compat_ptr(data);
2043  	int ret;
2044  
2045  	switch (request) {
2046  		case PTRACE_PEEKUSR:
2047  			ret = compat_ptrace_read_user(child, addr, datap);
2048  			break;
2049  
2050  		case PTRACE_POKEUSR:
2051  			ret = compat_ptrace_write_user(child, addr, data);
2052  			break;
2053  
2054  		case COMPAT_PTRACE_GETREGS:
2055  			ret = copy_regset_to_user(child,
2056  						  &user_aarch32_view,
2057  						  REGSET_COMPAT_GPR,
2058  						  0, sizeof(compat_elf_gregset_t),
2059  						  datap);
2060  			break;
2061  
2062  		case COMPAT_PTRACE_SETREGS:
2063  			ret = copy_regset_from_user(child,
2064  						    &user_aarch32_view,
2065  						    REGSET_COMPAT_GPR,
2066  						    0, sizeof(compat_elf_gregset_t),
2067  						    datap);
2068  			break;
2069  
2070  		case COMPAT_PTRACE_GET_THREAD_AREA:
2071  			ret = put_user((compat_ulong_t)child->thread.uw.tp_value,
2072  				       (compat_ulong_t __user *)datap);
2073  			break;
2074  
2075  		case COMPAT_PTRACE_SET_SYSCALL:
2076  			task_pt_regs(child)->syscallno = data;
2077  			ret = 0;
2078  			break;
2079  
2080  		case COMPAT_PTRACE_GETVFPREGS:
2081  			ret = copy_regset_to_user(child,
2082  						  &user_aarch32_view,
2083  						  REGSET_COMPAT_VFP,
2084  						  0, VFP_STATE_SIZE,
2085  						  datap);
2086  			break;
2087  
2088  		case COMPAT_PTRACE_SETVFPREGS:
2089  			ret = copy_regset_from_user(child,
2090  						    &user_aarch32_view,
2091  						    REGSET_COMPAT_VFP,
2092  						    0, VFP_STATE_SIZE,
2093  						    datap);
2094  			break;
2095  
2096  #ifdef CONFIG_HAVE_HW_BREAKPOINT
2097  		case COMPAT_PTRACE_GETHBPREGS:
2098  			ret = compat_ptrace_gethbpregs(child, addr, datap);
2099  			break;
2100  
2101  		case COMPAT_PTRACE_SETHBPREGS:
2102  			ret = compat_ptrace_sethbpregs(child, addr, datap);
2103  			break;
2104  #endif
2105  
2106  		default:
2107  			ret = compat_ptrace_request(child, request, addr,
2108  						    data);
2109  			break;
2110  	}
2111  
2112  	return ret;
2113  }
2114  #endif /* CONFIG_COMPAT */
2115  
2116  const struct user_regset_view *task_user_regset_view(struct task_struct *task)
2117  {
2118  #ifdef CONFIG_COMPAT
2119  	/*
2120  	 * Core dumping of 32-bit tasks or compat ptrace requests must use the
2121  	 * user_aarch32_view compatible with arm32. Native ptrace requests on
2122  	 * 32-bit children use an extended user_aarch32_ptrace_view to allow
2123  	 * access to the TLS register.
2124  	 */
2125  	if (is_compat_task())
2126  		return &user_aarch32_view;
2127  	else if (is_compat_thread(task_thread_info(task)))
2128  		return &user_aarch32_ptrace_view;
2129  #endif
2130  	return &user_aarch64_view;
2131  }
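
/*
 * In other words:
 *
 *	32-bit tracer/dumper, any tracee	-> user_aarch32_view
 *	64-bit tracer, 32-bit tracee		-> user_aarch32_ptrace_view
 *	64-bit tracer, 64-bit tracee		-> user_aarch64_view
 */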
2132  
2133  long arch_ptrace(struct task_struct *child, long request,
2134  		 unsigned long addr, unsigned long data)
2135  {
2136  	switch (request) {
2137  	case PTRACE_PEEKMTETAGS:
2138  	case PTRACE_POKEMTETAGS:
2139  		return mte_ptrace_copy_tags(child, request, addr, data);
2140  	}
2141  
2142  	return ptrace_request(child, request, addr, data);
2143  }
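
/*
 * Illustrative sketch, userspace rather than kernel code, following
 * Documentation/arm64/memory-tagging-extension.rst: MTE allocation tags
 * are transferred one tag per byte of the buffer, one tag covering each
 * 16-byte granule of the tracee's address space:
 *
 *	__u8 tags[16];
 *	struct iovec iov = { .iov_base = tags, .iov_len = sizeof(tags) };
 *	ptrace(PTRACE_PEEKMTETAGS, pid, (void *)addr, &iov);
 *
 * On return, iov.iov_len holds the number of tags actually copied.
 */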
2144  
2145  enum ptrace_syscall_dir {
2146  	PTRACE_SYSCALL_ENTER = 0,
2147  	PTRACE_SYSCALL_EXIT,
2148  };
2149  
2150  static void report_syscall(struct pt_regs *regs, enum ptrace_syscall_dir dir)
2151  {
2152  	int regno;
2153  	unsigned long saved_reg;
2154  
2155  	/*
2156  	 * We have some ABI weirdness here in the way that we handle syscall
2157  	 * exit stops because we indicate whether or not the stop has been
2158  	 * signalled from syscall entry or syscall exit by clobbering a general
2159  	 * purpose register (ip/r12 for AArch32, x7 for AArch64) in the tracee
2160  	 * and restoring its old value after the stop. This means that:
2161  	 *
2162  	 * - Any writes by the tracer to this register during the stop are
2163  	 *   ignored/discarded.
2164  	 *
2165  	 * - The actual value of the register is not available during the stop,
2166  	 *   so the tracer cannot save it and restore it later.
2167  	 *
2168  	 * - Syscall stops behave differently to seccomp and pseudo-step traps
2169  	 *   (the latter do not nobble any registers).
2170  	 */
2171  	regno = (is_compat_task() ? 12 : 7);
2172  	saved_reg = regs->regs[regno];
2173  	regs->regs[regno] = dir;
2174  
2175  	if (dir == PTRACE_SYSCALL_ENTER) {
2176  		if (ptrace_report_syscall_entry(regs))
2177  			forget_syscall(regs);
2178  		regs->regs[regno] = saved_reg;
2179  	} else if (!test_thread_flag(TIF_SINGLESTEP)) {
2180  		ptrace_report_syscall_exit(regs, 0);
2181  		regs->regs[regno] = saved_reg;
2182  	} else {
2183  		regs->regs[regno] = saved_reg;
2184  
2185  		/*
2186  		 * Signal a pseudo-step exception since we are stepping but
2187  		 * tracer modifications to the registers may have rewound the
2188  		 * state machine.
2189  		 */
2190  		ptrace_report_syscall_exit(regs, 1);
2191  	}
2192  }
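
/*
 * Illustrative sketch, userspace rather than kernel code: a 64-bit tracer
 * stopped via PTRACE_SYSCALL can tell entry stops from exit stops by
 * reading x7, which holds PTRACE_SYSCALL_ENTER (0) or PTRACE_SYSCALL_EXIT
 * (1) for the duration of the stop:
 *
 *	struct user_pt_regs uregs;
 *	struct iovec iov = { .iov_base = &uregs, .iov_len = sizeof(uregs) };
 *	ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, &iov);
 *	int entry = (uregs.regs[7] == 0);
 *
 * For an AArch32 tracee the same information is in r12 instead.
 */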
2193  
2194  int syscall_trace_enter(struct pt_regs *regs)
2195  {
2196  	unsigned long flags = read_thread_flags();
2197  
2198  	if (flags & (_TIF_SYSCALL_EMU | _TIF_SYSCALL_TRACE)) {
2199  		report_syscall(regs, PTRACE_SYSCALL_ENTER);
2200  		if (flags & _TIF_SYSCALL_EMU)
2201  			return NO_SYSCALL;
2202  	}
2203  
2204  	/* Do the secure computing after ptrace; failures should be fast. */
2205  	if (secure_computing() == -1)
2206  		return NO_SYSCALL;
2207  
2208  	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
2209  		trace_sys_enter(regs, regs->syscallno);
2210  
2211  	audit_syscall_entry(regs->syscallno, regs->orig_x0, regs->regs[1],
2212  			    regs->regs[2], regs->regs[3]);
2213  
2214  	return regs->syscallno;
2215  }
2216  
2217  void syscall_trace_exit(struct pt_regs *regs)
2218  {
2219  	unsigned long flags = read_thread_flags();
2220  
2221  	audit_syscall_exit(regs);
2222  
2223  	if (flags & _TIF_SYSCALL_TRACEPOINT)
2224  		trace_sys_exit(regs, syscall_get_return_value(current, regs));
2225  
2226  	if (flags & (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP))
2227  		report_syscall(regs, PTRACE_SYSCALL_EXIT);
2228  
2229  	rseq_syscall(regs);
2230  }
2231  
2232  /*
2233   * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487D.a.
2234   * We permit userspace to set SSBS (AArch64 bit 12, AArch32 bit 23) which is
2235   * not described in ARM DDI 0487D.a.
2236   * We treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may
2237   * be allocated an EL0 meaning in future.
2238   * Userspace cannot use these until they have an architectural meaning.
2239   * Note that this follows the SPSR_ELx format, not the AArch32 PSR format.
2240   * We also reserve IL for the kernel; SS is handled dynamically.
2241   */
2242  #define SPSR_EL1_AARCH64_RES0_BITS \
2243  	(GENMASK_ULL(63, 32) | GENMASK_ULL(27, 26) | GENMASK_ULL(23, 22) | \
2244  	 GENMASK_ULL(20, 13) | GENMASK_ULL(5, 5))
2245  #define SPSR_EL1_AARCH32_RES0_BITS \
2246  	(GENMASK_ULL(63, 32) | GENMASK_ULL(22, 22) | GENMASK_ULL(20, 20))
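
/*
 * For reference, the GENMASK_ULL() terms above evaluate to:
 *
 *	SPSR_EL1_AARCH64_RES0_BITS == 0xffffffff0cdfe020
 *	SPSR_EL1_AARCH32_RES0_BITS == 0xffffffff00500000
 */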
2247  
2248  static int valid_compat_regs(struct user_pt_regs *regs)
2249  {
2250  	regs->pstate &= ~SPSR_EL1_AARCH32_RES0_BITS;
2251  
2252  	if (!system_supports_mixed_endian_el0()) {
2253  		if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
2254  			regs->pstate |= PSR_AA32_E_BIT;
2255  		else
2256  			regs->pstate &= ~PSR_AA32_E_BIT;
2257  	}
2258  
2259  	if (user_mode(regs) && (regs->pstate & PSR_MODE32_BIT) &&
2260  	    (regs->pstate & PSR_AA32_A_BIT) == 0 &&
2261  	    (regs->pstate & PSR_AA32_I_BIT) == 0 &&
2262  	    (regs->pstate & PSR_AA32_F_BIT) == 0) {
2263  		return 1;
2264  	}
2265  
2266  	/*
2267  	 * Force PSR to a valid 32-bit EL0t, preserving the same bits as
2268  	 * arch/arm.
2269  	 */
2270  	regs->pstate &= PSR_AA32_N_BIT | PSR_AA32_Z_BIT |
2271  			PSR_AA32_C_BIT | PSR_AA32_V_BIT |
2272  			PSR_AA32_Q_BIT | PSR_AA32_IT_MASK |
2273  			PSR_AA32_GE_MASK | PSR_AA32_E_BIT |
2274  			PSR_AA32_T_BIT;
2275  	regs->pstate |= PSR_MODE32_BIT;
2276  
2277  	return 0;
2278  }
2279  
2280  static int valid_native_regs(struct user_pt_regs *regs)
2281  {
2282  	regs->pstate &= ~SPSR_EL1_AARCH64_RES0_BITS;
2283  
2284  	if (user_mode(regs) && !(regs->pstate & PSR_MODE32_BIT) &&
2285  	    (regs->pstate & PSR_D_BIT) == 0 &&
2286  	    (regs->pstate & PSR_A_BIT) == 0 &&
2287  	    (regs->pstate & PSR_I_BIT) == 0 &&
2288  	    (regs->pstate & PSR_F_BIT) == 0) {
2289  		return 1;
2290  	}
2291  
2292  	/* Force PSR to a valid 64-bit EL0t */
2293  	regs->pstate &= PSR_N_BIT | PSR_Z_BIT | PSR_C_BIT | PSR_V_BIT;
2294  
2295  	return 0;
2296  }
2297  
2298  /*
2299   * Are the current registers suitable for user mode? (used to maintain
2300   * security in signal handlers)
2301   */
2302  int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task)
2303  {
2304  	/* https://lore.kernel.org/lkml/20191118131525.GA4180@willie-the-truck */
2305  	user_regs_reset_single_step(regs, task);
2306  
2307  	if (is_compat_thread(task_thread_info(task)))
2308  		return valid_compat_regs(regs);
2309  	else
2310  		return valid_native_regs(regs);
2311  }
2312