1 /*
2  *  PowerPC version
3  *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4  *
5  *  Derived from "arch/m68k/kernel/ptrace.c"
6  *  Copyright (C) 1994 by Hamish Macdonald
7  *  Taken from linux/kernel/ptrace.c and modified for M680x0.
8  *  linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
9  *
10  * Modified by Cort Dougan (cort@hq.fsmlabs.com)
11  * and Paul Mackerras (paulus@samba.org).
12  *
13  * This file is subject to the terms and conditions of the GNU General
14  * Public License.  See the file README.legal in the main directory of
15  * this archive for more details.
16  */
17 
18 #include <linux/regset.h>
19 #include <linux/tracehook.h>
20 #include <linux/elf.h>
21 #include <linux/audit.h>
22 #include <linux/hw_breakpoint.h>
23 #include <linux/context_tracking.h>
24 #include <linux/nospec.h>
25 #include <linux/syscalls.h>
26 #include <linux/pkeys.h>
27 
28 #include <asm/switch_to.h>
29 #include <asm/tm.h>
30 #include <asm/asm-prototypes.h>
31 #include <asm/debug.h>
32 
33 #define CREATE_TRACE_POINTS
34 #include <trace/events/syscalls.h>
35 
36 #include "ptrace-decl.h"
37 
/* Maps a human-readable register name to its byte offset in struct pt_regs. */
struct pt_regs_offset {
	const char *name;
	int offset;
};

#define STR(s)	#s			/* convert to string */
#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
/* Each GPR gets two table entries: "rN" and "gprN", same offset. */
#define GPR_OFFSET_NAME(num)	\
	{.name = STR(r##num), .offset = offsetof(struct pt_regs, gpr[num])}, \
	{.name = STR(gpr##num), .offset = offsetof(struct pt_regs, gpr[num])}
#define REG_OFFSET_END {.name = NULL, .offset = 0}

/* Shorthand for offsets into the VR state, FP state and thread_struct. */
#define TVSO(f)	(offsetof(struct thread_vr_state, f))
#define TFSO(f)	(offsetof(struct thread_fp_state, f))
#define TSO(f)	(offsetof(struct thread_struct, f))
53 
/*
 * Name -> pt_regs offset lookup table used by regs_query_register_offset()
 * and regs_query_register_name().  Terminated by REG_OFFSET_END (NULL name).
 */
static const struct pt_regs_offset regoffset_table[] = {
	GPR_OFFSET_NAME(0),
	GPR_OFFSET_NAME(1),
	GPR_OFFSET_NAME(2),
	GPR_OFFSET_NAME(3),
	GPR_OFFSET_NAME(4),
	GPR_OFFSET_NAME(5),
	GPR_OFFSET_NAME(6),
	GPR_OFFSET_NAME(7),
	GPR_OFFSET_NAME(8),
	GPR_OFFSET_NAME(9),
	GPR_OFFSET_NAME(10),
	GPR_OFFSET_NAME(11),
	GPR_OFFSET_NAME(12),
	GPR_OFFSET_NAME(13),
	GPR_OFFSET_NAME(14),
	GPR_OFFSET_NAME(15),
	GPR_OFFSET_NAME(16),
	GPR_OFFSET_NAME(17),
	GPR_OFFSET_NAME(18),
	GPR_OFFSET_NAME(19),
	GPR_OFFSET_NAME(20),
	GPR_OFFSET_NAME(21),
	GPR_OFFSET_NAME(22),
	GPR_OFFSET_NAME(23),
	GPR_OFFSET_NAME(24),
	GPR_OFFSET_NAME(25),
	GPR_OFFSET_NAME(26),
	GPR_OFFSET_NAME(27),
	GPR_OFFSET_NAME(28),
	GPR_OFFSET_NAME(29),
	GPR_OFFSET_NAME(30),
	GPR_OFFSET_NAME(31),
	REG_OFFSET_NAME(nip),
	REG_OFFSET_NAME(msr),
	REG_OFFSET_NAME(ctr),
	REG_OFFSET_NAME(link),
	REG_OFFSET_NAME(xer),
	REG_OFFSET_NAME(ccr),
#ifdef CONFIG_PPC64
	REG_OFFSET_NAME(softe),
#else
	REG_OFFSET_NAME(mq),
#endif
	REG_OFFSET_NAME(trap),
	REG_OFFSET_NAME(dar),
	REG_OFFSET_NAME(dsisr),
	REG_OFFSET_END,
};
103 
104 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
void flush_tmregs_to_thread(struct task_struct *tsk)
{
	/*
	 * If task is not current, it will have been flushed already to
	 * its thread_struct during __switch_to().
	 *
	 * A reclaim flushes ALL the state or if not in TM save TM SPRs
	 * in the appropriate thread structures from live.
	 */

	/* Nothing to do without TM hardware, or if the state is already saved. */
	if ((!cpu_has_feature(CPU_FTR_TM)) || (tsk != current))
		return;

	if (MSR_TM_SUSPENDED(mfmsr())) {
		/* Suspended transaction: a reclaim flushes all TM state. */
		tm_reclaim_current(TM_CAUSE_SIGNAL);
	} else {
		/* Not in a transaction: just snapshot the TM SPRs. */
		tm_enable();
		tm_save_sprs(&(tsk->thread));
	}
}
125 #endif
126 
127 /**
128  * regs_query_register_offset() - query register offset from its name
129  * @name:	the name of a register
130  *
131  * regs_query_register_offset() returns the offset of a register in struct
132  * pt_regs from its name. If the name is invalid, this returns -EINVAL;
133  */
134 int regs_query_register_offset(const char *name)
135 {
136 	const struct pt_regs_offset *roff;
137 	for (roff = regoffset_table; roff->name != NULL; roff++)
138 		if (!strcmp(roff->name, name))
139 			return roff->offset;
140 	return -EINVAL;
141 }
142 
143 /**
144  * regs_query_register_name() - query register name from its offset
145  * @offset:	the offset of a register in struct pt_regs.
146  *
147  * regs_query_register_name() returns the name of a register from its
148  * offset in struct pt_regs. If the @offset is invalid, this returns NULL;
149  */
150 const char *regs_query_register_name(unsigned int offset)
151 {
152 	const struct pt_regs_offset *roff;
153 	for (roff = regoffset_table; roff->name != NULL; roff++)
154 		if (roff->offset == offset)
155 			return roff->name;
156 	return NULL;
157 }
158 
159 /*
160  * does not yet catch signals sent when the child dies.
161  * in exit.c or in signal.c.
162  */
163 
164 /*
165  * Set of msr bits that gdb can change on behalf of a process.
166  */
167 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
168 #define MSR_DEBUGCHANGE	0
169 #else
170 #define MSR_DEBUGCHANGE	(MSR_SE | MSR_BE)
171 #endif
172 
173 /*
174  * Max register writeable via put_reg
175  */
176 #ifdef CONFIG_PPC32
177 #define PT_MAX_PUT_REG	PT_MQ
178 #else
179 #define PT_MAX_PUT_REG	PT_CCR
180 #endif
181 
182 static unsigned long get_user_msr(struct task_struct *task)
183 {
184 	return task->thread.regs->msr | task->thread.fpexc_mode;
185 }
186 
187 static int set_user_msr(struct task_struct *task, unsigned long msr)
188 {
189 	task->thread.regs->msr &= ~MSR_DEBUGCHANGE;
190 	task->thread.regs->msr |= msr & MSR_DEBUGCHANGE;
191 	return 0;
192 }
193 
194 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
195 static unsigned long get_user_ckpt_msr(struct task_struct *task)
196 {
197 	return task->thread.ckpt_regs.msr | task->thread.fpexc_mode;
198 }
199 
200 static int set_user_ckpt_msr(struct task_struct *task, unsigned long msr)
201 {
202 	task->thread.ckpt_regs.msr &= ~MSR_DEBUGCHANGE;
203 	task->thread.ckpt_regs.msr |= msr & MSR_DEBUGCHANGE;
204 	return 0;
205 }
206 
static int set_user_ckpt_trap(struct task_struct *task, unsigned long trap)
{
	/* The low four bits of trap are reserved for kernel-internal use. */
	task->thread.ckpt_regs.trap = trap & 0xfff0;
	return 0;
}
212 #endif
213 
214 #ifdef CONFIG_PPC64
static int get_user_dscr(struct task_struct *task, unsigned long *data)
{
	/* Return the thread's saved Data Stream Control Register value. */
	*data = task->thread.dscr;
	return 0;
}
220 
static int set_user_dscr(struct task_struct *task, unsigned long dscr)
{
	task->thread.dscr = dscr;
	/* NOTE(review): presumably marks DSCR as explicitly set so it is
	 * preserved/inherited — confirm against dscr_inherit consumers. */
	task->thread.dscr_inherit = 1;
	return 0;
}
227 #else
static int get_user_dscr(struct task_struct *task, unsigned long *data)
{
	/* DSCR is not available on 32-bit PowerPC. */
	return -EIO;
}
232 
static int set_user_dscr(struct task_struct *task, unsigned long dscr)
{
	/* DSCR is not available on 32-bit PowerPC. */
	return -EIO;
}
237 #endif
238 
239 /*
240  * We prevent mucking around with the reserved area of trap
241  * which are used internally by the kernel.
242  */
static int set_user_trap(struct task_struct *task, unsigned long trap)
{
	/* Mask off the low four bits, which are reserved by the kernel. */
	task->thread.regs->trap = trap & 0xfff0;
	return 0;
}
248 
249 /*
250  * Get contents of register REGNO in task TASK.
251  */
int ptrace_get_reg(struct task_struct *task, int regno, unsigned long *data)
{
	unsigned int regs_max;

	if ((task->thread.regs == NULL) || !data)
		return -EIO;

	/* MSR is synthesized so userspace also sees the fpexc mode bits. */
	if (regno == PT_MSR) {
		*data = get_user_msr(task);
		return 0;
	}

	if (regno == PT_DSCR)
		return get_user_dscr(task, data);

	/*
	 * softe copies paca->irq_soft_mask variable state. Since irq_soft_mask
	 * is no longer used as a flag, force userspace to always see the softe
	 * value as 1, which means interrupts are not soft disabled.
	 */
	if (IS_ENABLED(CONFIG_PPC64) && regno == PT_SOFTE) {
		*data = 1;
		return  0;
	}

	/* Generic case: bounds-check, then index pt_regs as a word array. */
	regs_max = sizeof(struct user_pt_regs) / sizeof(unsigned long);
	if (regno < regs_max) {
		/* Clamp under speculation (Spectre-v1) before the access. */
		regno = array_index_nospec(regno, regs_max);
		*data = ((unsigned long *)task->thread.regs)[regno];
		return 0;
	}

	return -EIO;
}
286 
287 /*
288  * Write contents of register REGNO in task TASK.
289  */
290 int ptrace_put_reg(struct task_struct *task, int regno, unsigned long data)
291 {
292 	if (task->thread.regs == NULL)
293 		return -EIO;
294 
295 	if (regno == PT_MSR)
296 		return set_user_msr(task, data);
297 	if (regno == PT_TRAP)
298 		return set_user_trap(task, data);
299 	if (regno == PT_DSCR)
300 		return set_user_dscr(task, data);
301 
302 	if (regno <= PT_MAX_PUT_REG) {
303 		regno = array_index_nospec(regno, PT_MAX_PUT_REG + 1);
304 		((unsigned long *)task->thread.regs)[regno] = data;
305 		return 0;
306 	}
307 	return -EIO;
308 }
309 
/*
 * Copy the user-visible GPR regset out of the target's pt_regs, with MSR
 * synthesized via get_user_msr() and anything past user_pt_regs zero-filled.
 */
static int gpr_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	int i, ret;

	if (target->thread.regs == NULL)
		return -EIO;

	if (!FULL_REGS(target->thread.regs)) {
		/* We have a partial register set.  Fill 14-31 with bogus values */
		for (i = 14; i < 32; i++)
			target->thread.regs->gpr[i] = NV_REG_POISON;
	}

	/* Everything up to (but not including) MSR comes verbatim from pt_regs. */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  target->thread.regs,
				  0, offsetof(struct pt_regs, msr));
	if (!ret) {
		/* MSR is synthesized so userspace sees the fpexc mode bits. */
		unsigned long msr = get_user_msr(target);
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &msr,
					  offsetof(struct pt_regs, msr),
					  offsetof(struct pt_regs, msr) +
					  sizeof(msr));
	}

	/* The copy below assumes orig_gpr3 directly follows msr in pt_regs. */
	BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
		     offsetof(struct pt_regs, msr) + sizeof(long));

	if (!ret)
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &target->thread.regs->orig_gpr3,
					  offsetof(struct pt_regs, orig_gpr3),
					  sizeof(struct user_pt_regs));
	if (!ret)
		/* Zero-fill anything requested beyond user_pt_regs. */
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       sizeof(struct user_pt_regs), -1);

	return ret;
}
350 
/*
 * Write the user-supplied GPR regset into the target's pt_regs, routing MSR
 * and TRAP through their dedicated setters so protected/reserved bits are
 * filtered, and ignoring words userspace is not allowed to change.
 */
static int gpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	unsigned long reg;
	int ret;

	if (target->thread.regs == NULL)
		return -EIO;

	CHECK_FULL_REGS(target->thread.regs);

	/* Everything up to (but not including) MSR goes verbatim into pt_regs. */
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 target->thread.regs,
				 0, PT_MSR * sizeof(reg));

	if (!ret && count > 0) {
		/* MSR goes through set_user_msr() to filter protected bits. */
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
					 PT_MSR * sizeof(reg),
					 (PT_MSR + 1) * sizeof(reg));
		if (!ret)
			ret = set_user_msr(target, reg);
	}

	/* The copy below assumes orig_gpr3 directly follows msr in pt_regs. */
	BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
		     offsetof(struct pt_regs, msr) + sizeof(long));

	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &target->thread.regs->orig_gpr3,
					 PT_ORIG_R3 * sizeof(reg),
					 (PT_MAX_PUT_REG + 1) * sizeof(reg));

	/* Skip any read-only words between PT_MAX_PUT_REG and PT_TRAP. */
	if (PT_MAX_PUT_REG + 1 < PT_TRAP && !ret)
		ret = user_regset_copyin_ignore(
			&pos, &count, &kbuf, &ubuf,
			(PT_MAX_PUT_REG + 1) * sizeof(reg),
			PT_TRAP * sizeof(reg));

	if (!ret && count > 0) {
		/* TRAP goes through set_user_trap() to mask reserved bits. */
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
					 PT_TRAP * sizeof(reg),
					 (PT_TRAP + 1) * sizeof(reg));
		if (!ret)
			ret = set_user_trap(target, reg);
	}

	if (!ret)
		/* Everything past TRAP is ignored. */
		ret = user_regset_copyin_ignore(
			&pos, &count, &kbuf, &ubuf,
			(PT_TRAP + 1) * sizeof(reg), -1);

	return ret;
}
405 
406 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
407 /**
408  * tm_cgpr_active - get active number of registers in CGPR
409  * @target:	The target task.
410  * @regset:	The user regset structure.
411  *
412  * This function checks for the active number of available
 * registers in transaction checkpointed GPR category.
414  */
415 static int tm_cgpr_active(struct task_struct *target,
416 			  const struct user_regset *regset)
417 {
418 	if (!cpu_has_feature(CPU_FTR_TM))
419 		return -ENODEV;
420 
421 	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
422 		return 0;
423 
424 	return regset->n;
425 }
426 
427 /**
428  * tm_cgpr_get - get CGPR registers
429  * @target:	The target task.
430  * @regset:	The user regset structure.
431  * @pos:	The buffer position.
432  * @count:	Number of bytes to copy.
433  * @kbuf:	Kernel buffer to copy from.
434  * @ubuf:	User buffer to copy into.
435  *
436  * This function gets transaction checkpointed GPR registers.
437  *
438  * When the transaction is active, 'ckpt_regs' holds all the checkpointed
439  * GPR register values for the current transaction to fall back on if it
440  * aborts in between. This function gets those checkpointed GPR registers.
441  * The userspace interface buffer layout is as follows.
442  *
443  * struct data {
444  *	struct pt_regs ckpt_regs;
445  * };
446  */
static int tm_cgpr_get(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			void *kbuf, void __user *ubuf)
{
	int ret;

	if (!cpu_has_feature(CPU_FTR_TM))
		return -ENODEV;

	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
		return -ENODATA;

	/* Make sure ckpt_regs holds up-to-date checkpointed state. */
	flush_tmregs_to_thread(target);
	flush_fp_to_thread(target);
	flush_altivec_to_thread(target);

	/* Everything up to (but not including) MSR, verbatim from ckpt_regs. */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &target->thread.ckpt_regs,
				  0, offsetof(struct pt_regs, msr));
	if (!ret) {
		/* MSR is synthesized so userspace sees the fpexc mode bits. */
		unsigned long msr = get_user_ckpt_msr(target);

		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &msr,
					  offsetof(struct pt_regs, msr),
					  offsetof(struct pt_regs, msr) +
					  sizeof(msr));
	}

	/* The copy below assumes orig_gpr3 directly follows msr in pt_regs. */
	BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
		     offsetof(struct pt_regs, msr) + sizeof(long));

	if (!ret)
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &target->thread.ckpt_regs.orig_gpr3,
					  offsetof(struct pt_regs, orig_gpr3),
					  sizeof(struct user_pt_regs));
	if (!ret)
		/* Zero-fill anything requested beyond user_pt_regs. */
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       sizeof(struct user_pt_regs), -1);

	return ret;
}
490 
491 /*
492  * tm_cgpr_set - set the CGPR registers
493  * @target:	The target task.
494  * @regset:	The user regset structure.
495  * @pos:	The buffer position.
496  * @count:	Number of bytes to copy.
497  * @kbuf:	Kernel buffer to copy into.
498  * @ubuf:	User buffer to copy from.
499  *
500  * This function sets in transaction checkpointed GPR registers.
501  *
502  * When the transaction is active, 'ckpt_regs' holds the checkpointed
503  * GPR register values for the current transaction to fall back on if it
504  * aborts in between. This function sets those checkpointed GPR registers.
505  * The userspace interface buffer layout is as follows.
506  *
507  * struct data {
508  *	struct pt_regs ckpt_regs;
509  * };
510  */
static int tm_cgpr_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	unsigned long reg;
	int ret;

	if (!cpu_has_feature(CPU_FTR_TM))
		return -ENODEV;

	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
		return -ENODATA;

	/* Make sure ckpt_regs holds up-to-date checkpointed state. */
	flush_tmregs_to_thread(target);
	flush_fp_to_thread(target);
	flush_altivec_to_thread(target);

	/* Everything up to (but not including) MSR, verbatim into ckpt_regs. */
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.ckpt_regs,
				 0, PT_MSR * sizeof(reg));

	if (!ret && count > 0) {
		/* MSR goes through set_user_ckpt_msr() to filter protected bits. */
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
					 PT_MSR * sizeof(reg),
					 (PT_MSR + 1) * sizeof(reg));
		if (!ret)
			ret = set_user_ckpt_msr(target, reg);
	}

	/* The copy below assumes orig_gpr3 directly follows msr in pt_regs. */
	BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
		     offsetof(struct pt_regs, msr) + sizeof(long));

	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &target->thread.ckpt_regs.orig_gpr3,
					 PT_ORIG_R3 * sizeof(reg),
					 (PT_MAX_PUT_REG + 1) * sizeof(reg));

	/* Skip any read-only words between PT_MAX_PUT_REG and PT_TRAP. */
	if (PT_MAX_PUT_REG + 1 < PT_TRAP && !ret)
		ret = user_regset_copyin_ignore(
			&pos, &count, &kbuf, &ubuf,
			(PT_MAX_PUT_REG + 1) * sizeof(reg),
			PT_TRAP * sizeof(reg));

	if (!ret && count > 0) {
		/* TRAP goes through set_user_ckpt_trap() to mask reserved bits. */
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
					 PT_TRAP * sizeof(reg),
					 (PT_TRAP + 1) * sizeof(reg));
		if (!ret)
			ret = set_user_ckpt_trap(target, reg);
	}

	if (!ret)
		/* Everything past TRAP is ignored. */
		ret = user_regset_copyin_ignore(
			&pos, &count, &kbuf, &ubuf,
			(PT_TRAP + 1) * sizeof(reg), -1);

	return ret;
}
571 
572 /**
573  * tm_cfpr_active - get active number of registers in CFPR
574  * @target:	The target task.
575  * @regset:	The user regset structure.
576  *
577  * This function checks for the active number of available
 * registers in transaction checkpointed FPR category.
579  */
580 static int tm_cfpr_active(struct task_struct *target,
581 				const struct user_regset *regset)
582 {
583 	if (!cpu_has_feature(CPU_FTR_TM))
584 		return -ENODEV;
585 
586 	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
587 		return 0;
588 
589 	return regset->n;
590 }
591 
592 /**
593  * tm_cfpr_get - get CFPR registers
594  * @target:	The target task.
595  * @regset:	The user regset structure.
596  * @pos:	The buffer position.
597  * @count:	Number of bytes to copy.
598  * @kbuf:	Kernel buffer to copy from.
599  * @ubuf:	User buffer to copy into.
600  *
601  * This function gets in transaction checkpointed FPR registers.
602  *
603  * When the transaction is active 'ckfp_state' holds the checkpointed
604  * values for the current transaction to fall back on if it aborts
605  * in between. This function gets those checkpointed FPR registers.
606  * The userspace interface buffer layout is as follows.
607  *
608  * struct data {
609  *	u64	fpr[32];
610  *	u64	fpscr;
611  *};
612  */
613 static int tm_cfpr_get(struct task_struct *target,
614 			const struct user_regset *regset,
615 			unsigned int pos, unsigned int count,
616 			void *kbuf, void __user *ubuf)
617 {
618 	u64 buf[33];
619 	int i;
620 
621 	if (!cpu_has_feature(CPU_FTR_TM))
622 		return -ENODEV;
623 
624 	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
625 		return -ENODATA;
626 
627 	flush_tmregs_to_thread(target);
628 	flush_fp_to_thread(target);
629 	flush_altivec_to_thread(target);
630 
631 	/* copy to local buffer then write that out */
632 	for (i = 0; i < 32 ; i++)
633 		buf[i] = target->thread.TS_CKFPR(i);
634 	buf[32] = target->thread.ckfp_state.fpscr;
635 	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
636 }
637 
638 /**
639  * tm_cfpr_set - set CFPR registers
640  * @target:	The target task.
641  * @regset:	The user regset structure.
642  * @pos:	The buffer position.
643  * @count:	Number of bytes to copy.
644  * @kbuf:	Kernel buffer to copy into.
645  * @ubuf:	User buffer to copy from.
646  *
647  * This function sets in transaction checkpointed FPR registers.
648  *
649  * When the transaction is active 'ckfp_state' holds the checkpointed
650  * FPR register values for the current transaction to fall back on
651  * if it aborts in between. This function sets these checkpointed
652  * FPR registers. The userspace interface buffer layout is as follows.
653  *
654  * struct data {
655  *	u64	fpr[32];
656  *	u64	fpscr;
657  *};
658  */
659 static int tm_cfpr_set(struct task_struct *target,
660 			const struct user_regset *regset,
661 			unsigned int pos, unsigned int count,
662 			const void *kbuf, const void __user *ubuf)
663 {
664 	u64 buf[33];
665 	int i;
666 
667 	if (!cpu_has_feature(CPU_FTR_TM))
668 		return -ENODEV;
669 
670 	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
671 		return -ENODATA;
672 
673 	flush_tmregs_to_thread(target);
674 	flush_fp_to_thread(target);
675 	flush_altivec_to_thread(target);
676 
677 	for (i = 0; i < 32; i++)
678 		buf[i] = target->thread.TS_CKFPR(i);
679 	buf[32] = target->thread.ckfp_state.fpscr;
680 
681 	/* copy to local buffer then write that out */
682 	i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
683 	if (i)
684 		return i;
685 	for (i = 0; i < 32 ; i++)
686 		target->thread.TS_CKFPR(i) = buf[i];
687 	target->thread.ckfp_state.fpscr = buf[32];
688 	return 0;
689 }
690 
691 /**
692  * tm_cvmx_active - get active number of registers in CVMX
693  * @target:	The target task.
694  * @regset:	The user regset structure.
695  *
696  * This function checks for the active number of available
 * registers in checkpointed VMX category.
698  */
699 static int tm_cvmx_active(struct task_struct *target,
700 				const struct user_regset *regset)
701 {
702 	if (!cpu_has_feature(CPU_FTR_TM))
703 		return -ENODEV;
704 
705 	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
706 		return 0;
707 
708 	return regset->n;
709 }
710 
711 /**
712  * tm_cvmx_get - get CMVX registers
713  * @target:	The target task.
714  * @regset:	The user regset structure.
715  * @pos:	The buffer position.
716  * @count:	Number of bytes to copy.
717  * @kbuf:	Kernel buffer to copy from.
718  * @ubuf:	User buffer to copy into.
719  *
720  * This function gets in transaction checkpointed VMX registers.
721  *
722  * When the transaction is active 'ckvr_state' and 'ckvrsave' hold
723  * the checkpointed values for the current transaction to fall
724  * back on if it aborts in between. The userspace interface buffer
725  * layout is as follows.
726  *
727  * struct data {
728  *	vector128	vr[32];
729  *	vector128	vscr;
730  *	vector128	vrsave;
731  *};
732  */
static int tm_cvmx_get(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			void *kbuf, void __user *ubuf)
{
	int ret;

	/* vscr must sit directly after the 32 vr[] entries for the copy below. */
	BUILD_BUG_ON(TVSO(vscr) != TVSO(vr[32]));

	if (!cpu_has_feature(CPU_FTR_TM))
		return -ENODEV;

	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
		return -ENODATA;

	/* Flush the state */
	flush_tmregs_to_thread(target);
	flush_fp_to_thread(target);
	flush_altivec_to_thread(target);

	/* vr[0..31] and vscr in one contiguous copy from ckvr_state. */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					&target->thread.ckvr_state, 0,
					33 * sizeof(vector128));
	if (!ret) {
		/*
		 * Copy out only the low-order word of vrsave.
		 */
		union {
			elf_vrreg_t reg;
			u32 word;
		} vrsave;
		memset(&vrsave, 0, sizeof(vrsave));
		vrsave.word = target->thread.ckvrsave;
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
						33 * sizeof(vector128), -1);
	}

	return ret;
}
772 
773 /**
774  * tm_cvmx_set - set CMVX registers
775  * @target:	The target task.
776  * @regset:	The user regset structure.
777  * @pos:	The buffer position.
778  * @count:	Number of bytes to copy.
779  * @kbuf:	Kernel buffer to copy into.
780  * @ubuf:	User buffer to copy from.
781  *
782  * This function sets in transaction checkpointed VMX registers.
783  *
784  * When the transaction is active 'ckvr_state' and 'ckvrsave' hold
785  * the checkpointed values for the current transaction to fall
786  * back on if it aborts in between. The userspace interface buffer
787  * layout is as follows.
788  *
789  * struct data {
790  *	vector128	vr[32];
791  *	vector128	vscr;
792  *	vector128	vrsave;
793  *};
794  */
static int tm_cvmx_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	int ret;

	/* vscr must sit directly after the 32 vr[] entries for the copy below. */
	BUILD_BUG_ON(TVSO(vscr) != TVSO(vr[32]));

	if (!cpu_has_feature(CPU_FTR_TM))
		return -ENODEV;

	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
		return -ENODATA;

	/* Make sure ckvr_state holds up-to-date checkpointed state. */
	flush_tmregs_to_thread(target);
	flush_fp_to_thread(target);
	flush_altivec_to_thread(target);

	/* vr[0..31] and vscr in one contiguous copy into ckvr_state. */
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					&target->thread.ckvr_state, 0,
					33 * sizeof(vector128));
	if (!ret && count > 0) {
		/*
		 * We use only the low-order word of vrsave.
		 */
		union {
			elf_vrreg_t reg;
			u32 word;
		} vrsave;
		memset(&vrsave, 0, sizeof(vrsave));
		vrsave.word = target->thread.ckvrsave;
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
						33 * sizeof(vector128), -1);
		if (!ret)
			target->thread.ckvrsave = vrsave.word;
	}

	return ret;
}
835 
836 /**
837  * tm_cvsx_active - get active number of registers in CVSX
838  * @target:	The target task.
839  * @regset:	The user regset structure.
840  *
841  * This function checks for the active number of available
 * registers in transaction checkpointed VSX category.
843  */
844 static int tm_cvsx_active(struct task_struct *target,
845 				const struct user_regset *regset)
846 {
847 	if (!cpu_has_feature(CPU_FTR_TM))
848 		return -ENODEV;
849 
850 	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
851 		return 0;
852 
853 	flush_vsx_to_thread(target);
854 	return target->thread.used_vsr ? regset->n : 0;
855 }
856 
857 /**
858  * tm_cvsx_get - get CVSX registers
859  * @target:	The target task.
860  * @regset:	The user regset structure.
861  * @pos:	The buffer position.
862  * @count:	Number of bytes to copy.
863  * @kbuf:	Kernel buffer to copy from.
864  * @ubuf:	User buffer to copy into.
865  *
866  * This function gets in transaction checkpointed VSX registers.
867  *
868  * When the transaction is active 'ckfp_state' holds the checkpointed
869  * values for the current transaction to fall back on if it aborts
870  * in between. This function gets those checkpointed VSX registers.
871  * The userspace interface buffer layout is as follows.
872  *
873  * struct data {
874  *	u64	vsx[32];
875  *};
876  */
877 static int tm_cvsx_get(struct task_struct *target,
878 			const struct user_regset *regset,
879 			unsigned int pos, unsigned int count,
880 			void *kbuf, void __user *ubuf)
881 {
882 	u64 buf[32];
883 	int ret, i;
884 
885 	if (!cpu_has_feature(CPU_FTR_TM))
886 		return -ENODEV;
887 
888 	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
889 		return -ENODATA;
890 
891 	/* Flush the state */
892 	flush_tmregs_to_thread(target);
893 	flush_fp_to_thread(target);
894 	flush_altivec_to_thread(target);
895 	flush_vsx_to_thread(target);
896 
897 	for (i = 0; i < 32 ; i++)
898 		buf[i] = target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
899 	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
900 				  buf, 0, 32 * sizeof(double));
901 
902 	return ret;
903 }
904 
905 /**
906  * tm_cvsx_set - set CFPR registers
907  * @target:	The target task.
908  * @regset:	The user regset structure.
909  * @pos:	The buffer position.
910  * @count:	Number of bytes to copy.
911  * @kbuf:	Kernel buffer to copy into.
912  * @ubuf:	User buffer to copy from.
913  *
914  * This function sets in transaction checkpointed VSX registers.
915  *
916  * When the transaction is active 'ckfp_state' holds the checkpointed
917  * VSX register values for the current transaction to fall back on
918  * if it aborts in between. This function sets these checkpointed
919  * FPR registers. The userspace interface buffer layout is as follows.
920  *
921  * struct data {
922  *	u64	vsx[32];
923  *};
924  */
static int tm_cvsx_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	u64 buf[32];
	int ret, i;

	if (!cpu_has_feature(CPU_FTR_TM))
		return -ENODEV;

	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
		return -ENODATA;

	/* Flush the state */
	flush_tmregs_to_thread(target);
	flush_fp_to_thread(target);
	flush_altivec_to_thread(target);
	flush_vsx_to_thread(target);

	/* Seed with the current values so a partial write keeps the rest. */
	for (i = 0; i < 32 ; i++)
		buf[i] = target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 buf, 0, 32 * sizeof(double));
	if (!ret)
		/* Commit only after a successful copy-in. */
		for (i = 0; i < 32 ; i++)
			target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];

	return ret;
}
956 
957 /**
958  * tm_spr_active - get active number of registers in TM SPR
959  * @target:	The target task.
960  * @regset:	The user regset structure.
961  *
962  * This function checks the active number of available
 * registers in the transactional memory SPR category.
964  */
965 static int tm_spr_active(struct task_struct *target,
966 			 const struct user_regset *regset)
967 {
968 	if (!cpu_has_feature(CPU_FTR_TM))
969 		return -ENODEV;
970 
971 	return regset->n;
972 }
973 
974 /**
975  * tm_spr_get - get the TM related SPR registers
976  * @target:	The target task.
977  * @regset:	The user regset structure.
978  * @pos:	The buffer position.
979  * @count:	Number of bytes to copy.
980  * @kbuf:	Kernel buffer to copy from.
981  * @ubuf:	User buffer to copy into.
982  *
983  * This function gets transactional memory related SPR registers.
984  * The userspace interface buffer layout is as follows.
985  *
986  * struct {
987  *	u64		tm_tfhar;
988  *	u64		tm_texasr;
989  *	u64		tm_tfiar;
990  * };
991  */
static int tm_spr_get(struct task_struct *target,
		      const struct user_regset *regset,
		      unsigned int pos, unsigned int count,
		      void *kbuf, void __user *ubuf)
{
	int ret;

	/* Build tests: the offsets below rely on this exact field layout. */
	BUILD_BUG_ON(TSO(tm_tfhar) + sizeof(u64) != TSO(tm_texasr));
	BUILD_BUG_ON(TSO(tm_texasr) + sizeof(u64) != TSO(tm_tfiar));
	BUILD_BUG_ON(TSO(tm_tfiar) + sizeof(u64) != TSO(ckpt_regs));

	if (!cpu_has_feature(CPU_FTR_TM))
		return -ENODEV;

	/* Flush the states */
	flush_tmregs_to_thread(target);
	flush_fp_to_thread(target);
	flush_altivec_to_thread(target);

	/* TFHAR register */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				&target->thread.tm_tfhar, 0, sizeof(u64));

	/* TEXASR register */
	if (!ret)
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				&target->thread.tm_texasr, sizeof(u64),
				2 * sizeof(u64));

	/* TFIAR register */
	if (!ret)
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				&target->thread.tm_tfiar,
				2 * sizeof(u64), 3 * sizeof(u64));
	return ret;
}
1029 
1030 /**
1031  * tm_spr_set - set the TM related SPR registers
1032  * @target:	The target task.
1033  * @regset:	The user regset structure.
1034  * @pos:	The buffer position.
1035  * @count:	Number of bytes to copy.
1036  * @kbuf:	Kernel buffer to copy into.
1037  * @ubuf:	User buffer to copy from.
1038  *
1039  * This function sets transactional memory related SPR registers.
1040  * The userspace interface buffer layout is as follows.
1041  *
1042  * struct {
1043  *	u64		tm_tfhar;
1044  *	u64		tm_texasr;
1045  *	u64		tm_tfiar;
1046  * };
1047  */
static int tm_spr_set(struct task_struct *target,
		      const struct user_regset *regset,
		      unsigned int pos, unsigned int count,
		      const void *kbuf, const void __user *ubuf)
{
	int ret;

	/* Build tests: the offsets below rely on this exact field layout. */
	BUILD_BUG_ON(TSO(tm_tfhar) + sizeof(u64) != TSO(tm_texasr));
	BUILD_BUG_ON(TSO(tm_texasr) + sizeof(u64) != TSO(tm_tfiar));
	BUILD_BUG_ON(TSO(tm_tfiar) + sizeof(u64) != TSO(ckpt_regs));

	if (!cpu_has_feature(CPU_FTR_TM))
		return -ENODEV;

	/* Flush the states */
	flush_tmregs_to_thread(target);
	flush_fp_to_thread(target);
	flush_altivec_to_thread(target);

	/* TFHAR register */
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				&target->thread.tm_tfhar, 0, sizeof(u64));

	/* TEXASR register */
	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				&target->thread.tm_texasr, sizeof(u64),
				2 * sizeof(u64));

	/* TFIAR register */
	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				&target->thread.tm_tfiar,
				 2 * sizeof(u64), 3 * sizeof(u64));
	return ret;
}
1085 
1086 static int tm_tar_active(struct task_struct *target,
1087 			 const struct user_regset *regset)
1088 {
1089 	if (!cpu_has_feature(CPU_FTR_TM))
1090 		return -ENODEV;
1091 
1092 	if (MSR_TM_ACTIVE(target->thread.regs->msr))
1093 		return regset->n;
1094 
1095 	return 0;
1096 }
1097 
1098 static int tm_tar_get(struct task_struct *target,
1099 		      const struct user_regset *regset,
1100 		      unsigned int pos, unsigned int count,
1101 		      void *kbuf, void __user *ubuf)
1102 {
1103 	int ret;
1104 
1105 	if (!cpu_has_feature(CPU_FTR_TM))
1106 		return -ENODEV;
1107 
1108 	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1109 		return -ENODATA;
1110 
1111 	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1112 				&target->thread.tm_tar, 0, sizeof(u64));
1113 	return ret;
1114 }
1115 
1116 static int tm_tar_set(struct task_struct *target,
1117 		      const struct user_regset *regset,
1118 		      unsigned int pos, unsigned int count,
1119 		      const void *kbuf, const void __user *ubuf)
1120 {
1121 	int ret;
1122 
1123 	if (!cpu_has_feature(CPU_FTR_TM))
1124 		return -ENODEV;
1125 
1126 	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1127 		return -ENODATA;
1128 
1129 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1130 				&target->thread.tm_tar, 0, sizeof(u64));
1131 	return ret;
1132 }
1133 
1134 static int tm_ppr_active(struct task_struct *target,
1135 			 const struct user_regset *regset)
1136 {
1137 	if (!cpu_has_feature(CPU_FTR_TM))
1138 		return -ENODEV;
1139 
1140 	if (MSR_TM_ACTIVE(target->thread.regs->msr))
1141 		return regset->n;
1142 
1143 	return 0;
1144 }
1145 
1146 
1147 static int tm_ppr_get(struct task_struct *target,
1148 		      const struct user_regset *regset,
1149 		      unsigned int pos, unsigned int count,
1150 		      void *kbuf, void __user *ubuf)
1151 {
1152 	int ret;
1153 
1154 	if (!cpu_has_feature(CPU_FTR_TM))
1155 		return -ENODEV;
1156 
1157 	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1158 		return -ENODATA;
1159 
1160 	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1161 				&target->thread.tm_ppr, 0, sizeof(u64));
1162 	return ret;
1163 }
1164 
1165 static int tm_ppr_set(struct task_struct *target,
1166 		      const struct user_regset *regset,
1167 		      unsigned int pos, unsigned int count,
1168 		      const void *kbuf, const void __user *ubuf)
1169 {
1170 	int ret;
1171 
1172 	if (!cpu_has_feature(CPU_FTR_TM))
1173 		return -ENODEV;
1174 
1175 	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1176 		return -ENODATA;
1177 
1178 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1179 				&target->thread.tm_ppr, 0, sizeof(u64));
1180 	return ret;
1181 }
1182 
1183 static int tm_dscr_active(struct task_struct *target,
1184 			 const struct user_regset *regset)
1185 {
1186 	if (!cpu_has_feature(CPU_FTR_TM))
1187 		return -ENODEV;
1188 
1189 	if (MSR_TM_ACTIVE(target->thread.regs->msr))
1190 		return regset->n;
1191 
1192 	return 0;
1193 }
1194 
1195 static int tm_dscr_get(struct task_struct *target,
1196 		      const struct user_regset *regset,
1197 		      unsigned int pos, unsigned int count,
1198 		      void *kbuf, void __user *ubuf)
1199 {
1200 	int ret;
1201 
1202 	if (!cpu_has_feature(CPU_FTR_TM))
1203 		return -ENODEV;
1204 
1205 	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1206 		return -ENODATA;
1207 
1208 	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1209 				&target->thread.tm_dscr, 0, sizeof(u64));
1210 	return ret;
1211 }
1212 
1213 static int tm_dscr_set(struct task_struct *target,
1214 		      const struct user_regset *regset,
1215 		      unsigned int pos, unsigned int count,
1216 		      const void *kbuf, const void __user *ubuf)
1217 {
1218 	int ret;
1219 
1220 	if (!cpu_has_feature(CPU_FTR_TM))
1221 		return -ENODEV;
1222 
1223 	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1224 		return -ENODATA;
1225 
1226 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1227 				&target->thread.tm_dscr, 0, sizeof(u64));
1228 	return ret;
1229 }
1230 #endif	/* CONFIG_PPC_TRANSACTIONAL_MEM */
1231 
1232 #ifdef CONFIG_PPC64
1233 static int ppr_get(struct task_struct *target,
1234 		      const struct user_regset *regset,
1235 		      unsigned int pos, unsigned int count,
1236 		      void *kbuf, void __user *ubuf)
1237 {
1238 	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1239 				   &target->thread.regs->ppr, 0, sizeof(u64));
1240 }
1241 
1242 static int ppr_set(struct task_struct *target,
1243 		      const struct user_regset *regset,
1244 		      unsigned int pos, unsigned int count,
1245 		      const void *kbuf, const void __user *ubuf)
1246 {
1247 	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1248 				  &target->thread.regs->ppr, 0, sizeof(u64));
1249 }
1250 
1251 static int dscr_get(struct task_struct *target,
1252 		      const struct user_regset *regset,
1253 		      unsigned int pos, unsigned int count,
1254 		      void *kbuf, void __user *ubuf)
1255 {
1256 	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1257 				   &target->thread.dscr, 0, sizeof(u64));
1258 }
1259 static int dscr_set(struct task_struct *target,
1260 		      const struct user_regset *regset,
1261 		      unsigned int pos, unsigned int count,
1262 		      const void *kbuf, const void __user *ubuf)
1263 {
1264 	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1265 				  &target->thread.dscr, 0, sizeof(u64));
1266 }
1267 #endif
1268 #ifdef CONFIG_PPC_BOOK3S_64
1269 static int tar_get(struct task_struct *target,
1270 		      const struct user_regset *regset,
1271 		      unsigned int pos, unsigned int count,
1272 		      void *kbuf, void __user *ubuf)
1273 {
1274 	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1275 				   &target->thread.tar, 0, sizeof(u64));
1276 }
1277 static int tar_set(struct task_struct *target,
1278 		      const struct user_regset *regset,
1279 		      unsigned int pos, unsigned int count,
1280 		      const void *kbuf, const void __user *ubuf)
1281 {
1282 	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1283 				  &target->thread.tar, 0, sizeof(u64));
1284 }
1285 
1286 static int ebb_active(struct task_struct *target,
1287 			 const struct user_regset *regset)
1288 {
1289 	if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1290 		return -ENODEV;
1291 
1292 	if (target->thread.used_ebb)
1293 		return regset->n;
1294 
1295 	return 0;
1296 }
1297 
1298 static int ebb_get(struct task_struct *target,
1299 		      const struct user_regset *regset,
1300 		      unsigned int pos, unsigned int count,
1301 		      void *kbuf, void __user *ubuf)
1302 {
1303 	/* Build tests */
1304 	BUILD_BUG_ON(TSO(ebbrr) + sizeof(unsigned long) != TSO(ebbhr));
1305 	BUILD_BUG_ON(TSO(ebbhr) + sizeof(unsigned long) != TSO(bescr));
1306 
1307 	if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1308 		return -ENODEV;
1309 
1310 	if (!target->thread.used_ebb)
1311 		return -ENODATA;
1312 
1313 	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1314 			&target->thread.ebbrr, 0, 3 * sizeof(unsigned long));
1315 }
1316 
/*
 * Write the three EBB SPR images (ebbrr, ebbhr, bescr) from the regset
 * buffer into thread_struct.  The field-adjacency build tests keep the
 * offsets below in sync with the struct layout.
 */
static int ebb_set(struct task_struct *target,
		      const struct user_regset *regset,
		      unsigned int pos, unsigned int count,
		      const void *kbuf, const void __user *ubuf)
{
	int ret = 0;

	/* Build tests */
	BUILD_BUG_ON(TSO(ebbrr) + sizeof(unsigned long) != TSO(ebbhr));
	BUILD_BUG_ON(TSO(ebbhr) + sizeof(unsigned long) != TSO(bescr));

	if (!cpu_has_feature(CPU_FTR_ARCH_207S))
		return -ENODEV;

	/*
	 * NOTE(review): this refuses the write when used_ebb IS set,
	 * the opposite of ebb_get()'s check above — looks inverted;
	 * confirm the intended semantics before relying on it.
	 */
	if (target->thread.used_ebb)
		return -ENODATA;

	/* EBBRR register */
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
			&target->thread.ebbrr, 0, sizeof(unsigned long));

	/* EBBHR register */
	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
			&target->thread.ebbhr, sizeof(unsigned long),
			2 * sizeof(unsigned long));

	/* BESCR register */
	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
			&target->thread.bescr,
			2 * sizeof(unsigned long), 3 * sizeof(unsigned long));

	return ret;
}
1349 static int pmu_active(struct task_struct *target,
1350 			 const struct user_regset *regset)
1351 {
1352 	if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1353 		return -ENODEV;
1354 
1355 	return regset->n;
1356 }
1357 
/*
 * Copy the five PMU SPR images (siar, sdar, sier, mmcr2, mmcr0) out in
 * one contiguous copyout; the build tests pin the expected adjacency
 * and ordering of those fields in thread_struct.
 */
static int pmu_get(struct task_struct *target,
		      const struct user_regset *regset,
		      unsigned int pos, unsigned int count,
		      void *kbuf, void __user *ubuf)
{
	/* Build tests */
	BUILD_BUG_ON(TSO(siar) + sizeof(unsigned long) != TSO(sdar));
	BUILD_BUG_ON(TSO(sdar) + sizeof(unsigned long) != TSO(sier));
	BUILD_BUG_ON(TSO(sier) + sizeof(unsigned long) != TSO(mmcr2));
	BUILD_BUG_ON(TSO(mmcr2) + sizeof(unsigned long) != TSO(mmcr0));

	if (!cpu_has_feature(CPU_FTR_ARCH_207S))
		return -ENODEV;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
			&target->thread.siar, 0,
			5 * sizeof(unsigned long));
}
1376 
/*
 * Write the five PMU SPR images from the regset buffer into
 * thread_struct, one field at a time in buffer order:
 * siar, sdar, sier, mmcr2, mmcr0.
 */
static int pmu_set(struct task_struct *target,
		      const struct user_regset *regset,
		      unsigned int pos, unsigned int count,
		      const void *kbuf, const void __user *ubuf)
{
	int ret = 0;

	/* Build tests */
	BUILD_BUG_ON(TSO(siar) + sizeof(unsigned long) != TSO(sdar));
	BUILD_BUG_ON(TSO(sdar) + sizeof(unsigned long) != TSO(sier));
	BUILD_BUG_ON(TSO(sier) + sizeof(unsigned long) != TSO(mmcr2));
	BUILD_BUG_ON(TSO(mmcr2) + sizeof(unsigned long) != TSO(mmcr0));

	if (!cpu_has_feature(CPU_FTR_ARCH_207S))
		return -ENODEV;

	/* SIAR: buffer slot 0 */
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
			&target->thread.siar, 0,
			sizeof(unsigned long));

	/* SDAR: buffer slot 1 */
	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
			&target->thread.sdar, sizeof(unsigned long),
			2 * sizeof(unsigned long));

	/* SIER: buffer slot 2 */
	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
			&target->thread.sier, 2 * sizeof(unsigned long),
			3 * sizeof(unsigned long));

	/* MMCR2: buffer slot 3 */
	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
			&target->thread.mmcr2, 3 * sizeof(unsigned long),
			4 * sizeof(unsigned long));

	/* MMCR0: buffer slot 4 */
	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
			&target->thread.mmcr0, 4 * sizeof(unsigned long),
			5 * sizeof(unsigned long));
	return ret;
}
1418 #endif
1419 
1420 #ifdef CONFIG_PPC_MEM_KEYS
1421 static int pkey_active(struct task_struct *target,
1422 		       const struct user_regset *regset)
1423 {
1424 	if (!arch_pkeys_enabled())
1425 		return -ENODEV;
1426 
1427 	return regset->n;
1428 }
1429 
/*
 * Copy the pkey-related SPR images (amr, iamr, uamor) out in one
 * contiguous copyout of ELF_NPKEY longs; the build tests pin the
 * expected adjacency of those fields in thread_struct.
 */
static int pkey_get(struct task_struct *target,
		    const struct user_regset *regset,
		    unsigned int pos, unsigned int count,
		    void *kbuf, void __user *ubuf)
{
	BUILD_BUG_ON(TSO(amr) + sizeof(unsigned long) != TSO(iamr));
	BUILD_BUG_ON(TSO(iamr) + sizeof(unsigned long) != TSO(uamor));

	if (!arch_pkeys_enabled())
		return -ENODEV;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &target->thread.amr, 0,
				   ELF_NPKEY * sizeof(unsigned long));
}
1445 
1446 static int pkey_set(struct task_struct *target,
1447 		      const struct user_regset *regset,
1448 		      unsigned int pos, unsigned int count,
1449 		      const void *kbuf, const void __user *ubuf)
1450 {
1451 	u64 new_amr;
1452 	int ret;
1453 
1454 	if (!arch_pkeys_enabled())
1455 		return -ENODEV;
1456 
1457 	/* Only the AMR can be set from userspace */
1458 	if (pos != 0 || count != sizeof(new_amr))
1459 		return -EINVAL;
1460 
1461 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1462 				 &new_amr, 0, sizeof(new_amr));
1463 	if (ret)
1464 		return ret;
1465 
1466 	/* UAMOR determines which bits of the AMR can be set from userspace. */
1467 	target->thread.amr = (new_amr & target->thread.uamor) |
1468 		(target->thread.amr & ~target->thread.uamor);
1469 
1470 	return 0;
1471 }
1472 #endif /* CONFIG_PPC_MEM_KEYS */
1473 
1474 /*
1475  * These are our native regset flavors.
1476  */
enum powerpc_regset {
	REGSET_GPR,		/* General purpose registers */
	REGSET_FPR,		/* Floating point registers */
#ifdef CONFIG_ALTIVEC
	REGSET_VMX,		/* AltiVec/VMX vector registers */
#endif
#ifdef CONFIG_VSX
	REGSET_VSX,		/* VSX registers */
#endif
#ifdef CONFIG_SPE
	REGSET_SPE,		/* SPE registers */
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	REGSET_TM_CGPR,		/* TM checkpointed GPR registers */
	REGSET_TM_CFPR,		/* TM checkpointed FPR registers */
	REGSET_TM_CVMX,		/* TM checkpointed VMX registers */
	REGSET_TM_CVSX,		/* TM checkpointed VSX registers */
	REGSET_TM_SPR,		/* TM specific SPR registers */
	REGSET_TM_CTAR,		/* TM checkpointed TAR register */
	REGSET_TM_CPPR,		/* TM checkpointed PPR register */
	REGSET_TM_CDSCR,	/* TM checkpointed DSCR register */
#endif
#ifdef CONFIG_PPC64
	REGSET_PPR,		/* PPR register */
	REGSET_DSCR,		/* DSCR register */
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	REGSET_TAR,		/* TAR register */
	REGSET_EBB,		/* EBB registers */
	REGSET_PMR,		/* Performance Monitor Registers */
#endif
#ifdef CONFIG_PPC_MEM_KEYS
	REGSET_PKEY,		/* AMR register */
#endif
};
1512 
/*
 * Native regset table, indexed by enum powerpc_regset.  Each entry wires
 * a core-note type to its size/alignment and the get/set (and optional
 * active) callbacks defined above or in ptrace-decl.h.
 */
static const struct user_regset native_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
		.size = sizeof(long), .align = sizeof(long),
		.get = gpr_get, .set = gpr_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
		.size = sizeof(double), .align = sizeof(double),
		.get = fpr_get, .set = fpr_set
	},
#ifdef CONFIG_ALTIVEC
	[REGSET_VMX] = {
		.core_note_type = NT_PPC_VMX, .n = 34,
		.size = sizeof(vector128), .align = sizeof(vector128),
		.active = vr_active, .get = vr_get, .set = vr_set
	},
#endif
#ifdef CONFIG_VSX
	[REGSET_VSX] = {
		.core_note_type = NT_PPC_VSX, .n = 32,
		.size = sizeof(double), .align = sizeof(double),
		.active = vsr_active, .get = vsr_get, .set = vsr_set
	},
#endif
#ifdef CONFIG_SPE
	[REGSET_SPE] = {
		.core_note_type = NT_PPC_SPE, .n = 35,
		.size = sizeof(u32), .align = sizeof(u32),
		.active = evr_active, .get = evr_get, .set = evr_set
	},
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	[REGSET_TM_CGPR] = {
		.core_note_type = NT_PPC_TM_CGPR, .n = ELF_NGREG,
		.size = sizeof(long), .align = sizeof(long),
		.active = tm_cgpr_active, .get = tm_cgpr_get, .set = tm_cgpr_set
	},
	[REGSET_TM_CFPR] = {
		.core_note_type = NT_PPC_TM_CFPR, .n = ELF_NFPREG,
		.size = sizeof(double), .align = sizeof(double),
		.active = tm_cfpr_active, .get = tm_cfpr_get, .set = tm_cfpr_set
	},
	[REGSET_TM_CVMX] = {
		.core_note_type = NT_PPC_TM_CVMX, .n = ELF_NVMX,
		.size = sizeof(vector128), .align = sizeof(vector128),
		.active = tm_cvmx_active, .get = tm_cvmx_get, .set = tm_cvmx_set
	},
	[REGSET_TM_CVSX] = {
		.core_note_type = NT_PPC_TM_CVSX, .n = ELF_NVSX,
		.size = sizeof(double), .align = sizeof(double),
		.active = tm_cvsx_active, .get = tm_cvsx_get, .set = tm_cvsx_set
	},
	[REGSET_TM_SPR] = {
		.core_note_type = NT_PPC_TM_SPR, .n = ELF_NTMSPRREG,
		.size = sizeof(u64), .align = sizeof(u64),
		.active = tm_spr_active, .get = tm_spr_get, .set = tm_spr_set
	},
	[REGSET_TM_CTAR] = {
		.core_note_type = NT_PPC_TM_CTAR, .n = 1,
		.size = sizeof(u64), .align = sizeof(u64),
		.active = tm_tar_active, .get = tm_tar_get, .set = tm_tar_set
	},
	[REGSET_TM_CPPR] = {
		.core_note_type = NT_PPC_TM_CPPR, .n = 1,
		.size = sizeof(u64), .align = sizeof(u64),
		.active = tm_ppr_active, .get = tm_ppr_get, .set = tm_ppr_set
	},
	[REGSET_TM_CDSCR] = {
		.core_note_type = NT_PPC_TM_CDSCR, .n = 1,
		.size = sizeof(u64), .align = sizeof(u64),
		.active = tm_dscr_active, .get = tm_dscr_get, .set = tm_dscr_set
	},
#endif
#ifdef CONFIG_PPC64
	[REGSET_PPR] = {
		.core_note_type = NT_PPC_PPR, .n = 1,
		.size = sizeof(u64), .align = sizeof(u64),
		.get = ppr_get, .set = ppr_set
	},
	[REGSET_DSCR] = {
		.core_note_type = NT_PPC_DSCR, .n = 1,
		.size = sizeof(u64), .align = sizeof(u64),
		.get = dscr_get, .set = dscr_set
	},
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	[REGSET_TAR] = {
		.core_note_type = NT_PPC_TAR, .n = 1,
		.size = sizeof(u64), .align = sizeof(u64),
		.get = tar_get, .set = tar_set
	},
	[REGSET_EBB] = {
		.core_note_type = NT_PPC_EBB, .n = ELF_NEBB,
		.size = sizeof(u64), .align = sizeof(u64),
		.active = ebb_active, .get = ebb_get, .set = ebb_set
	},
	[REGSET_PMR] = {
		.core_note_type = NT_PPC_PMU, .n = ELF_NPMU,
		.size = sizeof(u64), .align = sizeof(u64),
		.active = pmu_active, .get = pmu_get, .set = pmu_set
	},
#endif
#ifdef CONFIG_PPC_MEM_KEYS
	[REGSET_PKEY] = {
		.core_note_type = NT_PPC_PKEY, .n = ELF_NPKEY,
		.size = sizeof(u64), .align = sizeof(u64),
		.active = pkey_active, .get = pkey_get, .set = pkey_set
	},
#endif
};
1624 
/* Regset view handed to the core for native (same-width) tracees. */
static const struct user_regset_view user_ppc_native_view = {
	.name = UTS_MACHINE, .e_machine = ELF_ARCH, .ei_osabi = ELF_OSABI,
	.regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
};
1629 
1630 #include <linux/compat.h>
1631 
/*
 * Copy a 32-bit (compat) view of the GPR area out of @regs into the
 * regset buffer, narrowing each unsigned long to compat_ulong_t.
 * The MSR slot is synthesized via get_user_msr() rather than copied
 * raw; anything past PT_REGS_COUNT is zero-filled.
 *
 * pos/count arrive in bytes and are converted to register-index units
 * for the copy loops, then back to bytes for the final copyout_zero.
 */
static int gpr32_get_common(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
			    void *kbuf, void __user *ubuf,
			    unsigned long *regs)
{
	compat_ulong_t *k = kbuf;
	compat_ulong_t __user *u = ubuf;
	compat_ulong_t reg;

	/* Work in register-index units from here on */
	pos /= sizeof(reg);
	count /= sizeof(reg);

	/* Registers before the MSR slot: plain narrowed copies */
	if (kbuf)
		for (; count > 0 && pos < PT_MSR; --count)
			*k++ = regs[pos++];
	else
		for (; count > 0 && pos < PT_MSR; --count)
			if (__put_user((compat_ulong_t) regs[pos++], u++))
				return -EFAULT;

	/* MSR slot: report the sanitized user-visible MSR */
	if (count > 0 && pos == PT_MSR) {
		reg = get_user_msr(target);
		if (kbuf)
			*k++ = reg;
		else if (__put_user(reg, u++))
			return -EFAULT;
		++pos;
		--count;
	}

	/* Remaining slots up to PT_REGS_COUNT: plain narrowed copies */
	if (kbuf)
		for (; count > 0 && pos < PT_REGS_COUNT; --count)
			*k++ = regs[pos++];
	else
		for (; count > 0 && pos < PT_REGS_COUNT; --count)
			if (__put_user((compat_ulong_t) regs[pos++], u++))
				return -EFAULT;

	/* Convert back to byte units and zero-fill any remainder */
	kbuf = k;
	ubuf = u;
	pos *= sizeof(reg);
	count *= sizeof(reg);
	return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					PT_REGS_COUNT * sizeof(reg), -1);
}
1678 
/*
 * Write a 32-bit (compat) GPR buffer into @regs, widening each
 * compat_ulong_t.  The MSR and TRAP slots go through set_user_msr()/
 * set_user_trap() so only user-modifiable bits change; slots between
 * PT_MAX_PUT_REG and PT_TRAP are read from the buffer but discarded.
 *
 * pos/count arrive in bytes and are converted to register-index units
 * for the copy loops, then back to bytes for the final copyin_ignore.
 */
static int gpr32_set_common(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf,
		     unsigned long *regs)
{
	const compat_ulong_t *k = kbuf;
	const compat_ulong_t __user *u = ubuf;
	compat_ulong_t reg;

	/* Work in register-index units from here on */
	pos /= sizeof(reg);
	count /= sizeof(reg);

	/* Registers before the MSR slot: plain widened copies */
	if (kbuf)
		for (; count > 0 && pos < PT_MSR; --count)
			regs[pos++] = *k++;
	else
		for (; count > 0 && pos < PT_MSR; --count) {
			if (__get_user(reg, u++))
				return -EFAULT;
			regs[pos++] = reg;
		}


	/* MSR slot: filter through set_user_msr() */
	if (count > 0 && pos == PT_MSR) {
		if (kbuf)
			reg = *k++;
		else if (__get_user(reg, u++))
			return -EFAULT;
		set_user_msr(target, reg);
		++pos;
		--count;
	}

	/*
	 * Writable slots up to PT_MAX_PUT_REG are stored; the region
	 * between there and PT_TRAP is consumed but ignored.
	 */
	if (kbuf) {
		for (; count > 0 && pos <= PT_MAX_PUT_REG; --count)
			regs[pos++] = *k++;
		for (; count > 0 && pos < PT_TRAP; --count, ++pos)
			++k;
	} else {
		for (; count > 0 && pos <= PT_MAX_PUT_REG; --count) {
			if (__get_user(reg, u++))
				return -EFAULT;
			regs[pos++] = reg;
		}
		for (; count > 0 && pos < PT_TRAP; --count, ++pos)
			if (__get_user(reg, u++))
				return -EFAULT;
	}

	/* TRAP slot: filter through set_user_trap() */
	if (count > 0 && pos == PT_TRAP) {
		if (kbuf)
			reg = *k++;
		else if (__get_user(reg, u++))
			return -EFAULT;
		set_user_trap(target, reg);
		++pos;
		--count;
	}

	/* Convert back to byte units and skip anything past TRAP */
	kbuf = k;
	ubuf = u;
	pos *= sizeof(reg);
	count *= sizeof(reg);
	return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					 (PT_TRAP + 1) * sizeof(reg), -1);
}
1746 
1747 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1748 static int tm_cgpr32_get(struct task_struct *target,
1749 		     const struct user_regset *regset,
1750 		     unsigned int pos, unsigned int count,
1751 		     void *kbuf, void __user *ubuf)
1752 {
1753 	return gpr32_get_common(target, regset, pos, count, kbuf, ubuf,
1754 			&target->thread.ckpt_regs.gpr[0]);
1755 }
1756 
1757 static int tm_cgpr32_set(struct task_struct *target,
1758 		     const struct user_regset *regset,
1759 		     unsigned int pos, unsigned int count,
1760 		     const void *kbuf, const void __user *ubuf)
1761 {
1762 	return gpr32_set_common(target, regset, pos, count, kbuf, ubuf,
1763 			&target->thread.ckpt_regs.gpr[0]);
1764 }
1765 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
1766 
/*
 * 32-bit view of the live GPRs.  -EIO if the task has no saved regs
 * (e.g. a kernel thread).  Note the side effect: with only a partial
 * register set saved, r14-r31 in the task's pt_regs are overwritten
 * with NV_REG_POISON before being reported.
 */
static int gpr32_get(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     void *kbuf, void __user *ubuf)
{
	int i;

	if (target->thread.regs == NULL)
		return -EIO;

	if (!FULL_REGS(target->thread.regs)) {
		/*
		 * We have a partial register set.
		 * Fill 14-31 with bogus values.
		 */
		for (i = 14; i < 32; i++)
			target->thread.regs->gpr[i] = NV_REG_POISON;
	}
	return gpr32_get_common(target, regset, pos, count, kbuf, ubuf,
			&target->thread.regs->gpr[0]);
}
1788 
1789 static int gpr32_set(struct task_struct *target,
1790 		     const struct user_regset *regset,
1791 		     unsigned int pos, unsigned int count,
1792 		     const void *kbuf, const void __user *ubuf)
1793 {
1794 	if (target->thread.regs == NULL)
1795 		return -EIO;
1796 
1797 	CHECK_FULL_REGS(target->thread.regs);
1798 	return gpr32_set_common(target, regset, pos, count, kbuf, ubuf,
1799 			&target->thread.regs->gpr[0]);
1800 }
1801 
1802 /*
1803  * These are the regset flavors matching the CONFIG_PPC32 native set.
1804  */
1805 static const struct user_regset compat_regsets[] = {
1806 	[REGSET_GPR] = {
1807 		.core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
1808 		.size = sizeof(compat_long_t), .align = sizeof(compat_long_t),
1809 		.get = gpr32_get, .set = gpr32_set
1810 	},
1811 	[REGSET_FPR] = {
1812 		.core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
1813 		.size = sizeof(double), .align = sizeof(double),
1814 		.get = fpr_get, .set = fpr_set
1815 	},
1816 #ifdef CONFIG_ALTIVEC
1817 	[REGSET_VMX] = {
1818 		.core_note_type = NT_PPC_VMX, .n = 34,
1819 		.size = sizeof(vector128), .align = sizeof(vector128),
1820 		.active = vr_active, .get = vr_get, .set = vr_set
1821 	},
1822 #endif
1823 #ifdef CONFIG_SPE
1824 	[REGSET_SPE] = {
1825 		.core_note_type = NT_PPC_SPE, .n = 35,
1826 		.size = sizeof(u32), .align = sizeof(u32),
1827 		.active = evr_active, .get = evr_get, .set = evr_set
1828 	},
1829 #endif
1830 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1831 	[REGSET_TM_CGPR] = {
1832 		.core_note_type = NT_PPC_TM_CGPR, .n = ELF_NGREG,
1833 		.size = sizeof(long), .align = sizeof(long),
1834 		.active = tm_cgpr_active,
1835 		.get = tm_cgpr32_get, .set = tm_cgpr32_set
1836 	},
1837 	[REGSET_TM_CFPR] = {
1838 		.core_note_type = NT_PPC_TM_CFPR, .n = ELF_NFPREG,
1839 		.size = sizeof(double), .align = sizeof(double),
1840 		.active = tm_cfpr_active, .get = tm_cfpr_get, .set = tm_cfpr_set
1841 	},
1842 	[REGSET_TM_CVMX] = {
1843 		.core_note_type = NT_PPC_TM_CVMX, .n = ELF_NVMX,
1844 		.size = sizeof(vector128), .align = sizeof(vector128),
1845 		.active = tm_cvmx_active, .get = tm_cvmx_get, .set = tm_cvmx_set
1846 	},
1847 	[REGSET_TM_CVSX] = {
1848 		.core_note_type = NT_PPC_TM_CVSX, .n = ELF_NVSX,
1849 		.size = sizeof(double), .align = sizeof(double),
1850 		.active = tm_cvsx_active, .get = tm_cvsx_get, .set = tm_cvsx_set
1851 	},
1852 	[REGSET_TM_SPR] = {
1853 		.core_note_type = NT_PPC_TM_SPR, .n = ELF_NTMSPRREG,
1854 		.size = sizeof(u64), .align = sizeof(u64),
1855 		.active = tm_spr_active, .get = tm_spr_get, .set = tm_spr_set
1856 	},
1857 	[REGSET_TM_CTAR] = {
1858 		.core_note_type = NT_PPC_TM_CTAR, .n = 1,
1859 		.size = sizeof(u64), .align = sizeof(u64),
1860 		.active = tm_tar_active, .get = tm_tar_get, .set = tm_tar_set
1861 	},
1862 	[REGSET_TM_CPPR] = {
1863 		.core_note_type = NT_PPC_TM_CPPR, .n = 1,
1864 		.size = sizeof(u64), .align = sizeof(u64),
1865 		.active = tm_ppr_active, .get = tm_ppr_get, .set = tm_ppr_set
1866 	},
1867 	[REGSET_TM_CDSCR] = {
1868 		.core_note_type = NT_PPC_TM_CDSCR, .n = 1,
1869 		.size = sizeof(u64), .align = sizeof(u64),
1870 		.active = tm_dscr_active, .get = tm_dscr_get, .set = tm_dscr_set
1871 	},
1872 #endif
1873 #ifdef CONFIG_PPC64
1874 	[REGSET_PPR] = {
1875 		.core_note_type = NT_PPC_PPR, .n = 1,
1876 		.size = sizeof(u64), .align = sizeof(u64),
1877 		.get = ppr_get, .set = ppr_set
1878 	},
1879 	[REGSET_DSCR] = {
1880 		.core_note_type = NT_PPC_DSCR, .n = 1,
1881 		.size = sizeof(u64), .align = sizeof(u64),
1882 		.get = dscr_get, .set = dscr_set
1883 	},
1884 #endif
1885 #ifdef CONFIG_PPC_BOOK3S_64
1886 	[REGSET_TAR] = {
1887 		.core_note_type = NT_PPC_TAR, .n = 1,
1888 		.size = sizeof(u64), .align = sizeof(u64),
1889 		.get = tar_get, .set = tar_set
1890 	},
1891 	[REGSET_EBB] = {
1892 		.core_note_type = NT_PPC_EBB, .n = ELF_NEBB,
1893 		.size = sizeof(u64), .align = sizeof(u64),
1894 		.active = ebb_active, .get = ebb_get, .set = ebb_set
1895 	},
1896 #endif
1897 };
1898 
/* Regset view handed to the core for 32-bit (compat) tracees. */
static const struct user_regset_view user_ppc_compat_view = {
	.name = "ppc", .e_machine = EM_PPC, .ei_osabi = ELF_OSABI,
	.regsets = compat_regsets, .n = ARRAY_SIZE(compat_regsets)
};
1903 
1904 const struct user_regset_view *task_user_regset_view(struct task_struct *task)
1905 {
1906 	if (IS_ENABLED(CONFIG_PPC64) && test_tsk_thread_flag(task, TIF_32BIT))
1907 		return &user_ppc_compat_view;
1908 	return &user_ppc_native_view;
1909 }
1910 
1911 
/*
 * Arrange for @task to trap after every instruction.
 *
 * On BookE-style debug parts this arms the Instruction Complete event
 * (clearing Branch Taken) and enables debug interrupts via MSR[DE];
 * on classic parts it sets MSR[SE] (clearing MSR[BE]).  The flag is
 * recorded even when the task has no saved regs.
 */
void user_enable_single_step(struct task_struct *task)
{
	struct pt_regs *regs = task->thread.regs;

	if (regs != NULL) {
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
		task->thread.debug.dbcr0 &= ~DBCR0_BT;
		task->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
		regs->msr |= MSR_DE;
#else
		regs->msr &= ~MSR_BE;
		regs->msr |= MSR_SE;
#endif
	}
	set_tsk_thread_flag(task, TIF_SINGLESTEP);
}
1928 
1929 void user_enable_block_step(struct task_struct *task)
1930 {
1931 	struct pt_regs *regs = task->thread.regs;
1932 
1933 	if (regs != NULL) {
1934 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
1935 		task->thread.debug.dbcr0 &= ~DBCR0_IC;
1936 		task->thread.debug.dbcr0 = DBCR0_IDM | DBCR0_BT;
1937 		regs->msr |= MSR_DE;
1938 #else
1939 		regs->msr &= ~MSR_SE;
1940 		regs->msr |= MSR_BE;
1941 #endif
1942 	}
1943 	set_tsk_thread_flag(task, TIF_SINGLESTEP);
1944 }
1945 
/*
 * Undo user_enable_single_step()/user_enable_block_step(): disarm both
 * per-instruction and per-branch trapping and clear TIF_SINGLESTEP.
 */
void user_disable_single_step(struct task_struct *task)
{
	struct pt_regs *regs = task->thread.regs;

	if (regs != NULL) {
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
		/*
		 * The logic to disable single stepping should be as
		 * simple as turning off the Instruction Complete flag.
		 * And, after doing so, if all debug flags are off, turn
		 * off DBCR0(IDM) and MSR(DE) .... Torez
		 */
		task->thread.debug.dbcr0 &= ~(DBCR0_IC|DBCR0_BT);
		/*
		 * Test to see if any of the DBCR_ACTIVE_EVENTS bits are set.
		 */
		if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
					task->thread.debug.dbcr1)) {
			/*
			 * All debug events were off.....
			 */
			task->thread.debug.dbcr0 &= ~DBCR0_IDM;
			regs->msr &= ~MSR_DE;
		}
#else
		regs->msr &= ~(MSR_SE | MSR_BE);
#endif
	}
	clear_tsk_thread_flag(task, TIF_SINGLESTEP);
}
1976 
1977 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1978 void ptrace_triggered(struct perf_event *bp,
1979 		      struct perf_sample_data *data, struct pt_regs *regs)
1980 {
1981 	struct perf_event_attr attr;
1982 
1983 	/*
1984 	 * Disable the breakpoint request here since ptrace has defined a
1985 	 * one-shot behaviour for breakpoint exceptions in PPC64.
1986 	 * The SIGTRAP signal is generated automatically for us in do_dabr().
1987 	 * We don't have to do anything about that here
1988 	 */
1989 	attr = bp->attr;
1990 	attr.disabled = true;
1991 	modify_user_hw_breakpoint(bp, &attr);
1992 }
1993 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
1994 
1995 static int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
1996 			       unsigned long data)
1997 {
1998 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1999 	int ret;
2000 	struct thread_struct *thread = &(task->thread);
2001 	struct perf_event *bp;
2002 	struct perf_event_attr attr;
2003 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
2004 #ifndef CONFIG_PPC_ADV_DEBUG_REGS
2005 	bool set_bp = true;
2006 	struct arch_hw_breakpoint hw_brk;
2007 #endif
2008 
2009 	/* For ppc64 we support one DABR and no IABR's at the moment (ppc64).
2010 	 *  For embedded processors we support one DAC and no IAC's at the
2011 	 *  moment.
2012 	 */
2013 	if (addr > 0)
2014 		return -EINVAL;
2015 
2016 	/* The bottom 3 bits in dabr are flags */
2017 	if ((data & ~0x7UL) >= TASK_SIZE)
2018 		return -EIO;
2019 
2020 #ifndef CONFIG_PPC_ADV_DEBUG_REGS
2021 	/* For processors using DABR (i.e. 970), the bottom 3 bits are flags.
2022 	 *  It was assumed, on previous implementations, that 3 bits were
2023 	 *  passed together with the data address, fitting the design of the
2024 	 *  DABR register, as follows:
2025 	 *
2026 	 *  bit 0: Read flag
2027 	 *  bit 1: Write flag
2028 	 *  bit 2: Breakpoint translation
2029 	 *
2030 	 *  Thus, we use them here as so.
2031 	 */
2032 
2033 	/* Ensure breakpoint translation bit is set */
2034 	if (data && !(data & HW_BRK_TYPE_TRANSLATE))
2035 		return -EIO;
2036 	hw_brk.address = data & (~HW_BRK_TYPE_DABR);
2037 	hw_brk.type = (data & HW_BRK_TYPE_DABR) | HW_BRK_TYPE_PRIV_ALL;
2038 	hw_brk.len = DABR_MAX_LEN;
2039 	hw_brk.hw_len = DABR_MAX_LEN;
2040 	set_bp = (data) && (hw_brk.type & HW_BRK_TYPE_RDWR);
2041 #ifdef CONFIG_HAVE_HW_BREAKPOINT
2042 	bp = thread->ptrace_bps[0];
2043 	if (!set_bp) {
2044 		if (bp) {
2045 			unregister_hw_breakpoint(bp);
2046 			thread->ptrace_bps[0] = NULL;
2047 		}
2048 		return 0;
2049 	}
2050 	if (bp) {
2051 		attr = bp->attr;
2052 		attr.bp_addr = hw_brk.address;
2053 		attr.bp_len = DABR_MAX_LEN;
2054 		arch_bp_generic_fields(hw_brk.type, &attr.bp_type);
2055 
2056 		/* Enable breakpoint */
2057 		attr.disabled = false;
2058 
2059 		ret =  modify_user_hw_breakpoint(bp, &attr);
2060 		if (ret) {
2061 			return ret;
2062 		}
2063 		thread->ptrace_bps[0] = bp;
2064 		thread->hw_brk = hw_brk;
2065 		return 0;
2066 	}
2067 
2068 	/* Create a new breakpoint request if one doesn't exist already */
2069 	hw_breakpoint_init(&attr);
2070 	attr.bp_addr = hw_brk.address;
2071 	attr.bp_len = DABR_MAX_LEN;
2072 	arch_bp_generic_fields(hw_brk.type,
2073 			       &attr.bp_type);
2074 
2075 	thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr,
2076 					       ptrace_triggered, NULL, task);
2077 	if (IS_ERR(bp)) {
2078 		thread->ptrace_bps[0] = NULL;
2079 		return PTR_ERR(bp);
2080 	}
2081 
2082 #else /* !CONFIG_HAVE_HW_BREAKPOINT */
2083 	if (set_bp && (!ppc_breakpoint_available()))
2084 		return -ENODEV;
2085 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
2086 	task->thread.hw_brk = hw_brk;
2087 #else /* CONFIG_PPC_ADV_DEBUG_REGS */
2088 	/* As described above, it was assumed 3 bits were passed with the data
2089 	 *  address, but we will assume only the mode bits will be passed
2090 	 *  as to not cause alignment restrictions for DAC-based processors.
2091 	 */
2092 
2093 	/* DAC's hold the whole address without any mode flags */
2094 	task->thread.debug.dac1 = data & ~0x3UL;
2095 
2096 	if (task->thread.debug.dac1 == 0) {
2097 		dbcr_dac(task) &= ~(DBCR_DAC1R | DBCR_DAC1W);
2098 		if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
2099 					task->thread.debug.dbcr1)) {
2100 			task->thread.regs->msr &= ~MSR_DE;
2101 			task->thread.debug.dbcr0 &= ~DBCR0_IDM;
2102 		}
2103 		return 0;
2104 	}
2105 
2106 	/* Read or Write bits must be set */
2107 
2108 	if (!(data & 0x3UL))
2109 		return -EINVAL;
2110 
2111 	/* Set the Internal Debugging flag (IDM bit 1) for the DBCR0
2112 	   register */
2113 	task->thread.debug.dbcr0 |= DBCR0_IDM;
2114 
2115 	/* Check for write and read flags and set DBCR0
2116 	   accordingly */
2117 	dbcr_dac(task) &= ~(DBCR_DAC1R|DBCR_DAC1W);
2118 	if (data & 0x1UL)
2119 		dbcr_dac(task) |= DBCR_DAC1R;
2120 	if (data & 0x2UL)
2121 		dbcr_dac(task) |= DBCR_DAC1W;
2122 	task->thread.regs->msr |= MSR_DE;
2123 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
2124 	return 0;
2125 }
2126 
2127 /*
2128  * Called by kernel/ptrace.c when detaching..
2129  *
2130  * Make sure single step bits etc are not set.
2131  */
void ptrace_disable(struct task_struct *child)
{
	/* make sure the single step bit is not set. */
	/* No other per-arch debug state is torn down here; hardware
	 * breakpoints are released separately via the ptrace core.
	 */
	user_disable_single_step(child);
}
2137 
2138 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
 * Install an instruction breakpoint (IAC register) for @child as
 * described by @bp_info.
 *
 * Returns the 1-based IAC slot number claimed on success, -EIO if an
 * address lies outside the user address space, or -ENOSPC when no
 * suitable IAC slot (or slot pair, for address ranges) is free.
 */
static long set_instruction_bp(struct task_struct *child,
			      struct ppc_hw_breakpoint *bp_info)
{
	int slot;
	int slot1_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC1) != 0);
	int slot2_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC2) != 0);
	int slot3_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC3) != 0);
	int slot4_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC4) != 0);

	/* An armed range occupies both registers of the pair even though
	 * only the first slot's DBCR0 enable bit is set.
	 */
	if (dbcr_iac_range(child) & DBCR_IAC12MODE)
		slot2_in_use = 1;
	if (dbcr_iac_range(child) & DBCR_IAC34MODE)
		slot4_in_use = 1;

	if (bp_info->addr >= TASK_SIZE)
		return -EIO;

	if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT) {

		/* Make sure range is valid. */
		if (bp_info->addr2 >= TASK_SIZE)
			return -EIO;

		/* We need a pair of IAC registers */
		if ((!slot1_in_use) && (!slot2_in_use)) {
			slot = 1;
			child->thread.debug.iac1 = bp_info->addr;
			child->thread.debug.iac2 = bp_info->addr2;
			child->thread.debug.dbcr0 |= DBCR0_IAC1;
			if (bp_info->addr_mode ==
					PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
				dbcr_iac_range(child) |= DBCR_IAC12X;
			else
				dbcr_iac_range(child) |= DBCR_IAC12I;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
		} else if ((!slot3_in_use) && (!slot4_in_use)) {
			slot = 3;
			child->thread.debug.iac3 = bp_info->addr;
			child->thread.debug.iac4 = bp_info->addr2;
			child->thread.debug.dbcr0 |= DBCR0_IAC3;
			if (bp_info->addr_mode ==
					PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
				dbcr_iac_range(child) |= DBCR_IAC34X;
			else
				dbcr_iac_range(child) |= DBCR_IAC34I;
#endif
		} else
			return -ENOSPC;
	} else {
		/* We only need one.  If possible leave a pair free in
		 * case a range is needed later
		 */
		if (!slot1_in_use) {
			/*
			 * Don't use iac1 if iac1-iac2 are free and either
			 * iac3 or iac4 (but not both) are free
			 */
			if (slot2_in_use || (slot3_in_use == slot4_in_use)) {
				slot = 1;
				child->thread.debug.iac1 = bp_info->addr;
				child->thread.debug.dbcr0 |= DBCR0_IAC1;
				goto out;
			}
		}
		if (!slot2_in_use) {
			slot = 2;
			child->thread.debug.iac2 = bp_info->addr;
			child->thread.debug.dbcr0 |= DBCR0_IAC2;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
		} else if (!slot3_in_use) {
			slot = 3;
			child->thread.debug.iac3 = bp_info->addr;
			child->thread.debug.dbcr0 |= DBCR0_IAC3;
		} else if (!slot4_in_use) {
			slot = 4;
			child->thread.debug.iac4 = bp_info->addr;
			child->thread.debug.dbcr0 |= DBCR0_IAC4;
#endif
		} else
			return -ENOSPC;
	}
out:
	/* Turn on internal debug mode and the debug-enable MSR bit so the
	 * breakpoint actually fires.
	 */
	child->thread.debug.dbcr0 |= DBCR0_IDM;
	child->thread.regs->msr |= MSR_DE;

	return slot;
}
2226 
/*
 * Clear the instruction breakpoint in IAC slot @slot (1-4).
 *
 * Returns 0 on success, -ENOENT if the slot is not armed, or -EINVAL
 * if @slot is out of range or names the second register of an armed
 * range pair (ranges are deleted through their first slot, which also
 * clears the partner register).
 */
static int del_instruction_bp(struct task_struct *child, int slot)
{
	switch (slot) {
	case 1:
		if ((child->thread.debug.dbcr0 & DBCR0_IAC1) == 0)
			return -ENOENT;

		if (dbcr_iac_range(child) & DBCR_IAC12MODE) {
			/* address range - clear slots 1 & 2 */
			child->thread.debug.iac2 = 0;
			dbcr_iac_range(child) &= ~DBCR_IAC12MODE;
		}
		child->thread.debug.iac1 = 0;
		child->thread.debug.dbcr0 &= ~DBCR0_IAC1;
		break;
	case 2:
		if ((child->thread.debug.dbcr0 & DBCR0_IAC2) == 0)
			return -ENOENT;

		if (dbcr_iac_range(child) & DBCR_IAC12MODE)
			/* used in a range */
			return -EINVAL;
		child->thread.debug.iac2 = 0;
		child->thread.debug.dbcr0 &= ~DBCR0_IAC2;
		break;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	case 3:
		if ((child->thread.debug.dbcr0 & DBCR0_IAC3) == 0)
			return -ENOENT;

		if (dbcr_iac_range(child) & DBCR_IAC34MODE) {
			/* address range - clear slots 3 & 4 */
			child->thread.debug.iac4 = 0;
			dbcr_iac_range(child) &= ~DBCR_IAC34MODE;
		}
		child->thread.debug.iac3 = 0;
		child->thread.debug.dbcr0 &= ~DBCR0_IAC3;
		break;
	case 4:
		if ((child->thread.debug.dbcr0 & DBCR0_IAC4) == 0)
			return -ENOENT;

		if (dbcr_iac_range(child) & DBCR_IAC34MODE)
			/* Used in a range */
			return -EINVAL;
		child->thread.debug.iac4 = 0;
		child->thread.debug.dbcr0 &= ~DBCR0_IAC4;
		break;
#endif
	default:
		return -EINVAL;
	}
	return 0;
}
2281 
/*
 * Install a data breakpoint (DAC register), optionally with a data
 * value compare condition (DVC), for @child.
 *
 * Returns the DAC slot number biased by 4 (i.e. 5 or 6) so DAC slots
 * remain distinguishable from IAC slots 1-4 when userspace later calls
 * PPC_PTRACE_DELHWDEBUG.  Returns -EINVAL when byte enables are given
 * without a condition mode, -EIO for a kernel-space address, or
 * -ENOSPC when both DACs are already busy (or tied up in a range).
 */
static int set_dac(struct task_struct *child, struct ppc_hw_breakpoint *bp_info)
{
	int byte_enable =
		(bp_info->condition_mode >> PPC_BREAKPOINT_CONDITION_BE_SHIFT)
		& 0xf;
	int condition_mode =
		bp_info->condition_mode & PPC_BREAKPOINT_CONDITION_MODE;
	int slot;

	if (byte_enable && (condition_mode == 0))
		return -EINVAL;

	if (bp_info->addr >= TASK_SIZE)
		return -EIO;

	if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0) {
		slot = 1;
		if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
			dbcr_dac(child) |= DBCR_DAC1R;
		if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
			dbcr_dac(child) |= DBCR_DAC1W;
		child->thread.debug.dac1 = (unsigned long)bp_info->addr;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
		if (byte_enable) {
			child->thread.debug.dvc1 =
				(unsigned long)bp_info->condition_value;
			child->thread.debug.dbcr2 |=
				((byte_enable << DBCR2_DVC1BE_SHIFT) |
				 (condition_mode << DBCR2_DVC1M_SHIFT));
		}
#endif
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
	} else if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) {
		/* Both dac1 and dac2 are part of a range */
		return -ENOSPC;
#endif
	} else if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0) {
		slot = 2;
		if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
			dbcr_dac(child) |= DBCR_DAC2R;
		if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
			dbcr_dac(child) |= DBCR_DAC2W;
		child->thread.debug.dac2 = (unsigned long)bp_info->addr;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
		if (byte_enable) {
			child->thread.debug.dvc2 =
				(unsigned long)bp_info->condition_value;
			child->thread.debug.dbcr2 |=
				((byte_enable << DBCR2_DVC2BE_SHIFT) |
				 (condition_mode << DBCR2_DVC2M_SHIFT));
		}
#endif
	} else
		return -ENOSPC;
	/* Arm: internal debug mode plus the debug-enable MSR bit */
	child->thread.debug.dbcr0 |= DBCR0_IDM;
	child->thread.regs->msr |= MSR_DE;

	return slot + 4;
}
2341 
/*
 * Clear data breakpoint DAC slot @slot (1 or 2; the caller has already
 * removed the +4 bias used in the userspace handle).
 *
 * Returns 0 on success, -ENOENT if the slot is not armed, or -EINVAL
 * if @slot is out of range or is DAC2 while it is the second half of
 * an armed DAC range (ranges are torn down via slot 1).
 */
static int del_dac(struct task_struct *child, int slot)
{
	if (slot == 1) {
		if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0)
			return -ENOENT;

		child->thread.debug.dac1 = 0;
		dbcr_dac(child) &= ~(DBCR_DAC1R | DBCR_DAC1W);
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
		/* A DAC1/DAC2 range is dismantled as a whole */
		if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) {
			child->thread.debug.dac2 = 0;
			child->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE;
		}
		child->thread.debug.dbcr2 &= ~(DBCR2_DVC1M | DBCR2_DVC1BE);
#endif
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
		child->thread.debug.dvc1 = 0;
#endif
	} else if (slot == 2) {
		if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0)
			return -ENOENT;

#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
		if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE)
			/* Part of a range */
			return -EINVAL;
		child->thread.debug.dbcr2 &= ~(DBCR2_DVC2M | DBCR2_DVC2BE);
#endif
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
		child->thread.debug.dvc2 = 0;
#endif
		child->thread.debug.dac2 = 0;
		dbcr_dac(child) &= ~(DBCR_DAC2R | DBCR_DAC2W);
	} else
		return -EINVAL;

	return 0;
}
2380 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
2381 
2382 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
/*
 * Install a ranged or masked data breakpoint spanning DAC1/DAC2.
 * Both DAC registers are consumed, so this fails with -ENOSPC if
 * either is already armed.  Returns 5 on success (DAC-based slots are
 * reported to userspace as 4 + slot), -EINVAL if a DVC condition was
 * requested, or -EIO for addresses/masks reaching into kernel space.
 */
static int set_dac_range(struct task_struct *child,
			 struct ppc_hw_breakpoint *bp_info)
{
	int mode = bp_info->addr_mode & PPC_BREAKPOINT_MODE_MASK;

	/* We don't allow range watchpoints to be used with DVC */
	if (bp_info->condition_mode)
		return -EINVAL;

	/*
	 * Best effort to verify the address range.  The user/supervisor bits
	 * prevent trapping in kernel space, but let's fail on an obvious bad
	 * range.  The simple test on the mask is not fool-proof, and any
	 * exclusive range will spill over into kernel space.
	 */
	if (bp_info->addr >= TASK_SIZE)
		return -EIO;
	if (mode == PPC_BREAKPOINT_MODE_MASK) {
		/*
		 * dac2 is a bitmask.  Don't allow a mask that makes a
		 * kernel space address from a valid dac1 value
		 */
		if (~((unsigned long)bp_info->addr2) >= TASK_SIZE)
			return -EIO;
	} else {
		/*
		 * For range breakpoints, addr2 must also be a valid address
		 */
		if (bp_info->addr2 >= TASK_SIZE)
			return -EIO;
	}

	if (child->thread.debug.dbcr0 &
	    (DBCR0_DAC1R | DBCR0_DAC1W | DBCR0_DAC2R | DBCR0_DAC2W))
		return -ENOSPC;

	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
		child->thread.debug.dbcr0 |= (DBCR0_DAC1R | DBCR0_IDM);
	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
		child->thread.debug.dbcr0 |= (DBCR0_DAC1W | DBCR0_IDM);
	child->thread.debug.dac1 = bp_info->addr;
	child->thread.debug.dac2 = bp_info->addr2;
	if (mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE)
		child->thread.debug.dbcr2  |= DBCR2_DAC12M;
	else if (mode == PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
		child->thread.debug.dbcr2  |= DBCR2_DAC12MX;
	else	/* PPC_BREAKPOINT_MODE_MASK */
		child->thread.debug.dbcr2  |= DBCR2_DAC12MM;
	child->thread.regs->msr |= MSR_DE;

	return 5;
}
2435 #endif /* CONFIG_PPC_ADV_DEBUG_DAC_RANGE */
2436 
/*
 * Implement PPC_PTRACE_SETHWDEBUG: install the hardware breakpoint or
 * watchpoint described by @bp_info for @child.
 *
 * On success returns a positive slot handle that userspace later
 * passes to PPC_PTRACE_DELHWDEBUG; otherwise a negative errno
 * (-ENOTSUPP for an unknown request version, -EINVAL for invalid
 * flag combinations, -EIO/-ENOSPC/-ENODEV from the installers).
 */
static long ppc_set_hwdebug(struct task_struct *child,
		     struct ppc_hw_breakpoint *bp_info)
{
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	int len = 0;
	struct thread_struct *thread = &(child->thread);
	struct perf_event *bp;
	struct perf_event_attr attr;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#ifndef CONFIG_PPC_ADV_DEBUG_REGS
	struct arch_hw_breakpoint brk;
#endif

	if (bp_info->version != 1)
		return -ENOTSUPP;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	/*
	 * Check for invalid flags and combinations
	 */
	if ((bp_info->trigger_type == 0) ||
	    (bp_info->trigger_type & ~(PPC_BREAKPOINT_TRIGGER_EXECUTE |
				       PPC_BREAKPOINT_TRIGGER_RW)) ||
	    (bp_info->addr_mode & ~PPC_BREAKPOINT_MODE_MASK) ||
	    (bp_info->condition_mode &
	     ~(PPC_BREAKPOINT_CONDITION_MODE |
	       PPC_BREAKPOINT_CONDITION_BE_ALL)))
		return -EINVAL;
#if CONFIG_PPC_ADV_DEBUG_DVCS == 0
	/* No DVC registers: conditions cannot be honoured */
	if (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
		return -EINVAL;
#endif

	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_EXECUTE) {
		/* Execute triggers may not be mixed with R/W or conditions */
		if ((bp_info->trigger_type != PPC_BREAKPOINT_TRIGGER_EXECUTE) ||
		    (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE))
			return -EINVAL;
		return set_instruction_bp(child, bp_info);
	}
	if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
		return set_dac(child, bp_info);

#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
	return set_dac_range(child, bp_info);
#else
	return -EINVAL;
#endif
#else /* !CONFIG_PPC_ADV_DEBUG_REGS */
	/*
	 * We only support one data breakpoint
	 */
	if ((bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_RW) == 0 ||
	    (bp_info->trigger_type & ~PPC_BREAKPOINT_TRIGGER_RW) != 0 ||
	    bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
		return -EINVAL;

	if ((unsigned long)bp_info->addr >= TASK_SIZE)
		return -EIO;

	brk.address = bp_info->addr & ~HW_BREAKPOINT_ALIGN;
	brk.type = HW_BRK_TYPE_TRANSLATE;
	brk.len = DABR_MAX_LEN;
	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
		brk.type |= HW_BRK_TYPE_READ;
	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
		brk.type |= HW_BRK_TYPE_WRITE;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE)
		len = bp_info->addr2 - bp_info->addr;
	else if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
		len = 1;
	else
		return -EINVAL;
	bp = thread->ptrace_bps[0];
	if (bp)
		return -ENOSPC;

	/* Create a new breakpoint request if one doesn't exist already */
	hw_breakpoint_init(&attr);
	attr.bp_addr = (unsigned long)bp_info->addr;
	attr.bp_len = len;
	arch_bp_generic_fields(brk.type, &attr.bp_type);

	thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr,
					       ptrace_triggered, NULL, child);
	if (IS_ERR(bp)) {
		thread->ptrace_bps[0] = NULL;
		return PTR_ERR(bp);
	}

	return 1;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */

	/* Fallback DABR path: only compiled in (and reachable) when
	 * CONFIG_HAVE_HW_BREAKPOINT is not set, since the block above
	 * otherwise always returns.
	 */
	if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT)
		return -EINVAL;

	if (child->thread.hw_brk.address)
		return -ENOSPC;

	if (!ppc_breakpoint_available())
		return -ENODEV;

	child->thread.hw_brk = brk;

	return 1;
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
}
2543 
/*
 * Implement PPC_PTRACE_DELHWDEBUG: remove the breakpoint identified by
 * @data, the slot handle previously returned by ppc_set_hwdebug().
 * Returns 0 on success or a negative errno.
 */
static long ppc_del_hwdebug(struct task_struct *child, long data)
{
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	int ret = 0;
	struct thread_struct *thread = &(child->thread);
	struct perf_event *bp;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	int rc;

	/* Handles 1-4 are IAC slots; 5+ map to DAC slots (bias of 4) */
	if (data <= 4)
		rc = del_instruction_bp(child, (int)data);
	else
		rc = del_dac(child, (int)data - 4);

	if (!rc) {
		/* Drop debug mode entirely once no events remain armed */
		if (!DBCR_ACTIVE_EVENTS(child->thread.debug.dbcr0,
					child->thread.debug.dbcr1)) {
			child->thread.debug.dbcr0 &= ~DBCR0_IDM;
			child->thread.regs->msr &= ~MSR_DE;
		}
	}
	return rc;
#else
	/* Only one data breakpoint is supported, so only handle 1 is valid */
	if (data != 1)
		return -EINVAL;

#ifdef CONFIG_HAVE_HW_BREAKPOINT
	bp = thread->ptrace_bps[0];
	if (bp) {
		unregister_hw_breakpoint(bp);
		thread->ptrace_bps[0] = NULL;
	} else
		ret = -ENOENT;
	return ret;
#else /* CONFIG_HAVE_HW_BREAKPOINT */
	if (child->thread.hw_brk.address == 0)
		return -ENOENT;

	child->thread.hw_brk.address = 0;
	child->thread.hw_brk.type = 0;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */

	return 0;
#endif
}
2590 
/*
 * Handle the PowerPC-specific ptrace requests.  Anything not handled
 * here falls through to the generic ptrace_request() at the bottom of
 * the switch.  @addr and @data are interpreted per-request.
 */
long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	int ret = -EPERM;
	void __user *datavp = (void __user *) data;
	unsigned long __user *datalp = datavp;

	switch (request) {
	/* read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		unsigned long index, tmp;

		ret = -EIO;
		/* convert to index and check */
		/* USER-area slots are register-sized: 4 bytes on ppc32,
		 * 8 bytes on ppc64, hence the different shifts below.
		 */
#ifdef CONFIG_PPC32
		index = addr >> 2;
		if ((addr & 3) || (index > PT_FPSCR)
		    || (child->thread.regs == NULL))
#else
		index = addr >> 3;
		if ((addr & 7) || (index > PT_FPSCR))
#endif
			break;

		CHECK_FULL_REGS(child->thread.regs);
		if (index < PT_FPR0) {
			ret = ptrace_get_reg(child, (int) index, &tmp);
			if (ret)
				break;
		} else {
			unsigned int fpidx = index - PT_FPR0;

			/* FP state lives in the thread struct; flush any
			 * live register contents to memory before reading.
			 */
			flush_fp_to_thread(child);
			if (fpidx < (PT_FPSCR - PT_FPR0))
				memcpy(&tmp, &child->thread.TS_FPR(fpidx),
				       sizeof(long));
			else
				tmp = child->thread.fp_state.fpscr;
		}
		ret = put_user(tmp, datalp);
		break;
	}

	/* write the word at location addr in the USER area */
	case PTRACE_POKEUSR: {
		unsigned long index;

		ret = -EIO;
		/* convert to index and check */
#ifdef CONFIG_PPC32
		index = addr >> 2;
		if ((addr & 3) || (index > PT_FPSCR)
		    || (child->thread.regs == NULL))
#else
		index = addr >> 3;
		if ((addr & 7) || (index > PT_FPSCR))
#endif
			break;

		CHECK_FULL_REGS(child->thread.regs);
		if (index < PT_FPR0) {
			ret = ptrace_put_reg(child, index, data);
		} else {
			unsigned int fpidx = index - PT_FPR0;

			flush_fp_to_thread(child);
			if (fpidx < (PT_FPSCR - PT_FPR0))
				memcpy(&child->thread.TS_FPR(fpidx), &data,
				       sizeof(long));
			else
				child->thread.fp_state.fpscr = data;
			ret = 0;
		}
		break;
	}

	/* Describe the debug hardware's capabilities to userspace */
	case PPC_PTRACE_GETHWDBGINFO: {
		struct ppc_debug_info dbginfo;

		dbginfo.version = 1;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
		dbginfo.num_instruction_bps = CONFIG_PPC_ADV_DEBUG_IACS;
		dbginfo.num_data_bps = CONFIG_PPC_ADV_DEBUG_DACS;
		dbginfo.num_condition_regs = CONFIG_PPC_ADV_DEBUG_DVCS;
		dbginfo.data_bp_alignment = 4;
		dbginfo.sizeof_condition = 4;
		dbginfo.features = PPC_DEBUG_FEATURE_INSN_BP_RANGE |
				   PPC_DEBUG_FEATURE_INSN_BP_MASK;
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
		dbginfo.features |=
				   PPC_DEBUG_FEATURE_DATA_BP_RANGE |
				   PPC_DEBUG_FEATURE_DATA_BP_MASK;
#endif
#else /* !CONFIG_PPC_ADV_DEBUG_REGS */
		dbginfo.num_instruction_bps = 0;
		if (ppc_breakpoint_available())
			dbginfo.num_data_bps = 1;
		else
			dbginfo.num_data_bps = 0;
		dbginfo.num_condition_regs = 0;
		dbginfo.data_bp_alignment = sizeof(long);
		dbginfo.sizeof_condition = 0;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
		dbginfo.features = PPC_DEBUG_FEATURE_DATA_BP_RANGE;
		if (dawr_enabled())
			dbginfo.features |= PPC_DEBUG_FEATURE_DATA_BP_DAWR;
#else
		dbginfo.features = 0;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */

		if (copy_to_user(datavp, &dbginfo,
				 sizeof(struct ppc_debug_info)))
			return -EFAULT;
		return 0;
	}

	case PPC_PTRACE_SETHWDEBUG: {
		struct ppc_hw_breakpoint bp_info;

		if (copy_from_user(&bp_info, datavp,
				   sizeof(struct ppc_hw_breakpoint)))
			return -EFAULT;
		return ppc_set_hwdebug(child, &bp_info);
	}

	case PPC_PTRACE_DELHWDEBUG: {
		ret = ppc_del_hwdebug(child, data);
		break;
	}

	case PTRACE_GET_DEBUGREG: {
#ifndef CONFIG_PPC_ADV_DEBUG_REGS
		unsigned long dabr_fake;
#endif
		ret = -EINVAL;
		/* We only support one DABR and no IABRS at the moment */
		if (addr > 0)
			break;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
		ret = put_user(child->thread.debug.dac1, datalp);
#else
		/* Reconstruct a DABR-style value from the stored
		 * address and type bits for the legacy interface.
		 */
		dabr_fake = ((child->thread.hw_brk.address & (~HW_BRK_TYPE_DABR)) |
			     (child->thread.hw_brk.type & HW_BRK_TYPE_DABR));
		ret = put_user(dabr_fake, datalp);
#endif
		break;
	}

	case PTRACE_SET_DEBUGREG:
		ret = ptrace_set_debugreg(child, addr, data);
		break;

#ifdef CONFIG_PPC64
	case PTRACE_GETREGS64:
#endif
	case PTRACE_GETREGS:	/* Get all pt_regs from the child. */
		return copy_regset_to_user(child, &user_ppc_native_view,
					   REGSET_GPR,
					   0, sizeof(struct user_pt_regs),
					   datavp);

#ifdef CONFIG_PPC64
	case PTRACE_SETREGS64:
#endif
	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
		return copy_regset_from_user(child, &user_ppc_native_view,
					     REGSET_GPR,
					     0, sizeof(struct user_pt_regs),
					     datavp);

	case PTRACE_GETFPREGS: /* Get the child FPU state (FPR0...31 + FPSCR) */
		return copy_regset_to_user(child, &user_ppc_native_view,
					   REGSET_FPR,
					   0, sizeof(elf_fpregset_t),
					   datavp);

	case PTRACE_SETFPREGS: /* Set the child FPU state (FPR0...31 + FPSCR) */
		return copy_regset_from_user(child, &user_ppc_native_view,
					     REGSET_FPR,
					     0, sizeof(elf_fpregset_t),
					     datavp);

#ifdef CONFIG_ALTIVEC
	case PTRACE_GETVRREGS:
		return copy_regset_to_user(child, &user_ppc_native_view,
					   REGSET_VMX,
					   0, (33 * sizeof(vector128) +
					       sizeof(u32)),
					   datavp);

	case PTRACE_SETVRREGS:
		return copy_regset_from_user(child, &user_ppc_native_view,
					     REGSET_VMX,
					     0, (33 * sizeof(vector128) +
						 sizeof(u32)),
					     datavp);
#endif
#ifdef CONFIG_VSX
	case PTRACE_GETVSRREGS:
		return copy_regset_to_user(child, &user_ppc_native_view,
					   REGSET_VSX,
					   0, 32 * sizeof(double),
					   datavp);

	case PTRACE_SETVSRREGS:
		return copy_regset_from_user(child, &user_ppc_native_view,
					     REGSET_VSX,
					     0, 32 * sizeof(double),
					     datavp);
#endif
#ifdef CONFIG_SPE
	case PTRACE_GETEVRREGS:
		/* Get the child spe register state. */
		return copy_regset_to_user(child, &user_ppc_native_view,
					   REGSET_SPE, 0, 35 * sizeof(u32),
					   datavp);

	case PTRACE_SETEVRREGS:
		/* Set the child spe register state. */
		return copy_regset_from_user(child, &user_ppc_native_view,
					     REGSET_SPE, 0, 35 * sizeof(u32),
					     datavp);
#endif

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}
	return ret;
}
2822 
#ifdef CONFIG_SECCOMP
/*
 * Run the seccomp filter for the current syscall, if enabled for this
 * task.  Returns 0 to let the syscall proceed, or -1 if seccomp
 * rejected it (r3 already holds the value to deliver to userspace).
 */
static int do_seccomp(struct pt_regs *regs)
{
	if (!test_thread_flag(TIF_SECCOMP))
		return 0;

	/*
	 * The ABI we present to seccomp tracers is that r3 contains
	 * the syscall return value and orig_gpr3 contains the first
	 * syscall parameter. This is different to the ptrace ABI where
	 * both r3 and orig_gpr3 contain the first syscall parameter.
	 */
	regs->gpr[3] = -ENOSYS;

	/*
	 * We use the __ version here because we have already checked
	 * TIF_SECCOMP. If this fails, there is nothing left to do, we
	 * have already loaded -ENOSYS into r3, or seccomp has put
	 * something else in r3 (via SECCOMP_RET_ERRNO/TRACE).
	 */
	if (__secure_computing(NULL))
		return -1;

	/*
	 * The syscall was allowed by seccomp, restore the register
	 * state to what audit expects.
	 * Note that we use orig_gpr3, which means a seccomp tracer can
	 * modify the first syscall parameter (in orig_gpr3) and also
	 * allow the syscall to proceed.
	 */
	regs->gpr[3] = regs->orig_gpr3;

	return 0;
}
#else
/* Without CONFIG_SECCOMP there is never anything to filter */
static inline int do_seccomp(struct pt_regs *regs) { return 0; }
#endif /* CONFIG_SECCOMP */
2860 
2861 /**
2862  * do_syscall_trace_enter() - Do syscall tracing on kernel entry.
2863  * @regs: the pt_regs of the task to trace (current)
2864  *
2865  * Performs various types of tracing on syscall entry. This includes seccomp,
2866  * ptrace, syscall tracepoints and audit.
2867  *
2868  * The pt_regs are potentially visible to userspace via ptrace, so their
2869  * contents is ABI.
2870  *
2871  * One or more of the tracers may modify the contents of pt_regs, in particular
2872  * to modify arguments or even the syscall number itself.
2873  *
2874  * It's also possible that a tracer can choose to reject the system call. In
2875  * that case this function will return an illegal syscall number, and will put
2876  * an appropriate return value in regs->r3.
2877  *
2878  * Return: the (possibly changed) syscall number.
2879  */
2880 long do_syscall_trace_enter(struct pt_regs *regs)
2881 {
2882 	u32 flags;
2883 
2884 	user_exit();
2885 
2886 	flags = READ_ONCE(current_thread_info()->flags) &
2887 		(_TIF_SYSCALL_EMU | _TIF_SYSCALL_TRACE);
2888 
2889 	if (flags) {
2890 		int rc = tracehook_report_syscall_entry(regs);
2891 
2892 		if (unlikely(flags & _TIF_SYSCALL_EMU)) {
2893 			/*
2894 			 * A nonzero return code from
2895 			 * tracehook_report_syscall_entry() tells us to prevent
2896 			 * the syscall execution, but we are not going to
2897 			 * execute it anyway.
2898 			 *
2899 			 * Returning -1 will skip the syscall execution. We want
2900 			 * to avoid clobbering any registers, so we don't goto
2901 			 * the skip label below.
2902 			 */
2903 			return -1;
2904 		}
2905 
2906 		if (rc) {
2907 			/*
2908 			 * The tracer decided to abort the syscall. Note that
2909 			 * the tracer may also just change regs->gpr[0] to an
2910 			 * invalid syscall number, that is handled below on the
2911 			 * exit path.
2912 			 */
2913 			goto skip;
2914 		}
2915 	}
2916 
2917 	/* Run seccomp after ptrace; allow it to set gpr[3]. */
2918 	if (do_seccomp(regs))
2919 		return -1;
2920 
2921 	/* Avoid trace and audit when syscall is invalid. */
2922 	if (regs->gpr[0] >= NR_syscalls)
2923 		goto skip;
2924 
2925 	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
2926 		trace_sys_enter(regs, regs->gpr[0]);
2927 
2928 	if (!is_32bit_task())
2929 		audit_syscall_entry(regs->gpr[0], regs->gpr[3], regs->gpr[4],
2930 				    regs->gpr[5], regs->gpr[6]);
2931 	else
2932 		audit_syscall_entry(regs->gpr[0],
2933 				    regs->gpr[3] & 0xffffffff,
2934 				    regs->gpr[4] & 0xffffffff,
2935 				    regs->gpr[5] & 0xffffffff,
2936 				    regs->gpr[6] & 0xffffffff);
2937 
2938 	/* Return the possibly modified but valid syscall number */
2939 	return regs->gpr[0];
2940 
2941 skip:
2942 	/*
2943 	 * If we are aborting explicitly, or if the syscall number is
2944 	 * now invalid, set the return value to -ENOSYS.
2945 	 */
2946 	regs->gpr[3] = -ENOSYS;
2947 	return -1;
2948 }
2949 
2950 void do_syscall_trace_leave(struct pt_regs *regs)
2951 {
2952 	int step;
2953 
2954 	audit_syscall_exit(regs);
2955 
2956 	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
2957 		trace_sys_exit(regs, regs->result);
2958 
2959 	step = test_thread_flag(TIF_SINGLESTEP);
2960 	if (step || test_thread_flag(TIF_SYSCALL_TRACE))
2961 		tracehook_report_syscall_exit(regs, step);
2962 
2963 	user_enter();
2964 }
2965 
2966 void __init pt_regs_check(void);
2967 
2968 /*
2969  * Dummy function, its purpose is to break the build if struct pt_regs and
2970  * struct user_pt_regs don't match.
2971  */
2972 void __init pt_regs_check(void)
2973 {
2974 	BUILD_BUG_ON(offsetof(struct pt_regs, gpr) !=
2975 		     offsetof(struct user_pt_regs, gpr));
2976 	BUILD_BUG_ON(offsetof(struct pt_regs, nip) !=
2977 		     offsetof(struct user_pt_regs, nip));
2978 	BUILD_BUG_ON(offsetof(struct pt_regs, msr) !=
2979 		     offsetof(struct user_pt_regs, msr));
2980 	BUILD_BUG_ON(offsetof(struct pt_regs, msr) !=
2981 		     offsetof(struct user_pt_regs, msr));
2982 	BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
2983 		     offsetof(struct user_pt_regs, orig_gpr3));
2984 	BUILD_BUG_ON(offsetof(struct pt_regs, ctr) !=
2985 		     offsetof(struct user_pt_regs, ctr));
2986 	BUILD_BUG_ON(offsetof(struct pt_regs, link) !=
2987 		     offsetof(struct user_pt_regs, link));
2988 	BUILD_BUG_ON(offsetof(struct pt_regs, xer) !=
2989 		     offsetof(struct user_pt_regs, xer));
2990 	BUILD_BUG_ON(offsetof(struct pt_regs, ccr) !=
2991 		     offsetof(struct user_pt_regs, ccr));
2992 #ifdef __powerpc64__
2993 	BUILD_BUG_ON(offsetof(struct pt_regs, softe) !=
2994 		     offsetof(struct user_pt_regs, softe));
2995 #else
2996 	BUILD_BUG_ON(offsetof(struct pt_regs, mq) !=
2997 		     offsetof(struct user_pt_regs, mq));
2998 #endif
2999 	BUILD_BUG_ON(offsetof(struct pt_regs, trap) !=
3000 		     offsetof(struct user_pt_regs, trap));
3001 	BUILD_BUG_ON(offsetof(struct pt_regs, dar) !=
3002 		     offsetof(struct user_pt_regs, dar));
3003 	BUILD_BUG_ON(offsetof(struct pt_regs, dsisr) !=
3004 		     offsetof(struct user_pt_regs, dsisr));
3005 	BUILD_BUG_ON(offsetof(struct pt_regs, result) !=
3006 		     offsetof(struct user_pt_regs, result));
3007 
3008 	BUILD_BUG_ON(sizeof(struct user_pt_regs) > sizeof(struct pt_regs));
3009 
3010 	// Now check that the pt_regs offsets match the uapi #defines
3011 	#define CHECK_REG(_pt, _reg) \
3012 		BUILD_BUG_ON(_pt != (offsetof(struct user_pt_regs, _reg) / \
3013 				     sizeof(unsigned long)));
3014 
3015 	CHECK_REG(PT_R0,  gpr[0]);
3016 	CHECK_REG(PT_R1,  gpr[1]);
3017 	CHECK_REG(PT_R2,  gpr[2]);
3018 	CHECK_REG(PT_R3,  gpr[3]);
3019 	CHECK_REG(PT_R4,  gpr[4]);
3020 	CHECK_REG(PT_R5,  gpr[5]);
3021 	CHECK_REG(PT_R6,  gpr[6]);
3022 	CHECK_REG(PT_R7,  gpr[7]);
3023 	CHECK_REG(PT_R8,  gpr[8]);
3024 	CHECK_REG(PT_R9,  gpr[9]);
3025 	CHECK_REG(PT_R10, gpr[10]);
3026 	CHECK_REG(PT_R11, gpr[11]);
3027 	CHECK_REG(PT_R12, gpr[12]);
3028 	CHECK_REG(PT_R13, gpr[13]);
3029 	CHECK_REG(PT_R14, gpr[14]);
3030 	CHECK_REG(PT_R15, gpr[15]);
3031 	CHECK_REG(PT_R16, gpr[16]);
3032 	CHECK_REG(PT_R17, gpr[17]);
3033 	CHECK_REG(PT_R18, gpr[18]);
3034 	CHECK_REG(PT_R19, gpr[19]);
3035 	CHECK_REG(PT_R20, gpr[20]);
3036 	CHECK_REG(PT_R21, gpr[21]);
3037 	CHECK_REG(PT_R22, gpr[22]);
3038 	CHECK_REG(PT_R23, gpr[23]);
3039 	CHECK_REG(PT_R24, gpr[24]);
3040 	CHECK_REG(PT_R25, gpr[25]);
3041 	CHECK_REG(PT_R26, gpr[26]);
3042 	CHECK_REG(PT_R27, gpr[27]);
3043 	CHECK_REG(PT_R28, gpr[28]);
3044 	CHECK_REG(PT_R29, gpr[29]);
3045 	CHECK_REG(PT_R30, gpr[30]);
3046 	CHECK_REG(PT_R31, gpr[31]);
3047 	CHECK_REG(PT_NIP, nip);
3048 	CHECK_REG(PT_MSR, msr);
3049 	CHECK_REG(PT_ORIG_R3, orig_gpr3);
3050 	CHECK_REG(PT_CTR, ctr);
3051 	CHECK_REG(PT_LNK, link);
3052 	CHECK_REG(PT_XER, xer);
3053 	CHECK_REG(PT_CCR, ccr);
3054 #ifdef CONFIG_PPC64
3055 	CHECK_REG(PT_SOFTE, softe);
3056 #else
3057 	CHECK_REG(PT_MQ, mq);
3058 #endif
3059 	CHECK_REG(PT_TRAP, trap);
3060 	CHECK_REG(PT_DAR, dar);
3061 	CHECK_REG(PT_DSISR, dsisr);
3062 	CHECK_REG(PT_RESULT, result);
3063 	#undef CHECK_REG
3064 
3065 	BUILD_BUG_ON(PT_REGS_COUNT != sizeof(struct user_pt_regs) / sizeof(unsigned long));
3066 
3067 	/*
3068 	 * PT_DSCR isn't a real reg, but it's important that it doesn't overlap the
3069 	 * real registers.
3070 	 */
3071 	BUILD_BUG_ON(PT_DSCR < sizeof(struct user_pt_regs) / sizeof(unsigned long));
3072 }
3073