1 /*
2  *  PowerPC version
3  *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4  *
5  *  Derived from "arch/m68k/kernel/ptrace.c"
6  *  Copyright (C) 1994 by Hamish Macdonald
7  *  Taken from linux/kernel/ptrace.c and modified for M680x0.
8  *  linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
9  *
10  * Modified by Cort Dougan (cort@hq.fsmlabs.com)
11  * and Paul Mackerras (paulus@samba.org).
12  *
13  * This file is subject to the terms and conditions of the GNU General
14  * Public License.  See the file README.legal in the main directory of
15  * this archive for more details.
16  */
17 
18 #include <linux/regset.h>
19 #include <linux/tracehook.h>
20 #include <linux/elf.h>
21 #include <linux/audit.h>
22 #include <linux/hw_breakpoint.h>
23 #include <linux/context_tracking.h>
24 #include <linux/nospec.h>
25 #include <linux/syscalls.h>
26 #include <linux/pkeys.h>
27 
28 #include <asm/switch_to.h>
29 #include <asm/asm-prototypes.h>
30 #include <asm/debug.h>
31 
32 #define CREATE_TRACE_POINTS
33 #include <trace/events/syscalls.h>
34 
35 #include "ptrace-decl.h"
36 
37 struct pt_regs_offset {
38 	const char *name;
39 	int offset;
40 };
41 
42 #define STR(s)	#s			/* convert to string */
43 #define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
44 #define GPR_OFFSET_NAME(num)	\
45 	{.name = STR(r##num), .offset = offsetof(struct pt_regs, gpr[num])}, \
46 	{.name = STR(gpr##num), .offset = offsetof(struct pt_regs, gpr[num])}
47 #define REG_OFFSET_END {.name = NULL, .offset = 0}
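/*
 * GPR_OFFSET_NAME(n) expands to two table entries, so a GPR can be looked
 * up either as "rN" or as "gprN": GPR_OFFSET_NAME(3), for example, yields
 * {.name = "r3", ...} and {.name = "gpr3", ...}, both with
 * .offset = offsetof(struct pt_regs, gpr[3]).
 */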
48 
49 static const struct pt_regs_offset regoffset_table[] = {
50 	GPR_OFFSET_NAME(0),
51 	GPR_OFFSET_NAME(1),
52 	GPR_OFFSET_NAME(2),
53 	GPR_OFFSET_NAME(3),
54 	GPR_OFFSET_NAME(4),
55 	GPR_OFFSET_NAME(5),
56 	GPR_OFFSET_NAME(6),
57 	GPR_OFFSET_NAME(7),
58 	GPR_OFFSET_NAME(8),
59 	GPR_OFFSET_NAME(9),
60 	GPR_OFFSET_NAME(10),
61 	GPR_OFFSET_NAME(11),
62 	GPR_OFFSET_NAME(12),
63 	GPR_OFFSET_NAME(13),
64 	GPR_OFFSET_NAME(14),
65 	GPR_OFFSET_NAME(15),
66 	GPR_OFFSET_NAME(16),
67 	GPR_OFFSET_NAME(17),
68 	GPR_OFFSET_NAME(18),
69 	GPR_OFFSET_NAME(19),
70 	GPR_OFFSET_NAME(20),
71 	GPR_OFFSET_NAME(21),
72 	GPR_OFFSET_NAME(22),
73 	GPR_OFFSET_NAME(23),
74 	GPR_OFFSET_NAME(24),
75 	GPR_OFFSET_NAME(25),
76 	GPR_OFFSET_NAME(26),
77 	GPR_OFFSET_NAME(27),
78 	GPR_OFFSET_NAME(28),
79 	GPR_OFFSET_NAME(29),
80 	GPR_OFFSET_NAME(30),
81 	GPR_OFFSET_NAME(31),
82 	REG_OFFSET_NAME(nip),
83 	REG_OFFSET_NAME(msr),
84 	REG_OFFSET_NAME(ctr),
85 	REG_OFFSET_NAME(link),
86 	REG_OFFSET_NAME(xer),
87 	REG_OFFSET_NAME(ccr),
88 #ifdef CONFIG_PPC64
89 	REG_OFFSET_NAME(softe),
90 #else
91 	REG_OFFSET_NAME(mq),
92 #endif
93 	REG_OFFSET_NAME(trap),
94 	REG_OFFSET_NAME(dar),
95 	REG_OFFSET_NAME(dsisr),
96 	REG_OFFSET_END,
97 };
98 
99 /**
100  * regs_query_register_offset() - query register offset from its name
101  * @name:	the name of a register
102  *
103  * regs_query_register_offset() returns the offset of a register in struct
104  * pt_regs from its name. If the name is invalid, this returns -EINVAL.
105  */
106 int regs_query_register_offset(const char *name)
107 {
108 	const struct pt_regs_offset *roff;
109 	for (roff = regoffset_table; roff->name != NULL; roff++)
110 		if (!strcmp(roff->name, name))
111 			return roff->offset;
112 	return -EINVAL;
113 }
114 
115 /**
116  * regs_query_register_name() - query register name from its offset
117  * @offset:	the offset of a register in struct pt_regs.
118  *
119  * regs_query_register_name() returns the name of a register from its
120  * offset in struct pt_regs. If the @offset is invalid, this returns NULL.
121  */
122 const char *regs_query_register_name(unsigned int offset)
123 {
124 	const struct pt_regs_offset *roff;
125 	for (roff = regoffset_table; roff->name != NULL; roff++)
126 		if (roff->offset == offset)
127 			return roff->name;
128 	return NULL;
129 }
130 
131 /*
132  * This does not yet catch signals sent when the child dies;
133  * that is handled in exit.c or in signal.c.
134  */
135 
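/*
 * get_user_msr() folds the thread's floating-point exception mode
 * (thread.fpexc_mode) into the MSR image shown to the tracer, and
 * set_user_msr() only lets the tracer modify the MSR_DEBUGCHANGE bits;
 * all other MSR bits are preserved.
 */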
136 static unsigned long get_user_msr(struct task_struct *task)
137 {
138 	return task->thread.regs->msr | task->thread.fpexc_mode;
139 }
140 
141 static int set_user_msr(struct task_struct *task, unsigned long msr)
142 {
143 	task->thread.regs->msr &= ~MSR_DEBUGCHANGE;
144 	task->thread.regs->msr |= msr & MSR_DEBUGCHANGE;
145 	return 0;
146 }
147 
148 #ifdef CONFIG_PPC64
149 static int get_user_dscr(struct task_struct *task, unsigned long *data)
150 {
151 	*data = task->thread.dscr;
152 	return 0;
153 }
154 
155 static int set_user_dscr(struct task_struct *task, unsigned long dscr)
156 {
157 	task->thread.dscr = dscr;
158 	task->thread.dscr_inherit = 1;
159 	return 0;
160 }
161 #else
162 static int get_user_dscr(struct task_struct *task, unsigned long *data)
163 {
164 	return -EIO;
165 }
166 
167 static int set_user_dscr(struct task_struct *task, unsigned long dscr)
168 {
169 	return -EIO;
170 }
171 #endif
172 
173 /*
174  * We prevent mucking around with the reserved area of the trap word,
175  * which is used internally by the kernel.
176  */
177 static int set_user_trap(struct task_struct *task, unsigned long trap)
178 {
179 	task->thread.regs->trap = trap & 0xfff0;
180 	return 0;
181 }
182 
183 /*
184  * Get contents of register REGNO in task TASK.
185  */
186 int ptrace_get_reg(struct task_struct *task, int regno, unsigned long *data)
187 {
188 	unsigned int regs_max;
189 
190 	if ((task->thread.regs == NULL) || !data)
191 		return -EIO;
192 
193 	if (regno == PT_MSR) {
194 		*data = get_user_msr(task);
195 		return 0;
196 	}
197 
198 	if (regno == PT_DSCR)
199 		return get_user_dscr(task, data);
200 
201 	/*
202 	 * softe copies the paca->irq_soft_mask variable state. Since irq_soft_mask
203 	 * is no longer used as a flag, force userspace to always see the softe
204 	 * value as 1, which means interrupts are not soft-disabled.
205 	 */
206 	if (IS_ENABLED(CONFIG_PPC64) && regno == PT_SOFTE) {
207 		*data = 1;
208 		return  0;
209 	}
210 
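	/*
	 * Plain register reads: clamp the index with array_index_nospec() so a
	 * mispredicted bounds check cannot be used to speculatively read past
	 * the end of user_pt_regs (Spectre v1 hardening).
	 */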
211 	regs_max = sizeof(struct user_pt_regs) / sizeof(unsigned long);
212 	if (regno < regs_max) {
213 		regno = array_index_nospec(regno, regs_max);
214 		*data = ((unsigned long *)task->thread.regs)[regno];
215 		return 0;
216 	}
217 
218 	return -EIO;
219 }
220 
221 /*
222  * Write contents of register REGNO in task TASK.
223  */
224 int ptrace_put_reg(struct task_struct *task, int regno, unsigned long data)
225 {
226 	if (task->thread.regs == NULL)
227 		return -EIO;
228 
229 	if (regno == PT_MSR)
230 		return set_user_msr(task, data);
231 	if (regno == PT_TRAP)
232 		return set_user_trap(task, data);
233 	if (regno == PT_DSCR)
234 		return set_user_dscr(task, data);
235 
236 	if (regno <= PT_MAX_PUT_REG) {
237 		regno = array_index_nospec(regno, PT_MAX_PUT_REG + 1);
238 		((unsigned long *)task->thread.regs)[regno] = data;
239 		return 0;
240 	}
241 	return -EIO;
242 }
243 
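/*
 * gpr_get()/gpr_set() implement the NT_PRSTATUS regset. The pt_regs area
 * below MSR is copied verbatim; MSR itself is routed through
 * get_user_msr()/set_user_msr(), and writes to TRAP go through
 * set_user_trap(), so the kernel-internal bits of those fields stay under
 * kernel control.
 */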
244 static int gpr_get(struct task_struct *target, const struct user_regset *regset,
245 		   unsigned int pos, unsigned int count,
246 		   void *kbuf, void __user *ubuf)
247 {
248 	int i, ret;
249 
250 	if (target->thread.regs == NULL)
251 		return -EIO;
252 
253 	if (!FULL_REGS(target->thread.regs)) {
254 		/* We have a partial register set.  Fill 14-31 with bogus values */
255 		for (i = 14; i < 32; i++)
256 			target->thread.regs->gpr[i] = NV_REG_POISON;
257 	}
258 
259 	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
260 				  target->thread.regs,
261 				  0, offsetof(struct pt_regs, msr));
262 	if (!ret) {
263 		unsigned long msr = get_user_msr(target);
264 		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &msr,
265 					  offsetof(struct pt_regs, msr),
266 					  offsetof(struct pt_regs, msr) +
267 					  sizeof(msr));
268 	}
269 
270 	BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
271 		     offsetof(struct pt_regs, msr) + sizeof(long));
272 
273 	if (!ret)
274 		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
275 					  &target->thread.regs->orig_gpr3,
276 					  offsetof(struct pt_regs, orig_gpr3),
277 					  sizeof(struct user_pt_regs));
278 	if (!ret)
279 		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
280 					       sizeof(struct user_pt_regs), -1);
281 
282 	return ret;
283 }
284 
285 static int gpr_set(struct task_struct *target, const struct user_regset *regset,
286 		   unsigned int pos, unsigned int count,
287 		   const void *kbuf, const void __user *ubuf)
288 {
289 	unsigned long reg;
290 	int ret;
291 
292 	if (target->thread.regs == NULL)
293 		return -EIO;
294 
295 	CHECK_FULL_REGS(target->thread.regs);
296 
297 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
298 				 target->thread.regs,
299 				 0, PT_MSR * sizeof(reg));
300 
301 	if (!ret && count > 0) {
302 		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
303 					 PT_MSR * sizeof(reg),
304 					 (PT_MSR + 1) * sizeof(reg));
305 		if (!ret)
306 			ret = set_user_msr(target, reg);
307 	}
308 
309 	BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
310 		     offsetof(struct pt_regs, msr) + sizeof(long));
311 
312 	if (!ret)
313 		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
314 					 &target->thread.regs->orig_gpr3,
315 					 PT_ORIG_R3 * sizeof(reg),
316 					 (PT_MAX_PUT_REG + 1) * sizeof(reg));
317 
318 	if (PT_MAX_PUT_REG + 1 < PT_TRAP && !ret)
319 		ret = user_regset_copyin_ignore(
320 			&pos, &count, &kbuf, &ubuf,
321 			(PT_MAX_PUT_REG + 1) * sizeof(reg),
322 			PT_TRAP * sizeof(reg));
323 
324 	if (!ret && count > 0) {
325 		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
326 					 PT_TRAP * sizeof(reg),
327 					 (PT_TRAP + 1) * sizeof(reg));
328 		if (!ret)
329 			ret = set_user_trap(target, reg);
330 	}
331 
332 	if (!ret)
333 		ret = user_regset_copyin_ignore(
334 			&pos, &count, &kbuf, &ubuf,
335 			(PT_TRAP + 1) * sizeof(reg), -1);
336 
337 	return ret;
338 }
339 
340 #ifdef CONFIG_PPC64
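/*
 * PPR (Program Priority Register) and DSCR (Data Stream Control Register)
 * are exposed as single 64-bit regsets on 64-bit kernels, backed by the
 * values saved in pt_regs and thread_struct rather than the live SPRs.
 */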
341 static int ppr_get(struct task_struct *target,
342 		      const struct user_regset *regset,
343 		      unsigned int pos, unsigned int count,
344 		      void *kbuf, void __user *ubuf)
345 {
346 	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
347 				   &target->thread.regs->ppr, 0, sizeof(u64));
348 }
349 
350 static int ppr_set(struct task_struct *target,
351 		      const struct user_regset *regset,
352 		      unsigned int pos, unsigned int count,
353 		      const void *kbuf, const void __user *ubuf)
354 {
355 	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
356 				  &target->thread.regs->ppr, 0, sizeof(u64));
357 }
358 
359 static int dscr_get(struct task_struct *target,
360 		      const struct user_regset *regset,
361 		      unsigned int pos, unsigned int count,
362 		      void *kbuf, void __user *ubuf)
363 {
364 	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
365 				   &target->thread.dscr, 0, sizeof(u64));
366 }
367 static int dscr_set(struct task_struct *target,
368 		      const struct user_regset *regset,
369 		      unsigned int pos, unsigned int count,
370 		      const void *kbuf, const void __user *ubuf)
371 {
372 	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
373 				  &target->thread.dscr, 0, sizeof(u64));
374 }
375 #endif
376 #ifdef CONFIG_PPC_BOOK3S_64
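/*
 * TAR, EBB and this PMU register set are tied to ISA 2.07 (POWER8 and
 * later); the EBB and PMU regsets therefore check CPU_FTR_ARCH_207S before
 * reporting or accepting any data.
 */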
377 static int tar_get(struct task_struct *target,
378 		      const struct user_regset *regset,
379 		      unsigned int pos, unsigned int count,
380 		      void *kbuf, void __user *ubuf)
381 {
382 	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
383 				   &target->thread.tar, 0, sizeof(u64));
384 }
385 static int tar_set(struct task_struct *target,
386 		      const struct user_regset *regset,
387 		      unsigned int pos, unsigned int count,
388 		      const void *kbuf, const void __user *ubuf)
389 {
390 	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
391 				  &target->thread.tar, 0, sizeof(u64));
392 }
393 
394 static int ebb_active(struct task_struct *target,
395 			 const struct user_regset *regset)
396 {
397 	if (!cpu_has_feature(CPU_FTR_ARCH_207S))
398 		return -ENODEV;
399 
400 	if (target->thread.used_ebb)
401 		return regset->n;
402 
403 	return 0;
404 }
405 
406 static int ebb_get(struct task_struct *target,
407 		      const struct user_regset *regset,
408 		      unsigned int pos, unsigned int count,
409 		      void *kbuf, void __user *ubuf)
410 {
411 	/* Build tests */
412 	BUILD_BUG_ON(TSO(ebbrr) + sizeof(unsigned long) != TSO(ebbhr));
413 	BUILD_BUG_ON(TSO(ebbhr) + sizeof(unsigned long) != TSO(bescr));
414 
415 	if (!cpu_has_feature(CPU_FTR_ARCH_207S))
416 		return -ENODEV;
417 
418 	if (!target->thread.used_ebb)
419 		return -ENODATA;
420 
421 	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
422 			&target->thread.ebbrr, 0, 3 * sizeof(unsigned long));
423 }
424 
425 static int ebb_set(struct task_struct *target,
426 		      const struct user_regset *regset,
427 		      unsigned int pos, unsigned int count,
428 		      const void *kbuf, const void __user *ubuf)
429 {
430 	int ret = 0;
431 
432 	/* Build tests */
433 	BUILD_BUG_ON(TSO(ebbrr) + sizeof(unsigned long) != TSO(ebbhr));
434 	BUILD_BUG_ON(TSO(ebbhr) + sizeof(unsigned long) != TSO(bescr));
435 
436 	if (!cpu_has_feature(CPU_FTR_ARCH_207S))
437 		return -ENODEV;
438 
439 	if (target->thread.used_ebb)
440 		return -ENODATA;
441 
442 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
443 			&target->thread.ebbrr, 0, sizeof(unsigned long));
444 
445 	if (!ret)
446 		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
447 			&target->thread.ebbhr, sizeof(unsigned long),
448 			2 * sizeof(unsigned long));
449 
450 	if (!ret)
451 		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
452 			&target->thread.bescr,
453 			2 * sizeof(unsigned long), 3 * sizeof(unsigned long));
454 
455 	return ret;
456 }
457 static int pmu_active(struct task_struct *target,
458 			 const struct user_regset *regset)
459 {
460 	if (!cpu_has_feature(CPU_FTR_ARCH_207S))
461 		return -ENODEV;
462 
463 	return regset->n;
464 }
465 
466 static int pmu_get(struct task_struct *target,
467 		      const struct user_regset *regset,
468 		      unsigned int pos, unsigned int count,
469 		      void *kbuf, void __user *ubuf)
470 {
471 	/* Build tests */
472 	BUILD_BUG_ON(TSO(siar) + sizeof(unsigned long) != TSO(sdar));
473 	BUILD_BUG_ON(TSO(sdar) + sizeof(unsigned long) != TSO(sier));
474 	BUILD_BUG_ON(TSO(sier) + sizeof(unsigned long) != TSO(mmcr2));
475 	BUILD_BUG_ON(TSO(mmcr2) + sizeof(unsigned long) != TSO(mmcr0));
476 
477 	if (!cpu_has_feature(CPU_FTR_ARCH_207S))
478 		return -ENODEV;
479 
480 	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
481 			&target->thread.siar, 0,
482 			5 * sizeof(unsigned long));
483 }
484 
485 static int pmu_set(struct task_struct *target,
486 		      const struct user_regset *regset,
487 		      unsigned int pos, unsigned int count,
488 		      const void *kbuf, const void __user *ubuf)
489 {
490 	int ret = 0;
491 
492 	/* Build tests */
493 	BUILD_BUG_ON(TSO(siar) + sizeof(unsigned long) != TSO(sdar));
494 	BUILD_BUG_ON(TSO(sdar) + sizeof(unsigned long) != TSO(sier));
495 	BUILD_BUG_ON(TSO(sier) + sizeof(unsigned long) != TSO(mmcr2));
496 	BUILD_BUG_ON(TSO(mmcr2) + sizeof(unsigned long) != TSO(mmcr0));
497 
498 	if (!cpu_has_feature(CPU_FTR_ARCH_207S))
499 		return -ENODEV;
500 
501 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
502 			&target->thread.siar, 0,
503 			sizeof(unsigned long));
504 
505 	if (!ret)
506 		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
507 			&target->thread.sdar, sizeof(unsigned long),
508 			2 * sizeof(unsigned long));
509 
510 	if (!ret)
511 		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
512 			&target->thread.sier, 2 * sizeof(unsigned long),
513 			3 * sizeof(unsigned long));
514 
515 	if (!ret)
516 		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
517 			&target->thread.mmcr2, 3 * sizeof(unsigned long),
518 			4 * sizeof(unsigned long));
519 
520 	if (!ret)
521 		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
522 			&target->thread.mmcr0, 4 * sizeof(unsigned long),
523 			5 * sizeof(unsigned long));
524 	return ret;
525 }
526 #endif
527 
528 #ifdef CONFIG_PPC_MEM_KEYS
529 static int pkey_active(struct task_struct *target,
530 		       const struct user_regset *regset)
531 {
532 	if (!arch_pkeys_enabled())
533 		return -ENODEV;
534 
535 	return regset->n;
536 }
537 
538 static int pkey_get(struct task_struct *target,
539 		    const struct user_regset *regset,
540 		    unsigned int pos, unsigned int count,
541 		    void *kbuf, void __user *ubuf)
542 {
543 	BUILD_BUG_ON(TSO(amr) + sizeof(unsigned long) != TSO(iamr));
544 	BUILD_BUG_ON(TSO(iamr) + sizeof(unsigned long) != TSO(uamor));
545 
546 	if (!arch_pkeys_enabled())
547 		return -ENODEV;
548 
549 	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
550 				   &target->thread.amr, 0,
551 				   ELF_NPKEY * sizeof(unsigned long));
552 }
553 
554 static int pkey_set(struct task_struct *target,
555 		      const struct user_regset *regset,
556 		      unsigned int pos, unsigned int count,
557 		      const void *kbuf, const void __user *ubuf)
558 {
559 	u64 new_amr;
560 	int ret;
561 
562 	if (!arch_pkeys_enabled())
563 		return -ENODEV;
564 
565 	/* Only the AMR can be set from userspace */
566 	if (pos != 0 || count != sizeof(new_amr))
567 		return -EINVAL;
568 
569 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
570 				 &new_amr, 0, sizeof(new_amr));
571 	if (ret)
572 		return ret;
573 
574 	/* UAMOR determines which bits of the AMR can be set from userspace. */
575 	target->thread.amr = (new_amr & target->thread.uamor) |
576 		(target->thread.amr & ~target->thread.uamor);
577 
578 	return 0;
579 }
580 #endif /* CONFIG_PPC_MEM_KEYS */
581 
582 /*
583  * These are our native regset flavors.
584  */
585 enum powerpc_regset {
586 	REGSET_GPR,
587 	REGSET_FPR,
588 #ifdef CONFIG_ALTIVEC
589 	REGSET_VMX,
590 #endif
591 #ifdef CONFIG_VSX
592 	REGSET_VSX,
593 #endif
594 #ifdef CONFIG_SPE
595 	REGSET_SPE,
596 #endif
597 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
598 	REGSET_TM_CGPR,		/* TM checkpointed GPR registers */
599 	REGSET_TM_CFPR,		/* TM checkpointed FPR registers */
600 	REGSET_TM_CVMX,		/* TM checkpointed VMX registers */
601 	REGSET_TM_CVSX,		/* TM checkpointed VSX registers */
602 	REGSET_TM_SPR,		/* TM specific SPR registers */
603 	REGSET_TM_CTAR,		/* TM checkpointed TAR register */
604 	REGSET_TM_CPPR,		/* TM checkpointed PPR register */
605 	REGSET_TM_CDSCR,	/* TM checkpointed DSCR register */
606 #endif
607 #ifdef CONFIG_PPC64
608 	REGSET_PPR,		/* PPR register */
609 	REGSET_DSCR,		/* DSCR register */
610 #endif
611 #ifdef CONFIG_PPC_BOOK3S_64
612 	REGSET_TAR,		/* TAR register */
613 	REGSET_EBB,		/* EBB registers */
614 	REGSET_PMR,		/* Performance Monitor Registers */
615 #endif
616 #ifdef CONFIG_PPC_MEM_KEYS
617 	REGSET_PKEY,		/* AMR register */
618 #endif
619 };
620 
621 static const struct user_regset native_regsets[] = {
622 	[REGSET_GPR] = {
623 		.core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
624 		.size = sizeof(long), .align = sizeof(long),
625 		.get = gpr_get, .set = gpr_set
626 	},
627 	[REGSET_FPR] = {
628 		.core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
629 		.size = sizeof(double), .align = sizeof(double),
630 		.get = fpr_get, .set = fpr_set
631 	},
632 #ifdef CONFIG_ALTIVEC
633 	[REGSET_VMX] = {
634 		.core_note_type = NT_PPC_VMX, .n = 34,
635 		.size = sizeof(vector128), .align = sizeof(vector128),
636 		.active = vr_active, .get = vr_get, .set = vr_set
637 	},
638 #endif
639 #ifdef CONFIG_VSX
640 	[REGSET_VSX] = {
641 		.core_note_type = NT_PPC_VSX, .n = 32,
642 		.size = sizeof(double), .align = sizeof(double),
643 		.active = vsr_active, .get = vsr_get, .set = vsr_set
644 	},
645 #endif
646 #ifdef CONFIG_SPE
647 	[REGSET_SPE] = {
648 		.core_note_type = NT_PPC_SPE, .n = 35,
649 		.size = sizeof(u32), .align = sizeof(u32),
650 		.active = evr_active, .get = evr_get, .set = evr_set
651 	},
652 #endif
653 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
654 	[REGSET_TM_CGPR] = {
655 		.core_note_type = NT_PPC_TM_CGPR, .n = ELF_NGREG,
656 		.size = sizeof(long), .align = sizeof(long),
657 		.active = tm_cgpr_active, .get = tm_cgpr_get, .set = tm_cgpr_set
658 	},
659 	[REGSET_TM_CFPR] = {
660 		.core_note_type = NT_PPC_TM_CFPR, .n = ELF_NFPREG,
661 		.size = sizeof(double), .align = sizeof(double),
662 		.active = tm_cfpr_active, .get = tm_cfpr_get, .set = tm_cfpr_set
663 	},
664 	[REGSET_TM_CVMX] = {
665 		.core_note_type = NT_PPC_TM_CVMX, .n = ELF_NVMX,
666 		.size = sizeof(vector128), .align = sizeof(vector128),
667 		.active = tm_cvmx_active, .get = tm_cvmx_get, .set = tm_cvmx_set
668 	},
669 	[REGSET_TM_CVSX] = {
670 		.core_note_type = NT_PPC_TM_CVSX, .n = ELF_NVSX,
671 		.size = sizeof(double), .align = sizeof(double),
672 		.active = tm_cvsx_active, .get = tm_cvsx_get, .set = tm_cvsx_set
673 	},
674 	[REGSET_TM_SPR] = {
675 		.core_note_type = NT_PPC_TM_SPR, .n = ELF_NTMSPRREG,
676 		.size = sizeof(u64), .align = sizeof(u64),
677 		.active = tm_spr_active, .get = tm_spr_get, .set = tm_spr_set
678 	},
679 	[REGSET_TM_CTAR] = {
680 		.core_note_type = NT_PPC_TM_CTAR, .n = 1,
681 		.size = sizeof(u64), .align = sizeof(u64),
682 		.active = tm_tar_active, .get = tm_tar_get, .set = tm_tar_set
683 	},
684 	[REGSET_TM_CPPR] = {
685 		.core_note_type = NT_PPC_TM_CPPR, .n = 1,
686 		.size = sizeof(u64), .align = sizeof(u64),
687 		.active = tm_ppr_active, .get = tm_ppr_get, .set = tm_ppr_set
688 	},
689 	[REGSET_TM_CDSCR] = {
690 		.core_note_type = NT_PPC_TM_CDSCR, .n = 1,
691 		.size = sizeof(u64), .align = sizeof(u64),
692 		.active = tm_dscr_active, .get = tm_dscr_get, .set = tm_dscr_set
693 	},
694 #endif
695 #ifdef CONFIG_PPC64
696 	[REGSET_PPR] = {
697 		.core_note_type = NT_PPC_PPR, .n = 1,
698 		.size = sizeof(u64), .align = sizeof(u64),
699 		.get = ppr_get, .set = ppr_set
700 	},
701 	[REGSET_DSCR] = {
702 		.core_note_type = NT_PPC_DSCR, .n = 1,
703 		.size = sizeof(u64), .align = sizeof(u64),
704 		.get = dscr_get, .set = dscr_set
705 	},
706 #endif
707 #ifdef CONFIG_PPC_BOOK3S_64
708 	[REGSET_TAR] = {
709 		.core_note_type = NT_PPC_TAR, .n = 1,
710 		.size = sizeof(u64), .align = sizeof(u64),
711 		.get = tar_get, .set = tar_set
712 	},
713 	[REGSET_EBB] = {
714 		.core_note_type = NT_PPC_EBB, .n = ELF_NEBB,
715 		.size = sizeof(u64), .align = sizeof(u64),
716 		.active = ebb_active, .get = ebb_get, .set = ebb_set
717 	},
718 	[REGSET_PMR] = {
719 		.core_note_type = NT_PPC_PMU, .n = ELF_NPMU,
720 		.size = sizeof(u64), .align = sizeof(u64),
721 		.active = pmu_active, .get = pmu_get, .set = pmu_set
722 	},
723 #endif
724 #ifdef CONFIG_PPC_MEM_KEYS
725 	[REGSET_PKEY] = {
726 		.core_note_type = NT_PPC_PKEY, .n = ELF_NPKEY,
727 		.size = sizeof(u64), .align = sizeof(u64),
728 		.active = pkey_active, .get = pkey_get, .set = pkey_set
729 	},
730 #endif
731 };
732 
733 static const struct user_regset_view user_ppc_native_view = {
734 	.name = UTS_MACHINE, .e_machine = ELF_ARCH, .ei_osabi = ELF_OSABI,
735 	.regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
736 };
737 
738 #include <linux/compat.h>
739 
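/*
 * Compat (32-bit) GPR regset helpers: each 64-bit register is presented to
 * the tracer as a 32-bit compat_ulong_t, with MSR and TRAP going through
 * the same get_user_msr()/set_user_msr()/set_user_trap() filtering as the
 * native regset.
 */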
740 int gpr32_get_common(struct task_struct *target,
741 		     const struct user_regset *regset,
742 		     unsigned int pos, unsigned int count,
743 			    void *kbuf, void __user *ubuf,
744 			    unsigned long *regs)
745 {
746 	compat_ulong_t *k = kbuf;
747 	compat_ulong_t __user *u = ubuf;
748 	compat_ulong_t reg;
749 
750 	pos /= sizeof(reg);
751 	count /= sizeof(reg);
752 
753 	if (kbuf)
754 		for (; count > 0 && pos < PT_MSR; --count)
755 			*k++ = regs[pos++];
756 	else
757 		for (; count > 0 && pos < PT_MSR; --count)
758 			if (__put_user((compat_ulong_t) regs[pos++], u++))
759 				return -EFAULT;
760 
761 	if (count > 0 && pos == PT_MSR) {
762 		reg = get_user_msr(target);
763 		if (kbuf)
764 			*k++ = reg;
765 		else if (__put_user(reg, u++))
766 			return -EFAULT;
767 		++pos;
768 		--count;
769 	}
770 
771 	if (kbuf)
772 		for (; count > 0 && pos < PT_REGS_COUNT; --count)
773 			*k++ = regs[pos++];
774 	else
775 		for (; count > 0 && pos < PT_REGS_COUNT; --count)
776 			if (__put_user((compat_ulong_t) regs[pos++], u++))
777 				return -EFAULT;
778 
779 	kbuf = k;
780 	ubuf = u;
781 	pos *= sizeof(reg);
782 	count *= sizeof(reg);
783 	return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
784 					PT_REGS_COUNT * sizeof(reg), -1);
785 }
786 
787 int gpr32_set_common(struct task_struct *target,
788 		     const struct user_regset *regset,
789 		     unsigned int pos, unsigned int count,
790 		     const void *kbuf, const void __user *ubuf,
791 		     unsigned long *regs)
792 {
793 	const compat_ulong_t *k = kbuf;
794 	const compat_ulong_t __user *u = ubuf;
795 	compat_ulong_t reg;
796 
797 	pos /= sizeof(reg);
798 	count /= sizeof(reg);
799 
800 	if (kbuf)
801 		for (; count > 0 && pos < PT_MSR; --count)
802 			regs[pos++] = *k++;
803 	else
804 		for (; count > 0 && pos < PT_MSR; --count) {
805 			if (__get_user(reg, u++))
806 				return -EFAULT;
807 			regs[pos++] = reg;
808 		}
809 
810 
811 	if (count > 0 && pos == PT_MSR) {
812 		if (kbuf)
813 			reg = *k++;
814 		else if (__get_user(reg, u++))
815 			return -EFAULT;
816 		set_user_msr(target, reg);
817 		++pos;
818 		--count;
819 	}
820 
821 	if (kbuf) {
822 		for (; count > 0 && pos <= PT_MAX_PUT_REG; --count)
823 			regs[pos++] = *k++;
824 		for (; count > 0 && pos < PT_TRAP; --count, ++pos)
825 			++k;
826 	} else {
827 		for (; count > 0 && pos <= PT_MAX_PUT_REG; --count) {
828 			if (__get_user(reg, u++))
829 				return -EFAULT;
830 			regs[pos++] = reg;
831 		}
832 		for (; count > 0 && pos < PT_TRAP; --count, ++pos)
833 			if (__get_user(reg, u++))
834 				return -EFAULT;
835 	}
836 
837 	if (count > 0 && pos == PT_TRAP) {
838 		if (kbuf)
839 			reg = *k++;
840 		else if (__get_user(reg, u++))
841 			return -EFAULT;
842 		set_user_trap(target, reg);
843 		++pos;
844 		--count;
845 	}
846 
847 	kbuf = k;
848 	ubuf = u;
849 	pos *= sizeof(reg);
850 	count *= sizeof(reg);
851 	return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
852 					 (PT_TRAP + 1) * sizeof(reg), -1);
853 }
854 
855 static int gpr32_get(struct task_struct *target,
856 		     const struct user_regset *regset,
857 		     unsigned int pos, unsigned int count,
858 		     void *kbuf, void __user *ubuf)
859 {
860 	int i;
861 
862 	if (target->thread.regs == NULL)
863 		return -EIO;
864 
865 	if (!FULL_REGS(target->thread.regs)) {
866 		/*
867 		 * We have a partial register set.
868 		 * Fill 14-31 with bogus values.
869 		 */
870 		for (i = 14; i < 32; i++)
871 			target->thread.regs->gpr[i] = NV_REG_POISON;
872 	}
873 	return gpr32_get_common(target, regset, pos, count, kbuf, ubuf,
874 			&target->thread.regs->gpr[0]);
875 }
876 
877 static int gpr32_set(struct task_struct *target,
878 		     const struct user_regset *regset,
879 		     unsigned int pos, unsigned int count,
880 		     const void *kbuf, const void __user *ubuf)
881 {
882 	if (target->thread.regs == NULL)
883 		return -EIO;
884 
885 	CHECK_FULL_REGS(target->thread.regs);
886 	return gpr32_set_common(target, regset, pos, count, kbuf, ubuf,
887 			&target->thread.regs->gpr[0]);
888 }
889 
890 /*
891  * These are the regset flavors matching the CONFIG_PPC32 native set.
892  */
893 static const struct user_regset compat_regsets[] = {
894 	[REGSET_GPR] = {
895 		.core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
896 		.size = sizeof(compat_long_t), .align = sizeof(compat_long_t),
897 		.get = gpr32_get, .set = gpr32_set
898 	},
899 	[REGSET_FPR] = {
900 		.core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
901 		.size = sizeof(double), .align = sizeof(double),
902 		.get = fpr_get, .set = fpr_set
903 	},
904 #ifdef CONFIG_ALTIVEC
905 	[REGSET_VMX] = {
906 		.core_note_type = NT_PPC_VMX, .n = 34,
907 		.size = sizeof(vector128), .align = sizeof(vector128),
908 		.active = vr_active, .get = vr_get, .set = vr_set
909 	},
910 #endif
911 #ifdef CONFIG_SPE
912 	[REGSET_SPE] = {
913 		.core_note_type = NT_PPC_SPE, .n = 35,
914 		.size = sizeof(u32), .align = sizeof(u32),
915 		.active = evr_active, .get = evr_get, .set = evr_set
916 	},
917 #endif
918 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
919 	[REGSET_TM_CGPR] = {
920 		.core_note_type = NT_PPC_TM_CGPR, .n = ELF_NGREG,
921 		.size = sizeof(long), .align = sizeof(long),
922 		.active = tm_cgpr_active,
923 		.get = tm_cgpr32_get, .set = tm_cgpr32_set
924 	},
925 	[REGSET_TM_CFPR] = {
926 		.core_note_type = NT_PPC_TM_CFPR, .n = ELF_NFPREG,
927 		.size = sizeof(double), .align = sizeof(double),
928 		.active = tm_cfpr_active, .get = tm_cfpr_get, .set = tm_cfpr_set
929 	},
930 	[REGSET_TM_CVMX] = {
931 		.core_note_type = NT_PPC_TM_CVMX, .n = ELF_NVMX,
932 		.size = sizeof(vector128), .align = sizeof(vector128),
933 		.active = tm_cvmx_active, .get = tm_cvmx_get, .set = tm_cvmx_set
934 	},
935 	[REGSET_TM_CVSX] = {
936 		.core_note_type = NT_PPC_TM_CVSX, .n = ELF_NVSX,
937 		.size = sizeof(double), .align = sizeof(double),
938 		.active = tm_cvsx_active, .get = tm_cvsx_get, .set = tm_cvsx_set
939 	},
940 	[REGSET_TM_SPR] = {
941 		.core_note_type = NT_PPC_TM_SPR, .n = ELF_NTMSPRREG,
942 		.size = sizeof(u64), .align = sizeof(u64),
943 		.active = tm_spr_active, .get = tm_spr_get, .set = tm_spr_set
944 	},
945 	[REGSET_TM_CTAR] = {
946 		.core_note_type = NT_PPC_TM_CTAR, .n = 1,
947 		.size = sizeof(u64), .align = sizeof(u64),
948 		.active = tm_tar_active, .get = tm_tar_get, .set = tm_tar_set
949 	},
950 	[REGSET_TM_CPPR] = {
951 		.core_note_type = NT_PPC_TM_CPPR, .n = 1,
952 		.size = sizeof(u64), .align = sizeof(u64),
953 		.active = tm_ppr_active, .get = tm_ppr_get, .set = tm_ppr_set
954 	},
955 	[REGSET_TM_CDSCR] = {
956 		.core_note_type = NT_PPC_TM_CDSCR, .n = 1,
957 		.size = sizeof(u64), .align = sizeof(u64),
958 		.active = tm_dscr_active, .get = tm_dscr_get, .set = tm_dscr_set
959 	},
960 #endif
961 #ifdef CONFIG_PPC64
962 	[REGSET_PPR] = {
963 		.core_note_type = NT_PPC_PPR, .n = 1,
964 		.size = sizeof(u64), .align = sizeof(u64),
965 		.get = ppr_get, .set = ppr_set
966 	},
967 	[REGSET_DSCR] = {
968 		.core_note_type = NT_PPC_DSCR, .n = 1,
969 		.size = sizeof(u64), .align = sizeof(u64),
970 		.get = dscr_get, .set = dscr_set
971 	},
972 #endif
973 #ifdef CONFIG_PPC_BOOK3S_64
974 	[REGSET_TAR] = {
975 		.core_note_type = NT_PPC_TAR, .n = 1,
976 		.size = sizeof(u64), .align = sizeof(u64),
977 		.get = tar_get, .set = tar_set
978 	},
979 	[REGSET_EBB] = {
980 		.core_note_type = NT_PPC_EBB, .n = ELF_NEBB,
981 		.size = sizeof(u64), .align = sizeof(u64),
982 		.active = ebb_active, .get = ebb_get, .set = ebb_set
983 	},
984 #endif
985 };
986 
987 static const struct user_regset_view user_ppc_compat_view = {
988 	.name = "ppc", .e_machine = EM_PPC, .ei_osabi = ELF_OSABI,
989 	.regsets = compat_regsets, .n = ARRAY_SIZE(compat_regsets)
990 };
991 
992 const struct user_regset_view *task_user_regset_view(struct task_struct *task)
993 {
994 	if (IS_ENABLED(CONFIG_PPC64) && test_tsk_thread_flag(task, TIF_32BIT))
995 		return &user_ppc_compat_view;
996 	return &user_ppc_native_view;
997 }
998 
999 
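/*
 * Single stepping and branch stepping use two different mechanisms:
 * embedded debug cores (CONFIG_PPC_ADV_DEBUG_REGS) program DBCR0 and set
 * MSR_DE, while classic cores set MSR_SE (single step) or MSR_BE (branch
 * trace) in the thread's saved MSR.
 */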
1000 void user_enable_single_step(struct task_struct *task)
1001 {
1002 	struct pt_regs *regs = task->thread.regs;
1003 
1004 	if (regs != NULL) {
1005 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
1006 		task->thread.debug.dbcr0 &= ~DBCR0_BT;
1007 		task->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
1008 		regs->msr |= MSR_DE;
1009 #else
1010 		regs->msr &= ~MSR_BE;
1011 		regs->msr |= MSR_SE;
1012 #endif
1013 	}
1014 	set_tsk_thread_flag(task, TIF_SINGLESTEP);
1015 }
1016 
1017 void user_enable_block_step(struct task_struct *task)
1018 {
1019 	struct pt_regs *regs = task->thread.regs;
1020 
1021 	if (regs != NULL) {
1022 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
1023 		task->thread.debug.dbcr0 &= ~DBCR0_IC;
1024 		task->thread.debug.dbcr0 = DBCR0_IDM | DBCR0_BT;
1025 		regs->msr |= MSR_DE;
1026 #else
1027 		regs->msr &= ~MSR_SE;
1028 		regs->msr |= MSR_BE;
1029 #endif
1030 	}
1031 	set_tsk_thread_flag(task, TIF_SINGLESTEP);
1032 }
1033 
1034 void user_disable_single_step(struct task_struct *task)
1035 {
1036 	struct pt_regs *regs = task->thread.regs;
1037 
1038 	if (regs != NULL) {
1039 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
1040 		/*
1041 		 * The logic to disable single stepping should be as
1042 		 * simple as turning off the Instruction Complete flag.
1043 		 * And, after doing so, if all debug flags are off, turn
1044 		 * off DBCR0(IDM) and MSR(DE) .... Torez
1045 		 */
1046 		task->thread.debug.dbcr0 &= ~(DBCR0_IC|DBCR0_BT);
1047 		/*
1048 		 * Test to see if any of the DBCR_ACTIVE_EVENTS bits are set.
1049 		 */
1050 		if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
1051 					task->thread.debug.dbcr1)) {
1052 			/*
1053 			 * All debug events were off.
1054 			 */
1055 			task->thread.debug.dbcr0 &= ~DBCR0_IDM;
1056 			regs->msr &= ~MSR_DE;
1057 		}
1058 #else
1059 		regs->msr &= ~(MSR_SE | MSR_BE);
1060 #endif
1061 	}
1062 	clear_tsk_thread_flag(task, TIF_SINGLESTEP);
1063 }
1064 
1065 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1066 void ptrace_triggered(struct perf_event *bp,
1067 		      struct perf_sample_data *data, struct pt_regs *regs)
1068 {
1069 	struct perf_event_attr attr;
1070 
1071 	/*
1072 	 * Disable the breakpoint request here since ptrace has defined a
1073 	 * one-shot behaviour for breakpoint exceptions in PPC64.
1074 	 * The SIGTRAP signal is generated automatically for us in do_dabr().
1075 	 * We don't have to do anything about that here.
1076 	 */
1077 	attr = bp->attr;
1078 	attr.disabled = true;
1079 	modify_user_hw_breakpoint(bp, &attr);
1080 }
1081 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
1082 
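/*
 * ptrace_set_debugreg() implements the legacy PTRACE_SET_DEBUGREG request:
 * data carries a DABR-style value, i.e. the data address with the R/W
 * (and, for DABR, translate) flags in the low bits; a value of zero clears
 * the breakpoint.
 */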
1083 static int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
1084 			       unsigned long data)
1085 {
1086 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1087 	int ret;
1088 	struct thread_struct *thread = &(task->thread);
1089 	struct perf_event *bp;
1090 	struct perf_event_attr attr;
1091 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
1092 #ifndef CONFIG_PPC_ADV_DEBUG_REGS
1093 	bool set_bp = true;
1094 	struct arch_hw_breakpoint hw_brk;
1095 #endif
1096 
1097 	/* For ppc64 we support one DABR and no IABRs at the moment.
1098 	 *  For embedded processors we support one DAC and no IACs at the
1099 	 *  moment.
1100 	 */
1101 	if (addr > 0)
1102 		return -EINVAL;
1103 
1104 	/* The bottom 3 bits in dabr are flags */
1105 	if ((data & ~0x7UL) >= TASK_SIZE)
1106 		return -EIO;
1107 
1108 #ifndef CONFIG_PPC_ADV_DEBUG_REGS
1109 	/* For processors using DABR (i.e. 970), the bottom 3 bits are flags.
1110 	 *  It was assumed, on previous implementations, that 3 bits were
1111 	 *  passed together with the data address, fitting the design of the
1112 	 *  DABR register, as follows:
1113 	 *
1114 	 *  bit 0: Read flag
1115 	 *  bit 1: Write flag
1116 	 *  bit 2: Breakpoint translation
1117 	 *
1118 	 *  Thus, we use them here in the same way.
1119 	 */
1120 
1121 	/* Ensure breakpoint translation bit is set */
1122 	if (data && !(data & HW_BRK_TYPE_TRANSLATE))
1123 		return -EIO;
1124 	hw_brk.address = data & (~HW_BRK_TYPE_DABR);
1125 	hw_brk.type = (data & HW_BRK_TYPE_DABR) | HW_BRK_TYPE_PRIV_ALL;
1126 	hw_brk.len = DABR_MAX_LEN;
1127 	hw_brk.hw_len = DABR_MAX_LEN;
1128 	set_bp = (data) && (hw_brk.type & HW_BRK_TYPE_RDWR);
1129 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1130 	bp = thread->ptrace_bps[0];
1131 	if (!set_bp) {
1132 		if (bp) {
1133 			unregister_hw_breakpoint(bp);
1134 			thread->ptrace_bps[0] = NULL;
1135 		}
1136 		return 0;
1137 	}
1138 	if (bp) {
1139 		attr = bp->attr;
1140 		attr.bp_addr = hw_brk.address;
1141 		attr.bp_len = DABR_MAX_LEN;
1142 		arch_bp_generic_fields(hw_brk.type, &attr.bp_type);
1143 
1144 		/* Enable breakpoint */
1145 		attr.disabled = false;
1146 
1147 		ret =  modify_user_hw_breakpoint(bp, &attr);
1148 		if (ret) {
1149 			return ret;
1150 		}
1151 		thread->ptrace_bps[0] = bp;
1152 		thread->hw_brk = hw_brk;
1153 		return 0;
1154 	}
1155 
1156 	/* Create a new breakpoint request if one doesn't exist already */
1157 	hw_breakpoint_init(&attr);
1158 	attr.bp_addr = hw_brk.address;
1159 	attr.bp_len = DABR_MAX_LEN;
1160 	arch_bp_generic_fields(hw_brk.type,
1161 			       &attr.bp_type);
1162 
1163 	thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr,
1164 					       ptrace_triggered, NULL, task);
1165 	if (IS_ERR(bp)) {
1166 		thread->ptrace_bps[0] = NULL;
1167 		return PTR_ERR(bp);
1168 	}
1169 
1170 #else /* !CONFIG_HAVE_HW_BREAKPOINT */
1171 	if (set_bp && (!ppc_breakpoint_available()))
1172 		return -ENODEV;
1173 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
1174 	task->thread.hw_brk = hw_brk;
1175 #else /* CONFIG_PPC_ADV_DEBUG_REGS */
1176 	/* As described above, it was assumed 3 bits were passed with the data
1177 	 *  address, but we will assume only the mode bits will be passed
1178 	 *  so as not to cause alignment restrictions for DAC-based processors.
1179 	 */
1180 
1181 	/* DAC's hold the whole address without any mode flags */
1182 	task->thread.debug.dac1 = data & ~0x3UL;
1183 
1184 	if (task->thread.debug.dac1 == 0) {
1185 		dbcr_dac(task) &= ~(DBCR_DAC1R | DBCR_DAC1W);
1186 		if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
1187 					task->thread.debug.dbcr1)) {
1188 			task->thread.regs->msr &= ~MSR_DE;
1189 			task->thread.debug.dbcr0 &= ~DBCR0_IDM;
1190 		}
1191 		return 0;
1192 	}
1193 
1194 	/* Read or Write bits must be set */
1195 
1196 	if (!(data & 0x3UL))
1197 		return -EINVAL;
1198 
1199 	/* Set the Internal Debugging flag (IDM bit 1) for the DBCR0
1200 	   register */
1201 	task->thread.debug.dbcr0 |= DBCR0_IDM;
1202 
1203 	/* Check for write and read flags and set DBCR0
1204 	   accordingly */
1205 	dbcr_dac(task) &= ~(DBCR_DAC1R|DBCR_DAC1W);
1206 	if (data & 0x1UL)
1207 		dbcr_dac(task) |= DBCR_DAC1R;
1208 	if (data & 0x2UL)
1209 		dbcr_dac(task) |= DBCR_DAC1W;
1210 	task->thread.regs->msr |= MSR_DE;
1211 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
1212 	return 0;
1213 }
1214 
1215 /*
1216  * Called by kernel/ptrace.c when detaching..
1217  *
1218  * Make sure single step bits etc are not set.
1219  */
1220 void ptrace_disable(struct task_struct *child)
1221 {
1222 	/* make sure the single step bit is not set. */
1223 	user_disable_single_step(child);
1224 }
1225 
1226 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
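/*
 * Instruction breakpoints are allocated from the IAC slots: an exact-address
 * breakpoint uses a single IAC, while a range breakpoint consumes an IAC
 * pair (1/2 or 3/4). The slot number is returned as the handle that
 * userspace later passes to PPC_PTRACE_DELHWDEBUG.
 */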
1227 static long set_instruction_bp(struct task_struct *child,
1228 			      struct ppc_hw_breakpoint *bp_info)
1229 {
1230 	int slot;
1231 	int slot1_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC1) != 0);
1232 	int slot2_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC2) != 0);
1233 	int slot3_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC3) != 0);
1234 	int slot4_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC4) != 0);
1235 
1236 	if (dbcr_iac_range(child) & DBCR_IAC12MODE)
1237 		slot2_in_use = 1;
1238 	if (dbcr_iac_range(child) & DBCR_IAC34MODE)
1239 		slot4_in_use = 1;
1240 
1241 	if (bp_info->addr >= TASK_SIZE)
1242 		return -EIO;
1243 
1244 	if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT) {
1245 
1246 		/* Make sure range is valid. */
1247 		if (bp_info->addr2 >= TASK_SIZE)
1248 			return -EIO;
1249 
1250 		/* We need a pair of IAC registers */
1251 		if ((!slot1_in_use) && (!slot2_in_use)) {
1252 			slot = 1;
1253 			child->thread.debug.iac1 = bp_info->addr;
1254 			child->thread.debug.iac2 = bp_info->addr2;
1255 			child->thread.debug.dbcr0 |= DBCR0_IAC1;
1256 			if (bp_info->addr_mode ==
1257 					PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
1258 				dbcr_iac_range(child) |= DBCR_IAC12X;
1259 			else
1260 				dbcr_iac_range(child) |= DBCR_IAC12I;
1261 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
1262 		} else if ((!slot3_in_use) && (!slot4_in_use)) {
1263 			slot = 3;
1264 			child->thread.debug.iac3 = bp_info->addr;
1265 			child->thread.debug.iac4 = bp_info->addr2;
1266 			child->thread.debug.dbcr0 |= DBCR0_IAC3;
1267 			if (bp_info->addr_mode ==
1268 					PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
1269 				dbcr_iac_range(child) |= DBCR_IAC34X;
1270 			else
1271 				dbcr_iac_range(child) |= DBCR_IAC34I;
1272 #endif
1273 		} else
1274 			return -ENOSPC;
1275 	} else {
1276 		/* We only need one.  If possible leave a pair free in
1277 		 * case a range is needed later.
1278 		 */
1279 		if (!slot1_in_use) {
1280 			/*
1281 			 * Don't use iac1 if iac1-iac2 are free and either
1282 			 * iac3 or iac4 (but not both) are free
1283 			 */
1284 			if (slot2_in_use || (slot3_in_use == slot4_in_use)) {
1285 				slot = 1;
1286 				child->thread.debug.iac1 = bp_info->addr;
1287 				child->thread.debug.dbcr0 |= DBCR0_IAC1;
1288 				goto out;
1289 			}
1290 		}
1291 		if (!slot2_in_use) {
1292 			slot = 2;
1293 			child->thread.debug.iac2 = bp_info->addr;
1294 			child->thread.debug.dbcr0 |= DBCR0_IAC2;
1295 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
1296 		} else if (!slot3_in_use) {
1297 			slot = 3;
1298 			child->thread.debug.iac3 = bp_info->addr;
1299 			child->thread.debug.dbcr0 |= DBCR0_IAC3;
1300 		} else if (!slot4_in_use) {
1301 			slot = 4;
1302 			child->thread.debug.iac4 = bp_info->addr;
1303 			child->thread.debug.dbcr0 |= DBCR0_IAC4;
1304 #endif
1305 		} else
1306 			return -ENOSPC;
1307 	}
1308 out:
1309 	child->thread.debug.dbcr0 |= DBCR0_IDM;
1310 	child->thread.regs->msr |= MSR_DE;
1311 
1312 	return slot;
1313 }
1314 
1315 static int del_instruction_bp(struct task_struct *child, int slot)
1316 {
1317 	switch (slot) {
1318 	case 1:
1319 		if ((child->thread.debug.dbcr0 & DBCR0_IAC1) == 0)
1320 			return -ENOENT;
1321 
1322 		if (dbcr_iac_range(child) & DBCR_IAC12MODE) {
1323 			/* address range - clear slots 1 & 2 */
1324 			child->thread.debug.iac2 = 0;
1325 			dbcr_iac_range(child) &= ~DBCR_IAC12MODE;
1326 		}
1327 		child->thread.debug.iac1 = 0;
1328 		child->thread.debug.dbcr0 &= ~DBCR0_IAC1;
1329 		break;
1330 	case 2:
1331 		if ((child->thread.debug.dbcr0 & DBCR0_IAC2) == 0)
1332 			return -ENOENT;
1333 
1334 		if (dbcr_iac_range(child) & DBCR_IAC12MODE)
1335 			/* used in a range */
1336 			return -EINVAL;
1337 		child->thread.debug.iac2 = 0;
1338 		child->thread.debug.dbcr0 &= ~DBCR0_IAC2;
1339 		break;
1340 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
1341 	case 3:
1342 		if ((child->thread.debug.dbcr0 & DBCR0_IAC3) == 0)
1343 			return -ENOENT;
1344 
1345 		if (dbcr_iac_range(child) & DBCR_IAC34MODE) {
1346 			/* address range - clear slots 3 & 4 */
1347 			child->thread.debug.iac4 = 0;
1348 			dbcr_iac_range(child) &= ~DBCR_IAC34MODE;
1349 		}
1350 		child->thread.debug.iac3 = 0;
1351 		child->thread.debug.dbcr0 &= ~DBCR0_IAC3;
1352 		break;
1353 	case 4:
1354 		if ((child->thread.debug.dbcr0 & DBCR0_IAC4) == 0)
1355 			return -ENOENT;
1356 
1357 		if (dbcr_iac_range(child) & DBCR_IAC34MODE)
1358 			/* Used in a range */
1359 			return -EINVAL;
1360 		child->thread.debug.iac4 = 0;
1361 		child->thread.debug.dbcr0 &= ~DBCR0_IAC4;
1362 		break;
1363 #endif
1364 	default:
1365 		return -EINVAL;
1366 	}
1367 	return 0;
1368 }
1369 
1370 static int set_dac(struct task_struct *child, struct ppc_hw_breakpoint *bp_info)
1371 {
1372 	int byte_enable =
1373 		(bp_info->condition_mode >> PPC_BREAKPOINT_CONDITION_BE_SHIFT)
1374 		& 0xf;
1375 	int condition_mode =
1376 		bp_info->condition_mode & PPC_BREAKPOINT_CONDITION_MODE;
1377 	int slot;
1378 
1379 	if (byte_enable && (condition_mode == 0))
1380 		return -EINVAL;
1381 
1382 	if (bp_info->addr >= TASK_SIZE)
1383 		return -EIO;
1384 
1385 	if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0) {
1386 		slot = 1;
1387 		if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
1388 			dbcr_dac(child) |= DBCR_DAC1R;
1389 		if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
1390 			dbcr_dac(child) |= DBCR_DAC1W;
1391 		child->thread.debug.dac1 = (unsigned long)bp_info->addr;
1392 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
1393 		if (byte_enable) {
1394 			child->thread.debug.dvc1 =
1395 				(unsigned long)bp_info->condition_value;
1396 			child->thread.debug.dbcr2 |=
1397 				((byte_enable << DBCR2_DVC1BE_SHIFT) |
1398 				 (condition_mode << DBCR2_DVC1M_SHIFT));
1399 		}
1400 #endif
1401 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
1402 	} else if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) {
1403 		/* Both dac1 and dac2 are part of a range */
1404 		return -ENOSPC;
1405 #endif
1406 	} else if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0) {
1407 		slot = 2;
1408 		if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
1409 			dbcr_dac(child) |= DBCR_DAC2R;
1410 		if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
1411 			dbcr_dac(child) |= DBCR_DAC2W;
1412 		child->thread.debug.dac2 = (unsigned long)bp_info->addr;
1413 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
1414 		if (byte_enable) {
1415 			child->thread.debug.dvc2 =
1416 				(unsigned long)bp_info->condition_value;
1417 			child->thread.debug.dbcr2 |=
1418 				((byte_enable << DBCR2_DVC2BE_SHIFT) |
1419 				 (condition_mode << DBCR2_DVC2M_SHIFT));
1420 		}
1421 #endif
1422 	} else
1423 		return -ENOSPC;
1424 	child->thread.debug.dbcr0 |= DBCR0_IDM;
1425 	child->thread.regs->msr |= MSR_DE;
1426 
1427 	return slot + 4;
1428 }
1429 
1430 static int del_dac(struct task_struct *child, int slot)
1431 {
1432 	if (slot == 1) {
1433 		if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0)
1434 			return -ENOENT;
1435 
1436 		child->thread.debug.dac1 = 0;
1437 		dbcr_dac(child) &= ~(DBCR_DAC1R | DBCR_DAC1W);
1438 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
1439 		if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) {
1440 			child->thread.debug.dac2 = 0;
1441 			child->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE;
1442 		}
1443 		child->thread.debug.dbcr2 &= ~(DBCR2_DVC1M | DBCR2_DVC1BE);
1444 #endif
1445 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
1446 		child->thread.debug.dvc1 = 0;
1447 #endif
1448 	} else if (slot == 2) {
1449 		if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0)
1450 			return -ENOENT;
1451 
1452 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
1453 		if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE)
1454 			/* Part of a range */
1455 			return -EINVAL;
1456 		child->thread.debug.dbcr2 &= ~(DBCR2_DVC2M | DBCR2_DVC2BE);
1457 #endif
1458 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
1459 		child->thread.debug.dvc2 = 0;
1460 #endif
1461 		child->thread.debug.dac2 = 0;
1462 		dbcr_dac(child) &= ~(DBCR_DAC2R | DBCR_DAC2W);
1463 	} else
1464 		return -EINVAL;
1465 
1466 	return 0;
1467 }
1468 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
1469 
1470 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
1471 static int set_dac_range(struct task_struct *child,
1472 			 struct ppc_hw_breakpoint *bp_info)
1473 {
1474 	int mode = bp_info->addr_mode & PPC_BREAKPOINT_MODE_MASK;
1475 
1476 	/* We don't allow range watchpoints to be used with DVC */
1477 	if (bp_info->condition_mode)
1478 		return -EINVAL;
1479 
1480 	/*
1481 	 * Best effort to verify the address range.  The user/supervisor bits
1482 	 * prevent trapping in kernel space, but let's fail on an obvious bad
1483 	 * range.  The simple test on the mask is not fool-proof, and any
1484 	 * exclusive range will spill over into kernel space.
1485 	 */
1486 	if (bp_info->addr >= TASK_SIZE)
1487 		return -EIO;
1488 	if (mode == PPC_BREAKPOINT_MODE_MASK) {
1489 		/*
1490 		 * dac2 is a bitmask.  Don't allow a mask that makes a
1491 		 * kernel space address from a valid dac1 value
1492 		 */
1493 		if (~((unsigned long)bp_info->addr2) >= TASK_SIZE)
1494 			return -EIO;
1495 	} else {
1496 		/*
1497 		 * For range breakpoints, addr2 must also be a valid address
1498 		 */
1499 		if (bp_info->addr2 >= TASK_SIZE)
1500 			return -EIO;
1501 	}
1502 
1503 	if (child->thread.debug.dbcr0 &
1504 	    (DBCR0_DAC1R | DBCR0_DAC1W | DBCR0_DAC2R | DBCR0_DAC2W))
1505 		return -ENOSPC;
1506 
1507 	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
1508 		child->thread.debug.dbcr0 |= (DBCR0_DAC1R | DBCR0_IDM);
1509 	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
1510 		child->thread.debug.dbcr0 |= (DBCR0_DAC1W | DBCR0_IDM);
1511 	child->thread.debug.dac1 = bp_info->addr;
1512 	child->thread.debug.dac2 = bp_info->addr2;
1513 	if (mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE)
1514 		child->thread.debug.dbcr2  |= DBCR2_DAC12M;
1515 	else if (mode == PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
1516 		child->thread.debug.dbcr2  |= DBCR2_DAC12MX;
1517 	else	/* PPC_BREAKPOINT_MODE_MASK */
1518 		child->thread.debug.dbcr2  |= DBCR2_DAC12MM;
1519 	child->thread.regs->msr |= MSR_DE;
1520 
1521 	return 5;
1522 }
1523 #endif /* CONFIG_PPC_ADV_DEBUG_DAC_RANGE */
1524 
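/*
 * PPC_PTRACE_SETHWDEBUG: install the hardware breakpoint or watchpoint
 * described by bp_info and return a positive handle for it (or a negative
 * errno), which can later be passed to PPC_PTRACE_DELHWDEBUG.
 */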
1525 static long ppc_set_hwdebug(struct task_struct *child,
1526 		     struct ppc_hw_breakpoint *bp_info)
1527 {
1528 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1529 	int len = 0;
1530 	struct thread_struct *thread = &(child->thread);
1531 	struct perf_event *bp;
1532 	struct perf_event_attr attr;
1533 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
1534 #ifndef CONFIG_PPC_ADV_DEBUG_REGS
1535 	struct arch_hw_breakpoint brk;
1536 #endif
1537 
1538 	if (bp_info->version != 1)
1539 		return -ENOTSUPP;
1540 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
1541 	/*
1542 	 * Check for invalid flags and combinations
1543 	 */
1544 	if ((bp_info->trigger_type == 0) ||
1545 	    (bp_info->trigger_type & ~(PPC_BREAKPOINT_TRIGGER_EXECUTE |
1546 				       PPC_BREAKPOINT_TRIGGER_RW)) ||
1547 	    (bp_info->addr_mode & ~PPC_BREAKPOINT_MODE_MASK) ||
1548 	    (bp_info->condition_mode &
1549 	     ~(PPC_BREAKPOINT_CONDITION_MODE |
1550 	       PPC_BREAKPOINT_CONDITION_BE_ALL)))
1551 		return -EINVAL;
1552 #if CONFIG_PPC_ADV_DEBUG_DVCS == 0
1553 	if (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
1554 		return -EINVAL;
1555 #endif
1556 
1557 	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_EXECUTE) {
1558 		if ((bp_info->trigger_type != PPC_BREAKPOINT_TRIGGER_EXECUTE) ||
1559 		    (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE))
1560 			return -EINVAL;
1561 		return set_instruction_bp(child, bp_info);
1562 	}
1563 	if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
1564 		return set_dac(child, bp_info);
1565 
1566 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
1567 	return set_dac_range(child, bp_info);
1568 #else
1569 	return -EINVAL;
1570 #endif
1571 #else /* !CONFIG_PPC_ADV_DEBUG_REGS */
1572 	/*
1573 	 * We only support one data breakpoint
1574 	 */
1575 	if ((bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_RW) == 0 ||
1576 	    (bp_info->trigger_type & ~PPC_BREAKPOINT_TRIGGER_RW) != 0 ||
1577 	    bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
1578 		return -EINVAL;
1579 
1580 	if ((unsigned long)bp_info->addr >= TASK_SIZE)
1581 		return -EIO;
1582 
1583 	brk.address = bp_info->addr & ~HW_BREAKPOINT_ALIGN;
1584 	brk.type = HW_BRK_TYPE_TRANSLATE;
1585 	brk.len = DABR_MAX_LEN;
1586 	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
1587 		brk.type |= HW_BRK_TYPE_READ;
1588 	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
1589 		brk.type |= HW_BRK_TYPE_WRITE;
1590 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1591 	if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE)
1592 		len = bp_info->addr2 - bp_info->addr;
1593 	else if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
1594 		len = 1;
1595 	else
1596 		return -EINVAL;
1597 	bp = thread->ptrace_bps[0];
1598 	if (bp)
1599 		return -ENOSPC;
1600 
1601 	/* Create a new breakpoint request if one doesn't exist already */
1602 	hw_breakpoint_init(&attr);
1603 	attr.bp_addr = (unsigned long)bp_info->addr;
1604 	attr.bp_len = len;
1605 	arch_bp_generic_fields(brk.type, &attr.bp_type);
1606 
1607 	thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr,
1608 					       ptrace_triggered, NULL, child);
1609 	if (IS_ERR(bp)) {
1610 		thread->ptrace_bps[0] = NULL;
1611 		return PTR_ERR(bp);
1612 	}
1613 
1614 	return 1;
1615 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
1616 
1617 	if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT)
1618 		return -EINVAL;
1619 
1620 	if (child->thread.hw_brk.address)
1621 		return -ENOSPC;
1622 
1623 	if (!ppc_breakpoint_available())
1624 		return -ENODEV;
1625 
1626 	child->thread.hw_brk = brk;
1627 
1628 	return 1;
1629 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
1630 }
1631 
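/*
 * PPC_PTRACE_DELHWDEBUG: remove the breakpoint identified by the handle
 * returned from PPC_PTRACE_SETHWDEBUG. With CONFIG_PPC_ADV_DEBUG_REGS,
 * handles 1-4 are instruction (IAC) slots and handles above 4 are data
 * (DAC) slots; otherwise only handle 1 is valid.
 */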
1632 static long ppc_del_hwdebug(struct task_struct *child, long data)
1633 {
1634 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1635 	int ret = 0;
1636 	struct thread_struct *thread = &(child->thread);
1637 	struct perf_event *bp;
1638 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
1639 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
1640 	int rc;
1641 
1642 	if (data <= 4)
1643 		rc = del_instruction_bp(child, (int)data);
1644 	else
1645 		rc = del_dac(child, (int)data - 4);
1646 
1647 	if (!rc) {
1648 		if (!DBCR_ACTIVE_EVENTS(child->thread.debug.dbcr0,
1649 					child->thread.debug.dbcr1)) {
1650 			child->thread.debug.dbcr0 &= ~DBCR0_IDM;
1651 			child->thread.regs->msr &= ~MSR_DE;
1652 		}
1653 	}
1654 	return rc;
1655 #else
1656 	if (data != 1)
1657 		return -EINVAL;
1658 
1659 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1660 	bp = thread->ptrace_bps[0];
1661 	if (bp) {
1662 		unregister_hw_breakpoint(bp);
1663 		thread->ptrace_bps[0] = NULL;
1664 	} else
1665 		ret = -ENOENT;
1666 	return ret;
1667 #else /* CONFIG_HAVE_HW_BREAKPOINT */
1668 	if (child->thread.hw_brk.address == 0)
1669 		return -ENOENT;
1670 
1671 	child->thread.hw_brk.address = 0;
1672 	child->thread.hw_brk.type = 0;
1673 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
1674 
1675 	return 0;
1676 #endif
1677 }
1678 
1679 long arch_ptrace(struct task_struct *child, long request,
1680 		 unsigned long addr, unsigned long data)
1681 {
1682 	int ret = -EPERM;
1683 	void __user *datavp = (void __user *) data;
1684 	unsigned long __user *datalp = datavp;
1685 
1686 	switch (request) {
1687 	/* read the word at location addr in the USER area. */
1688 	case PTRACE_PEEKUSR: {
1689 		unsigned long index, tmp;
1690 
1691 		ret = -EIO;
1692 		/* convert to index and check */
1693 #ifdef CONFIG_PPC32
1694 		index = addr >> 2;
1695 		if ((addr & 3) || (index > PT_FPSCR)
1696 		    || (child->thread.regs == NULL))
1697 #else
1698 		index = addr >> 3;
1699 		if ((addr & 7) || (index > PT_FPSCR))
1700 #endif
1701 			break;
1702 
1703 		CHECK_FULL_REGS(child->thread.regs);
1704 		if (index < PT_FPR0) {
1705 			ret = ptrace_get_reg(child, (int) index, &tmp);
1706 			if (ret)
1707 				break;
1708 		} else {
1709 			unsigned int fpidx = index - PT_FPR0;
1710 
1711 			flush_fp_to_thread(child);
1712 			if (fpidx < (PT_FPSCR - PT_FPR0))
1713 				memcpy(&tmp, &child->thread.TS_FPR(fpidx),
1714 				       sizeof(long));
1715 			else
1716 				tmp = child->thread.fp_state.fpscr;
1717 		}
1718 		ret = put_user(tmp, datalp);
1719 		break;
1720 	}
1721 
1722 	/* write the word at location addr in the USER area */
1723 	case PTRACE_POKEUSR: {
1724 		unsigned long index;
1725 
1726 		ret = -EIO;
1727 		/* convert to index and check */
1728 #ifdef CONFIG_PPC32
1729 		index = addr >> 2;
1730 		if ((addr & 3) || (index > PT_FPSCR)
1731 		    || (child->thread.regs == NULL))
1732 #else
1733 		index = addr >> 3;
1734 		if ((addr & 7) || (index > PT_FPSCR))
1735 #endif
1736 			break;
1737 
1738 		CHECK_FULL_REGS(child->thread.regs);
1739 		if (index < PT_FPR0) {
1740 			ret = ptrace_put_reg(child, index, data);
1741 		} else {
1742 			unsigned int fpidx = index - PT_FPR0;
1743 
1744 			flush_fp_to_thread(child);
1745 			if (fpidx < (PT_FPSCR - PT_FPR0))
1746 				memcpy(&child->thread.TS_FPR(fpidx), &data,
1747 				       sizeof(long));
1748 			else
1749 				child->thread.fp_state.fpscr = data;
1750 			ret = 0;
1751 		}
1752 		break;
1753 	}
1754 
1755 	case PPC_PTRACE_GETHWDBGINFO: {
1756 		struct ppc_debug_info dbginfo;
1757 
1758 		dbginfo.version = 1;
1759 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
1760 		dbginfo.num_instruction_bps = CONFIG_PPC_ADV_DEBUG_IACS;
1761 		dbginfo.num_data_bps = CONFIG_PPC_ADV_DEBUG_DACS;
1762 		dbginfo.num_condition_regs = CONFIG_PPC_ADV_DEBUG_DVCS;
1763 		dbginfo.data_bp_alignment = 4;
1764 		dbginfo.sizeof_condition = 4;
1765 		dbginfo.features = PPC_DEBUG_FEATURE_INSN_BP_RANGE |
1766 				   PPC_DEBUG_FEATURE_INSN_BP_MASK;
1767 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
1768 		dbginfo.features |=
1769 				   PPC_DEBUG_FEATURE_DATA_BP_RANGE |
1770 				   PPC_DEBUG_FEATURE_DATA_BP_MASK;
1771 #endif
1772 #else /* !CONFIG_PPC_ADV_DEBUG_REGS */
1773 		dbginfo.num_instruction_bps = 0;
1774 		if (ppc_breakpoint_available())
1775 			dbginfo.num_data_bps = 1;
1776 		else
1777 			dbginfo.num_data_bps = 0;
1778 		dbginfo.num_condition_regs = 0;
1779 		dbginfo.data_bp_alignment = sizeof(long);
1780 		dbginfo.sizeof_condition = 0;
1781 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1782 		dbginfo.features = PPC_DEBUG_FEATURE_DATA_BP_RANGE;
1783 		if (dawr_enabled())
1784 			dbginfo.features |= PPC_DEBUG_FEATURE_DATA_BP_DAWR;
1785 #else
1786 		dbginfo.features = 0;
1787 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
1788 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
1789 
1790 		if (copy_to_user(datavp, &dbginfo,
1791 				 sizeof(struct ppc_debug_info)))
1792 			return -EFAULT;
1793 		return 0;
1794 	}
1795 
1796 	case PPC_PTRACE_SETHWDEBUG: {
1797 		struct ppc_hw_breakpoint bp_info;
1798 
1799 		if (copy_from_user(&bp_info, datavp,
1800 				   sizeof(struct ppc_hw_breakpoint)))
1801 			return -EFAULT;
1802 		return ppc_set_hwdebug(child, &bp_info);
1803 	}
1804 
1805 	case PPC_PTRACE_DELHWDEBUG: {
1806 		ret = ppc_del_hwdebug(child, data);
1807 		break;
1808 	}
1809 
1810 	case PTRACE_GET_DEBUGREG: {
1811 #ifndef CONFIG_PPC_ADV_DEBUG_REGS
1812 		unsigned long dabr_fake;
1813 #endif
1814 		ret = -EINVAL;
1815 		/* We only support one DABR and no IABRs at the moment */
1816 		if (addr > 0)
1817 			break;
1818 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
1819 		ret = put_user(child->thread.debug.dac1, datalp);
1820 #else
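		/*
		 * No raw DABR here: synthesise a DABR-style value from the
		 * generic hw_brk state (breakpoint address in the upper bits,
		 * the DABR read/write/translate flags in the low bits) so
		 * legacy DABR consumers keep working.
		 */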
1821 		dabr_fake = ((child->thread.hw_brk.address & (~HW_BRK_TYPE_DABR)) |
1822 			     (child->thread.hw_brk.type & HW_BRK_TYPE_DABR));
1823 		ret = put_user(dabr_fake, datalp);
1824 #endif
1825 		break;
1826 	}
1827 
1828 	case PTRACE_SET_DEBUGREG:
1829 		ret = ptrace_set_debugreg(child, addr, data);
1830 		break;
1831 
1832 #ifdef CONFIG_PPC64
1833 	case PTRACE_GETREGS64:
1834 #endif
1835 	case PTRACE_GETREGS:	/* Get all pt_regs from the child. */
1836 		return copy_regset_to_user(child, &user_ppc_native_view,
1837 					   REGSET_GPR,
1838 					   0, sizeof(struct user_pt_regs),
1839 					   datavp);
1840 
1841 #ifdef CONFIG_PPC64
1842 	case PTRACE_SETREGS64:
1843 #endif
1844 	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
1845 		return copy_regset_from_user(child, &user_ppc_native_view,
1846 					     REGSET_GPR,
1847 					     0, sizeof(struct user_pt_regs),
1848 					     datavp);
1849 
1850 	case PTRACE_GETFPREGS: /* Get the child FPU state (FPR0...31 + FPSCR) */
1851 		return copy_regset_to_user(child, &user_ppc_native_view,
1852 					   REGSET_FPR,
1853 					   0, sizeof(elf_fpregset_t),
1854 					   datavp);
1855 
1856 	case PTRACE_SETFPREGS: /* Set the child FPU state (FPR0...31 + FPSCR) */
1857 		return copy_regset_from_user(child, &user_ppc_native_view,
1858 					     REGSET_FPR,
1859 					     0, sizeof(elf_fpregset_t),
1860 					     datavp);
1861 
1862 #ifdef CONFIG_ALTIVEC
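	/*
	 * The VMX dump is laid out as vr0..vr31, then VSCR (occupying a full
	 * vector128 slot), then the 32-bit VRSAVE word, which is where the
	 * 33 * sizeof(vector128) + sizeof(u32) size below comes from.
	 */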
1863 	case PTRACE_GETVRREGS:
1864 		return copy_regset_to_user(child, &user_ppc_native_view,
1865 					   REGSET_VMX,
1866 					   0, (33 * sizeof(vector128) +
1867 					       sizeof(u32)),
1868 					   datavp);
1869 
1870 	case PTRACE_SETVRREGS:
1871 		return copy_regset_from_user(child, &user_ppc_native_view,
1872 					     REGSET_VMX,
1873 					     0, (33 * sizeof(vector128) +
1874 						 sizeof(u32)),
1875 					     datavp);
1876 #endif
1877 #ifdef CONFIG_VSX
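	/*
	 * Only the low doubleword of each of VSR0..VSR31 is transferred here
	 * (32 * sizeof(double)); the other halves overlap the FPRs and are
	 * reached via the FP regset instead.
	 */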
1878 	case PTRACE_GETVSRREGS:
1879 		return copy_regset_to_user(child, &user_ppc_native_view,
1880 					   REGSET_VSX,
1881 					   0, 32 * sizeof(double),
1882 					   datavp);
1883 
1884 	case PTRACE_SETVSRREGS:
1885 		return copy_regset_from_user(child, &user_ppc_native_view,
1886 					     REGSET_VSX,
1887 					     0, 32 * sizeof(double),
1888 					     datavp);
1889 #endif
1890 #ifdef CONFIG_SPE
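	/*
	 * SPE state is dumped as the 32 high-word evr[] entries, the 64-bit
	 * accumulator and SPEFSCR: 35 32-bit words in total.
	 */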
1891 	case PTRACE_GETEVRREGS:
1892 		/* Get the child SPE register state. */
1893 		return copy_regset_to_user(child, &user_ppc_native_view,
1894 					   REGSET_SPE, 0, 35 * sizeof(u32),
1895 					   datavp);
1896 
1897 	case PTRACE_SETEVRREGS:
1898 		/* Set the child SPE register state. */
1899 		return copy_regset_from_user(child, &user_ppc_native_view,
1900 					     REGSET_SPE, 0, 35 * sizeof(u32),
1901 					     datavp);
1902 #endif
1903 
1904 	default:
1905 		ret = ptrace_request(child, request, addr, data);
1906 		break;
1907 	}
1908 	return ret;
1909 }
1910 
1911 #ifdef CONFIG_SECCOMP
1912 static int do_seccomp(struct pt_regs *regs)
1913 {
1914 	if (!test_thread_flag(TIF_SECCOMP))
1915 		return 0;
1916 
1917 	/*
1918 	 * The ABI we present to seccomp tracers is that r3 contains
1919 	 * the syscall return value and orig_gpr3 contains the first
1920 	 * syscall parameter. This is different to the ptrace ABI where
1921 	 * both r3 and orig_gpr3 contain the first syscall parameter.
1922 	 */
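	/*
	 * Concretely (illustrative): a SECCOMP_RET_TRACE tracer that wants to
	 * rewrite the first argument must poke orig_gpr3; anything it writes
	 * to r3 is overwritten below once the syscall is allowed.
	 */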
1923 	regs->gpr[3] = -ENOSYS;
1924 
1925 	/*
1926 	 * We use the __ version here because we have already checked
1927 	 * TIF_SECCOMP. If this fails there is nothing left to do: we have
1928 	 * already loaded -ENOSYS into r3, or seccomp has put something
1929 	 * else in r3 (via SECCOMP_RET_ERRNO/TRACE).
1930 	 */
1931 	if (__secure_computing(NULL))
1932 		return -1;
1933 
1934 	/*
1935 	 * The syscall was allowed by seccomp, restore the register
1936 	 * state to what audit expects.
1937 	 * Note that we use orig_gpr3, which means a seccomp tracer can
1938 	 * modify the first syscall parameter (in orig_gpr3) and also
1939 	 * allow the syscall to proceed.
1940 	 */
1941 	regs->gpr[3] = regs->orig_gpr3;
1942 
1943 	return 0;
1944 }
1945 #else
1946 static inline int do_seccomp(struct pt_regs *regs) { return 0; }
1947 #endif /* CONFIG_SECCOMP */
1948 
1949 /**
1950  * do_syscall_trace_enter() - Do syscall tracing on kernel entry.
1951  * @regs: the pt_regs of the task to trace (current)
1952  *
1953  * Performs various types of tracing on syscall entry. This includes seccomp,
1954  * ptrace, syscall tracepoints and audit.
1955  *
1956  * The pt_regs are potentially visible to userspace via ptrace, so their
1957  * contents are ABI.
1958  *
1959  * One or more of the tracers may modify the contents of pt_regs, in particular
1960  * to modify arguments or even the syscall number itself.
1961  *
1962  * A tracer may also choose to reject the system call. In that case this
1963  * function returns an invalid syscall number and puts an appropriate return
1964  * value in regs->gpr[3].
1965  *
1966  * Return: the (possibly changed) syscall number.
1967  */
1968 long do_syscall_trace_enter(struct pt_regs *regs)
1969 {
1970 	u32 flags;
1971 
1972 	user_exit();
1973 
1974 	flags = READ_ONCE(current_thread_info()->flags) &
1975 		(_TIF_SYSCALL_EMU | _TIF_SYSCALL_TRACE);
1976 
1977 	if (flags) {
1978 		int rc = tracehook_report_syscall_entry(regs);
1979 
1980 		if (unlikely(flags & _TIF_SYSCALL_EMU)) {
1981 			/*
1982 			 * A nonzero return code from
1983 			 * tracehook_report_syscall_entry() tells us to prevent
1984 			 * the syscall execution, but we are not going to
1985 			 * execute it anyway.
1986 			 *
1987 			 * Returning -1 will skip the syscall execution. We want
1988 			 * to avoid clobbering any registers, so we don't goto
1989 			 * the skip label below.
1990 			 */
1991 			return -1;
1992 		}
1993 
1994 		if (rc) {
1995 			/*
1996 			 * The tracer decided to abort the syscall. Note that
1997 			 * the tracer may also just change regs->gpr[0] to an
1998 			 * invalid syscall number, that is handled below on the
1999 			 * exit path.
2000 			 */
2001 			goto skip;
2002 		}
2003 	}
2004 
2005 	/* Run seccomp after ptrace; allow it to set gpr[3]. */
2006 	if (do_seccomp(regs))
2007 		return -1;
2008 
2009 	/* Avoid trace and audit when syscall is invalid. */
2010 	if (regs->gpr[0] >= NR_syscalls)
2011 		goto skip;
2012 
2013 	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
2014 		trace_sys_enter(regs, regs->gpr[0]);
2015 
2016 	if (!is_32bit_task())
2017 		audit_syscall_entry(regs->gpr[0], regs->gpr[3], regs->gpr[4],
2018 				    regs->gpr[5], regs->gpr[6]);
2019 	else
2020 		audit_syscall_entry(regs->gpr[0],
2021 				    regs->gpr[3] & 0xffffffff,
2022 				    regs->gpr[4] & 0xffffffff,
2023 				    regs->gpr[5] & 0xffffffff,
2024 				    regs->gpr[6] & 0xffffffff);
2025 
2026 	/* Return the possibly modified but valid syscall number */
2027 	return regs->gpr[0];
2028 
2029 skip:
2030 	/*
2031 	 * If we are aborting explicitly, or if the syscall number is
2032 	 * now invalid, set the return value to -ENOSYS.
2033 	 */
2034 	regs->gpr[3] = -ENOSYS;
2035 	return -1;
2036 }
2037 
2038 void do_syscall_trace_leave(struct pt_regs *regs)
2039 {
2040 	int step;
2041 
2042 	audit_syscall_exit(regs);
2043 
2044 	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
2045 		trace_sys_exit(regs, regs->result);
2046 
2047 	step = test_thread_flag(TIF_SINGLESTEP);
2048 	if (step || test_thread_flag(TIF_SYSCALL_TRACE))
2049 		tracehook_report_syscall_exit(regs, step);
2050 
2051 	user_enter();
2052 }
2053 
2054 void __init pt_regs_check(void);
2055 
2056 /*
2057  * Dummy function; its only purpose is to break the build if struct pt_regs
2058  * and struct user_pt_regs don't match.
2059  */
2060 void __init pt_regs_check(void)
2061 {
2062 	BUILD_BUG_ON(offsetof(struct pt_regs, gpr) !=
2063 		     offsetof(struct user_pt_regs, gpr));
2064 	BUILD_BUG_ON(offsetof(struct pt_regs, nip) !=
2065 		     offsetof(struct user_pt_regs, nip));
2066 	BUILD_BUG_ON(offsetof(struct pt_regs, msr) !=
2067 		     offsetof(struct user_pt_regs, msr));
2070 	BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
2071 		     offsetof(struct user_pt_regs, orig_gpr3));
2072 	BUILD_BUG_ON(offsetof(struct pt_regs, ctr) !=
2073 		     offsetof(struct user_pt_regs, ctr));
2074 	BUILD_BUG_ON(offsetof(struct pt_regs, link) !=
2075 		     offsetof(struct user_pt_regs, link));
2076 	BUILD_BUG_ON(offsetof(struct pt_regs, xer) !=
2077 		     offsetof(struct user_pt_regs, xer));
2078 	BUILD_BUG_ON(offsetof(struct pt_regs, ccr) !=
2079 		     offsetof(struct user_pt_regs, ccr));
2080 #ifdef __powerpc64__
2081 	BUILD_BUG_ON(offsetof(struct pt_regs, softe) !=
2082 		     offsetof(struct user_pt_regs, softe));
2083 #else
2084 	BUILD_BUG_ON(offsetof(struct pt_regs, mq) !=
2085 		     offsetof(struct user_pt_regs, mq));
2086 #endif
2087 	BUILD_BUG_ON(offsetof(struct pt_regs, trap) !=
2088 		     offsetof(struct user_pt_regs, trap));
2089 	BUILD_BUG_ON(offsetof(struct pt_regs, dar) !=
2090 		     offsetof(struct user_pt_regs, dar));
2091 	BUILD_BUG_ON(offsetof(struct pt_regs, dsisr) !=
2092 		     offsetof(struct user_pt_regs, dsisr));
2093 	BUILD_BUG_ON(offsetof(struct pt_regs, result) !=
2094 		     offsetof(struct user_pt_regs, result));
2095 
2096 	BUILD_BUG_ON(sizeof(struct user_pt_regs) > sizeof(struct pt_regs));
2097 
2098 	// Now check that the pt_regs offsets match the uapi #defines
2099 	#define CHECK_REG(_pt, _reg) \
2100 		BUILD_BUG_ON(_pt != (offsetof(struct user_pt_regs, _reg) / \
2101 				     sizeof(unsigned long)));
2102 
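	/*
	 * For example, CHECK_REG(PT_R3, gpr[3]) expands to a BUILD_BUG_ON
	 * verifying that PT_R3 == offsetof(struct user_pt_regs, gpr[3]) /
	 * sizeof(unsigned long), i.e. that the uapi index matches the struct
	 * layout.
	 */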
2103 	CHECK_REG(PT_R0,  gpr[0]);
2104 	CHECK_REG(PT_R1,  gpr[1]);
2105 	CHECK_REG(PT_R2,  gpr[2]);
2106 	CHECK_REG(PT_R3,  gpr[3]);
2107 	CHECK_REG(PT_R4,  gpr[4]);
2108 	CHECK_REG(PT_R5,  gpr[5]);
2109 	CHECK_REG(PT_R6,  gpr[6]);
2110 	CHECK_REG(PT_R7,  gpr[7]);
2111 	CHECK_REG(PT_R8,  gpr[8]);
2112 	CHECK_REG(PT_R9,  gpr[9]);
2113 	CHECK_REG(PT_R10, gpr[10]);
2114 	CHECK_REG(PT_R11, gpr[11]);
2115 	CHECK_REG(PT_R12, gpr[12]);
2116 	CHECK_REG(PT_R13, gpr[13]);
2117 	CHECK_REG(PT_R14, gpr[14]);
2118 	CHECK_REG(PT_R15, gpr[15]);
2119 	CHECK_REG(PT_R16, gpr[16]);
2120 	CHECK_REG(PT_R17, gpr[17]);
2121 	CHECK_REG(PT_R18, gpr[18]);
2122 	CHECK_REG(PT_R19, gpr[19]);
2123 	CHECK_REG(PT_R20, gpr[20]);
2124 	CHECK_REG(PT_R21, gpr[21]);
2125 	CHECK_REG(PT_R22, gpr[22]);
2126 	CHECK_REG(PT_R23, gpr[23]);
2127 	CHECK_REG(PT_R24, gpr[24]);
2128 	CHECK_REG(PT_R25, gpr[25]);
2129 	CHECK_REG(PT_R26, gpr[26]);
2130 	CHECK_REG(PT_R27, gpr[27]);
2131 	CHECK_REG(PT_R28, gpr[28]);
2132 	CHECK_REG(PT_R29, gpr[29]);
2133 	CHECK_REG(PT_R30, gpr[30]);
2134 	CHECK_REG(PT_R31, gpr[31]);
2135 	CHECK_REG(PT_NIP, nip);
2136 	CHECK_REG(PT_MSR, msr);
2137 	CHECK_REG(PT_ORIG_R3, orig_gpr3);
2138 	CHECK_REG(PT_CTR, ctr);
2139 	CHECK_REG(PT_LNK, link);
2140 	CHECK_REG(PT_XER, xer);
2141 	CHECK_REG(PT_CCR, ccr);
2142 #ifdef CONFIG_PPC64
2143 	CHECK_REG(PT_SOFTE, softe);
2144 #else
2145 	CHECK_REG(PT_MQ, mq);
2146 #endif
2147 	CHECK_REG(PT_TRAP, trap);
2148 	CHECK_REG(PT_DAR, dar);
2149 	CHECK_REG(PT_DSISR, dsisr);
2150 	CHECK_REG(PT_RESULT, result);
2151 	#undef CHECK_REG
2152 
2153 	BUILD_BUG_ON(PT_REGS_COUNT != sizeof(struct user_pt_regs) / sizeof(unsigned long));
2154 
2155 	/*
2156 	 * PT_DSCR isn't a real reg, but it's important that it doesn't overlap the
2157 	 * real registers.
2158 	 */
2159 	BUILD_BUG_ON(PT_DSCR < sizeof(struct user_pt_regs) / sizeof(unsigned long));
2160 }
2161