// SPDX-License-Identifier: GPL-2.0
/*
 *  Ptrace user space interface.
 *
 *    Copyright IBM Corp. 1999, 2010
 *    Author(s): Denis Joseph Barrow
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/elf.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <linux/seccomp.h>
#include <linux/compat.h>
#include <trace/syscall.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/switch_to.h>
#include <asm/runtime_instr.h>
#include <asm/facility.h>

#include "entry.h"

#ifdef CONFIG_COMPAT
#include "compat_ptrace.h"
#endif

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

void update_cr_regs(struct task_struct *task)
{
	struct pt_regs *regs = task_pt_regs(task);
	struct thread_struct *thread = &task->thread;
	struct per_regs old, new;
	union ctlreg0 cr0_old, cr0_new;
	union ctlreg2 cr2_old, cr2_new;
	int cr0_changed, cr2_changed;

	__ctl_store(cr0_old.val, 0, 0);
	__ctl_store(cr2_old.val, 2, 2);
	cr0_new = cr0_old;
	cr2_new = cr2_old;
	/* Take care of the enable/disable of transactional execution. */
	if (MACHINE_HAS_TE) {
		/* Set or clear transaction execution TXC bit 8. */
		cr0_new.tcx = 1;
		if (task->thread.per_flags & PER_FLAG_NO_TE)
			cr0_new.tcx = 0;
		/* Set or clear transaction execution TDC bits 62 and 63. */
		cr2_new.tdc = 0;
		if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) {
			if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND_TEND)
				cr2_new.tdc = 1;
			else
				cr2_new.tdc = 2;
		}
	}
	/* Take care of enable/disable of guarded storage. */
	if (MACHINE_HAS_GS) {
		cr2_new.gse = 0;
		if (task->thread.gs_cb)
			cr2_new.gse = 1;
	}
	/* Load control register 0/2 iff changed */
	cr0_changed = cr0_new.val != cr0_old.val;
	cr2_changed = cr2_new.val != cr2_old.val;
	if (cr0_changed)
		__ctl_load(cr0_new.val, 0, 0);
	if (cr2_changed)
		__ctl_load(cr2_new.val, 2, 2);
	/* Copy user specified PER registers */
	new.control = thread->per_user.control;
	new.start = thread->per_user.start;
	new.end = thread->per_user.end;

	/* merge TIF_SINGLE_STEP into user specified PER registers. */
	if (test_tsk_thread_flag(task, TIF_SINGLE_STEP) ||
	    test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP)) {
		if (test_tsk_thread_flag(task, TIF_BLOCK_STEP))
			new.control |= PER_EVENT_BRANCH;
		else
			new.control |= PER_EVENT_IFETCH;
		new.control |= PER_CONTROL_SUSPENSION;
		new.control |= PER_EVENT_TRANSACTION_END;
		if (test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP))
			new.control |= PER_EVENT_IFETCH;
		new.start = 0;
		new.end = -1UL;
	}

	/* Take care of the PER enablement bit in the PSW. */
	if (!(new.control & PER_EVENT_MASK)) {
		regs->psw.mask &= ~PSW_MASK_PER;
		return;
	}
	regs->psw.mask |= PSW_MASK_PER;
	__ctl_store(old, 9, 11);
	if (memcmp(&new, &old, sizeof(struct per_regs)) != 0)
		__ctl_load(new, 9, 11);
}

void user_enable_single_step(struct task_struct *task)
{
	clear_tsk_thread_flag(task, TIF_BLOCK_STEP);
	set_tsk_thread_flag(task, TIF_SINGLE_STEP);
}

void user_disable_single_step(struct task_struct *task)
{
	clear_tsk_thread_flag(task, TIF_BLOCK_STEP);
	clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
}

void user_enable_block_step(struct task_struct *task)
{
	set_tsk_thread_flag(task, TIF_SINGLE_STEP);
	set_tsk_thread_flag(task, TIF_BLOCK_STEP);
}
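
/*
 * These helpers are the architecture backend for the generic
 * PTRACE_SINGLESTEP (and, via arch_has_block_step(), PTRACE_SINGLEBLOCK)
 * resume requests; they only flip TIF bits, the PER control registers
 * are programmed by update_cr_regs() once the task is scheduled again.
 * Illustrative tracer sketch (userspace, not part of this file, error
 * handling omitted):
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/types.h>
 *	#include <sys/wait.h>
 *
 *	// Let a stopped tracee execute exactly one instruction.
 *	void step_once(pid_t pid)
 *	{
 *		ptrace(PTRACE_SINGLESTEP, pid, 0L, 0L);
 *		waitpid(pid, NULL, 0);	// tracee stops with SIGTRAP
 *	}
 */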

/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Clear all debugging related fields.
 */
void ptrace_disable(struct task_struct *task)
{
	memset(&task->thread.per_user, 0, sizeof(task->thread.per_user));
	memset(&task->thread.per_event, 0, sizeof(task->thread.per_event));
	clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
	clear_pt_regs_flag(task_pt_regs(task), PIF_PER_TRAP);
	task->thread.per_flags = 0;
}

#define __ADDR_MASK 7

static inline unsigned long __peek_user_per(struct task_struct *child,
					    addr_t addr)
{
	struct per_struct_kernel *dummy = NULL;
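	/*
	 * dummy is never dereferenced: taking the address of its members
	 * merely computes the offsets of the fields within struct
	 * per_struct_kernel (the classic offsetof() idiom).
	 */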

	if (addr == (addr_t) &dummy->cr9)
		/* Control bits of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			PER_EVENT_IFETCH : child->thread.per_user.control;
	else if (addr == (addr_t) &dummy->cr10)
		/* Start address of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			0 : child->thread.per_user.start;
	else if (addr == (addr_t) &dummy->cr11)
		/* End address of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			-1UL : child->thread.per_user.end;
	else if (addr == (addr_t) &dummy->bits)
		/* Single-step bit. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			(1UL << (BITS_PER_LONG - 1)) : 0;
	else if (addr == (addr_t) &dummy->starting_addr)
		/* Start address of the user specified per set. */
		return child->thread.per_user.start;
	else if (addr == (addr_t) &dummy->ending_addr)
		/* End address of the user specified per set. */
		return child->thread.per_user.end;
	else if (addr == (addr_t) &dummy->perc_atmid)
		/* PER code, ATMID and AI of the last PER trap */
		return (unsigned long)
			child->thread.per_event.cause << (BITS_PER_LONG - 16);
	else if (addr == (addr_t) &dummy->address)
		/* Address of the last PER trap */
		return child->thread.per_event.address;
	else if (addr == (addr_t) &dummy->access_id)
		/* Access id of the last PER trap */
		return (unsigned long)
			child->thread.per_event.paid << (BITS_PER_LONG - 8);
	return 0;
}

/*
 * Read the word at offset addr from the user area of a process. The
 * trouble here is that the information is littered over different
 * locations. The process registers are found on the kernel stack,
 * the floating point stuff and the trace settings are stored in
 * the task structure. In addition the different structures in
 * struct user contain pad bytes that should be read as zeroes.
 * Lovely...
 */
static unsigned long __peek_user(struct task_struct *child, addr_t addr)
{
	struct user *dummy = NULL;
	addr_t offset, tmp;

	if (addr < (addr_t) &dummy->regs.acrs) {
		/*
		 * psw and gprs are stored on the stack
		 */
		tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr);
		if (addr == (addr_t) &dummy->regs.psw.mask) {
			/* Return a clean psw mask. */
			tmp &= PSW_MASK_USER | PSW_MASK_RI;
			tmp |= PSW_USER_BITS;
		}

	} else if (addr < (addr_t) &dummy->regs.orig_gpr2) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy->regs.acrs;
		/*
		 * Very special case: old & broken 64 bit gdb reading
		 * from acrs[15]. Result is a 64 bit value. Read the
		 * 32 bit acrs[15] value and shift it by 32. Sick...
		 */
		if (addr == (addr_t) &dummy->regs.acrs[15])
			tmp = ((unsigned long) child->thread.acrs[15]) << 32;
		else
			tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset);

	} else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		tmp = (addr_t) task_pt_regs(child)->orig_gpr2;

	} else if (addr < (addr_t) &dummy->regs.fp_regs) {
		/*
		 * prevent reads of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		tmp = 0;

	} else if (addr == (addr_t) &dummy->regs.fp_regs.fpc) {
		/*
		 * floating point control reg. is in the thread structure
		 */
		tmp = child->thread.fpu.fpc;
		tmp <<= BITS_PER_LONG - 32;

	} else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are either in child->thread.fpu
		 * or the child->thread.fpu.vxrs array
		 */
		offset = addr - (addr_t) &dummy->regs.fp_regs.fprs;
		if (MACHINE_HAS_VX)
			tmp = *(addr_t *)
			       ((addr_t) child->thread.fpu.vxrs + 2*offset);
		else
			tmp = *(addr_t *)
			       ((addr_t) child->thread.fpu.fprs + offset);

	} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy->regs.per_info;
		tmp = __peek_user_per(child, addr);

	} else
		tmp = 0;

	return tmp;
}

static int
peek_user(struct task_struct *child, addr_t addr, addr_t data)
{
	addr_t tmp, mask;

	/*
	 * Stupid gdb peeks/pokes the access registers in 64 bit with
	 * an alignment of 4. Programmers from hell...
	 */
	mask = __ADDR_MASK;
	if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
	    addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
		mask = 3;
	if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
		return -EIO;

	tmp = __peek_user(child, addr);
	return put_user(tmp, (addr_t __user *) data);
}
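
/*
 * Illustrative peek_user() use from a tracer (userspace sketch, not part
 * of this file; assumes the struct user layout exported through
 * <sys/user.h> matches the kernel's <asm/user.h>, error handling
 * omitted):
 *
 *	#include <stddef.h>
 *	#include <sys/ptrace.h>
 *	#include <sys/types.h>
 *	#include <sys/user.h>
 *
 *	// Read the saved PSW mask of a stopped tracee.
 *	unsigned long read_psw_mask(pid_t pid)
 *	{
 *		return ptrace(PTRACE_PEEKUSER, pid,
 *			      offsetof(struct user, regs.psw.mask), 0L);
 *	}
 */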

static inline void __poke_user_per(struct task_struct *child,
				   addr_t addr, addr_t data)
{
	struct per_struct_kernel *dummy = NULL;

	/*
	 * There are only three fields in the per_info struct that the
	 * debugger user can write to.
	 * 1) cr9: the debugger wants to set a new PER event mask
	 * 2) starting_addr: the debugger wants to set a new starting
	 *    address to use with the PER event mask.
	 * 3) ending_addr: the debugger wants to set a new ending
	 *    address to use with the PER event mask.
	 * The user specified PER event mask and the start and end
	 * addresses are used only if single stepping is not in effect.
	 * Writes to any other field in per_info are ignored.
	 */
	if (addr == (addr_t) &dummy->cr9)
		/* PER event mask of the user specified per set. */
		child->thread.per_user.control =
			data & (PER_EVENT_MASK | PER_CONTROL_MASK);
	else if (addr == (addr_t) &dummy->starting_addr)
		/* Starting address of the user specified per set. */
		child->thread.per_user.start = data;
	else if (addr == (addr_t) &dummy->ending_addr)
		/* Ending address of the user specified per set. */
		child->thread.per_user.end = data;
}

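/*
 * A tracer may rewrite gpr 2, i.e. the system call number, while the
 * tracee is stopped at a system call. The interruption code saved in
 * regs->int_code has to follow suit, since the seccomp and tracepoint
 * code in do_syscall_trace_enter() as well as the system call restart
 * logic take the syscall number from there. Only do this if the tracee
 * really stopped on an svc instruction.
 */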
static void fixup_int_code(struct task_struct *child, addr_t data)
{
	struct pt_regs *regs = task_pt_regs(child);
	int ilc = regs->int_code >> 16;
	u16 insn;

	if (ilc > 6)
		return;

	if (ptrace_access_vm(child, regs->psw.addr - (regs->int_code >> 16),
			&insn, sizeof(insn), FOLL_FORCE) != sizeof(insn))
		return;

	/* double check that tracee stopped on svc instruction */
	if ((insn >> 8) != 0xa)
		return;

	regs->int_code = 0x20000 | (data & 0xffff);
}

/*
 * Write a word to the user area of a process at location addr. This
 * operation does have an additional problem compared to peek_user.
 * Stores to the program status word and to the floating point
 * control register need to be checked for validity.
 */
static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
{
	struct user *dummy = NULL;
	addr_t offset;

	if (addr < (addr_t) &dummy->regs.acrs) {
		struct pt_regs *regs = task_pt_regs(child);
		/*
		 * psw and gprs are stored on the stack
		 */
		if (addr == (addr_t) &dummy->regs.psw.mask) {
			unsigned long mask = PSW_MASK_USER;

			mask |= is_ri_task(child) ? PSW_MASK_RI : 0;
			if ((data ^ PSW_USER_BITS) & ~mask)
				/* Invalid psw mask. */
				return -EINVAL;
			if ((data & PSW_MASK_ASC) == PSW_ASC_HOME)
				/* Invalid address-space-control bits */
				return -EINVAL;
			if ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA))
				/* Invalid addressing mode bits */
				return -EINVAL;
		}

		if (test_pt_regs_flag(regs, PIF_SYSCALL) &&
			addr == offsetof(struct user, regs.gprs[2]))
			fixup_int_code(child, data);
		*(addr_t *)((addr_t) &regs->psw + addr) = data;

	} else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy->regs.acrs;
		/*
		 * Very special case: old & broken 64 bit gdb writing
		 * to acrs[15] with a 64 bit value. Ignore the lower
		 * half of the value and write the upper 32 bit to
		 * acrs[15]. Sick...
		 */
		if (addr == (addr_t) &dummy->regs.acrs[15])
			child->thread.acrs[15] = (unsigned int) (data >> 32);
		else
			*(addr_t *)((addr_t) &child->thread.acrs + offset) = data;

	} else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		task_pt_regs(child)->orig_gpr2 = data;

	} else if (addr < (addr_t) &dummy->regs.fp_regs) {
		/*
		 * prevent writes of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		return 0;

	} else if (addr == (addr_t) &dummy->regs.fp_regs.fpc) {
		/*
		 * floating point control reg. is in the thread structure
		 */
		if ((unsigned int) data != 0 ||
		    test_fp_ctl(data >> (BITS_PER_LONG - 32)))
			return -EINVAL;
		child->thread.fpu.fpc = data >> (BITS_PER_LONG - 32);

	} else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are either in child->thread.fpu
		 * or the child->thread.fpu.vxrs array
		 */
		offset = addr - (addr_t) &dummy->regs.fp_regs.fprs;
		if (MACHINE_HAS_VX)
			*(addr_t *)((addr_t)
				child->thread.fpu.vxrs + 2*offset) = data;
		else
			*(addr_t *)((addr_t)
				child->thread.fpu.fprs + offset) = data;

	} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy->regs.per_info;
		__poke_user_per(child, addr, data);

	}

	return 0;
}

static int poke_user(struct task_struct *child, addr_t addr, addr_t data)
{
	addr_t mask;

	/*
	 * Stupid gdb peeks/pokes the access registers in 64 bit with
	 * an alignment of 4. Programmers from hell indeed...
	 */
	mask = __ADDR_MASK;
	if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
	    addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
		mask = 3;
	if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
		return -EIO;

	return __poke_user(child, addr, data);
}

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	ptrace_area parea;
	int copied, ret;

	switch (request) {
	case PTRACE_PEEKUSR:
		/* read the word at location addr in the USER area. */
		return peek_user(child, addr, data);

	case PTRACE_POKEUSR:
		/* write the word at location addr in the USER area */
		return poke_user(child, addr, data);

	case PTRACE_PEEKUSR_AREA:
	case PTRACE_POKEUSR_AREA:
		if (copy_from_user(&parea, (void __force __user *) addr,
							sizeof(parea)))
			return -EFAULT;
		addr = parea.kernel_addr;
		data = parea.process_addr;
		copied = 0;
		while (copied < parea.len) {
			if (request == PTRACE_PEEKUSR_AREA)
				ret = peek_user(child, addr, data);
			else {
				addr_t utmp;
				if (get_user(utmp,
					     (addr_t __force __user *) data))
					return -EFAULT;
				ret = poke_user(child, addr, utmp);
			}
			if (ret)
				return ret;
			addr += sizeof(unsigned long);
			data += sizeof(unsigned long);
			copied += sizeof(unsigned long);
		}
		return 0;
	case PTRACE_GET_LAST_BREAK:
		put_user(child->thread.last_break,
			 (unsigned long __user *) data);
		return 0;
	case PTRACE_ENABLE_TE:
		if (!MACHINE_HAS_TE)
			return -EIO;
		child->thread.per_flags &= ~PER_FLAG_NO_TE;
		return 0;
	case PTRACE_DISABLE_TE:
		if (!MACHINE_HAS_TE)
			return -EIO;
		child->thread.per_flags |= PER_FLAG_NO_TE;
		child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND;
		return 0;
	case PTRACE_TE_ABORT_RAND:
		if (!MACHINE_HAS_TE || (child->thread.per_flags & PER_FLAG_NO_TE))
			return -EIO;
		switch (data) {
		case 0UL:
			child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND;
			break;
		case 1UL:
			child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND;
			child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND_TEND;
			break;
		case 2UL:
			child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND;
			child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND_TEND;
			break;
		default:
			return -EINVAL;
		}
		return 0;
	default:
		return ptrace_request(child, request, addr, data);
	}
}
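
/*
 * Illustrative use of the s390 specific PTRACE_PEEKUSR_AREA request
 * (userspace sketch, not part of this file; ptrace_area comes from the
 * uapi <asm/ptrace.h>, error handling omitted):
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/types.h>
 *	#include <asm/ptrace.h>
 *
 *	// Copy len bytes of the tracee's user area, starting at offset
 *	// off, into buf in one go instead of word-wise PTRACE_PEEKUSR.
 *	long peek_area(pid_t pid, unsigned long off, void *buf,
 *		       unsigned long len)
 *	{
 *		ptrace_area parea = {
 *			.len = len,
 *			.kernel_addr = off,
 *			.process_addr = (unsigned long) buf,
 *		};
 *		return ptrace(PTRACE_PEEKUSR_AREA, pid, &parea, 0L);
 *	}
 */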

#ifdef CONFIG_COMPAT
/*
 * Now the fun part starts... a 31 bit program running in the
 * 31 bit emulation tracing another program. PTRACE_PEEKTEXT,
 * PTRACE_PEEKDATA, PTRACE_POKETEXT and PTRACE_POKEDATA are easy
 * to handle, the difference to the 64 bit versions of the requests
 * is that the access is done in multiples of 4 bytes instead of
 * 8 bytes (sizeof(unsigned long) on 31/64 bit).
 * The ugly parts are PTRACE_PEEKUSR, PTRACE_PEEKUSR_AREA,
 * PTRACE_POKEUSR and PTRACE_POKEUSR_AREA. If the traced program
 * is a 31 bit program too, the content of struct user can be
 * emulated. A 31 bit program peeking into the struct user of
 * a 64 bit program is a no-no.
 */

/*
 * Same as peek_user_per but for a 31 bit program.
 */
static inline __u32 __peek_user_per_compat(struct task_struct *child,
					   addr_t addr)
{
	struct compat_per_struct_kernel *dummy32 = NULL;

	if (addr == (addr_t) &dummy32->cr9)
		/* Control bits of the active per set. */
		return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
			PER_EVENT_IFETCH : child->thread.per_user.control;
	else if (addr == (addr_t) &dummy32->cr10)
		/* Start address of the active per set. */
		return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
			0 : child->thread.per_user.start;
	else if (addr == (addr_t) &dummy32->cr11)
		/* End address of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			PSW32_ADDR_INSN : child->thread.per_user.end;
	else if (addr == (addr_t) &dummy32->bits)
		/* Single-step bit. */
		return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
			0x80000000 : 0;
	else if (addr == (addr_t) &dummy32->starting_addr)
		/* Start address of the user specified per set. */
		return (__u32) child->thread.per_user.start;
	else if (addr == (addr_t) &dummy32->ending_addr)
		/* End address of the user specified per set. */
		return (__u32) child->thread.per_user.end;
	else if (addr == (addr_t) &dummy32->perc_atmid)
		/* PER code, ATMID and AI of the last PER trap */
		return (__u32) child->thread.per_event.cause << 16;
	else if (addr == (addr_t) &dummy32->address)
		/* Address of the last PER trap */
		return (__u32) child->thread.per_event.address;
	else if (addr == (addr_t) &dummy32->access_id)
		/* Access id of the last PER trap */
		return (__u32) child->thread.per_event.paid << 24;
	return 0;
}

/*
 * Same as peek_user but for a 31 bit program.
 */
static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
{
	struct compat_user *dummy32 = NULL;
	addr_t offset;
	__u32 tmp;

	if (addr < (addr_t) &dummy32->regs.acrs) {
		struct pt_regs *regs = task_pt_regs(child);
		/*
		 * psw and gprs are stored on the stack
		 */
		if (addr == (addr_t) &dummy32->regs.psw.mask) {
			/* Fake a 31 bit psw mask. */
			tmp = (__u32)(regs->psw.mask >> 32);
			tmp &= PSW32_MASK_USER | PSW32_MASK_RI;
			tmp |= PSW32_USER_BITS;
		} else if (addr == (addr_t) &dummy32->regs.psw.addr) {
			/* Fake a 31 bit psw address. */
			tmp = (__u32) regs->psw.addr |
				(__u32)(regs->psw.mask & PSW_MASK_BA);
		} else {
			/* gpr 0-15 */
			tmp = *(__u32 *)((addr_t) &regs->psw + addr*2 + 4);
		}
	} else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy32->regs.acrs;
		tmp = *(__u32*)((addr_t) &child->thread.acrs + offset);

	} else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		tmp = *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4);

	} else if (addr < (addr_t) &dummy32->regs.fp_regs) {
		/*
		 * prevent reads of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		tmp = 0;

	} else if (addr == (addr_t) &dummy32->regs.fp_regs.fpc) {
		/*
		 * floating point control reg. is in the thread structure
		 */
		tmp = child->thread.fpu.fpc;

	} else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are either in child->thread.fpu
		 * or the child->thread.fpu.vxrs array
		 */
		offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs;
		if (MACHINE_HAS_VX)
			tmp = *(__u32 *)
			       ((addr_t) child->thread.fpu.vxrs + 2*offset);
		else
			tmp = *(__u32 *)
			       ((addr_t) child->thread.fpu.fprs + offset);

	} else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy32->regs.per_info;
		tmp = __peek_user_per_compat(child, addr);

	} else
		tmp = 0;

	return tmp;
}

static int peek_user_compat(struct task_struct *child,
			    addr_t addr, addr_t data)
{
	__u32 tmp;

	if (!is_compat_task() || (addr & 3) || addr > sizeof(struct user) - 3)
		return -EIO;

	tmp = __peek_user_compat(child, addr);
	return put_user(tmp, (__u32 __user *) data);
}

/*
 * Same as poke_user_per but for a 31 bit program.
 */
static inline void __poke_user_per_compat(struct task_struct *child,
					  addr_t addr, __u32 data)
{
	struct compat_per_struct_kernel *dummy32 = NULL;

	if (addr == (addr_t) &dummy32->cr9)
		/* PER event mask of the user specified per set. */
		child->thread.per_user.control =
			data & (PER_EVENT_MASK | PER_CONTROL_MASK);
	else if (addr == (addr_t) &dummy32->starting_addr)
		/* Starting address of the user specified per set. */
		child->thread.per_user.start = data;
	else if (addr == (addr_t) &dummy32->ending_addr)
		/* Ending address of the user specified per set. */
		child->thread.per_user.end = data;
}

/*
 * Same as poke_user but for a 31 bit program.
 */
static int __poke_user_compat(struct task_struct *child,
			      addr_t addr, addr_t data)
{
	struct compat_user *dummy32 = NULL;
	__u32 tmp = (__u32) data;
	addr_t offset;

	if (addr < (addr_t) &dummy32->regs.acrs) {
		struct pt_regs *regs = task_pt_regs(child);
		/*
		 * psw, gprs, acrs and orig_gpr2 are stored on the stack
		 */
		if (addr == (addr_t) &dummy32->regs.psw.mask) {
			__u32 mask = PSW32_MASK_USER;

			mask |= is_ri_task(child) ? PSW32_MASK_RI : 0;
			/* Build a 64 bit psw mask from 31 bit mask. */
			if ((tmp ^ PSW32_USER_BITS) & ~mask)
				/* Invalid psw mask. */
				return -EINVAL;
			if ((data & PSW32_MASK_ASC) == PSW32_ASC_HOME)
				/* Invalid address-space-control bits */
				return -EINVAL;
			regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
				(regs->psw.mask & PSW_MASK_BA) |
				(__u64)(tmp & mask) << 32;
		} else if (addr == (addr_t) &dummy32->regs.psw.addr) {
			/* Build a 64 bit psw address from 31 bit address. */
			regs->psw.addr = (__u64) tmp & PSW32_ADDR_INSN;
			/* Transfer 31 bit amode bit to psw mask. */
			regs->psw.mask = (regs->psw.mask & ~PSW_MASK_BA) |
				(__u64)(tmp & PSW32_ADDR_AMODE);
		} else {
			if (test_pt_regs_flag(regs, PIF_SYSCALL) &&
				addr == offsetof(struct compat_user, regs.gprs[2]))
				fixup_int_code(child, data);
			/* gpr 0-15 */
			*(__u32*)((addr_t) &regs->psw + addr*2 + 4) = tmp;
		}
	} else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy32->regs.acrs;
		*(__u32*)((addr_t) &child->thread.acrs + offset) = tmp;

	} else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		*(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4) = tmp;

	} else if (addr < (addr_t) &dummy32->regs.fp_regs) {
		/*
		 * prevent writes of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		return 0;

	} else if (addr == (addr_t) &dummy32->regs.fp_regs.fpc) {
		/*
		 * floating point control reg. is in the thread structure
		 */
		if (test_fp_ctl(tmp))
			return -EINVAL;
		child->thread.fpu.fpc = data;

	} else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are either in child->thread.fpu
		 * or the child->thread.fpu.vxrs array
		 */
		offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs;
		if (MACHINE_HAS_VX)
			*(__u32 *)((addr_t)
				child->thread.fpu.vxrs + 2*offset) = tmp;
		else
			*(__u32 *)((addr_t)
				child->thread.fpu.fprs + offset) = tmp;

	} else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy32->regs.per_info;
		__poke_user_per_compat(child, addr, data);
	}

	return 0;
}

static int poke_user_compat(struct task_struct *child,
			    addr_t addr, addr_t data)
{
	if (!is_compat_task() || (addr & 3) ||
	    addr > sizeof(struct compat_user) - 3)
		return -EIO;

	return __poke_user_compat(child, addr, data);
}

long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	compat_ptrace_area parea;
	int copied, ret;

	switch (request) {
	case PTRACE_PEEKUSR:
		/* read the word at location addr in the USER area. */
		return peek_user_compat(child, addr, data);

	case PTRACE_POKEUSR:
		/* write the word at location addr in the USER area */
		return poke_user_compat(child, addr, data);

	case PTRACE_PEEKUSR_AREA:
	case PTRACE_POKEUSR_AREA:
		if (copy_from_user(&parea, (void __force __user *) addr,
							sizeof(parea)))
			return -EFAULT;
		addr = parea.kernel_addr;
		data = parea.process_addr;
		copied = 0;
		while (copied < parea.len) {
			if (request == PTRACE_PEEKUSR_AREA)
				ret = peek_user_compat(child, addr, data);
			else {
				__u32 utmp;
				if (get_user(utmp,
					     (__u32 __force __user *) data))
					return -EFAULT;
				ret = poke_user_compat(child, addr, utmp);
			}
			if (ret)
				return ret;
			addr += sizeof(unsigned int);
			data += sizeof(unsigned int);
			copied += sizeof(unsigned int);
		}
		return 0;
	case PTRACE_GET_LAST_BREAK:
		put_user(child->thread.last_break,
			 (unsigned int __user *) data);
		return 0;
	}
	return compat_ptrace_request(child, request, addr, data);
}
#endif

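/*
 * Called on system call entry from the sysc_tracesys path in entry.S.
 * Returns the (possibly rewritten) system call number to execute, or -1
 * with PIF_SYSCALL cleared when tracing or seccomp decided that the
 * system call and its restart handling are to be skipped.
 */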
asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
{
	unsigned long mask = -1UL;
	long ret = -1;

	if (is_compat_task())
		mask = 0xffffffff;

	/*
	 * The sysc_tracesys code in entry.S stored the system
	 * call number in gprs[2].
	 */
	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
	    tracehook_report_syscall_entry(regs)) {
		/*
		 * Tracing decided this syscall should not happen. Skip
		 * the system call and the system call restart handling.
		 */
		goto skip;
	}

#ifdef CONFIG_SECCOMP
	/* Do the secure computing check after ptrace. */
	if (unlikely(test_thread_flag(TIF_SECCOMP))) {
		struct seccomp_data sd;

		if (is_compat_task()) {
			sd.instruction_pointer = regs->psw.addr & 0x7fffffff;
			sd.arch = AUDIT_ARCH_S390;
		} else {
			sd.instruction_pointer = regs->psw.addr;
			sd.arch = AUDIT_ARCH_S390X;
		}

		sd.nr = regs->int_code & 0xffff;
		sd.args[0] = regs->orig_gpr2 & mask;
		sd.args[1] = regs->gprs[3] & mask;
		sd.args[2] = regs->gprs[4] & mask;
		sd.args[3] = regs->gprs[5] & mask;
		sd.args[4] = regs->gprs[6] & mask;
		sd.args[5] = regs->gprs[7] & mask;

		if (__secure_computing(&sd) == -1)
			goto skip;
	}
#endif /* CONFIG_SECCOMP */

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->int_code & 0xffff);

	audit_syscall_entry(regs->int_code & 0xffff, regs->orig_gpr2 & mask,
			    regs->gprs[3] & mask, regs->gprs[4] & mask,
			    regs->gprs[5] & mask);

	if ((signed long)regs->gprs[2] >= NR_syscalls) {
		regs->gprs[2] = -ENOSYS;
		ret = -ENOSYS;
	}
	return regs->gprs[2];
skip:
	clear_pt_regs_flag(regs, PIF_SYSCALL);
	return ret;
}

asmlinkage void do_syscall_trace_exit(struct pt_regs *regs)
{
	audit_syscall_exit(regs);

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_exit(regs, regs->gprs[2]);

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, 0);
}

/*
 * user_regset definitions.
 */

static int s390_regs_get(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 void *kbuf, void __user *ubuf)
{
	if (target == current)
		save_access_regs(target->thread.acrs);

	if (kbuf) {
		unsigned long *k = kbuf;
		while (count > 0) {
			*k++ = __peek_user(target, pos);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		unsigned long __user *u = ubuf;
		while (count > 0) {
			if (__put_user(__peek_user(target, pos), u++))
				return -EFAULT;
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}
	return 0;
}

static int s390_regs_set(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 const void *kbuf, const void __user *ubuf)
{
	int rc = 0;

	if (target == current)
		save_access_regs(target->thread.acrs);

	if (kbuf) {
		const unsigned long *k = kbuf;
		while (count > 0 && !rc) {
			rc = __poke_user(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const unsigned long  __user *u = ubuf;
		while (count > 0 && !rc) {
			unsigned long word;
			rc = __get_user(word, u++);
			if (rc)
				break;
			rc = __poke_user(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	if (rc == 0 && target == current)
		restore_access_regs(target->thread.acrs);

	return rc;
}

static int s390_fpregs_get(struct task_struct *target,
			   const struct user_regset *regset, unsigned int pos,
			   unsigned int count, void *kbuf, void __user *ubuf)
{
	_s390_fp_regs fp_regs;

	if (target == current)
		save_fpu_regs();

	fp_regs.fpc = target->thread.fpu.fpc;
	fpregs_store(&fp_regs, &target->thread.fpu);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &fp_regs, 0, -1);
}

static int s390_fpregs_set(struct task_struct *target,
			   const struct user_regset *regset, unsigned int pos,
			   unsigned int count, const void *kbuf,
			   const void __user *ubuf)
{
	int rc = 0;
	freg_t fprs[__NUM_FPRS];

	if (target == current)
		save_fpu_regs();

	if (MACHINE_HAS_VX)
		convert_vx_to_fp(fprs, target->thread.fpu.vxrs);
	else
		memcpy(&fprs, target->thread.fpu.fprs, sizeof(fprs));

	/* If setting FPC, must validate it first. */
	if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
		u32 ufpc[2] = { target->thread.fpu.fpc, 0 };
		rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ufpc,
					0, offsetof(s390_fp_regs, fprs));
		if (rc)
			return rc;
		if (ufpc[1] != 0 || test_fp_ctl(ufpc[0]))
			return -EINVAL;
		target->thread.fpu.fpc = ufpc[0];
	}

	if (rc == 0 && count > 0)
		rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					fprs, offsetof(s390_fp_regs, fprs), -1);
	if (rc)
		return rc;

	if (MACHINE_HAS_VX)
		convert_fp_to_vx(target->thread.fpu.vxrs, fprs);
	else
		memcpy(target->thread.fpu.fprs, &fprs, sizeof(fprs));

	return rc;
}

static int s390_last_break_get(struct task_struct *target,
			       const struct user_regset *regset,
			       unsigned int pos, unsigned int count,
			       void *kbuf, void __user *ubuf)
{
	if (count > 0) {
		if (kbuf) {
			unsigned long *k = kbuf;
			*k = target->thread.last_break;
		} else {
			unsigned long  __user *u = ubuf;
			if (__put_user(target->thread.last_break, u))
				return -EFAULT;
		}
	}
	return 0;
}

static int s390_last_break_set(struct task_struct *target,
			       const struct user_regset *regset,
			       unsigned int pos, unsigned int count,
			       const void *kbuf, const void __user *ubuf)
{
	return 0;
}

static int s390_tdb_get(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			void *kbuf, void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	unsigned char *data;

	if (!(regs->int_code & 0x200))
		return -ENODATA;
	data = target->thread.trap_tdb;
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, data, 0, 256);
}

static int s390_tdb_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	return 0;
}

static int s390_vxrs_low_get(struct task_struct *target,
			     const struct user_regset *regset,
			     unsigned int pos, unsigned int count,
			     void *kbuf, void __user *ubuf)
{
	__u64 vxrs[__NUM_VXRS_LOW];
	int i;

	if (!MACHINE_HAS_VX)
		return -ENODEV;
	if (target == current)
		save_fpu_regs();
	for (i = 0; i < __NUM_VXRS_LOW; i++)
		vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1);
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
}

static int s390_vxrs_low_set(struct task_struct *target,
			     const struct user_regset *regset,
			     unsigned int pos, unsigned int count,
			     const void *kbuf, const void __user *ubuf)
{
	__u64 vxrs[__NUM_VXRS_LOW];
	int i, rc;

	if (!MACHINE_HAS_VX)
		return -ENODEV;
	if (target == current)
		save_fpu_regs();

	for (i = 0; i < __NUM_VXRS_LOW; i++)
		vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1);

	rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
	if (rc == 0)
		for (i = 0; i < __NUM_VXRS_LOW; i++)
			*((__u64 *)(target->thread.fpu.vxrs + i) + 1) = vxrs[i];

	return rc;
}

static int s390_vxrs_high_get(struct task_struct *target,
			      const struct user_regset *regset,
			      unsigned int pos, unsigned int count,
			      void *kbuf, void __user *ubuf)
{
	__vector128 vxrs[__NUM_VXRS_HIGH];

	if (!MACHINE_HAS_VX)
		return -ENODEV;
	if (target == current)
		save_fpu_regs();
	memcpy(vxrs, target->thread.fpu.vxrs + __NUM_VXRS_LOW, sizeof(vxrs));

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
}

static int s390_vxrs_high_set(struct task_struct *target,
			      const struct user_regset *regset,
			      unsigned int pos, unsigned int count,
			      const void *kbuf, const void __user *ubuf)
{
	int rc;

	if (!MACHINE_HAS_VX)
		return -ENODEV;
	if (target == current)
		save_fpu_regs();

	rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				target->thread.fpu.vxrs + __NUM_VXRS_LOW, 0, -1);
	return rc;
}

static int s390_system_call_get(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				void *kbuf, void __user *ubuf)
{
	unsigned int *data = &target->thread.system_call;
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   data, 0, sizeof(unsigned int));
}

static int s390_system_call_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	unsigned int *data = &target->thread.system_call;
	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  data, 0, sizeof(unsigned int));
}

static int s390_gs_cb_get(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  void *kbuf, void __user *ubuf)
{
	struct gs_cb *data = target->thread.gs_cb;

	if (!MACHINE_HAS_GS)
		return -ENODEV;
	if (!data)
		return -ENODATA;
	if (target == current)
		save_gs_cb(data);
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   data, 0, sizeof(struct gs_cb));
}

static int s390_gs_cb_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct gs_cb gs_cb = { }, *data = NULL;
	int rc;

	if (!MACHINE_HAS_GS)
		return -ENODEV;
	if (!target->thread.gs_cb) {
		data = kzalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;
	}
	if (!target->thread.gs_cb)
		gs_cb.gsd = 25;
	else if (target == current)
		save_gs_cb(&gs_cb);
	else
		gs_cb = *target->thread.gs_cb;
	rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				&gs_cb, 0, sizeof(gs_cb));
	if (rc) {
		kfree(data);
		return -EFAULT;
	}
	preempt_disable();
	if (!target->thread.gs_cb)
		target->thread.gs_cb = data;
	*target->thread.gs_cb = gs_cb;
	if (target == current) {
		__ctl_set_bit(2, 4);
		restore_gs_cb(target->thread.gs_cb);
	}
	preempt_enable();
	return rc;
}

static int s390_gs_bc_get(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  void *kbuf, void __user *ubuf)
{
	struct gs_cb *data = target->thread.gs_bc_cb;

	if (!MACHINE_HAS_GS)
		return -ENODEV;
	if (!data)
		return -ENODATA;
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   data, 0, sizeof(struct gs_cb));
}

static int s390_gs_bc_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct gs_cb *data = target->thread.gs_bc_cb;

	if (!MACHINE_HAS_GS)
		return -ENODEV;
	if (!data) {
		data = kzalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;
		target->thread.gs_bc_cb = data;
	}
	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  data, 0, sizeof(struct gs_cb));
}

static bool is_ri_cb_valid(struct runtime_instr_cb *cb)
{
	return (cb->rca & 0x1f) == 0 &&
		(cb->roa & 0xfff) == 0 &&
		(cb->rla & 0xfff) == 0xfff &&
		cb->s == 1 &&
		cb->k == 1 &&
		cb->h == 0 &&
		cb->reserved1 == 0 &&
		cb->ps == 1 &&
		cb->qs == 0 &&
		cb->pc == 1 &&
		cb->qc == 0 &&
		cb->reserved2 == 0 &&
		cb->key == PAGE_DEFAULT_KEY &&
		cb->reserved3 == 0 &&
		cb->reserved4 == 0 &&
		cb->reserved5 == 0 &&
		cb->reserved6 == 0 &&
		cb->reserved7 == 0 &&
		cb->reserved8 == 0 &&
		cb->rla >= cb->roa &&
		cb->rca >= cb->roa &&
		cb->rca <= cb->rla+1 &&
		cb->m < 3;
}

static int s390_runtime_instr_get(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				void *kbuf, void __user *ubuf)
{
	struct runtime_instr_cb *data = target->thread.ri_cb;

	if (!test_facility(64))
		return -ENODEV;
	if (!data)
		return -ENODATA;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   data, 0, sizeof(struct runtime_instr_cb));
}

static int s390_runtime_instr_set(struct task_struct *target,
				  const struct user_regset *regset,
				  unsigned int pos, unsigned int count,
				  const void *kbuf, const void __user *ubuf)
{
	struct runtime_instr_cb ri_cb = { }, *data = NULL;
	int rc;

	if (!test_facility(64))
		return -ENODEV;

	if (!target->thread.ri_cb) {
		data = kzalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;
	}

	if (target->thread.ri_cb) {
		if (target == current)
			store_runtime_instr_cb(&ri_cb);
		else
			ri_cb = *target->thread.ri_cb;
	}

	rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				&ri_cb, 0, sizeof(struct runtime_instr_cb));
	if (rc) {
		kfree(data);
		return -EFAULT;
	}

	if (!is_ri_cb_valid(&ri_cb)) {
		kfree(data);
		return -EINVAL;
	}

	preempt_disable();
	if (!target->thread.ri_cb)
		target->thread.ri_cb = data;
	*target->thread.ri_cb = ri_cb;
	if (target == current)
		load_runtime_instr_cb(target->thread.ri_cb);
	preempt_enable();

	return 0;
}

static const struct user_regset s390_regsets[] = {
	{
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(s390_regs) / sizeof(long),
		.size = sizeof(long),
		.align = sizeof(long),
		.get = s390_regs_get,
		.set = s390_regs_set,
	},
	{
		.core_note_type = NT_PRFPREG,
		.n = sizeof(s390_fp_regs) / sizeof(long),
		.size = sizeof(long),
		.align = sizeof(long),
		.get = s390_fpregs_get,
		.set = s390_fpregs_set,
	},
	{
		.core_note_type = NT_S390_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(unsigned int),
		.align = sizeof(unsigned int),
		.get = s390_system_call_get,
		.set = s390_system_call_set,
	},
	{
		.core_note_type = NT_S390_LAST_BREAK,
		.n = 1,
		.size = sizeof(long),
		.align = sizeof(long),
		.get = s390_last_break_get,
		.set = s390_last_break_set,
	},
	{
		.core_note_type = NT_S390_TDB,
		.n = 1,
		.size = 256,
		.align = 1,
		.get = s390_tdb_get,
		.set = s390_tdb_set,
	},
	{
		.core_note_type = NT_S390_VXRS_LOW,
		.n = __NUM_VXRS_LOW,
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.get = s390_vxrs_low_get,
		.set = s390_vxrs_low_set,
	},
	{
		.core_note_type = NT_S390_VXRS_HIGH,
		.n = __NUM_VXRS_HIGH,
		.size = sizeof(__vector128),
		.align = sizeof(__vector128),
		.get = s390_vxrs_high_get,
		.set = s390_vxrs_high_set,
	},
	{
		.core_note_type = NT_S390_GS_CB,
		.n = sizeof(struct gs_cb) / sizeof(__u64),
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.get = s390_gs_cb_get,
		.set = s390_gs_cb_set,
	},
	{
		.core_note_type = NT_S390_GS_BC,
		.n = sizeof(struct gs_cb) / sizeof(__u64),
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.get = s390_gs_bc_get,
		.set = s390_gs_bc_set,
	},
	{
		.core_note_type = NT_S390_RI_CB,
		.n = sizeof(struct runtime_instr_cb) / sizeof(__u64),
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.get = s390_runtime_instr_get,
		.set = s390_runtime_instr_set,
	},
};

static const struct user_regset_view user_s390_view = {
	.name = "s390x",
	.e_machine = EM_S390,
	.regsets = s390_regsets,
	.n = ARRAY_SIZE(s390_regsets)
};

#ifdef CONFIG_COMPAT
static int s390_compat_regs_get(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				void *kbuf, void __user *ubuf)
{
	if (target == current)
		save_access_regs(target->thread.acrs);

	if (kbuf) {
		compat_ulong_t *k = kbuf;
		while (count > 0) {
			*k++ = __peek_user_compat(target, pos);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		compat_ulong_t __user *u = ubuf;
		while (count > 0) {
			if (__put_user(__peek_user_compat(target, pos), u++))
				return -EFAULT;
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}
	return 0;
}

static int s390_compat_regs_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	int rc = 0;

	if (target == current)
		save_access_regs(target->thread.acrs);

	if (kbuf) {
		const compat_ulong_t *k = kbuf;
		while (count > 0 && !rc) {
			rc = __poke_user_compat(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const compat_ulong_t  __user *u = ubuf;
		while (count > 0 && !rc) {
			compat_ulong_t word;
			rc = __get_user(word, u++);
			if (rc)
				break;
			rc = __poke_user_compat(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	if (rc == 0 && target == current)
		restore_access_regs(target->thread.acrs);

	return rc;
}

static int s390_compat_regs_high_get(struct task_struct *target,
				     const struct user_regset *regset,
				     unsigned int pos, unsigned int count,
				     void *kbuf, void __user *ubuf)
{
	compat_ulong_t *gprs_high;

	gprs_high = (compat_ulong_t *)
		&task_pt_regs(target)->gprs[pos / sizeof(compat_ulong_t)];
	if (kbuf) {
		compat_ulong_t *k = kbuf;
		while (count > 0) {
			*k++ = *gprs_high;
			gprs_high += 2;
			count -= sizeof(*k);
		}
	} else {
		compat_ulong_t __user *u = ubuf;
		while (count > 0) {
			if (__put_user(*gprs_high, u++))
				return -EFAULT;
			gprs_high += 2;
			count -= sizeof(*u);
		}
	}
	return 0;
}

static int s390_compat_regs_high_set(struct task_struct *target,
				     const struct user_regset *regset,
				     unsigned int pos, unsigned int count,
				     const void *kbuf, const void __user *ubuf)
{
	compat_ulong_t *gprs_high;
	int rc = 0;

	gprs_high = (compat_ulong_t *)
		&task_pt_regs(target)->gprs[pos / sizeof(compat_ulong_t)];
	if (kbuf) {
		const compat_ulong_t *k = kbuf;
		while (count > 0) {
			*gprs_high = *k++;
			gprs_high += 2;
			count -= sizeof(*k);
		}
	} else {
		const compat_ulong_t  __user *u = ubuf;
		while (count > 0 && !rc) {
			unsigned long word;
			rc = __get_user(word, u++);
			if (rc)
				break;
			*gprs_high = word;
			gprs_high += 2;
			count -= sizeof(*u);
		}
	}

	return rc;
}

static int s390_compat_last_break_get(struct task_struct *target,
				      const struct user_regset *regset,
				      unsigned int pos, unsigned int count,
				      void *kbuf, void __user *ubuf)
{
	compat_ulong_t last_break;

	if (count > 0) {
		last_break = target->thread.last_break;
		if (kbuf) {
			unsigned long *k = kbuf;
			*k = last_break;
		} else {
			unsigned long  __user *u = ubuf;
			if (__put_user(last_break, u))
				return -EFAULT;
		}
	}
	return 0;
}

static int s390_compat_last_break_set(struct task_struct *target,
				      const struct user_regset *regset,
				      unsigned int pos, unsigned int count,
				      const void *kbuf, const void __user *ubuf)
{
	return 0;
}

static const struct user_regset s390_compat_regsets[] = {
	{
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(s390_compat_regs) / sizeof(compat_long_t),
		.size = sizeof(compat_long_t),
		.align = sizeof(compat_long_t),
		.get = s390_compat_regs_get,
		.set = s390_compat_regs_set,
	},
	{
		.core_note_type = NT_PRFPREG,
		.n = sizeof(s390_fp_regs) / sizeof(compat_long_t),
		.size = sizeof(compat_long_t),
		.align = sizeof(compat_long_t),
		.get = s390_fpregs_get,
		.set = s390_fpregs_set,
	},
	{
		.core_note_type = NT_S390_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(compat_uint_t),
		.align = sizeof(compat_uint_t),
		.get = s390_system_call_get,
		.set = s390_system_call_set,
	},
	{
		.core_note_type = NT_S390_LAST_BREAK,
		.n = 1,
		.size = sizeof(long),
		.align = sizeof(long),
		.get = s390_compat_last_break_get,
		.set = s390_compat_last_break_set,
	},
	{
		.core_note_type = NT_S390_TDB,
		.n = 1,
		.size = 256,
		.align = 1,
		.get = s390_tdb_get,
		.set = s390_tdb_set,
	},
	{
		.core_note_type = NT_S390_VXRS_LOW,
		.n = __NUM_VXRS_LOW,
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.get = s390_vxrs_low_get,
		.set = s390_vxrs_low_set,
	},
	{
		.core_note_type = NT_S390_VXRS_HIGH,
		.n = __NUM_VXRS_HIGH,
		.size = sizeof(__vector128),
		.align = sizeof(__vector128),
		.get = s390_vxrs_high_get,
		.set = s390_vxrs_high_set,
	},
	{
		.core_note_type = NT_S390_HIGH_GPRS,
		.n = sizeof(s390_compat_regs_high) / sizeof(compat_long_t),
		.size = sizeof(compat_long_t),
		.align = sizeof(compat_long_t),
		.get = s390_compat_regs_high_get,
		.set = s390_compat_regs_high_set,
	},
	{
		.core_note_type = NT_S390_GS_CB,
		.n = sizeof(struct gs_cb) / sizeof(__u64),
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.get = s390_gs_cb_get,
		.set = s390_gs_cb_set,
	},
	{
		.core_note_type = NT_S390_GS_BC,
		.n = sizeof(struct gs_cb) / sizeof(__u64),
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.get = s390_gs_bc_get,
		.set = s390_gs_bc_set,
	},
	{
		.core_note_type = NT_S390_RI_CB,
		.n = sizeof(struct runtime_instr_cb) / sizeof(__u64),
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.get = s390_runtime_instr_get,
		.set = s390_runtime_instr_set,
	},
};

static const struct user_regset_view user_s390_compat_view = {
	.name = "s390",
	.e_machine = EM_S390,
	.regsets = s390_compat_regsets,
	.n = ARRAY_SIZE(s390_compat_regsets)
};
#endif

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
	if (test_tsk_thread_flag(task, TIF_31BIT))
		return &user_s390_compat_view;
#endif
	return &user_s390_view;
}
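
/*
 * The regset views above also back the generic PTRACE_GETREGSET and
 * PTRACE_SETREGSET requests and the ELF core dump notes. Illustrative
 * tracer sketch (userspace, not part of this file; s390_regs comes from
 * the uapi <asm/ptrace.h>, error handling omitted):
 *
 *	#include <elf.h>
 *	#include <sys/ptrace.h>
 *	#include <sys/types.h>
 *	#include <sys/uio.h>
 *	#include <asm/ptrace.h>
 *
 *	// Fetch psw, gprs, acrs and orig_gpr2 of a stopped tracee.
 *	void get_gregs(pid_t pid, s390_regs *regs)
 *	{
 *		struct iovec iov = {
 *			.iov_base = regs,
 *			.iov_len = sizeof(*regs),
 *		};
 *		ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);
 *	}
 */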

static const char *gpr_names[NUM_GPRS] = {
	"r0", "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
	"r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
};

unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset)
{
	if (offset >= NUM_GPRS)
		return 0;
	return regs->gprs[offset];
}

int regs_query_register_offset(const char *name)
{
	unsigned long offset;

	if (!name || *name != 'r')
		return -EINVAL;
	if (kstrtoul(name + 1, 10, &offset))
		return -EINVAL;
	if (offset >= NUM_GPRS)
		return -EINVAL;
	return offset;
}

const char *regs_query_register_name(unsigned int offset)
{
	if (offset >= NUM_GPRS)
		return NULL;
	return gpr_names[offset];
}
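
/*
 * regs_get_register(), regs_query_register_offset() and
 * regs_query_register_name() implement the generic register and stack
 * access API (HAVE_REGS_AND_STACK_ACCESS_API), used for instance by
 * kprobe based event tracing to resolve symbolic argument names such
 * as "r2" against pt_regs. In-kernel example:
 *
 *	int off = regs_query_register_offset("r2");	// off == 2
 *	unsigned long val = regs_get_register(regs, off);
 */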

static int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
	unsigned long ksp = kernel_stack_pointer(regs);

	return (addr & ~(THREAD_SIZE - 1)) == (ksp & ~(THREAD_SIZE - 1));
}

/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs: pt_regs which contains kernel stack pointer.
 * @n: stack entry number.
 *
 * regs_get_kernel_stack_nth() returns the @n-th entry of the kernel stack
 * which is specified by @regs. If the @n-th entry is NOT in the kernel
 * stack, this returns 0.
 */
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long addr;

	addr = kernel_stack_pointer(regs) + n * sizeof(long);
	if (!regs_within_kernel_stack(regs, addr))
		return 0;
	return *(unsigned long *)addr;
}