xref: /openbmc/linux/arch/s390/kernel/ptrace.c (revision 4464005a12b5c79e1a364e6272ee10a83413f928)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  Ptrace user space interface.
4  *
5  *    Copyright IBM Corp. 1999, 2010
6  *    Author(s): Denis Joseph Barrow
7  *               Martin Schwidefsky (schwidefsky@de.ibm.com)
8  */
9 
10 #include <linux/kernel.h>
11 #include <linux/sched.h>
12 #include <linux/sched/task_stack.h>
13 #include <linux/mm.h>
14 #include <linux/smp.h>
15 #include <linux/errno.h>
16 #include <linux/ptrace.h>
17 #include <linux/user.h>
18 #include <linux/security.h>
19 #include <linux/audit.h>
20 #include <linux/signal.h>
21 #include <linux/elf.h>
22 #include <linux/regset.h>
23 #include <linux/tracehook.h>
24 #include <linux/seccomp.h>
25 #include <linux/compat.h>
26 #include <trace/syscall.h>
27 #include <asm/page.h>
28 #include <asm/pgalloc.h>
29 #include <linux/uaccess.h>
30 #include <asm/unistd.h>
31 #include <asm/switch_to.h>
32 #include <asm/runtime_instr.h>
33 #include <asm/facility.h>
34 
35 #include "entry.h"
36 
37 #ifdef CONFIG_COMPAT
38 #include "compat_ptrace.h"
39 #endif
40 
41 #define CREATE_TRACE_POINTS
42 #include <trace/events/syscalls.h>
43 
/*
 * Synchronize control registers 0 and 2 and the PER control registers
 * 9-11 with the debugging state of @task (transactional execution
 * enablement, guarded storage, user specified PER set, single/block
 * stepping and the PER bit in the PSW).
 *
 * NOTE(review): this stores/loads the CPU's control registers directly,
 * so it appears to be meant for the task currently running on this
 * CPU - confirm with the callers.
 */
void update_cr_regs(struct task_struct *task)
{
	struct pt_regs *regs = task_pt_regs(task);
	struct thread_struct *thread = &task->thread;
	struct per_regs old, new;
	union ctlreg0 cr0_old, cr0_new;
	union ctlreg2 cr2_old, cr2_new;
	int cr0_changed, cr2_changed;

	/* Read the current contents of control registers 0 and 2. */
	__ctl_store(cr0_old.val, 0, 0);
	__ctl_store(cr2_old.val, 2, 2);
	cr0_new = cr0_old;
	cr2_new = cr2_old;
	/* Take care of the enable/disable of transactional execution. */
	if (MACHINE_HAS_TE) {
		/* Set or clear transaction execution TXC bit 8. */
		cr0_new.tcx = 1;
		if (task->thread.per_flags & PER_FLAG_NO_TE)
			cr0_new.tcx = 0;
		/* Set or clear transaction execution TDC bits 62 and 63. */
		cr2_new.tdc = 0;
		if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) {
			if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND_TEND)
				cr2_new.tdc = 1;
			else
				cr2_new.tdc = 2;
		}
	}
	/* Take care of enable/disable of guarded storage. */
	if (MACHINE_HAS_GS) {
		cr2_new.gse = 0;
		if (task->thread.gs_cb)
			cr2_new.gse = 1;
	}
	/* Load control register 0/2 iff changed */
	cr0_changed = cr0_new.val != cr0_old.val;
	cr2_changed = cr2_new.val != cr2_old.val;
	if (cr0_changed)
		__ctl_load(cr0_new.val, 0, 0);
	if (cr2_changed)
		__ctl_load(cr2_new.val, 2, 2);
	/* Copy user specified PER registers */
	new.control = thread->per_user.control;
	new.start = thread->per_user.start;
	new.end = thread->per_user.end;

	/* merge TIF_SINGLE_STEP into user specified PER registers. */
	if (test_tsk_thread_flag(task, TIF_SINGLE_STEP) ||
	    test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP)) {
		/* Block stepping traps on branches, single stepping on
		 * every instruction fetch. */
		if (test_tsk_thread_flag(task, TIF_BLOCK_STEP))
			new.control |= PER_EVENT_BRANCH;
		else
			new.control |= PER_EVENT_IFETCH;
		new.control |= PER_CONTROL_SUSPENSION;
		new.control |= PER_EVENT_TRANSACTION_END;
		if (test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP))
			new.control |= PER_EVENT_IFETCH;
		/* Stepping covers the whole address space. */
		new.start = 0;
		new.end = -1UL;
	}

	/* Take care of the PER enablement bit in the PSW. */
	if (!(new.control & PER_EVENT_MASK)) {
		/* No PER event requested: switch PER off entirely. */
		regs->psw.mask &= ~PSW_MASK_PER;
		return;
	}
	regs->psw.mask |= PSW_MASK_PER;
	/* Load PER control registers 9-11 only if they changed. */
	__ctl_store(old, 9, 11);
	if (memcmp(&new, &old, sizeof(struct per_regs)) != 0)
		__ctl_load(new, 9, 11);
}
115 
/*
 * Request instruction single stepping for @task.  TIF_SINGLE_STEP is
 * merged into the PER control registers by update_cr_regs(); with
 * TIF_BLOCK_STEP cleared this selects PER_EVENT_IFETCH stepping.
 */
void user_enable_single_step(struct task_struct *task)
{
	clear_tsk_thread_flag(task, TIF_BLOCK_STEP);
	set_tsk_thread_flag(task, TIF_SINGLE_STEP);
}
121 
/*
 * Disable both single and block stepping for @task; without
 * TIF_SINGLE_STEP the user specified PER set (if any) is used
 * unmodified by update_cr_regs().
 */
void user_disable_single_step(struct task_struct *task)
{
	clear_tsk_thread_flag(task, TIF_BLOCK_STEP);
	clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
}
127 
/*
 * Request block stepping (trap on taken branches rather than on every
 * instruction) for @task.  Both flags are needed: update_cr_regs()
 * only evaluates TIF_BLOCK_STEP when TIF_SINGLE_STEP is set.
 */
void user_enable_block_step(struct task_struct *task)
{
	set_tsk_thread_flag(task, TIF_SINGLE_STEP);
	set_tsk_thread_flag(task, TIF_BLOCK_STEP);
}
133 
134 /*
135  * Called by kernel/ptrace.c when detaching..
136  *
137  * Clear all debugging related fields.
138  */
/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Clear all debugging related fields.
 */
void ptrace_disable(struct task_struct *task)
{
	/* Forget the user specified PER set and the last PER event. */
	memset(&task->thread.per_user, 0, sizeof(task->thread.per_user));
	memset(&task->thread.per_event, 0, sizeof(task->thread.per_event));
	clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
	clear_pt_regs_flag(task_pt_regs(task), PIF_PER_TRAP);
	/* Also drops PER_FLAG_NO_TE and the TE abort-random flags. */
	task->thread.per_flags = 0;
}
147 
148 #define __ADDR_MASK 7
149 
/*
 * Read one word at offset @addr from the PER area of the user struct.
 * The NULL "dummy" pointer is used only to compute member offsets of
 * struct per_struct_kernel; it is never dereferenced.  While single
 * stepping is active the cr9-11/bits fields report the stepping
 * configuration instead of the user specified PER set.
 */
static inline unsigned long __peek_user_per(struct task_struct *child,
					    addr_t addr)
{
	struct per_struct_kernel *dummy = NULL;

	if (addr == (addr_t) &dummy->cr9)
		/* Control bits of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			PER_EVENT_IFETCH : child->thread.per_user.control;
	else if (addr == (addr_t) &dummy->cr10)
		/* Start address of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			0 : child->thread.per_user.start;
	else if (addr == (addr_t) &dummy->cr11)
		/* End address of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			-1UL : child->thread.per_user.end;
	else if (addr == (addr_t) &dummy->bits)
		/* Single-step bit. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			(1UL << (BITS_PER_LONG - 1)) : 0;
	else if (addr == (addr_t) &dummy->starting_addr)
		/* Start address of the user specified per set. */
		return child->thread.per_user.start;
	else if (addr == (addr_t) &dummy->ending_addr)
		/* End address of the user specified per set. */
		return child->thread.per_user.end;
	else if (addr == (addr_t) &dummy->perc_atmid)
		/* PER code, ATMID and AI of the last PER trap */
		return (unsigned long)
			child->thread.per_event.cause << (BITS_PER_LONG - 16);
	else if (addr == (addr_t) &dummy->address)
		/* Address of the last PER trap */
		return child->thread.per_event.address;
	else if (addr == (addr_t) &dummy->access_id)
		/* Access id of the last PER trap */
		return (unsigned long)
			child->thread.per_event.paid << (BITS_PER_LONG - 8);
	/* All other offsets (padding) read as zero. */
	return 0;
}
190 
191 /*
192  * Read the word at offset addr from the user area of a process. The
193  * trouble here is that the information is littered over different
194  * locations. The process registers are found on the kernel stack,
195  * the floating point stuff and the trace settings are stored in
196  * the task structure. In addition the different structures in
197  * struct user contain pad bytes that should be read as zeroes.
198  * Lovely...
199  */
/*
 * Read the word at offset addr from the user area of a process. The
 * trouble here is that the information is littered over different
 * locations. The process registers are found on the kernel stack,
 * the floating point stuff and the trace settings are stored in
 * the task structure. In addition the different structures in
 * struct user contain pad bytes that should be read as zeroes.
 * Lovely...
 *
 * The NULL "dummy" pointer only serves to compute struct user member
 * offsets; it is never dereferenced.  @addr must be word aligned and
 * range checked by the caller (see peek_user()).
 */
static unsigned long __peek_user(struct task_struct *child, addr_t addr)
{
	struct user *dummy = NULL;
	addr_t offset, tmp;

	if (addr < (addr_t) &dummy->regs.acrs) {
		/*
		 * psw and gprs are stored on the stack
		 */
		tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr);
		if (addr == (addr_t) &dummy->regs.psw.mask) {
			/* Return a clean psw mask. */
			tmp &= PSW_MASK_USER | PSW_MASK_RI;
			tmp |= PSW_USER_BITS;
		}

	} else if (addr < (addr_t) &dummy->regs.orig_gpr2) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy->regs.acrs;
		/*
		 * Very special case: old & broken 64 bit gdb reading
		 * from acrs[15]. Result is a 64 bit value. Read the
		 * 32 bit acrs[15] value and shift it by 32. Sick...
		 */
		if (addr == (addr_t) &dummy->regs.acrs[15])
			tmp = ((unsigned long) child->thread.acrs[15]) << 32;
		else
			tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset);

	} else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		tmp = (addr_t) task_pt_regs(child)->orig_gpr2;

	} else if (addr < (addr_t) &dummy->regs.fp_regs) {
		/*
		 * prevent reads of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		tmp = 0;

	} else if (addr == (addr_t) &dummy->regs.fp_regs.fpc) {
		/*
		 * floating point control reg. is in the thread structure
		 */
		tmp = child->thread.fpu.fpc;
		/* fpc occupies the upper 32 bits of the returned word. */
		tmp <<= BITS_PER_LONG - 32;

	} else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are either in child->thread.fpu
		 * or the child->thread.fpu.vxrs array
		 */
		offset = addr - (addr_t) &dummy->regs.fp_regs.fprs;
		if (MACHINE_HAS_VX)
			/* fprs overlay the high halves of vxrs 0-15,
			 * hence the doubled offset. */
			tmp = *(addr_t *)
			       ((addr_t) child->thread.fpu.vxrs + 2*offset);
		else
			tmp = *(addr_t *)
			       ((addr_t) child->thread.fpu.fprs + offset);

	} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy->regs.per_info;
		tmp = __peek_user_per(child, addr);

	} else
		tmp = 0;

	return tmp;
}
276 
277 static int
278 peek_user(struct task_struct *child, addr_t addr, addr_t data)
279 {
280 	addr_t tmp, mask;
281 
282 	/*
283 	 * Stupid gdb peeks/pokes the access registers in 64 bit with
284 	 * an alignment of 4. Programmers from hell...
285 	 */
286 	mask = __ADDR_MASK;
287 	if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
288 	    addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
289 		mask = 3;
290 	if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
291 		return -EIO;
292 
293 	tmp = __peek_user(child, addr);
294 	return put_user(tmp, (addr_t __user *) data);
295 }
296 
/*
 * Write @data to offset @addr of the PER area of the user struct.
 * The NULL "dummy" pointer only serves to compute member offsets and
 * is never dereferenced.
 */
static inline void __poke_user_per(struct task_struct *child,
				   addr_t addr, addr_t data)
{
	struct per_struct_kernel *dummy = NULL;

	/*
	 * There are only three fields in the per_info struct that the
	 * debugger user can write to.
	 * 1) cr9: the debugger wants to set a new PER event mask
	 * 2) starting_addr: the debugger wants to set a new starting
	 *    address to use with the PER event mask.
	 * 3) ending_addr: the debugger wants to set a new ending
	 *    address to use with the PER event mask.
	 * The user specified PER event mask and the start and end
	 * addresses are used only if single stepping is not in effect.
	 * Writes to any other field in per_info are ignored.
	 */
	if (addr == (addr_t) &dummy->cr9)
		/* PER event mask of the user specified per set. */
		child->thread.per_user.control =
			data & (PER_EVENT_MASK | PER_CONTROL_MASK);
	else if (addr == (addr_t) &dummy->starting_addr)
		/* Starting address of the user specified per set. */
		child->thread.per_user.start = data;
	else if (addr == (addr_t) &dummy->ending_addr)
		/* Ending address of the user specified per set. */
		child->thread.per_user.end = data;
}
325 
326 /*
327  * Write a word to the user area of a process at location addr. This
328  * operation does have an additional problem compared to peek_user.
329  * Stores to the program status word and on the floating point
330  * control register needs to get checked for validity.
331  */
/*
 * Write a word to the user area of a process at location addr. This
 * operation does have an additional problem compared to peek_user.
 * Stores to the program status word and on the floating point
 * control register needs to get checked for validity.
 *
 * Returns 0 on success (including silently ignored writes to padding),
 * -EINVAL for an invalid psw mask or floating point control value.
 * The NULL "dummy" pointer only serves to compute member offsets.
 */
static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
{
	struct user *dummy = NULL;
	addr_t offset;

	if (addr < (addr_t) &dummy->regs.acrs) {
		/*
		 * psw and gprs are stored on the stack
		 */
		if (addr == (addr_t) &dummy->regs.psw.mask) {
			unsigned long mask = PSW_MASK_USER;

			mask |= is_ri_task(child) ? PSW_MASK_RI : 0;
			if ((data ^ PSW_USER_BITS) & ~mask)
				/* Invalid psw mask. */
				return -EINVAL;
			if ((data & PSW_MASK_ASC) == PSW_ASC_HOME)
				/* Invalid address-space-control bits */
				return -EINVAL;
			if ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA))
				/* Invalid addressing mode bits */
				return -EINVAL;
		}
		*(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data;

	} else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy->regs.acrs;
		/*
		 * Very special case: old & broken 64 bit gdb writing
		 * to acrs[15] with a 64 bit value. Ignore the lower
		 * half of the value and write the upper 32 bit to
		 * acrs[15]. Sick...
		 */
		if (addr == (addr_t) &dummy->regs.acrs[15])
			child->thread.acrs[15] = (unsigned int) (data >> 32);
		else
			*(addr_t *)((addr_t) &child->thread.acrs + offset) = data;

	} else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		task_pt_regs(child)->orig_gpr2 = data;

	} else if (addr < (addr_t) &dummy->regs.fp_regs) {
		/*
		 * prevent writes of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		return 0;

	} else if (addr == (addr_t) &dummy->regs.fp_regs.fpc) {
		/*
		 * floating point control reg. is in the thread structure
		 */
		/* fpc lives in the upper 32 bits; the lower half must be 0. */
		if ((unsigned int) data != 0 ||
		    test_fp_ctl(data >> (BITS_PER_LONG - 32)))
			return -EINVAL;
		child->thread.fpu.fpc = data >> (BITS_PER_LONG - 32);

	} else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are either in child->thread.fpu
		 * or the child->thread.fpu.vxrs array
		 */
		offset = addr - (addr_t) &dummy->regs.fp_regs.fprs;
		if (MACHINE_HAS_VX)
			/* fprs overlay the high halves of vxrs 0-15,
			 * hence the doubled offset. */
			*(addr_t *)((addr_t)
				child->thread.fpu.vxrs + 2*offset) = data;
		else
			*(addr_t *)((addr_t)
				child->thread.fpu.fprs + offset) = data;

	} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy->regs.per_info;
		__poke_user_per(child, addr, data);

	}

	return 0;
}
419 
420 static int poke_user(struct task_struct *child, addr_t addr, addr_t data)
421 {
422 	addr_t mask;
423 
424 	/*
425 	 * Stupid gdb peeks/pokes the access registers in 64 bit with
426 	 * an alignment of 4. Programmers from hell indeed...
427 	 */
428 	mask = __ADDR_MASK;
429 	if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
430 	    addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
431 		mask = 3;
432 	if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
433 		return -EIO;
434 
435 	return __poke_user(child, addr, data);
436 }
437 
438 long arch_ptrace(struct task_struct *child, long request,
439 		 unsigned long addr, unsigned long data)
440 {
441 	ptrace_area parea;
442 	int copied, ret;
443 
444 	switch (request) {
445 	case PTRACE_PEEKUSR:
446 		/* read the word at location addr in the USER area. */
447 		return peek_user(child, addr, data);
448 
449 	case PTRACE_POKEUSR:
450 		/* write the word at location addr in the USER area */
451 		return poke_user(child, addr, data);
452 
453 	case PTRACE_PEEKUSR_AREA:
454 	case PTRACE_POKEUSR_AREA:
455 		if (copy_from_user(&parea, (void __force __user *) addr,
456 							sizeof(parea)))
457 			return -EFAULT;
458 		addr = parea.kernel_addr;
459 		data = parea.process_addr;
460 		copied = 0;
461 		while (copied < parea.len) {
462 			if (request == PTRACE_PEEKUSR_AREA)
463 				ret = peek_user(child, addr, data);
464 			else {
465 				addr_t utmp;
466 				if (get_user(utmp,
467 					     (addr_t __force __user *) data))
468 					return -EFAULT;
469 				ret = poke_user(child, addr, utmp);
470 			}
471 			if (ret)
472 				return ret;
473 			addr += sizeof(unsigned long);
474 			data += sizeof(unsigned long);
475 			copied += sizeof(unsigned long);
476 		}
477 		return 0;
478 	case PTRACE_GET_LAST_BREAK:
479 		put_user(child->thread.last_break,
480 			 (unsigned long __user *) data);
481 		return 0;
482 	case PTRACE_ENABLE_TE:
483 		if (!MACHINE_HAS_TE)
484 			return -EIO;
485 		child->thread.per_flags &= ~PER_FLAG_NO_TE;
486 		return 0;
487 	case PTRACE_DISABLE_TE:
488 		if (!MACHINE_HAS_TE)
489 			return -EIO;
490 		child->thread.per_flags |= PER_FLAG_NO_TE;
491 		child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND;
492 		return 0;
493 	case PTRACE_TE_ABORT_RAND:
494 		if (!MACHINE_HAS_TE || (child->thread.per_flags & PER_FLAG_NO_TE))
495 			return -EIO;
496 		switch (data) {
497 		case 0UL:
498 			child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND;
499 			break;
500 		case 1UL:
501 			child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND;
502 			child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND_TEND;
503 			break;
504 		case 2UL:
505 			child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND;
506 			child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND_TEND;
507 			break;
508 		default:
509 			return -EINVAL;
510 		}
511 		return 0;
512 	default:
513 		return ptrace_request(child, request, addr, data);
514 	}
515 }
516 
517 #ifdef CONFIG_COMPAT
518 /*
519  * Now the fun part starts... a 31 bit program running in the
520  * 31 bit emulation tracing another program. PTRACE_PEEKTEXT,
521  * PTRACE_PEEKDATA, PTRACE_POKETEXT and PTRACE_POKEDATA are easy
522  * to handle, the difference to the 64 bit versions of the requests
523  * is that the access is done in multiples of 4 byte instead of
524  * 8 bytes (sizeof(unsigned long) on 31/64 bit).
525  * The ugly part are PTRACE_PEEKUSR, PTRACE_PEEKUSR_AREA,
526  * PTRACE_POKEUSR and PTRACE_POKEUSR_AREA. If the traced program
527  * is a 31 bit program too, the content of struct user can be
528  * emulated. A 31 bit program peeking into the struct user of
529  * a 64 bit program is a no-no.
530  */
531 
532 /*
533  * Same as peek_user_per but for a 31 bit program.
534  */
/*
 * Same as peek_user_per but for a 31 bit program.
 *
 * @addr is an offset into the 31 bit compat_per_struct_kernel layout;
 * the NULL "dummy32" pointer only serves to compute member offsets.
 */
static inline __u32 __peek_user_per_compat(struct task_struct *child,
					   addr_t addr)
{
	struct compat_per_struct_kernel *dummy32 = NULL;

	if (addr == (addr_t) &dummy32->cr9)
		/* Control bits of the active per set. */
		return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
			PER_EVENT_IFETCH : child->thread.per_user.control;
	else if (addr == (addr_t) &dummy32->cr10)
		/* Start address of the active per set. */
		return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
			0 : child->thread.per_user.start;
	else if (addr == (addr_t) &dummy32->cr11)
		/* End address of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			PSW32_ADDR_INSN : child->thread.per_user.end;
	else if (addr == (addr_t) &dummy32->bits)
		/* Single-step bit. */
		return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
			0x80000000 : 0;
	else if (addr == (addr_t) &dummy32->starting_addr)
		/* Start address of the user specified per set. */
		return (__u32) child->thread.per_user.start;
	else if (addr == (addr_t) &dummy32->ending_addr)
		/* End address of the user specified per set. */
		return (__u32) child->thread.per_user.end;
	else if (addr == (addr_t) &dummy32->perc_atmid)
		/* PER code, ATMID and AI of the last PER trap */
		return (__u32) child->thread.per_event.cause << 16;
	else if (addr == (addr_t) &dummy32->address)
		/* Address of the last PER trap */
		return (__u32) child->thread.per_event.address;
	else if (addr == (addr_t) &dummy32->access_id)
		/* Access id of the last PER trap */
		return (__u32) child->thread.per_event.paid << 24;
	/* All other offsets (padding) read as zero. */
	return 0;
}
573 
574 /*
575  * Same as peek_user but for a 31 bit program.
576  */
/*
 * Same as peek_user but for a 31 bit program.
 *
 * @addr is an offset into the 31 bit compat_user layout; the NULL
 * "dummy32" pointer only serves to compute member offsets.  The 64 bit
 * psw and registers of the tracee are presented in 31 bit format.
 */
static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
{
	struct compat_user *dummy32 = NULL;
	addr_t offset;
	__u32 tmp;

	if (addr < (addr_t) &dummy32->regs.acrs) {
		struct pt_regs *regs = task_pt_regs(child);
		/*
		 * psw and gprs are stored on the stack
		 */
		if (addr == (addr_t) &dummy32->regs.psw.mask) {
			/* Fake a 31 bit psw mask. */
			tmp = (__u32)(regs->psw.mask >> 32);
			tmp &= PSW32_MASK_USER | PSW32_MASK_RI;
			tmp |= PSW32_USER_BITS;
		} else if (addr == (addr_t) &dummy32->regs.psw.addr) {
			/* Fake a 31 bit psw address. */
			tmp = (__u32) regs->psw.addr |
				(__u32)(regs->psw.mask & PSW_MASK_BA);
		} else {
			/* gpr 0-15 */
			/* Each 32 bit compat slot maps to the low half of
			 * the corresponding 64 bit gpr, hence addr*2+4. */
			tmp = *(__u32 *)((addr_t) &regs->psw + addr*2 + 4);
		}
	} else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy32->regs.acrs;
		tmp = *(__u32*)((addr_t) &child->thread.acrs + offset);

	} else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		tmp = *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4);

	} else if (addr < (addr_t) &dummy32->regs.fp_regs) {
		/*
		 * prevent reads of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		tmp = 0;

	} else if (addr == (addr_t) &dummy32->regs.fp_regs.fpc) {
		/*
		 * floating point control reg. is in the thread structure
		 */
		tmp = child->thread.fpu.fpc;

	} else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are either in child->thread.fpu
		 * or the child->thread.fpu.vxrs array
		 */
		offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs;
		if (MACHINE_HAS_VX)
			/* fprs overlay the high halves of vxrs 0-15,
			 * hence the doubled offset. */
			tmp = *(__u32 *)
			       ((addr_t) child->thread.fpu.vxrs + 2*offset);
		else
			tmp = *(__u32 *)
			       ((addr_t) child->thread.fpu.fprs + offset);

	} else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy32->regs.per_info;
		tmp = __peek_user_per_compat(child, addr);

	} else
		tmp = 0;

	return tmp;
}
652 
653 static int peek_user_compat(struct task_struct *child,
654 			    addr_t addr, addr_t data)
655 {
656 	__u32 tmp;
657 
658 	if (!is_compat_task() || (addr & 3) || addr > sizeof(struct user) - 3)
659 		return -EIO;
660 
661 	tmp = __peek_user_compat(child, addr);
662 	return put_user(tmp, (__u32 __user *) data);
663 }
664 
665 /*
666  * Same as poke_user_per but for a 31 bit program.
667  */
/*
 * Same as poke_user_per but for a 31 bit program.
 *
 * Only cr9 (PER event mask), starting_addr and ending_addr are
 * writable; writes to any other offset are silently ignored.  The
 * NULL "dummy32" pointer only serves to compute member offsets.
 */
static inline void __poke_user_per_compat(struct task_struct *child,
					  addr_t addr, __u32 data)
{
	struct compat_per_struct_kernel *dummy32 = NULL;

	if (addr == (addr_t) &dummy32->cr9)
		/* PER event mask of the user specified per set. */
		child->thread.per_user.control =
			data & (PER_EVENT_MASK | PER_CONTROL_MASK);
	else if (addr == (addr_t) &dummy32->starting_addr)
		/* Starting address of the user specified per set. */
		child->thread.per_user.start = data;
	else if (addr == (addr_t) &dummy32->ending_addr)
		/* Ending address of the user specified per set. */
		child->thread.per_user.end = data;
}
684 
685 /*
686  * Same as poke_user but for a 31 bit program.
687  */
/*
 * Same as poke_user but for a 31 bit program.
 *
 * Translates the 31 bit value into the 64 bit representation where
 * needed (psw mask/address, gprs).  Returns 0 on success (including
 * ignored writes to padding), -EINVAL for an invalid psw mask or
 * floating point control value.
 */
static int __poke_user_compat(struct task_struct *child,
			      addr_t addr, addr_t data)
{
	struct compat_user *dummy32 = NULL;
	__u32 tmp = (__u32) data;
	addr_t offset;

	if (addr < (addr_t) &dummy32->regs.acrs) {
		struct pt_regs *regs = task_pt_regs(child);
		/*
		 * psw, gprs, acrs and orig_gpr2 are stored on the stack
		 */
		if (addr == (addr_t) &dummy32->regs.psw.mask) {
			__u32 mask = PSW32_MASK_USER;

			mask |= is_ri_task(child) ? PSW32_MASK_RI : 0;
			/* Build a 64 bit psw mask from 31 bit mask. */
			if ((tmp ^ PSW32_USER_BITS) & ~mask)
				/* Invalid psw mask. */
				return -EINVAL;
			if ((data & PSW32_MASK_ASC) == PSW32_ASC_HOME)
				/* Invalid address-space-control bits */
				return -EINVAL;
			regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
				(regs->psw.mask & PSW_MASK_BA) |
				(__u64)(tmp & mask) << 32;
		} else if (addr == (addr_t) &dummy32->regs.psw.addr) {
			/* Build a 64 bit psw address from 31 bit address. */
			regs->psw.addr = (__u64) tmp & PSW32_ADDR_INSN;
			/* Transfer 31 bit amode bit to psw mask. */
			regs->psw.mask = (regs->psw.mask & ~PSW_MASK_BA) |
				(__u64)(tmp & PSW32_ADDR_AMODE);
		} else {
			/* gpr 0-15 */
			/* Each 32 bit compat slot maps to the low half of
			 * the corresponding 64 bit gpr, hence addr*2+4. */
			*(__u32*)((addr_t) &regs->psw + addr*2 + 4) = tmp;
		}
	} else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy32->regs.acrs;
		*(__u32*)((addr_t) &child->thread.acrs + offset) = tmp;

	} else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		*(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4) = tmp;

	} else if (addr < (addr_t) &dummy32->regs.fp_regs) {
		/*
		 * prevent writes of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		return 0;

	} else if (addr == (addr_t) &dummy32->regs.fp_regs.fpc) {
		/*
		 * floating point control reg. is in the thread structure
		 */
		if (test_fp_ctl(tmp))
			return -EINVAL;
		child->thread.fpu.fpc = data;

	} else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are either in child->thread.fpu
		 * or the child->thread.fpu.vxrs array
		 */
		offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs;
		if (MACHINE_HAS_VX)
			/* fprs overlay the high halves of vxrs 0-15,
			 * hence the doubled offset. */
			*(__u32 *)((addr_t)
				child->thread.fpu.vxrs + 2*offset) = tmp;
		else
			*(__u32 *)((addr_t)
				child->thread.fpu.fprs + offset) = tmp;

	} else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy32->regs.per_info;
		__poke_user_per_compat(child, addr, data);
	}

	return 0;
}
775 
776 static int poke_user_compat(struct task_struct *child,
777 			    addr_t addr, addr_t data)
778 {
779 	if (!is_compat_task() || (addr & 3) ||
780 	    addr > sizeof(struct compat_user) - 3)
781 		return -EIO;
782 
783 	return __poke_user_compat(child, addr, data);
784 }
785 
786 long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
787 			compat_ulong_t caddr, compat_ulong_t cdata)
788 {
789 	unsigned long addr = caddr;
790 	unsigned long data = cdata;
791 	compat_ptrace_area parea;
792 	int copied, ret;
793 
794 	switch (request) {
795 	case PTRACE_PEEKUSR:
796 		/* read the word at location addr in the USER area. */
797 		return peek_user_compat(child, addr, data);
798 
799 	case PTRACE_POKEUSR:
800 		/* write the word at location addr in the USER area */
801 		return poke_user_compat(child, addr, data);
802 
803 	case PTRACE_PEEKUSR_AREA:
804 	case PTRACE_POKEUSR_AREA:
805 		if (copy_from_user(&parea, (void __force __user *) addr,
806 							sizeof(parea)))
807 			return -EFAULT;
808 		addr = parea.kernel_addr;
809 		data = parea.process_addr;
810 		copied = 0;
811 		while (copied < parea.len) {
812 			if (request == PTRACE_PEEKUSR_AREA)
813 				ret = peek_user_compat(child, addr, data);
814 			else {
815 				__u32 utmp;
816 				if (get_user(utmp,
817 					     (__u32 __force __user *) data))
818 					return -EFAULT;
819 				ret = poke_user_compat(child, addr, utmp);
820 			}
821 			if (ret)
822 				return ret;
823 			addr += sizeof(unsigned int);
824 			data += sizeof(unsigned int);
825 			copied += sizeof(unsigned int);
826 		}
827 		return 0;
828 	case PTRACE_GET_LAST_BREAK:
829 		put_user(child->thread.last_break,
830 			 (unsigned int __user *) data);
831 		return 0;
832 	}
833 	return compat_ptrace_request(child, request, addr, data);
834 }
835 #endif
836 
/*
 * Syscall entry tracing hook: runs ptrace reporting, seccomp, the
 * syscall-enter tracepoint and audit, in that order.  Returns the
 * syscall number to execute, or -1 to skip the syscall.
 */
asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
{
	unsigned long mask = -1UL;

	/*
	 * The sysc_tracesys code in entry.S stored the system
	 * call number to gprs[2].
	 */
	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
	    (tracehook_report_syscall_entry(regs) ||
	     regs->gprs[2] >= NR_syscalls)) {
		/*
		 * Tracing decided this syscall should not happen or the
		 * debugger stored an invalid system call number. Skip
		 * the system call and the system call restart handling.
		 */
		clear_pt_regs_flag(regs, PIF_SYSCALL);
		return -1;
	}

	/* Do the secure computing check after ptrace. */
	if (secure_computing()) {
		/* seccomp failures shouldn't expose any additional code. */
		return -1;
	}

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->gprs[2]);

	/* Compat tasks pass 31 bit arguments: audit only the low word. */
	if (is_compat_task())
		mask = 0xffffffff;

	audit_syscall_entry(regs->gprs[2], regs->orig_gpr2 & mask,
			    regs->gprs[3] &mask, regs->gprs[4] &mask,
			    regs->gprs[5] &mask);

	/* gprs[2] may have been rewritten by the tracer above. */
	return regs->gprs[2];
}
875 
/*
 * Syscall exit tracing hook: audit first, then the syscall-exit
 * tracepoint, then ptrace exit reporting.
 */
asmlinkage void do_syscall_trace_exit(struct pt_regs *regs)
{
	audit_syscall_exit(regs);

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		/* gprs[2] holds the syscall return value at this point. */
		trace_sys_exit(regs, regs->gprs[2]);

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, 0);
}
886 
887 /*
888  * user_regset definitions.
889  */
890 
891 static int s390_regs_get(struct task_struct *target,
892 			 const struct user_regset *regset,
893 			 unsigned int pos, unsigned int count,
894 			 void *kbuf, void __user *ubuf)
895 {
896 	if (target == current)
897 		save_access_regs(target->thread.acrs);
898 
899 	if (kbuf) {
900 		unsigned long *k = kbuf;
901 		while (count > 0) {
902 			*k++ = __peek_user(target, pos);
903 			count -= sizeof(*k);
904 			pos += sizeof(*k);
905 		}
906 	} else {
907 		unsigned long __user *u = ubuf;
908 		while (count > 0) {
909 			if (__put_user(__peek_user(target, pos), u++))
910 				return -EFAULT;
911 			count -= sizeof(*u);
912 			pos += sizeof(*u);
913 		}
914 	}
915 	return 0;
916 }
917 
918 static int s390_regs_set(struct task_struct *target,
919 			 const struct user_regset *regset,
920 			 unsigned int pos, unsigned int count,
921 			 const void *kbuf, const void __user *ubuf)
922 {
923 	int rc = 0;
924 
925 	if (target == current)
926 		save_access_regs(target->thread.acrs);
927 
928 	if (kbuf) {
929 		const unsigned long *k = kbuf;
930 		while (count > 0 && !rc) {
931 			rc = __poke_user(target, pos, *k++);
932 			count -= sizeof(*k);
933 			pos += sizeof(*k);
934 		}
935 	} else {
936 		const unsigned long  __user *u = ubuf;
937 		while (count > 0 && !rc) {
938 			unsigned long word;
939 			rc = __get_user(word, u++);
940 			if (rc)
941 				break;
942 			rc = __poke_user(target, pos, word);
943 			count -= sizeof(*u);
944 			pos += sizeof(*u);
945 		}
946 	}
947 
948 	if (rc == 0 && target == current)
949 		restore_access_regs(target->thread.acrs);
950 
951 	return rc;
952 }
953 
/*
 * regset "get" handler for the floating point registers: assembles an
 * _s390_fp_regs image from the thread's FPU state and copies it out.
 */
static int s390_fpregs_get(struct task_struct *target,
			   const struct user_regset *regset, unsigned int pos,
			   unsigned int count, void *kbuf, void __user *ubuf)
{
	_s390_fp_regs fp_regs;

	/* Flush the live FPU state into thread.fpu for the current task. */
	if (target == current)
		save_fpu_regs();

	fp_regs.fpc = target->thread.fpu.fpc;
	/* NOTE(review): fpregs_store presumably fills fp_regs.fprs from
	 * thread.fpu (converting from vxrs when needed) - confirm. */
	fpregs_store(&fp_regs, &target->thread.fpu);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &fp_regs, 0, -1);
}
969 
/*
 * regset "set" handler for the floating point registers: validates and
 * stores the fpc first, then copies the fprs into either the fprs
 * array or (with the vector facility) back into the vxrs array.
 * Returns -EINVAL for a bad fpc value, otherwise the copyin result.
 */
static int s390_fpregs_set(struct task_struct *target,
			   const struct user_regset *regset, unsigned int pos,
			   unsigned int count, const void *kbuf,
			   const void __user *ubuf)
{
	int rc = 0;
	freg_t fprs[__NUM_FPRS];

	/* Flush the live FPU state into thread.fpu for the current task. */
	if (target == current)
		save_fpu_regs();

	/* Start from the current register contents so that a partial
	 * write leaves the untouched registers unchanged. */
	if (MACHINE_HAS_VX)
		convert_vx_to_fp(fprs, target->thread.fpu.vxrs);
	else
		memcpy(&fprs, target->thread.fpu.fprs, sizeof(fprs));

	/* If setting FPC, must validate it first. */
	if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
		u32 ufpc[2] = { target->thread.fpu.fpc, 0 };
		rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ufpc,
					0, offsetof(s390_fp_regs, fprs));
		if (rc)
			return rc;
		/* The pad word after fpc must be zero. */
		if (ufpc[1] != 0 || test_fp_ctl(ufpc[0]))
			return -EINVAL;
		target->thread.fpu.fpc = ufpc[0];
	}

	if (rc == 0 && count > 0)
		rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					fprs, offsetof(s390_fp_regs, fprs), -1);
	if (rc)
		return rc;

	/* Write the merged register image back to the thread. */
	if (MACHINE_HAS_VX)
		convert_fp_to_vx(target->thread.fpu.vxrs, fprs);
	else
		memcpy(target->thread.fpu.fprs, &fprs, sizeof(fprs));

	return rc;
}
1011 
1012 static int s390_last_break_get(struct task_struct *target,
1013 			       const struct user_regset *regset,
1014 			       unsigned int pos, unsigned int count,
1015 			       void *kbuf, void __user *ubuf)
1016 {
1017 	if (count > 0) {
1018 		if (kbuf) {
1019 			unsigned long *k = kbuf;
1020 			*k = target->thread.last_break;
1021 		} else {
1022 			unsigned long  __user *u = ubuf;
1023 			if (__put_user(target->thread.last_break, u))
1024 				return -EFAULT;
1025 		}
1026 	}
1027 	return 0;
1028 }
1029 
1030 static int s390_last_break_set(struct task_struct *target,
1031 			       const struct user_regset *regset,
1032 			       unsigned int pos, unsigned int count,
1033 			       const void *kbuf, const void __user *ubuf)
1034 {
1035 	return 0;
1036 }
1037 
1038 static int s390_tdb_get(struct task_struct *target,
1039 			const struct user_regset *regset,
1040 			unsigned int pos, unsigned int count,
1041 			void *kbuf, void __user *ubuf)
1042 {
1043 	struct pt_regs *regs = task_pt_regs(target);
1044 	unsigned char *data;
1045 
1046 	if (!(regs->int_code & 0x200))
1047 		return -ENODATA;
1048 	data = target->thread.trap_tdb;
1049 	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, data, 0, 256);
1050 }
1051 
1052 static int s390_tdb_set(struct task_struct *target,
1053 			const struct user_regset *regset,
1054 			unsigned int pos, unsigned int count,
1055 			const void *kbuf, const void __user *ubuf)
1056 {
1057 	return 0;
1058 }
1059 
1060 static int s390_vxrs_low_get(struct task_struct *target,
1061 			     const struct user_regset *regset,
1062 			     unsigned int pos, unsigned int count,
1063 			     void *kbuf, void __user *ubuf)
1064 {
1065 	__u64 vxrs[__NUM_VXRS_LOW];
1066 	int i;
1067 
1068 	if (!MACHINE_HAS_VX)
1069 		return -ENODEV;
1070 	if (target == current)
1071 		save_fpu_regs();
1072 	for (i = 0; i < __NUM_VXRS_LOW; i++)
1073 		vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1);
1074 	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
1075 }
1076 
1077 static int s390_vxrs_low_set(struct task_struct *target,
1078 			     const struct user_regset *regset,
1079 			     unsigned int pos, unsigned int count,
1080 			     const void *kbuf, const void __user *ubuf)
1081 {
1082 	__u64 vxrs[__NUM_VXRS_LOW];
1083 	int i, rc;
1084 
1085 	if (!MACHINE_HAS_VX)
1086 		return -ENODEV;
1087 	if (target == current)
1088 		save_fpu_regs();
1089 
1090 	for (i = 0; i < __NUM_VXRS_LOW; i++)
1091 		vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1);
1092 
1093 	rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
1094 	if (rc == 0)
1095 		for (i = 0; i < __NUM_VXRS_LOW; i++)
1096 			*((__u64 *)(target->thread.fpu.vxrs + i) + 1) = vxrs[i];
1097 
1098 	return rc;
1099 }
1100 
1101 static int s390_vxrs_high_get(struct task_struct *target,
1102 			      const struct user_regset *regset,
1103 			      unsigned int pos, unsigned int count,
1104 			      void *kbuf, void __user *ubuf)
1105 {
1106 	__vector128 vxrs[__NUM_VXRS_HIGH];
1107 
1108 	if (!MACHINE_HAS_VX)
1109 		return -ENODEV;
1110 	if (target == current)
1111 		save_fpu_regs();
1112 	memcpy(vxrs, target->thread.fpu.vxrs + __NUM_VXRS_LOW, sizeof(vxrs));
1113 
1114 	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
1115 }
1116 
1117 static int s390_vxrs_high_set(struct task_struct *target,
1118 			      const struct user_regset *regset,
1119 			      unsigned int pos, unsigned int count,
1120 			      const void *kbuf, const void __user *ubuf)
1121 {
1122 	int rc;
1123 
1124 	if (!MACHINE_HAS_VX)
1125 		return -ENODEV;
1126 	if (target == current)
1127 		save_fpu_regs();
1128 
1129 	rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1130 				target->thread.fpu.vxrs + __NUM_VXRS_LOW, 0, -1);
1131 	return rc;
1132 }
1133 
1134 static int s390_system_call_get(struct task_struct *target,
1135 				const struct user_regset *regset,
1136 				unsigned int pos, unsigned int count,
1137 				void *kbuf, void __user *ubuf)
1138 {
1139 	unsigned int *data = &target->thread.system_call;
1140 	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1141 				   data, 0, sizeof(unsigned int));
1142 }
1143 
1144 static int s390_system_call_set(struct task_struct *target,
1145 				const struct user_regset *regset,
1146 				unsigned int pos, unsigned int count,
1147 				const void *kbuf, const void __user *ubuf)
1148 {
1149 	unsigned int *data = &target->thread.system_call;
1150 	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1151 				  data, 0, sizeof(unsigned int));
1152 }
1153 
1154 static int s390_gs_cb_get(struct task_struct *target,
1155 			  const struct user_regset *regset,
1156 			  unsigned int pos, unsigned int count,
1157 			  void *kbuf, void __user *ubuf)
1158 {
1159 	struct gs_cb *data = target->thread.gs_cb;
1160 
1161 	if (!MACHINE_HAS_GS)
1162 		return -ENODEV;
1163 	if (!data)
1164 		return -ENODATA;
1165 	if (target == current)
1166 		save_gs_cb(data);
1167 	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1168 				   data, 0, sizeof(struct gs_cb));
1169 }
1170 
static int s390_gs_cb_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct gs_cb gs_cb = { }, *data = NULL;
	int rc;

	if (!MACHINE_HAS_GS)
		return -ENODEV;
	/* Pre-allocate a control block if the task does not have one yet. */
	if (!target->thread.gs_cb) {
		data = kzalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;
	}
	/*
	 * Build the base image the (possibly partial) write is merged
	 * into: a fresh block with gsd = 25, the saved CPU state for
	 * the current task, or the stored block of another task.
	 */
	if (!target->thread.gs_cb)
		gs_cb.gsd = 25;
	else if (target == current)
		save_gs_cb(&gs_cb);
	else
		gs_cb = *target->thread.gs_cb;
	rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				&gs_cb, 0, sizeof(gs_cb));
	if (rc) {
		kfree(data);
		return -EFAULT;
	}
	/* Install the merged block and, for the current task, activate it. */
	preempt_disable();
	if (!target->thread.gs_cb)
		target->thread.gs_cb = data;
	*target->thread.gs_cb = gs_cb;
	if (target == current) {
		__ctl_set_bit(2, 4);
		restore_gs_cb(target->thread.gs_cb);
	}
	preempt_enable();
	return rc;
}
1209 
1210 static int s390_gs_bc_get(struct task_struct *target,
1211 			  const struct user_regset *regset,
1212 			  unsigned int pos, unsigned int count,
1213 			  void *kbuf, void __user *ubuf)
1214 {
1215 	struct gs_cb *data = target->thread.gs_bc_cb;
1216 
1217 	if (!MACHINE_HAS_GS)
1218 		return -ENODEV;
1219 	if (!data)
1220 		return -ENODATA;
1221 	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1222 				   data, 0, sizeof(struct gs_cb));
1223 }
1224 
1225 static int s390_gs_bc_set(struct task_struct *target,
1226 			  const struct user_regset *regset,
1227 			  unsigned int pos, unsigned int count,
1228 			  const void *kbuf, const void __user *ubuf)
1229 {
1230 	struct gs_cb *data = target->thread.gs_bc_cb;
1231 
1232 	if (!MACHINE_HAS_GS)
1233 		return -ENODEV;
1234 	if (!data) {
1235 		data = kzalloc(sizeof(*data), GFP_KERNEL);
1236 		if (!data)
1237 			return -ENOMEM;
1238 		target->thread.gs_bc_cb = data;
1239 	}
1240 	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1241 				  data, 0, sizeof(struct gs_cb));
1242 }
1243 
1244 static bool is_ri_cb_valid(struct runtime_instr_cb *cb)
1245 {
1246 	return (cb->rca & 0x1f) == 0 &&
1247 		(cb->roa & 0xfff) == 0 &&
1248 		(cb->rla & 0xfff) == 0xfff &&
1249 		cb->s == 1 &&
1250 		cb->k == 1 &&
1251 		cb->h == 0 &&
1252 		cb->reserved1 == 0 &&
1253 		cb->ps == 1 &&
1254 		cb->qs == 0 &&
1255 		cb->pc == 1 &&
1256 		cb->qc == 0 &&
1257 		cb->reserved2 == 0 &&
1258 		cb->key == PAGE_DEFAULT_KEY &&
1259 		cb->reserved3 == 0 &&
1260 		cb->reserved4 == 0 &&
1261 		cb->reserved5 == 0 &&
1262 		cb->reserved6 == 0 &&
1263 		cb->reserved7 == 0 &&
1264 		cb->reserved8 == 0 &&
1265 		cb->rla >= cb->roa &&
1266 		cb->rca >= cb->roa &&
1267 		cb->rca <= cb->rla+1 &&
1268 		cb->m < 3;
1269 }
1270 
1271 static int s390_runtime_instr_get(struct task_struct *target,
1272 				const struct user_regset *regset,
1273 				unsigned int pos, unsigned int count,
1274 				void *kbuf, void __user *ubuf)
1275 {
1276 	struct runtime_instr_cb *data = target->thread.ri_cb;
1277 
1278 	if (!test_facility(64))
1279 		return -ENODEV;
1280 	if (!data)
1281 		return -ENODATA;
1282 
1283 	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1284 				   data, 0, sizeof(struct runtime_instr_cb));
1285 }
1286 
static int s390_runtime_instr_set(struct task_struct *target,
				  const struct user_regset *regset,
				  unsigned int pos, unsigned int count,
				  const void *kbuf, const void __user *ubuf)
{
	struct runtime_instr_cb ri_cb = { }, *data = NULL;
	int rc;

	if (!test_facility(64))
		return -ENODEV;

	/* Pre-allocate a control block if the task does not have one yet. */
	if (!target->thread.ri_cb) {
		data = kzalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;
	}

	/*
	 * Seed from the task's current control block (freshly stored
	 * for the current task) so a partial write merges into it.
	 */
	if (target->thread.ri_cb) {
		if (target == current)
			store_runtime_instr_cb(&ri_cb);
		else
			ri_cb = *target->thread.ri_cb;
	}

	rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				&ri_cb, 0, sizeof(struct runtime_instr_cb));
	if (rc) {
		kfree(data);
		return -EFAULT;
	}

	/* Reject inconsistent control block contents. */
	if (!is_ri_cb_valid(&ri_cb)) {
		kfree(data);
		return -EINVAL;
	}

	/* Install the block and, for the current task, load it. */
	preempt_disable();
	if (!target->thread.ri_cb)
		target->thread.ri_cb = data;
	*target->thread.ri_cb = ri_cb;
	if (target == current)
		load_runtime_instr_cb(target->thread.ri_cb);
	preempt_enable();

	return 0;
}
1333 
/* Register sets exported through the 64-bit user_regset view below. */
static const struct user_regset s390_regsets[] = {
	{
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(s390_regs) / sizeof(long),
		.size = sizeof(long),
		.align = sizeof(long),
		.get = s390_regs_get,
		.set = s390_regs_set,
	},
	{
		.core_note_type = NT_PRFPREG,
		.n = sizeof(s390_fp_regs) / sizeof(long),
		.size = sizeof(long),
		.align = sizeof(long),
		.get = s390_fpregs_get,
		.set = s390_fpregs_set,
	},
	{
		.core_note_type = NT_S390_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(unsigned int),
		.align = sizeof(unsigned int),
		.get = s390_system_call_get,
		.set = s390_system_call_set,
	},
	{
		.core_note_type = NT_S390_LAST_BREAK,
		.n = 1,
		.size = sizeof(long),
		.align = sizeof(long),
		.get = s390_last_break_get,
		.set = s390_last_break_set,
	},
	{
		/* Transaction diagnostic block, a single 256-byte entry. */
		.core_note_type = NT_S390_TDB,
		.n = 1,
		.size = 256,
		.align = 1,
		.get = s390_tdb_get,
		.set = s390_tdb_set,
	},
	{
		.core_note_type = NT_S390_VXRS_LOW,
		.n = __NUM_VXRS_LOW,
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.get = s390_vxrs_low_get,
		.set = s390_vxrs_low_set,
	},
	{
		.core_note_type = NT_S390_VXRS_HIGH,
		.n = __NUM_VXRS_HIGH,
		.size = sizeof(__vector128),
		.align = sizeof(__vector128),
		.get = s390_vxrs_high_get,
		.set = s390_vxrs_high_set,
	},
	{
		.core_note_type = NT_S390_GS_CB,
		.n = sizeof(struct gs_cb) / sizeof(__u64),
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.get = s390_gs_cb_get,
		.set = s390_gs_cb_set,
	},
	{
		.core_note_type = NT_S390_GS_BC,
		.n = sizeof(struct gs_cb) / sizeof(__u64),
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.get = s390_gs_bc_get,
		.set = s390_gs_bc_set,
	},
	{
		.core_note_type = NT_S390_RI_CB,
		.n = sizeof(struct runtime_instr_cb) / sizeof(__u64),
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.get = s390_runtime_instr_get,
		.set = s390_runtime_instr_set,
	},
};
1416 
/* Regset view for 64-bit ("s390x") tasks. */
static const struct user_regset_view user_s390_view = {
	.name = "s390x",
	.e_machine = EM_S390,
	.regsets = s390_regsets,
	.n = ARRAY_SIZE(s390_regsets)
};
1423 
1424 #ifdef CONFIG_COMPAT
1425 static int s390_compat_regs_get(struct task_struct *target,
1426 				const struct user_regset *regset,
1427 				unsigned int pos, unsigned int count,
1428 				void *kbuf, void __user *ubuf)
1429 {
1430 	if (target == current)
1431 		save_access_regs(target->thread.acrs);
1432 
1433 	if (kbuf) {
1434 		compat_ulong_t *k = kbuf;
1435 		while (count > 0) {
1436 			*k++ = __peek_user_compat(target, pos);
1437 			count -= sizeof(*k);
1438 			pos += sizeof(*k);
1439 		}
1440 	} else {
1441 		compat_ulong_t __user *u = ubuf;
1442 		while (count > 0) {
1443 			if (__put_user(__peek_user_compat(target, pos), u++))
1444 				return -EFAULT;
1445 			count -= sizeof(*u);
1446 			pos += sizeof(*u);
1447 		}
1448 	}
1449 	return 0;
1450 }
1451 
1452 static int s390_compat_regs_set(struct task_struct *target,
1453 				const struct user_regset *regset,
1454 				unsigned int pos, unsigned int count,
1455 				const void *kbuf, const void __user *ubuf)
1456 {
1457 	int rc = 0;
1458 
1459 	if (target == current)
1460 		save_access_regs(target->thread.acrs);
1461 
1462 	if (kbuf) {
1463 		const compat_ulong_t *k = kbuf;
1464 		while (count > 0 && !rc) {
1465 			rc = __poke_user_compat(target, pos, *k++);
1466 			count -= sizeof(*k);
1467 			pos += sizeof(*k);
1468 		}
1469 	} else {
1470 		const compat_ulong_t  __user *u = ubuf;
1471 		while (count > 0 && !rc) {
1472 			compat_ulong_t word;
1473 			rc = __get_user(word, u++);
1474 			if (rc)
1475 				break;
1476 			rc = __poke_user_compat(target, pos, word);
1477 			count -= sizeof(*u);
1478 			pos += sizeof(*u);
1479 		}
1480 	}
1481 
1482 	if (rc == 0 && target == current)
1483 		restore_access_regs(target->thread.acrs);
1484 
1485 	return rc;
1486 }
1487 
1488 static int s390_compat_regs_high_get(struct task_struct *target,
1489 				     const struct user_regset *regset,
1490 				     unsigned int pos, unsigned int count,
1491 				     void *kbuf, void __user *ubuf)
1492 {
1493 	compat_ulong_t *gprs_high;
1494 
1495 	gprs_high = (compat_ulong_t *)
1496 		&task_pt_regs(target)->gprs[pos / sizeof(compat_ulong_t)];
1497 	if (kbuf) {
1498 		compat_ulong_t *k = kbuf;
1499 		while (count > 0) {
1500 			*k++ = *gprs_high;
1501 			gprs_high += 2;
1502 			count -= sizeof(*k);
1503 		}
1504 	} else {
1505 		compat_ulong_t __user *u = ubuf;
1506 		while (count > 0) {
1507 			if (__put_user(*gprs_high, u++))
1508 				return -EFAULT;
1509 			gprs_high += 2;
1510 			count -= sizeof(*u);
1511 		}
1512 	}
1513 	return 0;
1514 }
1515 
1516 static int s390_compat_regs_high_set(struct task_struct *target,
1517 				     const struct user_regset *regset,
1518 				     unsigned int pos, unsigned int count,
1519 				     const void *kbuf, const void __user *ubuf)
1520 {
1521 	compat_ulong_t *gprs_high;
1522 	int rc = 0;
1523 
1524 	gprs_high = (compat_ulong_t *)
1525 		&task_pt_regs(target)->gprs[pos / sizeof(compat_ulong_t)];
1526 	if (kbuf) {
1527 		const compat_ulong_t *k = kbuf;
1528 		while (count > 0) {
1529 			*gprs_high = *k++;
1530 			*gprs_high += 2;
1531 			count -= sizeof(*k);
1532 		}
1533 	} else {
1534 		const compat_ulong_t  __user *u = ubuf;
1535 		while (count > 0 && !rc) {
1536 			unsigned long word;
1537 			rc = __get_user(word, u++);
1538 			if (rc)
1539 				break;
1540 			*gprs_high = word;
1541 			*gprs_high += 2;
1542 			count -= sizeof(*u);
1543 		}
1544 	}
1545 
1546 	return rc;
1547 }
1548 
1549 static int s390_compat_last_break_get(struct task_struct *target,
1550 				      const struct user_regset *regset,
1551 				      unsigned int pos, unsigned int count,
1552 				      void *kbuf, void __user *ubuf)
1553 {
1554 	compat_ulong_t last_break;
1555 
1556 	if (count > 0) {
1557 		last_break = target->thread.last_break;
1558 		if (kbuf) {
1559 			unsigned long *k = kbuf;
1560 			*k = last_break;
1561 		} else {
1562 			unsigned long  __user *u = ubuf;
1563 			if (__put_user(last_break, u))
1564 				return -EFAULT;
1565 		}
1566 	}
1567 	return 0;
1568 }
1569 
1570 static int s390_compat_last_break_set(struct task_struct *target,
1571 				      const struct user_regset *regset,
1572 				      unsigned int pos, unsigned int count,
1573 				      const void *kbuf, const void __user *ubuf)
1574 {
1575 	return 0;
1576 }
1577 
/* Register sets exported through the 31-bit compat user_regset view below. */
static const struct user_regset s390_compat_regsets[] = {
	{
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(s390_compat_regs) / sizeof(compat_long_t),
		.size = sizeof(compat_long_t),
		.align = sizeof(compat_long_t),
		.get = s390_compat_regs_get,
		.set = s390_compat_regs_set,
	},
	{
		.core_note_type = NT_PRFPREG,
		.n = sizeof(s390_fp_regs) / sizeof(compat_long_t),
		.size = sizeof(compat_long_t),
		.align = sizeof(compat_long_t),
		.get = s390_fpregs_get,
		.set = s390_fpregs_set,
	},
	{
		.core_note_type = NT_S390_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(compat_uint_t),
		.align = sizeof(compat_uint_t),
		.get = s390_system_call_get,
		.set = s390_system_call_set,
	},
	{
		.core_note_type = NT_S390_LAST_BREAK,
		.n = 1,
		.size = sizeof(long),
		.align = sizeof(long),
		.get = s390_compat_last_break_get,
		.set = s390_compat_last_break_set,
	},
	{
		/* Transaction diagnostic block, a single 256-byte entry. */
		.core_note_type = NT_S390_TDB,
		.n = 1,
		.size = 256,
		.align = 1,
		.get = s390_tdb_get,
		.set = s390_tdb_set,
	},
	{
		.core_note_type = NT_S390_VXRS_LOW,
		.n = __NUM_VXRS_LOW,
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.get = s390_vxrs_low_get,
		.set = s390_vxrs_low_set,
	},
	{
		.core_note_type = NT_S390_VXRS_HIGH,
		.n = __NUM_VXRS_HIGH,
		.size = sizeof(__vector128),
		.align = sizeof(__vector128),
		.get = s390_vxrs_high_get,
		.set = s390_vxrs_high_set,
	},
	{
		/* Upper halves of the 64-bit gprs, only visible in compat. */
		.core_note_type = NT_S390_HIGH_GPRS,
		.n = sizeof(s390_compat_regs_high) / sizeof(compat_long_t),
		.size = sizeof(compat_long_t),
		.align = sizeof(compat_long_t),
		.get = s390_compat_regs_high_get,
		.set = s390_compat_regs_high_set,
	},
	{
		.core_note_type = NT_S390_GS_CB,
		.n = sizeof(struct gs_cb) / sizeof(__u64),
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.get = s390_gs_cb_get,
		.set = s390_gs_cb_set,
	},
	{
		.core_note_type = NT_S390_GS_BC,
		.n = sizeof(struct gs_cb) / sizeof(__u64),
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.get = s390_gs_bc_get,
		.set = s390_gs_bc_set,
	},
	{
		.core_note_type = NT_S390_RI_CB,
		.n = sizeof(struct runtime_instr_cb) / sizeof(__u64),
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.get = s390_runtime_instr_get,
		.set = s390_runtime_instr_set,
	},
};
1668 
/* Regset view for 31-bit ("s390") compat tasks. */
static const struct user_regset_view user_s390_compat_view = {
	.name = "s390",
	.e_machine = EM_S390,
	.regsets = s390_compat_regsets,
	.n = ARRAY_SIZE(s390_compat_regsets)
};
1675 #endif
1676 
/* Select the regset view matching the task's addressing mode. */
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
	/* 31-bit tasks get the compat regset layout. */
	if (test_tsk_thread_flag(task, TIF_31BIT))
		return &user_s390_compat_view;
#endif
	return &user_s390_view;
}
1685 
/* Symbolic names of the general purpose registers, indexed by number. */
static const char *gpr_names[NUM_GPRS] = {
	"r0", "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
	"r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
};
1690 
1691 unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset)
1692 {
1693 	if (offset >= NUM_GPRS)
1694 		return 0;
1695 	return regs->gprs[offset];
1696 }
1697 
1698 int regs_query_register_offset(const char *name)
1699 {
1700 	unsigned long offset;
1701 
1702 	if (!name || *name != 'r')
1703 		return -EINVAL;
1704 	if (kstrtoul(name + 1, 10, &offset))
1705 		return -EINVAL;
1706 	if (offset >= NUM_GPRS)
1707 		return -EINVAL;
1708 	return offset;
1709 }
1710 
1711 const char *regs_query_register_name(unsigned int offset)
1712 {
1713 	if (offset >= NUM_GPRS)
1714 		return NULL;
1715 	return gpr_names[offset];
1716 }
1717 
1718 static int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
1719 {
1720 	unsigned long ksp = kernel_stack_pointer(regs);
1721 
1722 	return (addr & ~(THREAD_SIZE - 1)) == (ksp & ~(THREAD_SIZE - 1));
1723 }
1724 
1725 /**
1726  * regs_get_kernel_stack_nth() - get Nth entry of the stack
1727  * @regs:pt_regs which contains kernel stack pointer.
1728  * @n:stack entry number.
1729  *
1730  * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
1731  * is specifined by @regs. If the @n th entry is NOT in the kernel stack,
1732  * this returns 0.
1733  */
1734 unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
1735 {
1736 	unsigned long addr;
1737 
1738 	addr = kernel_stack_pointer(regs) + n * sizeof(long);
1739 	if (!regs_within_kernel_stack(regs, addr))
1740 		return 0;
1741 	return *(unsigned long *)addr;
1742 }
1743