// SPDX-License-Identifier: GPL-2.0
/*
 *  Ptrace user space interface.
 *
 *    Copyright IBM Corp. 1999, 2010
 *    Author(s): Denis Joseph Barrow
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <asm/ptrace.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/elf.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <linux/seccomp.h>
#include <linux/compat.h>
#include <trace/syscall.h>
#include <asm/page.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/switch_to.h>
#include <asm/runtime_instr.h>
#include <asm/facility.h>

#include "entry.h"

#ifdef CONFIG_COMPAT
#include "compat_ptrace.h"
#endif

void update_cr_regs(struct task_struct *task)
{
	struct pt_regs *regs = task_pt_regs(task);
	struct thread_struct *thread = &task->thread;
	struct per_regs old, new;
	union ctlreg0 cr0_old, cr0_new;
	union ctlreg2 cr2_old, cr2_new;
	int cr0_changed, cr2_changed;

	__ctl_store(cr0_old.val, 0, 0);
	__ctl_store(cr2_old.val, 2, 2);
	cr0_new = cr0_old;
	cr2_new = cr2_old;
	/* Take care of the enable/disable of transactional execution. */
	if (MACHINE_HAS_TE) {
		/* Set or clear transaction execution TXC bit 8. */
		cr0_new.tcx = 1;
		if (task->thread.per_flags & PER_FLAG_NO_TE)
			cr0_new.tcx = 0;
		/* Set or clear transaction execution TDC bits 62 and 63. */
		cr2_new.tdc = 0;
		if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) {
			if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND_TEND)
				cr2_new.tdc = 1;
			else
				cr2_new.tdc = 2;
		}
	}
	/* Take care of enable/disable of guarded storage. */
	if (MACHINE_HAS_GS) {
		cr2_new.gse = 0;
		if (task->thread.gs_cb)
			cr2_new.gse = 1;
	}
	/* Load control register 0/2 iff changed */
	cr0_changed = cr0_new.val != cr0_old.val;
	cr2_changed = cr2_new.val != cr2_old.val;
	if (cr0_changed)
		__ctl_load(cr0_new.val, 0, 0);
	if (cr2_changed)
		__ctl_load(cr2_new.val, 2, 2);
	/* Copy user specified PER registers */
	new.control = thread->per_user.control;
	new.start = thread->per_user.start;
	new.end = thread->per_user.end;

	/* merge TIF_SINGLE_STEP into user specified PER registers. */
	if (test_tsk_thread_flag(task, TIF_SINGLE_STEP) ||
	    test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP)) {
		if (test_tsk_thread_flag(task, TIF_BLOCK_STEP))
			new.control |= PER_EVENT_BRANCH;
		else
			new.control |= PER_EVENT_IFETCH;
		new.control |= PER_CONTROL_SUSPENSION;
		new.control |= PER_EVENT_TRANSACTION_END;
		if (test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP))
			new.control |= PER_EVENT_IFETCH;
		new.start = 0;
		new.end = -1UL;
	}

	/* Take care of the PER enablement bit in the PSW. */
	if (!(new.control & PER_EVENT_MASK)) {
		regs->psw.mask &= ~PSW_MASK_PER;
		return;
	}
	regs->psw.mask |= PSW_MASK_PER;
	__ctl_store(old, 9, 11);
	if (memcmp(&new, &old, sizeof(struct per_regs)) != 0)
		__ctl_load(new, 9, 11);
}

void user_enable_single_step(struct task_struct *task)
{
	clear_tsk_thread_flag(task, TIF_BLOCK_STEP);
	set_tsk_thread_flag(task, TIF_SINGLE_STEP);
}

void user_disable_single_step(struct task_struct *task)
{
	clear_tsk_thread_flag(task, TIF_BLOCK_STEP);
	clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
}

void user_enable_block_step(struct task_struct *task)
{
	set_tsk_thread_flag(task, TIF_SINGLE_STEP);
	set_tsk_thread_flag(task, TIF_BLOCK_STEP);
}
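
/*
 * Illustrative sketch (editor's addition, not kernel code): a tracer
 * reaches the helpers above through the generic PTRACE_SINGLESTEP and,
 * on s390, PTRACE_SINGLEBLOCK requests. Minimal user space usage,
 * error handling omitted:
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/wait.h>
 *
 *	ptrace(PTRACE_SINGLESTEP, pid, 0, 0);	// one insn, then SIGTRAP
 *	waitpid(pid, &status, 0);
 *	ptrace(PTRACE_SINGLEBLOCK, pid, 0, 0);	// run to next taken branch
 *	waitpid(pid, &status, 0);
 */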

/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Clear all debugging related fields.
 */
void ptrace_disable(struct task_struct *task)
{
	memset(&task->thread.per_user, 0, sizeof(task->thread.per_user));
	memset(&task->thread.per_event, 0, sizeof(task->thread.per_event));
	clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
	clear_tsk_thread_flag(task, TIF_PER_TRAP);
	task->thread.per_flags = 0;
}

#define __ADDR_MASK 7

static inline unsigned long __peek_user_per(struct task_struct *child,
					    addr_t addr)
{
	if (addr == offsetof(struct per_struct_kernel, cr9))
		/* Control bits of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			PER_EVENT_IFETCH : child->thread.per_user.control;
	else if (addr == offsetof(struct per_struct_kernel, cr10))
		/* Start address of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			0 : child->thread.per_user.start;
	else if (addr == offsetof(struct per_struct_kernel, cr11))
		/* End address of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			-1UL : child->thread.per_user.end;
	else if (addr == offsetof(struct per_struct_kernel, bits))
		/* Single-step bit. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			(1UL << (BITS_PER_LONG - 1)) : 0;
	else if (addr == offsetof(struct per_struct_kernel, starting_addr))
		/* Start address of the user specified per set. */
		return child->thread.per_user.start;
	else if (addr == offsetof(struct per_struct_kernel, ending_addr))
		/* End address of the user specified per set. */
		return child->thread.per_user.end;
	else if (addr == offsetof(struct per_struct_kernel, perc_atmid))
		/* PER code, ATMID and AI of the last PER trap */
		return (unsigned long)
			child->thread.per_event.cause << (BITS_PER_LONG - 16);
	else if (addr == offsetof(struct per_struct_kernel, address))
		/* Address of the last PER trap */
		return child->thread.per_event.address;
	else if (addr == offsetof(struct per_struct_kernel, access_id))
		/* Access id of the last PER trap */
		return (unsigned long)
			child->thread.per_event.paid << (BITS_PER_LONG - 8);
	return 0;
}

/*
 * Read the word at offset addr from the user area of a process. The
 * trouble here is that the information is littered over different
 * locations. The process registers are found on the kernel stack,
 * the floating point stuff and the trace settings are stored in
 * the task structure. In addition the different structures in
 * struct user contain pad bytes that should be read as zeroes.
 * Lovely...
 */
static unsigned long __peek_user(struct task_struct *child, addr_t addr)
{
	addr_t offset, tmp;

	if (addr < offsetof(struct user, regs.acrs)) {
		/*
		 * psw and gprs are stored on the stack
		 */
		tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr);
		if (addr == offsetof(struct user, regs.psw.mask)) {
			/* Return a clean psw mask. */
			tmp &= PSW_MASK_USER | PSW_MASK_RI;
			tmp |= PSW_USER_BITS;
		}

	} else if (addr < offsetof(struct user, regs.orig_gpr2)) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - offsetof(struct user, regs.acrs);
		/*
		 * Very special case: old & broken 64 bit gdb reading
		 * from acrs[15]. Result is a 64 bit value. Read the
		 * 32 bit acrs[15] value and shift it by 32. Sick...
		 */
		if (addr == offsetof(struct user, regs.acrs[15]))
			tmp = ((unsigned long) child->thread.acrs[15]) << 32;
		else
			tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset);

	} else if (addr == offsetof(struct user, regs.orig_gpr2)) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		tmp = (addr_t) task_pt_regs(child)->orig_gpr2;

	} else if (addr < offsetof(struct user, regs.fp_regs)) {
		/*
		 * prevent reads of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		tmp = 0;

	} else if (addr == offsetof(struct user, regs.fp_regs.fpc)) {
		/*
		 * floating point control reg. is in the thread structure
		 */
		tmp = child->thread.fpu.fpc;
		tmp <<= BITS_PER_LONG - 32;

	} else if (addr < offsetof(struct user, regs.fp_regs) + sizeof(s390_fp_regs)) {
		/*
		 * floating point regs. are either in child->thread.fpu
		 * or the child->thread.fpu.vxrs array
		 */
		offset = addr - offsetof(struct user, regs.fp_regs.fprs);
		if (MACHINE_HAS_VX)
			tmp = *(addr_t *)
			       ((addr_t) child->thread.fpu.vxrs + 2*offset);
		else
			tmp = *(addr_t *)
			       ((addr_t) child->thread.fpu.fprs + offset);

	} else if (addr < offsetof(struct user, regs.per_info) + sizeof(per_struct)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= offsetof(struct user, regs.per_info);
		tmp = __peek_user_per(child, addr);

	} else
		tmp = 0;

	return tmp;
}

static int
peek_user(struct task_struct *child, addr_t addr, addr_t data)
{
	addr_t tmp, mask;

	/*
	 * Stupid gdb peeks/pokes the access registers in 64 bit with
	 * an alignment of 4. Programmers from hell...
	 */
	mask = __ADDR_MASK;
	if (addr >= offsetof(struct user, regs.acrs) &&
	    addr < offsetof(struct user, regs.orig_gpr2))
		mask = 3;
	if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
		return -EIO;

	tmp = __peek_user(child, addr);
	return put_user(tmp, (addr_t __user *) data);
}
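
/*
 * Illustrative sketch (editor's addition, not kernel code): a debugger
 * reads single words through PTRACE_PEEKUSR, with offsets taken from
 * struct user. Minimal user space usage, no error handling:
 *
 *	#include <stddef.h>
 *	#include <sys/ptrace.h>
 *
 *	long mask = ptrace(PTRACE_PEEKUSR, pid,
 *			   offsetof(struct user, regs.psw.mask), 0);
 *	long gpr2 = ptrace(PTRACE_PEEKUSR, pid,
 *			   offsetof(struct user, regs.gprs[2]), 0);
 *
 * The psw mask comes back "cleaned" as done in __peek_user() above.
 */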

static inline void __poke_user_per(struct task_struct *child,
				   addr_t addr, addr_t data)
{
	/*
	 * There are only three fields in the per_info struct that the
	 * debugger user can write to.
	 * 1) cr9: the debugger wants to set a new PER event mask
	 * 2) starting_addr: the debugger wants to set a new starting
	 *    address to use with the PER event mask.
	 * 3) ending_addr: the debugger wants to set a new ending
	 *    address to use with the PER event mask.
	 * The user specified PER event mask and the start and end
	 * addresses are used only if single stepping is not in effect.
	 * Writes to any other field in per_info are ignored.
	 */
	if (addr == offsetof(struct per_struct_kernel, cr9))
		/* PER event mask of the user specified per set. */
		child->thread.per_user.control =
			data & (PER_EVENT_MASK | PER_CONTROL_MASK);
	else if (addr == offsetof(struct per_struct_kernel, starting_addr))
		/* Starting address of the user specified per set. */
		child->thread.per_user.start = data;
	else if (addr == offsetof(struct per_struct_kernel, ending_addr))
		/* Ending address of the user specified per set. */
		child->thread.per_user.end = data;
}
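
/*
 * Illustrative sketch (editor's addition, not kernel code): a debugger
 * arms a PER range, e.g. a trap on stores into a buffer, by poking
 * exactly the three fields accepted above. Offsets are relative to
 * regs.per_info in struct user; this assumes the uapi per_struct layout
 * mirrors struct per_struct_kernel (control regs first) and that
 * PER_EVENT_STORE from the kernel-internal <asm/ptrace.h> is used:
 *
 *	addr_t per = offsetof(struct user, regs.per_info);
 *
 *	ptrace(PTRACE_POKEUSR, pid, per, PER_EVENT_STORE);	// cr9
 *	ptrace(PTRACE_POKEUSR, pid,
 *	       per + offsetof(per_struct, starting_addr), start);
 *	ptrace(PTRACE_POKEUSR, pid,
 *	       per + offsetof(per_struct, ending_addr), end);
 */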

/*
 * Write a word to the user area of a process at location addr. This
 * operation does have an additional problem compared to peek_user.
 * Stores to the program status word and to the floating point
 * control register need to be checked for validity.
 */
static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
{
	addr_t offset;

	if (addr < offsetof(struct user, regs.acrs)) {
		struct pt_regs *regs = task_pt_regs(child);
		/*
		 * psw and gprs are stored on the stack
		 */
		if (addr == offsetof(struct user, regs.psw.mask)) {
			unsigned long mask = PSW_MASK_USER;

			mask |= is_ri_task(child) ? PSW_MASK_RI : 0;
			if ((data ^ PSW_USER_BITS) & ~mask)
				/* Invalid psw mask. */
				return -EINVAL;
			if ((data & PSW_MASK_ASC) == PSW_ASC_HOME)
				/* Invalid address-space-control bits */
				return -EINVAL;
			if ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA))
				/* Invalid addressing mode bits */
				return -EINVAL;
		}

		if (test_pt_regs_flag(regs, PIF_SYSCALL) &&
			addr == offsetof(struct user, regs.gprs[2])) {
			struct pt_regs *regs = task_pt_regs(child);

			regs->int_code = 0x20000 | (data & 0xffff);
		}
		*(addr_t *)((addr_t) &regs->psw + addr) = data;
	} else if (addr < offsetof(struct user, regs.orig_gpr2)) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - offsetof(struct user, regs.acrs);
		/*
		 * Very special case: old & broken 64 bit gdb writing
		 * to acrs[15] with a 64 bit value. Ignore the lower
		 * half of the value and write the upper 32 bit to
		 * acrs[15]. Sick...
		 */
		if (addr == offsetof(struct user, regs.acrs[15]))
			child->thread.acrs[15] = (unsigned int) (data >> 32);
		else
			*(addr_t *)((addr_t) &child->thread.acrs + offset) = data;

	} else if (addr == offsetof(struct user, regs.orig_gpr2)) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		task_pt_regs(child)->orig_gpr2 = data;

	} else if (addr < offsetof(struct user, regs.fp_regs)) {
		/*
		 * prevent writes of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		return 0;

	} else if (addr == offsetof(struct user, regs.fp_regs.fpc)) {
		/*
		 * floating point control reg. is in the thread structure
		 */
		if ((unsigned int) data != 0 ||
		    test_fp_ctl(data >> (BITS_PER_LONG - 32)))
			return -EINVAL;
		child->thread.fpu.fpc = data >> (BITS_PER_LONG - 32);

	} else if (addr < offsetof(struct user, regs.fp_regs) + sizeof(s390_fp_regs)) {
		/*
		 * floating point regs. are either in child->thread.fpu
		 * or the child->thread.fpu.vxrs array
		 */
		offset = addr - offsetof(struct user, regs.fp_regs.fprs);
		if (MACHINE_HAS_VX)
			*(addr_t *)((addr_t)
				child->thread.fpu.vxrs + 2*offset) = data;
		else
			*(addr_t *)((addr_t)
				child->thread.fpu.fprs + offset) = data;

	} else if (addr < offsetof(struct user, regs.per_info) + sizeof(per_struct)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= offsetof(struct user, regs.per_info);
		__poke_user_per(child, addr, data);

	}

	return 0;
}

static int poke_user(struct task_struct *child, addr_t addr, addr_t data)
{
	addr_t mask;

	/*
	 * Stupid gdb peeks/pokes the access registers in 64 bit with
	 * an alignment of 4. Programmers from hell indeed...
	 */
	mask = __ADDR_MASK;
	if (addr >= offsetof(struct user, regs.acrs) &&
	    addr < offsetof(struct user, regs.orig_gpr2))
		mask = 3;
	if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
		return -EIO;

	return __poke_user(child, addr, data);
}

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	ptrace_area parea;
	int copied, ret;

	switch (request) {
	case PTRACE_PEEKUSR:
		/* read the word at location addr in the USER area. */
		return peek_user(child, addr, data);

	case PTRACE_POKEUSR:
		/* write the word at location addr in the USER area */
		return poke_user(child, addr, data);

	case PTRACE_PEEKUSR_AREA:
	case PTRACE_POKEUSR_AREA:
		if (copy_from_user(&parea, (void __force __user *) addr,
							sizeof(parea)))
			return -EFAULT;
		addr = parea.kernel_addr;
		data = parea.process_addr;
		copied = 0;
		while (copied < parea.len) {
			if (request == PTRACE_PEEKUSR_AREA)
				ret = peek_user(child, addr, data);
			else {
				addr_t utmp;
				if (get_user(utmp,
					     (addr_t __force __user *) data))
					return -EFAULT;
				ret = poke_user(child, addr, utmp);
			}
			if (ret)
				return ret;
			addr += sizeof(unsigned long);
			data += sizeof(unsigned long);
			copied += sizeof(unsigned long);
		}
		return 0;
	case PTRACE_GET_LAST_BREAK:
		put_user(child->thread.last_break,
			 (unsigned long __user *) data);
		return 0;
	case PTRACE_ENABLE_TE:
		if (!MACHINE_HAS_TE)
			return -EIO;
		child->thread.per_flags &= ~PER_FLAG_NO_TE;
		return 0;
	case PTRACE_DISABLE_TE:
		if (!MACHINE_HAS_TE)
			return -EIO;
		child->thread.per_flags |= PER_FLAG_NO_TE;
		child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND;
		return 0;
	case PTRACE_TE_ABORT_RAND:
		if (!MACHINE_HAS_TE || (child->thread.per_flags & PER_FLAG_NO_TE))
			return -EIO;
		switch (data) {
		case 0UL:
			child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND;
			break;
		case 1UL:
			child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND;
			child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND_TEND;
			break;
		case 2UL:
			child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND;
			child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND_TEND;
			break;
		default:
			return -EINVAL;
		}
		return 0;
	default:
		return ptrace_request(child, request, addr, data);
	}
}
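
/*
 * Illustrative sketch (editor's addition, not kernel code):
 * PTRACE_PEEKUSR_AREA transfers a whole block of the user area in one
 * call; the descriptor is read from the tracer's address space via the
 * addr argument, and the loop above moves one word at a time. Minimal
 * usage, e.g. to fetch all 16 gprs at once, no error handling:
 *
 *	#include <asm/ptrace.h>
 *
 *	unsigned long gprs[16];
 *	ptrace_area parea = {
 *		.len          = sizeof(gprs),
 *		.kernel_addr  = offsetof(struct user, regs.gprs),
 *		.process_addr = (unsigned long) gprs,
 *	};
 *
 *	ptrace(PTRACE_PEEKUSR_AREA, pid, (unsigned long) &parea, 0);
 *
 * Similarly, PTRACE_TE_ABORT_RAND takes data 0, 1 or 2 to select the
 * transaction abort randomization mode handled above.
 */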

#ifdef CONFIG_COMPAT
/*
 * Now the fun part starts... a 31 bit program running in the
 * 31 bit emulation tracing another program. PTRACE_PEEKTEXT,
 * PTRACE_PEEKDATA, PTRACE_POKETEXT and PTRACE_POKEDATA are easy
 * to handle, the difference to the 64 bit versions of the requests
 * is that the access is done in multiples of 4 bytes instead of
 * 8 bytes (sizeof(unsigned long) on 31/64 bit).
 * The ugly parts are PTRACE_PEEKUSR, PTRACE_PEEKUSR_AREA,
 * PTRACE_POKEUSR and PTRACE_POKEUSR_AREA. If the traced program
 * is a 31 bit program too, the content of struct user can be
 * emulated. A 31 bit program peeking into the struct user of
 * a 64 bit program is a no-no.
 */

/*
 * Same as peek_user_per but for a 31 bit program.
 */
static inline __u32 __peek_user_per_compat(struct task_struct *child,
					   addr_t addr)
{
	if (addr == offsetof(struct compat_per_struct_kernel, cr9))
		/* Control bits of the active per set. */
		return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
			PER_EVENT_IFETCH : child->thread.per_user.control;
	else if (addr == offsetof(struct compat_per_struct_kernel, cr10))
		/* Start address of the active per set. */
		return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
			0 : child->thread.per_user.start;
	else if (addr == offsetof(struct compat_per_struct_kernel, cr11))
		/* End address of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			PSW32_ADDR_INSN : child->thread.per_user.end;
	else if (addr == offsetof(struct compat_per_struct_kernel, bits))
		/* Single-step bit. */
		return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
			0x80000000 : 0;
	else if (addr == offsetof(struct compat_per_struct_kernel, starting_addr))
		/* Start address of the user specified per set. */
		return (__u32) child->thread.per_user.start;
	else if (addr == offsetof(struct compat_per_struct_kernel, ending_addr))
		/* End address of the user specified per set. */
		return (__u32) child->thread.per_user.end;
	else if (addr == offsetof(struct compat_per_struct_kernel, perc_atmid))
		/* PER code, ATMID and AI of the last PER trap */
		return (__u32) child->thread.per_event.cause << 16;
	else if (addr == offsetof(struct compat_per_struct_kernel, address))
		/* Address of the last PER trap */
		return (__u32) child->thread.per_event.address;
	else if (addr == offsetof(struct compat_per_struct_kernel, access_id))
		/* Access id of the last PER trap */
		return (__u32) child->thread.per_event.paid << 24;
	return 0;
}

/*
 * Same as peek_user but for a 31 bit program.
 */
static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
{
	addr_t offset;
	__u32 tmp;

	if (addr < offsetof(struct compat_user, regs.acrs)) {
		struct pt_regs *regs = task_pt_regs(child);
		/*
		 * psw and gprs are stored on the stack
		 */
		if (addr == offsetof(struct compat_user, regs.psw.mask)) {
			/* Fake a 31 bit psw mask. */
			tmp = (__u32)(regs->psw.mask >> 32);
			tmp &= PSW32_MASK_USER | PSW32_MASK_RI;
			tmp |= PSW32_USER_BITS;
		} else if (addr == offsetof(struct compat_user, regs.psw.addr)) {
			/* Fake a 31 bit psw address. */
			tmp = (__u32) regs->psw.addr |
				(__u32)(regs->psw.mask & PSW_MASK_BA);
		} else {
			/* gpr 0-15 */
			tmp = *(__u32 *)((addr_t) &regs->psw + addr*2 + 4);
		}
	} else if (addr < offsetof(struct compat_user, regs.orig_gpr2)) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - offsetof(struct compat_user, regs.acrs);
		tmp = *(__u32*)((addr_t) &child->thread.acrs + offset);

	} else if (addr == offsetof(struct compat_user, regs.orig_gpr2)) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		tmp = *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4);

	} else if (addr < offsetof(struct compat_user, regs.fp_regs)) {
		/*
		 * prevent reads of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		tmp = 0;

	} else if (addr == offsetof(struct compat_user, regs.fp_regs.fpc)) {
		/*
		 * floating point control reg. is in the thread structure
		 */
		tmp = child->thread.fpu.fpc;

	} else if (addr < offsetof(struct compat_user, regs.fp_regs) + sizeof(s390_fp_regs)) {
		/*
		 * floating point regs. are either in child->thread.fpu
		 * or the child->thread.fpu.vxrs array
		 */
		offset = addr - offsetof(struct compat_user, regs.fp_regs.fprs);
		if (MACHINE_HAS_VX)
			tmp = *(__u32 *)
			       ((addr_t) child->thread.fpu.vxrs + 2*offset);
		else
			tmp = *(__u32 *)
			       ((addr_t) child->thread.fpu.fprs + offset);

	} else if (addr < offsetof(struct compat_user, regs.per_info) + sizeof(struct compat_per_struct_kernel)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= offsetof(struct compat_user, regs.per_info);
		tmp = __peek_user_per_compat(child, addr);

	} else
		tmp = 0;

	return tmp;
}

static int peek_user_compat(struct task_struct *child,
			    addr_t addr, addr_t data)
{
	__u32 tmp;

	if (!is_compat_task() || (addr & 3) || addr > sizeof(struct user) - 3)
		return -EIO;

	tmp = __peek_user_compat(child, addr);
	return put_user(tmp, (__u32 __user *) data);
}

/*
 * Same as poke_user_per but for a 31 bit program.
 */
static inline void __poke_user_per_compat(struct task_struct *child,
					  addr_t addr, __u32 data)
{
	if (addr == offsetof(struct compat_per_struct_kernel, cr9))
		/* PER event mask of the user specified per set. */
		child->thread.per_user.control =
			data & (PER_EVENT_MASK | PER_CONTROL_MASK);
	else if (addr == offsetof(struct compat_per_struct_kernel, starting_addr))
		/* Starting address of the user specified per set. */
		child->thread.per_user.start = data;
	else if (addr == offsetof(struct compat_per_struct_kernel, ending_addr))
		/* Ending address of the user specified per set. */
		child->thread.per_user.end = data;
}

/*
 * Same as poke_user but for a 31 bit program.
 */
static int __poke_user_compat(struct task_struct *child,
			      addr_t addr, addr_t data)
{
	__u32 tmp = (__u32) data;
	addr_t offset;

	if (addr < offsetof(struct compat_user, regs.acrs)) {
		struct pt_regs *regs = task_pt_regs(child);
		/*
		 * psw, gprs, acrs and orig_gpr2 are stored on the stack
		 */
		if (addr == offsetof(struct compat_user, regs.psw.mask)) {
			__u32 mask = PSW32_MASK_USER;

			mask |= is_ri_task(child) ? PSW32_MASK_RI : 0;
			/* Build a 64 bit psw mask from 31 bit mask. */
			if ((tmp ^ PSW32_USER_BITS) & ~mask)
				/* Invalid psw mask. */
				return -EINVAL;
			if ((data & PSW32_MASK_ASC) == PSW32_ASC_HOME)
				/* Invalid address-space-control bits */
				return -EINVAL;
			regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
				(regs->psw.mask & PSW_MASK_BA) |
				(__u64)(tmp & mask) << 32;
		} else if (addr == offsetof(struct compat_user, regs.psw.addr)) {
			/* Build a 64 bit psw address from 31 bit address. */
			regs->psw.addr = (__u64) tmp & PSW32_ADDR_INSN;
			/* Transfer 31 bit amode bit to psw mask. */
			regs->psw.mask = (regs->psw.mask & ~PSW_MASK_BA) |
				(__u64)(tmp & PSW32_ADDR_AMODE);
		} else {
			if (test_pt_regs_flag(regs, PIF_SYSCALL) &&
				addr == offsetof(struct compat_user, regs.gprs[2])) {
				struct pt_regs *regs = task_pt_regs(child);

				regs->int_code = 0x20000 | (data & 0xffff);
			}
			/* gpr 0-15 */
			*(__u32*)((addr_t) &regs->psw + addr*2 + 4) = tmp;
		}
	} else if (addr < offsetof(struct compat_user, regs.orig_gpr2)) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - offsetof(struct compat_user, regs.acrs);
		*(__u32*)((addr_t) &child->thread.acrs + offset) = tmp;

	} else if (addr == offsetof(struct compat_user, regs.orig_gpr2)) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		*(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4) = tmp;

	} else if (addr < offsetof(struct compat_user, regs.fp_regs)) {
		/*
		 * prevent writes of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		return 0;

	} else if (addr == offsetof(struct compat_user, regs.fp_regs.fpc)) {
		/*
		 * floating point control reg. is in the thread structure
		 */
		if (test_fp_ctl(tmp))
			return -EINVAL;
		child->thread.fpu.fpc = data;

	} else if (addr < offsetof(struct compat_user, regs.fp_regs) + sizeof(s390_fp_regs)) {
		/*
		 * floating point regs. are either in child->thread.fpu
		 * or the child->thread.fpu.vxrs array
		 */
		offset = addr - offsetof(struct compat_user, regs.fp_regs.fprs);
		if (MACHINE_HAS_VX)
			*(__u32 *)((addr_t)
				child->thread.fpu.vxrs + 2*offset) = tmp;
		else
			*(__u32 *)((addr_t)
				child->thread.fpu.fprs + offset) = tmp;

	} else if (addr < offsetof(struct compat_user, regs.per_info) + sizeof(struct compat_per_struct_kernel)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= offsetof(struct compat_user, regs.per_info);
		__poke_user_per_compat(child, addr, data);
	}

	return 0;
}

static int poke_user_compat(struct task_struct *child,
			    addr_t addr, addr_t data)
{
	if (!is_compat_task() || (addr & 3) ||
	    addr > sizeof(struct compat_user) - 3)
		return -EIO;

	return __poke_user_compat(child, addr, data);
}

long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	compat_ptrace_area parea;
	int copied, ret;

	switch (request) {
	case PTRACE_PEEKUSR:
		/* read the word at location addr in the USER area. */
		return peek_user_compat(child, addr, data);

	case PTRACE_POKEUSR:
		/* write the word at location addr in the USER area */
		return poke_user_compat(child, addr, data);

	case PTRACE_PEEKUSR_AREA:
	case PTRACE_POKEUSR_AREA:
		if (copy_from_user(&parea, (void __force __user *) addr,
							sizeof(parea)))
			return -EFAULT;
		addr = parea.kernel_addr;
		data = parea.process_addr;
		copied = 0;
		while (copied < parea.len) {
			if (request == PTRACE_PEEKUSR_AREA)
				ret = peek_user_compat(child, addr, data);
			else {
				__u32 utmp;
				if (get_user(utmp,
					     (__u32 __force __user *) data))
					return -EFAULT;
				ret = poke_user_compat(child, addr, utmp);
			}
			if (ret)
				return ret;
			addr += sizeof(unsigned int);
			data += sizeof(unsigned int);
			copied += sizeof(unsigned int);
		}
		return 0;
	case PTRACE_GET_LAST_BREAK:
		put_user(child->thread.last_break,
			 (unsigned int __user *) data);
		return 0;
	}
	return compat_ptrace_request(child, request, addr, data);
}
#endif

/*
 * user_regset definitions.
 */

static int s390_regs_get(struct task_struct *target,
			 const struct user_regset *regset,
			 struct membuf to)
{
	unsigned pos;
	if (target == current)
		save_access_regs(target->thread.acrs);

	for (pos = 0; pos < sizeof(s390_regs); pos += sizeof(long))
		membuf_store(&to, __peek_user(target, pos));
	return 0;
}

static int s390_regs_set(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 const void *kbuf, const void __user *ubuf)
{
	int rc = 0;

	if (target == current)
		save_access_regs(target->thread.acrs);

	if (kbuf) {
		const unsigned long *k = kbuf;
		while (count > 0 && !rc) {
			rc = __poke_user(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const unsigned long  __user *u = ubuf;
		while (count > 0 && !rc) {
			unsigned long word;
			rc = __get_user(word, u++);
			if (rc)
				break;
			rc = __poke_user(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	if (rc == 0 && target == current)
		restore_access_regs(target->thread.acrs);

	return rc;
}

static int s390_fpregs_get(struct task_struct *target,
			   const struct user_regset *regset,
			   struct membuf to)
{
	_s390_fp_regs fp_regs;

	if (target == current)
		save_fpu_regs();

	fp_regs.fpc = target->thread.fpu.fpc;
	fpregs_store(&fp_regs, &target->thread.fpu);

	return membuf_write(&to, &fp_regs, sizeof(fp_regs));
}

static int s390_fpregs_set(struct task_struct *target,
			   const struct user_regset *regset, unsigned int pos,
			   unsigned int count, const void *kbuf,
			   const void __user *ubuf)
{
	int rc = 0;
	freg_t fprs[__NUM_FPRS];

	if (target == current)
		save_fpu_regs();

	if (MACHINE_HAS_VX)
		convert_vx_to_fp(fprs, target->thread.fpu.vxrs);
	else
		memcpy(&fprs, target->thread.fpu.fprs, sizeof(fprs));

	/* If setting FPC, must validate it first. */
	if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
		u32 ufpc[2] = { target->thread.fpu.fpc, 0 };
		rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ufpc,
					0, offsetof(s390_fp_regs, fprs));
		if (rc)
			return rc;
		if (ufpc[1] != 0 || test_fp_ctl(ufpc[0]))
			return -EINVAL;
		target->thread.fpu.fpc = ufpc[0];
	}

	if (rc == 0 && count > 0)
		rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					fprs, offsetof(s390_fp_regs, fprs), -1);
	if (rc)
		return rc;

	if (MACHINE_HAS_VX)
		convert_fp_to_vx(target->thread.fpu.vxrs, fprs);
	else
		memcpy(target->thread.fpu.fprs, &fprs, sizeof(fprs));

	return rc;
}

static int s390_last_break_get(struct task_struct *target,
			       const struct user_regset *regset,
			       struct membuf to)
{
	return membuf_store(&to, target->thread.last_break);
}

static int s390_last_break_set(struct task_struct *target,
			       const struct user_regset *regset,
			       unsigned int pos, unsigned int count,
			       const void *kbuf, const void __user *ubuf)
{
	return 0;
}

static int s390_tdb_get(struct task_struct *target,
			const struct user_regset *regset,
			struct membuf to)
{
	struct pt_regs *regs = task_pt_regs(target);
	size_t size;

	if (!(regs->int_code & 0x200))
		return -ENODATA;
	size = sizeof(target->thread.trap_tdb.data);
	return membuf_write(&to, target->thread.trap_tdb.data, size);
}

static int s390_tdb_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	return 0;
}

static int s390_vxrs_low_get(struct task_struct *target,
			     const struct user_regset *regset,
			     struct membuf to)
{
	__u64 vxrs[__NUM_VXRS_LOW];
	int i;

	if (!MACHINE_HAS_VX)
		return -ENODEV;
	if (target == current)
		save_fpu_regs();
	for (i = 0; i < __NUM_VXRS_LOW; i++)
		vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1);
	return membuf_write(&to, vxrs, sizeof(vxrs));
}

static int s390_vxrs_low_set(struct task_struct *target,
			     const struct user_regset *regset,
			     unsigned int pos, unsigned int count,
			     const void *kbuf, const void __user *ubuf)
{
	__u64 vxrs[__NUM_VXRS_LOW];
	int i, rc;

	if (!MACHINE_HAS_VX)
		return -ENODEV;
	if (target == current)
		save_fpu_regs();

	for (i = 0; i < __NUM_VXRS_LOW; i++)
		vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1);

	rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
	if (rc == 0)
		for (i = 0; i < __NUM_VXRS_LOW; i++)
			*((__u64 *)(target->thread.fpu.vxrs + i) + 1) = vxrs[i];

	return rc;
}
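
/*
 * Illustrative note (editor's addition, not kernel code): as the loops
 * above show, NT_S390_VXRS_LOW carries only the second doubleword
 * (right half) of V0-V15; the left halves overlay the FPRs and are
 * available via NT_PRFPREG. A sketch of how a debugger could rebuild
 * the full 128 bit V2, assuming both regsets were already fetched into
 * hypothetical fprs[] and vxrs_low[] arrays of __u64:
 *
 *	__u64 v2[2];
 *
 *	v2[0] = fprs[2];	// left half, identical to f2
 *	v2[1] = vxrs_low[2];	// right half from NT_S390_VXRS_LOW
 */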

static int s390_vxrs_high_get(struct task_struct *target,
			      const struct user_regset *regset,
			      struct membuf to)
{
	if (!MACHINE_HAS_VX)
		return -ENODEV;
	if (target == current)
		save_fpu_regs();
	return membuf_write(&to, target->thread.fpu.vxrs + __NUM_VXRS_LOW,
			    __NUM_VXRS_HIGH * sizeof(__vector128));
}

static int s390_vxrs_high_set(struct task_struct *target,
			      const struct user_regset *regset,
			      unsigned int pos, unsigned int count,
			      const void *kbuf, const void __user *ubuf)
{
	int rc;

	if (!MACHINE_HAS_VX)
		return -ENODEV;
	if (target == current)
		save_fpu_regs();

	rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				target->thread.fpu.vxrs + __NUM_VXRS_LOW, 0, -1);
	return rc;
}

static int s390_system_call_get(struct task_struct *target,
				const struct user_regset *regset,
				struct membuf to)
{
	return membuf_store(&to, target->thread.system_call);
}

static int s390_system_call_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	unsigned int *data = &target->thread.system_call;
	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  data, 0, sizeof(unsigned int));
}

static int s390_gs_cb_get(struct task_struct *target,
			  const struct user_regset *regset,
			  struct membuf to)
{
	struct gs_cb *data = target->thread.gs_cb;

	if (!MACHINE_HAS_GS)
		return -ENODEV;
	if (!data)
		return -ENODATA;
	if (target == current)
		save_gs_cb(data);
	return membuf_write(&to, data, sizeof(struct gs_cb));
}

static int s390_gs_cb_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct gs_cb gs_cb = { }, *data = NULL;
	int rc;

	if (!MACHINE_HAS_GS)
		return -ENODEV;
	if (!target->thread.gs_cb) {
		data = kzalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;
	}
	if (!target->thread.gs_cb)
		gs_cb.gsd = 25;
	else if (target == current)
		save_gs_cb(&gs_cb);
	else
		gs_cb = *target->thread.gs_cb;
	rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				&gs_cb, 0, sizeof(gs_cb));
	if (rc) {
		kfree(data);
		return -EFAULT;
	}
	preempt_disable();
	if (!target->thread.gs_cb)
		target->thread.gs_cb = data;
	*target->thread.gs_cb = gs_cb;
	if (target == current) {
		__ctl_set_bit(2, 4);
		restore_gs_cb(target->thread.gs_cb);
	}
	preempt_enable();
	return rc;
}

static int s390_gs_bc_get(struct task_struct *target,
			  const struct user_regset *regset,
			  struct membuf to)
{
	struct gs_cb *data = target->thread.gs_bc_cb;

	if (!MACHINE_HAS_GS)
		return -ENODEV;
	if (!data)
		return -ENODATA;
	return membuf_write(&to, data, sizeof(struct gs_cb));
}

static int s390_gs_bc_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct gs_cb *data = target->thread.gs_bc_cb;

	if (!MACHINE_HAS_GS)
		return -ENODEV;
	if (!data) {
		data = kzalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;
		target->thread.gs_bc_cb = data;
	}
	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  data, 0, sizeof(struct gs_cb));
}

static bool is_ri_cb_valid(struct runtime_instr_cb *cb)
{
	return (cb->rca & 0x1f) == 0 &&
		(cb->roa & 0xfff) == 0 &&
		(cb->rla & 0xfff) == 0xfff &&
		cb->s == 1 &&
		cb->k == 1 &&
		cb->h == 0 &&
		cb->reserved1 == 0 &&
		cb->ps == 1 &&
		cb->qs == 0 &&
		cb->pc == 1 &&
		cb->qc == 0 &&
		cb->reserved2 == 0 &&
		cb->reserved3 == 0 &&
		cb->reserved4 == 0 &&
		cb->reserved5 == 0 &&
		cb->reserved6 == 0 &&
		cb->reserved7 == 0 &&
		cb->reserved8 == 0 &&
		cb->rla >= cb->roa &&
		cb->rca >= cb->roa &&
		cb->rca <= cb->rla+1 &&
		cb->m < 3;
}

static int s390_runtime_instr_get(struct task_struct *target,
				const struct user_regset *regset,
				struct membuf to)
{
	struct runtime_instr_cb *data = target->thread.ri_cb;

	if (!test_facility(64))
		return -ENODEV;
	if (!data)
		return -ENODATA;

	return membuf_write(&to, data, sizeof(struct runtime_instr_cb));
}

static int s390_runtime_instr_set(struct task_struct *target,
				  const struct user_regset *regset,
				  unsigned int pos, unsigned int count,
				  const void *kbuf, const void __user *ubuf)
{
	struct runtime_instr_cb ri_cb = { }, *data = NULL;
	int rc;

	if (!test_facility(64))
		return -ENODEV;

	if (!target->thread.ri_cb) {
		data = kzalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;
	}

	if (target->thread.ri_cb) {
		if (target == current)
			store_runtime_instr_cb(&ri_cb);
		else
			ri_cb = *target->thread.ri_cb;
	}

	rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				&ri_cb, 0, sizeof(struct runtime_instr_cb));
	if (rc) {
		kfree(data);
		return -EFAULT;
	}

	if (!is_ri_cb_valid(&ri_cb)) {
		kfree(data);
		return -EINVAL;
	}
	/*
	 * Override access key in any case, since user space should
	 * not be able to set it, nor should it care about it.
	 */
	ri_cb.key = PAGE_DEFAULT_KEY >> 4;
	preempt_disable();
	if (!target->thread.ri_cb)
		target->thread.ri_cb = data;
	*target->thread.ri_cb = ri_cb;
	if (target == current)
		load_runtime_instr_cb(target->thread.ri_cb);
	preempt_enable();

	return 0;
}

static const struct user_regset s390_regsets[] = {
	{
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(s390_regs) / sizeof(long),
		.size = sizeof(long),
		.align = sizeof(long),
		.regset_get = s390_regs_get,
		.set = s390_regs_set,
	},
	{
		.core_note_type = NT_PRFPREG,
		.n = sizeof(s390_fp_regs) / sizeof(long),
		.size = sizeof(long),
		.align = sizeof(long),
		.regset_get = s390_fpregs_get,
		.set = s390_fpregs_set,
	},
	{
		.core_note_type = NT_S390_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(unsigned int),
		.align = sizeof(unsigned int),
		.regset_get = s390_system_call_get,
		.set = s390_system_call_set,
	},
	{
		.core_note_type = NT_S390_LAST_BREAK,
		.n = 1,
		.size = sizeof(long),
		.align = sizeof(long),
		.regset_get = s390_last_break_get,
		.set = s390_last_break_set,
	},
	{
		.core_note_type = NT_S390_TDB,
		.n = 1,
		.size = 256,
		.align = 1,
		.regset_get = s390_tdb_get,
		.set = s390_tdb_set,
	},
	{
		.core_note_type = NT_S390_VXRS_LOW,
		.n = __NUM_VXRS_LOW,
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.regset_get = s390_vxrs_low_get,
		.set = s390_vxrs_low_set,
	},
	{
		.core_note_type = NT_S390_VXRS_HIGH,
		.n = __NUM_VXRS_HIGH,
		.size = sizeof(__vector128),
		.align = sizeof(__vector128),
		.regset_get = s390_vxrs_high_get,
		.set = s390_vxrs_high_set,
	},
	{
		.core_note_type = NT_S390_GS_CB,
		.n = sizeof(struct gs_cb) / sizeof(__u64),
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.regset_get = s390_gs_cb_get,
		.set = s390_gs_cb_set,
	},
	{
		.core_note_type = NT_S390_GS_BC,
		.n = sizeof(struct gs_cb) / sizeof(__u64),
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.regset_get = s390_gs_bc_get,
		.set = s390_gs_bc_set,
	},
	{
		.core_note_type = NT_S390_RI_CB,
		.n = sizeof(struct runtime_instr_cb) / sizeof(__u64),
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.regset_get = s390_runtime_instr_get,
		.set = s390_runtime_instr_set,
	},
};

static const struct user_regset_view user_s390_view = {
	.name = "s390x",
	.e_machine = EM_S390,
	.regsets = s390_regsets,
	.n = ARRAY_SIZE(s390_regsets)
};

#ifdef CONFIG_COMPAT
static int s390_compat_regs_get(struct task_struct *target,
				const struct user_regset *regset,
				struct membuf to)
{
	unsigned n;

	if (target == current)
		save_access_regs(target->thread.acrs);

	for (n = 0; n < sizeof(s390_compat_regs); n += sizeof(compat_ulong_t))
		membuf_store(&to, __peek_user_compat(target, n));
	return 0;
}

static int s390_compat_regs_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	int rc = 0;

	if (target == current)
		save_access_regs(target->thread.acrs);

	if (kbuf) {
		const compat_ulong_t *k = kbuf;
		while (count > 0 && !rc) {
			rc = __poke_user_compat(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const compat_ulong_t  __user *u = ubuf;
		while (count > 0 && !rc) {
			compat_ulong_t word;
			rc = __get_user(word, u++);
			if (rc)
				break;
			rc = __poke_user_compat(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	if (rc == 0 && target == current)
		restore_access_regs(target->thread.acrs);

	return rc;
}

static int s390_compat_regs_high_get(struct task_struct *target,
				     const struct user_regset *regset,
				     struct membuf to)
{
	compat_ulong_t *gprs_high;
	int i;

	gprs_high = (compat_ulong_t *)task_pt_regs(target)->gprs;
	for (i = 0; i < NUM_GPRS; i++, gprs_high += 2)
		membuf_store(&to, *gprs_high);
	return 0;
}

static int s390_compat_regs_high_set(struct task_struct *target,
				     const struct user_regset *regset,
				     unsigned int pos, unsigned int count,
				     const void *kbuf, const void __user *ubuf)
{
	compat_ulong_t *gprs_high;
	int rc = 0;

	gprs_high = (compat_ulong_t *)
		&task_pt_regs(target)->gprs[pos / sizeof(compat_ulong_t)];
	if (kbuf) {
		const compat_ulong_t *k = kbuf;
		while (count > 0) {
			*gprs_high = *k++;
			/* advance to the upper half of the next 64 bit gpr */
			gprs_high += 2;
			count -= sizeof(*k);
		}
	} else {
		const compat_ulong_t  __user *u = ubuf;
		while (count > 0 && !rc) {
			unsigned long word;
			rc = __get_user(word, u++);
			if (rc)
				break;
			*gprs_high = word;
			gprs_high += 2;
			count -= sizeof(*u);
		}
	}

	return rc;
}

static int s390_compat_last_break_get(struct task_struct *target,
				      const struct user_regset *regset,
				      struct membuf to)
{
	compat_ulong_t last_break = target->thread.last_break;

	return membuf_store(&to, (unsigned long)last_break);
}

static int s390_compat_last_break_set(struct task_struct *target,
				      const struct user_regset *regset,
				      unsigned int pos, unsigned int count,
				      const void *kbuf, const void __user *ubuf)
{
	return 0;
}

static const struct user_regset s390_compat_regsets[] = {
	{
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(s390_compat_regs) / sizeof(compat_long_t),
		.size = sizeof(compat_long_t),
		.align = sizeof(compat_long_t),
		.regset_get = s390_compat_regs_get,
		.set = s390_compat_regs_set,
	},
	{
		.core_note_type = NT_PRFPREG,
		.n = sizeof(s390_fp_regs) / sizeof(compat_long_t),
		.size = sizeof(compat_long_t),
		.align = sizeof(compat_long_t),
		.regset_get = s390_fpregs_get,
		.set = s390_fpregs_set,
	},
	{
		.core_note_type = NT_S390_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(compat_uint_t),
		.align = sizeof(compat_uint_t),
		.regset_get = s390_system_call_get,
		.set = s390_system_call_set,
	},
	{
		.core_note_type = NT_S390_LAST_BREAK,
		.n = 1,
		.size = sizeof(long),
		.align = sizeof(long),
		.regset_get = s390_compat_last_break_get,
		.set = s390_compat_last_break_set,
	},
	{
		.core_note_type = NT_S390_TDB,
		.n = 1,
		.size = 256,
		.align = 1,
		.regset_get = s390_tdb_get,
		.set = s390_tdb_set,
	},
	{
		.core_note_type = NT_S390_VXRS_LOW,
		.n = __NUM_VXRS_LOW,
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.regset_get = s390_vxrs_low_get,
		.set = s390_vxrs_low_set,
	},
	{
		.core_note_type = NT_S390_VXRS_HIGH,
		.n = __NUM_VXRS_HIGH,
		.size = sizeof(__vector128),
		.align = sizeof(__vector128),
		.regset_get = s390_vxrs_high_get,
		.set = s390_vxrs_high_set,
	},
	{
		.core_note_type = NT_S390_HIGH_GPRS,
		.n = sizeof(s390_compat_regs_high) / sizeof(compat_long_t),
		.size = sizeof(compat_long_t),
		.align = sizeof(compat_long_t),
		.regset_get = s390_compat_regs_high_get,
		.set = s390_compat_regs_high_set,
	},
	{
		.core_note_type = NT_S390_GS_CB,
		.n = sizeof(struct gs_cb) / sizeof(__u64),
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.regset_get = s390_gs_cb_get,
		.set = s390_gs_cb_set,
	},
	{
		.core_note_type = NT_S390_GS_BC,
		.n = sizeof(struct gs_cb) / sizeof(__u64),
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.regset_get = s390_gs_bc_get,
		.set = s390_gs_bc_set,
	},
	{
		.core_note_type = NT_S390_RI_CB,
		.n = sizeof(struct runtime_instr_cb) / sizeof(__u64),
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.regset_get = s390_runtime_instr_get,
		.set = s390_runtime_instr_set,
	},
};

static const struct user_regset_view user_s390_compat_view = {
	.name = "s390",
	.e_machine = EM_S390,
	.regsets = s390_compat_regsets,
	.n = ARRAY_SIZE(s390_compat_regsets)
};
#endif

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
	if (test_tsk_thread_flag(task, TIF_31BIT))
		return &user_s390_compat_view;
#endif
	return &user_s390_view;
}
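
/*
 * Illustrative sketch (editor's addition, not kernel code): the regsets
 * above are exercised from user space through PTRACE_GETREGSET and
 * PTRACE_SETREGSET with the matching NT_* note type, e.g. for the
 * general registers (no error handling):
 *
 *	#include <elf.h>
 *	#include <sys/ptrace.h>
 *	#include <sys/uio.h>
 *
 *	s390_regs regs;
 *	struct iovec iov = {
 *		.iov_base = &regs,
 *		.iov_len  = sizeof(regs),
 *	};
 *
 *	ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);
 */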

static const char *gpr_names[NUM_GPRS] = {
	"r0", "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
	"r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
};

unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset)
{
	if (offset >= NUM_GPRS)
		return 0;
	return regs->gprs[offset];
}

int regs_query_register_offset(const char *name)
{
	unsigned long offset;

	if (!name || *name != 'r')
		return -EINVAL;
	if (kstrtoul(name + 1, 10, &offset))
		return -EINVAL;
	if (offset >= NUM_GPRS)
		return -EINVAL;
	return offset;
}

const char *regs_query_register_name(unsigned int offset)
{
	if (offset >= NUM_GPRS)
		return NULL;
	return gpr_names[offset];
}
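
/*
 * Illustrative sketch (editor's addition, not kernel code): the three
 * helpers above back kprobes/tracing argument fetching (e.g. "%r2" in a
 * kprobe event). In-kernel usage, assuming a valid pt_regs pointer:
 *
 *	int off = regs_query_register_offset("r2");	// 2
 *	unsigned long val = 0;
 *
 *	if (off >= 0)
 *		val = regs_get_register(regs, off);	// regs->gprs[2]
 */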

static int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
	unsigned long ksp = kernel_stack_pointer(regs);

	return (addr & ~(THREAD_SIZE - 1)) == (ksp & ~(THREAD_SIZE - 1));
}

/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @n:		stack entry number.
 *
 * regs_get_kernel_stack_nth() returns the @n th entry of the kernel stack
 * which is specified by @regs. If the @n th entry is NOT in the kernel
 * stack, this returns 0.
 */
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long addr;

	addr = kernel_stack_pointer(regs) + n * sizeof(long);
	if (!regs_within_kernel_stack(regs, addr))
		return 0;
	return *(unsigned long *)addr;
}
1614