// SPDX-License-Identifier: GPL-2.0
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (uweigand@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/fault.c"
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/compat.h>
#include <linux/smp.h>
#include <linux/kdebug.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/extable.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/hugetlb.h>
#include <linux/kfence.h>
#include <asm/asm-extable.h>
#include <asm/asm-offsets.h>
#include <asm/diag.h>
#include <asm/gmap.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/facility.h>
#include <asm/uv.h>
#include "../kernel/entry.h"

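/*
 * Bits 0-51 of the translation-exception identification (TEID) hold the
 * page-aligned failing address: -4096L is ~0xfffUL.
 */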
#define __FAIL_ADDR_MASK -4096L

/*
 * Allocate private vm_fault_t codes from the top bits down.  Make sure
 * they do not collide with the architecture-independent vm_fault_reason
 * values.
 */
#define VM_FAULT_BADCONTEXT	((__force vm_fault_t)0x80000000)
#define VM_FAULT_BADMAP		((__force vm_fault_t)0x40000000)
#define VM_FAULT_BADACCESS	((__force vm_fault_t)0x20000000)
#define VM_FAULT_SIGNAL		((__force vm_fault_t)0x10000000)
#define VM_FAULT_PFAULT		((__force vm_fault_t)0x8000000)

enum fault_type {
	KERNEL_FAULT,
	USER_FAULT,
	GMAP_FAULT,
};

static unsigned long store_indication __read_mostly;

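/*
 * Facility 75 is the access-exception fetch/store-indication facility.
 * With it installed, TEID bits 52-53 indicate whether the failing access
 * was a fetch or a store; store_indication masks those bits so that
 * fault_is_write() can test for the store pattern (0x400).
 */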
static int __init fault_init(void)
{
	if (test_facility(75))
		store_indication = 0xc00;
	return 0;
}
early_initcall(fault_init);

/*
 * Find out which address space caused the exception.
 */
static enum fault_type get_fault_type(struct pt_regs *regs)
{
	unsigned long trans_exc_code;

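	/*
	 * Bits 62-63 of the TEID (the low two bits of int_parm_long)
	 * encode the address space the CPU used for the failing access:
	 * 0 = primary, 1 = access register, 2 = secondary, 3 = home.
	 */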
	trans_exc_code = regs->int_parm_long & 3;
	if (likely(trans_exc_code == 0)) {
		/* primary space exception */
		if (user_mode(regs))
			return USER_FAULT;
		if (!IS_ENABLED(CONFIG_PGSTE))
			return KERNEL_FAULT;
		if (test_pt_regs_flag(regs, PIF_GUEST_FAULT))
			return GMAP_FAULT;
		return KERNEL_FAULT;
	}
	if (trans_exc_code == 2)
		return USER_FAULT;
	if (trans_exc_code == 1) {
		/* access register mode, not used in the kernel */
		return USER_FAULT;
	}
	/* home space exception -> access via kernel ASCE */
	return KERNEL_FAULT;
}

static unsigned long get_fault_address(struct pt_regs *regs)
{
	unsigned long trans_exc_code = regs->int_parm_long;

	return trans_exc_code & __FAIL_ADDR_MASK;
}

static bool fault_is_write(struct pt_regs *regs)
{
	unsigned long trans_exc_code = regs->int_parm_long;

	return (trans_exc_code & store_indication) == 0x400;
}

static int bad_address(void *p)
{
	unsigned long dummy;

	return get_kernel_nofault(dummy, (unsigned long *)p);
}

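/*
 * Walk the page table for @address, starting at the level implied by the
 * ASCE type, printing one entry per level.  Each case falls through to
 * the next lower level until an invalid or large entry ends the walk.
 */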
static void dump_pagetable(unsigned long asce, unsigned long address)
{
	unsigned long *table = __va(asce & _ASCE_ORIGIN);

	pr_alert("AS:%016lx ", asce);
	switch (asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		table += (address & _REGION1_INDEX) >> _REGION1_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("R1:%016lx ", *table);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = __va(*table & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_REGION2:
		table += (address & _REGION2_INDEX) >> _REGION2_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("R2:%016lx ", *table);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = __va(*table & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_REGION3:
		table += (address & _REGION3_INDEX) >> _REGION3_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("R3:%016lx ", *table);
		if (*table & (_REGION_ENTRY_INVALID | _REGION3_ENTRY_LARGE))
			goto out;
		table = __va(*table & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_SEGMENT:
		table += (address & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("S:%016lx ", *table);
		if (*table & (_SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_LARGE))
			goto out;
		table = __va(*table & _SEGMENT_ENTRY_ORIGIN);
	}
	table += (address & _PAGE_INDEX) >> _PAGE_SHIFT;
	if (bad_address(table))
		goto bad;
	pr_cont("P:%016lx ", *table);
out:
	pr_cont("\n");
	return;
bad:
	pr_cont("BAD\n");
}

static void dump_fault_info(struct pt_regs *regs)
{
	unsigned long asce;

	pr_alert("Failing address: %016lx TEID: %016lx\n",
		 regs->int_parm_long & __FAIL_ADDR_MASK, regs->int_parm_long);
	pr_alert("Fault in ");
	switch (regs->int_parm_long & 3) {
	case 3:
		pr_cont("home space ");
		break;
	case 2:
		pr_cont("secondary space ");
		break;
	case 1:
		pr_cont("access register ");
		break;
	case 0:
		pr_cont("primary space ");
		break;
	}
	pr_cont("mode while using ");
	switch (get_fault_type(regs)) {
	case USER_FAULT:
		asce = S390_lowcore.user_asce;
		pr_cont("user ");
		break;
	case GMAP_FAULT:
		asce = ((struct gmap *) S390_lowcore.gmap)->asce;
		pr_cont("gmap ");
		break;
	case KERNEL_FAULT:
		asce = S390_lowcore.kernel_asce;
		pr_cont("kernel ");
		break;
	default:
		unreachable();
	}
	pr_cont("ASCE.\n");
	dump_pagetable(asce, regs->int_parm_long & __FAIL_ADDR_MASK);
}

int show_unhandled_signals = 1;

void report_user_fault(struct pt_regs *regs, long signr, int is_mm_fault)
{
	if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
		return;
	if (!unhandled_signal(current, signr))
		return;
	if (!printk_ratelimit())
		return;
	printk(KERN_ALERT "User process fault: interruption code %04x ilc:%d ",
	       regs->int_code & 0xffff, regs->int_code >> 17);
	print_vma_addr(KERN_CONT "in ", regs->psw.addr);
	printk(KERN_CONT "\n");
	if (is_mm_fault)
		dump_fault_info(regs);
	show_regs(regs);
}

/*
 * Send SIGSEGV to task.  This is an external routine
 * to keep the stack usage of do_page_fault small.
 */
static noinline void do_sigsegv(struct pt_regs *regs, int si_code)
{
	report_user_fault(regs, SIGSEGV, 1);
	force_sig_fault(SIGSEGV, si_code,
			(void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK));
}

static noinline void do_no_context(struct pt_regs *regs, vm_fault_t fault)
{
	enum fault_type fault_type;
	unsigned long address;
	bool is_write;

	if (fixup_exception(regs))
		return;
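	/*
	 * For unresolved faults on kernel addresses, give KFENCE a
	 * chance to report the access error and recover before we oops.
	 */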
	fault_type = get_fault_type(regs);
	if ((fault_type == KERNEL_FAULT) && (fault == VM_FAULT_BADCONTEXT)) {
		address = get_fault_address(regs);
		is_write = fault_is_write(regs);
		if (kfence_handle_page_fault(address, is_write, regs))
			return;
	}
	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	if (fault_type == KERNEL_FAULT)
		printk(KERN_ALERT "Unable to handle kernel pointer dereference"
		       " in virtual kernel address space\n");
	else
		printk(KERN_ALERT "Unable to handle kernel paging request"
		       " in virtual user address space\n");
	dump_fault_info(regs);
	die(regs, "Oops");
}

static noinline void do_low_address(struct pt_regs *regs)
{
	/*
	 * Low-address protection hit in kernel mode means
	 * NULL pointer write access in kernel mode.
	 */
	if (regs->psw.mask & PSW_MASK_PSTATE) {
		/* Low-address protection hit in user mode 'cannot happen'. */
		die(regs, "Low-address protection");
	}

	do_no_context(regs, VM_FAULT_BADACCESS);
}

static noinline void do_sigbus(struct pt_regs *regs)
{
	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	force_sig_fault(SIGBUS, BUS_ADRERR,
			(void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK));
}

static noinline void do_fault_error(struct pt_regs *regs, vm_fault_t fault)
{
	int si_code;

	switch (fault) {
	case VM_FAULT_BADACCESS:
	case VM_FAULT_BADMAP:
		/* Bad memory access. Check if it is kernel or user space. */
		if (user_mode(regs)) {
			/* User mode accesses just cause a SIGSEGV */
			si_code = (fault == VM_FAULT_BADMAP) ?
				SEGV_MAPERR : SEGV_ACCERR;
			do_sigsegv(regs, si_code);
			break;
		}
		fallthrough;
	case VM_FAULT_BADCONTEXT:
	case VM_FAULT_PFAULT:
		do_no_context(regs, fault);
		break;
	case VM_FAULT_SIGNAL:
		if (!user_mode(regs))
			do_no_context(regs, fault);
		break;
	default: /* fault & VM_FAULT_ERROR */
		if (fault & VM_FAULT_OOM) {
			if (!user_mode(regs))
				do_no_context(regs, fault);
			else
				pagefault_out_of_memory();
		} else if (fault & VM_FAULT_SIGSEGV) {
			/* Kernel mode? Handle exceptions or die */
			if (!user_mode(regs))
				do_no_context(regs, fault);
			else
				do_sigsegv(regs, SEGV_MAPERR);
		} else if (fault & VM_FAULT_SIGBUS) {
			/* Kernel mode? Handle exceptions or die */
			if (!user_mode(regs))
				do_no_context(regs, fault);
			else
				do_sigbus(regs);
		} else
			BUG();
		break;
	}
}

/*
 * This routine handles page faults.  It determines the address and the
 * problem, and then passes it off to one of the appropriate routines.
 *
 * interruption code (int_code):
 *   04       Protection           ->  Write-Protection  (suppression)
 *   10       Segment translation  ->  Not present       (nullification)
 *   11       Page translation     ->  Not present       (nullification)
 *   3b       Region third trans.  ->  Not present       (nullification)
 */
static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
{
	struct gmap *gmap;
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	enum fault_type type;
	unsigned long address;
	unsigned int flags;
	vm_fault_t fault;
	bool is_write;

	tsk = current;
	/*
	 * The instruction that caused the program check has
	 * been nullified. Don't signal single step via SIGTRAP.
	 */
	clear_thread_flag(TIF_PER_TRAP);

	if (kprobe_page_fault(regs, 14))
		return 0;

	mm = tsk->mm;
	address = get_fault_address(regs);
	is_write = fault_is_write(regs);

	/*
	 * Verify that the fault happened in user space, that
	 * we are not in an interrupt and that there is a
	 * user context.
	 */
	fault = VM_FAULT_BADCONTEXT;
	type = get_fault_type(regs);
	switch (type) {
	case KERNEL_FAULT:
		goto out;
	case USER_FAULT:
	case GMAP_FAULT:
		if (faulthandler_disabled() || !mm)
			goto out;
		break;
	}

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
	flags = FAULT_FLAG_DEFAULT;
	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (is_write)
		access = VM_WRITE;
	if (access == VM_WRITE)
		flags |= FAULT_FLAG_WRITE;
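	/*
	 * Fast path for user-space faults: try to resolve the fault
	 * under the per-VMA lock, without taking mmap_lock.  Fall back
	 * to the mmap_lock path if the VMA cannot be locked, the access
	 * does not match the VMA flags, or the fault has to be retried.
	 */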
	if (!(flags & FAULT_FLAG_USER))
		goto lock_mmap;
	vma = lock_vma_under_rcu(mm, address);
	if (!vma)
		goto lock_mmap;
	if (!(vma->vm_flags & access)) {
		vma_end_read(vma);
		goto lock_mmap;
	}
	fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs);
	if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
		vma_end_read(vma);
	if (!(fault & VM_FAULT_RETRY)) {
		count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
		if (likely(!(fault & VM_FAULT_ERROR)))
			fault = 0;
		goto out;
	}
	count_vm_vma_lock_event(VMA_LOCK_RETRY);
	/* Quick path to respond to signals */
	if (fault_signal_pending(fault, regs)) {
		fault = VM_FAULT_SIGNAL;
		goto out;
	}
lock_mmap:
	mmap_read_lock(mm);

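	/*
	 * For guest (gmap) faults the failing address is a guest
	 * address; translate it to the corresponding address in the
	 * host's user address space.  If the guest has enabled
	 * pseudo-page-faults, don't wait for the page: bail out early
	 * and report VM_FAULT_PFAULT instead (see the retry path below).
	 */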
	gmap = NULL;
	if (IS_ENABLED(CONFIG_PGSTE) && type == GMAP_FAULT) {
		gmap = (struct gmap *) S390_lowcore.gmap;
		current->thread.gmap_addr = address;
		current->thread.gmap_write_flag = !!(flags & FAULT_FLAG_WRITE);
		current->thread.gmap_int_code = regs->int_code & 0xffff;
		address = __gmap_translate(gmap, address);
		if (address == -EFAULT) {
			fault = VM_FAULT_BADMAP;
			goto out_up;
		}
		if (gmap->pfault_enabled)
			flags |= FAULT_FLAG_RETRY_NOWAIT;
	}

retry:
	fault = VM_FAULT_BADMAP;
	vma = find_vma(mm, address);
	if (!vma)
		goto out_up;

	if (unlikely(vma->vm_start > address)) {
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out_up;
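		/*
		 * expand_stack() drops mmap_lock on failure, hence the
		 * plain "goto out" (not "goto out_up") below.
		 */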
		vma = expand_stack(mm, address);
		if (!vma)
			goto out;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it.
	 */
	fault = VM_FAULT_BADACCESS;
	if (unlikely(!(vma->vm_flags & access)))
		goto out_up;

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags, regs);
	if (fault_signal_pending(fault, regs)) {
		fault = VM_FAULT_SIGNAL;
		if (flags & FAULT_FLAG_RETRY_NOWAIT)
			goto out_up;
		goto out;
	}

	/* The fault is fully completed (including releasing mmap lock) */
	if (fault & VM_FAULT_COMPLETED) {
		if (gmap) {
			mmap_read_lock(mm);
			goto out_gmap;
		}
		fault = 0;
		goto out;
	}

	if (unlikely(fault & VM_FAULT_ERROR))
		goto out_up;

	if (fault & VM_FAULT_RETRY) {
		if (IS_ENABLED(CONFIG_PGSTE) && gmap &&
			(flags & FAULT_FLAG_RETRY_NOWAIT)) {
			/*
			 * FAULT_FLAG_RETRY_NOWAIT has been set, mmap_lock has
			 * not been released
			 */
			current->thread.gmap_pfault = 1;
			fault = VM_FAULT_PFAULT;
			goto out_up;
		}
		flags &= ~FAULT_FLAG_RETRY_NOWAIT;
		flags |= FAULT_FLAG_TRIED;
		mmap_read_lock(mm);
		goto retry;
	}
out_gmap:
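	/*
	 * The host page is resolved; link it into the guest's address
	 * space at the original guest address.
	 */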
	if (IS_ENABLED(CONFIG_PGSTE) && gmap) {
		address = __gmap_link(gmap, current->thread.gmap_addr,
				      address);
		if (address == -EFAULT) {
			fault = VM_FAULT_BADMAP;
			goto out_up;
		}
		if (address == -ENOMEM) {
			fault = VM_FAULT_OOM;
			goto out_up;
		}
	}
	fault = 0;
out_up:
	mmap_read_unlock(mm);
out:
	return fault;
}

void do_protection_exception(struct pt_regs *regs)
{
	unsigned long trans_exc_code;
	int access;
	vm_fault_t fault;

	trans_exc_code = regs->int_parm_long;
	/*
	 * Protection exceptions are suppressing, so the PSW points past
	 * the faulting instruction and must be rewound.  Aborted
	 * transactions are the exception: for these the PSW already
	 * points to the correct location.
	 */
	if (!(regs->int_code & 0x200))
		regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
	/*
	 * Check for low-address protection.  This needs to be treated
	 * as a special case because the translation exception code
	 * field is not guaranteed to contain valid data in this case.
	 */
	if (unlikely(!(trans_exc_code & 4))) {
		do_low_address(regs);
		return;
	}
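	/*
	 * With the NX facility, TEID bit 56 (0x80) indicates that the
	 * protection exception was raised by instruction-execution
	 * protection.  Report the fault against the page-aligned PSW
	 * address (the instruction that failed to execute), keeping
	 * the low TEID bits.
	 */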
	if (unlikely(MACHINE_HAS_NX && (trans_exc_code & 0x80))) {
		regs->int_parm_long = (trans_exc_code & ~PAGE_MASK) |
					(regs->psw.addr & PAGE_MASK);
		access = VM_EXEC;
		fault = VM_FAULT_BADACCESS;
	} else {
		access = VM_WRITE;
		fault = do_exception(regs, access);
	}
	if (unlikely(fault))
		do_fault_error(regs, fault);
}
NOKPROBE_SYMBOL(do_protection_exception);

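/*
 * DAT exceptions (segment, page and region-third translation) can be
 * resolved by any type of access, so pass all of VM_ACCESS_FLAGS;
 * do_exception() narrows this to VM_WRITE for store accesses.
 */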
void do_dat_exception(struct pt_regs *regs)
{
	int access;
	vm_fault_t fault;

	access = VM_ACCESS_FLAGS;
	fault = do_exception(regs, access);
	if (unlikely(fault))
		do_fault_error(regs, fault);
}
NOKPROBE_SYMBOL(do_dat_exception);

#if IS_ENABLED(CONFIG_PGSTE)

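/*
 * Handle the secure-storage access exception (PGM 0x3d): a non-secure
 * context touched a page that the Ultravisor currently owns on behalf
 * of a protected (secure) guest.  Try to make the page accessible
 * again; if that fails, signal or BUG depending on the context.
 */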
void do_secure_storage_access(struct pt_regs *regs)
{
	unsigned long addr = regs->int_parm_long & __FAIL_ADDR_MASK;
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	struct page *page;
	struct gmap *gmap;
	int rc;

	/*
	 * Bit 61 tells us if the address is valid; if it's not, we have
	 * a major problem and should stop the kernel or send a SIGSEGV
	 * to the process.  Unfortunately bit 61 is not reliable without
	 * the misc UV feature, so we need to check for that as well.
	 */
	if (uv_has_feature(BIT_UV_FEAT_MISC) &&
	    !test_bit_inv(61, &regs->int_parm_long)) {
		/*
		 * When this happens, userspace did something that it
		 * was not supposed to do, e.g. branching into secure
		 * memory. Trigger a segmentation fault.
		 */
		if (user_mode(regs)) {
			send_sig(SIGSEGV, current, 0);
			return;
		}

		/*
		 * The kernel should never run into this case and we
		 * have no way out of this situation.
		 */
		panic("Unexpected PGM 0x3d with TEID bit 61=0");
	}

	switch (get_fault_type(regs)) {
	case GMAP_FAULT:
		mm = current->mm;
		gmap = (struct gmap *)S390_lowcore.gmap;
		mmap_read_lock(mm);
		addr = __gmap_translate(gmap, addr);
		mmap_read_unlock(mm);
		if (IS_ERR_VALUE(addr)) {
			do_fault_error(regs, VM_FAULT_BADMAP);
			break;
		}
		fallthrough;
	case USER_FAULT:
		mm = current->mm;
		mmap_read_lock(mm);
		vma = find_vma(mm, addr);
		if (!vma) {
			mmap_read_unlock(mm);
			do_fault_error(regs, VM_FAULT_BADMAP);
			break;
		}
		page = follow_page(vma, addr, FOLL_WRITE | FOLL_GET);
		if (IS_ERR_OR_NULL(page)) {
			mmap_read_unlock(mm);
			break;
		}
		if (arch_make_page_accessible(page))
			send_sig(SIGSEGV, current, 0);
		put_page(page);
		mmap_read_unlock(mm);
		break;
	case KERNEL_FAULT:
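		/*
		 * Kernel addresses are mapped 1:1, so the failing
		 * virtual address doubles as the physical address here.
		 */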
		page = phys_to_page(addr);
		if (unlikely(!try_get_page(page)))
			break;
		rc = arch_make_page_accessible(page);
		put_page(page);
		if (rc)
			BUG();
		break;
	default:
		do_fault_error(regs, VM_FAULT_BADMAP);
		WARN_ON_ONCE(1);
	}
}
NOKPROBE_SYMBOL(do_secure_storage_access);

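/*
 * Handle the non-secure storage access exception: a protected (secure)
 * guest touched a page that is currently not secure.  Convert (import)
 * the page to secure; if that cannot work, kill the task.
 */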
void do_non_secure_storage_access(struct pt_regs *regs)
{
	unsigned long gaddr = regs->int_parm_long & __FAIL_ADDR_MASK;
	struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;

	if (get_fault_type(regs) != GMAP_FAULT) {
		do_fault_error(regs, VM_FAULT_BADMAP);
		WARN_ON_ONCE(1);
		return;
	}

	if (gmap_convert_to_secure(gmap, gaddr) == -EINVAL)
		send_sig(SIGSEGV, current, 0);
}
NOKPROBE_SYMBOL(do_non_secure_storage_access);

void do_secure_storage_violation(struct pt_regs *regs)
{
	unsigned long gaddr = regs->int_parm_long & __FAIL_ADDR_MASK;
	struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;

	/*
	 * If the VM has been rebooted, its address space might still contain
	 * secure pages from the previous boot.
	 * Clear the page so it can be reused.
	 */
	if (!gmap_destroy_page(gmap, gaddr))
		return;
	/*
	 * Either KVM messed up the secure guest mapping or the same
	 * page is mapped into multiple secure guests.
	 *
	 * This exception is only triggered when a guest 2 is running
	 * and can therefore never occur in kernel context.
	 */
	printk_ratelimited(KERN_WARNING
			   "Secure storage violation in task: %s, pid %d\n",
			   current->comm, current->pid);
	send_sig(SIGSEGV, current, 0);
}

#endif /* CONFIG_PGSTE */