// SPDX-License-Identifier: GPL-2.0
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (uweigand@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/fault.c"
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/compat.h>
#include <linux/smp.h>
#include <linux/kdebug.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/extable.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/hugetlb.h>
#include <linux/kfence.h>
#include <asm/asm-extable.h>
#include <asm/asm-offsets.h>
#include <asm/diag.h>
#include <asm/gmap.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/facility.h>
#include <asm/uv.h>
#include "../kernel/entry.h"

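/*
 * The TEID (translation-exception identification) is delivered in
 * regs->int_parm_long; its low 12 bits are flag bits, so __FAIL_ADDR_MASK
 * clears them to recover the page-aligned failing address.
 * __SUBCODE_MASK matches the 0x06xx external-interrupt subcode class used
 * by pfault, and __PF_RES_FIELD fills the reserved doubleword of the
 * pfault parameter block (see pfault_init_refbk below).
 */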
#define __FAIL_ADDR_MASK -4096L
#define __SUBCODE_MASK 0x0600
#define __PF_RES_FIELD 0x8000000000000000ULL

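/*
 * Architecture-private fault codes, chosen above the bits used by the
 * generic VM_FAULT_* codes; do_fault_error() translates them into
 * signals or a kernel oops.
 */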
#define VM_FAULT_BADCONTEXT	((__force vm_fault_t) 0x010000)
#define VM_FAULT_BADMAP		((__force vm_fault_t) 0x020000)
#define VM_FAULT_BADACCESS	((__force vm_fault_t) 0x040000)
#define VM_FAULT_SIGNAL		((__force vm_fault_t) 0x080000)
#define VM_FAULT_PFAULT		((__force vm_fault_t) 0x100000)

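/*
 * Coarse classification of a fault: against the kernel address space,
 * against the user address space, or against a KVM guest mapping (gmap).
 */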
enum fault_type {
	KERNEL_FAULT,
	USER_FAULT,
	GMAP_FAULT,
};

static unsigned long store_indication __read_mostly;

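/*
 * Facility 75 provides a fetch/store indication in the TEID: bits 52-53
 * (mask 0xc00) report whether the faulting access was a fetch or a
 * store, and do_exception() below treats the value 0x400 as a store.
 * Without the facility the mask stays zero and is_write is always false.
 */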
static int __init fault_init(void)
{
	if (test_facility(75))
		store_indication = 0xc00;
	return 0;
}
early_initcall(fault_init);

/*
 * Find out which address space caused the exception.
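 * The two low bits of the TEID identify the ASCE that was used for the
 * translation: 0 primary, 1 access register, 2 secondary, 3 home.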
 */
static enum fault_type get_fault_type(struct pt_regs *regs)
{
	unsigned long trans_exc_code;

	trans_exc_code = regs->int_parm_long & 3;
	if (likely(trans_exc_code == 0)) {
		/* primary space exception */
		if (user_mode(regs))
			return USER_FAULT;
		if (!IS_ENABLED(CONFIG_PGSTE))
			return KERNEL_FAULT;
		if (test_pt_regs_flag(regs, PIF_GUEST_FAULT))
			return GMAP_FAULT;
		return KERNEL_FAULT;
	}
	if (trans_exc_code == 2)
		return USER_FAULT;
	if (trans_exc_code == 1) {
		/* access register mode, not used in the kernel */
		return USER_FAULT;
	}
	/* home space exception -> access via kernel ASCE */
	return KERNEL_FAULT;
}

static int bad_address(void *p)
{
	unsigned long dummy;

	return get_kernel_nofault(dummy, (unsigned long *)p);
}

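/*
 * Walk the page table for the given address, starting at the top-level
 * table designated by the ASCE, and print one entry per level. The walk
 * stops early at an invalid entry or a large-page mapping, and prints
 * BAD if a table entry cannot be read.
 */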
static void dump_pagetable(unsigned long asce, unsigned long address)
{
	unsigned long *table = __va(asce & _ASCE_ORIGIN);

	pr_alert("AS:%016lx ", asce);
	switch (asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		table += (address & _REGION1_INDEX) >> _REGION1_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("R1:%016lx ", *table);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = __va(*table & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_REGION2:
		table += (address & _REGION2_INDEX) >> _REGION2_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("R2:%016lx ", *table);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = __va(*table & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_REGION3:
		table += (address & _REGION3_INDEX) >> _REGION3_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("R3:%016lx ", *table);
		if (*table & (_REGION_ENTRY_INVALID | _REGION3_ENTRY_LARGE))
			goto out;
		table = __va(*table & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_SEGMENT:
		table += (address & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("S:%016lx ", *table);
		if (*table & (_SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_LARGE))
			goto out;
		table = __va(*table & _SEGMENT_ENTRY_ORIGIN);
	}
	table += (address & _PAGE_INDEX) >> _PAGE_SHIFT;
	if (bad_address(table))
		goto bad;
	pr_cont("P:%016lx ", *table);
out:
	pr_cont("\n");
	return;
bad:
	pr_cont("BAD\n");
}

static void dump_fault_info(struct pt_regs *regs)
{
	unsigned long asce;

	pr_alert("Failing address: %016lx TEID: %016lx\n",
		 regs->int_parm_long & __FAIL_ADDR_MASK, regs->int_parm_long);
	pr_alert("Fault in ");
	switch (regs->int_parm_long & 3) {
	case 3:
		pr_cont("home space ");
		break;
	case 2:
		pr_cont("secondary space ");
		break;
	case 1:
		pr_cont("access register ");
		break;
	case 0:
		pr_cont("primary space ");
		break;
	}
	pr_cont("mode while using ");
	switch (get_fault_type(regs)) {
	case USER_FAULT:
		asce = S390_lowcore.user_asce;
		pr_cont("user ");
		break;
	case GMAP_FAULT:
		asce = ((struct gmap *) S390_lowcore.gmap)->asce;
		pr_cont("gmap ");
		break;
	case KERNEL_FAULT:
		asce = S390_lowcore.kernel_asce;
		pr_cont("kernel ");
		break;
	default:
		unreachable();
	}
	pr_cont("ASCE.\n");
	dump_pagetable(asce, regs->int_parm_long & __FAIL_ADDR_MASK);
}

int show_unhandled_signals = 1;

void report_user_fault(struct pt_regs *regs, long signr, int is_mm_fault)
{
	if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
		return;
	if (!unhandled_signal(current, signr))
		return;
	if (!printk_ratelimit())
		return;
	printk(KERN_ALERT "User process fault: interruption code %04x ilc:%d ",
	       regs->int_code & 0xffff, regs->int_code >> 17);
	print_vma_addr(KERN_CONT "in ", regs->psw.addr);
	printk(KERN_CONT "\n");
	if (is_mm_fault)
		dump_fault_info(regs);
	show_regs(regs);
}

/*
 * Send SIGSEGV to task.  This is an external routine
 * to keep the stack usage of do_page_fault small.
 */
static noinline void do_sigsegv(struct pt_regs *regs, int si_code)
{
	report_user_fault(regs, SIGSEGV, 1);
	force_sig_fault(SIGSEGV, si_code,
			(void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK));
}

static noinline void do_no_context(struct pt_regs *regs)
{
	if (fixup_exception(regs))
		return;
	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	if (get_fault_type(regs) == KERNEL_FAULT)
		printk(KERN_ALERT "Unable to handle kernel pointer dereference"
		       " in virtual kernel address space\n");
	else
		printk(KERN_ALERT "Unable to handle kernel paging request"
		       " in virtual user address space\n");
	dump_fault_info(regs);
	die(regs, "Oops");
}

static noinline void do_low_address(struct pt_regs *regs)
{
	/* Low-address protection hit in kernel mode means
	   NULL pointer write access in kernel mode.  */
	if (regs->psw.mask & PSW_MASK_PSTATE) {
		/* Low-address protection hit in user mode 'cannot happen'. */
		die (regs, "Low-address protection");
	}

	do_no_context(regs);
}

static noinline void do_sigbus(struct pt_regs *regs)
{
	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	force_sig_fault(SIGBUS, BUS_ADRERR,
			(void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK));
}

static noinline void do_fault_error(struct pt_regs *regs, int access,
					vm_fault_t fault)
{
	int si_code;

	switch (fault) {
	case VM_FAULT_BADACCESS:
	case VM_FAULT_BADMAP:
		/* Bad memory access. Check if it is kernel or user space. */
		if (user_mode(regs)) {
			/* User mode accesses just cause a SIGSEGV */
			si_code = (fault == VM_FAULT_BADMAP) ?
				SEGV_MAPERR : SEGV_ACCERR;
			do_sigsegv(regs, si_code);
			break;
		}
		fallthrough;
	case VM_FAULT_BADCONTEXT:
	case VM_FAULT_PFAULT:
		do_no_context(regs);
		break;
	case VM_FAULT_SIGNAL:
		if (!user_mode(regs))
			do_no_context(regs);
		break;
	default: /* fault & VM_FAULT_ERROR */
		if (fault & VM_FAULT_OOM) {
			if (!user_mode(regs))
				do_no_context(regs);
			else
				pagefault_out_of_memory();
		} else if (fault & VM_FAULT_SIGSEGV) {
			/* Kernel mode? Handle exceptions or die */
			if (!user_mode(regs))
				do_no_context(regs);
			else
				do_sigsegv(regs, SEGV_MAPERR);
		} else if (fault & VM_FAULT_SIGBUS) {
			/* Kernel mode? Handle exceptions or die */
			if (!user_mode(regs))
				do_no_context(regs);
			else
				do_sigbus(regs);
		} else
			BUG();
		break;
	}
}

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * interruption code (int_code):
 *   04       Protection           ->  Write-Protection  (suppression)
 *   10       Segment translation  ->  Not present       (nullification)
 *   11       Page translation     ->  Not present       (nullification)
 *   3b       Region third trans.  ->  Not present       (nullification)
 */
static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
{
	struct gmap *gmap;
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	enum fault_type type;
	unsigned long trans_exc_code;
	unsigned long address;
	unsigned int flags;
	vm_fault_t fault;
	bool is_write;

	tsk = current;
	/*
	 * The instruction that caused the program check has
	 * been nullified. Don't signal single step via SIGTRAP.
	 */
	clear_thread_flag(TIF_PER_TRAP);

	if (kprobe_page_fault(regs, 14))
		return 0;

	mm = tsk->mm;
	trans_exc_code = regs->int_parm_long;
	address = trans_exc_code & __FAIL_ADDR_MASK;
	is_write = (trans_exc_code & store_indication) == 0x400;

	/*
	 * Verify that the fault happened in user space, that
	 * we are not in an interrupt and that there is a
	 * user context.
	 */
	fault = VM_FAULT_BADCONTEXT;
	type = get_fault_type(regs);
	switch (type) {
	case KERNEL_FAULT:
		if (kfence_handle_page_fault(address, is_write, regs))
			return 0;
		goto out;
	case USER_FAULT:
	case GMAP_FAULT:
		if (faulthandler_disabled() || !mm)
			goto out;
		break;
	}

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
	flags = FAULT_FLAG_DEFAULT;
	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
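	/*
	 * A store access is checked against VM_WRITE even when the
	 * exception was not a protection exception, so that the vma
	 * check below yields VM_FAULT_BADACCESS rather than asking
	 * handle_mm_fault() for a write the mapping does not allow.
	 */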
	if (is_write)
		access = VM_WRITE;
	if (access == VM_WRITE)
		flags |= FAULT_FLAG_WRITE;
	mmap_read_lock(mm);

	gmap = NULL;
	if (IS_ENABLED(CONFIG_PGSTE) && type == GMAP_FAULT) {
		gmap = (struct gmap *) S390_lowcore.gmap;
		current->thread.gmap_addr = address;
		current->thread.gmap_write_flag = !!(flags & FAULT_FLAG_WRITE);
		current->thread.gmap_int_code = regs->int_code & 0xffff;
		address = __gmap_translate(gmap, address);
		if (address == -EFAULT) {
			fault = VM_FAULT_BADMAP;
			goto out_up;
		}
		if (gmap->pfault_enabled)
			flags |= FAULT_FLAG_RETRY_NOWAIT;
	}

retry:
	fault = VM_FAULT_BADMAP;
	vma = find_vma(mm, address);
	if (!vma)
		goto out_up;

	if (unlikely(vma->vm_start > address)) {
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out_up;
		if (expand_stack(vma, address))
			goto out_up;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
	fault = VM_FAULT_BADACCESS;
	if (unlikely(!(vma->vm_flags & access)))
		goto out_up;

	if (is_vm_hugetlb_page(vma))
		address &= HPAGE_MASK;
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags, regs);
	if (fault_signal_pending(fault, regs)) {
		fault = VM_FAULT_SIGNAL;
		if (flags & FAULT_FLAG_RETRY_NOWAIT)
			goto out_up;
		goto out;
	}

	/* The fault is fully completed (including releasing mmap lock) */
	if (fault & VM_FAULT_COMPLETED) {
		if (gmap) {
			mmap_read_lock(mm);
			goto out_gmap;
		}
		fault = 0;
		goto out;
	}

	if (unlikely(fault & VM_FAULT_ERROR))
		goto out_up;

	if (fault & VM_FAULT_RETRY) {
		if (IS_ENABLED(CONFIG_PGSTE) && gmap &&
			(flags & FAULT_FLAG_RETRY_NOWAIT)) {
			/*
			 * FAULT_FLAG_RETRY_NOWAIT has been set, mmap_lock has
			 * not been released
			 */
			current->thread.gmap_pfault = 1;
			fault = VM_FAULT_PFAULT;
			goto out_up;
		}
		flags &= ~FAULT_FLAG_RETRY_NOWAIT;
		flags |= FAULT_FLAG_TRIED;
		mmap_read_lock(mm);
		goto retry;
	}
out_gmap:
	if (IS_ENABLED(CONFIG_PGSTE) && gmap) {
		address = __gmap_link(gmap, current->thread.gmap_addr,
				      address);
		if (address == -EFAULT) {
			fault = VM_FAULT_BADMAP;
			goto out_up;
		}
		if (address == -ENOMEM) {
			fault = VM_FAULT_OOM;
			goto out_up;
		}
	}
	fault = 0;
out_up:
	mmap_read_unlock(mm);
out:
	return fault;
}

void do_protection_exception(struct pt_regs *regs)
{
	unsigned long trans_exc_code;
	int access;
	vm_fault_t fault;

	trans_exc_code = regs->int_parm_long;
	/*
	 * Protection exceptions are suppressing, decrement psw address.
	 * The exception to this rule is aborted transactions, for these
	 * the PSW already points to the correct location.
	 */
	if (!(regs->int_code & 0x200))
		regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
	/*
	 * Check for low-address protection.  This needs to be treated
	 * as a special case because the translation exception code
	 * field is not guaranteed to contain valid data in this case.
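	 * Bit 61 of the TEID (mask 0x4) is zero in that case, which is
	 * what the check below tests for.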
	 */
	if (unlikely(!(trans_exc_code & 4))) {
		do_low_address(regs);
		return;
	}
	if (unlikely(MACHINE_HAS_NX && (trans_exc_code & 0x80))) {
		regs->int_parm_long = (trans_exc_code & ~PAGE_MASK) |
					(regs->psw.addr & PAGE_MASK);
		access = VM_EXEC;
		fault = VM_FAULT_BADACCESS;
	} else {
		access = VM_WRITE;
		fault = do_exception(regs, access);
	}
	if (unlikely(fault))
		do_fault_error(regs, access, fault);
}
NOKPROBE_SYMBOL(do_protection_exception);

void do_dat_exception(struct pt_regs *regs)
{
	int access;
	vm_fault_t fault;

	access = VM_ACCESS_FLAGS;
	fault = do_exception(regs, access);
	if (unlikely(fault))
		do_fault_error(regs, access, fault);
}
NOKPROBE_SYMBOL(do_dat_exception);

#ifdef CONFIG_PFAULT
/*
 * 'pfault' pseudo page fault routines.
 */
static int pfault_disable;

static int __init nopfault(char *str)
{
	pfault_disable = 1;
	return 1;
}

__setup("nopfault", nopfault);

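/*
 * Parameter block for the diagnose 0x258 calls below: function code 0
 * (pfault_init_refbk) arms the pfault mechanism, function code 1
 * (pfault_fini_refbk) disarms it again. The token delivered with each
 * pfault interrupt is taken from the lowcore LPP field (refgaddr =
 * __LC_LPP) and therefore carries the pid of the faulting task.
 */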
struct pfault_refbk {
	u16 refdiagc;
	u16 reffcode;
	u16 refdwlen;
	u16 refversn;
	u64 refgaddr;
	u64 refselmk;
	u64 refcmpmk;
	u64 reserved;
} __attribute__ ((packed, aligned(8)));

static struct pfault_refbk pfault_init_refbk = {
	.refdiagc = 0x258,
	.reffcode = 0,
	.refdwlen = 5,
	.refversn = 2,
	.refgaddr = __LC_LPP,
	.refselmk = 1ULL << 48,
	.refcmpmk = 1ULL << 48,
	.reserved = __PF_RES_FIELD
};

int pfault_init(void)
{
	int rc;

	if (pfault_disable)
		return -1;
	diag_stat_inc(DIAG_STAT_X258);
	asm volatile(
		"	diag	%1,%0,0x258\n"
		"0:	j	2f\n"
		"1:	la	%0,8\n"
		"2:\n"
		EX_TABLE(0b,1b)
		: "=d" (rc)
		: "a" (&pfault_init_refbk), "m" (pfault_init_refbk) : "cc");
	return rc;
}

static struct pfault_refbk pfault_fini_refbk = {
	.refdiagc = 0x258,
	.reffcode = 1,
	.refdwlen = 5,
	.refversn = 2,
};

void pfault_fini(void)
{
	if (pfault_disable)
		return;
	diag_stat_inc(DIAG_STAT_X258);
	asm volatile(
		"	diag	%0,0,0x258\n"
		"0:	nopr	%%r7\n"
		EX_TABLE(0b,0b)
		: : "a" (&pfault_fini_refbk), "m" (pfault_fini_refbk) : "cc");
}

static DEFINE_SPINLOCK(pfault_lock);
static LIST_HEAD(pfault_list);

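/* Completion-signal bit in the pfault external-interrupt subcode */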
#define PF_COMPLETE	0x0080

/*
 * The mechanism of our pfault code: if Linux is running as a guest and a
 * user space process accesses a page that the host has paged out, we get a
 * pfault interrupt.
 *
 * This allows us, within the guest, to schedule a different process. Without
 * this mechanism the host would have to suspend the whole virtual cpu until
 * the page has been paged in.
 *
 * So when we get such an interrupt we set the state of the current task
 * to uninterruptible and also set the need_resched flag. Both happen within
 * interrupt context(!). If we later on want to return to user space we
 * recognize the need_resched flag and then call schedule().  It's not very
 * obvious how this works...
 *
 * Of course we have a lot of additional fun with the completion interrupt (->
 * host signals that a page of a process has been paged in and the process can
 * continue to run). This interrupt can arrive on any cpu and, since we have
 * virtual cpus, actually appear before the interrupt that signals that a page
 * is missing.
 */
static void pfault_interrupt(struct ext_code ext_code,
			     unsigned int param32, unsigned long param64)
{
	struct task_struct *tsk;
	__u16 subcode;
	pid_t pid;

	/*
	 * Get the external interruption subcode & pfault initial/completion
	 * signal bit. VM stores this in the 'cpu address' field associated
	 * with the external interrupt.
	 */
	subcode = ext_code.subcode;
	if ((subcode & 0xff00) != __SUBCODE_MASK)
		return;
	inc_irq_stat(IRQEXT_PFL);
	/* Get the token (= pid of the affected task). */
	pid = param64 & LPP_PID_MASK;
	rcu_read_lock();
	tsk = find_task_by_pid_ns(pid, &init_pid_ns);
	if (tsk)
		get_task_struct(tsk);
	rcu_read_unlock();
	if (!tsk)
		return;
	spin_lock(&pfault_lock);
	if (subcode & PF_COMPLETE) {
		/* signal bit is set -> a page has been swapped in by VM */
		if (tsk->thread.pfault_wait == 1) {
			/* Initial interrupt was faster than the completion
			 * interrupt. pfault_wait is valid. Set pfault_wait
			 * back to zero and wake up the process. This can
			 * safely be done because the task is still sleeping
			 * and can't produce new pfaults. */
			tsk->thread.pfault_wait = 0;
			list_del(&tsk->thread.list);
			wake_up_process(tsk);
			put_task_struct(tsk);
		} else {
			/* Completion interrupt was faster than initial
			 * interrupt. Set pfault_wait to -1 so the initial
			 * interrupt doesn't put the task to sleep.
			 * If the task is not running, ignore the completion
			 * interrupt since it must be a leftover of a PFAULT
			 * CANCEL operation which didn't remove all pending
			 * completion interrupts. */
			if (task_is_running(tsk))
				tsk->thread.pfault_wait = -1;
		}
	} else {
		/* signal bit not set -> a real page is missing. */
		if (WARN_ON_ONCE(tsk != current))
			goto out;
		if (tsk->thread.pfault_wait == 1) {
			/* Already on the list with a reference: put to sleep */
			goto block;
		} else if (tsk->thread.pfault_wait == -1) {
			/* Completion interrupt was faster than the initial
			 * interrupt (pfault_wait == -1). Set pfault_wait
			 * back to zero and exit. */
			tsk->thread.pfault_wait = 0;
		} else {
			/* Initial interrupt arrived before completion
			 * interrupt. Let the task sleep.
			 * An extra task reference is needed since a different
			 * cpu may set the task state to TASK_RUNNING again
			 * before the scheduler is reached. */
			get_task_struct(tsk);
			tsk->thread.pfault_wait = 1;
			list_add(&tsk->thread.list, &pfault_list);
block:
			/* Since this must be a userspace fault, there
			 * is no kernel task state to trample. Rely on the
			 * return to userspace schedule() to block. */
			__set_current_state(TASK_UNINTERRUPTIBLE);
			set_tsk_need_resched(tsk);
			set_preempt_need_resched();
		}
	}
out:
	spin_unlock(&pfault_lock);
	put_task_struct(tsk);
}

static int pfault_cpu_dead(unsigned int cpu)
{
	struct thread_struct *thread, *next;
	struct task_struct *tsk;

	spin_lock_irq(&pfault_lock);
	list_for_each_entry_safe(thread, next, &pfault_list, list) {
		thread->pfault_wait = 0;
		list_del(&thread->list);
		tsk = container_of(thread, struct task_struct, thread);
		wake_up_process(tsk);
		put_task_struct(tsk);
	}
	spin_unlock_irq(&pfault_lock);
	return 0;
}

static int __init pfault_irq_init(void)
{
	int rc;

	rc = register_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
	if (rc)
		goto out_extint;
	rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP;
	if (rc)
		goto out_pfault;
	irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
	cpuhp_setup_state_nocalls(CPUHP_S390_PFAULT_DEAD, "s390/pfault:dead",
				  NULL, pfault_cpu_dead);
	return 0;

out_pfault:
	unregister_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
out_extint:
	pfault_disable = 1;
	return rc;
}
early_initcall(pfault_irq_init);

#endif /* CONFIG_PFAULT */

#if IS_ENABLED(CONFIG_PGSTE)

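/*
 * Handlers for the program checks raised for accesses to secure storage
 * on machines with protected virtualization (see asm/uv.h).
 */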
void do_secure_storage_access(struct pt_regs *regs)
{
	unsigned long addr = regs->int_parm_long & __FAIL_ADDR_MASK;
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	struct page *page;
	struct gmap *gmap;
	int rc;

	/*
	 * Bit 61 tells us whether the address is valid; if it is not, we
	 * have a major problem and should stop the kernel or send a
	 * SIGSEGV to the process. Unfortunately bit 61 is not reliable
	 * without the misc UV feature, so we need to check for that as
	 * well.
	 */
	if (test_bit_inv(BIT_UV_FEAT_MISC, &uv_info.uv_feature_indications) &&
	    !test_bit_inv(61, &regs->int_parm_long)) {
		/*
		 * When this happens, userspace did something that it
		 * was not supposed to do, e.g. branching into secure
		 * memory. Trigger a segmentation fault.
		 */
		if (user_mode(regs)) {
			send_sig(SIGSEGV, current, 0);
			return;
		}

		/*
		 * The kernel should never run into this case and we
		 * have no way out of this situation.
		 */
		panic("Unexpected PGM 0x3d with TEID bit 61=0");
	}

	switch (get_fault_type(regs)) {
	case GMAP_FAULT:
		mm = current->mm;
		gmap = (struct gmap *)S390_lowcore.gmap;
		mmap_read_lock(mm);
		addr = __gmap_translate(gmap, addr);
		mmap_read_unlock(mm);
		if (IS_ERR_VALUE(addr)) {
			do_fault_error(regs, VM_ACCESS_FLAGS, VM_FAULT_BADMAP);
			break;
		}
		fallthrough;
	case USER_FAULT:
		mm = current->mm;
		mmap_read_lock(mm);
		vma = find_vma(mm, addr);
		if (!vma) {
			mmap_read_unlock(mm);
			do_fault_error(regs, VM_READ | VM_WRITE, VM_FAULT_BADMAP);
			break;
		}
		page = follow_page(vma, addr, FOLL_WRITE | FOLL_GET);
		if (IS_ERR_OR_NULL(page)) {
			mmap_read_unlock(mm);
			break;
		}
		if (arch_make_page_accessible(page))
			send_sig(SIGSEGV, current, 0);
		put_page(page);
		mmap_read_unlock(mm);
		break;
	case KERNEL_FAULT:
		page = phys_to_page(addr);
		if (unlikely(!try_get_page(page)))
			break;
		rc = arch_make_page_accessible(page);
		put_page(page);
		if (rc)
			BUG();
		break;
	default:
		do_fault_error(regs, VM_READ | VM_WRITE, VM_FAULT_BADMAP);
		WARN_ON_ONCE(1);
	}
}
NOKPROBE_SYMBOL(do_secure_storage_access);

void do_non_secure_storage_access(struct pt_regs *regs)
{
	unsigned long gaddr = regs->int_parm_long & __FAIL_ADDR_MASK;
	struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;

	if (get_fault_type(regs) != GMAP_FAULT) {
		do_fault_error(regs, VM_READ | VM_WRITE, VM_FAULT_BADMAP);
		WARN_ON_ONCE(1);
		return;
	}

	if (gmap_convert_to_secure(gmap, gaddr) == -EINVAL)
		send_sig(SIGSEGV, current, 0);
}
NOKPROBE_SYMBOL(do_non_secure_storage_access);

void do_secure_storage_violation(struct pt_regs *regs)
{
	unsigned long gaddr = regs->int_parm_long & __FAIL_ADDR_MASK;
	struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;

	/*
	 * If the VM has been rebooted, its address space might still contain
	 * secure pages from the previous boot.
	 * Clear the page so it can be reused.
	 */
	if (!gmap_destroy_page(gmap, gaddr))
		return;
	/*
	 * Either KVM messed up the secure guest mapping or the same
	 * page is mapped into multiple secure guests.
	 *
	 * This exception is only triggered when a guest 2 is running
	 * and can therefore never occur in kernel context.
	 */
	printk_ratelimited(KERN_WARNING
			   "Secure storage violation in task: %s, pid %d\n",
			   current->comm, current->pid);
	send_sig(SIGSEGV, current, 0);
}

#endif /* CONFIG_PGSTE */