// SPDX-License-Identifier: GPL-2.0
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (uweigand@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/fault.c"
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/compat.h>
#include <linux/smp.h>
#include <linux/kdebug.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/extable.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/hugetlb.h>
#include <linux/kfence.h>
#include <asm/asm-extable.h>
#include <asm/asm-offsets.h>
#include <asm/diag.h>
#include <asm/gmap.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/facility.h>
#include <asm/uv.h>
#include "../kernel/entry.h"

#define __FAIL_ADDR_MASK -4096L
#define __SUBCODE_MASK 0x0600
#define __PF_RES_FIELD 0x8000000000000000ULL

/*
 * Allocate private vm_fault_reason from top.  Please make sure it won't
 * collide with vm_fault_reason.
 */
#define VM_FAULT_BADCONTEXT	((__force vm_fault_t)0x80000000)
#define VM_FAULT_BADMAP		((__force vm_fault_t)0x40000000)
#define VM_FAULT_BADACCESS	((__force vm_fault_t)0x20000000)
#define VM_FAULT_SIGNAL		((__force vm_fault_t)0x10000000)
#define VM_FAULT_PFAULT		((__force vm_fault_t)0x8000000)

enum fault_type {
	KERNEL_FAULT,
	USER_FAULT,
	GMAP_FAULT,
};

static unsigned long store_indication __read_mostly;

static int __init fault_init(void)
{
	if (test_facility(75))
		store_indication = 0xc00;
	return 0;
}
early_initcall(fault_init);

/*
 * Find out which address space caused the exception.
 */
static enum fault_type get_fault_type(struct pt_regs *regs)
{
	unsigned long trans_exc_code;

	trans_exc_code = regs->int_parm_long & 3;
	if (likely(trans_exc_code == 0)) {
		/* primary space exception */
		if (user_mode(regs))
			return USER_FAULT;
		if (!IS_ENABLED(CONFIG_PGSTE))
			return KERNEL_FAULT;
		if (test_pt_regs_flag(regs, PIF_GUEST_FAULT))
			return GMAP_FAULT;
		return KERNEL_FAULT;
	}
	if (trans_exc_code == 2)
		return USER_FAULT;
	if (trans_exc_code == 1) {
		/* access register mode, not used in the kernel */
		return USER_FAULT;
	}
	/* home space exception -> access via kernel ASCE */
	return KERNEL_FAULT;
}

static unsigned long get_fault_address(struct pt_regs *regs)
{
	unsigned long trans_exc_code = regs->int_parm_long;

	return trans_exc_code & __FAIL_ADDR_MASK;
}

static bool fault_is_write(struct pt_regs *regs)
{
	unsigned long trans_exc_code = regs->int_parm_long;

	return (trans_exc_code & store_indication) == 0x400;
}

static int bad_address(void *p)
{
	unsigned long dummy;

	return get_kernel_nofault(dummy, (unsigned long *)p);
}
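
/*
 * Walk the page table that the given ASCE points to and print the entry
 * at each translation level for the faulting address. The walk stops at
 * the first invalid or large entry; an entry that cannot be read is
 * reported as "BAD".
 */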
static void dump_pagetable(unsigned long asce, unsigned long address)
{
	unsigned long *table = __va(asce & _ASCE_ORIGIN);

	pr_alert("AS:%016lx ", asce);
	switch (asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		table += (address & _REGION1_INDEX) >> _REGION1_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("R1:%016lx ", *table);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = __va(*table & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_REGION2:
		table += (address & _REGION2_INDEX) >> _REGION2_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("R2:%016lx ", *table);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = __va(*table & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_REGION3:
		table += (address & _REGION3_INDEX) >> _REGION3_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("R3:%016lx ", *table);
		if (*table & (_REGION_ENTRY_INVALID | _REGION3_ENTRY_LARGE))
			goto out;
		table = __va(*table & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_SEGMENT:
		table += (address & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("S:%016lx ", *table);
		if (*table & (_SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_LARGE))
			goto out;
		table = __va(*table & _SEGMENT_ENTRY_ORIGIN);
	}
	table += (address & _PAGE_INDEX) >> _PAGE_SHIFT;
	if (bad_address(table))
		goto bad;
	pr_cont("P:%016lx ", *table);
out:
	pr_cont("\n");
	return;
bad:
	pr_cont("BAD\n");
}

static void dump_fault_info(struct pt_regs *regs)
{
	unsigned long asce;

	pr_alert("Failing address: %016lx TEID: %016lx\n",
		 regs->int_parm_long & __FAIL_ADDR_MASK, regs->int_parm_long);
	pr_alert("Fault in ");
	switch (regs->int_parm_long & 3) {
	case 3:
		pr_cont("home space ");
		break;
	case 2:
		pr_cont("secondary space ");
		break;
	case 1:
		pr_cont("access register ");
		break;
	case 0:
		pr_cont("primary space ");
		break;
	}
	pr_cont("mode while using ");
	switch (get_fault_type(regs)) {
	case USER_FAULT:
		asce = S390_lowcore.user_asce;
		pr_cont("user ");
		break;
	case GMAP_FAULT:
		asce = ((struct gmap *) S390_lowcore.gmap)->asce;
		pr_cont("gmap ");
		break;
	case KERNEL_FAULT:
		asce = S390_lowcore.kernel_asce;
		pr_cont("kernel ");
		break;
	default:
		unreachable();
	}
	pr_cont("ASCE.\n");
	dump_pagetable(asce, regs->int_parm_long & __FAIL_ADDR_MASK);
}

int show_unhandled_signals = 1;

void report_user_fault(struct pt_regs *regs, long signr, int is_mm_fault)
{
	if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
		return;
	if (!unhandled_signal(current, signr))
		return;
	if (!printk_ratelimit())
		return;
	printk(KERN_ALERT "User process fault: interruption code %04x ilc:%d ",
	       regs->int_code & 0xffff, regs->int_code >> 17);
	print_vma_addr(KERN_CONT "in ", regs->psw.addr);
	printk(KERN_CONT "\n");
	if (is_mm_fault)
		dump_fault_info(regs);
	show_regs(regs);
}

/*
 * Send SIGSEGV to task.  This is an external routine
 * to keep the stack usage of do_page_fault small.
 */
static noinline void do_sigsegv(struct pt_regs *regs, int si_code)
{
	report_user_fault(regs, SIGSEGV, 1);
	force_sig_fault(SIGSEGV, si_code,
			(void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK));
}

static noinline void do_no_context(struct pt_regs *regs, vm_fault_t fault)
{
	enum fault_type fault_type;
	unsigned long address;
	bool is_write;

	if (fixup_exception(regs))
		return;
	fault_type = get_fault_type(regs);
	if ((fault_type == KERNEL_FAULT) && (fault == VM_FAULT_BADCONTEXT)) {
		address = get_fault_address(regs);
		is_write = fault_is_write(regs);
		if (kfence_handle_page_fault(address, is_write, regs))
			return;
	}
	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	if (fault_type == KERNEL_FAULT)
		printk(KERN_ALERT "Unable to handle kernel pointer dereference"
		       " in virtual kernel address space\n");
	else
		printk(KERN_ALERT "Unable to handle kernel paging request"
		       " in virtual user address space\n");
	dump_fault_info(regs);
	die(regs, "Oops");
}

static noinline void do_low_address(struct pt_regs *regs)
{
	/* Low-address protection hit in kernel mode means
	   NULL pointer write access in kernel mode.  */
	if (regs->psw.mask & PSW_MASK_PSTATE) {
		/* Low-address protection hit in user mode 'cannot happen'. */
		die(regs, "Low-address protection");
	}

	do_no_context(regs, VM_FAULT_BADACCESS);
}

static noinline void do_sigbus(struct pt_regs *regs)
{
	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	force_sig_fault(SIGBUS, BUS_ADRERR,
			(void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK));
}

static noinline void do_fault_error(struct pt_regs *regs, vm_fault_t fault)
{
	int si_code;

	switch (fault) {
	case VM_FAULT_BADACCESS:
	case VM_FAULT_BADMAP:
		/* Bad memory access. Check if it is kernel or user space. */
		if (user_mode(regs)) {
			/* User mode accesses just cause a SIGSEGV */
			si_code = (fault == VM_FAULT_BADMAP) ?
				SEGV_MAPERR : SEGV_ACCERR;
			do_sigsegv(regs, si_code);
			break;
		}
		fallthrough;
	case VM_FAULT_BADCONTEXT:
	case VM_FAULT_PFAULT:
		do_no_context(regs, fault);
		break;
	case VM_FAULT_SIGNAL:
		if (!user_mode(regs))
			do_no_context(regs, fault);
		break;
	default: /* fault & VM_FAULT_ERROR */
		if (fault & VM_FAULT_OOM) {
			if (!user_mode(regs))
				do_no_context(regs, fault);
			else
				pagefault_out_of_memory();
		} else if (fault & VM_FAULT_SIGSEGV) {
			/* Kernel mode? Handle exceptions or die */
			if (!user_mode(regs))
				do_no_context(regs, fault);
			else
				do_sigsegv(regs, SEGV_MAPERR);
		} else if (fault & VM_FAULT_SIGBUS) {
			/* Kernel mode? Handle exceptions or die */
			if (!user_mode(regs))
				do_no_context(regs, fault);
			else
				do_sigbus(regs);
		} else
			BUG();
		break;
	}
}

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * interruption code (int_code):
 *   04       Protection           ->  Write-Protection  (suppression)
 *   10       Segment translation  ->  Not present       (nullification)
 *   11       Page translation     ->  Not present       (nullification)
 *   3b       Region third trans.  ->  Not present       (nullification)
 */
static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
{
	struct gmap *gmap;
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	enum fault_type type;
	unsigned long address;
	unsigned int flags;
	vm_fault_t fault;
	bool is_write;

	tsk = current;
	/*
	 * The instruction that caused the program check has
	 * been nullified. Don't signal single step via SIGTRAP.
	 */
	clear_thread_flag(TIF_PER_TRAP);

	if (kprobe_page_fault(regs, 14))
		return 0;

	mm = tsk->mm;
	address = get_fault_address(regs);
	is_write = fault_is_write(regs);

	/*
	 * Verify that the fault happened in user space, that
	 * we are not in an interrupt and that there is a
	 * user context.
	 */
	fault = VM_FAULT_BADCONTEXT;
	type = get_fault_type(regs);
	switch (type) {
	case KERNEL_FAULT:
		goto out;
	case USER_FAULT:
	case GMAP_FAULT:
		if (faulthandler_disabled() || !mm)
			goto out;
		break;
	}

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
	flags = FAULT_FLAG_DEFAULT;
	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (is_write)
		access = VM_WRITE;
	if (access == VM_WRITE)
		flags |= FAULT_FLAG_WRITE;
#ifdef CONFIG_PER_VMA_LOCK
	if (!(flags & FAULT_FLAG_USER))
		goto lock_mmap;
	vma = lock_vma_under_rcu(mm, address);
	if (!vma)
		goto lock_mmap;
	if (!(vma->vm_flags & access)) {
		vma_end_read(vma);
		goto lock_mmap;
	}
	fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs);
	vma_end_read(vma);
	if (!(fault & VM_FAULT_RETRY)) {
		count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
		goto out;
	}
	count_vm_vma_lock_event(VMA_LOCK_RETRY);
	/* Quick path to respond to signals */
	if (fault_signal_pending(fault, regs)) {
		fault = VM_FAULT_SIGNAL;
		goto out;
	}
lock_mmap:
#endif /* CONFIG_PER_VMA_LOCK */
	mmap_read_lock(mm);

	gmap = NULL;
	if (IS_ENABLED(CONFIG_PGSTE) && type == GMAP_FAULT) {
		gmap = (struct gmap *) S390_lowcore.gmap;
		current->thread.gmap_addr = address;
		current->thread.gmap_write_flag = !!(flags & FAULT_FLAG_WRITE);
		current->thread.gmap_int_code = regs->int_code & 0xffff;
		address = __gmap_translate(gmap, address);
		if (address == -EFAULT) {
			fault = VM_FAULT_BADMAP;
			goto out_up;
		}
		if (gmap->pfault_enabled)
			flags |= FAULT_FLAG_RETRY_NOWAIT;
	}

retry:
	fault = VM_FAULT_BADMAP;
	vma = find_vma(mm, address);
	if (!vma)
		goto out_up;

	if (unlikely(vma->vm_start > address)) {
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out_up;
		if (expand_stack(vma, address))
			goto out_up;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
	fault = VM_FAULT_BADACCESS;
	if (unlikely(!(vma->vm_flags & access)))
		goto out_up;

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags, regs);
	if (fault_signal_pending(fault, regs)) {
		fault = VM_FAULT_SIGNAL;
		if (flags & FAULT_FLAG_RETRY_NOWAIT)
			goto out_up;
		goto out;
	}

	/* The fault is fully completed (including releasing mmap lock) */
	if (fault & VM_FAULT_COMPLETED) {
		if (gmap) {
			mmap_read_lock(mm);
			goto out_gmap;
		}
		fault = 0;
		goto out;
	}

	if (unlikely(fault & VM_FAULT_ERROR))
		goto out_up;

	if (fault & VM_FAULT_RETRY) {
		if (IS_ENABLED(CONFIG_PGSTE) && gmap &&
		    (flags & FAULT_FLAG_RETRY_NOWAIT)) {
			/*
			 * FAULT_FLAG_RETRY_NOWAIT has been set, mmap_lock has
			 * not been released
			 */
			current->thread.gmap_pfault = 1;
			fault = VM_FAULT_PFAULT;
			goto out_up;
		}
		flags &= ~FAULT_FLAG_RETRY_NOWAIT;
		flags |= FAULT_FLAG_TRIED;
		mmap_read_lock(mm);
		goto retry;
	}
out_gmap:
	if (IS_ENABLED(CONFIG_PGSTE) && gmap) {
		address = __gmap_link(gmap, current->thread.gmap_addr,
				      address);
		if (address == -EFAULT) {
			fault = VM_FAULT_BADMAP;
			goto out_up;
		}
		if (address == -ENOMEM) {
			fault = VM_FAULT_OOM;
			goto out_up;
		}
	}
	fault = 0;
out_up:
	mmap_read_unlock(mm);
out:
	return fault;
}

void do_protection_exception(struct pt_regs *regs)
{
	unsigned long trans_exc_code;
	int access;
	vm_fault_t fault;

	trans_exc_code = regs->int_parm_long;
	/*
	 * Protection exceptions are suppressing, decrement psw address.
	 * The exception to this rule are aborted transactions, for these
	 * the PSW already points to the correct location.
	 */
	if (!(regs->int_code & 0x200))
		regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
	/*
	 * Check for low-address protection.  This needs to be treated
	 * as a special case because the translation exception code
	 * field is not guaranteed to contain valid data in this case.
	 */
	if (unlikely(!(trans_exc_code & 4))) {
		do_low_address(regs);
		return;
	}
	if (unlikely(MACHINE_HAS_NX && (trans_exc_code & 0x80))) {
		regs->int_parm_long = (trans_exc_code & ~PAGE_MASK) |
				      (regs->psw.addr & PAGE_MASK);
		access = VM_EXEC;
		fault = VM_FAULT_BADACCESS;
	} else {
		access = VM_WRITE;
		fault = do_exception(regs, access);
	}
	if (unlikely(fault))
		do_fault_error(regs, fault);
}
NOKPROBE_SYMBOL(do_protection_exception);

void do_dat_exception(struct pt_regs *regs)
{
	int access;
	vm_fault_t fault;

	access = VM_ACCESS_FLAGS;
	fault = do_exception(regs, access);
	if (unlikely(fault))
		do_fault_error(regs, fault);
}
NOKPROBE_SYMBOL(do_dat_exception);

#ifdef CONFIG_PFAULT
/*
 * 'pfault' pseudo page faults routines.
 */
static int pfault_disable;

static int __init nopfault(char *str)
{
	pfault_disable = 1;
	return 1;
}

__setup("nopfault", nopfault);

struct pfault_refbk {
	u16 refdiagc;
	u16 reffcode;
	u16 refdwlen;
	u16 refversn;
	u64 refgaddr;
	u64 refselmk;
	u64 refcmpmk;
	u64 reserved;
} __attribute__ ((packed, aligned(8)));

static struct pfault_refbk pfault_init_refbk = {
	.refdiagc = 0x258,
	.reffcode = 0,
	.refdwlen = 5,
	.refversn = 2,
	.refgaddr = __LC_LPP,
	.refselmk = 1ULL << 48,
	.refcmpmk = 1ULL << 48,
	.reserved = __PF_RES_FIELD
};

int pfault_init(void)
{
	int rc;

	if (pfault_disable)
		return -1;
	diag_stat_inc(DIAG_STAT_X258);
	asm volatile(
		"	diag	%1,%0,0x258\n"
Torvalds "0: j 2f\n" 6291da177e4SLinus Torvalds "1: la %0,8\n" 6301da177e4SLinus Torvalds "2:\n" 63194c12cc7SMartin Schwidefsky EX_TABLE(0b,1b) 63200e9e664SMartin Schwidefsky : "=d" (rc) 63300e9e664SMartin Schwidefsky : "a" (&pfault_init_refbk), "m" (pfault_init_refbk) : "cc"); 6341da177e4SLinus Torvalds return rc; 6351da177e4SLinus Torvalds } 6361da177e4SLinus Torvalds 63700e9e664SMartin Schwidefsky static struct pfault_refbk pfault_fini_refbk = { 6387dd8fe1fSHeiko Carstens .refdiagc = 0x258, 6397dd8fe1fSHeiko Carstens .reffcode = 1, 6407dd8fe1fSHeiko Carstens .refdwlen = 5, 6417dd8fe1fSHeiko Carstens .refversn = 2, 6427dd8fe1fSHeiko Carstens }; 6431da177e4SLinus Torvalds 64400e9e664SMartin Schwidefsky void pfault_fini(void) 64500e9e664SMartin Schwidefsky { 64600e9e664SMartin Schwidefsky 647f32269a0SCarsten Otte if (pfault_disable) 6481da177e4SLinus Torvalds return; 6491ec2772eSMartin Schwidefsky diag_stat_inc(DIAG_STAT_X258); 65094c12cc7SMartin Schwidefsky asm volatile( 6511da177e4SLinus Torvalds " diag %0,0,0x258\n" 6526c22c986SHeiko Carstens "0: nopr %%r7\n" 65394c12cc7SMartin Schwidefsky EX_TABLE(0b,0b) 65400e9e664SMartin Schwidefsky : : "a" (&pfault_fini_refbk), "m" (pfault_fini_refbk) : "cc"); 6551da177e4SLinus Torvalds } 6561da177e4SLinus Torvalds 657f2db2e6cSHeiko Carstens static DEFINE_SPINLOCK(pfault_lock); 658f2db2e6cSHeiko Carstens static LIST_HEAD(pfault_list); 659f2db2e6cSHeiko Carstens 6600227f7c4SPeter Zijlstra #define PF_COMPLETE 0x0080 6610227f7c4SPeter Zijlstra 6620227f7c4SPeter Zijlstra /* 6630227f7c4SPeter Zijlstra * The mechanism of our pfault code: if Linux is running as guest, runs a user 6640227f7c4SPeter Zijlstra * space process and the user space process accesses a page that the host has 6650227f7c4SPeter Zijlstra * paged out we get a pfault interrupt. 6660227f7c4SPeter Zijlstra * 6670227f7c4SPeter Zijlstra * This allows us, within the guest, to schedule a different process. Without 6680227f7c4SPeter Zijlstra * this mechanism the host would have to suspend the whole virtual cpu until 6690227f7c4SPeter Zijlstra * the page has been paged in. 6700227f7c4SPeter Zijlstra * 6710227f7c4SPeter Zijlstra * So when we get such an interrupt then we set the state of the current task 6720227f7c4SPeter Zijlstra * to uninterruptible and also set the need_resched flag. Both happens within 6730227f7c4SPeter Zijlstra * interrupt context(!). If we later on want to return to user space we 6740227f7c4SPeter Zijlstra * recognize the need_resched flag and then call schedule(). It's not very 6750227f7c4SPeter Zijlstra * obvious how this works... 6760227f7c4SPeter Zijlstra * 6770227f7c4SPeter Zijlstra * Of course we have a lot of additional fun with the completion interrupt (-> 6780227f7c4SPeter Zijlstra * host signals that a page of a process has been paged in and the process can 6790227f7c4SPeter Zijlstra * continue to run). This interrupt can arrive on any cpu and, since we have 6800227f7c4SPeter Zijlstra * virtual cpus, actually appear before the interrupt that signals that a page 6810227f7c4SPeter Zijlstra * is missing. 
 */
static void pfault_interrupt(struct ext_code ext_code,
			     unsigned int param32, unsigned long param64)
{
	struct task_struct *tsk;
	__u16 subcode;
	pid_t pid;

	/*
	 * Get the external interruption subcode & pfault initial/completion
	 * signal bit. VM stores this in the 'cpu address' field associated
	 * with the external interrupt.
	 */
	subcode = ext_code.subcode;
	if ((subcode & 0xff00) != __SUBCODE_MASK)
		return;
	inc_irq_stat(IRQEXT_PFL);
	/* Get the token (= pid of the affected task). */
	pid = param64 & LPP_PID_MASK;
	rcu_read_lock();
	tsk = find_task_by_pid_ns(pid, &init_pid_ns);
	if (tsk)
		get_task_struct(tsk);
	rcu_read_unlock();
	if (!tsk)
		return;
	spin_lock(&pfault_lock);
	if (subcode & PF_COMPLETE) {
		/* signal bit is set -> a page has been swapped in by VM */
		if (tsk->thread.pfault_wait == 1) {
			/* Initial interrupt was faster than the completion
			 * interrupt. pfault_wait is valid. Set pfault_wait
			 * back to zero and wake up the process. This can
			 * safely be done because the task is still sleeping
			 * and can't produce new pfaults. */
			tsk->thread.pfault_wait = 0;
			list_del(&tsk->thread.list);
			wake_up_process(tsk);
			put_task_struct(tsk);
		} else {
			/* Completion interrupt was faster than initial
			 * interrupt. Set pfault_wait to -1 so the initial
			 * interrupt doesn't put the task to sleep.
			 * If the task is not running, ignore the completion
			 * interrupt since it must be a leftover of a PFAULT
			 * CANCEL operation which didn't remove all pending
			 * completion interrupts. */
			if (task_is_running(tsk))
				tsk->thread.pfault_wait = -1;
		}
	} else {
		/* signal bit not set -> a real page is missing. */
		if (WARN_ON_ONCE(tsk != current))
			goto out;
		if (tsk->thread.pfault_wait == 1) {
			/* Already on the list with a reference: put to sleep */
			goto block;
		} else if (tsk->thread.pfault_wait == -1) {
			/* Completion interrupt was faster than the initial
			 * interrupt (pfault_wait == -1). Set pfault_wait
			 * back to zero and exit. */
			tsk->thread.pfault_wait = 0;
		} else {
			/* Initial interrupt arrived before completion
			 * interrupt. Let the task sleep.
			 * An extra task reference is needed since a different
			 * cpu may set the task state to TASK_RUNNING again
			 * before the scheduler is reached. */
			get_task_struct(tsk);
			tsk->thread.pfault_wait = 1;
			list_add(&tsk->thread.list, &pfault_list);
block:
			/* Since this must be a userspace fault, there
			 * is no kernel task state to trample. Rely on the
			 * return to userspace schedule() to block. */
			__set_current_state(TASK_UNINTERRUPTIBLE);
			set_tsk_need_resched(tsk);
			set_preempt_need_resched();
		}
	}
out:
	spin_unlock(&pfault_lock);
	put_task_struct(tsk);
}

static int pfault_cpu_dead(unsigned int cpu)
{
	struct thread_struct *thread, *next;
	struct task_struct *tsk;

	spin_lock_irq(&pfault_lock);
	list_for_each_entry_safe(thread, next, &pfault_list, list) {
		thread->pfault_wait = 0;
		list_del(&thread->list);
		tsk = container_of(thread, struct task_struct, thread);
		wake_up_process(tsk);
		put_task_struct(tsk);
	}
	spin_unlock_irq(&pfault_lock);
	return 0;
}

static int __init pfault_irq_init(void)
{
	int rc;

	rc = register_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
	if (rc)
		goto out_extint;
	rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP;
	if (rc)
		goto out_pfault;
	irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
	cpuhp_setup_state_nocalls(CPUHP_S390_PFAULT_DEAD, "s390/pfault:dead",
				  NULL, pfault_cpu_dead);
	return 0;

out_pfault:
	unregister_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
out_extint:
	pfault_disable = 1;
	return rc;
}
early_initcall(pfault_irq_init);

#endif /* CONFIG_PFAULT */

#if IS_ENABLED(CONFIG_PGSTE)

void do_secure_storage_access(struct pt_regs *regs)
{
	unsigned long addr = regs->int_parm_long & __FAIL_ADDR_MASK;
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	struct page *page;
	struct gmap *gmap;
	int rc;

	/*
	 * bit 61 tells us if the address is valid, if it's not we
	 * have a major problem and should stop the kernel or send a
	 * SIGSEGV to the process. Unfortunately bit 61 is not
	 * reliable without the misc UV feature so we need to check
	 * for that as well.
	 */
	if (test_bit_inv(BIT_UV_FEAT_MISC, &uv_info.uv_feature_indications) &&
	    !test_bit_inv(61, &regs->int_parm_long)) {
		/*
		 * When this happens, userspace did something that it
		 * was not supposed to do, e.g. branching into secure
		 * memory. Trigger a segmentation fault.
		 */
		if (user_mode(regs)) {
			send_sig(SIGSEGV, current, 0);
			return;
		}

		/*
		 * The kernel should never run into this case and we
		 * have no way out of this situation.
		 */
		panic("Unexpected PGM 0x3d with TEID bit 61=0");
	}

	switch (get_fault_type(regs)) {
	case GMAP_FAULT:
		mm = current->mm;
		gmap = (struct gmap *)S390_lowcore.gmap;
		mmap_read_lock(mm);
		addr = __gmap_translate(gmap, addr);
		mmap_read_unlock(mm);
		if (IS_ERR_VALUE(addr)) {
			do_fault_error(regs, VM_FAULT_BADMAP);
			break;
		}
		fallthrough;
	case USER_FAULT:
		mm = current->mm;
		mmap_read_lock(mm);
		vma = find_vma(mm, addr);
		if (!vma) {
			mmap_read_unlock(mm);
			do_fault_error(regs, VM_FAULT_BADMAP);
			break;
		}
		page = follow_page(vma, addr, FOLL_WRITE | FOLL_GET);
		if (IS_ERR_OR_NULL(page)) {
			mmap_read_unlock(mm);
			break;
		}
		if (arch_make_page_accessible(page))
			send_sig(SIGSEGV, current, 0);
		put_page(page);
		mmap_read_unlock(mm);
		break;
	case KERNEL_FAULT:
		page = phys_to_page(addr);
		if (unlikely(!try_get_page(page)))
			break;
		rc = arch_make_page_accessible(page);
		put_page(page);
		if (rc)
			BUG();
		break;
	default:
		do_fault_error(regs, VM_FAULT_BADMAP);
		WARN_ON_ONCE(1);
	}
}
NOKPROBE_SYMBOL(do_secure_storage_access);

void do_non_secure_storage_access(struct pt_regs *regs)
{
	unsigned long gaddr = regs->int_parm_long & __FAIL_ADDR_MASK;
	struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;

	if (get_fault_type(regs) != GMAP_FAULT) {
		do_fault_error(regs, VM_FAULT_BADMAP);
		WARN_ON_ONCE(1);
		return;
	}

	if (gmap_convert_to_secure(gmap, gaddr) == -EINVAL)
		send_sig(SIGSEGV, current, 0);
}
NOKPROBE_SYMBOL(do_non_secure_storage_access);

void do_secure_storage_violation(struct pt_regs *regs)
{
	unsigned long gaddr = regs->int_parm_long & __FAIL_ADDR_MASK;
	struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;

	/*
	 * If the VM has been rebooted, its address space might still contain
	 * secure pages from the previous boot.
	 * Clear the page so it can be reused.
	 */
	if (!gmap_destroy_page(gmap, gaddr))
		return;
	/*
	 * Either KVM messed up the secure guest mapping or the same
	 * page is mapped into multiple secure guests.
	 *
	 * This exception is only triggered when a guest 2 is running
	 * and can therefore never occur in kernel context.
	 */
	printk_ratelimited(KERN_WARNING
			   "Secure storage violation in task: %s, pid %d\n",
			   current->comm, current->pid);
	send_sig(SIGSEGV, current, 0);
}

#endif /* CONFIG_PGSTE */