xref: /openbmc/linux/arch/s390/mm/fault.c (revision 35e6bcd1)
// SPDX-License-Identifier: GPL-2.0
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (uweigand@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/fault.c"
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/compat.h>
#include <linux/smp.h>
#include <linux/kdebug.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/extable.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/hugetlb.h>
#include <linux/kfence.h>
#include <asm/asm-extable.h>
#include <asm/asm-offsets.h>
#include <asm/diag.h>
#include <asm/gmap.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/facility.h>
#include <asm/uv.h>
#include "../kernel/entry.h"

#define __FAIL_ADDR_MASK -4096L
#define __SUBCODE_MASK 0x0600
#define __PF_RES_FIELD 0x8000000000000000ULL
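/*
 * Layout notes (a sketch; see the Principles of Operation for the
 * authoritative definitions): the TEID in regs->int_parm_long carries
 * the failing address, with the low 12 bits used as flag/indication
 * bits -- hence the -4096L page mask above.  __SUBCODE_MASK is the
 * expected subcode class of the pfault external interrupt, and
 * __PF_RES_FIELD fills the reserved doubleword of the pfault
 * parameter block handed to DIAG X'258' below.
 */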

/*
 * Allocate private vm_fault reason codes from the top of the range.
 * Make sure they do not collide with any of the generic
 * vm_fault_reason values.
 */
#define VM_FAULT_BADCONTEXT	((__force vm_fault_t)0x80000000)
#define VM_FAULT_BADMAP		((__force vm_fault_t)0x40000000)
#define VM_FAULT_BADACCESS	((__force vm_fault_t)0x20000000)
#define VM_FAULT_SIGNAL		((__force vm_fault_t)0x10000000)
#define VM_FAULT_PFAULT		((__force vm_fault_t)0x8000000)

enum fault_type {
	KERNEL_FAULT,
	USER_FAULT,
	GMAP_FAULT,
};

static unsigned long store_indication __read_mostly;

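/*
 * Facility 75 is, to our reading, the access-exception
 * fetch/store-indication facility (see the Principles of Operation).
 * With it installed, the TEID bits selected by the 0xc00 mask below
 * report whether the failing access was a fetch or a store;
 * fault_is_write() depends on this.
 */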
static int __init fault_init(void)
{
	if (test_facility(75))
		store_indication = 0xc00;
	return 0;
}
early_initcall(fault_init);

/*
 * Find out which address space caused the exception.
 */
static enum fault_type get_fault_type(struct pt_regs *regs)
{
	unsigned long trans_exc_code;

	trans_exc_code = regs->int_parm_long & 3;
	if (likely(trans_exc_code == 0)) {
		/* primary space exception */
		if (user_mode(regs))
			return USER_FAULT;
		if (!IS_ENABLED(CONFIG_PGSTE))
			return KERNEL_FAULT;
		if (test_pt_regs_flag(regs, PIF_GUEST_FAULT))
			return GMAP_FAULT;
		return KERNEL_FAULT;
	}
	if (trans_exc_code == 2)
		return USER_FAULT;
	if (trans_exc_code == 1) {
		/* access register mode, not used in the kernel */
		return USER_FAULT;
	}
	/* home space exception -> access via kernel ASCE */
	return KERNEL_FAULT;
}

static unsigned long get_fault_address(struct pt_regs *regs)
{
	unsigned long trans_exc_code = regs->int_parm_long;

	return trans_exc_code & __FAIL_ADDR_MASK;
}

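/*
 * With facility 75 installed, store_indication is 0xc00 and a value
 * of 0x400 in those TEID bits marks the access as a store.  Without
 * the facility store_indication stays 0, the comparison below is
 * always false, and write faults are instead recognized via the
 * protection-exception path (do_protection_exception() passes
 * VM_WRITE explicitly).
 */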
static bool fault_is_write(struct pt_regs *regs)
{
	unsigned long trans_exc_code = regs->int_parm_long;

	return (trans_exc_code & store_indication) == 0x400;
}

static int bad_address(void *p)
{
	unsigned long dummy;

	return get_kernel_nofault(dummy, (unsigned long *)p);
}

static void dump_pagetable(unsigned long asce, unsigned long address)
{
	unsigned long *table = __va(asce & _ASCE_ORIGIN);

	pr_alert("AS:%016lx ", asce);
	switch (asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		table += (address & _REGION1_INDEX) >> _REGION1_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("R1:%016lx ", *table);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = __va(*table & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_REGION2:
		table += (address & _REGION2_INDEX) >> _REGION2_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("R2:%016lx ", *table);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = __va(*table & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_REGION3:
		table += (address & _REGION3_INDEX) >> _REGION3_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("R3:%016lx ", *table);
		if (*table & (_REGION_ENTRY_INVALID | _REGION3_ENTRY_LARGE))
			goto out;
		table = __va(*table & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_SEGMENT:
		table += (address & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("S:%016lx ", *table);
		if (*table & (_SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_LARGE))
			goto out;
		table = __va(*table & _SEGMENT_ENTRY_ORIGIN);
	}
	table += (address & _PAGE_INDEX) >> _PAGE_SHIFT;
	if (bad_address(table))
		goto bad;
	pr_cont("P:%016lx ", *table);
out:
	pr_cont("\n");
	return;
bad:
	pr_cont("BAD\n");
}
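
/*
 * Illustrative output of dump_pagetable() above for a four-level
 * (region-third) walk that reaches a valid page-table entry, with
 * made-up values:
 *
 *   AS:0000000091e58007 R3:000000009565c007 S:0000000094f24800 P:0000000083ac5100
 *
 * The walk stops early at an invalid or large entry and prints "BAD"
 * if a table entry itself cannot be read.
 */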

static void dump_fault_info(struct pt_regs *regs)
{
	unsigned long asce;

	pr_alert("Failing address: %016lx TEID: %016lx\n",
		 regs->int_parm_long & __FAIL_ADDR_MASK, regs->int_parm_long);
	pr_alert("Fault in ");
	switch (regs->int_parm_long & 3) {
	case 3:
		pr_cont("home space ");
		break;
	case 2:
		pr_cont("secondary space ");
		break;
	case 1:
		pr_cont("access register ");
		break;
	case 0:
		pr_cont("primary space ");
		break;
	}
	pr_cont("mode while using ");
	switch (get_fault_type(regs)) {
	case USER_FAULT:
		asce = S390_lowcore.user_asce;
		pr_cont("user ");
		break;
	case GMAP_FAULT:
		asce = ((struct gmap *) S390_lowcore.gmap)->asce;
		pr_cont("gmap ");
		break;
	case KERNEL_FAULT:
		asce = S390_lowcore.kernel_asce;
		pr_cont("kernel ");
		break;
	default:
		unreachable();
	}
	pr_cont("ASCE.\n");
	dump_pagetable(asce, regs->int_parm_long & __FAIL_ADDR_MASK);
}

int show_unhandled_signals = 1;

void report_user_fault(struct pt_regs *regs, long signr, int is_mm_fault)
{
	if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
		return;
	if (!unhandled_signal(current, signr))
		return;
	if (!printk_ratelimit())
		return;
	printk(KERN_ALERT "User process fault: interruption code %04x ilc:%d ",
	       regs->int_code & 0xffff, regs->int_code >> 17);
	print_vma_addr(KERN_CONT "in ", regs->psw.addr);
	printk(KERN_CONT "\n");
	if (is_mm_fault)
		dump_fault_info(regs);
	show_regs(regs);
}

/*
 * Send SIGSEGV to task.  This is an external routine
 * to keep the stack usage of do_page_fault small.
 */
static noinline void do_sigsegv(struct pt_regs *regs, int si_code)
{
	report_user_fault(regs, SIGSEGV, 1);
	force_sig_fault(SIGSEGV, si_code,
			(void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK));
}

static noinline void do_no_context(struct pt_regs *regs, vm_fault_t fault)
{
	enum fault_type fault_type;
	unsigned long address;
	bool is_write;

	if (fixup_exception(regs))
		return;
	fault_type = get_fault_type(regs);
	if ((fault_type == KERNEL_FAULT) && (fault == VM_FAULT_BADCONTEXT)) {
		address = get_fault_address(regs);
		is_write = fault_is_write(regs);
		if (kfence_handle_page_fault(address, is_write, regs))
			return;
	}
	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	if (fault_type == KERNEL_FAULT)
		printk(KERN_ALERT "Unable to handle kernel pointer dereference in virtual kernel address space\n");
	else
		printk(KERN_ALERT "Unable to handle kernel paging request in virtual user address space\n");
	dump_fault_info(regs);
	die(regs, "Oops");
}

static noinline void do_low_address(struct pt_regs *regs)
{
	/*
	 * Low-address protection hit in kernel mode means
	 * NULL pointer write access in kernel mode.
	 */
	if (regs->psw.mask & PSW_MASK_PSTATE) {
		/* Low-address protection hit in user mode 'cannot happen'. */
		die(regs, "Low-address protection");
	}

	do_no_context(regs, VM_FAULT_BADACCESS);
}

static noinline void do_sigbus(struct pt_regs *regs)
{
	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	force_sig_fault(SIGBUS, BUS_ADRERR,
			(void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK));
}

static noinline void do_fault_error(struct pt_regs *regs, vm_fault_t fault)
{
	int si_code;

	switch (fault) {
	case VM_FAULT_BADACCESS:
	case VM_FAULT_BADMAP:
		/* Bad memory access. Check if it is kernel or user space. */
		if (user_mode(regs)) {
			/* User mode accesses just cause a SIGSEGV */
			si_code = (fault == VM_FAULT_BADMAP) ?
				SEGV_MAPERR : SEGV_ACCERR;
			do_sigsegv(regs, si_code);
			break;
		}
		fallthrough;
	case VM_FAULT_BADCONTEXT:
	case VM_FAULT_PFAULT:
		do_no_context(regs, fault);
		break;
	case VM_FAULT_SIGNAL:
		if (!user_mode(regs))
			do_no_context(regs, fault);
		break;
	default: /* fault & VM_FAULT_ERROR */
		if (fault & VM_FAULT_OOM) {
			if (!user_mode(regs))
				do_no_context(regs, fault);
			else
				pagefault_out_of_memory();
		} else if (fault & VM_FAULT_SIGSEGV) {
			/* Kernel mode? Handle exceptions or die */
			if (!user_mode(regs))
				do_no_context(regs, fault);
			else
				do_sigsegv(regs, SEGV_MAPERR);
		} else if (fault & VM_FAULT_SIGBUS) {
			/* Kernel mode? Handle exceptions or die */
			if (!user_mode(regs))
				do_no_context(regs, fault);
			else
				do_sigbus(regs);
		} else {
			BUG();
		}
		break;
	}
}

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * interruption code (int_code):
 *   04       Protection           ->  Write-Protection  (suppression)
 *   10       Segment translation  ->  Not present       (nullification)
 *   11       Page translation     ->  Not present       (nullification)
 *   3b       Region third trans.  ->  Not present       (nullification)
 */
static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
{
	struct gmap *gmap;
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	enum fault_type type;
	unsigned long address;
	unsigned int flags;
	vm_fault_t fault;
	bool is_write;

	tsk = current;
	/*
	 * The instruction that caused the program check has
	 * been nullified. Don't signal single step via SIGTRAP.
	 */
	clear_thread_flag(TIF_PER_TRAP);

	if (kprobe_page_fault(regs, 14))
		return 0;

	mm = tsk->mm;
	address = get_fault_address(regs);
	is_write = fault_is_write(regs);

	/*
	 * Verify that the fault happened in user space, that
	 * we are not in an interrupt and that there is a
	 * user context.
	 */
	fault = VM_FAULT_BADCONTEXT;
	type = get_fault_type(regs);
	switch (type) {
	case KERNEL_FAULT:
		goto out;
	case USER_FAULT:
	case GMAP_FAULT:
		if (faulthandler_disabled() || !mm)
			goto out;
		break;
	}

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
	flags = FAULT_FLAG_DEFAULT;
	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (is_write)
		access = VM_WRITE;
	if (access == VM_WRITE)
		flags |= FAULT_FLAG_WRITE;
#ifdef CONFIG_PER_VMA_LOCK
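	/*
	 * Try the lockless per-VMA fault path first; it only applies
	 * to user-mode faults.  On any obstacle (no VMA found, access
	 * not permitted, fault must retry) fall back to the classic
	 * path under mmap_lock below.
	 */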
	if (!(flags & FAULT_FLAG_USER))
		goto lock_mmap;
	vma = lock_vma_under_rcu(mm, address);
	if (!vma)
		goto lock_mmap;
	if (!(vma->vm_flags & access)) {
		vma_end_read(vma);
		goto lock_mmap;
	}
	fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs);
	vma_end_read(vma);
	if (!(fault & VM_FAULT_RETRY)) {
		count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
		goto out;
	}
	count_vm_vma_lock_event(VMA_LOCK_RETRY);
	/* Quick path to respond to signals */
	if (fault_signal_pending(fault, regs)) {
		fault = VM_FAULT_SIGNAL;
		goto out;
	}
lock_mmap:
#endif /* CONFIG_PER_VMA_LOCK */
	mmap_read_lock(mm);

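	/*
	 * For a guest (gmap) fault, first translate the guest address
	 * to the backing host userspace address.  With pfault enabled,
	 * ask for FAULT_FLAG_RETRY_NOWAIT so the wait can be handed to
	 * the guest via the pfault handshake instead of blocking the
	 * whole virtual CPU.
	 */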
	gmap = NULL;
	if (IS_ENABLED(CONFIG_PGSTE) && type == GMAP_FAULT) {
		gmap = (struct gmap *) S390_lowcore.gmap;
		current->thread.gmap_addr = address;
		current->thread.gmap_write_flag = !!(flags & FAULT_FLAG_WRITE);
		current->thread.gmap_int_code = regs->int_code & 0xffff;
		address = __gmap_translate(gmap, address);
		if (address == -EFAULT) {
			fault = VM_FAULT_BADMAP;
			goto out_up;
		}
		if (gmap->pfault_enabled)
			flags |= FAULT_FLAG_RETRY_NOWAIT;
	}

retry:
	fault = VM_FAULT_BADMAP;
	vma = find_vma(mm, address);
	if (!vma)
		goto out_up;

	if (unlikely(vma->vm_start > address)) {
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out_up;
		if (expand_stack(vma, address))
			goto out_up;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
	fault = VM_FAULT_BADACCESS;
	if (unlikely(!(vma->vm_flags & access)))
		goto out_up;

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags, regs);
	if (fault_signal_pending(fault, regs)) {
		fault = VM_FAULT_SIGNAL;
		if (flags & FAULT_FLAG_RETRY_NOWAIT)
			goto out_up;
		goto out;
	}

	/* The fault is fully completed (including releasing mmap lock) */
	if (fault & VM_FAULT_COMPLETED) {
		if (gmap) {
			mmap_read_lock(mm);
			goto out_gmap;
		}
		fault = 0;
		goto out;
	}

	if (unlikely(fault & VM_FAULT_ERROR))
		goto out_up;

	if (fault & VM_FAULT_RETRY) {
		if (IS_ENABLED(CONFIG_PGSTE) && gmap &&
		    (flags & FAULT_FLAG_RETRY_NOWAIT)) {
			/*
			 * FAULT_FLAG_RETRY_NOWAIT has been set, mmap_lock has
			 * not been released
			 */
			current->thread.gmap_pfault = 1;
			fault = VM_FAULT_PFAULT;
			goto out_up;
		}
		flags &= ~FAULT_FLAG_RETRY_NOWAIT;
		flags |= FAULT_FLAG_TRIED;
		mmap_read_lock(mm);
		goto retry;
	}
out_gmap:
	if (IS_ENABLED(CONFIG_PGSTE) && gmap) {
		address = __gmap_link(gmap, current->thread.gmap_addr,
				      address);
		if (address == -EFAULT) {
			fault = VM_FAULT_BADMAP;
			goto out_up;
		}
		if (address == -ENOMEM) {
			fault = VM_FAULT_OOM;
			goto out_up;
		}
	}
	fault = 0;
out_up:
	mmap_read_unlock(mm);
out:
	return fault;
}

void do_protection_exception(struct pt_regs *regs)
{
	unsigned long trans_exc_code;
	int access;
	vm_fault_t fault;

	trans_exc_code = regs->int_parm_long;
	/*
	 * Protection exceptions are suppressing, decrement psw address.
	 * The exception to this rule is aborted transactions; for those
	 * the PSW already points to the correct location.
	 */
	if (!(regs->int_code & 0x200))
		regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
	/*
	 * Check for low-address protection.  This needs to be treated
	 * as a special case because the translation exception code
	 * field is not guaranteed to contain valid data in this case.
	 */
	if (unlikely(!(trans_exc_code & 4))) {
		do_low_address(regs);
		return;
	}
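	/*
	 * With the instruction-execution-protection facility (our
	 * reading of what MACHINE_HAS_NX tests), TEID bit 0x80 flags a
	 * fault caused by an instruction fetch from non-executable
	 * memory; report it against the PSW address rather than the
	 * operand address and treat it as a bad access.
	 */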
	if (unlikely(MACHINE_HAS_NX && (trans_exc_code & 0x80))) {
		regs->int_parm_long = (trans_exc_code & ~PAGE_MASK) |
					(regs->psw.addr & PAGE_MASK);
		access = VM_EXEC;
		fault = VM_FAULT_BADACCESS;
	} else {
		access = VM_WRITE;
		fault = do_exception(regs, access);
	}
	if (unlikely(fault))
		do_fault_error(regs, fault);
}
NOKPROBE_SYMBOL(do_protection_exception);

void do_dat_exception(struct pt_regs *regs)
{
	int access;
	vm_fault_t fault;

	access = VM_ACCESS_FLAGS;
	fault = do_exception(regs, access);
	if (unlikely(fault))
		do_fault_error(regs, fault);
}
NOKPROBE_SYMBOL(do_dat_exception);

#ifdef CONFIG_PFAULT
/*
 * 'pfault' pseudo page faults routines.
 */
static int pfault_disable;

static int __init nopfault(char *str)
{
	pfault_disable = 1;
	return 1;
}

__setup("nopfault", nopfault);

struct pfault_refbk {
	u16 refdiagc;
	u16 reffcode;
	u16 refdwlen;
	u16 refversn;
	u64 refgaddr;
	u64 refselmk;
	u64 refcmpmk;
	u64 reserved;
} __attribute__ ((packed, aligned(8)));

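/*
 * Parameter block for DIAG X'258' (the pfault handshake with the
 * hypervisor).  A sketch of the fields as used here: refdiagc is the
 * diagnose code, reffcode selects the function (0 = establish the
 * token, 1 = cancel), and refgaddr names the guest address whose
 * contents serve as the fault token.  Pointing it at the lowcore LPP
 * field means the host hands back the PID of the faulting task, which
 * pfault_interrupt() below recovers via LPP_PID_MASK.
 */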
static struct pfault_refbk pfault_init_refbk = {
	.refdiagc = 0x258,
	.reffcode = 0,
	.refdwlen = 5,
	.refversn = 2,
	.refgaddr = __LC_LPP,
	.refselmk = 1ULL << 48,
	.refcmpmk = 1ULL << 48,
	.reserved = __PF_RES_FIELD
};

int pfault_init(void)
{
	int rc;

	if (pfault_disable)
		return -1;
	diag_stat_inc(DIAG_STAT_X258);
	asm volatile(
		"	diag	%1,%0,0x258\n"
		"0:	j	2f\n"
		"1:	la	%0,8\n"
		"2:\n"
		EX_TABLE(0b,1b)
		: "=d" (rc)
		: "a" (&pfault_init_refbk), "m" (pfault_init_refbk) : "cc");
	return rc;
}

static struct pfault_refbk pfault_fini_refbk = {
	.refdiagc = 0x258,
	.reffcode = 1,
	.refdwlen = 5,
	.refversn = 2,
};

void pfault_fini(void)
{
	if (pfault_disable)
		return;
	diag_stat_inc(DIAG_STAT_X258);
	asm volatile(
		"	diag	%0,0,0x258\n"
		"0:	nopr	%%r7\n"
		EX_TABLE(0b,0b)
		: : "a" (&pfault_fini_refbk), "m" (pfault_fini_refbk) : "cc");
}

static DEFINE_SPINLOCK(pfault_lock);
static LIST_HEAD(pfault_list);

#define PF_COMPLETE	0x0080

/*
 * The mechanism of our pfault code: if Linux is running as a guest,
 * runs a user space process, and the user space process accesses a
 * page that the host has paged out, we get a pfault interrupt.
 *
 * This allows us, within the guest, to schedule a different process.
 * Without this mechanism the host would have to suspend the whole
 * virtual cpu until the page has been paged in.
 *
 * So when we get such an interrupt we set the state of the current
 * task to uninterruptible and also set the need_resched flag.  Both
 * happen within interrupt context(!).  When we later return to user
 * space we recognize the need_resched flag and then call schedule().
 * It's not very obvious how this works...
 *
 * Of course we have a lot of additional fun with the completion
 * interrupt (-> host signals that a page of a process has been paged
 * in and the process can continue to run).  This interrupt can arrive
 * on any cpu and, since we have virtual cpus, can actually appear
 * before the interrupt that signals that a page is missing.
 */
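
/*
 * State summary for thread.pfault_wait, derived from the code below:
 *    0  no pfault handshake in flight
 *    1  initial interrupt seen, task queued on pfault_list and
 *       put to sleep
 *   -1  completion interrupt overtook the initial one; the late
 *       initial interrupt only resets the state to 0
 */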
static void pfault_interrupt(struct ext_code ext_code,
			     unsigned int param32, unsigned long param64)
{
	struct task_struct *tsk;
	__u16 subcode;
	pid_t pid;

	/*
	 * Get the external interruption subcode & pfault initial/completion
	 * signal bit. VM stores this in the 'cpu address' field associated
	 * with the external interrupt.
	 */
	subcode = ext_code.subcode;
	if ((subcode & 0xff00) != __SUBCODE_MASK)
		return;
	inc_irq_stat(IRQEXT_PFL);
	/* Get the token (= pid of the affected task). */
	pid = param64 & LPP_PID_MASK;
	rcu_read_lock();
	tsk = find_task_by_pid_ns(pid, &init_pid_ns);
	if (tsk)
		get_task_struct(tsk);
	rcu_read_unlock();
	if (!tsk)
		return;
	spin_lock(&pfault_lock);
	if (subcode & PF_COMPLETE) {
		/* signal bit is set -> a page has been swapped in by VM */
		if (tsk->thread.pfault_wait == 1) {
			/* Initial interrupt was faster than the completion
			 * interrupt. pfault_wait is valid. Set pfault_wait
			 * back to zero and wake up the process. This can
			 * safely be done because the task is still sleeping
			 * and can't produce new pfaults. */
			tsk->thread.pfault_wait = 0;
			list_del(&tsk->thread.list);
			wake_up_process(tsk);
			put_task_struct(tsk);
		} else {
			/* Completion interrupt was faster than initial
			 * interrupt. Set pfault_wait to -1 so the initial
			 * interrupt doesn't put the task to sleep.
			 * If the task is not running, ignore the completion
			 * interrupt since it must be a leftover of a PFAULT
			 * CANCEL operation which didn't remove all pending
			 * completion interrupts. */
			if (task_is_running(tsk))
				tsk->thread.pfault_wait = -1;
		}
	} else {
		/* signal bit not set -> a real page is missing. */
		if (WARN_ON_ONCE(tsk != current))
			goto out;
		if (tsk->thread.pfault_wait == 1) {
			/* Already on the list with a reference: put to sleep */
			goto block;
		} else if (tsk->thread.pfault_wait == -1) {
			/* Completion interrupt was faster than the initial
			 * interrupt (pfault_wait == -1). Set pfault_wait
			 * back to zero and exit. */
			tsk->thread.pfault_wait = 0;
		} else {
			/* Initial interrupt arrived before completion
			 * interrupt. Let the task sleep.
			 * An extra task reference is needed since a different
			 * cpu may set the task state to TASK_RUNNING again
			 * before the scheduler is reached. */
			get_task_struct(tsk);
			tsk->thread.pfault_wait = 1;
			list_add(&tsk->thread.list, &pfault_list);
block:
			/* Since this must be a userspace fault, there
			 * is no kernel task state to trample. Rely on the
			 * return to userspace schedule() to block. */
			__set_current_state(TASK_UNINTERRUPTIBLE);
			set_tsk_need_resched(tsk);
			set_preempt_need_resched();
		}
	}
out:
	spin_unlock(&pfault_lock);
	put_task_struct(tsk);
}

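/*
 * CPU-hotplug callback: when a cpu goes away, wake up every task
 * still parked on pfault_list.  Presumably (our reading) completion
 * interrupts may be lost across the hot-unplug, so letting the tasks
 * simply retry the fault is the safe option.
 */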
static int pfault_cpu_dead(unsigned int cpu)
{
	struct thread_struct *thread, *next;
	struct task_struct *tsk;

	spin_lock_irq(&pfault_lock);
	list_for_each_entry_safe(thread, next, &pfault_list, list) {
		thread->pfault_wait = 0;
		list_del(&thread->list);
		tsk = container_of(thread, struct task_struct, thread);
		wake_up_process(tsk);
		put_task_struct(tsk);
	}
	spin_unlock_irq(&pfault_lock);
	return 0;
}

static int __init pfault_irq_init(void)
{
	int rc;

	rc = register_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
	if (rc)
		goto out_extint;
	rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP;
	if (rc)
		goto out_pfault;
	irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
	cpuhp_setup_state_nocalls(CPUHP_S390_PFAULT_DEAD, "s390/pfault:dead",
				  NULL, pfault_cpu_dead);
	return 0;

out_pfault:
	unregister_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
out_extint:
	pfault_disable = 1;
	return rc;
}
early_initcall(pfault_irq_init);

#endif /* CONFIG_PFAULT */

#if IS_ENABLED(CONFIG_PGSTE)

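/*
 * Handlers for the protected-virtualization (Ultravisor) program
 * checks: secure-storage access (PGM 0x3d, per the panic message
 * below), non-secure-storage access, and secure-storage violation.
 * These only trigger while secure (PV) guests are involved.
 */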
void do_secure_storage_access(struct pt_regs *regs)
{
	unsigned long addr = regs->int_parm_long & __FAIL_ADDR_MASK;
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	struct page *page;
	struct gmap *gmap;
	int rc;

	/*
	 * Bit 61 tells us if the address is valid; if it's not, we
	 * have a major problem and should stop the kernel or send a
	 * SIGSEGV to the process. Unfortunately bit 61 is not
	 * reliable without the misc UV feature, so we need to check
	 * for that as well.
	 */
	if (test_bit_inv(BIT_UV_FEAT_MISC, &uv_info.uv_feature_indications) &&
	    !test_bit_inv(61, &regs->int_parm_long)) {
		/*
		 * When this happens, userspace did something that it
		 * was not supposed to do, e.g. branching into secure
		 * memory. Trigger a segmentation fault.
		 */
		if (user_mode(regs)) {
			send_sig(SIGSEGV, current, 0);
			return;
		}

		/*
		 * The kernel should never run into this case and we
		 * have no way out of this situation.
		 */
		panic("Unexpected PGM 0x3d with TEID bit 61=0");
	}

	switch (get_fault_type(regs)) {
	case GMAP_FAULT:
		mm = current->mm;
		gmap = (struct gmap *)S390_lowcore.gmap;
		mmap_read_lock(mm);
		addr = __gmap_translate(gmap, addr);
		mmap_read_unlock(mm);
		if (IS_ERR_VALUE(addr)) {
			do_fault_error(regs, VM_FAULT_BADMAP);
			break;
		}
		fallthrough;
	case USER_FAULT:
		mm = current->mm;
		mmap_read_lock(mm);
		vma = find_vma(mm, addr);
		if (!vma) {
			mmap_read_unlock(mm);
			do_fault_error(regs, VM_FAULT_BADMAP);
			break;
		}
		page = follow_page(vma, addr, FOLL_WRITE | FOLL_GET);
		if (IS_ERR_OR_NULL(page)) {
			mmap_read_unlock(mm);
			break;
		}
		if (arch_make_page_accessible(page))
			send_sig(SIGSEGV, current, 0);
		put_page(page);
		mmap_read_unlock(mm);
		break;
	case KERNEL_FAULT:
		page = phys_to_page(addr);
		if (unlikely(!try_get_page(page)))
			break;
		rc = arch_make_page_accessible(page);
		put_page(page);
		if (rc)
			BUG();
		break;
	default:
		do_fault_error(regs, VM_FAULT_BADMAP);
		WARN_ON_ONCE(1);
	}
}
NOKPROBE_SYMBOL(do_secure_storage_access);

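/*
 * Roughly: a secure guest touched one of its pages while that page is
 * currently non-secure, so ask the Ultravisor to import (convert) it.
 * -EINVAL from gmap_convert_to_secure() means the guest address is
 * unusable, and the task gets a SIGSEGV.
 */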
void do_non_secure_storage_access(struct pt_regs *regs)
{
	unsigned long gaddr = regs->int_parm_long & __FAIL_ADDR_MASK;
	struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;

	if (get_fault_type(regs) != GMAP_FAULT) {
		do_fault_error(regs, VM_FAULT_BADMAP);
		WARN_ON_ONCE(1);
		return;
	}

	if (gmap_convert_to_secure(gmap, gaddr) == -EINVAL)
		send_sig(SIGSEGV, current, 0);
}
NOKPROBE_SYMBOL(do_non_secure_storage_access);

void do_secure_storage_violation(struct pt_regs *regs)
{
	unsigned long gaddr = regs->int_parm_long & __FAIL_ADDR_MASK;
	struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;

	/*
	 * If the VM has been rebooted, its address space might still contain
	 * secure pages from the previous boot.
	 * Clear the page so it can be reused.
	 */
	if (!gmap_destroy_page(gmap, gaddr))
		return;
	/*
	 * Either KVM messed up the secure guest mapping or the same
	 * page is mapped into multiple secure guests.
	 *
	 * This exception is only triggered when a guest 2 is running
	 * and can therefore never occur in kernel context.
	 */
	printk_ratelimited(KERN_WARNING
			   "Secure storage violation in task: %s, pid %d\n",
			   current->comm, current->pid);
	send_sig(SIGSEGV, current, 0);
}

#endif /* CONFIG_PGSTE */