xref: /openbmc/linux/arch/s390/kernel/nmi.c (revision 8e7ae8ba)
// SPDX-License-Identifier: GPL-2.0
/*
 *   Machine check handler
 *
 *    Copyright IBM Corp. 2000, 2009
 *    Author(s): Ingo Adlung <adlung@de.ibm.com>,
 *		 Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *		 Cornelia Huck <cornelia.huck@de.ibm.com>,
 */

#include <linux/kernel_stat.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/log2.h>
#include <linux/kprobes.h>
#include <linux/kmemleak.h>
#include <linux/time.h>
#include <linux/module.h>
#include <linux/sched/signal.h>

#include <linux/export.h>
#include <asm/lowcore.h>
#include <asm/smp.h>
#include <asm/stp.h>
#include <asm/cputime.h>
#include <asm/nmi.h>
#include <asm/crw.h>
#include <asm/switch_to.h>
#include <asm/ctl_reg.h>
#include <asm/asm-offsets.h>
#include <asm/pai.h>

#include <linux/kvm_host.h>

struct mcck_struct {
	unsigned int kill_task : 1;
	unsigned int channel_report : 1;
	unsigned int warning : 1;
	unsigned int stp_queue : 1;
	unsigned long mcck_code;
};

static DEFINE_PER_CPU(struct mcck_struct, cpu_mcck);
static struct kmem_cache *mcesa_cache;
static unsigned long mcesa_origin_lc;

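/*
 * The machine check extended save area (MCESA) is only needed if the
 * machine provides the vector or guarded storage facilities; without
 * them all register state fits into the classic lowcore save areas.
 */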
static inline int nmi_needs_mcesa(void)
{
	return MACHINE_HAS_VX || MACHINE_HAS_GS;
}

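/*
 * With guarded storage the save area grows to its maximum size;
 * otherwise the minimum size covering the vector registers suffices.
 */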
static inline unsigned long nmi_get_mcesa_size(void)
{
	if (MACHINE_HAS_GS)
		return MCESA_MAX_SIZE;
	return MCESA_MIN_SIZE;
}

/*
 * The initial machine check extended save area for the boot CPU.
 * It will be replaced on the boot CPU reinit with an allocated
 * structure. The structure is required for machine checks happening
 * early in the boot process.
 */
static struct mcesa boot_mcesa __initdata __aligned(MCESA_MAX_SIZE);

void __init nmi_alloc_mcesa_early(u64 *mcesad)
{
	if (!nmi_needs_mcesa())
		return;
	*mcesad = __pa(&boot_mcesa);
	if (MACHINE_HAS_GS)
		*mcesad |= ilog2(MCESA_MAX_SIZE);
}

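/*
 * Create the slab cache used for the per-CPU extended save areas. Each
 * object is aligned to the save area size, which keeps the low bits of
 * the origin free for the ilog2() size encoding stored alongside it.
 */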
static void __init nmi_alloc_cache(void)
{
	unsigned long size;

	if (!nmi_needs_mcesa())
		return;
	size = nmi_get_mcesa_size();
	if (size > MCESA_MIN_SIZE)
		mcesa_origin_lc = ilog2(size);
	/* create slab cache for the machine-check-extended-save-areas */
	mcesa_cache = kmem_cache_create("nmi_save_areas", size, size, 0, NULL);
	if (!mcesa_cache)
		panic("Couldn't create nmi save area cache");
}

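/*
 * Marked __ref because the first invocation may legitimately call the
 * __init function nmi_alloc_cache(), which happens during early boot
 * before the init sections are freed.
 */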
int __ref nmi_alloc_mcesa(u64 *mcesad)
{
	unsigned long origin;

	*mcesad = 0;
	if (!nmi_needs_mcesa())
		return 0;
	if (!mcesa_cache)
		nmi_alloc_cache();
	origin = (unsigned long) kmem_cache_alloc(mcesa_cache, GFP_KERNEL);
	if (!origin)
		return -ENOMEM;
	/* The pointer is stored with mcesa_bits ORed in */
	kmemleak_not_leak((void *) origin);
	*mcesad = __pa(origin) | mcesa_origin_lc;
	return 0;
}

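/*
 * Counterpart to nmi_alloc_mcesa(): mask the encoded size bits off the
 * lowcore value to recover the save area origin before freeing it.
 */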
void nmi_free_mcesa(u64 *mcesad)
{
	if (!nmi_needs_mcesa())
		return;
	kmem_cache_free(mcesa_cache, __va(*mcesad & MCESA_ORIGIN_MASK));
}

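/*
 * Unrecoverable damage: stop all other CPUs and put this one into
 * disabled wait. This function does not return.
 */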
static notrace void s390_handle_damage(void)
{
	smp_emergency_stop();
	disabled_wait();
	while (1);
}
NOKPROBE_SYMBOL(s390_handle_damage);

/*
 * Main machine check handler function. Will be called with interrupts disabled
 * and machine checks enabled.
 */
void __s390_handle_mcck(void)
{
	struct mcck_struct mcck;

	/*
	 * Disable machine checks and get the current state of accumulated
	 * machine checks. Afterwards delete the old state and enable machine
	 * checks again.
	 */
	local_mcck_disable();
	mcck = *this_cpu_ptr(&cpu_mcck);
	memset(this_cpu_ptr(&cpu_mcck), 0, sizeof(mcck));
	local_mcck_enable();

	if (mcck.channel_report)
		crw_handle_channel_report();
	/*
	 * A warning may remain pending for a prolonged period on bare iron,
	 * in fact until the machine is powered off or the problem goes away.
	 * So we just stop listening for the WARNING MCH to avoid being
	 * interrupted continuously. One caveat: we must do this per
	 * processor and cannot use the SMP version of ctl_clear_bit().
	 * On VM we only get one interrupt per virtually presented
	 * machine check. Although one suffices, we may get one interrupt
	 * per (virtual) CPU.
	 */
	if (mcck.warning) {	/* WARNING pending ? */
		static int mchchk_wng_posted = 0;

		/* Use single cpu clear, as we cannot handle smp here. */
		__ctl_clear_bit(14, 24);	/* Disable WARNING MCH */
		if (xchg(&mchchk_wng_posted, 1) == 0)
			kill_cad_pid(SIGPWR, 1);
	}
	if (mcck.stp_queue)
		stp_queue_work();
	if (mcck.kill_task) {
		local_irq_enable();
		printk(KERN_EMERG "mcck: Terminating task because of machine "
		       "malfunction (code 0x%016lx).\n", mcck.mcck_code);
		printk(KERN_EMERG "mcck: task: %s, pid: %d.\n",
		       current->comm, current->pid);
		make_task_dead(SIGSEGV);
	}
}

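/*
 * noinstr entry wrapper: keep tracing and PAI accounting state
 * consistent around the instrumentable core handler.
 */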
void noinstr s390_handle_mcck(struct pt_regs *regs)
{
	trace_hardirqs_off();
	pai_kernel_enter(regs);
	__s390_handle_mcck();
	pai_kernel_exit(regs);
	trace_hardirqs_on();
}

/*
 * Returns 0 if all required registers are available,
 * returns 1 otherwise.
 */
static int notrace s390_validate_registers(union mci mci, int umode)
{
	struct mcesa *mcesa;
	void *fpt_save_area;
	union ctlreg2 cr2;
	int kill_task;
	u64 zero;

	kill_task = 0;
	zero = 0;

	if (!mci.gr) {
		/*
		 * General purpose registers couldn't be restored and have
		 * unknown contents. Stop system or terminate process.
		 */
		if (!umode)
			s390_handle_damage();
		kill_task = 1;
	}
	if (!mci.fp) {
		/*
		 * Floating point registers can't be restored. If the
		 * kernel currently uses floating point registers the
		 * system is stopped. If the process has its floating
		 * point registers loaded it is terminated.
		 */
		if (S390_lowcore.fpu_flags & KERNEL_VXR_V0V7)
			s390_handle_damage();
		if (!test_cpu_flag(CIF_FPU))
			kill_task = 1;
	}
	fpt_save_area = &S390_lowcore.floating_pt_save_area;
	if (!mci.fc) {
		/*
		 * Floating point control register can't be restored.
		 * If the kernel currently uses the floating point
		 * registers and needs the FPC register the system is
		 * stopped. If the process has its floating point
		 * registers loaded it is terminated. Otherwise the
		 * FPC is just validated.
		 */
		if (S390_lowcore.fpu_flags & KERNEL_FPC)
			s390_handle_damage();
		asm volatile(
			"	lfpc	%0\n"
			:
			: "Q" (zero));
		if (!test_cpu_flag(CIF_FPU))
			kill_task = 1;
	} else {
		asm volatile(
			"	lfpc	%0\n"
			:
			: "Q" (S390_lowcore.fpt_creg_save_area));
	}

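	/*
	 * The lowcore value carries the save area size encoded in its low
	 * bits; mask those off to get the actual MCESA origin.
	 */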
	mcesa = __va(S390_lowcore.mcesad & MCESA_ORIGIN_MASK);
	if (!MACHINE_HAS_VX) {
		/* Validate floating point registers */
		asm volatile(
			"	ld	0,0(%0)\n"
			"	ld	1,8(%0)\n"
			"	ld	2,16(%0)\n"
			"	ld	3,24(%0)\n"
			"	ld	4,32(%0)\n"
			"	ld	5,40(%0)\n"
			"	ld	6,48(%0)\n"
			"	ld	7,56(%0)\n"
			"	ld	8,64(%0)\n"
			"	ld	9,72(%0)\n"
			"	ld	10,80(%0)\n"
			"	ld	11,88(%0)\n"
			"	ld	12,96(%0)\n"
			"	ld	13,104(%0)\n"
			"	ld	14,112(%0)\n"
			"	ld	15,120(%0)\n"
			:
			: "a" (fpt_save_area)
			: "memory");
	} else {
		/* Validate vector registers */
		union ctlreg0 cr0;

		/*
		 * The vector validity must only be checked if not running a
		 * KVM guest. For KVM guests the machine check is forwarded by
		 * KVM and it is the responsibility of the guest to take
		 * appropriate actions. The host vector or FPU values have been
		 * saved by KVM and will be restored by KVM.
		 */
		if (!mci.vr && !test_cpu_flag(CIF_MCCK_GUEST)) {
			/*
			 * Vector registers can't be restored. If the kernel
			 * currently uses vector registers the system is
			 * stopped. If the process has its vector registers
			 * loaded it is terminated. Otherwise just validate
			 * the registers.
			 */
			if (S390_lowcore.fpu_flags & KERNEL_VXR)
				s390_handle_damage();
			if (!test_cpu_flag(CIF_FPU))
				kill_task = 1;
		}
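		/*
		 * Temporarily set the AFP and VX bits in control register 0
		 * so that the vlm instructions below are allowed to execute,
		 * then restore the saved CR0 contents afterwards.
		 */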
		cr0.val = S390_lowcore.cregs_save_area[0];
		cr0.afp = cr0.vx = 1;
		__ctl_load(cr0.val, 0, 0);
		asm volatile(
			"	la	1,%0\n"
			"	.word	0xe70f,0x1000,0x0036\n" /* vlm 0,15,0(1) */
			"	.word	0xe70f,0x1100,0x0c36\n" /* vlm 16,31,256(1) */
			:
			: "Q" (*(struct vx_array *)mcesa->vector_save_area)
			: "1");
		__ctl_load(S390_lowcore.cregs_save_area[0], 0, 0);
	}
	/* Validate access registers */
	asm volatile(
		"	lam	0,15,0(%0)\n"
		:
		: "a" (&S390_lowcore.access_regs_save_area)
		: "memory");
	if (!mci.ar) {
		/*
		 * Access registers have unknown contents.
		 * Terminate the task.
		 */
		kill_task = 1;
	}
	/* Validate guarded storage registers */
	cr2.val = S390_lowcore.cregs_save_area[2];
	if (cr2.gse) {
		if (!mci.gs) {
			/*
			 * Two cases:
			 * - machine check in kernel or userspace
			 * - machine check while running SIE (KVM guest)
			 * For kernel or userspace the userspace values of
			 * guarded storage control cannot be recreated, the
			 * process must be terminated.
			 * For SIE the guest values of guarded storage cannot
			 * be recreated. This is either due to a bug or due to
			 * GS being disabled in the guest. The guest will be
			 * notified by KVM code and the guest's machine check
			 * handling must take care of this. The host values
			 * are saved by KVM and are not affected.
			 */
			if (!test_cpu_flag(CIF_MCCK_GUEST))
				kill_task = 1;
		} else {
			load_gs_cb((struct gs_cb *)mcesa->guarded_storage_save_area);
		}
	}
	/*
	 * The getcpu vdso syscall reads the CPU number from the programmable
	 * field of the TOD clock. Disregard the TOD programmable register
	 * validity bit and load the CPU number into the TOD programmable
	 * field unconditionally.
	 */
	set_tod_programmable_field(raw_smp_processor_id());
	/* Validate clock comparator register */
	set_clock_comparator(S390_lowcore.clock_comparator);

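	/* PSW mask/key, program mask/cc or instruction address invalid? */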
	if (!mci.ms || !mci.pm || !mci.ia)
		kill_task = 1;

	return kill_task;
}
NOKPROBE_SYMBOL(s390_validate_registers);

/*
 * Back up the guest's machine check info to its description block
 */
static void notrace s390_backup_mcck_info(struct pt_regs *regs)
{
	struct mcck_volatile_info *mcck_backup;
	struct sie_page *sie_page;

	/* r14 contains the sie block, which was set in sie64a */
	struct kvm_s390_sie_block *sie_block =
			(struct kvm_s390_sie_block *) regs->gprs[14];

	if (sie_block == NULL)
		/* Something's seriously wrong, stop system. */
		s390_handle_damage();

	sie_page = container_of(sie_block, struct sie_page, sie_block);
	mcck_backup = &sie_page->mcck_info;
	mcck_backup->mcic = S390_lowcore.mcck_interruption_code &
				~(MCCK_CODE_CP | MCCK_CODE_EXT_DAMAGE);
	mcck_backup->ext_damage_code = S390_lowcore.external_damage_code;
	mcck_backup->failing_storage_address
			= S390_lowcore.failing_storage_address;
}
NOKPROBE_SYMBOL(s390_backup_mcck_info);

#define MAX_IPD_COUNT	29
#define MAX_IPD_TIME	(5 * 60 * USEC_PER_SEC) /* 5 minutes */
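/*
 * If more than MAX_IPD_COUNT instruction-processing-damage machine
 * checks arrive within MAX_IPD_TIME the machine is considered broken
 * and stopped, see the ipd_count handling in s390_do_machine_check().
 */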

#define ED_STP_ISLAND	6	/* External damage STP island check */
#define ED_STP_SYNC	7	/* External damage STP sync check */

#define MCCK_CODE_NO_GUEST	(MCCK_CODE_CP | MCCK_CODE_EXT_DAMAGE)

/*
 * Machine check handler.
 */
int notrace s390_do_machine_check(struct pt_regs *regs)
{
	static int ipd_count;
	static DEFINE_SPINLOCK(ipd_lock);
	static unsigned long long last_ipd;
	struct mcck_struct *mcck;
	unsigned long long tmp;
	union mci mci;
	unsigned long mcck_dam_code;
	int mcck_pending = 0;

	nmi_enter();

	if (user_mode(regs))
		update_timer_mcck();
	inc_irq_stat(NMI_NMI);
	mci.val = S390_lowcore.mcck_interruption_code;
	mcck = this_cpu_ptr(&cpu_mcck);

	/*
	 * Reinject instruction processing damage machine checks,
	 * including delayed access exceptions, into the guest
	 * instead of damaging the host if they happen in the guest.
	 */
	if (mci.pd && !test_cpu_flag(CIF_MCCK_GUEST)) {
		if (mci.b) {
			/* Processing backup -> verify if we can survive this */
			u64 z_mcic, o_mcic, t_mcic;
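			/*
			 * Per the MCIC bit layout: z_mcic collects conditions
			 * that must be zero to survive (system damage,
			 * timing-facility damage, delayed access exception),
			 * o_mcic collects validity bits that must all be one
			 * (PSW, general, control, floating point and access
			 * registers, FP control register, TOD programmable
			 * register, CPU timer, clock comparator and storage
			 * logical validity).
			 */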
			z_mcic = (1ULL<<63 | 1ULL<<59 | 1ULL<<29);
			o_mcic = (1ULL<<43 | 1ULL<<42 | 1ULL<<41 | 1ULL<<40 |
				  1ULL<<36 | 1ULL<<35 | 1ULL<<34 | 1ULL<<32 |
				  1ULL<<30 | 1ULL<<21 | 1ULL<<20 | 1ULL<<17 |
				  1ULL<<16);
			t_mcic = mci.val;

			if (((t_mcic & z_mcic) != 0) ||
			    ((t_mcic & o_mcic) != o_mcic)) {
				s390_handle_damage();
			}

			/*
			 * Nullifying exigent condition, therefore we might
			 * retry this instruction.
			 */
			spin_lock(&ipd_lock);
			tmp = get_tod_clock();
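			/* TOD clock >> 12 yields microseconds (bit 51 = 1us) */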
			if (((tmp - last_ipd) >> 12) < MAX_IPD_TIME)
				ipd_count++;
			else
				ipd_count = 1;
			last_ipd = tmp;
			if (ipd_count == MAX_IPD_COUNT)
				s390_handle_damage();
			spin_unlock(&ipd_lock);
		} else {
			/* Processing damage -> stopping machine */
			s390_handle_damage();
		}
	}
	if (s390_validate_registers(mci, user_mode(regs))) {
		/*
		 * Couldn't restore all register contents for the
		 * user space process -> mark task for termination.
		 */
		mcck->kill_task = 1;
		mcck->mcck_code = mci.val;
		mcck_pending = 1;
	}

	/*
	 * Back up the machine check info if it happens while the guest
	 * is running.
	 */
	if (test_cpu_flag(CIF_MCCK_GUEST))
		s390_backup_mcck_info(regs);

	if (mci.cd) {
		/* Timing facility damage */
		s390_handle_damage();
	}
	if (mci.ed && mci.ec) {
		/* External damage */
		if (S390_lowcore.external_damage_code & (1U << ED_STP_SYNC))
			mcck->stp_queue |= stp_sync_check();
		if (S390_lowcore.external_damage_code & (1U << ED_STP_ISLAND))
			mcck->stp_queue |= stp_island_check();
		mcck_pending = 1;
	}

	if (mci.cp) {
		/* Channel report word pending */
		mcck->channel_report = 1;
		mcck_pending = 1;
	}
	if (mci.w) {
		/* Warning pending */
		mcck->warning = 1;
		mcck_pending = 1;
	}

	/*
	 * If there are only Channel Report Pending and External Damage
	 * machine checks, they will not be reinjected into the guest
	 * because they refer to host conditions only.
	 */
	mcck_dam_code = (mci.val & MCIC_SUBCLASS_MASK);
	if (test_cpu_flag(CIF_MCCK_GUEST) &&
	    (mcck_dam_code & MCCK_CODE_NO_GUEST) != mcck_dam_code) {
		/* Set exit reason code for host's later handling */
		*((long *)(regs->gprs[15] + __SF_SIE_REASON)) = -EINTR;
	}
	clear_cpu_flag(CIF_MCCK_GUEST);

	if (user_mode(regs) && mcck_pending) {
		nmi_exit();
		return 1;
	}

	if (mcck_pending)
		schedule_mcck_handler();

	nmi_exit();
	return 0;
}
NOKPROBE_SYMBOL(s390_do_machine_check);

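/*
 * Runs as an early initcall: enable the machine check subclasses in
 * control register 14 that the handlers above rely on, as soon as the
 * kernel is able to process them.
 */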
static int __init machine_check_init(void)
{
	ctl_set_bit(14, 25);	/* enable external damage MCH */
	ctl_set_bit(14, 27);	/* enable system recovery MCH */
	ctl_set_bit(14, 24);	/* enable warning MCH */
	return 0;
}
early_initcall(machine_check_init);