// SPDX-License-Identifier: GPL-2.0
/*
 * Intel specific MCE features.
 * Copyright 2004 Zwane Mwaikambo <zwane@linuxpower.ca>
 * Copyright (C) 2008, 2009 Intel Corporation
 * Author: Andi Kleen
 */

#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <asm/apic.h>
#include <asm/cpufeature.h>
#include <asm/intel-family.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/mce.h>

#include "internal.h"

/*
 * Support for Intel Corrected Machine Check Interrupts (CMCI). This allows
 * the CPU to raise an interrupt when a corrected machine check happens.
 * Normally we pick those up using a regular polling timer.
 * Also supports reliable discovery of shared banks.
 */

/*
 * CMCI can be delivered to multiple cpus that share a machine check bank
 * so we need to designate a single cpu to process errors logged in each bank
 * in the interrupt handler (otherwise we would have many races and potential
 * double reporting of the same error).
 * Note that this can change when a cpu is offlined or brought online since
 * some MCA banks are shared across cpus. When a cpu is offlined, cmci_clear()
 * disables CMCI on all banks owned by the cpu and clears this bitfield. At
 * this point, cmci_rediscover() kicks in and a different cpu may end up
 * taking ownership of some of the shared MCA banks that were previously
 * owned by the offlined cpu.
 */
static DEFINE_PER_CPU(mce_banks_t, mce_banks_owned);

/*
 * CMCI storm detection backoff counter
 *
 * During a storm, we reset this counter to INITIAL_CHECK_INTERVAL whenever
 * the last poll encountered an error; otherwise we decrement it by one. We
 * signal the end of the CMCI storm when it reaches 0.
 */
static DEFINE_PER_CPU(int, cmci_backoff_cnt);
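
/*
 * Worked example of the backoff (a sketch; INITIAL_CHECK_INTERVAL is
 * defined alongside the generic MCE poll code, not in this file): while a
 * storm is active, every poll that logs an error pins cmci_backoff_cnt at
 * INITIAL_CHECK_INTERVAL. Only that many consecutive clean polls in a row
 * drain the counter to 0, at which point cmci_intel_adjust_timer() stops
 * forcing the one-second storm interval and lets the storm subside.
 */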

/*
 * cmci_discover_lock serializes parallel discovery attempts
 * which could otherwise race against each other.
 */
static DEFINE_RAW_SPINLOCK(cmci_discover_lock);

/*
 * On systems that support CMCI but have it disabled, polling for MCEs can
 * cause the same event to be reported multiple times because IA32_MCi_STATUS
 * is shared by the CPUs in the same package.
 */
static DEFINE_SPINLOCK(cmci_poll_lock);

#define CMCI_THRESHOLD		1
#define CMCI_POLL_INTERVAL	(30 * HZ)
#define CMCI_STORM_INTERVAL	(HZ)
#define CMCI_STORM_THRESHOLD	15
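
/*
 * Reading the constants above: a "storm" is declared when a CPU takes more
 * than CMCI_STORM_THRESHOLD (15) CMCIs within one CMCI_STORM_INTERVAL
 * (HZ jiffies, i.e. one second). While a storm is active, the MCE timer
 * polls every second; once the storm subsides, polling backs off to the
 * relaxed CMCI_POLL_INTERVAL of 30 seconds.
 */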

static DEFINE_PER_CPU(unsigned long, cmci_time_stamp);
static DEFINE_PER_CPU(unsigned int, cmci_storm_cnt);
static DEFINE_PER_CPU(unsigned int, cmci_storm_state);

enum {
	CMCI_STORM_NONE,
	CMCI_STORM_ACTIVE,
	CMCI_STORM_SUBSIDED,
};

static atomic_t cmci_storm_on_cpus;

static int cmci_supported(int *banks)
{
	u64 cap;

	if (mca_cfg.cmci_disabled || mca_cfg.ignore_ce)
		return 0;

	/*
	 * Vendor check is not strictly needed, but the early MCA
	 * initialization is vendor keyed and this makes sure none
	 * of the backdoors are entered otherwise.
	 */
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_ZHAOXIN)
		return 0;

	if (!boot_cpu_has(X86_FEATURE_APIC) || lapic_get_maxlvt() < 6)
		return 0;
	rdmsrl(MSR_IA32_MCG_CAP, cap);
	*banks = min_t(unsigned, MAX_NR_BANKS, cap & 0xff);
	return !!(cap & MCG_CMCI_P);
}
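
/*
 * MCG_CAP fields consumed above (a sketch from the SDM, see asm/mce.h for
 * the authoritative definitions): bits 7:0 ("Count") hold the number of
 * reporting banks, hence the "cap & 0xff"; MCG_CMCI_P (bit 10) advertises
 * CMCI capability. The lapic_get_maxlvt() < 6 test filters out local APICs
 * too old to have the LVT CMCI entry that intel_init_cmci() programs below.
 */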

static bool lmce_supported(void)
{
	u64 tmp;

	if (mca_cfg.lmce_disabled)
		return false;

	rdmsrl(MSR_IA32_MCG_CAP, tmp);

	/*
	 * LMCE depends on recovery support in the processor. Hence both
	 * MCG_SER_P and MCG_LMCE_P should be present in MCG_CAP.
	 */
	if ((tmp & (MCG_SER_P | MCG_LMCE_P)) !=
		   (MCG_SER_P | MCG_LMCE_P))
		return false;

	/*
	 * BIOS should indicate support for LMCE by setting bit 20 in
	 * IA32_FEAT_CTL without which touching MCG_EXT_CTL will generate a #GP
	 * fault.  The MSR must also be locked for LMCE_ENABLED to take effect.
	 * WARN if the MSR isn't locked as init_ia32_feat_ctl() unconditionally
	 * locks the MSR in the event that it wasn't already locked by BIOS.
	 */
	rdmsrl(MSR_IA32_FEAT_CTL, tmp);
	if (WARN_ON_ONCE(!(tmp & FEAT_CTL_LOCKED)))
		return false;

	return tmp & FEAT_CTL_LMCE_ENABLED;
}
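
/*
 * The FEAT_CTL checks above, spelled out (a sketch; see asm/msr-index.h
 * for the definitions): FEAT_CTL_LOCKED is bit 0 and FEAT_CTL_LMCE_ENABLED
 * is the "bit 20" the comment refers to. Once the lock bit is set, the
 * register is immutable until reset, so a locked MSR without LMCE_ENABLED
 * means LMCE stays unavailable for the remainder of this boot.
 */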

bool mce_intel_cmci_poll(void)
{
	if (__this_cpu_read(cmci_storm_state) == CMCI_STORM_NONE)
		return false;

	/*
	 * Reset the counter if we've logged an error in the last poll
	 * during the storm.
	 */
	if (machine_check_poll(0, this_cpu_ptr(&mce_banks_owned)))
		this_cpu_write(cmci_backoff_cnt, INITIAL_CHECK_INTERVAL);
	else
		this_cpu_dec(cmci_backoff_cnt);

	return true;
}

void mce_intel_hcpu_update(unsigned long cpu)
{
	if (per_cpu(cmci_storm_state, cpu) == CMCI_STORM_ACTIVE)
		atomic_dec(&cmci_storm_on_cpus);

	per_cpu(cmci_storm_state, cpu) = CMCI_STORM_NONE;
}

static void cmci_toggle_interrupt_mode(bool on)
{
	unsigned long flags, *owned;
	int bank;
	u64 val;

	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
	owned = this_cpu_ptr(mce_banks_owned);
	for_each_set_bit(bank, owned, MAX_NR_BANKS) {
		rdmsrl(MSR_IA32_MCx_CTL2(bank), val);

		if (on)
			val |= MCI_CTL2_CMCI_EN;
		else
			val &= ~MCI_CTL2_CMCI_EN;

		wrmsrl(MSR_IA32_MCx_CTL2(bank), val);
	}
	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
}
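
/*
 * IA32_MCi_CTL2 layout relevant to this file (a sketch from the SDM, see
 * asm/mce.h for the authoritative masks): bit 30 (MCI_CTL2_CMCI_EN) arms
 * CMCI delivery for the bank, and bits 14:0 (MCI_CTL2_CMCI_THRESHOLD_MASK)
 * hold the corrected error count that must be reached before an interrupt
 * is raised.
 */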

unsigned long cmci_intel_adjust_timer(unsigned long interval)
{
	if ((this_cpu_read(cmci_backoff_cnt) > 0) &&
	    (__this_cpu_read(cmci_storm_state) == CMCI_STORM_ACTIVE)) {
		mce_notify_irq();
		return CMCI_STORM_INTERVAL;
	}

	switch (__this_cpu_read(cmci_storm_state)) {
	case CMCI_STORM_ACTIVE:

		/*
		 * We switch back to interrupt mode once the poll timer has
		 * silenced itself. That means no events recorded and the timer
		 * interval is back to our poll interval.
		 */
		__this_cpu_write(cmci_storm_state, CMCI_STORM_SUBSIDED);
		if (!atomic_sub_return(1, &cmci_storm_on_cpus))
			pr_notice("CMCI storm subsided: switching to interrupt mode\n");

		fallthrough;

	case CMCI_STORM_SUBSIDED:
		/*
		 * We wait for all CPUs to go back to SUBSIDED state. When that
		 * happens we switch back to interrupt mode.
		 */
		if (!atomic_read(&cmci_storm_on_cpus)) {
			__this_cpu_write(cmci_storm_state, CMCI_STORM_NONE);
			cmci_toggle_interrupt_mode(true);
			cmci_recheck();
		}
		return CMCI_POLL_INTERVAL;
	default:

		/* We have shiny weather. Let the poll do whatever it thinks. */
		return interval;
	}
}
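
/*
 * Per-CPU storm state machine, summarized (derived from the code above):
 *
 *   NONE ----(>15 CMCIs/sec in cmci_storm_detect())----> ACTIVE
 *   ACTIVE --(backoff counter drained to 0)------------> SUBSIDED
 *   SUBSIDED (waits until cmci_storm_on_cpus == 0) -----> NONE,
 *            CMCI re-enabled via cmci_toggle_interrupt_mode(true)
 */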

static bool cmci_storm_detect(void)
{
	unsigned int cnt = __this_cpu_read(cmci_storm_cnt);
	unsigned long ts = __this_cpu_read(cmci_time_stamp);
	unsigned long now = jiffies;
	int r;

	if (__this_cpu_read(cmci_storm_state) != CMCI_STORM_NONE)
		return true;

	if (time_before_eq(now, ts + CMCI_STORM_INTERVAL)) {
		cnt++;
	} else {
		cnt = 1;
		__this_cpu_write(cmci_time_stamp, now);
	}
	__this_cpu_write(cmci_storm_cnt, cnt);

	if (cnt <= CMCI_STORM_THRESHOLD)
		return false;

	cmci_toggle_interrupt_mode(false);
	__this_cpu_write(cmci_storm_state, CMCI_STORM_ACTIVE);
	r = atomic_add_return(1, &cmci_storm_on_cpus);
	mce_timer_kick(CMCI_STORM_INTERVAL);
	this_cpu_write(cmci_backoff_cnt, INITIAL_CHECK_INTERVAL);

	if (r == 1)
		pr_notice("CMCI storm detected: switching to poll mode\n");
	return true;
}
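
/*
 * Example run (hypothetical timings): 16 CMCIs land on one CPU within the
 * same one-second window. The first 15 only bump cmci_storm_cnt; the 16th
 * exceeds CMCI_STORM_THRESHOLD, so CMCI is masked in every owned bank's
 * CTL2, the CPU is counted into cmci_storm_on_cpus, and mce_timer_kick()
 * arms the poll timer to fire every CMCI_STORM_INTERVAL instead.
 */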

/*
 * The interrupt handler. This is called on every event.
 * Just call the poller directly to log any events.
 * This could in theory increase the threshold under high load,
 * but doesn't for now.
 */
static void intel_threshold_interrupt(void)
{
	if (cmci_storm_detect())
		return;

	machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_banks_owned));
}

/*
 * Enable CMCI (Corrected Machine Check Interrupt) for available MCE banks
 * on this CPU. Use the algorithm recommended in the SDM to discover shared
 * banks.
 */
static void cmci_discover(int banks)
{
	unsigned long *owned = (void *)this_cpu_ptr(&mce_banks_owned);
	unsigned long flags;
	int i;
	int bios_wrong_thresh = 0;

	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
	for (i = 0; i < banks; i++) {
		u64 val;
		int bios_zero_thresh = 0;

		if (test_bit(i, owned))
			continue;

		/* Skip banks in firmware first mode */
		if (test_bit(i, mce_banks_ce_disabled))
			continue;

		rdmsrl(MSR_IA32_MCx_CTL2(i), val);

		/* Already owned by someone else? */
		if (val & MCI_CTL2_CMCI_EN) {
			clear_bit(i, owned);
			__clear_bit(i, this_cpu_ptr(mce_poll_banks));
			continue;
		}

		if (!mca_cfg.bios_cmci_threshold) {
			val &= ~MCI_CTL2_CMCI_THRESHOLD_MASK;
			val |= CMCI_THRESHOLD;
		} else if (!(val & MCI_CTL2_CMCI_THRESHOLD_MASK)) {
			/*
			 * If bios_cmci_threshold boot option was specified
			 * but the threshold is zero, we'll try to initialize
			 * it to 1.
			 */
			bios_zero_thresh = 1;
			val |= CMCI_THRESHOLD;
		}

		val |= MCI_CTL2_CMCI_EN;
		wrmsrl(MSR_IA32_MCx_CTL2(i), val);
		rdmsrl(MSR_IA32_MCx_CTL2(i), val);

		/* Did the enable bit stick? -- the bank supports CMCI */
		if (val & MCI_CTL2_CMCI_EN) {
			set_bit(i, owned);
			__clear_bit(i, this_cpu_ptr(mce_poll_banks));
			/*
			 * We are able to set thresholds for some banks that
			 * had a threshold of 0. This means the BIOS has not
			 * set the thresholds properly or does not work with
			 * this boot option. Note down now and report later.
			 */
			if (mca_cfg.bios_cmci_threshold && bios_zero_thresh &&
					(val & MCI_CTL2_CMCI_THRESHOLD_MASK))
				bios_wrong_thresh = 1;
		} else {
			WARN_ON(!test_bit(i, this_cpu_ptr(mce_poll_banks)));
		}
	}
	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
	if (mca_cfg.bios_cmci_threshold && bios_wrong_thresh) {
		pr_info_once(
			"bios_cmci_threshold: Some banks do not have valid thresholds set\n");
		pr_info_once(
			"bios_cmci_threshold: Make sure your BIOS supports this boot option\n");
	}
}
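
/*
 * The claim protocol above, in short (this is the SDM-recommended shared
 * bank discovery the function comment mentions): a bank whose CTL2 already
 * has CMCI_EN set is owned by another CPU sharing it, so we drop it from
 * our poll set and move on; otherwise we set CMCI_EN ourselves and read it
 * back -- if the bit sticks, the bank supports CMCI and is now ours; if
 * not, the bank has no CMCI and stays in the poll set.
 */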

/*
 * Just in case we missed an event during initialization, check
 * all the CMCI owned banks.
 */
void cmci_recheck(void)
{
	unsigned long flags;
	int banks;

	if (!mce_available(raw_cpu_ptr(&cpu_info)) || !cmci_supported(&banks))
		return;

	local_irq_save(flags);
	machine_check_poll(0, this_cpu_ptr(&mce_banks_owned));
	local_irq_restore(flags);
}

/* Caller must hold cmci_discover_lock */
static void __cmci_disable_bank(int bank)
{
	u64 val;

	if (!test_bit(bank, this_cpu_ptr(mce_banks_owned)))
		return;
	rdmsrl(MSR_IA32_MCx_CTL2(bank), val);
	val &= ~MCI_CTL2_CMCI_EN;
	wrmsrl(MSR_IA32_MCx_CTL2(bank), val);
	__clear_bit(bank, this_cpu_ptr(mce_banks_owned));
}

/*
 * Disable CMCI on this CPU for all banks it owns when it goes down.
 * This allows other CPUs to claim the banks on rediscovery.
 */
void cmci_clear(void)
{
	unsigned long flags;
	int i;
	int banks;

	if (!cmci_supported(&banks))
		return;
	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
	for (i = 0; i < banks; i++)
		__cmci_disable_bank(i);
	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
}

static void cmci_rediscover_work_func(void *arg)
{
	int banks;

	/* Recheck banks in case CPUs don't all have the same number */
	if (cmci_supported(&banks))
		cmci_discover(banks);
}

/* After a CPU went down, cycle through all the others and rediscover */
void cmci_rediscover(void)
{
	int banks;

	if (!cmci_supported(&banks))
		return;

	on_each_cpu(cmci_rediscover_work_func, NULL, 1);
}

/*
 * Reenable CMCI on this CPU in case a CPU down failed.
 */
void cmci_reenable(void)
{
	int banks;

	if (cmci_supported(&banks))
		cmci_discover(banks);
}

void cmci_disable_bank(int bank)
{
	int banks;
	unsigned long flags;

	if (!cmci_supported(&banks))
		return;

	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
	__cmci_disable_bank(bank);
	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
}
43521afaf18SBorislav Petkov 
436*c3629dd7SBorislav Petkov (AMD) /* Bank polling function when CMCI is disabled. */
cmci_mc_poll_banks(void)437*c3629dd7SBorislav Petkov (AMD) static void cmci_mc_poll_banks(void)
438*c3629dd7SBorislav Petkov (AMD) {
439*c3629dd7SBorislav Petkov (AMD) 	spin_lock(&cmci_poll_lock);
440*c3629dd7SBorislav Petkov (AMD) 	machine_check_poll(0, this_cpu_ptr(&mce_poll_banks));
441*c3629dd7SBorislav Petkov (AMD) 	spin_unlock(&cmci_poll_lock);
442*c3629dd7SBorislav Petkov (AMD) }

void intel_init_cmci(void)
{
	int banks;

	if (!cmci_supported(&banks)) {
		mc_poll_banks = cmci_mc_poll_banks;
		return;
	}

	mce_threshold_vector = intel_threshold_interrupt;
	cmci_discover(banks);
	/*
	 * For CPU #0 this runs with the APIC still disabled, but that's
	 * ok because only the vector is set up. We still do another
	 * check for the banks later for CPU #0 just to make sure
	 * to not miss any events.
	 */
	apic_write(APIC_LVTCMCI, THRESHOLD_APIC_VECTOR|APIC_DM_FIXED);
	cmci_recheck();
}
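
/*
 * Delivery path set up above, for orientation: a corrected error crossing
 * a bank's threshold raises the local APIC's LVT CMCI entry, which is
 * programmed here to fire THRESHOLD_APIC_VECTOR in fixed delivery mode;
 * that vector dispatches through mce_threshold_vector and thus into
 * intel_threshold_interrupt().
 */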

void intel_init_lmce(void)
{
	u64 val;

	if (!lmce_supported())
		return;

	rdmsrl(MSR_IA32_MCG_EXT_CTL, val);

	if (!(val & MCG_EXT_CTL_LMCE_EN))
		wrmsrl(MSR_IA32_MCG_EXT_CTL, val | MCG_EXT_CTL_LMCE_EN);
}
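
/*
 * With MCG_EXT_CTL_LMCE_EN set, a machine check that affects only one
 * logical CPU is delivered to that CPU alone instead of being broadcast
 * to every CPU in the system, which is the pre-LMCE behaviour on Intel.
 */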

void intel_clear_lmce(void)
{
	u64 val;

	if (!lmce_supported())
		return;

	rdmsrl(MSR_IA32_MCG_EXT_CTL, val);
	val &= ~MCG_EXT_CTL_LMCE_EN;
	wrmsrl(MSR_IA32_MCG_EXT_CTL, val);
}

/*
 * Enable additional error logs from the integrated
 * memory controller on processors that support this.
 */
static void intel_imc_init(struct cpuinfo_x86 *c)
{
	u64 error_control;

	switch (c->x86_model) {
	case INTEL_FAM6_SANDYBRIDGE_X:
	case INTEL_FAM6_IVYBRIDGE_X:
	case INTEL_FAM6_HASWELL_X:
		if (rdmsrl_safe(MSR_ERROR_CONTROL, &error_control))
			return;
		error_control |= 2;
		wrmsrl_safe(MSR_ERROR_CONTROL, error_control);
		break;
	}
}
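
/*
 * The "|= 2" above sets bit 1 of MSR_ERROR_CONTROL, which on these Xeon
 * models is understood to be the memory error log enable: with it set, the
 * integrated memory controller reports extra detail about the failing DIMM
 * in the machine check bank. rdmsrl_safe()/wrmsrl_safe() are used because
 * the MSR is model specific and may be absent or fenced off by a hypervisor.
 */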

void mce_intel_feature_init(struct cpuinfo_x86 *c)
{
	intel_init_cmci();
	intel_init_lmce();
	intel_imc_init(c);
}

void mce_intel_feature_clear(struct cpuinfo_x86 *c)
{
	intel_clear_lmce();
}

bool intel_filter_mce(struct mce *m)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	/* MCE errata HSD131, HSM142, HSW131, BDM48 and SKX37 */
	if ((c->x86 == 6) &&
	    ((c->x86_model == INTEL_FAM6_HASWELL) ||
	     (c->x86_model == INTEL_FAM6_HASWELL_L) ||
	     (c->x86_model == INTEL_FAM6_BROADWELL) ||
	     (c->x86_model == INTEL_FAM6_HASWELL_G) ||
	     (c->x86_model == INTEL_FAM6_SKYLAKE_X)) &&
	    (m->bank == 0) &&
	    ((m->status & 0xa0000000ffffffff) == 0x80000000000f0005))
		return true;

	return false;
}
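
/*
 * Decoding the filter above: the mask 0xa0000000ffffffff keeps MCi_STATUS
 * bits 63 (VAL) and 61 (UC) plus the low 32 bits; the expected value
 * 0x80000000000f0005 therefore means a valid (VAL=1), corrected (UC=0)
 * error with MSCOD 0x000f / MCACOD 0x0005 in bank 0 -- the spurious
 * corrected error signature those errata describe, which is dropped here.
 */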