/*
 * Machine check injection support.
 * Copyright 2008 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 *
 * Authors:
 * Andi Kleen
 * Ying Huang
 *
 * The AMD part (from mce_amd_inj.c): a simple MCE injection facility
 * for testing different aspects of the RAS code. This driver should be
 * built as a module so that it can be loaded on production kernels for
 * testing purposes.
 *
 * This file may be distributed under the terms of the GNU General Public
 * License version 2.
 *
 * Copyright (c) 2010-17:  Borislav Petkov <bp@alien8.de>
 *			   Advanced Micro Devices Inc.
 */

#include <linux/cpu.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/pci.h>
#include <linux/uaccess.h>

#include <asm/amd_nb.h>
#include <asm/apic.h>
#include <asm/irq_vectors.h>
#include <asm/mce.h>
#include <asm/nmi.h>
#include <asm/smp.h>

#include "internal.h"

/*
 * Collect all the MCi_XXX settings
 */
static struct mce i_mce;
static struct dentry *dfs_inj;

static u8 n_banks;

#define MAX_FLAG_OPT_SIZE	4
#define NBCFG			0x44

enum injection_type {
	SW_INJ = 0,	/* SW injection, simply decode the error */
	HW_INJ,		/* Trigger a #MC */
	DFR_INT_INJ,    /* Trigger Deferred error interrupt */
	THR_INT_INJ,    /* Trigger threshold interrupt */
	N_INJ_TYPES,
};

static const char * const flags_options[] = {
	[SW_INJ] = "sw",
	[HW_INJ] = "hw",
	[DFR_INT_INJ] = "df",
	[THR_INT_INJ] = "th",
	NULL
};

/* Set default injection to SW_INJ */
static enum injection_type inj_type = SW_INJ;

#define MCE_INJECT_SET(reg)						\
static int inj_##reg##_set(void *data, u64 val)				\
{									\
	struct mce *m = (struct mce *)data;				\
									\
	m->reg = val;							\
	return 0;							\
}

MCE_INJECT_SET(status);
MCE_INJECT_SET(misc);
MCE_INJECT_SET(addr);
MCE_INJECT_SET(synd);

#define MCE_INJECT_GET(reg)						\
static int inj_##reg##_get(void *data, u64 *val)			\
{									\
	struct mce *m = (struct mce *)data;				\
									\
	*val = m->reg;							\
	return 0;							\
}

MCE_INJECT_GET(status);
MCE_INJECT_GET(misc);
MCE_INJECT_GET(addr);
MCE_INJECT_GET(synd);

DEFINE_SIMPLE_ATTRIBUTE(status_fops, inj_status_get, inj_status_set, "%llx\n");
DEFINE_SIMPLE_ATTRIBUTE(misc_fops, inj_misc_get, inj_misc_set, "%llx\n");
DEFINE_SIMPLE_ATTRIBUTE(addr_fops, inj_addr_get, inj_addr_set, "%llx\n");
DEFINE_SIMPLE_ATTRIBUTE(synd_fops, inj_synd_get, inj_synd_set, "%llx\n");
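
/*
 * Each of the debugfs files below reads and writes one field of i_mce
 * through the accessors generated by the MCE_INJECT_GET/SET macros above.
 */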

static void setup_inj_struct(struct mce *m)
{
	memset(m, 0, sizeof(struct mce));

	m->cpuvendor = boot_cpu_data.x86_vendor;
	m->time	     = ktime_get_real_seconds();
	m->cpuid     = cpuid_eax(1);
	m->microcode = boot_cpu_data.microcode;
}

/* Update fake mce registers on current CPU. */
static void inject_mce(struct mce *m)
{
	struct mce *i = &per_cpu(injectm, m->extcpu);

	/* Make sure no one reads partially written injectm */
	i->finished = 0;
	mb();
	m->finished = 0;
	/* First set the fields after finished */
	i->extcpu = m->extcpu;
	mb();
	/* Now write record in order, finished last (except above) */
	memcpy(i, m, sizeof(struct mce));
	/* Finally activate it */
	mb();
	i->finished = 1;
}

static void raise_poll(struct mce *m)
{
	unsigned long flags;
	mce_banks_t b;

	memset(&b, 0xff, sizeof(mce_banks_t));
	local_irq_save(flags);
	machine_check_poll(0, &b);
	local_irq_restore(flags);
	m->finished = 0;
}

static void raise_exception(struct mce *m, struct pt_regs *pregs)
{
	struct pt_regs regs;
	unsigned long flags;

	if (!pregs) {
		memset(&regs, 0, sizeof(struct pt_regs));
		regs.ip = m->ip;
		regs.cs = m->cs;
		pregs = &regs;
	}
	/* IRQs will be disabled in the mcheck exception handler */
	local_irq_save(flags);
	do_machine_check(pregs, 0);
	local_irq_restore(flags);
	m->finished = 0;
}

static cpumask_var_t mce_inject_cpumask;
static DEFINE_MUTEX(mce_inject_mutex);

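/*
 * NMI handler for MCJ_NMI_BROADCAST injections: each CPU marked in
 * mce_inject_cpumask raises the prepared error locally (as a #MC exception
 * or via a poll) and clears itself from the mask.
 */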
static int mce_raise_notify(unsigned int cmd, struct pt_regs *regs)
{
	int cpu = smp_processor_id();
	struct mce *m = this_cpu_ptr(&injectm);
	if (!cpumask_test_cpu(cpu, mce_inject_cpumask))
		return NMI_DONE;
	cpumask_clear_cpu(cpu, mce_inject_cpumask);
	if (m->inject_flags & MCJ_EXCEPTION)
		raise_exception(m, regs);
	else if (m->status)
		raise_poll(m);
	return NMI_HANDLED;
}

static void mce_irq_ipi(void *info)
{
	int cpu = smp_processor_id();
	struct mce *m = this_cpu_ptr(&injectm);

	if (cpumask_test_cpu(cpu, mce_inject_cpumask) &&
			m->inject_flags & MCJ_EXCEPTION) {
		cpumask_clear_cpu(cpu, mce_inject_cpumask);
		raise_exception(m, NULL);
	}
}

/* Inject mce on current CPU */
static int raise_local(void)
{
	struct mce *m = this_cpu_ptr(&injectm);
	int context = MCJ_CTX(m->inject_flags);
	int ret = 0;
	int cpu = m->extcpu;

	if (m->inject_flags & MCJ_EXCEPTION) {
		pr_info("Triggering MCE exception on CPU %d\n", cpu);
		switch (context) {
		case MCJ_CTX_IRQ:
			/*
			 * Could do more to fake interrupts like
			 * calling irq_enter, but the necessary
			 * machinery isn't exported currently.
			 */
			/*FALL THROUGH*/
		case MCJ_CTX_PROCESS:
			raise_exception(m, NULL);
			break;
		default:
			pr_info("Invalid MCE context\n");
			ret = -EINVAL;
		}
		pr_info("MCE exception done on CPU %d\n", cpu);
	} else if (m->status) {
		pr_info("Starting machine check poll CPU %d\n", cpu);
		raise_poll(m);
		mce_notify_irq();
		pr_info("Machine check poll done on CPU %d\n", cpu);
	} else
		m->finished = 0;

	return ret;
}

static void __maybe_unused raise_mce(struct mce *m)
{
	int context = MCJ_CTX(m->inject_flags);

	inject_mce(m);

	if (context == MCJ_CTX_RANDOM)
		return;

	if (m->inject_flags & (MCJ_IRQ_BROADCAST | MCJ_NMI_BROADCAST)) {
		unsigned long start;
		int cpu;

		get_online_cpus();
		cpumask_copy(mce_inject_cpumask, cpu_online_mask);
		cpumask_clear_cpu(get_cpu(), mce_inject_cpumask);
		for_each_online_cpu(cpu) {
			struct mce *mcpu = &per_cpu(injectm, cpu);
			if (!mcpu->finished ||
			    MCJ_CTX(mcpu->inject_flags) != MCJ_CTX_RANDOM)
				cpumask_clear_cpu(cpu, mce_inject_cpumask);
		}
		if (!cpumask_empty(mce_inject_cpumask)) {
			if (m->inject_flags & MCJ_IRQ_BROADCAST) {
				/*
				 * Don't wait here: mce_irq_ipi() needs to run
				 * in sync with the following raise_local().
				 */
				preempt_disable();
				smp_call_function_many(mce_inject_cpumask,
					mce_irq_ipi, NULL, 0);
				preempt_enable();
			} else if (m->inject_flags & MCJ_NMI_BROADCAST)
				apic->send_IPI_mask(mce_inject_cpumask,
						NMI_VECTOR);
		}
		start = jiffies;
		while (!cpumask_empty(mce_inject_cpumask)) {
			if (!time_before(jiffies, start + 2*HZ)) {
				pr_err("Timeout waiting for mce inject %lx\n",
					*cpumask_bits(mce_inject_cpumask));
				break;
			}
			cpu_relax();
		}
		raise_local();
		put_cpu();
		put_online_cpus();
	} else {
		preempt_disable();
		raise_local();
		preempt_enable();
	}
}

static int mce_inject_raise(struct notifier_block *nb, unsigned long val,
			    void *data)
{
	struct mce *m = (struct mce *)data;

	if (!m)
		return NOTIFY_DONE;

	mutex_lock(&mce_inject_mutex);
	raise_mce(m);
	mutex_unlock(&mce_inject_mutex);

	return NOTIFY_DONE;
}

static struct notifier_block inject_nb = {
	.notifier_call  = mce_inject_raise,
};

/*
 * Caller needs to make sure this CPU doesn't disappear
 * from under us, i.e.: get_cpu/put_cpu.
 */
static int toggle_hw_mce_inject(unsigned int cpu, bool enable)
{
	u32 l, h;
	int err;

	err = rdmsr_on_cpu(cpu, MSR_K7_HWCR, &l, &h);
	if (err) {
		pr_err("%s: error reading HWCR\n", __func__);
		return err;
	}

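	/* HWCR[18] (McStatusWrEn) must be set to allow writes to the MCA status MSRs. */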
	enable ? (l |= BIT(18)) : (l &= ~BIT(18));

	err = wrmsr_on_cpu(cpu, MSR_K7_HWCR, l, h);
	if (err)
		pr_err("%s: error writing HWCR\n", __func__);

	return err;
}

static int __set_inj(const char *buf)
{
	int i;

	for (i = 0; i < N_INJ_TYPES; i++) {
		if (!strncmp(flags_options[i], buf, strlen(flags_options[i]))) {
			inj_type = i;
			return 0;
		}
	}
	return -EINVAL;
}

static ssize_t flags_read(struct file *filp, char __user *ubuf,
			  size_t cnt, loff_t *ppos)
{
	char buf[MAX_FLAG_OPT_SIZE];
	int n;

	n = sprintf(buf, "%s\n", flags_options[inj_type]);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
}

static ssize_t flags_write(struct file *filp, const char __user *ubuf,
			   size_t cnt, loff_t *ppos)
{
	char buf[MAX_FLAG_OPT_SIZE], *__buf;
	int err;

	if (cnt > MAX_FLAG_OPT_SIZE)
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt - 1] = 0;

	/* strip whitespace */
	__buf = strstrip(buf);

	err = __set_inj(__buf);
	if (err) {
		pr_err("%s: Invalid flags value: %s\n", __func__, __buf);
		return err;
	}

	*ppos += cnt;

	return cnt;
}

static const struct file_operations flags_fops = {
	.read           = flags_read,
	.write          = flags_write,
	.llseek         = generic_file_llseek,
};

/*
 * On which CPU to inject?
 */
MCE_INJECT_GET(extcpu);

static int inj_extcpu_set(void *data, u64 val)
{
	struct mce *m = (struct mce *)data;

	if (val >= nr_cpu_ids || !cpu_online(val)) {
		pr_err("%s: Invalid CPU: %llu\n", __func__, val);
		return -EINVAL;
	}
	m->extcpu = val;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(extcpu_fops, inj_extcpu_get, inj_extcpu_set, "%llu\n");

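/* Raise the #MC exception (vector 18) directly on the CPU this runs on. */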
static void trigger_mce(void *info)
{
	asm volatile("int $18");
}

static void trigger_dfr_int(void *info)
{
	asm volatile("int %0" :: "i" (DEFERRED_ERROR_VECTOR));
}

static void trigger_thr_int(void *info)
{
	asm volatile("int %0" :: "i" (THRESHOLD_APIC_VECTOR));
}

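/*
 * Return the CPU number of the node base core (NBC) of @node_id, i.e. the
 * first core on that node, assuming CPUs are enumerated linearly per node.
 */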
static u32 get_nbc_for_node(int node_id)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;
	u32 cores_per_node;

	cores_per_node = (c->x86_max_cores * smp_num_siblings) / amd_get_nodes_per_socket();

	return cores_per_node * node_id;
}

static void toggle_nb_mca_mst_cpu(u16 nid)
{
	struct amd_northbridge *nb;
	struct pci_dev *F3;
	u32 val;
	int err;

	nb = node_to_amd_nb(nid);
	if (!nb)
		return;

	F3 = nb->misc;
	if (!F3)
		return;

	err = pci_read_config_dword(F3, NBCFG, &val);
	if (err) {
		pr_err("%s: Error reading F%dx%03x.\n",
		       __func__, PCI_FUNC(F3->devfn), NBCFG);
		return;
	}

	if (val & BIT(27))
		return;

	pr_err("%s: Set D18F3x44[NbMcaToMstCpuEn] which BIOS hasn't done.\n",
	       __func__);

	val |= BIT(27);
	err = pci_write_config_dword(F3, NBCFG, val);
	if (err)
		pr_err("%s: Error writing F%dx%03x.\n",
		       __func__, PCI_FUNC(F3->devfn), NBCFG);
}

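/*
 * Program the injected values into the target CPU's MCA registers: SMCA
 * systems use the extended MSR space (DESTAT/DEADDR for deferred errors),
 * legacy systems the MCi_{STATUS,ADDR,MISC} MSRs.
 */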
static void prepare_msrs(void *info)
{
	struct mce m = *(struct mce *)info;
	u8 b = m.bank;

	wrmsrl(MSR_IA32_MCG_STATUS, m.mcgstatus);

	if (boot_cpu_has(X86_FEATURE_SMCA)) {
		if (m.inject_flags == DFR_INT_INJ) {
			wrmsrl(MSR_AMD64_SMCA_MCx_DESTAT(b), m.status);
			wrmsrl(MSR_AMD64_SMCA_MCx_DEADDR(b), m.addr);
		} else {
			wrmsrl(MSR_AMD64_SMCA_MCx_STATUS(b), m.status);
			wrmsrl(MSR_AMD64_SMCA_MCx_ADDR(b), m.addr);
		}

		wrmsrl(MSR_AMD64_SMCA_MCx_MISC(b), m.misc);
		wrmsrl(MSR_AMD64_SMCA_MCx_SYND(b), m.synd);
	} else {
		wrmsrl(MSR_IA32_MCx_STATUS(b), m.status);
		wrmsrl(MSR_IA32_MCx_ADDR(b), m.addr);
		wrmsrl(MSR_IA32_MCx_MISC(b), m.misc);
	}
}

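/*
 * Perform the actual injection: for "sw" just log/decode the record; for
 * the hardware types, enable HWCR[McStatusWrEn], program the bank MSRs on
 * the target CPU and then raise the matching exception or APIC interrupt.
 */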
static void do_inject(void)
{
	u64 mcg_status = 0;
	unsigned int cpu = i_mce.extcpu;
	u8 b = i_mce.bank;

	i_mce.tsc = rdtsc_ordered();

	if (i_mce.misc)
		i_mce.status |= MCI_STATUS_MISCV;

	if (i_mce.synd)
		i_mce.status |= MCI_STATUS_SYNDV;

	if (inj_type == SW_INJ) {
		mce_inject_log(&i_mce);
		return;
	}

	/* prep MCE global settings for the injection */
	mcg_status = MCG_STATUS_MCIP | MCG_STATUS_EIPV;

	if (!(i_mce.status & MCI_STATUS_PCC))
		mcg_status |= MCG_STATUS_RIPV;

	/*
	 * Ensure necessary status bits for deferred errors:
	 * - MCx_STATUS[Deferred]: make sure it is a deferred error
	 * - MCx_STATUS[UC] cleared: deferred errors are _not_ UC
	 */
	if (inj_type == DFR_INT_INJ) {
		i_mce.status |= MCI_STATUS_DEFERRED;
		i_mce.status &= ~MCI_STATUS_UC;
	}

	/*
	 * For multi node CPUs, logging and reporting of bank 4 errors happens
	 * only on the node base core. Refer to D18F3x44[NbMcaToMstCpuEn] for
	 * Fam10h and later BKDGs.
	 */
	if (static_cpu_has(X86_FEATURE_AMD_DCM) &&
	    b == 4 &&
	    boot_cpu_data.x86 < 0x17) {
		toggle_nb_mca_mst_cpu(amd_get_nb_id(cpu));
		cpu = get_nbc_for_node(amd_get_nb_id(cpu));
	}

	get_online_cpus();
	if (!cpu_online(cpu))
		goto err;

	toggle_hw_mce_inject(cpu, true);

	i_mce.mcgstatus = mcg_status;
	i_mce.inject_flags = inj_type;
	smp_call_function_single(cpu, prepare_msrs, &i_mce, 0);

	toggle_hw_mce_inject(cpu, false);

	switch (inj_type) {
	case DFR_INT_INJ:
		smp_call_function_single(cpu, trigger_dfr_int, NULL, 0);
		break;
	case THR_INT_INJ:
		smp_call_function_single(cpu, trigger_thr_int, NULL, 0);
		break;
	default:
		smp_call_function_single(cpu, trigger_mce, NULL, 0);
	}

err:
	put_online_cpus();

}

/*
 * This denotes into which bank we're injecting and triggers
 * the injection, at the same time.
 */
static int inj_bank_set(void *data, u64 val)
{
	struct mce *m = (struct mce *)data;

	if (val >= n_banks) {
		pr_err("Non-existent MCE bank: %llu\n", val);
		return -EINVAL;
	}

	m->bank = val;
	do_inject();

	/* Reset injection struct */
	setup_inj_struct(&i_mce);

	return 0;
}

MCE_INJECT_GET(bank);

DEFINE_SIMPLE_ATTRIBUTE(bank_fops, inj_bank_get, inj_bank_set, "%llu\n");

static const char readme_msg[] =
"Description of the files and their usages:\n"
"\n"
"Note1: i refers to the bank number below.\n"
"Note2: See respective BKDGs for the exact bit definitions of the files below\n"
"as they mirror the hardware registers.\n"
"\n"
"status:\t Set MCi_STATUS: the bits in that MSR control the error type and\n"
"\t attributes of the error which caused the MCE.\n"
"\n"
"misc:\t Set MCi_MISC: provide auxiliary info about the error. It is mostly\n"
"\t used for error thresholding purposes and its validity is indicated by\n"
"\t MCi_STATUS[MiscV].\n"
"\n"
"synd:\t Set MCi_SYND: provide syndrome info about the error. Only valid on\n"
"\t Scalable MCA systems, and its validity is indicated by MCi_STATUS[SyndV].\n"
"\n"
"addr:\t Error address value to be written to MCi_ADDR. Log address information\n"
"\t associated with the error.\n"
"\n"
"cpu:\t The CPU to inject the error on.\n"
"\n"
"bank:\t Specify the bank you want to inject the error into: the number of\n"
"\t banks in a processor varies and is family/model-specific, therefore, the\n"
"\t supplied value is sanity-checked. Setting the bank value also triggers the\n"
"\t injection.\n"
"\n"
"flags:\t Injection type to be performed. Writing to this file will trigger a\n"
"\t real machine check, an APIC interrupt or invoke the error decoder routines\n"
"\t for AMD processors.\n"
"\n"
"\t Allowed error injection types:\n"
"\t  - \"sw\": Software error injection. Decode error to a human-readable \n"
"\t    format only. Safe to use.\n"
"\t  - \"hw\": Hardware error injection. Causes the #MC exception handler to \n"
"\t    handle the error. Be warned: might cause system panic if MCi_STATUS[PCC] \n"
"\t    is set. Therefore, consider setting (debugfs_mountpoint)/mce/fake_panic \n"
"\t    before injecting.\n"
630 "\t  - \"df\": Trigger APIC interrupt for Deferred error. Causes deferred \n"
631 "\t    error APIC interrupt handler to handle the error if the feature is \n"
632 "\t    is present in hardware. \n"
633 "\t  - \"th\": Trigger APIC interrupt for Threshold errors. Causes threshold \n"
634 "\t    APIC interrupt handler to handle the error. \n"
635 "\n";

static ssize_t
inj_readme_read(struct file *filp, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	return simple_read_from_buffer(ubuf, cnt, ppos,
					readme_msg, strlen(readme_msg));
}

static const struct file_operations readme_fops = {
	.read		= inj_readme_read,
};

static struct dfs_node {
	char *name;
	struct dentry *d;
	const struct file_operations *fops;
	umode_t perm;
} dfs_fls[] = {
	{ .name = "status",	.fops = &status_fops, .perm = S_IRUSR | S_IWUSR },
	{ .name = "misc",	.fops = &misc_fops,   .perm = S_IRUSR | S_IWUSR },
	{ .name = "addr",	.fops = &addr_fops,   .perm = S_IRUSR | S_IWUSR },
	{ .name = "synd",	.fops = &synd_fops,   .perm = S_IRUSR | S_IWUSR },
	{ .name = "bank",	.fops = &bank_fops,   .perm = S_IRUSR | S_IWUSR },
	{ .name = "flags",	.fops = &flags_fops,  .perm = S_IRUSR | S_IWUSR },
	{ .name = "cpu",	.fops = &extcpu_fops, .perm = S_IRUSR | S_IWUSR },
	{ .name = "README",	.fops = &readme_fops, .perm = S_IRUSR | S_IRGRP | S_IROTH },
};

static int __init debugfs_init(void)
{
	unsigned int i;
	u64 cap;

	rdmsrl(MSR_IA32_MCG_CAP, cap);
	n_banks = cap & MCG_BANKCNT_MASK;

	dfs_inj = debugfs_create_dir("mce-inject", NULL);
	if (!dfs_inj)
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(dfs_fls); i++) {
		dfs_fls[i].d = debugfs_create_file(dfs_fls[i].name,
						    dfs_fls[i].perm,
						    dfs_inj,
						    &i_mce,
						    dfs_fls[i].fops);

		if (!dfs_fls[i].d)
			goto err_dfs_add;
	}

	return 0;

err_dfs_add:
	while (i-- > 0)
		debugfs_remove(dfs_fls[i].d);

	debugfs_remove(dfs_inj);
	dfs_inj = NULL;

	return -ENODEV;
}

static int __init inject_init(void)
{
	int err;

	if (!alloc_cpumask_var(&mce_inject_cpumask, GFP_KERNEL))
		return -ENOMEM;

	err = debugfs_init();
	if (err) {
		free_cpumask_var(mce_inject_cpumask);
		return err;
	}

	register_nmi_handler(NMI_LOCAL, mce_raise_notify, 0, "mce_notify");
	mce_register_injector_chain(&inject_nb);

	setup_inj_struct(&i_mce);

	pr_info("Machine check injector initialized\n");

	return 0;
}

static void __exit inject_exit(void)
{

	mce_unregister_injector_chain(&inject_nb);
	unregister_nmi_handler(NMI_LOCAL, "mce_notify");

	debugfs_remove_recursive(dfs_inj);
	dfs_inj = NULL;

	memset(&dfs_fls, 0, sizeof(dfs_fls));

	free_cpumask_var(mce_inject_cpumask);
}

module_init(inject_init);
module_exit(inject_exit);
MODULE_LICENSE("GPL");