xref: /openbmc/linux/arch/x86/kernel/quirks.c (revision 8cb5d748)
1 /*
2  * This file contains work-arounds for x86 and x86_64 platform bugs.
3  */
4 #include <linux/dmi.h>
5 #include <linux/pci.h>
6 #include <linux/irq.h>
7 
8 #include <asm/hpet.h>
9 
10 #if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_SMP) && defined(CONFIG_PCI)
11 
/*
 * quirk_intel_irqbalance() - disable software IRQ balancing/affinity on
 * Intel E7520/E7320/E7525 MCH platforms (revision ID <= 0x9) whose BIOS
 * may have enabled hardware IRQ balancing.  Runs as a PCI "final" fixup
 * for the MCH devices registered below.
 */
static void quirk_intel_irqbalance(struct pci_dev *dev)
{
	u8 config;
	u16 word;

	/* BIOS may enable hardware IRQ balancing for
	 * E7520/E7320/E7525(revision ID 0x9 and below)
	 * based platforms.
	 * Disable SW irqbalance/affinity on those platforms.
	 */
	if (dev->revision > 0x9)
		return;

	/* enable access to config space*/
	/* NOTE(review): bit 1 of reg 0xf4 appears to unhide device 8's
	 * config space (see comment below) - confirm against the chipset
	 * datasheet. */
	pci_read_config_byte(dev, 0xf4, &config);
	pci_write_config_byte(dev, 0xf4, config|0x2);

	/*
	 * read xTPR register.  We may not have a pci_dev for device 8
	 * because it might be hidden until the above write.
	 */
	pci_bus_read_config_word(dev->bus, PCI_DEVFN(8, 0), 0x4c, &word);

	/* xTPR bit 13 clear => disable SW balancing/affinity here */
	if (!(word & (1 << 13))) {
		dev_info(&dev->dev, "Intel E7520/7320/7525 detected; "
			"disabling irq balancing and affinity\n");
		noirqdebug_setup("");
#ifdef CONFIG_PROC_FS
		no_irq_affinity = 1;
#endif
	}

	/* put back the original value for config space*/
	/* only restore if the access bit was not already set by firmware */
	if (!(config & 0x2))
		pci_write_config_byte(dev, 0xf4, config);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH,
			quirk_intel_irqbalance);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH,
			quirk_intel_irqbalance);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH,
			quirk_intel_irqbalance);
54 #endif
55 
56 #if defined(CONFIG_HPET_TIMER)
/* Physical address at which one of the quirks below force-enabled the
 * HPET; zero when no forcing has taken place. */
unsigned long force_hpet_address;

/* Selects which chipset-specific handler force_hpet_resume() runs. */
static enum {
	NONE_FORCE_HPET_RESUME,
	OLD_ICH_FORCE_HPET_RESUME,
	ICH_FORCE_HPET_RESUME,
	VT8237_FORCE_HPET_RESUME,
	NVIDIA_FORCE_HPET_RESUME,
	ATI_FORCE_HPET_RESUME,
} force_hpet_resume_type;

/* ICH RCBA mapping, kept mapped by ich_force_enable_hpet() on success
 * so ich_force_hpet_resume() can reuse it. */
static void __iomem *rcba_base;
69 
70 static void ich_force_hpet_resume(void)
71 {
72 	u32 val;
73 
74 	if (!force_hpet_address)
75 		return;
76 
77 	BUG_ON(rcba_base == NULL);
78 
79 	/* read the Function Disable register, dword mode only */
80 	val = readl(rcba_base + 0x3404);
81 	if (!(val & 0x80)) {
82 		/* HPET disabled in HPTC. Trying to enable */
83 		writel(val | 0x80, rcba_base + 0x3404);
84 	}
85 
86 	val = readl(rcba_base + 0x3404);
87 	if (!(val & 0x80))
88 		BUG();
89 	else
90 		printk(KERN_DEBUG "Force enabled HPET at resume\n");
91 
92 	return;
93 }
94 
/*
 * ich_force_enable_hpet() - force-enable the HPET on ICH6..ICH10/ESB2
 * southbridges whose BIOS left it disabled (or enabled but unreported).
 *
 * Maps the chipset's RCBA space and sets bit 7 (enable) in the HPTC
 * register at offset 0x3404.  On success force_hpet_address is set and
 * the RCBA mapping is intentionally kept in rcba_base so that
 * ich_force_hpet_resume() can redo the enable after suspend.
 */
static void ich_force_enable_hpet(struct pci_dev *dev)
{
	u32 val;
	u32 uninitialized_var(rcba);
	int err = 0;

	/* nothing to do if the HPET is already known or already forced */
	if (hpet_address || force_hpet_address)
		return;

	/* config offset 0xF0 holds the RCBA base address */
	pci_read_config_dword(dev, 0xF0, &rcba);
	rcba &= 0xFFFFC000;
	if (rcba == 0) {
		dev_printk(KERN_DEBUG, &dev->dev, "RCBA disabled; "
			"cannot force enable HPET\n");
		return;
	}

	/* use bits 31:14, 16 kB aligned */
	rcba_base = ioremap_nocache(rcba, 0x4000);
	if (rcba_base == NULL) {
		dev_printk(KERN_DEBUG, &dev->dev, "ioremap failed; "
			"cannot force enable HPET\n");
		return;
	}

	/* read the Function Disable register, dword mode only */
	val = readl(rcba_base + 0x3404);

	if (val & 0x80) {
		/* HPET is enabled in HPTC. Just not reported by BIOS */
		/* bits 1:0 select one of four 4 kB-spaced base addresses */
		val = val & 0x3;
		force_hpet_address = 0xFED00000 | (val << 12);
		dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
			"0x%lx\n", force_hpet_address);
		iounmap(rcba_base);
		return;
	}

	/* HPET disabled in HPTC. Trying to enable */
	writel(val | 0x80, rcba_base + 0x3404);

	/* re-read to check whether the enable bit stuck */
	val = readl(rcba_base + 0x3404);
	if (!(val & 0x80)) {
		err = 1;
	} else {
		val = val & 0x3;
		force_hpet_address = 0xFED00000 | (val << 12);
	}

	if (err) {
		force_hpet_address = 0;
		iounmap(rcba_base);
		dev_printk(KERN_DEBUG, &dev->dev,
			"Failed to force enable HPET\n");
	} else {
		/* keep rcba_base mapped: the resume handler needs it */
		force_hpet_resume_type = ICH_FORCE_HPET_RESUME;
		dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
			"0x%lx\n", force_hpet_address);
	}
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0,
			 ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0,
			 ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1,
			 ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0,
			 ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1,
			 ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_31,
			 ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_1,
			 ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_4,
			 ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_7,
			 ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x3a16,	/* ICH10 */
			 ich_force_enable_hpet);
176 
/* PCI device saved by the force-enable quirks for use at resume time. */
static struct pci_dev *cached_dev;
178 
179 static void hpet_print_force_info(void)
180 {
181 	printk(KERN_INFO "HPET not enabled in BIOS. "
182 	       "You might try hpet=force boot option\n");
183 }
184 
/*
 * Redo the GEN_CNTL programming from old_ich_force_enable_hpet() at
 * resume time: set bits 17:15 of the register at config offset 0xD0
 * back to 0x4 (enable, base 0xFED00000) and verify the value stuck.
 */
static void old_ich_force_hpet_resume(void)
{
	u32 val;
	u32 uninitialized_var(gen_cntl);

	if (!force_hpet_address || !cached_dev)
		return;

	pci_read_config_dword(cached_dev, 0xD0, &gen_cntl);
	/* clear bits 17:15, then set the enable bit (bit 17) */
	gen_cntl &= (~(0x7 << 15));
	gen_cntl |= (0x4 << 15);

	pci_write_config_dword(cached_dev, 0xD0, gen_cntl);
	pci_read_config_dword(cached_dev, 0xD0, &gen_cntl);
	/* read back and verify the enable stuck; BUG if it did not */
	val = gen_cntl >> 15;
	val &= 0x7;
	if (val == 0x4)
		printk(KERN_DEBUG "Force enabled HPET at resume\n");
	else
		BUG();
}
206 
/*
 * Force-enable the HPET on the older ICH/ESB southbridges registered
 * below, via the GEN_CNTL register at config offset 0xD0.  On success
 * the device is cached for old_ich_force_hpet_resume().
 */
static void old_ich_force_enable_hpet(struct pci_dev *dev)
{
	u32 val;
	u32 uninitialized_var(gen_cntl);

	/* nothing to do if the HPET is already known or already forced */
	if (hpet_address || force_hpet_address)
		return;

	pci_read_config_dword(dev, 0xD0, &gen_cntl);
	/*
	 * Bit 17 is HPET enable bit.
	 * Bit 16:15 control the HPET base address.
	 */
	val = gen_cntl >> 15;
	val &= 0x7;
	if (val & 0x4) {
		/* already enabled; bits 16:15 select the base address */
		val &= 0x3;
		force_hpet_address = 0xFED00000 | (val << 12);
		dev_printk(KERN_DEBUG, &dev->dev, "HPET at 0x%lx\n",
			force_hpet_address);
		return;
	}

	/*
	 * HPET is disabled. Trying enabling at FED00000 and check
	 * whether it sticks
	 */
	gen_cntl &= (~(0x7 << 15));
	gen_cntl |= (0x4 << 15);
	pci_write_config_dword(dev, 0xD0, gen_cntl);

	pci_read_config_dword(dev, 0xD0, &gen_cntl);

	val = gen_cntl >> 15;
	val &= 0x7;
	if (val & 0x4) {
		/* HPET is enabled in HPTC. Just not reported by BIOS */
		val &= 0x3;
		force_hpet_address = 0xFED00000 | (val << 12);
		dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
			"0x%lx\n", force_hpet_address);
		/* remember the device so the resume handler can redo this */
		cached_dev = dev;
		force_hpet_resume_type = OLD_ICH_FORCE_HPET_RESUME;
		return;
	}

	dev_printk(KERN_DEBUG, &dev->dev, "Failed to force enable HPET\n");
}
255 
256 /*
257  * Undocumented chipset features. Make sure that the user enforced
258  * this.
259  */
260 static void old_ich_force_enable_hpet_user(struct pci_dev *dev)
261 {
262 	if (hpet_force_user)
263 		old_ich_force_enable_hpet(dev);
264 }
265 
266 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1,
267 			 old_ich_force_enable_hpet_user);
268 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0,
269 			 old_ich_force_enable_hpet_user);
270 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12,
271 			 old_ich_force_enable_hpet_user);
272 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0,
273 			 old_ich_force_enable_hpet_user);
274 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12,
275 			 old_ich_force_enable_hpet_user);
276 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0,
277 			 old_ich_force_enable_hpet);
278 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_12,
279 			 old_ich_force_enable_hpet);
280 
281 
282 static void vt8237_force_hpet_resume(void)
283 {
284 	u32 val;
285 
286 	if (!force_hpet_address || !cached_dev)
287 		return;
288 
289 	val = 0xfed00000 | 0x80;
290 	pci_write_config_dword(cached_dev, 0x68, val);
291 
292 	pci_read_config_dword(cached_dev, 0x68, &val);
293 	if (val & 0x80)
294 		printk(KERN_DEBUG "Force enabled HPET at resume\n");
295 	else
296 		BUG();
297 }
298 
/*
 * Force-enable the HPET on the VIA VT8235/VT8237/CX700 southbridges
 * registered below.  HPET control lives at config offset 0x68.
 * Requires hpet=force since the feature is undocumented.
 */
static void vt8237_force_enable_hpet(struct pci_dev *dev)
{
	u32 uninitialized_var(val);

	/* nothing to do if the HPET is already known or already forced */
	if (hpet_address || force_hpet_address)
		return;

	/* undocumented feature: require explicit hpet=force from the user */
	if (!hpet_force_user) {
		hpet_print_force_info();
		return;
	}

	pci_read_config_dword(dev, 0x68, &val);
	/*
	 * Bit 7 is HPET enable bit.
	 * Bit 31:10 is HPET base address (contrary to what datasheet claims)
	 */
	if (val & 0x80) {
		/* already enabled, just not reported by the BIOS */
		force_hpet_address = (val & ~0x3ff);
		dev_printk(KERN_DEBUG, &dev->dev, "HPET at 0x%lx\n",
			force_hpet_address);
		return;
	}

	/*
	 * HPET is disabled. Trying enabling at FED00000 and check
	 * whether it sticks
	 */
	val = 0xfed00000 | 0x80;
	pci_write_config_dword(dev, 0x68, val);

	pci_read_config_dword(dev, 0x68, &val);
	if (val & 0x80) {
		force_hpet_address = (val & ~0x3ff);
		dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
			"0x%lx\n", force_hpet_address);
		/* remember the device so the resume handler can redo this */
		cached_dev = dev;
		force_hpet_resume_type = VT8237_FORCE_HPET_RESUME;
		return;
	}

	dev_printk(KERN_DEBUG, &dev->dev, "Failed to force enable HPET\n");
}
342 
343 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235,
344 			 vt8237_force_enable_hpet);
345 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237,
346 			 vt8237_force_enable_hpet);
347 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_CX700,
348 			 vt8237_force_enable_hpet);
349 
/*
 * Restore the HPET base address (config offset 0x14) programmed by
 * ati_force_enable_hpet() at resume time.  Unlike the other resume
 * handlers, no verification read is performed.
 */
static void ati_force_hpet_resume(void)
{
	pci_write_config_dword(cached_dev, 0x14, 0xfed00000);
	printk(KERN_DEBUG "Force enabled HPET at resume\n");
}
355 
/*
 * Read the SB4X0 revision ID.  The sequence first clears bit 5 of
 * config reg 0xac and sets bit 8 of config reg 0x70 - NOTE(review):
 * bit semantics are taken from the code only; presumably this unlocks
 * the revision register, confirm against the IXP4x0 datasheet.
 * Returns the low byte of the register at config offset 0x8.
 */
static u32 ati_ixp4x0_rev(struct pci_dev *dev)
{
	int err = 0;
	u32 d = 0;
	u8  b = 0;

	err = pci_read_config_byte(dev, 0xac, &b);
	b &= ~(1<<5);
	err |= pci_write_config_byte(dev, 0xac, b);
	err |= pci_read_config_dword(dev, 0x70, &d);
	d |= 1<<8;
	err |= pci_write_config_dword(dev, 0x70, d);
	err |= pci_read_config_dword(dev, 0x8, &d);
	/* revision ID lives in the low byte */
	d &= 0xff;
	dev_printk(KERN_DEBUG, &dev->dev, "SB4X0 revision 0x%x\n", d);

	/* any failed config access above makes the result unreliable */
	WARN_ON_ONCE(err);

	return d;
}
376 
/*
 * Force-enable the HPET on the ATI IXP400 SMBus device (SB4X0 revision
 * 0x82 or newer).  Programs the base address via config offset 0x14,
 * enables the interrupt through the 0xcd6/0xcd7 index/data port pair
 * (NOTE(review): presumably the PM registers - confirm against the
 * SB4x0 datasheet) and sets bit 10 of config offset 0x64, bailing out
 * if any of the bits fail to stick.
 */
static void ati_force_enable_hpet(struct pci_dev *dev)
{
	u32 d, val;
	u8  b;

	/* nothing to do if the HPET is already known or already forced */
	if (hpet_address || force_hpet_address)
		return;

	/* undocumented feature: require explicit hpet=force from the user */
	if (!hpet_force_user) {
		hpet_print_force_info();
		return;
	}

	/* quirk only applies to revision 0x82 and later */
	d = ati_ixp4x0_rev(dev);
	if (d  < 0x82)
		return;

	/* base address */
	pci_write_config_dword(dev, 0x14, 0xfed00000);
	pci_read_config_dword(dev, 0x14, &val);

	/* enable interrupt */
	outb(0x72, 0xcd6); b = inb(0xcd7);
	b |= 0x1;
	outb(0x72, 0xcd6); outb(b, 0xcd7);
	outb(0x72, 0xcd6); b = inb(0xcd7);
	if (!(b & 0x1))
		return;
	pci_read_config_dword(dev, 0x64, &d);
	d |= (1<<10);
	pci_write_config_dword(dev, 0x64, d);
	pci_read_config_dword(dev, 0x64, &d);
	if (!(d & (1<<10)))
		return;

	/* use the address the device actually reported back */
	force_hpet_address = val;
	force_hpet_resume_type = ATI_FORCE_HPET_RESUME;
	dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at 0x%lx\n",
		   force_hpet_address);
	cached_dev = dev;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP400_SMBUS,
			 ati_force_enable_hpet);
420 
421 /*
422  * Undocumented chipset feature taken from LinuxBIOS.
423  */
/* Rewrite the HPET register (config offset 0x44) at resume time with
 * 0xfed00001 (base 0xfed00000; low bit presumably the enable bit). */
static void nvidia_force_hpet_resume(void)
{
	pci_write_config_dword(cached_dev, 0x44, 0xfed00001);
	printk(KERN_DEBUG "Force enabled HPET at resume\n");
}
429 
430 static void nvidia_force_enable_hpet(struct pci_dev *dev)
431 {
432 	u32 uninitialized_var(val);
433 
434 	if (hpet_address || force_hpet_address)
435 		return;
436 
437 	if (!hpet_force_user) {
438 		hpet_print_force_info();
439 		return;
440 	}
441 
442 	pci_write_config_dword(dev, 0x44, 0xfed00001);
443 	pci_read_config_dword(dev, 0x44, &val);
444 	force_hpet_address = val & 0xfffffffe;
445 	force_hpet_resume_type = NVIDIA_FORCE_HPET_RESUME;
446 	dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at 0x%lx\n",
447 		force_hpet_address);
448 	cached_dev = dev;
449 	return;
450 }
451 
452 /* ISA Bridges */
453 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0050,
454 			nvidia_force_enable_hpet);
455 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0051,
456 			nvidia_force_enable_hpet);
457 
458 /* LPC bridges */
459 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0260,
460 			nvidia_force_enable_hpet);
461 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0360,
462 			nvidia_force_enable_hpet);
463 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0361,
464 			nvidia_force_enable_hpet);
465 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0362,
466 			nvidia_force_enable_hpet);
467 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0363,
468 			nvidia_force_enable_hpet);
469 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0364,
470 			nvidia_force_enable_hpet);
471 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0365,
472 			nvidia_force_enable_hpet);
473 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0366,
474 			nvidia_force_enable_hpet);
475 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0367,
476 			nvidia_force_enable_hpet);
477 
478 void force_hpet_resume(void)
479 {
480 	switch (force_hpet_resume_type) {
481 	case ICH_FORCE_HPET_RESUME:
482 		ich_force_hpet_resume();
483 		return;
484 	case OLD_ICH_FORCE_HPET_RESUME:
485 		old_ich_force_hpet_resume();
486 		return;
487 	case VT8237_FORCE_HPET_RESUME:
488 		vt8237_force_hpet_resume();
489 		return;
490 	case NVIDIA_FORCE_HPET_RESUME:
491 		nvidia_force_hpet_resume();
492 		return;
493 	case ATI_FORCE_HPET_RESUME:
494 		ati_force_hpet_resume();
495 		return;
496 	default:
497 		break;
498 	}
499 }
500 
501 /*
502  * According to the datasheet e6xx systems have the HPET hardwired to
503  * 0xfed00000
504  */
505 static void e6xx_force_enable_hpet(struct pci_dev *dev)
506 {
507 	if (hpet_address || force_hpet_address)
508 		return;
509 
510 	force_hpet_address = 0xFED00000;
511 	force_hpet_resume_type = NONE_FORCE_HPET_RESUME;
512 	dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
513 		"0x%lx\n", force_hpet_address);
514 	return;
515 }
516 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E6XX_CU,
517 			 e6xx_force_enable_hpet);
518 
519 /*
520  * HPET MSI on some boards (ATI SB700/SB800) has side effect on
521  * floppy DMA. Disable HPET MSI on such platforms.
522  * See erratum #27 (Misinterpreted MSI Requests May Result in
523  * Corrupted LPC DMA Data) in AMD Publication #46837,
524  * "SB700 Family Product Errata", Rev. 1.0, March 2010.
525  */
/* Globally disable HPET MSI; see the SB700 erratum described above. */
static void force_disable_hpet_msi(struct pci_dev *unused)
{
	hpet_msi_disable = true;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS,
			 force_disable_hpet_msi);
533 
534 #endif
535 
536 #if defined(CONFIG_PCI) && defined(CONFIG_NUMA)
537 /* Set correct numa_node information for AMD NB functions */
538 static void quirk_amd_nb_node(struct pci_dev *dev)
539 {
540 	struct pci_dev *nb_ht;
541 	unsigned int devfn;
542 	u32 node;
543 	u32 val;
544 
545 	devfn = PCI_DEVFN(PCI_SLOT(dev->devfn), 0);
546 	nb_ht = pci_get_slot(dev->bus, devfn);
547 	if (!nb_ht)
548 		return;
549 
550 	pci_read_config_dword(nb_ht, 0x60, &val);
551 	node = pcibus_to_node(dev->bus) | (val & 7);
552 	/*
553 	 * Some hardware may return an invalid node ID,
554 	 * so check it first:
555 	 */
556 	if (node_online(node))
557 		set_dev_node(&dev->dev, node);
558 	pci_dev_put(nb_ht);
559 }
560 
561 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB,
562 			quirk_amd_nb_node);
563 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
564 			quirk_amd_nb_node);
565 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
566 			quirk_amd_nb_node);
567 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC,
568 			quirk_amd_nb_node);
569 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_HT,
570 			quirk_amd_nb_node);
571 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MAP,
572 			quirk_amd_nb_node);
573 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_DRAM,
574 			quirk_amd_nb_node);
575 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC,
576 			quirk_amd_nb_node);
577 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_LINK,
578 			quirk_amd_nb_node);
579 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F0,
580 			quirk_amd_nb_node);
581 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F1,
582 			quirk_amd_nb_node);
583 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F2,
584 			quirk_amd_nb_node);
585 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3,
586 			quirk_amd_nb_node);
587 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4,
588 			quirk_amd_nb_node);
589 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F5,
590 			quirk_amd_nb_node);
591 
592 #endif
593 
594 #ifdef CONFIG_PCI
595 /*
596  * Processor does not ensure DRAM scrub read/write sequence
597  * is atomic wrt accesses to CC6 save state area. Therefore
598  * if a concurrent scrub read/write access is to same address
599  * the entry may appear as if it is not written. This quirk
600  * applies to Fam16h models 00h-0Fh
601  *
602  * See "Revision Guide" for AMD F16h models 00h-0fh,
603  * document 51810 rev. 3.04, Nov 2013
604  */
605 static void amd_disable_seq_and_redirect_scrub(struct pci_dev *dev)
606 {
607 	u32 val;
608 
609 	/*
610 	 * Suggested workaround:
611 	 * set D18F3x58[4:0] = 00h and set D18F3x5C[0] = 0b
612 	 */
613 	pci_read_config_dword(dev, 0x58, &val);
614 	if (val & 0x1F) {
615 		val &= ~(0x1F);
616 		pci_write_config_dword(dev, 0x58, val);
617 	}
618 
619 	pci_read_config_dword(dev, 0x5C, &val);
620 	if (val & BIT(0)) {
621 		val &= ~BIT(0);
622 		pci_write_config_dword(dev, 0x5c, val);
623 	}
624 }
625 
626 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3,
627 			amd_disable_seq_and_redirect_scrub);
628 
629 #if defined(CONFIG_X86_64) && defined(CONFIG_X86_MCE)
630 #include <linux/jump_label.h>
631 #include <asm/string_64.h>
632 
633 /* Ivy Bridge, Haswell, Broadwell */
634 static void quirk_intel_brickland_xeon_ras_cap(struct pci_dev *pdev)
635 {
636 	u32 capid0;
637 
638 	pci_read_config_dword(pdev, 0x84, &capid0);
639 
640 	if (capid0 & 0x10)
641 		static_branch_inc(&mcsafe_key);
642 }
643 
644 /* Skylake */
645 static void quirk_intel_purley_xeon_ras_cap(struct pci_dev *pdev)
646 {
647 	u32 capid0;
648 
649 	pci_read_config_dword(pdev, 0x84, &capid0);
650 
651 	if ((capid0 & 0xc0) == 0xc0)
652 		static_branch_inc(&mcsafe_key);
653 }
654 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x0ec3, quirk_intel_brickland_xeon_ras_cap);
655 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, quirk_intel_brickland_xeon_ras_cap);
656 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, quirk_intel_brickland_xeon_ras_cap);
657 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2083, quirk_intel_purley_xeon_ras_cap);
658 #endif
659 #endif
660 
/* True when DMI reports an Apple machine; set by early_platform_quirks(). */
bool x86_apple_machine;
EXPORT_SYMBOL(x86_apple_machine);
663 
664 void __init early_platform_quirks(void)
665 {
666 	x86_apple_machine = dmi_match(DMI_SYS_VENDOR, "Apple Inc.") ||
667 			    dmi_match(DMI_SYS_VENDOR, "Apple Computer, Inc.");
668 }
669