xref: /openbmc/linux/arch/x86/kernel/quirks.c (revision 4f3db074)
/*
 * This file contains work-arounds for x86 and x86_64 platform bugs.
 */
#include <linux/pci.h>
#include <linux/irq.h>

#include <asm/hpet.h>

#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_SMP) && defined(CONFIG_PCI)

static void quirk_intel_irqbalance(struct pci_dev *dev)
{
	u8 config;
	u16 word;

	/* BIOS may enable hardware IRQ balancing for
	 * E7520/E7320/E7525 (revision ID 0x9 and below)
	 * based platforms.
	 * Disable SW irqbalance/affinity on those platforms.
	 */
	if (dev->revision > 0x9)
		return;

	/* enable access to config space */
	pci_read_config_byte(dev, 0xf4, &config);
	pci_write_config_byte(dev, 0xf4, config|0x2);

	/*
	 * read xTPR register.  We may not have a pci_dev for device 8
	 * because it might be hidden until the above write.
	 */
	pci_bus_read_config_word(dev->bus, PCI_DEVFN(8, 0), 0x4c, &word);

	if (!(word & (1 << 13))) {
		dev_info(&dev->dev, "Intel E7520/7320/7525 detected; "
			"disabling irq balancing and affinity\n");
		noirqdebug_setup("");
#ifdef CONFIG_PROC_FS
		no_irq_affinity = 1;
#endif
	}

	/* put back the original value for config space */
	if (!(config & 0x2))
		pci_write_config_byte(dev, 0xf4, config);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH,
			quirk_intel_irqbalance);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH,
			quirk_intel_irqbalance);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH,
			quirk_intel_irqbalance);
#endif

#if defined(CONFIG_HPET_TIMER)
unsigned long force_hpet_address;

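/*
 * Records which chipset-specific handler force_hpet_resume() has to call
 * to re-enable a force-enabled HPET after suspend/resume.
 */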
static enum {
	NONE_FORCE_HPET_RESUME,
	OLD_ICH_FORCE_HPET_RESUME,
	ICH_FORCE_HPET_RESUME,
	VT8237_FORCE_HPET_RESUME,
	NVIDIA_FORCE_HPET_RESUME,
	ATI_FORCE_HPET_RESUME,
} force_hpet_resume_type;

static void __iomem *rcba_base;

static void ich_force_hpet_resume(void)
{
	u32 val;

	if (!force_hpet_address)
		return;

	BUG_ON(rcba_base == NULL);

	/* read the HPTC register, dword mode only */
	val = readl(rcba_base + 0x3404);
	if (!(val & 0x80)) {
		/* HPET disabled in HPTC. Try to enable it */
		writel(val | 0x80, rcba_base + 0x3404);
	}

	val = readl(rcba_base + 0x3404);
	if (!(val & 0x80))
		BUG();
	else
		printk(KERN_DEBUG "Force enabled HPET at resume\n");
}

static void ich_force_enable_hpet(struct pci_dev *dev)
{
	u32 val;
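	/* uninitialized_var() only silences a false uninitialized-use warning */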
	u32 uninitialized_var(rcba);
	int err = 0;

	if (hpet_address || force_hpet_address)
		return;

	pci_read_config_dword(dev, 0xF0, &rcba);
	rcba &= 0xFFFFC000;
	if (rcba == 0) {
		dev_printk(KERN_DEBUG, &dev->dev, "RCBA disabled; "
			"cannot force enable HPET\n");
		return;
	}

	/* use bits 31:14, 16 kB aligned */
	rcba_base = ioremap_nocache(rcba, 0x4000);
	if (rcba_base == NULL) {
		dev_printk(KERN_DEBUG, &dev->dev, "ioremap failed; "
			"cannot force enable HPET\n");
		return;
	}

	/* read the HPTC register, dword mode only */
	val = readl(rcba_base + 0x3404);

	if (val & 0x80) {
		/* HPET is enabled in HPTC. Just not reported by BIOS */
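		/*
		 * Bits 1:0 of HPTC select the HPET page: the base address is
		 * 0xFED00000 plus 0x1000 times the address-select value, i.e.
		 * one of 0xFED00000/0xFED01000/0xFED02000/0xFED03000.
		 */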
		val = val & 0x3;
		force_hpet_address = 0xFED00000 | (val << 12);
		dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
			"0x%lx\n", force_hpet_address);
		iounmap(rcba_base);
		return;
	}

	/* HPET disabled in HPTC. Try to enable it */
	writel(val | 0x80, rcba_base + 0x3404);

	val = readl(rcba_base + 0x3404);
	if (!(val & 0x80)) {
		err = 1;
	} else {
		val = val & 0x3;
		force_hpet_address = 0xFED00000 | (val << 12);
	}

	if (err) {
		force_hpet_address = 0;
		iounmap(rcba_base);
		dev_printk(KERN_DEBUG, &dev->dev,
			"Failed to force enable HPET\n");
	} else {
		force_hpet_resume_type = ICH_FORCE_HPET_RESUME;
		dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
			"0x%lx\n", force_hpet_address);
	}
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0,
			 ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0,
			 ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1,
			 ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0,
			 ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1,
			 ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_31,
			 ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_1,
			 ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_4,
			 ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_7,
			 ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x3a16,	/* ICH10 */
			 ich_force_enable_hpet);

static struct pci_dev *cached_dev;

static void hpet_print_force_info(void)
{
	printk(KERN_INFO "HPET not enabled in BIOS. "
	       "You might try hpet=force boot option\n");
}

static void old_ich_force_hpet_resume(void)
{
	u32 val;
	u32 uninitialized_var(gen_cntl);

	if (!force_hpet_address || !cached_dev)
		return;

	pci_read_config_dword(cached_dev, 0xD0, &gen_cntl);
	gen_cntl &= (~(0x7 << 15));
	gen_cntl |= (0x4 << 15);

	pci_write_config_dword(cached_dev, 0xD0, gen_cntl);
	pci_read_config_dword(cached_dev, 0xD0, &gen_cntl);
	val = gen_cntl >> 15;
	val &= 0x7;
	if (val == 0x4)
		printk(KERN_DEBUG "Force enabled HPET at resume\n");
	else
		BUG();
}

static void old_ich_force_enable_hpet(struct pci_dev *dev)
{
	u32 val;
	u32 uninitialized_var(gen_cntl);

	if (hpet_address || force_hpet_address)
		return;

	pci_read_config_dword(dev, 0xD0, &gen_cntl);
	/*
	 * Bit 17 is the HPET enable bit.
	 * Bits 16:15 control the HPET base address.
	 */
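	/*
	 * After the shift below, bit 2 of val is the enable bit and bits 1:0
	 * are the address select, giving 0xFED00000 + (select * 0x1000).
	 */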
	val = gen_cntl >> 15;
	val &= 0x7;
	if (val & 0x4) {
		val &= 0x3;
		force_hpet_address = 0xFED00000 | (val << 12);
		dev_printk(KERN_DEBUG, &dev->dev, "HPET at 0x%lx\n",
			force_hpet_address);
		return;
	}

	/*
	 * HPET is disabled. Try enabling it at FED00000 and check
	 * whether it sticks.
	 */
	gen_cntl &= (~(0x7 << 15));
	gen_cntl |= (0x4 << 15);
	pci_write_config_dword(dev, 0xD0, gen_cntl);

	pci_read_config_dword(dev, 0xD0, &gen_cntl);

	val = gen_cntl >> 15;
	val &= 0x7;
	if (val & 0x4) {
		/* HPET is enabled in GEN_CNTL. Just not reported by BIOS */
		val &= 0x3;
		force_hpet_address = 0xFED00000 | (val << 12);
		dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
			"0x%lx\n", force_hpet_address);
		cached_dev = dev;
		force_hpet_resume_type = OLD_ICH_FORCE_HPET_RESUME;
		return;
	}

	dev_printk(KERN_DEBUG, &dev->dev, "Failed to force enable HPET\n");
}

/*
 * Undocumented chipset features. Make sure that the user explicitly
 * asked for this (hpet=force) before enabling them.
 */
static void old_ich_force_enable_hpet_user(struct pci_dev *dev)
{
	if (hpet_force_user)
		old_ich_force_enable_hpet(dev);
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1,
			 old_ich_force_enable_hpet_user);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0,
			 old_ich_force_enable_hpet_user);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12,
			 old_ich_force_enable_hpet_user);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0,
			 old_ich_force_enable_hpet_user);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12,
			 old_ich_force_enable_hpet_user);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0,
			 old_ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_12,
			 old_ich_force_enable_hpet);


static void vt8237_force_hpet_resume(void)
{
	u32 val;

	if (!force_hpet_address || !cached_dev)
		return;

	val = 0xfed00000 | 0x80;
	pci_write_config_dword(cached_dev, 0x68, val);

	pci_read_config_dword(cached_dev, 0x68, &val);
	if (val & 0x80)
		printk(KERN_DEBUG "Force enabled HPET at resume\n");
	else
		BUG();
}

static void vt8237_force_enable_hpet(struct pci_dev *dev)
{
	u32 uninitialized_var(val);

	if (hpet_address || force_hpet_address)
		return;

	if (!hpet_force_user) {
		hpet_print_force_info();
		return;
	}

	pci_read_config_dword(dev, 0x68, &val);
	/*
	 * Bit 7 is the HPET enable bit.
	 * Bits 31:10 are the HPET base address (contrary to what the
	 * datasheet claims).
	 */
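	/*
	 * force_hpet_address keeps bits 31:10 only, i.e. the base address
	 * with the low control bits (including the bit 7 enable) masked off.
	 */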
	if (val & 0x80) {
		force_hpet_address = (val & ~0x3ff);
		dev_printk(KERN_DEBUG, &dev->dev, "HPET at 0x%lx\n",
			force_hpet_address);
		return;
	}

	/*
	 * HPET is disabled. Try enabling it at FED00000 and check
	 * whether it sticks.
	 */
	val = 0xfed00000 | 0x80;
	pci_write_config_dword(dev, 0x68, val);

	pci_read_config_dword(dev, 0x68, &val);
	if (val & 0x80) {
		force_hpet_address = (val & ~0x3ff);
		dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
			"0x%lx\n", force_hpet_address);
		cached_dev = dev;
		force_hpet_resume_type = VT8237_FORCE_HPET_RESUME;
		return;
	}

	dev_printk(KERN_DEBUG, &dev->dev, "Failed to force enable HPET\n");
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235,
			 vt8237_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237,
			 vt8237_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_CX700,
			 vt8237_force_enable_hpet);

static void ati_force_hpet_resume(void)
{
	pci_write_config_dword(cached_dev, 0x14, 0xfed00000);
	printk(KERN_DEBUG "Force enabled HPET at resume\n");
}

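/*
 * Read the SB4x0 revision ID from config register 0x08 of the SMBus device.
 * The preceding writes to registers 0xac and 0x70 are undocumented and are
 * preserved as-is from the original quirk.
 */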
static u32 ati_ixp4x0_rev(struct pci_dev *dev)
{
	int err = 0;
	u32 d = 0;
	u8  b = 0;

	err = pci_read_config_byte(dev, 0xac, &b);
	b &= ~(1<<5);
	err |= pci_write_config_byte(dev, 0xac, b);
	err |= pci_read_config_dword(dev, 0x70, &d);
	d |= 1<<8;
	err |= pci_write_config_dword(dev, 0x70, d);
	err |= pci_read_config_dword(dev, 0x8, &d);
	d &= 0xff;
	dev_printk(KERN_DEBUG, &dev->dev, "SB4X0 revision 0x%x\n", d);

	WARN_ON_ONCE(err);

	return d;
}


static void ati_force_enable_hpet(struct pci_dev *dev)
{
	u32 d, val;
	u8  b;

	if (hpet_address || force_hpet_address)
		return;

	if (!hpet_force_user) {
		hpet_print_force_info();
		return;
	}

	d = ati_ixp4x0_rev(dev);
	if (d < 0x82)
		return;

	/* base address */
	pci_write_config_dword(dev, 0x14, 0xfed00000);
	pci_read_config_dword(dev, 0x14, &val);

	/* enable interrupt */
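	/* 0xcd6/0xcd7 are the southbridge PM register index/data I/O ports */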
	outb(0x72, 0xcd6); b = inb(0xcd7);
	b |= 0x1;
	outb(0x72, 0xcd6); outb(b, 0xcd7);
	outb(0x72, 0xcd6); b = inb(0xcd7);
	if (!(b & 0x1))
		return;
	pci_read_config_dword(dev, 0x64, &d);
	d |= (1<<10);
	pci_write_config_dword(dev, 0x64, d);
	pci_read_config_dword(dev, 0x64, &d);
	if (!(d & (1<<10)))
		return;

	force_hpet_address = val;
	force_hpet_resume_type = ATI_FORCE_HPET_RESUME;
	dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at 0x%lx\n",
		   force_hpet_address);
	cached_dev = dev;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP400_SMBUS,
			 ati_force_enable_hpet);

/*
 * Undocumented chipset feature taken from LinuxBIOS.
 */
static void nvidia_force_hpet_resume(void)
{
	pci_write_config_dword(cached_dev, 0x44, 0xfed00001);
	printk(KERN_DEBUG "Force enabled HPET at resume\n");
}

static void nvidia_force_enable_hpet(struct pci_dev *dev)
{
	u32 uninitialized_var(val);

	if (hpet_address || force_hpet_address)
		return;

	if (!hpet_force_user) {
		hpet_print_force_info();
		return;
	}

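	/*
	 * Program the HPET base at 0xFED00000; bit 0 of register 0x44 is
	 * treated as the enable bit (undocumented, taken from LinuxBIOS)
	 * and is masked off below when computing force_hpet_address.
	 */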
	pci_write_config_dword(dev, 0x44, 0xfed00001);
	pci_read_config_dword(dev, 0x44, &val);
	force_hpet_address = val & 0xfffffffe;
	force_hpet_resume_type = NVIDIA_FORCE_HPET_RESUME;
	dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at 0x%lx\n",
		force_hpet_address);
	cached_dev = dev;
}

/* ISA Bridges */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0050,
			nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0051,
			nvidia_force_enable_hpet);

/* LPC bridges */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0260,
			nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0360,
			nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0361,
			nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0362,
			nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0363,
			nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0364,
			nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0365,
			nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0366,
			nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0367,
			nvidia_force_enable_hpet);

void force_hpet_resume(void)
{
	switch (force_hpet_resume_type) {
	case ICH_FORCE_HPET_RESUME:
		ich_force_hpet_resume();
		return;
	case OLD_ICH_FORCE_HPET_RESUME:
		old_ich_force_hpet_resume();
		return;
	case VT8237_FORCE_HPET_RESUME:
		vt8237_force_hpet_resume();
		return;
	case NVIDIA_FORCE_HPET_RESUME:
		nvidia_force_hpet_resume();
		return;
	case ATI_FORCE_HPET_RESUME:
		ati_force_hpet_resume();
		return;
	default:
		break;
	}
}

/*
 * According to the datasheet, e6xx systems have the HPET hardwired to
 * 0xfed00000.
 */
static void e6xx_force_enable_hpet(struct pci_dev *dev)
{
	if (hpet_address || force_hpet_address)
		return;

	force_hpet_address = 0xFED00000;
	force_hpet_resume_type = NONE_FORCE_HPET_RESUME;
	dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
		"0x%lx\n", force_hpet_address);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E6XX_CU,
			 e6xx_force_enable_hpet);

/*
 * HPET MSI on some boards (ATI SB700/SB800) has a side effect on
 * floppy DMA. Disable HPET MSI on such platforms.
 * See erratum #27 (Misinterpreted MSI Requests May Result in
 * Corrupted LPC DMA Data) in AMD Publication #46837,
 * "SB700 Family Product Errata", Rev. 1.0, March 2010.
 */
static void force_disable_hpet_msi(struct pci_dev *unused)
{
	hpet_msi_disable = 1;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS,
			 force_disable_hpet_msi);

#endif

#if defined(CONFIG_PCI) && defined(CONFIG_NUMA)
/* Set correct numa_node information for AMD NB functions */
static void quirk_amd_nb_node(struct pci_dev *dev)
{
	struct pci_dev *nb_ht;
	unsigned int devfn;
	u32 node;
	u32 val;

	devfn = PCI_DEVFN(PCI_SLOT(dev->devfn), 0);
	nb_ht = pci_get_slot(dev->bus, devfn);
	if (!nb_ht)
		return;

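	/*
	 * Register 0x60 of HT config function 0 holds the node ID; only the
	 * low three bits are used here.
	 */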
	pci_read_config_dword(nb_ht, 0x60, &val);
	node = pcibus_to_node(dev->bus) | (val & 7);
	/*
	 * Some hardware may return an invalid node ID,
	 * so check it first:
	 */
	if (node_online(node))
		set_dev_node(&dev->dev, node);
	pci_dev_put(nb_ht);
}

DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_HT,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MAP,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_DRAM,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_LINK,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F0,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F1,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F2,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F5,
			quirk_amd_nb_node);

#endif

#ifdef CONFIG_PCI
/*
 * The processor does not ensure that the DRAM scrub read/write
 * sequence is atomic wrt accesses to the CC6 save state area.
 * Therefore, if a concurrent scrub read/write access is to the same
 * address, the entry may appear as if it is not written. This quirk
 * applies to Fam16h models 00h-0Fh.
 *
 * See "Revision Guide" for AMD F16h models 00h-0fh,
 * document 51810 rev. 3.04, Nov 2013.
 */
static void amd_disable_seq_and_redirect_scrub(struct pci_dev *dev)
{
	u32 val;

	/*
	 * Suggested workaround:
	 * set D18F3x58[4:0] = 00h and set D18F3x5C[0] = 0b
	 */
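	/*
	 * D18F3x58[4:0] is the sequential DRAM scrub rate (00h disables the
	 * scrubber); D18F3x5C[0] is the scrub-redirection enable.
	 */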
	pci_read_config_dword(dev, 0x58, &val);
	if (val & 0x1F) {
		val &= ~(0x1F);
		pci_write_config_dword(dev, 0x58, val);
	}

	pci_read_config_dword(dev, 0x5C, &val);
	if (val & BIT(0)) {
		val &= ~BIT(0);
		pci_write_config_dword(dev, 0x5c, val);
	}
}

DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3,
			amd_disable_seq_and_redirect_scrub);

#endif
629