xref: /openbmc/linux/arch/x86/kernel/amd_nb.c (revision 943126417891372d56aa3fe46295cbf53db31370)
/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs. Subject to GPLv2.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <asm/amd_nb.h>

#define PCI_DEVICE_ID_AMD_17H_ROOT	0x1450
#define PCI_DEVICE_ID_AMD_17H_M10H_ROOT	0x15d0
#define PCI_DEVICE_ID_AMD_17H_DF_F3	0x1463
#define PCI_DEVICE_ID_AMD_17H_DF_F4	0x1464
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F3 0x15eb
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F4 0x15ec

/* Protect the PCI config register pairs used for SMN and DF indirect access. */
static DEFINE_MUTEX(smn_mutex);

static u32 *flush_words;

static const struct pci_device_id amd_root_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_ROOT) },
	{}
};

#define PCI_DEVICE_ID_AMD_CNB17H_F4     0x1704

const struct pci_device_id amd_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
	{}
};
EXPORT_SYMBOL_GPL(amd_nb_misc_ids);

static const struct pci_device_id amd_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
	{}
};

static const struct pci_device_id hygon_root_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_ROOT) },
	{}
};

const struct pci_device_id hygon_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) },
	{}
};

static const struct pci_device_id hygon_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F4) },
	{}
};

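/* Each entry is { bus, device base, device limit }, a range scanned for NB devices. */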
const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
	{ 0x00, 0x18, 0x20 },
	{ 0xff, 0x00, 0x20 },
	{ 0xfe, 0x00, 0x20 },
	{ }
};

static struct amd_northbridge_info amd_northbridges;

u16 amd_nb_num(void)
{
	return amd_northbridges.num;
}
EXPORT_SYMBOL_GPL(amd_nb_num);

bool amd_nb_has_feature(unsigned int feature)
{
	return ((amd_northbridges.flags & feature) == feature);
}
EXPORT_SYMBOL_GPL(amd_nb_has_feature);

struct amd_northbridge *node_to_amd_nb(int node)
{
	return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL;
}
EXPORT_SYMBOL_GPL(node_to_amd_nb);

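/* Return the next PCI device after @dev that matches @ids, or NULL when done. */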
static struct pci_dev *next_northbridge(struct pci_dev *dev,
					const struct pci_device_id *ids)
{
	do {
		dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
		if (!dev)
			break;
	} while (!pci_match_id(ids, dev));
	return dev;
}

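/*
 * SMN accesses go through an index/data pair in the root device's config
 * space: the SMN address is written to register 0x60 and the data is then
 * read or written at register 0x64, serialized by smn_mutex.
 */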
static int __amd_smn_rw(u16 node, u32 address, u32 *value, bool write)
{
	struct pci_dev *root;
	int err = -ENODEV;

	if (node >= amd_northbridges.num)
		goto out;

	root = node_to_amd_nb(node)->root;
	if (!root)
		goto out;

	mutex_lock(&smn_mutex);

	err = pci_write_config_dword(root, 0x60, address);
	if (err) {
		pr_warn("Error programming SMN address 0x%x.\n", address);
		goto out_unlock;
	}

	err = (write ? pci_write_config_dword(root, 0x64, *value)
		     : pci_read_config_dword(root, 0x64, value));
	if (err)
		pr_warn("Error %s SMN address 0x%x.\n",
			(write ? "writing to" : "reading from"), address);

out_unlock:
	mutex_unlock(&smn_mutex);

out:
	return err;
}

int amd_smn_read(u16 node, u32 address, u32 *value)
{
	return __amd_smn_rw(node, address, value, false);
}
EXPORT_SYMBOL_GPL(amd_smn_read);

int amd_smn_write(u16 node, u32 address, u32 value)
{
	return __amd_smn_rw(node, address, &value, true);
}
EXPORT_SYMBOL_GPL(amd_smn_write);

/*
 * Data Fabric Indirect Access uses FICAA/FICAD.
 *
 * Fabric Indirect Configuration Access Address (FICAA): Constructed based
 * on the device's Instance ID and the PCI function and register offset of
 * the desired register.
 *
 * Fabric Indirect Configuration Access Data (FICAD): There are FICAD LO
 * and FICAD HI registers but so far we only need the LO register.
 */
int amd_df_indirect_read(u16 node, u8 func, u16 reg, u8 instance_id, u32 *lo)
{
	struct pci_dev *F4;
	u32 ficaa;
	int err = -ENODEV;

	if (node >= amd_northbridges.num)
		goto out;

	F4 = node_to_amd_nb(node)->link;
	if (!F4)
		goto out;

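	/*
	 * Build FICAA: bit 0 set, register offset in bits [9:2], function in
	 * bits [13:11], instance ID in bits [23:16].
	 */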
	ficaa  = 1;
	ficaa |= reg & 0x3FC;
	ficaa |= (func & 0x7) << 11;
	ficaa |= instance_id << 16;

	mutex_lock(&smn_mutex);

	err = pci_write_config_dword(F4, 0x5C, ficaa);
	if (err) {
		pr_warn("Error writing DF Indirect FICAA, FICAA=0x%x\n", ficaa);
		goto out_unlock;
	}

	err = pci_read_config_dword(F4, 0x98, lo);
	if (err)
		pr_warn("Error reading DF Indirect FICAD LO, FICAA=0x%x.\n", ficaa);

out_unlock:
	mutex_unlock(&smn_mutex);

out:
	return err;
}
EXPORT_SYMBOL_GPL(amd_df_indirect_read);

int amd_cache_northbridges(void)
{
	const struct pci_device_id *misc_ids = amd_nb_misc_ids;
	const struct pci_device_id *link_ids = amd_nb_link_ids;
	const struct pci_device_id *root_ids = amd_root_ids;
	struct pci_dev *root, *misc, *link;
	struct amd_northbridge *nb;
	u16 i = 0;

	if (amd_northbridges.num)
		return 0;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
		root_ids = hygon_root_ids;
		misc_ids = hygon_nb_misc_ids;
		link_ids = hygon_nb_link_ids;
	}

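	/* First pass: count the NB misc (function 3) devices to size the array. */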
	misc = NULL;
	while ((misc = next_northbridge(misc, misc_ids)) != NULL)
		i++;

	if (!i)
		return -ENODEV;

	nb = kcalloc(i, sizeof(struct amd_northbridge), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	amd_northbridges.nb = nb;
	amd_northbridges.num = i;

	link = misc = root = NULL;
	for (i = 0; i != amd_northbridges.num; i++) {
		node_to_amd_nb(i)->root = root =
			next_northbridge(root, root_ids);
		node_to_amd_nb(i)->misc = misc =
			next_northbridge(misc, misc_ids);
		node_to_amd_nb(i)->link = link =
			next_northbridge(link, link_ids);
	}

	if (amd_gart_present())
		amd_northbridges.flags |= AMD_NB_GART;

	/*
	 * Check for L3 cache presence.
	 */
	if (!cpuid_edx(0x80000006))
		return 0;

	/*
	 * Some CPU families support L3 Cache Index Disable. There are some
	 * limitations because of errata E382 and E388 on family 0x10.
	 */
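	/* Models 8h-9h need stepping >= 1; models above 9h qualify at any stepping. */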
	if (boot_cpu_data.x86 == 0x10 &&
	    boot_cpu_data.x86_model >= 0x8 &&
	    (boot_cpu_data.x86_model > 0x9 ||
	     boot_cpu_data.x86_stepping >= 0x1))
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	/* L3 cache partitioning is supported on family 0x15 */
	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;

	return 0;
}
EXPORT_SYMBOL_GPL(amd_cache_northbridges);

/*
 * Ignores subdevice/subvendor but as far as I can figure out
 * they're useless anyway.
 */
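/* @device packs the PCI vendor ID in its low 16 bits, the device ID in the high 16 bits. */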
bool __init early_is_amd_nb(u32 device)
{
	const struct pci_device_id *misc_ids = amd_nb_misc_ids;
	const struct pci_device_id *id;
	u32 vendor = device & 0xffff;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
		return false;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
		misc_ids = hygon_nb_misc_ids;

	device >>= 16;
	for (id = misc_ids; id->vendor; id++)
		if (vendor == id->vendor && device == id->device)
			return true;
	return false;
}

struct resource *amd_get_mmconfig_range(struct resource *res)
{
	u32 address;
	u64 base, msr;
	unsigned int segn_busn_bits;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
		return NULL;

	/* Assume all CPUs from fam10h onward have mmconfig. */
	if (boot_cpu_data.x86 < 0x10)
		return NULL;

	address = MSR_FAM10H_MMIO_CONF_BASE;
	rdmsrl(address, msr);

	/* mmconfig is not enabled */
	if (!(msr & FAM10H_MMIO_CONF_ENABLE))
		return NULL;

	base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);

	segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
			 FAM10H_MMIO_CONF_BUSRANGE_MASK;

	res->flags = IORESOURCE_MEM;
	res->start = base;
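	/* Each bus decodes 1 MiB (20 bits) of MMCONFIG space, hence the size below. */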
	res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
	return res;
}

int amd_get_subcaches(int cpu)
{
	struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
	unsigned int mask;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return 0;

	pci_read_config_dword(link, 0x1d4, &mask);

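	/* Four subcache-enable bits per compute unit, indexed by cpu_core_id. */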
	return (mask >> (4 * cpu_data(cpu).cpu_core_id)) & 0xf;
}

int amd_set_subcaches(int cpu, unsigned long mask)
{
	static unsigned int reset, ban;
	struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
	unsigned int reg;
	int cuid;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
		return -EINVAL;

	/* if necessary, collect reset state of L3 partitioning and BAN mode */
	if (reset == 0) {
		pci_read_config_dword(nb->link, 0x1d4, &reset);
		pci_read_config_dword(nb->misc, 0x1b8, &ban);
		ban &= 0x180000;
	}

	/* deactivate BAN mode if any subcaches are to be disabled */
	if (mask != 0xf) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
	}

	cuid = cpu_data(cpu).cpu_core_id;
	mask <<= 4 * cuid;
	mask |= (0xf ^ (1 << cuid)) << 26;

	pci_write_config_dword(nb->link, 0x1d4, mask);

	/* reset BAN mode if L3 partitioning returned to reset state */
	pci_read_config_dword(nb->link, 0x1d4, &reg);
	if (reg == reset) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		reg &= ~0x180000;
		pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
	}

	return 0;
}

static void amd_cache_gart(void)
{
	u16 i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	flush_words = kmalloc_array(amd_northbridges.num, sizeof(u32), GFP_KERNEL);
	if (!flush_words) {
		amd_northbridges.flags &= ~AMD_NB_GART;
		pr_notice("Cannot initialize GART flush words, GART support disabled\n");
		return;
	}

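	/*
	 * Cache each node's GART flush word (misc device register 0x9c);
	 * bit 0 is the flush trigger used by amd_flush_garts().
	 */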
	for (i = 0; i != amd_northbridges.num; i++)
		pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c, &flush_words[i]);
}

void amd_flush_garts(void)
{
	int flushed, i;
	unsigned long flags;
	static DEFINE_SPINLOCK(gart_lock);

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	/*
	 * Avoid races between AGP and IOMMU. In theory it's not needed,
	 * but I'm not sure whether the hardware can lose flush requests
	 * while another is pending. The whole operation is so expensive
	 * anyway that a little extra serialization doesn't matter. -AK
	 */
	spin_lock_irqsave(&gart_lock, flags);
	flushed = 0;
	for (i = 0; i < amd_northbridges.num; i++) {
		pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				       flush_words[i] | 1);
		flushed++;
	}
	for (i = 0; i < amd_northbridges.num; i++) {
		u32 w;
		/* Make sure the hardware actually executed the flush. */
		for (;;) {
			pci_read_config_dword(node_to_amd_nb(i)->misc,
					      0x9c, &w);
			if (!(w & 1))
				break;
			cpu_relax();
		}
	}
	spin_unlock_irqrestore(&gart_lock, flags);
	if (!flushed)
		pr_notice("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);

static void __fix_erratum_688(void *info)
{
#define MSR_AMD64_IC_CFG 0xC0011021

	msr_set_bit(MSR_AMD64_IC_CFG, 3);
	msr_set_bit(MSR_AMD64_IC_CFG, 14);
}

/* Apply erratum 688 fix so machines without a BIOS fix work. */
static __init void fix_erratum_688(void)
{
	struct pci_dev *F4;
	u32 val;

	if (boot_cpu_data.x86 != 0x14)
		return;

	if (!amd_northbridges.num)
		return;

	F4 = node_to_amd_nb(0)->link;
	if (!F4)
		return;

	if (pci_read_config_dword(F4, 0x164, &val))
		return;

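	/* Bit 2 set here means the fix has already been applied (e.g. by the BIOS). */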
	if (val & BIT(2))
		return;

	on_each_cpu(__fix_erratum_688, NULL, 0);

	pr_info("x86/cpu/AMD: CPU erratum 688 worked around\n");
}

static __init int init_amd_nbs(void)
{
	amd_cache_northbridges();
	amd_cache_gart();

	fix_erratum_688();

	return 0;
}

/* This has to go after the PCI subsystem */
fs_initcall(init_amd_nbs);