xref: /openbmc/linux/arch/x86/kernel/amd_nb.c (revision 96de2506)
/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs. Subject to GPLv2.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <asm/amd_nb.h>

#define PCI_DEVICE_ID_AMD_17H_ROOT	0x1450
#define PCI_DEVICE_ID_AMD_17H_M10H_ROOT	0x15d0
#define PCI_DEVICE_ID_AMD_17H_DF_F3	0x1463
#define PCI_DEVICE_ID_AMD_17H_DF_F4	0x1464
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F3 0x15eb
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F4 0x15ec

/* Protect the PCI config register pairs used for SMN and DF indirect access. */
static DEFINE_MUTEX(smn_mutex);

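/* One GART flush word per northbridge, cached from the misc device (reg 0x9c). */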
static u32 *flush_words;

static const struct pci_device_id amd_root_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_ROOT) },
	{}
};

#define PCI_DEVICE_ID_AMD_CNB17H_F4     0x1704

const struct pci_device_id amd_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
	{}
};
EXPORT_SYMBOL_GPL(amd_nb_misc_ids);

static const struct pci_device_id amd_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
	{}
};

const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
	{ 0x00, 0x18, 0x20 },
	{ 0xff, 0x00, 0x20 },
	{ 0xfe, 0x00, 0x20 },
	{ }
};

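/* Global table of detected northbridges, filled in by amd_cache_northbridges(). */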
static struct amd_northbridge_info amd_northbridges;

u16 amd_nb_num(void)
{
	return amd_northbridges.num;
}
EXPORT_SYMBOL_GPL(amd_nb_num);

bool amd_nb_has_feature(unsigned int feature)
{
	return ((amd_northbridges.flags & feature) == feature);
}
EXPORT_SYMBOL_GPL(amd_nb_has_feature);

struct amd_northbridge *node_to_amd_nb(int node)
{
	return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL;
}
EXPORT_SYMBOL_GPL(node_to_amd_nb);

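/*
 * Scan the global PCI device list starting after @dev and return the next
 * device that matches @ids, or NULL when the list is exhausted. Reference
 * counting is handled by pci_get_device().
 */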
static struct pci_dev *next_northbridge(struct pci_dev *dev,
					const struct pci_device_id *ids)
{
	do {
		dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
		if (!dev)
			break;
	} while (!pci_match_id(ids, dev));
	return dev;
}

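/*
 * SMN accesses go through an index/data register pair in the root device's
 * config space: the target SMN address is written to offset 0x60 and the
 * data is then transferred through offset 0x64. smn_mutex serializes users
 * of the pair.
 */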
static int __amd_smn_rw(u16 node, u32 address, u32 *value, bool write)
{
	struct pci_dev *root;
	int err = -ENODEV;

	if (node >= amd_northbridges.num)
		goto out;

	root = node_to_amd_nb(node)->root;
	if (!root)
		goto out;

	mutex_lock(&smn_mutex);

	err = pci_write_config_dword(root, 0x60, address);
	if (err) {
		pr_warn("Error programming SMN address 0x%x.\n", address);
		goto out_unlock;
	}

	err = (write ? pci_write_config_dword(root, 0x64, *value)
		     : pci_read_config_dword(root, 0x64, value));
	if (err)
		pr_warn("Error %s SMN address 0x%x.\n",
			(write ? "writing to" : "reading from"), address);

out_unlock:
	mutex_unlock(&smn_mutex);

out:
	return err;
}

int amd_smn_read(u16 node, u32 address, u32 *value)
{
	return __amd_smn_rw(node, address, value, false);
}
EXPORT_SYMBOL_GPL(amd_smn_read);

int amd_smn_write(u16 node, u32 address, u32 value)
{
	return __amd_smn_rw(node, address, &value, true);
}
EXPORT_SYMBOL_GPL(amd_smn_write);

/*
 * Data Fabric Indirect Access uses FICAA/FICAD.
 *
 * Fabric Indirect Configuration Access Address (FICAA): Constructed based
 * on the device's Instance Id and the PCI function and register offset of
 * the desired register.
 *
 * Fabric Indirect Configuration Access Data (FICAD): There are FICAD LO
 * and FICAD HI registers but so far we only need the LO register.
 */
int amd_df_indirect_read(u16 node, u8 func, u16 reg, u8 instance_id, u32 *lo)
{
	struct pci_dev *F4;
	u32 ficaa;
	int err = -ENODEV;

	if (node >= amd_northbridges.num)
		goto out;

	F4 = node_to_amd_nb(node)->link;
	if (!F4)
		goto out;

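	/*
	 * Build the FICAA value: bit 0 selects an indirect request, bits
	 * [9:2] carry the (dword-aligned) register offset, bits [13:11]
	 * the function number and bits [23:16] the instance ID.
	 */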
	ficaa  = 1;
	ficaa |= reg & 0x3FC;
	ficaa |= (func & 0x7) << 11;
	ficaa |= instance_id << 16;

	mutex_lock(&smn_mutex);

	err = pci_write_config_dword(F4, 0x5C, ficaa);
	if (err) {
		pr_warn("Error writing DF Indirect FICAA, FICAA=0x%x\n", ficaa);
		goto out_unlock;
	}

	err = pci_read_config_dword(F4, 0x98, lo);
	if (err)
		pr_warn("Error reading DF Indirect FICAD LO, FICAA=0x%x.\n", ficaa);

out_unlock:
	mutex_unlock(&smn_mutex);

out:
	return err;
}
EXPORT_SYMBOL_GPL(amd_df_indirect_read);

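/*
 * Enumerate the northbridges once: count the misc devices to size the
 * table, then walk the root/misc/link device lists in lockstep so that
 * entry i describes node i, and finally detect optional features (GART,
 * L3 index disable, L3 partitioning).
 */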
int amd_cache_northbridges(void)
{
	u16 i = 0;
	struct amd_northbridge *nb;
	struct pci_dev *root, *misc, *link;

	if (amd_northbridges.num)
		return 0;

	misc = NULL;
	while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
		i++;

	if (!i)
		return -ENODEV;

	nb = kcalloc(i, sizeof(struct amd_northbridge), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	amd_northbridges.nb = nb;
	amd_northbridges.num = i;

	link = misc = root = NULL;
	for (i = 0; i != amd_northbridges.num; i++) {
		node_to_amd_nb(i)->root = root =
			next_northbridge(root, amd_root_ids);
		node_to_amd_nb(i)->misc = misc =
			next_northbridge(misc, amd_nb_misc_ids);
		node_to_amd_nb(i)->link = link =
			next_northbridge(link, amd_nb_link_ids);
	}

	if (amd_gart_present())
		amd_northbridges.flags |= AMD_NB_GART;

	/*
	 * Check for L3 cache presence.
	 */
	if (!cpuid_edx(0x80000006))
		return 0;

	/*
	 * Some CPU families support L3 Cache Index Disable. There are some
	 * limitations because of E382 and E388 on family 0x10.
	 */
	if (boot_cpu_data.x86 == 0x10 &&
	    boot_cpu_data.x86_model >= 0x8 &&
	    (boot_cpu_data.x86_model > 0x9 ||
	     boot_cpu_data.x86_stepping >= 0x1))
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	/* L3 cache partitioning is supported on family 0x15 */
	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;

	return 0;
}
EXPORT_SYMBOL_GPL(amd_cache_northbridges);

/*
 * The argument packs the PCI device ID in the upper 16 bits and the vendor
 * ID in the lower 16 bits. Subdevice/subvendor are ignored, but as far as I
 * can figure out they're useless anyway.
 */
bool __init early_is_amd_nb(u32 device)
{
	const struct pci_device_id *id;
	u32 vendor = device & 0xffff;

	device >>= 16;
	for (id = amd_nb_misc_ids; id->vendor; id++)
		if (vendor == id->vendor && device == id->device)
			return true;
	return false;
}

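/*
 * Derive the MMCONFIG window from MSR_FAM10H_MMIO_CONF_BASE: the start of
 * the resource is the MSR's base field and the size is 1 MiB of config
 * space for each of the 2^segn_busn_bits buses it decodes.
 */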
struct resource *amd_get_mmconfig_range(struct resource *res)
{
	u32 address;
	u64 base, msr;
	unsigned int segn_busn_bits;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return NULL;

	/* assume all cpus from fam10h have mmconfig */
	if (boot_cpu_data.x86 < 0x10)
		return NULL;

	address = MSR_FAM10H_MMIO_CONF_BASE;
	rdmsrl(address, msr);

	/* mmconfig is not enabled */
	if (!(msr & FAM10H_MMIO_CONF_ENABLE))
		return NULL;

	base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);

	segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
			 FAM10H_MMIO_CONF_BUSRANGE_MASK;

	res->flags = IORESOURCE_MEM;
	res->start = base;
	res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
	return res;
}

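/*
 * Each compute unit owns a 4-bit enable field in the link device's L3
 * partitioning register (0x1d4); return the field for @cpu's compute unit.
 */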
int amd_get_subcaches(int cpu)
{
	struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
	unsigned int mask;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return 0;

	pci_read_config_dword(link, 0x1d4, &mask);

	return (mask >> (4 * cpu_data(cpu).cpu_core_id)) & 0xf;
}

int amd_set_subcaches(int cpu, unsigned long mask)
{
	static unsigned int reset, ban;
	struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
	unsigned int reg;
	int cuid;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
		return -EINVAL;

	/* if necessary, collect reset state of L3 partitioning and BAN mode */
	if (reset == 0) {
		pci_read_config_dword(nb->link, 0x1d4, &reset);
		pci_read_config_dword(nb->misc, 0x1b8, &ban);
		ban &= 0x180000;
	}

	/* deactivate BAN mode if any subcaches are to be disabled */
	if (mask != 0xf) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
	}

	cuid = cpu_data(cpu).cpu_core_id;
	mask <<= 4 * cuid;
	mask |= (0xf ^ (1 << cuid)) << 26;

	pci_write_config_dword(nb->link, 0x1d4, mask);

	/* reset BAN mode if L3 partitioning returned to reset state */
	pci_read_config_dword(nb->link, 0x1d4, &reg);
	if (reg == reset) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		reg &= ~0x180000;
		pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
	}

	return 0;
}

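/*
 * Cache the current GART flush-control word (misc device register 0x9c) of
 * every northbridge so that amd_flush_garts() only has to OR in the flush
 * bit when triggering a flush.
 */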
static void amd_cache_gart(void)
{
	u16 i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	flush_words = kmalloc_array(amd_northbridges.num, sizeof(u32), GFP_KERNEL);
	if (!flush_words) {
		amd_northbridges.flags &= ~AMD_NB_GART;
		pr_notice("Cannot initialize GART flush words, GART support disabled\n");
		return;
	}

	for (i = 0; i != amd_northbridges.num; i++)
		pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c, &flush_words[i]);
}

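/*
 * Flush every GART by setting bit 0 of the cached flush word in each misc
 * device and then polling until the hardware clears it again.
 */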
void amd_flush_garts(void)
{
	int flushed, i;
	unsigned long flags;
	static DEFINE_SPINLOCK(gart_lock);

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	/*
	 * Avoid races between AGP and IOMMU. In theory this is not needed,
	 * but it's unclear whether the hardware might drop a flush request
	 * while another one is still pending. The whole operation is so
	 * expensive anyway that the extra serialization doesn't matter. -AK
	 */
	spin_lock_irqsave(&gart_lock, flags);
	flushed = 0;
	for (i = 0; i < amd_northbridges.num; i++) {
		pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				       flush_words[i] | 1);
		flushed++;
	}
	for (i = 0; i < amd_northbridges.num; i++) {
		u32 w;
		/* Make sure the hardware actually executed the flush. */
		for (;;) {
			pci_read_config_dword(node_to_amd_nb(i)->misc,
					      0x9c, &w);
			if (!(w & 1))
				break;
			cpu_relax();
		}
	}
	spin_unlock_irqrestore(&gart_lock, flags);
	if (!flushed)
		pr_notice("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);

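/*
 * Per-CPU part of the erratum 688 workaround: set bits 3 and 14 of the IC
 * config MSR. Run on every CPU via on_each_cpu() from fix_erratum_688().
 */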
static void __fix_erratum_688(void *info)
{
#define MSR_AMD64_IC_CFG 0xC0011021

	msr_set_bit(MSR_AMD64_IC_CFG, 3);
	msr_set_bit(MSR_AMD64_IC_CFG, 14);
}

/* Apply erratum 688 fix so machines without a BIOS fix work. */
static __init void fix_erratum_688(void)
{
	struct pci_dev *F4;
	u32 val;

	if (boot_cpu_data.x86 != 0x14)
		return;

	if (!amd_northbridges.num)
		return;

	F4 = node_to_amd_nb(0)->link;
	if (!F4)
		return;

	if (pci_read_config_dword(F4, 0x164, &val))
		return;

	if (val & BIT(2))
		return;

	on_each_cpu(__fix_erratum_688, NULL, 0);

	pr_info("x86/cpu/AMD: CPU erratum 688 worked around\n");
}

static __init int init_amd_nbs(void)
{
	amd_cache_northbridges();
	amd_cache_gart();

	fix_erratum_688();

	return 0;
}

/* This has to go after the PCI subsystem */
fs_initcall(init_amd_nbs);