xref: /openbmc/linux/arch/x86/kernel/amd_nb.c (revision 4949009e)
/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs. Subject to GPLv2.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/amd_nb.h>

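/*
 * Cached copy of each northbridge's GART flush word (PCI config offset
 * 0x9c of the misc device); filled in by amd_cache_gart() below.
 */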
static u32 *flush_words;

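/*
 * PCI IDs of the "misc" (function 3) northbridge devices of the families
 * supported here; matched against all PCI devices in next_northbridge().
 */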
const struct pci_device_id amd_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
	{}
};
EXPORT_SYMBOL(amd_nb_misc_ids);

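/* PCI IDs of the corresponding "link" (function 4) northbridge devices. */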
static const struct pci_device_id amd_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
	{}
};

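/*
 * { bus, dev_base, dev_limit } ranges in which northbridge devices may be
 * found (field meaning taken from struct amd_nb_bus_dev_range); the
 * consumers of this table live outside this file.
 */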
const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
	{ 0x00, 0x18, 0x20 },
	{ 0xff, 0x00, 0x20 },
	{ 0xfe, 0x00, 0x20 },
	{ }
};

struct amd_northbridge_info amd_northbridges;
EXPORT_SYMBOL(amd_northbridges);

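/*
 * Walk all PCI devices, starting after @dev, and return the next one that
 * matches an entry in @ids (or NULL when the device list is exhausted).
 */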
static struct pci_dev *next_northbridge(struct pci_dev *dev,
					const struct pci_device_id *ids)
{
	do {
		dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
		if (!dev)
			break;
	} while (!pci_match_id(ids, dev));
	return dev;
}

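/*
 * Count the northbridge misc devices, allocate one amd_northbridge per
 * node, pair up the misc (F3) and link (F4) devices and set the feature
 * flags (GART, L3 index disable, L3 partitioning) for the running family.
 *
 * A typical caller pattern looks roughly like this (illustrative sketch,
 * not code from this file):
 *
 *	if (amd_cache_northbridges() < 0 || !amd_nb_num())
 *		return -ENODEV;
 *	misc = node_to_amd_nb(amd_get_nb_id(cpu))->misc;
 */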
int amd_cache_northbridges(void)
{
	u16 i = 0;
	struct amd_northbridge *nb;
	struct pci_dev *misc, *link;

	if (amd_nb_num())
		return 0;

	misc = NULL;
	while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
		i++;

	if (i == 0)
		return 0;

	nb = kzalloc(i * sizeof(struct amd_northbridge), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	amd_northbridges.nb = nb;
	amd_northbridges.num = i;

	link = misc = NULL;
	for (i = 0; i != amd_nb_num(); i++) {
		node_to_amd_nb(i)->misc = misc =
			next_northbridge(misc, amd_nb_misc_ids);
		node_to_amd_nb(i)->link = link =
			next_northbridge(link, amd_nb_link_ids);
	}

	/*
	 * GART is present on families 0xf and 0x10, and on Fam15h only up
	 * to model 0fh.
	 */
	if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
	    (boot_cpu_data.x86 == 0x15 && boot_cpu_data.x86_model < 0x10))
		amd_northbridges.flags |= AMD_NB_GART;

	/*
	 * Check for L3 cache presence.
	 */
	if (!cpuid_edx(0x80000006))
		return 0;

	/*
	 * Some CPU families support L3 Cache Index Disable. There are some
	 * limitations because of errata E382 and E388 on family 0x10.
	 */
	if (boot_cpu_data.x86 == 0x10 &&
	    boot_cpu_data.x86_model >= 0x8 &&
	    (boot_cpu_data.x86_model > 0x9 ||
	     boot_cpu_data.x86_mask >= 0x1))
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	/* L3 cache partitioning is supported on family 0x15 */
	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;

	return 0;
}
EXPORT_SYMBOL_GPL(amd_cache_northbridges);

/*
 * Ignores subdevice/subvendor, but as far as I can figure out
 * they're useless here anyway.
 */
bool __init early_is_amd_nb(u32 device)
{
	const struct pci_device_id *id;
	u32 vendor = device & 0xffff;

	device >>= 16;
	for (id = amd_nb_misc_ids; id->vendor; id++)
		if (vendor == id->vendor && device == id->device)
			return true;
	return false;
}

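/*
 * Fill @res with the MMCONFIG aperture described by the
 * MSR_FAM10H_MMIO_CONF_BASE MSR, or return NULL if the CPU is not a
 * Fam10h-or-later AMD part or MMCONFIG is not enabled.
 */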
struct resource *amd_get_mmconfig_range(struct resource *res)
{
	u32 address;
	u64 base, msr;
	unsigned segn_busn_bits;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return NULL;

	/* Assume all CPUs from Fam10h onward have MMCONFIG. */
	if (boot_cpu_data.x86 < 0x10)
		return NULL;

	address = MSR_FAM10H_MMIO_CONF_BASE;
	rdmsrl(address, msr);

	/* mmconfig is not enabled */
	if (!(msr & FAM10H_MMIO_CONF_ENABLE))
		return NULL;

	base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);

	segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
			 FAM10H_MMIO_CONF_BUSRANGE_MASK;

	res->flags = IORESOURCE_MEM;
	res->start = base;
	res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
	return res;
}

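/*
 * Return the bitmask of enabled L3 subcaches for @cpu's compute unit,
 * read from register 0x1d4 of the link (F4) device.
 */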
int amd_get_subcaches(int cpu)
{
	struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
	unsigned int mask;
	int cuid;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return 0;

	pci_read_config_dword(link, 0x1d4, &mask);

	cuid = cpu_data(cpu).compute_unit_id;
	return (mask >> (4 * cuid)) & 0xf;
}

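/*
 * Enable/disable L3 subcaches for @cpu's compute unit according to @mask
 * (one bit per subcache).  BAN mode (bits 19-20 of register 0x1b8) is
 * temporarily dropped while any subcache is disabled and restored once
 * the partitioning returns to its reset state.
 */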
int amd_set_subcaches(int cpu, unsigned long mask)
{
	static unsigned int reset, ban;
	struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
	unsigned int reg;
	int cuid;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
		return -EINVAL;

	/* if necessary, collect reset state of L3 partitioning and BAN mode */
	if (reset == 0) {
		pci_read_config_dword(nb->link, 0x1d4, &reset);
		pci_read_config_dword(nb->misc, 0x1b8, &ban);
		ban &= 0x180000;
	}

	/* deactivate BAN mode if any subcaches are to be disabled */
	if (mask != 0xf) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
	}

	cuid = cpu_data(cpu).compute_unit_id;
	mask <<= 4 * cuid;
	mask |= (0xf ^ (1 << cuid)) << 26;

	pci_write_config_dword(nb->link, 0x1d4, mask);

	/* reset BAN mode if L3 partitioning returned to reset state */
	pci_read_config_dword(nb->link, 0x1d4, &reg);
	if (reg == reset) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		reg &= ~0x180000;
		pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
	}

	return 0;
}

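/*
 * Cache the current GART flush word (config offset 0x9c of each misc
 * device) so amd_flush_garts() can rewrite it with the flush bit set.
 */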
static int amd_cache_gart(void)
{
	u16 i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return 0;

	flush_words = kmalloc(amd_nb_num() * sizeof(u32), GFP_KERNEL);
	if (!flush_words) {
		amd_northbridges.flags &= ~AMD_NB_GART;
		return -ENOMEM;
	}

	for (i = 0; i != amd_nb_num(); i++)
		pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				      &flush_words[i]);

	return 0;
}

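/*
 * Flush the GART TLB on every northbridge by setting bit 0 of the cached
 * flush word and polling until the hardware clears it again.
 */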
void amd_flush_garts(void)
{
	int flushed, i;
	unsigned long flags;
	static DEFINE_SPINLOCK(gart_lock);

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	/*
	 * Avoid races between AGP and IOMMU. In theory it's not needed,
	 * but I'm not sure the hardware doesn't lose flush requests
	 * when another one is pending. This whole thing is so expensive
	 * anyway that it doesn't matter to serialize more. -AK
	 */
	spin_lock_irqsave(&gart_lock, flags);
	flushed = 0;
	for (i = 0; i < amd_nb_num(); i++) {
		pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				       flush_words[i] | 1);
		flushed++;
	}
	for (i = 0; i < amd_nb_num(); i++) {
		u32 w;
		/* Make sure the hardware actually executed the flush */
		for (;;) {
			pci_read_config_dword(node_to_amd_nb(i)->misc,
					      0x9c, &w);
			if (!(w & 1))
				break;
			cpu_relax();
		}
	}
	spin_unlock_irqrestore(&gart_lock, flags);
	if (!flushed)
		pr_notice("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);

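/*
 * Late (fs_initcall) setup: enumerate the northbridges and cache the GART
 * flush words once PCI is up.
 */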
static __init int init_amd_nbs(void)
{
	int err = 0;

	err = amd_cache_northbridges();

	if (err < 0)
		pr_notice("Cannot enumerate AMD northbridges\n");

	if (amd_cache_gart() < 0)
		pr_notice("Cannot initialize GART flush words, GART support disabled\n");

	return err;
}

/* This has to go after the PCI subsystem */
fs_initcall(init_amd_nbs);