xref: /openbmc/linux/arch/x86/kernel/amd_nb.c (revision df2634f43f5106947f3735a0b61a6527a4b278cd)
1 /*
2  * Shared support code for AMD K8 northbridges and derivatives.
3  * Copyright 2006 Andi Kleen, SUSE Labs. Subject to GPLv2.
4  */
5 #include <linux/types.h>
6 #include <linux/slab.h>
7 #include <linux/init.h>
8 #include <linux/errno.h>
9 #include <linux/module.h>
10 #include <linux/spinlock.h>
11 #include <asm/amd_nb.h>
12 
/* Cached copy of each northbridge's GART cache-control word (PCI config
   offset 0x9c), allocated and filled by amd_cache_gart(). */
static u32 *flush_words;

/* PCI IDs of the northbridge "misc" (function 3) device for K8,
   family 0x10 and family 0x15 parts; terminated by an all-zero entry
   as required by pci_match_id(). */
struct pci_device_id amd_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_MISC) },
	{}
};
EXPORT_SYMBOL(amd_nb_misc_ids);

/* Bus/device ranges scanned for northbridges; zero-terminated.
   NOTE(review): field order appears to be (bus, dev_base, dev_limit) —
   confirm against struct amd_nb_bus_dev_range in <asm/amd_nb.h>. */
const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
	{ 0x00, 0x18, 0x20 },
	{ 0xff, 0x00, 0x20 },
	{ 0xfe, 0x00, 0x20 },
	{ }
};

/* Global cache of discovered northbridges: device pointers, count and
   feature flags (AMD_NB_*). Filled by amd_cache_northbridges(). */
struct amd_northbridge_info amd_northbridges;
EXPORT_SYMBOL(amd_northbridges);
32 
33 static struct pci_dev *next_northbridge(struct pci_dev *dev,
34 					struct pci_device_id *ids)
35 {
36 	do {
37 		dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
38 		if (!dev)
39 			break;
40 	} while (!pci_match_id(ids, dev));
41 	return dev;
42 }
43 
44 int amd_cache_northbridges(void)
45 {
46 	int i = 0;
47 	struct amd_northbridge *nb;
48 	struct pci_dev *misc;
49 
50 	if (amd_nb_num())
51 		return 0;
52 
53 	misc = NULL;
54 	while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
55 		i++;
56 
57 	if (i == 0)
58 		return 0;
59 
60 	nb = kzalloc(i * sizeof(struct amd_northbridge), GFP_KERNEL);
61 	if (!nb)
62 		return -ENOMEM;
63 
64 	amd_northbridges.nb = nb;
65 	amd_northbridges.num = i;
66 
67 	misc = NULL;
68 	for (i = 0; i != amd_nb_num(); i++) {
69 		node_to_amd_nb(i)->misc = misc =
70 			next_northbridge(misc, amd_nb_misc_ids);
71         }
72 
73 	/* some CPU families (e.g. family 0x11) do not support GART */
74 	if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
75 	    boot_cpu_data.x86 == 0x15)
76 		amd_northbridges.flags |= AMD_NB_GART;
77 
78 	/*
79 	 * Some CPU families support L3 Cache Index Disable. There are some
80 	 * limitations because of E382 and E388 on family 0x10.
81 	 */
82 	if (boot_cpu_data.x86 == 0x10 &&
83 	    boot_cpu_data.x86_model >= 0x8 &&
84 	    (boot_cpu_data.x86_model > 0x9 ||
85 	     boot_cpu_data.x86_mask >= 0x1))
86 		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;
87 
88 	return 0;
89 }
90 EXPORT_SYMBOL_GPL(amd_cache_northbridges);
91 
92 /* Ignores subdevice/subvendor but as far as I can figure out
93    they're useless anyways */
94 int __init early_is_amd_nb(u32 device)
95 {
96 	struct pci_device_id *id;
97 	u32 vendor = device & 0xffff;
98 	device >>= 16;
99 	for (id = amd_nb_misc_ids; id->vendor; id++)
100 		if (vendor == id->vendor && device == id->device)
101 			return 1;
102 	return 0;
103 }
104 
105 int amd_cache_gart(void)
106 {
107        int i;
108 
109        if (!amd_nb_has_feature(AMD_NB_GART))
110                return 0;
111 
112        flush_words = kmalloc(amd_nb_num() * sizeof(u32), GFP_KERNEL);
113        if (!flush_words) {
114                amd_northbridges.flags &= ~AMD_NB_GART;
115                return -ENOMEM;
116        }
117 
118        for (i = 0; i != amd_nb_num(); i++)
119                pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c,
120                                      &flush_words[i]);
121 
122        return 0;
123 }
124 
125 void amd_flush_garts(void)
126 {
127 	int flushed, i;
128 	unsigned long flags;
129 	static DEFINE_SPINLOCK(gart_lock);
130 
131 	if (!amd_nb_has_feature(AMD_NB_GART))
132 		return;
133 
134 	/* Avoid races between AGP and IOMMU. In theory it's not needed
135 	   but I'm not sure if the hardware won't lose flush requests
136 	   when another is pending. This whole thing is so expensive anyways
137 	   that it doesn't matter to serialize more. -AK */
138 	spin_lock_irqsave(&gart_lock, flags);
139 	flushed = 0;
140 	for (i = 0; i < amd_nb_num(); i++) {
141 		pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
142 				       flush_words[i] | 1);
143 		flushed++;
144 	}
145 	for (i = 0; i < amd_nb_num(); i++) {
146 		u32 w;
147 		/* Make sure the hardware actually executed the flush*/
148 		for (;;) {
149 			pci_read_config_dword(node_to_amd_nb(i)->misc,
150 					      0x9c, &w);
151 			if (!(w & 1))
152 				break;
153 			cpu_relax();
154 		}
155 	}
156 	spin_unlock_irqrestore(&gart_lock, flags);
157 	if (!flushed)
158 		printk("nothing to flush?\n");
159 }
160 EXPORT_SYMBOL_GPL(amd_flush_garts);
161 
162 static __init int init_amd_nbs(void)
163 {
164 	int err = 0;
165 
166 	err = amd_cache_northbridges();
167 
168 	if (err < 0)
169 		printk(KERN_NOTICE "AMD NB: Cannot enumerate AMD northbridges.\n");
170 
171 	if (amd_cache_gart() < 0)
172 		printk(KERN_NOTICE "AMD NB: Cannot initialize GART flush words, "
173 		       "GART support disabled.\n");
174 
175 	return err;
176 }
177 
178 /* This has to go after the PCI subsystem */
179 fs_initcall(init_amd_nbs);
180