// SPDX-License-Identifier: GPL-2.0-only
/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/pci_ids.h>
#include <asm/amd_nb.h>

#define PCI_DEVICE_ID_AMD_17H_ROOT		0x1450
#define PCI_DEVICE_ID_AMD_17H_M10H_ROOT		0x15d0
#define PCI_DEVICE_ID_AMD_17H_M30H_ROOT		0x1480
#define PCI_DEVICE_ID_AMD_17H_M60H_ROOT		0x1630
#define PCI_DEVICE_ID_AMD_17H_MA0H_ROOT		0x14b5
#define PCI_DEVICE_ID_AMD_19H_M10H_ROOT		0x14a4
#define PCI_DEVICE_ID_AMD_19H_M40H_ROOT		0x14b5
#define PCI_DEVICE_ID_AMD_19H_M60H_ROOT		0x14d8
#define PCI_DEVICE_ID_AMD_19H_M70H_ROOT		0x14e8
#define PCI_DEVICE_ID_AMD_1AH_M00H_ROOT		0x153a
#define PCI_DEVICE_ID_AMD_1AH_M20H_ROOT		0x1507
#define PCI_DEVICE_ID_AMD_MI200_ROOT		0x14bb

#define PCI_DEVICE_ID_AMD_17H_DF_F4		0x1464
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F4	0x15ec
#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F4	0x1494
#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F4	0x144c
#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F4	0x1444
#define PCI_DEVICE_ID_AMD_17H_MA0H_DF_F4	0x1728
#define PCI_DEVICE_ID_AMD_19H_DF_F4		0x1654
#define PCI_DEVICE_ID_AMD_19H_M10H_DF_F4	0x14b1
#define PCI_DEVICE_ID_AMD_19H_M40H_DF_F4	0x167d
#define PCI_DEVICE_ID_AMD_19H_M50H_DF_F4	0x166e
#define PCI_DEVICE_ID_AMD_19H_M60H_DF_F4	0x14e4
#define PCI_DEVICE_ID_AMD_19H_M70H_DF_F4	0x14f4
#define PCI_DEVICE_ID_AMD_19H_M78H_DF_F4	0x12fc
#define PCI_DEVICE_ID_AMD_1AH_M00H_DF_F4	0x12c4
#define PCI_DEVICE_ID_AMD_MI200_DF_F4		0x14d4

/* Protect the PCI config register pairs used for SMN. */
static DEFINE_MUTEX(smn_mutex);

static u32 *flush_words;

static const struct pci_device_id amd_root_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_MA0H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M10H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M60H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M70H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M00H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M20H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI200_ROOT) },
	{}
};

#define PCI_DEVICE_ID_AMD_CNB17H_F4	0x1704

static const struct pci_device_id amd_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_MA0H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M10H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M60H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M70H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M78H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M00H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M20H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI200_DF_F3) },
	{}
};

static const struct pci_device_id amd_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_MA0H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M10H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M60H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M70H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M78H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M00H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI200_DF_F4) },
	{}
};

static const struct pci_device_id hygon_root_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_ROOT) },
	{}
};

static const struct pci_device_id hygon_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) },
	{}
};

static const struct pci_device_id hygon_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F4) },
	{}
};

const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
	{ 0x00, 0x18, 0x20 },
	{ 0xff, 0x00, 0x20 },
	{ 0xfe, 0x00, 0x20 },
	{ }
};

static struct amd_northbridge_info amd_northbridges;

u16 amd_nb_num(void)
{
	return amd_northbridges.num;
}
EXPORT_SYMBOL_GPL(amd_nb_num);

bool amd_nb_has_feature(unsigned int feature)
{
	return ((amd_northbridges.flags & feature) == feature);
}
EXPORT_SYMBOL_GPL(amd_nb_has_feature);

struct amd_northbridge *node_to_amd_nb(int node)
{
	return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL;
}
EXPORT_SYMBOL_GPL(node_to_amd_nb);

static struct pci_dev *next_northbridge(struct pci_dev *dev,
					const struct pci_device_id *ids)
{
	do {
		dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
		if (!dev)
			break;
	} while (!pci_match_id(ids, dev));
	return dev;
}

static int __amd_smn_rw(u16 node, u32 address, u32 *value, bool write)
{
	struct pci_dev *root;
	int err = -ENODEV;

	if (node >= amd_northbridges.num)
		goto out;

	root = node_to_amd_nb(node)->root;
	if (!root)
		goto out;

	mutex_lock(&smn_mutex);

	err = pci_write_config_dword(root, 0x60, address);
	if (err) {
		pr_warn("Error programming SMN address 0x%x.\n", address);
		goto out_unlock;
	}

	err = (write ? pci_write_config_dword(root, 0x64, *value)
		     : pci_read_config_dword(root, 0x64, value));
	if (err)
		pr_warn("Error %s SMN address 0x%x.\n",
			(write ? "writing to" : "reading from"), address);

out_unlock:
	mutex_unlock(&smn_mutex);

out:
	return err;
}

int amd_smn_read(u16 node, u32 address, u32 *value)
{
	int err = __amd_smn_rw(node, address, value, false);

	if (PCI_POSSIBLE_ERROR(*value)) {
		err = -ENODEV;
		*value = 0;
	}

	return err;
}
EXPORT_SYMBOL_GPL(amd_smn_read);

int amd_smn_write(u16 node, u32 address, u32 value)
{
	return __amd_smn_rw(node, address, &value, true);
}
EXPORT_SYMBOL_GPL(amd_smn_write);

static int amd_cache_northbridges(void)
{
	const struct pci_device_id *misc_ids = amd_nb_misc_ids;
	const struct pci_device_id *link_ids = amd_nb_link_ids;
	const struct pci_device_id *root_ids = amd_root_ids;
	struct pci_dev *root, *misc, *link;
	struct amd_northbridge *nb;
	u16 roots_per_misc = 0;
	u16 misc_count = 0;
	u16 root_count = 0;
	u16 i, j;

	if (amd_northbridges.num)
		return 0;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
		root_ids = hygon_root_ids;
		misc_ids = hygon_nb_misc_ids;
		link_ids = hygon_nb_link_ids;
	}

	misc = NULL;
	while ((misc = next_northbridge(misc, misc_ids)))
		misc_count++;

	if (!misc_count)
		return -ENODEV;

	root = NULL;
	while ((root = next_northbridge(root, root_ids)))
		root_count++;

	if (root_count) {
		roots_per_misc = root_count / misc_count;

		/*
		 * There should be _exactly_ N roots for each DF/SMN
		 * interface.
		 */
		if (!roots_per_misc || (root_count % roots_per_misc)) {
			pr_info("Unsupported AMD DF/PCI configuration found\n");
			return -ENODEV;
		}
	}

	nb = kcalloc(misc_count, sizeof(struct amd_northbridge), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	amd_northbridges.nb = nb;
	amd_northbridges.num = misc_count;

	link = misc = root = NULL;
	for (i = 0; i < amd_northbridges.num; i++) {
		node_to_amd_nb(i)->root = root =
			next_northbridge(root, root_ids);
		node_to_amd_nb(i)->misc = misc =
			next_northbridge(misc, misc_ids);
		node_to_amd_nb(i)->link = link =
			next_northbridge(link, link_ids);

		/*
		 * If there are more PCI root devices than data fabric/
		 * system management network interfaces, then the (N)
		 * PCI roots per DF/SMN interface are functionally the
		 * same (for DF/SMN access) and N-1 are redundant. N-1
		 * PCI roots should be skipped per DF/SMN interface so
		 * the following DF/SMN interfaces get mapped to
		 * correct PCI roots.
		 */
		for (j = 1; j < roots_per_misc; j++)
			root = next_northbridge(root, root_ids);
	}

	if (amd_gart_present())
		amd_northbridges.flags |= AMD_NB_GART;

	/*
	 * Check for L3 cache presence.
	 */
	if (!cpuid_edx(0x80000006))
		return 0;

	/*
	 * Some CPU families support L3 Cache Index Disable. There are some
	 * limitations because of E382 and E388 on family 0x10.
	 */
	if (boot_cpu_data.x86 == 0x10 &&
	    boot_cpu_data.x86_model >= 0x8 &&
	    (boot_cpu_data.x86_model > 0x9 ||
	     boot_cpu_data.x86_stepping >= 0x1))
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	/* L3 cache partitioning is supported on family 0x15 */
	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;

	return 0;
}

/*
 * Ignores subdevice/subvendor but as far as I can figure out
 * they're useless anyways
 */
bool __init early_is_amd_nb(u32 device)
{
	const struct pci_device_id *misc_ids = amd_nb_misc_ids;
	const struct pci_device_id *id;
	u32 vendor = device & 0xffff;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
		return false;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
		misc_ids = hygon_nb_misc_ids;

	device >>= 16;
	for (id = misc_ids; id->vendor; id++)
		if (vendor == id->vendor && device == id->device)
			return true;
	return false;
}

struct resource *amd_get_mmconfig_range(struct resource *res)
{
	u32 address;
	u64 base, msr;
	unsigned int segn_busn_bits;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
		return NULL;

	/* assume all cpus from fam10h have mmconfig */
	if (boot_cpu_data.x86 < 0x10)
		return NULL;

	address = MSR_FAM10H_MMIO_CONF_BASE;
	rdmsrl(address, msr);

	/* mmconfig is not enabled */
	if (!(msr & FAM10H_MMIO_CONF_ENABLE))
		return NULL;

	base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);

	segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
			 FAM10H_MMIO_CONF_BUSRANGE_MASK;

	res->flags = IORESOURCE_MEM;
	res->start = base;
	res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
	return res;
}

int amd_get_subcaches(int cpu)
{
	struct pci_dev *link = node_to_amd_nb(topology_die_id(cpu))->link;
	unsigned int mask;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return 0;

	pci_read_config_dword(link, 0x1d4, &mask);

	return (mask >> (4 * cpu_data(cpu).cpu_core_id)) & 0xf;
}

int amd_set_subcaches(int cpu, unsigned long mask)
{
	static unsigned int reset, ban;
	struct amd_northbridge *nb = node_to_amd_nb(topology_die_id(cpu));
	unsigned int reg;
	int cuid;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
		return -EINVAL;

	/* if necessary, collect reset state of L3 partitioning and BAN mode */
	if (reset == 0) {
		pci_read_config_dword(nb->link, 0x1d4, &reset);
		pci_read_config_dword(nb->misc, 0x1b8, &ban);
		ban &= 0x180000;
	}

	/* deactivate BAN mode if any subcaches are to be disabled */
	if (mask != 0xf) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
	}

	cuid = cpu_data(cpu).cpu_core_id;
	mask <<= 4 * cuid;
	mask |= (0xf ^ (1 << cuid)) << 26;

	pci_write_config_dword(nb->link, 0x1d4, mask);

	/* reset BAN mode if L3 partitioning returned to reset state */
	pci_read_config_dword(nb->link, 0x1d4, &reg);
	if (reg == reset) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		reg &= ~0x180000;
		pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
	}

	return 0;
}
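
/*
 * Illustrative sketch, not part of the upstream file: a hypothetical caller
 * limiting a core to L3 subcaches 0 and 1 (the 0x3 mask is only a placeholder
 * value) would typically save the current mask with amd_get_subcaches() and
 * restore it when done, e.g.:
 *
 *	int old_mask = amd_get_subcaches(cpu);
 *
 *	if (amd_set_subcaches(cpu, 0x3))
 *		pr_warn("cpu %d: L3 partitioning not supported\n", cpu);
 *	...
 *	amd_set_subcaches(cpu, old_mask);
 */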

static void amd_cache_gart(void)
{
	u16 i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	flush_words = kmalloc_array(amd_northbridges.num, sizeof(u32), GFP_KERNEL);
	if (!flush_words) {
		amd_northbridges.flags &= ~AMD_NB_GART;
		pr_notice("Cannot initialize GART flush words, GART support disabled\n");
		return;
	}

	for (i = 0; i != amd_northbridges.num; i++)
		pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c, &flush_words[i]);
}

void amd_flush_garts(void)
{
	int flushed, i;
	unsigned long flags;
	static DEFINE_SPINLOCK(gart_lock);

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	/*
	 * Avoid races between AGP and IOMMU. In theory it's not needed
	 * but I'm not sure if the hardware won't lose flush requests
	 * when another is pending. This whole thing is so expensive anyways
	 * that it doesn't matter to serialize more. -AK
	 */
	spin_lock_irqsave(&gart_lock, flags);
	flushed = 0;
	for (i = 0; i < amd_northbridges.num; i++) {
		pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				       flush_words[i] | 1);
		flushed++;
	}
	for (i = 0; i < amd_northbridges.num; i++) {
		u32 w;
		/* Make sure the hardware actually executed the flush */
		for (;;) {
			pci_read_config_dword(node_to_amd_nb(i)->misc,
					      0x9c, &w);
			if (!(w & 1))
				break;
			cpu_relax();
		}
	}
	spin_unlock_irqrestore(&gart_lock, flags);
	if (!flushed)
		pr_notice("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);

static void __fix_erratum_688(void *info)
{
#define MSR_AMD64_IC_CFG 0xC0011021

	msr_set_bit(MSR_AMD64_IC_CFG, 3);
	msr_set_bit(MSR_AMD64_IC_CFG, 14);
}

/* Apply erratum 688 fix so machines without a BIOS fix work. */
static __init void fix_erratum_688(void)
{
	struct pci_dev *F4;
	u32 val;

	if (boot_cpu_data.x86 != 0x14)
		return;

	if (!amd_northbridges.num)
		return;

	F4 = node_to_amd_nb(0)->link;
	if (!F4)
		return;

	if (pci_read_config_dword(F4, 0x164, &val))
		return;

	if (val & BIT(2))
		return;

	on_each_cpu(__fix_erratum_688, NULL, 0);

	pr_info("x86/cpu/AMD: CPU erratum 688 worked around\n");
}

static __init int init_amd_nbs(void)
{
	amd_cache_northbridges();
	amd_cache_gart();

	fix_erratum_688();

	return 0;
}

/* This has to go after the PCI subsystem */
fs_initcall(init_amd_nbs);
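
/*
 * Illustrative sketch, not part of the upstream file: a hypothetical consumer
 * of the exported SMN helpers could walk the cached northbridges like this,
 * where EXAMPLE_SMN_REG is only a placeholder register offset:
 *
 *	#define EXAMPLE_SMN_REG	0x50000
 *
 *	u16 node;
 *	u32 val;
 *
 *	for (node = 0; node < amd_nb_num(); node++) {
 *		if (!amd_smn_read(node, EXAMPLE_SMN_REG, &val))
 *			pr_info("node %u: 0x%x\n", node, val);
 *	}
 *
 * amd_smn_read() and amd_smn_write() return 0 on success, so the error checks
 * above follow the usual kernel convention.
 */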