/*
 * Copyright 2001-2003 SuSE Labs.
 * Distributed under the GNU public license, v2.
 *
 * This is a GART driver for the AMD Opteron/Athlon64 on-CPU northbridge.
 * It also includes support for the AMD 8151 AGP bridge,
 * although it doesn't actually do much, as all the real
 * work is done in the northbridge(s).
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/agp_backend.h>
#include <linux/mmzone.h>
#include <asm/page.h>	/* PAGE_SIZE */
#include <asm/e820.h>
#include <asm/k8.h>
#include <asm/gart.h>
#include "agp.h"

/* NVIDIA K8 registers */
#define NVIDIA_X86_64_0_APBASE		0x10
#define NVIDIA_X86_64_1_APBASE1		0x50
#define NVIDIA_X86_64_1_APLIMIT1	0x54
#define NVIDIA_X86_64_1_APSIZE		0xa8
#define NVIDIA_X86_64_1_APBASE2		0xd8
#define NVIDIA_X86_64_1_APLIMIT2	0xdc

/* ULi K8 registers */
#define ULI_X86_64_BASE_ADDR		0x10
#define ULI_X86_64_HTT_FEA_REG		0x50
#define ULI_X86_64_ENU_SCR_REG		0x54

static struct resource *aperture_resource;
static int __initdata agp_try_unsupported = 1;
static int agp_bridges_found;

static void amd64_tlbflush(struct agp_memory *temp)
{
	k8_flush_garts();
}

static int amd64_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
{
	int i, j, num_entries;
	long long tmp;
	int mask_type;
	struct agp_bridge_data *bridge = mem->bridge;
	u32 pte;

	num_entries = agp_num_entries();

	if (type != mem->type)
		return -EINVAL;

	mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
	if (mask_type != 0)
		return -EINVAL;

	/* Make sure we can fit the range in the gatt table. */
	/* FIXME: could wrap */
	if (((unsigned long)pg_start + mem->page_count) > num_entries)
		return -EINVAL;

	j = pg_start;

	/* gatt table should be empty. */
	while (j < (pg_start + mem->page_count)) {
		if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j)))
			return -EBUSY;
		j++;
	}

	if (!mem->is_flushed) {
		global_cache_flush();
		mem->is_flushed = true;
	}

	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		tmp = agp_bridge->driver->mask_memory(agp_bridge,
						      page_to_phys(mem->pages[i]),
						      mask_type);

		BUG_ON(tmp & 0xffffff0000000ffcULL);
		pte = (tmp & 0x000000ff00000000ULL) >> 28;
		pte |= (tmp & 0x00000000fffff000ULL);
		pte |= GPTE_VALID | GPTE_COHERENT;

		writel(pte, agp_bridge->gatt_table+j);
		readl(agp_bridge->gatt_table+j);	/* PCI Posting. */
	}
	amd64_tlbflush(mem);
	return 0;
}

/*
 * This hack alters the order element according
 * to the size of a long. It sucks. I totally disown this, even
 * though it does appear to work for the most part.
 */
static struct aper_size_info_32 amd64_aperture_sizes[7] =
{
	{32,   8192,   3+(sizeof(long)/8), 0 },
	{64,   16384,  4+(sizeof(long)/8), 1<<1 },
	{128,  32768,  5+(sizeof(long)/8), 1<<2 },
	{256,  65536,  6+(sizeof(long)/8), 1<<1 | 1<<2 },
	{512,  131072, 7+(sizeof(long)/8), 1<<3 },
	{1024, 262144, 8+(sizeof(long)/8), 1<<1 | 1<<3},
	{2048, 524288, 9+(sizeof(long)/8), 1<<2 | 1<<3}
};

/*
 * Get the current Aperture size from the x86-64.
 * Note that there may be multiple x86-64's, but we just return
 * the value from the first one we find. The set_size functions
 * keep the rest coherent anyway. Or at least should do.
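 *
 * (Illustrative mapping, derived from amd64_aperture_sizes[] above: the
 *  control register value is masked down to bits [3:1], and that encoding
 *  corresponds to 0x0 -> 32 MB, 0x2 -> 64 MB, 0x4 -> 128 MB, 0x6 -> 256 MB,
 *  0x8 -> 512 MB, 0xa -> 1024 MB, 0xc -> 2048 MB.)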
 */
static int amd64_fetch_size(void)
{
	struct pci_dev *dev;
	int i;
	u32 temp;
	struct aper_size_info_32 *values;

	dev = k8_northbridges[0];
	if (dev == NULL)
		return 0;

	pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &temp);
	temp = (temp & 0xe);
	values = A_SIZE_32(amd64_aperture_sizes);

	for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
		if (temp == values[i].size_value) {
			agp_bridge->previous_size =
				agp_bridge->current_size = (void *) (values + i);

			agp_bridge->aperture_size_idx = i;
			return values[i].size;
		}
	}
	return 0;
}

/*
 * In a multiprocessor x86-64 system, this function gets
 * called once for each CPU.
 */
static u64 amd64_configure(struct pci_dev *hammer, u64 gatt_table)
{
	u64 aperturebase;
	u32 tmp;
	u64 aper_base;

	/* Address to map to; do the shift in 64 bits, the base can be above 4G */
	pci_read_config_dword(hammer, AMD64_GARTAPERTUREBASE, &tmp);
	aperturebase = (u64)tmp << 25;
	aper_base = (aperturebase & PCI_BASE_ADDRESS_MEM_MASK);

	enable_gart_translation(hammer, gatt_table);

	return aper_base;
}

static const struct aper_size_info_32 amd_8151_sizes[7] =
{
	{2048, 524288, 9, 0x00000000 },	/* 0 0 0 0 0 0 */
	{1024, 262144, 8, 0x00000400 },	/* 1 0 0 0 0 0 */
	{512,  131072, 7, 0x00000600 },	/* 1 1 0 0 0 0 */
	{256,  65536,  6, 0x00000700 },	/* 1 1 1 0 0 0 */
	{128,  32768,  5, 0x00000720 },	/* 1 1 1 1 0 0 */
	{64,   16384,  4, 0x00000730 },	/* 1 1 1 1 1 0 */
	{32,   8192,   3, 0x00000738 }	/* 1 1 1 1 1 1 */
};

static int amd_8151_configure(void)
{
	unsigned long gatt_bus = virt_to_phys(agp_bridge->gatt_table_real);
	int i;

	/* Configure AGP regs in each x86-64 host bridge. */
	for (i = 0; i < num_k8_northbridges; i++) {
		agp_bridge->gart_bus_addr =
			amd64_configure(k8_northbridges[i], gatt_bus);
	}
	k8_flush_garts();
	return 0;
}

static void amd64_cleanup(void)
{
	u32 tmp;
	int i;

	for (i = 0; i < num_k8_northbridges; i++) {
		struct pci_dev *dev = k8_northbridges[i];
		/* disable gart translation */
		pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &tmp);
		tmp &= ~AMD64_GARTEN;
		pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, tmp);
	}
}

static const struct agp_bridge_driver amd_8151_driver = {
	.owner = THIS_MODULE,
	.aperture_sizes = amd_8151_sizes,
	.size_type = U32_APER_SIZE,
	.num_aperture_sizes = 7,
	.needs_scratch_page = true,
	.configure = amd_8151_configure,
	.fetch_size = amd64_fetch_size,
	.cleanup = amd64_cleanup,
	.tlb_flush = amd64_tlbflush,
	.mask_memory = agp_generic_mask_memory,
	.masks = NULL,
	.agp_enable = agp_generic_enable,
	.cache_flush = global_cache_flush,
	.create_gatt_table = agp_generic_create_gatt_table,
	.free_gatt_table = agp_generic_free_gatt_table,
	.insert_memory = amd64_insert_memory,
	.remove_memory = agp_generic_remove_memory,
	.alloc_by_type = agp_generic_alloc_by_type,
	.free_by_type = agp_generic_free_by_type,
	.agp_alloc_page = agp_generic_alloc_page,
	.agp_alloc_pages = agp_generic_alloc_pages,
	.agp_destroy_page = agp_generic_destroy_page,
	.agp_destroy_pages = agp_generic_destroy_pages,
	.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};

/* Some basic sanity checks for the aperture. */
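/*
 * (Note, for orientation: in addition to the generic aperture_valid() check,
 *  the helper below reserves the aperture as a resource named "aperture",
 *  so a conflicting PCI mapping left behind by a broken BIOS is reported
 *  here rather than causing silent corruption later.)
 */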
static int __devinit agp_aperture_valid(u64 aper, u32 size)
{
	if (!aperture_valid(aper, size, 32*1024*1024))
		return 0;

	/* Request the Aperture. This catches cases when someone else
	   already put a mapping in there - happens with some very broken BIOS

	   Maybe better to use pci_assign_resource/pci_enable_device instead
	   of trusting the bridges? */
	if (!aperture_resource &&
	    !(aperture_resource = request_mem_region(aper, size, "aperture"))) {
		printk(KERN_ERR PFX "Aperture conflicts with PCI mapping.\n");
		return 0;
	}
	return 1;
}

/*
 * W*s centric BIOSes sometimes only set up the aperture in the AGP
 * bridge, not the northbridge. On AMD64 this is handled early
 * in aperture.c, but when IOMMU is not enabled or we run
 * on a 32bit kernel this needs to be redone.
 * Unfortunately it is impossible to fix the aperture here because it's too late
 * to allocate that much memory. But at least error out cleanly instead of
 * crashing.
 */
static __devinit int fix_northbridge(struct pci_dev *nb, struct pci_dev *agp,
				     u16 cap)
{
	u32 aper_low, aper_hi;
	u64 aper, nb_aper;
	int order = 0;
	u32 nb_order, nb_base;
	u16 apsize;

	pci_read_config_dword(nb, AMD64_GARTAPERTURECTL, &nb_order);
	nb_order = (nb_order >> 1) & 7;
	pci_read_config_dword(nb, AMD64_GARTAPERTUREBASE, &nb_base);
	nb_aper = (u64)nb_base << 25;

	/* Northbridge seems to contain crap. Try the AGP bridge. */

	pci_read_config_word(agp, cap+0x14, &apsize);
	if (apsize == 0xffff) {
		if (agp_aperture_valid(nb_aper, (32*1024*1024)<<nb_order))
			return 0;
		return -1;
	}

	apsize &= 0xfff;
	/* Some BIOSes use weird encodings not in the AGPv3 table. */
	if (apsize & 0xff)
		apsize |= 0xf00;
	order = 7 - hweight16(apsize);

	pci_read_config_dword(agp, 0x10, &aper_low);
	pci_read_config_dword(agp, 0x14, &aper_hi);
	aper = (aper_low & ~((1<<22)-1)) | ((u64)aper_hi << 32);

	/*
	 * On some sick chips APSIZE is 0. This means it wants 4G,
	 * so let's double check the order, and trust the AMD NB settings.
	 */
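	/*
	 * (Sanity of the arithmetic below: the aperture spans 32 MB << order
	 *  bytes, so an order of 7 would already be a 4 GB aperture; any
	 *  non-zero base then pushes it past the 32-bit boundary, and the
	 *  northbridge order is used instead.)
	 */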
	if (order >= 0 && aper + (32ULL<<(20 + order)) > 0x100000000ULL) {
		dev_info(&agp->dev, "aperture size %u MB is not right, using settings from NB\n",
			 32 << order);
		order = nb_order;
	}

	if (nb_order >= order) {
		if (agp_aperture_valid(nb_aper, (32*1024*1024)<<nb_order))
			return 0;
	}

	dev_info(&agp->dev, "aperture from AGP @ %Lx size %u MB\n",
		 aper, 32 << order);
	if (order < 0 || !agp_aperture_valid(aper, (32*1024*1024)<<order))
		return -1;

	pci_write_config_dword(nb, AMD64_GARTAPERTURECTL, order << 1);
	pci_write_config_dword(nb, AMD64_GARTAPERTUREBASE, aper >> 25);

	return 0;
}

static __devinit int cache_nbs(struct pci_dev *pdev, u32 cap_ptr)
{
	int i;

	if (cache_k8_northbridges() < 0)
		return -ENODEV;

	for (i = 0; i < num_k8_northbridges; i++) {
		struct pci_dev *dev = k8_northbridges[i];
		if (fix_northbridge(dev, pdev, cap_ptr) < 0) {
			dev_err(&dev->dev, "no usable aperture found\n");
#ifdef __x86_64__
			/* should port this to i386 */
			dev_err(&dev->dev, "consider rebooting with iommu=memaper=2 to get a good aperture\n");
#endif
			return -1;
		}
	}
	return 0;
}

/* Handle AMD 8151 quirks */
static void __devinit amd8151_init(struct pci_dev *pdev, struct agp_bridge_data *bridge)
{
	char *revstring;

	switch (pdev->revision) {
	case 0x01: revstring = "A0"; break;
	case 0x02: revstring = "A1"; break;
	case 0x11: revstring = "B0"; break;
	case 0x12: revstring = "B1"; break;
	case 0x13: revstring = "B2"; break;
	case 0x14: revstring = "B3"; break;
	default:   revstring = "??"; break;
	}

	dev_info(&pdev->dev, "AMD 8151 AGP Bridge rev %s\n", revstring);

	/*
	 * Work around errata.
	 * Chips before B2 stepping incorrectly report v3.5.
	 */
	if (pdev->revision < 0x13) {
		dev_info(&pdev->dev, "correcting AGP revision (reports 3.5, is really 3.0)\n");
		bridge->major_version = 3;
		bridge->minor_version = 0;
	}
}

static const struct aper_size_info_32 uli_sizes[7] =
{
	{256, 65536, 6, 10},
	{128, 32768, 5, 9},
	{64,  16384, 4, 8},
	{32,  8192,  3, 7},
	{16,  4096,  2, 6},
	{8,   2048,  1, 4},
	{4,   1024,  0, 3}
};

static int __devinit uli_agp_init(struct pci_dev *pdev)
{
	u32 httfea, baseaddr, enuscr;
	struct pci_dev *dev1;
	int i, ret;
	unsigned size = amd64_fetch_size();

	dev_info(&pdev->dev, "setting up ULi AGP\n");
	dev1 = pci_get_slot(pdev->bus, PCI_DEVFN(0, 0));
	if (dev1 == NULL) {
		dev_info(&pdev->dev, "can't find ULi secondary device\n");
		return -ENODEV;
	}

	for (i = 0; i < ARRAY_SIZE(uli_sizes); i++)
		if (uli_sizes[i].size == size)
			break;

	if (i == ARRAY_SIZE(uli_sizes)) {
		dev_info(&pdev->dev, "no ULi size found for %d\n", size);
		ret = -ENODEV;
		goto put;
	}

	/* shadow x86-64 registers into ULi registers */
	pci_read_config_dword(k8_northbridges[0], AMD64_GARTAPERTUREBASE, &httfea);

	/* if x86-64 aperture base is beyond 4G, exit here */
	if ((httfea & 0x7fff) >> (32 - 25)) {
		ret = -ENODEV;
		goto put;
	}

	httfea = (httfea & 0x7fff) << 25;

	pci_read_config_dword(pdev, ULI_X86_64_BASE_ADDR, &baseaddr);
	baseaddr &= ~PCI_BASE_ADDRESS_MEM_MASK;
	baseaddr |= httfea;
	pci_write_config_dword(pdev, ULI_X86_64_BASE_ADDR, baseaddr);

	enuscr = httfea + (size * 1024 * 1024) - 1;
	pci_write_config_dword(dev1, ULI_X86_64_HTT_FEA_REG, httfea);
	pci_write_config_dword(dev1, ULI_X86_64_ENU_SCR_REG, enuscr);
	ret = 0;
put:
	/* drop the reference taken by pci_get_slot() on all exit paths */
	pci_dev_put(dev1);
	return ret;
}

static const struct aper_size_info_32 nforce3_sizes[5] =
{
	{512, 131072, 7, 0x00000000 },
	{256, 65536,  6, 0x00000008 },
	{128, 32768,  5, 0x0000000C },
	{64,  16384,  4, 0x0000000E },
	{32,  8192,   3, 0x0000000F }
};

/* Handle shadow device of the Nvidia NForce3 */
/* CHECK-ME original 2.4 version set up some IORRs. Check if that is needed. */
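/*
 * (Summary, for orientation: the init below mirrors the northbridge GART
 *  aperture into the nForce3 host bridge and its shadow device at devfn 11:0 -
 *  the aperture size goes into APSIZE, the base into APBASE, and matching
 *  base/limit pairs into APBASE1/APLIMIT1 and APBASE2/APLIMIT2.)
 */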
static int nforce3_agp_init(struct pci_dev *pdev)
{
	u32 tmp, apbase, apbar, aplimit;
	struct pci_dev *dev1;
	int i, ret;
	unsigned size = amd64_fetch_size();

	dev_info(&pdev->dev, "setting up Nforce3 AGP\n");

	dev1 = pci_get_slot(pdev->bus, PCI_DEVFN(11, 0));
	if (dev1 == NULL) {
		dev_info(&pdev->dev, "can't find Nforce3 secondary device\n");
		return -ENODEV;
	}

	for (i = 0; i < ARRAY_SIZE(nforce3_sizes); i++)
		if (nforce3_sizes[i].size == size)
			break;

	if (i == ARRAY_SIZE(nforce3_sizes)) {
		dev_info(&pdev->dev, "no NForce3 size found for %d\n", size);
		ret = -ENODEV;
		goto put;
	}

	pci_read_config_dword(dev1, NVIDIA_X86_64_1_APSIZE, &tmp);
	tmp &= ~(0xf);
	tmp |= nforce3_sizes[i].size_value;
	pci_write_config_dword(dev1, NVIDIA_X86_64_1_APSIZE, tmp);

	/* shadow x86-64 registers into NVIDIA registers */
	pci_read_config_dword(k8_northbridges[0], AMD64_GARTAPERTUREBASE, &apbase);

	/* if x86-64 aperture base is beyond 4G, exit here */
	if ((apbase & 0x7fff) >> (32 - 25)) {
		dev_info(&pdev->dev, "aperture base > 4G\n");
		ret = -ENODEV;
		goto put;
	}

	apbase = (apbase & 0x7fff) << 25;

	pci_read_config_dword(pdev, NVIDIA_X86_64_0_APBASE, &apbar);
	apbar &= ~PCI_BASE_ADDRESS_MEM_MASK;
	apbar |= apbase;
	pci_write_config_dword(pdev, NVIDIA_X86_64_0_APBASE, apbar);

	aplimit = apbase + (size * 1024 * 1024) - 1;
	pci_write_config_dword(dev1, NVIDIA_X86_64_1_APBASE1, apbase);
	pci_write_config_dword(dev1, NVIDIA_X86_64_1_APLIMIT1, aplimit);
	pci_write_config_dword(dev1, NVIDIA_X86_64_1_APBASE2, apbase);
	pci_write_config_dword(dev1, NVIDIA_X86_64_1_APLIMIT2, aplimit);
	ret = 0;
put:
	/* drop the reference taken by pci_get_slot() on all exit paths */
	pci_dev_put(dev1);

	return ret;
}

static int __devinit agp_amd64_probe(struct pci_dev *pdev,
				     const struct pci_device_id *ent)
{
	struct agp_bridge_data *bridge;
	u8 cap_ptr;
	int err;

	/* The Highlander principle */
	if (agp_bridges_found)
		return -ENODEV;

	cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
	if (!cap_ptr)
		return -ENODEV;

	/* Could check for AGPv3 here */

	bridge = agp_alloc_bridge();
	if (!bridge)
		return -ENOMEM;

	if (pdev->vendor == PCI_VENDOR_ID_AMD &&
	    pdev->device == PCI_DEVICE_ID_AMD_8151_0) {
		amd8151_init(pdev, bridge);
	} else {
		dev_info(&pdev->dev, "AGP bridge [%04x/%04x]\n",
			 pdev->vendor, pdev->device);
	}

	bridge->driver = &amd_8151_driver;
	bridge->dev = pdev;
	bridge->capndx = cap_ptr;

	/* Fill in the mode register */
	pci_read_config_dword(pdev, bridge->capndx+PCI_AGP_STATUS, &bridge->mode);

	if (cache_nbs(pdev, cap_ptr) == -1) {
		agp_put_bridge(bridge);
		return -ENODEV;
	}

	if (pdev->vendor == PCI_VENDOR_ID_NVIDIA) {
		int ret = nforce3_agp_init(pdev);
		if (ret) {
			agp_put_bridge(bridge);
			return ret;
		}
	}

	if (pdev->vendor == PCI_VENDOR_ID_AL) {
		int ret = uli_agp_init(pdev);
		if (ret) {
			agp_put_bridge(bridge);
			return ret;
		}
	}

	pci_set_drvdata(pdev, bridge);
	err = agp_add_bridge(bridge);
	if (err < 0)
		return err;

	agp_bridges_found++;
	return 0;
}

static void __devexit agp_amd64_remove(struct pci_dev *pdev)
{
	struct agp_bridge_data *bridge = pci_get_drvdata(pdev);

	release_mem_region(virt_to_phys(bridge->gatt_table_real),
			   amd64_aperture_sizes[bridge->aperture_size_idx].size);
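	/* Unregister the bridge from the agpgart core, then drop our
	   reference to it. */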
	agp_remove_bridge(bridge);
	agp_put_bridge(bridge);

	agp_bridges_found--;
}

#ifdef CONFIG_PM

static int agp_amd64_suspend(struct pci_dev *pdev, pm_message_t state)
{
	pci_save_state(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

static int agp_amd64_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	if (pdev->vendor == PCI_VENDOR_ID_NVIDIA)
		nforce3_agp_init(pdev);

	return amd_8151_configure();
}

#endif /* CONFIG_PM */

static struct pci_device_id agp_amd64_pci_table[] = {
	{
	.class = (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask = ~0,
	.vendor = PCI_VENDOR_ID_AMD,
	.device = PCI_DEVICE_ID_AMD_8151_0,
	.subvendor = PCI_ANY_ID,
	.subdevice = PCI_ANY_ID,
	},
	/* ULi M1689 */
	{
	.class = (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask = ~0,
	.vendor = PCI_VENDOR_ID_AL,
	.device = PCI_DEVICE_ID_AL_M1689,
	.subvendor = PCI_ANY_ID,
	.subdevice = PCI_ANY_ID,
	},
	/* VIA K8T800Pro */
	{
	.class = (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask = ~0,
	.vendor = PCI_VENDOR_ID_VIA,
	.device = PCI_DEVICE_ID_VIA_K8T800PRO_0,
	.subvendor = PCI_ANY_ID,
	.subdevice = PCI_ANY_ID,
	},
	/* VIA K8T800 */
	{
	.class = (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask = ~0,
	.vendor = PCI_VENDOR_ID_VIA,
	.device = PCI_DEVICE_ID_VIA_8385_0,
	.subvendor = PCI_ANY_ID,
	.subdevice = PCI_ANY_ID,
	},
	/* VIA K8M800 / K8N800 */
	{
	.class = (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask = ~0,
	.vendor = PCI_VENDOR_ID_VIA,
	.device = PCI_DEVICE_ID_VIA_8380_0,
	.subvendor = PCI_ANY_ID,
	.subdevice = PCI_ANY_ID,
	},
	/* VIA K8M890 / K8N890 */
	{
	.class = (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask = ~0,
	.vendor = PCI_VENDOR_ID_VIA,
	.device = PCI_DEVICE_ID_VIA_VT3336,
	.subvendor = PCI_ANY_ID,
	.subdevice = PCI_ANY_ID,
	},
	/* VIA K8T890 */
	{
	.class = (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask = ~0,
	.vendor = PCI_VENDOR_ID_VIA,
	.device = PCI_DEVICE_ID_VIA_3238_0,
	.subvendor = PCI_ANY_ID,
	.subdevice = PCI_ANY_ID,
	},
	/* VIA K8T800/K8M800/K8N800 */
	{
	.class = (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask = ~0,
	.vendor = PCI_VENDOR_ID_VIA,
	.device = PCI_DEVICE_ID_VIA_838X_1,
	.subvendor = PCI_ANY_ID,
	.subdevice = PCI_ANY_ID,
	},
	/* NForce3 */
	{
	.class = (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask = ~0,
	.vendor = PCI_VENDOR_ID_NVIDIA,
	.device = PCI_DEVICE_ID_NVIDIA_NFORCE3,
	.subvendor = PCI_ANY_ID,
	.subdevice = PCI_ANY_ID,
	},
	{
	.class = (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask = ~0,
	.vendor = PCI_VENDOR_ID_NVIDIA,
	.device = PCI_DEVICE_ID_NVIDIA_NFORCE3S,
	.subvendor = PCI_ANY_ID,
	.subdevice = PCI_ANY_ID,
	},
	/* SIS 755 */
	{
	.class = (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask = ~0,
	.vendor = PCI_VENDOR_ID_SI,
	.device = PCI_DEVICE_ID_SI_755,
	.subvendor = PCI_ANY_ID,
	.subdevice = PCI_ANY_ID,
	},
	/* SIS 760 */
	{
	.class = (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask = ~0,
	.vendor = PCI_VENDOR_ID_SI,
	.device = PCI_DEVICE_ID_SI_760,
	.subvendor = PCI_ANY_ID,
	.subdevice = PCI_ANY_ID,
	},
	/* ALI/ULI M1695 */
	{
	.class = (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask = ~0,
	.vendor = PCI_VENDOR_ID_AL,
	.device = 0x1695,
	.subvendor = PCI_ANY_ID,
	.subdevice = PCI_ANY_ID,
	},

	{ }
};

MODULE_DEVICE_TABLE(pci, agp_amd64_pci_table);

static DEFINE_PCI_DEVICE_TABLE(agp_amd64_pci_promisc_table) = {
	{ PCI_DEVICE_CLASS(0, 0) },
	{ }
};

static struct pci_driver agp_amd64_pci_driver = {
	.name = "agpgart-amd64",
	.id_table = agp_amd64_pci_table,
	.probe = agp_amd64_probe,
	.remove = agp_amd64_remove,
#ifdef CONFIG_PM
	.suspend = agp_amd64_suspend,
	.resume = agp_amd64_resume,
#endif
};

/* Not static due to IOMMU code calling it early. */
int __init agp_amd64_init(void)
{
	int err = 0;

	if (agp_off)
		return -EINVAL;

	err = pci_register_driver(&agp_amd64_pci_driver);
	if (err < 0)
		return err;

	if (agp_bridges_found == 0) {
		if (!agp_try_unsupported && !agp_try_unsupported_boot) {
			printk(KERN_INFO PFX "No supported AGP bridge found.\n");
#ifdef MODULE
			printk(KERN_INFO PFX "You can try agp_try_unsupported=1\n");
#else
			printk(KERN_INFO PFX "You can boot with agp=try_unsupported\n");
#endif
			return -ENODEV;
		}

		/* First check that we have at least one AMD64 NB */
		if (!pci_dev_present(k8_nb_ids))
			return -ENODEV;

		/* Look for any AGP bridge */
		agp_amd64_pci_driver.id_table = agp_amd64_pci_promisc_table;
		err = driver_attach(&agp_amd64_pci_driver.driver);
		if (err == 0 && agp_bridges_found == 0)
			err = -ENODEV;
	}
	return err;
}

static int __init agp_amd64_mod_init(void)
{
#ifndef MODULE
	if (gart_iommu_aperture)
		return agp_bridges_found ? 0 : -ENODEV;
#endif
	return agp_amd64_init();
}

static void __exit agp_amd64_cleanup(void)
{
#ifndef MODULE
	if (gart_iommu_aperture)
		return;
#endif
	if (aperture_resource)
		release_resource(aperture_resource);
	pci_unregister_driver(&agp_amd64_pci_driver);
}

module_init(agp_amd64_mod_init);
module_exit(agp_amd64_cleanup);

MODULE_AUTHOR("Dave Jones <davej@redhat.com>, Andi Kleen");
module_param(agp_try_unsupported, bool, 0);
MODULE_LICENSE("GPL");
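
/*
 * Usage note: if no supported AGP bridge is found, the fallback path in
 * agp_amd64_init() can be enabled with the agp_try_unsupported=1 module
 * parameter (or the agp=try_unsupported boot option when built in),
 * provided an AMD64 northbridge is present.
 */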