/*
 * AGPGART driver.
 * Copyright (C) 2004 Silicon Graphics, Inc.
 * Copyright (C) 2002-2005 Dave Jones.
 * Copyright (C) 1999 Jeff Hartmann.
 * Copyright (C) 1999 Precision Insight, Inc.
 * Copyright (C) 1999 Xi Graphics, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
 * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * TODO:
 * - Allocate more than order 0 pages to avoid too much linear map splitting.
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/miscdevice.h>
#include <linux/pm.h>
#include <linux/agp_backend.h>
#include <linux/vmalloc.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <asm/cacheflush.h>
#include <asm/pgtable.h>
#include "agp.h"

__u32 *agp_gatt_table;
int agp_memory_reserved;

/*
 * Needed by the Nforce GART driver for the time being. Would be
 * nice to do this some other way instead of needing this export.
 */
EXPORT_SYMBOL_GPL(agp_memory_reserved);

/*
 * Generic routines for handling agp_memory structures -
 * They use the basic page allocation routines to do the brunt of the work.
 */

void agp_free_key(int key)
{
	if (key < 0)
		return;

	if (key < MAXKEY)
		clear_bit(key, agp_bridge->key_list);
}
EXPORT_SYMBOL(agp_free_key);


static int agp_get_key(void)
{
	int bit;

	bit = find_first_zero_bit(agp_bridge->key_list, MAXKEY);
	if (bit < MAXKEY) {
		set_bit(bit, agp_bridge->key_list);
		return bit;
	}
	return -1;
}

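/*
 * Illustrative sketch (not part of the driver): agp_get_key()/agp_free_key()
 * above are the classic bitmap ID allocator built from the generic bitops.
 * A minimal standalone version of the same pattern, with a hypothetical
 * MY_MAX_IDS bitmap, would look like this:
 *
 *	static DECLARE_BITMAP(my_id_map, MY_MAX_IDS);
 *
 *	static int my_get_id(void)
 *	{
 *		int bit = find_first_zero_bit(my_id_map, MY_MAX_IDS);
 *
 *		if (bit >= MY_MAX_IDS)
 *			return -1;
 *		set_bit(bit, my_id_map);
 *		return bit;
 *	}
 *
 *	static void my_put_id(int id)
 *	{
 *		if (id >= 0 && id < MY_MAX_IDS)
 *			clear_bit(id, my_id_map);
 *	}
 *
 * Note that find_first_zero_bit() plus set_bit() is not atomic as a pair,
 * so a user whose callers can race needs a lock around the allocation.
 */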

/*
 * Use kmalloc if possible for the page list. Otherwise fall back to
 * vmalloc. This speeds things up and also saves memory for small AGP
 * regions.
 */

void agp_alloc_page_array(size_t size, struct agp_memory *mem)
{
	mem->pages = NULL;

	if (size <= 2*PAGE_SIZE)
		mem->pages = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
	if (mem->pages == NULL) {
		mem->pages = vmalloc(size);
	}
}
EXPORT_SYMBOL(agp_alloc_page_array);

void agp_free_page_array(struct agp_memory *mem)
{
	if (is_vmalloc_addr(mem->pages)) {
		vfree(mem->pages);
	} else {
		kfree(mem->pages);
	}
}
EXPORT_SYMBOL(agp_free_page_array);


static struct agp_memory *agp_create_user_memory(unsigned long num_agp_pages)
{
	struct agp_memory *new;
	unsigned long alloc_size = num_agp_pages*sizeof(struct page *);

	new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL);
	if (new == NULL)
		return NULL;

	new->key = agp_get_key();

	if (new->key < 0) {
		kfree(new);
		return NULL;
	}

	agp_alloc_page_array(alloc_size, new);

	if (new->pages == NULL) {
		agp_free_key(new->key);
		kfree(new);
		return NULL;
	}
	new->num_scratch_pages = 0;
	return new;
}

struct agp_memory *agp_create_memory(int scratch_pages)
{
	struct agp_memory *new;

	new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL);
	if (new == NULL)
		return NULL;

	new->key = agp_get_key();

	if (new->key < 0) {
		kfree(new);
		return NULL;
	}

	agp_alloc_page_array(PAGE_SIZE * scratch_pages, new);

	if (new->pages == NULL) {
		agp_free_key(new->key);
		kfree(new);
		return NULL;
	}
	new->num_scratch_pages = scratch_pages;
	new->type = AGP_NORMAL_MEMORY;
	return new;
}
EXPORT_SYMBOL(agp_create_memory);

/**
 * agp_free_memory - free memory associated with an agp_memory pointer.
 *
 * @curr: agp_memory pointer to be freed.
 *
 * It is the only function that can be called when the backend is not owned
 * by the caller. (So it can free memory on client death.)
 */
void agp_free_memory(struct agp_memory *curr)
{
	size_t i;

	if (curr == NULL)
		return;

	if (curr->is_bound)
		agp_unbind_memory(curr);

	if (curr->type >= AGP_USER_TYPES) {
		agp_generic_free_by_type(curr);
		return;
	}

	if (curr->type != 0) {
		curr->bridge->driver->free_by_type(curr);
		return;
	}
	if (curr->page_count != 0) {
		if (curr->bridge->driver->agp_destroy_pages) {
			curr->bridge->driver->agp_destroy_pages(curr);
		} else {

			for (i = 0; i < curr->page_count; i++) {
				curr->bridge->driver->agp_destroy_page(
					curr->pages[i],
					AGP_PAGE_DESTROY_UNMAP);
			}
			for (i = 0; i < curr->page_count; i++) {
				curr->bridge->driver->agp_destroy_page(
					curr->pages[i],
					AGP_PAGE_DESTROY_FREE);
			}
		}
	}
	agp_free_key(curr->key);
	agp_free_page_array(curr);
	kfree(curr);
}
EXPORT_SYMBOL(agp_free_memory);

#define ENTRIES_PER_PAGE	(PAGE_SIZE / sizeof(unsigned long))

/**
 * agp_allocate_memory - allocate a group of pages of a certain type.
 *
 * @bridge: an agp_bridge_data struct allocated for the AGP host bridge.
 * @page_count: size_t argument of the number of pages.
 * @type: u32 argument of the type of memory to be allocated.
 *
 * Every agp bridge device will allow you to allocate AGP_NORMAL_MEMORY which
 * maps to physical RAM. Any other type is device dependent.
 *
 * It returns NULL whenever memory is unavailable.
 */
struct agp_memory *agp_allocate_memory(struct agp_bridge_data *bridge,
					size_t page_count, u32 type)
{
	int scratch_pages;
	struct agp_memory *new;
	size_t i;

	if (!bridge)
		return NULL;

	if ((atomic_read(&bridge->current_memory_agp) + page_count) > bridge->max_memory_agp)
		return NULL;

	if (type >= AGP_USER_TYPES) {
		new = agp_generic_alloc_user(page_count, type);
		if (new)
			new->bridge = bridge;
		return new;
	}

	if (type != 0) {
		new = bridge->driver->alloc_by_type(page_count, type);
		if (new)
			new->bridge = bridge;
		return new;
	}

	scratch_pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;

	new = agp_create_memory(scratch_pages);

	if (new == NULL)
		return NULL;

	if (bridge->driver->agp_alloc_pages) {
		if (bridge->driver->agp_alloc_pages(bridge, new, page_count)) {
			agp_free_memory(new);
			return NULL;
		}
		new->bridge = bridge;
		return new;
	}

	for (i = 0; i < page_count; i++) {
		struct page *page = bridge->driver->agp_alloc_page(bridge);

		if (page == NULL) {
			agp_free_memory(new);
			return NULL;
		}
		new->pages[i] = page;
		new->page_count++;
	}
	new->bridge = bridge;

	return new;
}
EXPORT_SYMBOL(agp_allocate_memory);


/* End - Generic routines for handling agp_memory structures */


static int agp_return_size(void)
{
	int current_size;
	void *temp;

	temp = agp_bridge->current_size;

	switch (agp_bridge->driver->size_type) {
	case U8_APER_SIZE:
		current_size = A_SIZE_8(temp)->size;
		break;
	case U16_APER_SIZE:
		current_size = A_SIZE_16(temp)->size;
		break;
	case U32_APER_SIZE:
		current_size = A_SIZE_32(temp)->size;
		break;
	case LVL2_APER_SIZE:
		current_size = A_SIZE_LVL2(temp)->size;
		break;
	case FIXED_APER_SIZE:
		current_size = A_SIZE_FIX(temp)->size;
		break;
	default:
		current_size = 0;
		break;
	}

	current_size -= (agp_memory_reserved / (1024*1024));
	if (current_size < 0)
		current_size = 0;
	return current_size;
}


int agp_num_entries(void)
{
	int num_entries;
	void *temp;

	temp = agp_bridge->current_size;

	switch (agp_bridge->driver->size_type) {
	case U8_APER_SIZE:
		num_entries = A_SIZE_8(temp)->num_entries;
		break;
	case U16_APER_SIZE:
		num_entries = A_SIZE_16(temp)->num_entries;
		break;
	case U32_APER_SIZE:
		num_entries = A_SIZE_32(temp)->num_entries;
		break;
	case LVL2_APER_SIZE:
		num_entries = A_SIZE_LVL2(temp)->num_entries;
		break;
	case FIXED_APER_SIZE:
		num_entries = A_SIZE_FIX(temp)->num_entries;
		break;
	default:
		num_entries = 0;
		break;
	}

	num_entries -= agp_memory_reserved >> PAGE_SHIFT;
	if (num_entries < 0)
		num_entries = 0;
	return num_entries;
}
EXPORT_SYMBOL_GPL(agp_num_entries);


/**
 * agp_copy_info - copy bridge state information
 *
 * @bridge: an agp_bridge_data struct allocated for the AGP host bridge.
 * @info: agp_kern_info pointer. The caller should ensure that this pointer is valid.
 *
 * This function copies information about the agp bridge device and the state of
 * the agp backend into an agp_kern_info pointer.
 */
int agp_copy_info(struct agp_bridge_data *bridge, struct agp_kern_info *info)
{
	memset(info, 0, sizeof(struct agp_kern_info));
	if (!bridge) {
		info->chipset = NOT_SUPPORTED;
		return -EIO;
	}

	info->version.major = bridge->version->major;
	info->version.minor = bridge->version->minor;
	info->chipset = SUPPORTED;
	info->device = bridge->dev;
	if (bridge->mode & AGPSTAT_MODE_3_0)
		info->mode = bridge->mode & ~AGP3_RESERVED_MASK;
	else
		info->mode = bridge->mode & ~AGP2_RESERVED_MASK;
	info->aper_base = bridge->gart_bus_addr;
	info->aper_size = agp_return_size();
	info->max_memory = bridge->max_memory_agp;
	info->current_memory = atomic_read(&bridge->current_memory_agp);
	info->cant_use_aperture = bridge->driver->cant_use_aperture;
	info->vm_ops = bridge->vm_ops;
	info->page_mask = ~0UL;
	return 0;
}
EXPORT_SYMBOL(agp_copy_info);

/* End - Routine to copy over information structure */

/*
 * Routines for handling swapping of agp_memory into the GATT -
 * These routines take agp_memory and insert them into the GATT.
 * They call device specific routines to actually write to the GATT.
 */

/**
 * agp_bind_memory - Bind an agp_memory structure into the GATT.
 *
 * @curr: agp_memory pointer
 * @pg_start: an offset into the graphics aperture translation table
 *
 * It returns -EINVAL if the pointer == NULL.
 * It returns -EBUSY if the area of the table requested is already in use.
 */
int agp_bind_memory(struct agp_memory *curr, off_t pg_start)
{
	int ret_val;

	if (curr == NULL)
		return -EINVAL;

	if (curr->is_bound) {
		printk(KERN_INFO PFX "memory %p is already bound!\n", curr);
		return -EINVAL;
	}
	if (!curr->is_flushed) {
		curr->bridge->driver->cache_flush();
		curr->is_flushed = true;
	}

	ret_val = curr->bridge->driver->insert_memory(curr, pg_start, curr->type);

	if (ret_val != 0)
		return ret_val;

	curr->is_bound = true;
	curr->pg_start = pg_start;
	spin_lock(&agp_bridge->mapped_lock);
	list_add(&curr->mapped_list, &agp_bridge->mapped_list);
	spin_unlock(&agp_bridge->mapped_lock);

	return 0;
}
EXPORT_SYMBOL(agp_bind_memory);

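/*
 * Illustrative sketch (not compiled as part of this file): the typical
 * lifetime of an agp_memory object as seen by an in-kernel AGP user.  The
 * bridge pointer, the page count of 16 and the aperture offset pg_start are
 * made up for the example, and error handling is trimmed to the essentials.
 *
 *	struct agp_memory *mem;
 *
 *	mem = agp_allocate_memory(bridge, 16, AGP_NORMAL_MEMORY);
 *	if (mem == NULL)
 *		return -ENOMEM;
 *
 *	if (agp_bind_memory(mem, pg_start) != 0) {
 *		agp_free_memory(mem);
 *		return -EBUSY;
 *	}
 *
 *	(the device now accesses the pages through the aperture range)
 *
 *	agp_unbind_memory(mem);
 *	agp_free_memory(mem);
 *
 * agp_free_memory() unbinds a still-bound object by itself, so the explicit
 * agp_unbind_memory() call (defined just below) is only needed when the
 * memory will be rebound later.
 */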

/**
 * agp_unbind_memory - Removes an agp_memory structure from the GATT
 *
 * @curr: agp_memory pointer to be removed from the GATT.
 *
 * It returns -EINVAL if this piece of agp_memory is not currently bound to
 * the graphics aperture translation table or if the agp_memory pointer == NULL
 */
int agp_unbind_memory(struct agp_memory *curr)
{
	int ret_val;

	if (curr == NULL)
		return -EINVAL;

	if (!curr->is_bound) {
		printk(KERN_INFO PFX "memory %p was not bound!\n", curr);
		return -EINVAL;
	}

	ret_val = curr->bridge->driver->remove_memory(curr, curr->pg_start, curr->type);

	if (ret_val != 0)
		return ret_val;

	curr->is_bound = false;
	curr->pg_start = 0;
	spin_lock(&curr->bridge->mapped_lock);
	list_del(&curr->mapped_list);
	spin_unlock(&curr->bridge->mapped_lock);
	return 0;
}
EXPORT_SYMBOL(agp_unbind_memory);


/* End - Routines for handling swapping of agp_memory into the GATT */


/* Generic AGP routines - Start */
static void agp_v2_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_agpstat)
{
	u32 tmp;

	if (*requested_mode & AGP2_RESERVED_MASK) {
		printk(KERN_INFO PFX "reserved bits set (%x) in mode 0x%x. Fixed.\n",
			*requested_mode & AGP2_RESERVED_MASK, *requested_mode);
		*requested_mode &= ~AGP2_RESERVED_MASK;
	}

	/*
	 * Some dumb bridges are programmed to disobey the AGP2 spec.
	 * This is more likely a BIOS misprogramming than a power-on default,
	 * or it would be a lot more common.
	 * https://bugs.freedesktop.org/show_bug.cgi?id=8816
	 * AGPv2 spec 6.1.9 states:
	 *	The RATE field indicates the data transfer rates supported by this
	 *	device. A.G.P. devices must report all that apply.
	 * Fix them up as best we can.
	 */
	switch (*bridge_agpstat & 7) {
	case 4:
		*bridge_agpstat |= (AGPSTAT2_2X | AGPSTAT2_1X);
		printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x4 rate. "
			"Fixing up support for x2 & x1\n");
		break;
	case 2:
		*bridge_agpstat |= AGPSTAT2_1X;
		printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x2 rate. "
			"Fixing up support for x1\n");
		break;
	default:
		break;
	}

	/* Check the speed bits make sense. Only one should be set. */
	tmp = *requested_mode & 7;
	switch (tmp) {
	case 0:
		printk(KERN_INFO PFX "%s tried to set rate=x0. Setting to x1 mode.\n", current->comm);
		*requested_mode |= AGPSTAT2_1X;
		break;
	case 1:
	case 2:
		break;
	case 3:
		*requested_mode &= ~(AGPSTAT2_1X);	/* rate=2 */
		break;
	case 4:
		break;
	case 5:
	case 6:
	case 7:
		*requested_mode &= ~(AGPSTAT2_1X|AGPSTAT2_2X);	/* rate=4 */
		break;
	}

	/* disable SBA if it's not supported */
	if (!((*bridge_agpstat & AGPSTAT_SBA) && (*vga_agpstat & AGPSTAT_SBA) && (*requested_mode & AGPSTAT_SBA)))
		*bridge_agpstat &= ~AGPSTAT_SBA;

	/* Set rate */
	if (!((*bridge_agpstat & AGPSTAT2_4X) && (*vga_agpstat & AGPSTAT2_4X) && (*requested_mode & AGPSTAT2_4X)))
		*bridge_agpstat &= ~AGPSTAT2_4X;

	if (!((*bridge_agpstat & AGPSTAT2_2X) && (*vga_agpstat & AGPSTAT2_2X) && (*requested_mode & AGPSTAT2_2X)))
		*bridge_agpstat &= ~AGPSTAT2_2X;

	if (!((*bridge_agpstat & AGPSTAT2_1X) && (*vga_agpstat & AGPSTAT2_1X) && (*requested_mode & AGPSTAT2_1X)))
		*bridge_agpstat &= ~AGPSTAT2_1X;

	/* Now we know what mode it should be, clear out the unwanted bits. */
	if (*bridge_agpstat & AGPSTAT2_4X)
		*bridge_agpstat &= ~(AGPSTAT2_1X | AGPSTAT2_2X);	/* 4X */

	if (*bridge_agpstat & AGPSTAT2_2X)
		*bridge_agpstat &= ~(AGPSTAT2_1X | AGPSTAT2_4X);	/* 2X */

	if (*bridge_agpstat & AGPSTAT2_1X)
		*bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);	/* 1X */

	/* Apply any errata. */
	if (agp_bridge->flags & AGP_ERRATA_FASTWRITES)
		*bridge_agpstat &= ~AGPSTAT_FW;

	if (agp_bridge->flags & AGP_ERRATA_SBA)
		*bridge_agpstat &= ~AGPSTAT_SBA;

	if (agp_bridge->flags & AGP_ERRATA_1X) {
		*bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);
		*bridge_agpstat |= AGPSTAT2_1X;
	}

	/* If we've dropped down to 1X, disable fast writes. */
	if (*bridge_agpstat & AGPSTAT2_1X)
		*bridge_agpstat &= ~AGPSTAT_FW;
}

/*
 * requested_mode = Mode requested by (typically) X.
 * bridge_agpstat = PCI_AGP_STATUS from the AGP bridge.
 * vga_agpstat = PCI_AGP_STATUS from the graphics card.
 */
static void agp_v3_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_agpstat)
{
	u32 origbridge = *bridge_agpstat, origvga = *vga_agpstat;
	u32 tmp;

	if (*requested_mode & AGP3_RESERVED_MASK) {
		printk(KERN_INFO PFX "reserved bits set (%x) in mode 0x%x. Fixed.\n",
			*requested_mode & AGP3_RESERVED_MASK, *requested_mode);
		*requested_mode &= ~AGP3_RESERVED_MASK;
	}

	/* Check the speed bits make sense. */
	tmp = *requested_mode & 7;
	if (tmp == 0) {
		printk(KERN_INFO PFX "%s tried to set rate=x0. Setting to AGP3 x4 mode.\n", current->comm);
		*requested_mode |= AGPSTAT3_4X;
	}
	if (tmp >= 3) {
		printk(KERN_INFO PFX "%s tried to set rate=x%d. Setting to AGP3 x8 mode.\n", current->comm, tmp * 4);
		*requested_mode = (*requested_mode & ~7) | AGPSTAT3_8X;
	}

	/* ARQSZ - Set the value to the maximum one.
	 * Don't allow the mode register to override values. */
	*bridge_agpstat = ((*bridge_agpstat & ~AGPSTAT_ARQSZ) |
		max_t(u32, (*bridge_agpstat & AGPSTAT_ARQSZ), (*vga_agpstat & AGPSTAT_ARQSZ)));

	/* Calibration cycle.
	 * Don't allow the mode register to override values. */
	*bridge_agpstat = ((*bridge_agpstat & ~AGPSTAT_CAL_MASK) |
		min_t(u32, (*bridge_agpstat & AGPSTAT_CAL_MASK), (*vga_agpstat & AGPSTAT_CAL_MASK)));

	/* SBA *must* be supported for AGP v3 */
	*bridge_agpstat |= AGPSTAT_SBA;

	/*
	 * Set speed.
	 * Check for invalid speeds. This can happen when applications
	 * written before the AGP 3.0 standard pass AGP2.x modes to AGP3 hardware.
	 */
	if (*requested_mode & AGPSTAT_MODE_3_0) {
		/*
		 * The caller doesn't know what it is doing. The bridge is in
		 * 3.0 mode and we have been passed a 3.0 mode, but with 2.x
		 * speed bits set. Convert AGP2.x 4x -> AGP3.0 4x.
		 */
		if (*requested_mode & AGPSTAT2_4X) {
			printk(KERN_INFO PFX "%s passes broken AGP3 flags (%x). Fixed.\n",
				current->comm, *requested_mode);
			*requested_mode &= ~AGPSTAT2_4X;
			*requested_mode |= AGPSTAT3_4X;
		}
	} else {
		/*
		 * The caller doesn't know what it is doing. We are in 3.0 mode,
		 * but have been passed an AGP 2.x mode.
		 * Convert AGP 1x,2x,4x -> AGP 3.0 4x.
		 */
		printk(KERN_INFO PFX "%s passes broken AGP2 flags (%x) in AGP3 mode. Fixed.\n",
			current->comm, *requested_mode);
		*requested_mode &= ~(AGPSTAT2_4X | AGPSTAT2_2X | AGPSTAT2_1X);
		*requested_mode |= AGPSTAT3_4X;
	}

	if (*requested_mode & AGPSTAT3_8X) {
		if (!(*bridge_agpstat & AGPSTAT3_8X)) {
			*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
			*bridge_agpstat |= AGPSTAT3_4X;
			printk(KERN_INFO PFX "%s requested AGPx8 but bridge not capable.\n", current->comm);
			return;
		}
		if (!(*vga_agpstat & AGPSTAT3_8X)) {
			*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
			*bridge_agpstat |= AGPSTAT3_4X;
			printk(KERN_INFO PFX "%s requested AGPx8 but graphics card not capable.\n", current->comm);
			return;
		}
		/* All set, bridge & device can do AGP x8 */
		*bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
		goto done;

	} else if (*requested_mode & AGPSTAT3_4X) {
		*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
		*bridge_agpstat |= AGPSTAT3_4X;
		goto done;

	} else {

		/*
		 * If no AGP mode was specified, check whether both the
		 * graphics card and the bridge can do x8, and use it if so.
		 * If not, fall back to x4 mode.
		 */
		if ((*bridge_agpstat & AGPSTAT3_8X) && (*vga_agpstat & AGPSTAT3_8X)) {
			printk(KERN_INFO PFX "No AGP mode specified. Setting to highest mode "
				"supported by bridge & card (x8).\n");
			*bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
			*vga_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
		} else {
			printk(KERN_INFO PFX "Fell back to AGPx4 mode because");
			if (!(*bridge_agpstat & AGPSTAT3_8X)) {
				printk(KERN_INFO PFX "bridge couldn't do x8. bridge_agpstat:%x (orig=%x)\n",
					*bridge_agpstat, origbridge);
				*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
				*bridge_agpstat |= AGPSTAT3_4X;
			}
			if (!(*vga_agpstat & AGPSTAT3_8X)) {
				printk(KERN_INFO PFX "graphics card couldn't do x8. vga_agpstat:%x (orig=%x)\n",
					*vga_agpstat, origvga);
				*vga_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
				*vga_agpstat |= AGPSTAT3_4X;
			}
		}
	}

done:
	/* Apply any errata. */
	if (agp_bridge->flags & AGP_ERRATA_FASTWRITES)
		*bridge_agpstat &= ~AGPSTAT_FW;

	if (agp_bridge->flags & AGP_ERRATA_SBA)
		*bridge_agpstat &= ~AGPSTAT_SBA;

	if (agp_bridge->flags & AGP_ERRATA_1X) {
		*bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);
		*bridge_agpstat |= AGPSTAT2_1X;
	}
}

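/*
 * Illustrative sketch (not part of the driver): both parse routines above
 * boil down to "a rate may only be used if the bridge, the graphics card
 * and the requested mode all support it, and the highest surviving rate
 * wins".  For the AGP 2.x case, that selection on its own could be written
 * roughly like this (the helper name is hypothetical):
 *
 *	static u32 v2_pick_rate(u32 bridge, u32 vga, u32 requested)
 *	{
 *		u32 common = bridge & vga & requested &
 *			     (AGPSTAT2_4X | AGPSTAT2_2X | AGPSTAT2_1X);
 *
 *		if (common & AGPSTAT2_4X)
 *			return AGPSTAT2_4X;
 *		if (common & AGPSTAT2_2X)
 *			return AGPSTAT2_2X;
 *		return AGPSTAT2_1X;
 *	}
 *
 * The real code additionally fixes up BIOSes that violate the AGP2 RATE
 * reporting rules and applies per-chipset errata flags, which is why it
 * works on the status words in place instead of using a helper like this.
 */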


/**
 * agp_collect_device_status - determine correct agp_cmd from various agp_stat's
 * @bridge: an agp_bridge_data struct allocated for the AGP host bridge.
 * @requested_mode: requested agp_stat from userspace (typically from X).
 * @bridge_agpstat: current agp_stat from the AGP bridge.
 *
 * This function will hunt for an AGP graphics card, and try to match
 * the requested mode to the capabilities of both the bridge and the card.
 */
u32 agp_collect_device_status(struct agp_bridge_data *bridge, u32 requested_mode, u32 bridge_agpstat)
{
	struct pci_dev *device = NULL;
	u32 vga_agpstat;
	u8 cap_ptr;

	for (;;) {
		device = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, device);
		if (!device) {
			printk(KERN_INFO PFX "Couldn't find an AGP VGA controller.\n");
			return 0;
		}
		cap_ptr = pci_find_capability(device, PCI_CAP_ID_AGP);
		if (cap_ptr)
			break;
	}

	/*
	 * Ok, here we have an AGP device. Disable impossible
	 * settings, and adjust the read queue to the minimum.
	 */
	pci_read_config_dword(device, cap_ptr+PCI_AGP_STATUS, &vga_agpstat);

	/* adjust RQ depth */
	bridge_agpstat = ((bridge_agpstat & ~AGPSTAT_RQ_DEPTH) |
		min_t(u32, (requested_mode & AGPSTAT_RQ_DEPTH),
			min_t(u32, (bridge_agpstat & AGPSTAT_RQ_DEPTH), (vga_agpstat & AGPSTAT_RQ_DEPTH))));

	/* disable FW if it's not supported */
	if (!((bridge_agpstat & AGPSTAT_FW) &&
		(vga_agpstat & AGPSTAT_FW) &&
		(requested_mode & AGPSTAT_FW)))
		bridge_agpstat &= ~AGPSTAT_FW;

	/* Check to see if we are operating in 3.0 mode */
	if (agp_bridge->mode & AGPSTAT_MODE_3_0)
		agp_v3_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat);
	else
		agp_v2_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat);

	pci_dev_put(device);
	return bridge_agpstat;
}
EXPORT_SYMBOL(agp_collect_device_status);


void agp_device_command(u32 bridge_agpstat, bool agp_v3)
{
	struct pci_dev *device = NULL;
	int mode;

	mode = bridge_agpstat & 0x7;
	if (agp_v3)
		mode *= 4;

	for_each_pci_dev(device) {
		u8 agp = pci_find_capability(device, PCI_CAP_ID_AGP);
		if (!agp)
			continue;

		dev_info(&device->dev, "putting AGP V%d device into %dx mode\n",
			agp_v3 ? 3 : 2, mode);
		pci_write_config_dword(device, agp + PCI_AGP_COMMAND, bridge_agpstat);
	}
}
EXPORT_SYMBOL(agp_device_command);


void get_agp_version(struct agp_bridge_data *bridge)
{
	u32 ncapid;

	/* Exit early if already set by errata workarounds. */
	if (bridge->major_version != 0)
		return;

	pci_read_config_dword(bridge->dev, bridge->capndx, &ncapid);
	bridge->major_version = (ncapid >> AGP_MAJOR_VERSION_SHIFT) & 0xf;
	bridge->minor_version = (ncapid >> AGP_MINOR_VERSION_SHIFT) & 0xf;
}
EXPORT_SYMBOL(get_agp_version);


void agp_generic_enable(struct agp_bridge_data *bridge, u32 requested_mode)
{
	u32 bridge_agpstat, temp;

	get_agp_version(agp_bridge);

	dev_info(&agp_bridge->dev->dev, "AGP %d.%d bridge\n",
		agp_bridge->major_version, agp_bridge->minor_version);

	pci_read_config_dword(agp_bridge->dev,
		agp_bridge->capndx + PCI_AGP_STATUS, &bridge_agpstat);

	bridge_agpstat = agp_collect_device_status(agp_bridge, requested_mode, bridge_agpstat);
	if (bridge_agpstat == 0)
		/* Something bad happened. FIXME: Return error code? */
		return;

	bridge_agpstat |= AGPSTAT_AGP_ENABLE;

	/* Do AGP version specific frobbing. */
	if (bridge->major_version >= 3) {
		if (bridge->mode & AGPSTAT_MODE_3_0) {
			/* If we have 3.5, we can do the isoch stuff. */
			if (bridge->minor_version >= 5)
				agp_3_5_enable(bridge);
			agp_device_command(bridge_agpstat, true);
			return;
		} else {
			/* Disable calibration cycle in RX91<1> when not in AGP3.0 mode of operation. */
			bridge_agpstat &= ~(7<<10);
			pci_read_config_dword(bridge->dev,
				bridge->capndx+AGPCTRL, &temp);
			temp |= (1<<9);
			pci_write_config_dword(bridge->dev,
				bridge->capndx+AGPCTRL, temp);

			dev_info(&bridge->dev->dev, "bridge is in legacy mode, falling back to 2.x\n");
		}
	}

	/* AGP v<3 */
	agp_device_command(bridge_agpstat, false);
}
EXPORT_SYMBOL(agp_generic_enable);


int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
{
	char *table;
	char *table_end;
	int size;
	int page_order;
	int num_entries;
	int i;
	void *temp;
	struct page *page;

	/* The generic routines can't handle 2 level gatt's */
	if (bridge->driver->size_type == LVL2_APER_SIZE)
		return -EINVAL;

	table = NULL;
	i = bridge->aperture_size_idx;
	temp = bridge->current_size;
	size = page_order = num_entries = 0;

	if (bridge->driver->size_type != FIXED_APER_SIZE) {
		do {
			switch (bridge->driver->size_type) {
			case U8_APER_SIZE:
				size = A_SIZE_8(temp)->size;
				page_order = A_SIZE_8(temp)->page_order;
				num_entries = A_SIZE_8(temp)->num_entries;
				break;
			case U16_APER_SIZE:
				size = A_SIZE_16(temp)->size;
				page_order = A_SIZE_16(temp)->page_order;
				num_entries = A_SIZE_16(temp)->num_entries;
				break;
			case U32_APER_SIZE:
				size = A_SIZE_32(temp)->size;
				page_order = A_SIZE_32(temp)->page_order;
				num_entries = A_SIZE_32(temp)->num_entries;
				break;
			/* This case will never really happen. */
			case FIXED_APER_SIZE:
			case LVL2_APER_SIZE:
			default:
				size = page_order = num_entries = 0;
				break;
			}

			table = alloc_gatt_pages(page_order);

			if (table == NULL) {
				i++;
				switch (bridge->driver->size_type) {
				case U8_APER_SIZE:
					bridge->current_size = A_IDX8(bridge);
					break;
				case U16_APER_SIZE:
					bridge->current_size = A_IDX16(bridge);
					break;
				case U32_APER_SIZE:
					bridge->current_size = A_IDX32(bridge);
					break;
				/* These cases will never really happen. */
				case FIXED_APER_SIZE:
				case LVL2_APER_SIZE:
				default:
					break;
				}
				temp = bridge->current_size;
			} else {
				bridge->aperture_size_idx = i;
			}
		} while (!table && (i < bridge->driver->num_aperture_sizes));
	} else {
		size = ((struct aper_size_info_fixed *) temp)->size;
		page_order = ((struct aper_size_info_fixed *) temp)->page_order;
		num_entries = ((struct aper_size_info_fixed *) temp)->num_entries;
		table = alloc_gatt_pages(page_order);
	}

	if (table == NULL)
		return -ENOMEM;

	table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);

	for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
		SetPageReserved(page);

	bridge->gatt_table_real = (u32 *) table;
	agp_gatt_table = (void *)table;

	bridge->driver->cache_flush();
#ifdef CONFIG_X86
	if (set_memory_uc((unsigned long)table, 1 << page_order))
		printk(KERN_WARNING "Could not set GATT table memory to UC!\n");

	bridge->gatt_table = (void *)table;
#else
	bridge->gatt_table = ioremap_nocache(virt_to_phys(table),
					(PAGE_SIZE * (1 << page_order)));
	bridge->driver->cache_flush();
#endif

	if (bridge->gatt_table == NULL) {
		for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
			ClearPageReserved(page);

		free_gatt_pages(table, page_order);

		return -ENOMEM;
	}
	bridge->gatt_bus_addr = virt_to_phys(bridge->gatt_table_real);

	/* AK: bogus, should encode addresses > 4GB */
	for (i = 0; i < num_entries; i++) {
		writel(bridge->scratch_page, bridge->gatt_table+i);
		readl(bridge->gatt_table+i);	/* PCI Posting. */
	}

	return 0;
}
EXPORT_SYMBOL(agp_generic_create_gatt_table);

int agp_generic_free_gatt_table(struct agp_bridge_data *bridge)
{
	int page_order;
	char *table, *table_end;
	void *temp;
	struct page *page;

	temp = bridge->current_size;

	switch (bridge->driver->size_type) {
	case U8_APER_SIZE:
		page_order = A_SIZE_8(temp)->page_order;
		break;
	case U16_APER_SIZE:
		page_order = A_SIZE_16(temp)->page_order;
		break;
	case U32_APER_SIZE:
		page_order = A_SIZE_32(temp)->page_order;
		break;
	case FIXED_APER_SIZE:
		page_order = A_SIZE_FIX(temp)->page_order;
		break;
	case LVL2_APER_SIZE:
		/* The generic routines can't deal with 2 level gatt's */
		return -EINVAL;
		break;
	default:
		page_order = 0;
		break;
	}

	/* Do not worry about freeing memory, because if this is
	 * called, then all agp memory is deallocated and removed
	 * from the table. */

#ifdef CONFIG_X86
	set_memory_wb((unsigned long)bridge->gatt_table, 1 << page_order);
#else
	iounmap(bridge->gatt_table);
#endif
	table = (char *) bridge->gatt_table_real;
	table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);

	for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
		ClearPageReserved(page);

	free_gatt_pages(bridge->gatt_table_real, page_order);

	agp_gatt_table = NULL;
	bridge->gatt_table = NULL;
	bridge->gatt_table_real = NULL;
	bridge->gatt_bus_addr = 0;

	return 0;
}
EXPORT_SYMBOL(agp_generic_free_gatt_table);


int agp_generic_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
{
	int num_entries;
	size_t i;
	off_t j;
	void *temp;
	struct agp_bridge_data *bridge;
	int mask_type;

	bridge = mem->bridge;
	if (!bridge)
		return -EINVAL;

	if (mem->page_count == 0)
		return 0;

	temp = bridge->current_size;

	switch (bridge->driver->size_type) {
	case U8_APER_SIZE:
		num_entries = A_SIZE_8(temp)->num_entries;
		break;
	case U16_APER_SIZE:
		num_entries = A_SIZE_16(temp)->num_entries;
		break;
	case U32_APER_SIZE:
		num_entries = A_SIZE_32(temp)->num_entries;
		break;
	case FIXED_APER_SIZE:
		num_entries = A_SIZE_FIX(temp)->num_entries;
		break;
	case LVL2_APER_SIZE:
		/* The generic routines can't deal with 2 level gatt's */
		return -EINVAL;
		break;
	default:
		num_entries = 0;
		break;
	}

	num_entries -= agp_memory_reserved/PAGE_SIZE;
	if (num_entries < 0)
		num_entries = 0;

	if (type != mem->type)
		return -EINVAL;

	mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
	if (mask_type != 0) {
		/* The generic routines know nothing of memory types */
		return -EINVAL;
	}

	/* AK: could wrap */
	if ((pg_start + mem->page_count) > num_entries)
		return -EINVAL;

	j = pg_start;

	while (j < (pg_start + mem->page_count)) {
		if (!PGE_EMPTY(bridge, readl(bridge->gatt_table+j)))
			return -EBUSY;
		j++;
	}

	if (!mem->is_flushed) {
		bridge->driver->cache_flush();
		mem->is_flushed = true;
	}

	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		writel(bridge->driver->mask_memory(bridge,
				page_to_phys(mem->pages[i]),
				mask_type),
			bridge->gatt_table+j);
	}
	readl(bridge->gatt_table+j-1);	/* PCI Posting. */

	bridge->driver->tlb_flush(mem);
	return 0;
}
EXPORT_SYMBOL(agp_generic_insert_memory);


int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
{
	size_t i;
	struct agp_bridge_data *bridge;
	int mask_type;

	bridge = mem->bridge;
	if (!bridge)
		return -EINVAL;

	if (mem->page_count == 0)
		return 0;

	if (type != mem->type)
		return -EINVAL;

	mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
	if (mask_type != 0) {
		/* The generic routines know nothing of memory types */
		return -EINVAL;
	}

	/* AK: bogus, should encode addresses > 4GB */
	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
		writel(bridge->scratch_page, bridge->gatt_table+i);
	}
	readl(bridge->gatt_table+i-1);	/* PCI Posting. */

	bridge->driver->tlb_flush(mem);
	return 0;
}
EXPORT_SYMBOL(agp_generic_remove_memory);

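/*
 * Illustrative sketch (not part of this file): a chipset driver with no
 * special memory handling typically just points its struct agp_bridge_driver
 * hooks at the generic routines in this file.  The "example_agp_driver" name
 * is a placeholder; the field names are those of struct agp_bridge_driver in
 * agp.h, and only a representative subset is shown:
 *
 *	static const struct agp_bridge_driver example_agp_driver = {
 *		.owner			= THIS_MODULE,
 *		.size_type		= U16_APER_SIZE,
 *		.cache_flush		= global_cache_flush,
 *		.mask_memory		= agp_generic_mask_memory,
 *		.create_gatt_table	= agp_generic_create_gatt_table,
 *		.free_gatt_table	= agp_generic_free_gatt_table,
 *		.insert_memory		= agp_generic_insert_memory,
 *		.remove_memory		= agp_generic_remove_memory,
 *		.alloc_by_type		= agp_generic_alloc_by_type,
 *		.free_by_type		= agp_generic_free_by_type,
 *		.agp_alloc_page		= agp_generic_alloc_page,
 *		.agp_destroy_page	= agp_generic_destroy_page,
 *		.agp_type_to_mask_type	= agp_generic_type_to_mask_type,
 *	};
 *
 * Chipset-specific configure/fetch_size/tlb_flush hooks (and, for AGP 3.0
 * bridges, the agp3_generic_* helpers near the end of this file) fill in
 * the rest.
 */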

struct agp_memory *agp_generic_alloc_by_type(size_t page_count, int type)
{
	return NULL;
}
EXPORT_SYMBOL(agp_generic_alloc_by_type);

void agp_generic_free_by_type(struct agp_memory *curr)
{
	agp_free_page_array(curr);
	agp_free_key(curr->key);
	kfree(curr);
}
EXPORT_SYMBOL(agp_generic_free_by_type);

struct agp_memory *agp_generic_alloc_user(size_t page_count, int type)
{
	struct agp_memory *new;
	int i;
	int pages;

	pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;
	new = agp_create_user_memory(page_count);
	if (new == NULL)
		return NULL;

	for (i = 0; i < page_count; i++)
		new->pages[i] = NULL;
	new->page_count = 0;
	new->type = type;
	new->num_scratch_pages = pages;

	return new;
}
EXPORT_SYMBOL(agp_generic_alloc_user);

/*
 * Basic Page Allocation Routines -
 * These routines handle page allocation and by default they reserve the
 * allocated memory. They also handle incrementing the current_memory_agp
 * value, which is checked against a maximum value.
 */

int agp_generic_alloc_pages(struct agp_bridge_data *bridge, struct agp_memory *mem, size_t num_pages)
{
	struct page *page;
	int i, ret = -ENOMEM;

	for (i = 0; i < num_pages; i++) {
		page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
		/* agp_free_memory() needs gart address */
		if (page == NULL)
			goto out;

#ifndef CONFIG_X86
		map_page_into_agp(page);
#endif
		get_page(page);
		atomic_inc(&agp_bridge->current_memory_agp);

		mem->pages[i] = page;
		mem->page_count++;
	}

#ifdef CONFIG_X86
	set_pages_array_uc(mem->pages, num_pages);
#endif
	ret = 0;
out:
	return ret;
}
EXPORT_SYMBOL(agp_generic_alloc_pages);

struct page *agp_generic_alloc_page(struct agp_bridge_data *bridge)
{
	struct page *page;

	page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
	if (page == NULL)
		return NULL;

	map_page_into_agp(page);

	get_page(page);
	atomic_inc(&agp_bridge->current_memory_agp);
	return page;
}
EXPORT_SYMBOL(agp_generic_alloc_page);

void agp_generic_destroy_pages(struct agp_memory *mem)
{
	int i;
	struct page *page;

	if (!mem)
		return;

#ifdef CONFIG_X86
	set_pages_array_wb(mem->pages, mem->page_count);
#endif

	for (i = 0; i < mem->page_count; i++) {
		page = mem->pages[i];

#ifndef CONFIG_X86
		unmap_page_from_agp(page);
#endif
		put_page(page);
		__free_page(page);
		atomic_dec(&agp_bridge->current_memory_agp);
		mem->pages[i] = NULL;
	}
}
EXPORT_SYMBOL(agp_generic_destroy_pages);

void agp_generic_destroy_page(struct page *page, int flags)
{
	if (page == NULL)
		return;

	if (flags & AGP_PAGE_DESTROY_UNMAP)
		unmap_page_from_agp(page);

	if (flags & AGP_PAGE_DESTROY_FREE) {
		put_page(page);
		__free_page(page);
		atomic_dec(&agp_bridge->current_memory_agp);
	}
}
EXPORT_SYMBOL(agp_generic_destroy_page);

/* End Basic Page Allocation Routines */


/**
 * agp_enable - initialise the agp point-to-point connection.
 *
 * @bridge: an agp_bridge_data struct allocated for the AGP host bridge.
 * @mode: agp mode register value to configure with.
 */
void agp_enable(struct agp_bridge_data *bridge, u32 mode)
{
	if (!bridge)
		return;
	bridge->driver->agp_enable(bridge, mode);
}
EXPORT_SYMBOL(agp_enable);

/* When we remove the global variable agp_bridge from all drivers
 * then agp_alloc_bridge and agp_generic_find_bridge need to be updated
 */

struct agp_bridge_data *agp_generic_find_bridge(struct pci_dev *pdev)
{
	if (list_empty(&agp_bridges))
		return NULL;

	return agp_bridge;
}

static void ipi_handler(void *null)
{
	flush_agp_cache();
}

void global_cache_flush(void)
{
	if (on_each_cpu(ipi_handler, NULL, 1) != 0)
		panic(PFX "timed out waiting for the other CPUs!\n");
}
EXPORT_SYMBOL(global_cache_flush);

unsigned long agp_generic_mask_memory(struct agp_bridge_data *bridge,
				      dma_addr_t addr, int type)
{
	/* memory type is ignored in the generic routine */
	if (bridge->driver->masks)
		return addr | bridge->driver->masks[0].mask;
	else
		return addr;
}
EXPORT_SYMBOL(agp_generic_mask_memory);

int agp_generic_type_to_mask_type(struct agp_bridge_data *bridge,
				  int type)
{
	if (type >= AGP_USER_TYPES)
		return 0;
	return type;
}
EXPORT_SYMBOL(agp_generic_type_to_mask_type);

/*
 * These functions are implemented according to the AGPv3 spec,
 * which covers implementation details that had previously been
 * left open.
 */

int agp3_generic_fetch_size(void)
{
	u16 temp_size;
	int i;
	struct aper_size_info_16 *values;

	pci_read_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, &temp_size);
	values = A_SIZE_16(agp_bridge->driver->aperture_sizes);

	for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
		if (temp_size == values[i].size_value) {
			agp_bridge->previous_size =
				agp_bridge->current_size = (void *) (values + i);

			agp_bridge->aperture_size_idx = i;
			return values[i].size;
		}
	}
	return 0;
}
EXPORT_SYMBOL(agp3_generic_fetch_size);

void agp3_generic_tlbflush(struct agp_memory *mem)
{
	u32 ctrl;
	pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl);
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_GTLBEN);
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl);
}
EXPORT_SYMBOL(agp3_generic_tlbflush);

int agp3_generic_configure(void)
{
	u32 temp;
	struct aper_size_info_16 *current_size;

	current_size = A_SIZE_16(agp_bridge->current_size);

	pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

	/* set aperture size */
	pci_write_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, current_size->size_value);
	/* set gart pointer */
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPGARTLO, agp_bridge->gatt_bus_addr);
	/* enable aperture and GTLB */
	pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &temp);
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, temp | AGPCTRL_APERENB | AGPCTRL_GTLBEN);
	return 0;
}
EXPORT_SYMBOL(agp3_generic_configure);

void agp3_generic_cleanup(void)
{
	u32 ctrl;
	pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl);
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_APERENB);
}
EXPORT_SYMBOL(agp3_generic_cleanup);

const struct aper_size_info_16 agp3_generic_sizes[AGP_GENERIC_SIZES_ENTRIES] =
{
	{4096, 1048576, 10, 0x000},
	{2048,  524288,  9, 0x800},
	{1024,  262144,  8, 0xc00},
	{ 512,  131072,  7, 0xe00},
	{ 256,   65536,  6, 0xf00},
	{ 128,   32768,  5, 0xf20},
	{  64,   16384,  4, 0xf30},
	{  32,    8192,  3, 0xf38},
	{  16,    4096,  2, 0xf3c},
	{   8,    2048,  1, 0xf3e},
	{   4,    1024,  0, 0xf3f}
};
EXPORT_SYMBOL(agp3_generic_sizes);
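
/*
 * Reading agp3_generic_sizes: each entry is {size (MB), num_entries,
 * page_order, size_value}, following struct aper_size_info_16.  Worked
 * example for the first row, {4096, 1048576, 10, 0x000}, assuming the
 * usual 4 KB aperture page size:
 *
 *	aperture    = 4096 MB
 *	num_entries = 4096 MB / 4 KB per page = 1048576 GATT entries
 *	page_order  = 10, i.e. the GATT itself occupies 2^10 pages
 *		      (1048576 entries * 4 bytes = 4 MB)
 *	size_value  = 0x000, the value agp3_generic_configure() writes to
 *		      (and agp3_generic_fetch_size() reads back from) the
 *		      AGPAPSIZE register for this aperture size
 */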