/*
 * AGPGART driver.
 * Copyright (C) 2004 Silicon Graphics, Inc.
 * Copyright (C) 2002-2005 Dave Jones.
 * Copyright (C) 1999 Jeff Hartmann.
 * Copyright (C) 1999 Precision Insight, Inc.
 * Copyright (C) 1999 Xi Graphics, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
 * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * TODO:
 * - Allocate more than order 0 pages to avoid too much linear map splitting.
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pagemap.h>
#include <linux/miscdevice.h>
#include <linux/pm.h>
#include <linux/agp_backend.h>
#include <linux/vmalloc.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <asm/io.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif
#include <asm/pgtable.h>
#include "agp.h"

__u32 *agp_gatt_table;
int agp_memory_reserved;

/*
 * Needed by the Nforce GART driver for the time being. Would be
 * nice to do this some other way instead of needing this export.
 */
EXPORT_SYMBOL_GPL(agp_memory_reserved);

/*
 * Generic routines for handling agp_memory structures -
 * They use the basic page allocation routines to do the brunt of the work.
 */

void agp_free_key(int key)
{
	if (key < 0)
		return;

	if (key < MAXKEY)
		clear_bit(key, agp_bridge->key_list);
}
EXPORT_SYMBOL(agp_free_key);


static int agp_get_key(void)
{
	int bit;

	bit = find_first_zero_bit(agp_bridge->key_list, MAXKEY);
	if (bit < MAXKEY) {
		set_bit(bit, agp_bridge->key_list);
		return bit;
	}
	return -1;
}

/*
 * Use kmalloc if possible for the page list. Otherwise fall back to
 * vmalloc. This speeds things up and also saves memory for small AGP
 * regions.
 */

void agp_alloc_page_array(size_t size, struct agp_memory *mem)
{
	mem->pages = kvmalloc(size, GFP_KERNEL);
}
EXPORT_SYMBOL(agp_alloc_page_array);

static struct agp_memory *agp_create_user_memory(unsigned long num_agp_pages)
{
	struct agp_memory *new;
	unsigned long alloc_size = num_agp_pages*sizeof(struct page *);

	if (INT_MAX/sizeof(struct page *) < num_agp_pages)
		return NULL;

	new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL);
	if (new == NULL)
		return NULL;

	new->key = agp_get_key();

	if (new->key < 0) {
		kfree(new);
		return NULL;
	}

	agp_alloc_page_array(alloc_size, new);

	if (new->pages == NULL) {
		agp_free_key(new->key);
		kfree(new);
		return NULL;
	}
	new->num_scratch_pages = 0;
	return new;
}

struct agp_memory *agp_create_memory(int scratch_pages)
{
	struct agp_memory *new;

	new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL);
	if (new == NULL)
		return NULL;

	new->key = agp_get_key();

	if (new->key < 0) {
		kfree(new);
		return NULL;
	}

	agp_alloc_page_array(PAGE_SIZE * scratch_pages, new);

	if (new->pages == NULL) {
		agp_free_key(new->key);
		kfree(new);
		return NULL;
	}
	new->num_scratch_pages = scratch_pages;
	new->type = AGP_NORMAL_MEMORY;
	return new;
}
EXPORT_SYMBOL(agp_create_memory);

/**
 * agp_free_memory - free memory associated with an agp_memory pointer.
 *
 * @curr: agp_memory pointer to be freed.
 *
 * It is the only function that can be called when the backend is not owned
 * by the caller. (So it can free memory on client death.)
 */
void agp_free_memory(struct agp_memory *curr)
{
	size_t i;

	if (curr == NULL)
		return;

	if (curr->is_bound)
		agp_unbind_memory(curr);

	if (curr->type >= AGP_USER_TYPES) {
		agp_generic_free_by_type(curr);
		return;
	}

	if (curr->type != 0) {
		curr->bridge->driver->free_by_type(curr);
		return;
	}
	if (curr->page_count != 0) {
		if (curr->bridge->driver->agp_destroy_pages) {
			curr->bridge->driver->agp_destroy_pages(curr);
		} else {

			for (i = 0; i < curr->page_count; i++) {
				curr->bridge->driver->agp_destroy_page(
					curr->pages[i],
					AGP_PAGE_DESTROY_UNMAP);
			}
			for (i = 0; i < curr->page_count; i++) {
				curr->bridge->driver->agp_destroy_page(
					curr->pages[i],
					AGP_PAGE_DESTROY_FREE);
			}
		}
	}
	agp_free_key(curr->key);
	agp_free_page_array(curr);
	kfree(curr);
}
EXPORT_SYMBOL(agp_free_memory);

#define ENTRIES_PER_PAGE	(PAGE_SIZE / sizeof(unsigned long))

/**
 * agp_allocate_memory - allocate a group of pages of a certain type.
 *
 * @bridge: an agp_bridge_data struct allocated for the AGP host bridge.
 * @page_count: size_t argument of the number of pages.
 * @type: u32 argument of the type of memory to be allocated.
 *
 * Every agp bridge device will allow you to allocate AGP_NORMAL_MEMORY which
 * maps to physical ram. Any other type is device dependent.
 *
 * It returns NULL whenever memory is unavailable.
 */
struct agp_memory *agp_allocate_memory(struct agp_bridge_data *bridge,
				       size_t page_count, u32 type)
{
	int scratch_pages;
	struct agp_memory *new;
	size_t i;
	int cur_memory;

	if (!bridge)
		return NULL;

	cur_memory = atomic_read(&bridge->current_memory_agp);
	if ((cur_memory + page_count > bridge->max_memory_agp) ||
	    (cur_memory + page_count < page_count))
		return NULL;

	if (type >= AGP_USER_TYPES) {
		new = agp_generic_alloc_user(page_count, type);
		if (new)
			new->bridge = bridge;
		return new;
	}

	if (type != 0) {
		new = bridge->driver->alloc_by_type(page_count, type);
		if (new)
			new->bridge = bridge;
		return new;
	}

	scratch_pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;

	new = agp_create_memory(scratch_pages);

	if (new == NULL)
		return NULL;

	if (bridge->driver->agp_alloc_pages) {
		if (bridge->driver->agp_alloc_pages(bridge, new, page_count)) {
			agp_free_memory(new);
			return NULL;
		}
		new->bridge = bridge;
		return new;
	}

	for (i = 0; i < page_count; i++) {
		struct page *page = bridge->driver->agp_alloc_page(bridge);

		if (page == NULL) {
			agp_free_memory(new);
			return NULL;
		}
		new->pages[i] = page;
		new->page_count++;
	}
	new->bridge = bridge;

	return new;
}
EXPORT_SYMBOL(agp_allocate_memory);


/* End - Generic routines for handling agp_memory structures */
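
/*
 * Illustrative sketch (not part of the driver): the typical lifetime of an
 * agp_memory object as seen by an in-kernel user such as a DRM driver.
 * The hypothetical example_map_pages() below only strings together the
 * exported calls defined in this file and declared in agp_backend.h;
 * error handling is reduced to the bare minimum.
 */
#if 0	/* example only, never compiled */
static int example_map_pages(struct agp_bridge_data *bridge,
			     size_t page_count, off_t pg_start)
{
	struct agp_memory *mem;
	int ret;

	/* Allocate page_count pages of plain AGP_NORMAL_MEMORY (type 0). */
	mem = agp_allocate_memory(bridge, page_count, AGP_NORMAL_MEMORY);
	if (mem == NULL)
		return -ENOMEM;

	/* Map them into the GATT starting at aperture page pg_start. */
	ret = agp_bind_memory(mem, pg_start);
	if (ret) {
		agp_free_memory(mem);
		return ret;
	}

	/* ... use the aperture range ... */

	/* Tear down: unbind (also done implicitly by agp_free_memory). */
	agp_unbind_memory(mem);
	agp_free_memory(mem);
	return 0;
}
#endif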

static int agp_return_size(void)
{
	int current_size;
	void *temp;

	temp = agp_bridge->current_size;

	switch (agp_bridge->driver->size_type) {
	case U8_APER_SIZE:
		current_size = A_SIZE_8(temp)->size;
		break;
	case U16_APER_SIZE:
		current_size = A_SIZE_16(temp)->size;
		break;
	case U32_APER_SIZE:
		current_size = A_SIZE_32(temp)->size;
		break;
	case LVL2_APER_SIZE:
		current_size = A_SIZE_LVL2(temp)->size;
		break;
	case FIXED_APER_SIZE:
		current_size = A_SIZE_FIX(temp)->size;
		break;
	default:
		current_size = 0;
		break;
	}

	current_size -= (agp_memory_reserved / (1024*1024));
	if (current_size < 0)
		current_size = 0;
	return current_size;
}


int agp_num_entries(void)
{
	int num_entries;
	void *temp;

	temp = agp_bridge->current_size;

	switch (agp_bridge->driver->size_type) {
	case U8_APER_SIZE:
		num_entries = A_SIZE_8(temp)->num_entries;
		break;
	case U16_APER_SIZE:
		num_entries = A_SIZE_16(temp)->num_entries;
		break;
	case U32_APER_SIZE:
		num_entries = A_SIZE_32(temp)->num_entries;
		break;
	case LVL2_APER_SIZE:
		num_entries = A_SIZE_LVL2(temp)->num_entries;
		break;
	case FIXED_APER_SIZE:
		num_entries = A_SIZE_FIX(temp)->num_entries;
		break;
	default:
		num_entries = 0;
		break;
	}

	num_entries -= agp_memory_reserved >> PAGE_SHIFT;
	if (num_entries < 0)
		num_entries = 0;
	return num_entries;
}
EXPORT_SYMBOL_GPL(agp_num_entries);


/**
 * agp_copy_info - copy bridge state information
 *
 * @bridge: an agp_bridge_data struct allocated for the AGP host bridge.
 * @info: agp_kern_info pointer. The caller should ensure that this pointer is valid.
 *
 * This function copies information about the agp bridge device and the state of
 * the agp backend into an agp_kern_info pointer.
 */
int agp_copy_info(struct agp_bridge_data *bridge, struct agp_kern_info *info)
{
	memset(info, 0, sizeof(struct agp_kern_info));
	if (!bridge) {
		info->chipset = NOT_SUPPORTED;
		return -EIO;
	}

	info->version.major = bridge->version->major;
	info->version.minor = bridge->version->minor;
	info->chipset = SUPPORTED;
	info->device = bridge->dev;
	if (bridge->mode & AGPSTAT_MODE_3_0)
		info->mode = bridge->mode & ~AGP3_RESERVED_MASK;
	else
		info->mode = bridge->mode & ~AGP2_RESERVED_MASK;
	info->aper_base = bridge->gart_bus_addr;
	info->aper_size = agp_return_size();
	info->max_memory = bridge->max_memory_agp;
	info->current_memory = atomic_read(&bridge->current_memory_agp);
	info->cant_use_aperture = bridge->driver->cant_use_aperture;
	info->vm_ops = bridge->vm_ops;
	info->page_mask = ~0UL;
	return 0;
}
EXPORT_SYMBOL(agp_copy_info);

/* End - Routine to copy over information structure */
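
/*
 * Illustrative sketch (not part of the driver): how a caller might use
 * agp_copy_info() to discover the aperture before allocating anything.
 * The function name example_report_aperture() is hypothetical.
 */
#if 0	/* example only, never compiled */
static void example_report_aperture(struct agp_bridge_data *bridge)
{
	struct agp_kern_info info;

	if (agp_copy_info(bridge, &info))
		return;		/* no supported AGP bridge */

	/* aper_size is reported in megabytes, aper_base is a bus address. */
	pr_info("AGP %d.%d aperture: %lu MB at 0x%lx, %lu/%lu pages in use\n",
		info.version.major, info.version.minor,
		(unsigned long)info.aper_size,
		(unsigned long)info.aper_base,
		(unsigned long)info.current_memory,
		(unsigned long)info.max_memory);
}
#endif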

/*
 * Routines for handling swapping of agp_memory into the GATT -
 * These routines take agp_memory and insert them into the GATT.
 * They call device specific routines to actually write to the GATT.
 */

/**
 * agp_bind_memory - Bind an agp_memory structure into the GATT.
 *
 * @curr: agp_memory pointer
 * @pg_start: an offset into the graphics aperture translation table
 *
 * It returns -EINVAL if the pointer == NULL.
 * It returns -EBUSY if the area of the table requested is already in use.
 */
int agp_bind_memory(struct agp_memory *curr, off_t pg_start)
{
	int ret_val;

	if (curr == NULL)
		return -EINVAL;

	if (curr->is_bound) {
		printk(KERN_INFO PFX "memory %p is already bound!\n", curr);
		return -EINVAL;
	}
	if (!curr->is_flushed) {
		curr->bridge->driver->cache_flush();
		curr->is_flushed = true;
	}

	ret_val = curr->bridge->driver->insert_memory(curr, pg_start, curr->type);

	if (ret_val != 0)
		return ret_val;

	curr->is_bound = true;
	curr->pg_start = pg_start;
	spin_lock(&agp_bridge->mapped_lock);
	list_add(&curr->mapped_list, &agp_bridge->mapped_list);
	spin_unlock(&agp_bridge->mapped_lock);

	return 0;
}
EXPORT_SYMBOL(agp_bind_memory);


/**
 * agp_unbind_memory - Removes an agp_memory structure from the GATT
 *
 * @curr: agp_memory pointer to be removed from the GATT.
 *
 * It returns -EINVAL if this piece of agp_memory is not currently bound to
 * the graphics aperture translation table or if the agp_memory pointer == NULL
 */
int agp_unbind_memory(struct agp_memory *curr)
{
	int ret_val;

	if (curr == NULL)
		return -EINVAL;

	if (!curr->is_bound) {
		printk(KERN_INFO PFX "memory %p was not bound!\n", curr);
		return -EINVAL;
	}

	ret_val = curr->bridge->driver->remove_memory(curr, curr->pg_start, curr->type);

	if (ret_val != 0)
		return ret_val;

	curr->is_bound = false;
	curr->pg_start = 0;
	spin_lock(&curr->bridge->mapped_lock);
	list_del(&curr->mapped_list);
	spin_unlock(&curr->bridge->mapped_lock);
	return 0;
}
EXPORT_SYMBOL(agp_unbind_memory);


/* End - Routines for handling swapping of agp_memory into the GATT */


/* Generic Agp routines - Start */
static void agp_v2_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_agpstat)
{
	u32 tmp;

	if (*requested_mode & AGP2_RESERVED_MASK) {
		printk(KERN_INFO PFX "reserved bits set (%x) in mode 0x%x. Fixed.\n",
		       *requested_mode & AGP2_RESERVED_MASK, *requested_mode);
		*requested_mode &= ~AGP2_RESERVED_MASK;
	}

	/*
	 * Some dumb bridges are programmed to disobey the AGP2 spec.
	 * This is likely a BIOS misprogramming rather than poweron default, or
	 * it would be a lot more common.
	 * https://bugs.freedesktop.org/show_bug.cgi?id=8816
	 * AGPv2 spec 6.1.9 states:
	 *   The RATE field indicates the data transfer rates supported by this
	 *   device. A.G.P. devices must report all that apply.
	 * Fix them up as best we can.
	 */
	switch (*bridge_agpstat & 7) {
	case 4:
		*bridge_agpstat |= (AGPSTAT2_2X | AGPSTAT2_1X);
		printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x4 rate. "
			"Fixing up support for x2 & x1\n");
		break;
	case 2:
		*bridge_agpstat |= AGPSTAT2_1X;
		printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x2 rate. "
			"Fixing up support for x1\n");
		break;
	default:
		break;
	}

	/* Check the speed bits make sense. Only one should be set. */
	tmp = *requested_mode & 7;
	switch (tmp) {
	case 0:
		printk(KERN_INFO PFX "%s tried to set rate=x0. Setting to x1 mode.\n", current->comm);
		*requested_mode |= AGPSTAT2_1X;
		break;
	case 1:
	case 2:
		break;
	case 3:
		*requested_mode &= ~(AGPSTAT2_1X);	/* rate=2 */
		break;
	case 4:
		break;
	case 5:
	case 6:
	case 7:
		*requested_mode &= ~(AGPSTAT2_1X|AGPSTAT2_2X);	/* rate=4 */
		break;
	}

	/* disable SBA if it's not supported */
	if (!((*bridge_agpstat & AGPSTAT_SBA) && (*vga_agpstat & AGPSTAT_SBA) && (*requested_mode & AGPSTAT_SBA)))
		*bridge_agpstat &= ~AGPSTAT_SBA;

	/* Set rate */
	if (!((*bridge_agpstat & AGPSTAT2_4X) && (*vga_agpstat & AGPSTAT2_4X) && (*requested_mode & AGPSTAT2_4X)))
		*bridge_agpstat &= ~AGPSTAT2_4X;

	if (!((*bridge_agpstat & AGPSTAT2_2X) && (*vga_agpstat & AGPSTAT2_2X) && (*requested_mode & AGPSTAT2_2X)))
		*bridge_agpstat &= ~AGPSTAT2_2X;

	if (!((*bridge_agpstat & AGPSTAT2_1X) && (*vga_agpstat & AGPSTAT2_1X) && (*requested_mode & AGPSTAT2_1X)))
		*bridge_agpstat &= ~AGPSTAT2_1X;

	/* Now we know what mode it should be, clear out the unwanted bits. */
	if (*bridge_agpstat & AGPSTAT2_4X)
		*bridge_agpstat &= ~(AGPSTAT2_1X | AGPSTAT2_2X);	/* 4X */

	if (*bridge_agpstat & AGPSTAT2_2X)
		*bridge_agpstat &= ~(AGPSTAT2_1X | AGPSTAT2_4X);	/* 2X */

	if (*bridge_agpstat & AGPSTAT2_1X)
		*bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);	/* 1X */

	/* Apply any errata. */
	if (agp_bridge->flags & AGP_ERRATA_FASTWRITES)
		*bridge_agpstat &= ~AGPSTAT_FW;

	if (agp_bridge->flags & AGP_ERRATA_SBA)
		*bridge_agpstat &= ~AGPSTAT_SBA;

	if (agp_bridge->flags & AGP_ERRATA_1X) {
		*bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);
		*bridge_agpstat |= AGPSTAT2_1X;
	}

	/* If we've dropped down to 1X, disable fast writes. */
	if (*bridge_agpstat & AGPSTAT2_1X)
		*bridge_agpstat &= ~AGPSTAT_FW;
}
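
/*
 * Worked example (illustrative only) of the AGP 2.x rate negotiation in
 * agp_v2_parse_one(). The AGPSTAT2_1X/2X/4X bits occupy the low three bits
 * of the status and mode registers (0x1, 0x2 and 0x4 respectively).
 *
 *   bridge_agpstat rate bits: 0x7  (bridge supports x1, x2 and x4)
 *   vga_agpstat    rate bits: 0x7  (card supports x1, x2 and x4)
 *   requested_mode rate bits: 0x3  (caller set both x1 and x2)
 *
 * The "only one rate bit" switch turns the 0x3 request into 0x2 (rate=2),
 * the three per-rate checks then clear 4X (not requested) and 1X (no longer
 * requested) from bridge_agpstat, and the final clean-up leaves exactly
 * AGPSTAT2_2X set, so the device ends up programmed for x2 transfers.
 */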

/*
 * requested_mode = Mode requested by (typically) X.
 * bridge_agpstat = PCI_AGP_STATUS from agp bridge.
 * vga_agpstat = PCI_AGP_STATUS from graphic card.
 */
static void agp_v3_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_agpstat)
{
	u32 origbridge = *bridge_agpstat, origvga = *vga_agpstat;
	u32 tmp;

	if (*requested_mode & AGP3_RESERVED_MASK) {
		printk(KERN_INFO PFX "reserved bits set (%x) in mode 0x%x. Fixed.\n",
		       *requested_mode & AGP3_RESERVED_MASK, *requested_mode);
		*requested_mode &= ~AGP3_RESERVED_MASK;
	}

	/* Check the speed bits make sense. */
	tmp = *requested_mode & 7;
	if (tmp == 0) {
		printk(KERN_INFO PFX "%s tried to set rate=x0. Setting to AGP3 x4 mode.\n", current->comm);
		*requested_mode |= AGPSTAT3_4X;
	}
	if (tmp >= 3) {
		printk(KERN_INFO PFX "%s tried to set rate=x%d. Setting to AGP3 x8 mode.\n", current->comm, tmp * 4);
		*requested_mode = (*requested_mode & ~7) | AGPSTAT3_8X;
	}

	/* ARQSZ - Set the value to the maximum one.
	 * Don't allow the mode register to override values. */
	*bridge_agpstat = ((*bridge_agpstat & ~AGPSTAT_ARQSZ) |
		max_t(u32, (*bridge_agpstat & AGPSTAT_ARQSZ), (*vga_agpstat & AGPSTAT_ARQSZ)));

	/* Calibration cycle.
	 * Don't allow the mode register to override values. */
	*bridge_agpstat = ((*bridge_agpstat & ~AGPSTAT_CAL_MASK) |
		min_t(u32, (*bridge_agpstat & AGPSTAT_CAL_MASK), (*vga_agpstat & AGPSTAT_CAL_MASK)));

	/* SBA *must* be supported for AGP v3 */
	*bridge_agpstat |= AGPSTAT_SBA;

	/*
	 * Set speed.
	 * Check for invalid speeds. This can happen when applications
	 * written before the AGP 3.0 standard pass AGP2.x modes to AGP3 hardware
	 */
	if (*requested_mode & AGPSTAT_MODE_3_0) {
		/*
		 * Caller hasn't a clue what it is doing. Bridge is in 3.0 mode,
		 * have been passed a 3.0 mode, but with 2.x speed bits set.
		 * AGP2.x 4x -> AGP3.0 4x.
		 */
		if (*requested_mode & AGPSTAT2_4X) {
			printk(KERN_INFO PFX "%s passes broken AGP3 flags (%x). Fixed.\n",
			       current->comm, *requested_mode);
			*requested_mode &= ~AGPSTAT2_4X;
			*requested_mode |= AGPSTAT3_4X;
		}
	} else {
		/*
		 * The caller doesn't know what they are doing. We are in 3.0 mode,
		 * but have been passed an AGP 2.x mode.
		 * Convert AGP 1x,2x,4x -> AGP 3.0 4x.
		 */
		printk(KERN_INFO PFX "%s passes broken AGP2 flags (%x) in AGP3 mode. Fixed.\n",
		       current->comm, *requested_mode);
		*requested_mode &= ~(AGPSTAT2_4X | AGPSTAT2_2X | AGPSTAT2_1X);
		*requested_mode |= AGPSTAT3_4X;
	}

	if (*requested_mode & AGPSTAT3_8X) {
		if (!(*bridge_agpstat & AGPSTAT3_8X)) {
			*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
			*bridge_agpstat |= AGPSTAT3_4X;
			printk(KERN_INFO PFX "%s requested AGPx8 but bridge not capable.\n", current->comm);
			return;
		}
		if (!(*vga_agpstat & AGPSTAT3_8X)) {
			*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
			*bridge_agpstat |= AGPSTAT3_4X;
			printk(KERN_INFO PFX "%s requested AGPx8 but graphic card not capable.\n", current->comm);
			return;
		}
		/* All set, bridge & device can do AGP x8 */
		*bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
		goto done;

	} else if (*requested_mode & AGPSTAT3_4X) {
		*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
		*bridge_agpstat |= AGPSTAT3_4X;
		goto done;

	} else {

		/*
		 * If we didn't specify an AGP mode, we see if both
		 * the graphics card, and the bridge can do x8, and use if so.
		 * If not, we fall back to x4 mode.
		 */
		if ((*bridge_agpstat & AGPSTAT3_8X) && (*vga_agpstat & AGPSTAT3_8X)) {
			printk(KERN_INFO PFX "No AGP mode specified. Setting to highest mode "
			       "supported by bridge & card (x8).\n");
			*bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
			*vga_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
		} else {
			printk(KERN_INFO PFX "Fell back to AGPx4 mode because ");
			if (!(*bridge_agpstat & AGPSTAT3_8X)) {
				printk(KERN_INFO PFX "bridge couldn't do x8. bridge_agpstat:%x (orig=%x)\n",
				       *bridge_agpstat, origbridge);
				*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
				*bridge_agpstat |= AGPSTAT3_4X;
			}
			if (!(*vga_agpstat & AGPSTAT3_8X)) {
				printk(KERN_INFO PFX "graphics card couldn't do x8. vga_agpstat:%x (orig=%x)\n",
				       *vga_agpstat, origvga);
				*vga_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
				*vga_agpstat |= AGPSTAT3_4X;
			}
		}
	}

done:
	/* Apply any errata. */
	if (agp_bridge->flags & AGP_ERRATA_FASTWRITES)
		*bridge_agpstat &= ~AGPSTAT_FW;

	if (agp_bridge->flags & AGP_ERRATA_SBA)
		*bridge_agpstat &= ~AGPSTAT_SBA;

	if (agp_bridge->flags & AGP_ERRATA_1X) {
		*bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);
		*bridge_agpstat |= AGPSTAT2_1X;
	}
}


/**
 * agp_collect_device_status - determine correct agp_cmd from various agp_stat's
 * @bridge: an agp_bridge_data struct allocated for the AGP host bridge.
 * @requested_mode: requested agp_stat from userspace (Typically from X)
 * @bridge_agpstat: current agp_stat from AGP bridge.
 *
 * This function will hunt for an AGP graphics card, and try to match
 * the requested mode to the capabilities of both the bridge and the card.
 */
u32 agp_collect_device_status(struct agp_bridge_data *bridge, u32 requested_mode, u32 bridge_agpstat)
{
	struct pci_dev *device = NULL;
	u32 vga_agpstat;
	u8 cap_ptr;

	for (;;) {
		device = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, device);
		if (!device) {
			printk(KERN_INFO PFX "Couldn't find an AGP VGA controller.\n");
			return 0;
		}
		cap_ptr = pci_find_capability(device, PCI_CAP_ID_AGP);
		if (cap_ptr)
			break;
	}

	/*
	 * OK, here we have an AGP device. Disable impossible
	 * settings, and adjust the readqueue to the minimum.
	 */
	pci_read_config_dword(device, cap_ptr+PCI_AGP_STATUS, &vga_agpstat);

	/* adjust RQ depth */
	bridge_agpstat = ((bridge_agpstat & ~AGPSTAT_RQ_DEPTH) |
	     min_t(u32, (requested_mode & AGPSTAT_RQ_DEPTH),
		 min_t(u32, (bridge_agpstat & AGPSTAT_RQ_DEPTH), (vga_agpstat & AGPSTAT_RQ_DEPTH))));

	/* disable FW if it's not supported */
	if (!((bridge_agpstat & AGPSTAT_FW) &&
	      (vga_agpstat & AGPSTAT_FW) &&
	      (requested_mode & AGPSTAT_FW)))
		bridge_agpstat &= ~AGPSTAT_FW;

	/* Check to see if we are operating in 3.0 mode */
	if (agp_bridge->mode & AGPSTAT_MODE_3_0)
		agp_v3_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat);
	else
		agp_v2_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat);

	pci_dev_put(device);
	return bridge_agpstat;
}
EXPORT_SYMBOL(agp_collect_device_status);


void agp_device_command(u32 bridge_agpstat, bool agp_v3)
{
	struct pci_dev *device = NULL;
	int mode;

	mode = bridge_agpstat & 0x7;
	if (agp_v3)
		mode *= 4;

	for_each_pci_dev(device) {
		u8 agp = pci_find_capability(device, PCI_CAP_ID_AGP);
		if (!agp)
			continue;

		dev_info(&device->dev, "putting AGP V%d device into %dx mode\n",
			 agp_v3 ? 3 : 2, mode);
		pci_write_config_dword(device, agp + PCI_AGP_COMMAND, bridge_agpstat);
	}
}
EXPORT_SYMBOL(agp_device_command);
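
/*
 * Illustrative sketch (not part of the driver): decoding the transfer rate
 * that agp_device_command() reports. In AGP 2.x the low three status bits
 * literally encode x1/x2/x4; in AGP 3.0 the same field encodes x4/x8, which
 * is why the code above multiplies the raw value by four. The helper name
 * example_decode_rate() is hypothetical.
 */
#if 0	/* example only, never compiled */
static int example_decode_rate(u32 agpstat, bool agp_v3)
{
	int rate = agpstat & 0x7;		/* RATE field */

	return agp_v3 ? rate * 4 : rate;	/* e.g. v3 value 2 -> x8 */
}
#endif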

void get_agp_version(struct agp_bridge_data *bridge)
{
	u32 ncapid;

	/* Exit early if already set by errata workarounds. */
	if (bridge->major_version != 0)
		return;

	pci_read_config_dword(bridge->dev, bridge->capndx, &ncapid);
	bridge->major_version = (ncapid >> AGP_MAJOR_VERSION_SHIFT) & 0xf;
	bridge->minor_version = (ncapid >> AGP_MINOR_VERSION_SHIFT) & 0xf;
}
EXPORT_SYMBOL(get_agp_version);


void agp_generic_enable(struct agp_bridge_data *bridge, u32 requested_mode)
{
	u32 bridge_agpstat, temp;

	get_agp_version(agp_bridge);

	dev_info(&agp_bridge->dev->dev, "AGP %d.%d bridge\n",
		 agp_bridge->major_version, agp_bridge->minor_version);

	pci_read_config_dword(agp_bridge->dev,
			      agp_bridge->capndx + PCI_AGP_STATUS, &bridge_agpstat);

	bridge_agpstat = agp_collect_device_status(agp_bridge, requested_mode, bridge_agpstat);
	if (bridge_agpstat == 0)
		/* Something bad happened. FIXME: Return error code? */
		return;

	bridge_agpstat |= AGPSTAT_AGP_ENABLE;

	/* Do AGP version specific frobbing. */
	if (bridge->major_version >= 3) {
		if (bridge->mode & AGPSTAT_MODE_3_0) {
			/* If we have 3.5, we can do the isoch stuff. */
			if (bridge->minor_version >= 5)
				agp_3_5_enable(bridge);
			agp_device_command(bridge_agpstat, true);
			return;
		} else {
			/* Disable calibration cycle in RX91<1> when not in AGP3.0 mode of operation. */
			bridge_agpstat &= ~(7<<10);
			pci_read_config_dword(bridge->dev,
					      bridge->capndx+AGPCTRL, &temp);
			temp |= (1<<9);
			pci_write_config_dword(bridge->dev,
					       bridge->capndx+AGPCTRL, temp);

			dev_info(&bridge->dev->dev, "bridge is in legacy mode, falling back to 2.x\n");
		}
	}

	/* AGP v<3 */
	agp_device_command(bridge_agpstat, false);
}
EXPORT_SYMBOL(agp_generic_enable);


int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
{
	char *table;
	char *table_end;
	int size;
	int page_order;
	int num_entries;
	int i;
	void *temp;
	struct page *page;

	/* The generic routines can't handle 2 level gatt's */
	if (bridge->driver->size_type == LVL2_APER_SIZE)
		return -EINVAL;

	table = NULL;
	i = bridge->aperture_size_idx;
	temp = bridge->current_size;
	size = page_order = num_entries = 0;

	if (bridge->driver->size_type != FIXED_APER_SIZE) {
		do {
			switch (bridge->driver->size_type) {
			case U8_APER_SIZE:
				size = A_SIZE_8(temp)->size;
				page_order =
				    A_SIZE_8(temp)->page_order;
				num_entries =
				    A_SIZE_8(temp)->num_entries;
				break;
			case U16_APER_SIZE:
				size = A_SIZE_16(temp)->size;
				page_order = A_SIZE_16(temp)->page_order;
				num_entries = A_SIZE_16(temp)->num_entries;
				break;
			case U32_APER_SIZE:
				size = A_SIZE_32(temp)->size;
				page_order = A_SIZE_32(temp)->page_order;
				num_entries = A_SIZE_32(temp)->num_entries;
				break;
			/* This case will never really happen. */
			case FIXED_APER_SIZE:
			case LVL2_APER_SIZE:
			default:
				size = page_order = num_entries = 0;
				break;
			}

			table = alloc_gatt_pages(page_order);

			if (table == NULL) {
				i++;
				switch (bridge->driver->size_type) {
				case U8_APER_SIZE:
					bridge->current_size = A_IDX8(bridge);
					break;
				case U16_APER_SIZE:
					bridge->current_size = A_IDX16(bridge);
					break;
				case U32_APER_SIZE:
					bridge->current_size = A_IDX32(bridge);
					break;
				/* These cases will never really happen. */
				case FIXED_APER_SIZE:
				case LVL2_APER_SIZE:
				default:
					break;
				}
				temp = bridge->current_size;
			} else {
				bridge->aperture_size_idx = i;
			}
		} while (!table && (i < bridge->driver->num_aperture_sizes));
	} else {
		size = ((struct aper_size_info_fixed *) temp)->size;
		page_order = ((struct aper_size_info_fixed *) temp)->page_order;
		num_entries = ((struct aper_size_info_fixed *) temp)->num_entries;
		table = alloc_gatt_pages(page_order);
	}

	if (table == NULL)
		return -ENOMEM;

	table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);

	for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
		SetPageReserved(page);

	bridge->gatt_table_real = (u32 *) table;
	agp_gatt_table = (void *)table;

	bridge->driver->cache_flush();
#ifdef CONFIG_X86
	if (set_memory_uc((unsigned long)table, 1 << page_order))
		printk(KERN_WARNING "Could not set GATT table memory to UC!\n");

	bridge->gatt_table = (u32 __iomem *)table;
#else
	bridge->gatt_table = ioremap_nocache(virt_to_phys(table),
					     (PAGE_SIZE * (1 << page_order)));
	bridge->driver->cache_flush();
#endif

	if (bridge->gatt_table == NULL) {
		for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
			ClearPageReserved(page);

		free_gatt_pages(table, page_order);

		return -ENOMEM;
	}
	bridge->gatt_bus_addr = virt_to_phys(bridge->gatt_table_real);

	/* AK: bogus, should encode addresses > 4GB */
	for (i = 0; i < num_entries; i++) {
		writel(bridge->scratch_page, bridge->gatt_table+i);
		readl(bridge->gatt_table+i);	/* PCI Posting. */
	}

	return 0;
}
EXPORT_SYMBOL(agp_generic_create_gatt_table);
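
/*
 * Illustrative sketch (not part of the driver): the relationship between the
 * aperture size chosen above and the GATT that backs it. Each 4-byte GATT
 * entry translates one aperture page, so the table needs
 * num_entries * sizeof(u32) bytes, rounded up to a power-of-two number of
 * pages (page_order). The helper name example_gatt_pages() is hypothetical.
 */
#if 0	/* example only, never compiled */
static unsigned long example_gatt_pages(unsigned long aperture_bytes)
{
	unsigned long num_entries = aperture_bytes >> PAGE_SHIFT;
	unsigned long gatt_bytes = num_entries * sizeof(u32);

	/* e.g. a 256 MB aperture -> 65536 entries -> 256 KB -> 64 pages */
	return DIV_ROUND_UP(gatt_bytes, PAGE_SIZE);
}
#endif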

int agp_generic_free_gatt_table(struct agp_bridge_data *bridge)
{
	int page_order;
	char *table, *table_end;
	void *temp;
	struct page *page;

	temp = bridge->current_size;

	switch (bridge->driver->size_type) {
	case U8_APER_SIZE:
		page_order = A_SIZE_8(temp)->page_order;
		break;
	case U16_APER_SIZE:
		page_order = A_SIZE_16(temp)->page_order;
		break;
	case U32_APER_SIZE:
		page_order = A_SIZE_32(temp)->page_order;
		break;
	case FIXED_APER_SIZE:
		page_order = A_SIZE_FIX(temp)->page_order;
		break;
	case LVL2_APER_SIZE:
		/* The generic routines can't deal with 2 level gatt's */
		return -EINVAL;
	default:
		page_order = 0;
		break;
	}

	/* Do not worry about freeing memory, because if this is
	 * called, then all agp memory is deallocated and removed
	 * from the table. */

#ifdef CONFIG_X86
	set_memory_wb((unsigned long)bridge->gatt_table, 1 << page_order);
#else
	iounmap(bridge->gatt_table);
#endif
	table = (char *) bridge->gatt_table_real;
	table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);

	for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
		ClearPageReserved(page);

	free_gatt_pages(bridge->gatt_table_real, page_order);

	agp_gatt_table = NULL;
	bridge->gatt_table = NULL;
	bridge->gatt_table_real = NULL;
	bridge->gatt_bus_addr = 0;

	return 0;
}
EXPORT_SYMBOL(agp_generic_free_gatt_table);


int agp_generic_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
{
	int num_entries;
	size_t i;
	off_t j;
	void *temp;
	struct agp_bridge_data *bridge;
	int mask_type;

	bridge = mem->bridge;
	if (!bridge)
		return -EINVAL;

	if (mem->page_count == 0)
		return 0;

	temp = bridge->current_size;

	switch (bridge->driver->size_type) {
	case U8_APER_SIZE:
		num_entries = A_SIZE_8(temp)->num_entries;
		break;
	case U16_APER_SIZE:
		num_entries = A_SIZE_16(temp)->num_entries;
		break;
	case U32_APER_SIZE:
		num_entries = A_SIZE_32(temp)->num_entries;
		break;
	case FIXED_APER_SIZE:
		num_entries = A_SIZE_FIX(temp)->num_entries;
		break;
	case LVL2_APER_SIZE:
		/* The generic routines can't deal with 2 level gatt's */
		return -EINVAL;
	default:
		num_entries = 0;
		break;
	}

	num_entries -= agp_memory_reserved/PAGE_SIZE;
	if (num_entries < 0)
		num_entries = 0;

	if (type != mem->type)
		return -EINVAL;

	mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
	if (mask_type != 0) {
		/* The generic routines know nothing of memory types */
		return -EINVAL;
	}

	if (((pg_start + mem->page_count) > num_entries) ||
	    ((pg_start + mem->page_count) < pg_start))
		return -EINVAL;

	j = pg_start;

	while (j < (pg_start + mem->page_count)) {
		if (!PGE_EMPTY(bridge, readl(bridge->gatt_table+j)))
			return -EBUSY;
		j++;
	}

	if (!mem->is_flushed) {
		bridge->driver->cache_flush();
		mem->is_flushed = true;
	}

	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		writel(bridge->driver->mask_memory(bridge,
						   page_to_phys(mem->pages[i]),
						   mask_type),
		       bridge->gatt_table+j);
	}
	readl(bridge->gatt_table+j-1);	/* PCI Posting. */

	bridge->driver->tlb_flush(mem);
	return 0;
}
EXPORT_SYMBOL(agp_generic_insert_memory);


int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
{
	size_t i;
	struct agp_bridge_data *bridge;
	int mask_type, num_entries;

	bridge = mem->bridge;
	if (!bridge)
		return -EINVAL;

	if (mem->page_count == 0)
		return 0;

	if (type != mem->type)
		return -EINVAL;

	num_entries = agp_num_entries();
	if (((pg_start + mem->page_count) > num_entries) ||
	    ((pg_start + mem->page_count) < pg_start))
		return -EINVAL;

	mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
	if (mask_type != 0) {
		/* The generic routines know nothing of memory types */
		return -EINVAL;
	}

	/* AK: bogus, should encode addresses > 4GB */
	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
		writel(bridge->scratch_page, bridge->gatt_table+i);
	}
	readl(bridge->gatt_table+i-1);	/* PCI Posting. */

	bridge->driver->tlb_flush(mem);
	return 0;
}
EXPORT_SYMBOL(agp_generic_remove_memory);

struct agp_memory *agp_generic_alloc_by_type(size_t page_count, int type)
{
	return NULL;
}
EXPORT_SYMBOL(agp_generic_alloc_by_type);

void agp_generic_free_by_type(struct agp_memory *curr)
{
	agp_free_page_array(curr);
	agp_free_key(curr->key);
	kfree(curr);
}
EXPORT_SYMBOL(agp_generic_free_by_type);

struct agp_memory *agp_generic_alloc_user(size_t page_count, int type)
{
	struct agp_memory *new;
	int i;
	int pages;

	pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;
	new = agp_create_user_memory(page_count);
	if (new == NULL)
		return NULL;

	for (i = 0; i < page_count; i++)
		new->pages[i] = NULL;
	new->page_count = 0;
	new->type = type;
	new->num_scratch_pages = pages;

	return new;
}
EXPORT_SYMBOL(agp_generic_alloc_user);
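
/*
 * Illustrative sketch (not part of the driver): how a chipset driver
 * typically consumes the generic helpers exported above, by pointing the
 * relevant struct agp_bridge_driver hooks at them and only overriding what
 * the hardware requires. The structure below is hypothetical; real examples
 * live in the per-chipset files next to this one.
 */
#if 0	/* example only, never compiled */
static const struct agp_bridge_driver example_driver = {
	.owner			= THIS_MODULE,
	.size_type		= U16_APER_SIZE,
	.create_gatt_table	= agp_generic_create_gatt_table,
	.free_gatt_table	= agp_generic_free_gatt_table,
	.insert_memory		= agp_generic_insert_memory,
	.remove_memory		= agp_generic_remove_memory,
	.alloc_by_type		= agp_generic_alloc_by_type,
	.free_by_type		= agp_generic_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_destroy_page	= agp_generic_destroy_page,
	.mask_memory		= agp_generic_mask_memory,
	.agp_type_to_mask_type	= agp_generic_type_to_mask_type,
	.agp_enable		= agp_generic_enable,
	.cache_flush		= global_cache_flush,
	.tlb_flush		= agp3_generic_tlbflush,
};
#endif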

/*
 * Basic Page Allocation Routines -
 * These routines handle page allocation and by default they reserve the
 * allocated memory. They also handle incrementing the current_memory_agp
 * value, which is checked against a maximum value.
 */

int agp_generic_alloc_pages(struct agp_bridge_data *bridge, struct agp_memory *mem, size_t num_pages)
{
	struct page *page;
	int i, ret = -ENOMEM;

	for (i = 0; i < num_pages; i++) {
		page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
		/* agp_free_memory() needs gart address */
		if (page == NULL)
			goto out;

#ifndef CONFIG_X86
		map_page_into_agp(page);
#endif
		get_page(page);
		atomic_inc(&agp_bridge->current_memory_agp);

		mem->pages[i] = page;
		mem->page_count++;
	}

#ifdef CONFIG_X86
	set_pages_array_uc(mem->pages, num_pages);
#endif
	ret = 0;
out:
	return ret;
}
EXPORT_SYMBOL(agp_generic_alloc_pages);

struct page *agp_generic_alloc_page(struct agp_bridge_data *bridge)
{
	struct page *page;

	page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
	if (page == NULL)
		return NULL;

	map_page_into_agp(page);

	get_page(page);
	atomic_inc(&agp_bridge->current_memory_agp);
	return page;
}
EXPORT_SYMBOL(agp_generic_alloc_page);

void agp_generic_destroy_pages(struct agp_memory *mem)
{
	int i;
	struct page *page;

	if (!mem)
		return;

#ifdef CONFIG_X86
	set_pages_array_wb(mem->pages, mem->page_count);
#endif

	for (i = 0; i < mem->page_count; i++) {
		page = mem->pages[i];

#ifndef CONFIG_X86
		unmap_page_from_agp(page);
#endif
		put_page(page);
		__free_page(page);
		atomic_dec(&agp_bridge->current_memory_agp);
		mem->pages[i] = NULL;
	}
}
EXPORT_SYMBOL(agp_generic_destroy_pages);

void agp_generic_destroy_page(struct page *page, int flags)
{
	if (page == NULL)
		return;

	if (flags & AGP_PAGE_DESTROY_UNMAP)
		unmap_page_from_agp(page);

	if (flags & AGP_PAGE_DESTROY_FREE) {
		put_page(page);
		__free_page(page);
		atomic_dec(&agp_bridge->current_memory_agp);
	}
}
EXPORT_SYMBOL(agp_generic_destroy_page);

/* End Basic Page Allocation Routines */
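
/*
 * Illustrative sketch (not part of the driver): the AGP_PAGE_DESTROY_* flags
 * accepted by agp_generic_destroy_page() are meant to be used in two passes,
 * exactly as agp_free_memory() does earlier in this file: first every page
 * is unmapped from the AGP aperture (UNMAP), and only then are the pages
 * returned to the allocator (FREE). The helper name example_destroy_all()
 * is hypothetical.
 */
#if 0	/* example only, never compiled */
static void example_destroy_all(struct agp_memory *mem)
{
	size_t i;

	for (i = 0; i < mem->page_count; i++)
		agp_generic_destroy_page(mem->pages[i], AGP_PAGE_DESTROY_UNMAP);
	for (i = 0; i < mem->page_count; i++)
		agp_generic_destroy_page(mem->pages[i], AGP_PAGE_DESTROY_FREE);
}
#endif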


/**
 * agp_enable - initialise the agp point-to-point connection.
 *
 * @bridge: an agp_bridge_data struct allocated for the AGP host bridge.
 * @mode: agp mode register value to configure with.
 */
void agp_enable(struct agp_bridge_data *bridge, u32 mode)
{
	if (!bridge)
		return;
	bridge->driver->agp_enable(bridge, mode);
}
EXPORT_SYMBOL(agp_enable);

/* When we remove the global variable agp_bridge from all drivers
 * then agp_alloc_bridge and agp_generic_find_bridge need to be updated
 */

struct agp_bridge_data *agp_generic_find_bridge(struct pci_dev *pdev)
{
	if (list_empty(&agp_bridges))
		return NULL;

	return agp_bridge;
}

static void ipi_handler(void *null)
{
	flush_agp_cache();
}

void global_cache_flush(void)
{
	on_each_cpu(ipi_handler, NULL, 1);
}
EXPORT_SYMBOL(global_cache_flush);

unsigned long agp_generic_mask_memory(struct agp_bridge_data *bridge,
				      dma_addr_t addr, int type)
{
	/* memory type is ignored in the generic routine */
	if (bridge->driver->masks)
		return addr | bridge->driver->masks[0].mask;
	else
		return addr;
}
EXPORT_SYMBOL(agp_generic_mask_memory);

int agp_generic_type_to_mask_type(struct agp_bridge_data *bridge,
				  int type)
{
	if (type >= AGP_USER_TYPES)
		return 0;
	return type;
}
EXPORT_SYMBOL(agp_generic_type_to_mask_type);

/*
 * These functions are implemented according to the AGPv3 spec,
 * which covers implementation details that had previously been
 * left open.
 */

int agp3_generic_fetch_size(void)
{
	u16 temp_size;
	int i;
	struct aper_size_info_16 *values;

	pci_read_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, &temp_size);
	values = A_SIZE_16(agp_bridge->driver->aperture_sizes);

	for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
		if (temp_size == values[i].size_value) {
			agp_bridge->previous_size =
				agp_bridge->current_size = (void *) (values + i);

			agp_bridge->aperture_size_idx = i;
			return values[i].size;
		}
	}
	return 0;
}
EXPORT_SYMBOL(agp3_generic_fetch_size);

void agp3_generic_tlbflush(struct agp_memory *mem)
{
	u32 ctrl;
	pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl);
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_GTLBEN);
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl);
}
EXPORT_SYMBOL(agp3_generic_tlbflush);

int agp3_generic_configure(void)
{
	u32 temp;
	struct aper_size_info_16 *current_size;

	current_size = A_SIZE_16(agp_bridge->current_size);

	agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
						    AGP_APERTURE_BAR);

	/* set aperture size */
	pci_write_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, current_size->size_value);
	/* set gart pointer */
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPGARTLO, agp_bridge->gatt_bus_addr);
	/* enable aperture and GTLB */
	pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &temp);
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, temp | AGPCTRL_APERENB | AGPCTRL_GTLBEN);
	return 0;
}
EXPORT_SYMBOL(agp3_generic_configure);

void agp3_generic_cleanup(void)
{
	u32 ctrl;
	pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl);
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_APERENB);
}
EXPORT_SYMBOL(agp3_generic_cleanup);

const struct aper_size_info_16 agp3_generic_sizes[AGP_GENERIC_SIZES_ENTRIES] =
{
	{4096, 1048576, 10, 0x000},
	{2048,  524288,  9, 0x800},
	{1024,  262144,  8, 0xc00},
	{ 512,  131072,  7, 0xe00},
	{ 256,   65536,  6, 0xf00},
	{ 128,   32768,  5, 0xf20},
	{  64,   16384,  4, 0xf30},
	{  32,    8192,  3, 0xf38},
	{  16,    4096,  2, 0xf3c},
	{   8,    2048,  1, 0xf3e},
	{   4,    1024,  0, 0xf3f}
};
EXPORT_SYMBOL(agp3_generic_sizes);
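
/*
 * Worked example (illustrative only) for the agp3_generic_sizes table above.
 * Each row is {aperture size in MB, GATT entries, GATT page order, value
 * written to the AGPAPSIZE register}. With 4 KB pages, the 4096 MB row works
 * out as: 4096 MB / 4 KB = 1048576 entries; at 4 bytes per entry the GATT
 * occupies 4 MB, i.e. 1024 pages, which is an allocation order of 10. The
 * 256 MB row follows the same arithmetic: 65536 entries, a 256 KB table,
 * order 6.
 */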