1 /* 2 * AGPGART driver. 3 * Copyright (C) 2004 Silicon Graphics, Inc. 4 * Copyright (C) 2002-2005 Dave Jones. 5 * Copyright (C) 1999 Jeff Hartmann. 6 * Copyright (C) 1999 Precision Insight, Inc. 7 * Copyright (C) 1999 Xi Graphics, Inc. 8 * 9 * Permission is hereby granted, free of charge, to any person obtaining a 10 * copy of this software and associated documentation files (the "Software"), 11 * to deal in the Software without restriction, including without limitation 12 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 13 * and/or sell copies of the Software, and to permit persons to whom the 14 * Software is furnished to do so, subject to the following conditions: 15 * 16 * The above copyright notice and this permission notice shall be included 17 * in all copies or substantial portions of the Software. 18 * 19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 22 * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, 23 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 24 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE 25 * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 26 * 27 * TODO: 28 * - Allocate more than order 0 pages to avoid too much linear map splitting. 29 */ 30 #include <linux/module.h> 31 #include <linux/pci.h> 32 #include <linux/init.h> 33 #include <linux/pagemap.h> 34 #include <linux/miscdevice.h> 35 #include <linux/pm.h> 36 #include <linux/agp_backend.h> 37 #include <linux/vmalloc.h> 38 #include <linux/dma-mapping.h> 39 #include <linux/mm.h> 40 #include <linux/sched.h> 41 #include <linux/slab.h> 42 #include <asm/io.h> 43 #include <asm/cacheflush.h> 44 #include <asm/pgtable.h> 45 #include "agp.h" 46 47 __u32 *agp_gatt_table; 48 int agp_memory_reserved; 49 50 /* 51 * Needed by the Nforce GART driver for the time being. Would be 52 * nice to do this some other way instead of needing this export. 53 */ 54 EXPORT_SYMBOL_GPL(agp_memory_reserved); 55 56 /* 57 * Generic routines for handling agp_memory structures - 58 * They use the basic page allocation routines to do the brunt of the work. 59 */ 60 61 void agp_free_key(int key) 62 { 63 if (key < 0) 64 return; 65 66 if (key < MAXKEY) 67 clear_bit(key, agp_bridge->key_list); 68 } 69 EXPORT_SYMBOL(agp_free_key); 70 71 72 static int agp_get_key(void) 73 { 74 int bit; 75 76 bit = find_first_zero_bit(agp_bridge->key_list, MAXKEY); 77 if (bit < MAXKEY) { 78 set_bit(bit, agp_bridge->key_list); 79 return bit; 80 } 81 return -1; 82 } 83 84 /* 85 * Use kmalloc if possible for the page list. Otherwise fall back to 86 * vmalloc. This speeds things up and also saves memory for small AGP 87 * regions. 88 */ 89 90 void agp_alloc_page_array(size_t size, struct agp_memory *mem) 91 { 92 mem->pages = NULL; 93 94 if (size <= 2*PAGE_SIZE) 95 mem->pages = kmalloc(size, GFP_KERNEL | __GFP_NOWARN); 96 if (mem->pages == NULL) { 97 mem->pages = vmalloc(size); 98 } 99 } 100 EXPORT_SYMBOL(agp_alloc_page_array); 101 102 void agp_free_page_array(struct agp_memory *mem) 103 { 104 if (is_vmalloc_addr(mem->pages)) { 105 vfree(mem->pages); 106 } else { 107 kfree(mem->pages); 108 } 109 } 110 EXPORT_SYMBOL(agp_free_page_array); 111 112 113 static struct agp_memory *agp_create_user_memory(unsigned long num_agp_pages) 114 { 115 struct agp_memory *new; 116 unsigned long alloc_size = num_agp_pages*sizeof(struct page *); 117 118 if (INT_MAX/sizeof(struct page *) < num_agp_pages) 119 return NULL; 120 121 new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL); 122 if (new == NULL) 123 return NULL; 124 125 new->key = agp_get_key(); 126 127 if (new->key < 0) { 128 kfree(new); 129 return NULL; 130 } 131 132 agp_alloc_page_array(alloc_size, new); 133 134 if (new->pages == NULL) { 135 agp_free_key(new->key); 136 kfree(new); 137 return NULL; 138 } 139 new->num_scratch_pages = 0; 140 return new; 141 } 142 143 struct agp_memory *agp_create_memory(int scratch_pages) 144 { 145 struct agp_memory *new; 146 147 new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL); 148 if (new == NULL) 149 return NULL; 150 151 new->key = agp_get_key(); 152 153 if (new->key < 0) { 154 kfree(new); 155 return NULL; 156 } 157 158 agp_alloc_page_array(PAGE_SIZE * scratch_pages, new); 159 160 if (new->pages == NULL) { 161 agp_free_key(new->key); 162 kfree(new); 163 return NULL; 164 } 165 new->num_scratch_pages = scratch_pages; 166 new->type = AGP_NORMAL_MEMORY; 167 return new; 168 } 169 EXPORT_SYMBOL(agp_create_memory); 170 171 /** 172 * agp_free_memory - free memory associated with an agp_memory pointer. 173 * 174 * @curr: agp_memory pointer to be freed. 175 * 176 * It is the only function that can be called when the backend is not owned 177 * by the caller. (So it can free memory on client death.) 178 */ 179 void agp_free_memory(struct agp_memory *curr) 180 { 181 size_t i; 182 183 if (curr == NULL) 184 return; 185 186 if (curr->is_bound) 187 agp_unbind_memory(curr); 188 189 if (curr->type >= AGP_USER_TYPES) { 190 agp_generic_free_by_type(curr); 191 return; 192 } 193 194 if (curr->type != 0) { 195 curr->bridge->driver->free_by_type(curr); 196 return; 197 } 198 if (curr->page_count != 0) { 199 if (curr->bridge->driver->agp_destroy_pages) { 200 curr->bridge->driver->agp_destroy_pages(curr); 201 } else { 202 203 for (i = 0; i < curr->page_count; i++) { 204 curr->bridge->driver->agp_destroy_page( 205 curr->pages[i], 206 AGP_PAGE_DESTROY_UNMAP); 207 } 208 for (i = 0; i < curr->page_count; i++) { 209 curr->bridge->driver->agp_destroy_page( 210 curr->pages[i], 211 AGP_PAGE_DESTROY_FREE); 212 } 213 } 214 } 215 agp_free_key(curr->key); 216 agp_free_page_array(curr); 217 kfree(curr); 218 } 219 EXPORT_SYMBOL(agp_free_memory); 220 221 #define ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(unsigned long)) 222 223 /** 224 * agp_allocate_memory - allocate a group of pages of a certain type. 225 * 226 * @page_count: size_t argument of the number of pages 227 * @type: u32 argument of the type of memory to be allocated. 228 * 229 * Every agp bridge device will allow you to allocate AGP_NORMAL_MEMORY which 230 * maps to physical ram. Any other type is device dependent. 231 * 232 * It returns NULL whenever memory is unavailable. 233 */ 234 struct agp_memory *agp_allocate_memory(struct agp_bridge_data *bridge, 235 size_t page_count, u32 type) 236 { 237 int scratch_pages; 238 struct agp_memory *new; 239 size_t i; 240 int cur_memory; 241 242 if (!bridge) 243 return NULL; 244 245 cur_memory = atomic_read(&bridge->current_memory_agp); 246 if ((cur_memory + page_count > bridge->max_memory_agp) || 247 (cur_memory + page_count < page_count)) 248 return NULL; 249 250 if (type >= AGP_USER_TYPES) { 251 new = agp_generic_alloc_user(page_count, type); 252 if (new) 253 new->bridge = bridge; 254 return new; 255 } 256 257 if (type != 0) { 258 new = bridge->driver->alloc_by_type(page_count, type); 259 if (new) 260 new->bridge = bridge; 261 return new; 262 } 263 264 scratch_pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE; 265 266 new = agp_create_memory(scratch_pages); 267 268 if (new == NULL) 269 return NULL; 270 271 if (bridge->driver->agp_alloc_pages) { 272 if (bridge->driver->agp_alloc_pages(bridge, new, page_count)) { 273 agp_free_memory(new); 274 return NULL; 275 } 276 new->bridge = bridge; 277 return new; 278 } 279 280 for (i = 0; i < page_count; i++) { 281 struct page *page = bridge->driver->agp_alloc_page(bridge); 282 283 if (page == NULL) { 284 agp_free_memory(new); 285 return NULL; 286 } 287 new->pages[i] = page; 288 new->page_count++; 289 } 290 new->bridge = bridge; 291 292 return new; 293 } 294 EXPORT_SYMBOL(agp_allocate_memory); 295 296 297 /* End - Generic routines for handling agp_memory structures */ 298 299 300 static int agp_return_size(void) 301 { 302 int current_size; 303 void *temp; 304 305 temp = agp_bridge->current_size; 306 307 switch (agp_bridge->driver->size_type) { 308 case U8_APER_SIZE: 309 current_size = A_SIZE_8(temp)->size; 310 break; 311 case U16_APER_SIZE: 312 current_size = A_SIZE_16(temp)->size; 313 break; 314 case U32_APER_SIZE: 315 current_size = A_SIZE_32(temp)->size; 316 break; 317 case LVL2_APER_SIZE: 318 current_size = A_SIZE_LVL2(temp)->size; 319 break; 320 case FIXED_APER_SIZE: 321 current_size = A_SIZE_FIX(temp)->size; 322 break; 323 default: 324 current_size = 0; 325 break; 326 } 327 328 current_size -= (agp_memory_reserved / (1024*1024)); 329 if (current_size <0) 330 current_size = 0; 331 return current_size; 332 } 333 334 335 int agp_num_entries(void) 336 { 337 int num_entries; 338 void *temp; 339 340 temp = agp_bridge->current_size; 341 342 switch (agp_bridge->driver->size_type) { 343 case U8_APER_SIZE: 344 num_entries = A_SIZE_8(temp)->num_entries; 345 break; 346 case U16_APER_SIZE: 347 num_entries = A_SIZE_16(temp)->num_entries; 348 break; 349 case U32_APER_SIZE: 350 num_entries = A_SIZE_32(temp)->num_entries; 351 break; 352 case LVL2_APER_SIZE: 353 num_entries = A_SIZE_LVL2(temp)->num_entries; 354 break; 355 case FIXED_APER_SIZE: 356 num_entries = A_SIZE_FIX(temp)->num_entries; 357 break; 358 default: 359 num_entries = 0; 360 break; 361 } 362 363 num_entries -= agp_memory_reserved>>PAGE_SHIFT; 364 if (num_entries<0) 365 num_entries = 0; 366 return num_entries; 367 } 368 EXPORT_SYMBOL_GPL(agp_num_entries); 369 370 371 /** 372 * agp_copy_info - copy bridge state information 373 * 374 * @info: agp_kern_info pointer. The caller should insure that this pointer is valid. 375 * 376 * This function copies information about the agp bridge device and the state of 377 * the agp backend into an agp_kern_info pointer. 378 */ 379 int agp_copy_info(struct agp_bridge_data *bridge, struct agp_kern_info *info) 380 { 381 memset(info, 0, sizeof(struct agp_kern_info)); 382 if (!bridge) { 383 info->chipset = NOT_SUPPORTED; 384 return -EIO; 385 } 386 387 info->version.major = bridge->version->major; 388 info->version.minor = bridge->version->minor; 389 info->chipset = SUPPORTED; 390 info->device = bridge->dev; 391 if (bridge->mode & AGPSTAT_MODE_3_0) 392 info->mode = bridge->mode & ~AGP3_RESERVED_MASK; 393 else 394 info->mode = bridge->mode & ~AGP2_RESERVED_MASK; 395 info->aper_base = bridge->gart_bus_addr; 396 info->aper_size = agp_return_size(); 397 info->max_memory = bridge->max_memory_agp; 398 info->current_memory = atomic_read(&bridge->current_memory_agp); 399 info->cant_use_aperture = bridge->driver->cant_use_aperture; 400 info->vm_ops = bridge->vm_ops; 401 info->page_mask = ~0UL; 402 return 0; 403 } 404 EXPORT_SYMBOL(agp_copy_info); 405 406 /* End - Routine to copy over information structure */ 407 408 /* 409 * Routines for handling swapping of agp_memory into the GATT - 410 * These routines take agp_memory and insert them into the GATT. 411 * They call device specific routines to actually write to the GATT. 412 */ 413 414 /** 415 * agp_bind_memory - Bind an agp_memory structure into the GATT. 416 * 417 * @curr: agp_memory pointer 418 * @pg_start: an offset into the graphics aperture translation table 419 * 420 * It returns -EINVAL if the pointer == NULL. 421 * It returns -EBUSY if the area of the table requested is already in use. 422 */ 423 int agp_bind_memory(struct agp_memory *curr, off_t pg_start) 424 { 425 int ret_val; 426 427 if (curr == NULL) 428 return -EINVAL; 429 430 if (curr->is_bound) { 431 printk(KERN_INFO PFX "memory %p is already bound!\n", curr); 432 return -EINVAL; 433 } 434 if (!curr->is_flushed) { 435 curr->bridge->driver->cache_flush(); 436 curr->is_flushed = true; 437 } 438 439 ret_val = curr->bridge->driver->insert_memory(curr, pg_start, curr->type); 440 441 if (ret_val != 0) 442 return ret_val; 443 444 curr->is_bound = true; 445 curr->pg_start = pg_start; 446 spin_lock(&agp_bridge->mapped_lock); 447 list_add(&curr->mapped_list, &agp_bridge->mapped_list); 448 spin_unlock(&agp_bridge->mapped_lock); 449 450 return 0; 451 } 452 EXPORT_SYMBOL(agp_bind_memory); 453 454 455 /** 456 * agp_unbind_memory - Removes an agp_memory structure from the GATT 457 * 458 * @curr: agp_memory pointer to be removed from the GATT. 459 * 460 * It returns -EINVAL if this piece of agp_memory is not currently bound to 461 * the graphics aperture translation table or if the agp_memory pointer == NULL 462 */ 463 int agp_unbind_memory(struct agp_memory *curr) 464 { 465 int ret_val; 466 467 if (curr == NULL) 468 return -EINVAL; 469 470 if (!curr->is_bound) { 471 printk(KERN_INFO PFX "memory %p was not bound!\n", curr); 472 return -EINVAL; 473 } 474 475 ret_val = curr->bridge->driver->remove_memory(curr, curr->pg_start, curr->type); 476 477 if (ret_val != 0) 478 return ret_val; 479 480 curr->is_bound = false; 481 curr->pg_start = 0; 482 spin_lock(&curr->bridge->mapped_lock); 483 list_del(&curr->mapped_list); 484 spin_unlock(&curr->bridge->mapped_lock); 485 return 0; 486 } 487 EXPORT_SYMBOL(agp_unbind_memory); 488 489 490 /* End - Routines for handling swapping of agp_memory into the GATT */ 491 492 493 /* Generic Agp routines - Start */ 494 static void agp_v2_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_agpstat) 495 { 496 u32 tmp; 497 498 if (*requested_mode & AGP2_RESERVED_MASK) { 499 printk(KERN_INFO PFX "reserved bits set (%x) in mode 0x%x. Fixed.\n", 500 *requested_mode & AGP2_RESERVED_MASK, *requested_mode); 501 *requested_mode &= ~AGP2_RESERVED_MASK; 502 } 503 504 /* 505 * Some dumb bridges are programmed to disobey the AGP2 spec. 506 * This is likely a BIOS misprogramming rather than poweron default, or 507 * it would be a lot more common. 508 * https://bugs.freedesktop.org/show_bug.cgi?id=8816 509 * AGPv2 spec 6.1.9 states: 510 * The RATE field indicates the data transfer rates supported by this 511 * device. A.G.P. devices must report all that apply. 512 * Fix them up as best we can. 513 */ 514 switch (*bridge_agpstat & 7) { 515 case 4: 516 *bridge_agpstat |= (AGPSTAT2_2X | AGPSTAT2_1X); 517 printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x4 rate. " 518 "Fixing up support for x2 & x1\n"); 519 break; 520 case 2: 521 *bridge_agpstat |= AGPSTAT2_1X; 522 printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x2 rate. " 523 "Fixing up support for x1\n"); 524 break; 525 default: 526 break; 527 } 528 529 /* Check the speed bits make sense. Only one should be set. */ 530 tmp = *requested_mode & 7; 531 switch (tmp) { 532 case 0: 533 printk(KERN_INFO PFX "%s tried to set rate=x0. Setting to x1 mode.\n", current->comm); 534 *requested_mode |= AGPSTAT2_1X; 535 break; 536 case 1: 537 case 2: 538 break; 539 case 3: 540 *requested_mode &= ~(AGPSTAT2_1X); /* rate=2 */ 541 break; 542 case 4: 543 break; 544 case 5: 545 case 6: 546 case 7: 547 *requested_mode &= ~(AGPSTAT2_1X|AGPSTAT2_2X); /* rate=4*/ 548 break; 549 } 550 551 /* disable SBA if it's not supported */ 552 if (!((*bridge_agpstat & AGPSTAT_SBA) && (*vga_agpstat & AGPSTAT_SBA) && (*requested_mode & AGPSTAT_SBA))) 553 *bridge_agpstat &= ~AGPSTAT_SBA; 554 555 /* Set rate */ 556 if (!((*bridge_agpstat & AGPSTAT2_4X) && (*vga_agpstat & AGPSTAT2_4X) && (*requested_mode & AGPSTAT2_4X))) 557 *bridge_agpstat &= ~AGPSTAT2_4X; 558 559 if (!((*bridge_agpstat & AGPSTAT2_2X) && (*vga_agpstat & AGPSTAT2_2X) && (*requested_mode & AGPSTAT2_2X))) 560 *bridge_agpstat &= ~AGPSTAT2_2X; 561 562 if (!((*bridge_agpstat & AGPSTAT2_1X) && (*vga_agpstat & AGPSTAT2_1X) && (*requested_mode & AGPSTAT2_1X))) 563 *bridge_agpstat &= ~AGPSTAT2_1X; 564 565 /* Now we know what mode it should be, clear out the unwanted bits. */ 566 if (*bridge_agpstat & AGPSTAT2_4X) 567 *bridge_agpstat &= ~(AGPSTAT2_1X | AGPSTAT2_2X); /* 4X */ 568 569 if (*bridge_agpstat & AGPSTAT2_2X) 570 *bridge_agpstat &= ~(AGPSTAT2_1X | AGPSTAT2_4X); /* 2X */ 571 572 if (*bridge_agpstat & AGPSTAT2_1X) 573 *bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X); /* 1X */ 574 575 /* Apply any errata. */ 576 if (agp_bridge->flags & AGP_ERRATA_FASTWRITES) 577 *bridge_agpstat &= ~AGPSTAT_FW; 578 579 if (agp_bridge->flags & AGP_ERRATA_SBA) 580 *bridge_agpstat &= ~AGPSTAT_SBA; 581 582 if (agp_bridge->flags & AGP_ERRATA_1X) { 583 *bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X); 584 *bridge_agpstat |= AGPSTAT2_1X; 585 } 586 587 /* If we've dropped down to 1X, disable fast writes. */ 588 if (*bridge_agpstat & AGPSTAT2_1X) 589 *bridge_agpstat &= ~AGPSTAT_FW; 590 } 591 592 /* 593 * requested_mode = Mode requested by (typically) X. 594 * bridge_agpstat = PCI_AGP_STATUS from agp bridge. 595 * vga_agpstat = PCI_AGP_STATUS from graphic card. 596 */ 597 static void agp_v3_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_agpstat) 598 { 599 u32 origbridge=*bridge_agpstat, origvga=*vga_agpstat; 600 u32 tmp; 601 602 if (*requested_mode & AGP3_RESERVED_MASK) { 603 printk(KERN_INFO PFX "reserved bits set (%x) in mode 0x%x. Fixed.\n", 604 *requested_mode & AGP3_RESERVED_MASK, *requested_mode); 605 *requested_mode &= ~AGP3_RESERVED_MASK; 606 } 607 608 /* Check the speed bits make sense. */ 609 tmp = *requested_mode & 7; 610 if (tmp == 0) { 611 printk(KERN_INFO PFX "%s tried to set rate=x0. Setting to AGP3 x4 mode.\n", current->comm); 612 *requested_mode |= AGPSTAT3_4X; 613 } 614 if (tmp >= 3) { 615 printk(KERN_INFO PFX "%s tried to set rate=x%d. Setting to AGP3 x8 mode.\n", current->comm, tmp * 4); 616 *requested_mode = (*requested_mode & ~7) | AGPSTAT3_8X; 617 } 618 619 /* ARQSZ - Set the value to the maximum one. 620 * Don't allow the mode register to override values. */ 621 *bridge_agpstat = ((*bridge_agpstat & ~AGPSTAT_ARQSZ) | 622 max_t(u32,(*bridge_agpstat & AGPSTAT_ARQSZ),(*vga_agpstat & AGPSTAT_ARQSZ))); 623 624 /* Calibration cycle. 625 * Don't allow the mode register to override values. */ 626 *bridge_agpstat = ((*bridge_agpstat & ~AGPSTAT_CAL_MASK) | 627 min_t(u32,(*bridge_agpstat & AGPSTAT_CAL_MASK),(*vga_agpstat & AGPSTAT_CAL_MASK))); 628 629 /* SBA *must* be supported for AGP v3 */ 630 *bridge_agpstat |= AGPSTAT_SBA; 631 632 /* 633 * Set speed. 634 * Check for invalid speeds. This can happen when applications 635 * written before the AGP 3.0 standard pass AGP2.x modes to AGP3 hardware 636 */ 637 if (*requested_mode & AGPSTAT_MODE_3_0) { 638 /* 639 * Caller hasn't a clue what it is doing. Bridge is in 3.0 mode, 640 * have been passed a 3.0 mode, but with 2.x speed bits set. 641 * AGP2.x 4x -> AGP3.0 4x. 642 */ 643 if (*requested_mode & AGPSTAT2_4X) { 644 printk(KERN_INFO PFX "%s passes broken AGP3 flags (%x). Fixed.\n", 645 current->comm, *requested_mode); 646 *requested_mode &= ~AGPSTAT2_4X; 647 *requested_mode |= AGPSTAT3_4X; 648 } 649 } else { 650 /* 651 * The caller doesn't know what they are doing. We are in 3.0 mode, 652 * but have been passed an AGP 2.x mode. 653 * Convert AGP 1x,2x,4x -> AGP 3.0 4x. 654 */ 655 printk(KERN_INFO PFX "%s passes broken AGP2 flags (%x) in AGP3 mode. Fixed.\n", 656 current->comm, *requested_mode); 657 *requested_mode &= ~(AGPSTAT2_4X | AGPSTAT2_2X | AGPSTAT2_1X); 658 *requested_mode |= AGPSTAT3_4X; 659 } 660 661 if (*requested_mode & AGPSTAT3_8X) { 662 if (!(*bridge_agpstat & AGPSTAT3_8X)) { 663 *bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD); 664 *bridge_agpstat |= AGPSTAT3_4X; 665 printk(KERN_INFO PFX "%s requested AGPx8 but bridge not capable.\n", current->comm); 666 return; 667 } 668 if (!(*vga_agpstat & AGPSTAT3_8X)) { 669 *bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD); 670 *bridge_agpstat |= AGPSTAT3_4X; 671 printk(KERN_INFO PFX "%s requested AGPx8 but graphic card not capable.\n", current->comm); 672 return; 673 } 674 /* All set, bridge & device can do AGP x8*/ 675 *bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD); 676 goto done; 677 678 } else if (*requested_mode & AGPSTAT3_4X) { 679 *bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD); 680 *bridge_agpstat |= AGPSTAT3_4X; 681 goto done; 682 683 } else { 684 685 /* 686 * If we didn't specify an AGP mode, we see if both 687 * the graphics card, and the bridge can do x8, and use if so. 688 * If not, we fall back to x4 mode. 689 */ 690 if ((*bridge_agpstat & AGPSTAT3_8X) && (*vga_agpstat & AGPSTAT3_8X)) { 691 printk(KERN_INFO PFX "No AGP mode specified. Setting to highest mode " 692 "supported by bridge & card (x8).\n"); 693 *bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD); 694 *vga_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD); 695 } else { 696 printk(KERN_INFO PFX "Fell back to AGPx4 mode because "); 697 if (!(*bridge_agpstat & AGPSTAT3_8X)) { 698 printk(KERN_INFO PFX "bridge couldn't do x8. bridge_agpstat:%x (orig=%x)\n", 699 *bridge_agpstat, origbridge); 700 *bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD); 701 *bridge_agpstat |= AGPSTAT3_4X; 702 } 703 if (!(*vga_agpstat & AGPSTAT3_8X)) { 704 printk(KERN_INFO PFX "graphics card couldn't do x8. vga_agpstat:%x (orig=%x)\n", 705 *vga_agpstat, origvga); 706 *vga_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD); 707 *vga_agpstat |= AGPSTAT3_4X; 708 } 709 } 710 } 711 712 done: 713 /* Apply any errata. */ 714 if (agp_bridge->flags & AGP_ERRATA_FASTWRITES) 715 *bridge_agpstat &= ~AGPSTAT_FW; 716 717 if (agp_bridge->flags & AGP_ERRATA_SBA) 718 *bridge_agpstat &= ~AGPSTAT_SBA; 719 720 if (agp_bridge->flags & AGP_ERRATA_1X) { 721 *bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X); 722 *bridge_agpstat |= AGPSTAT2_1X; 723 } 724 } 725 726 727 /** 728 * agp_collect_device_status - determine correct agp_cmd from various agp_stat's 729 * @bridge: an agp_bridge_data struct allocated for the AGP host bridge. 730 * @requested_mode: requested agp_stat from userspace (Typically from X) 731 * @bridge_agpstat: current agp_stat from AGP bridge. 732 * 733 * This function will hunt for an AGP graphics card, and try to match 734 * the requested mode to the capabilities of both the bridge and the card. 735 */ 736 u32 agp_collect_device_status(struct agp_bridge_data *bridge, u32 requested_mode, u32 bridge_agpstat) 737 { 738 struct pci_dev *device = NULL; 739 u32 vga_agpstat; 740 u8 cap_ptr; 741 742 for (;;) { 743 device = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, device); 744 if (!device) { 745 printk(KERN_INFO PFX "Couldn't find an AGP VGA controller.\n"); 746 return 0; 747 } 748 cap_ptr = pci_find_capability(device, PCI_CAP_ID_AGP); 749 if (cap_ptr) 750 break; 751 } 752 753 /* 754 * Ok, here we have a AGP device. Disable impossible 755 * settings, and adjust the readqueue to the minimum. 756 */ 757 pci_read_config_dword(device, cap_ptr+PCI_AGP_STATUS, &vga_agpstat); 758 759 /* adjust RQ depth */ 760 bridge_agpstat = ((bridge_agpstat & ~AGPSTAT_RQ_DEPTH) | 761 min_t(u32, (requested_mode & AGPSTAT_RQ_DEPTH), 762 min_t(u32, (bridge_agpstat & AGPSTAT_RQ_DEPTH), (vga_agpstat & AGPSTAT_RQ_DEPTH)))); 763 764 /* disable FW if it's not supported */ 765 if (!((bridge_agpstat & AGPSTAT_FW) && 766 (vga_agpstat & AGPSTAT_FW) && 767 (requested_mode & AGPSTAT_FW))) 768 bridge_agpstat &= ~AGPSTAT_FW; 769 770 /* Check to see if we are operating in 3.0 mode */ 771 if (agp_bridge->mode & AGPSTAT_MODE_3_0) 772 agp_v3_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat); 773 else 774 agp_v2_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat); 775 776 pci_dev_put(device); 777 return bridge_agpstat; 778 } 779 EXPORT_SYMBOL(agp_collect_device_status); 780 781 782 void agp_device_command(u32 bridge_agpstat, bool agp_v3) 783 { 784 struct pci_dev *device = NULL; 785 int mode; 786 787 mode = bridge_agpstat & 0x7; 788 if (agp_v3) 789 mode *= 4; 790 791 for_each_pci_dev(device) { 792 u8 agp = pci_find_capability(device, PCI_CAP_ID_AGP); 793 if (!agp) 794 continue; 795 796 dev_info(&device->dev, "putting AGP V%d device into %dx mode\n", 797 agp_v3 ? 3 : 2, mode); 798 pci_write_config_dword(device, agp + PCI_AGP_COMMAND, bridge_agpstat); 799 } 800 } 801 EXPORT_SYMBOL(agp_device_command); 802 803 804 void get_agp_version(struct agp_bridge_data *bridge) 805 { 806 u32 ncapid; 807 808 /* Exit early if already set by errata workarounds. */ 809 if (bridge->major_version != 0) 810 return; 811 812 pci_read_config_dword(bridge->dev, bridge->capndx, &ncapid); 813 bridge->major_version = (ncapid >> AGP_MAJOR_VERSION_SHIFT) & 0xf; 814 bridge->minor_version = (ncapid >> AGP_MINOR_VERSION_SHIFT) & 0xf; 815 } 816 EXPORT_SYMBOL(get_agp_version); 817 818 819 void agp_generic_enable(struct agp_bridge_data *bridge, u32 requested_mode) 820 { 821 u32 bridge_agpstat, temp; 822 823 get_agp_version(agp_bridge); 824 825 dev_info(&agp_bridge->dev->dev, "AGP %d.%d bridge\n", 826 agp_bridge->major_version, agp_bridge->minor_version); 827 828 pci_read_config_dword(agp_bridge->dev, 829 agp_bridge->capndx + PCI_AGP_STATUS, &bridge_agpstat); 830 831 bridge_agpstat = agp_collect_device_status(agp_bridge, requested_mode, bridge_agpstat); 832 if (bridge_agpstat == 0) 833 /* Something bad happened. FIXME: Return error code? */ 834 return; 835 836 bridge_agpstat |= AGPSTAT_AGP_ENABLE; 837 838 /* Do AGP version specific frobbing. */ 839 if (bridge->major_version >= 3) { 840 if (bridge->mode & AGPSTAT_MODE_3_0) { 841 /* If we have 3.5, we can do the isoch stuff. */ 842 if (bridge->minor_version >= 5) 843 agp_3_5_enable(bridge); 844 agp_device_command(bridge_agpstat, true); 845 return; 846 } else { 847 /* Disable calibration cycle in RX91<1> when not in AGP3.0 mode of operation.*/ 848 bridge_agpstat &= ~(7<<10) ; 849 pci_read_config_dword(bridge->dev, 850 bridge->capndx+AGPCTRL, &temp); 851 temp |= (1<<9); 852 pci_write_config_dword(bridge->dev, 853 bridge->capndx+AGPCTRL, temp); 854 855 dev_info(&bridge->dev->dev, "bridge is in legacy mode, falling back to 2.x\n"); 856 } 857 } 858 859 /* AGP v<3 */ 860 agp_device_command(bridge_agpstat, false); 861 } 862 EXPORT_SYMBOL(agp_generic_enable); 863 864 865 int agp_generic_create_gatt_table(struct agp_bridge_data *bridge) 866 { 867 char *table; 868 char *table_end; 869 int size; 870 int page_order; 871 int num_entries; 872 int i; 873 void *temp; 874 struct page *page; 875 876 /* The generic routines can't handle 2 level gatt's */ 877 if (bridge->driver->size_type == LVL2_APER_SIZE) 878 return -EINVAL; 879 880 table = NULL; 881 i = bridge->aperture_size_idx; 882 temp = bridge->current_size; 883 size = page_order = num_entries = 0; 884 885 if (bridge->driver->size_type != FIXED_APER_SIZE) { 886 do { 887 switch (bridge->driver->size_type) { 888 case U8_APER_SIZE: 889 size = A_SIZE_8(temp)->size; 890 page_order = 891 A_SIZE_8(temp)->page_order; 892 num_entries = 893 A_SIZE_8(temp)->num_entries; 894 break; 895 case U16_APER_SIZE: 896 size = A_SIZE_16(temp)->size; 897 page_order = A_SIZE_16(temp)->page_order; 898 num_entries = A_SIZE_16(temp)->num_entries; 899 break; 900 case U32_APER_SIZE: 901 size = A_SIZE_32(temp)->size; 902 page_order = A_SIZE_32(temp)->page_order; 903 num_entries = A_SIZE_32(temp)->num_entries; 904 break; 905 /* This case will never really happen. */ 906 case FIXED_APER_SIZE: 907 case LVL2_APER_SIZE: 908 default: 909 size = page_order = num_entries = 0; 910 break; 911 } 912 913 table = alloc_gatt_pages(page_order); 914 915 if (table == NULL) { 916 i++; 917 switch (bridge->driver->size_type) { 918 case U8_APER_SIZE: 919 bridge->current_size = A_IDX8(bridge); 920 break; 921 case U16_APER_SIZE: 922 bridge->current_size = A_IDX16(bridge); 923 break; 924 case U32_APER_SIZE: 925 bridge->current_size = A_IDX32(bridge); 926 break; 927 /* These cases will never really happen. */ 928 case FIXED_APER_SIZE: 929 case LVL2_APER_SIZE: 930 default: 931 break; 932 } 933 temp = bridge->current_size; 934 } else { 935 bridge->aperture_size_idx = i; 936 } 937 } while (!table && (i < bridge->driver->num_aperture_sizes)); 938 } else { 939 size = ((struct aper_size_info_fixed *) temp)->size; 940 page_order = ((struct aper_size_info_fixed *) temp)->page_order; 941 num_entries = ((struct aper_size_info_fixed *) temp)->num_entries; 942 table = alloc_gatt_pages(page_order); 943 } 944 945 if (table == NULL) 946 return -ENOMEM; 947 948 table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1); 949 950 for (page = virt_to_page(table); page <= virt_to_page(table_end); page++) 951 SetPageReserved(page); 952 953 bridge->gatt_table_real = (u32 *) table; 954 agp_gatt_table = (void *)table; 955 956 bridge->driver->cache_flush(); 957 #ifdef CONFIG_X86 958 if (set_memory_uc((unsigned long)table, 1 << page_order)) 959 printk(KERN_WARNING "Could not set GATT table memory to UC!\n"); 960 961 bridge->gatt_table = (u32 __iomem *)table; 962 #else 963 bridge->gatt_table = ioremap_nocache(virt_to_phys(table), 964 (PAGE_SIZE * (1 << page_order))); 965 bridge->driver->cache_flush(); 966 #endif 967 968 if (bridge->gatt_table == NULL) { 969 for (page = virt_to_page(table); page <= virt_to_page(table_end); page++) 970 ClearPageReserved(page); 971 972 free_gatt_pages(table, page_order); 973 974 return -ENOMEM; 975 } 976 bridge->gatt_bus_addr = virt_to_phys(bridge->gatt_table_real); 977 978 /* AK: bogus, should encode addresses > 4GB */ 979 for (i = 0; i < num_entries; i++) { 980 writel(bridge->scratch_page, bridge->gatt_table+i); 981 readl(bridge->gatt_table+i); /* PCI Posting. */ 982 } 983 984 return 0; 985 } 986 EXPORT_SYMBOL(agp_generic_create_gatt_table); 987 988 int agp_generic_free_gatt_table(struct agp_bridge_data *bridge) 989 { 990 int page_order; 991 char *table, *table_end; 992 void *temp; 993 struct page *page; 994 995 temp = bridge->current_size; 996 997 switch (bridge->driver->size_type) { 998 case U8_APER_SIZE: 999 page_order = A_SIZE_8(temp)->page_order; 1000 break; 1001 case U16_APER_SIZE: 1002 page_order = A_SIZE_16(temp)->page_order; 1003 break; 1004 case U32_APER_SIZE: 1005 page_order = A_SIZE_32(temp)->page_order; 1006 break; 1007 case FIXED_APER_SIZE: 1008 page_order = A_SIZE_FIX(temp)->page_order; 1009 break; 1010 case LVL2_APER_SIZE: 1011 /* The generic routines can't deal with 2 level gatt's */ 1012 return -EINVAL; 1013 default: 1014 page_order = 0; 1015 break; 1016 } 1017 1018 /* Do not worry about freeing memory, because if this is 1019 * called, then all agp memory is deallocated and removed 1020 * from the table. */ 1021 1022 #ifdef CONFIG_X86 1023 set_memory_wb((unsigned long)bridge->gatt_table, 1 << page_order); 1024 #else 1025 iounmap(bridge->gatt_table); 1026 #endif 1027 table = (char *) bridge->gatt_table_real; 1028 table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1); 1029 1030 for (page = virt_to_page(table); page <= virt_to_page(table_end); page++) 1031 ClearPageReserved(page); 1032 1033 free_gatt_pages(bridge->gatt_table_real, page_order); 1034 1035 agp_gatt_table = NULL; 1036 bridge->gatt_table = NULL; 1037 bridge->gatt_table_real = NULL; 1038 bridge->gatt_bus_addr = 0; 1039 1040 return 0; 1041 } 1042 EXPORT_SYMBOL(agp_generic_free_gatt_table); 1043 1044 1045 int agp_generic_insert_memory(struct agp_memory * mem, off_t pg_start, int type) 1046 { 1047 int num_entries; 1048 size_t i; 1049 off_t j; 1050 void *temp; 1051 struct agp_bridge_data *bridge; 1052 int mask_type; 1053 1054 bridge = mem->bridge; 1055 if (!bridge) 1056 return -EINVAL; 1057 1058 if (mem->page_count == 0) 1059 return 0; 1060 1061 temp = bridge->current_size; 1062 1063 switch (bridge->driver->size_type) { 1064 case U8_APER_SIZE: 1065 num_entries = A_SIZE_8(temp)->num_entries; 1066 break; 1067 case U16_APER_SIZE: 1068 num_entries = A_SIZE_16(temp)->num_entries; 1069 break; 1070 case U32_APER_SIZE: 1071 num_entries = A_SIZE_32(temp)->num_entries; 1072 break; 1073 case FIXED_APER_SIZE: 1074 num_entries = A_SIZE_FIX(temp)->num_entries; 1075 break; 1076 case LVL2_APER_SIZE: 1077 /* The generic routines can't deal with 2 level gatt's */ 1078 return -EINVAL; 1079 default: 1080 num_entries = 0; 1081 break; 1082 } 1083 1084 num_entries -= agp_memory_reserved/PAGE_SIZE; 1085 if (num_entries < 0) num_entries = 0; 1086 1087 if (type != mem->type) 1088 return -EINVAL; 1089 1090 mask_type = bridge->driver->agp_type_to_mask_type(bridge, type); 1091 if (mask_type != 0) { 1092 /* The generic routines know nothing of memory types */ 1093 return -EINVAL; 1094 } 1095 1096 if (((pg_start + mem->page_count) > num_entries) || 1097 ((pg_start + mem->page_count) < pg_start)) 1098 return -EINVAL; 1099 1100 j = pg_start; 1101 1102 while (j < (pg_start + mem->page_count)) { 1103 if (!PGE_EMPTY(bridge, readl(bridge->gatt_table+j))) 1104 return -EBUSY; 1105 j++; 1106 } 1107 1108 if (!mem->is_flushed) { 1109 bridge->driver->cache_flush(); 1110 mem->is_flushed = true; 1111 } 1112 1113 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { 1114 writel(bridge->driver->mask_memory(bridge, 1115 page_to_phys(mem->pages[i]), 1116 mask_type), 1117 bridge->gatt_table+j); 1118 } 1119 readl(bridge->gatt_table+j-1); /* PCI Posting. */ 1120 1121 bridge->driver->tlb_flush(mem); 1122 return 0; 1123 } 1124 EXPORT_SYMBOL(agp_generic_insert_memory); 1125 1126 1127 int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type) 1128 { 1129 size_t i; 1130 struct agp_bridge_data *bridge; 1131 int mask_type, num_entries; 1132 1133 bridge = mem->bridge; 1134 if (!bridge) 1135 return -EINVAL; 1136 1137 if (mem->page_count == 0) 1138 return 0; 1139 1140 if (type != mem->type) 1141 return -EINVAL; 1142 1143 num_entries = agp_num_entries(); 1144 if (((pg_start + mem->page_count) > num_entries) || 1145 ((pg_start + mem->page_count) < pg_start)) 1146 return -EINVAL; 1147 1148 mask_type = bridge->driver->agp_type_to_mask_type(bridge, type); 1149 if (mask_type != 0) { 1150 /* The generic routines know nothing of memory types */ 1151 return -EINVAL; 1152 } 1153 1154 /* AK: bogus, should encode addresses > 4GB */ 1155 for (i = pg_start; i < (mem->page_count + pg_start); i++) { 1156 writel(bridge->scratch_page, bridge->gatt_table+i); 1157 } 1158 readl(bridge->gatt_table+i-1); /* PCI Posting. */ 1159 1160 bridge->driver->tlb_flush(mem); 1161 return 0; 1162 } 1163 EXPORT_SYMBOL(agp_generic_remove_memory); 1164 1165 struct agp_memory *agp_generic_alloc_by_type(size_t page_count, int type) 1166 { 1167 return NULL; 1168 } 1169 EXPORT_SYMBOL(agp_generic_alloc_by_type); 1170 1171 void agp_generic_free_by_type(struct agp_memory *curr) 1172 { 1173 agp_free_page_array(curr); 1174 agp_free_key(curr->key); 1175 kfree(curr); 1176 } 1177 EXPORT_SYMBOL(agp_generic_free_by_type); 1178 1179 struct agp_memory *agp_generic_alloc_user(size_t page_count, int type) 1180 { 1181 struct agp_memory *new; 1182 int i; 1183 int pages; 1184 1185 pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE; 1186 new = agp_create_user_memory(page_count); 1187 if (new == NULL) 1188 return NULL; 1189 1190 for (i = 0; i < page_count; i++) 1191 new->pages[i] = NULL; 1192 new->page_count = 0; 1193 new->type = type; 1194 new->num_scratch_pages = pages; 1195 1196 return new; 1197 } 1198 EXPORT_SYMBOL(agp_generic_alloc_user); 1199 1200 /* 1201 * Basic Page Allocation Routines - 1202 * These routines handle page allocation and by default they reserve the allocated 1203 * memory. They also handle incrementing the current_memory_agp value, Which is checked 1204 * against a maximum value. 1205 */ 1206 1207 int agp_generic_alloc_pages(struct agp_bridge_data *bridge, struct agp_memory *mem, size_t num_pages) 1208 { 1209 struct page * page; 1210 int i, ret = -ENOMEM; 1211 1212 for (i = 0; i < num_pages; i++) { 1213 page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO); 1214 /* agp_free_memory() needs gart address */ 1215 if (page == NULL) 1216 goto out; 1217 1218 #ifndef CONFIG_X86 1219 map_page_into_agp(page); 1220 #endif 1221 get_page(page); 1222 atomic_inc(&agp_bridge->current_memory_agp); 1223 1224 mem->pages[i] = page; 1225 mem->page_count++; 1226 } 1227 1228 #ifdef CONFIG_X86 1229 set_pages_array_uc(mem->pages, num_pages); 1230 #endif 1231 ret = 0; 1232 out: 1233 return ret; 1234 } 1235 EXPORT_SYMBOL(agp_generic_alloc_pages); 1236 1237 struct page *agp_generic_alloc_page(struct agp_bridge_data *bridge) 1238 { 1239 struct page * page; 1240 1241 page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO); 1242 if (page == NULL) 1243 return NULL; 1244 1245 map_page_into_agp(page); 1246 1247 get_page(page); 1248 atomic_inc(&agp_bridge->current_memory_agp); 1249 return page; 1250 } 1251 EXPORT_SYMBOL(agp_generic_alloc_page); 1252 1253 void agp_generic_destroy_pages(struct agp_memory *mem) 1254 { 1255 int i; 1256 struct page *page; 1257 1258 if (!mem) 1259 return; 1260 1261 #ifdef CONFIG_X86 1262 set_pages_array_wb(mem->pages, mem->page_count); 1263 #endif 1264 1265 for (i = 0; i < mem->page_count; i++) { 1266 page = mem->pages[i]; 1267 1268 #ifndef CONFIG_X86 1269 unmap_page_from_agp(page); 1270 #endif 1271 put_page(page); 1272 __free_page(page); 1273 atomic_dec(&agp_bridge->current_memory_agp); 1274 mem->pages[i] = NULL; 1275 } 1276 } 1277 EXPORT_SYMBOL(agp_generic_destroy_pages); 1278 1279 void agp_generic_destroy_page(struct page *page, int flags) 1280 { 1281 if (page == NULL) 1282 return; 1283 1284 if (flags & AGP_PAGE_DESTROY_UNMAP) 1285 unmap_page_from_agp(page); 1286 1287 if (flags & AGP_PAGE_DESTROY_FREE) { 1288 put_page(page); 1289 __free_page(page); 1290 atomic_dec(&agp_bridge->current_memory_agp); 1291 } 1292 } 1293 EXPORT_SYMBOL(agp_generic_destroy_page); 1294 1295 /* End Basic Page Allocation Routines */ 1296 1297 1298 /** 1299 * agp_enable - initialise the agp point-to-point connection. 1300 * 1301 * @mode: agp mode register value to configure with. 1302 */ 1303 void agp_enable(struct agp_bridge_data *bridge, u32 mode) 1304 { 1305 if (!bridge) 1306 return; 1307 bridge->driver->agp_enable(bridge, mode); 1308 } 1309 EXPORT_SYMBOL(agp_enable); 1310 1311 /* When we remove the global variable agp_bridge from all drivers 1312 * then agp_alloc_bridge and agp_generic_find_bridge need to be updated 1313 */ 1314 1315 struct agp_bridge_data *agp_generic_find_bridge(struct pci_dev *pdev) 1316 { 1317 if (list_empty(&agp_bridges)) 1318 return NULL; 1319 1320 return agp_bridge; 1321 } 1322 1323 static void ipi_handler(void *null) 1324 { 1325 flush_agp_cache(); 1326 } 1327 1328 void global_cache_flush(void) 1329 { 1330 if (on_each_cpu(ipi_handler, NULL, 1) != 0) 1331 panic(PFX "timed out waiting for the other CPUs!\n"); 1332 } 1333 EXPORT_SYMBOL(global_cache_flush); 1334 1335 unsigned long agp_generic_mask_memory(struct agp_bridge_data *bridge, 1336 dma_addr_t addr, int type) 1337 { 1338 /* memory type is ignored in the generic routine */ 1339 if (bridge->driver->masks) 1340 return addr | bridge->driver->masks[0].mask; 1341 else 1342 return addr; 1343 } 1344 EXPORT_SYMBOL(agp_generic_mask_memory); 1345 1346 int agp_generic_type_to_mask_type(struct agp_bridge_data *bridge, 1347 int type) 1348 { 1349 if (type >= AGP_USER_TYPES) 1350 return 0; 1351 return type; 1352 } 1353 EXPORT_SYMBOL(agp_generic_type_to_mask_type); 1354 1355 /* 1356 * These functions are implemented according to the AGPv3 spec, 1357 * which covers implementation details that had previously been 1358 * left open. 1359 */ 1360 1361 int agp3_generic_fetch_size(void) 1362 { 1363 u16 temp_size; 1364 int i; 1365 struct aper_size_info_16 *values; 1366 1367 pci_read_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, &temp_size); 1368 values = A_SIZE_16(agp_bridge->driver->aperture_sizes); 1369 1370 for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) { 1371 if (temp_size == values[i].size_value) { 1372 agp_bridge->previous_size = 1373 agp_bridge->current_size = (void *) (values + i); 1374 1375 agp_bridge->aperture_size_idx = i; 1376 return values[i].size; 1377 } 1378 } 1379 return 0; 1380 } 1381 EXPORT_SYMBOL(agp3_generic_fetch_size); 1382 1383 void agp3_generic_tlbflush(struct agp_memory *mem) 1384 { 1385 u32 ctrl; 1386 pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl); 1387 pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_GTLBEN); 1388 pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl); 1389 } 1390 EXPORT_SYMBOL(agp3_generic_tlbflush); 1391 1392 int agp3_generic_configure(void) 1393 { 1394 u32 temp; 1395 struct aper_size_info_16 *current_size; 1396 1397 current_size = A_SIZE_16(agp_bridge->current_size); 1398 1399 pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp); 1400 agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); 1401 1402 /* set aperture size */ 1403 pci_write_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, current_size->size_value); 1404 /* set gart pointer */ 1405 pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPGARTLO, agp_bridge->gatt_bus_addr); 1406 /* enable aperture and GTLB */ 1407 pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &temp); 1408 pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, temp | AGPCTRL_APERENB | AGPCTRL_GTLBEN); 1409 return 0; 1410 } 1411 EXPORT_SYMBOL(agp3_generic_configure); 1412 1413 void agp3_generic_cleanup(void) 1414 { 1415 u32 ctrl; 1416 pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl); 1417 pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_APERENB); 1418 } 1419 EXPORT_SYMBOL(agp3_generic_cleanup); 1420 1421 const struct aper_size_info_16 agp3_generic_sizes[AGP_GENERIC_SIZES_ENTRIES] = 1422 { 1423 {4096, 1048576, 10,0x000}, 1424 {2048, 524288, 9, 0x800}, 1425 {1024, 262144, 8, 0xc00}, 1426 { 512, 131072, 7, 0xe00}, 1427 { 256, 65536, 6, 0xf00}, 1428 { 128, 32768, 5, 0xf20}, 1429 { 64, 16384, 4, 0xf30}, 1430 { 32, 8192, 3, 0xf38}, 1431 { 16, 4096, 2, 0xf3c}, 1432 { 8, 2048, 1, 0xf3e}, 1433 { 4, 1024, 0, 0xf3f} 1434 }; 1435 EXPORT_SYMBOL(agp3_generic_sizes); 1436 1437