/*
 * AGPGART driver.
 * Copyright (C) 2004 Silicon Graphics, Inc.
 * Copyright (C) 2002-2005 Dave Jones.
 * Copyright (C) 1999 Jeff Hartmann.
 * Copyright (C) 1999 Precision Insight, Inc.
 * Copyright (C) 1999 Xi Graphics, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
 * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * TODO:
 * - Allocate more than order 0 pages to avoid too much linear map splitting.
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/miscdevice.h>
#include <linux/pm.h>
#include <linux/agp_backend.h>
#include <linux/vmalloc.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/io.h>
#include <asm/cacheflush.h>
#include <asm/pgtable.h>
#include "agp.h"

__u32 *agp_gatt_table;
int agp_memory_reserved;

/*
 * Needed by the Nforce GART driver for the time being. Would be
 * nice to do this some other way instead of needing this export.
 */
EXPORT_SYMBOL_GPL(agp_memory_reserved);

/*
 * Generic routines for handling agp_memory structures -
 * They use the basic page allocation routines to do the brunt of the work.
 */

void agp_free_key(int key)
{
	if (key < 0)
		return;

	if (key < MAXKEY)
		clear_bit(key, agp_bridge->key_list);
}
EXPORT_SYMBOL(agp_free_key);


static int agp_get_key(void)
{
	int bit;

	bit = find_first_zero_bit(agp_bridge->key_list, MAXKEY);
	if (bit < MAXKEY) {
		set_bit(bit, agp_bridge->key_list);
		return bit;
	}
	return -1;
}

/*
 * Use kmalloc if possible for the page list. Otherwise fall back to
 * vmalloc. This speeds things up and also saves memory for small AGP
 * regions.
 */

void agp_alloc_page_array(size_t size, struct agp_memory *mem)
{
	mem->memory = NULL;
	mem->vmalloc_flag = 0;

	if (size <= 2*PAGE_SIZE)
		mem->memory = kmalloc(size, GFP_KERNEL | __GFP_NORETRY);
	if (mem->memory == NULL) {
		mem->memory = vmalloc(size);
		mem->vmalloc_flag = 1;
	}
}
EXPORT_SYMBOL(agp_alloc_page_array);

void agp_free_page_array(struct agp_memory *mem)
{
	if (mem->vmalloc_flag) {
		vfree(mem->memory);
	} else {
		kfree(mem->memory);
	}
}
EXPORT_SYMBOL(agp_free_page_array);


static struct agp_memory *agp_create_user_memory(unsigned long num_agp_pages)
{
	struct agp_memory *new;
	unsigned long alloc_size = num_agp_pages*sizeof(struct page *);

	new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL);
	if (new == NULL)
		return NULL;

	new->key = agp_get_key();

	if (new->key < 0) {
		kfree(new);
		return NULL;
	}

	agp_alloc_page_array(alloc_size, new);

	if (new->memory == NULL) {
		agp_free_key(new->key);
		kfree(new);
		return NULL;
	}
	new->num_scratch_pages = 0;
	return new;
}

struct agp_memory *agp_create_memory(int scratch_pages)
{
	struct agp_memory *new;

	new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL);
	if (new == NULL)
		return NULL;

	new->key = agp_get_key();

	if (new->key < 0) {
		kfree(new);
		return NULL;
	}

	agp_alloc_page_array(PAGE_SIZE * scratch_pages, new);

	if (new->memory == NULL) {
		agp_free_key(new->key);
		kfree(new);
		return NULL;
	}
	new->num_scratch_pages = scratch_pages;
	new->type = AGP_NORMAL_MEMORY;
	return new;
}
EXPORT_SYMBOL(agp_create_memory);

/**
 * agp_free_memory - free memory associated with an agp_memory pointer.
 *
 * @curr: agp_memory pointer to be freed.
 *
 * It is the only function that can be called when the backend is not owned
 * by the caller. (So it can free memory on client death.)
 */
void agp_free_memory(struct agp_memory *curr)
{
	size_t i;

	if (curr == NULL)
		return;

	if (curr->is_bound == TRUE)
		agp_unbind_memory(curr);

	if (curr->type >= AGP_USER_TYPES) {
		agp_generic_free_by_type(curr);
		return;
	}

	if (curr->type != 0) {
		curr->bridge->driver->free_by_type(curr);
		return;
	}
	if (curr->page_count != 0) {
		for (i = 0; i < curr->page_count; i++) {
			curr->bridge->driver->agp_destroy_page(gart_to_virt(curr->memory[i]));
		}
		flush_agp_mappings();
	}
	agp_free_key(curr->key);
	agp_free_page_array(curr);
	kfree(curr);
}
EXPORT_SYMBOL(agp_free_memory);

#define ENTRIES_PER_PAGE	(PAGE_SIZE / sizeof(unsigned long))

/**
 * agp_allocate_memory - allocate a group of pages of a certain type.
 *
 * @bridge: an agp_bridge_data struct allocated for the AGP host bridge.
 * @page_count: size_t argument of the number of pages
 * @type: u32 argument of the type of memory to be allocated.
 *
 * Every agp bridge device will allow you to allocate AGP_NORMAL_MEMORY which
 * maps to physical ram. Any other type is device dependent.
 *
 * It returns NULL whenever memory is unavailable.
 */
struct agp_memory *agp_allocate_memory(struct agp_bridge_data *bridge,
				       size_t page_count, u32 type)
{
	int scratch_pages;
	struct agp_memory *new;
	size_t i;

	if (!bridge)
		return NULL;

	if ((atomic_read(&bridge->current_memory_agp) + page_count) > bridge->max_memory_agp)
		return NULL;

	if (type >= AGP_USER_TYPES) {
		new = agp_generic_alloc_user(page_count, type);
		if (new)
			new->bridge = bridge;
		return new;
	}

	if (type != 0) {
		new = bridge->driver->alloc_by_type(page_count, type);
		if (new)
			new->bridge = bridge;
		return new;
	}

	scratch_pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;

	new = agp_create_memory(scratch_pages);

	if (new == NULL)
		return NULL;

	for (i = 0; i < page_count; i++) {
		void *addr = bridge->driver->agp_alloc_page(bridge);

		if (addr == NULL) {
			agp_free_memory(new);
			return NULL;
		}
		new->memory[i] = virt_to_gart(addr);
		new->page_count++;
	}
	new->bridge = bridge;

	flush_agp_mappings();

	return new;
}
EXPORT_SYMBOL(agp_allocate_memory);


/* End - Generic routines for handling agp_memory structures */


static int agp_return_size(void)
{
	int current_size;
	void *temp;

	temp = agp_bridge->current_size;

	switch (agp_bridge->driver->size_type) {
	case U8_APER_SIZE:
		current_size = A_SIZE_8(temp)->size;
		break;
	case U16_APER_SIZE:
		current_size = A_SIZE_16(temp)->size;
		break;
	case U32_APER_SIZE:
		current_size = A_SIZE_32(temp)->size;
		break;
	case LVL2_APER_SIZE:
		current_size = A_SIZE_LVL2(temp)->size;
		break;
	case FIXED_APER_SIZE:
		current_size = A_SIZE_FIX(temp)->size;
		break;
	default:
		current_size = 0;
		break;
	}

	current_size -= (agp_memory_reserved / (1024*1024));
	if (current_size < 0)
		current_size = 0;
	return current_size;
}


int agp_num_entries(void)
{
	int num_entries;
	void *temp;

	temp = agp_bridge->current_size;

	switch (agp_bridge->driver->size_type) {
	case U8_APER_SIZE:
		num_entries = A_SIZE_8(temp)->num_entries;
		break;
	case U16_APER_SIZE:
		num_entries = A_SIZE_16(temp)->num_entries;
		break;
	case U32_APER_SIZE:
		num_entries = A_SIZE_32(temp)->num_entries;
		break;
	case LVL2_APER_SIZE:
		num_entries = A_SIZE_LVL2(temp)->num_entries;
		break;
	case FIXED_APER_SIZE:
		num_entries = A_SIZE_FIX(temp)->num_entries;
		break;
	default:
		num_entries = 0;
		break;
	}

	num_entries -= agp_memory_reserved >> PAGE_SHIFT;
	if (num_entries < 0)
		num_entries = 0;
	return num_entries;
}
EXPORT_SYMBOL_GPL(agp_num_entries);


/**
 * agp_copy_info - copy bridge state information
 *
 * @bridge: an agp_bridge_data struct allocated for the AGP host bridge.
 * @info: agp_kern_info pointer. The caller should ensure that this pointer is valid.
 *
 * This function copies information about the agp bridge device and the state of
 * the agp backend into an agp_kern_info pointer.
 */
int agp_copy_info(struct agp_bridge_data *bridge, struct agp_kern_info *info)
{
	memset(info, 0, sizeof(struct agp_kern_info));
	if (!bridge) {
		info->chipset = NOT_SUPPORTED;
		return -EIO;
	}

	info->version.major = bridge->version->major;
	info->version.minor = bridge->version->minor;
	info->chipset = SUPPORTED;
	info->device = bridge->dev;
	if (bridge->mode & AGPSTAT_MODE_3_0)
		info->mode = bridge->mode & ~AGP3_RESERVED_MASK;
	else
		info->mode = bridge->mode & ~AGP2_RESERVED_MASK;
	info->aper_base = bridge->gart_bus_addr;
	info->aper_size = agp_return_size();
	info->max_memory = bridge->max_memory_agp;
	info->current_memory = atomic_read(&bridge->current_memory_agp);
	info->cant_use_aperture = bridge->driver->cant_use_aperture;
	info->vm_ops = bridge->vm_ops;
	info->page_mask = ~0UL;
	return 0;
}
EXPORT_SYMBOL(agp_copy_info);

/* End - Routine to copy over information structure */

/*
 * Routines for handling swapping of agp_memory into the GATT -
 * These routines take agp_memory and insert them into the GATT.
 * They call device specific routines to actually write to the GATT.
 */

/**
 * agp_bind_memory - Bind an agp_memory structure into the GATT.
 *
 * @curr: agp_memory pointer
 * @pg_start: an offset into the graphics aperture translation table
 *
 * It returns -EINVAL if the pointer == NULL.
 * It returns -EBUSY if the area of the table requested is already in use.
 */
int agp_bind_memory(struct agp_memory *curr, off_t pg_start)
{
	int ret_val;

	if (curr == NULL)
		return -EINVAL;

	if (curr->is_bound == TRUE) {
		printk(KERN_INFO PFX "memory %p is already bound!\n", curr);
		return -EINVAL;
	}
	if (curr->is_flushed == FALSE) {
		curr->bridge->driver->cache_flush();
		curr->is_flushed = TRUE;
	}
	ret_val = curr->bridge->driver->insert_memory(curr, pg_start, curr->type);

	if (ret_val != 0)
		return ret_val;

	curr->is_bound = TRUE;
	curr->pg_start = pg_start;
	return 0;
}
EXPORT_SYMBOL(agp_bind_memory);


/**
 * agp_unbind_memory - Removes an agp_memory structure from the GATT
 *
 * @curr: agp_memory pointer to be removed from the GATT.
 *
 * It returns -EINVAL if this piece of agp_memory is not currently bound to
 * the graphics aperture translation table or if the agp_memory pointer == NULL
 */
int agp_unbind_memory(struct agp_memory *curr)
{
	int ret_val;

	if (curr == NULL)
		return -EINVAL;

	if (curr->is_bound != TRUE) {
		printk(KERN_INFO PFX "memory %p was not bound!\n", curr);
		return -EINVAL;
	}

	ret_val = curr->bridge->driver->remove_memory(curr, curr->pg_start, curr->type);

	if (ret_val != 0)
		return ret_val;

	curr->is_bound = FALSE;
	curr->pg_start = 0;
	return 0;
}
EXPORT_SYMBOL(agp_unbind_memory);

/* End - Routines for handling swapping of agp_memory into the GATT */


/* Generic Agp routines - Start */
static void agp_v2_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_agpstat)
{
	u32 tmp;

	if (*requested_mode & AGP2_RESERVED_MASK) {
		printk(KERN_INFO PFX "reserved bits set (%x) in mode 0x%x. Fixed.\n",
			*requested_mode & AGP2_RESERVED_MASK, *requested_mode);
		*requested_mode &= ~AGP2_RESERVED_MASK;
	}

	/*
	 * Some dumb bridges are programmed to disobey the AGP2 spec.
	 * This is likely a BIOS misprogramming rather than poweron default, or
	 * it would be a lot more common.
	 * https://bugs.freedesktop.org/show_bug.cgi?id=8816
	 * AGPv2 spec 6.1.9 states:
	 *   The RATE field indicates the data transfer rates supported by this
	 *   device. A.G.P. devices must report all that apply.
	 * Fix them up as best we can.
	 */
	switch (*bridge_agpstat & 7) {
	case 4:
		*bridge_agpstat |= (AGPSTAT2_2X | AGPSTAT2_1X);
		printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x4 rate. "
			"Fixing up support for x2 & x1\n");
		break;
	case 2:
		*bridge_agpstat |= AGPSTAT2_1X;
		printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x2 rate. "
			"Fixing up support for x1\n");
		break;
	default:
		break;
	}

	/* Check the speed bits make sense. Only one should be set. */
	tmp = *requested_mode & 7;
	switch (tmp) {
	case 0:
		printk(KERN_INFO PFX "%s tried to set rate=x0. Setting to x1 mode.\n", current->comm);
		*requested_mode |= AGPSTAT2_1X;
		break;
	case 1:
	case 2:
		break;
	case 3:
		*requested_mode &= ~(AGPSTAT2_1X);	/* rate=2 */
		break;
	case 4:
		break;
	case 5:
	case 6:
	case 7:
		*requested_mode &= ~(AGPSTAT2_1X|AGPSTAT2_2X);	/* rate=4 */
		break;
	}

	/* disable SBA if it's not supported */
	if (!((*bridge_agpstat & AGPSTAT_SBA) && (*vga_agpstat & AGPSTAT_SBA) && (*requested_mode & AGPSTAT_SBA)))
		*bridge_agpstat &= ~AGPSTAT_SBA;

	/* Set rate */
	if (!((*bridge_agpstat & AGPSTAT2_4X) && (*vga_agpstat & AGPSTAT2_4X) && (*requested_mode & AGPSTAT2_4X)))
		*bridge_agpstat &= ~AGPSTAT2_4X;

	if (!((*bridge_agpstat & AGPSTAT2_2X) && (*vga_agpstat & AGPSTAT2_2X) && (*requested_mode & AGPSTAT2_2X)))
		*bridge_agpstat &= ~AGPSTAT2_2X;

	if (!((*bridge_agpstat & AGPSTAT2_1X) && (*vga_agpstat & AGPSTAT2_1X) && (*requested_mode & AGPSTAT2_1X)))
		*bridge_agpstat &= ~AGPSTAT2_1X;

	/* Now we know what mode it should be, clear out the unwanted bits. */
	if (*bridge_agpstat & AGPSTAT2_4X)
		*bridge_agpstat &= ~(AGPSTAT2_1X | AGPSTAT2_2X);	/* 4X */

	if (*bridge_agpstat & AGPSTAT2_2X)
		*bridge_agpstat &= ~(AGPSTAT2_1X | AGPSTAT2_4X);	/* 2X */

	if (*bridge_agpstat & AGPSTAT2_1X)
		*bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);	/* 1X */

	/* Apply any errata. */
	if (agp_bridge->flags & AGP_ERRATA_FASTWRITES)
		*bridge_agpstat &= ~AGPSTAT_FW;

	if (agp_bridge->flags & AGP_ERRATA_SBA)
		*bridge_agpstat &= ~AGPSTAT_SBA;

	if (agp_bridge->flags & AGP_ERRATA_1X) {
		*bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);
		*bridge_agpstat |= AGPSTAT2_1X;
	}

	/* If we've dropped down to 1X, disable fast writes. */
	if (*bridge_agpstat & AGPSTAT2_1X)
		*bridge_agpstat &= ~AGPSTAT_FW;
}

/*
 * requested_mode = Mode requested by (typically) X.
 * bridge_agpstat = PCI_AGP_STATUS from agp bridge.
 * vga_agpstat = PCI_AGP_STATUS from graphics card.
 */
static void agp_v3_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_agpstat)
{
	u32 origbridge = *bridge_agpstat, origvga = *vga_agpstat;
	u32 tmp;

	if (*requested_mode & AGP3_RESERVED_MASK) {
		printk(KERN_INFO PFX "reserved bits set (%x) in mode 0x%x. Fixed.\n",
			*requested_mode & AGP3_RESERVED_MASK, *requested_mode);
		*requested_mode &= ~AGP3_RESERVED_MASK;
	}

	/* Check the speed bits make sense. */
	tmp = *requested_mode & 7;
	if (tmp == 0) {
		printk(KERN_INFO PFX "%s tried to set rate=x0. Setting to AGP3 x4 mode.\n", current->comm);
		*requested_mode |= AGPSTAT3_4X;
	}
	if (tmp >= 3) {
		printk(KERN_INFO PFX "%s tried to set rate=x%d. Setting to AGP3 x8 mode.\n", current->comm, tmp * 4);
		*requested_mode = (*requested_mode & ~7) | AGPSTAT3_8X;
	}

	/* ARQSZ - Set the value to the maximum one.
	 * Don't allow the mode register to override values. */
	*bridge_agpstat = ((*bridge_agpstat & ~AGPSTAT_ARQSZ) |
		max_t(u32, (*bridge_agpstat & AGPSTAT_ARQSZ), (*vga_agpstat & AGPSTAT_ARQSZ)));

	/* Calibration cycle.
	 * Don't allow the mode register to override values. */
	*bridge_agpstat = ((*bridge_agpstat & ~AGPSTAT_CAL_MASK) |
		min_t(u32, (*bridge_agpstat & AGPSTAT_CAL_MASK), (*vga_agpstat & AGPSTAT_CAL_MASK)));

	/* SBA *must* be supported for AGP v3 */
	*bridge_agpstat |= AGPSTAT_SBA;

	/*
	 * Set speed.
	 * Check for invalid speeds. This can happen when applications
	 * written before the AGP 3.0 standard pass AGP2.x modes to AGP3 hardware.
	 */
	if (*requested_mode & AGPSTAT_MODE_3_0) {
		/*
		 * Caller hasn't a clue what it is doing. Bridge is in 3.0 mode,
		 * we have been passed a 3.0 mode, but with 2.x speed bits set.
		 * AGP2.x 4x -> AGP3.0 4x.
		 */
		if (*requested_mode & AGPSTAT2_4X) {
			printk(KERN_INFO PFX "%s passes broken AGP3 flags (%x). Fixed.\n",
				current->comm, *requested_mode);
			*requested_mode &= ~AGPSTAT2_4X;
			*requested_mode |= AGPSTAT3_4X;
		}
	} else {
		/*
		 * The caller doesn't know what they are doing. We are in 3.0 mode,
		 * but have been passed an AGP 2.x mode.
		 * Convert AGP 1x,2x,4x -> AGP 3.0 4x.
		 */
		printk(KERN_INFO PFX "%s passes broken AGP2 flags (%x) in AGP3 mode. Fixed.\n",
			current->comm, *requested_mode);
		*requested_mode &= ~(AGPSTAT2_4X | AGPSTAT2_2X | AGPSTAT2_1X);
		*requested_mode |= AGPSTAT3_4X;
	}

	if (*requested_mode & AGPSTAT3_8X) {
		if (!(*bridge_agpstat & AGPSTAT3_8X)) {
			*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
			*bridge_agpstat |= AGPSTAT3_4X;
			printk(KERN_INFO PFX "%s requested AGPx8 but bridge not capable.\n", current->comm);
			return;
		}
		if (!(*vga_agpstat & AGPSTAT3_8X)) {
			*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
			*bridge_agpstat |= AGPSTAT3_4X;
			printk(KERN_INFO PFX "%s requested AGPx8 but graphics card not capable.\n", current->comm);
			return;
		}
		/* All set, bridge & device can do AGP x8 */
		*bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
		goto done;

	} else if (*requested_mode & AGPSTAT3_4X) {
		*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
		*bridge_agpstat |= AGPSTAT3_4X;
		goto done;

	} else {

		/*
		 * If no AGP mode was specified, see whether both the graphics
		 * card and the bridge can do x8, and use it if so.
		 * If not, fall back to x4 mode.
		 */
		if ((*bridge_agpstat & AGPSTAT3_8X) && (*vga_agpstat & AGPSTAT3_8X)) {
			printk(KERN_INFO PFX "No AGP mode specified. Setting to highest mode "
				"supported by bridge & card (x8).\n");
			*bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
			*vga_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
		} else {
			printk(KERN_INFO PFX "Fell back to AGPx4 mode because");
			if (!(*bridge_agpstat & AGPSTAT3_8X)) {
				printk(KERN_INFO PFX "bridge couldn't do x8. bridge_agpstat:%x (orig=%x)\n",
					*bridge_agpstat, origbridge);
				*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
				*bridge_agpstat |= AGPSTAT3_4X;
			}
			if (!(*vga_agpstat & AGPSTAT3_8X)) {
				printk(KERN_INFO PFX "graphics card couldn't do x8. vga_agpstat:%x (orig=%x)\n",
					*vga_agpstat, origvga);
				*vga_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
				*vga_agpstat |= AGPSTAT3_4X;
			}
		}
	}

done:
	/* Apply any errata. */
	if (agp_bridge->flags & AGP_ERRATA_FASTWRITES)
		*bridge_agpstat &= ~AGPSTAT_FW;

	if (agp_bridge->flags & AGP_ERRATA_SBA)
		*bridge_agpstat &= ~AGPSTAT_SBA;

	if (agp_bridge->flags & AGP_ERRATA_1X) {
		*bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);
		*bridge_agpstat |= AGPSTAT2_1X;
	}
}


/**
 * agp_collect_device_status - determine correct agp_cmd from various agp_stat's
 * @bridge: an agp_bridge_data struct allocated for the AGP host bridge.
 * @requested_mode: requested agp_stat from userspace (Typically from X)
 * @bridge_agpstat: current agp_stat from AGP bridge.
 *
 * This function will hunt for an AGP graphics card, and try to match
 * the requested mode to the capabilities of both the bridge and the card.
 */
u32 agp_collect_device_status(struct agp_bridge_data *bridge, u32 requested_mode, u32 bridge_agpstat)
{
	struct pci_dev *device = NULL;
	u32 vga_agpstat;
	u8 cap_ptr;

	for (;;) {
		device = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, device);
		if (!device) {
			printk(KERN_INFO PFX "Couldn't find an AGP VGA controller.\n");
			return 0;
		}
		cap_ptr = pci_find_capability(device, PCI_CAP_ID_AGP);
		if (cap_ptr)
			break;
	}

	/*
	 * OK, here we have an AGP device. Disable impossible
	 * settings, and adjust the read queue to the minimum.
	 */
	pci_read_config_dword(device, cap_ptr+PCI_AGP_STATUS, &vga_agpstat);

	/* adjust RQ depth */
	bridge_agpstat = ((bridge_agpstat & ~AGPSTAT_RQ_DEPTH) |
		min_t(u32, (requested_mode & AGPSTAT_RQ_DEPTH),
			min_t(u32, (bridge_agpstat & AGPSTAT_RQ_DEPTH), (vga_agpstat & AGPSTAT_RQ_DEPTH))));

	/* disable FW if it's not supported */
	if (!((bridge_agpstat & AGPSTAT_FW) &&
	      (vga_agpstat & AGPSTAT_FW) &&
	      (requested_mode & AGPSTAT_FW)))
		bridge_agpstat &= ~AGPSTAT_FW;

	/* Check to see if we are operating in 3.0 mode */
	if (agp_bridge->mode & AGPSTAT_MODE_3_0)
		agp_v3_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat);
	else
		agp_v2_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat);

	pci_dev_put(device);
	return bridge_agpstat;
}
EXPORT_SYMBOL(agp_collect_device_status);


void agp_device_command(u32 bridge_agpstat, int agp_v3)
{
	struct pci_dev *device = NULL;
	int mode;

	mode = bridge_agpstat & 0x7;
	if (agp_v3)
		mode *= 4;

	for_each_pci_dev(device) {
		u8 agp = pci_find_capability(device, PCI_CAP_ID_AGP);
		if (!agp)
			continue;

		printk(KERN_INFO PFX "Putting AGP V%d device at %s into %dx mode\n",
			agp_v3 ? 3 : 2, pci_name(device), mode);
		pci_write_config_dword(device, agp + PCI_AGP_COMMAND, bridge_agpstat);
	}
}
EXPORT_SYMBOL(agp_device_command);


void get_agp_version(struct agp_bridge_data *bridge)
{
	u32 ncapid;

	/* Exit early if already set by errata workarounds. */
	if (bridge->major_version != 0)
		return;

	pci_read_config_dword(bridge->dev, bridge->capndx, &ncapid);
	bridge->major_version = (ncapid >> AGP_MAJOR_VERSION_SHIFT) & 0xf;
	bridge->minor_version = (ncapid >> AGP_MINOR_VERSION_SHIFT) & 0xf;
}
EXPORT_SYMBOL(get_agp_version);


void agp_generic_enable(struct agp_bridge_data *bridge, u32 requested_mode)
{
	u32 bridge_agpstat, temp;

	get_agp_version(agp_bridge);

	printk(KERN_INFO PFX "Found an AGP %d.%d compliant device at %s.\n",
		agp_bridge->major_version,
		agp_bridge->minor_version,
		pci_name(agp_bridge->dev));

	pci_read_config_dword(agp_bridge->dev,
		agp_bridge->capndx + PCI_AGP_STATUS, &bridge_agpstat);

	bridge_agpstat = agp_collect_device_status(agp_bridge, requested_mode, bridge_agpstat);
	if (bridge_agpstat == 0)
		/* Something bad happened. FIXME: Return error code? */
		return;

	bridge_agpstat |= AGPSTAT_AGP_ENABLE;

	/* Do AGP version specific frobbing. */
	if (bridge->major_version >= 3) {
		if (bridge->mode & AGPSTAT_MODE_3_0) {
			/* If we have 3.5, we can do the isoch stuff. */
			if (bridge->minor_version >= 5)
				agp_3_5_enable(bridge);
			agp_device_command(bridge_agpstat, TRUE);
			return;
		} else {
			/* Disable calibration cycle in RX91<1> when not in AGP3.0 mode of operation. */
			bridge_agpstat &= ~(7<<10);
			pci_read_config_dword(bridge->dev,
				bridge->capndx+AGPCTRL, &temp);
			temp |= (1<<9);
			pci_write_config_dword(bridge->dev,
				bridge->capndx+AGPCTRL, temp);

			printk(KERN_INFO PFX "Device is in legacy mode,"
				" falling back to 2.x\n");
		}
	}

	/* AGP v<3 */
	agp_device_command(bridge_agpstat, FALSE);
}
EXPORT_SYMBOL(agp_generic_enable);


int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
{
	char *table;
	char *table_end;
	int size;
	int page_order;
	int num_entries;
	int i;
	void *temp;
	struct page *page;

	/* The generic routines can't handle 2 level gatt's */
	if (bridge->driver->size_type == LVL2_APER_SIZE)
		return -EINVAL;

	table = NULL;
	i = bridge->aperture_size_idx;
	temp = bridge->current_size;
	size = page_order = num_entries = 0;

	if (bridge->driver->size_type != FIXED_APER_SIZE) {
		do {
			switch (bridge->driver->size_type) {
			case U8_APER_SIZE:
				size = A_SIZE_8(temp)->size;
				page_order = A_SIZE_8(temp)->page_order;
				num_entries = A_SIZE_8(temp)->num_entries;
				break;
			case U16_APER_SIZE:
				size = A_SIZE_16(temp)->size;
				page_order = A_SIZE_16(temp)->page_order;
				num_entries = A_SIZE_16(temp)->num_entries;
				break;
			case U32_APER_SIZE:
				size = A_SIZE_32(temp)->size;
				page_order = A_SIZE_32(temp)->page_order;
				num_entries = A_SIZE_32(temp)->num_entries;
				break;
			/* This case will never really happen. */
			case FIXED_APER_SIZE:
			case LVL2_APER_SIZE:
			default:
				size = page_order = num_entries = 0;
				break;
			}

			table = alloc_gatt_pages(page_order);

			if (table == NULL) {
				i++;
				switch (bridge->driver->size_type) {
				case U8_APER_SIZE:
					bridge->current_size = A_IDX8(bridge);
					break;
				case U16_APER_SIZE:
					bridge->current_size = A_IDX16(bridge);
					break;
				case U32_APER_SIZE:
					bridge->current_size = A_IDX32(bridge);
					break;
				/* These cases will never really happen. */
				case FIXED_APER_SIZE:
				case LVL2_APER_SIZE:
				default:
					break;
				}
				temp = bridge->current_size;
			} else {
				bridge->aperture_size_idx = i;
			}
		} while (!table && (i < bridge->driver->num_aperture_sizes));
	} else {
		size = ((struct aper_size_info_fixed *) temp)->size;
		page_order = ((struct aper_size_info_fixed *) temp)->page_order;
		num_entries = ((struct aper_size_info_fixed *) temp)->num_entries;
		table = alloc_gatt_pages(page_order);
	}

	if (table == NULL)
		return -ENOMEM;

	table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);

	for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
		SetPageReserved(page);

	bridge->gatt_table_real = (u32 *) table;
	agp_gatt_table = (void *)table;

	bridge->driver->cache_flush();
	bridge->gatt_table = ioremap_nocache(virt_to_gart(table),
					(PAGE_SIZE * (1 << page_order)));
	bridge->driver->cache_flush();

	if (bridge->gatt_table == NULL) {
		for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
			ClearPageReserved(page);

		free_gatt_pages(table, page_order);

		return -ENOMEM;
	}
	bridge->gatt_bus_addr = virt_to_gart(bridge->gatt_table_real);

	/* AK: bogus, should encode addresses > 4GB */
	for (i = 0; i < num_entries; i++) {
		writel(bridge->scratch_page, bridge->gatt_table+i);
		readl(bridge->gatt_table+i);	/* PCI Posting. */
	}

	return 0;
}
EXPORT_SYMBOL(agp_generic_create_gatt_table);

int agp_generic_free_gatt_table(struct agp_bridge_data *bridge)
{
	int page_order;
	char *table, *table_end;
	void *temp;
	struct page *page;

	temp = bridge->current_size;

	switch (bridge->driver->size_type) {
	case U8_APER_SIZE:
		page_order = A_SIZE_8(temp)->page_order;
		break;
	case U16_APER_SIZE:
		page_order = A_SIZE_16(temp)->page_order;
		break;
	case U32_APER_SIZE:
		page_order = A_SIZE_32(temp)->page_order;
		break;
	case FIXED_APER_SIZE:
		page_order = A_SIZE_FIX(temp)->page_order;
		break;
	case LVL2_APER_SIZE:
		/* The generic routines can't deal with 2 level gatt's */
		return -EINVAL;
		break;
	default:
		page_order = 0;
		break;
	}

	/* Do not worry about freeing memory, because if this is
	 * called, then all agp memory is deallocated and removed
	 * from the table. */

	iounmap(bridge->gatt_table);
	table = (char *) bridge->gatt_table_real;
	table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);

	for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
		ClearPageReserved(page);

	free_gatt_pages(bridge->gatt_table_real, page_order);

	agp_gatt_table = NULL;
	bridge->gatt_table = NULL;
	bridge->gatt_table_real = NULL;
	bridge->gatt_bus_addr = 0;

	return 0;
}
EXPORT_SYMBOL(agp_generic_free_gatt_table);


int agp_generic_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
{
	int num_entries;
	size_t i;
	off_t j;
	void *temp;
	struct agp_bridge_data *bridge;
	int mask_type;

	bridge = mem->bridge;
	if (!bridge)
		return -EINVAL;

	if (mem->page_count == 0)
		return 0;

	temp = bridge->current_size;

	switch (bridge->driver->size_type) {
	case U8_APER_SIZE:
		num_entries = A_SIZE_8(temp)->num_entries;
		break;
	case U16_APER_SIZE:
		num_entries = A_SIZE_16(temp)->num_entries;
		break;
	case U32_APER_SIZE:
		num_entries = A_SIZE_32(temp)->num_entries;
		break;
	case FIXED_APER_SIZE:
		num_entries = A_SIZE_FIX(temp)->num_entries;
		break;
	case LVL2_APER_SIZE:
		/* The generic routines can't deal with 2 level gatt's */
		return -EINVAL;
		break;
	default:
		num_entries = 0;
		break;
	}

	num_entries -= agp_memory_reserved/PAGE_SIZE;
	if (num_entries < 0)
		num_entries = 0;

	if (type != mem->type)
		return -EINVAL;

	mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
	if (mask_type != 0) {
		/* The generic routines know nothing of memory types */
		return -EINVAL;
	}

	/* AK: could wrap */
	if ((pg_start + mem->page_count) > num_entries)
		return -EINVAL;

	j = pg_start;

	while (j < (pg_start + mem->page_count)) {
		if (!PGE_EMPTY(bridge, readl(bridge->gatt_table+j)))
			return -EBUSY;
		j++;
	}

	if (mem->is_flushed == FALSE) {
		bridge->driver->cache_flush();
		mem->is_flushed = TRUE;
	}

	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		writel(bridge->driver->mask_memory(bridge, mem->memory[i], mask_type),
			bridge->gatt_table+j);
	}
	readl(bridge->gatt_table+j-1);	/* PCI Posting. */

	bridge->driver->tlb_flush(mem);
	return 0;
}
EXPORT_SYMBOL(agp_generic_insert_memory);


int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
{
	size_t i;
	struct agp_bridge_data *bridge;
	int mask_type;

	bridge = mem->bridge;
	if (!bridge)
		return -EINVAL;

	if (mem->page_count == 0)
		return 0;

	if (type != mem->type)
		return -EINVAL;

	mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
	if (mask_type != 0) {
		/* The generic routines know nothing of memory types */
		return -EINVAL;
	}

	/* AK: bogus, should encode addresses > 4GB */
	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
		writel(bridge->scratch_page, bridge->gatt_table+i);
	}
	readl(bridge->gatt_table+i-1);	/* PCI Posting. */

	bridge->driver->tlb_flush(mem);
	return 0;
}
EXPORT_SYMBOL(agp_generic_remove_memory);

struct agp_memory *agp_generic_alloc_by_type(size_t page_count, int type)
{
	return NULL;
}
EXPORT_SYMBOL(agp_generic_alloc_by_type);

void agp_generic_free_by_type(struct agp_memory *curr)
{
	agp_free_page_array(curr);
	agp_free_key(curr->key);
	kfree(curr);
}
EXPORT_SYMBOL(agp_generic_free_by_type);

struct agp_memory *agp_generic_alloc_user(size_t page_count, int type)
{
	struct agp_memory *new;
	int i;
	int pages;

	pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;
	new = agp_create_user_memory(page_count);
	if (new == NULL)
		return NULL;

	for (i = 0; i < page_count; i++)
		new->memory[i] = 0;
	new->page_count = 0;
	new->type = type;
	new->num_scratch_pages = pages;

	return new;
}
EXPORT_SYMBOL(agp_generic_alloc_user);

/*
 * Basic Page Allocation Routines -
 * These routines handle page allocation and by default they reserve the
 * allocated memory. They also handle incrementing the current_memory_agp
 * value, which is checked against a maximum value.
 */

void *agp_generic_alloc_page(struct agp_bridge_data *bridge)
{
	struct page *page;

	page = alloc_page(GFP_KERNEL | GFP_DMA32);
	if (page == NULL)
		return NULL;

	map_page_into_agp(page);

	get_page(page);
	atomic_inc(&agp_bridge->current_memory_agp);
	return page_address(page);
}
EXPORT_SYMBOL(agp_generic_alloc_page);


void agp_generic_destroy_page(void *addr)
{
	struct page *page;

	if (addr == NULL)
		return;

	page = virt_to_page(addr);
	unmap_page_from_agp(page);
	put_page(page);
	free_page((unsigned long)addr);
	atomic_dec(&agp_bridge->current_memory_agp);
}
EXPORT_SYMBOL(agp_generic_destroy_page);

/* End Basic Page Allocation Routines */


/**
 * agp_enable - initialise the agp point-to-point connection.
 *
 * @bridge: an agp_bridge_data struct allocated for the AGP host bridge.
 * @mode: agp mode register value to configure with.
 */
void agp_enable(struct agp_bridge_data *bridge, u32 mode)
{
	if (!bridge)
		return;
	bridge->driver->agp_enable(bridge, mode);
}
EXPORT_SYMBOL(agp_enable);

/* When we remove the global variable agp_bridge from all drivers
 * then agp_alloc_bridge and agp_generic_find_bridge need to be updated
 */

struct agp_bridge_data *agp_generic_find_bridge(struct pci_dev *pdev)
{
	if (list_empty(&agp_bridges))
		return NULL;

	return agp_bridge;
}

static void ipi_handler(void *null)
{
	flush_agp_cache();
}

void global_cache_flush(void)
{
	if (on_each_cpu(ipi_handler, NULL, 1, 1) != 0)
		panic(PFX "timed out waiting for the other CPUs!\n");
}
EXPORT_SYMBOL(global_cache_flush);

unsigned long agp_generic_mask_memory(struct agp_bridge_data *bridge,
				      unsigned long addr, int type)
{
	/* memory type is ignored in the generic routine */
	if (bridge->driver->masks)
		return addr | bridge->driver->masks[0].mask;
	else
		return addr;
}
EXPORT_SYMBOL(agp_generic_mask_memory);

int agp_generic_type_to_mask_type(struct agp_bridge_data *bridge,
				  int type)
{
	if (type >= AGP_USER_TYPES)
		return 0;
	return type;
}
EXPORT_SYMBOL(agp_generic_type_to_mask_type);

/*
 * These functions are implemented according to the AGPv3 spec,
 * which covers implementation details that had previously been
 * left open.
 */

int agp3_generic_fetch_size(void)
{
	u16 temp_size;
	int i;
	struct aper_size_info_16 *values;

	pci_read_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, &temp_size);
	values = A_SIZE_16(agp_bridge->driver->aperture_sizes);

	for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
		if (temp_size == values[i].size_value) {
			agp_bridge->previous_size =
				agp_bridge->current_size = (void *) (values + i);

			agp_bridge->aperture_size_idx = i;
			return values[i].size;
		}
	}
	return 0;
}
EXPORT_SYMBOL(agp3_generic_fetch_size);

void agp3_generic_tlbflush(struct agp_memory *mem)
{
	u32 ctrl;
	pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl);
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_GTLBEN);
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl);
}
EXPORT_SYMBOL(agp3_generic_tlbflush);

int agp3_generic_configure(void)
{
	u32 temp;
	struct aper_size_info_16 *current_size;

	current_size = A_SIZE_16(agp_bridge->current_size);

	pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

	/* set aperture size */
	pci_write_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, current_size->size_value);
	/* set gart pointer */
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPGARTLO, agp_bridge->gatt_bus_addr);
	/* enable aperture and GTLB */
	pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &temp);
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, temp | AGPCTRL_APERENB | AGPCTRL_GTLBEN);
	return 0;
}
EXPORT_SYMBOL(agp3_generic_configure);

void agp3_generic_cleanup(void)
{
	u32 ctrl;
	pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl);
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_APERENB);
}
EXPORT_SYMBOL(agp3_generic_cleanup);

const struct aper_size_info_16 agp3_generic_sizes[AGP_GENERIC_SIZES_ENTRIES] =
{
	{4096, 1048576, 10, 0x000},
	{2048,  524288,  9, 0x800},
	{1024,  262144,  8, 0xc00},
	{ 512,  131072,  7, 0xe00},
	{ 256,   65536,  6, 0xf00},
	{ 128,   32768,  5, 0xf20},
	{  64,   16384,  4, 0xf30},
	{  32,    8192,  3, 0xf38},
	{  16,    4096,  2, 0xf3c},
	{   8,    2048,  1, 0xf3e},
	{   4,    1024,  0, 0xf3f}
};
EXPORT_SYMBOL(agp3_generic_sizes);
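
/*
 * Illustrative usage sketch (an editorial addition, not part of the driver):
 * roughly how an in-kernel caller might drive the exported agp_memory API
 * implemented above, assuming it already holds a valid agp_bridge_data
 * pointer (normally obtained through the AGP backend interfaces).  The
 * helper name example_map_one_page and the choice of GATT offset 0 are
 * hypothetical, chosen only for illustration.
 *
 *	static int example_map_one_page(struct agp_bridge_data *bridge)
 *	{
 *		struct agp_memory *mem;
 *		int ret;
 *
 *		mem = agp_allocate_memory(bridge, 1, AGP_NORMAL_MEMORY);
 *		if (mem == NULL)
 *			return -ENOMEM;
 *
 *		ret = agp_bind_memory(mem, 0);	-- bind at GATT entry 0
 *		if (ret != 0) {
 *			agp_free_memory(mem);
 *			return ret;
 *		}
 *
 *		-- ... the device can now reach the page through the aperture ...
 *
 *		agp_unbind_memory(mem);		-- agp_free_memory() would also unbind
 *		agp_free_memory(mem);
 *		return 0;
 *	}
 */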