1 /* 2 * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved. 3 * Copyright 2005 Stephane Marchesin 4 * 5 * The Weather Channel (TM) funded Tungsten Graphics to develop the 6 * initial release of the Radeon 8500 driver under the XFree86 license. 7 * This notice must be preserved. 8 * 9 * Permission is hereby granted, free of charge, to any person obtaining a 10 * copy of this software and associated documentation files (the "Software"), 11 * to deal in the Software without restriction, including without limitation 12 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 13 * and/or sell copies of the Software, and to permit persons to whom the 14 * Software is furnished to do so, subject to the following conditions: 15 * 16 * The above copyright notice and this permission notice (including the next 17 * paragraph) shall be included in all copies or substantial portions of the 18 * Software. 19 * 20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 23 * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR 24 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 26 * DEALINGS IN THE SOFTWARE. 27 * 28 * Authors: 29 * Keith Whitwell <keith@tungstengraphics.com> 30 */ 31 32 33 #include "drmP.h" 34 #include "drm.h" 35 #include "drm_sarea.h" 36 37 #include "nouveau_drv.h" 38 #include "nouveau_pm.h" 39 40 /* 41 * NV10-NV40 tiling helpers 42 */ 43 44 static void 45 nv10_mem_set_region_tiling(struct drm_device *dev, int i, uint32_t addr, 46 uint32_t size, uint32_t pitch) 47 { 48 struct drm_nouveau_private *dev_priv = dev->dev_private; 49 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; 50 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; 51 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; 52 struct nouveau_tile_reg *tile = &dev_priv->tile[i]; 53 54 tile->addr = addr; 55 tile->size = size; 56 tile->used = !!pitch; 57 nouveau_fence_unref((void **)&tile->fence); 58 59 pfifo->reassign(dev, false); 60 pfifo->cache_pull(dev, false); 61 62 nouveau_wait_for_idle(dev); 63 64 pgraph->set_region_tiling(dev, i, addr, size, pitch); 65 pfb->set_region_tiling(dev, i, addr, size, pitch); 66 67 pfifo->cache_pull(dev, true); 68 pfifo->reassign(dev, true); 69 } 70 71 struct nouveau_tile_reg * 72 nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size, 73 uint32_t pitch) 74 { 75 struct drm_nouveau_private *dev_priv = dev->dev_private; 76 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; 77 struct nouveau_tile_reg *found = NULL; 78 unsigned long i, flags; 79 80 spin_lock_irqsave(&dev_priv->context_switch_lock, flags); 81 82 for (i = 0; i < pfb->num_tiles; i++) { 83 struct nouveau_tile_reg *tile = &dev_priv->tile[i]; 84 85 if (tile->used) 86 /* Tile region in use. */ 87 continue; 88 89 if (tile->fence && 90 !nouveau_fence_signalled(tile->fence, NULL)) 91 /* Pending tile region. */ 92 continue; 93 94 if (max(tile->addr, addr) < 95 min(tile->addr + tile->size, addr + size)) 96 /* Kill an intersecting tile region. */ 97 nv10_mem_set_region_tiling(dev, i, 0, 0, 0); 98 99 if (pitch && !found) { 100 /* Free tile region. */ 101 nv10_mem_set_region_tiling(dev, i, addr, size, pitch); 102 found = tile; 103 } 104 } 105 106 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); 107 108 return found; 109 } 110 111 void 112 nv10_mem_expire_tiling(struct drm_device *dev, struct nouveau_tile_reg *tile, 113 struct nouveau_fence *fence) 114 { 115 if (fence) { 116 /* Mark it as pending. */ 117 tile->fence = fence; 118 nouveau_fence_ref(fence); 119 } 120 121 tile->used = false; 122 } 123 124 /* 125 * NV50 VM helpers 126 */ 127 int 128 nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size, 129 uint32_t flags, uint64_t phys) 130 { 131 struct drm_nouveau_private *dev_priv = dev->dev_private; 132 struct nouveau_gpuobj *pgt; 133 unsigned block; 134 int i; 135 136 virt = ((virt - dev_priv->vm_vram_base) >> 16) << 1; 137 size = (size >> 16) << 1; 138 139 phys |= ((uint64_t)flags << 32); 140 phys |= 1; 141 if (dev_priv->vram_sys_base) { 142 phys += dev_priv->vram_sys_base; 143 phys |= 0x30; 144 } 145 146 while (size) { 147 unsigned offset_h = upper_32_bits(phys); 148 unsigned offset_l = lower_32_bits(phys); 149 unsigned pte, end; 150 151 for (i = 7; i >= 0; i--) { 152 block = 1 << (i + 1); 153 if (size >= block && !(virt & (block - 1))) 154 break; 155 } 156 offset_l |= (i << 7); 157 158 phys += block << 15; 159 size -= block; 160 161 while (block) { 162 pgt = dev_priv->vm_vram_pt[virt >> 14]; 163 pte = virt & 0x3ffe; 164 165 end = pte + block; 166 if (end > 16384) 167 end = 16384; 168 block -= (end - pte); 169 virt += (end - pte); 170 171 while (pte < end) { 172 nv_wo32(pgt, (pte * 4) + 0, offset_l); 173 nv_wo32(pgt, (pte * 4) + 4, offset_h); 174 pte += 2; 175 } 176 } 177 } 178 179 dev_priv->engine.instmem.flush(dev); 180 dev_priv->engine.fifo.tlb_flush(dev); 181 dev_priv->engine.graph.tlb_flush(dev); 182 nv50_vm_flush(dev, 6); 183 return 0; 184 } 185 186 void 187 nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size) 188 { 189 struct drm_nouveau_private *dev_priv = dev->dev_private; 190 struct nouveau_gpuobj *pgt; 191 unsigned pages, pte, end; 192 193 virt -= dev_priv->vm_vram_base; 194 pages = (size >> 16) << 1; 195 196 while (pages) { 197 pgt = dev_priv->vm_vram_pt[virt >> 29]; 198 pte = (virt & 0x1ffe0000ULL) >> 15; 199 200 end = pte + pages; 201 if (end > 16384) 202 end = 16384; 203 pages -= (end - pte); 204 virt += (end - pte) << 15; 205 206 while (pte < end) { 207 nv_wo32(pgt, (pte * 4), 0); 208 pte++; 209 } 210 } 211 212 dev_priv->engine.instmem.flush(dev); 213 dev_priv->engine.fifo.tlb_flush(dev); 214 dev_priv->engine.graph.tlb_flush(dev); 215 nv50_vm_flush(dev, 6); 216 } 217 218 /* 219 * Cleanup everything 220 */ 221 void 222 nouveau_mem_vram_fini(struct drm_device *dev) 223 { 224 struct drm_nouveau_private *dev_priv = dev->dev_private; 225 226 nouveau_bo_unpin(dev_priv->vga_ram); 227 nouveau_bo_ref(NULL, &dev_priv->vga_ram); 228 229 ttm_bo_device_release(&dev_priv->ttm.bdev); 230 231 nouveau_ttm_global_release(dev_priv); 232 233 if (dev_priv->fb_mtrr >= 0) { 234 drm_mtrr_del(dev_priv->fb_mtrr, 235 pci_resource_start(dev->pdev, 1), 236 pci_resource_len(dev->pdev, 1), DRM_MTRR_WC); 237 dev_priv->fb_mtrr = -1; 238 } 239 } 240 241 void 242 nouveau_mem_gart_fini(struct drm_device *dev) 243 { 244 nouveau_sgdma_takedown(dev); 245 246 if (drm_core_has_AGP(dev) && dev->agp) { 247 struct drm_agp_mem *entry, *tempe; 248 249 /* Remove AGP resources, but leave dev->agp 250 intact until drv_cleanup is called. */ 251 list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) { 252 if (entry->bound) 253 drm_unbind_agp(entry->memory); 254 drm_free_agp(entry->memory, entry->pages); 255 kfree(entry); 256 } 257 INIT_LIST_HEAD(&dev->agp->memory); 258 259 if (dev->agp->acquired) 260 drm_agp_release(dev); 261 262 dev->agp->acquired = 0; 263 dev->agp->enabled = 0; 264 } 265 } 266 267 static uint32_t 268 nouveau_mem_detect_nv04(struct drm_device *dev) 269 { 270 uint32_t boot0 = nv_rd32(dev, NV04_PFB_BOOT_0); 271 272 if (boot0 & 0x00000100) 273 return (((boot0 >> 12) & 0xf) * 2 + 2) * 1024 * 1024; 274 275 switch (boot0 & NV04_PFB_BOOT_0_RAM_AMOUNT) { 276 case NV04_PFB_BOOT_0_RAM_AMOUNT_32MB: 277 return 32 * 1024 * 1024; 278 case NV04_PFB_BOOT_0_RAM_AMOUNT_16MB: 279 return 16 * 1024 * 1024; 280 case NV04_PFB_BOOT_0_RAM_AMOUNT_8MB: 281 return 8 * 1024 * 1024; 282 case NV04_PFB_BOOT_0_RAM_AMOUNT_4MB: 283 return 4 * 1024 * 1024; 284 } 285 286 return 0; 287 } 288 289 static uint32_t 290 nouveau_mem_detect_nforce(struct drm_device *dev) 291 { 292 struct drm_nouveau_private *dev_priv = dev->dev_private; 293 struct pci_dev *bridge; 294 uint32_t mem; 295 296 bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1)); 297 if (!bridge) { 298 NV_ERROR(dev, "no bridge device\n"); 299 return 0; 300 } 301 302 if (dev_priv->flags & NV_NFORCE) { 303 pci_read_config_dword(bridge, 0x7C, &mem); 304 return (uint64_t)(((mem >> 6) & 31) + 1)*1024*1024; 305 } else 306 if (dev_priv->flags & NV_NFORCE2) { 307 pci_read_config_dword(bridge, 0x84, &mem); 308 return (uint64_t)(((mem >> 4) & 127) + 1)*1024*1024; 309 } 310 311 NV_ERROR(dev, "impossible!\n"); 312 return 0; 313 } 314 315 static void 316 nv50_vram_preinit(struct drm_device *dev) 317 { 318 struct drm_nouveau_private *dev_priv = dev->dev_private; 319 int i, parts, colbits, rowbitsa, rowbitsb, banks; 320 u64 rowsize, predicted; 321 u32 r0, r4, rt, ru; 322 323 r0 = nv_rd32(dev, 0x100200); 324 r4 = nv_rd32(dev, 0x100204); 325 rt = nv_rd32(dev, 0x100250); 326 ru = nv_rd32(dev, 0x001540); 327 NV_DEBUG(dev, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru); 328 329 for (i = 0, parts = 0; i < 8; i++) { 330 if (ru & (0x00010000 << i)) 331 parts++; 332 } 333 334 colbits = (r4 & 0x0000f000) >> 12; 335 rowbitsa = ((r4 & 0x000f0000) >> 16) + 8; 336 rowbitsb = ((r4 & 0x00f00000) >> 20) + 8; 337 banks = ((r4 & 0x01000000) ? 8 : 4); 338 339 rowsize = parts * banks * (1 << colbits) * 8; 340 predicted = rowsize << rowbitsa; 341 if (r0 & 0x00000004) 342 predicted += rowsize << rowbitsb; 343 344 if (predicted != dev_priv->vram_size) { 345 NV_WARN(dev, "memory controller reports %dMiB VRAM\n", 346 (u32)(dev_priv->vram_size >> 20)); 347 NV_WARN(dev, "we calculated %dMiB VRAM\n", 348 (u32)(predicted >> 20)); 349 } 350 351 dev_priv->vram_rblock_size = rowsize >> 12; 352 if (rt & 1) 353 dev_priv->vram_rblock_size *= 3; 354 355 NV_DEBUG(dev, "rblock %lld bytes\n", 356 (u64)dev_priv->vram_rblock_size << 12); 357 } 358 359 static void 360 nvaa_vram_preinit(struct drm_device *dev) 361 { 362 struct drm_nouveau_private *dev_priv = dev->dev_private; 363 364 /* To our knowledge, there's no large scale reordering of pages 365 * that occurs on IGP chipsets. 366 */ 367 dev_priv->vram_rblock_size = 1; 368 } 369 370 static int 371 nouveau_mem_detect(struct drm_device *dev) 372 { 373 struct drm_nouveau_private *dev_priv = dev->dev_private; 374 375 if (dev_priv->card_type == NV_04) { 376 dev_priv->vram_size = nouveau_mem_detect_nv04(dev); 377 } else 378 if (dev_priv->flags & (NV_NFORCE | NV_NFORCE2)) { 379 dev_priv->vram_size = nouveau_mem_detect_nforce(dev); 380 } else 381 if (dev_priv->card_type < NV_50) { 382 dev_priv->vram_size = nv_rd32(dev, NV04_PFB_FIFO_DATA); 383 dev_priv->vram_size &= NV10_PFB_FIFO_DATA_RAM_AMOUNT_MB_MASK; 384 } else 385 if (dev_priv->card_type < NV_C0) { 386 dev_priv->vram_size = nv_rd32(dev, NV04_PFB_FIFO_DATA); 387 dev_priv->vram_size |= (dev_priv->vram_size & 0xff) << 32; 388 dev_priv->vram_size &= 0xffffffff00ll; 389 390 switch (dev_priv->chipset) { 391 case 0xaa: 392 case 0xac: 393 case 0xaf: 394 dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10); 395 dev_priv->vram_sys_base <<= 12; 396 nvaa_vram_preinit(dev); 397 break; 398 default: 399 nv50_vram_preinit(dev); 400 break; 401 } 402 } else { 403 dev_priv->vram_size = nv_rd32(dev, 0x10f20c) << 20; 404 dev_priv->vram_size *= nv_rd32(dev, 0x121c74); 405 } 406 407 NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20)); 408 if (dev_priv->vram_sys_base) { 409 NV_INFO(dev, "Stolen system memory at: 0x%010llx\n", 410 dev_priv->vram_sys_base); 411 } 412 413 if (dev_priv->vram_size) 414 return 0; 415 return -ENOMEM; 416 } 417 418 #if __OS_HAS_AGP 419 static unsigned long 420 get_agp_mode(struct drm_device *dev, unsigned long mode) 421 { 422 struct drm_nouveau_private *dev_priv = dev->dev_private; 423 424 /* 425 * FW seems to be broken on nv18, it makes the card lock up 426 * randomly. 427 */ 428 if (dev_priv->chipset == 0x18) 429 mode &= ~PCI_AGP_COMMAND_FW; 430 431 /* 432 * AGP mode set in the command line. 433 */ 434 if (nouveau_agpmode > 0) { 435 bool agpv3 = mode & 0x8; 436 int rate = agpv3 ? nouveau_agpmode / 4 : nouveau_agpmode; 437 438 mode = (mode & ~0x7) | (rate & 0x7); 439 } 440 441 return mode; 442 } 443 #endif 444 445 int 446 nouveau_mem_reset_agp(struct drm_device *dev) 447 { 448 #if __OS_HAS_AGP 449 uint32_t saved_pci_nv_1, pmc_enable; 450 int ret; 451 452 /* First of all, disable fast writes, otherwise if it's 453 * already enabled in the AGP bridge and we disable the card's 454 * AGP controller we might be locking ourselves out of it. */ 455 if ((nv_rd32(dev, NV04_PBUS_PCI_NV_19) | 456 dev->agp->mode) & PCI_AGP_COMMAND_FW) { 457 struct drm_agp_info info; 458 struct drm_agp_mode mode; 459 460 ret = drm_agp_info(dev, &info); 461 if (ret) 462 return ret; 463 464 mode.mode = get_agp_mode(dev, info.mode) & ~PCI_AGP_COMMAND_FW; 465 ret = drm_agp_enable(dev, mode); 466 if (ret) 467 return ret; 468 } 469 470 saved_pci_nv_1 = nv_rd32(dev, NV04_PBUS_PCI_NV_1); 471 472 /* clear busmaster bit */ 473 nv_wr32(dev, NV04_PBUS_PCI_NV_1, saved_pci_nv_1 & ~0x4); 474 /* disable AGP */ 475 nv_wr32(dev, NV04_PBUS_PCI_NV_19, 0); 476 477 /* power cycle pgraph, if enabled */ 478 pmc_enable = nv_rd32(dev, NV03_PMC_ENABLE); 479 if (pmc_enable & NV_PMC_ENABLE_PGRAPH) { 480 nv_wr32(dev, NV03_PMC_ENABLE, 481 pmc_enable & ~NV_PMC_ENABLE_PGRAPH); 482 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) | 483 NV_PMC_ENABLE_PGRAPH); 484 } 485 486 /* and restore (gives effect of resetting AGP) */ 487 nv_wr32(dev, NV04_PBUS_PCI_NV_1, saved_pci_nv_1); 488 #endif 489 490 return 0; 491 } 492 493 int 494 nouveau_mem_init_agp(struct drm_device *dev) 495 { 496 #if __OS_HAS_AGP 497 struct drm_nouveau_private *dev_priv = dev->dev_private; 498 struct drm_agp_info info; 499 struct drm_agp_mode mode; 500 int ret; 501 502 if (!dev->agp->acquired) { 503 ret = drm_agp_acquire(dev); 504 if (ret) { 505 NV_ERROR(dev, "Unable to acquire AGP: %d\n", ret); 506 return ret; 507 } 508 } 509 510 nouveau_mem_reset_agp(dev); 511 512 ret = drm_agp_info(dev, &info); 513 if (ret) { 514 NV_ERROR(dev, "Unable to get AGP info: %d\n", ret); 515 return ret; 516 } 517 518 /* see agp.h for the AGPSTAT_* modes available */ 519 mode.mode = get_agp_mode(dev, info.mode); 520 ret = drm_agp_enable(dev, mode); 521 if (ret) { 522 NV_ERROR(dev, "Unable to enable AGP: %d\n", ret); 523 return ret; 524 } 525 526 dev_priv->gart_info.type = NOUVEAU_GART_AGP; 527 dev_priv->gart_info.aper_base = info.aperture_base; 528 dev_priv->gart_info.aper_size = info.aperture_size; 529 #endif 530 return 0; 531 } 532 533 int 534 nouveau_mem_vram_init(struct drm_device *dev) 535 { 536 struct drm_nouveau_private *dev_priv = dev->dev_private; 537 struct ttm_bo_device *bdev = &dev_priv->ttm.bdev; 538 int ret, dma_bits; 539 540 if (dev_priv->card_type >= NV_50 && 541 pci_dma_supported(dev->pdev, DMA_BIT_MASK(40))) 542 dma_bits = 40; 543 else 544 dma_bits = 32; 545 546 ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits)); 547 if (ret) 548 return ret; 549 550 ret = nouveau_mem_detect(dev); 551 if (ret) 552 return ret; 553 554 dev_priv->fb_phys = pci_resource_start(dev->pdev, 1); 555 556 ret = nouveau_ttm_global_init(dev_priv); 557 if (ret) 558 return ret; 559 560 ret = ttm_bo_device_init(&dev_priv->ttm.bdev, 561 dev_priv->ttm.bo_global_ref.ref.object, 562 &nouveau_bo_driver, DRM_FILE_PAGE_OFFSET, 563 dma_bits <= 32 ? true : false); 564 if (ret) { 565 NV_ERROR(dev, "Error initialising bo driver: %d\n", ret); 566 return ret; 567 } 568 569 dev_priv->fb_available_size = dev_priv->vram_size; 570 dev_priv->fb_mappable_pages = dev_priv->fb_available_size; 571 if (dev_priv->fb_mappable_pages > pci_resource_len(dev->pdev, 1)) 572 dev_priv->fb_mappable_pages = 573 pci_resource_len(dev->pdev, 1); 574 dev_priv->fb_mappable_pages >>= PAGE_SHIFT; 575 576 /* reserve space at end of VRAM for PRAMIN */ 577 if (dev_priv->chipset == 0x40 || dev_priv->chipset == 0x47 || 578 dev_priv->chipset == 0x49 || dev_priv->chipset == 0x4b) 579 dev_priv->ramin_rsvd_vram = (2 * 1024 * 1024); 580 else 581 if (dev_priv->card_type >= NV_40) 582 dev_priv->ramin_rsvd_vram = (1 * 1024 * 1024); 583 else 584 dev_priv->ramin_rsvd_vram = (512 * 1024); 585 586 dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram; 587 dev_priv->fb_aper_free = dev_priv->fb_available_size; 588 589 /* mappable vram */ 590 ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM, 591 dev_priv->fb_available_size >> PAGE_SHIFT); 592 if (ret) { 593 NV_ERROR(dev, "Failed VRAM mm init: %d\n", ret); 594 return ret; 595 } 596 597 ret = nouveau_bo_new(dev, NULL, 256*1024, 0, TTM_PL_FLAG_VRAM, 598 0, 0, true, true, &dev_priv->vga_ram); 599 if (ret == 0) 600 ret = nouveau_bo_pin(dev_priv->vga_ram, TTM_PL_FLAG_VRAM); 601 if (ret) { 602 NV_WARN(dev, "failed to reserve VGA memory\n"); 603 nouveau_bo_ref(NULL, &dev_priv->vga_ram); 604 } 605 606 dev_priv->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 1), 607 pci_resource_len(dev->pdev, 1), 608 DRM_MTRR_WC); 609 return 0; 610 } 611 612 int 613 nouveau_mem_gart_init(struct drm_device *dev) 614 { 615 struct drm_nouveau_private *dev_priv = dev->dev_private; 616 struct ttm_bo_device *bdev = &dev_priv->ttm.bdev; 617 int ret; 618 619 dev_priv->gart_info.type = NOUVEAU_GART_NONE; 620 621 #if !defined(__powerpc__) && !defined(__ia64__) 622 if (drm_device_is_agp(dev) && dev->agp && nouveau_agpmode) { 623 ret = nouveau_mem_init_agp(dev); 624 if (ret) 625 NV_ERROR(dev, "Error initialising AGP: %d\n", ret); 626 } 627 #endif 628 629 if (dev_priv->gart_info.type == NOUVEAU_GART_NONE) { 630 ret = nouveau_sgdma_init(dev); 631 if (ret) { 632 NV_ERROR(dev, "Error initialising PCI(E): %d\n", ret); 633 return ret; 634 } 635 } 636 637 NV_INFO(dev, "%d MiB GART (aperture)\n", 638 (int)(dev_priv->gart_info.aper_size >> 20)); 639 dev_priv->gart_info.aper_free = dev_priv->gart_info.aper_size; 640 641 ret = ttm_bo_init_mm(bdev, TTM_PL_TT, 642 dev_priv->gart_info.aper_size >> PAGE_SHIFT); 643 if (ret) { 644 NV_ERROR(dev, "Failed TT mm init: %d\n", ret); 645 return ret; 646 } 647 648 return 0; 649 } 650 651 void 652 nouveau_mem_timing_init(struct drm_device *dev) 653 { 654 /* cards < NVC0 only */ 655 struct drm_nouveau_private *dev_priv = dev->dev_private; 656 struct nouveau_pm_engine *pm = &dev_priv->engine.pm; 657 struct nouveau_pm_memtimings *memtimings = &pm->memtimings; 658 struct nvbios *bios = &dev_priv->vbios; 659 struct bit_entry P; 660 u8 tUNK_0, tUNK_1, tUNK_2; 661 u8 tRP; /* Byte 3 */ 662 u8 tRAS; /* Byte 5 */ 663 u8 tRFC; /* Byte 7 */ 664 u8 tRC; /* Byte 9 */ 665 u8 tUNK_10, tUNK_11, tUNK_12, tUNK_13, tUNK_14; 666 u8 tUNK_18, tUNK_19, tUNK_20, tUNK_21; 667 u8 *mem = NULL, *entry; 668 int i, recordlen, entries; 669 670 if (bios->type == NVBIOS_BIT) { 671 if (bit_table(dev, 'P', &P)) 672 return; 673 674 if (P.version == 1) 675 mem = ROMPTR(bios, P.data[4]); 676 else 677 if (P.version == 2) 678 mem = ROMPTR(bios, P.data[8]); 679 else { 680 NV_WARN(dev, "unknown mem for BIT P %d\n", P.version); 681 } 682 } else { 683 NV_DEBUG(dev, "BMP version too old for memory\n"); 684 return; 685 } 686 687 if (!mem) { 688 NV_DEBUG(dev, "memory timing table pointer invalid\n"); 689 return; 690 } 691 692 if (mem[0] != 0x10) { 693 NV_WARN(dev, "memory timing table 0x%02x unknown\n", mem[0]); 694 return; 695 } 696 697 /* validate record length */ 698 entries = mem[2]; 699 recordlen = mem[3]; 700 if (recordlen < 15) { 701 NV_ERROR(dev, "mem timing table length unknown: %d\n", mem[3]); 702 return; 703 } 704 705 /* parse vbios entries into common format */ 706 memtimings->timing = 707 kcalloc(entries, sizeof(*memtimings->timing), GFP_KERNEL); 708 if (!memtimings->timing) 709 return; 710 711 entry = mem + mem[1]; 712 for (i = 0; i < entries; i++, entry += recordlen) { 713 struct nouveau_pm_memtiming *timing = &pm->memtimings.timing[i]; 714 if (entry[0] == 0) 715 continue; 716 717 tUNK_18 = 1; 718 tUNK_19 = 1; 719 tUNK_20 = 0; 720 tUNK_21 = 0; 721 switch (min(recordlen, 22)) { 722 case 22: 723 tUNK_21 = entry[21]; 724 case 21: 725 tUNK_20 = entry[20]; 726 case 20: 727 tUNK_19 = entry[19]; 728 case 19: 729 tUNK_18 = entry[18]; 730 default: 731 tUNK_0 = entry[0]; 732 tUNK_1 = entry[1]; 733 tUNK_2 = entry[2]; 734 tRP = entry[3]; 735 tRAS = entry[5]; 736 tRFC = entry[7]; 737 tRC = entry[9]; 738 tUNK_10 = entry[10]; 739 tUNK_11 = entry[11]; 740 tUNK_12 = entry[12]; 741 tUNK_13 = entry[13]; 742 tUNK_14 = entry[14]; 743 break; 744 } 745 746 timing->reg_100220 = (tRC << 24 | tRFC << 16 | tRAS << 8 | tRP); 747 748 /* XXX: I don't trust the -1's and +1's... they must come 749 * from somewhere! */ 750 timing->reg_100224 = ((tUNK_0 + tUNK_19 + 1) << 24 | 751 tUNK_18 << 16 | 752 (tUNK_1 + tUNK_19 + 1) << 8 | 753 (tUNK_2 - 1)); 754 755 timing->reg_100228 = (tUNK_12 << 16 | tUNK_11 << 8 | tUNK_10); 756 if(recordlen > 19) { 757 timing->reg_100228 += (tUNK_19 - 1) << 24; 758 }/* I cannot back-up this else-statement right now 759 else { 760 timing->reg_100228 += tUNK_12 << 24; 761 }*/ 762 763 /* XXX: reg_10022c */ 764 timing->reg_10022c = tUNK_2 - 1; 765 766 timing->reg_100230 = (tUNK_20 << 24 | tUNK_21 << 16 | 767 tUNK_13 << 8 | tUNK_13); 768 769 /* XXX: +6? */ 770 timing->reg_100234 = (tRAS << 24 | (tUNK_19 + 6) << 8 | tRC); 771 timing->reg_100234 += max(tUNK_10,tUNK_11) << 16; 772 773 /* XXX; reg_100238, reg_10023c 774 * reg: 0x00?????? 775 * reg_10023c: 776 * 0 for pre-NV50 cards 777 * 0x????0202 for NV50+ cards (empirical evidence) */ 778 if(dev_priv->card_type >= NV_50) { 779 timing->reg_10023c = 0x202; 780 } 781 782 NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", i, 783 timing->reg_100220, timing->reg_100224, 784 timing->reg_100228, timing->reg_10022c); 785 NV_DEBUG(dev, " 230: %08x %08x %08x %08x\n", 786 timing->reg_100230, timing->reg_100234, 787 timing->reg_100238, timing->reg_10023c); 788 } 789 790 memtimings->nr_timing = entries; 791 memtimings->supported = true; 792 } 793 794 void 795 nouveau_mem_timing_fini(struct drm_device *dev) 796 { 797 struct drm_nouveau_private *dev_priv = dev->dev_private; 798 struct nouveau_pm_memtimings *mem = &dev_priv->engine.pm.memtimings; 799 800 kfree(mem->timing); 801 } 802