/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/seq_file.h>
#include "drmP.h"
#include "drm.h"
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "r100d.h"
#include "rs100d.h"
#include "rv200d.h"
#include "rv250d.h"

#include <linux/firmware.h>
#include <linux/platform_device.h>

#include "r100_reg_safe.h"
#include "rn50_reg_safe.h"

/* Firmware Names */
#define FIRMWARE_R100		"radeon/R100_cp.bin"
#define FIRMWARE_R200		"radeon/R200_cp.bin"
#define FIRMWARE_R300		"radeon/R300_cp.bin"
#define FIRMWARE_R420		"radeon/R420_cp.bin"
#define FIRMWARE_RS690		"radeon/RS690_cp.bin"
#define FIRMWARE_RS600		"radeon/RS600_cp.bin"
#define FIRMWARE_R520		"radeon/R520_cp.bin"

MODULE_FIRMWARE(FIRMWARE_R100);
MODULE_FIRMWARE(FIRMWARE_R200);
MODULE_FIRMWARE(FIRMWARE_R300);
MODULE_FIRMWARE(FIRMWARE_R420);
MODULE_FIRMWARE(FIRMWARE_RS690);
MODULE_FIRMWARE(FIRMWARE_RS600);
MODULE_FIRMWARE(FIRMWARE_R520);

#include "r100_track.h"

/* This file gathers functions specific to:
 * r100, rv100, rs100, rv200, rs200, r200, rv250, rs300, rv280
 */

/*
 * PCI GART
 */
void r100_pci_gart_tlb_flush(struct radeon_device *rdev)
{
	/* TODO: can we do something here? */
	/* It seems the hw only caches one entry, so we should discard that
	 * entry; otherwise, if the first GPU GART read hits it, the access
	 * could end up at the wrong address.
	 */
}

int r100_pci_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.table.ram.ptr) {
		WARN(1, "R100 PCI GART already initialized.\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r)
		return r;
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
	rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush;
	rdev->asic->gart_set_page = &r100_pci_gart_set_page;
	return radeon_gart_table_ram_alloc(rdev);
}

/* required on r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */
void r100_enable_bm(struct radeon_device *rdev)
{
	uint32_t tmp;
	/* Enable bus mastering */
	tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
	WREG32(RADEON_BUS_CNTL, tmp);
}

int r100_pci_gart_enable(struct radeon_device *rdev)
{
	uint32_t tmp;

	/* discard memory requests outside of the configured range */
	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
	WREG32(RADEON_AIC_CNTL, tmp);
	/* set address range for PCI address translation */
	WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_location);
	tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
	WREG32(RADEON_AIC_HI_ADDR, tmp);
	/* set PCI GART page-table base address */
	WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr);
	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN;
	WREG32(RADEON_AIC_CNTL, tmp);
	r100_pci_gart_tlb_flush(rdev);
	rdev->gart.ready = true;
	return 0;
}

void r100_pci_gart_disable(struct radeon_device *rdev)
{
	uint32_t tmp;

	/* discard memory requests outside of the configured range */
	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
	WREG32(RADEON_AIC_CNTL, tmp & ~RADEON_PCIGART_TRANSLATE_EN);
	WREG32(RADEON_AIC_LO_ADDR, 0);
	WREG32(RADEON_AIC_HI_ADDR, 0);
}
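
/* Each GART entry below is one 32-bit little-endian dword holding the low
 * 32 bits of the page's bus address (hence table_size = num_gpu_pages * 4
 * in r100_pci_gart_init above); no flag bits are encoded. For example,
 * mapping page i at bus address 0x12345000 stores cpu_to_le32(0x12345000)
 * at index i (the address value is illustrative only).
 */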
int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
	if (i < 0 || i >= rdev->gart.num_gpu_pages) {
		return -EINVAL;
	}
	rdev->gart.table.ram.ptr[i] = cpu_to_le32(lower_32_bits(addr));
	return 0;
}

void r100_pci_gart_fini(struct radeon_device *rdev)
{
	r100_pci_gart_disable(rdev);
	radeon_gart_table_ram_free(rdev);
	radeon_gart_fini(rdev);
}

int r100_irq_set(struct radeon_device *rdev)
{
	uint32_t tmp = 0;

	if (rdev->irq.sw_int) {
		tmp |= RADEON_SW_INT_ENABLE;
	}
	if (rdev->irq.crtc_vblank_int[0]) {
		tmp |= RADEON_CRTC_VBLANK_MASK;
	}
	if (rdev->irq.crtc_vblank_int[1]) {
		tmp |= RADEON_CRTC2_VBLANK_MASK;
	}
	WREG32(RADEON_GEN_INT_CNTL, tmp);
	return 0;
}

void r100_irq_disable(struct radeon_device *rdev)
{
	u32 tmp;

	WREG32(R_000040_GEN_INT_CNTL, 0);
	/* Wait and acknowledge irq */
	mdelay(1);
	tmp = RREG32(R_000044_GEN_INT_STATUS);
	WREG32(R_000044_GEN_INT_STATUS, tmp);
}

static inline uint32_t r100_irq_ack(struct radeon_device *rdev)
{
	uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS);
	uint32_t irq_mask = RADEON_SW_INT_TEST | RADEON_CRTC_VBLANK_STAT |
		RADEON_CRTC2_VBLANK_STAT;

	if (irqs) {
		WREG32(RADEON_GEN_INT_STATUS, irqs);
	}
	return irqs & irq_mask;
}

int r100_irq_process(struct radeon_device *rdev)
{
	uint32_t status, msi_rearm;

	status = r100_irq_ack(rdev);
	if (!status) {
		return IRQ_NONE;
	}
	if (rdev->shutdown) {
		return IRQ_NONE;
	}
	while (status) {
		/* SW interrupt */
		if (status & RADEON_SW_INT_TEST) {
			radeon_fence_process(rdev);
		}
		/* Vertical blank interrupts */
		if (status & RADEON_CRTC_VBLANK_STAT) {
			drm_handle_vblank(rdev->ddev, 0);
		}
		if (status & RADEON_CRTC2_VBLANK_STAT) {
			drm_handle_vblank(rdev->ddev, 1);
		}
		status = r100_irq_ack(rdev);
	}
	if (rdev->msi_enabled) {
		switch (rdev->family) {
		case CHIP_RS400:
		case CHIP_RS480:
			msi_rearm = RREG32(RADEON_AIC_CNTL) & ~RS400_MSI_REARM;
			WREG32(RADEON_AIC_CNTL, msi_rearm);
			WREG32(RADEON_AIC_CNTL, msi_rearm | RS400_MSI_REARM);
			break;
		default:
			msi_rearm = RREG32(RADEON_MSI_REARM_EN) & ~RV370_MSI_REARM_EN;
			WREG32(RADEON_MSI_REARM_EN, msi_rearm);
			WREG32(RADEON_MSI_REARM_EN, msi_rearm | RV370_MSI_REARM_EN);
			break;
		}
	}
	return IRQ_HANDLED;
}

u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
{
	if (crtc == 0)
		return RREG32(RADEON_CRTC_CRNT_FRAME);
	else
		return RREG32(RADEON_CRTC2_CRNT_FRAME);
}

void r100_fence_ring_emit(struct radeon_device *rdev,
			  struct radeon_fence *fence)
{
	/* Whoever calls radeon_fence_emit should call ring_lock and ask
	 * for enough space (today the callers are ib scheduling and
	 * buffer moves) */
	/* Wait until IDLE & CLEAN */
	radeon_ring_write(rdev, PACKET0(0x1720, 0));
	radeon_ring_write(rdev, (1 << 16) | (1 << 17));
	/* Emit fence sequence & fire IRQ */
	radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
	radeon_ring_write(rdev, fence->seq);
	radeon_ring_write(rdev, PACKET0(RADEON_GEN_INT_STATUS, 0));
	radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
}

int r100_wb_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->wb.wb_obj == NULL) {
		r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT,
				     &rdev->wb.wb_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) create WB buffer failed\n", r);
			return r;
		}
		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
		if (unlikely(r != 0))
			return r;
		r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
				  &rdev->wb.gpu_addr);
		if (r) {
			dev_err(rdev->dev, "(%d) pin WB buffer failed\n", r);
			radeon_bo_unreserve(rdev->wb.wb_obj);
			return r;
		}
		r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
		radeon_bo_unreserve(rdev->wb.wb_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) map WB buffer failed\n", r);
			return r;
		}
	}
	WREG32(R_000774_SCRATCH_ADDR, rdev->wb.gpu_addr);
	WREG32(R_00070C_CP_RB_RPTR_ADDR,
	       S_00070C_RB_RPTR_ADDR((rdev->wb.gpu_addr + 1024) >> 2));
	WREG32(R_000770_SCRATCH_UMSK, 0xff);
	return 0;
}

void r100_wb_disable(struct radeon_device *rdev)
{
	WREG32(R_000770_SCRATCH_UMSK, 0);
}

void r100_wb_fini(struct radeon_device *rdev)
{
	int r;

	r100_wb_disable(rdev);
	if (rdev->wb.wb_obj) {
		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
		if (unlikely(r != 0)) {
			dev_err(rdev->dev, "(%d) can't finish WB\n", r);
			return;
		}
		radeon_bo_kunmap(rdev->wb.wb_obj);
		radeon_bo_unpin(rdev->wb.wb_obj);
		radeon_bo_unreserve(rdev->wb.wb_obj);
		radeon_bo_unref(&rdev->wb.wb_obj);
		rdev->wb.wb = NULL;
		rdev->wb.wb_obj = NULL;
	}
}
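
/* Note on the sizing below: each PACKET3_BITBLT_MULTI emitted in the loop
 * is 10 dwords (header + 9 payload) and can move at most 8191 pages, so a
 * copy needs DIV_ROUND_UP(num_pages, 8191) loops; the 64 extra dwords
 * reserved leave room for the trailing cache flush, wait and fence.
 */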
int r100_copy_blit(struct radeon_device *rdev,
		   uint64_t src_offset,
		   uint64_t dst_offset,
		   unsigned num_pages,
		   struct radeon_fence *fence)
{
	uint32_t cur_pages;
	uint32_t stride_bytes = PAGE_SIZE;
	uint32_t pitch;
	uint32_t stride_pixels;
	unsigned ndw;
	int num_loops;
	int r = 0;

	/* radeon is limited to a 16k stride */
	stride_bytes &= 0x3fff;
	/* radeon pitch is expressed in units of 64 bytes */
	pitch = stride_bytes / 64;
	stride_pixels = stride_bytes / 4;
	num_loops = DIV_ROUND_UP(num_pages, 8191);

	/* Ask for enough room for blit + flush + fence */
	ndw = 64 + (10 * num_loops);
	r = radeon_ring_lock(rdev, ndw);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);
		return -EINVAL;
	}
	while (num_pages > 0) {
		cur_pages = num_pages;
		if (cur_pages > 8191) {
			cur_pages = 8191;
		}
		num_pages -= cur_pages;

		/* pages run in the Y direction (height);
		 * page width runs in the X direction (width) */
		radeon_ring_write(rdev, PACKET3(PACKET3_BITBLT_MULTI, 8));
		radeon_ring_write(rdev,
				  RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
				  RADEON_GMC_DST_PITCH_OFFSET_CNTL |
				  RADEON_GMC_SRC_CLIPPING |
				  RADEON_GMC_DST_CLIPPING |
				  RADEON_GMC_BRUSH_NONE |
				  (RADEON_COLOR_FORMAT_ARGB8888 << 8) |
				  RADEON_GMC_SRC_DATATYPE_COLOR |
				  RADEON_ROP3_S |
				  RADEON_DP_SRC_SOURCE_MEMORY |
				  RADEON_GMC_CLR_CMP_CNTL_DIS |
				  RADEON_GMC_WR_MSK_DIS);
		radeon_ring_write(rdev, (pitch << 22) | (src_offset >> 10));
		radeon_ring_write(rdev, (pitch << 22) | (dst_offset >> 10));
		radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
		radeon_ring_write(rdev, 0);
		radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
		radeon_ring_write(rdev, num_pages);
		radeon_ring_write(rdev, num_pages);
		radeon_ring_write(rdev, cur_pages | (stride_pixels << 16));
	}
	radeon_ring_write(rdev, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, RADEON_RB2D_DC_FLUSH_ALL);
	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(rdev,
			  RADEON_WAIT_2D_IDLECLEAN |
			  RADEON_WAIT_HOST_IDLECLEAN |
			  RADEON_WAIT_DMA_GUI_IDLE);
	if (fence) {
		r = radeon_fence_emit(rdev, fence);
	}
	radeon_ring_unlock_commit(rdev);
	return r;
}

static int r100_cp_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(R_000E40_RBBM_STATUS);
		if (!G_000E40_CP_CMDSTRM_BUSY(tmp)) {
			return 0;
		}
		udelay(1);
	}
	return -1;
}

void r100_ring_start(struct radeon_device *rdev)
{
	int r;

	r = radeon_ring_lock(rdev, 2);
	if (r) {
		return;
	}
	radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0));
	radeon_ring_write(rdev,
			  RADEON_ISYNC_ANY2D_IDLE3D |
			  RADEON_ISYNC_ANY3D_IDLE2D |
			  RADEON_ISYNC_WAIT_IDLEGUI |
			  RADEON_ISYNC_CPSCRATCH_IDLEGUI);
	radeon_ring_unlock_commit(rdev);
}

/* Load the microcode for the CP */
static int r100_cp_init_microcode(struct radeon_device *rdev)
{
	struct platform_device *pdev;
	const char *fw_name = NULL;
	int err;

	DRM_DEBUG("\n");

	pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
	err = IS_ERR(pdev);
	if (err) {
		printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
		return -EINVAL;
	}
	if ((rdev->family == CHIP_R100) || (rdev->family == CHIP_RV100) ||
	    (rdev->family == CHIP_RV200) || (rdev->family == CHIP_RS100) ||
	    (rdev->family == CHIP_RS200)) {
		DRM_INFO("Loading R100 Microcode\n");
		fw_name = FIRMWARE_R100;
	} else if ((rdev->family == CHIP_R200) ||
		   (rdev->family == CHIP_RV250) ||
		   (rdev->family == CHIP_RV280) ||
		   (rdev->family == CHIP_RS300)) {
		DRM_INFO("Loading R200 Microcode\n");
		fw_name = FIRMWARE_R200;
	} else if ((rdev->family == CHIP_R300) ||
		   (rdev->family == CHIP_R350) ||
		   (rdev->family == CHIP_RV350) ||
		   (rdev->family == CHIP_RV380) ||
		   (rdev->family == CHIP_RS400) ||
		   (rdev->family == CHIP_RS480)) {
		DRM_INFO("Loading R300 Microcode\n");
		fw_name = FIRMWARE_R300;
	} else if ((rdev->family == CHIP_R420) ||
		   (rdev->family == CHIP_R423) ||
		   (rdev->family == CHIP_RV410)) {
		DRM_INFO("Loading R400 Microcode\n");
		fw_name = FIRMWARE_R420;
	} else if ((rdev->family == CHIP_RS690) ||
		   (rdev->family == CHIP_RS740)) {
		DRM_INFO("Loading RS690/RS740 Microcode\n");
		fw_name = FIRMWARE_RS690;
	} else if (rdev->family == CHIP_RS600) {
		DRM_INFO("Loading RS600 Microcode\n");
		fw_name = FIRMWARE_RS600;
	} else if ((rdev->family == CHIP_RV515) ||
		   (rdev->family == CHIP_R520) ||
		   (rdev->family == CHIP_RV530) ||
		   (rdev->family == CHIP_R580) ||
		   (rdev->family == CHIP_RV560) ||
		   (rdev->family == CHIP_RV570)) {
		DRM_INFO("Loading R500 Microcode\n");
		fw_name = FIRMWARE_R520;
	}

	err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
	platform_device_unregister(pdev);
	if (err) {
		printk(KERN_ERR "radeon_cp: Failed to load firmware \"%s\"\n",
		       fw_name);
	} else if (rdev->me_fw->size % 8) {
		printk(KERN_ERR
		       "radeon_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->me_fw->size, fw_name);
		err = -EINVAL;
		release_firmware(rdev->me_fw);
		rdev->me_fw = NULL;
	}
	return err;
}
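
/* The CP microcode blob is a stream of 64-bit instructions: the loop below
 * writes it into CP_ME_RAM as big-endian high/low dword pairs, which is
 * also why r100_cp_init_microcode() above rejects firmware whose size is
 * not a multiple of 8 bytes.
 */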
static void r100_cp_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i, size;

	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	}

	if (rdev->me_fw) {
		size = rdev->me_fw->size / 4;
		fw_data = (const __be32 *)&rdev->me_fw->data[0];
		WREG32(RADEON_CP_ME_RAM_ADDR, 0);
		for (i = 0; i < size; i += 2) {
			WREG32(RADEON_CP_ME_RAM_DATAH,
			       be32_to_cpup(&fw_data[i]));
			WREG32(RADEON_CP_ME_RAM_DATAL,
			       be32_to_cpup(&fw_data[i + 1]));
		}
	}
}

int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
{
	unsigned rb_bufsz;
	unsigned rb_blksz;
	unsigned max_fetch;
	unsigned pre_write_timer;
	unsigned pre_write_limit;
	unsigned indirect2_start;
	unsigned indirect1_start;
	uint32_t tmp;
	int r;

	if (r100_debugfs_cp_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for CP !\n");
	}
	/* Reset CP */
	tmp = RREG32(RADEON_CP_CSQ_STAT);
	if ((tmp & (1 << 31))) {
		DRM_INFO("radeon: cp busy (0x%08X) resetting\n", tmp);
		WREG32(RADEON_CP_CSQ_MODE, 0);
		WREG32(RADEON_CP_CSQ_CNTL, 0);
		WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_CP);
		tmp = RREG32(RADEON_RBBM_SOFT_RESET);
		mdelay(2);
		WREG32(RADEON_RBBM_SOFT_RESET, 0);
		tmp = RREG32(RADEON_RBBM_SOFT_RESET);
		mdelay(2);
		tmp = RREG32(RADEON_CP_CSQ_STAT);
		if ((tmp & (1 << 31))) {
			DRM_INFO("radeon: cp reset failed (0x%08X)\n", tmp);
		}
	} else {
		DRM_INFO("radeon: cp idle (0x%08X)\n", tmp);
	}

	if (!rdev->me_fw) {
		r = r100_cp_init_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load firmware!\n");
			return r;
		}
	}

	/* Align ring size */
	rb_bufsz = drm_order(ring_size / 8);
	ring_size = (1 << (rb_bufsz + 1)) * 4;
	r100_cp_load_microcode(rdev);
	r = radeon_ring_init(rdev, ring_size);
	if (r) {
		return r;
	}
	/* Each time the cp reads 1024 bytes (16 dword/quadword), update
	 * the rptr copy in system ram */
	rb_blksz = 9;
	/* cp will read 128 bytes at a time (4 dwords) */
	max_fetch = 1;
	rdev->cp.align_mask = 16 - 1;
	/* Writes to CP_RB_WPTR will be delayed for pre_write_timer clocks */
	pre_write_timer = 64;
	/* Force a CP_RB_WPTR write if it is written more than once before
	 * the delay expires
	 */
	pre_write_limit = 0;
	/* Set up the cp cache like this (cache size is 96 dwords):
	 *	RING		0  to 15
	 *	INDIRECT1	16 to 79
	 *	INDIRECT2	80 to 95
	 * So ring cache size is 16 dwords (> (2 * max_fetch = 2 * 4 dwords))
	 * indirect1 cache size is 64 dwords (> (2 * max_fetch = 2 * 4 dwords))
	 * indirect2 cache size is 16 dwords (> (2 * max_fetch = 2 * 4 dwords))
	 * The idea is that most of the gpu cmds will go through the indirect1
	 * buffer, so it gets the bigger cache.
	 */
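	/* Worked example of the layout above: indirect1 spans entries
	 * 16-79, i.e. 79 - 16 + 1 = 64 dwords of cache, while the ring and
	 * indirect2 regions get 16 dwords each; every region stays larger
	 * than 2 * max_fetch (8 dwords), presumably so that one prefetch
	 * can never overrun its slot.
	 */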
	indirect2_start = 80;
	indirect1_start = 16;
	/* cp setup */
	WREG32(0x718, pre_write_timer | (pre_write_limit << 28));
	tmp = (REG_SET(RADEON_RB_BUFSZ, rb_bufsz) |
	       REG_SET(RADEON_RB_BLKSZ, rb_blksz) |
	       REG_SET(RADEON_MAX_FETCH, max_fetch) |
	       RADEON_RB_NO_UPDATE);
#ifdef __BIG_ENDIAN
	tmp |= RADEON_BUF_SWAP_32BIT;
#endif
	WREG32(RADEON_CP_RB_CNTL, tmp);

	/* Set ring address */
	DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)rdev->cp.gpu_addr);
	WREG32(RADEON_CP_RB_BASE, rdev->cp.gpu_addr);
	/* Force read & write ptr to 0 */
	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
	WREG32(RADEON_CP_RB_RPTR_WR, 0);
	WREG32(RADEON_CP_RB_WPTR, 0);
	WREG32(RADEON_CP_RB_CNTL, tmp);
	udelay(10);
	rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
	rdev->cp.wptr = RREG32(RADEON_CP_RB_WPTR);
	/* Set cp mode to bus mastering & enable cp */
	WREG32(RADEON_CP_CSQ_MODE,
	       REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
	       REG_SET(RADEON_INDIRECT1_START, indirect1_start));
	WREG32(0x718, 0);
	WREG32(0x744, 0x00004D4D);
	WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM);
	radeon_ring_start(rdev);
	r = radeon_ring_test(rdev);
	if (r) {
		DRM_ERROR("radeon: cp isn't working (%d).\n", r);
		return r;
	}
	rdev->cp.ready = true;
	return 0;
}

void r100_cp_fini(struct radeon_device *rdev)
{
	if (r100_cp_wait_for_idle(rdev)) {
		DRM_ERROR("Wait for CP idle timeout, shutting down CP.\n");
	}
	/* Disable ring */
	r100_cp_disable(rdev);
	radeon_ring_fini(rdev);
	DRM_INFO("radeon: cp finalized\n");
}

void r100_cp_disable(struct radeon_device *rdev)
{
	/* Disable ring */
	rdev->cp.ready = false;
	WREG32(RADEON_CP_CSQ_MODE, 0);
	WREG32(RADEON_CP_CSQ_CNTL, 0);
	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	}
}

int r100_cp_reset(struct radeon_device *rdev)
{
	uint32_t tmp;
	bool reinit_cp;
	int i;

	reinit_cp = rdev->cp.ready;
	rdev->cp.ready = false;
	WREG32(RADEON_CP_CSQ_MODE, 0);
	WREG32(RADEON_CP_CSQ_CNTL, 0);
	WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_CP);
	(void)RREG32(RADEON_RBBM_SOFT_RESET);
	udelay(200);
	WREG32(RADEON_RBBM_SOFT_RESET, 0);
	/* Wait to prevent race in RBBM_STATUS */
	mdelay(1);
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(RADEON_RBBM_STATUS);
		if (!(tmp & (1 << 16))) {
			DRM_INFO("CP reset succeeded (RBBM_STATUS=0x%08X)\n",
				 tmp);
			if (reinit_cp) {
				return r100_cp_init(rdev, rdev->cp.ring_size);
			}
			return 0;
		}
		DRM_UDELAY(1);
	}
	tmp = RREG32(RADEON_RBBM_STATUS);
	DRM_ERROR("Failed to reset CP (RBBM_STATUS=0x%08X)!\n", tmp);
	return -1;
}

void r100_cp_commit(struct radeon_device *rdev)
{
	WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);
	(void)RREG32(RADEON_CP_RB_WPTR);
}


/*
 * CS functions
 */
int r100_cs_parse_packet0(struct radeon_cs_parser *p,
			  struct radeon_cs_packet *pkt,
			  const unsigned *auth, unsigned n,
			  radeon_packet0_check_t check)
{
	unsigned reg;
	unsigned i, j, m;
	unsigned idx;
	int r;

	idx = pkt->idx + 1;
	reg = pkt->reg;
	/* Check that the register falls into the register range
	 * determined by the number of entries (n) in the
	 * safe register bitmap.
	 */
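	/* Bitmap lookup example: for reg 0x1720, the word index computed
	 * below is 0x1720 >> 7 = 46 and the bit is (0x1720 >> 2) & 31 = 8,
	 * since each 32-bit word of the bitmap covers 32 consecutive dword
	 * registers (128 bytes of register space). The value is
	 * illustrative; any dword-aligned register works the same way.
	 */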
	if (pkt->one_reg_wr) {
		if ((reg >> 7) > n) {
			return -EINVAL;
		}
	} else {
		if (((reg + (pkt->count << 2)) >> 7) > n) {
			return -EINVAL;
		}
	}
	for (i = 0; i <= pkt->count; i++, idx++) {
		j = (reg >> 7);
		m = 1 << ((reg >> 2) & 31);
		if (auth[j] & m) {
			r = check(p, pkt, idx, reg);
			if (r) {
				return r;
			}
		}
		if (pkt->one_reg_wr) {
			if (!(auth[j] & m)) {
				break;
			}
		} else {
			reg += 4;
		}
	}
	return 0;
}

void r100_cs_dump_packet(struct radeon_cs_parser *p,
			 struct radeon_cs_packet *pkt)
{
	volatile uint32_t *ib;
	unsigned i;
	unsigned idx;

	ib = p->ib->ptr;
	idx = pkt->idx;
	for (i = 0; i <= (pkt->count + 1); i++, idx++) {
		DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
	}
}

/**
 * r100_cs_packet_parse() - parse cp packet and point ib index to next packet
 * @parser:	parser structure holding parsing context.
 * @pkt:	where to store packet information
 *
 * Assumes that chunk_ib_index is properly set. Returns -EINVAL if the
 * packet is bigger than the remaining ib size, or if the packet is unknown.
 **/
int r100_cs_packet_parse(struct radeon_cs_parser *p,
			 struct radeon_cs_packet *pkt,
			 unsigned idx)
{
	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
	uint32_t header;

	if (idx >= ib_chunk->length_dw) {
		DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
			  idx, ib_chunk->length_dw);
		return -EINVAL;
	}
	header = radeon_get_ib_value(p, idx);
	pkt->idx = idx;
	pkt->type = CP_PACKET_GET_TYPE(header);
	pkt->count = CP_PACKET_GET_COUNT(header);
	switch (pkt->type) {
	case PACKET_TYPE0:
		pkt->reg = CP_PACKET0_GET_REG(header);
		pkt->one_reg_wr = CP_PACKET0_GET_ONE_REG_WR(header);
		break;
	case PACKET_TYPE3:
		pkt->opcode = CP_PACKET3_GET_OPCODE(header);
		break;
	case PACKET_TYPE2:
		pkt->count = -1;
		break;
	default:
		DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
		return -EINVAL;
	}
	if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
		DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
			  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
		return -EINVAL;
	}
	return 0;
}

/**
 * r100_cs_packet_parse_vline() - parse userspace VLINE packet
 * @parser:	parser structure holding parsing context.
 *
 * Userspace sends a special sequence for VLINE waits.
 * PACKET0 - VLINE_START_END + value
 * PACKET0 - WAIT_UNTIL + value
 * RELOC (P3) - crtc_id in reloc.
 *
 * This function parses this and relocates the VLINE START END
 * and WAIT UNTIL packets to the correct crtc.
 * It also detects a switched-off crtc and nulls out the
 * wait in that case.
 */
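/* Dword layout walked below, relative to h_idx:
 *   h_idx + 0: PACKET0 header for VLINE_START_END
 *   h_idx + 1: its value
 *   h_idx + 2: PACKET0 header for WAIT_UNTIL
 *   h_idx + 3: wait value (RADEON_WAIT_CRTC_VLINE)
 *   h_idx + 5: crtc_id carried in the reloc packet
 * (layout read off the index arithmetic in the function itself)
 */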
int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
{
	struct drm_mode_object *obj;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	struct radeon_cs_packet p3reloc, waitreloc;
	int crtc_id;
	int r;
	uint32_t header, h_idx, reg;
	volatile uint32_t *ib;

	ib = p->ib->ptr;

	/* parse the wait until */
	r = r100_cs_packet_parse(p, &waitreloc, p->idx);
	if (r)
		return r;

	/* check it's a wait until and has only 1 count */
	if (waitreloc.reg != RADEON_WAIT_UNTIL ||
	    waitreloc.count != 0) {
		DRM_ERROR("vline wait had illegal wait until segment\n");
		r = -EINVAL;
		return r;
	}

	if (radeon_get_ib_value(p, waitreloc.idx + 1) != RADEON_WAIT_CRTC_VLINE) {
		DRM_ERROR("vline wait had illegal wait until\n");
		r = -EINVAL;
		return r;
	}

	/* jump over the NOP */
	r = r100_cs_packet_parse(p, &p3reloc, p->idx + waitreloc.count + 2);
	if (r)
		return r;

	h_idx = p->idx - 2;
	p->idx += waitreloc.count + 2;
	p->idx += p3reloc.count + 2;

	header = radeon_get_ib_value(p, h_idx);
	crtc_id = radeon_get_ib_value(p, h_idx + 5);
	reg = CP_PACKET0_GET_REG(header);
	mutex_lock(&p->rdev->ddev->mode_config.mutex);
	obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
	if (!obj) {
		DRM_ERROR("cannot find crtc %d\n", crtc_id);
		r = -EINVAL;
		goto out;
	}
	crtc = obj_to_crtc(obj);
	radeon_crtc = to_radeon_crtc(crtc);
	crtc_id = radeon_crtc->crtc_id;

	if (!crtc->enabled) {
		/* if the CRTC isn't enabled - we need to nop out the wait until */
		ib[h_idx + 2] = PACKET2(0);
		ib[h_idx + 3] = PACKET2(0);
	} else if (crtc_id == 1) {
		switch (reg) {
		case AVIVO_D1MODE_VLINE_START_END:
			header &= ~R300_CP_PACKET0_REG_MASK;
			header |= AVIVO_D2MODE_VLINE_START_END >> 2;
			break;
		case RADEON_CRTC_GUI_TRIG_VLINE:
			header &= ~R300_CP_PACKET0_REG_MASK;
			header |= RADEON_CRTC2_GUI_TRIG_VLINE >> 2;
			break;
		default:
			DRM_ERROR("unknown crtc reloc\n");
			r = -EINVAL;
			goto out;
		}
		ib[h_idx] = header;
		ib[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1;
	}
out:
	mutex_unlock(&p->rdev->ddev->mode_config.mutex);
	return r;
}

/**
 * r100_cs_packet_next_reloc() - parse next packet which should be a reloc packet3
 * @parser:	parser structure holding parsing context.
 * @data:	pointer to relocation data
 * @offset_start: starting offset
 * @offset_mask: offset mask (to align start offset on)
 * @reloc:	reloc information
 *
 * Checks that the next packet is a relocation packet3, does bo validation
 * and computes the GPU offset using the provided start.
 **/
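/* The reloc packet checked below is a PACKET3_NOP whose first body dword
 * is an index into the relocation chunk; that index is divided by 4
 * because each reloc entry is assumed to span 4 dwords (see the FIXME in
 * the function body).
 */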
int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
			      struct radeon_cs_reloc **cs_reloc)
{
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_packet p3reloc;
	unsigned idx;
	int r;

	if (p->chunk_relocs_idx == -1) {
		DRM_ERROR("No relocation chunk !\n");
		return -EINVAL;
	}
	*cs_reloc = NULL;
	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
	r = r100_cs_packet_parse(p, &p3reloc, p->idx);
	if (r) {
		return r;
	}
	p->idx += p3reloc.count + 2;
	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
			  p3reloc.idx);
		r100_cs_dump_packet(p, &p3reloc);
		return -EINVAL;
	}
	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
		r100_cs_dump_packet(p, &p3reloc);
		return -EINVAL;
	}
	/* FIXME: we assume reloc size is 4 dwords */
	*cs_reloc = p->relocs_ptr[(idx / 4)];
	return 0;
}
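
/* Returns the per-vertex size in dwords implied by a SE_VTX_FMT value:
 * the base of 2 presumably covers X/Y, and every set format bit adds its
 * component count. Illustrative example: a format with only
 * RADEON_SE_VTX_FMT_Z and RADEON_SE_VTX_FMT_ST0 set yields
 * 2 + 1 + 2 = 5 dwords per vertex.
 */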
static int r100_get_vtx_size(uint32_t vtx_fmt)
{
	int vtx_size;
	vtx_size = 2;
	/* ordered according to bits in spec */
	if (vtx_fmt & RADEON_SE_VTX_FMT_W0)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_FPCOLOR)
		vtx_size += 3;
	if (vtx_fmt & RADEON_SE_VTX_FMT_FPALPHA)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_PKCOLOR)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_FPSPEC)
		vtx_size += 3;
	if (vtx_fmt & RADEON_SE_VTX_FMT_FPFOG)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_PKSPEC)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_ST0)
		vtx_size += 2;
	if (vtx_fmt & RADEON_SE_VTX_FMT_ST1)
		vtx_size += 2;
	if (vtx_fmt & RADEON_SE_VTX_FMT_Q1)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_ST2)
		vtx_size += 2;
	if (vtx_fmt & RADEON_SE_VTX_FMT_Q2)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_ST3)
		vtx_size += 2;
	if (vtx_fmt & RADEON_SE_VTX_FMT_Q3)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_Q0)
		vtx_size++;
	/* blend weight */
	if (vtx_fmt & (0x7 << 15))
		vtx_size += (vtx_fmt >> 15) & 0x7;
	if (vtx_fmt & RADEON_SE_VTX_FMT_N0)
		vtx_size += 3;
	if (vtx_fmt & RADEON_SE_VTX_FMT_XY1)
		vtx_size += 2;
	if (vtx_fmt & RADEON_SE_VTX_FMT_Z1)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_W1)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_N1)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_Z)
		vtx_size++;
	return vtx_size;
}

static int r100_packet0_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt,
			      unsigned idx, unsigned reg)
{
	struct radeon_cs_reloc *reloc;
	struct r100_cs_track *track;
	volatile uint32_t *ib;
	uint32_t tmp;
	int r;
	int i, face;
	u32 tile_flags = 0;
	u32 idx_value;

	ib = p->ib->ptr;
	track = (struct r100_cs_track *)p->track;

	idx_value = radeon_get_ib_value(p, idx);

	switch (reg) {
	case RADEON_CRTC_GUI_TRIG_VLINE:
		r = r100_cs_packet_parse_vline(p);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		break;
		/* FIXME: only allow PACKET3 blit? easier to check for out of
		 * range access */
	case RADEON_DST_PITCH_OFFSET:
	case RADEON_SRC_PITCH_OFFSET:
		r = r100_reloc_pitch_offset(p, pkt, idx, reg);
		if (r)
			return r;
		break;
	case RADEON_RB3D_DEPTHOFFSET:
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		track->zb.robj = reloc->robj;
		track->zb.offset = idx_value;
		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
		break;
	case RADEON_RB3D_COLOROFFSET:
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		track->cb[0].robj = reloc->robj;
		track->cb[0].offset = idx_value;
		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
		break;
	case RADEON_PP_TXOFFSET_0:
	case RADEON_PP_TXOFFSET_1:
	case RADEON_PP_TXOFFSET_2:
		i = (reg - RADEON_PP_TXOFFSET_0) / 24;
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
		track->textures[i].robj = reloc->robj;
		break;
	case RADEON_PP_CUBIC_OFFSET_T0_0:
	case RADEON_PP_CUBIC_OFFSET_T0_1:
	case RADEON_PP_CUBIC_OFFSET_T0_2:
	case RADEON_PP_CUBIC_OFFSET_T0_3:
	case RADEON_PP_CUBIC_OFFSET_T0_4:
		i = (reg - RADEON_PP_CUBIC_OFFSET_T0_0) / 4;
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		track->textures[0].cube_info[i].offset = idx_value;
		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
		track->textures[0].cube_info[i].robj = reloc->robj;
		break;
	case RADEON_PP_CUBIC_OFFSET_T1_0:
	case RADEON_PP_CUBIC_OFFSET_T1_1:
	case RADEON_PP_CUBIC_OFFSET_T1_2:
	case RADEON_PP_CUBIC_OFFSET_T1_3:
	case RADEON_PP_CUBIC_OFFSET_T1_4:
		i = (reg - RADEON_PP_CUBIC_OFFSET_T1_0) / 4;
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		track->textures[1].cube_info[i].offset = idx_value;
		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
		track->textures[1].cube_info[i].robj = reloc->robj;
		break;
	case RADEON_PP_CUBIC_OFFSET_T2_0:
	case RADEON_PP_CUBIC_OFFSET_T2_1:
	case RADEON_PP_CUBIC_OFFSET_T2_2:
	case RADEON_PP_CUBIC_OFFSET_T2_3:
	case RADEON_PP_CUBIC_OFFSET_T2_4:
		i = (reg - RADEON_PP_CUBIC_OFFSET_T2_0) / 4;
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		track->textures[2].cube_info[i].offset = idx_value;
		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
		track->textures[2].cube_info[i].robj = reloc->robj;
		break;
	case RADEON_RE_WIDTH_HEIGHT:
		track->maxy = ((idx_value >> 16) & 0x7FF);
		break;
	case RADEON_RB3D_COLORPITCH:
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}

		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
			tile_flags |= RADEON_COLOR_TILE_ENABLE;
		if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
			tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;

		tmp = idx_value & ~(0x7 << 16);
		tmp |= tile_flags;
		ib[idx] = tmp;

		track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
		break;
	case RADEON_RB3D_DEPTHPITCH:
		track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK;
		break;
	case RADEON_RB3D_CNTL:
		switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
		case 7:
		case 8:
		case 9:
		case 11:
		case 12:
			track->cb[0].cpp = 1;
			break;
		case 3:
		case 4:
		case 15:
			track->cb[0].cpp = 2;
			break;
		case 6:
			track->cb[0].cpp = 4;
			break;
		default:
			DRM_ERROR("Invalid color buffer format (%d) !\n",
				  ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f));
			return -EINVAL;
		}
		track->z_enabled = !!(idx_value & RADEON_Z_ENABLE);
		break;
	case RADEON_RB3D_ZSTENCILCNTL:
		switch (idx_value & 0xf) {
		case 0:
			track->zb.cpp = 2;
			break;
		case 2:
		case 3:
		case 4:
		case 5:
		case 9:
		case 11:
			track->zb.cpp = 4;
			break;
		default:
			break;
		}
		break;
	case RADEON_RB3D_ZPASS_ADDR:
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
		break;
	case RADEON_PP_CNTL:
		{
			uint32_t temp = idx_value >> 4;
			for (i = 0; i < track->num_texture; i++)
				track->textures[i].enabled = !!(temp & (1 << i));
		}
		break;
	case RADEON_SE_VF_CNTL:
		track->vap_vf_cntl = idx_value;
		break;
	case RADEON_SE_VTX_FMT:
		track->vtx_size = r100_get_vtx_size(idx_value);
		break;
	case RADEON_PP_TEX_SIZE_0:
	case RADEON_PP_TEX_SIZE_1:
	case RADEON_PP_TEX_SIZE_2:
		i = (reg - RADEON_PP_TEX_SIZE_0) / 8;
		track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1;
		track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
		break;
	case RADEON_PP_TEX_PITCH_0:
	case RADEON_PP_TEX_PITCH_1:
	case RADEON_PP_TEX_PITCH_2:
		i = (reg - RADEON_PP_TEX_PITCH_0) / 8;
		track->textures[i].pitch = idx_value + 32;
		break;
	case RADEON_PP_TXFILTER_0:
	case RADEON_PP_TXFILTER_1:
	case RADEON_PP_TXFILTER_2:
		i = (reg - RADEON_PP_TXFILTER_0) / 24;
		track->textures[i].num_levels = ((idx_value & RADEON_MAX_MIP_LEVEL_MASK)
						 >> RADEON_MAX_MIP_LEVEL_SHIFT);
		tmp = (idx_value >> 23) & 0x7;
		if (tmp == 2 || tmp == 6)
			track->textures[i].roundup_w = false;
		tmp = (idx_value >> 27) & 0x7;
		if (tmp == 2 || tmp == 6)
			track->textures[i].roundup_h = false;
		break;
	case RADEON_PP_TXFORMAT_0:
	case RADEON_PP_TXFORMAT_1:
	case RADEON_PP_TXFORMAT_2:
		i = (reg - RADEON_PP_TXFORMAT_0) / 24;
		if (idx_value & RADEON_TXFORMAT_NON_POWER2) {
			track->textures[i].use_pitch = 1;
		} else {
			track->textures[i].use_pitch = 0;
			track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
			track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
		}
		if (idx_value & RADEON_TXFORMAT_CUBIC_MAP_ENABLE)
			track->textures[i].tex_coord_type = 2;
		switch ((idx_value & RADEON_TXFORMAT_FORMAT_MASK)) {
		case RADEON_TXFORMAT_I8:
		case RADEON_TXFORMAT_RGB332:
		case RADEON_TXFORMAT_Y8:
			track->textures[i].cpp = 1;
			break;
		case RADEON_TXFORMAT_AI88:
		case RADEON_TXFORMAT_ARGB1555:
		case RADEON_TXFORMAT_RGB565:
		case RADEON_TXFORMAT_ARGB4444:
		case RADEON_TXFORMAT_VYUY422:
		case RADEON_TXFORMAT_YVYU422:
		case RADEON_TXFORMAT_DXT1:
		case RADEON_TXFORMAT_SHADOW16:
		case RADEON_TXFORMAT_LDUDV655:
		case RADEON_TXFORMAT_DUDV88:
			track->textures[i].cpp = 2;
			break;
		case RADEON_TXFORMAT_ARGB8888:
		case RADEON_TXFORMAT_RGBA8888:
		case RADEON_TXFORMAT_DXT23:
		case RADEON_TXFORMAT_DXT45:
		case RADEON_TXFORMAT_SHADOW32:
		case RADEON_TXFORMAT_LDUDUV8888:
			track->textures[i].cpp = 4;
			break;
		}
		track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
		track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
		break;
	case RADEON_PP_CUBIC_FACES_0:
	case RADEON_PP_CUBIC_FACES_1:
	case RADEON_PP_CUBIC_FACES_2:
		tmp = idx_value;
		i = (reg - RADEON_PP_CUBIC_FACES_0) / 4;
		for (face = 0; face < 4; face++) {
			track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);
			track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf);
		}
		break;
	default:
		printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
		       reg, idx);
		return -EINVAL;
	}
	return 0;
}

int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
					 struct radeon_cs_packet *pkt,
					 struct radeon_bo *robj)
{
	unsigned idx;
	u32 value;
	idx = pkt->idx + 1;
	value = radeon_get_ib_value(p, idx + 2);
	if ((value + 1) > radeon_bo_size(robj)) {
		DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER "
			  "(need %u have %lu) !\n",
			  value + 1,
			  radeon_bo_size(robj));
		return -EINVAL;
	}
	return 0;
}
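
/* Validates PACKET3 commands: each draw opcode records the packet's
 * vertex-count dword in track->vap_vf_cntl and then runs
 * r100_cs_track_check() so the draw can be bounds-checked against the
 * buffers set up earlier in the stream.
 */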
static int r100_packet3_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt)
{
	struct radeon_cs_reloc *reloc;
	struct r100_cs_track *track;
	unsigned idx;
	volatile uint32_t *ib;
	int r;

	ib = p->ib->ptr;
	idx = pkt->idx + 1;
	track = (struct r100_cs_track *)p->track;
	switch (pkt->opcode) {
	case PACKET3_3D_LOAD_VBPNTR:
		r = r100_packet3_load_vbpntr(p, pkt, idx);
		if (r)
			return r;
		break;
	case PACKET3_INDX_BUFFER:
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		ib[idx+1] = radeon_get_ib_value(p, idx+1) + ((u32)reloc->lobj.gpu_offset);
		r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
		if (r) {
			return r;
		}
		break;
	case 0x23:
		/* 3D_RNDR_GEN_INDX_PRIM on r100/r200 */
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		ib[idx] = radeon_get_ib_value(p, idx) + ((u32)reloc->lobj.gpu_offset);
		track->num_arrays = 1;
		track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 2));

		track->arrays[0].robj = reloc->robj;
		track->arrays[0].esize = track->vtx_size;

		track->max_indx = radeon_get_ib_value(p, idx+1);

		track->vap_vf_cntl = radeon_get_ib_value(p, idx+3);
		track->immd_dwords = pkt->count - 1;
		r = r100_cs_track_check(p->rdev, track);
		if (r)
			return r;
		break;
	case PACKET3_3D_DRAW_IMMD:
		if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) {
			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
			return -EINVAL;
		}
		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
		track->immd_dwords = pkt->count - 1;
		r = r100_cs_track_check(p->rdev, track);
		if (r)
			return r;
		break;
		/* triggers drawing using in-packet vertex data */
	case PACKET3_3D_DRAW_IMMD_2:
		if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) {
			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
			return -EINVAL;
		}
		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
		track->immd_dwords = pkt->count;
		r = r100_cs_track_check(p->rdev, track);
		if (r)
			return r;
		break;
		/* triggers drawing using in-packet vertex data */
	case PACKET3_3D_DRAW_VBUF_2:
		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
		r = r100_cs_track_check(p->rdev, track);
		if (r)
			return r;
		break;
		/* triggers drawing of vertex buffers setup elsewhere */
	case PACKET3_3D_DRAW_INDX_2:
		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
		r = r100_cs_track_check(p->rdev, track);
		if (r)
			return r;
		break;
		/* triggers drawing using indices to vertex buffer */
	case PACKET3_3D_DRAW_VBUF:
		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
		r = r100_cs_track_check(p->rdev, track);
		if (r)
			return r;
		break;
		/* triggers drawing of vertex buffers setup elsewhere */
	case PACKET3_3D_DRAW_INDX:
		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
		r = r100_cs_track_check(p->rdev, track);
		if (r)
			return r;
		break;
		/* triggers drawing using indices to vertex buffer */
	case PACKET3_NOP:
		break;
	default:
		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
		return -EINVAL;
	}
	return 0;
}

int r100_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet pkt;
	struct r100_cs_track *track;
	int r;

	track = kzalloc(sizeof(*track), GFP_KERNEL);
	if (track == NULL)
		return -ENOMEM;
	r100_cs_track_clear(p->rdev, track);
	p->track = track;
	do {
		r = r100_cs_packet_parse(p, &pkt, p->idx);
		if (r) {
			return r;
		}
		p->idx += pkt.count + 2;
		switch (pkt.type) {
		case PACKET_TYPE0:
			if (p->rdev->family >= CHIP_R200)
				r = r100_cs_parse_packet0(p, &pkt,
							  p->rdev->config.r100.reg_safe_bm,
							  p->rdev->config.r100.reg_safe_bm_size,
							  &r200_packet0_check);
			else
				r = r100_cs_parse_packet0(p, &pkt,
							  p->rdev->config.r100.reg_safe_bm,
							  p->rdev->config.r100.reg_safe_bm_size,
							  &r100_packet0_check);
			break;
		case PACKET_TYPE2:
			break;
		case PACKET_TYPE3:
			r = r100_packet3_check(p, &pkt);
			break;
		default:
			DRM_ERROR("Unknown packet type %d !\n",
				  pkt.type);
			return -EINVAL;
		}
		if (r) {
			return r;
		}
	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
	return 0;
}


/*
 * Global GPU functions
 */
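/* Flag per-family PLL quirks; the flags set here drive the dummy reads in
 * r100_pll_errata_after_index() and the post-access delay in
 * r100_pll_errata_after_data() further down in this file.
 */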
void r100_errata(struct radeon_device *rdev)
{
	rdev->pll_errata = 0;

	if (rdev->family == CHIP_RV200 || rdev->family == CHIP_RS200) {
		rdev->pll_errata |= CHIP_ERRATA_PLL_DUMMYREADS;
	}

	if (rdev->family == CHIP_RV100 ||
	    rdev->family == CHIP_RS100 ||
	    rdev->family == CHIP_RS200) {
		rdev->pll_errata |= CHIP_ERRATA_PLL_DELAY;
	}
}

/* Wait for vertical sync on primary CRTC */
void r100_gpu_wait_for_vsync(struct radeon_device *rdev)
{
	uint32_t crtc_gen_cntl, tmp;
	int i;

	crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL);
	if ((crtc_gen_cntl & RADEON_CRTC_DISP_REQ_EN_B) ||
	    !(crtc_gen_cntl & RADEON_CRTC_EN)) {
		return;
	}
	/* Clear the CRTC_VBLANK_SAVE bit */
	WREG32(RADEON_CRTC_STATUS, RADEON_CRTC_VBLANK_SAVE_CLEAR);
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(RADEON_CRTC_STATUS);
		if (tmp & RADEON_CRTC_VBLANK_SAVE) {
			return;
		}
		DRM_UDELAY(1);
	}
}

/* Wait for vertical sync on secondary CRTC */
void r100_gpu_wait_for_vsync2(struct radeon_device *rdev)
{
	uint32_t crtc2_gen_cntl, tmp;
	int i;

	crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
	if ((crtc2_gen_cntl & RADEON_CRTC2_DISP_REQ_EN_B) ||
	    !(crtc2_gen_cntl & RADEON_CRTC2_EN))
		return;

	/* Clear the CRTC_VBLANK_SAVE bit */
	WREG32(RADEON_CRTC2_STATUS, RADEON_CRTC2_VBLANK_SAVE_CLEAR);
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(RADEON_CRTC2_STATUS);
		if (tmp & RADEON_CRTC2_VBLANK_SAVE) {
			return;
		}
		DRM_UDELAY(1);
	}
}

int r100_rbbm_fifo_wait_for_entry(struct radeon_device *rdev, unsigned n)
{
	unsigned i;
	uint32_t tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_FIFOCNT_MASK;
		if (tmp >= n) {
			return 0;
		}
		DRM_UDELAY(1);
	}
	return -1;
}

int r100_gui_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	uint32_t tmp;

	if (r100_rbbm_fifo_wait_for_entry(rdev, 64)) {
		printk(KERN_WARNING "radeon: wait for empty RBBM fifo failed !"
		       " Bad things might happen.\n");
	}
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(RADEON_RBBM_STATUS);
		if (!(tmp & (1 << 31))) {
			return 0;
		}
		DRM_UDELAY(1);
	}
	return -1;
}

int r100_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	uint32_t tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(0x0150);
		if (tmp & (1 << 2)) {
			return 0;
		}
		DRM_UDELAY(1);
	}
	return -1;
}

void r100_gpu_init(struct radeon_device *rdev)
{
	/* TODO: anything to do here? pipes? */
	r100_hdp_reset(rdev);
}

void r100_hdp_flush(struct radeon_device *rdev)
{
	u32 tmp;
	tmp = RREG32(RADEON_HOST_PATH_CNTL);
	tmp |= RADEON_HDP_READ_BUFFER_INVALIDATE;
	WREG32(RADEON_HOST_PATH_CNTL, tmp);
}

void r100_hdp_reset(struct radeon_device *rdev)
{
	uint32_t tmp;

	tmp = RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL;
	tmp |= (7 << 28);
	WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_SOFT_RESET | RADEON_HDP_READ_BUFFER_INVALIDATE);
	(void)RREG32(RADEON_HOST_PATH_CNTL);
	udelay(200);
	WREG32(RADEON_RBBM_SOFT_RESET, 0);
	WREG32(RADEON_HOST_PATH_CNTL, tmp);
	(void)RREG32(RADEON_HOST_PATH_CNTL);
}

int r100_rb2d_reset(struct radeon_device *rdev)
{
	uint32_t tmp;
	int i;

	WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_E2);
	(void)RREG32(RADEON_RBBM_SOFT_RESET);
	udelay(200);
	WREG32(RADEON_RBBM_SOFT_RESET, 0);
	/* Wait to prevent race in RBBM_STATUS */
	mdelay(1);
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(RADEON_RBBM_STATUS);
		if (!(tmp & (1 << 26))) {
			DRM_INFO("RB2D reset succeeded (RBBM_STATUS=0x%08X)\n",
				 tmp);
			return 0;
		}
		DRM_UDELAY(1);
	}
	tmp = RREG32(RADEON_RBBM_STATUS);
	DRM_ERROR("Failed to reset RB2D (RBBM_STATUS=0x%08X)!\n", tmp);
	return -1;
}

int r100_gpu_reset(struct radeon_device *rdev)
{
	uint32_t status;

	/* reset order likely matters */
	status = RREG32(RADEON_RBBM_STATUS);
	/* reset HDP */
	r100_hdp_reset(rdev);
	/* reset rb2d */
	if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
		r100_rb2d_reset(rdev);
	}
	/* TODO: reset 3D engine */
	/* reset CP */
	status = RREG32(RADEON_RBBM_STATUS);
	if (status & (1 << 16)) {
		r100_cp_reset(rdev);
	}
	/* Check if GPU is idle */
	status = RREG32(RADEON_RBBM_STATUS);
	if (status & (1 << 31)) {
		DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
		return -1;
	}
	DRM_INFO("GPU reset succeeded (RBBM_STATUS=0x%08X)\n", status);
	return 0;
}


/*
 * VRAM info
 */
static void r100_vram_get_type(struct radeon_device *rdev)
{
	uint32_t tmp;

	rdev->mc.vram_is_ddr = false;
	if (rdev->flags & RADEON_IS_IGP)
		rdev->mc.vram_is_ddr = true;
	else if (RREG32(RADEON_MEM_SDRAM_MODE_REG) & RADEON_MEM_CFG_TYPE_DDR)
		rdev->mc.vram_is_ddr = true;
	if ((rdev->family == CHIP_RV100) ||
	    (rdev->family == CHIP_RS100) ||
	    (rdev->family == CHIP_RS200)) {
		tmp = RREG32(RADEON_MEM_CNTL);
		if (tmp & RV100_HALF_MODE) {
			rdev->mc.vram_width = 32;
		} else {
			rdev->mc.vram_width = 64;
		}
		if (rdev->flags & RADEON_SINGLE_CRTC) {
			rdev->mc.vram_width /= 4;
			rdev->mc.vram_is_ddr = true;
		}
	} else if (rdev->family <= CHIP_RV280) {
		tmp = RREG32(RADEON_MEM_CNTL);
		if (tmp & RADEON_MEM_NUM_CHANNELS_MASK) {
			rdev->mc.vram_width = 128;
		} else {
			rdev->mc.vram_width = 64;
		}
	} else {
		/* newer IGPs */
		rdev->mc.vram_width = 128;
	}
}
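
/* Work out how much VRAM the CPU can actually reach through the PCI
 * aperture: generation-2 PCI parts can safely have HDP_APER_CNTL set,
 * doubling the usable window, while older boards are trusted only as far
 * as the BIOS already configured them.
 */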
static u32 r100_get_accessible_vram(struct radeon_device *rdev)
{
	u32 aper_size;
	u8 byte;

	aper_size = RREG32(RADEON_CONFIG_APER_SIZE);

	/* Set HDP_APER_CNTL only on cards that are known not to be broken,
	 * that is, those with the 2nd generation multifunction PCI interface
	 */
	if (rdev->family == CHIP_RV280 ||
	    rdev->family >= CHIP_RV350) {
		WREG32_P(RADEON_HOST_PATH_CNTL, RADEON_HDP_APER_CNTL,
			 ~RADEON_HDP_APER_CNTL);
		DRM_INFO("Generation 2 PCI interface, using max accessible memory\n");
		return aper_size * 2;
	}

	/* Older cards have all sorts of funny issues to deal with. First
	 * check if it's a multifunction card by reading the PCI config
	 * header type... Limit those to one aperture size
	 */
	pci_read_config_byte(rdev->pdev, 0xe, &byte);
	if (byte & 0x80) {
		DRM_INFO("Generation 1 PCI interface in multifunction mode\n");
		DRM_INFO("Limiting VRAM to one aperture\n");
		return aper_size;
	}

	/* Single function older card. We read HDP_APER_CNTL to see how the
	 * BIOS has set it up. We don't write this as it's broken on some
	 * ASICs, but we expect the BIOS to have done the right thing (might
	 * be too optimistic...)
	 */
	if (RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL)
		return aper_size * 2;
	return aper_size;
}
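
/* On IGPs the stolen-memory range comes from NB_TOM: bits 31:16 hold the
 * top 64KB page and bits 15:0 the bottom one, so the size computed below
 * is (top - bottom + 1) << 16. Illustrative example: tom = 0x03ff0000
 * gives (0x03ff - 0x0000 + 1) << 16 = 64MB starting at address 0.
 */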
void r100_vram_init_sizes(struct radeon_device *rdev)
{
	u64 config_aper_size;
	u32 accessible;

	config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE);

	if (rdev->flags & RADEON_IS_IGP) {
		uint32_t tom;
		/* read NB_TOM to get the amount of ram stolen for the GPU */
		tom = RREG32(RADEON_NB_TOM);
		rdev->mc.real_vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16);
		/* for IGPs we need to keep VRAM where it was put by the BIOS */
		rdev->mc.vram_location = (tom & 0xffff) << 16;
		WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
		rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
	} else {
		rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
		/* Some production boards of m6 will report 0
		 * if it's 8 MB
		 */
		if (rdev->mc.real_vram_size == 0) {
			rdev->mc.real_vram_size = 8192 * 1024;
			WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
		}
		/* let driver place VRAM */
		rdev->mc.vram_location = 0xFFFFFFFFUL;
		/* Fix for RN50, M6, M7 with 8/16/32(??) MB of VRAM -
		 * Novell bug 204882, along with lots of Ubuntu ones */
		if (config_aper_size > rdev->mc.real_vram_size)
			rdev->mc.mc_vram_size = config_aper_size;
		else
			rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
	}

	/* work out accessible VRAM */
	accessible = r100_get_accessible_vram(rdev);

	rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
	rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);

	if (accessible > rdev->mc.aper_size)
		accessible = rdev->mc.aper_size;

	if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
		rdev->mc.mc_vram_size = rdev->mc.aper_size;

	if (rdev->mc.real_vram_size > rdev->mc.aper_size)
		rdev->mc.real_vram_size = rdev->mc.aper_size;
}

void r100_vga_set_state(struct radeon_device *rdev, bool state)
{
	uint32_t temp;

	temp = RREG32(RADEON_CONFIG_CNTL);
	if (state == false) {
		temp &= ~(1 << 8);
		temp |= (1 << 9);
	} else {
		temp &= ~(1 << 9);
	}
	WREG32(RADEON_CONFIG_CNTL, temp);
}

void r100_vram_info(struct radeon_device *rdev)
{
	r100_vram_get_type(rdev);

	r100_vram_init_sizes(rdev);
}


/*
 * Indirect registers accessors
 */
void r100_pll_errata_after_index(struct radeon_device *rdev)
{
	if (!(rdev->pll_errata & CHIP_ERRATA_PLL_DUMMYREADS)) {
		return;
	}
	(void)RREG32(RADEON_CLOCK_CNTL_DATA);
	(void)RREG32(RADEON_CRTC_GEN_CNTL);
}

static void r100_pll_errata_after_data(struct radeon_device *rdev)
{
	/* This workaround is necessary on RV100, RS100 and RS200 chips
	 * or the chip could hang on a subsequent access
	 */
	if (rdev->pll_errata & CHIP_ERRATA_PLL_DELAY) {
		udelay(5000);
	}

	/* This function is required to work around a hardware bug in some
	 * (all?) revisions of the R300. This workaround should be called
	 * after every CLOCK_CNTL_INDEX register access. If not, register
	 * reads afterwards may not be correct.
	 */
	if (rdev->pll_errata & CHIP_ERRATA_R300_CG) {
		uint32_t save, tmp;

		save = RREG32(RADEON_CLOCK_CNTL_INDEX);
		tmp = save & ~(0x3f | RADEON_PLL_WR_EN);
		WREG32(RADEON_CLOCK_CNTL_INDEX, tmp);
		tmp = RREG32(RADEON_CLOCK_CNTL_DATA);
		WREG32(RADEON_CLOCK_CNTL_INDEX, save);
	}
}

uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg)
{
	uint32_t data;

	WREG8(RADEON_CLOCK_CNTL_INDEX, reg & 0x3f);
	r100_pll_errata_after_index(rdev);
	data = RREG32(RADEON_CLOCK_CNTL_DATA);
	r100_pll_errata_after_data(rdev);
	return data;
}

void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	WREG8(RADEON_CLOCK_CNTL_INDEX, ((reg & 0x3f) | RADEON_PLL_WR_EN));
	r100_pll_errata_after_index(rdev);
	WREG32(RADEON_CLOCK_CNTL_DATA, v);
	r100_pll_errata_after_data(rdev);
}

void r100_set_safe_registers(struct radeon_device *rdev)
{
	if (ASIC_IS_RN50(rdev)) {
		rdev->config.r100.reg_safe_bm = rn50_reg_safe_bm;
		rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(rn50_reg_safe_bm);
	} else if (rdev->family < CHIP_R200) {
		rdev->config.r100.reg_safe_bm = r100_reg_safe_bm;
		rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(r100_reg_safe_bm);
	} else {
		r200_set_safe_registers(rdev);
	}
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)
static int r100_debugfs_rbbm_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	uint32_t reg, value;
	unsigned i;

	seq_printf(m, "RBBM_STATUS 0x%08x\n", RREG32(RADEON_RBBM_STATUS));
	seq_printf(m, "RBBM_CMDFIFO_STAT 0x%08x\n", RREG32(0xE7C));
	seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
	for (i = 0; i < 64; i++) {
		WREG32(RADEON_RBBM_CMDFIFO_ADDR, i | 0x100);
		reg = (RREG32(RADEON_RBBM_CMDFIFO_DATA) - 1) >> 2;
		WREG32(RADEON_RBBM_CMDFIFO_ADDR, i);
		value = RREG32(RADEON_RBBM_CMDFIFO_DATA);
		seq_printf(m, "[0x%03X] 0x%04X=0x%08X\n", i, reg, value);
	}
	return 0;
}

static int r100_debugfs_cp_ring_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	uint32_t rdp, wdp;
	unsigned count, i, j;

	radeon_ring_free_size(rdev);
	rdp = RREG32(RADEON_CP_RB_RPTR);
	wdp = RREG32(RADEON_CP_RB_WPTR);
	count = (rdp + rdev->cp.ring_size - wdp) & rdev->cp.ptr_mask;
	seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
	seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp);
	seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
	seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
	seq_printf(m, "%u dwords in ring\n", count);
	for (j = 0; j <= count; j++) {
		i = (rdp + j) & rdev->cp.ptr_mask;
		seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
	}
	return 0;
}
	unsigned i;

	seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
	seq_printf(m, "CP_CSQ_MODE 0x%08x\n", RREG32(RADEON_CP_CSQ_MODE));
	csq_stat = RREG32(RADEON_CP_CSQ_STAT);
	csq2_stat = RREG32(RADEON_CP_CSQ2_STAT);
	r_rptr = (csq_stat >> 0) & 0x3ff;
	r_wptr = (csq_stat >> 10) & 0x3ff;
	ib1_rptr = (csq_stat >> 20) & 0x3ff;
	ib1_wptr = (csq2_stat >> 0) & 0x3ff;
	ib2_rptr = (csq2_stat >> 10) & 0x3ff;
	ib2_wptr = (csq2_stat >> 20) & 0x3ff;
	seq_printf(m, "CP_CSQ_STAT 0x%08x\n", csq_stat);
	seq_printf(m, "CP_CSQ2_STAT 0x%08x\n", csq2_stat);
	seq_printf(m, "Ring rptr %u\n", r_rptr);
	seq_printf(m, "Ring wptr %u\n", r_wptr);
	seq_printf(m, "Indirect1 rptr %u\n", ib1_rptr);
	seq_printf(m, "Indirect1 wptr %u\n", ib1_wptr);
	seq_printf(m, "Indirect2 rptr %u\n", ib2_rptr);
	seq_printf(m, "Indirect2 wptr %u\n", ib2_wptr);
	/* FIXME: 0, 128, 640 depend on the fifo setup, see cp_init_kms:
	 * 128 = indirect1_start * 8 & 640 = indirect2_start * 8 */
	seq_printf(m, "Ring fifo:\n");
	for (i = 0; i < 256; i++) {
		WREG32(RADEON_CP_CSQ_ADDR, i << 2);
		tmp = RREG32(RADEON_CP_CSQ_DATA);
		seq_printf(m, "rfifo[%04d]=0x%08X\n", i, tmp);
	}
	seq_printf(m, "Indirect1 fifo:\n");
	for (i = 256; i <= 512; i++) {
		WREG32(RADEON_CP_CSQ_ADDR, i << 2);
		tmp = RREG32(RADEON_CP_CSQ_DATA);
		seq_printf(m, "ib1fifo[%04d]=0x%08X\n", i, tmp);
	}
	seq_printf(m, "Indirect2 fifo:\n");
	for (i = 640; i < ib2_wptr; i++) {
		WREG32(RADEON_CP_CSQ_ADDR, i << 2);
		tmp = RREG32(RADEON_CP_CSQ_DATA);
		seq_printf(m, "ib2fifo[%04d]=0x%08X\n", i, tmp);
	}
	return 0;
}

static int r100_debugfs_mc_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	uint32_t tmp;

	tmp = RREG32(RADEON_CONFIG_MEMSIZE);
	seq_printf(m, "CONFIG_MEMSIZE 0x%08x\n", tmp);
	tmp = RREG32(RADEON_MC_FB_LOCATION);
	seq_printf(m, "MC_FB_LOCATION 0x%08x\n", tmp);
	tmp = RREG32(RADEON_BUS_CNTL);
	seq_printf(m, "BUS_CNTL 0x%08x\n", tmp);
	tmp = RREG32(RADEON_MC_AGP_LOCATION);
	seq_printf(m, "MC_AGP_LOCATION 0x%08x\n", tmp);
	tmp = RREG32(RADEON_AGP_BASE);
	seq_printf(m, "AGP_BASE 0x%08x\n", tmp);
	tmp = RREG32(RADEON_HOST_PATH_CNTL);
	seq_printf(m, "HOST_PATH_CNTL 0x%08x\n", tmp);
	tmp = RREG32(0x01D0);
	seq_printf(m, "AIC_CTRL 0x%08x\n", tmp);
	tmp = RREG32(RADEON_AIC_LO_ADDR);
	seq_printf(m, "AIC_LO_ADDR 0x%08x\n", tmp);
	tmp = RREG32(RADEON_AIC_HI_ADDR);
	seq_printf(m, "AIC_HI_ADDR 0x%08x\n", tmp);
	tmp = RREG32(0x01E4);
	seq_printf(m, "AIC_TLB_ADDR 0x%08x\n", tmp);
	return 0;
}

static struct drm_info_list r100_debugfs_rbbm_list[] = {
	{"r100_rbbm_info", r100_debugfs_rbbm_info, 0, NULL},
};

static struct drm_info_list r100_debugfs_cp_list[] = {
	{"r100_cp_ring_info", r100_debugfs_cp_ring_info, 0, NULL},
	{"r100_cp_csq_fifo", r100_debugfs_cp_csq_fifo, 0, NULL},
};

static struct drm_info_list r100_debugfs_mc_info_list[] = {
	{"r100_mc_info", r100_debugfs_mc_info, 0, NULL},
};
#endif

int r100_debugfs_rbbm_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, r100_debugfs_rbbm_list, 1);
#else
	return 0;
#endif
}

int r100_debugfs_cp_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, r100_debugfs_cp_list, 2);
#else
	return 0;
#endif
}

int r100_debugfs_mc_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, r100_debugfs_mc_info_list, 1);
#else
	return 0;
#endif
}

int r100_set_surface_reg(struct radeon_device *rdev, int reg,
			 uint32_t tiling_flags, uint32_t pitch,
			 uint32_t offset, uint32_t obj_size)
{
	int surf_index = reg * 16;
	int flags = 0;

	/* r100/r200 divide by 16 */
	if (rdev->family < CHIP_R300)
		flags = pitch / 16;
	else
		flags = pitch / 8;

	if (rdev->family <= CHIP_RS200) {
		if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
				== (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
			flags |= RADEON_SURF_TILE_COLOR_BOTH;
		if (tiling_flags & RADEON_TILING_MACRO)
			flags |= RADEON_SURF_TILE_COLOR_MACRO;
	} else if (rdev->family <= CHIP_RV280) {
		if (tiling_flags & (RADEON_TILING_MACRO))
			flags |= R200_SURF_TILE_COLOR_MACRO;
		if (tiling_flags & RADEON_TILING_MICRO)
			flags |= R200_SURF_TILE_COLOR_MICRO;
	} else {
		if (tiling_flags & RADEON_TILING_MACRO)
			flags |= R300_SURF_TILE_MACRO;
		if (tiling_flags & RADEON_TILING_MICRO)
			flags |= R300_SURF_TILE_MICRO;
	}

	if (tiling_flags & RADEON_TILING_SWAP_16BIT)
		flags |= RADEON_SURF_AP0_SWP_16BPP | RADEON_SURF_AP1_SWP_16BPP;
	if (tiling_flags & RADEON_TILING_SWAP_32BIT)
		flags |= RADEON_SURF_AP0_SWP_32BPP | RADEON_SURF_AP1_SWP_32BPP;

	DRM_DEBUG("writing surface %d %d %x %x\n", reg, flags, offset, offset+obj_size-1);
	WREG32(RADEON_SURFACE0_INFO + surf_index, flags);
	WREG32(RADEON_SURFACE0_LOWER_BOUND + surf_index, offset);
	WREG32(RADEON_SURFACE0_UPPER_BOUND + surf_index, offset + obj_size - 1);
	return 0;
}

void r100_clear_surface_reg(struct radeon_device *rdev, int reg)
{
	int surf_index = reg * 16;
	WREG32(RADEON_SURFACE0_INFO + surf_index, 0);
}

void r100_bandwidth_update(struct radeon_device *rdev)
{
	fixed20_12 trcd_ff, trp_ff, tras_ff, trbs_ff, tcas_ff;
	fixed20_12 sclk_ff, mclk_ff, sclk_eff_ff, sclk_delay_ff;
	fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff;
	uint32_t temp, data, mem_trcd, mem_trp, mem_tras;
	fixed20_12 memtcas_ff[8] = {
		fixed_init(1),
		fixed_init(2),
		fixed_init(3),
		fixed_init(0),
		fixed_init_half(1),
		fixed_init_half(2),
		fixed_init(0),
	};
	fixed20_12 memtcas_rs480_ff[8] = {
		fixed_init(0),
		fixed_init(1),
		fixed_init(2),
		fixed_init(3),
		fixed_init(0),
		fixed_init_half(1),
		fixed_init_half(2),
		fixed_init_half(3),
	};
	fixed20_12 memtcas2_ff[8] = {
		fixed_init(0),
		fixed_init(1),
		fixed_init(2),
		fixed_init(3),
		fixed_init(4),
		fixed_init(5),
		fixed_init(6),
		fixed_init(7),
	};
	fixed20_12 memtrbs[8] = {
		fixed_init(1),
		fixed_init_half(1),
		fixed_init(2),
		fixed_init_half(2),
		fixed_init(3),
		fixed_init_half(3),
		fixed_init(4),
		fixed_init_half(4)
	};
	fixed20_12 memtrbs_r4xx[8] = {
		fixed_init(4),
		fixed_init(5),
		fixed_init(6),
		fixed_init(7),
		fixed_init(8),
		fixed_init(9),
		fixed_init(10),
		fixed_init(11)
	};
	fixed20_12 min_mem_eff;
	fixed20_12 mc_latency_sclk, mc_latency_mclk, k1;
	fixed20_12 cur_latency_mclk, cur_latency_sclk;
	fixed20_12 disp_latency, disp_latency_overhead, disp_drain_rate,
		disp_drain_rate2, read_return_rate;
	fixed20_12 time_disp1_drop_priority;
	int c;
	int cur_size = 16;	/* in octawords */
	int critical_point = 0, critical_point2;
/*	uint32_t read_return_rate, time_disp1_drop_priority; */
	int stop_req, max_stop_req;
	struct drm_display_mode *mode1 = NULL;
	struct drm_display_mode *mode2 = NULL;
	uint32_t pixel_bytes1 = 0;
	uint32_t pixel_bytes2 = 0;

	if (rdev->mode_info.crtcs[0]->base.enabled) {
		mode1 = &rdev->mode_info.crtcs[0]->base.mode;
		pixel_bytes1 = rdev->mode_info.crtcs[0]->base.fb->bits_per_pixel / 8;
	}
	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
		if (rdev->mode_info.crtcs[1]->base.enabled) {
			mode2 = &rdev->mode_info.crtcs[1]->base.mode;
			pixel_bytes2 = rdev->mode_info.crtcs[1]->base.fb->bits_per_pixel / 8;
		}
	}

	min_mem_eff.full = rfixed_const_8(0);
	/* get modes */
	if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) {
		uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER);
		mc_init_misc_lat_timer &= ~(R300_MC_DISP1R_INIT_LAT_MASK << R300_MC_DISP1R_INIT_LAT_SHIFT);
		mc_init_misc_lat_timer &= ~(R300_MC_DISP0R_INIT_LAT_MASK << R300_MC_DISP0R_INIT_LAT_SHIFT);
		/* check crtc enables */
		if (mode2)
			mc_init_misc_lat_timer |= (1 << R300_MC_DISP1R_INIT_LAT_SHIFT);
		if (mode1)
			mc_init_misc_lat_timer |= (1 << R300_MC_DISP0R_INIT_LAT_SHIFT);
		WREG32(R300_MC_INIT_MISC_LAT_TIMER, mc_init_misc_lat_timer);
	}

	/*
	 * determine whether there is enough bandwidth for the current mode
	 */
	mclk_ff.full = rfixed_const(rdev->clock.default_mclk);
	temp_ff.full = rfixed_const(100);
	mclk_ff.full = rfixed_div(mclk_ff, temp_ff);
	sclk_ff.full = rfixed_const(rdev->clock.default_sclk);
	sclk_ff.full = rfixed_div(sclk_ff, temp_ff);

	temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1);
	temp_ff.full = rfixed_const(temp);
	mem_bw.full = rfixed_mul(mclk_ff, temp_ff);

	pix_clk.full = 0;
	pix_clk2.full = 0;
	peak_disp_bw.full = 0;
	if (mode1) {
		temp_ff.full = rfixed_const(1000);
		pix_clk.full = rfixed_const(mode1->clock); /* convert to fixed point */
		pix_clk.full = rfixed_div(pix_clk, temp_ff);
		temp_ff.full = rfixed_const(pixel_bytes1);
		peak_disp_bw.full += rfixed_mul(pix_clk, temp_ff);
	}
	if (mode2) {
		temp_ff.full = rfixed_const(1000);
		pix_clk2.full = rfixed_const(mode2->clock); /* convert to fixed point */
		pix_clk2.full = rfixed_div(pix_clk2, temp_ff);
		temp_ff.full = rfixed_const(pixel_bytes2);
		peak_disp_bw.full += rfixed_mul(pix_clk2, temp_ff);
	}

	mem_bw.full = rfixed_mul(mem_bw, min_mem_eff);
	if (peak_disp_bw.full >= mem_bw.full) {
		DRM_ERROR("You may not have enough display bandwidth for the current mode\n"
			  "If you see flickering, try lowering the resolution, refresh rate, or color depth\n");
	}

	/* Get values from the EXT_MEM_CNTL register...converting its contents.
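	 * (The bit layout differs per family: each branch below extracts the
	 * raw tRCD/tRP/tRAS fields and biases them into clock counts, e.g.
	 * on r300/r350 tRAS = ((temp >> 11) & 0xf) + 4 clocks.)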
	 */
	temp = RREG32(RADEON_MEM_TIMING_CNTL);
	if ((rdev->family == CHIP_RV100) || (rdev->flags & RADEON_IS_IGP)) { /* RV100, M6, IGPs */
		mem_trcd = ((temp >> 2) & 0x3) + 1;
		mem_trp = ((temp & 0x3)) + 1;
		mem_tras = ((temp & 0x70) >> 4) + 1;
	} else if (rdev->family == CHIP_R300 ||
		   rdev->family == CHIP_R350) { /* r300, r350 */
		mem_trcd = (temp & 0x7) + 1;
		mem_trp = ((temp >> 8) & 0x7) + 1;
		mem_tras = ((temp >> 11) & 0xf) + 4;
	} else if (rdev->family == CHIP_RV350 ||
		   rdev->family <= CHIP_RV380) {
		/* rv3x0 */
		mem_trcd = (temp & 0x7) + 3;
		mem_trp = ((temp >> 8) & 0x7) + 3;
		mem_tras = ((temp >> 11) & 0xf) + 6;
	} else if (rdev->family == CHIP_R420 ||
		   rdev->family == CHIP_R423 ||
		   rdev->family == CHIP_RV410) {
		/* r4xx */
		mem_trcd = (temp & 0xf) + 3;
		if (mem_trcd > 15)
			mem_trcd = 15;
		mem_trp = ((temp >> 8) & 0xf) + 3;
		if (mem_trp > 15)
			mem_trp = 15;
		mem_tras = ((temp >> 12) & 0x1f) + 6;
		if (mem_tras > 31)
			mem_tras = 31;
	} else { /* RV200, R200 */
		mem_trcd = (temp & 0x7) + 1;
		mem_trp = ((temp >> 8) & 0x7) + 1;
		mem_tras = ((temp >> 12) & 0xf) + 4;
	}
	/* convert to FF */
	trcd_ff.full = rfixed_const(mem_trcd);
	trp_ff.full = rfixed_const(mem_trp);
	tras_ff.full = rfixed_const(mem_tras);

	/* Get values from the MEM_SDRAM_MODE_REG register...converting its contents. */
	temp = RREG32(RADEON_MEM_SDRAM_MODE_REG);
	data = (temp & (7 << 20)) >> 20;
	if ((rdev->family == CHIP_RV100) || rdev->flags & RADEON_IS_IGP) {
		if (rdev->family == CHIP_RS480) /* don't think rs400 needs this */
			tcas_ff = memtcas_rs480_ff[data];
		else
			tcas_ff = memtcas_ff[data];
	} else
		tcas_ff = memtcas2_ff[data];

	if (rdev->family == CHIP_RS400 ||
	    rdev->family == CHIP_RS480) {
		/* extra CAS latency stored in bits 23-25, 0-4 clocks */
		data = (temp >> 23) & 0x7;
		if (data < 5)
			tcas_ff.full += rfixed_const(data);
	}

	if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) {
		/* on the R300, Tcas is included in Trbs.
		 */
		temp = RREG32(RADEON_MEM_CNTL);
		data = (R300_MEM_NUM_CHANNELS_MASK & temp);
		if (data == 1) {
			if (R300_MEM_USE_CD_CH_ONLY & temp) {
				temp = RREG32(R300_MC_IND_INDEX);
				temp &= ~R300_MC_IND_ADDR_MASK;
				temp |= R300_MC_READ_CNTL_CD_mcind;
				WREG32(R300_MC_IND_INDEX, temp);
				temp = RREG32(R300_MC_IND_DATA);
				data = (R300_MEM_RBS_POSITION_C_MASK & temp);
			} else {
				temp = RREG32(R300_MC_READ_CNTL_AB);
				data = (R300_MEM_RBS_POSITION_A_MASK & temp);
			}
		} else {
			temp = RREG32(R300_MC_READ_CNTL_AB);
			data = (R300_MEM_RBS_POSITION_A_MASK & temp);
		}
		if (rdev->family == CHIP_RV410 ||
		    rdev->family == CHIP_R420 ||
		    rdev->family == CHIP_R423)
			trbs_ff = memtrbs_r4xx[data];
		else
			trbs_ff = memtrbs[data];
		tcas_ff.full += trbs_ff.full;
	}

	sclk_eff_ff.full = sclk_ff.full;

	if (rdev->flags & RADEON_IS_AGP) {
		fixed20_12 agpmode_ff;
		agpmode_ff.full = rfixed_const(radeon_agpmode);
		temp_ff.full = rfixed_const_666(16);
		sclk_eff_ff.full -= rfixed_mul(agpmode_ff, temp_ff);
	}
	/* TODO PCIE lanes may affect this - agpmode == 16??
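	 * With the constants above, each unit of agpmode costs roughly
	 * 16.66 MHz of effective SCLK, e.g. agpmode == 4 subtracts about
	 * 66 MHz from sclk_eff_ff.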
	 */

	if (ASIC_IS_R300(rdev)) {
		sclk_delay_ff.full = rfixed_const(250);
	} else {
		if ((rdev->family == CHIP_RV100) ||
		    rdev->flags & RADEON_IS_IGP) {
			if (rdev->mc.vram_is_ddr)
				sclk_delay_ff.full = rfixed_const(41);
			else
				sclk_delay_ff.full = rfixed_const(33);
		} else {
			if (rdev->mc.vram_width == 128)
				sclk_delay_ff.full = rfixed_const(57);
			else
				sclk_delay_ff.full = rfixed_const(41);
		}
	}

	mc_latency_sclk.full = rfixed_div(sclk_delay_ff, sclk_eff_ff);

	if (rdev->mc.vram_is_ddr) {
		if (rdev->mc.vram_width == 32) {
			k1.full = rfixed_const(40);
			c = 3;
		} else {
			k1.full = rfixed_const(20);
			c = 1;
		}
	} else {
		k1.full = rfixed_const(40);
		c = 3;
	}

	temp_ff.full = rfixed_const(2);
	mc_latency_mclk.full = rfixed_mul(trcd_ff, temp_ff);
	temp_ff.full = rfixed_const(c);
	mc_latency_mclk.full += rfixed_mul(tcas_ff, temp_ff);
	temp_ff.full = rfixed_const(4);
	mc_latency_mclk.full += rfixed_mul(tras_ff, temp_ff);
	mc_latency_mclk.full += rfixed_mul(trp_ff, temp_ff);
	mc_latency_mclk.full += k1.full;

	mc_latency_mclk.full = rfixed_div(mc_latency_mclk, mclk_ff);
	mc_latency_mclk.full += rfixed_div(temp_ff, sclk_eff_ff);

	/*
	 * HW cursor time assuming worst case of full size colour cursor.
	 */
	temp_ff.full = rfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1))));
	temp_ff.full += trcd_ff.full;
	if (temp_ff.full < tras_ff.full)
		temp_ff.full = tras_ff.full;
	cur_latency_mclk.full = rfixed_div(temp_ff, mclk_ff);

	temp_ff.full = rfixed_const(cur_size);
	cur_latency_sclk.full = rfixed_div(temp_ff, sclk_eff_ff);
	/*
	 * Find the total latency for the display data.
	 */
	disp_latency_overhead.full = rfixed_const(8);
	disp_latency_overhead.full = rfixed_div(disp_latency_overhead, sclk_ff);
	mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full;
	mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full;

	if (mc_latency_mclk.full > mc_latency_sclk.full)
		disp_latency.full = mc_latency_mclk.full;
	else
		disp_latency.full = mc_latency_sclk.full;

	/* setup Max GRPH_STOP_REQ default value */
	if (ASIC_IS_RV100(rdev))
		max_stop_req = 0x5c;
	else
		max_stop_req = 0x7c;

	if (mode1) {
		/* CRTC1
		 * Set GRPH_BUFFER_CNTL register using h/w defined optimal values.
		 * GRPH_STOP_REQ <= MIN[ 0x7C, (CRTC_H_DISP + 1) * (bit depth) / 0x10 ]
		 */
		stop_req = mode1->hdisplay * pixel_bytes1 / 16;

		if (stop_req > max_stop_req)
			stop_req = max_stop_req;

		/*
		 * Find the drain rate of the display buffer.
		 */
		temp_ff.full = rfixed_const((16/pixel_bytes1));
		disp_drain_rate.full = rfixed_div(pix_clk, temp_ff);

		/*
		 * Find the critical point of the display buffer.
		 */
		crit_point_ff.full = rfixed_mul(disp_drain_rate, disp_latency);
		crit_point_ff.full += rfixed_const_half(0);

		critical_point = rfixed_trunc(crit_point_ff);

		if (rdev->disp_priority == 2) {
			critical_point = 0;
		}

		/*
		 * The critical point should never be above max_stop_req - 4.
		 * Setting GRPH_CRITICAL_CNTL = 0 will thus force high priority
		 * all the time.
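		 * For example, with max_stop_req = 0x7c, any critical_point
		 * above 0x78 fails the check below and falls back to 0.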
		 */
		if (max_stop_req - critical_point < 4)
			critical_point = 0;

		if (critical_point == 0 && mode2 && rdev->family == CHIP_R300) {
			/* some R300 cards have a problem with this set to 0, when CRTC2 is enabled. */
			critical_point = 0x10;
		}

		temp = RREG32(RADEON_GRPH_BUFFER_CNTL);
		temp &= ~(RADEON_GRPH_STOP_REQ_MASK);
		temp |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT);
		temp &= ~(RADEON_GRPH_START_REQ_MASK);
		if ((rdev->family == CHIP_R350) &&
		    (stop_req > 0x15)) {
			stop_req -= 0x10;
		}
		temp |= (stop_req << RADEON_GRPH_START_REQ_SHIFT);
		temp |= RADEON_GRPH_BUFFER_SIZE;
		temp &= ~(RADEON_GRPH_CRITICAL_CNTL |
			  RADEON_GRPH_CRITICAL_AT_SOF |
			  RADEON_GRPH_STOP_CNTL);
		/*
		 * Write the result into the register.
		 */
		WREG32(RADEON_GRPH_BUFFER_CNTL, ((temp & ~RADEON_GRPH_CRITICAL_POINT_MASK) |
						 (critical_point << RADEON_GRPH_CRITICAL_POINT_SHIFT)));

#if 0
		if ((rdev->family == CHIP_RS400) ||
		    (rdev->family == CHIP_RS480)) {
			/* attempt to program RS400 disp regs correctly ??? */
			temp = RREG32(RS400_DISP1_REG_CNTL);
			temp &= ~(RS400_DISP1_START_REQ_LEVEL_MASK |
				  RS400_DISP1_STOP_REQ_LEVEL_MASK);
			WREG32(RS400_DISP1_REQ_CNTL1, (temp |
				(critical_point << RS400_DISP1_START_REQ_LEVEL_SHIFT) |
				(critical_point << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)));
			temp = RREG32(RS400_DMIF_MEM_CNTL1);
			temp &= ~(RS400_DISP1_CRITICAL_POINT_START_MASK |
				  RS400_DISP1_CRITICAL_POINT_STOP_MASK);
			WREG32(RS400_DMIF_MEM_CNTL1, (temp |
				(critical_point << RS400_DISP1_CRITICAL_POINT_START_SHIFT) |
				(critical_point << RS400_DISP1_CRITICAL_POINT_STOP_SHIFT)));
		}
#endif

		DRM_DEBUG("GRPH_BUFFER_CNTL set to %x\n",
			  /* (unsigned int)info->SavedReg->grph_buffer_cntl, */
			  (unsigned int)RREG32(RADEON_GRPH_BUFFER_CNTL));
	}

	if (mode2) {
		u32 grph2_cntl;
		stop_req = mode2->hdisplay * pixel_bytes2 / 16;

		if (stop_req > max_stop_req)
			stop_req = max_stop_req;

		/*
		 * Find the drain rate of the display buffer.
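		 * (pix_clk2 is in MHz and 16/pixel_bytes2 pixels fit in one
		 * 16-byte line, e.g. a 135 MHz dot clock at 32 bpp drains
		 * 135 / 4 = 33.75M lines per second.)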
		 */
		temp_ff.full = rfixed_const((16/pixel_bytes2));
		disp_drain_rate2.full = rfixed_div(pix_clk2, temp_ff);

		grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL);
		grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK);
		grph2_cntl |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT);
		grph2_cntl &= ~(RADEON_GRPH_START_REQ_MASK);
		if ((rdev->family == CHIP_R350) &&
		    (stop_req > 0x15)) {
			stop_req -= 0x10;
		}
		grph2_cntl |= (stop_req << RADEON_GRPH_START_REQ_SHIFT);
		grph2_cntl |= RADEON_GRPH_BUFFER_SIZE;
		grph2_cntl &= ~(RADEON_GRPH_CRITICAL_CNTL |
				RADEON_GRPH_CRITICAL_AT_SOF |
				RADEON_GRPH_STOP_CNTL);

		if ((rdev->family == CHIP_RS100) ||
		    (rdev->family == CHIP_RS200))
			critical_point2 = 0;
		else {
			temp = (rdev->mc.vram_width * (rdev->mc.vram_is_ddr + 1)) / 128;
			temp_ff.full = rfixed_const(temp);
			temp_ff.full = rfixed_mul(mclk_ff, temp_ff);
			if (sclk_ff.full < temp_ff.full)
				temp_ff.full = sclk_ff.full;

			read_return_rate.full = temp_ff.full;

			if (mode1) {
				temp_ff.full = read_return_rate.full - disp_drain_rate.full;
				time_disp1_drop_priority.full = rfixed_div(crit_point_ff, temp_ff);
			} else {
				time_disp1_drop_priority.full = 0;
			}
			crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full;
			crit_point_ff.full = rfixed_mul(crit_point_ff, disp_drain_rate2);
			crit_point_ff.full += rfixed_const_half(0);

			critical_point2 = rfixed_trunc(crit_point_ff);

			if (rdev->disp_priority == 2) {
				critical_point2 = 0;
			}

			if (max_stop_req - critical_point2 < 4)
				critical_point2 = 0;

		}

		if (critical_point2 == 0 && rdev->family == CHIP_R300) {
			/* some R300 cards have a problem with this set to 0 */
			critical_point2 = 0x10;
		}

		WREG32(RADEON_GRPH2_BUFFER_CNTL, ((grph2_cntl & ~RADEON_GRPH_CRITICAL_POINT_MASK) |
						  (critical_point2 << RADEON_GRPH_CRITICAL_POINT_SHIFT)));

		if ((rdev->family == CHIP_RS400) ||
		    (rdev->family == CHIP_RS480)) {
#if 0
			/* attempt to program RS400 disp2 regs correctly ??? */
			temp = RREG32(RS400_DISP2_REQ_CNTL1);
			temp &= ~(RS400_DISP2_START_REQ_LEVEL_MASK |
				  RS400_DISP2_STOP_REQ_LEVEL_MASK);
			WREG32(RS400_DISP2_REQ_CNTL1, (temp |
				(critical_point2 << RS400_DISP1_START_REQ_LEVEL_SHIFT) |
				(critical_point2 << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)));
			temp = RREG32(RS400_DISP2_REQ_CNTL2);
			temp &= ~(RS400_DISP2_CRITICAL_POINT_START_MASK |
				  RS400_DISP2_CRITICAL_POINT_STOP_MASK);
			WREG32(RS400_DISP2_REQ_CNTL2, (temp |
				(critical_point2 << RS400_DISP2_CRITICAL_POINT_START_SHIFT) |
				(critical_point2 << RS400_DISP2_CRITICAL_POINT_STOP_SHIFT)));
#endif
			WREG32(RS400_DISP2_REQ_CNTL1, 0x105DC1CC);
			WREG32(RS400_DISP2_REQ_CNTL2, 0x2749D000);
			WREG32(RS400_DMIF_MEM_CNTL1, 0x29CA71DC);
			WREG32(RS400_DISP1_REQ_CNTL1, 0x28FBC3AC);
		}

		DRM_DEBUG("GRPH2_BUFFER_CNTL set to %x\n",
			  (unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL));
	}
}

static inline void r100_cs_track_texture_print(struct r100_cs_track_texture *t)
{
	DRM_ERROR("pitch %d\n", t->pitch);
	DRM_ERROR("use_pitch %d\n", t->use_pitch);
	DRM_ERROR("width %d\n", t->width);
	DRM_ERROR("width_11 %d\n", t->width_11);
	DRM_ERROR("height %d\n", t->height);
	DRM_ERROR("height_11 %d\n", t->height_11);
	DRM_ERROR("num levels %d\n", t->num_levels);
	DRM_ERROR("depth %d\n", t->txdepth);
	DRM_ERROR("bpp %d\n", t->cpp);
	DRM_ERROR("coordinate type %d\n", t->tex_coord_type);
	DRM_ERROR("width round to power of 2 %d\n", t->roundup_w);
	DRM_ERROR("height round to power of 2 %d\n", t->roundup_h);
}

static int r100_cs_track_cube(struct radeon_device *rdev,
			      struct r100_cs_track *track, unsigned idx)
{
	unsigned face, w, h;
	struct radeon_bo *cube_robj;
	unsigned long size;

	for (face = 0; face < 5; face++) {
		cube_robj = track->textures[idx].cube_info[face].robj;
		w = track->textures[idx].cube_info[face].width;
		h = track->textures[idx].cube_info[face].height;

		size = w * h;
		size *= track->textures[idx].cpp;

		size += track->textures[idx].cube_info[face].offset;

		if (size > radeon_bo_size(cube_robj)) {
			DRM_ERROR("Cube texture offset greater than object size %lu %lu\n",
				  size, radeon_bo_size(cube_robj));
			r100_cs_track_texture_print(&track->textures[idx]);
			return -1;
		}
	}
	return 0;
}

static int r100_cs_track_texture_check(struct radeon_device *rdev,
				       struct r100_cs_track *track)
{
	struct radeon_bo *robj;
	unsigned long size;
	unsigned u, i, w, h;
	int ret;

	for (u = 0; u < track->num_texture; u++) {
		if (!track->textures[u].enabled)
			continue;
		robj = track->textures[u].robj;
		if (robj == NULL) {
			DRM_ERROR("No texture bound to unit %u\n", u);
			return -EINVAL;
		}
		size = 0;
		for (i = 0; i <= track->textures[u].num_levels; i++) {
			if (track->textures[u].use_pitch) {
				if (rdev->family < CHIP_R300)
					w = (track->textures[u].pitch / track->textures[u].cpp) / (1 << i);
				else
					w = track->textures[u].pitch / (1 << i);
			} else {
				w = track->textures[u].width;
				if (rdev->family >= CHIP_RV515)
					w |= track->textures[u].width_11;
				w = w / (1 << i);
				if (track->textures[u].roundup_w)
					w = roundup_pow_of_two(w);
			}
			h = track->textures[u].height;
			if (rdev->family >= CHIP_RV515)
				h |= track->textures[u].height_11;
			h = h / (1 << i);
			if (track->textures[u].roundup_h)
				h = roundup_pow_of_two(h);
			size += w * h;
		}
		size *= track->textures[u].cpp;
		switch (track->textures[u].tex_coord_type) {
		case 0:
			break;
		case 1:
			size *= (1 << track->textures[u].txdepth);
			break;
		case 2:
			if (track->separate_cube) {
				ret = r100_cs_track_cube(rdev, track, u);
				if (ret)
					return ret;
			} else
				size *= 6;
			break;
		default:
			DRM_ERROR("Invalid texture coordinate type %u for unit "
				  "%u\n", track->textures[u].tex_coord_type, u);
			return -EINVAL;
		}
		if (size > radeon_bo_size(robj)) {
			DRM_ERROR("Texture of unit %u needs %lu bytes but is "
				  "%lu\n", u, size, radeon_bo_size(robj));
			r100_cs_track_texture_print(&track->textures[u]);
			return -EINVAL;
		}
	}
	return 0;
}

int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
{
	unsigned i;
	unsigned long size;
	unsigned prim_walk;
	unsigned nverts;

	for (i = 0; i < track->num_cb; i++) {
		if (track->cb[i].robj == NULL) {
			DRM_ERROR("[drm] No buffer for color buffer %d!\n", i);
			return -EINVAL;
		}
		size = track->cb[i].pitch * track->cb[i].cpp * track->maxy;
		size += track->cb[i].offset;
		if (size > radeon_bo_size(track->cb[i].robj)) {
			DRM_ERROR("[drm] Buffer too small for color buffer %d "
				  "(need %lu have %lu)!\n", i, size,
				  radeon_bo_size(track->cb[i].robj));
			DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n",
				  i, track->cb[i].pitch, track->cb[i].cpp,
				  track->cb[i].offset, track->maxy);
			return -EINVAL;
		}
	}
	if (track->z_enabled) {
		if (track->zb.robj == NULL) {
			DRM_ERROR("[drm] No buffer for z buffer!\n");
			return -EINVAL;
		}
		size = track->zb.pitch * track->zb.cpp * track->maxy;
		size += track->zb.offset;
		if (size > radeon_bo_size(track->zb.robj)) {
			DRM_ERROR("[drm] Buffer too small for z buffer "
				  "(need %lu have %lu)!\n", size,
				  radeon_bo_size(track->zb.robj));
			DRM_ERROR("[drm] zbuffer (%u %u %u %u)\n",
				  track->zb.pitch, track->zb.cpp,
				  track->zb.offset, track->maxy);
			return -EINVAL;
		}
	}
	prim_walk = (track->vap_vf_cntl >> 4) & 0x3;
	nverts = (track->vap_vf_cntl >> 16) & 0xFFFF;
	switch (prim_walk) {
	case 1:
		for (i = 0; i < track->num_arrays; i++) {
			size = track->arrays[i].esize * track->max_indx * 4;
			if (track->arrays[i].robj == NULL) {
				DRM_ERROR("(PW %u) Vertex array %u no buffer "
					  "bound\n", prim_walk, i);
				return -EINVAL;
			}
			if (size > radeon_bo_size(track->arrays[i].robj)) {
				dev_err(rdev->dev, "(PW %u) Vertex array %u "
					"need %lu dwords have %lu dwords\n",
					prim_walk, i, size >> 2,
					radeon_bo_size(track->arrays[i].robj)
					>> 2);
				DRM_ERROR("Max indices %u\n", track->max_indx);
				return -EINVAL;
			}
		}
		break;
	case 2:
		for (i = 0; i < track->num_arrays; i++) {
			size = track->arrays[i].esize * (nverts - 1) * 4;
			if (track->arrays[i].robj == NULL) {
				DRM_ERROR("(PW %u) Vertex array %u no buffer "
					  "bound\n", prim_walk, i);
				return -EINVAL;
			}
			if (size > radeon_bo_size(track->arrays[i].robj)) {
				dev_err(rdev->dev, "(PW %u) Vertex array %u "
					"need %lu dwords have %lu dwords\n",
					prim_walk, i, size >> 2,
					radeon_bo_size(track->arrays[i].robj)
					>> 2);
				return -EINVAL;
			}
		}
		break;
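	/* prim_walk == 3: immediate mode, the vertex data is embedded in the
	 * command stream, so the draw must carry exactly vtx_size dwords per
	 * vertex.
	 */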
	case 3:
		size = track->vtx_size * nverts;
		if (size != track->immd_dwords) {
			DRM_ERROR("IMMD draw %u dwords but needs %lu dwords\n",
				  track->immd_dwords, size);
			DRM_ERROR("VAP_VF_CNTL.NUM_VERTICES %u, VTX_SIZE %u\n",
				  nverts, track->vtx_size);
			return -EINVAL;
		}
		break;
	default:
		DRM_ERROR("[drm] Invalid primitive walk %d for VAP_VF_CNTL\n",
			  prim_walk);
		return -EINVAL;
	}
	return r100_cs_track_texture_check(rdev, track);
}

void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track)
{
	unsigned i, face;

	if (rdev->family < CHIP_R300) {
		track->num_cb = 1;
		if (rdev->family <= CHIP_RS200)
			track->num_texture = 3;
		else
			track->num_texture = 6;
		track->maxy = 2048;
		track->separate_cube = 1;
	} else {
		track->num_cb = 4;
		track->num_texture = 16;
		track->maxy = 4096;
		track->separate_cube = 0;
	}

	for (i = 0; i < track->num_cb; i++) {
		track->cb[i].robj = NULL;
		track->cb[i].pitch = 8192;
		track->cb[i].cpp = 16;
		track->cb[i].offset = 0;
	}
	track->z_enabled = true;
	track->zb.robj = NULL;
	track->zb.pitch = 8192;
	track->zb.cpp = 4;
	track->zb.offset = 0;
	track->vtx_size = 0x7F;
	track->immd_dwords = 0xFFFFFFFFUL;
	track->num_arrays = 11;
	track->max_indx = 0x00FFFFFFUL;
	for (i = 0; i < track->num_arrays; i++) {
		track->arrays[i].robj = NULL;
		track->arrays[i].esize = 0x7F;
	}
	for (i = 0; i < track->num_texture; i++) {
		track->textures[i].pitch = 16536;
		track->textures[i].width = 16536;
		track->textures[i].height = 16536;
		track->textures[i].width_11 = 1 << 11;
		track->textures[i].height_11 = 1 << 11;
		track->textures[i].num_levels = 12;
		if (rdev->family <= CHIP_RS200) {
			track->textures[i].tex_coord_type = 0;
			track->textures[i].txdepth = 0;
		} else {
			track->textures[i].txdepth = 16;
			track->textures[i].tex_coord_type = 1;
		}
		track->textures[i].cpp = 64;
		track->textures[i].robj = NULL;
		/* CS IB emission code makes sure texture units are disabled */
		track->textures[i].enabled = false;
		track->textures[i].roundup_w = true;
		track->textures[i].roundup_h = true;
		if (track->separate_cube)
			for (face = 0; face < 5; face++) {
				track->textures[i].cube_info[face].robj = NULL;
				track->textures[i].cube_info[face].width = 16536;
				track->textures[i].cube_info[face].height = 16536;
				track->textures[i].cube_info[face].offset = 0;
			}
	}
}

int r100_ring_test(struct radeon_device *rdev)
{
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = radeon_scratch_get(rdev, &scratch);
	if (r) {
		DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	r = radeon_ring_lock(rdev, 2);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		radeon_scratch_free(rdev, scratch);
		return r;
	}
	radeon_ring_write(rdev, PACKET0(scratch, 0));
	radeon_ring_write(rdev, 0xDEADBEEF);
	radeon_ring_unlock_commit(rdev);
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF) {
			break;
		}
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ring test succeeded in %d usecs\n", i);
	} else {
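		/* Timed out: the CP never wrote 0xDEADBEEF back to the
		 * scratch register. */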
DRM_ERROR("radeon: ring test failed (sracth(0x%04X)=0x%08X)\n", 2914 scratch, tmp); 2915 r = -EINVAL; 2916 } 2917 radeon_scratch_free(rdev, scratch); 2918 return r; 2919 } 2920 2921 void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) 2922 { 2923 radeon_ring_write(rdev, PACKET0(RADEON_CP_IB_BASE, 1)); 2924 radeon_ring_write(rdev, ib->gpu_addr); 2925 radeon_ring_write(rdev, ib->length_dw); 2926 } 2927 2928 int r100_ib_test(struct radeon_device *rdev) 2929 { 2930 struct radeon_ib *ib; 2931 uint32_t scratch; 2932 uint32_t tmp = 0; 2933 unsigned i; 2934 int r; 2935 2936 r = radeon_scratch_get(rdev, &scratch); 2937 if (r) { 2938 DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r); 2939 return r; 2940 } 2941 WREG32(scratch, 0xCAFEDEAD); 2942 r = radeon_ib_get(rdev, &ib); 2943 if (r) { 2944 return r; 2945 } 2946 ib->ptr[0] = PACKET0(scratch, 0); 2947 ib->ptr[1] = 0xDEADBEEF; 2948 ib->ptr[2] = PACKET2(0); 2949 ib->ptr[3] = PACKET2(0); 2950 ib->ptr[4] = PACKET2(0); 2951 ib->ptr[5] = PACKET2(0); 2952 ib->ptr[6] = PACKET2(0); 2953 ib->ptr[7] = PACKET2(0); 2954 ib->length_dw = 8; 2955 r = radeon_ib_schedule(rdev, ib); 2956 if (r) { 2957 radeon_scratch_free(rdev, scratch); 2958 radeon_ib_free(rdev, &ib); 2959 return r; 2960 } 2961 r = radeon_fence_wait(ib->fence, false); 2962 if (r) { 2963 return r; 2964 } 2965 for (i = 0; i < rdev->usec_timeout; i++) { 2966 tmp = RREG32(scratch); 2967 if (tmp == 0xDEADBEEF) { 2968 break; 2969 } 2970 DRM_UDELAY(1); 2971 } 2972 if (i < rdev->usec_timeout) { 2973 DRM_INFO("ib test succeeded in %u usecs\n", i); 2974 } else { 2975 DRM_ERROR("radeon: ib test failed (sracth(0x%04X)=0x%08X)\n", 2976 scratch, tmp); 2977 r = -EINVAL; 2978 } 2979 radeon_scratch_free(rdev, scratch); 2980 radeon_ib_free(rdev, &ib); 2981 return r; 2982 } 2983 2984 void r100_ib_fini(struct radeon_device *rdev) 2985 { 2986 radeon_ib_pool_fini(rdev); 2987 } 2988 2989 int r100_ib_init(struct radeon_device *rdev) 2990 { 2991 int r; 2992 2993 r = radeon_ib_pool_init(rdev); 2994 if (r) { 2995 dev_err(rdev->dev, "failled initializing IB pool (%d).\n", r); 2996 r100_ib_fini(rdev); 2997 return r; 2998 } 2999 r = r100_ib_test(rdev); 3000 if (r) { 3001 dev_err(rdev->dev, "failled testing IB (%d).\n", r); 3002 r100_ib_fini(rdev); 3003 return r; 3004 } 3005 return 0; 3006 } 3007 3008 void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save) 3009 { 3010 /* Shutdown CP we shouldn't need to do that but better be safe than 3011 * sorry 3012 */ 3013 rdev->cp.ready = false; 3014 WREG32(R_000740_CP_CSQ_CNTL, 0); 3015 3016 /* Save few CRTC registers */ 3017 save->GENMO_WT = RREG8(R_0003C2_GENMO_WT); 3018 save->CRTC_EXT_CNTL = RREG32(R_000054_CRTC_EXT_CNTL); 3019 save->CRTC_GEN_CNTL = RREG32(R_000050_CRTC_GEN_CNTL); 3020 save->CUR_OFFSET = RREG32(R_000260_CUR_OFFSET); 3021 if (!(rdev->flags & RADEON_SINGLE_CRTC)) { 3022 save->CRTC2_GEN_CNTL = RREG32(R_0003F8_CRTC2_GEN_CNTL); 3023 save->CUR2_OFFSET = RREG32(R_000360_CUR2_OFFSET); 3024 } 3025 3026 /* Disable VGA aperture access */ 3027 WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & save->GENMO_WT); 3028 /* Disable cursor, overlay, crtc */ 3029 WREG32(R_000260_CUR_OFFSET, save->CUR_OFFSET | S_000260_CUR_LOCK(1)); 3030 WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL | 3031 S_000054_CRTC_DISPLAY_DIS(1)); 3032 WREG32(R_000050_CRTC_GEN_CNTL, 3033 (C_000050_CRTC_CUR_EN & save->CRTC_GEN_CNTL) | 3034 S_000050_CRTC_DISP_REQ_EN_B(1)); 3035 WREG32(R_000420_OV0_SCALE_CNTL, 3036 C_000420_OV0_OVERLAY_EN & RREG32(R_000420_OV0_SCALE_CNTL)); 
	WREG32(R_000260_CUR_OFFSET, C_000260_CUR_LOCK & save->CUR_OFFSET);
	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
		WREG32(R_000360_CUR2_OFFSET, save->CUR2_OFFSET |
		       S_000360_CUR2_LOCK(1));
		WREG32(R_0003F8_CRTC2_GEN_CNTL,
		       (C_0003F8_CRTC2_CUR_EN & save->CRTC2_GEN_CNTL) |
		       S_0003F8_CRTC2_DISPLAY_DIS(1) |
		       S_0003F8_CRTC2_DISP_REQ_EN_B(1));
		WREG32(R_000360_CUR2_OFFSET,
		       C_000360_CUR2_LOCK & save->CUR2_OFFSET);
	}
}

void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save)
{
	/* Update base address for crtc */
	WREG32(R_00023C_DISPLAY_BASE_ADDR, rdev->mc.vram_location);
	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
		WREG32(R_00033C_CRTC2_DISPLAY_BASE_ADDR,
		       rdev->mc.vram_location);
	}
	/* Restore CRTC registers */
	WREG8(R_0003C2_GENMO_WT, save->GENMO_WT);
	WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL);
	WREG32(R_000050_CRTC_GEN_CNTL, save->CRTC_GEN_CNTL);
	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
		WREG32(R_0003F8_CRTC2_GEN_CNTL, save->CRTC2_GEN_CNTL);
	}
}

void r100_vga_render_disable(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = RREG8(R_0003C2_GENMO_WT);
	WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & tmp);
}

static void r100_debugfs(struct radeon_device *rdev)
{
	int r;

	r = r100_debugfs_mc_info_init(rdev);
	if (r)
		dev_warn(rdev->dev, "Failed to create r100_mc debugfs file.\n");
}

static void r100_mc_program(struct radeon_device *rdev)
{
	struct r100_mc_save save;

	/* Stop all MC clients */
	r100_mc_stop(rdev, &save);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(R_00014C_MC_AGP_LOCATION,
		       S_00014C_MC_AGP_START(rdev->mc.gtt_start >> 16) |
		       S_00014C_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
		WREG32(R_000170_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
		if (rdev->family > CHIP_RV200)
			WREG32(R_00015C_AGP_BASE_2,
			       upper_32_bits(rdev->mc.agp_base) & 0xff);
	} else {
		WREG32(R_00014C_MC_AGP_LOCATION, 0x0FFFFFFF);
		WREG32(R_000170_AGP_BASE, 0);
		if (rdev->family > CHIP_RV200)
			WREG32(R_00015C_AGP_BASE_2, 0);
	}
	/* Wait for MC idle */
	if (r100_mc_wait_for_idle(rdev))
		dev_warn(rdev->dev, "Wait for MC idle timeout.\n");
	/* Program the MC; this should be a 32-bit limited address space */
	WREG32(R_000148_MC_FB_LOCATION,
	       S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
	       S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
	r100_mc_resume(rdev, &save);
}

void r100_clock_startup(struct radeon_device *rdev)
{
	u32 tmp;

	if (radeon_dynclks != -1 && radeon_dynclks)
		radeon_legacy_set_clock_gating(rdev, 1);
	/* We need to force on some of the blocks */
	tmp = RREG32_PLL(R_00000D_SCLK_CNTL);
	tmp |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
	if ((rdev->family == CHIP_RV250) || (rdev->family == CHIP_RV280))
		tmp |= S_00000D_FORCE_DISP1(1) | S_00000D_FORCE_DISP2(1);
	WREG32_PLL(R_00000D_SCLK_CNTL, tmp);
}

static int r100_startup(struct radeon_device *rdev)
{
	int r;

	r100_mc_program(rdev);
	/* Resume clock */
	r100_clock_startup(rdev);
	/* Initialize GPU configuration (# pipes, ...) */
	r100_gpu_init(rdev);
	/* Initialize GART (initialize after TTM so we can allocate
	 * memory through TTM but finalize after TTM) */
	r100_enable_bm(rdev);
	if (rdev->flags & RADEON_IS_PCI) {
		r = r100_pci_gart_enable(rdev);
		if (r)
			return r;
	}
	/* Enable IRQ */
	r100_irq_set(rdev);
	/* 1M ring buffer */
	r = r100_cp_init(rdev, 1024 * 1024);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
		return r;
	}
	r = r100_wb_init(rdev);
	if (r)
		dev_err(rdev->dev, "failed initializing WB (%d).\n", r);
	r = r100_ib_init(rdev);
	if (r) {
		dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
		return r;
	}
	return 0;
}

int r100_resume(struct radeon_device *rdev)
{
	/* Make sure the GART is not working */
	if (rdev->flags & RADEON_IS_PCI)
		r100_pci_gart_disable(rdev);
	/* Resume clock before doing reset */
	r100_clock_startup(rdev);
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_gpu_reset(rdev)) {
		dev_warn(rdev->dev, "GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			 RREG32(R_000E40_RBBM_STATUS),
			 RREG32(R_0007C0_CP_STAT));
	}
	/* post */
	radeon_combios_asic_init(rdev->ddev);
	/* Resume clock after posting */
	r100_clock_startup(rdev);
	return r100_startup(rdev);
}

int r100_suspend(struct radeon_device *rdev)
{
	r100_cp_disable(rdev);
	r100_wb_disable(rdev);
	r100_irq_disable(rdev);
	if (rdev->flags & RADEON_IS_PCI)
		r100_pci_gart_disable(rdev);
	return 0;
}

void r100_fini(struct radeon_device *rdev)
{
	r100_suspend(rdev);
	r100_cp_fini(rdev);
	r100_wb_fini(rdev);
	r100_ib_fini(rdev);
	radeon_gem_fini(rdev);
	if (rdev->flags & RADEON_IS_PCI)
		r100_pci_gart_fini(rdev);
	radeon_irq_kms_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}

int r100_mc_init(struct radeon_device *rdev)
{
	int r;
	u32 tmp;

	/* Setup GPU memory space */
	rdev->mc.vram_location = 0xFFFFFFFFUL;
	rdev->mc.gtt_location = 0xFFFFFFFFUL;
	if (rdev->flags & RADEON_IS_IGP) {
		tmp = G_00015C_MC_FB_START(RREG32(R_00015C_NB_TOM));
		rdev->mc.vram_location = tmp << 16;
	}
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r) {
			printk(KERN_WARNING "[drm] Disabling AGP\n");
			rdev->flags &= ~RADEON_IS_AGP;
			rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
		} else {
			rdev->mc.gtt_location = rdev->mc.agp_base;
		}
	}
	r = radeon_mc_setup(rdev);
	if (r)
		return r;
	return 0;
}

int r100_init(struct radeon_device *rdev)
{
	int r;

	/* Register debugfs file specific to this group of asics */
	r100_debugfs(rdev);
	/* Disable VGA */
	r100_vga_render_disable(rdev);
	/* Initialize scratch registers */
	radeon_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* TODO: disable VGA need to use VGA request */
	/* BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	if (rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n");
		return -EINVAL;
	} else {
		r = radeon_combios_init(rdev);
		if (r)
			return r;
	}
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_gpu_reset(rdev)) {
		dev_warn(rdev->dev,
			 "GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			 RREG32(R_000E40_RBBM_STATUS),
			 RREG32(R_0007C0_CP_STAT));
	}
	/* check if cards are posted or not */
	if (radeon_boot_test_post_card(rdev) == false)
		return -EINVAL;
	/* Set asic errata */
	r100_errata(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* Get VRAM information */
	r100_vram_info(rdev);
	/* Initialize memory controller (also test AGP) */
	r = r100_mc_init(rdev);
	if (r)
		return r;
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	r = radeon_irq_kms_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;
	if (rdev->flags & RADEON_IS_PCI) {
		r = r100_pci_gart_init(rdev);
		if (r)
			return r;
	}
	r100_set_safe_registers(rdev);
	rdev->accel_working = true;
	r = r100_startup(rdev);
	if (r) {
		/* Something went wrong with the accel init; stop accel */
		dev_err(rdev->dev, "Disabling GPU acceleration\n");
		r100_suspend(rdev);
		r100_cp_fini(rdev);
		r100_wb_fini(rdev);
		r100_ib_fini(rdev);
		if (rdev->flags & RADEON_IS_PCI)
			r100_pci_gart_fini(rdev);
		radeon_irq_kms_fini(rdev);
		rdev->accel_working = false;
	}
	return 0;
}
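
#if 0
/* Illustrative sketch only, not part of the driver: how the entry points
 * above are expected to be sequenced by a hypothetical caller (the radeon
 * KMS core fills this role in practice). Error handling is elided and the
 * function name is made up for the example.
 */
static int r100_lifecycle_example(struct radeon_device *rdev)
{
	int r;

	r = r100_init(rdev);	/* one-time init: BIOS, MC, GART, CP, IB test */
	if (r)
		return r;
	r = r100_suspend(rdev);	/* stop CP/WB/IRQ and disable the PCI GART */
	if (r)
		return r;
	r = r100_resume(rdev);	/* reset, re-post, then run r100_startup() */
	if (r)
		return r;
	r100_fini(rdev);	/* full teardown, frees rdev->bios */
	return 0;
}
#endif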