/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/seq_file.h>
#include <drm/drmP.h>
#include <drm/drm.h>
#include <drm/drm_crtc_helper.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_drm.h"
#include "r100_track.h"
#include "r300d.h"
#include "rv350d.h"
#include "r300_reg_safe.h"

/* This file gathers functions specific to: r300, r350, rv350, rv370, rv380
 *
 * GPU errata:
 * - HOST_PATH_CNTL: the r300 family seems to dislike writes to HOST_PATH_CNTL
 *   via MMIO to flush the host path read cache; this leads to a HARDLOCKUP.
 *   However, scheduling such a write on the ring seems harmless; I suspect
 *   the CP read collides with the flush somehow, or maybe the MC, hard to
 *   tell. (Jerome Glisse)
 */

/*
 * rv370,rv380 PCIE GART
 */
static int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev);

void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	uint32_t tmp;
	int i;

	/* Workaround for a HW bug: flush twice */
	for (i = 0; i < 2; i++) {
		tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
		WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp | RADEON_PCIE_TX_GART_INVALIDATE_TLB);
		(void)RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
		WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
	}
	mb();
}

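/*
 * Note on the PTE layout used by rv370_pcie_gart_set_page() below (a
 * reading of the code, not chased back to official docs): each 32-bit
 * entry packs a 40-bit bus address, with address bits 31:8 in entry
 * bits 23:0, address bits 39:32 in entry bits 31:24, and 0xc in the
 * low nibble, presumably valid/coherency flags. Worked example:
 * addr = 0x1_2345_6000 gives
 *   (0x23456000 >> 8) | ((0x1 & 0xff) << 24) | 0xc = 0x0123456c
 */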
int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
	void __iomem *ptr = (void __iomem *)rdev->gart.table.vram.ptr;

	/* valid entry indices run 0 .. num_gpu_pages - 1 */
	if (i < 0 || i >= rdev->gart.num_gpu_pages) {
		return -EINVAL;
	}
	addr = (lower_32_bits(addr) >> 8) |
	       ((upper_32_bits(addr) & 0xff) << 24) |
	       0xc;
	/* On x86 we want this to be CPU endian; on powerpc without HW
	 * swappers it'll get swapped on the way into VRAM, so there is
	 * no need for cpu_to_le32 on VRAM tables. */
	writel(addr, ptr + (i * 4));
	return 0;
}

int rv370_pcie_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.table.vram.robj) {
		WARN(1, "RV370 PCIE GART already initialized.\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r)
		return r;
	r = rv370_debugfs_pcie_gart_info_init(rdev);
	if (r)
		DRM_ERROR("Failed to register debugfs file for PCIE gart !\n");
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
	rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
	rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
	return radeon_gart_table_vram_alloc(rdev);
}

int rv370_pcie_gart_enable(struct radeon_device *rdev)
{
	uint32_t table_addr;
	uint32_t tmp;
	int r;

	if (rdev->gart.table.vram.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);
	/* discard memory requests outside of the configured range */
	tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
	WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_start);
	tmp = rdev->mc.gtt_end & ~RADEON_GPU_PAGE_MASK;
	WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, tmp);
	WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
	WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
	table_addr = rdev->gart.table_addr;
	WREG32_PCIE(RADEON_PCIE_TX_GART_BASE, table_addr);
	/* FIXME: setup default page */
	WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_start);
	WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_HI, 0);
	/* Clear error */
	WREG32_PCIE(0x18, 0);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
	tmp |= RADEON_PCIE_TX_GART_EN;
	tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
	rv370_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%08X).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20), table_addr);
	rdev->gart.ready = true;
	return 0;
}

void rv370_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;
	int r;

	WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, 0);
	WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, 0);
	WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
	WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
	tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN);
	if (rdev->gart.table.vram.robj) {
		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(rdev->gart.table.vram.robj);
			radeon_bo_unpin(rdev->gart.table.vram.robj);
			radeon_bo_unreserve(rdev->gart.table.vram.robj);
		}
	}
}

void rv370_pcie_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	rv370_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
}

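/*
 * Fence emission. Note how HOST_PATH_CNTL is written through the ring
 * here, twice: once with RADEON_HDP_READ_BUFFER_INVALIDATE to flush the
 * HDP read cache and once to restore the saved value. Per the erratum
 * note at the top of this file, doing the same flush via MMIO can
 * hard-lock r300-family chips.
 */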
void r300_fence_ring_emit(struct radeon_device *rdev,
			  struct radeon_fence *fence)
{
	/* Whoever calls radeon_fence_emit should call ring_lock and ask
	 * for enough space (today the callers are ib schedule and buffer move) */
	/* Write SC registers so SC & US assert idle */
	radeon_ring_write(rdev, PACKET0(R300_RE_SCISSORS_TL, 0));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, PACKET0(R300_RE_SCISSORS_BR, 0));
	radeon_ring_write(rdev, 0);
	/* Flush 3D cache */
	radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, R300_RB3D_DC_FLUSH);
	radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, R300_ZC_FLUSH);
	/* Wait until IDLE & CLEAN */
	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(rdev, (RADEON_WAIT_3D_IDLECLEAN |
				 RADEON_WAIT_2D_IDLECLEAN |
				 RADEON_WAIT_DMA_GUI_IDLE));
	radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
	radeon_ring_write(rdev, rdev->config.r300.hdp_cntl |
				RADEON_HDP_READ_BUFFER_INVALIDATE);
	radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
	radeon_ring_write(rdev, rdev->config.r300.hdp_cntl);
	/* Emit fence sequence & fire IRQ */
	radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
	radeon_ring_write(rdev, fence->seq);
	radeon_ring_write(rdev, PACKET0(RADEON_GEN_INT_STATUS, 0));
	radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
}

void r300_ring_start(struct radeon_device *rdev)
{
	unsigned gb_tile_config;
	int r;

	/* Sub pixel 1/12 so we can have 4K rendering according to doc */
	gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
	switch (rdev->num_gb_pipes) {
	case 2:
		gb_tile_config |= R300_PIPE_COUNT_R300;
		break;
	case 3:
		gb_tile_config |= R300_PIPE_COUNT_R420_3P;
		break;
	case 4:
		gb_tile_config |= R300_PIPE_COUNT_R420;
		break;
	case 1:
	default:
		gb_tile_config |= R300_PIPE_COUNT_RV350;
		break;
	}

	r = radeon_ring_lock(rdev, 64);
	if (r) {
		return;
	}
	radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0));
	radeon_ring_write(rdev,
			  RADEON_ISYNC_ANY2D_IDLE3D |
			  RADEON_ISYNC_ANY3D_IDLE2D |
			  RADEON_ISYNC_WAIT_IDLEGUI |
			  RADEON_ISYNC_CPSCRATCH_IDLEGUI);
	radeon_ring_write(rdev, PACKET0(R300_GB_TILE_CONFIG, 0));
	radeon_ring_write(rdev, gb_tile_config);
	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(rdev,
			  RADEON_WAIT_2D_IDLECLEAN |
			  RADEON_WAIT_3D_IDLECLEAN);
	radeon_ring_write(rdev, PACKET0(R300_DST_PIPE_CONFIG, 0));
	radeon_ring_write(rdev, R300_PIPE_AUTO_CONFIG);
	radeon_ring_write(rdev, PACKET0(R300_GB_SELECT, 0));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, PACKET0(R300_GB_ENABLE, 0));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
	radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(rdev,
			  RADEON_WAIT_2D_IDLECLEAN |
			  RADEON_WAIT_3D_IDLECLEAN);
	radeon_ring_write(rdev, PACKET0(R300_GB_AA_CONFIG, 0));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
	radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
	radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS0, 0));
	radeon_ring_write(rdev,
			  ((6 << R300_MS_X0_SHIFT) |
			   (6 << R300_MS_Y0_SHIFT) |
			   (6 << R300_MS_X1_SHIFT) |
			   (6 << R300_MS_Y1_SHIFT) |
			   (6 << R300_MS_X2_SHIFT) |
			   (6 << R300_MS_Y2_SHIFT) |
			   (6 << R300_MSBD0_Y_SHIFT) |
			   (6 << R300_MSBD0_X_SHIFT)));
	radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS1, 0));
	radeon_ring_write(rdev,
			  ((6 << R300_MS_X3_SHIFT) |
			   (6 << R300_MS_Y3_SHIFT) |
			   (6 << R300_MS_X4_SHIFT) |
			   (6 << R300_MS_Y4_SHIFT) |
			   (6 << R300_MS_X5_SHIFT) |
			   (6 << R300_MS_Y5_SHIFT) |
			   (6 << R300_MSBD1_SHIFT)));
	radeon_ring_write(rdev, PACKET0(R300_GA_ENHANCE, 0));
	radeon_ring_write(rdev, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL);
	radeon_ring_write(rdev, PACKET0(R300_GA_POLY_MODE, 0));
	radeon_ring_write(rdev,
			  R300_FRONT_PTYPE_TRIANGE | R300_BACK_PTYPE_TRIANGE);
	radeon_ring_write(rdev, PACKET0(R300_GA_ROUND_MODE, 0));
	radeon_ring_write(rdev,
			  R300_GEOMETRY_ROUND_NEAREST |
			  R300_COLOR_ROUND_NEAREST);
	radeon_ring_unlock_commit(rdev);
}

void r300_errata(struct radeon_device *rdev)
{
	rdev->pll_errata = 0;

	if (rdev->family == CHIP_R300 &&
	    (RREG32(RADEON_CONFIG_CNTL) & RADEON_CFG_ATI_REV_ID_MASK) == RADEON_CFG_ATI_REV_A11) {
		rdev->pll_errata |= CHIP_ERRATA_R300_CG;
	}
}

int r300_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	uint32_t tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(RADEON_MC_STATUS);
		if (tmp & R300_MC_IDLE) {
			return 0;
		}
		DRM_UDELAY(1);
	}
	return -1;
}

void r300_gpu_init(struct radeon_device *rdev)
{
	uint32_t gb_tile_config, tmp;

	/* FIXME: does rv380 have one pipe? */
	if ((rdev->family == CHIP_R300 && rdev->pdev->device != 0x4144) ||
	    (rdev->family == CHIP_R350)) {
		/* r300,r350 */
		rdev->num_gb_pipes = 2;
	} else {
		/* rv350,rv370,rv380,r300 AD */
		rdev->num_gb_pipes = 1;
	}
	rdev->num_z_pipes = 1;
	gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
	switch (rdev->num_gb_pipes) {
	case 2:
		gb_tile_config |= R300_PIPE_COUNT_R300;
		break;
	case 3:
		gb_tile_config |= R300_PIPE_COUNT_R420_3P;
		break;
	case 4:
		gb_tile_config |= R300_PIPE_COUNT_R420;
		break;
	default:
	case 1:
		gb_tile_config |= R300_PIPE_COUNT_RV350;
		break;
	}
	WREG32(R300_GB_TILE_CONFIG, gb_tile_config);

	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	}

	tmp = RREG32(R300_DST_PIPE_CONFIG);
	WREG32(R300_DST_PIPE_CONFIG, tmp | R300_PIPE_AUTO_CONFIG);

	WREG32(R300_RB2D_DSTCACHE_MODE,
	       R300_DC_AUTOFLUSH_ENABLE |
	       R300_DC_DC_DISABLE_IGNORE_PE);

	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	}
	if (r300_mc_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait MC idle while "
		       "programming pipes. Bad things might happen.\n");
	}
	DRM_INFO("radeon: %d quad pipes, %d Z pipes initialized.\n",
		 rdev->num_gb_pipes, rdev->num_z_pipes);
}

bool r300_gpu_is_lockup(struct radeon_device *rdev)
{
	u32 rbbm_status;
	int r;

	rbbm_status = RREG32(R_000E40_RBBM_STATUS);
	if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
		r100_gpu_lockup_update(&rdev->config.r300.lockup, &rdev->cp);
		return false;
	}
	/* force CP activities */
	r = radeon_ring_lock(rdev, 2);
	if (!r) {
		/* PACKET2 NOP */
		radeon_ring_write(rdev, 0x80000000);
		radeon_ring_write(rdev, 0x80000000);
		radeon_ring_unlock_commit(rdev);
	}
	rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
	return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, &rdev->cp);
}

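/*
 * Soft reset sequence (as implemented below): stop the CP and rewind
 * its ring pointers, save PCI state and disable bus mastering, then
 * pulse RBBM_SOFT_RESET for the VAP/GA, the CP, and the MC in turn,
 * dumping RBBM_STATUS after each stage, before restoring PCI state and
 * bus mastering and checking whether the GPU actually went idle.
 */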
int r300_asic_reset(struct radeon_device *rdev)
{
	struct r100_mc_save save;
	u32 status, tmp;

	r100_mc_stop(rdev, &save);
	status = RREG32(R_000E40_RBBM_STATUS);
	if (!G_000E40_GUI_ACTIVE(status)) {
		return 0;
	}
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* stop CP */
	WREG32(RADEON_CP_CSQ_CNTL, 0);
	tmp = RREG32(RADEON_CP_RB_CNTL);
	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
	WREG32(RADEON_CP_RB_RPTR_WR, 0);
	WREG32(RADEON_CP_RB_WPTR, 0);
	WREG32(RADEON_CP_RB_CNTL, tmp);
	/* save PCI state */
	pci_save_state(rdev->pdev);
	/* disable bus mastering */
	r100_bm_disable(rdev);
	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) |
	       S_0000F0_SOFT_RESET_GA(1));
	RREG32(R_0000F0_RBBM_SOFT_RESET);
	mdelay(500);
	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
	mdelay(1);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* Resetting the CP seems to be problematic: sometimes it ends up
	 * hard-locking the computer, but it is necessary for a successful
	 * reset. More testing & experimenting is needed on R3XX/R4XX to
	 * find a reliable solution (if there is any).
	 */
	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
	RREG32(R_0000F0_RBBM_SOFT_RESET);
	mdelay(500);
	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
	mdelay(1);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* reset MC */
	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_MC(1));
	RREG32(R_0000F0_RBBM_SOFT_RESET);
	mdelay(500);
	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
	mdelay(1);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* restore PCI & busmastering */
	pci_restore_state(rdev->pdev);
	r100_enable_bm(rdev);
	/* Check if GPU is idle */
	if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
		dev_err(rdev->dev, "failed to reset GPU\n");
		rdev->gpu_lockup = true;
		return -1;
	}
	r100_mc_resume(rdev, &save);
	dev_info(rdev->dev, "GPU reset succeeded\n");
	return 0;
}

/*
 * r300,r350,rv350,rv380 VRAM info
 */
void r300_mc_init(struct radeon_device *rdev)
{
	u64 base;
	u32 tmp;

	/* DDR for all cards after R300 & IGP */
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32(RADEON_MEM_CNTL);
	tmp &= R300_MEM_NUM_CHANNELS_MASK;
	switch (tmp) {
	case 0: rdev->mc.vram_width = 64; break;
	case 1: rdev->mc.vram_width = 128; break;
	case 2: rdev->mc.vram_width = 256; break;
	default: rdev->mc.vram_width = 128; break;
	}
	r100_vram_init_sizes(rdev);
	base = rdev->mc.aper_base;
	if (rdev->flags & RADEON_IS_IGP)
		base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
	radeon_vram_location(rdev, &rdev->mc, base);
	if (!(rdev->flags & RADEON_IS_AGP))
		radeon_gtt_location(rdev, &rdev->mc);
	radeon_update_bandwidth_info(rdev);
}

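/*
 * Lane reconfiguration handshake: program the requested width into
 * LC_LINK_WIDTH_CNTL, pulse LC_RECONFIG_NOW, then poll until the
 * register reads back something other than 0xffffffff; the all-ones
 * reads presumably come from the link being down while it retrains.
 */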
void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes)
{
	uint32_t link_width_cntl, mask;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	/* FIXME wait for idle */

	switch (lanes) {
	case 0:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
		break;
	case 1:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
		break;
	case 2:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
		break;
	case 4:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
		break;
	case 8:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
		break;
	case 12:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
		break;
	case 16:
	default:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
		break;
	}

	link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

	if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) ==
	    (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT))
		return;

	link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK |
			     RADEON_PCIE_LC_RECONFIG_NOW |
			     RADEON_PCIE_LC_RECONFIG_LATER |
			     RADEON_PCIE_LC_SHORT_RECONFIG_EN);
	link_width_cntl |= mask;
	WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
	WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl |
						     RADEON_PCIE_LC_RECONFIG_NOW));

	/* wait for lane set to complete */
	link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
	while (link_width_cntl == 0xffffffff)
		link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
}

int rv370_get_pcie_lanes(struct radeon_device *rdev)
{
	u32 link_width_cntl;

	if (rdev->flags & RADEON_IS_IGP)
		return 0;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return 0;

	/* FIXME wait for idle */

	if (rdev->family < CHIP_R600)
		link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
	else
		link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

	switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
	case RADEON_PCIE_LC_LINK_WIDTH_X0:
		return 0;
	case RADEON_PCIE_LC_LINK_WIDTH_X1:
		return 1;
	case RADEON_PCIE_LC_LINK_WIDTH_X2:
		return 2;
	case RADEON_PCIE_LC_LINK_WIDTH_X4:
		return 4;
	case RADEON_PCIE_LC_LINK_WIDTH_X8:
		return 8;
	case RADEON_PCIE_LC_LINK_WIDTH_X16:
	default:
		return 16;
	}
}

#if defined(CONFIG_DEBUG_FS)
static int rv370_debugfs_pcie_gart_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	uint32_t tmp;

	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
	seq_printf(m, "PCIE_TX_GART_CNTL 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_BASE);
	seq_printf(m, "PCIE_TX_GART_BASE 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_LO);
	seq_printf(m, "PCIE_TX_GART_START_LO 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_HI);
	seq_printf(m, "PCIE_TX_GART_START_HI 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_LO);
	seq_printf(m, "PCIE_TX_GART_END_LO 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_HI);
	seq_printf(m, "PCIE_TX_GART_END_HI 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_ERROR);
	seq_printf(m, "PCIE_TX_GART_ERROR 0x%08x\n", tmp);
	return 0;
}

static struct drm_info_list rv370_pcie_gart_info_list[] = {
	{"rv370_pcie_gart_info", rv370_debugfs_pcie_gart_info, 0, NULL},
};
#endif

static int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, rv370_pcie_gart_info_list, 1);
#else
	return 0;
#endif
}

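/*
 * Command-stream checking. r300_packet0_check() is invoked for every
 * type-0 register write that is not covered by the auto-generated
 * r300_reg_safe_bm bitmap. For registers carrying GPU addresses it
 * fetches the matching relocation and patches the IB dword in place
 * (ib[idx] = offset + bo gpu_offset, plus tiling flags where relevant);
 * everything else only updates the r100_cs_track state that later
 * validates draw packets.
 */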
static int r300_packet0_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt,
			      unsigned idx, unsigned reg)
{
	struct radeon_cs_reloc *reloc;
	struct r100_cs_track *track;
	volatile uint32_t *ib;
	uint32_t tmp, tile_flags = 0;
	unsigned i;
	int r;
	u32 idx_value;

	ib = p->ib->ptr;
	track = (struct r100_cs_track *)p->track;
	idx_value = radeon_get_ib_value(p, idx);

	switch (reg) {
	case AVIVO_D1MODE_VLINE_START_END:
	case RADEON_CRTC_GUI_TRIG_VLINE:
		r = r100_cs_packet_parse_vline(p);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		break;
	case RADEON_DST_PITCH_OFFSET:
	case RADEON_SRC_PITCH_OFFSET:
		r = r100_reloc_pitch_offset(p, pkt, idx, reg);
		if (r)
			return r;
		break;
	case R300_RB3D_COLOROFFSET0:
	case R300_RB3D_COLOROFFSET1:
	case R300_RB3D_COLOROFFSET2:
	case R300_RB3D_COLOROFFSET3:
		i = (reg - R300_RB3D_COLOROFFSET0) >> 2;
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		track->cb[i].robj = reloc->robj;
		track->cb[i].offset = idx_value;
		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
		break;
	case R300_ZB_DEPTHOFFSET:
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		track->zb.robj = reloc->robj;
		track->zb.offset = idx_value;
		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
		break;
	case R300_TX_OFFSET_0:
	case R300_TX_OFFSET_0+4:
	case R300_TX_OFFSET_0+8:
	case R300_TX_OFFSET_0+12:
	case R300_TX_OFFSET_0+16:
	case R300_TX_OFFSET_0+20:
	case R300_TX_OFFSET_0+24:
	case R300_TX_OFFSET_0+28:
	case R300_TX_OFFSET_0+32:
	case R300_TX_OFFSET_0+36:
	case R300_TX_OFFSET_0+40:
	case R300_TX_OFFSET_0+44:
	case R300_TX_OFFSET_0+48:
	case R300_TX_OFFSET_0+52:
	case R300_TX_OFFSET_0+56:
	case R300_TX_OFFSET_0+60:
		i = (reg - R300_TX_OFFSET_0) >> 2;
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}

		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
			tile_flags |= R300_TXO_MACRO_TILE;
		if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
			tile_flags |= R300_TXO_MICRO_TILE;
		else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
			tile_flags |= R300_TXO_MICRO_TILE_SQUARE;

		tmp = idx_value + ((u32)reloc->lobj.gpu_offset);
		tmp |= tile_flags;
		ib[idx] = tmp;
		track->textures[i].robj = reloc->robj;
		break;
	/* Tracked registers */
	case 0x2084:
		/* VAP_VF_CNTL */
		track->vap_vf_cntl = idx_value;
		break;
	case 0x20B4:
		/* VAP_VTX_SIZE */
		track->vtx_size = idx_value & 0x7F;
		break;
	case 0x2134:
		/* VAP_VF_MAX_VTX_INDX */
		track->max_indx = idx_value & 0x00FFFFFFUL;
		break;
	case 0x43E4:
		/* SC_SCISSOR1 */
		track->maxy = ((idx_value >> 13) & 0x1FFF) + 1;
		if (p->rdev->family < CHIP_RV515) {
			track->maxy -= 1440;
		}
		break;
	case 0x4E00:
		/* RB3D_CCTL */
		track->num_cb = ((idx_value >> 5) & 0x3) + 1;
		break;
	case 0x4E38:
	case 0x4E3C:
	case 0x4E40:
	case 0x4E44:
		/* RB3D_COLORPITCH0 */
		/* RB3D_COLORPITCH1 */
		/* RB3D_COLORPITCH2 */
		/* RB3D_COLORPITCH3 */
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}

		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
			tile_flags |= R300_COLOR_TILE_ENABLE;
		if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
			tile_flags |= R300_COLOR_MICROTILE_ENABLE;
		else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
			tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE;

		tmp = idx_value & ~(0x7 << 16);
		tmp |= tile_flags;
		ib[idx] = tmp;

		i = (reg - 0x4E38) >> 2;
		track->cb[i].pitch = idx_value & 0x3FFE;
		switch (((idx_value >> 21) & 0xF)) {
		case 9:
		case 11:
		case 12:
			track->cb[i].cpp = 1;
			break;
		case 3:
		case 4:
		case 13:
		case 15:
			track->cb[i].cpp = 2;
			break;
		case 6:
			track->cb[i].cpp = 4;
			break;
		case 10:
			track->cb[i].cpp = 8;
			break;
		case 7:
			track->cb[i].cpp = 16;
			break;
		default:
			DRM_ERROR("Invalid color buffer format (%d) !\n",
				  ((idx_value >> 21) & 0xF));
			return -EINVAL;
		}
		break;
	case 0x4F00:
		/* ZB_CNTL */
		if (idx_value & 2) {
			track->z_enabled = true;
		} else {
			track->z_enabled = false;
		}
		break;
	case 0x4F10:
		/* ZB_FORMAT */
		switch ((idx_value & 0xF)) {
		case 0:
		case 1:
			track->zb.cpp = 2;
			break;
		case 2:
			track->zb.cpp = 4;
			break;
		default:
			DRM_ERROR("Invalid z buffer format (%d) !\n",
				  (idx_value & 0xF));
			return -EINVAL;
		}
		break;
	case 0x4F24:
		/* ZB_DEPTHPITCH */
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}

		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
			tile_flags |= R300_DEPTHMACROTILE_ENABLE;
		if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
			tile_flags |= R300_DEPTHMICROTILE_TILED;
		else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
			tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE;

		tmp = idx_value & ~(0x7 << 16);
		tmp |= tile_flags;
		ib[idx] = tmp;

		track->zb.pitch = idx_value & 0x3FFC;
		break;
	case 0x4104:
		/* TX_ENABLE: one enable bit per texture unit */
		for (i = 0; i < 16; i++) {
			bool enabled;

			enabled = !!(idx_value & (1 << i));
			track->textures[i].enabled = enabled;
		}
		break;
	case 0x44C0:
	case 0x44C4:
	case 0x44C8:
	case 0x44CC:
	case 0x44D0:
	case 0x44D4:
	case 0x44D8:
	case 0x44DC:
	case 0x44E0:
	case 0x44E4:
	case 0x44E8:
	case 0x44EC:
	case 0x44F0:
	case 0x44F4:
	case 0x44F8:
	case 0x44FC:
		/* TX_FORMAT1_[0-15] */
		i = (reg - 0x44C0) >> 2;
		tmp = (idx_value >> 25) & 0x3;
		track->textures[i].tex_coord_type = tmp;
		switch ((idx_value & 0x1F)) {
		case R300_TX_FORMAT_X8:
		case R300_TX_FORMAT_Y4X4:
		case R300_TX_FORMAT_Z3Y3X2:
			track->textures[i].cpp = 1;
			break;
		case R300_TX_FORMAT_X16:
		case R300_TX_FORMAT_Y8X8:
		case R300_TX_FORMAT_Z5Y6X5:
		case R300_TX_FORMAT_Z6Y5X5:
		case R300_TX_FORMAT_W4Z4Y4X4:
		case R300_TX_FORMAT_W1Z5Y5X5:
		case R300_TX_FORMAT_D3DMFT_CxV8U8:
		case R300_TX_FORMAT_B8G8_B8G8:
		case R300_TX_FORMAT_G8R8_G8B8:
			track->textures[i].cpp = 2;
			break;
		case R300_TX_FORMAT_Y16X16:
		case R300_TX_FORMAT_Z11Y11X10:
		case R300_TX_FORMAT_Z10Y11X11:
		case R300_TX_FORMAT_W8Z8Y8X8:
		case R300_TX_FORMAT_W2Z10Y10X10:
		case 0x17:
		case R300_TX_FORMAT_FL_I32:
		case 0x1e:
			track->textures[i].cpp = 4;
			break;
		case R300_TX_FORMAT_W16Z16Y16X16:
		case R300_TX_FORMAT_FL_R16G16B16A16:
		case R300_TX_FORMAT_FL_I32A32:
			track->textures[i].cpp = 8;
			break;
		case R300_TX_FORMAT_FL_R32G32B32A32:
			track->textures[i].cpp = 16;
			break;
		case R300_TX_FORMAT_DXT1:
			track->textures[i].cpp = 1;
			track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
			break;
		case R300_TX_FORMAT_ATI2N:
			if (p->rdev->family < CHIP_R420) {
				DRM_ERROR("Invalid texture format %u\n",
					  (idx_value & 0x1F));
				return -EINVAL;
			}
			/* The same rules apply as for DXT3/5. */
			/* Fall through. */
		case R300_TX_FORMAT_DXT3:
		case R300_TX_FORMAT_DXT5:
			track->textures[i].cpp = 1;
			track->textures[i].compress_format = R100_TRACK_COMP_DXT35;
			break;
		default:
			DRM_ERROR("Invalid texture format %u\n",
				  (idx_value & 0x1F));
			return -EINVAL;
		}
		break;
	case 0x4400:
	case 0x4404:
	case 0x4408:
	case 0x440C:
	case 0x4410:
	case 0x4414:
	case 0x4418:
	case 0x441C:
	case 0x4420:
	case 0x4424:
	case 0x4428:
	case 0x442C:
	case 0x4430:
	case 0x4434:
	case 0x4438:
	case 0x443C:
		/* TX_FILTER0_[0-15] */
		i = (reg - 0x4400) >> 2;
		tmp = idx_value & 0x7;
		if (tmp == 2 || tmp == 4 || tmp == 6) {
			track->textures[i].roundup_w = false;
		}
		tmp = (idx_value >> 3) & 0x7;
		if (tmp == 2 || tmp == 4 || tmp == 6) {
			track->textures[i].roundup_h = false;
		}
		break;
	case 0x4500:
	case 0x4504:
	case 0x4508:
	case 0x450C:
	case 0x4510:
	case 0x4514:
	case 0x4518:
	case 0x451C:
	case 0x4520:
	case 0x4524:
	case 0x4528:
	case 0x452C:
	case 0x4530:
	case 0x4534:
	case 0x4538:
	case 0x453C:
		/* TX_FORMAT2_[0-15] */
		i = (reg - 0x4500) >> 2;
		tmp = idx_value & 0x3FFF;
		track->textures[i].pitch = tmp + 1;
		if (p->rdev->family >= CHIP_RV515) {
			tmp = ((idx_value >> 15) & 1) << 11;
			track->textures[i].width_11 = tmp;
			tmp = ((idx_value >> 16) & 1) << 11;
			track->textures[i].height_11 = tmp;

			/* ATI1N */
			if (idx_value & (1 << 14)) {
				/* The same rules apply as for DXT1. */
				track->textures[i].compress_format =
					R100_TRACK_COMP_DXT1;
			}
		} else if (idx_value & (1 << 14)) {
			DRM_ERROR("Forbidden bit TXFORMAT_MSB\n");
			return -EINVAL;
		}
		break;
	case 0x4480:
	case 0x4484:
	case 0x4488:
	case 0x448C:
	case 0x4490:
	case 0x4494:
	case 0x4498:
	case 0x449C:
	case 0x44A0:
	case 0x44A4:
	case 0x44A8:
	case 0x44AC:
	case 0x44B0:
	case 0x44B4:
	case 0x44B8:
	case 0x44BC:
		/* TX_FORMAT0_[0-15] */
		i = (reg - 0x4480) >> 2;
		tmp = idx_value & 0x7FF;
		track->textures[i].width = tmp + 1;
		tmp = (idx_value >> 11) & 0x7FF;
		track->textures[i].height = tmp + 1;
		tmp = (idx_value >> 26) & 0xF;
		track->textures[i].num_levels = tmp;
		tmp = idx_value & (1 << 31);
		track->textures[i].use_pitch = !!tmp;
		tmp = (idx_value >> 22) & 0xF;
		track->textures[i].txdepth = tmp;
		break;
	case R300_ZB_ZPASS_ADDR:
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
		break;
	case 0x4e0c:
		/* RB3D_COLOR_CHANNEL_MASK */
		track->color_channel_mask = idx_value;
		break;
	case 0x4d1c:
		/* ZB_BW_CNTL */
		track->fastfill = !!(idx_value & (1 << 2));
		break;
	case 0x4e04:
		/* RB3D_BLENDCNTL */
		track->blend_read_enable = !!(idx_value & (1 << 2));
		break;
	case 0x4be8:
		/* valid register only on RV530 */
		if (p->rdev->family == CHIP_RV530)
			break;
		/* fall through, do not move */
	default:
		printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
		       reg, idx);
		return -EINVAL;
	}
	return 0;
}

static int r300_packet3_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt)
{
	struct radeon_cs_reloc *reloc;
	struct r100_cs_track *track;
	volatile uint32_t *ib;
	unsigned idx;
	int r;

	ib = p->ib->ptr;
	idx = pkt->idx + 1;
	track = (struct r100_cs_track *)p->track;
	switch (pkt->opcode) {
	case PACKET3_3D_LOAD_VBPNTR:
		r = r100_packet3_load_vbpntr(p, pkt, idx);
		if (r)
			return r;
		break;
	case PACKET3_INDX_BUFFER:
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
		r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
		if (r) {
			return r;
		}
		break;
	/* Draw packets */
	case PACKET3_3D_DRAW_IMMD:
		/* Number of dwords is vtx_size * (num_vertices - 1).
		 * PRIM_WALK must be equal to 3: vertex data is embedded
		 * in the command stream */
		if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) {
			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
			return -EINVAL;
		}
		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
		track->immd_dwords = pkt->count - 1;
		r = r100_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_3D_DRAW_IMMD_2:
		/* Number of dwords is vtx_size * (num_vertices - 1).
		 * PRIM_WALK must be equal to 3: vertex data is embedded
		 * in the command stream */
		if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) {
			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
			return -EINVAL;
		}
		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
		track->immd_dwords = pkt->count;
		r = r100_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_3D_DRAW_VBUF:
		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
		r = r100_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_3D_DRAW_VBUF_2:
		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
		r = r100_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_3D_DRAW_INDX:
		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
		r = r100_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_3D_DRAW_INDX_2:
		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
		r = r100_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_NOP:
		break;
	default:
		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
		return -EINVAL;
	}
	return 0;
}

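/*
 * Top-level CS parse loop: decode one packet header at a time, advance
 * p->idx past the packet, then dispatch type-0 packets through the safe
 * register bitmap and type-3 packets through r300_packet3_check() until
 * the IB chunk is exhausted.
 */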
int r300_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet pkt;
	struct r100_cs_track *track;
	int r;

	track = kzalloc(sizeof(*track), GFP_KERNEL);
	if (track == NULL)
		return -ENOMEM;
	r100_cs_track_clear(p->rdev, track);
	p->track = track;
	do {
		r = r100_cs_packet_parse(p, &pkt, p->idx);
		if (r) {
			return r;
		}
		p->idx += pkt.count + 2;
		switch (pkt.type) {
		case PACKET_TYPE0:
			r = r100_cs_parse_packet0(p, &pkt,
						  p->rdev->config.r300.reg_safe_bm,
						  p->rdev->config.r300.reg_safe_bm_size,
						  &r300_packet0_check);
			break;
		case PACKET_TYPE2:
			break;
		case PACKET_TYPE3:
			r = r300_packet3_check(p, &pkt);
			break;
		default:
			DRM_ERROR("Unknown packet type %d !\n", pkt.type);
			return -EINVAL;
		}
		if (r) {
			return r;
		}
	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
	return 0;
}

void r300_set_reg_safe(struct radeon_device *rdev)
{
	rdev->config.r300.reg_safe_bm = r300_reg_safe_bm;
	rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r300_reg_safe_bm);
}

void r300_mc_program(struct radeon_device *rdev)
{
	struct r100_mc_save save;
	int r;

	r = r100_debugfs_mc_info_init(rdev);
	if (r) {
		dev_err(rdev->dev, "Failed to create r100_mc debugfs file.\n");
	}

	/* Stop all MC clients */
	r100_mc_stop(rdev, &save);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(R_00014C_MC_AGP_LOCATION,
		       S_00014C_MC_AGP_START(rdev->mc.gtt_start >> 16) |
		       S_00014C_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
		WREG32(R_000170_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
		WREG32(R_00015C_AGP_BASE_2,
		       upper_32_bits(rdev->mc.agp_base) & 0xff);
	} else {
		WREG32(R_00014C_MC_AGP_LOCATION, 0x0FFFFFFF);
		WREG32(R_000170_AGP_BASE, 0);
		WREG32(R_00015C_AGP_BASE_2, 0);
	}
	/* Wait for MC idle */
	if (r300_mc_wait_for_idle(rdev))
		DRM_INFO("Failed to wait MC idle before programming MC.\n");
	/* Program the MC; this should be a 32-bit limited address space */
	WREG32(R_000148_MC_FB_LOCATION,
	       S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
	       S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
	r100_mc_resume(rdev, &save);
}

void r300_clock_startup(struct radeon_device *rdev)
{
	u32 tmp;

	if (radeon_dynclks != -1 && radeon_dynclks)
		radeon_legacy_set_clock_gating(rdev, 1);
	/* We need to force on some of the blocks */
	tmp = RREG32_PLL(R_00000D_SCLK_CNTL);
	tmp |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
	if ((rdev->family == CHIP_RV350) || (rdev->family == CHIP_RV380))
		tmp |= S_00000D_FORCE_VAP(1);
	WREG32_PLL(R_00000D_SCLK_CNTL, tmp);
}

static int r300_startup(struct radeon_device *rdev)
{
	int r;

	/* set common regs */
	r100_set_common_regs(rdev);
	/* program mc */
	r300_mc_program(rdev);
	/* Resume clock */
	r300_clock_startup(rdev);
	/* Initialize GPU configuration (# pipes, ...) */
	r300_gpu_init(rdev);
	/* Initialize GART (initialize after TTM so we can allocate
	 * memory through TTM but finalize after TTM) */
	if (rdev->flags & RADEON_IS_PCIE) {
		r = rv370_pcie_gart_enable(rdev);
		if (r)
			return r;
	}

	if (rdev->family == CHIP_R300 ||
	    rdev->family == CHIP_R350 ||
	    rdev->family == CHIP_RV350)
		r100_enable_bm(rdev);

	if (rdev->flags & RADEON_IS_PCI) {
		r = r100_pci_gart_enable(rdev);
		if (r)
			return r;
	}
	/* Enable IRQ */
	r100_irq_set(rdev);
	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
	/* 1M ring buffer */
	r = r100_cp_init(rdev, 1024 * 1024);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
		return r;
	}
	r = r100_wb_init(rdev);
	if (r)
		dev_err(rdev->dev, "failed initializing WB (%d).\n", r);
	r = r100_ib_init(rdev);
	if (r) {
		dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
		return r;
	}
	return 0;
}

int r300_resume(struct radeon_device *rdev)
{
	/* Make sure the GARTs are disabled */
	if (rdev->flags & RADEON_IS_PCIE)
		rv370_pcie_gart_disable(rdev);
	if (rdev->flags & RADEON_IS_PCI)
		r100_pci_gart_disable(rdev);
	/* Resume clock before doing reset */
	r300_clock_startup(rdev);
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			 RREG32(R_000E40_RBBM_STATUS),
			 RREG32(R_0007C0_CP_STAT));
	}
	/* post */
	radeon_combios_asic_init(rdev->ddev);
	/* Resume clock after posting */
	r300_clock_startup(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	return r300_startup(rdev);
}

int r300_suspend(struct radeon_device *rdev)
{
	r100_cp_disable(rdev);
	r100_wb_disable(rdev);
	r100_irq_disable(rdev);
	if (rdev->flags & RADEON_IS_PCIE)
		rv370_pcie_gart_disable(rdev);
	if (rdev->flags & RADEON_IS_PCI)
		r100_pci_gart_disable(rdev);
	return 0;
}

void r300_fini(struct radeon_device *rdev)
{
	radeon_pm_fini(rdev);
	r100_cp_fini(rdev);
	r100_wb_fini(rdev);
	r100_ib_fini(rdev);
	radeon_gem_fini(rdev);
	if (rdev->flags & RADEON_IS_PCIE)
		rv370_pcie_gart_fini(rdev);
	if (rdev->flags & RADEON_IS_PCI)
		r100_pci_gart_fini(rdev);
	radeon_agp_fini(rdev);
	radeon_irq_kms_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}

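/*
 * One-time ASIC init. Ordering matters here: BIOS fetch and a
 * precautionary reset come before posting, then clocks, AGP, the
 * memory controller, fence and IRQ infrastructure, the buffer manager
 * and GART tables, and finally r300_startup() programs the hardware.
 * On startup failure everything initialized so far is torn down and
 * accel_working is left false so modesetting can still run
 * unaccelerated.
 */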
int r300_init(struct radeon_device *rdev)
{
	int r;

	/* Disable VGA */
	r100_vga_render_disable(rdev);
	/* Initialize scratch registers */
	radeon_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* TODO: disable VGA, need to use VGA request */
	/* BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	if (rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting combios for R300-family GPU\n");
		return -EINVAL;
	} else {
		r = radeon_combios_init(rdev);
		if (r)
			return r;
	}
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev,
			 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			 RREG32(R_000E40_RBBM_STATUS),
			 RREG32(R_0007C0_CP_STAT));
	}
	/* check if cards are posted or not */
	if (radeon_boot_test_post_card(rdev) == false)
		return -EINVAL;
	/* Set asic errata */
	r300_errata(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* Initialize power management */
	radeon_pm_init(rdev);
	/* initialize AGP */
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r) {
			radeon_agp_disable(rdev);
		}
	}
	/* initialize memory controller */
	r300_mc_init(rdev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	r = radeon_irq_kms_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;
	if (rdev->flags & RADEON_IS_PCIE) {
		r = rv370_pcie_gart_init(rdev);
		if (r)
			return r;
	}
	if (rdev->flags & RADEON_IS_PCI) {
		r = r100_pci_gart_init(rdev);
		if (r)
			return r;
	}
	r300_set_reg_safe(rdev);
	rdev->accel_working = true;
	r = r300_startup(rdev);
	if (r) {
		/* Something went wrong with the accel init; stop accel */
		dev_err(rdev->dev, "Disabling GPU acceleration\n");
		r100_cp_fini(rdev);
		r100_wb_fini(rdev);
		r100_ib_fini(rdev);
		radeon_irq_kms_fini(rdev);
		if (rdev->flags & RADEON_IS_PCIE)
			rv370_pcie_gart_fini(rdev);
		if (rdev->flags & RADEON_IS_PCI)
			r100_pci_gart_fini(rdev);
		radeon_agp_fini(rdev);
		rdev->accel_working = false;
	}
	return 0;
}