/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include <linux/pci.h>
#include <linux/seq_file.h>
#include <linux/slab.h>

#include <drm/drm.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/radeon_drm.h>

#include "r100_track.h"
#include "r300_reg_safe.h"
#include "r300d.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_reg.h"
#include "rv350d.h"

/* This file gathers functions specific to: r300, r350, rv350, rv370, rv380
 *
 * GPU Errata:
 * - HOST_PATH_CNTL: the r300 family seems to dislike writes to HOST_PATH_CNTL
 *   over MMIO to flush the host path read cache; doing so leads to a
 *   HARDLOCKUP. However, scheduling such a write on the ring seems harmless.
 *   I suspect the CP read collides with the flush somehow, or maybe the MC,
 *   hard to tell. (Jerome Glisse)
 */

/*
 * Indirect registers accessor
 */
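/* PCIE registers live behind an index/data pair: the register offset is
 * written to RADEON_PCIE_INDEX, then the value is read or written through
 * RADEON_PCIE_DATA. The pcie_idx_lock spinlock keeps each index/data
 * sequence atomic against concurrent accessors. */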
uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg)
{
	unsigned long flags;
	uint32_t r;

	spin_lock_irqsave(&rdev->pcie_idx_lock, flags);
	WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
	r = RREG32(RADEON_PCIE_DATA);
	spin_unlock_irqrestore(&rdev->pcie_idx_lock, flags);
	return r;
}

void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	unsigned long flags;

	spin_lock_irqsave(&rdev->pcie_idx_lock, flags);
	WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
	WREG32(RADEON_PCIE_DATA, (v));
	spin_unlock_irqrestore(&rdev->pcie_idx_lock, flags);
}

/*
 * rv370,rv380 PCIE GART
 */
static void rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev);

void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	uint32_t tmp;
	int i;

	/* Workaround for a HW bug: flush twice */
	for (i = 0; i < 2; i++) {
		tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
		WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp | RADEON_PCIE_TX_GART_INVALIDATE_TLB);
		(void)RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
		WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
	}
	mb();
}

#define R300_PTE_UNSNOOPED (1 << 0)
#define R300_PTE_WRITEABLE (1 << 2)
#define R300_PTE_READABLE  (1 << 3)
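
/* Pack a 40-bit bus address into a 32-bit GART PTE: address bits 39:32 go
 * into PTE bits 31:24, and address bits 31:12 into PTE bits 23:4 (i.e. the
 * page address is stored shifted right by 8). For example, a page-aligned
 * address of 0x12_3456_7000 packs to 0x12345670 before the permission bits
 * below are ORed in. */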
uint64_t rv370_pcie_gart_get_page_entry(uint64_t addr, uint32_t flags)
{
	addr = (lower_32_bits(addr) >> 8) |
		((upper_32_bits(addr) & 0xff) << 24);
	if (flags & RADEON_GART_PAGE_READ)
		addr |= R300_PTE_READABLE;
	if (flags & RADEON_GART_PAGE_WRITE)
		addr |= R300_PTE_WRITEABLE;
	if (!(flags & RADEON_GART_PAGE_SNOOP))
		addr |= R300_PTE_UNSNOOPED;
	return addr;
}

void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
			      uint64_t entry)
{
	void __iomem *ptr = rdev->gart.ptr;

	/* on x86 we want this to be CPU endian; on powerpc without HW
	 * swappers it'll get swapped on the way into VRAM - so there is
	 * no need for cpu_to_le32 on VRAM tables */
	writel(entry, ((void __iomem *)ptr) + (i * 4));
}

int rv370_pcie_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.robj) {
		WARN(1, "RV370 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r)
		return r;
	rv370_debugfs_pcie_gart_info_init(rdev);

	rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
	rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush;
	rdev->asic->gart.get_page_entry = &rv370_pcie_gart_get_page_entry;
	rdev->asic->gart.set_page = &rv370_pcie_gart_set_page;
	return radeon_gart_table_vram_alloc(rdev);
}

int rv370_pcie_gart_enable(struct radeon_device *rdev)
{
	uint32_t table_addr;
	uint32_t tmp;
	int r;

	if (rdev->gart.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	/* discard memory requests outside of the configured range */
	tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
	WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_start);
	tmp = rdev->mc.gtt_end & ~RADEON_GPU_PAGE_MASK;
	WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, tmp);
	WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
	WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
	table_addr = rdev->gart.table_addr;
	WREG32_PCIE(RADEON_PCIE_TX_GART_BASE, table_addr);
	/* FIXME: setup default page */
	WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_start);
	WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_HI, 0);
	/* Clear error */
	WREG32_PCIE(RADEON_PCIE_TX_GART_ERROR, 0);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
	tmp |= RADEON_PCIE_TX_GART_EN;
	tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
	rv370_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)table_addr);
	rdev->gart.ready = true;
	return 0;
}

void rv370_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;

	WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, 0);
	WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, 0);
	WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
	WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
	tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN);
	radeon_gart_table_vram_unpin(rdev);
}

void rv370_pcie_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	rv370_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
}
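
/* Emit a fence on the GFX ring. Note that the HDP read-cache flush is done
 * here via ring writes to HOST_PATH_CNTL rather than over MMIO, because of
 * the HARDLOCKUP erratum described at the top of this file. */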
void r300_fence_ring_emit(struct radeon_device *rdev,
			  struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];

	/* Whoever calls radeon_fence_emit should have called ring_lock and
	 * asked for enough space (today the callers are IB scheduling and
	 * buffer moves) */
	/* Write SC register so SC & US assert idle */
	radeon_ring_write(ring, PACKET0(R300_RE_SCISSORS_TL, 0));
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, PACKET0(R300_RE_SCISSORS_BR, 0));
	radeon_ring_write(ring, 0);
	/* Flush 3D cache */
	radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, R300_RB3D_DC_FLUSH);
	radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, R300_ZC_FLUSH);
	/* Wait until IDLE & CLEAN */
	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(ring, (RADEON_WAIT_3D_IDLECLEAN |
				 RADEON_WAIT_2D_IDLECLEAN |
				 RADEON_WAIT_DMA_GUI_IDLE));
	radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
	radeon_ring_write(ring, rdev->config.r300.hdp_cntl |
				RADEON_HDP_READ_BUFFER_INVALIDATE);
	radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
	radeon_ring_write(ring, rdev->config.r300.hdp_cntl);
	/* Emit fence sequence & fire IRQ */
	radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
	radeon_ring_write(ring, fence->seq);
	radeon_ring_write(ring, PACKET0(RADEON_GEN_INT_STATUS, 0));
	radeon_ring_write(ring, RADEON_SW_INT_FIRE);
}

void r300_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
{
	unsigned gb_tile_config;
	int r;

	/* Sub pixel 1/12 so we can have 4K rendering according to doc */
	gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
	switch (rdev->num_gb_pipes) {
	case 2:
		gb_tile_config |= R300_PIPE_COUNT_R300;
		break;
	case 3:
		gb_tile_config |= R300_PIPE_COUNT_R420_3P;
		break;
	case 4:
		gb_tile_config |= R300_PIPE_COUNT_R420;
		break;
	case 1:
	default:
		gb_tile_config |= R300_PIPE_COUNT_RV350;
		break;
	}

	r = radeon_ring_lock(rdev, ring, 64);
	if (r) {
		return;
	}
	radeon_ring_write(ring, PACKET0(RADEON_ISYNC_CNTL, 0));
	radeon_ring_write(ring,
			  RADEON_ISYNC_ANY2D_IDLE3D |
			  RADEON_ISYNC_ANY3D_IDLE2D |
			  RADEON_ISYNC_WAIT_IDLEGUI |
			  RADEON_ISYNC_CPSCRATCH_IDLEGUI);
	radeon_ring_write(ring, PACKET0(R300_GB_TILE_CONFIG, 0));
	radeon_ring_write(ring, gb_tile_config);
	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(ring,
			  RADEON_WAIT_2D_IDLECLEAN |
			  RADEON_WAIT_3D_IDLECLEAN);
	radeon_ring_write(ring, PACKET0(R300_DST_PIPE_CONFIG, 0));
	radeon_ring_write(ring, R300_PIPE_AUTO_CONFIG);
	radeon_ring_write(ring, PACKET0(R300_GB_SELECT, 0));
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, PACKET0(R300_GB_ENABLE, 0));
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
	radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, R300_ZC_FLUSH | R300_ZC_FREE);
	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(ring,
			  RADEON_WAIT_2D_IDLECLEAN |
			  RADEON_WAIT_3D_IDLECLEAN);
	radeon_ring_write(ring, PACKET0(R300_GB_AA_CONFIG, 0));
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
	radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, R300_ZC_FLUSH | R300_ZC_FREE);
	radeon_ring_write(ring, PACKET0(R300_GB_MSPOS0, 0));
	radeon_ring_write(ring,
			  ((6 << R300_MS_X0_SHIFT) |
			   (6 << R300_MS_Y0_SHIFT) |
			   (6 << R300_MS_X1_SHIFT) |
			   (6 << R300_MS_Y1_SHIFT) |
			   (6 << R300_MS_X2_SHIFT) |
			   (6 << R300_MS_Y2_SHIFT) |
			   (6 << R300_MSBD0_Y_SHIFT) |
			   (6 << R300_MSBD0_X_SHIFT)));
	radeon_ring_write(ring, PACKET0(R300_GB_MSPOS1, 0));
	radeon_ring_write(ring,
			  ((6 << R300_MS_X3_SHIFT) |
			   (6 << R300_MS_Y3_SHIFT) |
			   (6 << R300_MS_X4_SHIFT) |
			   (6 << R300_MS_Y4_SHIFT) |
			   (6 << R300_MS_X5_SHIFT) |
			   (6 << R300_MS_Y5_SHIFT) |
			   (6 << R300_MSBD1_SHIFT)));
	radeon_ring_write(ring, PACKET0(R300_GA_ENHANCE, 0));
	radeon_ring_write(ring, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL);
	radeon_ring_write(ring, PACKET0(R300_GA_POLY_MODE, 0));
	radeon_ring_write(ring,
			  R300_FRONT_PTYPE_TRIANGE | R300_BACK_PTYPE_TRIANGE);
	radeon_ring_write(ring, PACKET0(R300_GA_ROUND_MODE, 0));
	radeon_ring_write(ring,
			  R300_GEOMETRY_ROUND_NEAREST |
			  R300_COLOR_ROUND_NEAREST);
	radeon_ring_unlock_commit(rdev, ring, false);
}

static void r300_errata(struct radeon_device *rdev)
{
	rdev->pll_errata = 0;

	if (rdev->family == CHIP_R300 &&
	    (RREG32(RADEON_CONFIG_CNTL) & RADEON_CFG_ATI_REV_ID_MASK) == RADEON_CFG_ATI_REV_A11) {
		rdev->pll_errata |= CHIP_ERRATA_R300_CG;
	}
}

int r300_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	uint32_t tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(RADEON_MC_STATUS);
		if (tmp & R300_MC_IDLE) {
			return 0;
		}
		udelay(1);
	}
	return -1;
}
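
/* Detect the pipe configuration: plain R300 and R350 have two quad pipes,
 * except for device IDs 0x4144 (R300 AD) and 0x4148 (R350 AH), which, like
 * the RV350/RV370/RV380 parts handled here, have a single pipe. */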
static void r300_gpu_init(struct radeon_device *rdev)
{
	uint32_t gb_tile_config, tmp;

	if ((rdev->family == CHIP_R300 && rdev->pdev->device != 0x4144) ||
	    (rdev->family == CHIP_R350 && rdev->pdev->device != 0x4148)) {
		/* r300,r350 */
		rdev->num_gb_pipes = 2;
	} else {
		/* rv350,rv370,rv380,r300 AD, r350 AH */
		rdev->num_gb_pipes = 1;
	}
	rdev->num_z_pipes = 1;
	gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
	switch (rdev->num_gb_pipes) {
	case 2:
		gb_tile_config |= R300_PIPE_COUNT_R300;
		break;
	case 3:
		gb_tile_config |= R300_PIPE_COUNT_R420_3P;
		break;
	case 4:
		gb_tile_config |= R300_PIPE_COUNT_R420;
		break;
	default:
	case 1:
		gb_tile_config |= R300_PIPE_COUNT_RV350;
		break;
	}
	WREG32(R300_GB_TILE_CONFIG, gb_tile_config);

	if (r100_gui_wait_for_idle(rdev)) {
		pr_warn("Failed to wait GUI idle while programming pipes. Bad things might happen.\n");
	}

	tmp = RREG32(R300_DST_PIPE_CONFIG);
	WREG32(R300_DST_PIPE_CONFIG, tmp | R300_PIPE_AUTO_CONFIG);

	WREG32(R300_RB2D_DSTCACHE_MODE,
	       R300_DC_AUTOFLUSH_ENABLE |
	       R300_DC_DC_DISABLE_IGNORE_PE);

	if (r100_gui_wait_for_idle(rdev)) {
		pr_warn("Failed to wait GUI idle while programming pipes. Bad things might happen.\n");
	}
	if (r300_mc_wait_for_idle(rdev)) {
		pr_warn("Failed to wait MC idle while programming pipes. Bad things might happen.\n");
	}
	DRM_INFO("radeon: %d quad pipes, %d Z pipes initialized\n",
		 rdev->num_gb_pipes, rdev->num_z_pipes);
}

int r300_asic_reset(struct radeon_device *rdev, bool hard)
{
	struct r100_mc_save save;
	u32 status, tmp;
	int ret = 0;

	status = RREG32(R_000E40_RBBM_STATUS);
	if (!G_000E40_GUI_ACTIVE(status)) {
		return 0;
	}
	r100_mc_stop(rdev, &save);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* stop CP */
	WREG32(RADEON_CP_CSQ_CNTL, 0);
	tmp = RREG32(RADEON_CP_RB_CNTL);
	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
	WREG32(RADEON_CP_RB_RPTR_WR, 0);
	WREG32(RADEON_CP_RB_WPTR, 0);
	WREG32(RADEON_CP_RB_CNTL, tmp);
	/* save PCI state */
	pci_save_state(rdev->pdev);
	/* disable bus mastering */
	r100_bm_disable(rdev);
	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) |
					 S_0000F0_SOFT_RESET_GA(1));
	RREG32(R_0000F0_RBBM_SOFT_RESET);
	mdelay(500);
	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
	mdelay(1);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* Resetting the CP seems to be problematic: sometimes it ends up
	 * hard-locking the machine, but it is necessary for a successful
	 * reset. More testing is needed on R3XX/R4XX to find a reliable
	 * solution (if there is any).
	 */
	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
	RREG32(R_0000F0_RBBM_SOFT_RESET);
	mdelay(500);
	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
	mdelay(1);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* restore PCI & busmastering */
	pci_restore_state(rdev->pdev);
	r100_enable_bm(rdev);
	/* Check if GPU is idle */
	if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
		dev_err(rdev->dev, "failed to reset GPU\n");
		ret = -1;
	} else
		dev_info(rdev->dev, "GPU reset succeed\n");
	r100_mc_resume(rdev, &save);
	return ret;
}

/*
 * r300,r350,rv350,rv380 VRAM info
 */
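/* Everything after R300 (and the IGPs) uses DDR VRAM. The memory bus width
 * is decoded from R300_MEM_NUM_CHANNELS_MASK in RADEON_MEM_CNTL: 0 means
 * 64-bit, 1 means 128-bit and 2 means 256-bit. On IGPs the framebuffer base
 * comes from the northbridge top-of-memory register (RADEON_NB_TOM) instead
 * of the PCI aperture. */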
void r300_mc_init(struct radeon_device *rdev)
{
	u64 base;
	u32 tmp;

	/* DDR for all cards after R300 & IGP */
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32(RADEON_MEM_CNTL);
	tmp &= R300_MEM_NUM_CHANNELS_MASK;
	switch (tmp) {
	case 0: rdev->mc.vram_width = 64; break;
	case 1: rdev->mc.vram_width = 128; break;
	case 2: rdev->mc.vram_width = 256; break;
	default: rdev->mc.vram_width = 128; break;
	}
	r100_vram_init_sizes(rdev);
	base = rdev->mc.aper_base;
	if (rdev->flags & RADEON_IS_IGP)
		base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
	radeon_vram_location(rdev, &rdev->mc, base);
	rdev->mc.gtt_base_align = 0;
	if (!(rdev->flags & RADEON_IS_AGP))
		radeon_gtt_location(rdev, &rdev->mc);
	radeon_update_bandwidth_info(rdev);
}
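
/* Request a new PCIE link width: the desired width is programmed into
 * LC_LINK_WIDTH_CNTL, the reconfiguration is kicked off with
 * RADEON_PCIE_LC_RECONFIG_NOW, and the register is then polled until it
 * stops reading back as all-ones (it apparently reads 0xffffffff while the
 * link retrains). */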
void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes)
{
	uint32_t link_width_cntl, mask;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	/* FIXME wait for idle */

	switch (lanes) {
	case 0:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
		break;
	case 1:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
		break;
	case 2:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
		break;
	case 4:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
		break;
	case 8:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
		break;
	case 12:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
		break;
	case 16:
	default:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
		break;
	}

	link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

	if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) ==
	    (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT))
		return;

	link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK |
			     RADEON_PCIE_LC_RECONFIG_NOW |
			     RADEON_PCIE_LC_RECONFIG_LATER |
			     RADEON_PCIE_LC_SHORT_RECONFIG_EN);
	link_width_cntl |= mask;
	WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
	WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl |
						     RADEON_PCIE_LC_RECONFIG_NOW));

	/* wait for lane set to complete */
	link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
	while (link_width_cntl == 0xffffffff)
		link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
}

int rv370_get_pcie_lanes(struct radeon_device *rdev)
{
	u32 link_width_cntl;

	if (rdev->flags & RADEON_IS_IGP)
		return 0;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return 0;

	/* FIXME wait for idle */

	link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

	switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
	case RADEON_PCIE_LC_LINK_WIDTH_X0:
		return 0;
	case RADEON_PCIE_LC_LINK_WIDTH_X1:
		return 1;
	case RADEON_PCIE_LC_LINK_WIDTH_X2:
		return 2;
	case RADEON_PCIE_LC_LINK_WIDTH_X4:
		return 4;
	case RADEON_PCIE_LC_LINK_WIDTH_X8:
		return 8;
	case RADEON_PCIE_LC_LINK_WIDTH_X16:
	default:
		return 16;
	}
}

#if defined(CONFIG_DEBUG_FS)
static int rv370_debugfs_pcie_gart_info_show(struct seq_file *m, void *unused)
{
	struct radeon_device *rdev = (struct radeon_device *)m->private;
	uint32_t tmp;

	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
	seq_printf(m, "PCIE_TX_GART_CNTL 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_BASE);
	seq_printf(m, "PCIE_TX_GART_BASE 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_LO);
	seq_printf(m, "PCIE_TX_GART_START_LO 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_HI);
	seq_printf(m, "PCIE_TX_GART_START_HI 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_LO);
	seq_printf(m, "PCIE_TX_GART_END_LO 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_HI);
	seq_printf(m, "PCIE_TX_GART_END_HI 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_ERROR);
	seq_printf(m, "PCIE_TX_GART_ERROR 0x%08x\n", tmp);
	return 0;
}

DEFINE_SHOW_ATTRIBUTE(rv370_debugfs_pcie_gart_info);
#endif

static void rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	struct dentry *root = rdev->ddev->primary->debugfs_root;

	debugfs_create_file("rv370_pcie_gart_info", 0444, root, rdev,
			    &rv370_debugfs_pcie_gart_info_fops);
#endif
}

static int r300_packet0_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt,
			      unsigned idx, unsigned reg)
{
	struct radeon_bo_list *reloc;
	struct r100_cs_track *track;
	volatile uint32_t *ib;
	uint32_t tmp, tile_flags = 0;
	unsigned i;
	int r;
	u32 idx_value;

	ib = p->ib.ptr;
	track = (struct r100_cs_track *)p->track;
	idx_value = radeon_get_ib_value(p, idx);

	switch (reg) {
	case AVIVO_D1MODE_VLINE_START_END:
	case RADEON_CRTC_GUI_TRIG_VLINE:
		r = r100_cs_packet_parse_vline(p);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		break;
	case RADEON_DST_PITCH_OFFSET:
	case RADEON_SRC_PITCH_OFFSET:
		r = r100_reloc_pitch_offset(p, pkt, idx, reg);
		if (r)
			return r;
		break;
	case R300_RB3D_COLOROFFSET0:
	case R300_RB3D_COLOROFFSET1:
	case R300_RB3D_COLOROFFSET2:
	case R300_RB3D_COLOROFFSET3:
		i = (reg - R300_RB3D_COLOROFFSET0) >> 2;
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		track->cb[i].robj = reloc->robj;
		track->cb[i].offset = idx_value;
		track->cb_dirty = true;
		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
		break;
	case R300_ZB_DEPTHOFFSET:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		track->zb.robj = reloc->robj;
		track->zb.offset = idx_value;
		track->zb_dirty = true;
		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
		break;
	case R300_TX_OFFSET_0:
	case R300_TX_OFFSET_0+4:
	case R300_TX_OFFSET_0+8:
	case R300_TX_OFFSET_0+12:
	case R300_TX_OFFSET_0+16:
	case R300_TX_OFFSET_0+20:
	case R300_TX_OFFSET_0+24:
	case R300_TX_OFFSET_0+28:
	case R300_TX_OFFSET_0+32:
	case R300_TX_OFFSET_0+36:
	case R300_TX_OFFSET_0+40:
	case R300_TX_OFFSET_0+44:
	case R300_TX_OFFSET_0+48:
	case R300_TX_OFFSET_0+52:
	case R300_TX_OFFSET_0+56:
	case R300_TX_OFFSET_0+60:
		i = (reg - R300_TX_OFFSET_0) >> 2;
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}

		if (p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) {
			ib[idx] = (idx_value & 31) | /* keep the 1st 5 bits */
				  ((idx_value & ~31) + (u32)reloc->gpu_offset);
		} else {
			if (reloc->tiling_flags & RADEON_TILING_MACRO)
				tile_flags |= R300_TXO_MACRO_TILE;
			if (reloc->tiling_flags & RADEON_TILING_MICRO)
				tile_flags |= R300_TXO_MICRO_TILE;
			else if (reloc->tiling_flags & RADEON_TILING_MICRO_SQUARE)
				tile_flags |= R300_TXO_MICRO_TILE_SQUARE;

			tmp = idx_value + ((u32)reloc->gpu_offset);
			tmp |= tile_flags;
			ib[idx] = tmp;
		}
		track->textures[i].robj = reloc->robj;
		track->tex_dirty = true;
		break;
	/* Tracked registers */
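	/* The cases below do not patch relocations; they record the values
	 * userspace wrote into the r100_cs_track state so that the draw
	 * packets in r300_packet3_check() can be validated against the
	 * bound buffers (pitch, cpp, enabled textures, and so on). */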
	case 0x2084:
		/* VAP_VF_CNTL */
		track->vap_vf_cntl = idx_value;
		break;
	case 0x20B4:
		/* VAP_VTX_SIZE */
		track->vtx_size = idx_value & 0x7F;
		break;
	case 0x2134:
		/* VAP_VF_MAX_VTX_INDX */
		track->max_indx = idx_value & 0x00FFFFFFUL;
		break;
	case 0x2088:
		/* VAP_ALT_NUM_VERTICES - only valid on r500 */
		if (p->rdev->family < CHIP_RV515)
			goto fail;
		track->vap_alt_nverts = idx_value & 0xFFFFFF;
		break;
	case 0x43E4:
		/* SC_SCISSOR1 */
		track->maxy = ((idx_value >> 13) & 0x1FFF) + 1;
		if (p->rdev->family < CHIP_RV515) {
			track->maxy -= 1440;
		}
		track->cb_dirty = true;
		track->zb_dirty = true;
		break;
	case 0x4E00:
		/* RB3D_CCTL */
		if ((idx_value & (1 << 10)) && /* CMASK_ENABLE */
		    p->rdev->cmask_filp != p->filp) {
			DRM_ERROR("Invalid RB3D_CCTL: Cannot enable CMASK.\n");
			return -EINVAL;
		}
		track->num_cb = ((idx_value >> 5) & 0x3) + 1;
		track->cb_dirty = true;
		break;
	case 0x4E38:
	case 0x4E3C:
	case 0x4E40:
	case 0x4E44:
		/* RB3D_COLORPITCH0 */
		/* RB3D_COLORPITCH1 */
		/* RB3D_COLORPITCH2 */
		/* RB3D_COLORPITCH3 */
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
			if (r) {
				DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
					  idx, reg);
				radeon_cs_dump_packet(p, pkt);
				return r;
			}

			if (reloc->tiling_flags & RADEON_TILING_MACRO)
				tile_flags |= R300_COLOR_TILE_ENABLE;
			if (reloc->tiling_flags & RADEON_TILING_MICRO)
				tile_flags |= R300_COLOR_MICROTILE_ENABLE;
			else if (reloc->tiling_flags & RADEON_TILING_MICRO_SQUARE)
				tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE;

			tmp = idx_value & ~(0x7 << 16);
			tmp |= tile_flags;
			ib[idx] = tmp;
		}
		i = (reg - 0x4E38) >> 2;
		track->cb[i].pitch = idx_value & 0x3FFE;
		switch (((idx_value >> 21) & 0xF)) {
		case 9:
		case 11:
		case 12:
			track->cb[i].cpp = 1;
			break;
		case 3:
		case 4:
		case 13:
		case 15:
			track->cb[i].cpp = 2;
			break;
		case 5:
			if (p->rdev->family < CHIP_RV515) {
				DRM_ERROR("Invalid color buffer format (%d)!\n",
					  ((idx_value >> 21) & 0xF));
				return -EINVAL;
			}
			fallthrough;
		case 6:
			track->cb[i].cpp = 4;
			break;
		case 10:
			track->cb[i].cpp = 8;
			break;
		case 7:
			track->cb[i].cpp = 16;
			break;
		default:
			DRM_ERROR("Invalid color buffer format (%d) !\n",
				  ((idx_value >> 21) & 0xF));
			return -EINVAL;
		}
		track->cb_dirty = true;
		break;
	case 0x4F00:
		/* ZB_CNTL */
		if (idx_value & 2) {
			track->z_enabled = true;
		} else {
			track->z_enabled = false;
		}
		track->zb_dirty = true;
		break;
	case 0x4F10:
		/* ZB_FORMAT */
		switch ((idx_value & 0xF)) {
		case 0:
		case 1:
			track->zb.cpp = 2;
			break;
		case 2:
			track->zb.cpp = 4;
			break;
		default:
			DRM_ERROR("Invalid z buffer format (%d) !\n",
				  (idx_value & 0xF));
			return -EINVAL;
		}
		track->zb_dirty = true;
		break;
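	/* As with RB3D_COLORPITCH above, the depth pitch dword in the IB is
	 * rewritten: unless userspace asked to keep its tiling flags verbatim
	 * (RADEON_CS_KEEP_TILING_FLAGS), the macro/micro tile bits are derived
	 * from the relocated BO's tiling_flags and ORed in. */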
	case 0x4F24:
		/* ZB_DEPTHPITCH */
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
			if (r) {
				DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
					  idx, reg);
				radeon_cs_dump_packet(p, pkt);
				return r;
			}

			if (reloc->tiling_flags & RADEON_TILING_MACRO)
				tile_flags |= R300_DEPTHMACROTILE_ENABLE;
			if (reloc->tiling_flags & RADEON_TILING_MICRO)
				tile_flags |= R300_DEPTHMICROTILE_TILED;
			else if (reloc->tiling_flags & RADEON_TILING_MICRO_SQUARE)
				tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE;

			tmp = idx_value & ~(0x7 << 16);
			tmp |= tile_flags;
			ib[idx] = tmp;
		}
		track->zb.pitch = idx_value & 0x3FFC;
		track->zb_dirty = true;
		break;
	case 0x4104:
		/* TX_ENABLE */
		for (i = 0; i < 16; i++) {
			bool enabled;

			enabled = !!(idx_value & (1 << i));
			track->textures[i].enabled = enabled;
		}
		track->tex_dirty = true;
		break;
	case 0x44C0:
	case 0x44C4:
	case 0x44C8:
	case 0x44CC:
	case 0x44D0:
	case 0x44D4:
	case 0x44D8:
	case 0x44DC:
	case 0x44E0:
	case 0x44E4:
	case 0x44E8:
	case 0x44EC:
	case 0x44F0:
	case 0x44F4:
	case 0x44F8:
	case 0x44FC:
		/* TX_FORMAT1_[0-15] */
		i = (reg - 0x44C0) >> 2;
		tmp = (idx_value >> 25) & 0x3;
		track->textures[i].tex_coord_type = tmp;
		switch ((idx_value & 0x1F)) {
		case R300_TX_FORMAT_X8:
		case R300_TX_FORMAT_Y4X4:
		case R300_TX_FORMAT_Z3Y3X2:
			track->textures[i].cpp = 1;
			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
			break;
		case R300_TX_FORMAT_X16:
		case R300_TX_FORMAT_FL_I16:
		case R300_TX_FORMAT_Y8X8:
		case R300_TX_FORMAT_Z5Y6X5:
		case R300_TX_FORMAT_Z6Y5X5:
		case R300_TX_FORMAT_W4Z4Y4X4:
		case R300_TX_FORMAT_W1Z5Y5X5:
		case R300_TX_FORMAT_D3DMFT_CxV8U8:
		case R300_TX_FORMAT_B8G8_B8G8:
		case R300_TX_FORMAT_G8R8_G8B8:
			track->textures[i].cpp = 2;
			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
			break;
		case R300_TX_FORMAT_Y16X16:
		case R300_TX_FORMAT_FL_I16A16:
		case R300_TX_FORMAT_Z11Y11X10:
		case R300_TX_FORMAT_Z10Y11X11:
		case R300_TX_FORMAT_W8Z8Y8X8:
		case R300_TX_FORMAT_W2Z10Y10X10:
		case 0x17:
		case R300_TX_FORMAT_FL_I32:
		case 0x1e:
			track->textures[i].cpp = 4;
			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
			break;
		case R300_TX_FORMAT_W16Z16Y16X16:
		case R300_TX_FORMAT_FL_R16G16B16A16:
		case R300_TX_FORMAT_FL_I32A32:
			track->textures[i].cpp = 8;
			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
			break;
		case R300_TX_FORMAT_FL_R32G32B32A32:
			track->textures[i].cpp = 16;
			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
			break;
		case R300_TX_FORMAT_DXT1:
			track->textures[i].cpp = 1;
			track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
			break;
		case R300_TX_FORMAT_ATI2N:
			if (p->rdev->family < CHIP_R420) {
				DRM_ERROR("Invalid texture format %u\n",
					  (idx_value & 0x1F));
				return -EINVAL;
			}
			/* The same rules apply as for DXT3/5. */
			fallthrough;
		case R300_TX_FORMAT_DXT3:
		case R300_TX_FORMAT_DXT5:
			track->textures[i].cpp = 1;
			track->textures[i].compress_format = R100_TRACK_COMP_DXT35;
			break;
		default:
			DRM_ERROR("Invalid texture format %u\n",
				  (idx_value & 0x1F));
			return -EINVAL;
		}
		track->tex_dirty = true;
		break;
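	/* TX_FILTER0: when the wrap mode selected for an axis is 2, 4 or 6
	 * (these appear to be the clamp-style modes), the tracker no longer
	 * requires that texture dimension to be rounded up to a power of
	 * two, so the roundup flag for that axis is cleared. */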
	case 0x4400:
	case 0x4404:
	case 0x4408:
	case 0x440C:
	case 0x4410:
	case 0x4414:
	case 0x4418:
	case 0x441C:
	case 0x4420:
	case 0x4424:
	case 0x4428:
	case 0x442C:
	case 0x4430:
	case 0x4434:
	case 0x4438:
	case 0x443C:
		/* TX_FILTER0_[0-15] */
		i = (reg - 0x4400) >> 2;
		tmp = idx_value & 0x7;
		if (tmp == 2 || tmp == 4 || tmp == 6) {
			track->textures[i].roundup_w = false;
		}
		tmp = (idx_value >> 3) & 0x7;
		if (tmp == 2 || tmp == 4 || tmp == 6) {
			track->textures[i].roundup_h = false;
		}
		track->tex_dirty = true;
		break;
	case 0x4500:
	case 0x4504:
	case 0x4508:
	case 0x450C:
	case 0x4510:
	case 0x4514:
	case 0x4518:
	case 0x451C:
	case 0x4520:
	case 0x4524:
	case 0x4528:
	case 0x452C:
	case 0x4530:
	case 0x4534:
	case 0x4538:
	case 0x453C:
		/* TX_FORMAT2_[0-15] */
		i = (reg - 0x4500) >> 2;
		tmp = idx_value & 0x3FFF;
		track->textures[i].pitch = tmp + 1;
		if (p->rdev->family >= CHIP_RV515) {
			tmp = ((idx_value >> 15) & 1) << 11;
			track->textures[i].width_11 = tmp;
			tmp = ((idx_value >> 16) & 1) << 11;
			track->textures[i].height_11 = tmp;

			/* ATI1N */
			if (idx_value & (1 << 14)) {
				/* The same rules apply as for DXT1. */
				track->textures[i].compress_format =
					R100_TRACK_COMP_DXT1;
			}
		} else if (idx_value & (1 << 14)) {
			DRM_ERROR("Forbidden bit TXFORMAT_MSB\n");
			return -EINVAL;
		}
		track->tex_dirty = true;
		break;
	case 0x4480:
	case 0x4484:
	case 0x4488:
	case 0x448C:
	case 0x4490:
	case 0x4494:
	case 0x4498:
	case 0x449C:
	case 0x44A0:
	case 0x44A4:
	case 0x44A8:
	case 0x44AC:
	case 0x44B0:
	case 0x44B4:
	case 0x44B8:
	case 0x44BC:
		/* TX_FORMAT0_[0-15] */
		i = (reg - 0x4480) >> 2;
		tmp = idx_value & 0x7FF;
		track->textures[i].width = tmp + 1;
		tmp = (idx_value >> 11) & 0x7FF;
		track->textures[i].height = tmp + 1;
		tmp = (idx_value >> 26) & 0xF;
		track->textures[i].num_levels = tmp;
		tmp = idx_value & (1 << 31);
		track->textures[i].use_pitch = !!tmp;
		tmp = (idx_value >> 22) & 0xF;
		track->textures[i].txdepth = tmp;
		track->tex_dirty = true;
		break;
	case R300_ZB_ZPASS_ADDR:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
		break;
	case 0x4e0c:
		/* RB3D_COLOR_CHANNEL_MASK */
		track->color_channel_mask = idx_value;
		track->cb_dirty = true;
		break;
	case 0x43a4:
		/* SC_HYPERZ_EN */
		/* r300c emits this register - we need to disable hyperz for it
		 * without complaining */
		if (p->rdev->hyperz_filp != p->filp) {
			if (idx_value & 0x1)
				ib[idx] = idx_value & ~1;
		}
		break;
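	/* HyperZ features (HiZ, Z read/write compression, fast fill) are only
	 * allowed for the file handle that currently owns hyperz
	 * (p->rdev->hyperz_filp); anyone else trying to enable them fails
	 * the check. */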
	case 0x4f1c:
		/* ZB_BW_CNTL */
		track->zb_cb_clear = !!(idx_value & (1 << 5));
		track->cb_dirty = true;
		track->zb_dirty = true;
		if (p->rdev->hyperz_filp != p->filp) {
			if (idx_value & (R300_HIZ_ENABLE |
					 R300_RD_COMP_ENABLE |
					 R300_WR_COMP_ENABLE |
					 R300_FAST_FILL_ENABLE))
				goto fail;
		}
		break;
	case 0x4e04:
		/* RB3D_BLENDCNTL */
		track->blend_read_enable = !!(idx_value & (1 << 2));
		track->cb_dirty = true;
		break;
	case R300_RB3D_AARESOLVE_OFFSET:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		track->aa.robj = reloc->robj;
		track->aa.offset = idx_value;
		track->aa_dirty = true;
		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
		break;
	case R300_RB3D_AARESOLVE_PITCH:
		track->aa.pitch = idx_value & 0x3FFE;
		track->aa_dirty = true;
		break;
	case R300_RB3D_AARESOLVE_CTL:
		track->aaresolve = idx_value & 0x1;
		track->aa_dirty = true;
		break;
	case 0x4f30: /* ZB_MASK_OFFSET */
	case 0x4f34: /* ZB_ZMASK_PITCH */
	case 0x4f44: /* ZB_HIZ_OFFSET */
	case 0x4f54: /* ZB_HIZ_PITCH */
		if (idx_value && (p->rdev->hyperz_filp != p->filp))
			goto fail;
		break;
	case 0x4028:
		/* GB_Z_PEQ_CONFIG */
		if (idx_value && (p->rdev->hyperz_filp != p->filp))
			goto fail;
		if (p->rdev->family >= CHIP_RV350)
			break;
		goto fail;
	case 0x4be8:
		/* valid register only on RV530 */
		if (p->rdev->family == CHIP_RV530)
			break;
		fallthrough;
		/* fallthrough do not move */
	default:
		goto fail;
	}
	return 0;
fail:
	pr_err("Forbidden register 0x%04X in cs at %d (val=%08x)\n",
	       reg, idx, idx_value);
	return -EINVAL;
}
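
/* Validate a PACKET3 command. Relocations are patched for index buffers,
 * the draw packets feed the recorded VAP_VF_CNTL state into
 * r100_cs_track_check() so the bound buffers can be verified to be large
 * enough, and the HyperZ/CMASK clear packets are only allowed for their
 * respective owner file handles. */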
static int r300_packet3_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt)
{
	struct radeon_bo_list *reloc;
	struct r100_cs_track *track;
	volatile uint32_t *ib;
	unsigned idx;
	int r;

	ib = p->ib.ptr;
	idx = pkt->idx + 1;
	track = (struct r100_cs_track *)p->track;
	switch (pkt->opcode) {
	case PACKET3_3D_LOAD_VBPNTR:
		r = r100_packet3_load_vbpntr(p, pkt, idx);
		if (r)
			return r;
		break;
	case PACKET3_INDX_BUFFER:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->gpu_offset);
		r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
		if (r) {
			return r;
		}
		break;
	/* Draw packet */
	case PACKET3_3D_DRAW_IMMD:
		/* Number of dwords is vtx_size * (num_vertices - 1);
		 * PRIM_WALK must be equal to 3: vertex data is embedded
		 * in the cmd stream */
		if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) {
			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
			return -EINVAL;
		}
		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
		track->immd_dwords = pkt->count - 1;
		r = r100_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_3D_DRAW_IMMD_2:
		/* Number of dwords is vtx_size * (num_vertices - 1);
		 * PRIM_WALK must be equal to 3: vertex data is embedded
		 * in the cmd stream */
		if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) {
			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
			return -EINVAL;
		}
		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
		track->immd_dwords = pkt->count;
		r = r100_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_3D_DRAW_VBUF:
		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
		r = r100_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_3D_DRAW_VBUF_2:
		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
		r = r100_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_3D_DRAW_INDX:
		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
		r = r100_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_3D_DRAW_INDX_2:
		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
		r = r100_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_3D_CLEAR_HIZ:
	case PACKET3_3D_CLEAR_ZMASK:
		if (p->rdev->hyperz_filp != p->filp)
			return -EINVAL;
		break;
	case PACKET3_3D_CLEAR_CMASK:
		if (p->rdev->cmask_filp != p->filp)
			return -EINVAL;
		break;
	case PACKET3_NOP:
		break;
	default:
		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
		return -EINVAL;
	}
	return 0;
}

int r300_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet pkt;
	struct r100_cs_track *track;
	int r;

	track = kzalloc(sizeof(*track), GFP_KERNEL);
	if (track == NULL)
		return -ENOMEM;
	r100_cs_track_clear(p->rdev, track);
	p->track = track;
	do {
		r = radeon_cs_packet_parse(p, &pkt, p->idx);
		if (r) {
			return r;
		}
		p->idx += pkt.count + 2;
		switch (pkt.type) {
		case RADEON_PACKET_TYPE0:
			r = r100_cs_parse_packet0(p, &pkt,
						  p->rdev->config.r300.reg_safe_bm,
						  p->rdev->config.r300.reg_safe_bm_size,
						  &r300_packet0_check);
			break;
		case RADEON_PACKET_TYPE2:
			break;
		case RADEON_PACKET_TYPE3:
			r = r300_packet3_check(p, &pkt);
			break;
		default:
			DRM_ERROR("Unknown packet type %d !\n", pkt.type);
			return -EINVAL;
		}
		if (r) {
			return r;
		}
	} while (p->idx < p->chunk_ib->length_dw);
	return 0;
}

void r300_set_reg_safe(struct radeon_device *rdev)
{
	rdev->config.r300.reg_safe_bm = r300_reg_safe_bm;
	rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r300_reg_safe_bm);
}

void r300_mc_program(struct radeon_device *rdev)
{
	struct r100_mc_save save;

	r100_debugfs_mc_info_init(rdev);

	/* Stops all mc clients */
	r100_mc_stop(rdev, &save);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(R_00014C_MC_AGP_LOCATION,
		       S_00014C_MC_AGP_START(rdev->mc.gtt_start >> 16) |
		       S_00014C_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
		WREG32(R_000170_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
		WREG32(R_00015C_AGP_BASE_2,
		       upper_32_bits(rdev->mc.agp_base) & 0xff);
	} else {
		WREG32(R_00014C_MC_AGP_LOCATION, 0x0FFFFFFF);
		WREG32(R_000170_AGP_BASE, 0);
		WREG32(R_00015C_AGP_BASE_2, 0);
	}
	/* Wait for mc idle */
	if (r300_mc_wait_for_idle(rdev))
		DRM_INFO("Failed to wait MC idle before programming MC.\n");
	/* Program the MC; it should be a 32-bit-limited address space */
	WREG32(R_000148_MC_FB_LOCATION,
	       S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
	       S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
	r100_mc_resume(rdev, &save);
}
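
/* Enable legacy clock gating when the radeon_dynclks module parameter asks
 * for it, then force the clocks of a few blocks on: the CP and VIP always,
 * plus the VAP on RV350/RV380. */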
void r300_clock_startup(struct radeon_device *rdev)
{
	u32 tmp;

	if (radeon_dynclks != -1 && radeon_dynclks)
		radeon_legacy_set_clock_gating(rdev, 1);
	/* We need to force on some of the blocks */
	tmp = RREG32_PLL(R_00000D_SCLK_CNTL);
	tmp |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
	if ((rdev->family == CHIP_RV350) || (rdev->family == CHIP_RV380))
		tmp |= S_00000D_FORCE_VAP(1);
	WREG32_PLL(R_00000D_SCLK_CNTL, tmp);
}

static int r300_startup(struct radeon_device *rdev)
{
	int r;

	/* set common regs */
	r100_set_common_regs(rdev);
	/* program mc */
	r300_mc_program(rdev);
	/* Resume clock */
	r300_clock_startup(rdev);
	/* Initialize GPU configuration (# pipes, ...) */
	r300_gpu_init(rdev);
	/* Initialize GART (initialize after TTM so we can allocate
	 * memory through TTM but finalize after TTM) */
	if (rdev->flags & RADEON_IS_PCIE) {
		r = rv370_pcie_gart_enable(rdev);
		if (r)
			return r;
	}

	if (rdev->family == CHIP_R300 ||
	    rdev->family == CHIP_R350 ||
	    rdev->family == CHIP_RV350)
		r100_enable_bm(rdev);

	if (rdev->flags & RADEON_IS_PCI) {
		r = r100_pci_gart_enable(rdev);
		if (r)
			return r;
	}

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	/* Enable IRQ */
	if (!rdev->irq.installed) {
		r = radeon_irq_kms_init(rdev);
		if (r)
			return r;
	}

	r100_irq_set(rdev);
	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
	/* 1M ring buffer */
	r = r100_cp_init(rdev, 1024 * 1024);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
		return r;
	}

	r = radeon_ib_pool_init(rdev);
	if (r) {
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
		return r;
	}

	return 0;
}
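
/* Resume order matters: the GARTs are disabled first so the reset and
 * re-POST happen with translation off, the GPU is reset before posting
 * (otherwise ATOM can enter an infinite loop), the card is re-POSTed from
 * the combios, and finally r300_startup() brings acceleration back up. */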
int r300_resume(struct radeon_device *rdev)
{
	int r;

	/* Make sure the GARTs are not active */
	if (rdev->flags & RADEON_IS_PCIE)
		rv370_pcie_gart_disable(rdev);
	if (rdev->flags & RADEON_IS_PCI)
		r100_pci_gart_disable(rdev);
	/* Resume clock before doing reset */
	r300_clock_startup(rdev);
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			 RREG32(R_000E40_RBBM_STATUS),
			 RREG32(R_0007C0_CP_STAT));
	}
	/* post */
	radeon_combios_asic_init(rdev->ddev);
	/* Resume clock after posting */
	r300_clock_startup(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);

	rdev->accel_working = true;
	r = r300_startup(rdev);
	if (r) {
		rdev->accel_working = false;
	}
	return r;
}

int r300_suspend(struct radeon_device *rdev)
{
	radeon_pm_suspend(rdev);
	r100_cp_disable(rdev);
	radeon_wb_disable(rdev);
	r100_irq_disable(rdev);
	if (rdev->flags & RADEON_IS_PCIE)
		rv370_pcie_gart_disable(rdev);
	if (rdev->flags & RADEON_IS_PCI)
		r100_pci_gart_disable(rdev);
	return 0;
}

void r300_fini(struct radeon_device *rdev)
{
	radeon_pm_fini(rdev);
	r100_cp_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_ib_pool_fini(rdev);
	radeon_gem_fini(rdev);
	if (rdev->flags & RADEON_IS_PCIE)
		rv370_pcie_gart_fini(rdev);
	if (rdev->flags & RADEON_IS_PCI)
		r100_pci_gart_fini(rdev);
	radeon_agp_fini(rdev);
	radeon_irq_kms_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}
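
/* Full ASIC init: BIOS/combios setup, reset, clock and MC init, and GART
 * construction, then r300_startup(). On startup failure, acceleration is
 * torn down again but 0 is still returned, so KMS can keep running in
 * unaccelerated mode. */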
int r300_init(struct radeon_device *rdev)
{
	int r;

	/* Disable VGA */
	r100_vga_render_disable(rdev);
	/* Initialize scratch registers */
	radeon_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* TODO: disable VGA need to use VGA request */
	/* restore some register to sane defaults */
	r100_restore_sanity(rdev);
	/* BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	if (rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n");
		return -EINVAL;
	} else {
		r = radeon_combios_init(rdev);
		if (r)
			return r;
	}
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev,
			 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			 RREG32(R_000E40_RBBM_STATUS),
			 RREG32(R_0007C0_CP_STAT));
	}
	/* check if cards are posted or not */
	if (radeon_boot_test_post_card(rdev) == false)
		return -EINVAL;
	/* Set asic errata */
	r300_errata(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* initialize AGP */
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r) {
			radeon_agp_disable(rdev);
		}
	}
	/* initialize memory controller */
	r300_mc_init(rdev);
	/* Fence driver */
	radeon_fence_driver_init(rdev);
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;
	if (rdev->flags & RADEON_IS_PCIE) {
		r = rv370_pcie_gart_init(rdev);
		if (r)
			return r;
	}
	if (rdev->flags & RADEON_IS_PCI) {
		r = r100_pci_gart_init(rdev);
		if (r)
			return r;
	}
	r300_set_reg_safe(rdev);

	/* Initialize power management */
	radeon_pm_init(rdev);

	rdev->accel_working = true;
	r = r300_startup(rdev);
	if (r) {
		/* Something went wrong with the accel init, so stop accel */
		dev_err(rdev->dev, "Disabling GPU acceleration\n");
		r100_cp_fini(rdev);
		radeon_wb_fini(rdev);
		radeon_ib_pool_fini(rdev);
		radeon_irq_kms_fini(rdev);
		if (rdev->flags & RADEON_IS_PCIE)
			rv370_pcie_gart_fini(rdev);
		if (rdev->flags & RADEON_IS_PCI)
			r100_pci_gart_fini(rdev);
		radeon_agp_fini(rdev);
		rdev->accel_working = false;
	}
	return 0;
}