/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "rs400d.h"

/* This file gathers functions specific to rs400, rs480 */
static int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev);

void rs400_gart_adjust_size(struct radeon_device *rdev)
{
	/* Check gart size */
	switch (rdev->mc.gtt_size / (1024 * 1024)) {
	case 32:
	case 64:
	case 128:
	case 256:
	case 512:
	case 1024:
	case 2048:
		break;
	default:
		DRM_ERROR("Unable to use IGP GART size %uM\n",
			  (unsigned)(rdev->mc.gtt_size >> 20));
		DRM_ERROR("Valid GART sizes for IGP are 32M, 64M, 128M, 256M, 512M, 1G, 2G\n");
		DRM_ERROR("Forcing to 32M GART size\n");
		rdev->mc.gtt_size = 32 * 1024 * 1024;
		return;
	}
}

void rs400_gart_tlb_flush(struct radeon_device *rdev)
{
	uint32_t tmp;
	unsigned int timeout = rdev->usec_timeout;

	WREG32_MC(RS480_GART_CACHE_CNTRL, RS480_GART_CACHE_INVALIDATE);
	do {
		tmp = RREG32_MC(RS480_GART_CACHE_CNTRL);
		if ((tmp & RS480_GART_CACHE_INVALIDATE) == 0)
			break;
		DRM_UDELAY(1);
		timeout--;
	} while (timeout > 0);
	WREG32_MC(RS480_GART_CACHE_CNTRL, 0);
}

int rs400_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.table.ram.ptr) {
		WARN(1, "RS400 GART already initialized\n");
		return 0;
	}
	/* Check gart size */
	switch (rdev->mc.gtt_size / (1024 * 1024)) {
	case 32:
	case 64:
	case 128:
	case 256:
	case 512:
	case 1024:
	case 2048:
		break;
	default:
		return -EINVAL;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r)
		return r;
	if (rs400_debugfs_pcie_gart_info_init(rdev))
		DRM_ERROR("Failed to register debugfs file for RS400 GART !\n");
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
	return radeon_gart_table_ram_alloc(rdev);
}

int rs400_gart_enable(struct radeon_device *rdev)
{
	uint32_t size_reg;
	uint32_t tmp;

	radeon_gart_restore(rdev);
	tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
	tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS;
	WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp);
	/* Check gart size */
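	/*
	 * Map the requested GTT size onto the fixed RS480_VA_SIZE_* encoding
	 * that is OR'ed into RS480_AGP_ADDRESS_SPACE_SIZE further down.
	 * rs400_gart_adjust_size() should already have forced unsupported
	 * sizes to 32MB, so the default case is not expected to be hit.
	 */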
	switch (rdev->mc.gtt_size / (1024 * 1024)) {
	case 32:
		size_reg = RS480_VA_SIZE_32MB;
		break;
	case 64:
		size_reg = RS480_VA_SIZE_64MB;
		break;
	case 128:
		size_reg = RS480_VA_SIZE_128MB;
		break;
	case 256:
		size_reg = RS480_VA_SIZE_256MB;
		break;
	case 512:
		size_reg = RS480_VA_SIZE_512MB;
		break;
	case 1024:
		size_reg = RS480_VA_SIZE_1GB;
		break;
	case 2048:
		size_reg = RS480_VA_SIZE_2GB;
		break;
	default:
		return -EINVAL;
	}
	/* It should be fine to program it to max value */
	if (rdev->family == CHIP_RS690 || (rdev->family == CHIP_RS740)) {
		WREG32_MC(RS690_MCCFG_AGP_BASE, 0xFFFFFFFF);
		WREG32_MC(RS690_MCCFG_AGP_BASE_2, 0);
	} else {
		WREG32(RADEON_AGP_BASE, 0xFFFFFFFF);
		WREG32(RS480_AGP_BASE_2, 0);
	}
	tmp = REG_SET(RS690_MC_AGP_TOP, rdev->mc.gtt_end >> 16);
	tmp |= REG_SET(RS690_MC_AGP_START, rdev->mc.gtt_start >> 16);
	if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
		WREG32_MC(RS690_MCCFG_AGP_LOCATION, tmp);
		tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
		WREG32(RADEON_BUS_CNTL, tmp);
	} else {
		WREG32(RADEON_MC_AGP_LOCATION, tmp);
		tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
		WREG32(RADEON_BUS_CNTL, tmp);
	}
	/* Table should be in the 32-bit address space, so ignore bits above. */
	tmp = (u32)rdev->gart.table_addr & 0xfffff000;
	tmp |= (upper_32_bits(rdev->gart.table_addr) & 0xff) << 4;

	WREG32_MC(RS480_GART_BASE, tmp);
	/* TODO: more tweaking here */
	WREG32_MC(RS480_GART_FEATURE_ID,
		  (RS480_TLB_ENABLE |
		   RS480_GTW_LAC_EN | RS480_1LEVEL_GART));
	/* Disable snooping */
	WREG32_MC(RS480_AGP_MODE_CNTL,
		  (1 << RS480_REQ_TYPE_SNOOP_SHIFT) | RS480_REQ_TYPE_SNOOP_DIS);
	/* Disable AGP mode */
	/* FIXME: according to doc we should set HIDE_MMCFG_BAR=0,
	 * AGPMODE30=0 & AGP30ENHANCED=0 in NB_CNTL */
	if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
		WREG32_MC(RS480_MC_MISC_CNTL,
			  (RS480_GART_INDEX_REG_EN | RS690_BLOCK_GFX_D3_EN));
	} else {
		WREG32_MC(RS480_MC_MISC_CNTL, RS480_GART_INDEX_REG_EN);
	}
	/* Enable gart */
	WREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN | size_reg));
	rs400_gart_tlb_flush(rdev);
	rdev->gart.ready = true;
	return 0;
}

void rs400_gart_disable(struct radeon_device *rdev)
{
	uint32_t tmp;

	tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
	tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS;
	WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp);
	WREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE, 0);
}

void rs400_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	rs400_gart_disable(rdev);
	radeon_gart_table_ram_free(rdev);
}

#define RS400_PTE_WRITEABLE (1 << 2)
#define RS400_PTE_READABLE  (1 << 3)

int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
	uint32_t entry;

	if (i < 0 || i >= rdev->gart.num_gpu_pages) {
		return -EINVAL;
	}

	entry = (lower_32_bits(addr) & PAGE_MASK) |
		((upper_32_bits(addr) & 0xff) << 4) |
		RS400_PTE_WRITEABLE | RS400_PTE_READABLE;
	entry = cpu_to_le32(entry);
	rdev->gart.table.ram.ptr[i] = entry;
	return 0;
}
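
/*
 * rs400_mc_wait_for_idle() - poll the memory controller until it reports idle.
 *
 * Reads RADEON_MC_STATUS once per microsecond for up to rdev->usec_timeout
 * iterations and returns 0 as soon as RADEON_MC_IDLE is set, or -1 on
 * timeout. Callers such as rs400_mc_program() and rs400_gpu_init() only
 * warn on failure.
 */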
int rs400_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	uint32_t tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(RADEON_MC_STATUS);
		if (tmp & RADEON_MC_IDLE) {
			return 0;
		}
		DRM_UDELAY(1);
	}
	return -1;
}

void rs400_gpu_init(struct radeon_device *rdev)
{
	/* FIXME: is this correct ? */
	r420_pipes_init(rdev);
	if (rs400_mc_wait_for_idle(rdev)) {
		printk(KERN_WARNING "rs400: Failed to wait MC idle while "
		       "programming pipes. Bad things might happen. %08x\n",
		       RREG32(RADEON_MC_STATUS));
	}
}

void rs400_mc_init(struct radeon_device *rdev)
{
	u64 base;

	rs400_gart_adjust_size(rdev);
	rdev->mc.igp_sideport_enabled = radeon_combios_sideport_present(rdev);
	/* DDR for all cards after R300 & IGP */
	rdev->mc.vram_is_ddr = true;
	rdev->mc.vram_width = 128;
	r100_vram_init_sizes(rdev);
	base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
	radeon_vram_location(rdev, &rdev->mc, base);
	rdev->mc.gtt_base_align = rdev->mc.gtt_size - 1;
	radeon_gtt_location(rdev, &rdev->mc);
	radeon_update_bandwidth_info(rdev);
}

uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
	uint32_t r;

	WREG32(RS480_NB_MC_INDEX, reg & 0xff);
	r = RREG32(RS480_NB_MC_DATA);
	WREG32(RS480_NB_MC_INDEX, 0xff);
	return r;
}

void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	WREG32(RS480_NB_MC_INDEX, ((reg) & 0xff) | RS480_NB_MC_IND_WR_EN);
	WREG32(RS480_NB_MC_DATA, (v));
	WREG32(RS480_NB_MC_INDEX, 0xff);
}

#if defined(CONFIG_DEBUG_FS)
static int rs400_debugfs_gart_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	uint32_t tmp;

	tmp = RREG32(RADEON_HOST_PATH_CNTL);
	seq_printf(m, "HOST_PATH_CNTL 0x%08x\n", tmp);
	tmp = RREG32(RADEON_BUS_CNTL);
	seq_printf(m, "BUS_CNTL 0x%08x\n", tmp);
	tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
	seq_printf(m, "AIC_CTRL_SCRATCH 0x%08x\n", tmp);
	if (rdev->family == CHIP_RS690 || (rdev->family == CHIP_RS740)) {
		tmp = RREG32_MC(RS690_MCCFG_AGP_BASE);
		seq_printf(m, "MCCFG_AGP_BASE 0x%08x\n", tmp);
		tmp = RREG32_MC(RS690_MCCFG_AGP_BASE_2);
		seq_printf(m, "MCCFG_AGP_BASE_2 0x%08x\n", tmp);
		tmp = RREG32_MC(RS690_MCCFG_AGP_LOCATION);
		seq_printf(m, "MCCFG_AGP_LOCATION 0x%08x\n", tmp);
		tmp = RREG32_MC(RS690_MCCFG_FB_LOCATION);
		seq_printf(m, "MCCFG_FB_LOCATION 0x%08x\n", tmp);
		tmp = RREG32(RS690_HDP_FB_LOCATION);
		seq_printf(m, "HDP_FB_LOCATION 0x%08x\n", tmp);
	} else {
		tmp = RREG32(RADEON_AGP_BASE);
		seq_printf(m, "AGP_BASE 0x%08x\n", tmp);
		tmp = RREG32(RS480_AGP_BASE_2);
		seq_printf(m, "AGP_BASE_2 0x%08x\n", tmp);
		tmp = RREG32(RADEON_MC_AGP_LOCATION);
		seq_printf(m, "MC_AGP_LOCATION 0x%08x\n", tmp);
	}
	tmp = RREG32_MC(RS480_GART_BASE);
	seq_printf(m, "GART_BASE 0x%08x\n", tmp);
	tmp = RREG32_MC(RS480_GART_FEATURE_ID);
	seq_printf(m, "GART_FEATURE_ID 0x%08x\n", tmp);
	tmp = RREG32_MC(RS480_AGP_MODE_CNTL);
	seq_printf(m, "AGP_MODE_CONTROL 0x%08x\n", tmp);
	tmp = RREG32_MC(RS480_MC_MISC_CNTL);
	seq_printf(m, "MC_MISC_CNTL 0x%08x\n", tmp);
	tmp = RREG32_MC(0x5F);
	seq_printf(m, "MC_MISC_UMA_CNTL 0x%08x\n", tmp);
	tmp = RREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE);
	seq_printf(m, "AGP_ADDRESS_SPACE_SIZE 0x%08x\n", tmp);
	tmp = RREG32_MC(RS480_GART_CACHE_CNTRL);
	seq_printf(m, "GART_CACHE_CNTRL 0x%08x\n", tmp);
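	/*
	 * The registers below are read through raw MC indices with no
	 * symbolic names used here; going by the labels printed with them,
	 * 0x3B/0x3C hold the GART error address and 0x30-0x37 the individual
	 * GART error records.
	 */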
	tmp = RREG32_MC(0x3B);
	seq_printf(m, "MC_GART_ERROR_ADDRESS 0x%08x\n", tmp);
	tmp = RREG32_MC(0x3C);
	seq_printf(m, "MC_GART_ERROR_ADDRESS_HI 0x%08x\n", tmp);
	tmp = RREG32_MC(0x30);
	seq_printf(m, "GART_ERROR_0 0x%08x\n", tmp);
	tmp = RREG32_MC(0x31);
	seq_printf(m, "GART_ERROR_1 0x%08x\n", tmp);
	tmp = RREG32_MC(0x32);
	seq_printf(m, "GART_ERROR_2 0x%08x\n", tmp);
	tmp = RREG32_MC(0x33);
	seq_printf(m, "GART_ERROR_3 0x%08x\n", tmp);
	tmp = RREG32_MC(0x34);
	seq_printf(m, "GART_ERROR_4 0x%08x\n", tmp);
	tmp = RREG32_MC(0x35);
	seq_printf(m, "GART_ERROR_5 0x%08x\n", tmp);
	tmp = RREG32_MC(0x36);
	seq_printf(m, "GART_ERROR_6 0x%08x\n", tmp);
	tmp = RREG32_MC(0x37);
	seq_printf(m, "GART_ERROR_7 0x%08x\n", tmp);
	return 0;
}

static struct drm_info_list rs400_gart_info_list[] = {
	{"rs400_gart_info", rs400_debugfs_gart_info, 0, NULL},
};
#endif

static int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, rs400_gart_info_list, 1);
#else
	return 0;
#endif
}

void rs400_mc_program(struct radeon_device *rdev)
{
	struct r100_mc_save save;

	/* Stops all mc clients */
	r100_mc_stop(rdev, &save);

	/* Wait for mc idle */
	if (rs400_mc_wait_for_idle(rdev))
		dev_warn(rdev->dev, "rs400: Wait MC idle timeout before updating MC.\n");
	WREG32(R_000148_MC_FB_LOCATION,
	       S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
	       S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));

	r100_mc_resume(rdev, &save);
}

static int rs400_startup(struct radeon_device *rdev)
{
	int r;

	r100_set_common_regs(rdev);

	rs400_mc_program(rdev);
	/* Resume clock */
	r300_clock_startup(rdev);
	/* Initialize GPU configuration (# pipes, ...) */
	rs400_gpu_init(rdev);
	r100_enable_bm(rdev);
	/* Initialize GART (initialize after TTM so we can allocate
	 * memory through TTM but finalize after TTM) */
	r = rs400_gart_enable(rdev);
	if (r)
		return r;

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	/* Enable IRQ */
	r100_irq_set(rdev);
	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
	/* 1M ring buffer */
	r = r100_cp_init(rdev, 1024 * 1024);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
		return r;
	}
	r = r100_ib_init(rdev);
	if (r) {
		dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
		return r;
	}
	return 0;
}
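
/*
 * On resume the GART is disabled and the MC reprogrammed before the GPU is
 * reset and re-posted through the combios tables; rs400_startup() then
 * brings the GART, write-back buffer, IRQs and CP back up.
 */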
int rs400_resume(struct radeon_device *rdev)
{
	/* Make sure GART is not working */
	rs400_gart_disable(rdev);
	/* Resume clock before doing reset */
	r300_clock_startup(rdev);
	/* setup MC before calling post tables */
	rs400_mc_program(rdev);
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			 RREG32(R_000E40_RBBM_STATUS),
			 RREG32(R_0007C0_CP_STAT));
	}
	/* post */
	radeon_combios_asic_init(rdev->ddev);
	/* Resume clock after posting */
	r300_clock_startup(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	return rs400_startup(rdev);
}

int rs400_suspend(struct radeon_device *rdev)
{
	r100_cp_disable(rdev);
	radeon_wb_disable(rdev);
	r100_irq_disable(rdev);
	rs400_gart_disable(rdev);
	return 0;
}

void rs400_fini(struct radeon_device *rdev)
{
	r100_cp_fini(rdev);
	radeon_wb_fini(rdev);
	r100_ib_fini(rdev);
	radeon_gem_fini(rdev);
	rs400_gart_fini(rdev);
	radeon_irq_kms_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}

int rs400_init(struct radeon_device *rdev)
{
	int r;

	/* Disable VGA */
	r100_vga_render_disable(rdev);
	/* Initialize scratch registers */
	radeon_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* TODO: disable VGA need to use VGA request */
	/* restore some registers to sane defaults */
	r100_restore_sanity(rdev);
	/* BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	if (rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n");
		return -EINVAL;
	} else {
		r = radeon_combios_init(rdev);
		if (r)
			return r;
	}
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev,
			 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			 RREG32(R_000E40_RBBM_STATUS),
			 RREG32(R_0007C0_CP_STAT));
	}
	/* check if cards are posted or not */
	if (radeon_boot_test_post_card(rdev) == false)
		return -EINVAL;

	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* initialize memory controller */
	rs400_mc_init(rdev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	r = radeon_irq_kms_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;
	r = rs400_gart_init(rdev);
	if (r)
		return r;
	r300_set_reg_safe(rdev);
	rdev->accel_working = true;
	r = rs400_startup(rdev);
	if (r) {
		/* Something went wrong with the accel init, so stop accel */
		dev_err(rdev->dev, "Disabling GPU acceleration\n");
		r100_cp_fini(rdev);
		radeon_wb_fini(rdev);
		r100_ib_fini(rdev);
		rs400_gart_fini(rdev);
		radeon_irq_kms_fini(rdev);
		rdev->accel_working = false;
	}
	return 0;
}