/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/seq_file.h>
#include <drm/drmP.h>
#include "radeon.h"
#include "rs400d.h"

/* This file gathers functions specific to: rs400, rs480 */
static int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev);

void rs400_gart_adjust_size(struct radeon_device *rdev)
{
        /* Check gart size */
        switch (rdev->mc.gtt_size / (1024 * 1024)) {
        case 32:
        case 64:
        case 128:
        case 256:
        case 512:
        case 1024:
        case 2048:
                break;
        default:
                DRM_ERROR("Unable to use IGP GART size %uM\n",
                          (unsigned)(rdev->mc.gtt_size >> 20));
                DRM_ERROR("Valid GART sizes for IGP are 32M, 64M, 128M, 256M, 512M, 1G, 2G\n");
                DRM_ERROR("Forcing to 32M GART size\n");
                rdev->mc.gtt_size = 32 * 1024 * 1024;
                return;
        }
        if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) {
                /* FIXME: RS400 & RS480 seem to have issues with the GART size
                 * when 4G of system memory is present (needs more testing) */
                rdev->mc.gtt_size = 32 * 1024 * 1024;
                DRM_ERROR("Forcing to 32M GART size (because of ASIC bug?)\n");
        }
}

void rs400_gart_tlb_flush(struct radeon_device *rdev)
{
        uint32_t tmp;
        unsigned int timeout = rdev->usec_timeout;

        WREG32_MC(RS480_GART_CACHE_CNTRL, RS480_GART_CACHE_INVALIDATE);
        do {
                tmp = RREG32_MC(RS480_GART_CACHE_CNTRL);
                if ((tmp & RS480_GART_CACHE_INVALIDATE) == 0)
                        break;
                DRM_UDELAY(1);
                timeout--;
        } while (timeout > 0);
        WREG32_MC(RS480_GART_CACHE_CNTRL, 0);
}

int rs400_gart_init(struct radeon_device *rdev)
{
        int r;

        if (rdev->gart.table.ram.ptr) {
                WARN(1, "RS400 GART already initialized.\n");
                return 0;
        }
        /* Check gart size */
        switch (rdev->mc.gtt_size / (1024 * 1024)) {
        case 32:
        case 64:
        case 128:
        case 256:
        case 512:
        case 1024:
        case 2048:
                break;
        default:
                return -EINVAL;
        }
        /* Initialize common gart structure */
        r = radeon_gart_init(rdev);
        if (r)
                return r;
        if (rs400_debugfs_pcie_gart_info_init(rdev))
                DRM_ERROR("Failed to register debugfs file for RS400 GART!\n");
        rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
        return radeon_gart_table_ram_alloc(rdev);
}

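/*
 * Program the IGP northbridge GART: pick the aperture size, place the
 * aperture in the GPU address space, point the hardware at the page table
 * allocated in system RAM by rs400_gart_init() and enable translation.
 */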
int rs400_gart_enable(struct radeon_device *rdev)
{
        uint32_t size_reg;
        uint32_t tmp;

        tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
        tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS;
        WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp);
        /* Check gart size */
        switch (rdev->mc.gtt_size / (1024 * 1024)) {
        case 32:
                size_reg = RS480_VA_SIZE_32MB;
                break;
        case 64:
                size_reg = RS480_VA_SIZE_64MB;
                break;
        case 128:
                size_reg = RS480_VA_SIZE_128MB;
                break;
        case 256:
                size_reg = RS480_VA_SIZE_256MB;
                break;
        case 512:
                size_reg = RS480_VA_SIZE_512MB;
                break;
        case 1024:
                size_reg = RS480_VA_SIZE_1GB;
                break;
        case 2048:
                size_reg = RS480_VA_SIZE_2GB;
                break;
        default:
                return -EINVAL;
        }
        /* It should be fine to program it to max value */
        if (rdev->family == CHIP_RS690 || (rdev->family == CHIP_RS740)) {
                WREG32_MC(RS690_MCCFG_AGP_BASE, 0xFFFFFFFF);
                WREG32_MC(RS690_MCCFG_AGP_BASE_2, 0);
        } else {
                WREG32(RADEON_AGP_BASE, 0xFFFFFFFF);
                WREG32(RS480_AGP_BASE_2, 0);
        }
        tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
        tmp = REG_SET(RS690_MC_AGP_TOP, tmp >> 16);
        tmp |= REG_SET(RS690_MC_AGP_START, rdev->mc.gtt_location >> 16);
        if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
                WREG32_MC(RS690_MCCFG_AGP_LOCATION, tmp);
                tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
                WREG32(RADEON_BUS_CNTL, tmp);
        } else {
                WREG32(RADEON_MC_AGP_LOCATION, tmp);
                tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
                WREG32(RADEON_BUS_CNTL, tmp);
        }
        /* Table base is 4KB aligned: bits 31:12 of the address go in bits
         * 31:12 of the register, bits 39:32 are packed into bits 11:4. */
        tmp = (u32)rdev->gart.table_addr & 0xfffff000;
        tmp |= (upper_32_bits(rdev->gart.table_addr) & 0xff) << 4;

        WREG32_MC(RS480_GART_BASE, tmp);
        /* TODO: more tweaking here */
        WREG32_MC(RS480_GART_FEATURE_ID,
                  (RS480_TLB_ENABLE |
                   RS480_GTW_LAC_EN | RS480_1LEVEL_GART));
        /* Disable snooping */
        WREG32_MC(RS480_AGP_MODE_CNTL,
                  (1 << RS480_REQ_TYPE_SNOOP_SHIFT) | RS480_REQ_TYPE_SNOOP_DIS);
        /* Disable AGP mode */
        /* FIXME: according to doc we should set HIDE_MMCFG_BAR=0,
         * AGPMODE30=0 & AGP30ENHANCED=0 in NB_CNTL */
        if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
                WREG32_MC(RS480_MC_MISC_CNTL,
                          (RS480_GART_INDEX_REG_EN | RS690_BLOCK_GFX_D3_EN));
        } else {
                WREG32_MC(RS480_MC_MISC_CNTL, RS480_GART_INDEX_REG_EN);
        }
        /* Enable gart */
        WREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN | size_reg));
        rs400_gart_tlb_flush(rdev);
        rdev->gart.ready = true;
        return 0;
}

void rs400_gart_disable(struct radeon_device *rdev)
{
        uint32_t tmp;

        tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
        tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS;
        WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp);
        WREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE, 0);
}

void rs400_gart_fini(struct radeon_device *rdev)
{
        rs400_gart_disable(rdev);
        radeon_gart_table_ram_free(rdev);
        radeon_gart_fini(rdev);
}

int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
        uint32_t entry;

        /* Valid page indices are 0 .. num_gpu_pages - 1 */
        if (i < 0 || i >= rdev->gart.num_gpu_pages) {
                return -EINVAL;
        }

        entry = (lower_32_bits(addr) & PAGE_MASK) |
                ((upper_32_bits(addr) & 0xff) << 4) |
                0xc;
        entry = cpu_to_le32(entry);
        rdev->gart.table.ram.ptr[i] = entry;
        return 0;
}

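/*
 * Basic engine setup shared with the rest of the R3xx family: reset HDP,
 * program the pipe configuration and wait for the memory controller to go
 * idle before continuing.
 */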
void rs400_gpu_init(struct radeon_device *rdev)
{
        /* FIXME: HDP same place on rs400? */
        r100_hdp_reset(rdev);
        /* FIXME: is this correct? */
        r420_pipes_init(rdev);
        if (r300_mc_wait_for_idle(rdev)) {
                printk(KERN_WARNING "Failed to wait for MC idle while "
                       "programming pipes. Bad things might happen.\n");
        }
}

void rs400_vram_info(struct radeon_device *rdev)
{
        rs400_gart_adjust_size(rdev);
        /* DDR for all cards after R300 & IGP */
        rdev->mc.vram_is_ddr = true;
        rdev->mc.vram_width = 128;

        r100_vram_init_sizes(rdev);
}

uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
        uint32_t r;

        WREG32(RS480_NB_MC_INDEX, reg & 0xff);
        r = RREG32(RS480_NB_MC_DATA);
        WREG32(RS480_NB_MC_INDEX, 0xff);
        return r;
}

void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
        WREG32(RS480_NB_MC_INDEX, ((reg) & 0xff) | RS480_NB_MC_IND_WR_EN);
        WREG32(RS480_NB_MC_DATA, (v));
        WREG32(RS480_NB_MC_INDEX, 0xff);
}

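/*
 * debugfs helper: dump the GART/AGP related northbridge registers so the
 * aperture setup can be inspected from userspace.
 */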
#if defined(CONFIG_DEBUG_FS)
static int rs400_debugfs_gart_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct radeon_device *rdev = dev->dev_private;
        uint32_t tmp;

        tmp = RREG32(RADEON_HOST_PATH_CNTL);
        seq_printf(m, "HOST_PATH_CNTL 0x%08x\n", tmp);
        tmp = RREG32(RADEON_BUS_CNTL);
        seq_printf(m, "BUS_CNTL 0x%08x\n", tmp);
        tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
        seq_printf(m, "AIC_CTRL_SCRATCH 0x%08x\n", tmp);
        if (rdev->family == CHIP_RS690 || (rdev->family == CHIP_RS740)) {
                tmp = RREG32_MC(RS690_MCCFG_AGP_BASE);
                seq_printf(m, "MCCFG_AGP_BASE 0x%08x\n", tmp);
                tmp = RREG32_MC(RS690_MCCFG_AGP_BASE_2);
                seq_printf(m, "MCCFG_AGP_BASE_2 0x%08x\n", tmp);
                tmp = RREG32_MC(RS690_MCCFG_AGP_LOCATION);
                seq_printf(m, "MCCFG_AGP_LOCATION 0x%08x\n", tmp);
                tmp = RREG32_MC(0x100);
                seq_printf(m, "MCCFG_FB_LOCATION 0x%08x\n", tmp);
                tmp = RREG32(0x134);
                seq_printf(m, "HDP_FB_LOCATION 0x%08x\n", tmp);
        } else {
                tmp = RREG32(RADEON_AGP_BASE);
                seq_printf(m, "AGP_BASE 0x%08x\n", tmp);
                tmp = RREG32(RS480_AGP_BASE_2);
                seq_printf(m, "AGP_BASE_2 0x%08x\n", tmp);
                tmp = RREG32(RADEON_MC_AGP_LOCATION);
                seq_printf(m, "MC_AGP_LOCATION 0x%08x\n", tmp);
        }
        tmp = RREG32_MC(RS480_GART_BASE);
        seq_printf(m, "GART_BASE 0x%08x\n", tmp);
        tmp = RREG32_MC(RS480_GART_FEATURE_ID);
        seq_printf(m, "GART_FEATURE_ID 0x%08x\n", tmp);
        tmp = RREG32_MC(RS480_AGP_MODE_CNTL);
        seq_printf(m, "AGP_MODE_CONTROL 0x%08x\n", tmp);
        tmp = RREG32_MC(RS480_MC_MISC_CNTL);
        seq_printf(m, "MC_MISC_CNTL 0x%08x\n", tmp);
        tmp = RREG32_MC(0x5F);
        seq_printf(m, "MC_MISC_UMA_CNTL 0x%08x\n", tmp);
        tmp = RREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE);
        seq_printf(m, "AGP_ADDRESS_SPACE_SIZE 0x%08x\n", tmp);
        tmp = RREG32_MC(RS480_GART_CACHE_CNTRL);
        seq_printf(m, "GART_CACHE_CNTRL 0x%08x\n", tmp);
        tmp = RREG32_MC(0x3B);
        seq_printf(m, "MC_GART_ERROR_ADDRESS 0x%08x\n", tmp);
        tmp = RREG32_MC(0x3C);
        seq_printf(m, "MC_GART_ERROR_ADDRESS_HI 0x%08x\n", tmp);
        tmp = RREG32_MC(0x30);
        seq_printf(m, "GART_ERROR_0 0x%08x\n", tmp);
        tmp = RREG32_MC(0x31);
        seq_printf(m, "GART_ERROR_1 0x%08x\n", tmp);
        tmp = RREG32_MC(0x32);
        seq_printf(m, "GART_ERROR_2 0x%08x\n", tmp);
        tmp = RREG32_MC(0x33);
        seq_printf(m, "GART_ERROR_3 0x%08x\n", tmp);
        tmp = RREG32_MC(0x34);
        seq_printf(m, "GART_ERROR_4 0x%08x\n", tmp);
        tmp = RREG32_MC(0x35);
        seq_printf(m, "GART_ERROR_5 0x%08x\n", tmp);
        tmp = RREG32_MC(0x36);
        seq_printf(m, "GART_ERROR_6 0x%08x\n", tmp);
        tmp = RREG32_MC(0x37);
        seq_printf(m, "GART_ERROR_7 0x%08x\n", tmp);
        return 0;
}

static struct drm_info_list rs400_gart_info_list[] = {
        {"rs400_gart_info", rs400_debugfs_gart_info, 0, NULL},
};
#endif

static int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
        return radeon_debugfs_add_files(rdev, rs400_gart_info_list, 1);
#else
        return 0;
#endif
}

static int rs400_mc_init(struct radeon_device *rdev)
{
        int r;
        u32 tmp;

        /* Setup GPU memory space */
        tmp = RREG32(R_00015C_NB_TOM);
        rdev->mc.vram_location = G_00015C_MC_FB_START(tmp) << 16;
        rdev->mc.gtt_location = 0xFFFFFFFFUL;
        r = radeon_mc_setup(rdev);
        if (r)
                return r;
        return 0;
}

void rs400_mc_program(struct radeon_device *rdev)
{
        struct r100_mc_save save;

        /* Stops all mc clients */
        r100_mc_stop(rdev, &save);

        /* Wait for mc idle */
        if (r300_mc_wait_for_idle(rdev))
                dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
        WREG32(R_000148_MC_FB_LOCATION,
               S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
               S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));

        r100_mc_resume(rdev, &save);
}

static int rs400_startup(struct radeon_device *rdev)
{
        int r;

        rs400_mc_program(rdev);
        /* Resume clock */
        r300_clock_startup(rdev);
        /* Initialize GPU configuration (# pipes, ...) */
        rs400_gpu_init(rdev);
        r100_enable_bm(rdev);
        /* Initialize GART (initialize after TTM so we can allocate
         * memory through TTM but finalize after TTM) */
        r = rs400_gart_enable(rdev);
        if (r)
                return r;
        /* Enable IRQ */
        r100_irq_set(rdev);
        /* 1M ring buffer */
        r = r100_cp_init(rdev, 1024 * 1024);
        if (r) {
                dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
                return r;
        }
        r = r100_wb_init(rdev);
        if (r)
                dev_err(rdev->dev, "failed initializing WB (%d).\n", r);
        r = r100_ib_init(rdev);
        if (r) {
                dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
                return r;
        }
        return 0;
}

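/*
 * Resume path: bring the clocks and memory controller back up, reset and
 * re-post the GPU through the combios tables, then run the normal startup
 * sequence.
 */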
int rs400_resume(struct radeon_device *rdev)
{
        /* Make sure the GART is disabled */
        rs400_gart_disable(rdev);
        /* Resume clock before doing reset */
        r300_clock_startup(rdev);
        /* Setup MC before calling post tables */
        rs400_mc_program(rdev);
        /* Reset gpu before posting otherwise ATOM will enter infinite loop */
        if (radeon_gpu_reset(rdev)) {
                dev_warn(rdev->dev, "GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
                         RREG32(R_000E40_RBBM_STATUS),
                         RREG32(R_0007C0_CP_STAT));
        }
        /* post */
        radeon_combios_asic_init(rdev->ddev);
        /* Resume clock after posting */
        r300_clock_startup(rdev);
        /* Initialize surface registers */
        radeon_surface_init(rdev);
        return rs400_startup(rdev);
}

int rs400_suspend(struct radeon_device *rdev)
{
        r100_cp_disable(rdev);
        r100_wb_disable(rdev);
        r100_irq_disable(rdev);
        rs400_gart_disable(rdev);
        return 0;
}

void rs400_fini(struct radeon_device *rdev)
{
        rs400_suspend(rdev);
        r100_cp_fini(rdev);
        r100_wb_fini(rdev);
        r100_ib_fini(rdev);
        radeon_gem_fini(rdev);
        rs400_gart_fini(rdev);
        radeon_irq_kms_fini(rdev);
        radeon_fence_driver_fini(rdev);
        radeon_bo_fini(rdev);
        radeon_atombios_fini(rdev);
        kfree(rdev->bios);
        rdev->bios = NULL;
}

int rs400_init(struct radeon_device *rdev)
{
        int r;

        /* Disable VGA */
        r100_vga_render_disable(rdev);
        /* Initialize scratch registers */
        radeon_scratch_init(rdev);
        /* Initialize surface registers */
        radeon_surface_init(rdev);
        /* TODO: disable VGA need to use VGA request */
        /* BIOS */
        if (!radeon_get_bios(rdev)) {
                if (ASIC_IS_AVIVO(rdev))
                        return -EINVAL;
        }
        if (rdev->is_atom_bios) {
                dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n");
                return -EINVAL;
        } else {
                r = radeon_combios_init(rdev);
                if (r)
                        return r;
        }
        /* Reset gpu before posting otherwise ATOM will enter infinite loop */
        if (radeon_gpu_reset(rdev)) {
                dev_warn(rdev->dev,
                         "GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
                         RREG32(R_000E40_RBBM_STATUS),
                         RREG32(R_0007C0_CP_STAT));
        }
        /* Check if the card is posted or not */
        if (radeon_boot_test_post_card(rdev) == false)
                return -EINVAL;

        /* Initialize clocks */
        radeon_get_clock_info(rdev->ddev);
        /* Initialize power management */
        radeon_pm_init(rdev);
        /* Get vram information */
        rs400_vram_info(rdev);
        /* Initialize memory controller (also test AGP) */
        r = rs400_mc_init(rdev);
        if (r)
                return r;
        /* Fence driver */
        r = radeon_fence_driver_init(rdev);
        if (r)
                return r;
        r = radeon_irq_kms_init(rdev);
        if (r)
                return r;
        /* Memory manager */
        r = radeon_bo_init(rdev);
        if (r)
                return r;
        r = rs400_gart_init(rdev);
        if (r)
                return r;
        r300_set_reg_safe(rdev);
        rdev->accel_working = true;
        r = rs400_startup(rdev);
        if (r) {
                /* Something went wrong with the accel init, so stop accel */
                dev_err(rdev->dev, "Disabling GPU acceleration\n");
                rs400_suspend(rdev);
                r100_cp_fini(rdev);
                r100_wb_fini(rdev);
                r100_ib_fini(rdev);
                rs400_gart_fini(rdev);
                radeon_irq_kms_fini(rdev);
                rdev->accel_working = false;
        }
        return 0;
}