/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "atom.h"
#include "r520d.h"

/* This file gathers functions specific to: r520, rv530, rv560, rv570, r580 */

int r520_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	uint32_t tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32_MC(R520_MC_STATUS);
		if (tmp & R520_MC_STATUS_IDLE) {
			return 0;
		}
		DRM_UDELAY(1);
	}
	return -1;
}

static void r520_gpu_init(struct radeon_device *rdev)
{
	unsigned pipe_select_current, gb_pipe_select, tmp;

	rv515_vga_render_disable(rdev);
	/*
	 * DST_PIPE_CONFIG		0x170C
	 * GB_TILE_CONFIG		0x4018
	 * GB_FIFO_SIZE			0x4024
	 * GB_PIPE_SELECT		0x402C
	 * GB_PIPE_SELECT2		0x4124
	 *	Z_PIPE_SHIFT			0
	 *	Z_PIPE_MASK			0x000000003
	 * GB_FIFO_SIZE2		0x4128
	 *	SC_SFIFO_SIZE_SHIFT		0
	 *	SC_SFIFO_SIZE_MASK		0x000000003
	 *	SC_MFIFO_SIZE_SHIFT		2
	 *	SC_MFIFO_SIZE_MASK		0x00000000C
	 *	FG_SFIFO_SIZE_SHIFT		4
	 *	FG_SFIFO_SIZE_MASK		0x000000030
	 *	ZB_MFIFO_SIZE_SHIFT		6
	 *	ZB_MFIFO_SIZE_MASK		0x0000000C0
	 * GA_ENHANCE			0x4274
	 * SU_REG_DEST			0x42C8
	 */
	/* workaround for RV530 */
	if (rdev->family == CHIP_RV530) {
		WREG32(0x4128, 0xFF);
	}
	r420_pipes_init(rdev);
	gb_pipe_select = RREG32(R400_GB_PIPE_SELECT);
	tmp = RREG32(R300_DST_PIPE_CONFIG);
	pipe_select_current = (tmp >> 2) & 3;
	tmp = (1 << pipe_select_current) |
	      (((gb_pipe_select >> 8) & 0xF) << 4);
	WREG32_PLL(0x000D, tmp);
	if (r520_mc_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait for MC idle while "
		       "programming pipes. Bad things might happen.\n");
	}
}
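/*
 * r520_vram_get_type() below decodes the VRAM bus width from MC_CNTL0:
 * the MEM_NUM_CHANNELS field selects a base width of 32/64/128/256 bits,
 * and MC_CHANNEL_SIZE doubles the result. For example, a field value of
 * 1 (64-bit) with MC_CHANNEL_SIZE set yields a 128-bit bus.
 */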
static void r520_vram_get_type(struct radeon_device *rdev)
{
	uint32_t tmp;

	rdev->mc.vram_width = 128;
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32_MC(R520_MC_CNTL0);
	switch ((tmp & R520_MEM_NUM_CHANNELS_MASK) >> R520_MEM_NUM_CHANNELS_SHIFT) {
	case 0:
		rdev->mc.vram_width = 32;
		break;
	case 1:
		rdev->mc.vram_width = 64;
		break;
	case 2:
		rdev->mc.vram_width = 128;
		break;
	case 3:
		rdev->mc.vram_width = 256;
		break;
	default:
		rdev->mc.vram_width = 128;
		break;
	}
	if (tmp & R520_MC_CHANNEL_SIZE)
		rdev->mc.vram_width *= 2;
}

static void r520_mc_init(struct radeon_device *rdev)
{
	r520_vram_get_type(rdev);
	r100_vram_init_sizes(rdev);
	radeon_vram_location(rdev, &rdev->mc, 0);
	rdev->mc.gtt_base_align = 0;
	if (!(rdev->flags & RADEON_IS_AGP))
		radeon_gtt_location(rdev, &rdev->mc);
	radeon_update_bandwidth_info(rdev);
}

static void r520_mc_program(struct radeon_device *rdev)
{
	struct rv515_mc_save save;

	/* Stops all mc clients */
	rv515_mc_stop(rdev, &save);

	/* Wait for mc idle */
	if (r520_mc_wait_for_idle(rdev))
		dev_warn(rdev->dev, "Timed out waiting for MC idle before updating MC.\n");
	/* Write VRAM size in case we are limiting it */
	WREG32(R_0000F8_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
	/* Program the MC; it should be a 32-bit limited address space */
	WREG32_MC(R_000004_MC_FB_LOCATION,
		  S_000004_MC_FB_START(rdev->mc.vram_start >> 16) |
		  S_000004_MC_FB_TOP(rdev->mc.vram_end >> 16));
	WREG32(R_000134_HDP_FB_LOCATION,
	       S_000134_HDP_FB_START(rdev->mc.vram_start >> 16));
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32_MC(R_000005_MC_AGP_LOCATION,
			  S_000005_MC_AGP_START(rdev->mc.gtt_start >> 16) |
			  S_000005_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
		WREG32_MC(R_000006_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
		WREG32_MC(R_000007_AGP_BASE_2,
			  S_000007_AGP_BASE_ADDR_2(upper_32_bits(rdev->mc.agp_base)));
	} else {
		WREG32_MC(R_000005_MC_AGP_LOCATION, 0xFFFFFFFF);
		WREG32_MC(R_000006_AGP_BASE, 0);
		WREG32_MC(R_000007_AGP_BASE_2, 0);
	}

	rv515_mc_resume(rdev, &save);
}
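/*
 * A worked example of the MC_FB_LOCATION packing above, assuming the
 * usual start-in-low-half/top-in-high-half register layout, with both
 * addresses expressed in 64KB units (>> 16): for vram_start = 0 and
 * vram_end = 0x0FFFFFFF (a 256MB aperture),
 *
 *	S_000004_MC_FB_START(0x00000000 >> 16) |
 *	S_000004_MC_FB_TOP(0x0FFFFFFF >> 16)   == 0x0FFF0000
 */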
static int r520_startup(struct radeon_device *rdev)
{
	int r;

	r520_mc_program(rdev);
	/* Resume clock */
	rv515_clock_startup(rdev);
	/* Initialize GPU configuration (# pipes, ...) */
	r520_gpu_init(rdev);
	/* Initialize GART (initialize after TTM so we can allocate
	 * memory through TTM but finalize after TTM) */
	if (rdev->flags & RADEON_IS_PCIE) {
		r = rv370_pcie_gart_enable(rdev);
		if (r)
			return r;
	}

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	/* Enable IRQ */
	if (!rdev->irq.installed) {
		r = radeon_irq_kms_init(rdev);
		if (r)
			return r;
	}

	rs600_irq_set(rdev);
	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
	/* 1M ring buffer */
	r = r100_cp_init(rdev, 1024 * 1024);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
		return r;
	}

	r = radeon_ib_pool_init(rdev);
	if (r) {
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
		return r;
	}

	return 0;
}

int r520_resume(struct radeon_device *rdev)
{
	int r;

	/* Make sure the GART is disabled */
	if (rdev->flags & RADEON_IS_PCIE)
		rv370_pcie_gart_disable(rdev);
	/* Resume clock before doing reset */
	rv515_clock_startup(rdev);
	/* Reset the GPU before posting, otherwise ATOM will enter an infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev, "GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			 RREG32(R_000E40_RBBM_STATUS),
			 RREG32(R_0007C0_CP_STAT));
	}
	/* post */
	atom_asic_init(rdev->mode_info.atom_context);
	/* Resume clock after posting */
	rv515_clock_startup(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);

	rdev->accel_working = true;
	r = r520_startup(rdev);
	if (r) {
		rdev->accel_working = false;
	}
	return r;
}
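/*
 * One-time init: r520_init() below gathers the BIOS/ATOM context, resets
 * and (if necessary) posts the card, then brings up the memory controller,
 * fence driver, buffer manager and GART before the first r520_startup()
 * call. Teardown on startup failure runs in roughly the reverse order.
 */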
int r520_init(struct radeon_device *rdev)
{
	int r;

	/* Initialize scratch registers */
	radeon_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* restore some registers to sane defaults */
	r100_restore_sanity(rdev);
	/* TODO: disable VGA need to use VGA request */
	/* BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	if (rdev->is_atom_bios) {
		r = radeon_atombios_init(rdev);
		if (r)
			return r;
	} else {
		dev_err(rdev->dev, "Expecting atombios for R520 GPU\n");
		return -EINVAL;
	}
	/* Reset the GPU before posting, otherwise ATOM will enter an infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev,
			 "GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			 RREG32(R_000E40_RBBM_STATUS),
			 RREG32(R_0007C0_CP_STAT));
	}
	/* check if the card is posted */
	if (radeon_boot_test_post_card(rdev) == false)
		return -EINVAL;

	if (!radeon_card_posted(rdev) && rdev->bios) {
		DRM_INFO("GPU not posted. posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	}
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* initialize AGP */
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r) {
			radeon_agp_disable(rdev);
		}
	}
	/* initialize memory controller */
	r520_mc_init(rdev);
	rv515_debugfs(rdev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;
	r = rv370_pcie_gart_init(rdev);
	if (r)
		return r;
	rv515_set_safe_registers(rdev);

	/* Initialize power management */
	radeon_pm_init(rdev);

	rdev->accel_working = true;
	r = r520_startup(rdev);
	if (r) {
		/* Something went wrong with the accel init, stop accel */
		dev_err(rdev->dev, "Disabling GPU acceleration\n");
		r100_cp_fini(rdev);
		radeon_wb_fini(rdev);
		radeon_ib_pool_fini(rdev);
		radeon_irq_kms_fini(rdev);
		rv370_pcie_gart_fini(rdev);
		radeon_agp_fini(rdev);
		rdev->accel_working = false;
	}
	return 0;
}
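/*
 * Note that r520_init() returns 0 even when r520_startup() fails: the
 * acceleration blocks are torn down and accel_working is cleared, which
 * leaves the device usable for unaccelerated modesetting.
 */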