/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
/* RS600 / Radeon X1250/X1270 integrated GPU
 *
 * This file gathers functions specific to RS600, the IGP of the
 * X1250/X1270 family paired with Intel CPUs (while RS690/RS740 is the
 * X1250/X1270 paired with AMD CPUs). The display engine is the AVIVO
 * one, the BIOS is an AtomBIOS, and the 3D block is that of the R4XX
 * family. The GART is different from the RS400 one and is very close
 * to the R600 family's (R600 likely being an evolution of the RS600
 * GART block).
 */
#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "atom.h"
#include "rs600d.h"

#include "rs600_reg_safe.h"

void rs600_gpu_init(struct radeon_device *rdev);
int rs600_mc_wait_for_idle(struct radeon_device *rdev);

void rs600_pre_page_flip(struct radeon_device *rdev, int crtc)
{
        /* enable the pflip int */
        radeon_irq_kms_pflip_irq_get(rdev, crtc);
}

void rs600_post_page_flip(struct radeon_device *rdev, int crtc)
{
        /* disable the pflip int */
        radeon_irq_kms_pflip_irq_put(rdev, crtc);
}

u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
        struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
        u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
        int i;

        /* Lock the graphics update lock */
        tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
        WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);

        /* update the scanout addresses */
        WREG32(AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
               (u32)crtc_base);
        WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
               (u32)crtc_base);

        /* Wait for update_pending to go high. */
        for (i = 0; i < rdev->usec_timeout; i++) {
                if (RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)
                        break;
                udelay(1);
        }
        DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");

        /* Unlock the lock, so double-buffering can take place inside vblank */
        tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK;
        WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);

        /* Return current update_pending status: */
        return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING;
}
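/*
 * Illustrative sketch (not built): a caller that wants to know whether a
 * flip programmed by rs600_page_flip() has been latched could re-poll the
 * same status bit. rs600_flip_pending() is a hypothetical helper, not part
 * of this driver; it only mirrors the wait loop above.
 */
#if 0
static bool rs600_flip_pending(struct radeon_device *rdev, int crtc_id)
{
        struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];

        return !!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) &
                  AVIVO_D1GRPH_SURFACE_UPDATE_PENDING);
}
#endif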
void rs600_pm_misc(struct radeon_device *rdev)
{
        int requested_index = rdev->pm.requested_power_state_index;
        struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
        struct radeon_voltage *voltage = &ps->clock_info[0].voltage;
        u32 tmp, dyn_pwrmgt_sclk_length, dyn_sclk_vol_cntl;
        u32 hdp_dyn_cntl, /*mc_host_dyn_cntl,*/ dyn_backbias_cntl;

        if ((voltage->type == VOLTAGE_GPIO) && (voltage->gpio.valid)) {
                if (ps->misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
                        tmp = RREG32(voltage->gpio.reg);
                        if (voltage->active_high)
                                tmp |= voltage->gpio.mask;
                        else
                                tmp &= ~(voltage->gpio.mask);
                        WREG32(voltage->gpio.reg, tmp);
                        if (voltage->delay)
                                udelay(voltage->delay);
                } else {
                        tmp = RREG32(voltage->gpio.reg);
                        if (voltage->active_high)
                                tmp &= ~voltage->gpio.mask;
                        else
                                tmp |= voltage->gpio.mask;
                        WREG32(voltage->gpio.reg, tmp);
                        if (voltage->delay)
                                udelay(voltage->delay);
                }
        } else if (voltage->type == VOLTAGE_VDDC)
                radeon_atom_set_voltage(rdev, voltage->vddc_id, SET_VOLTAGE_TYPE_ASIC_VDDC);

        dyn_pwrmgt_sclk_length = RREG32_PLL(DYN_PWRMGT_SCLK_LENGTH);
        dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_HILEN(0xf);
        dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_LOLEN(0xf);
        if (ps->misc & ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN) {
                if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2) {
                        dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(2);
                        dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(2);
                } else if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4) {
                        dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(4);
                        dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(4);
                }
        } else {
                dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(1);
                dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(1);
        }
        WREG32_PLL(DYN_PWRMGT_SCLK_LENGTH, dyn_pwrmgt_sclk_length);

        dyn_sclk_vol_cntl = RREG32_PLL(DYN_SCLK_VOL_CNTL);
        if (ps->misc & ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN) {
                dyn_sclk_vol_cntl |= IO_CG_VOLTAGE_DROP;
                if (voltage->delay) {
                        dyn_sclk_vol_cntl |= VOLTAGE_DROP_SYNC;
                        dyn_sclk_vol_cntl |= VOLTAGE_DELAY_SEL(voltage->delay);
                } else
                        dyn_sclk_vol_cntl &= ~VOLTAGE_DROP_SYNC;
        } else
                dyn_sclk_vol_cntl &= ~IO_CG_VOLTAGE_DROP;
        WREG32_PLL(DYN_SCLK_VOL_CNTL, dyn_sclk_vol_cntl);

        hdp_dyn_cntl = RREG32_PLL(HDP_DYN_CNTL);
        if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN)
                hdp_dyn_cntl &= ~HDP_FORCEON;
        else
                hdp_dyn_cntl |= HDP_FORCEON;
        WREG32_PLL(HDP_DYN_CNTL, hdp_dyn_cntl);
#if 0
        /* mc_host_dyn seems to cause hangs from time to time */
        mc_host_dyn_cntl = RREG32_PLL(MC_HOST_DYN_CNTL);
        if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_MC_HOST_BLOCK_EN)
                mc_host_dyn_cntl &= ~MC_HOST_FORCEON;
        else
                mc_host_dyn_cntl |= MC_HOST_FORCEON;
        WREG32_PLL(MC_HOST_DYN_CNTL, mc_host_dyn_cntl);
#endif
        dyn_backbias_cntl = RREG32_PLL(DYN_BACKBIAS_CNTL);
        if (ps->misc & ATOM_PM_MISCINFO2_DYNAMIC_BACK_BIAS_EN)
                dyn_backbias_cntl |= IO_CG_BACKBIAS_EN;
        else
                dyn_backbias_cntl &= ~IO_CG_BACKBIAS_EN;
        WREG32_PLL(DYN_BACKBIAS_CNTL, dyn_backbias_cntl);

        /* set pcie lanes */
        if ((rdev->flags & RADEON_IS_PCIE) &&
            !(rdev->flags & RADEON_IS_IGP) &&
            rdev->asic->set_pcie_lanes &&
            (ps->pcie_lanes !=
             rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) {
                radeon_set_pcie_lanes(rdev,
                                      ps->pcie_lanes);
                DRM_DEBUG("Setting: p: %d\n", ps->pcie_lanes);
        }
}
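/*
 * Illustrative sketch (not built): the two GPIO branches in rs600_pm_misc()
 * differ only in polarity. A hypothetical helper could factor them as
 * below; rs600_voltage_gpio_set() is not part of this driver.
 */
#if 0
static void rs600_voltage_gpio_set(struct radeon_device *rdev,
                                   struct radeon_voltage *voltage, bool drop)
{
        u32 tmp = RREG32(voltage->gpio.reg);

        /* with active_high, setting the mask bit requests the voltage drop */
        if (voltage->active_high == drop)
                tmp |= voltage->gpio.mask;
        else
                tmp &= ~voltage->gpio.mask;
        WREG32(voltage->gpio.reg, tmp);
        if (voltage->delay)
                udelay(voltage->delay);
}
#endif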
void rs600_pm_prepare(struct radeon_device *rdev)
{
        struct drm_device *ddev = rdev->ddev;
        struct drm_crtc *crtc;
        struct radeon_crtc *radeon_crtc;
        u32 tmp;

        /* disable any active CRTCs */
        list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
                radeon_crtc = to_radeon_crtc(crtc);
                if (radeon_crtc->enabled) {
                        tmp = RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset);
                        tmp |= AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
                        WREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
                }
        }
}

void rs600_pm_finish(struct radeon_device *rdev)
{
        struct drm_device *ddev = rdev->ddev;
        struct drm_crtc *crtc;
        struct radeon_crtc *radeon_crtc;
        u32 tmp;

        /* enable any active CRTCs */
        list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
                radeon_crtc = to_radeon_crtc(crtc);
                if (radeon_crtc->enabled) {
                        tmp = RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset);
                        tmp &= ~AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
                        WREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
                }
        }
}

/* hpd for digital panel detect/disconnect */
bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
        u32 tmp;
        bool connected = false;

        switch (hpd) {
        case RADEON_HPD_1:
                tmp = RREG32(R_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS);
                if (G_007D04_DC_HOT_PLUG_DETECT1_SENSE(tmp))
                        connected = true;
                break;
        case RADEON_HPD_2:
                tmp = RREG32(R_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS);
                if (G_007D14_DC_HOT_PLUG_DETECT2_SENSE(tmp))
                        connected = true;
                break;
        default:
                break;
        }
        return connected;
}

void rs600_hpd_set_polarity(struct radeon_device *rdev,
                            enum radeon_hpd_id hpd)
{
        u32 tmp;
        bool connected = rs600_hpd_sense(rdev, hpd);

        switch (hpd) {
        case RADEON_HPD_1:
                tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL);
                if (connected)
                        tmp &= ~S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(1);
                else
                        tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(1);
                WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
                break;
        case RADEON_HPD_2:
                tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL);
                if (connected)
                        tmp &= ~S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(1);
                else
                        tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(1);
                WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
                break;
        default:
                break;
        }
}
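/*
 * Note: rs600_hpd_set_polarity() programs the interrupt polarity opposite
 * to the current sense state, so the next hot-plug transition (connect or
 * disconnect) is the one that raises an interrupt.
 */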
void rs600_hpd_init(struct radeon_device *rdev)
{
        struct drm_device *dev = rdev->ddev;
        struct drm_connector *connector;

        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
                struct radeon_connector *radeon_connector = to_radeon_connector(connector);
                switch (radeon_connector->hpd.hpd) {
                case RADEON_HPD_1:
                        WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL,
                               S_007D00_DC_HOT_PLUG_DETECT1_EN(1));
                        rdev->irq.hpd[0] = true;
                        break;
                case RADEON_HPD_2:
                        WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL,
                               S_007D10_DC_HOT_PLUG_DETECT2_EN(1));
                        rdev->irq.hpd[1] = true;
                        break;
                default:
                        break;
                }
                radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
        }
        if (rdev->irq.installed)
                rs600_irq_set(rdev);
}

void rs600_hpd_fini(struct radeon_device *rdev)
{
        struct drm_device *dev = rdev->ddev;
        struct drm_connector *connector;

        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
                struct radeon_connector *radeon_connector = to_radeon_connector(connector);
                switch (radeon_connector->hpd.hpd) {
                case RADEON_HPD_1:
                        WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL,
                               S_007D00_DC_HOT_PLUG_DETECT1_EN(0));
                        rdev->irq.hpd[0] = false;
                        break;
                case RADEON_HPD_2:
                        WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL,
                               S_007D10_DC_HOT_PLUG_DETECT2_EN(0));
                        rdev->irq.hpd[1] = false;
                        break;
                default:
                        break;
                }
        }
}

int rs600_asic_reset(struct radeon_device *rdev)
{
        struct rv515_mc_save save;
        u32 status, tmp;
        int ret = 0;

        status = RREG32(R_000E40_RBBM_STATUS);
        if (!G_000E40_GUI_ACTIVE(status)) {
                return 0;
        }
        /* Stops all mc clients */
        rv515_mc_stop(rdev, &save);
        status = RREG32(R_000E40_RBBM_STATUS);
        dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
        /* stop CP */
        WREG32(RADEON_CP_CSQ_CNTL, 0);
        tmp = RREG32(RADEON_CP_RB_CNTL);
        WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
        WREG32(RADEON_CP_RB_RPTR_WR, 0);
        WREG32(RADEON_CP_RB_WPTR, 0);
        WREG32(RADEON_CP_RB_CNTL, tmp);
        pci_save_state(rdev->pdev);
        /* disable bus mastering */
        pci_clear_master(rdev->pdev);
        mdelay(1);
        /* reset GA+VAP */
        WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) |
                                        S_0000F0_SOFT_RESET_GA(1));
        RREG32(R_0000F0_RBBM_SOFT_RESET);
        mdelay(500);
        WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
        mdelay(1);
        status = RREG32(R_000E40_RBBM_STATUS);
        dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
        /* reset CP */
        WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
        RREG32(R_0000F0_RBBM_SOFT_RESET);
        mdelay(500);
        WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
        mdelay(1);
        status = RREG32(R_000E40_RBBM_STATUS);
        dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
        /* reset MC */
        WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_MC(1));
        RREG32(R_0000F0_RBBM_SOFT_RESET);
        mdelay(500);
        WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
        mdelay(1);
        status = RREG32(R_000E40_RBBM_STATUS);
        dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
        /* restore PCI & busmastering */
        pci_restore_state(rdev->pdev);
        /* Check if GPU is idle */
        if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
                dev_err(rdev->dev, "failed to reset GPU\n");
                rdev->gpu_lockup = true;
                ret = -1;
        } else
                dev_info(rdev->dev, "GPU reset succeeded\n");
        rv515_mc_resume(rdev, &save);
        return ret;
}
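/*
 * Illustrative sketch (not built): the three soft-reset blocks in
 * rs600_asic_reset() share the same pulse pattern; a hypothetical helper
 * could factor it as below. rs600_soft_reset_pulse() is not part of this
 * driver.
 */
#if 0
static u32 rs600_soft_reset_pulse(struct radeon_device *rdev, u32 bits)
{
        u32 status;

        WREG32(R_0000F0_RBBM_SOFT_RESET, bits);
        RREG32(R_0000F0_RBBM_SOFT_RESET);       /* read back to post the write */
        mdelay(500);
        WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
        mdelay(1);
        status = RREG32(R_000E40_RBBM_STATUS);
        dev_info(rdev->dev, "RBBM_STATUS=0x%08X\n", status);
        return status;
}
#endif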
/*
 * GART.
 */
void rs600_gart_tlb_flush(struct radeon_device *rdev)
{
        uint32_t tmp;

        tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
        tmp &= C_000100_INVALIDATE_ALL_L1_TLBS & C_000100_INVALIDATE_L2_CACHE;
        WREG32_MC(R_000100_MC_PT0_CNTL, tmp);

        tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
        tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) | S_000100_INVALIDATE_L2_CACHE(1);
        WREG32_MC(R_000100_MC_PT0_CNTL, tmp);

        tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
        tmp &= C_000100_INVALIDATE_ALL_L1_TLBS & C_000100_INVALIDATE_L2_CACHE;
        WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
        tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
}

int rs600_gart_init(struct radeon_device *rdev)
{
        int r;

        if (rdev->gart.robj) {
                WARN(1, "RS600 GART already initialized\n");
                return 0;
        }
        /* Initialize common gart structure */
        r = radeon_gart_init(rdev);
        if (r) {
                return r;
        }
        /* each PTE is 8 bytes wide (see rs600_gart_set_page) */
        rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
        return radeon_gart_table_vram_alloc(rdev);
}

static int rs600_gart_enable(struct radeon_device *rdev)
{
        u32 tmp;
        int r, i;

        if (rdev->gart.robj == NULL) {
                dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
                return -EINVAL;
        }
        r = radeon_gart_table_vram_pin(rdev);
        if (r)
                return r;
        radeon_gart_restore(rdev);
        /* Enable bus master */
        tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
        WREG32(RADEON_BUS_CNTL, tmp);
        /* FIXME: setup default page */
        WREG32_MC(R_000100_MC_PT0_CNTL,
                  (S_000100_EFFECTIVE_L2_CACHE_SIZE(6) |
                   S_000100_EFFECTIVE_L2_QUEUE_SIZE(6)));

        for (i = 0; i < 19; i++) {
                WREG32_MC(R_00016C_MC_PT0_CLIENT0_CNTL + i,
                          S_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(1) |
                          S_00016C_SYSTEM_ACCESS_MODE_MASK(
                                  V_00016C_SYSTEM_ACCESS_MODE_NOT_IN_SYS) |
                          S_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS(
                                  V_00016C_SYSTEM_APERTURE_UNMAPPED_PASSTHROUGH) |
                          S_00016C_EFFECTIVE_L1_CACHE_SIZE(3) |
                          S_00016C_ENABLE_FRAGMENT_PROCESSING(1) |
                          S_00016C_EFFECTIVE_L1_QUEUE_SIZE(3));
        }
        /* enable first context */
        WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL,
                  S_000102_ENABLE_PAGE_TABLE(1) |
                  S_000102_PAGE_TABLE_DEPTH(V_000102_PAGE_TABLE_FLAT));

        /* disable all other contexts */
        for (i = 1; i < 8; i++)
                WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL + i, 0);

        /* setup the page table */
        WREG32_MC(R_00012C_MC_PT0_CONTEXT0_FLAT_BASE_ADDR,
                  rdev->gart.table_addr);
        WREG32_MC(R_00013C_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_start);
        WREG32_MC(R_00014C_MC_PT0_CONTEXT0_FLAT_END_ADDR, rdev->mc.gtt_end);
        WREG32_MC(R_00011C_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0);

        /* System context maps to VRAM space */
        WREG32_MC(R_000112_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start);
        WREG32_MC(R_000114_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end);

        /* enable page tables */
        tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
        WREG32_MC(R_000100_MC_PT0_CNTL, (tmp | S_000100_ENABLE_PT(1)));
        tmp = RREG32_MC(R_000009_MC_CNTL1);
        WREG32_MC(R_000009_MC_CNTL1, (tmp | S_000009_ENABLE_PAGE_TABLES(1)));
        rs600_gart_tlb_flush(rdev);
        DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
                 (unsigned)(rdev->mc.gtt_size >> 20),
                 (unsigned long long)rdev->gart.table_addr);
        rdev->gart.ready = true;
        return 0;
}

void rs600_gart_disable(struct radeon_device *rdev)
{
        u32 tmp;

        /* FIXME: disable out of gart access */
        WREG32_MC(R_000100_MC_PT0_CNTL, 0);
        tmp = RREG32_MC(R_000009_MC_CNTL1);
        WREG32_MC(R_000009_MC_CNTL1, tmp & C_000009_ENABLE_PAGE_TABLES);
        radeon_gart_table_vram_unpin(rdev);
}

void rs600_gart_fini(struct radeon_device *rdev)
{
        radeon_gart_fini(rdev);
        rs600_gart_disable(rdev);
        radeon_gart_table_vram_free(rdev);
}

#define R600_PTE_VALID     (1 << 0)
#define R600_PTE_SYSTEM    (1 << 1)
#define R600_PTE_SNOOPED   (1 << 2)
#define R600_PTE_READABLE  (1 << 5)
#define R600_PTE_WRITEABLE (1 << 6)

int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
        void __iomem *ptr = (void *)rdev->gart.ptr;

        /* valid page indices are 0..num_gpu_pages-1 */
        if (i < 0 || i >= rdev->gart.num_gpu_pages) {
                return -EINVAL;
        }
        addr = addr & 0xFFFFFFFFFFFFF000ULL;
        addr |= R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED;
        addr |= R600_PTE_READABLE | R600_PTE_WRITEABLE;
        writeq(addr, ptr + (i * 8));
        return 0;
}
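/*
 * Illustrative sketch (not built): how a 4KB system page ends up encoded in
 * the flat page table by rs600_gart_set_page() above. The address is
 * 4KB-aligned, the flag bits live in the low byte, and the entry is one
 * 64-bit write. rs600_gart_example() and its DMA address are hypothetical.
 */
#if 0
static void rs600_gart_example(struct radeon_device *rdev)
{
        /* map GPU page 0 to a hypothetical DMA address */
        uint64_t dma_addr = 0x12345000ULL;

        rs600_gart_set_page(rdev, 0, dma_addr);
        /* resulting PTE: 0x12345000 | VALID | SYSTEM | SNOOPED |
         * READABLE | WRITEABLE = 0x12345067 */
        rs600_gart_tlb_flush(rdev);     /* make the MC see the new entry */
}
#endif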
int rs600_irq_set(struct radeon_device *rdev)
{
        uint32_t tmp = 0;
        uint32_t mode_int = 0;
        u32 hpd1 = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL) &
                ~S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(1);
        u32 hpd2 = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL) &
                ~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);

        if (!rdev->irq.installed) {
                WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
                WREG32(R_000040_GEN_INT_CNTL, 0);
                return -EINVAL;
        }
        if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
                tmp |= S_000040_SW_INT_EN(1);
        }
        if (rdev->irq.gui_idle) {
                tmp |= S_000040_GUI_IDLE(1);
        }
        if (rdev->irq.crtc_vblank_int[0] ||
            rdev->irq.pflip[0]) {
                mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1);
        }
        if (rdev->irq.crtc_vblank_int[1] ||
            rdev->irq.pflip[1]) {
                mode_int |= S_006540_D2MODE_VBLANK_INT_MASK(1);
        }
        if (rdev->irq.hpd[0]) {
                hpd1 |= S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(1);
        }
        if (rdev->irq.hpd[1]) {
                hpd2 |= S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
        }
        WREG32(R_000040_GEN_INT_CNTL, tmp);
        WREG32(R_006540_DxMODE_INT_MASK, mode_int);
        WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
        WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
        return 0;
}
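/*
 * Note: rs600_irq_set() rebuilds the enable masks from scratch on every
 * call (hpd1/hpd2 start from the current register value with the enable
 * bit cleared), so disabling a source amounts to simply not setting its
 * bit before the final writes above.
 */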
static inline u32 rs600_irq_ack(struct radeon_device *rdev)
{
        uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS);
        uint32_t irq_mask = S_000044_SW_INT(1);
        u32 tmp;

        /* the interrupt works, but the status bit is permanently asserted */
        if (rdev->irq.gui_idle && radeon_gui_idle(rdev)) {
                if (!rdev->irq.gui_idle_acked)
                        irq_mask |= S_000044_GUI_IDLE_STAT(1);
        }

        if (G_000044_DISPLAY_INT_STAT(irqs)) {
                rdev->irq.stat_regs.r500.disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS);
                if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
                        WREG32(R_006534_D1MODE_VBLANK_STATUS,
                               S_006534_D1MODE_VBLANK_ACK(1));
                }
                if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
                        WREG32(R_006D34_D2MODE_VBLANK_STATUS,
                               S_006D34_D2MODE_VBLANK_ACK(1));
                }
                if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
                        tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL);
                        tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_ACK(1);
                        WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
                }
                if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
                        tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL);
                        tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_ACK(1);
                        WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
                }
        } else {
                rdev->irq.stat_regs.r500.disp_int = 0;
        }

        if (irqs) {
                WREG32(R_000044_GEN_INT_STATUS, irqs);
        }
        return irqs & irq_mask;
}

void rs600_irq_disable(struct radeon_device *rdev)
{
        WREG32(R_000040_GEN_INT_CNTL, 0);
        WREG32(R_006540_DxMODE_INT_MASK, 0);
        /* Wait and acknowledge irq */
        mdelay(1);
        rs600_irq_ack(rdev);
}

int rs600_irq_process(struct radeon_device *rdev)
{
        u32 status, msi_rearm;
        bool queue_hotplug = false;

        /* reset gui idle ack. the status bit is broken */
        rdev->irq.gui_idle_acked = false;

        status = rs600_irq_ack(rdev);
        if (!status && !rdev->irq.stat_regs.r500.disp_int) {
                return IRQ_NONE;
        }
        while (status || rdev->irq.stat_regs.r500.disp_int) {
                /* SW interrupt */
                if (G_000044_SW_INT(status)) {
                        radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
                }
                /* GUI idle */
                if (G_000040_GUI_IDLE(status)) {
                        rdev->irq.gui_idle_acked = true;
                        rdev->pm.gui_idle = true;
                        wake_up(&rdev->irq.idle_queue);
                }
                /* Vertical blank interrupts */
                if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
                        if (rdev->irq.crtc_vblank_int[0]) {
                                drm_handle_vblank(rdev->ddev, 0);
                                rdev->pm.vblank_sync = true;
                                wake_up(&rdev->irq.vblank_queue);
                        }
                        if (rdev->irq.pflip[0])
                                radeon_crtc_handle_flip(rdev, 0);
                }
                if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
                        if (rdev->irq.crtc_vblank_int[1]) {
                                drm_handle_vblank(rdev->ddev, 1);
                                rdev->pm.vblank_sync = true;
                                wake_up(&rdev->irq.vblank_queue);
                        }
                        if (rdev->irq.pflip[1])
                                radeon_crtc_handle_flip(rdev, 1);
                }
                if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
                        queue_hotplug = true;
                        DRM_DEBUG("HPD1\n");
                }
                if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
                        queue_hotplug = true;
                        DRM_DEBUG("HPD2\n");
                }
                status = rs600_irq_ack(rdev);
        }
        /* reset gui idle ack. the status bit is broken */
        rdev->irq.gui_idle_acked = false;
        if (queue_hotplug)
                schedule_work(&rdev->hotplug_work);
        if (rdev->msi_enabled) {
                switch (rdev->family) {
                case CHIP_RS600:
                case CHIP_RS690:
                case CHIP_RS740:
                        msi_rearm = RREG32(RADEON_BUS_CNTL) & ~RS600_MSI_REARM;
                        WREG32(RADEON_BUS_CNTL, msi_rearm);
                        WREG32(RADEON_BUS_CNTL, msi_rearm | RS600_MSI_REARM);
                        break;
                default:
                        msi_rearm = RREG32(RADEON_MSI_REARM_EN) & ~RV370_MSI_REARM_EN;
                        WREG32(RADEON_MSI_REARM_EN, msi_rearm);
                        WREG32(RADEON_MSI_REARM_EN, msi_rearm | RV370_MSI_REARM_EN);
                        break;
                }
        }
        return IRQ_HANDLED;
}
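/*
 * Note: rs600_irq_process() loops on rs600_irq_ack() until both the
 * general and display status words read back zero, so events that arrive
 * while earlier ones are being serviced are handled in the same pass. The
 * MSI re-arm at the end (toggling the REARM bit off and on) is what lets
 * the next message-signaled interrupt be delivered on these chips.
 */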
u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc)
{
        if (crtc == 0)
                return RREG32(R_0060A4_D1CRTC_STATUS_FRAME_COUNT);
        else
                return RREG32(R_0068A4_D2CRTC_STATUS_FRAME_COUNT);
}

int rs600_mc_wait_for_idle(struct radeon_device *rdev)
{
        unsigned i;

        for (i = 0; i < rdev->usec_timeout; i++) {
                if (G_000000_MC_IDLE(RREG32_MC(R_000000_MC_STATUS)))
                        return 0;
                udelay(1);
        }
        return -1;
}

void rs600_gpu_init(struct radeon_device *rdev)
{
        r420_pipes_init(rdev);
        /* Wait for mc idle */
        if (rs600_mc_wait_for_idle(rdev))
                dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
}

void rs600_mc_init(struct radeon_device *rdev)
{
        u64 base;

        rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
        rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
        rdev->mc.vram_is_ddr = true;
        rdev->mc.vram_width = 128;
        rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
        rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
        rdev->mc.visible_vram_size = rdev->mc.aper_size;
        rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
        base = RREG32_MC(R_000004_MC_FB_LOCATION);
        base = G_000004_MC_FB_START(base) << 16;
        radeon_vram_location(rdev, &rdev->mc, base);
        rdev->mc.gtt_base_align = 0;
        radeon_gtt_location(rdev, &rdev->mc);
        radeon_update_bandwidth_info(rdev);
}

void rs600_bandwidth_update(struct radeon_device *rdev)
{
        struct drm_display_mode *mode0 = NULL;
        struct drm_display_mode *mode1 = NULL;
        u32 d1mode_priority_a_cnt, d2mode_priority_a_cnt;
        /* FIXME: implement full support */

        radeon_update_display_priority(rdev);

        if (rdev->mode_info.crtcs[0]->base.enabled)
                mode0 = &rdev->mode_info.crtcs[0]->base.mode;
        if (rdev->mode_info.crtcs[1]->base.enabled)
                mode1 = &rdev->mode_info.crtcs[1]->base.mode;

        rs690_line_buffer_adjust(rdev, mode0, mode1);

        if (rdev->disp_priority == 2) {
                d1mode_priority_a_cnt = RREG32(R_006548_D1MODE_PRIORITY_A_CNT);
                d2mode_priority_a_cnt = RREG32(R_006D48_D2MODE_PRIORITY_A_CNT);
                d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
                d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
                WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
                WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
                WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
                WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
        }
}

uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
        WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
               S_000070_MC_IND_CITF_ARB0(1));
        return RREG32(R_000074_MC_IND_DATA);
}

void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
        WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
               S_000070_MC_IND_CITF_ARB0(1) | S_000070_MC_IND_WR_EN(1));
        WREG32(R_000074_MC_IND_DATA, v);
}
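/*
 * Illustrative sketch (not built): MC registers on RS600 are reached
 * through the index/data pair above rather than a flat MMIO window, which
 * is what the RREG32_MC()/WREG32_MC() accessors used throughout this file
 * boil down to. A hypothetical read-modify-write helper built on them:
 */
#if 0
static void rs600_mc_rmw(struct radeon_device *rdev, uint32_t reg,
                         uint32_t clr, uint32_t set)
{
        uint32_t tmp = rs600_mc_rreg(rdev, reg);

        tmp = (tmp & ~clr) | set;
        rs600_mc_wreg(rdev, reg, tmp);
}
#endif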
void rs600_debugfs(struct radeon_device *rdev)
{
        if (r100_debugfs_rbbm_init(rdev))
                DRM_ERROR("Failed to register debugfs file for RBBM !\n");
}

void rs600_set_safe_registers(struct radeon_device *rdev)
{
        rdev->config.r300.reg_safe_bm = rs600_reg_safe_bm;
        rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(rs600_reg_safe_bm);
}

static void rs600_mc_program(struct radeon_device *rdev)
{
        struct rv515_mc_save save;

        /* Stops all mc clients */
        rv515_mc_stop(rdev, &save);

        /* Wait for mc idle */
        if (rs600_mc_wait_for_idle(rdev))
                dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");

        /* FIXME: what does AGP mean for such a chipset? */
        WREG32_MC(R_000005_MC_AGP_LOCATION, 0x0FFFFFFF);
        WREG32_MC(R_000006_AGP_BASE, 0);
        WREG32_MC(R_000007_AGP_BASE_2, 0);
        /* Program MC */
        WREG32_MC(R_000004_MC_FB_LOCATION,
                  S_000004_MC_FB_START(rdev->mc.vram_start >> 16) |
                  S_000004_MC_FB_TOP(rdev->mc.vram_end >> 16));
        WREG32(R_000134_HDP_FB_LOCATION,
               S_000134_HDP_FB_START(rdev->mc.vram_start >> 16));

        rv515_mc_resume(rdev, &save);
}

static int rs600_startup(struct radeon_device *rdev)
{
        int r;

        rs600_mc_program(rdev);
        /* Resume clock */
        rv515_clock_startup(rdev);
        /* Initialize GPU configuration (# pipes, ...) */
        rs600_gpu_init(rdev);
        /* Initialize GART (initialize after TTM so we can allocate
         * memory through TTM but finalize after TTM) */
        r = rs600_gart_enable(rdev);
        if (r)
                return r;

        /* allocate wb buffer */
        r = radeon_wb_init(rdev);
        if (r)
                return r;

        r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
        if (r) {
                dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
                return r;
        }

        /* Enable IRQ */
        rs600_irq_set(rdev);
        rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
        /* 1M ring buffer */
        r = r100_cp_init(rdev, 1024 * 1024);
        if (r) {
                dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
                return r;
        }

        r = r600_audio_init(rdev);
        if (r) {
                dev_err(rdev->dev, "failed initializing audio\n");
                return r;
        }

        r = radeon_ib_pool_start(rdev);
        if (r)
                return r;

        r = r100_ib_test(rdev);
        if (r) {
                dev_err(rdev->dev, "failed testing IB (%d).\n", r);
                rdev->accel_working = false;
                return r;
        }

        return 0;
}

int rs600_resume(struct radeon_device *rdev)
{
        /* Make sure the GART is disabled */
        rs600_gart_disable(rdev);
        /* Resume clock before doing reset */
        rv515_clock_startup(rdev);
        /* Reset gpu before posting otherwise ATOM will enter infinite loop */
        if (radeon_asic_reset(rdev)) {
                dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
                         RREG32(R_000E40_RBBM_STATUS),
                         RREG32(R_0007C0_CP_STAT));
        }
        /* post */
        atom_asic_init(rdev->mode_info.atom_context);
        /* Resume clock after posting */
        rv515_clock_startup(rdev);
        /* Initialize surface registers */
        radeon_surface_init(rdev);

        rdev->accel_working = true;
        return rs600_startup(rdev);
}
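/*
 * Note: rs600_suspend() below tears down in roughly the reverse order of
 * rs600_startup(): stop the IB pool and audio, disable the CP, writeback
 * and interrupts, and finally turn the GART off.
 */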
int rs600_suspend(struct radeon_device *rdev)
{
        radeon_ib_pool_suspend(rdev);
        r600_audio_fini(rdev);
        r100_cp_disable(rdev);
        radeon_wb_disable(rdev);
        rs600_irq_disable(rdev);
        rs600_gart_disable(rdev);
        return 0;
}

void rs600_fini(struct radeon_device *rdev)
{
        r600_audio_fini(rdev);
        r100_cp_fini(rdev);
        radeon_wb_fini(rdev);
        r100_ib_fini(rdev);
        radeon_gem_fini(rdev);
        rs600_gart_fini(rdev);
        radeon_irq_kms_fini(rdev);
        radeon_fence_driver_fini(rdev);
        radeon_bo_fini(rdev);
        radeon_atombios_fini(rdev);
        kfree(rdev->bios);
        rdev->bios = NULL;
}

int rs600_init(struct radeon_device *rdev)
{
        int r;

        /* Disable VGA */
        rv515_vga_render_disable(rdev);
        /* Initialize scratch registers */
        radeon_scratch_init(rdev);
        /* Initialize surface registers */
        radeon_surface_init(rdev);
        /* restore some register to sane defaults */
        r100_restore_sanity(rdev);
        /* BIOS */
        if (!radeon_get_bios(rdev)) {
                if (ASIC_IS_AVIVO(rdev))
                        return -EINVAL;
        }
        if (rdev->is_atom_bios) {
                r = radeon_atombios_init(rdev);
                if (r)
                        return r;
        } else {
                dev_err(rdev->dev, "Expecting atombios for RS600 GPU\n");
                return -EINVAL;
        }
        /* Reset gpu before posting otherwise ATOM will enter infinite loop */
        if (radeon_asic_reset(rdev)) {
                dev_warn(rdev->dev,
                         "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
                         RREG32(R_000E40_RBBM_STATUS),
                         RREG32(R_0007C0_CP_STAT));
        }
        /* check if cards are posted or not */
        if (radeon_boot_test_post_card(rdev) == false)
                return -EINVAL;

        /* Initialize clocks */
        radeon_get_clock_info(rdev->ddev);
        /* initialize memory controller */
        rs600_mc_init(rdev);
        rs600_debugfs(rdev);
        /* Fence driver */
        r = radeon_fence_driver_init(rdev);
        if (r)
                return r;
        r = radeon_irq_kms_init(rdev);
        if (r)
                return r;
        /* Memory manager */
        r = radeon_bo_init(rdev);
        if (r)
                return r;
        r = rs600_gart_init(rdev);
        if (r)
                return r;
        rs600_set_safe_registers(rdev);

        r = radeon_ib_pool_init(rdev);
        rdev->accel_working = true;
        if (r) {
                dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
                rdev->accel_working = false;
        }

        r = rs600_startup(rdev);
        if (r) {
                /* Something went wrong with the accel init, so stop accel */
                dev_err(rdev->dev, "Disabling GPU acceleration\n");
                r100_cp_fini(rdev);
                radeon_wb_fini(rdev);
                r100_ib_fini(rdev);
                rs600_gart_fini(rdev);
                radeon_irq_kms_fini(rdev);
                rdev->accel_working = false;
        }
        return 0;
}