/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
/* RS600 / Radeon X1250/X1270 integrated GPU
 *
 * This file gathers the functions specific to RS600, the IGP of the
 * X1250/X1270 family that pairs with Intel CPUs (while RS690/RS740 is
 * the X1250/X1270 variant for AMD CPUs). The display engine is the
 * Avivo one, the BIOS is an atombios, and the 3D block is the one of
 * the R4XX family. The GART is different from the RS400 one and is
 * very close to the one of the R600 family (R600 likely being an
 * evolution of the RS600 GART block).
 */
#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "atom.h"
#include "rs600d.h"

#include "rs600_reg_safe.h"

void rs600_gpu_init(struct radeon_device *rdev);
int rs600_mc_wait_for_idle(struct radeon_device *rdev);

void avivo_wait_for_vblank(struct radeon_device *rdev, int crtc)
{
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
	int i;

	if (RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset) & AVIVO_CRTC_EN) {
		/* wait until any in-progress vblank is over ... */
		for (i = 0; i < rdev->usec_timeout; i++) {
			if (!(RREG32(AVIVO_D1CRTC_STATUS + radeon_crtc->crtc_offset) & AVIVO_D1CRTC_V_BLANK))
				break;
			udelay(1);
		}
		/* ... then wait for the next vblank to start */
		for (i = 0; i < rdev->usec_timeout; i++) {
			if (RREG32(AVIVO_D1CRTC_STATUS + radeon_crtc->crtc_offset) & AVIVO_D1CRTC_V_BLANK)
				break;
			udelay(1);
		}
	}
}

void rs600_pre_page_flip(struct radeon_device *rdev, int crtc)
{
	/* enable the pflip int */
	radeon_irq_kms_pflip_irq_get(rdev, crtc);
}

void rs600_post_page_flip(struct radeon_device *rdev, int crtc)
{
	/* disable the pflip int */
	radeon_irq_kms_pflip_irq_put(rdev, crtc);
}

u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
	u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
	int i;

	/* Lock the graphics update lock */
	tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
	WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);

	/* update the scanout addresses */
	WREG32(AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
	       (u32)crtc_base);
	WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
	       (u32)crtc_base);

	/* Wait for update_pending to go high. */
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)
			break;
		udelay(1);
	}
	DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");

	/* Unlock the lock, so double-buffering can take place inside vblank */
	tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK;
	WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);

	/* Return current update_pending status: */
	return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING;
}
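/*
 * Apply the voltage and dynamic clock-gating settings requested by the
 * new power state: voltage is either toggled directly through a GPIO
 * or set via the ATOM tables (VDDC), then the sclk power-management
 * registers are tuned from the state's ATOM misc flags.
 */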
void rs600_pm_misc(struct radeon_device *rdev)
{
	int requested_index = rdev->pm.requested_power_state_index;
	struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
	struct radeon_voltage *voltage = &ps->clock_info[0].voltage;
	u32 tmp, dyn_pwrmgt_sclk_length, dyn_sclk_vol_cntl;
	u32 hdp_dyn_cntl, /*mc_host_dyn_cntl,*/ dyn_backbias_cntl;

	if ((voltage->type == VOLTAGE_GPIO) && (voltage->gpio.valid)) {
		if (ps->misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
			tmp = RREG32(voltage->gpio.reg);
			if (voltage->active_high)
				tmp |= voltage->gpio.mask;
			else
				tmp &= ~(voltage->gpio.mask);
			WREG32(voltage->gpio.reg, tmp);
			if (voltage->delay)
				udelay(voltage->delay);
		} else {
			tmp = RREG32(voltage->gpio.reg);
			if (voltage->active_high)
				tmp &= ~voltage->gpio.mask;
			else
				tmp |= voltage->gpio.mask;
			WREG32(voltage->gpio.reg, tmp);
			if (voltage->delay)
				udelay(voltage->delay);
		}
	} else if (voltage->type == VOLTAGE_VDDC)
		radeon_atom_set_voltage(rdev, voltage->vddc_id, SET_VOLTAGE_TYPE_ASIC_VDDC);

	dyn_pwrmgt_sclk_length = RREG32_PLL(DYN_PWRMGT_SCLK_LENGTH);
	dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_HILEN(0xf);
	dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_LOLEN(0xf);
	if (ps->misc & ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN) {
		if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2) {
			dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(2);
			dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(2);
		} else if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4) {
			dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(4);
			dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(4);
		}
	} else {
		dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(1);
		dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(1);
	}
	WREG32_PLL(DYN_PWRMGT_SCLK_LENGTH, dyn_pwrmgt_sclk_length);

	dyn_sclk_vol_cntl = RREG32_PLL(DYN_SCLK_VOL_CNTL);
	if (ps->misc & ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN) {
		dyn_sclk_vol_cntl |= IO_CG_VOLTAGE_DROP;
		if (voltage->delay) {
			dyn_sclk_vol_cntl |= VOLTAGE_DROP_SYNC;
			dyn_sclk_vol_cntl |= VOLTAGE_DELAY_SEL(voltage->delay);
		} else
			dyn_sclk_vol_cntl &= ~VOLTAGE_DROP_SYNC;
	} else
		dyn_sclk_vol_cntl &= ~IO_CG_VOLTAGE_DROP;
	WREG32_PLL(DYN_SCLK_VOL_CNTL, dyn_sclk_vol_cntl);

	hdp_dyn_cntl = RREG32_PLL(HDP_DYN_CNTL);
	if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN)
		hdp_dyn_cntl &= ~HDP_FORCEON;
	else
		hdp_dyn_cntl |= HDP_FORCEON;
	WREG32_PLL(HDP_DYN_CNTL, hdp_dyn_cntl);
#if 0
	/* mc_host_dyn seems to cause hangs from time to time */
	mc_host_dyn_cntl = RREG32_PLL(MC_HOST_DYN_CNTL);
	if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_MC_HOST_BLOCK_EN)
		mc_host_dyn_cntl &= ~MC_HOST_FORCEON;
	else
		mc_host_dyn_cntl |= MC_HOST_FORCEON;
	WREG32_PLL(MC_HOST_DYN_CNTL, mc_host_dyn_cntl);
#endif
	dyn_backbias_cntl = RREG32_PLL(DYN_BACKBIAS_CNTL);
	if (ps->misc & ATOM_PM_MISCINFO2_DYNAMIC_BACK_BIAS_EN)
		dyn_backbias_cntl |= IO_CG_BACKBIAS_EN;
	else
		dyn_backbias_cntl &= ~IO_CG_BACKBIAS_EN;
	WREG32_PLL(DYN_BACKBIAS_CNTL, dyn_backbias_cntl);

	/* set pcie lanes */
	if ((rdev->flags & RADEON_IS_PCIE) &&
	    !(rdev->flags & RADEON_IS_IGP) &&
	    rdev->asic->pm.set_pcie_lanes &&
	    (ps->pcie_lanes !=
	     rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) {
		radeon_set_pcie_lanes(rdev,
				      ps->pcie_lanes);
		DRM_DEBUG("Setting: p: %d\n", ps->pcie_lanes);
	}
}
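/*
 * Around a reclock the CRTCs must not fetch from memory, so
 * rs600_pm_prepare() disables display read requests on all active
 * CRTCs and rs600_pm_finish() re-enables them afterwards.
 */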
void rs600_pm_prepare(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 tmp;

	/* disable any active CRTCs */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			tmp = RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset);
			tmp |= AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
			WREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
		}
	}
}

void rs600_pm_finish(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 tmp;

	/* enable any active CRTCs */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			tmp = RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset);
			tmp &= ~AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
			WREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
		}
	}
}

/* hpd for digital panel detect/disconnect */
bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = false;

	switch (hpd) {
	case RADEON_HPD_1:
		tmp = RREG32(R_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS);
		if (G_007D04_DC_HOT_PLUG_DETECT1_SENSE(tmp))
			connected = true;
		break;
	case RADEON_HPD_2:
		tmp = RREG32(R_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS);
		if (G_007D14_DC_HOT_PLUG_DETECT2_SENSE(tmp))
			connected = true;
		break;
	default:
		break;
	}
	return connected;
}

void rs600_hpd_set_polarity(struct radeon_device *rdev,
			    enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = rs600_hpd_sense(rdev, hpd);

	switch (hpd) {
	case RADEON_HPD_1:
		tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL);
		if (connected)
			tmp &= ~S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(1);
		else
			tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(1);
		WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_2:
		tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL);
		if (connected)
			tmp &= ~S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(1);
		else
			tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(1);
		WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
		break;
	default:
		break;
	}
}

void rs600_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		switch (radeon_connector->hpd.hpd) {
		case RADEON_HPD_1:
			WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL,
			       S_007D00_DC_HOT_PLUG_DETECT1_EN(1));
			rdev->irq.hpd[0] = true;
			break;
		case RADEON_HPD_2:
			WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL,
			       S_007D10_DC_HOT_PLUG_DETECT2_EN(1));
			rdev->irq.hpd[1] = true;
			break;
		default:
			break;
		}
		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
	}
	if (rdev->irq.installed)
		rs600_irq_set(rdev);
}

void rs600_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		switch (radeon_connector->hpd.hpd) {
		case RADEON_HPD_1:
			WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL,
			       S_007D00_DC_HOT_PLUG_DETECT1_EN(0));
			rdev->irq.hpd[0] = false;
			break;
		case RADEON_HPD_2:
			WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL,
			       S_007D10_DC_HOT_PLUG_DETECT2_EN(0));
			rdev->irq.hpd[1] = false;
			break;
		default:
			break;
		}
	}
}
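/*
 * Soft reset sequence: stop the MC clients and the CP, disable bus
 * mastering, then pulse the GA/VAP, CP and MC soft-reset bits in turn,
 * logging RBBM_STATUS after each step, and finally restore the PCI
 * state and resume the MC.
 */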
int rs600_asic_reset(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 status, tmp;
	int ret = 0;

	status = RREG32(R_000E40_RBBM_STATUS);
	if (!G_000E40_GUI_ACTIVE(status)) {
		return 0;
	}
	/* Stops all mc clients */
	rv515_mc_stop(rdev, &save);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* stop CP */
	WREG32(RADEON_CP_CSQ_CNTL, 0);
	tmp = RREG32(RADEON_CP_RB_CNTL);
	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
	WREG32(RADEON_CP_RB_RPTR_WR, 0);
	WREG32(RADEON_CP_RB_WPTR, 0);
	WREG32(RADEON_CP_RB_CNTL, tmp);
	pci_save_state(rdev->pdev);
	/* disable bus mastering */
	pci_clear_master(rdev->pdev);
	mdelay(1);
	/* reset GA+VAP */
	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) |
					S_0000F0_SOFT_RESET_GA(1));
	RREG32(R_0000F0_RBBM_SOFT_RESET);
	mdelay(500);
	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
	mdelay(1);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* reset CP */
	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
	RREG32(R_0000F0_RBBM_SOFT_RESET);
	mdelay(500);
	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
	mdelay(1);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* reset MC */
	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_MC(1));
	RREG32(R_0000F0_RBBM_SOFT_RESET);
	mdelay(500);
	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
	mdelay(1);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* restore PCI & busmastering */
	pci_restore_state(rdev->pdev);
	/* Check if GPU is idle */
	if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
		dev_err(rdev->dev, "failed to reset GPU\n");
		rdev->gpu_lockup = true;
		ret = -1;
	} else
		dev_info(rdev->dev, "GPU reset succeeded\n");
	rv515_mc_resume(rdev, &save);
	return ret;
}

/*
 * GART.
 */
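/*
 * Invalidate the GART L1 TLBs and L2 cache by pulsing the invalidate
 * bits in MC_PT0_CNTL low, high, then low again; the trailing read is
 * presumably there to flush the posted register writes.
 */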
void rs600_gart_tlb_flush(struct radeon_device *rdev)
{
	uint32_t tmp;

	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
	tmp &= C_000100_INVALIDATE_ALL_L1_TLBS & C_000100_INVALIDATE_L2_CACHE;
	WREG32_MC(R_000100_MC_PT0_CNTL, tmp);

	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
	tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) | S_000100_INVALIDATE_L2_CACHE(1);
	WREG32_MC(R_000100_MC_PT0_CNTL, tmp);

	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
	tmp &= C_000100_INVALIDATE_ALL_L1_TLBS & C_000100_INVALIDATE_L2_CACHE;
	WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
}

int rs600_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.robj) {
		WARN(1, "RS600 GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r) {
		return r;
	}
	/* 8 bytes (one 64-bit PTE) per GPU page */
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
	return radeon_gart_table_vram_alloc(rdev);
}

static int rs600_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r, i;

	if (rdev->gart.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);
	/* Enable bus master */
	tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
	WREG32(RADEON_BUS_CNTL, tmp);
	/* FIXME: setup default page */
	WREG32_MC(R_000100_MC_PT0_CNTL,
		  (S_000100_EFFECTIVE_L2_CACHE_SIZE(6) |
		   S_000100_EFFECTIVE_L2_QUEUE_SIZE(6)));

	for (i = 0; i < 19; i++) {
		WREG32_MC(R_00016C_MC_PT0_CLIENT0_CNTL + i,
			  S_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(1) |
			  S_00016C_SYSTEM_ACCESS_MODE_MASK(
				  V_00016C_SYSTEM_ACCESS_MODE_NOT_IN_SYS) |
			  S_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS(
				  V_00016C_SYSTEM_APERTURE_UNMAPPED_PASSTHROUGH) |
			  S_00016C_EFFECTIVE_L1_CACHE_SIZE(3) |
			  S_00016C_ENABLE_FRAGMENT_PROCESSING(1) |
			  S_00016C_EFFECTIVE_L1_QUEUE_SIZE(3));
	}
	/* enable first context */
	WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL,
		  S_000102_ENABLE_PAGE_TABLE(1) |
		  S_000102_PAGE_TABLE_DEPTH(V_000102_PAGE_TABLE_FLAT));

	/* disable all other contexts */
	for (i = 1; i < 8; i++)
		WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL + i, 0);

	/* setup the page table */
	WREG32_MC(R_00012C_MC_PT0_CONTEXT0_FLAT_BASE_ADDR,
		  rdev->gart.table_addr);
	WREG32_MC(R_00013C_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_start);
	WREG32_MC(R_00014C_MC_PT0_CONTEXT0_FLAT_END_ADDR, rdev->mc.gtt_end);
	WREG32_MC(R_00011C_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0);

	/* System context maps to VRAM space */
	WREG32_MC(R_000112_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start);
	WREG32_MC(R_000114_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end);

	/* enable page tables */
	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
	WREG32_MC(R_000100_MC_PT0_CNTL, (tmp | S_000100_ENABLE_PT(1)));
	tmp = RREG32_MC(R_000009_MC_CNTL1);
	WREG32_MC(R_000009_MC_CNTL1, (tmp | S_000009_ENABLE_PAGE_TABLES(1)));
	rs600_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}

void rs600_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;

	/* FIXME: disable out of gart access */
	WREG32_MC(R_000100_MC_PT0_CNTL, 0);
	tmp = RREG32_MC(R_000009_MC_CNTL1);
	WREG32_MC(R_000009_MC_CNTL1, tmp & C_000009_ENABLE_PAGE_TABLES);
	radeon_gart_table_vram_unpin(rdev);
}

void rs600_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	rs600_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
}

#define R600_PTE_VALID (1 << 0)
#define R600_PTE_SYSTEM (1 << 1)
#define R600_PTE_SNOOPED (1 << 2)
#define R600_PTE_READABLE (1 << 5)
#define R600_PTE_WRITEABLE (1 << 6)
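/*
 * Each GART entry is a 64-bit PTE holding the 4KB-aligned page address
 * plus the R600-style valid/system/snooped and read/write permission
 * flags defined above.
 */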
int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
	void __iomem *ptr = (void *)rdev->gart.ptr;

	/* valid entries are 0..num_gpu_pages-1 */
	if (i < 0 || i >= rdev->gart.num_gpu_pages) {
		return -EINVAL;
	}
	addr = addr & 0xFFFFFFFFFFFFF000ULL;
	addr |= R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED;
	addr |= R600_PTE_READABLE | R600_PTE_WRITEABLE;
	writeq(addr, ptr + (i * 8));
	return 0;
}

int rs600_irq_set(struct radeon_device *rdev)
{
	uint32_t tmp = 0;
	uint32_t mode_int = 0;
	u32 hpd1 = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL) &
		~S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(1);
	u32 hpd2 = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL) &
		~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);

	if (!rdev->irq.installed) {
		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
		WREG32(R_000040_GEN_INT_CNTL, 0);
		return -EINVAL;
	}
	if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
		tmp |= S_000040_SW_INT_EN(1);
	}
	if (rdev->irq.gui_idle) {
		tmp |= S_000040_GUI_IDLE(1);
	}
	if (rdev->irq.crtc_vblank_int[0] ||
	    rdev->irq.pflip[0]) {
		mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1);
	}
	if (rdev->irq.crtc_vblank_int[1] ||
	    rdev->irq.pflip[1]) {
		mode_int |= S_006540_D2MODE_VBLANK_INT_MASK(1);
	}
	if (rdev->irq.hpd[0]) {
		hpd1 |= S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(1);
	}
	if (rdev->irq.hpd[1]) {
		hpd2 |= S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
	}
	WREG32(R_000040_GEN_INT_CNTL, tmp);
	WREG32(R_006540_DxMODE_INT_MASK, mode_int);
	WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
	WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
	return 0;
}
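/*
 * Read GEN_INT_STATUS, acknowledge any pending display interrupts
 * (vblank, hotplug) at their source, cache the display status in
 * irq.stat_regs for the caller, and return the masked general status.
 */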
static inline u32 rs600_irq_ack(struct radeon_device *rdev)
{
	uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS);
	uint32_t irq_mask = S_000044_SW_INT(1);
	u32 tmp;

	/* the interrupt works, but the status bit is permanently asserted */
	if (rdev->irq.gui_idle && radeon_gui_idle(rdev)) {
		if (!rdev->irq.gui_idle_acked)
			irq_mask |= S_000044_GUI_IDLE_STAT(1);
	}

	if (G_000044_DISPLAY_INT_STAT(irqs)) {
		rdev->irq.stat_regs.r500.disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS);
		if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			WREG32(R_006534_D1MODE_VBLANK_STATUS,
			       S_006534_D1MODE_VBLANK_ACK(1));
		}
		if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			WREG32(R_006D34_D2MODE_VBLANK_STATUS,
			       S_006D34_D2MODE_VBLANK_ACK(1));
		}
		if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL);
			tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_ACK(1);
			WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
		}
		if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL);
			tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_ACK(1);
			WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
		}
	} else {
		rdev->irq.stat_regs.r500.disp_int = 0;
	}

	if (irqs) {
		WREG32(R_000044_GEN_INT_STATUS, irqs);
	}
	return irqs & irq_mask;
}

void rs600_irq_disable(struct radeon_device *rdev)
{
	WREG32(R_000040_GEN_INT_CNTL, 0);
	WREG32(R_006540_DxMODE_INT_MASK, 0);
	/* Wait and acknowledge irq */
	mdelay(1);
	rs600_irq_ack(rdev);
}
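/*
 * Main interrupt handler: keep acknowledging and dispatching until
 * both the general and the cached display status are clear, then
 * rearm the MSI on chipsets that need an explicit rearm.
 */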
int rs600_irq_process(struct radeon_device *rdev)
{
	u32 status, msi_rearm;
	bool queue_hotplug = false;

	/* reset gui idle ack. the status bit is broken */
	rdev->irq.gui_idle_acked = false;

	status = rs600_irq_ack(rdev);
	if (!status && !rdev->irq.stat_regs.r500.disp_int) {
		return IRQ_NONE;
	}
	while (status || rdev->irq.stat_regs.r500.disp_int) {
		/* SW interrupt */
		if (G_000044_SW_INT(status)) {
			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
		}
		/* GUI idle */
		if (G_000040_GUI_IDLE(status)) {
			rdev->irq.gui_idle_acked = true;
			rdev->pm.gui_idle = true;
			wake_up(&rdev->irq.idle_queue);
		}
		/* Vertical blank interrupts */
		if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			if (rdev->irq.crtc_vblank_int[0]) {
				drm_handle_vblank(rdev->ddev, 0);
				rdev->pm.vblank_sync = true;
				wake_up(&rdev->irq.vblank_queue);
			}
			if (rdev->irq.pflip[0])
				radeon_crtc_handle_flip(rdev, 0);
		}
		if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			if (rdev->irq.crtc_vblank_int[1]) {
				drm_handle_vblank(rdev->ddev, 1);
				rdev->pm.vblank_sync = true;
				wake_up(&rdev->irq.vblank_queue);
			}
			if (rdev->irq.pflip[1])
				radeon_crtc_handle_flip(rdev, 1);
		}
		if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			queue_hotplug = true;
			DRM_DEBUG("HPD1\n");
		}
		if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			queue_hotplug = true;
			DRM_DEBUG("HPD2\n");
		}
		status = rs600_irq_ack(rdev);
	}
	/* reset gui idle ack. the status bit is broken */
	rdev->irq.gui_idle_acked = false;
	if (queue_hotplug)
		schedule_work(&rdev->hotplug_work);
	if (rdev->msi_enabled) {
		switch (rdev->family) {
		case CHIP_RS600:
		case CHIP_RS690:
		case CHIP_RS740:
			msi_rearm = RREG32(RADEON_BUS_CNTL) & ~RS600_MSI_REARM;
			WREG32(RADEON_BUS_CNTL, msi_rearm);
			WREG32(RADEON_BUS_CNTL, msi_rearm | RS600_MSI_REARM);
			break;
		default:
			WREG32(RADEON_MSI_REARM_EN, RV370_MSI_REARM_EN);
			break;
		}
	}
	return IRQ_HANDLED;
}

u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc)
{
	if (crtc == 0)
		return RREG32(R_0060A4_D1CRTC_STATUS_FRAME_COUNT);
	else
		return RREG32(R_0068A4_D2CRTC_STATUS_FRAME_COUNT);
}

int rs600_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (G_000000_MC_IDLE(RREG32_MC(R_000000_MC_STATUS)))
			return 0;
		udelay(1);
	}
	return -1;
}

void rs600_gpu_init(struct radeon_device *rdev)
{
	r420_pipes_init(rdev);
	/* Wait for mc idle */
	if (rs600_mc_wait_for_idle(rdev))
		dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
}

void rs600_mc_init(struct radeon_device *rdev)
{
	u64 base;

	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
	rdev->mc.vram_is_ddr = true;
	rdev->mc.vram_width = 128;
	rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
	rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
	rdev->mc.visible_vram_size = rdev->mc.aper_size;
	rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
	base = RREG32_MC(R_000004_MC_FB_LOCATION);
	base = G_000004_MC_FB_START(base) << 16;
	radeon_vram_location(rdev, &rdev->mc, base);
	rdev->mc.gtt_base_align = 0;
	radeon_gtt_location(rdev, &rdev->mc);
	radeon_update_bandwidth_info(rdev);
}

void rs600_bandwidth_update(struct radeon_device *rdev)
{
	struct drm_display_mode *mode0 = NULL;
	struct drm_display_mode *mode1 = NULL;
	u32 d1mode_priority_a_cnt, d2mode_priority_a_cnt;
	/* FIXME: implement full support */

	radeon_update_display_priority(rdev);

	if (rdev->mode_info.crtcs[0]->base.enabled)
		mode0 = &rdev->mode_info.crtcs[0]->base.mode;
	if (rdev->mode_info.crtcs[1]->base.enabled)
		mode1 = &rdev->mode_info.crtcs[1]->base.mode;

	rs690_line_buffer_adjust(rdev, mode0, mode1);

	if (rdev->disp_priority == 2) {
		d1mode_priority_a_cnt = RREG32(R_006548_D1MODE_PRIORITY_A_CNT);
		d2mode_priority_a_cnt = RREG32(R_006D48_D2MODE_PRIORITY_A_CNT);
		d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
		d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
		WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
		WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
		WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
		WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
	}
}
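/*
 * MC registers are accessed indirectly: the register offset goes into
 * the MC_IND_INDEX port (with WR_EN set for writes) and the value is
 * transferred through the MC_IND_DATA port.
 */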
uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
	WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
	       S_000070_MC_IND_CITF_ARB0(1));
	return RREG32(R_000074_MC_IND_DATA);
}

void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
	       S_000070_MC_IND_CITF_ARB0(1) | S_000070_MC_IND_WR_EN(1));
	WREG32(R_000074_MC_IND_DATA, v);
}

void rs600_debugfs(struct radeon_device *rdev)
{
	if (r100_debugfs_rbbm_init(rdev))
		DRM_ERROR("Failed to register debugfs file for RBBM !\n");
}

void rs600_set_safe_registers(struct radeon_device *rdev)
{
	rdev->config.r300.reg_safe_bm = rs600_reg_safe_bm;
	rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(rs600_reg_safe_bm);
}

static void rs600_mc_program(struct radeon_device *rdev)
{
	struct rv515_mc_save save;

	/* Stops all mc clients */
	rv515_mc_stop(rdev, &save);

	/* Wait for mc idle */
	if (rs600_mc_wait_for_idle(rdev))
		dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");

	/* FIXME: What does AGP mean for such a chipset? */
	WREG32_MC(R_000005_MC_AGP_LOCATION, 0x0FFFFFFF);
	WREG32_MC(R_000006_AGP_BASE, 0);
	WREG32_MC(R_000007_AGP_BASE_2, 0);
	/* Program MC */
	WREG32_MC(R_000004_MC_FB_LOCATION,
		  S_000004_MC_FB_START(rdev->mc.vram_start >> 16) |
		  S_000004_MC_FB_TOP(rdev->mc.vram_end >> 16));
	WREG32(R_000134_HDP_FB_LOCATION,
	       S_000134_HDP_FB_START(rdev->mc.vram_start >> 16));

	rv515_mc_resume(rdev, &save);
}

static int rs600_startup(struct radeon_device *rdev)
{
	int r;

	rs600_mc_program(rdev);
	/* Resume clock */
	rv515_clock_startup(rdev);
	/* Initialize GPU configuration (# pipes, ...) */
	rs600_gpu_init(rdev);
	/* Initialize GART (initialize after TTM so we can allocate
	 * memory through TTM but finalize after TTM) */
	r = rs600_gart_enable(rdev);
	if (r)
		return r;

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	/* Enable IRQ */
	rs600_irq_set(rdev);
	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
	/* 1M ring buffer */
	r = r100_cp_init(rdev, 1024 * 1024);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
		return r;
	}

	r = r600_audio_init(rdev);
	if (r) {
		dev_err(rdev->dev, "failed initializing audio\n");
		return r;
	}

	r = radeon_ib_pool_start(rdev);
	if (r)
		return r;

	r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
	if (r) {
		dev_err(rdev->dev, "failed testing IB (%d).\n", r);
		rdev->accel_working = false;
		return r;
	}

	return 0;
}

int rs600_resume(struct radeon_device *rdev)
{
	int r;

	/* Make sure the GART is disabled */
	rs600_gart_disable(rdev);
	/* Resume clock before doing reset */
	rv515_clock_startup(rdev);
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			 RREG32(R_000E40_RBBM_STATUS),
			 RREG32(R_0007C0_CP_STAT));
	}
	/* post */
	atom_asic_init(rdev->mode_info.atom_context);
	/* Resume clock after posting */
	rv515_clock_startup(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);

	rdev->accel_working = true;
	r = rs600_startup(rdev);
	if (r) {
		rdev->accel_working = false;
	}
	return r;
}
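/*
 * Suspend tears things down in roughly the reverse order of
 * rs600_startup(): IB pool, audio, CP, writeback, interrupts and
 * finally the GART.
 */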
int rs600_suspend(struct radeon_device *rdev)
{
	radeon_ib_pool_suspend(rdev);
	r600_audio_fini(rdev);
	r100_cp_disable(rdev);
	radeon_wb_disable(rdev);
	rs600_irq_disable(rdev);
	rs600_gart_disable(rdev);
	return 0;
}

void rs600_fini(struct radeon_device *rdev)
{
	r600_audio_fini(rdev);
	r100_cp_fini(rdev);
	radeon_wb_fini(rdev);
	r100_ib_fini(rdev);
	radeon_gem_fini(rdev);
	rs600_gart_fini(rdev);
	radeon_irq_kms_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}

int rs600_init(struct radeon_device *rdev)
{
	int r;

	/* Disable VGA */
	rv515_vga_render_disable(rdev);
	/* Initialize scratch registers */
	radeon_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* restore some register to sane defaults */
	r100_restore_sanity(rdev);
	/* BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	if (rdev->is_atom_bios) {
		r = radeon_atombios_init(rdev);
		if (r)
			return r;
	} else {
		dev_err(rdev->dev, "Expecting atombios for RS600 GPU\n");
		return -EINVAL;
	}
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev,
			 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			 RREG32(R_000E40_RBBM_STATUS),
			 RREG32(R_0007C0_CP_STAT));
	}
	/* check if cards are posted or not */
	if (radeon_boot_test_post_card(rdev) == false)
		return -EINVAL;

	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* initialize memory controller */
	rs600_mc_init(rdev);
	rs600_debugfs(rdev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	r = radeon_irq_kms_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;
	r = rs600_gart_init(rdev);
	if (r)
		return r;
	rs600_set_safe_registers(rdev);

	r = radeon_ib_pool_init(rdev);
	rdev->accel_working = true;
	if (r) {
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
		rdev->accel_working = false;
	}

	r = rs600_startup(rdev);
	if (r) {
		/* Something went wrong with the accel init, so stop accel */
		dev_err(rdev->dev, "Disabling GPU acceleration\n");
		r100_cp_fini(rdev);
		radeon_wb_fini(rdev);
		r100_ib_fini(rdev);
		rs600_gart_fini(rdev);
		radeon_irq_kms_fini(rdev);
		rdev->accel_working = false;
	}
	return 0;
}