/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
/* RS600 / Radeon X1250/X1270 integrated GPU
 *
 * This file gathers the functions specific to RS600, the IGP of the
 * X1250/X1270 family paired with Intel CPUs (while RS690/RS740 is the
 * X1250/X1270 variant paired with AMD CPUs). The display engine is the
 * AVIVO one, the BIOS is an AtomBIOS, and the 3D blocks are those of the
 * R4XX family. The GART is different from the RS400 one and is very
 * close to that of the R600 family (R600 likely being an evolution of
 * the RS600 GART block).
 */
#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "atom.h"
#include "rs600d.h"

#include "rs600_reg_safe.h"

void rs600_gpu_init(struct radeon_device *rdev);
int rs600_mc_wait_for_idle(struct radeon_device *rdev);

void rs600_pre_page_flip(struct radeon_device *rdev, int crtc)
{
	/* enable the pflip int */
	radeon_irq_kms_pflip_irq_get(rdev, crtc);
}

void rs600_post_page_flip(struct radeon_device *rdev, int crtc)
{
	/* disable the pflip int */
	radeon_irq_kms_pflip_irq_put(rdev, crtc);
}

u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
	u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);

	/* Lock the graphics update lock */
	tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
	WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);

	/* update the scanout addresses */
	WREG32(AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
	       (u32)crtc_base);
	WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
	       (u32)crtc_base);

	/* Wait for update_pending to go high. */
	while (!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) &
		 AVIVO_D1GRPH_SURFACE_UPDATE_PENDING))
		;
	DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");

	/* Unlock the lock, so double-buffering can take place inside vblank */
	tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK;
	WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);

	/* Return current update_pending status: */
	return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) &
		AVIVO_D1GRPH_SURFACE_UPDATE_PENDING;
}
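
/*
 * Illustrative sketch only, not used by the driver: the update_pending
 * wait above spins forever if the display engine never raises the bit.
 * A bounded variant could look like the hypothetical helper below; the
 * helper name and the iteration budget are assumptions, not a
 * documented hardware limit.
 */
static inline bool rs600_grph_update_pending(struct radeon_device *rdev,
					     struct radeon_crtc *radeon_crtc)
{
	unsigned i;

	for (i = 0; i < 10000; i++) {
		/* poll update_pending with a 1us backoff per iteration */
		if (RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) &
		    AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)
			return true;
		udelay(1);
	}
	return false;
}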

void rs600_pm_misc(struct radeon_device *rdev)
{
	int requested_index = rdev->pm.requested_power_state_index;
	struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
	struct radeon_voltage *voltage = &ps->clock_info[0].voltage;
	u32 tmp, dyn_pwrmgt_sclk_length, dyn_sclk_vol_cntl;
	u32 hdp_dyn_cntl, /*mc_host_dyn_cntl,*/ dyn_backbias_cntl;

	if ((voltage->type == VOLTAGE_GPIO) && (voltage->gpio.valid)) {
		if (ps->misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
			/* voltage drop supported: drive the GPIO to its active level */
			tmp = RREG32(voltage->gpio.reg);
			if (voltage->active_high)
				tmp |= voltage->gpio.mask;
			else
				tmp &= ~(voltage->gpio.mask);
			WREG32(voltage->gpio.reg, tmp);
			if (voltage->delay)
				udelay(voltage->delay);
		} else {
			/* no voltage drop: drive the GPIO to its inactive level */
			tmp = RREG32(voltage->gpio.reg);
			if (voltage->active_high)
				tmp &= ~voltage->gpio.mask;
			else
				tmp |= voltage->gpio.mask;
			WREG32(voltage->gpio.reg, tmp);
			if (voltage->delay)
				udelay(voltage->delay);
		}
	} else if (voltage->type == VOLTAGE_VDDC)
		radeon_atom_set_voltage(rdev, voltage->vddc_id,
					SET_VOLTAGE_TYPE_ASIC_VDDC);

	dyn_pwrmgt_sclk_length = RREG32_PLL(DYN_PWRMGT_SCLK_LENGTH);
	dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_HILEN(0xf);
	dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_LOLEN(0xf);
	if (ps->misc & ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN) {
		if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2) {
			dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(2);
			dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(2);
		} else if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4) {
			dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(4);
			dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(4);
		}
	} else {
		dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(1);
		dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(1);
	}
	WREG32_PLL(DYN_PWRMGT_SCLK_LENGTH, dyn_pwrmgt_sclk_length);

	dyn_sclk_vol_cntl = RREG32_PLL(DYN_SCLK_VOL_CNTL);
	if (ps->misc & ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN) {
		dyn_sclk_vol_cntl |= IO_CG_VOLTAGE_DROP;
		if (voltage->delay) {
			dyn_sclk_vol_cntl |= VOLTAGE_DROP_SYNC;
			dyn_sclk_vol_cntl |= VOLTAGE_DELAY_SEL(voltage->delay);
		} else
			dyn_sclk_vol_cntl &= ~VOLTAGE_DROP_SYNC;
	} else
		dyn_sclk_vol_cntl &= ~IO_CG_VOLTAGE_DROP;
	WREG32_PLL(DYN_SCLK_VOL_CNTL, dyn_sclk_vol_cntl);

	hdp_dyn_cntl = RREG32_PLL(HDP_DYN_CNTL);
	if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN)
		hdp_dyn_cntl &= ~HDP_FORCEON;
	else
		hdp_dyn_cntl |= HDP_FORCEON;
	WREG32_PLL(HDP_DYN_CNTL, hdp_dyn_cntl);
#if 0
	/* mc_host_dyn seems to cause hangs from time to time */
	mc_host_dyn_cntl = RREG32_PLL(MC_HOST_DYN_CNTL);
	if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_MC_HOST_BLOCK_EN)
		mc_host_dyn_cntl &= ~MC_HOST_FORCEON;
	else
		mc_host_dyn_cntl |= MC_HOST_FORCEON;
	WREG32_PLL(MC_HOST_DYN_CNTL, mc_host_dyn_cntl);
#endif
	dyn_backbias_cntl = RREG32_PLL(DYN_BACKBIAS_CNTL);
	if (ps->misc & ATOM_PM_MISCINFO2_DYNAMIC_BACK_BIAS_EN)
		dyn_backbias_cntl |= IO_CG_BACKBIAS_EN;
	else
		dyn_backbias_cntl &= ~IO_CG_BACKBIAS_EN;
	WREG32_PLL(DYN_BACKBIAS_CNTL, dyn_backbias_cntl);

	/* set pcie lanes */
	if ((rdev->flags & RADEON_IS_PCIE) &&
	    !(rdev->flags & RADEON_IS_IGP) &&
	    rdev->asic->set_pcie_lanes &&
	    (ps->pcie_lanes !=
	     rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) {
		radeon_set_pcie_lanes(rdev, ps->pcie_lanes);
		DRM_DEBUG("Setting: p: %d\n", ps->pcie_lanes);
	}
}
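
/*
 * rs600_pm_prepare()/rs600_pm_finish() bracket a power state change:
 * CRTC memory read requests are gated off beforehand and re-enabled
 * afterwards, so that active displays do not fetch from memory while
 * the reclocking is in flight.
 */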

void rs600_pm_prepare(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 tmp;

	/* disable any active CRTCs */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			tmp = RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset);
			tmp |= AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
			WREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
		}
	}
}

void rs600_pm_finish(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 tmp;

	/* enable any active CRTCs */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			tmp = RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset);
			tmp &= ~AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
			WREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
		}
	}
}

/* hpd for digital panel detect/disconnect */
bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = false;

	switch (hpd) {
	case RADEON_HPD_1:
		tmp = RREG32(R_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS);
		if (G_007D04_DC_HOT_PLUG_DETECT1_SENSE(tmp))
			connected = true;
		break;
	case RADEON_HPD_2:
		tmp = RREG32(R_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS);
		if (G_007D14_DC_HOT_PLUG_DETECT2_SENSE(tmp))
			connected = true;
		break;
	default:
		break;
	}
	return connected;
}
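
/*
 * Arm the HPD interrupt polarity for the opposite of the current sense
 * state, so that the next transition (connect or disconnect) raises an
 * interrupt.
 */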

void rs600_hpd_set_polarity(struct radeon_device *rdev,
			    enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = rs600_hpd_sense(rdev, hpd);

	switch (hpd) {
	case RADEON_HPD_1:
		tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL);
		if (connected)
			tmp &= ~S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(1);
		else
			tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(1);
		WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_2:
		tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL);
		if (connected)
			tmp &= ~S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(1);
		else
			tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(1);
		WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
		break;
	default:
		break;
	}
}

void rs600_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		switch (radeon_connector->hpd.hpd) {
		case RADEON_HPD_1:
			WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL,
			       S_007D00_DC_HOT_PLUG_DETECT1_EN(1));
			rdev->irq.hpd[0] = true;
			break;
		case RADEON_HPD_2:
			WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL,
			       S_007D10_DC_HOT_PLUG_DETECT2_EN(1));
			rdev->irq.hpd[1] = true;
			break;
		default:
			break;
		}
		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
	}
	if (rdev->irq.installed)
		rs600_irq_set(rdev);
}

void rs600_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		switch (radeon_connector->hpd.hpd) {
		case RADEON_HPD_1:
			WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL,
			       S_007D00_DC_HOT_PLUG_DETECT1_EN(0));
			rdev->irq.hpd[0] = false;
			break;
		case RADEON_HPD_2:
			WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL,
			       S_007D10_DC_HOT_PLUG_DETECT2_EN(0));
			rdev->irq.hpd[1] = false;
			break;
		default:
			break;
		}
	}
}

void rs600_bm_disable(struct radeon_device *rdev)
{
	u32 tmp;

	/* disable bus mastering */
	pci_read_config_word(rdev->pdev, 0x4, (u16 *)&tmp);
	pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB);
	mdelay(1);
}

int rs600_asic_reset(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 status, tmp;
	int ret = 0;

	status = RREG32(R_000E40_RBBM_STATUS);
	if (!G_000E40_GUI_ACTIVE(status)) {
		return 0;
	}
	/* Stop all MC clients */
	rv515_mc_stop(rdev, &save);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* stop CP */
	WREG32(RADEON_CP_CSQ_CNTL, 0);
	tmp = RREG32(RADEON_CP_RB_CNTL);
	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
	WREG32(RADEON_CP_RB_RPTR_WR, 0);
	WREG32(RADEON_CP_RB_WPTR, 0);
	WREG32(RADEON_CP_RB_CNTL, tmp);
	pci_save_state(rdev->pdev);
	/* disable bus mastering */
	rs600_bm_disable(rdev);
	/* reset GA+VAP */
	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) |
					 S_0000F0_SOFT_RESET_GA(1));
	RREG32(R_0000F0_RBBM_SOFT_RESET);
	mdelay(500);
	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
	mdelay(1);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* reset CP */
	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
	RREG32(R_0000F0_RBBM_SOFT_RESET);
	mdelay(500);
	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
	mdelay(1);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* reset MC */
	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_MC(1));
	RREG32(R_0000F0_RBBM_SOFT_RESET);
	mdelay(500);
	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
	mdelay(1);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* restore PCI & busmastering */
	pci_restore_state(rdev->pdev);
	/* Check if GPU is idle */
	if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
		dev_err(rdev->dev, "failed to reset GPU\n");
		rdev->gpu_lockup = true;
		ret = -1;
	} else
		dev_info(rdev->dev, "GPU reset succeeded\n");
	rv515_mc_resume(rdev, &save);
	return ret;
}

/*
 * GART.
 */
void rs600_gart_tlb_flush(struct radeon_device *rdev)
{
	uint32_t tmp;

	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
	tmp &= C_000100_INVALIDATE_ALL_L1_TLBS & C_000100_INVALIDATE_L2_CACHE;
	WREG32_MC(R_000100_MC_PT0_CNTL, tmp);

	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
	tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) | S_000100_INVALIDATE_L2_CACHE(1);
	WREG32_MC(R_000100_MC_PT0_CNTL, tmp);

	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
	tmp &= C_000100_INVALIDATE_ALL_L1_TLBS & C_000100_INVALIDATE_L2_CACHE;
	WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
}

int rs600_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.robj) {
		WARN(1, "RS600 GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r) {
		return r;
	}
	/* 8 bytes (one 64-bit PTE) per GPU page */
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
	return radeon_gart_table_vram_alloc(rdev);
}

static int rs600_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r, i;

	if (rdev->gart.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);
	/* Enable bus master */
	tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
	WREG32(RADEON_BUS_CNTL, tmp);
	/* FIXME: setup default page */
	WREG32_MC(R_000100_MC_PT0_CNTL,
		  (S_000100_EFFECTIVE_L2_CACHE_SIZE(6) |
		   S_000100_EFFECTIVE_L2_QUEUE_SIZE(6)));

	for (i = 0; i < 19; i++) {
		WREG32_MC(R_00016C_MC_PT0_CLIENT0_CNTL + i,
			  S_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(1) |
			  S_00016C_SYSTEM_ACCESS_MODE_MASK(
				  V_00016C_SYSTEM_ACCESS_MODE_NOT_IN_SYS) |
			  S_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS(
				  V_00016C_SYSTEM_APERTURE_UNMAPPED_PASSTHROUGH) |
			  S_00016C_EFFECTIVE_L1_CACHE_SIZE(3) |
			  S_00016C_ENABLE_FRAGMENT_PROCESSING(1) |
			  S_00016C_EFFECTIVE_L1_QUEUE_SIZE(3));
	}
	/* enable first context */
	WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL,
		  S_000102_ENABLE_PAGE_TABLE(1) |
		  S_000102_PAGE_TABLE_DEPTH(V_000102_PAGE_TABLE_FLAT));

	/* disable all other contexts */
	for (i = 1; i < 8; i++)
		WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL + i, 0);

	/* setup the page table */
	WREG32_MC(R_00012C_MC_PT0_CONTEXT0_FLAT_BASE_ADDR,
		  rdev->gart.table_addr);
	WREG32_MC(R_00013C_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_start);
	WREG32_MC(R_00014C_MC_PT0_CONTEXT0_FLAT_END_ADDR, rdev->mc.gtt_end);
	WREG32_MC(R_00011C_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0);

	/* System context maps to VRAM space */
	WREG32_MC(R_000112_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start);
	WREG32_MC(R_000114_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end);

	/* enable page tables */
	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
	WREG32_MC(R_000100_MC_PT0_CNTL, (tmp | S_000100_ENABLE_PT(1)));
	tmp = RREG32_MC(R_000009_MC_CNTL1);
	WREG32_MC(R_000009_MC_CNTL1, (tmp | S_000009_ENABLE_PAGE_TABLES(1)));
	rs600_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}

void rs600_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;

	/* FIXME: disable out of gart access */
	WREG32_MC(R_000100_MC_PT0_CNTL, 0);
	tmp = RREG32_MC(R_000009_MC_CNTL1);
	WREG32_MC(R_000009_MC_CNTL1, tmp & C_000009_ENABLE_PAGE_TABLES);
	radeon_gart_table_vram_unpin(rdev);
}

void rs600_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	rs600_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
}

#define R600_PTE_VALID		(1 << 0)
#define R600_PTE_SYSTEM		(1 << 1)
#define R600_PTE_SNOOPED	(1 << 2)
#define R600_PTE_READABLE	(1 << 5)
#define R600_PTE_WRITEABLE	(1 << 6)
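
/*
 * With the flags above, a fully populated RS600/R600-style PTE looks
 * like this (64-bit entry, attributes in the low bits):
 *
 *   bits [63:12]  4 KiB-aligned system page address
 *   bit  6        writeable
 *   bit  5        readable
 *   bit  2        snooped
 *   bit  1        system page (backed by system memory)
 *   bit  0        valid
 */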

int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
	void __iomem *ptr = (void *)rdev->gart.ptr;

	/* valid indices are 0 .. num_gpu_pages - 1 */
	if (i < 0 || i >= rdev->gart.num_gpu_pages) {
		return -EINVAL;
	}
	/* keep the page-aligned address and tag on the attribute bits */
	addr = addr & 0xFFFFFFFFFFFFF000ULL;
	addr |= R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED;
	addr |= R600_PTE_READABLE | R600_PTE_WRITEABLE;
	writeq(addr, ptr + (i * 8));
	return 0;
}

int rs600_irq_set(struct radeon_device *rdev)
{
	uint32_t tmp = 0;
	uint32_t mode_int = 0;
	u32 hpd1 = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL) &
		~S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(1);
	u32 hpd2 = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL) &
		~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);

	if (!rdev->irq.installed) {
		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
		WREG32(R_000040_GEN_INT_CNTL, 0);
		return -EINVAL;
	}
	if (rdev->irq.sw_int) {
		tmp |= S_000040_SW_INT_EN(1);
	}
	if (rdev->irq.gui_idle) {
		tmp |= S_000040_GUI_IDLE(1);
	}
	if (rdev->irq.crtc_vblank_int[0] ||
	    rdev->irq.pflip[0]) {
		mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1);
	}
	if (rdev->irq.crtc_vblank_int[1] ||
	    rdev->irq.pflip[1]) {
		mode_int |= S_006540_D2MODE_VBLANK_INT_MASK(1);
	}
	if (rdev->irq.hpd[0]) {
		hpd1 |= S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(1);
	}
	if (rdev->irq.hpd[1]) {
		hpd2 |= S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
	}
	WREG32(R_000040_GEN_INT_CNTL, tmp);
	WREG32(R_006540_DxMODE_INT_MASK, mode_int);
	WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
	WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
	return 0;
}

static inline u32 rs600_irq_ack(struct radeon_device *rdev)
{
	uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS);
	uint32_t irq_mask = S_000044_SW_INT(1);
	u32 tmp;

	/* the interrupt works, but the status bit is permanently asserted */
	if (rdev->irq.gui_idle && radeon_gui_idle(rdev)) {
		if (!rdev->irq.gui_idle_acked)
			irq_mask |= S_000044_GUI_IDLE_STAT(1);
	}

	if (G_000044_DISPLAY_INT_STAT(irqs)) {
		rdev->irq.stat_regs.r500.disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS);
		if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			WREG32(R_006534_D1MODE_VBLANK_STATUS,
			       S_006534_D1MODE_VBLANK_ACK(1));
		}
		if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			WREG32(R_006D34_D2MODE_VBLANK_STATUS,
			       S_006D34_D2MODE_VBLANK_ACK(1));
		}
		if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL);
			tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_ACK(1);
			WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
		}
		if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL);
			tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_ACK(1);
			WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
		}
	} else {
		rdev->irq.stat_regs.r500.disp_int = 0;
	}

	if (irqs) {
		WREG32(R_000044_GEN_INT_STATUS, irqs);
	}
	return irqs & irq_mask;
}

void rs600_irq_disable(struct radeon_device *rdev)
{
	WREG32(R_000040_GEN_INT_CNTL, 0);
	WREG32(R_006540_DxMODE_INT_MASK, 0);
	/* Wait and acknowledge irq */
	mdelay(1);
	rs600_irq_ack(rdev);
}
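
/*
 * Main interrupt handler: acknowledge and service pending sources in a
 * loop until the status reads back clean, then re-arm MSI delivery,
 * which these chips require after each interrupt (RS600/RS690/RS740
 * via BUS_CNTL, other families via MSI_REARM_EN).
 */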

int rs600_irq_process(struct radeon_device *rdev)
{
	u32 status, msi_rearm;
	bool queue_hotplug = false;

	/* reset gui idle ack. the status bit is broken */
	rdev->irq.gui_idle_acked = false;

	status = rs600_irq_ack(rdev);
	if (!status && !rdev->irq.stat_regs.r500.disp_int) {
		return IRQ_NONE;
	}
	while (status || rdev->irq.stat_regs.r500.disp_int) {
		/* SW interrupt */
		if (G_000044_SW_INT(status)) {
			radeon_fence_process(rdev);
		}
		/* GUI idle */
		if (G_000040_GUI_IDLE(status)) {
			rdev->irq.gui_idle_acked = true;
			rdev->pm.gui_idle = true;
			wake_up(&rdev->irq.idle_queue);
		}
		/* Vertical blank interrupts */
		if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			if (rdev->irq.crtc_vblank_int[0]) {
				drm_handle_vblank(rdev->ddev, 0);
				rdev->pm.vblank_sync = true;
				wake_up(&rdev->irq.vblank_queue);
			}
			if (rdev->irq.pflip[0])
				radeon_crtc_handle_flip(rdev, 0);
		}
		if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			if (rdev->irq.crtc_vblank_int[1]) {
				drm_handle_vblank(rdev->ddev, 1);
				rdev->pm.vblank_sync = true;
				wake_up(&rdev->irq.vblank_queue);
			}
			if (rdev->irq.pflip[1])
				radeon_crtc_handle_flip(rdev, 1);
		}
		if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			queue_hotplug = true;
			DRM_DEBUG("HPD1\n");
		}
		if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			queue_hotplug = true;
			DRM_DEBUG("HPD2\n");
		}
		status = rs600_irq_ack(rdev);
	}
	/* reset gui idle ack. the status bit is broken */
	rdev->irq.gui_idle_acked = false;
	if (queue_hotplug)
		schedule_work(&rdev->hotplug_work);
	if (rdev->msi_enabled) {
		switch (rdev->family) {
		case CHIP_RS600:
		case CHIP_RS690:
		case CHIP_RS740:
			msi_rearm = RREG32(RADEON_BUS_CNTL) & ~RS600_MSI_REARM;
			WREG32(RADEON_BUS_CNTL, msi_rearm);
			WREG32(RADEON_BUS_CNTL, msi_rearm | RS600_MSI_REARM);
			break;
		default:
			msi_rearm = RREG32(RADEON_MSI_REARM_EN) & ~RV370_MSI_REARM_EN;
			WREG32(RADEON_MSI_REARM_EN, msi_rearm);
			WREG32(RADEON_MSI_REARM_EN, msi_rearm | RV370_MSI_REARM_EN);
			break;
		}
	}
	return IRQ_HANDLED;
}

u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc)
{
	if (crtc == 0)
		return RREG32(R_0060A4_D1CRTC_STATUS_FRAME_COUNT);
	else
		return RREG32(R_0068A4_D2CRTC_STATUS_FRAME_COUNT);
}

int rs600_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (G_000000_MC_IDLE(RREG32_MC(R_000000_MC_STATUS)))
			return 0;
		udelay(1);
	}
	return -1;
}

void rs600_gpu_init(struct radeon_device *rdev)
{
	r420_pipes_init(rdev);
	/* Wait for mc idle */
	if (rs600_mc_wait_for_idle(rdev))
		dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
}

void rs600_mc_init(struct radeon_device *rdev)
{
	u64 base;

	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
	rdev->mc.vram_is_ddr = true;
	rdev->mc.vram_width = 128;
	rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
	rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
	rdev->mc.visible_vram_size = rdev->mc.aper_size;
	rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
	base = RREG32_MC(R_000004_MC_FB_LOCATION);
	base = G_000004_MC_FB_START(base) << 16;
	radeon_vram_location(rdev, &rdev->mc, base);
	rdev->mc.gtt_base_align = 0;
	radeon_gtt_location(rdev, &rdev->mc);
	radeon_update_bandwidth_info(rdev);
}

void rs600_bandwidth_update(struct radeon_device *rdev)
{
	struct drm_display_mode *mode0 = NULL;
	struct drm_display_mode *mode1 = NULL;
	u32 d1mode_priority_a_cnt, d2mode_priority_a_cnt;
	/* FIXME: implement full support */

	radeon_update_display_priority(rdev);

	if (rdev->mode_info.crtcs[0]->base.enabled)
		mode0 = &rdev->mode_info.crtcs[0]->base.mode;
	if (rdev->mode_info.crtcs[1]->base.enabled)
		mode1 = &rdev->mode_info.crtcs[1]->base.mode;

	rs690_line_buffer_adjust(rdev, mode0, mode1);

	if (rdev->disp_priority == 2) {
		d1mode_priority_a_cnt = RREG32(R_006548_D1MODE_PRIORITY_A_CNT);
		d2mode_priority_a_cnt = RREG32(R_006D48_D2MODE_PRIORITY_A_CNT);
		d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
		d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
		WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
		WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
		WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
		WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
	}
}
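
/*
 * MC registers are not directly CPU-visible on this chip; they are
 * reached through an index/data pair. Program the register offset
 * (plus the ARB0 client select, and WR_EN for writes) into
 * MC_IND_INDEX, then access MC_IND_DATA.
 */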

uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
	WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
	       S_000070_MC_IND_CITF_ARB0(1));
	return RREG32(R_000074_MC_IND_DATA);
}

void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
	       S_000070_MC_IND_CITF_ARB0(1) | S_000070_MC_IND_WR_EN(1));
	WREG32(R_000074_MC_IND_DATA, v);
}

void rs600_debugfs(struct radeon_device *rdev)
{
	if (r100_debugfs_rbbm_init(rdev))
		DRM_ERROR("Failed to register debugfs file for RBBM !\n");
}

void rs600_set_safe_registers(struct radeon_device *rdev)
{
	rdev->config.r300.reg_safe_bm = rs600_reg_safe_bm;
	rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(rs600_reg_safe_bm);
}

static void rs600_mc_program(struct radeon_device *rdev)
{
	struct rv515_mc_save save;

	/* Stop all MC clients */
	rv515_mc_stop(rdev, &save);

	/* Wait for mc idle */
	if (rs600_mc_wait_for_idle(rdev))
		dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");

	/* FIXME: what does AGP mean for this chipset? */
	WREG32_MC(R_000005_MC_AGP_LOCATION, 0x0FFFFFFF);
	WREG32_MC(R_000006_AGP_BASE, 0);
	WREG32_MC(R_000007_AGP_BASE_2, 0);
	/* Program MC */
	WREG32_MC(R_000004_MC_FB_LOCATION,
		  S_000004_MC_FB_START(rdev->mc.vram_start >> 16) |
		  S_000004_MC_FB_TOP(rdev->mc.vram_end >> 16));
	WREG32(R_000134_HDP_FB_LOCATION,
	       S_000134_HDP_FB_START(rdev->mc.vram_start >> 16));

	rv515_mc_resume(rdev, &save);
}
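
/*
 * Bring-up order below matters: program the MC, restart the clocks,
 * initialize the GPU configuration, then enable the GART before the
 * writeback buffer, interrupts, CP ring and IB pool, since the later
 * stages rely on the earlier ones.
 */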

static int rs600_startup(struct radeon_device *rdev)
{
	int r;

	rs600_mc_program(rdev);
	/* Resume clock */
	rv515_clock_startup(rdev);
	/* Initialize GPU configuration (# pipes, ...) */
	rs600_gpu_init(rdev);
	/* Initialize GART (initialize after TTM so we can allocate
	 * memory through TTM but finalize after TTM) */
	r = rs600_gart_enable(rdev);
	if (r)
		return r;

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	/* Enable IRQ */
	rs600_irq_set(rdev);
	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
	/* 1M ring buffer */
	r = r100_cp_init(rdev, 1024 * 1024);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
		return r;
	}
	r = r100_ib_init(rdev);
	if (r) {
		dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
		return r;
	}

	r = r600_audio_init(rdev);
	if (r) {
		dev_err(rdev->dev, "failed initializing audio\n");
		return r;
	}

	return 0;
}

int rs600_resume(struct radeon_device *rdev)
{
	/* Make sure the GART is disabled */
	rs600_gart_disable(rdev);
	/* Resume clock before doing reset */
	rv515_clock_startup(rdev);
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			 RREG32(R_000E40_RBBM_STATUS),
			 RREG32(R_0007C0_CP_STAT));
	}
	/* post */
	atom_asic_init(rdev->mode_info.atom_context);
	/* Resume clock after posting */
	rv515_clock_startup(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	return rs600_startup(rdev);
}

int rs600_suspend(struct radeon_device *rdev)
{
	r600_audio_fini(rdev);
	r100_cp_disable(rdev);
	radeon_wb_disable(rdev);
	rs600_irq_disable(rdev);
	rs600_gart_disable(rdev);
	return 0;
}

void rs600_fini(struct radeon_device *rdev)
{
	r600_audio_fini(rdev);
	r100_cp_fini(rdev);
	radeon_wb_fini(rdev);
	r100_ib_fini(rdev);
	radeon_gem_fini(rdev);
	rs600_gart_fini(rdev);
	radeon_irq_kms_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}

int rs600_init(struct radeon_device *rdev)
{
	int r;

	/* Disable VGA */
	rv515_vga_render_disable(rdev);
	/* Initialize scratch registers */
	radeon_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* restore some register to sane defaults */
	r100_restore_sanity(rdev);
	/* BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	if (rdev->is_atom_bios) {
		r = radeon_atombios_init(rdev);
		if (r)
			return r;
	} else {
		dev_err(rdev->dev, "Expecting atombios for RS600 GPU\n");
		return -EINVAL;
	}
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev,
			 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			 RREG32(R_000E40_RBBM_STATUS),
			 RREG32(R_0007C0_CP_STAT));
	}
	/* check if the card is posted */
	if (radeon_boot_test_post_card(rdev) == false)
		return -EINVAL;

	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* initialize memory controller */
	rs600_mc_init(rdev);
	rs600_debugfs(rdev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	r = radeon_irq_kms_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;
	r = rs600_gart_init(rdev);
	if (r)
		return r;
	rs600_set_safe_registers(rdev);
	rdev->accel_working = true;
	r = rs600_startup(rdev);
	if (r) {
		/* Something went wrong with the accel init, so stop accel */
		dev_err(rdev->dev, "Disabling GPU acceleration\n");
		r100_cp_fini(rdev);
		radeon_wb_fini(rdev);
		r100_ib_fini(rdev);
		rs600_gart_fini(rdev);
		radeon_irq_kms_fini(rdev);
		rdev->accel_working = false;
	}
	return 0;
}