/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
/* RS600 / Radeon X1250/X1270 integrated GPU
 *
 * This file gathers the functions specific to the RS600, which is the IGP
 * of the X1250/X1270 family supporting Intel CPUs (while the RS690/RS740
 * is the X1250/X1270 variant supporting AMD CPUs). The display engine is
 * the AVIVO one, the BIOS is an ATOM BIOS, and the 3D block is the one of
 * the R4XX family. The GART differs from the RS400 one and is very close
 * to the one of the R600 family (R600 likely being an evolution of the
 * RS600 GART block).
 */
#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "atom.h"
#include "rs600d.h"

#include "rs600_reg_safe.h"

void rs600_gpu_init(struct radeon_device *rdev);
int rs600_mc_wait_for_idle(struct radeon_device *rdev);

void rs600_pre_page_flip(struct radeon_device *rdev, int crtc)
{
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
	u32 tmp;

	/* make sure the flip happens during vblank rather than hblank */
	tmp = RREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset);
	tmp &= ~AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN;
	WREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp);

	/* set pageflip to happen anywhere in the vblank interval */
	WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0);

	/* enable the pflip int */
	radeon_irq_kms_pflip_irq_get(rdev, crtc);
}

void rs600_post_page_flip(struct radeon_device *rdev, int crtc)
{
	/* disable the pflip int */
	radeon_irq_kms_pflip_irq_put(rdev, crtc);
}

u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
	u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);

	/* Lock the graphics update lock */
	tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
	WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);

	/* update the scanout addresses */
	WREG32(AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
	       (u32)crtc_base);
	WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
	       (u32)crtc_base);

	/* Wait for update_pending to go high. */
	while (!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING));
	DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");

	/* Unlock the lock, so double-buffering can take place inside vblank */
	tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK;
	WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);

	/* Return current update_pending status: */
	return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING;
}
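
/* Power management.
 *
 * rs600_pm_misc() below applies the requested power state: it first
 * switches the voltage (via a GPIO toggle, or through the ATOM tables
 * for VDDC), then programs the dynamic engine-clock gating. The
 * REDUCED_POWER_SCLK_HILEN/LOLEN values of 2 and 4 presumably pair with
 * the divide-by-2/divide-by-4 MISCINFO flags that select them.
 */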
void rs600_pm_misc(struct radeon_device *rdev)
{
	int requested_index = rdev->pm.requested_power_state_index;
	struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
	struct radeon_voltage *voltage = &ps->clock_info[0].voltage;
	u32 tmp, dyn_pwrmgt_sclk_length, dyn_sclk_vol_cntl;
	u32 hdp_dyn_cntl, /*mc_host_dyn_cntl,*/ dyn_backbias_cntl;

	if ((voltage->type == VOLTAGE_GPIO) && (voltage->gpio.valid)) {
		if (ps->misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
			tmp = RREG32(voltage->gpio.reg);
			if (voltage->active_high)
				tmp |= voltage->gpio.mask;
			else
				tmp &= ~(voltage->gpio.mask);
			WREG32(voltage->gpio.reg, tmp);
			if (voltage->delay)
				udelay(voltage->delay);
		} else {
			tmp = RREG32(voltage->gpio.reg);
			if (voltage->active_high)
				tmp &= ~voltage->gpio.mask;
			else
				tmp |= voltage->gpio.mask;
			WREG32(voltage->gpio.reg, tmp);
			if (voltage->delay)
				udelay(voltage->delay);
		}
	} else if (voltage->type == VOLTAGE_VDDC)
		radeon_atom_set_voltage(rdev, voltage->vddc_id);

	dyn_pwrmgt_sclk_length = RREG32_PLL(DYN_PWRMGT_SCLK_LENGTH);
	dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_HILEN(0xf);
	dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_LOLEN(0xf);
	if (ps->misc & ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN) {
		if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2) {
			dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(2);
			dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(2);
		} else if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4) {
			dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(4);
			dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(4);
		}
	} else {
		dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(1);
		dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(1);
	}
	WREG32_PLL(DYN_PWRMGT_SCLK_LENGTH, dyn_pwrmgt_sclk_length);

	dyn_sclk_vol_cntl = RREG32_PLL(DYN_SCLK_VOL_CNTL);
	if (ps->misc & ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN) {
		dyn_sclk_vol_cntl |= IO_CG_VOLTAGE_DROP;
		if (voltage->delay) {
			dyn_sclk_vol_cntl |= VOLTAGE_DROP_SYNC;
			dyn_sclk_vol_cntl |= VOLTAGE_DELAY_SEL(voltage->delay);
		} else
			dyn_sclk_vol_cntl &= ~VOLTAGE_DROP_SYNC;
	} else
		dyn_sclk_vol_cntl &= ~IO_CG_VOLTAGE_DROP;
	WREG32_PLL(DYN_SCLK_VOL_CNTL, dyn_sclk_vol_cntl);

	hdp_dyn_cntl = RREG32_PLL(HDP_DYN_CNTL);
	if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN)
		hdp_dyn_cntl &= ~HDP_FORCEON;
	else
		hdp_dyn_cntl |= HDP_FORCEON;
	WREG32_PLL(HDP_DYN_CNTL, hdp_dyn_cntl);
#if 0
	/* mc_host_dyn seems to cause hangs from time to time */
	mc_host_dyn_cntl = RREG32_PLL(MC_HOST_DYN_CNTL);
	if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_MC_HOST_BLOCK_EN)
		mc_host_dyn_cntl &= ~MC_HOST_FORCEON;
	else
		mc_host_dyn_cntl |= MC_HOST_FORCEON;
	WREG32_PLL(MC_HOST_DYN_CNTL, mc_host_dyn_cntl);
#endif
	dyn_backbias_cntl = RREG32_PLL(DYN_BACKBIAS_CNTL);
	if (ps->misc & ATOM_PM_MISCINFO2_DYNAMIC_BACK_BIAS_EN)
		dyn_backbias_cntl |= IO_CG_BACKBIAS_EN;
	else
		dyn_backbias_cntl &= ~IO_CG_BACKBIAS_EN;
	WREG32_PLL(DYN_BACKBIAS_CNTL, dyn_backbias_cntl);

	/* set pcie lanes */
	if ((rdev->flags & RADEON_IS_PCIE) &&
	    !(rdev->flags & RADEON_IS_IGP) &&
	    rdev->asic->set_pcie_lanes &&
	    (ps->pcie_lanes !=
	     rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) {
		radeon_set_pcie_lanes(rdev, ps->pcie_lanes);
		DRM_DEBUG("Setting: p: %d\n", ps->pcie_lanes);
	}
}

void rs600_pm_prepare(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 tmp;

	/* disable any active CRTCs */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			tmp = RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset);
			tmp |= AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
			WREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
		}
	}
}

void rs600_pm_finish(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 tmp;

	/* enable any active CRTCs */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			tmp = RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset);
			tmp &= ~AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
			WREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
		}
	}
}
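
/* Note: rs600_hpd_set_polarity() below programs each HPD pin's interrupt
 * polarity to the opposite of its current sense state, so the next
 * transition (plug or unplug) raises an interrupt either way.
 */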
/* hpd for digital panel detect/disconnect */
bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = false;

	switch (hpd) {
	case RADEON_HPD_1:
		tmp = RREG32(R_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS);
		if (G_007D04_DC_HOT_PLUG_DETECT1_SENSE(tmp))
			connected = true;
		break;
	case RADEON_HPD_2:
		tmp = RREG32(R_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS);
		if (G_007D14_DC_HOT_PLUG_DETECT2_SENSE(tmp))
			connected = true;
		break;
	default:
		break;
	}
	return connected;
}

void rs600_hpd_set_polarity(struct radeon_device *rdev,
			    enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = rs600_hpd_sense(rdev, hpd);

	switch (hpd) {
	case RADEON_HPD_1:
		tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL);
		if (connected)
			tmp &= ~S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(1);
		else
			tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(1);
		WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_2:
		tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL);
		if (connected)
			tmp &= ~S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(1);
		else
			tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(1);
		WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
		break;
	default:
		break;
	}
}

void rs600_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		switch (radeon_connector->hpd.hpd) {
		case RADEON_HPD_1:
			WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL,
			       S_007D00_DC_HOT_PLUG_DETECT1_EN(1));
			rdev->irq.hpd[0] = true;
			break;
		case RADEON_HPD_2:
			WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL,
			       S_007D10_DC_HOT_PLUG_DETECT2_EN(1));
			rdev->irq.hpd[1] = true;
			break;
		default:
			break;
		}
	}
	if (rdev->irq.installed)
		rs600_irq_set(rdev);
}
void rs600_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		switch (radeon_connector->hpd.hpd) {
		case RADEON_HPD_1:
			WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL,
			       S_007D00_DC_HOT_PLUG_DETECT1_EN(0));
			rdev->irq.hpd[0] = false;
			break;
		case RADEON_HPD_2:
			WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL,
			       S_007D10_DC_HOT_PLUG_DETECT2_EN(0));
			rdev->irq.hpd[1] = false;
			break;
		default:
			break;
		}
	}
}

void rs600_bm_disable(struct radeon_device *rdev)
{
	u32 tmp;

	/* disable bus mastering: clear the bus-master enable bit (bit 2)
	 * of the PCI command register at config offset 0x4 */
	pci_read_config_word(rdev->pdev, 0x4, (u16 *)&tmp);
	pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB);
	mdelay(1);
}

int rs600_asic_reset(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 status, tmp;
	int ret = 0;

	status = RREG32(R_000E40_RBBM_STATUS);
	if (!G_000E40_GUI_ACTIVE(status)) {
		return 0;
	}
	/* Stop all MC clients */
	rv515_mc_stop(rdev, &save);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* stop CP */
	WREG32(RADEON_CP_CSQ_CNTL, 0);
	tmp = RREG32(RADEON_CP_RB_CNTL);
	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
	WREG32(RADEON_CP_RB_RPTR_WR, 0);
	WREG32(RADEON_CP_RB_WPTR, 0);
	WREG32(RADEON_CP_RB_CNTL, tmp);
	pci_save_state(rdev->pdev);
	/* disable bus mastering */
	rs600_bm_disable(rdev);
	/* reset GA+VAP */
	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) |
					 S_0000F0_SOFT_RESET_GA(1));
	RREG32(R_0000F0_RBBM_SOFT_RESET);
	mdelay(500);
	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
	mdelay(1);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* reset CP */
	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
	RREG32(R_0000F0_RBBM_SOFT_RESET);
	mdelay(500);
	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
	mdelay(1);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* reset MC */
	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_MC(1));
	RREG32(R_0000F0_RBBM_SOFT_RESET);
	mdelay(500);
	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
	mdelay(1);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* restore PCI & busmastering */
	pci_restore_state(rdev->pdev);
	/* Check if GPU is idle */
	if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
		dev_err(rdev->dev, "failed to reset GPU\n");
		rdev->gpu_lockup = true;
		ret = -1;
	} else
		dev_info(rdev->dev, "GPU reset succeeded\n");
	rv515_mc_resume(rdev, &save);
	return ret;
}

/*
 * GART.
 */
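/* The RS600 GART, as set up below, uses a single flat (one-level) page
 * table in VRAM with R600-style 64-bit entries. Only page-table context 0
 * is enabled; it covers the GTT aperture, the seven other contexts are
 * disabled, and unmapped accesses within the system aperture are set to
 * pass through (see the per-client setup in rs600_gart_enable()).
 */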
void rs600_gart_tlb_flush(struct radeon_device *rdev)
{
	uint32_t tmp;

	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
	tmp &= C_000100_INVALIDATE_ALL_L1_TLBS & C_000100_INVALIDATE_L2_CACHE;
	WREG32_MC(R_000100_MC_PT0_CNTL, tmp);

	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
	tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) | S_000100_INVALIDATE_L2_CACHE(1);
	WREG32_MC(R_000100_MC_PT0_CNTL, tmp);

	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
	tmp &= C_000100_INVALIDATE_ALL_L1_TLBS & C_000100_INVALIDATE_L2_CACHE;
	WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
}

int rs600_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.table.vram.robj) {
		WARN(1, "RS600 GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r) {
		return r;
	}
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
	return radeon_gart_table_vram_alloc(rdev);
}

int rs600_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r, i;

	if (rdev->gart.table.vram.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);
	/* Enable bus master */
	tmp = RREG32(R_00004C_BUS_CNTL) & C_00004C_BUS_MASTER_DIS;
	WREG32(R_00004C_BUS_CNTL, tmp);
	/* FIXME: setup default page */
	WREG32_MC(R_000100_MC_PT0_CNTL,
		  (S_000100_EFFECTIVE_L2_CACHE_SIZE(6) |
		   S_000100_EFFECTIVE_L2_QUEUE_SIZE(6)));

	for (i = 0; i < 19; i++) {
		WREG32_MC(R_00016C_MC_PT0_CLIENT0_CNTL + i,
			  S_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(1) |
			  S_00016C_SYSTEM_ACCESS_MODE_MASK(
				  V_00016C_SYSTEM_ACCESS_MODE_NOT_IN_SYS) |
			  S_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS(
				  V_00016C_SYSTEM_APERTURE_UNMAPPED_PASSTHROUGH) |
			  S_00016C_EFFECTIVE_L1_CACHE_SIZE(3) |
			  S_00016C_ENABLE_FRAGMENT_PROCESSING(1) |
			  S_00016C_EFFECTIVE_L1_QUEUE_SIZE(3));
	}
	/* enable first context */
	WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL,
		  S_000102_ENABLE_PAGE_TABLE(1) |
		  S_000102_PAGE_TABLE_DEPTH(V_000102_PAGE_TABLE_FLAT));

	/* disable all other contexts */
	for (i = 1; i < 8; i++)
		WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL + i, 0);

	/* setup the page table */
	WREG32_MC(R_00012C_MC_PT0_CONTEXT0_FLAT_BASE_ADDR,
		  rdev->gart.table_addr);
	WREG32_MC(R_00013C_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_start);
	WREG32_MC(R_00014C_MC_PT0_CONTEXT0_FLAT_END_ADDR, rdev->mc.gtt_end);
	WREG32_MC(R_00011C_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0);

	/* System context maps to VRAM space */
	WREG32_MC(R_000112_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start);
	WREG32_MC(R_000114_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end);

	/* enable page tables */
	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
	WREG32_MC(R_000100_MC_PT0_CNTL, (tmp | S_000100_ENABLE_PT(1)));
	tmp = RREG32_MC(R_000009_MC_CNTL1);
	WREG32_MC(R_000009_MC_CNTL1, (tmp | S_000009_ENABLE_PAGE_TABLES(1)));
	rs600_gart_tlb_flush(rdev);
	rdev->gart.ready = true;
	return 0;
}

void rs600_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;
	int r;

	/* FIXME: disable out of gart access */
	WREG32_MC(R_000100_MC_PT0_CNTL, 0);
	tmp = RREG32_MC(R_000009_MC_CNTL1);
	WREG32_MC(R_000009_MC_CNTL1, tmp & C_000009_ENABLE_PAGE_TABLES);
	if (rdev->gart.table.vram.robj) {
		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
		if (r == 0) {
			radeon_bo_kunmap(rdev->gart.table.vram.robj);
			radeon_bo_unpin(rdev->gart.table.vram.robj);
			radeon_bo_unreserve(rdev->gart.table.vram.robj);
		}
	}
}

void rs600_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	rs600_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
}
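
/* R600-style PTE layout, as encoded by rs600_gart_set_page() below: a
 * 4 KiB-aligned 64-bit system address ORed with the flag bits defined
 * here. For example, mapping system page 0x12345000 yields the entry
 * 0x12345000 | 0x67 (valid | system | snooped | readable | writeable)
 * = 0x0000000012345067.
 */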
#define R600_PTE_VALID     (1 << 0)
#define R600_PTE_SYSTEM    (1 << 1)
#define R600_PTE_SNOOPED   (1 << 2)
#define R600_PTE_READABLE  (1 << 5)
#define R600_PTE_WRITEABLE (1 << 6)

int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
	void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;

	/* valid entries are 0 .. num_gpu_pages - 1 */
	if (i < 0 || i >= rdev->gart.num_gpu_pages) {
		return -EINVAL;
	}
	addr = addr & 0xFFFFFFFFFFFFF000ULL;
	addr |= R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED;
	addr |= R600_PTE_READABLE | R600_PTE_WRITEABLE;
	writeq(addr, ptr + (i * 8));
	return 0;
}

int rs600_irq_set(struct radeon_device *rdev)
{
	uint32_t tmp = 0;
	uint32_t mode_int = 0;
	u32 hpd1 = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL) &
		~S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(1);
	u32 hpd2 = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL) &
		~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);

	if (!rdev->irq.installed) {
		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
		WREG32(R_000040_GEN_INT_CNTL, 0);
		return -EINVAL;
	}
	if (rdev->irq.sw_int) {
		tmp |= S_000040_SW_INT_EN(1);
	}
	if (rdev->irq.gui_idle) {
		tmp |= S_000040_GUI_IDLE(1);
	}
	if (rdev->irq.crtc_vblank_int[0] ||
	    rdev->irq.pflip[0]) {
		mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1);
	}
	if (rdev->irq.crtc_vblank_int[1] ||
	    rdev->irq.pflip[1]) {
		mode_int |= S_006540_D2MODE_VBLANK_INT_MASK(1);
	}
	if (rdev->irq.hpd[0]) {
		hpd1 |= S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(1);
	}
	if (rdev->irq.hpd[1]) {
		hpd2 |= S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
	}
	WREG32(R_000040_GEN_INT_CNTL, tmp);
	WREG32(R_006540_DxMODE_INT_MASK, mode_int);
	WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
	WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
	return 0;
}
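
/* rs600_irq_ack() below reads GEN_INT_STATUS, acks the display sources
 * individually (vblank and hotplug each have their own ACK bits), writes
 * the read status word back (presumably write-one-to-clear), and returns
 * the sources the caller should handle. The GUI idle status bit stays
 * asserted once set (see the comment in the function), hence the separate
 * gui_idle_acked software flag.
 */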
static inline u32 rs600_irq_ack(struct radeon_device *rdev)
{
	uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS);
	uint32_t irq_mask = S_000044_SW_INT(1);
	u32 tmp;

	/* the interrupt works, but the status bit is permanently asserted */
	if (rdev->irq.gui_idle && radeon_gui_idle(rdev)) {
		if (!rdev->irq.gui_idle_acked)
			irq_mask |= S_000044_GUI_IDLE_STAT(1);
	}

	if (G_000044_DISPLAY_INT_STAT(irqs)) {
		rdev->irq.stat_regs.r500.disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS);
		if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			WREG32(R_006534_D1MODE_VBLANK_STATUS,
			       S_006534_D1MODE_VBLANK_ACK(1));
		}
		if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			WREG32(R_006D34_D2MODE_VBLANK_STATUS,
			       S_006D34_D2MODE_VBLANK_ACK(1));
		}
		if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL);
			tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_ACK(1);
			WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
		}
		if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL);
			tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_ACK(1);
			WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
		}
	} else {
		rdev->irq.stat_regs.r500.disp_int = 0;
	}

	if (irqs) {
		WREG32(R_000044_GEN_INT_STATUS, irqs);
	}
	return irqs & irq_mask;
}

void rs600_irq_disable(struct radeon_device *rdev)
{
	WREG32(R_000040_GEN_INT_CNTL, 0);
	WREG32(R_006540_DxMODE_INT_MASK, 0);
	/* Wait and acknowledge irq */
	mdelay(1);
	rs600_irq_ack(rdev);
}
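
/* rs600_irq_process() loops, re-acking until no source is pending, and
 * dispatches to the fence, vblank/pageflip, GUI-idle and hotplug handlers.
 * With MSI enabled the message must then be rearmed by hand: on
 * RS600/RS690/RS740 by toggling RS600_MSI_REARM in BUS_CNTL, on other
 * chips via RADEON_MSI_REARM_EN.
 */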
int rs600_irq_process(struct radeon_device *rdev)
{
	u32 status, msi_rearm;
	bool queue_hotplug = false;

	/* reset gui idle ack. the status bit is broken */
	rdev->irq.gui_idle_acked = false;

	status = rs600_irq_ack(rdev);
	if (!status && !rdev->irq.stat_regs.r500.disp_int) {
		return IRQ_NONE;
	}
	while (status || rdev->irq.stat_regs.r500.disp_int) {
		/* SW interrupt */
		if (G_000044_SW_INT(status)) {
			radeon_fence_process(rdev);
		}
		/* GUI idle */
		if (G_000040_GUI_IDLE(status)) {
			rdev->irq.gui_idle_acked = true;
			rdev->pm.gui_idle = true;
			wake_up(&rdev->irq.idle_queue);
		}
		/* Vertical blank interrupts */
		if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			if (rdev->irq.crtc_vblank_int[0]) {
				drm_handle_vblank(rdev->ddev, 0);
				rdev->pm.vblank_sync = true;
				wake_up(&rdev->irq.vblank_queue);
			}
			if (rdev->irq.pflip[0])
				radeon_crtc_handle_flip(rdev, 0);
		}
		if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			if (rdev->irq.crtc_vblank_int[1]) {
				drm_handle_vblank(rdev->ddev, 1);
				rdev->pm.vblank_sync = true;
				wake_up(&rdev->irq.vblank_queue);
			}
			if (rdev->irq.pflip[1])
				radeon_crtc_handle_flip(rdev, 1);
		}
		if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			queue_hotplug = true;
			DRM_DEBUG("HPD1\n");
		}
		if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			queue_hotplug = true;
			DRM_DEBUG("HPD2\n");
		}
		status = rs600_irq_ack(rdev);
	}
	/* reset gui idle ack. the status bit is broken */
	rdev->irq.gui_idle_acked = false;
	if (queue_hotplug)
		schedule_work(&rdev->hotplug_work);
	if (rdev->msi_enabled) {
		switch (rdev->family) {
		case CHIP_RS600:
		case CHIP_RS690:
		case CHIP_RS740:
			msi_rearm = RREG32(RADEON_BUS_CNTL) & ~RS600_MSI_REARM;
			WREG32(RADEON_BUS_CNTL, msi_rearm);
			WREG32(RADEON_BUS_CNTL, msi_rearm | RS600_MSI_REARM);
			break;
		default:
			msi_rearm = RREG32(RADEON_MSI_REARM_EN) & ~RV370_MSI_REARM_EN;
			WREG32(RADEON_MSI_REARM_EN, msi_rearm);
			WREG32(RADEON_MSI_REARM_EN, msi_rearm | RV370_MSI_REARM_EN);
			break;
		}
	}
	return IRQ_HANDLED;
}

u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc)
{
	if (crtc == 0)
		return RREG32(R_0060A4_D1CRTC_STATUS_FRAME_COUNT);
	else
		return RREG32(R_0068A4_D2CRTC_STATUS_FRAME_COUNT);
}

int rs600_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (G_000000_MC_IDLE(RREG32_MC(R_000000_MC_STATUS)))
			return 0;
		udelay(1);
	}
	return -1;
}

void rs600_gpu_init(struct radeon_device *rdev)
{
	r420_pipes_init(rdev);
	/* Wait for mc idle */
	if (rs600_mc_wait_for_idle(rdev))
		dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
}

void rs600_mc_init(struct radeon_device *rdev)
{
	u64 base;

	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
	rdev->mc.vram_is_ddr = true;
	rdev->mc.vram_width = 128;
	rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
	rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
	rdev->mc.visible_vram_size = rdev->mc.aper_size;
	rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
	base = RREG32_MC(R_000004_MC_FB_LOCATION);
	base = G_000004_MC_FB_START(base) << 16;
	radeon_vram_location(rdev, &rdev->mc, base);
	rdev->mc.gtt_base_align = 0;
	radeon_gtt_location(rdev, &rdev->mc);
	radeon_update_bandwidth_info(rdev);
}

void rs600_bandwidth_update(struct radeon_device *rdev)
{
	struct drm_display_mode *mode0 = NULL;
	struct drm_display_mode *mode1 = NULL;
	u32 d1mode_priority_a_cnt, d2mode_priority_a_cnt;
	/* FIXME: implement full support */

	radeon_update_display_priority(rdev);

	if (rdev->mode_info.crtcs[0]->base.enabled)
		mode0 = &rdev->mode_info.crtcs[0]->base.mode;
	if (rdev->mode_info.crtcs[1]->base.enabled)
		mode1 = &rdev->mode_info.crtcs[1]->base.mode;

	rs690_line_buffer_adjust(rdev, mode0, mode1);

	if (rdev->disp_priority == 2) {
		d1mode_priority_a_cnt = RREG32(R_006548_D1MODE_PRIORITY_A_CNT);
		d2mode_priority_a_cnt = RREG32(R_006D48_D2MODE_PRIORITY_A_CNT);
		d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
		d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
		WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
		WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
		WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
		WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
	}
}
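
/* MC registers are reached indirectly through an index/data pair: the
 * register address (plus the arbiter bit and, for writes, a write-enable
 * bit) goes into MC_IND_INDEX and the value moves through MC_IND_DATA.
 * The RREG32_MC()/WREG32_MC() accessors used throughout this file resolve
 * to the two helpers below on this ASIC.
 */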
uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
	WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
	       S_000070_MC_IND_CITF_ARB0(1));
	return RREG32(R_000074_MC_IND_DATA);
}

void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
	       S_000070_MC_IND_CITF_ARB0(1) | S_000070_MC_IND_WR_EN(1));
	WREG32(R_000074_MC_IND_DATA, v);
}

void rs600_debugfs(struct radeon_device *rdev)
{
	if (r100_debugfs_rbbm_init(rdev))
		DRM_ERROR("Failed to register debugfs file for RBBM !\n");
}

void rs600_set_safe_registers(struct radeon_device *rdev)
{
	rdev->config.r300.reg_safe_bm = rs600_reg_safe_bm;
	rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(rs600_reg_safe_bm);
}

static void rs600_mc_program(struct radeon_device *rdev)
{
	struct rv515_mc_save save;

	/* Stop all MC clients */
	rv515_mc_stop(rdev, &save);

	/* Wait for mc idle */
	if (rs600_mc_wait_for_idle(rdev))
		dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");

	/* FIXME: What does AGP mean for such a chipset? */
	WREG32_MC(R_000005_MC_AGP_LOCATION, 0x0FFFFFFF);
	WREG32_MC(R_000006_AGP_BASE, 0);
	WREG32_MC(R_000007_AGP_BASE_2, 0);
	/* Program MC */
	WREG32_MC(R_000004_MC_FB_LOCATION,
		  S_000004_MC_FB_START(rdev->mc.vram_start >> 16) |
		  S_000004_MC_FB_TOP(rdev->mc.vram_end >> 16));
	WREG32(R_000134_HDP_FB_LOCATION,
	       S_000134_HDP_FB_START(rdev->mc.vram_start >> 16));

	rv515_mc_resume(rdev, &save);
}

static int rs600_startup(struct radeon_device *rdev)
{
	int r;

	rs600_mc_program(rdev);
	/* Resume clock */
	rv515_clock_startup(rdev);
	/* Initialize GPU configuration (# pipes, ...) */
	rs600_gpu_init(rdev);
	/* Initialize GART (initialize after TTM so we can allocate
	 * memory through TTM but finalize after TTM) */
	r = rs600_gart_enable(rdev);
	if (r)
		return r;

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	/* Enable IRQ */
	rs600_irq_set(rdev);
	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
	/* 1M ring buffer */
	r = r100_cp_init(rdev, 1024 * 1024);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
		return r;
	}
	r = r100_ib_init(rdev);
	if (r) {
		dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
		return r;
	}

	r = r600_audio_init(rdev);
	if (r) {
		dev_err(rdev->dev, "failed initializing audio\n");
		return r;
	}

	return 0;
}

int rs600_resume(struct radeon_device *rdev)
{
	/* Make sure the GART is disabled */
	rs600_gart_disable(rdev);
	/* Resume clock before doing reset */
	rv515_clock_startup(rdev);
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			 RREG32(R_000E40_RBBM_STATUS),
			 RREG32(R_0007C0_CP_STAT));
	}
	/* post */
	atom_asic_init(rdev->mode_info.atom_context);
	/* Resume clock after posting */
	rv515_clock_startup(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	return rs600_startup(rdev);
}
int rs600_suspend(struct radeon_device *rdev)
{
	r600_audio_fini(rdev);
	r100_cp_disable(rdev);
	radeon_wb_disable(rdev);
	rs600_irq_disable(rdev);
	rs600_gart_disable(rdev);
	return 0;
}

void rs600_fini(struct radeon_device *rdev)
{
	r600_audio_fini(rdev);
	r100_cp_fini(rdev);
	radeon_wb_fini(rdev);
	r100_ib_fini(rdev);
	radeon_gem_fini(rdev);
	rs600_gart_fini(rdev);
	radeon_irq_kms_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}

int rs600_init(struct radeon_device *rdev)
{
	int r;

	/* Disable VGA */
	rv515_vga_render_disable(rdev);
	/* Initialize scratch registers */
	radeon_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* restore some registers to sane defaults */
	r100_restore_sanity(rdev);
	/* BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	if (rdev->is_atom_bios) {
		r = radeon_atombios_init(rdev);
		if (r)
			return r;
	} else {
		dev_err(rdev->dev, "Expecting atombios for RS600 GPU\n");
		return -EINVAL;
	}
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev,
			 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			 RREG32(R_000E40_RBBM_STATUS),
			 RREG32(R_0007C0_CP_STAT));
	}
	/* check if the card is posted or not */
	if (radeon_boot_test_post_card(rdev) == false)
		return -EINVAL;

	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* initialize memory controller */
	rs600_mc_init(rdev);
	rs600_debugfs(rdev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	r = radeon_irq_kms_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;
	r = rs600_gart_init(rdev);
	if (r)
		return r;
	rs600_set_safe_registers(rdev);
	rdev->accel_working = true;
	r = rs600_startup(rdev);
	if (r) {
		/* Something went wrong with the accel init, so stop accel */
		dev_err(rdev->dev, "Disabling GPU acceleration\n");
		r100_cp_fini(rdev);
		radeon_wb_fini(rdev);
		r100_ib_fini(rdev);
		rs600_gart_fini(rdev);
		radeon_irq_kms_fini(rdev);
		rdev->accel_working = false;
	}
	return 0;
}