/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "drmP.h"
#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_i2c.h"
#include "cikd.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "atombios_crtc.h"
#include "atombios_encoders.h"
#include "amdgpu_pll.h"
#include "amdgpu_connectors.h"

#include "dce/dce_8_0_d.h"
#include "dce/dce_8_0_sh_mask.h"

#include "gca/gfx_7_2_enum.h"

#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"

#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"

static void dce_v8_0_set_display_funcs(struct amdgpu_device *adev);
static void dce_v8_0_set_irq_funcs(struct amdgpu_device *adev);

static const u32 crtc_offsets[6] =
{
	CRTC0_REGISTER_OFFSET,
	CRTC1_REGISTER_OFFSET,
	CRTC2_REGISTER_OFFSET,
	CRTC3_REGISTER_OFFSET,
	CRTC4_REGISTER_OFFSET,
	CRTC5_REGISTER_OFFSET
};

static const uint32_t dig_offsets[] = {
	CRTC0_REGISTER_OFFSET,
	CRTC1_REGISTER_OFFSET,
	CRTC2_REGISTER_OFFSET,
	CRTC3_REGISTER_OFFSET,
	CRTC4_REGISTER_OFFSET,
	CRTC5_REGISTER_OFFSET,
	(0x13830 - 0x7030) >> 2,
};

static const struct {
	uint32_t reg;
	uint32_t vblank;
	uint32_t vline;
	uint32_t hpd;

} interrupt_status_offsets[6] = { {
	.reg = mmDISP_INTERRUPT_STATUS,
	.vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE2,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE3,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE4,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE5,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
} };
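
/* HPD interrupt control register for each hotplug detect pin */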
static const uint32_t hpd_int_control_offsets[6] = {
	mmDC_HPD1_INT_CONTROL,
	mmDC_HPD2_INT_CONTROL,
	mmDC_HPD3_INT_CONTROL,
	mmDC_HPD4_INT_CONTROL,
	mmDC_HPD5_INT_CONTROL,
	mmDC_HPD6_INT_CONTROL,
};

static u32 dce_v8_0_audio_endpt_rreg(struct amdgpu_device *adev,
				     u32 block_offset, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
	r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset);
	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);

	return r;
}

static void dce_v8_0_audio_endpt_wreg(struct amdgpu_device *adev,
				      u32 block_offset, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v);
	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
}

static bool dce_v8_0_is_in_vblank(struct amdgpu_device *adev, int crtc)
{
	if (RREG32(mmCRTC_STATUS + crtc_offsets[crtc]) &
	    CRTC_V_BLANK_START_END__CRTC_V_BLANK_START_MASK)
		return true;
	else
		return false;
}

static bool dce_v8_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
{
	u32 pos1, pos2;

	pos1 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
	pos2 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);

	if (pos1 != pos2)
		return true;
	else
		return false;
}

/**
 * dce_v8_0_vblank_wait - vblank wait asic callback.
 *
 * @adev: amdgpu_device pointer
 * @crtc: crtc to wait for vblank on
 *
 * Wait for vblank on the requested crtc (evergreen+).
 */
static void dce_v8_0_vblank_wait(struct amdgpu_device *adev, int crtc)
{
	unsigned i = 0;

	if (crtc >= adev->mode_info.num_crtc)
		return;

	if (!(RREG32(mmCRTC_CONTROL + crtc_offsets[crtc]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK))
		return;

	/* depending on when we hit vblank, we may be close to active; if so,
	 * wait for another frame.
	 */
	while (dce_v8_0_is_in_vblank(adev, crtc)) {
		if (i++ % 100 == 0) {
			if (!dce_v8_0_is_counter_moving(adev, crtc))
				break;
		}
	}
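
	/* wait for the start of the next vblank */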
	while (!dce_v8_0_is_in_vblank(adev, crtc)) {
		if (i++ % 100 == 0) {
			if (!dce_v8_0_is_counter_moving(adev, crtc))
				break;
		}
	}
}

static u32 dce_v8_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else
		return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
}

static void dce_v8_0_pageflip_interrupt_init(struct amdgpu_device *adev)
{
	unsigned i;

	/* Enable pflip interrupts */
	for (i = 0; i < adev->mode_info.num_crtc; i++)
		amdgpu_irq_get(adev, &adev->pageflip_irq, i);
}

static void dce_v8_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
{
	unsigned i;

	/* Disable pflip interrupts */
	for (i = 0; i < adev->mode_info.num_crtc; i++)
		amdgpu_irq_put(adev, &adev->pageflip_irq, i);
}

/**
 * dce_v8_0_page_flip - pageflip callback.
 *
 * @adev: amdgpu_device pointer
 * @crtc_id: crtc to cleanup pageflip on
 * @crtc_base: new address of the crtc (GPU MC address)
 *
 * Triggers the actual pageflip by updating the primary
 * surface base address.
 */
static void dce_v8_0_page_flip(struct amdgpu_device *adev,
			       int crtc_id, u64 crtc_base)
{
	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];

	/* update the primary scanout addresses */
	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
	       upper_32_bits(crtc_base));
	/* writing to the low address triggers the update */
	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
	       lower_32_bits(crtc_base));
	/* post the write */
	RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
}

static int dce_v8_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
					u32 *vbl, u32 *position)
{
	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
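
	/* V_BLANK_START_END: vblank start in the low word, end in the high word;
	 * STATUS_POSITION: vertical count in the low word, horizontal in the high word
	 */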
	*vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
	*position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);

	return 0;
}

/**
 * dce_v8_0_hpd_sense - hpd sense callback.
 *
 * @adev: amdgpu_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Checks if a digital monitor is connected (evergreen+).
 * Returns true if connected, false if not connected.
 */
static bool dce_v8_0_hpd_sense(struct amdgpu_device *adev,
			       enum amdgpu_hpd_id hpd)
{
	bool connected = false;

	switch (hpd) {
	case AMDGPU_HPD_1:
		if (RREG32(mmDC_HPD1_INT_STATUS) & DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
			connected = true;
		break;
	case AMDGPU_HPD_2:
		if (RREG32(mmDC_HPD2_INT_STATUS) & DC_HPD2_INT_STATUS__DC_HPD2_SENSE_MASK)
			connected = true;
		break;
	case AMDGPU_HPD_3:
		if (RREG32(mmDC_HPD3_INT_STATUS) & DC_HPD3_INT_STATUS__DC_HPD3_SENSE_MASK)
			connected = true;
		break;
	case AMDGPU_HPD_4:
		if (RREG32(mmDC_HPD4_INT_STATUS) & DC_HPD4_INT_STATUS__DC_HPD4_SENSE_MASK)
			connected = true;
		break;
	case AMDGPU_HPD_5:
		if (RREG32(mmDC_HPD5_INT_STATUS) & DC_HPD5_INT_STATUS__DC_HPD5_SENSE_MASK)
			connected = true;
		break;
	case AMDGPU_HPD_6:
		if (RREG32(mmDC_HPD6_INT_STATUS) & DC_HPD6_INT_STATUS__DC_HPD6_SENSE_MASK)
			connected = true;
		break;
	default:
		break;
	}

	return connected;
}

/**
 * dce_v8_0_hpd_set_polarity - hpd set polarity callback.
 *
 * @adev: amdgpu_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Set the polarity of the hpd pin (evergreen+).
 */
static void dce_v8_0_hpd_set_polarity(struct amdgpu_device *adev,
				      enum amdgpu_hpd_id hpd)
{
	u32 tmp;
	bool connected = dce_v8_0_hpd_sense(adev, hpd);

	switch (hpd) {
	case AMDGPU_HPD_1:
		tmp = RREG32(mmDC_HPD1_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
		else
			tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
		WREG32(mmDC_HPD1_INT_CONTROL, tmp);
		break;
	case AMDGPU_HPD_2:
		tmp = RREG32(mmDC_HPD2_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPD2_INT_CONTROL__DC_HPD2_INT_POLARITY_MASK;
		else
			tmp |= DC_HPD2_INT_CONTROL__DC_HPD2_INT_POLARITY_MASK;
		WREG32(mmDC_HPD2_INT_CONTROL, tmp);
		break;
	case AMDGPU_HPD_3:
		tmp = RREG32(mmDC_HPD3_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPD3_INT_CONTROL__DC_HPD3_INT_POLARITY_MASK;
		else
			tmp |= DC_HPD3_INT_CONTROL__DC_HPD3_INT_POLARITY_MASK;
		WREG32(mmDC_HPD3_INT_CONTROL, tmp);
		break;
	case AMDGPU_HPD_4:
		tmp = RREG32(mmDC_HPD4_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPD4_INT_CONTROL__DC_HPD4_INT_POLARITY_MASK;
		else
			tmp |= DC_HPD4_INT_CONTROL__DC_HPD4_INT_POLARITY_MASK;
		WREG32(mmDC_HPD4_INT_CONTROL, tmp);
		break;
	case AMDGPU_HPD_5:
		tmp = RREG32(mmDC_HPD5_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPD5_INT_CONTROL__DC_HPD5_INT_POLARITY_MASK;
		else
			tmp |= DC_HPD5_INT_CONTROL__DC_HPD5_INT_POLARITY_MASK;
		WREG32(mmDC_HPD5_INT_CONTROL, tmp);
		break;
	case AMDGPU_HPD_6:
		tmp = RREG32(mmDC_HPD6_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPD6_INT_CONTROL__DC_HPD6_INT_POLARITY_MASK;
		else
			tmp |= DC_HPD6_INT_CONTROL__DC_HPD6_INT_POLARITY_MASK;
		WREG32(mmDC_HPD6_INT_CONTROL, tmp);
		break;
	default:
		break;
	}
}

/**
 * dce_v8_0_hpd_init - hpd setup callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the hpd pins used by the card (evergreen+).
 * Enable the pin, set the polarity, and enable the hpd interrupts.
 */
static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;
	u32 tmp = (0x9c4 << DC_HPD1_CONTROL__DC_HPD1_CONNECTION_TIMER__SHIFT) |
		(0xfa << DC_HPD1_CONTROL__DC_HPD1_RX_INT_TIMER__SHIFT) |
		DC_HPD1_CONTROL__DC_HPD1_EN_MASK;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
			/* don't try to enable hpd on eDP or LVDS to avoid breaking the
			 * aux dp channel on imac and help (but not completely fix)
			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
			 * also avoid interrupt storms during dpms.
			 */
			continue;
		}
		switch (amdgpu_connector->hpd.hpd) {
		case AMDGPU_HPD_1:
			WREG32(mmDC_HPD1_CONTROL, tmp);
			break;
		case AMDGPU_HPD_2:
			WREG32(mmDC_HPD2_CONTROL, tmp);
			break;
		case AMDGPU_HPD_3:
			WREG32(mmDC_HPD3_CONTROL, tmp);
			break;
		case AMDGPU_HPD_4:
			WREG32(mmDC_HPD4_CONTROL, tmp);
			break;
		case AMDGPU_HPD_5:
			WREG32(mmDC_HPD5_CONTROL, tmp);
			break;
		case AMDGPU_HPD_6:
			WREG32(mmDC_HPD6_CONTROL, tmp);
			break;
		default:
			break;
		}
		dce_v8_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
		amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
	}
}

/**
 * dce_v8_0_hpd_fini - hpd tear down callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the hpd pins used by the card (evergreen+).
 * Disable the hpd interrupts.
 */
static void dce_v8_0_hpd_fini(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		switch (amdgpu_connector->hpd.hpd) {
		case AMDGPU_HPD_1:
			WREG32(mmDC_HPD1_CONTROL, 0);
			break;
		case AMDGPU_HPD_2:
			WREG32(mmDC_HPD2_CONTROL, 0);
			break;
		case AMDGPU_HPD_3:
			WREG32(mmDC_HPD3_CONTROL, 0);
			break;
		case AMDGPU_HPD_4:
			WREG32(mmDC_HPD4_CONTROL, 0);
			break;
		case AMDGPU_HPD_5:
			WREG32(mmDC_HPD5_CONTROL, 0);
			break;
		case AMDGPU_HPD_6:
			WREG32(mmDC_HPD6_CONTROL, 0);
			break;
		default:
			break;
		}
		amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
	}
}

static u32 dce_v8_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
{
	return mmDC_GPIO_HPD_A;
}

static bool dce_v8_0_is_display_hung(struct amdgpu_device *adev)
{
	u32 crtc_hung = 0;
	u32 crtc_status[6];
	u32 i, j, tmp;

	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		if (RREG32(mmCRTC_CONTROL + crtc_offsets[i]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK) {
			crtc_status[i] = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
			crtc_hung |= (1 << i);
		}
	}

	for (j = 0; j < 10; j++) {
		for (i = 0; i < adev->mode_info.num_crtc; i++) {
			if (crtc_hung & (1 << i)) {
				tmp = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
				if (tmp != crtc_status[i])
					crtc_hung &= ~(1 << i);
			}
		}
		if (crtc_hung == 0)
			return false;
		udelay(100);
	}

	return true;
}
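
/*
 * Blank the active display controllers and disable VGA rendering so the
 * memory controller can be reprogrammed safely; dce_v8_0_resume_mc_access()
 * restores the saved state afterwards.
 */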
static void dce_v8_0_stop_mc_access(struct amdgpu_device *adev,
				    struct amdgpu_mode_mc_save *save)
{
	u32 crtc_enabled, tmp;
	int i;

	save->vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
	save->vga_hdp_control = RREG32(mmVGA_HDP_CONTROL);

	/* disable VGA render */
	tmp = RREG32(mmVGA_RENDER_CONTROL);
	tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
	WREG32(mmVGA_RENDER_CONTROL, tmp);

	/* blank the display controllers */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
					     CRTC_CONTROL, CRTC_MASTER_EN);
		if (crtc_enabled) {
#if 0
			u32 frame_count;
			int j;

			save->crtc_enabled[i] = true;
			tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
			if (REG_GET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN) == 0) {
				amdgpu_display_vblank_wait(adev, i);
				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
				tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1);
				WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
			}
			/* wait for the next frame */
			frame_count = amdgpu_display_vblank_get_counter(adev, i);
			for (j = 0; j < adev->usec_timeout; j++) {
				if (amdgpu_display_vblank_get_counter(adev, i) != frame_count)
					break;
				udelay(1);
			}
			tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
			if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK) == 0) {
				tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 1);
				WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
			}
			tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]);
			if (REG_GET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK) == 0) {
				tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 1);
				WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
			}
#else
			/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
			WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
			tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
			tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
			WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
			WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
			save->crtc_enabled[i] = false;
			/* ***** */
#endif
		} else {
			save->crtc_enabled[i] = false;
		}
	}
}

static void dce_v8_0_resume_mc_access(struct amdgpu_device *adev,
				      struct amdgpu_mode_mc_save *save)
{
	u32 tmp, frame_count;
	int i, j;

	/* update crtc base addresses */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
		       upper_32_bits(adev->mc.vram_start));
		WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
		       upper_32_bits(adev->mc.vram_start));
		WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
		       (u32)adev->mc.vram_start);
		WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
		       (u32)adev->mc.vram_start);

		if (save->crtc_enabled[i]) {
			tmp = RREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i]);
			if (REG_GET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE) != 3) {
				tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE, 3);
				WREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i], tmp);
			}
			tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
			if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK)) {
				tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 0);
				WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
			}
			tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]);
			if (REG_GET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK)) {
				tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 0);
				WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
			}
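			/* wait for any pending surface update to complete */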
			for (j = 0; j < adev->usec_timeout; j++) {
				tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
				if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_SURFACE_UPDATE_PENDING) == 0)
					break;
				udelay(1);
			}
			tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
			tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 0);
			WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
			WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
			WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
			/* wait for the next frame */
			frame_count = amdgpu_display_vblank_get_counter(adev, i);
			for (j = 0; j < adev->usec_timeout; j++) {
				if (amdgpu_display_vblank_get_counter(adev, i) != frame_count)
					break;
				udelay(1);
			}
		}
	}

	WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start));
	WREG32(mmVGA_MEMORY_BASE_ADDRESS, lower_32_bits(adev->mc.vram_start));

	/* Unlock vga access */
	WREG32(mmVGA_HDP_CONTROL, save->vga_hdp_control);
	mdelay(1);
	WREG32(mmVGA_RENDER_CONTROL, save->vga_render_control);
}

static void dce_v8_0_set_vga_render_state(struct amdgpu_device *adev,
					  bool render)
{
	u32 tmp;

	/* Lock out access through the VGA aperture */
	tmp = RREG32(mmVGA_HDP_CONTROL);
	if (render)
		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 0);
	else
		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
	WREG32(mmVGA_HDP_CONTROL, tmp);

	/* disable VGA render */
	tmp = RREG32(mmVGA_RENDER_CONTROL);
	if (render)
		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 1);
	else
		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
	WREG32(mmVGA_RENDER_CONTROL, tmp);
}

static void dce_v8_0_program_fmt(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
	int bpc = 0;
	u32 tmp = 0;
	enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;

	if (connector) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
		bpc = amdgpu_connector_get_monitor_bpc(connector);
		dither = amdgpu_connector->dither;
	}

	/* LVDS/eDP FMT is set up by atom */
	if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
		return;

	/* not needed for analog */
	if ((amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
	    (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
		return;

	if (bpc == 0)
		return;
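
	/* choose spatial dithering or simple truncation down to the monitor bit depth */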
	switch (bpc) {
	case 6:
		if (dither == AMDGPU_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
				(0 << FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT));
		else
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
				(0 << FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT));
		break;
	case 8:
		if (dither == AMDGPU_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
				(1 << FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT));
		else
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
				(1 << FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT));
		break;
	case 10:
		if (dither == AMDGPU_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
				(2 << FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT));
		else
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
				(2 << FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT));
		break;
	default:
		/* not needed */
		break;
	}

	WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
}


/* display watermark setup */
/**
 * dce_v8_0_line_buffer_adjust - Set up the line buffer
 *
 * @adev: amdgpu_device pointer
 * @amdgpu_crtc: the selected display controller
 * @mode: the current display mode on the selected display
 * controller
 *
 * Set up the line buffer allocation for
 * the selected display controller (CIK).
 * Returns the line buffer size in pixels.
 */
static u32 dce_v8_0_line_buffer_adjust(struct amdgpu_device *adev,
				       struct amdgpu_crtc *amdgpu_crtc,
				       struct drm_display_mode *mode)
{
	u32 tmp, buffer_alloc, i;
	u32 pipe_offset = amdgpu_crtc->crtc_id * 0x8;
	/*
	 * Line Buffer Setup
	 * There are 6 line buffers, one for each display controller.
	 * There are 3 partitions per LB. Select the number of partitions
	 * to enable based on the display width. For display widths larger
	 * than 4096, you need to use 2 display controllers and combine
	 * them using the stereo blender.
	 */
	if (amdgpu_crtc->base.enabled && mode) {
		if (mode->crtc_hdisplay < 1920) {
			tmp = 1;
			buffer_alloc = 2;
		} else if (mode->crtc_hdisplay < 2560) {
			tmp = 2;
			buffer_alloc = 2;
		} else if (mode->crtc_hdisplay < 4096) {
			tmp = 0;
			buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
		} else {
			DRM_DEBUG_KMS("Mode too big for LB!\n");
			tmp = 0;
			buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
		}
	} else {
		tmp = 1;
		buffer_alloc = 0;
	}

	WREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset,
	       (tmp << LB_MEMORY_CTRL__LB_MEMORY_CONFIG__SHIFT) |
	       (0x6B0 << LB_MEMORY_CTRL__LB_MEMORY_SIZE__SHIFT));
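
	/* request the DMIF buffer allocation for this pipe and wait until it completes */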
	WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
	       (buffer_alloc << PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT));
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
		    PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK)
			break;
		udelay(1);
	}

	if (amdgpu_crtc->base.enabled && mode) {
		switch (tmp) {
		case 0:
		default:
			return 4096 * 2;
		case 1:
			return 1920 * 2;
		case 2:
			return 2560 * 2;
		}
	}

	/* controller not enabled, so no lb used */
	return 0;
}

/**
 * cik_get_number_of_dram_channels - get the number of dram channels
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the number of video ram channels (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the number of dram channels
 */
static u32 cik_get_number_of_dram_channels(struct amdgpu_device *adev)
{
	u32 tmp = RREG32(mmMC_SHARED_CHMAP);

	switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
	case 0:
	default:
		return 1;
	case 1:
		return 2;
	case 2:
		return 4;
	case 3:
		return 8;
	case 4:
		return 3;
	case 5:
		return 6;
	case 6:
		return 10;
	case 7:
		return 12;
	case 8:
		return 16;
	}
}

struct dce8_wm_params {
	u32 dram_channels; /* number of dram channels */
	u32 yclk; /* bandwidth per dram data pin in kHz */
	u32 sclk; /* engine clock in kHz */
	u32 disp_clk; /* display clock in kHz */
	u32 src_width; /* viewport width */
	u32 active_time; /* active display time in ns */
	u32 blank_time; /* blank time in ns */
	bool interlaced; /* mode is interlaced */
	fixed20_12 vsc; /* vertical scale ratio */
	u32 num_heads; /* number of active crtcs */
	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
	u32 lb_size; /* line buffer allocated to pipe */
	u32 vtaps; /* vertical scaler taps */
};

/**
 * dce_v8_0_dram_bandwidth - get the dram bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the raw dram bandwidth (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the dram bandwidth in MBytes/s
 */
static u32 dce_v8_0_dram_bandwidth(struct dce8_wm_params *wm)
{
	/* Calculate raw DRAM Bandwidth */
	fixed20_12 dram_efficiency; /* 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	dram_efficiency.full = dfixed_const(7);
	dram_efficiency.full = dfixed_div(dram_efficiency, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
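
	/* i.e. bandwidth = (yclk / 1000) * (dram_channels * 4) * 0.7;
	 * e.g. yclk = 1000000 kHz and 2 channels -> 1000 * 8 * 0.7 = 5600 MBytes/s
	 */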
	return dfixed_trunc(bandwidth);
}

/**
 * dce_v8_0_dram_bandwidth_for_display - get the dram bandwidth for display
 *
 * @wm: watermark calculation data
 *
 * Calculate the dram bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the dram bandwidth for display in MBytes/s
 */
static u32 dce_v8_0_dram_bandwidth_for_display(struct dce8_wm_params *wm)
{
	/* Calculate DRAM Bandwidth and the part allocated to display. */
	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v8_0_data_return_bandwidth - get the data return bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the data return bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the data return bandwidth in MBytes/s
 */
static u32 dce_v8_0_data_return_bandwidth(struct dce8_wm_params *wm)
{
	/* Calculate the display Data return Bandwidth */
	fixed20_12 return_efficiency; /* 0.8 */
	fixed20_12 sclk, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	sclk.full = dfixed_const(wm->sclk);
	sclk.full = dfixed_div(sclk, a);
	a.full = dfixed_const(10);
	return_efficiency.full = dfixed_const(8);
	return_efficiency.full = dfixed_div(return_efficiency, a);
	a.full = dfixed_const(32);
	bandwidth.full = dfixed_mul(a, sclk);
	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v8_0_dmif_request_bandwidth - get the dmif bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the dmif bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the dmif bandwidth in MBytes/s
 */
static u32 dce_v8_0_dmif_request_bandwidth(struct dce8_wm_params *wm)
{
	/* Calculate the DMIF Request Bandwidth */
	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
	fixed20_12 disp_clk, bandwidth;
	fixed20_12 a, b;

	a.full = dfixed_const(1000);
	disp_clk.full = dfixed_const(wm->disp_clk);
	disp_clk.full = dfixed_div(disp_clk, a);
	a.full = dfixed_const(32);
	b.full = dfixed_mul(a, disp_clk);

	a.full = dfixed_const(10);
	disp_clk_request_efficiency.full = dfixed_const(8);
	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);

	bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v8_0_available_bandwidth - get the min available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the min available bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the min available bandwidth in MBytes/s
 */
static u32 dce_v8_0_available_bandwidth(struct dce8_wm_params *wm)
{
	/* Calculate the Available bandwidth. Display can use this temporarily but not on average. */
	u32 dram_bandwidth = dce_v8_0_dram_bandwidth(wm);
	u32 data_return_bandwidth = dce_v8_0_data_return_bandwidth(wm);
	u32 dmif_req_bandwidth = dce_v8_0_dmif_request_bandwidth(wm);

	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
}

/**
 * dce_v8_0_average_bandwidth - get the average available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the average available bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the average available bandwidth in MBytes/s
 */
static u32 dce_v8_0_average_bandwidth(struct dce8_wm_params *wm)
{
	/* Calculate the display mode Average Bandwidth
	 * DisplayMode should contain the source and destination dimensions,
	 * timing, etc.
	 */
	fixed20_12 bpp;
	fixed20_12 line_time;
	fixed20_12 src_width;
	fixed20_12 bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
	line_time.full = dfixed_div(line_time, a);
	bpp.full = dfixed_const(wm->bytes_per_pixel);
	src_width.full = dfixed_const(wm->src_width);
	bandwidth.full = dfixed_mul(src_width, bpp);
	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
	bandwidth.full = dfixed_div(bandwidth, line_time);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v8_0_latency_watermark - get the latency watermark
 *
 * @wm: watermark calculation data
 *
 * Calculate the latency watermark (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the latency watermark in ns
 */
static u32 dce_v8_0_latency_watermark(struct dce8_wm_params *wm)
{
	/* First calculate the latency in ns */
	u32 mc_latency = 2000; /* 2000 ns. */
	u32 available_bandwidth = dce_v8_0_available_bandwidth(wm);
	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
		(wm->num_heads * cursor_line_pair_return_time);
	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
	u32 tmp, dmif_size = 12288;
	fixed20_12 a, b, c;

	if (wm->num_heads == 0)
		return 0;

	a.full = dfixed_const(2);
	b.full = dfixed_const(1);
	if ((wm->vsc.full > a.full) ||
	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
	    (wm->vtaps >= 5) ||
	    ((wm->vsc.full >= a.full) && wm->interlaced))
		max_src_lines_per_dst_line = 4;
	else
		max_src_lines_per_dst_line = 2;

	a.full = dfixed_const(available_bandwidth);
	b.full = dfixed_const(wm->num_heads);
	a.full = dfixed_div(a, b);

	b.full = dfixed_const(mc_latency + 512);
	c.full = dfixed_const(wm->disp_clk);
	b.full = dfixed_div(b, c);

	c.full = dfixed_const(dmif_size);
	b.full = dfixed_div(c, b);

	tmp = min(dfixed_trunc(a), dfixed_trunc(b));

	b.full = dfixed_const(1000);
	c.full = dfixed_const(wm->disp_clk);
	b.full = dfixed_div(c, b);
	c.full = dfixed_const(wm->bytes_per_pixel);
	b.full = dfixed_mul(b, c);

	lb_fill_bw = min(tmp, dfixed_trunc(b));

	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
	b.full = dfixed_const(1000);
	c.full = dfixed_const(lb_fill_bw);
	b.full = dfixed_div(c, b);
	a.full = dfixed_div(a, b);
	line_fill_time = dfixed_trunc(a);

	if (line_fill_time < wm->active_time)
		return latency;
	else
		return latency + (line_fill_time - wm->active_time);

}

/**
 * dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display - check
 * average and available dram bandwidth
 *
 * @wm: watermark calculation data
 *
 * Check if the display average bandwidth fits in the display
 * dram bandwidth (CIK).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce8_wm_params *wm)
{
	if (dce_v8_0_average_bandwidth(wm) <=
	    (dce_v8_0_dram_bandwidth_for_display(wm) / wm->num_heads))
		return true;
	else
		return false;
}

/**
 * dce_v8_0_average_bandwidth_vs_available_bandwidth - check
 * average and available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Check if the display average bandwidth fits in the display
 * available bandwidth (CIK).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v8_0_average_bandwidth_vs_available_bandwidth(struct dce8_wm_params *wm)
{
	if (dce_v8_0_average_bandwidth(wm) <=
	    (dce_v8_0_available_bandwidth(wm) / wm->num_heads))
		return true;
	else
		return false;
}

/**
 * dce_v8_0_check_latency_hiding - check latency hiding
 *
 * @wm: watermark calculation data
 *
 * Check latency hiding (CIK).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v8_0_check_latency_hiding(struct dce8_wm_params *wm)
{
	u32 lb_partitions = wm->lb_size / wm->src_width;
	u32 line_time = wm->active_time + wm->blank_time;
	u32 latency_tolerant_lines;
	u32 latency_hiding;
	fixed20_12 a;

	a.full = dfixed_const(1);
	if (wm->vsc.full > a.full)
		latency_tolerant_lines = 1;
	else {
		if (lb_partitions <= (wm->vtaps + 1))
			latency_tolerant_lines = 1;
		else
			latency_tolerant_lines = 2;
	}

	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);

	if (dce_v8_0_latency_watermark(wm) <= latency_hiding)
		return true;
	else
		return false;
}

/**
 * dce_v8_0_program_watermarks - program display watermarks
 *
 * @adev: amdgpu_device pointer
 * @amdgpu_crtc: the selected display controller
 * @lb_size: line buffer size
 * @num_heads: number of display controllers in use
 *
 * Calculate and program the display watermarks for the
 * selected display controller (CIK).
 */
static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
					struct amdgpu_crtc *amdgpu_crtc,
					u32 lb_size, u32 num_heads)
{
	struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
	struct dce8_wm_params wm_low, wm_high;
	u32 pixel_period;
	u32 line_time = 0;
	u32 latency_watermark_a = 0, latency_watermark_b = 0;
	u32 tmp, wm_mask, lb_vblank_lead_lines = 0;

	if (amdgpu_crtc->base.enabled && num_heads && mode) {
		pixel_period = 1000000 / (u32)mode->clock;
		line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);

		/* watermark for high clocks */
		if (adev->pm.dpm_enabled) {
			wm_high.yclk =
				amdgpu_dpm_get_mclk(adev, false) * 10;
			wm_high.sclk =
				amdgpu_dpm_get_sclk(adev, false) * 10;
		} else {
			wm_high.yclk = adev->pm.current_mclk * 10;
			wm_high.sclk = adev->pm.current_sclk * 10;
		}

		wm_high.disp_clk = mode->clock;
		wm_high.src_width = mode->crtc_hdisplay;
		wm_high.active_time = mode->crtc_hdisplay * pixel_period;
		wm_high.blank_time = line_time - wm_high.active_time;
		wm_high.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_high.interlaced = true;
		wm_high.vsc = amdgpu_crtc->vsc;
		wm_high.vtaps = 1;
		if (amdgpu_crtc->rmx_type != RMX_OFF)
			wm_high.vtaps = 2;
		wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_high.lb_size = lb_size;
		wm_high.dram_channels = cik_get_number_of_dram_channels(adev);
		wm_high.num_heads = num_heads;

		/* set for high clocks */
		latency_watermark_a = min(dce_v8_0_latency_watermark(&wm_high), (u32)65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time... */
		if (!dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
		    !dce_v8_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
		    !dce_v8_0_check_latency_hiding(&wm_high) ||
		    (adev->mode_info.disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
		}

		/* watermark for low clocks */
		if (adev->pm.dpm_enabled) {
			wm_low.yclk =
				amdgpu_dpm_get_mclk(adev, true) * 10;
			wm_low.sclk =
				amdgpu_dpm_get_sclk(adev, true) * 10;
		} else {
			wm_low.yclk = adev->pm.current_mclk * 10;
			wm_low.sclk = adev->pm.current_sclk * 10;
		}

		wm_low.disp_clk = mode->clock;
		wm_low.src_width = mode->crtc_hdisplay;
		wm_low.active_time = mode->crtc_hdisplay * pixel_period;
		wm_low.blank_time = line_time - wm_low.active_time;
		wm_low.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_low.interlaced = true;
		wm_low.vsc = amdgpu_crtc->vsc;
		wm_low.vtaps = 1;
		if (amdgpu_crtc->rmx_type != RMX_OFF)
			wm_low.vtaps = 2;
		wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_low.lb_size = lb_size;
		wm_low.dram_channels = cik_get_number_of_dram_channels(adev);
		wm_low.num_heads = num_heads;

		/* set for low clocks */
		latency_watermark_b = min(dce_v8_0_latency_watermark(&wm_low), (u32)65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time... */
		if (!dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
		    !dce_v8_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
		    !dce_v8_0_check_latency_hiding(&wm_low) ||
		    (adev->mode_info.disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
		}
		lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
	}

	/* select wm A */
	wm_mask = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = wm_mask;
	tmp &= ~(3 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
	tmp |= (1 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
	       ((latency_watermark_a << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
	/* select wm B */
	tmp = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset);
	tmp &= ~(3 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
	tmp |= (2 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
	       ((latency_watermark_b << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
	/* restore original selection */
	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, wm_mask);

	/* save values for DPM */
	amdgpu_crtc->line_time = line_time;
	amdgpu_crtc->wm_high = latency_watermark_a;
	amdgpu_crtc->wm_low = latency_watermark_b;
	/* Save number of lines the linebuffer leads before the scanout */
	amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
}

/**
 * dce_v8_0_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line
 * buffer allocation (CIK).
 */
static void dce_v8_0_bandwidth_update(struct amdgpu_device *adev)
{
	struct drm_display_mode *mode = NULL;
	u32 num_heads = 0, lb_size;
	int i;

	amdgpu_update_display_priority(adev);

	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		if (adev->mode_info.crtcs[i]->base.enabled)
			num_heads++;
	}
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		mode = &adev->mode_info.crtcs[i]->base.mode;
		lb_size = dce_v8_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode);
		dce_v8_0_program_watermarks(adev, adev->mode_info.crtcs[i],
					    lb_size, num_heads);
	}
}

static void dce_v8_0_audio_get_connected_pins(struct amdgpu_device *adev)
{
	int i;
	u32 offset, tmp;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		offset = adev->mode_info.audio.pin[i].offset;
		tmp = RREG32_AUDIO_ENDPT(offset,
					 ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
		if (((tmp &
		AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK) >>
		AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT) == 1)
			adev->mode_info.audio.pin[i].connected = false;
		else
			adev->mode_info.audio.pin[i].connected = true;
	}
}

static struct amdgpu_audio_pin *dce_v8_0_audio_get_pin(struct amdgpu_device *adev)
{
	int i;

	dce_v8_0_audio_get_connected_pins(adev);

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		if (adev->mode_info.audio.pin[i].connected)
			return &adev->mode_info.audio.pin[i];
	}
	DRM_ERROR("No connected audio pins found!\n");
	return NULL;
}

static void dce_v8_0_afmt_audio_select_pin(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	u32 offset;

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	offset = dig->afmt->offset;

	WREG32(mmAFMT_AUDIO_SRC_CONTROL + offset,
	       (dig->afmt->pin->id << AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT__SHIFT));
}

static void dce_v8_0_audio_write_latency_fields(struct drm_encoder *encoder,
						struct drm_display_mode *mode)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector = NULL;
	u32 tmp = 0, offset;

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	offset = dig->afmt->pin->offset;

	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
		if (connector->encoder == encoder) {
			amdgpu_connector = to_amdgpu_connector(connector);
			break;
		}
	}

	if (!amdgpu_connector) {
		DRM_ERROR("Couldn't find encoder's connector\n");
		return;
	}
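
	/* index 0 of the EDID latency fields holds the progressive values,
	 * index 1 the interlaced ones
	 */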
	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		if (connector->latency_present[1])
			tmp =
			(connector->video_latency[1] <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
			(connector->audio_latency[1] <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
		else
			tmp =
			(0 <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
			(0 <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
	} else {
		if (connector->latency_present[0])
			tmp =
			(connector->video_latency[0] <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
			(connector->audio_latency[0] <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
		else
			tmp =
			(0 <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
			(0 <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);

	}
	WREG32_AUDIO_ENDPT(offset, ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
}

static void dce_v8_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector = NULL;
	u32 offset, tmp;
	u8 *sadb = NULL;
	int sad_count;

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	offset = dig->afmt->pin->offset;

	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
		if (connector->encoder == encoder) {
			amdgpu_connector = to_amdgpu_connector(connector);
			break;
		}
	}

	if (!amdgpu_connector) {
		DRM_ERROR("Couldn't find encoder's connector\n");
		return;
	}

	sad_count = drm_edid_to_speaker_allocation(amdgpu_connector_edid(connector), &sadb);
	if (sad_count < 0) {
		DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
		sad_count = 0;
	}

	/* program the speaker allocation */
	tmp = RREG32_AUDIO_ENDPT(offset, ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
	tmp &= ~(AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION_MASK |
		 AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION_MASK);
	/* set HDMI mode */
	tmp |= AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION_MASK;
	if (sad_count)
		tmp |= (sadb[0] << AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT);
	else
		tmp |= (5 << AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT); /* stereo */
	WREG32_AUDIO_ENDPT(offset, ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);

	kfree(sadb);
}

static void dce_v8_0_audio_write_sad_regs(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	u32 offset;
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector = NULL;
	struct cea_sad *sads;
	int i, sad_count;
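
	/* map each HDMI audio coding type to its AZALIA SAD descriptor register */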
	static const u16 eld_reg_to_type[][2] = {
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
	};

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	offset = dig->afmt->pin->offset;

	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
		if (connector->encoder == encoder) {
			amdgpu_connector = to_amdgpu_connector(connector);
			break;
		}
	}

	if (!amdgpu_connector) {
		DRM_ERROR("Couldn't find encoder's connector\n");
		return;
	}

	sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
	if (sad_count <= 0) {
		DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
		return;
	}
	BUG_ON(!sads);

	for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
		u32 value = 0;
		u8 stereo_freqs = 0;
		int max_channels = -1;
		int j;

		for (j = 0; j < sad_count; j++) {
			struct cea_sad *sad = &sads[j];

			if (sad->format == eld_reg_to_type[i][1]) {
				if (sad->channels > max_channels) {
					value = (sad->channels <<
						 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT) |
						(sad->byte2 <<
						 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT) |
						(sad->freq <<
						 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT);
					max_channels = sad->channels;
				}

				if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
					stereo_freqs |= sad->freq;
				else
					break;
			}
		}

		value |= (stereo_freqs <<
			  AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT);

		WREG32_AUDIO_ENDPT(offset, eld_reg_to_type[i][0], value);
	}

	kfree(sads);
}

static void dce_v8_0_audio_enable(struct amdgpu_device *adev,
				  struct amdgpu_audio_pin *pin,
				  bool enable)
{
	if (!pin)
		return;

	WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
			   enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0);
}
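
/* audio endpoint register offsets, relative to the first endpoint block at 0x1780 */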
static const u32 pin_offsets[7] =
{
	(0x1780 - 0x1780),
	(0x1786 - 0x1780),
	(0x178c - 0x1780),
	(0x1792 - 0x1780),
	(0x1798 - 0x1780),
	(0x179d - 0x1780),
	(0x17a4 - 0x1780),
};

static int dce_v8_0_audio_init(struct amdgpu_device *adev)
{
	int i;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	if (adev->asic_type == CHIP_KAVERI) /* KV: 4 streams, 7 endpoints */
		adev->mode_info.audio.num_pins = 7;
	else if ((adev->asic_type == CHIP_KABINI) ||
		 (adev->asic_type == CHIP_MULLINS)) /* KB/ML: 2 streams, 3 endpoints */
		adev->mode_info.audio.num_pins = 3;
	else if ((adev->asic_type == CHIP_BONAIRE) ||
		 (adev->asic_type == CHIP_HAWAII)) /* BN/HW: 6 streams, 7 endpoints */
		adev->mode_info.audio.num_pins = 7;
	else
		adev->mode_info.audio.num_pins = 3;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].offset = pin_offsets[i];
		adev->mode_info.audio.pin[i].id = i;
		/* disable audio. it will be set up later */
		/* XXX remove once we switch to ip funcs */
		dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
	}

	return 0;
}

static void dce_v8_0_audio_fini(struct amdgpu_device *adev)
{
	int i;

	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++)
		dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);

	adev->mode_info.audio.enabled = false;
}

/*
 * update the N and CTS parameters for a given pixel clock rate
 */
static void dce_v8_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	uint32_t offset = dig->afmt->offset;

	WREG32(mmHDMI_ACR_32_0 + offset, (acr.cts_32khz << HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT));
	WREG32(mmHDMI_ACR_32_1 + offset, acr.n_32khz);

	WREG32(mmHDMI_ACR_44_0 + offset, (acr.cts_44_1khz << HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT));
	WREG32(mmHDMI_ACR_44_1 + offset, acr.n_44_1khz);

	WREG32(mmHDMI_ACR_48_0 + offset, (acr.cts_48khz << HDMI_ACR_48_0__HDMI_ACR_CTS_48__SHIFT));
	WREG32(mmHDMI_ACR_48_1 + offset, acr.n_48khz);
}

/*
 * build a HDMI Video Info Frame
 */
static void dce_v8_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
					       void *buffer, size_t size)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	uint32_t offset = dig->afmt->offset;
	uint8_t *frame = buffer + 3;
	uint8_t *header = buffer;

	WREG32(mmAFMT_AVI_INFO0 + offset,
	       frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
	WREG32(mmAFMT_AVI_INFO1 + offset,
	       frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
	WREG32(mmAFMT_AVI_INFO2 + offset,
	       frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
	WREG32(mmAFMT_AVI_INFO3 + offset,
	       frame[0xC] | (frame[0xD] << 8) | (header[1] << 24));
}

static void dce_v8_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
	u32 dto_phase = 24 * 1000;
	u32 dto_modulo = clock;

	if (!dig || !dig->afmt)
		return;

	/* XXX two dtos; generally use dto0 for hdmi */
	/* Express [24MHz / target pixel clock] as an exact rational
	 * number (ratio of two integer numbers): DCCG_AUDIO_DTOx_PHASE
	 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
	 */
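	/* e.g. a 148500 kHz pixel clock gives PHASE = 24000, MODULE = 148500 */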
= buffer; 1689 1690 WREG32(mmAFMT_AVI_INFO0 + offset, 1691 frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24)); 1692 WREG32(mmAFMT_AVI_INFO1 + offset, 1693 frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24)); 1694 WREG32(mmAFMT_AVI_INFO2 + offset, 1695 frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24)); 1696 WREG32(mmAFMT_AVI_INFO3 + offset, 1697 frame[0xC] | (frame[0xD] << 8) | (header[1] << 24)); 1698 } 1699 1700 static void dce_v8_0_audio_set_dto(struct drm_encoder *encoder, u32 clock) 1701 { 1702 struct drm_device *dev = encoder->dev; 1703 struct amdgpu_device *adev = dev->dev_private; 1704 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 1705 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; 1706 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc); 1707 u32 dto_phase = 24 * 1000; 1708 u32 dto_modulo = clock; 1709 1710 if (!dig || !dig->afmt) 1711 return; 1712 1713 /* XXX two dtos; generally use dto0 for hdmi */ 1714 /* Express [24MHz / target pixel clock] as an exact rational 1715 * number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE 1716 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator 1717 */ 1718 WREG32(mmDCCG_AUDIO_DTO_SOURCE, (amdgpu_crtc->crtc_id << DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO0_SOURCE_SEL__SHIFT)); 1719 WREG32(mmDCCG_AUDIO_DTO0_PHASE, dto_phase); 1720 WREG32(mmDCCG_AUDIO_DTO0_MODULE, dto_modulo); 1721 } 1722 1723 /* 1724 * update the info frames with the data from the current display mode 1725 */ 1726 static void dce_v8_0_afmt_setmode(struct drm_encoder *encoder, 1727 struct drm_display_mode *mode) 1728 { 1729 struct drm_device *dev = encoder->dev; 1730 struct amdgpu_device *adev = dev->dev_private; 1731 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 1732 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; 1733 struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder); 1734 u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE]; 1735 struct hdmi_avi_infoframe frame; 1736 uint32_t offset, val; 1737 ssize_t err; 1738 int bpc = 8; 1739 1740 if (!dig || !dig->afmt) 1741 return; 1742 1743 /* Silent, r600_hdmi_enable will raise WARN for us */ 1744 if (!dig->afmt->enabled) 1745 return; 1746 offset = dig->afmt->offset; 1747 1748 /* hdmi deep color mode general control packets setup, if bpc > 8 */ 1749 if (encoder->crtc) { 1750 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc); 1751 bpc = amdgpu_crtc->bpc; 1752 } 1753 1754 /* disable audio prior to setting up hw */ 1755 dig->afmt->pin = dce_v8_0_audio_get_pin(adev); 1756 dce_v8_0_audio_enable(adev, dig->afmt->pin, false); 1757 1758 dce_v8_0_audio_set_dto(encoder, mode->clock); 1759 1760 WREG32(mmHDMI_VBI_PACKET_CONTROL + offset, 1761 HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK); /* send null packets when required */ 1762 1763 WREG32(mmAFMT_AUDIO_CRC_CONTROL + offset, 0x1000); 1764 1765 val = RREG32(mmHDMI_CONTROL + offset); 1766 val &= ~HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK; 1767 val &= ~HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH_MASK; 1768 1769 switch (bpc) { 1770 case 0: 1771 case 6: 1772 case 8: 1773 case 16: 1774 default: 1775 DRM_DEBUG("%s: Disabling hdmi deep color for %d bpc.\n", 1776 connector->name, bpc); 1777 break; 1778 case 10: 1779 val |= HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK; 1780 val |= 1 << HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT; 1781 DRM_DEBUG("%s: Enabling hdmi deep color 30 for 10 bpc.\n", 1782 
connector->name);
		break;
	case 12:
		val |= HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK;
		val |= 2 << HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT;
		DRM_DEBUG("%s: Enabling hdmi deep color 36 for 12 bpc.\n",
			  connector->name);
		break;
	}

	WREG32(mmHDMI_CONTROL + offset, val);

	WREG32(mmHDMI_VBI_PACKET_CONTROL + offset,
	       HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK | /* send null packets when required */
	       HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND_MASK | /* send general control packets */
	       HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT_MASK); /* send general control packets every frame */

	WREG32(mmHDMI_INFOFRAME_CONTROL0 + offset,
	       HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND_MASK | /* enable audio info frames (frames won't be set until audio is enabled) */
	       HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT_MASK); /* required for audio info values to be updated */

	WREG32(mmAFMT_INFOFRAME_CONTROL0 + offset,
	       AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE_MASK); /* required for audio info values to be updated */

	WREG32(mmHDMI_INFOFRAME_CONTROL1 + offset,
	       (2 << HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE__SHIFT)); /* anything other than 0 */

	WREG32(mmHDMI_GC + offset, 0); /* unset HDMI_GC_AVMUTE */

	WREG32(mmHDMI_AUDIO_PACKET_CONTROL + offset,
	       (1 << HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN__SHIFT) | /* set the default audio delay */
	       (3 << HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_PACKETS_PER_LINE__SHIFT)); /* should be sufficient for all audio modes and small enough for all hblanks */

	WREG32(mmAFMT_AUDIO_PACKET_CONTROL + offset,
	       AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE_MASK); /* allow 60958 channel status fields to be updated */

	/* fglrx clears sth in AFMT_AUDIO_PACKET_CONTROL2 here */

	if (bpc > 8)
		WREG32(mmHDMI_ACR_PACKET_CONTROL + offset,
		       HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK); /* allow hw to send ACR packets when required */
	else
		WREG32(mmHDMI_ACR_PACKET_CONTROL + offset,
		       HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE_MASK | /* select SW CTS value */
		       HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK); /* allow hw to send ACR packets when required */

	dce_v8_0_afmt_update_ACR(encoder, mode->clock);

	WREG32(mmAFMT_60958_0 + offset,
	       (1 << AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L__SHIFT));

	WREG32(mmAFMT_60958_1 + offset,
	       (2 << AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R__SHIFT));

	WREG32(mmAFMT_60958_2 + offset,
	       (3 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2__SHIFT) |
	       (4 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3__SHIFT) |
	       (5 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4__SHIFT) |
	       (6 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5__SHIFT) |
	       (7 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6__SHIFT) |
	       (8 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7__SHIFT));

	dce_v8_0_audio_write_speaker_allocation(encoder);

	WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + offset,
	       (0xff << AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT));

	dce_v8_0_afmt_audio_select_pin(encoder);
	dce_v8_0_audio_write_sad_regs(encoder);
	dce_v8_0_audio_write_latency_fields(encoder, mode);

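	/* build the AVI infoframe for the current mode and copy it into the
	 * AFMT packet RAM; the HDMI_AVI_INFO_SEND bit set below makes the
	 * hardware transmit it */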
	err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
	if (err < 0) {
		DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
		return;
	}

	err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
	if (err < 0) {
		DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
		return;
	}

	dce_v8_0_afmt_update_avi_infoframe(encoder, buffer, sizeof(buffer));

	WREG32_OR(mmHDMI_INFOFRAME_CONTROL0 + offset,
		  HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_SEND_MASK | /* enable AVI info frames */
		  HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_CONT_MASK); /* required for AVI info values to be updated */

	WREG32_P(mmHDMI_INFOFRAME_CONTROL1 + offset,
		 (2 << HDMI_INFOFRAME_CONTROL1__HDMI_AVI_INFO_LINE__SHIFT), /* anything other than 0 */
		 ~HDMI_INFOFRAME_CONTROL1__HDMI_AVI_INFO_LINE_MASK);

	WREG32_OR(mmAFMT_AUDIO_PACKET_CONTROL + offset,
		  AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_MASK); /* send audio packets */

	/* it's unknown what these bits do exactly, but it's indeed quite useful for debugging */
	WREG32(mmAFMT_RAMP_CONTROL0 + offset, 0x00FFFFFF);
	WREG32(mmAFMT_RAMP_CONTROL1 + offset, 0x007FFFFF);
	WREG32(mmAFMT_RAMP_CONTROL2 + offset, 0x00000001);
	WREG32(mmAFMT_RAMP_CONTROL3 + offset, 0x00000001);

	/* enable audio after setting up hw */
	dce_v8_0_audio_enable(adev, dig->afmt->pin, true);
}

/* enable/disable the AFMT block for this DIG encoder; on disable the
 * audio pin is released so it can be reused by another encoder */
static void dce_v8_0_afmt_enable(struct drm_encoder *encoder, bool enable)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;

	if (!dig || !dig->afmt)
		return;

	/* Silent, r600_hdmi_enable will raise WARN for us */
	if (enable && dig->afmt->enabled)
		return;
	if (!enable && !dig->afmt->enabled)
		return;

	if (!enable && dig->afmt->pin) {
		dce_v8_0_audio_enable(adev, dig->afmt->pin, false);
		dig->afmt->pin = NULL;
	}

	dig->afmt->enabled = enable;

	DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
		  enable ?
"En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id); 1914 } 1915 1916 static int dce_v8_0_afmt_init(struct amdgpu_device *adev) 1917 { 1918 int i; 1919 1920 for (i = 0; i < adev->mode_info.num_dig; i++) 1921 adev->mode_info.afmt[i] = NULL; 1922 1923 /* DCE8 has audio blocks tied to DIG encoders */ 1924 for (i = 0; i < adev->mode_info.num_dig; i++) { 1925 adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL); 1926 if (adev->mode_info.afmt[i]) { 1927 adev->mode_info.afmt[i]->offset = dig_offsets[i]; 1928 adev->mode_info.afmt[i]->id = i; 1929 } else { 1930 int j; 1931 for (j = 0; j < i; j++) { 1932 kfree(adev->mode_info.afmt[j]); 1933 adev->mode_info.afmt[j] = NULL; 1934 } 1935 return -ENOMEM; 1936 } 1937 } 1938 return 0; 1939 } 1940 1941 static void dce_v8_0_afmt_fini(struct amdgpu_device *adev) 1942 { 1943 int i; 1944 1945 for (i = 0; i < adev->mode_info.num_dig; i++) { 1946 kfree(adev->mode_info.afmt[i]); 1947 adev->mode_info.afmt[i] = NULL; 1948 } 1949 } 1950 1951 static const u32 vga_control_regs[6] = 1952 { 1953 mmD1VGA_CONTROL, 1954 mmD2VGA_CONTROL, 1955 mmD3VGA_CONTROL, 1956 mmD4VGA_CONTROL, 1957 mmD5VGA_CONTROL, 1958 mmD6VGA_CONTROL, 1959 }; 1960 1961 static void dce_v8_0_vga_enable(struct drm_crtc *crtc, bool enable) 1962 { 1963 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 1964 struct drm_device *dev = crtc->dev; 1965 struct amdgpu_device *adev = dev->dev_private; 1966 u32 vga_control; 1967 1968 vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1; 1969 if (enable) 1970 WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | 1); 1971 else 1972 WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control); 1973 } 1974 1975 static void dce_v8_0_grph_enable(struct drm_crtc *crtc, bool enable) 1976 { 1977 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 1978 struct drm_device *dev = crtc->dev; 1979 struct amdgpu_device *adev = dev->dev_private; 1980 1981 if (enable) 1982 WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 1); 1983 else 1984 WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 0); 1985 } 1986 1987 static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc, 1988 struct drm_framebuffer *fb, 1989 int x, int y, int atomic) 1990 { 1991 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 1992 struct drm_device *dev = crtc->dev; 1993 struct amdgpu_device *adev = dev->dev_private; 1994 struct amdgpu_framebuffer *amdgpu_fb; 1995 struct drm_framebuffer *target_fb; 1996 struct drm_gem_object *obj; 1997 struct amdgpu_bo *rbo; 1998 uint64_t fb_location, tiling_flags; 1999 uint32_t fb_format, fb_pitch_pixels; 2000 u32 fb_swap = (GRPH_ENDIAN_NONE << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT); 2001 u32 pipe_config; 2002 u32 tmp, viewport_w, viewport_h; 2003 int r; 2004 bool bypass_lut = false; 2005 2006 /* no fb bound */ 2007 if (!atomic && !crtc->primary->fb) { 2008 DRM_DEBUG_KMS("No FB bound\n"); 2009 return 0; 2010 } 2011 2012 if (atomic) { 2013 amdgpu_fb = to_amdgpu_framebuffer(fb); 2014 target_fb = fb; 2015 } else { 2016 amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb); 2017 target_fb = crtc->primary->fb; 2018 } 2019 2020 /* If atomic, assume fb object is pinned & idle & fenced and 2021 * just update base pointers 2022 */ 2023 obj = amdgpu_fb->obj; 2024 rbo = gem_to_amdgpu_bo(obj); 2025 r = amdgpu_bo_reserve(rbo, false); 2026 if (unlikely(r != 0)) 2027 return r; 2028 2029 if (atomic) { 2030 fb_location = amdgpu_bo_gpu_offset(rbo); 2031 } else { 2032 r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location); 2033 if 
(unlikely(r != 0)) { 2034 amdgpu_bo_unreserve(rbo); 2035 return -EINVAL; 2036 } 2037 } 2038 2039 amdgpu_bo_get_tiling_flags(rbo, &tiling_flags); 2040 amdgpu_bo_unreserve(rbo); 2041 2042 pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG); 2043 2044 switch (target_fb->pixel_format) { 2045 case DRM_FORMAT_C8: 2046 fb_format = ((GRPH_DEPTH_8BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) | 2047 (GRPH_FORMAT_INDEXED << GRPH_CONTROL__GRPH_FORMAT__SHIFT)); 2048 break; 2049 case DRM_FORMAT_XRGB4444: 2050 case DRM_FORMAT_ARGB4444: 2051 fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) | 2052 (GRPH_FORMAT_ARGB1555 << GRPH_CONTROL__GRPH_FORMAT__SHIFT)); 2053 #ifdef __BIG_ENDIAN 2054 fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT); 2055 #endif 2056 break; 2057 case DRM_FORMAT_XRGB1555: 2058 case DRM_FORMAT_ARGB1555: 2059 fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) | 2060 (GRPH_FORMAT_ARGB1555 << GRPH_CONTROL__GRPH_FORMAT__SHIFT)); 2061 #ifdef __BIG_ENDIAN 2062 fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT); 2063 #endif 2064 break; 2065 case DRM_FORMAT_BGRX5551: 2066 case DRM_FORMAT_BGRA5551: 2067 fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) | 2068 (GRPH_FORMAT_BGRA5551 << GRPH_CONTROL__GRPH_FORMAT__SHIFT)); 2069 #ifdef __BIG_ENDIAN 2070 fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT); 2071 #endif 2072 break; 2073 case DRM_FORMAT_RGB565: 2074 fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) | 2075 (GRPH_FORMAT_ARGB565 << GRPH_CONTROL__GRPH_FORMAT__SHIFT)); 2076 #ifdef __BIG_ENDIAN 2077 fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT); 2078 #endif 2079 break; 2080 case DRM_FORMAT_XRGB8888: 2081 case DRM_FORMAT_ARGB8888: 2082 fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) | 2083 (GRPH_FORMAT_ARGB8888 << GRPH_CONTROL__GRPH_FORMAT__SHIFT)); 2084 #ifdef __BIG_ENDIAN 2085 fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT); 2086 #endif 2087 break; 2088 case DRM_FORMAT_XRGB2101010: 2089 case DRM_FORMAT_ARGB2101010: 2090 fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) | 2091 (GRPH_FORMAT_ARGB2101010 << GRPH_CONTROL__GRPH_FORMAT__SHIFT)); 2092 #ifdef __BIG_ENDIAN 2093 fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT); 2094 #endif 2095 /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */ 2096 bypass_lut = true; 2097 break; 2098 case DRM_FORMAT_BGRX1010102: 2099 case DRM_FORMAT_BGRA1010102: 2100 fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) | 2101 (GRPH_FORMAT_BGRA1010102 << GRPH_CONTROL__GRPH_FORMAT__SHIFT)); 2102 #ifdef __BIG_ENDIAN 2103 fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT); 2104 #endif 2105 /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */ 2106 bypass_lut = true; 2107 break; 2108 default: 2109 DRM_ERROR("Unsupported screen format %s\n", 2110 drm_get_format_name(target_fb->pixel_format)); 2111 return -EINVAL; 2112 } 2113 2114 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) { 2115 unsigned bankw, bankh, mtaspect, tile_split, num_banks; 2116 2117 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH); 2118 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT); 2119 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT); 2120 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT); 2121 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS); 
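		/* fold the macro-tile parameters from the BO's tiling flags into
		 * GRPH_CONTROL so the display controller scans out the 2D-tiled
		 * surface with the same layout it was rendered in */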
		fb_format |= (num_banks << GRPH_CONTROL__GRPH_NUM_BANKS__SHIFT);
		fb_format |= (GRPH_ARRAY_2D_TILED_THIN1 << GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT);
		fb_format |= (tile_split << GRPH_CONTROL__GRPH_TILE_SPLIT__SHIFT);
		fb_format |= (bankw << GRPH_CONTROL__GRPH_BANK_WIDTH__SHIFT);
		fb_format |= (bankh << GRPH_CONTROL__GRPH_BANK_HEIGHT__SHIFT);
		fb_format |= (mtaspect << GRPH_CONTROL__GRPH_MACRO_TILE_ASPECT__SHIFT);
		fb_format |= (DISPLAY_MICRO_TILING << GRPH_CONTROL__GRPH_MICRO_TILE_MODE__SHIFT);
	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
		fb_format |= (GRPH_ARRAY_1D_TILED_THIN1 << GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT);
	}

	fb_format |= (pipe_config << GRPH_CONTROL__GRPH_PIPE_CONFIG__SHIFT);

	dce_v8_0_vga_enable(crtc, false);

	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
	       upper_32_bits(fb_location));
	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
	       upper_32_bits(fb_location));
	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
	       (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
	       (u32)fb_location & GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS_MASK);
	WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
	WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);

	/*
	 * The LUT only has 256 slots for indexing by a 8 bpc fb. Bypass the LUT
	 * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to
	 * retain the full precision throughout the pipeline.
	 */
	WREG32_P(mmGRPH_LUT_10BIT_BYPASS_CONTROL + amdgpu_crtc->crtc_offset,
		 (bypass_lut ?
LUT_10BIT_BYPASS_EN : 0), 2156 ~LUT_10BIT_BYPASS_EN); 2157 2158 if (bypass_lut) 2159 DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n"); 2160 2161 WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0); 2162 WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0); 2163 WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0); 2164 WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0); 2165 WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width); 2166 WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height); 2167 2168 fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8); 2169 WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels); 2170 2171 dce_v8_0_grph_enable(crtc, true); 2172 2173 WREG32(mmLB_DESKTOP_HEIGHT + amdgpu_crtc->crtc_offset, 2174 target_fb->height); 2175 2176 x &= ~3; 2177 y &= ~1; 2178 WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset, 2179 (x << 16) | y); 2180 viewport_w = crtc->mode.hdisplay; 2181 viewport_h = (crtc->mode.vdisplay + 1) & ~1; 2182 WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset, 2183 (viewport_w << 16) | viewport_h); 2184 2185 /* pageflip setup */ 2186 /* make sure flip is at vb rather than hb */ 2187 tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset); 2188 tmp &= ~GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN_MASK; 2189 WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp); 2190 2191 /* set pageflip to happen only at start of vblank interval (front porch) */ 2192 WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 3); 2193 2194 if (!atomic && fb && fb != crtc->primary->fb) { 2195 amdgpu_fb = to_amdgpu_framebuffer(fb); 2196 rbo = gem_to_amdgpu_bo(amdgpu_fb->obj); 2197 r = amdgpu_bo_reserve(rbo, false); 2198 if (unlikely(r != 0)) 2199 return r; 2200 amdgpu_bo_unpin(rbo); 2201 amdgpu_bo_unreserve(rbo); 2202 } 2203 2204 /* Bytes per pixel may have changed */ 2205 dce_v8_0_bandwidth_update(adev); 2206 2207 return 0; 2208 } 2209 2210 static void dce_v8_0_set_interleave(struct drm_crtc *crtc, 2211 struct drm_display_mode *mode) 2212 { 2213 struct drm_device *dev = crtc->dev; 2214 struct amdgpu_device *adev = dev->dev_private; 2215 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2216 2217 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 2218 WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset, 2219 LB_DATA_FORMAT__INTERLEAVE_EN__SHIFT); 2220 else 2221 WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset, 0); 2222 } 2223 2224 static void dce_v8_0_crtc_load_lut(struct drm_crtc *crtc) 2225 { 2226 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2227 struct drm_device *dev = crtc->dev; 2228 struct amdgpu_device *adev = dev->dev_private; 2229 int i; 2230 2231 DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id); 2232 2233 WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, 2234 ((INPUT_CSC_BYPASS << INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE__SHIFT) | 2235 (INPUT_CSC_BYPASS << INPUT_CSC_CONTROL__INPUT_CSC_OVL_MODE__SHIFT))); 2236 WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset, 2237 PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_BYPASS_MASK); 2238 WREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset, 2239 PRESCALE_OVL_CONTROL__OVL_PRESCALE_BYPASS_MASK); 2240 WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset, 2241 ((INPUT_GAMMA_USE_LUT << INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE__SHIFT) | 2242 (INPUT_GAMMA_USE_LUT << INPUT_GAMMA_CONTROL__OVL_INPUT_GAMMA_MODE__SHIFT))); 2243 2244 WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0); 2245 2246 
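	/* program the legacy gamma LUT: full black/white input range, then
	 * 256 entries of 10-bit R/G/B packed into DC_LUT_30_COLOR */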
WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0); 2247 WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0); 2248 WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0); 2249 2250 WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff); 2251 WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff); 2252 WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff); 2253 2254 WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0); 2255 WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007); 2256 2257 WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0); 2258 for (i = 0; i < 256; i++) { 2259 WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset, 2260 (amdgpu_crtc->lut_r[i] << 20) | 2261 (amdgpu_crtc->lut_g[i] << 10) | 2262 (amdgpu_crtc->lut_b[i] << 0)); 2263 } 2264 2265 WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset, 2266 ((DEGAMMA_BYPASS << DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT) | 2267 (DEGAMMA_BYPASS << DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT) | 2268 (DEGAMMA_BYPASS << DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT))); 2269 WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset, 2270 ((GAMUT_REMAP_BYPASS << GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE__SHIFT) | 2271 (GAMUT_REMAP_BYPASS << GAMUT_REMAP_CONTROL__OVL_GAMUT_REMAP_MODE__SHIFT))); 2272 WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset, 2273 ((REGAMMA_BYPASS << REGAMMA_CONTROL__GRPH_REGAMMA_MODE__SHIFT) | 2274 (REGAMMA_BYPASS << REGAMMA_CONTROL__OVL_REGAMMA_MODE__SHIFT))); 2275 WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, 2276 ((OUTPUT_CSC_BYPASS << OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE__SHIFT) | 2277 (OUTPUT_CSC_BYPASS << OUTPUT_CSC_CONTROL__OUTPUT_CSC_OVL_MODE__SHIFT))); 2278 /* XXX match this to the depth of the crtc fmt block, move to modeset? */ 2279 WREG32(0x1a50 + amdgpu_crtc->crtc_offset, 0); 2280 /* XXX this only needs to be programmed once per crtc at startup, 2281 * not sure where the best place for it is 2282 */ 2283 WREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset, 2284 ALPHA_CONTROL__CURSOR_ALPHA_BLND_ENA_MASK); 2285 } 2286 2287 static int dce_v8_0_pick_dig_encoder(struct drm_encoder *encoder) 2288 { 2289 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 2290 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; 2291 2292 switch (amdgpu_encoder->encoder_id) { 2293 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: 2294 if (dig->linkb) 2295 return 1; 2296 else 2297 return 0; 2298 break; 2299 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: 2300 if (dig->linkb) 2301 return 3; 2302 else 2303 return 2; 2304 break; 2305 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: 2306 if (dig->linkb) 2307 return 5; 2308 else 2309 return 4; 2310 break; 2311 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: 2312 return 6; 2313 break; 2314 default: 2315 DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id); 2316 return 0; 2317 } 2318 } 2319 2320 /** 2321 * dce_v8_0_pick_pll - Allocate a PPLL for use by the crtc. 2322 * 2323 * @crtc: drm crtc 2324 * 2325 * Returns the PPLL (Pixel PLL) to be used by the crtc. For DP monitors 2326 * a single PPLL can be used for all DP crtcs/encoders. For non-DP 2327 * monitors a dedicated PPLL must be used. If a particular board has 2328 * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming 2329 * as there is no need to program the PLL itself. 
If we are not able to 2330 * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to 2331 * avoid messing up an existing monitor. 2332 * 2333 * Asic specific PLL information 2334 * 2335 * DCE 8.x 2336 * KB/KV 2337 * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) 2338 * CI 2339 * - PPLL0, PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC 2340 * 2341 */ 2342 static u32 dce_v8_0_pick_pll(struct drm_crtc *crtc) 2343 { 2344 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2345 struct drm_device *dev = crtc->dev; 2346 struct amdgpu_device *adev = dev->dev_private; 2347 u32 pll_in_use; 2348 int pll; 2349 2350 if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) { 2351 if (adev->clock.dp_extclk) 2352 /* skip PPLL programming if using ext clock */ 2353 return ATOM_PPLL_INVALID; 2354 else { 2355 /* use the same PPLL for all DP monitors */ 2356 pll = amdgpu_pll_get_shared_dp_ppll(crtc); 2357 if (pll != ATOM_PPLL_INVALID) 2358 return pll; 2359 } 2360 } else { 2361 /* use the same PPLL for all monitors with the same clock */ 2362 pll = amdgpu_pll_get_shared_nondp_ppll(crtc); 2363 if (pll != ATOM_PPLL_INVALID) 2364 return pll; 2365 } 2366 /* otherwise, pick one of the plls */ 2367 if ((adev->asic_type == CHIP_KABINI) || 2368 (adev->asic_type == CHIP_MULLINS)) { 2369 /* KB/ML has PPLL1 and PPLL2 */ 2370 pll_in_use = amdgpu_pll_get_use_mask(crtc); 2371 if (!(pll_in_use & (1 << ATOM_PPLL2))) 2372 return ATOM_PPLL2; 2373 if (!(pll_in_use & (1 << ATOM_PPLL1))) 2374 return ATOM_PPLL1; 2375 DRM_ERROR("unable to allocate a PPLL\n"); 2376 return ATOM_PPLL_INVALID; 2377 } else { 2378 /* CI/KV has PPLL0, PPLL1, and PPLL2 */ 2379 pll_in_use = amdgpu_pll_get_use_mask(crtc); 2380 if (!(pll_in_use & (1 << ATOM_PPLL2))) 2381 return ATOM_PPLL2; 2382 if (!(pll_in_use & (1 << ATOM_PPLL1))) 2383 return ATOM_PPLL1; 2384 if (!(pll_in_use & (1 << ATOM_PPLL0))) 2385 return ATOM_PPLL0; 2386 DRM_ERROR("unable to allocate a PPLL\n"); 2387 return ATOM_PPLL_INVALID; 2388 } 2389 return ATOM_PPLL_INVALID; 2390 } 2391 2392 static void dce_v8_0_lock_cursor(struct drm_crtc *crtc, bool lock) 2393 { 2394 struct amdgpu_device *adev = crtc->dev->dev_private; 2395 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2396 uint32_t cur_lock; 2397 2398 cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset); 2399 if (lock) 2400 cur_lock |= CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK; 2401 else 2402 cur_lock &= ~CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK; 2403 WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock); 2404 } 2405 2406 static void dce_v8_0_hide_cursor(struct drm_crtc *crtc) 2407 { 2408 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2409 struct amdgpu_device *adev = crtc->dev->dev_private; 2410 2411 WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, 2412 (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) | 2413 (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT)); 2414 } 2415 2416 static void dce_v8_0_show_cursor(struct drm_crtc *crtc) 2417 { 2418 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2419 struct amdgpu_device *adev = crtc->dev->dev_private; 2420 2421 WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, 2422 upper_32_bits(amdgpu_crtc->cursor_addr)); 2423 WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, 2424 lower_32_bits(amdgpu_crtc->cursor_addr)); 2425 2426 WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, 2427 CUR_CONTROL__CURSOR_EN_MASK | 2428 (CURSOR_24_8_PRE_MULT << 
CUR_CONTROL__CURSOR_MODE__SHIFT) | 2429 (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT)); 2430 } 2431 2432 static int dce_v8_0_cursor_move_locked(struct drm_crtc *crtc, 2433 int x, int y) 2434 { 2435 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2436 struct amdgpu_device *adev = crtc->dev->dev_private; 2437 int xorigin = 0, yorigin = 0; 2438 2439 /* avivo cursor are offset into the total surface */ 2440 x += crtc->x; 2441 y += crtc->y; 2442 DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y); 2443 2444 if (x < 0) { 2445 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1); 2446 x = 0; 2447 } 2448 if (y < 0) { 2449 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1); 2450 y = 0; 2451 } 2452 2453 WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y); 2454 WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin); 2455 WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, 2456 ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1)); 2457 2458 amdgpu_crtc->cursor_x = x; 2459 amdgpu_crtc->cursor_y = y; 2460 2461 return 0; 2462 } 2463 2464 static int dce_v8_0_crtc_cursor_move(struct drm_crtc *crtc, 2465 int x, int y) 2466 { 2467 int ret; 2468 2469 dce_v8_0_lock_cursor(crtc, true); 2470 ret = dce_v8_0_cursor_move_locked(crtc, x, y); 2471 dce_v8_0_lock_cursor(crtc, false); 2472 2473 return ret; 2474 } 2475 2476 static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc, 2477 struct drm_file *file_priv, 2478 uint32_t handle, 2479 uint32_t width, 2480 uint32_t height, 2481 int32_t hot_x, 2482 int32_t hot_y) 2483 { 2484 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2485 struct drm_gem_object *obj; 2486 struct amdgpu_bo *aobj; 2487 int ret; 2488 2489 if (!handle) { 2490 /* turn off cursor */ 2491 dce_v8_0_hide_cursor(crtc); 2492 obj = NULL; 2493 goto unpin; 2494 } 2495 2496 if ((width > amdgpu_crtc->max_cursor_width) || 2497 (height > amdgpu_crtc->max_cursor_height)) { 2498 DRM_ERROR("bad cursor width or height %d x %d\n", width, height); 2499 return -EINVAL; 2500 } 2501 2502 obj = drm_gem_object_lookup(crtc->dev, file_priv, handle); 2503 if (!obj) { 2504 DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id); 2505 return -ENOENT; 2506 } 2507 2508 aobj = gem_to_amdgpu_bo(obj); 2509 ret = amdgpu_bo_reserve(aobj, false); 2510 if (ret != 0) { 2511 drm_gem_object_unreference_unlocked(obj); 2512 return ret; 2513 } 2514 2515 ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr); 2516 amdgpu_bo_unreserve(aobj); 2517 if (ret) { 2518 DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret); 2519 drm_gem_object_unreference_unlocked(obj); 2520 return ret; 2521 } 2522 2523 amdgpu_crtc->cursor_width = width; 2524 amdgpu_crtc->cursor_height = height; 2525 2526 dce_v8_0_lock_cursor(crtc, true); 2527 2528 if (hot_x != amdgpu_crtc->cursor_hot_x || 2529 hot_y != amdgpu_crtc->cursor_hot_y) { 2530 int x, y; 2531 2532 x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x; 2533 y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y; 2534 2535 dce_v8_0_cursor_move_locked(crtc, x, y); 2536 2537 amdgpu_crtc->cursor_hot_x = hot_x; 2538 amdgpu_crtc->cursor_hot_y = hot_y; 2539 } 2540 2541 dce_v8_0_show_cursor(crtc); 2542 dce_v8_0_lock_cursor(crtc, false); 2543 2544 unpin: 2545 if (amdgpu_crtc->cursor_bo) { 2546 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo); 2547 ret = amdgpu_bo_reserve(aobj, false); 2548 if (likely(ret == 0)) { 2549 
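			/* the previous cursor BO is no longer scanned out, drop its pin */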
amdgpu_bo_unpin(aobj); 2550 amdgpu_bo_unreserve(aobj); 2551 } 2552 drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo); 2553 } 2554 2555 amdgpu_crtc->cursor_bo = obj; 2556 return 0; 2557 } 2558 2559 static void dce_v8_0_cursor_reset(struct drm_crtc *crtc) 2560 { 2561 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2562 2563 if (amdgpu_crtc->cursor_bo) { 2564 dce_v8_0_lock_cursor(crtc, true); 2565 2566 dce_v8_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x, 2567 amdgpu_crtc->cursor_y); 2568 2569 dce_v8_0_show_cursor(crtc); 2570 2571 dce_v8_0_lock_cursor(crtc, false); 2572 } 2573 } 2574 2575 static void dce_v8_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, 2576 u16 *blue, uint32_t start, uint32_t size) 2577 { 2578 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2579 int end = (start + size > 256) ? 256 : start + size, i; 2580 2581 /* userspace palettes are always correct as is */ 2582 for (i = start; i < end; i++) { 2583 amdgpu_crtc->lut_r[i] = red[i] >> 6; 2584 amdgpu_crtc->lut_g[i] = green[i] >> 6; 2585 amdgpu_crtc->lut_b[i] = blue[i] >> 6; 2586 } 2587 dce_v8_0_crtc_load_lut(crtc); 2588 } 2589 2590 static void dce_v8_0_crtc_destroy(struct drm_crtc *crtc) 2591 { 2592 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2593 2594 drm_crtc_cleanup(crtc); 2595 kfree(amdgpu_crtc); 2596 } 2597 2598 static const struct drm_crtc_funcs dce_v8_0_crtc_funcs = { 2599 .cursor_set2 = dce_v8_0_crtc_cursor_set2, 2600 .cursor_move = dce_v8_0_crtc_cursor_move, 2601 .gamma_set = dce_v8_0_crtc_gamma_set, 2602 .set_config = amdgpu_crtc_set_config, 2603 .destroy = dce_v8_0_crtc_destroy, 2604 .page_flip = amdgpu_crtc_page_flip, 2605 }; 2606 2607 static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode) 2608 { 2609 struct drm_device *dev = crtc->dev; 2610 struct amdgpu_device *adev = dev->dev_private; 2611 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2612 unsigned type; 2613 2614 switch (mode) { 2615 case DRM_MODE_DPMS_ON: 2616 amdgpu_crtc->enabled = true; 2617 amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE); 2618 dce_v8_0_vga_enable(crtc, true); 2619 amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE); 2620 dce_v8_0_vga_enable(crtc, false); 2621 /* Make sure VBLANK and PFLIP interrupts are still enabled */ 2622 type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id); 2623 amdgpu_irq_update(adev, &adev->crtc_irq, type); 2624 amdgpu_irq_update(adev, &adev->pageflip_irq, type); 2625 drm_vblank_on(dev, amdgpu_crtc->crtc_id); 2626 dce_v8_0_crtc_load_lut(crtc); 2627 break; 2628 case DRM_MODE_DPMS_STANDBY: 2629 case DRM_MODE_DPMS_SUSPEND: 2630 case DRM_MODE_DPMS_OFF: 2631 drm_vblank_off(dev, amdgpu_crtc->crtc_id); 2632 if (amdgpu_crtc->enabled) { 2633 dce_v8_0_vga_enable(crtc, true); 2634 amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE); 2635 dce_v8_0_vga_enable(crtc, false); 2636 } 2637 amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE); 2638 amdgpu_crtc->enabled = false; 2639 break; 2640 } 2641 /* adjust pm to dpms */ 2642 amdgpu_pm_compute_clocks(adev); 2643 } 2644 2645 static void dce_v8_0_crtc_prepare(struct drm_crtc *crtc) 2646 { 2647 /* disable crtc pair power gating before programming */ 2648 amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE); 2649 amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE); 2650 dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); 2651 } 2652 2653 static void dce_v8_0_crtc_commit(struct drm_crtc *crtc) 2654 { 2655 dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON); 2656 amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE); 2657 } 2658 2659 static void 
dce_v8_0_crtc_disable(struct drm_crtc *crtc) 2660 { 2661 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2662 struct drm_device *dev = crtc->dev; 2663 struct amdgpu_device *adev = dev->dev_private; 2664 struct amdgpu_atom_ss ss; 2665 int i; 2666 2667 dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); 2668 if (crtc->primary->fb) { 2669 int r; 2670 struct amdgpu_framebuffer *amdgpu_fb; 2671 struct amdgpu_bo *rbo; 2672 2673 amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb); 2674 rbo = gem_to_amdgpu_bo(amdgpu_fb->obj); 2675 r = amdgpu_bo_reserve(rbo, false); 2676 if (unlikely(r)) 2677 DRM_ERROR("failed to reserve rbo before unpin\n"); 2678 else { 2679 amdgpu_bo_unpin(rbo); 2680 amdgpu_bo_unreserve(rbo); 2681 } 2682 } 2683 /* disable the GRPH */ 2684 dce_v8_0_grph_enable(crtc, false); 2685 2686 amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE); 2687 2688 for (i = 0; i < adev->mode_info.num_crtc; i++) { 2689 if (adev->mode_info.crtcs[i] && 2690 adev->mode_info.crtcs[i]->enabled && 2691 i != amdgpu_crtc->crtc_id && 2692 amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) { 2693 /* one other crtc is using this pll don't turn 2694 * off the pll 2695 */ 2696 goto done; 2697 } 2698 } 2699 2700 switch (amdgpu_crtc->pll_id) { 2701 case ATOM_PPLL1: 2702 case ATOM_PPLL2: 2703 /* disable the ppll */ 2704 amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id, 2705 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss); 2706 break; 2707 case ATOM_PPLL0: 2708 /* disable the ppll */ 2709 if ((adev->asic_type == CHIP_KAVERI) || 2710 (adev->asic_type == CHIP_BONAIRE) || 2711 (adev->asic_type == CHIP_HAWAII)) 2712 amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id, 2713 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss); 2714 break; 2715 default: 2716 break; 2717 } 2718 done: 2719 amdgpu_crtc->pll_id = ATOM_PPLL_INVALID; 2720 amdgpu_crtc->adjusted_clock = 0; 2721 amdgpu_crtc->encoder = NULL; 2722 amdgpu_crtc->connector = NULL; 2723 } 2724 2725 static int dce_v8_0_crtc_mode_set(struct drm_crtc *crtc, 2726 struct drm_display_mode *mode, 2727 struct drm_display_mode *adjusted_mode, 2728 int x, int y, struct drm_framebuffer *old_fb) 2729 { 2730 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2731 2732 if (!amdgpu_crtc->adjusted_clock) 2733 return -EINVAL; 2734 2735 amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode); 2736 amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode); 2737 dce_v8_0_crtc_do_set_base(crtc, old_fb, x, y, 0); 2738 amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode); 2739 amdgpu_atombios_crtc_scaler_setup(crtc); 2740 dce_v8_0_cursor_reset(crtc); 2741 /* update the hw version fpr dpm */ 2742 amdgpu_crtc->hw_mode = *adjusted_mode; 2743 2744 return 0; 2745 } 2746 2747 static bool dce_v8_0_crtc_mode_fixup(struct drm_crtc *crtc, 2748 const struct drm_display_mode *mode, 2749 struct drm_display_mode *adjusted_mode) 2750 { 2751 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2752 struct drm_device *dev = crtc->dev; 2753 struct drm_encoder *encoder; 2754 2755 /* assign the encoder to the amdgpu crtc to avoid repeated lookups later */ 2756 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 2757 if (encoder->crtc == crtc) { 2758 amdgpu_crtc->encoder = encoder; 2759 amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder); 2760 break; 2761 } 2762 } 2763 if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) { 2764 amdgpu_crtc->encoder = NULL; 2765 amdgpu_crtc->connector = NULL; 
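		/* no encoder/connector is attached to this crtc, nothing to program */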
2766 return false; 2767 } 2768 if (!amdgpu_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode)) 2769 return false; 2770 if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode)) 2771 return false; 2772 /* pick pll */ 2773 amdgpu_crtc->pll_id = dce_v8_0_pick_pll(crtc); 2774 /* if we can't get a PPLL for a non-DP encoder, fail */ 2775 if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) && 2776 !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) 2777 return false; 2778 2779 return true; 2780 } 2781 2782 static int dce_v8_0_crtc_set_base(struct drm_crtc *crtc, int x, int y, 2783 struct drm_framebuffer *old_fb) 2784 { 2785 return dce_v8_0_crtc_do_set_base(crtc, old_fb, x, y, 0); 2786 } 2787 2788 static int dce_v8_0_crtc_set_base_atomic(struct drm_crtc *crtc, 2789 struct drm_framebuffer *fb, 2790 int x, int y, enum mode_set_atomic state) 2791 { 2792 return dce_v8_0_crtc_do_set_base(crtc, fb, x, y, 1); 2793 } 2794 2795 static const struct drm_crtc_helper_funcs dce_v8_0_crtc_helper_funcs = { 2796 .dpms = dce_v8_0_crtc_dpms, 2797 .mode_fixup = dce_v8_0_crtc_mode_fixup, 2798 .mode_set = dce_v8_0_crtc_mode_set, 2799 .mode_set_base = dce_v8_0_crtc_set_base, 2800 .mode_set_base_atomic = dce_v8_0_crtc_set_base_atomic, 2801 .prepare = dce_v8_0_crtc_prepare, 2802 .commit = dce_v8_0_crtc_commit, 2803 .load_lut = dce_v8_0_crtc_load_lut, 2804 .disable = dce_v8_0_crtc_disable, 2805 }; 2806 2807 static int dce_v8_0_crtc_init(struct amdgpu_device *adev, int index) 2808 { 2809 struct amdgpu_crtc *amdgpu_crtc; 2810 int i; 2811 2812 amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) + 2813 (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL); 2814 if (amdgpu_crtc == NULL) 2815 return -ENOMEM; 2816 2817 drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v8_0_crtc_funcs); 2818 2819 drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256); 2820 amdgpu_crtc->crtc_id = index; 2821 adev->mode_info.crtcs[index] = amdgpu_crtc; 2822 2823 amdgpu_crtc->max_cursor_width = CIK_CURSOR_WIDTH; 2824 amdgpu_crtc->max_cursor_height = CIK_CURSOR_HEIGHT; 2825 adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width; 2826 adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height; 2827 2828 for (i = 0; i < 256; i++) { 2829 amdgpu_crtc->lut_r[i] = i << 2; 2830 amdgpu_crtc->lut_g[i] = i << 2; 2831 amdgpu_crtc->lut_b[i] = i << 2; 2832 } 2833 2834 amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id]; 2835 2836 amdgpu_crtc->pll_id = ATOM_PPLL_INVALID; 2837 amdgpu_crtc->adjusted_clock = 0; 2838 amdgpu_crtc->encoder = NULL; 2839 amdgpu_crtc->connector = NULL; 2840 drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v8_0_crtc_helper_funcs); 2841 2842 return 0; 2843 } 2844 2845 static int dce_v8_0_early_init(void *handle) 2846 { 2847 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2848 2849 adev->audio_endpt_rreg = &dce_v8_0_audio_endpt_rreg; 2850 adev->audio_endpt_wreg = &dce_v8_0_audio_endpt_wreg; 2851 2852 dce_v8_0_set_display_funcs(adev); 2853 dce_v8_0_set_irq_funcs(adev); 2854 2855 switch (adev->asic_type) { 2856 case CHIP_BONAIRE: 2857 case CHIP_HAWAII: 2858 adev->mode_info.num_crtc = 6; 2859 adev->mode_info.num_hpd = 6; 2860 adev->mode_info.num_dig = 6; 2861 break; 2862 case CHIP_KAVERI: 2863 adev->mode_info.num_crtc = 4; 2864 adev->mode_info.num_hpd = 6; 2865 adev->mode_info.num_dig = 7; 2866 break; 2867 case CHIP_KABINI: 2868 case CHIP_MULLINS: 2869 adev->mode_info.num_crtc = 2; 2870 adev->mode_info.num_hpd = 6; 2871 adev->mode_info.num_dig = 6; /* ? 
*/ 2872 break; 2873 default: 2874 /* FIXME: not supported yet */ 2875 return -EINVAL; 2876 } 2877 2878 return 0; 2879 } 2880 2881 static int dce_v8_0_sw_init(void *handle) 2882 { 2883 int r, i; 2884 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2885 2886 for (i = 0; i < adev->mode_info.num_crtc; i++) { 2887 r = amdgpu_irq_add_id(adev, i + 1, &adev->crtc_irq); 2888 if (r) 2889 return r; 2890 } 2891 2892 for (i = 8; i < 20; i += 2) { 2893 r = amdgpu_irq_add_id(adev, i, &adev->pageflip_irq); 2894 if (r) 2895 return r; 2896 } 2897 2898 /* HPD hotplug */ 2899 r = amdgpu_irq_add_id(adev, 42, &adev->hpd_irq); 2900 if (r) 2901 return r; 2902 2903 adev->ddev->mode_config.funcs = &amdgpu_mode_funcs; 2904 2905 adev->ddev->mode_config.max_width = 16384; 2906 adev->ddev->mode_config.max_height = 16384; 2907 2908 adev->ddev->mode_config.preferred_depth = 24; 2909 adev->ddev->mode_config.prefer_shadow = 1; 2910 2911 adev->ddev->mode_config.fb_base = adev->mc.aper_base; 2912 2913 r = amdgpu_modeset_create_props(adev); 2914 if (r) 2915 return r; 2916 2917 adev->ddev->mode_config.max_width = 16384; 2918 adev->ddev->mode_config.max_height = 16384; 2919 2920 /* allocate crtcs */ 2921 for (i = 0; i < adev->mode_info.num_crtc; i++) { 2922 r = dce_v8_0_crtc_init(adev, i); 2923 if (r) 2924 return r; 2925 } 2926 2927 if (amdgpu_atombios_get_connector_info_from_object_table(adev)) 2928 amdgpu_print_display_setup(adev->ddev); 2929 else 2930 return -EINVAL; 2931 2932 /* setup afmt */ 2933 r = dce_v8_0_afmt_init(adev); 2934 if (r) 2935 return r; 2936 2937 r = dce_v8_0_audio_init(adev); 2938 if (r) 2939 return r; 2940 2941 drm_kms_helper_poll_init(adev->ddev); 2942 2943 adev->mode_info.mode_config_initialized = true; 2944 return 0; 2945 } 2946 2947 static int dce_v8_0_sw_fini(void *handle) 2948 { 2949 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2950 2951 kfree(adev->mode_info.bios_hardcoded_edid); 2952 2953 drm_kms_helper_poll_fini(adev->ddev); 2954 2955 dce_v8_0_audio_fini(adev); 2956 2957 dce_v8_0_afmt_fini(adev); 2958 2959 drm_mode_config_cleanup(adev->ddev); 2960 adev->mode_info.mode_config_initialized = false; 2961 2962 return 0; 2963 } 2964 2965 static int dce_v8_0_hw_init(void *handle) 2966 { 2967 int i; 2968 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2969 2970 /* init dig PHYs, disp eng pll */ 2971 amdgpu_atombios_encoder_init_dig(adev); 2972 amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk); 2973 2974 /* initialize hpd */ 2975 dce_v8_0_hpd_init(adev); 2976 2977 for (i = 0; i < adev->mode_info.audio.num_pins; i++) { 2978 dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); 2979 } 2980 2981 dce_v8_0_pageflip_interrupt_init(adev); 2982 2983 return 0; 2984 } 2985 2986 static int dce_v8_0_hw_fini(void *handle) 2987 { 2988 int i; 2989 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2990 2991 dce_v8_0_hpd_fini(adev); 2992 2993 for (i = 0; i < adev->mode_info.audio.num_pins; i++) { 2994 dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); 2995 } 2996 2997 dce_v8_0_pageflip_interrupt_fini(adev); 2998 2999 return 0; 3000 } 3001 3002 static int dce_v8_0_suspend(void *handle) 3003 { 3004 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3005 3006 amdgpu_atombios_scratch_regs_save(adev); 3007 3008 return dce_v8_0_hw_fini(handle); 3009 } 3010 3011 static int dce_v8_0_resume(void *handle) 3012 { 3013 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3014 int ret; 3015 3016 ret = 
dce_v8_0_hw_init(handle); 3017 3018 amdgpu_atombios_scratch_regs_restore(adev); 3019 3020 /* turn on the BL */ 3021 if (adev->mode_info.bl_encoder) { 3022 u8 bl_level = amdgpu_display_backlight_get_level(adev, 3023 adev->mode_info.bl_encoder); 3024 amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder, 3025 bl_level); 3026 } 3027 3028 return ret; 3029 } 3030 3031 static bool dce_v8_0_is_idle(void *handle) 3032 { 3033 return true; 3034 } 3035 3036 static int dce_v8_0_wait_for_idle(void *handle) 3037 { 3038 return 0; 3039 } 3040 3041 static void dce_v8_0_print_status(void *handle) 3042 { 3043 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3044 3045 dev_info(adev->dev, "DCE 8.x registers\n"); 3046 /* XXX todo */ 3047 } 3048 3049 static int dce_v8_0_soft_reset(void *handle) 3050 { 3051 u32 srbm_soft_reset = 0, tmp; 3052 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3053 3054 if (dce_v8_0_is_display_hung(adev)) 3055 srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK; 3056 3057 if (srbm_soft_reset) { 3058 dce_v8_0_print_status((void *)adev); 3059 3060 tmp = RREG32(mmSRBM_SOFT_RESET); 3061 tmp |= srbm_soft_reset; 3062 dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); 3063 WREG32(mmSRBM_SOFT_RESET, tmp); 3064 tmp = RREG32(mmSRBM_SOFT_RESET); 3065 3066 udelay(50); 3067 3068 tmp &= ~srbm_soft_reset; 3069 WREG32(mmSRBM_SOFT_RESET, tmp); 3070 tmp = RREG32(mmSRBM_SOFT_RESET); 3071 3072 /* Wait a little for things to settle down */ 3073 udelay(50); 3074 dce_v8_0_print_status((void *)adev); 3075 } 3076 return 0; 3077 } 3078 3079 static void dce_v8_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev, 3080 int crtc, 3081 enum amdgpu_interrupt_state state) 3082 { 3083 u32 reg_block, lb_interrupt_mask; 3084 3085 if (crtc >= adev->mode_info.num_crtc) { 3086 DRM_DEBUG("invalid crtc %d\n", crtc); 3087 return; 3088 } 3089 3090 switch (crtc) { 3091 case 0: 3092 reg_block = CRTC0_REGISTER_OFFSET; 3093 break; 3094 case 1: 3095 reg_block = CRTC1_REGISTER_OFFSET; 3096 break; 3097 case 2: 3098 reg_block = CRTC2_REGISTER_OFFSET; 3099 break; 3100 case 3: 3101 reg_block = CRTC3_REGISTER_OFFSET; 3102 break; 3103 case 4: 3104 reg_block = CRTC4_REGISTER_OFFSET; 3105 break; 3106 case 5: 3107 reg_block = CRTC5_REGISTER_OFFSET; 3108 break; 3109 default: 3110 DRM_DEBUG("invalid crtc %d\n", crtc); 3111 return; 3112 } 3113 3114 switch (state) { 3115 case AMDGPU_IRQ_STATE_DISABLE: 3116 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block); 3117 lb_interrupt_mask &= ~LB_INTERRUPT_MASK__VBLANK_INTERRUPT_MASK_MASK; 3118 WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask); 3119 break; 3120 case AMDGPU_IRQ_STATE_ENABLE: 3121 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block); 3122 lb_interrupt_mask |= LB_INTERRUPT_MASK__VBLANK_INTERRUPT_MASK_MASK; 3123 WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask); 3124 break; 3125 default: 3126 break; 3127 } 3128 } 3129 3130 static void dce_v8_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev, 3131 int crtc, 3132 enum amdgpu_interrupt_state state) 3133 { 3134 u32 reg_block, lb_interrupt_mask; 3135 3136 if (crtc >= adev->mode_info.num_crtc) { 3137 DRM_DEBUG("invalid crtc %d\n", crtc); 3138 return; 3139 } 3140 3141 switch (crtc) { 3142 case 0: 3143 reg_block = CRTC0_REGISTER_OFFSET; 3144 break; 3145 case 1: 3146 reg_block = CRTC1_REGISTER_OFFSET; 3147 break; 3148 case 2: 3149 reg_block = CRTC2_REGISTER_OFFSET; 3150 break; 3151 case 3: 3152 reg_block = CRTC3_REGISTER_OFFSET; 3153 break; 3154 case 4: 
3155 reg_block = CRTC4_REGISTER_OFFSET; 3156 break; 3157 case 5: 3158 reg_block = CRTC5_REGISTER_OFFSET; 3159 break; 3160 default: 3161 DRM_DEBUG("invalid crtc %d\n", crtc); 3162 return; 3163 } 3164 3165 switch (state) { 3166 case AMDGPU_IRQ_STATE_DISABLE: 3167 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block); 3168 lb_interrupt_mask &= ~LB_INTERRUPT_MASK__VLINE_INTERRUPT_MASK_MASK; 3169 WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask); 3170 break; 3171 case AMDGPU_IRQ_STATE_ENABLE: 3172 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block); 3173 lb_interrupt_mask |= LB_INTERRUPT_MASK__VLINE_INTERRUPT_MASK_MASK; 3174 WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask); 3175 break; 3176 default: 3177 break; 3178 } 3179 } 3180 3181 static int dce_v8_0_set_hpd_interrupt_state(struct amdgpu_device *adev, 3182 struct amdgpu_irq_src *src, 3183 unsigned type, 3184 enum amdgpu_interrupt_state state) 3185 { 3186 u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl; 3187 3188 switch (type) { 3189 case AMDGPU_HPD_1: 3190 dc_hpd_int_cntl_reg = mmDC_HPD1_INT_CONTROL; 3191 break; 3192 case AMDGPU_HPD_2: 3193 dc_hpd_int_cntl_reg = mmDC_HPD2_INT_CONTROL; 3194 break; 3195 case AMDGPU_HPD_3: 3196 dc_hpd_int_cntl_reg = mmDC_HPD3_INT_CONTROL; 3197 break; 3198 case AMDGPU_HPD_4: 3199 dc_hpd_int_cntl_reg = mmDC_HPD4_INT_CONTROL; 3200 break; 3201 case AMDGPU_HPD_5: 3202 dc_hpd_int_cntl_reg = mmDC_HPD5_INT_CONTROL; 3203 break; 3204 case AMDGPU_HPD_6: 3205 dc_hpd_int_cntl_reg = mmDC_HPD6_INT_CONTROL; 3206 break; 3207 default: 3208 DRM_DEBUG("invalid hdp %d\n", type); 3209 return 0; 3210 } 3211 3212 switch (state) { 3213 case AMDGPU_IRQ_STATE_DISABLE: 3214 dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg); 3215 dc_hpd_int_cntl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK; 3216 WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl); 3217 break; 3218 case AMDGPU_IRQ_STATE_ENABLE: 3219 dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg); 3220 dc_hpd_int_cntl |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK; 3221 WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl); 3222 break; 3223 default: 3224 break; 3225 } 3226 3227 return 0; 3228 } 3229 3230 static int dce_v8_0_set_crtc_interrupt_state(struct amdgpu_device *adev, 3231 struct amdgpu_irq_src *src, 3232 unsigned type, 3233 enum amdgpu_interrupt_state state) 3234 { 3235 switch (type) { 3236 case AMDGPU_CRTC_IRQ_VBLANK1: 3237 dce_v8_0_set_crtc_vblank_interrupt_state(adev, 0, state); 3238 break; 3239 case AMDGPU_CRTC_IRQ_VBLANK2: 3240 dce_v8_0_set_crtc_vblank_interrupt_state(adev, 1, state); 3241 break; 3242 case AMDGPU_CRTC_IRQ_VBLANK3: 3243 dce_v8_0_set_crtc_vblank_interrupt_state(adev, 2, state); 3244 break; 3245 case AMDGPU_CRTC_IRQ_VBLANK4: 3246 dce_v8_0_set_crtc_vblank_interrupt_state(adev, 3, state); 3247 break; 3248 case AMDGPU_CRTC_IRQ_VBLANK5: 3249 dce_v8_0_set_crtc_vblank_interrupt_state(adev, 4, state); 3250 break; 3251 case AMDGPU_CRTC_IRQ_VBLANK6: 3252 dce_v8_0_set_crtc_vblank_interrupt_state(adev, 5, state); 3253 break; 3254 case AMDGPU_CRTC_IRQ_VLINE1: 3255 dce_v8_0_set_crtc_vline_interrupt_state(adev, 0, state); 3256 break; 3257 case AMDGPU_CRTC_IRQ_VLINE2: 3258 dce_v8_0_set_crtc_vline_interrupt_state(adev, 1, state); 3259 break; 3260 case AMDGPU_CRTC_IRQ_VLINE3: 3261 dce_v8_0_set_crtc_vline_interrupt_state(adev, 2, state); 3262 break; 3263 case AMDGPU_CRTC_IRQ_VLINE4: 3264 dce_v8_0_set_crtc_vline_interrupt_state(adev, 3, state); 3265 break; 3266 case AMDGPU_CRTC_IRQ_VLINE5: 3267 dce_v8_0_set_crtc_vline_interrupt_state(adev, 4, state); 3268 break; 3269 case 
AMDGPU_CRTC_IRQ_VLINE6: 3270 dce_v8_0_set_crtc_vline_interrupt_state(adev, 5, state); 3271 break; 3272 default: 3273 break; 3274 } 3275 return 0; 3276 } 3277 3278 static int dce_v8_0_crtc_irq(struct amdgpu_device *adev, 3279 struct amdgpu_irq_src *source, 3280 struct amdgpu_iv_entry *entry) 3281 { 3282 unsigned crtc = entry->src_id - 1; 3283 uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg); 3284 unsigned irq_type = amdgpu_crtc_idx_to_irq_type(adev, crtc); 3285 3286 switch (entry->src_data) { 3287 case 0: /* vblank */ 3288 if (disp_int & interrupt_status_offsets[crtc].vblank) 3289 WREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc], LB_VBLANK_STATUS__VBLANK_ACK_MASK); 3290 else 3291 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); 3292 3293 if (amdgpu_irq_enabled(adev, source, irq_type)) { 3294 drm_handle_vblank(adev->ddev, crtc); 3295 } 3296 DRM_DEBUG("IH: D%d vblank\n", crtc + 1); 3297 3298 break; 3299 case 1: /* vline */ 3300 if (disp_int & interrupt_status_offsets[crtc].vline) 3301 WREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc], LB_VLINE_STATUS__VLINE_ACK_MASK); 3302 else 3303 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); 3304 3305 DRM_DEBUG("IH: D%d vline\n", crtc + 1); 3306 3307 break; 3308 default: 3309 DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data); 3310 break; 3311 } 3312 3313 return 0; 3314 } 3315 3316 static int dce_v8_0_set_pageflip_interrupt_state(struct amdgpu_device *adev, 3317 struct amdgpu_irq_src *src, 3318 unsigned type, 3319 enum amdgpu_interrupt_state state) 3320 { 3321 u32 reg; 3322 3323 if (type >= adev->mode_info.num_crtc) { 3324 DRM_ERROR("invalid pageflip crtc %d\n", type); 3325 return -EINVAL; 3326 } 3327 3328 reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]); 3329 if (state == AMDGPU_IRQ_STATE_DISABLE) 3330 WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type], 3331 reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK); 3332 else 3333 WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type], 3334 reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK); 3335 3336 return 0; 3337 } 3338 3339 static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev, 3340 struct amdgpu_irq_src *source, 3341 struct amdgpu_iv_entry *entry) 3342 { 3343 unsigned long flags; 3344 unsigned crtc_id; 3345 struct amdgpu_crtc *amdgpu_crtc; 3346 struct amdgpu_flip_work *works; 3347 3348 crtc_id = (entry->src_id - 8) >> 1; 3349 amdgpu_crtc = adev->mode_info.crtcs[crtc_id]; 3350 3351 if (crtc_id >= adev->mode_info.num_crtc) { 3352 DRM_ERROR("invalid pageflip crtc %d\n", crtc_id); 3353 return -EINVAL; 3354 } 3355 3356 if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) & 3357 GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK) 3358 WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id], 3359 GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK); 3360 3361 /* IRQ could occur when in initial stage */ 3362 if (amdgpu_crtc == NULL) 3363 return 0; 3364 3365 spin_lock_irqsave(&adev->ddev->event_lock, flags); 3366 works = amdgpu_crtc->pflip_works; 3367 if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){ 3368 DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != " 3369 "AMDGPU_FLIP_SUBMITTED(%d)\n", 3370 amdgpu_crtc->pflip_status, 3371 AMDGPU_FLIP_SUBMITTED); 3372 spin_unlock_irqrestore(&adev->ddev->event_lock, flags); 3373 return 0; 3374 } 3375 3376 /* page flip completed. 
clean up */ 3377 amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE; 3378 amdgpu_crtc->pflip_works = NULL; 3379 3380 /* wakeup usersapce */ 3381 if (works->event) 3382 drm_send_vblank_event(adev->ddev, crtc_id, works->event); 3383 3384 spin_unlock_irqrestore(&adev->ddev->event_lock, flags); 3385 3386 drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id); 3387 schedule_work(&works->unpin_work); 3388 3389 return 0; 3390 } 3391 3392 static int dce_v8_0_hpd_irq(struct amdgpu_device *adev, 3393 struct amdgpu_irq_src *source, 3394 struct amdgpu_iv_entry *entry) 3395 { 3396 uint32_t disp_int, mask, int_control, tmp; 3397 unsigned hpd; 3398 3399 if (entry->src_data >= adev->mode_info.num_hpd) { 3400 DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data); 3401 return 0; 3402 } 3403 3404 hpd = entry->src_data; 3405 disp_int = RREG32(interrupt_status_offsets[hpd].reg); 3406 mask = interrupt_status_offsets[hpd].hpd; 3407 int_control = hpd_int_control_offsets[hpd]; 3408 3409 if (disp_int & mask) { 3410 tmp = RREG32(int_control); 3411 tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK; 3412 WREG32(int_control, tmp); 3413 schedule_work(&adev->hotplug_work); 3414 DRM_DEBUG("IH: HPD%d\n", hpd + 1); 3415 } 3416 3417 return 0; 3418 3419 } 3420 3421 static int dce_v8_0_set_clockgating_state(void *handle, 3422 enum amd_clockgating_state state) 3423 { 3424 return 0; 3425 } 3426 3427 static int dce_v8_0_set_powergating_state(void *handle, 3428 enum amd_powergating_state state) 3429 { 3430 return 0; 3431 } 3432 3433 const struct amd_ip_funcs dce_v8_0_ip_funcs = { 3434 .early_init = dce_v8_0_early_init, 3435 .late_init = NULL, 3436 .sw_init = dce_v8_0_sw_init, 3437 .sw_fini = dce_v8_0_sw_fini, 3438 .hw_init = dce_v8_0_hw_init, 3439 .hw_fini = dce_v8_0_hw_fini, 3440 .suspend = dce_v8_0_suspend, 3441 .resume = dce_v8_0_resume, 3442 .is_idle = dce_v8_0_is_idle, 3443 .wait_for_idle = dce_v8_0_wait_for_idle, 3444 .soft_reset = dce_v8_0_soft_reset, 3445 .print_status = dce_v8_0_print_status, 3446 .set_clockgating_state = dce_v8_0_set_clockgating_state, 3447 .set_powergating_state = dce_v8_0_set_powergating_state, 3448 }; 3449 3450 static void 3451 dce_v8_0_encoder_mode_set(struct drm_encoder *encoder, 3452 struct drm_display_mode *mode, 3453 struct drm_display_mode *adjusted_mode) 3454 { 3455 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 3456 3457 amdgpu_encoder->pixel_clock = adjusted_mode->clock; 3458 3459 /* need to call this here rather than in prepare() since we need some crtc info */ 3460 amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); 3461 3462 /* set scaler clears this on some chips */ 3463 dce_v8_0_set_interleave(encoder->crtc, mode); 3464 3465 if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) { 3466 dce_v8_0_afmt_enable(encoder, true); 3467 dce_v8_0_afmt_setmode(encoder, adjusted_mode); 3468 } 3469 } 3470 3471 static void dce_v8_0_encoder_prepare(struct drm_encoder *encoder) 3472 { 3473 struct amdgpu_device *adev = encoder->dev->dev_private; 3474 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 3475 struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder); 3476 3477 if ((amdgpu_encoder->active_device & 3478 (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) || 3479 (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) != 3480 ENCODER_OBJECT_ID_NONE)) { 3481 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; 3482 if (dig) { 3483 dig->dig_encoder = dce_v8_0_pick_dig_encoder(encoder); 3484 if 
				dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
		}
	}

	amdgpu_atombios_scratch_regs_lock(adev, true);

	if (connector) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		/* select the clock/data port if it uses a router */
		if (amdgpu_connector->router.cd_valid)
			amdgpu_i2c_router_select_cd_port(amdgpu_connector);

		/* turn eDP panel on for mode set */
		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
			amdgpu_atombios_encoder_set_edp_panel_power(connector,
								    ATOM_TRANSMITTER_ACTION_POWER_ON);
	}

	/* this is needed for the pll/ss setup to work correctly in some cases */
	amdgpu_atombios_encoder_set_crtc_source(encoder);
	/* set up the FMT blocks */
	dce_v8_0_program_fmt(encoder);
}

static void dce_v8_0_encoder_commit(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;

	/* need to call this here as we need the crtc set up */
	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
	amdgpu_atombios_scratch_regs_lock(adev, false);
}

static void dce_v8_0_encoder_disable(struct drm_encoder *encoder)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig;

	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);

	if (amdgpu_atombios_encoder_is_digital(encoder)) {
		if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
			dce_v8_0_afmt_enable(encoder, false);
		dig = amdgpu_encoder->enc_priv;
		/* guard against digital encoders without private data */
		if (dig)
			dig->dig_encoder = -1;
	}
	amdgpu_encoder->active_device = 0;
}

/* these are handled by the primary encoders */
static void dce_v8_0_ext_prepare(struct drm_encoder *encoder)
{

}

static void dce_v8_0_ext_commit(struct drm_encoder *encoder)
{

}

static void
dce_v8_0_ext_mode_set(struct drm_encoder *encoder,
		      struct drm_display_mode *mode,
		      struct drm_display_mode *adjusted_mode)
{

}

static void dce_v8_0_ext_disable(struct drm_encoder *encoder)
{

}

static void
dce_v8_0_ext_dpms(struct drm_encoder *encoder, int mode)
{

}

static const struct drm_encoder_helper_funcs dce_v8_0_ext_helper_funcs = {
	.dpms = dce_v8_0_ext_dpms,
	.prepare = dce_v8_0_ext_prepare,
	.mode_set = dce_v8_0_ext_mode_set,
	.commit = dce_v8_0_ext_commit,
	.disable = dce_v8_0_ext_disable,
	/* no detect for TMDS/LVDS yet */
};

static const struct drm_encoder_helper_funcs dce_v8_0_dig_helper_funcs = {
	.dpms = amdgpu_atombios_encoder_dpms,
	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
	.prepare = dce_v8_0_encoder_prepare,
	.mode_set = dce_v8_0_encoder_mode_set,
	.commit = dce_v8_0_encoder_commit,
	.disable = dce_v8_0_encoder_disable,
	.detect = amdgpu_atombios_encoder_dig_detect,
};

static const struct drm_encoder_helper_funcs dce_v8_0_dac_helper_funcs = {
	.dpms = amdgpu_atombios_encoder_dpms,
	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
	.prepare = dce_v8_0_encoder_prepare,
	.mode_set = dce_v8_0_encoder_mode_set,
	.commit = dce_v8_0_encoder_commit,
	.detect = amdgpu_atombios_encoder_dac_detect,
};

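/*
 * kernel-doc added for the otherwise undocumented destroy callback; the
 * description below is derived from the function body that follows.
 */
/**
 * dce_v8_0_encoder_destroy - tear down and free an encoder.
 *
 * @encoder: drm encoder to destroy
 *
 * Finalizes the backlight for LCD encoders, frees the encoder private
 * data, cleans up the DRM encoder and frees the amdgpu_encoder wrapper.
 */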
static void dce_v8_0_encoder_destroy(struct drm_encoder *encoder)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);

	if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
		amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
	kfree(amdgpu_encoder->enc_priv);
	drm_encoder_cleanup(encoder);
	kfree(amdgpu_encoder);
}

static const struct drm_encoder_funcs dce_v8_0_encoder_funcs = {
	.destroy = dce_v8_0_encoder_destroy,
};

/**
 * dce_v8_0_encoder_add - register an encoder parsed from the BIOS tables.
 *
 * @adev: amdgpu_device pointer
 * @encoder_enum: encoder object enum from the BIOS object table
 * @supported_device: bitmask of devices this encoder can drive
 * @caps: encoder capability flags
 *
 * If the encoder was already added, just extend its supported device mask;
 * otherwise allocate a new amdgpu_encoder and hook up the matching DRM
 * encoder and helper functions.
 */
static void dce_v8_0_encoder_add(struct amdgpu_device *adev,
				 uint32_t encoder_enum,
				 uint32_t supported_device,
				 u16 caps)
{
	struct drm_device *dev = adev->ddev;
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;

	/* see if we already added it */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		amdgpu_encoder = to_amdgpu_encoder(encoder);
		if (amdgpu_encoder->encoder_enum == encoder_enum) {
			amdgpu_encoder->devices |= supported_device;
			return;
		}
	}

	/* add a new one */
	amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
	if (!amdgpu_encoder)
		return;

	encoder = &amdgpu_encoder->base;
	switch (adev->mode_info.num_crtc) {
	case 1:
		encoder->possible_crtcs = 0x1;
		break;
	case 2:
	default:
		encoder->possible_crtcs = 0x3;
		break;
	case 4:
		encoder->possible_crtcs = 0xf;
		break;
	case 6:
		encoder->possible_crtcs = 0x3f;
		break;
	}

	amdgpu_encoder->enc_priv = NULL;

	amdgpu_encoder->encoder_enum = encoder_enum;
	amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
	amdgpu_encoder->devices = supported_device;
	amdgpu_encoder->rmx_type = RMX_OFF;
	amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
	amdgpu_encoder->is_ext_encoder = false;
	amdgpu_encoder->caps = caps;

	switch (amdgpu_encoder->encoder_id) {
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
		drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
				 DRM_MODE_ENCODER_DAC, NULL);
		drm_encoder_helper_add(encoder, &dce_v8_0_dac_helper_funcs);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
			amdgpu_encoder->rmx_type = RMX_FULL;
			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
					 DRM_MODE_ENCODER_LVDS, NULL);
			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
		} else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
					 DRM_MODE_ENCODER_DAC, NULL);
			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
		} else {
			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
					 DRM_MODE_ENCODER_TMDS, NULL);
			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
		}
		drm_encoder_helper_add(encoder, &dce_v8_0_dig_helper_funcs);
		break;
	case ENCODER_OBJECT_ID_SI170B:
	case ENCODER_OBJECT_ID_CH7303:
	case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
	case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
	case ENCODER_OBJECT_ID_TITFP513:
	case ENCODER_OBJECT_ID_VT1623:
	case ENCODER_OBJECT_ID_HDMI_SI1930:
	case ENCODER_OBJECT_ID_TRAVIS:
	case ENCODER_OBJECT_ID_NUTMEG:
		/* these are handled by the primary encoders */
		amdgpu_encoder->is_ext_encoder = true;
		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
					 DRM_MODE_ENCODER_LVDS, NULL);
		else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
					 DRM_MODE_ENCODER_DAC, NULL);
		else
			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
					 DRM_MODE_ENCODER_TMDS, NULL);
		drm_encoder_helper_add(encoder, &dce_v8_0_ext_helper_funcs);
		break;
	}
}

static const struct amdgpu_display_funcs dce_v8_0_display_funcs = {
	.set_vga_render_state = &dce_v8_0_set_vga_render_state,
	.bandwidth_update = &dce_v8_0_bandwidth_update,
	.vblank_get_counter = &dce_v8_0_vblank_get_counter,
	.vblank_wait = &dce_v8_0_vblank_wait,
	.is_display_hung = &dce_v8_0_is_display_hung,
	.backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
	.backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
	.hpd_sense = &dce_v8_0_hpd_sense,
	.hpd_set_polarity = &dce_v8_0_hpd_set_polarity,
	.hpd_get_gpio_reg = &dce_v8_0_hpd_get_gpio_reg,
	.page_flip = &dce_v8_0_page_flip,
	.page_flip_get_scanoutpos = &dce_v8_0_crtc_get_scanoutpos,
	.add_encoder = &dce_v8_0_encoder_add,
	.add_connector = &amdgpu_connector_add,
	.stop_mc_access = &dce_v8_0_stop_mc_access,
	.resume_mc_access = &dce_v8_0_resume_mc_access,
};

static void dce_v8_0_set_display_funcs(struct amdgpu_device *adev)
{
	if (adev->mode_info.funcs == NULL)
		adev->mode_info.funcs = &dce_v8_0_display_funcs;
}

static const struct amdgpu_irq_src_funcs dce_v8_0_crtc_irq_funcs = {
	.set = dce_v8_0_set_crtc_interrupt_state,
	.process = dce_v8_0_crtc_irq,
};

static const struct amdgpu_irq_src_funcs dce_v8_0_pageflip_irq_funcs = {
	.set = dce_v8_0_set_pageflip_interrupt_state,
	.process = dce_v8_0_pageflip_irq,
};

static const struct amdgpu_irq_src_funcs dce_v8_0_hpd_irq_funcs = {
	.set = dce_v8_0_set_hpd_interrupt_state,
	.process = dce_v8_0_hpd_irq,
};

static void dce_v8_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
	adev->crtc_irq.funcs = &dce_v8_0_crtc_irq_funcs;

	adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
	adev->pageflip_irq.funcs = &dce_v8_0_pageflip_irq_funcs;

	adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
	adev->hpd_irq.funcs = &dce_v8_0_hpd_irq_funcs;
}